Dataset columns (name, dtype, value range):

  repo_name  stringlengths   6 to 100
  path       stringlengths   4 to 294
  copies     stringlengths   1 to 5
  size       stringlengths   4 to 6
  content    stringlengths   606 to 896k
  license    stringclasses   15 values
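Each record below repeats these six fields in order, with the file contents inline. As a minimal sketch of how rows with this schema could be filtered: the JSON-lines storage layout and the file name "code_files.jsonl" are assumptions made for illustration, not part of the dataset.

import json

def iter_rows(path):
    """Yield one dict per record, keyed by the six columns above."""
    with open(path, encoding="utf-8") as fp:
        for line in fp:
            yield json.loads(line)

# Example: collect only Apache-licensed Python files under 10 kB.
small_apache_py = [
    row for row in iter_rows("code_files.jsonl")  # hypothetical file name
    if row["license"] == "apache-2.0"
    and row["path"].endswith(".py")
    and int(row["size"]) < 10000
]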
repo_name: jnewland/home-assistant
path: homeassistant/components/glances/sensor.py
copies: 3
size: 9346
content:
"""Support gathering system information of hosts which are running glances.""" from datetime import timedelta import logging import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import ( CONF_HOST, CONF_NAME, CONF_PORT, CONF_USERNAME, CONF_PASSWORD, CONF_SSL, CONF_VERIFY_SSL, CONF_RESOURCES, STATE_UNAVAILABLE, TEMP_CELSIUS) from homeassistant.exceptions import PlatformNotReady from homeassistant.helpers.aiohttp_client import async_get_clientsession import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import Entity from homeassistant.util import Throttle _LOGGER = logging.getLogger(__name__) CONF_VERSION = 'version' DEFAULT_HOST = 'localhost' DEFAULT_NAME = 'Glances' DEFAULT_PORT = '61208' DEFAULT_VERSION = 2 MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=1) SENSOR_TYPES = { 'disk_use_percent': ['Disk used', '%', 'mdi:harddisk'], 'disk_use': ['Disk used', 'GiB', 'mdi:harddisk'], 'disk_free': ['Disk free', 'GiB', 'mdi:harddisk'], 'memory_use_percent': ['RAM used', '%', 'mdi:memory'], 'memory_use': ['RAM used', 'MiB', 'mdi:memory'], 'memory_free': ['RAM free', 'MiB', 'mdi:memory'], 'swap_use_percent': ['Swap used', '%', 'mdi:memory'], 'swap_use': ['Swap used', 'GiB', 'mdi:memory'], 'swap_free': ['Swap free', 'GiB', 'mdi:memory'], 'processor_load': ['CPU load', '15 min', 'mdi:memory'], 'process_running': ['Running', 'Count', 'mdi:memory'], 'process_total': ['Total', 'Count', 'mdi:memory'], 'process_thread': ['Thread', 'Count', 'mdi:memory'], 'process_sleeping': ['Sleeping', 'Count', 'mdi:memory'], 'cpu_use_percent': ['CPU used', '%', 'mdi:memory'], 'cpu_temp': ['CPU Temp', TEMP_CELSIUS, 'mdi:thermometer'], 'docker_active': ['Containers active', '', 'mdi:docker'], 'docker_cpu_use': ['Containers CPU used', '%', 'mdi:docker'], 'docker_memory_use': ['Containers RAM used', 'MiB', 'mdi:docker'], } PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_HOST, default=DEFAULT_HOST): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_USERNAME): cv.string, vol.Optional(CONF_PASSWORD): cv.string, vol.Optional(CONF_SSL, default=False): cv.boolean, vol.Optional(CONF_VERIFY_SSL, default=True): cv.boolean, vol.Optional(CONF_RESOURCES, default=['disk_use']): vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]), vol.Optional(CONF_VERSION, default=DEFAULT_VERSION): vol.In([2, 3]), }) async def async_setup_platform( hass, config, async_add_entities, discovery_info=None): """Set up the Glances sensors.""" from glances_api import Glances name = config[CONF_NAME] host = config[CONF_HOST] port = config[CONF_PORT] version = config[CONF_VERSION] var_conf = config[CONF_RESOURCES] username = config.get(CONF_USERNAME) password = config.get(CONF_PASSWORD) ssl = config[CONF_SSL] verify_ssl = config[CONF_VERIFY_SSL] session = async_get_clientsession(hass, verify_ssl) glances = GlancesData( Glances(hass.loop, session, host=host, port=port, version=version, username=username, password=password, ssl=ssl)) await glances.async_update() if glances.api.data is None: raise PlatformNotReady dev = [] for resource in var_conf: dev.append(GlancesSensor(glances, name, resource)) async_add_entities(dev, True) class GlancesSensor(Entity): """Implementation of a Glances sensor.""" def __init__(self, glances, name, sensor_type): """Initialize the sensor.""" self.glances = glances self._name = name self.type = sensor_type self._state = None 
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1] @property def name(self): """Return the name of the sensor.""" return '{} {}'.format(self._name, SENSOR_TYPES[self.type][0]) @property def icon(self): """Icon to use in the frontend, if any.""" return SENSOR_TYPES[self.type][2] @property def unit_of_measurement(self): """Return the unit the value is expressed in.""" return self._unit_of_measurement @property def available(self): """Could the device be accessed during the last update call.""" return self.glances.available @property def state(self): """Return the state of the resources.""" return self._state async def async_update(self): """Get the latest data from REST API.""" await self.glances.async_update() value = self.glances.api.data if value is not None: if self.type == 'disk_use_percent': self._state = value['fs'][0]['percent'] elif self.type == 'disk_use': self._state = round(value['fs'][0]['used'] / 1024**3, 1) elif self.type == 'disk_free': try: self._state = round(value['fs'][0]['free'] / 1024**3, 1) except KeyError: self._state = round((value['fs'][0]['size'] - value['fs'][0]['used']) / 1024**3, 1) elif self.type == 'memory_use_percent': self._state = value['mem']['percent'] elif self.type == 'memory_use': self._state = round(value['mem']['used'] / 1024**2, 1) elif self.type == 'memory_free': self._state = round(value['mem']['free'] / 1024**2, 1) elif self.type == 'swap_use_percent': self._state = value['memswap']['percent'] elif self.type == 'swap_use': self._state = round(value['memswap']['used'] / 1024**3, 1) elif self.type == 'swap_free': self._state = round(value['memswap']['free'] / 1024**3, 1) elif self.type == 'processor_load': # Windows systems don't provide load details try: self._state = value['load']['min15'] except KeyError: self._state = value['cpu']['total'] elif self.type == 'process_running': self._state = value['processcount']['running'] elif self.type == 'process_total': self._state = value['processcount']['total'] elif self.type == 'process_thread': self._state = value['processcount']['thread'] elif self.type == 'process_sleeping': self._state = value['processcount']['sleeping'] elif self.type == 'cpu_use_percent': self._state = value['quicklook']['cpu'] elif self.type == 'cpu_temp': for sensor in value['sensors']: if sensor['label'] in ['CPU', "CPU Temperature", "Package id 0", "Physical id 0", "cpu_thermal 1", "cpu-thermal 1", "exynos-therm 1", "soc_thermal 1"]: self._state = sensor['value'] elif self.type == 'docker_active': count = 0 try: for container in value['docker']['containers']: if container['Status'] == 'running' or \ 'Up' in container['Status']: count += 1 self._state = count except KeyError: self._state = count elif self.type == 'docker_cpu_use': cpu_use = 0.0 try: for container in value['docker']['containers']: if container['Status'] == 'running' or \ 'Up' in container['Status']: cpu_use += container['cpu']['total'] self._state = round(cpu_use, 1) except KeyError: self._state = STATE_UNAVAILABLE elif self.type == 'docker_memory_use': mem_use = 0.0 try: for container in value['docker']['containers']: if container['Status'] == 'running' or \ 'Up' in container['Status']: mem_use += container['memory']['usage'] self._state = round(mem_use / 1024**2, 1) except KeyError: self._state = STATE_UNAVAILABLE class GlancesData: """The class for handling the data retrieval.""" def __init__(self, api): """Initialize the data object.""" self.api = api self.available = True @Throttle(MIN_TIME_BETWEEN_UPDATES) async def async_update(self): """Get the 
latest data from the Glances REST API.""" from glances_api.exceptions import GlancesApiError try: await self.api.get_data() self.available = True except GlancesApiError: _LOGGER.error("Unable to fetch data from Glances") self.available = False
license: apache-2.0
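The disk sensors in the file above turn raw byte counts from the Glances REST payload into GiB, rounding to one decimal, and derive free space from size minus used when no 'free' field is present. A small self-contained illustration of the same arithmetic, with invented payload numbers:

sample = {'fs': [{'used': 52613349376, 'size': 250790436864}]}

disk_use_gib = round(sample['fs'][0]['used'] / 1024**3, 1)     # 49.0
disk_free_gib = round((sample['fs'][0]['size'] -
                       sample['fs'][0]['used']) / 1024**3, 1)  # 184.6
print(disk_use_gib, disk_free_gib)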
repo_name: SnabbCo/neutron
path: neutron/db/migration/alembic_migrations/env.py
copies: 1
size: 3054
content:
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost

from logging import config as logging_config

from alembic import context
from sqlalchemy import create_engine, pool

from neutron.db import model_base
from neutron.openstack.common import importutils


DATABASE_QUOTA_DRIVER = 'neutron.extensions._quotav2_driver.DbQuotaDriver'

# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
neutron_config = config.neutron_config

# Interpret the config file for Python logging.
# This line sets up loggers basically.
logging_config.fileConfig(config.config_file_name)

plugin_class_path = neutron_config.core_plugin
active_plugins = [plugin_class_path]
active_plugins += neutron_config.service_plugins

for class_path in active_plugins:
    importutils.import_class(class_path)

# set the target for 'autogenerate' support
target_metadata = model_base.BASEV2.metadata


def run_migrations_offline():
    """Run migrations in 'offline' mode.

    This configures the context with just a URL and not an Engine,
    though an Engine is acceptable here as well.  By skipping the Engine
    creation we don't even need a DBAPI to be available.

    Calls to context.execute() here emit the given string to the
    script output.

    """
    context.configure(url=neutron_config.database.connection)

    with context.begin_transaction():
        context.run_migrations(active_plugins=active_plugins,
                               options=build_options())


def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine and associate a
    connection with the context.

    """
    engine = create_engine(
        neutron_config.database.connection,
        poolclass=pool.NullPool)

    connection = engine.connect()
    context.configure(
        connection=connection,
        target_metadata=target_metadata
    )

    try:
        with context.begin_transaction():
            context.run_migrations(active_plugins=active_plugins,
                                   options=build_options())
    finally:
        connection.close()


def build_options():
    return {'folsom_quota_db_enabled': is_db_quota_enabled()}


def is_db_quota_enabled():
    return neutron_config.QUOTAS.quota_driver == DATABASE_QUOTA_DRIVER


if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
license: apache-2.0
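The env.py above follows the standard Alembic dispatch: offline mode renders SQL from just a URL, online mode binds a real connection and closes it afterwards. A stripped-down sketch of the same pattern; the sqlite URL is a placeholder (neutron reads its connection string from neutron_config), and the script is only meaningful when executed by the alembic command:

from alembic import context
from sqlalchemy import create_engine, pool

DB_URL = "sqlite:///example.db"  # placeholder connection string


def run_offline():
    # No DBAPI needed: SQL is emitted to the script output.
    context.configure(url=DB_URL)
    with context.begin_transaction():
        context.run_migrations()


def run_online():
    # Real connection; NullPool avoids holding connections open.
    engine = create_engine(DB_URL, poolclass=pool.NullPool)
    connection = engine.connect()
    context.configure(connection=connection)
    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        connection.close()


if context.is_offline_mode():
    run_offline()
else:
    run_online()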
repo_name: JimCircadian/ansible
path: lib/ansible/modules/cloud/amazon/redshift.py
copies: 50
size: 17408
content:
#!/usr/bin/python

# Copyright 2014 Jens Carl, Hothead Games Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type


ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}


DOCUMENTATION = '''
---
author:
  - "Jens Carl (@j-carl), Hothead Games Inc."
module: redshift
version_added: "2.2"
short_description: create, delete, or modify an Amazon Redshift instance
description:
  - Creates, deletes, or modifies Amazon Redshift cluster instances.
options:
  command:
    description:
      - Specifies the action to take.
    required: true
    choices: [ 'create', 'facts', 'delete', 'modify' ]
  identifier:
    description:
      - Redshift cluster identifier.
    required: true
  node_type:
    description:
      - The node type of the cluster. Must be specified when command=create.
    choices: ['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge', 'ds2.8xlarge', 'dc1.large', 'dc1.8xlarge',
              'dc2.large', 'dc2.8xlarge', 'dw1.xlarge', 'dw1.8xlarge', 'dw2.large', 'dw2.8xlarge']
  username:
    description:
      - Master database username. Used only when command=create.
  password:
    description:
      - Master database password. Used only when command=create.
  cluster_type:
    description:
      - The type of cluster.
    choices: ['multi-node', 'single-node']
    default: 'single-node'
  db_name:
    description:
      - Name of the database.
  availability_zone:
    description:
      - Availability zone in which to launch the cluster.
    aliases: ['zone', 'aws_zone']
  number_of_nodes:
    description:
      - Number of nodes. Only used when cluster_type=multi-node.
  cluster_subnet_group_name:
    description:
      - Which subnet to place the cluster in.
    aliases: ['subnet']
  cluster_security_groups:
    description:
      - Which security group the cluster belongs to.
    aliases: ['security_groups']
  vpc_security_group_ids:
    description:
      - VPC security group.
    aliases: ['vpc_security_groups']
  skip_final_cluster_snapshot:
    description:
      - Skip a final snapshot before deleting the cluster. Used only when command=delete.
    aliases: ['skip_final_snapshot']
    default: 'no'
    version_added: "2.4"
  final_cluster_snapshot_identifier:
    description:
      - Identifier of the final snapshot to be created before deleting the cluster. If this parameter
        is provided, skip_final_cluster_snapshot must be false. Used only when command=delete.
    aliases: ['final_snapshot_id']
    version_added: "2.4"
  preferred_maintenance_window:
    description:
      - Maintenance window.
    aliases: ['maintance_window', 'maint_window']
  cluster_parameter_group_name:
    description:
      - Name of the cluster parameter group.
    aliases: ['param_group_name']
  automated_snapshot_retention_period:
    description:
      - Period when the snapshot take place.
    aliases: ['retention_period']
  port:
    description:
      - Which port the cluster is listening on.
  cluster_version:
    description:
      - Which version the cluster should have.
    aliases: ['version']
    choices: ['1.0']
  allow_version_upgrade:
    description:
      - Flag that determines whether version upgrades are allowed.
    aliases: ['version_upgrade']
    default: 'yes'
  publicly_accessible:
    description:
      - Whether the cluster is publicly accessible or not.
    default: 'no'
  encrypted:
    description:
      - Whether the cluster is encrypted or not.
    default: 'no'
  elastic_ip:
    description:
      - Whether the cluster has an elastic IP or not.
  new_cluster_identifier:
    description:
      - Only used when command=modify.
    aliases: ['new_identifier']
  wait:
    description:
      - When command=create, modify or restore then wait for the database to enter the 'available'
        state. When command=delete wait for the database to be terminated.
    type: bool
    default: 'no'
  wait_timeout:
    description:
      - How long before wait gives up, in seconds.
    default: 300
requirements: [ 'boto' ]
extends_documentation_fragment:
  - aws
  - ec2
'''

EXAMPLES = '''
# Basic cluster provisioning example
- redshift: >
    command=create node_type=ds1.xlarge identifier=new_cluster username=cluster_admin password=1nsecure

# Cluster delete example
- redshift:
    command: delete
    identifier: new_cluster
    skip_final_cluster_snapshot: true
    wait: true
'''

RETURN = '''
cluster:
    description: dictionary containing all the cluster information
    returned: success
    type: complex
    contains:
        identifier:
            description: Id of the cluster.
            returned: success
            type: string
            sample: "new_redshift_cluster"
        create_time:
            description: Time of the cluster creation as timestamp.
            returned: success
            type: float
            sample: 1430158536.308
        status:
            description: Status of the cluster.
            returned: success
            type: string
            sample: "available"
        db_name:
            description: Name of the database.
            returned: success
            type: string
            sample: "new_db_name"
        availability_zone:
            description: Amazon availability zone where the cluster is located.
            returned: success
            type: string
            sample: "us-east-1b"
        maintenance_window:
            description: Time frame when maintenance/upgrade are done.
            returned: success
            type: string
            sample: "sun:09:30-sun:10:00"
        private_ip_address:
            description: Private IP address of the main node.
            returned: success
            type: string
            sample: "10.10.10.10"
        public_ip_address:
            description: Public IP address of the main node.
            returned: success
            type: string
            sample: "0.0.0.0"
        port:
            description: Port of the cluster.
            returned: success
            type: int
            sample: 5439
        url:
            description: FQDN of the main cluster node.
            returned: success
            type: string
            sample: "new-redshift_cluster.jfkdjfdkj.us-east-1.redshift.amazonaws.com"
'''

import time

try:
    import boto.exception
    import boto.redshift
except ImportError:
    pass  # Taken care of by ec2.HAS_BOTO

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import HAS_BOTO, connect_to_aws, ec2_argument_spec, get_aws_connection_info


def _collect_facts(resource):
    """Transform cluster information to dict."""
    facts = {
        'identifier': resource['ClusterIdentifier'],
        'create_time': resource['ClusterCreateTime'],
        'status': resource['ClusterStatus'],
        'username': resource['MasterUsername'],
        'db_name': resource['DBName'],
        'availability_zone': resource['AvailabilityZone'],
        'maintenance_window': resource['PreferredMaintenanceWindow'],
        'url': resource['Endpoint']['Address'],
        'port': resource['Endpoint']['Port']
    }

    for node in resource['ClusterNodes']:
        if node['NodeRole'] in ('SHARED', 'LEADER'):
            facts['private_ip_address'] = node['PrivateIPAddress']
            facts['public_ip_address'] = node['PublicIPAddress']
            break

    return facts


def create_cluster(module, redshift):
    """
    Create a new cluster

    module: AnsibleModule object
    redshift: authenticated redshift connection object

    Returns:
    """
    identifier = module.params.get('identifier')
    node_type = module.params.get('node_type')
    username = module.params.get('username')
    password = module.params.get('password')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')

    changed = True
    # Package up the optional parameters
    params = {}
    for p in ('db_name', 'cluster_type', 'cluster_security_groups',
              'vpc_security_group_ids', 'cluster_subnet_group_name',
              'availability_zone', 'preferred_maintenance_window',
              'cluster_parameter_group_name',
              'automated_snapshot_retention_period', 'port',
              'cluster_version', 'allow_version_upgrade',
              'number_of_nodes', 'publicly_accessible',
              'encrypted', 'elastic_ip', 'enhanced_vpc_routing'):
        if p in module.params:
            params[p] = module.params.get(p)

    try:
        redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
        changed = False
    except boto.exception.JSONResponseError as e:
        try:
            redshift.create_cluster(identifier, node_type, username, password, **params)
        except boto.exception.JSONResponseError as e:
            module.fail_json(msg=str(e))

    try:
        resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
    except boto.exception.JSONResponseError as e:
        module.fail_json(msg=str(e))

    if wait:
        try:
            wait_timeout = time.time() + wait_timeout
            time.sleep(5)

            while wait_timeout > time.time() and resource['ClusterStatus'] != 'available':
                time.sleep(5)
                if wait_timeout <= time.time():
                    module.fail_json(msg="Timeout waiting for resource %s" % resource.id)

                resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]

        except boto.exception.JSONResponseError as e:
            module.fail_json(msg=str(e))

    return(changed, _collect_facts(resource))


def describe_cluster(module, redshift):
    """
    Collect data about the cluster.

    module: Ansible module object
    redshift: authenticated redshift connection object
    """
    identifier = module.params.get('identifier')

    try:
        resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
    except boto.exception.JSONResponseError as e:
        module.fail_json(msg=str(e))

    return(True, _collect_facts(resource))


def delete_cluster(module, redshift):
    """
    Delete a cluster.

    module: Ansible module object
    redshift: authenticated redshift connection object
    """

    identifier = module.params.get('identifier')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')
    skip_final_cluster_snapshot = module.params.get('skip_final_cluster_snapshot')
    final_cluster_snapshot_identifier = module.params.get('final_cluster_snapshot_identifier')

    try:
        redshift.delete_cluster(
            identifier,
            skip_final_cluster_snapshot,
            final_cluster_snapshot_identifier
        )
    except boto.exception.JSONResponseError as e:
        module.fail_json(msg=str(e))

    if wait:
        try:
            wait_timeout = time.time() + wait_timeout
            resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]

            while wait_timeout > time.time() and resource['ClusterStatus'] != 'deleting':
                time.sleep(5)
                if wait_timeout <= time.time():
                    module.fail_json(msg="Timeout waiting for resource %s" % resource.id)

                resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]

        except boto.exception.JSONResponseError as e:
            module.fail_json(msg=str(e))

    return(True, {})


def modify_cluster(module, redshift):
    """
    Modify an existing cluster.

    module: Ansible module object
    redshift: authenticated redshift connection object
    """

    identifier = module.params.get('identifier')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')

    # Package up the optional parameters
    params = {}
    for p in ('cluster_type', 'cluster_security_groups',
              'vpc_security_group_ids', 'cluster_subnet_group_name',
              'availability_zone', 'preferred_maintenance_window',
              'cluster_parameter_group_name',
              'automated_snapshot_retention_period', 'port', 'cluster_version',
              'allow_version_upgrade', 'number_of_nodes', 'new_cluster_identifier',
              'enhanced_vpc_routing'):
        if p in module.params:
            params[p] = module.params.get(p)

    try:
        redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
    except boto.exception.JSONResponseError as e:
        try:
            redshift.modify_cluster(identifier, **params)
        except boto.exception.JSONResponseError as e:
            module.fail_json(msg=str(e))

    try:
        resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
    except boto.exception.JSONResponseError as e:
        module.fail_json(msg=str(e))

    if wait:
        try:
            wait_timeout = time.time() + wait_timeout
            time.sleep(5)

            while wait_timeout > time.time() and resource['ClusterStatus'] != 'available':
                time.sleep(5)
                if wait_timeout <= time.time():
                    module.fail_json(msg="Timeout waiting for resource %s" % resource.id)

                resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]

        except boto.exception.JSONResponseError as e:
            # https://github.com/boto/boto/issues/2776 is fixed.
            module.fail_json(msg=str(e))

    return(True, _collect_facts(resource))


def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        command=dict(choices=['create', 'facts', 'delete', 'modify'], required=True),
        identifier=dict(required=True),
        node_type=dict(choices=['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge', 'ds2.8xlarge', 'dc1.large', 'dc2.large',
                                'dc1.8xlarge', 'dw1.xlarge', 'dw1.8xlarge', 'dw2.large', 'dw2.8xlarge'], required=False),
        username=dict(required=False),
        password=dict(no_log=True, required=False),
        db_name=dict(required=False),
        cluster_type=dict(choices=['multi-node', 'single-node', ], default='single-node'),
        cluster_security_groups=dict(aliases=['security_groups'], type='list'),
        vpc_security_group_ids=dict(aliases=['vpc_security_groups'], type='list'),
        skip_final_cluster_snapshot=dict(aliases=['skip_final_snapshot'], type='bool', default=False),
        final_cluster_snapshot_identifier=dict(aliases=['final_snapshot_id'], required=False),
        cluster_subnet_group_name=dict(aliases=['subnet']),
        availability_zone=dict(aliases=['aws_zone', 'zone']),
        preferred_maintenance_window=dict(aliases=['maintance_window', 'maint_window']),
        cluster_parameter_group_name=dict(aliases=['param_group_name']),
        automated_snapshot_retention_period=dict(aliases=['retention_period']),
        port=dict(type='int'),
        cluster_version=dict(aliases=['version'], choices=['1.0']),
        allow_version_upgrade=dict(aliases=['version_upgrade'], type='bool', default=True),
        number_of_nodes=dict(type='int'),
        publicly_accessible=dict(type='bool', default=False),
        encrypted=dict(type='bool', default=False),
        elastic_ip=dict(required=False),
        new_cluster_identifier=dict(aliases=['new_identifier']),
        enhanced_vpc_routing=dict(type='bool', default=False),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(type='int', default=300),
    ))

    required_if = [
        ('command', 'delete', ['skip_final_cluster_snapshot']),
        ('skip_final_cluster_snapshot', False, ['final_cluster_snapshot_identifier'])
    ]

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_if=required_if
    )

    if not HAS_BOTO:
        module.fail_json(msg='boto v2.9.0+ required for this module')

    command = module.params.get('command')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg=str("region not specified and unable to determine region from EC2_REGION."))

    # connect to the rds endpoint
    try:
        conn = connect_to_aws(boto.redshift, region, **aws_connect_params)
    except boto.exception.JSONResponseError as e:
        module.fail_json(msg=str(e))

    changed = True
    if command == 'create':
        (changed, cluster) = create_cluster(module, conn)

    elif command == 'facts':
        (changed, cluster) = describe_cluster(module, conn)

    elif command == 'delete':
        (changed, cluster) = delete_cluster(module, conn)

    elif command == 'modify':
        (changed, cluster) = modify_cluster(module, conn)

    module.exit_json(changed=changed, cluster=cluster)


if __name__ == '__main__':
    main()
license: gpl-3.0
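create_cluster, delete_cluster and modify_cluster above all implement the same wait strategy: convert wait_timeout into an absolute deadline, then re-poll describe_clusters every five seconds until the desired ClusterStatus appears. A generic, runnable sketch of that loop; wait_for_status and fetch_status are illustrative names, not part of the module:

import time

def wait_for_status(fetch_status, desired, timeout=300, interval=5):
    """Poll fetch_status() until it returns `desired` or `timeout` expires."""
    deadline = time.time() + timeout
    status = fetch_status()
    while status != desired:
        if time.time() >= deadline:
            raise TimeoutError("gave up waiting for status %r" % desired)
        time.sleep(interval)
        status = fetch_status()  # re-query, mirroring describe_clusters above
    return status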
repo_name: lehinevych/cfme_tests
path: cfme/containers/provider.py
copies: 2
size: 9691
content:
from cfme.common.provider import BaseProvider
from cfme.fixtures import pytest_selenium as sel
from cfme.web_ui import (
    Quadicon, Form, AngularSelect, form_buttons, Input, toolbar as tb,
    InfoBlock, Region
)
from cfme.web_ui.menu import nav
from cfme.web_ui.tabstrip import TabStripForm
from utils import deferred_verpick, version
from utils.browser import ensure_browser_open
from utils.db import cfmedb
from utils.pretty import Pretty
from utils.varmeth import variable
from . import cfg_btn, mon_btn, pol_btn, details_page

nav.add_branch(
    'containers_providers',
    {
        'containers_provider_new':
            lambda _: cfg_btn('Add a New Containers Provider'),
        'containers_provider':
        [
            lambda ctx: sel.check(Quadicon(ctx['provider'].name, None).checkbox),
            {
                'containers_provider_edit':
                    lambda _: cfg_btn('Edit Selected Containers Provider'),
                'containers_provider_edit_tags':
                    lambda _: pol_btn('Edit Tags')
            }],
        'containers_provider_detail':
        [
            lambda ctx: sel.click(Quadicon(ctx['provider'].name, None)),
            {
                'containers_provider_edit_detail':
                    lambda _: cfg_btn('Edit this Containers Provider'),
                'containers_provider_timelines_detail':
                    lambda _: mon_btn('Timelines'),
                'containers_provider_edit_tags_detail':
                    lambda _: pol_btn('Edit Tags'),
                'containers_provider_topology_detail':
                    lambda _: sel.click(InfoBlock('Overview', 'Topology'))
            }]
    }
)

properties_form = Form(
    fields=[
        ('type_select', AngularSelect('server_emstype')),
        ('name_text', Input('name')),
        ('hostname_text', Input('hostname')),
        ('port_text', Input('port'))
    ])

properties_form_56 = TabStripForm(
    fields=[
        ('type_select', AngularSelect('ems_type')),
        ('name_text', Input('name'))
    ],
    tab_fields={
        "Default": [
            ('hostname_text', Input("default_hostname")),
            ('port_text', Input("default_api_port")),
            ('sec_protocol', AngularSelect("default_security_protocol")),
        ],
        "Hawkular": [
            ('hawkular_hostname', Input("hawkular_hostname")),
            ('hawkular_api_port', Input("hawkular_api_port"))
        ],
    })

prop_region = Region(
    locators={
        'properties_form': {
            version.LOWEST: properties_form,
            '5.6': properties_form_56,
        }
    }
)


class Provider(BaseProvider, Pretty):
    pretty_attrs = ['name', 'key', 'zone']
    STATS_TO_MATCH = [
        'num_project', 'num_service', 'num_replication_controller',
        'num_pod', 'num_node', 'num_container', 'num_image']
    # TODO add 'num_image_registry' and 'num_volume'
    string_name = "Containers"
    page_name = "containers"
    detail_page_suffix = 'provider_detail'
    edit_page_suffix = 'provider_edit_detail'
    refresh_text = "Refresh items and relationships"
    quad_name = None
    _properties_region = prop_region  # This will get resolved in common to a real form
    add_provider_button = deferred_verpick(
        {version.LOWEST: form_buttons.FormButton("Add this Containers Provider"),
         '5.6': form_buttons.add})
    save_button = deferred_verpick(
        {version.LOWEST: form_buttons.save,
         '5.6': form_buttons.angular_save})

    def __init__(self, name=None, credentials=None, key=None, zone=None,
                 hostname=None, port=None, provider_data=None):
        if not credentials:
            credentials = {}
        self.name = name
        self.credentials = credentials
        self.key = key
        self.zone = zone
        self.hostname = hostname
        self.port = port
        self.provider_data = provider_data

    def _on_detail_page(self):
        """ Returns ``True`` if on the providers detail page, ``False`` if not."""
        ensure_browser_open()
        return sel.is_displayed(
            '//div//h1[contains(., "{} (Summary)")]'.format(self.name))

    def load_details(self, refresh=False):
        if not self._on_detail_page():
            self.navigate(detail=True)
        elif refresh:
            tb.refresh()

    def navigate(self, detail=True):
        if detail is True:
            if not self._on_detail_page():
                sel.force_navigate('containers_provider_detail',
                                   context={'provider': self})
        else:
            sel.force_navigate('containers_provider', context={'provider': self})

    def get_detail(self, *ident):
        """ Gets details from the details infoblock

        Args:
            *ident: An InfoBlock title, followed by the Key name,
                e.g. "Relationships", "Images"
        Returns: A string representing the contents of the InfoBlock's value.
        """
        self.navigate(detail=True)
        return details_page.infoblock.text(*ident)

    @variable(alias='db')
    def num_project(self):
        return self._num_db_generic('container_projects')

    @num_project.variant('ui')
    def num_project_ui(self):
        return int(self.get_detail("Relationships", "Projects"))

    @variable(alias='db')
    def num_service(self):
        return self._num_db_generic('container_services')

    @num_service.variant('ui')
    def num_service_ui(self):
        return int(self.get_detail("Relationships", "Services"))

    @variable(alias='db')
    def num_replication_controller(self):
        return self._num_db_generic('container_replicators')

    @num_replication_controller.variant('ui')
    def num_replication_controller_ui(self):
        return int(self.get_detail("Relationships", "Replicators"))

    @variable(alias='db')
    def num_container_group(self):
        return self._num_db_generic('container_groups')

    @num_container_group.variant('ui')
    def num_container_group_ui(self):
        return int(self.get_detail("Relationships", "Pods"))

    @variable(alias='db')
    def num_pod(self):
        # potato tomato
        return self.num_container_group()

    @num_pod.variant('ui')
    def num_pod_ui(self):
        # potato tomato
        return self.num_container_group(method='ui')

    @variable(alias='db')
    def num_node(self):
        return self._num_db_generic('container_nodes')

    @num_node.variant('ui')
    def num_node_ui(self):
        return int(self.get_detail("Relationships", "Nodes"))

    @variable(alias='db')
    def num_container(self):
        # Containers are linked to providers through container definitions and then through pods
        res = cfmedb().engine.execute(
            "SELECT count(*) "
            "FROM ext_management_systems, container_groups, container_definitions, containers "
            "WHERE containers.container_definition_id=container_definitions.id "
            "AND container_definitions.container_group_id=container_groups.id "
            "AND container_groups.ems_id=ext_management_systems.id "
            "AND ext_management_systems.name='{}'".format(self.name))
        return int(res.first()[0])

    @num_container.variant('ui')
    def num_container_ui(self):
        return int(self.get_detail("Relationships", "Containers"))

    @variable(alias='db')
    def num_image(self):
        return self._num_db_generic('container_images')

    @num_image.variant('ui')
    def num_image_ui(self):
        return int(self.get_detail("Relationships", "Images"))

    @variable(alias='db')
    def num_image_registry(self):
        return self._num_db_generic('container_image_registries')

    @num_image_registry.variant('ui')
    def num_image_registry_ui(self):
        return int(self.get_detail("Relationships", "Image Registries"))


class KubernetesProvider(Provider):

    def __init__(self, name=None, credentials=None, key=None, zone=None,
                 hostname=None, port=None, provider_data=None):
        super(KubernetesProvider, self).__init__(
            name=name, credentials=credentials, key=key, zone=zone,
            hostname=hostname, port=port, provider_data=provider_data)

    def _form_mapping(self, create=None, **kwargs):
        return {'name_text': kwargs.get('name'),
                'type_select': create and 'Kubernetes',
                'hostname_text': kwargs.get('hostname'),
                'port_text': kwargs.get('port'),
                'zone_select': kwargs.get('zone')}


class OpenshiftProvider(Provider):
    STATS_TO_MATCH = Provider.STATS_TO_MATCH + ['num_route']

    def __init__(self, name=None, credentials=None, key=None, zone=None,
                 hostname=None, port=None, provider_data=None):
        super(OpenshiftProvider, self).__init__(
            name=name, credentials=credentials, key=key, zone=zone,
            hostname=hostname, port=port, provider_data=provider_data)

    def create(self, validate_credentials=True, **kwargs):
        # Workaround - randomly fails on 5.5.0.8 with no validation
        # probably a js wait issue, not reproducible manually
        super(OpenshiftProvider, self).create(
            validate_credentials=validate_credentials, **kwargs)

    def _form_mapping(self, create=None, **kwargs):
        return {'name_text': kwargs.get('name'),
                'type_select': create and 'OpenShift',
                'hostname_text': kwargs.get('hostname'),
                'port_text': kwargs.get('port'),
                'zone_select': kwargs.get('zone')}

    @variable(alias='db')
    def num_route(self):
        return self._num_db_generic('container_routes')

    @num_route.variant('ui')
    def num_route_ui(self):
        return int(self.get_detail("Relationships", "Routes"))
license: gpl-2.0
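The @variable(alias='db') / .variant('ui') pairs above expose each statistic under a single name with a selectable backend, which is how num_container_group(method='ui') is dispatched inside the file itself. A hypothetical usage snippet; the provider name and a configured cfme_tests appliance are assumed:

# Only runs inside a configured cfme_tests environment.
provider = OpenshiftProvider(name='my-openshift')   # hypothetical provider
pods_from_db = provider.num_pod()                   # default alias: query via cfmedb()
pods_from_ui = provider.num_pod(method='ui')        # same figure read from the summary page
assert pods_from_db == pods_from_ui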
repo_name: christophersu/sublime-evernote
path: lib/markdown2.py
copies: 5
size: 101145
content:
#!/usr/bin/env python
# Copyright (c) 2012 Trent Mick.
# Copyright (c) 2007-2008 ActiveState Corp.
# License: MIT (http://www.opensource.org/licenses/mit-license.php)

from __future__ import generators

r"""A fast and complete Python implementation of Markdown.

[from http://daringfireball.net/projects/markdown/]
> Markdown is a text-to-HTML filter; it translates an easy-to-read /
> easy-to-write structured text format into HTML.  Markdown's text
> format is most similar to that of plain text email, and supports
> features such as headers, *emphasis*, code blocks, blockquotes, and
> links.
>
> Markdown's syntax is designed not as a generic markup language, but
> specifically to serve as a front-end to (X)HTML. You can use span-level
> HTML tags anywhere in a Markdown document, and you can use block level
> HTML tags (like <div> and <table> as well).

Module usage:

    >>> import markdown2
    >>> markdown2.markdown("*boo!*")  # or use `html = markdown_path(PATH)`
    u'<p><em>boo!</em></p>\n'

    >>> markdowner = Markdown()
    >>> markdowner.convert("*boo!*")
    u'<p><em>boo!</em></p>\n'
    >>> markdowner.convert("**boom!**")
    u'<p><strong>boom!</strong></p>\n'

This implementation of Markdown implements the full "core" syntax plus a
number of extras (e.g., code syntax coloring, footnotes) as described on
<https://github.com/trentm/python-markdown2/wiki/Extras>.
"""

cmdln_desc = """A fast and complete Python implementation of Markdown, a
text-to-HTML conversion tool for web writers.

Supported extra syntax options (see -x|--extras option below and
see <https://github.com/trentm/python-markdown2/wiki/Extras> for details):

* code-friendly: Disable _ and __ for em and strong.
* cuddled-lists: Allow lists to be cuddled to the preceding paragraph.
* fenced-code-blocks: Allows a code block to not have to be indented
  by fencing it with '```' on a line before and after. Based on
  <http://github.github.com/github-flavored-markdown/> with support for
  syntax highlighting.
* footnotes: Support footnotes as in use on daringfireball.net and
  implemented in other Markdown processors (tho not in Markdown.pl v1.0.1).
* header-ids: Adds "id" attributes to headers. The id value is a slug of
  the header text.
* html-classes: Takes a dict mapping html tag names (lowercase) to a
  string to use for a "class" tag attribute. Currently only supports
  "pre" and "code" tags. Add an issue if you require this for other tags.
* markdown-in-html: Allow the use of `markdown="1"` in a block HTML tag to
  have markdown processing be done on its contents. Similar to
  <http://michelf.com/projects/php-markdown/extra/#markdown-attr> but with
  some limitations.
* metadata: Extract metadata from a leading '---'-fenced block. See
  <https://github.com/trentm/python-markdown2/issues/77> for details.
* nofollow: Add `rel="nofollow"` to add `<a>` tags with an href. See
  <http://en.wikipedia.org/wiki/Nofollow>.
* pyshell: Treats unindented Python interactive shell sessions as <code>
  blocks.
* link-patterns: Auto-link given regex patterns in text (e.g. bug number
  references, revision number references).
* smarty-pants: Replaces ' and " with curly quotation marks or curly
  apostrophes.  Replaces --, ---, ..., and . . . with en dashes, em dashes,
  and ellipses.
* toc: The returned HTML string gets a new "toc_html" attribute which is
  a Table of Contents for the document. (experimental)
* xml: Passes one-liner processing instructions and namespaced XML tags.
* tables: Tables using the same format as GFM
  <https://help.github.com/articles/github-flavored-markdown#tables> and
  PHP-Markdown Extra <https://michelf.ca/projects/php-markdown/extra/#table>.
* wiki-tables: Google Code Wiki-style tables. See
  <http://code.google.com/p/support/wiki/WikiSyntax#Tables>.
"""

# Dev Notes:
# - Python's regex syntax doesn't have '\z', so I'm using '\Z'. I'm
#   not yet sure if there are implications with this. Compare 'pydoc sre'
#   and 'perldoc perlre'.

__version_info__ = (2, 3, 0)
__version__ = '.'.join(map(str, __version_info__))
__author__ = "Trent Mick"

import os
import sys
import re
import logging
try:
    from hashlib import md5
except ImportError:
    from md5 import md5
import optparse
from random import random, randint
import codecs


#---- Python version compat

try:
    from urllib.parse import quote # python3
except ImportError:
    from urllib import quote # python2

if sys.version_info[:2] < (2,4):
    from sets import Set as set
    def reversed(sequence):
        for i in sequence[::-1]:
            yield i

# Use `bytes` for byte strings and `unicode` for unicode strings (str in Py3).
if sys.version_info[0] <= 2:
    py3 = False
    try:
        bytes
    except NameError:
        bytes = str
    base_string_type = basestring
elif sys.version_info[0] >= 3:
    py3 = True
    unicode = str
    base_string_type = str


#---- globals

DEBUG = False
log = logging.getLogger("markdown")

DEFAULT_TAB_WIDTH = 4


SECRET_SALT = bytes(randint(0, 1000000))
def _hash_text(s):
    return 'md5-' + md5(SECRET_SALT + s.encode("utf-8")).hexdigest()

# Table of hash values for escaped characters:
g_escape_table = dict([(ch, _hash_text(ch))
    for ch in '\\`*_{}[]()>#+-.!'])


#---- exceptions

class MarkdownError(Exception):
    pass


#---- public api

def markdown_path(path, encoding="utf-8",
                  html4tags=False, tab_width=DEFAULT_TAB_WIDTH,
                  safe_mode=None, extras=None, link_patterns=None,
                  use_file_vars=False):
    fp = codecs.open(path, 'r', encoding)
    text = fp.read()
    fp.close()
    return Markdown(html4tags=html4tags, tab_width=tab_width,
                    safe_mode=safe_mode, extras=extras,
                    link_patterns=link_patterns,
                    use_file_vars=use_file_vars).convert(text)

def markdown(text, html4tags=False, tab_width=DEFAULT_TAB_WIDTH,
             safe_mode=None, extras=None, link_patterns=None,
             use_file_vars=False):
    return Markdown(html4tags=html4tags, tab_width=tab_width,
                    safe_mode=safe_mode, extras=extras,
                    link_patterns=link_patterns,
                    use_file_vars=use_file_vars).convert(text)

class Markdown(object):
    # The dict of "extras" to enable in processing -- a mapping of
    # extra name to argument for the extra. Most extras do not have an
    # argument, in which case the value is None.
    #
    # This can be set via (a) subclassing and (b) the constructor
    # "extras" argument.
    extras = None

    urls = None
    titles = None
    html_blocks = None
    html_spans = None
    html_removed_text = "[HTML_REMOVED]"  # for compat with markdown.py

    # Used to track when we're inside an ordered or unordered list
    # (see _ProcessListItems() for details):
    list_level = 0

    _ws_only_line_re = re.compile(r"^[ \t]+$", re.M)

    def __init__(self, html4tags=False, tab_width=4, safe_mode=None,
                 extras=None, link_patterns=None, use_file_vars=False):
        if html4tags:
            self.empty_element_suffix = ">"
        else:
            self.empty_element_suffix = " />"
        self.tab_width = tab_width

        # For compatibility with earlier markdown2.py and with
        # markdown.py's safe_mode being a boolean,
        #   safe_mode == True -> "replace"
        if safe_mode is True:
            self.safe_mode = "replace"
        else:
            self.safe_mode = safe_mode

        # Massaging and building the "extras" info.
        if self.extras is None:
            self.extras = {}
        elif not isinstance(self.extras, dict):
            self.extras = dict([(e, None) for e in self.extras])
        if extras:
            if not isinstance(extras, dict):
                extras = dict([(e, None) for e in extras])
            self.extras.update(extras)
        assert isinstance(self.extras, dict)
        if "toc" in self.extras and not "header-ids" in self.extras:
            self.extras["header-ids"] = None   # "toc" implies "header-ids"
        self._instance_extras = self.extras.copy()

        self.link_patterns = link_patterns
        self.use_file_vars = use_file_vars
        self._outdent_re = re.compile(r'^(\t|[ ]{1,%d})' % tab_width, re.M)

        self._escape_table = g_escape_table.copy()
        if "smarty-pants" in self.extras:
            self._escape_table['"'] = _hash_text('"')
            self._escape_table["'"] = _hash_text("'")

    def reset(self):
        self.urls = {}
        self.titles = {}
        self.html_blocks = {}
        self.html_spans = {}
        self.list_level = 0
        self.extras = self._instance_extras.copy()
        if "footnotes" in self.extras:
            self.footnotes = {}
            self.footnote_ids = []
        if "header-ids" in self.extras:
            self._count_from_header_id = {} # no `defaultdict` in Python 2.4
        if "metadata" in self.extras:
            self.metadata = {}

    # Per <https://developer.mozilla.org/en-US/docs/HTML/Element/a> "rel"
    # should only be used in <a> tags with an "href" attribute.
    _a_nofollow = re.compile(r"<(a)([^>]*href=)", re.IGNORECASE)

    def convert(self, text):
        """Convert the given text."""
        # Main function. The order in which other subs are called here is
        # essential. Link and image substitutions need to happen before
        # _EscapeSpecialChars(), so that any *'s or _'s in the <a>
        # and <img> tags get encoded.

        # Clear the global hashes. If we don't clear these, you get conflicts
        # from other articles when generating a page which contains more than
        # one article (e.g. an index page that shows the N most recent
        # articles):
        self.reset()

        if not isinstance(text, unicode):
            #TODO: perhaps shouldn't presume UTF-8 for string input?
            text = unicode(text, 'utf-8')

        if self.use_file_vars:
            # Look for emacs-style file variable hints.
            emacs_vars = self._get_emacs_vars(text)
            if "markdown-extras" in emacs_vars:
                splitter = re.compile("[ ,]+")
                for e in splitter.split(emacs_vars["markdown-extras"]):
                    if '=' in e:
                        ename, earg = e.split('=', 1)
                        try:
                            earg = int(earg)
                        except ValueError:
                            pass
                    else:
                        ename, earg = e, None
                    self.extras[ename] = earg

        # Standardize line endings:
        text = re.sub("\r\n|\r", "\n", text)

        # Make sure $text ends with a couple of newlines:
        text += "\n\n"

        # Convert all tabs to spaces.
        text = self._detab(text)

        # Strip any lines consisting only of spaces and tabs.
        # This makes subsequent regexen easier to write, because we can
        # match consecutive blank lines with /\n+/ instead of something
        # contorted like /[ \t]*\n+/ .
        text = self._ws_only_line_re.sub("", text)

        # strip metadata from head and extract
        if "metadata" in self.extras:
            text = self._extract_metadata(text)

        text = self.preprocess(text)

        if "fenced-code-blocks" in self.extras and not self.safe_mode:
            text = self._do_fenced_code_blocks(text)

        if self.safe_mode:
            text = self._hash_html_spans(text)

        # Turn block-level HTML blocks into hash entries
        text = self._hash_html_blocks(text, raw=True)

        if "fenced-code-blocks" in self.extras and self.safe_mode:
            text = self._do_fenced_code_blocks(text)

        # Strip link definitions, store in hashes.
        if "footnotes" in self.extras:
            # Must do footnotes first because an unlucky footnote defn
            # looks like a link defn:
            #   [^4]: this "looks like a link defn"
            text = self._strip_footnote_definitions(text)
        text = self._strip_link_definitions(text)

        text = self._run_block_gamut(text)

        if "footnotes" in self.extras:
            text = self._add_footnotes(text)

        text = self.postprocess(text)

        text = self._unescape_special_chars(text)

        if self.safe_mode:
            text = self._unhash_html_spans(text)

        if "nofollow" in self.extras:
            text = self._a_nofollow.sub(r'<\1 rel="nofollow"\2', text)

        text += "\n"

        rv = UnicodeWithAttrs(text)
        if "toc" in self.extras:
            rv._toc = self._toc
        if "metadata" in self.extras:
            rv.metadata = self.metadata
        return rv

    def postprocess(self, text):
        """A hook for subclasses to do some postprocessing of the html, if
        desired. This is called before unescaping of special chars and
        unhashing of raw HTML spans.
        """
        return text

    def preprocess(self, text):
        """A hook for subclasses to do some preprocessing of the Markdown, if
        desired. This is called after basic formatting of the text, but prior
        to any extras, safe mode, etc. processing.
        """
        return text

    # Is metadata if the content starts with '---'-fenced `key: value`
    # pairs. E.g. (indented for presentation):
    #   ---
    #   foo: bar
    #   another-var: blah blah
    #   ---
    _metadata_pat = re.compile("""^---[ \t]*\n((?:[ \t]*[^ \t:]+[ \t]*:[^\n]*\n)+)---[ \t]*\n""")

    def _extract_metadata(self, text):
        # fast test
        if not text.startswith("---"):
            return text
        match = self._metadata_pat.match(text)
        if not match:
            return text

        tail = text[len(match.group(0)):]
        metadata_str = match.group(1).strip()
        for line in metadata_str.split('\n'):
            key, value = line.split(':', 1)
            self.metadata[key.strip()] = value.strip()

        return tail

    _emacs_oneliner_vars_pat = re.compile(r"-\*-\s*([^\r\n]*?)\s*-\*-", re.UNICODE)
    # This regular expression is intended to match blocks like this:
    #    PREFIX Local Variables: SUFFIX
    #    PREFIX mode: Tcl SUFFIX
    #    PREFIX End: SUFFIX
    # Some notes:
    # - "[ \t]" is used instead of "\s" to specifically exclude newlines
    # - "(\r\n|\n|\r)" is used instead of "$" because the sre engine does
    #   not like anything other than Unix-style line terminators.
    _emacs_local_vars_pat = re.compile(r"""^
        (?P<prefix>(?:[^\r\n|\n|\r])*?)
        [\ \t]*Local\ Variables:[\ \t]*
        (?P<suffix>.*?)(?:\r\n|\n|\r)
        (?P<content>.*?\1End:)
        """, re.IGNORECASE | re.MULTILINE | re.DOTALL | re.VERBOSE)

    def _get_emacs_vars(self, text):
        """Return a dictionary of emacs-style local variables.

        Parsing is done loosely according to this spec (and according to
        some in-practice deviations from this):
        http://www.gnu.org/software/emacs/manual/html_node/emacs/Specifying-File-Variables.html#Specifying-File-Variables
        """
        emacs_vars = {}
        SIZE = pow(2, 13) # 8kB

        # Search near the start for a '-*-'-style one-liner of variables.
        head = text[:SIZE]
        if "-*-" in head:
            match = self._emacs_oneliner_vars_pat.search(head)
            if match:
                emacs_vars_str = match.group(1)
                assert '\n' not in emacs_vars_str
                emacs_var_strs = [s.strip() for s in emacs_vars_str.split(';')
                                  if s.strip()]
                if len(emacs_var_strs) == 1 and ':' not in emacs_var_strs[0]:
                    # While not in the spec, this form is allowed by emacs:
                    #   -*- Tcl -*-
                    # where the implied "variable" is "mode". This form
                    # is only allowed if there are no other variables.
                    emacs_vars["mode"] = emacs_var_strs[0].strip()
                else:
                    for emacs_var_str in emacs_var_strs:
                        try:
                            variable, value = emacs_var_str.strip().split(':', 1)
                        except ValueError:
                            log.debug("emacs variables error: malformed -*- "
                                      "line: %r", emacs_var_str)
                            continue
                        # Lowercase the variable name because Emacs allows "Mode"
                        # or "mode" or "MoDe", etc.
                        emacs_vars[variable.lower()] = value.strip()

        tail = text[-SIZE:]
        if "Local Variables" in tail:
            match = self._emacs_local_vars_pat.search(tail)
            if match:
                prefix = match.group("prefix")
                suffix = match.group("suffix")
                lines = match.group("content").splitlines(0)
                #print "prefix=%r, suffix=%r, content=%r, lines: %s"\
                #      % (prefix, suffix, match.group("content"), lines)

                # Validate the Local Variables block: proper prefix and suffix
                # usage.
                for i, line in enumerate(lines):
                    if not line.startswith(prefix):
                        log.debug("emacs variables error: line '%s' "
                                  "does not use proper prefix '%s'"
                                  % (line, prefix))
                        return {}
                    # Don't validate suffix on last line. Emacs doesn't care,
                    # neither should we.
                    if i != len(lines)-1 and not line.endswith(suffix):
                        log.debug("emacs variables error: line '%s' "
                                  "does not use proper suffix '%s'"
                                  % (line, suffix))
                        return {}

                # Parse out one emacs var per line.
                continued_for = None
                for line in lines[:-1]: # no var on the last line ("PREFIX End:")
                    if prefix: line = line[len(prefix):] # strip prefix
                    if suffix: line = line[:-len(suffix)] # strip suffix
                    line = line.strip()
                    if continued_for:
                        variable = continued_for
                        if line.endswith('\\'):
                            line = line[:-1].rstrip()
                        else:
                            continued_for = None
                        emacs_vars[variable] += ' ' + line
                    else:
                        try:
                            variable, value = line.split(':', 1)
                        except ValueError:
                            log.debug("local variables error: missing colon "
                                      "in local variables entry: '%s'" % line)
                            continue
                        # Do NOT lowercase the variable name, because Emacs only
                        # allows "mode" (and not "Mode", "MoDe", etc.) in this block.
                        value = value.strip()
                        if value.endswith('\\'):
                            value = value[:-1].rstrip()
                            continued_for = variable
                        else:
                            continued_for = None
                        emacs_vars[variable] = value

        # Unquote values.
        for var, val in list(emacs_vars.items()):
            if len(val) > 1 and (val.startswith('"') and val.endswith('"')
               or val.startswith("'") and val.endswith("'")):
                emacs_vars[var] = val[1:-1]

        return emacs_vars

    # Cribbed from a post by Bart Lateur:
    # <http://www.nntp.perl.org/group/perl.macperl.anyperl/154>
    _detab_re = re.compile(r'(.*?)\t', re.M)
    def _detab_sub(self, match):
        g1 = match.group(1)
        return g1 + (' ' * (self.tab_width - len(g1) % self.tab_width))
    def _detab(self, text):
        r"""Remove (leading?) tabs from a file.

            >>> m = Markdown()
            >>> m._detab("\tfoo")
            '    foo'
            >>> m._detab("  \tfoo")
            '    foo'
            >>> m._detab("\t  foo")
            '      foo'
            >>> m._detab("  foo")
            '  foo'
            >>> m._detab("  foo\n\tbar\tblam")
            '  foo\n    bar blam'
        """
        if '\t' not in text:
            return text
        return self._detab_re.subn(self._detab_sub, text)[0]

    # I broke out the html5 tags here and add them to _block_tags_a and
    # _block_tags_b.  This way html5 tags are easy to keep track of.
    _html5tags = '|article|aside|header|hgroup|footer|nav|section|figure|figcaption'

    _block_tags_a = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math|ins|del'
    _block_tags_a += _html5tags

    _strict_tag_block_re = re.compile(r"""
        (                       # save in \1
            ^                   # start of line  (with re.M)
            <(%s)               # start tag = \2
            \b                  # word break
            (.*\n)*?            # any number of lines, minimally matching
            </\2>               # the matching end tag
            [ \t]*              # trailing spaces/tabs
            (?=\n+|\Z)          # followed by a newline or end of document
        )
        """ % _block_tags_a,
        re.X | re.M)

    _block_tags_b = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math'
    _block_tags_b += _html5tags

    _liberal_tag_block_re = re.compile(r"""
        (                       # save in \1
            ^                   # start of line  (with re.M)
            <(%s)               # start tag = \2
            \b                  # word break
            (.*\n)*?            # any number of lines, minimally matching
            .*</\2>             # the matching end tag
            [ \t]*              # trailing spaces/tabs
            (?=\n+|\Z)          # followed by a newline or end of document
        )
        """ % _block_tags_b,
        re.X | re.M)

    _html_markdown_attr_re = re.compile(
        r'''\s+markdown\s*=\s*("[0-9a-zA-Z]*"|'[0-9a-zA-Z]*')''')
    def _hash_html_block_sub(self, match, raw=False):
        html = match.group(1)
        if raw and self.safe_mode:
            html = self._sanitize_html(html)
        elif 'markdown-in-html' in self.extras and 'markdown' in html:
            first_line = html.split('\n', 1)[0]
            m = self._html_markdown_attr_re.search(first_line)
            if m:
                if m.group(1)[1:-1] == "1":
                    lines = html.split('\n')
                    middle = '\n'.join(lines[1:-1])
                    last_line = lines[-1]
                    first_line = first_line[:m.start()] + first_line[m.end():]
                    f_key = _hash_text(first_line)
                    self.html_blocks[f_key] = first_line
                    l_key = _hash_text(last_line)
                    self.html_blocks[l_key] = last_line
                    return ''.join(["\n\n", f_key,
                                    "\n\n", middle, "\n\n",
                                    l_key, "\n\n"])
                else:
                    html = html[:m.start()] + html[m.end():]
        key = _hash_text(html)
        self.html_blocks[key] = html
        return "\n\n" + key + "\n\n"

    def _hash_html_blocks(self, text, raw=False):
        """Hashify HTML blocks

        We only want to do this for block-level HTML tags, such as headers,
        lists, and tables. That's because we still want to wrap <p>s around
        "paragraphs" that are wrapped in non-block-level tags, such as
        anchors, phrase emphasis, and spans. The list of tags we're looking
        for is hard-coded.

        @param raw {boolean} indicates if these are raw HTML blocks in
            the original source. It makes a difference in "safe" mode.
        """
        if '<' not in text:
            return text

        # Pass `raw` value into our calls to self._hash_html_block_sub.
        hash_html_block_sub = _curry(self._hash_html_block_sub, raw=raw)

        # First, look for nested blocks, e.g.:
        #   <div>
        #       <div>
        #       tags for inner block must be indented.
        #       </div>
        #   </div>
        #
        # The outermost tags must start at the left margin for this to match,
        # and the inner nested divs must be indented.
        # We need to do this before the next, more liberal match, because the
        # next match will start at the first `<div>` and stop at the first
        # `</div>`.
        text = self._strict_tag_block_re.sub(hash_html_block_sub, text)

        # Now match more liberally, simply from `\n<tag>` to `</tag>\n`
        text = self._liberal_tag_block_re.sub(hash_html_block_sub, text)

        # Special case just for <hr />. It was easier to make a special
        # case than to make the other regex more complicated.
        if "<hr" in text:
            _hr_tag_re = _hr_tag_re_from_tab_width(self.tab_width)
            text = _hr_tag_re.sub(hash_html_block_sub, text)

        # Special case for standalone HTML comments:
        if "<!--" in text:
            start = 0
            while True:
                # Delimiters for next comment block.
                try:
                    start_idx = text.index("<!--", start)
                except ValueError:
                    break
                try:
                    end_idx = text.index("-->", start_idx) + 3
                except ValueError:
                    break

                # Start position for next comment block search.
                start = end_idx

                # Validate whitespace before comment.
                if start_idx:
                    # - Up to `tab_width - 1` spaces before start_idx.
                    for i in range(self.tab_width - 1):
                        if text[start_idx - 1] != ' ':
                            break
                        start_idx -= 1
                        if start_idx == 0:
                            break
                    # - Must be preceded by 2 newlines or hit the start of
                    #   the document.
                    if start_idx == 0:
                        pass
                    elif start_idx == 1 and text[0] == '\n':
                        start_idx = 0  # to match minute detail of Markdown.pl regex
                    elif text[start_idx-2:start_idx] == '\n\n':
                        pass
                    else:
                        break

                # Validate whitespace after comment.
                # - Any number of spaces and tabs.
                while end_idx < len(text):
                    if text[end_idx] not in ' \t':
                        break
                    end_idx += 1
                # - Must be following by 2 newlines or hit end of text.
                if text[end_idx:end_idx+2] not in ('', '\n', '\n\n'):
                    continue

                # Escape and hash (must match `_hash_html_block_sub`).
                html = text[start_idx:end_idx]
                if raw and self.safe_mode:
                    html = self._sanitize_html(html)
                key = _hash_text(html)
                self.html_blocks[key] = html
                text = text[:start_idx] + "\n\n" + key + "\n\n" + text[end_idx:]

        if "xml" in self.extras:
            # Treat XML processing instructions and namespaced one-liner
            # tags as if they were block HTML tags. E.g., if standalone
            # (i.e. are their own paragraph), the following do not get
            # wrapped in a <p> tag:
            #    <?foo bar?>
            #
            #    <xi:include xmlns:xi="http://www.w3.org/2001/XInclude" href="chapter_1.md"/>
            _xml_oneliner_re = _xml_oneliner_re_from_tab_width(self.tab_width)
            text = _xml_oneliner_re.sub(hash_html_block_sub, text)

        return text

    def _strip_link_definitions(self, text):
        # Strips link definitions from text, stores the URLs and titles in
        # hash references.
        less_than_tab = self.tab_width - 1

        # Link defs are in the form:
        #   [id]: url "optional title"
        _link_def_re = re.compile(r"""
            ^[ ]{0,%d}\[(.+)\]: # id = \1
              [ \t]*
              \n?               # maybe *one* newline
              [ \t]*
            <?(.+?)>?           # url = \2
              [ \t]*
            (?:
                \n?             # maybe one newline
                [ \t]*
                (?<=\s)         # lookbehind for whitespace
                ['"(]
                ([^\n]*)        # title = \3
                ['")]
                [ \t]*
            )?  # title is optional
            (?:\n+|\Z)
            """ % less_than_tab, re.X | re.M | re.U)
        return _link_def_re.sub(self._extract_link_def_sub, text)

    def _extract_link_def_sub(self, match):
        id, url, title = match.groups()
        key = id.lower()    # Link IDs are case-insensitive
        self.urls[key] = self._encode_amps_and_angles(url)
        if title:
            self.titles[key] = title
        return ""

    def _extract_footnote_def_sub(self, match):
        id, text = match.groups()
        text = _dedent(text, skip_first_line=not text.startswith('\n')).strip()
        normed_id = re.sub(r'\W', '-', id)
        # Ensure footnote text ends with a couple newlines (for some
        # block gamut matches).
        self.footnotes[normed_id] = text + "\n\n"
        return ""

    def _strip_footnote_definitions(self, text):
        """A footnote definition looks like this:

            [^note-id]: Text of the note.

                May include one or more indented paragraphs.

        Where,
        - The 'note-id' can be pretty much anything, though typically it
          is the number of the footnote.
        - The first paragraph may start on the next line, like so:

            [^note-id]:
                Text of the note.
        """
        less_than_tab = self.tab_width - 1
        footnote_def_re = re.compile(r'''
            ^[ ]{0,%d}\[\^(.+)\]:   # id = \1
            [ \t]*
            (                       # footnote text = \2
              # First line need not start with the spaces.
              (?:\s*.*\n+)
              (?:
                (?:[ ]{%d} | \t)  # Subsequent lines must be indented.
                .*\n+
              )*
            )
            # Lookahead for non-space at line-start, or end of doc.
            (?:(?=^[ ]{0,%d}\S)|\Z)
            ''' % (less_than_tab, self.tab_width, self.tab_width),
            re.X | re.M)
        return footnote_def_re.sub(self._extract_footnote_def_sub, text)

    _hr_data = [
        ('*', re.compile(r"^[ ]{0,3}\*(.*?)$", re.M)),
        ('-', re.compile(r"^[ ]{0,3}\-(.*?)$", re.M)),
        ('_', re.compile(r"^[ ]{0,3}\_(.*?)$", re.M)),
    ]

    def _run_block_gamut(self, text):
        # These are all the transformations that form block-level
        # tags like paragraphs, headers, and list items.

        if "fenced-code-blocks" in self.extras:
            text = self._do_fenced_code_blocks(text)

        text = self._do_headers(text)

        # Do Horizontal Rules:
        # On the number of spaces in horizontal rules: The spec is fuzzy: "If
        # you wish, you may use spaces between the hyphens or asterisks."
        # Markdown.pl 1.0.1's hr regexes limit the number of spaces between
        # the hr chars to one or two. We'll reproduce that limit here.
        hr = "\n<hr%s" + self.empty_element_suffix + "\n"
        hr = hr % self._html_class_str_from_tag("hr")
        for ch, regex in self._hr_data:
            if ch in text:
                for m in reversed(list(regex.finditer(text))):
                    tail = m.group(1).rstrip()
                    if not tail.strip(ch + ' ') and tail.count("   ") == 0:
                        start, end = m.span()
                        text = text[:start] + hr + text[end:]

        text = self._do_lists(text)

        if "pyshell" in self.extras:
            text = self._prepare_pyshell_blocks(text)
        if "wiki-tables" in self.extras:
            text = self._do_wiki_tables(text)
        if "tables" in self.extras:
            text = self._do_tables(text)

        text = self._do_code_blocks(text)

        text = self._do_block_quotes(text)

        # We already ran _HashHTMLBlocks() before, in Markdown(), but that
        # was to escape raw HTML in the original Markdown source. This time,
        # we're escaping the markup we've just created, so that we don't wrap
        # <p> tags around block-level tags.
        text = self._hash_html_blocks(text)

        text = self._form_paragraphs(text)

        return text

    def _pyshell_block_sub(self, match):
        lines = match.group(0).splitlines(0)
        _dedentlines(lines)
        indent = ' ' * self.tab_width
        s = ('\n'  # separate from possible cuddled paragraph
             + indent + ('\n'+indent).join(lines)
             + '\n\n')
        return s

    def _prepare_pyshell_blocks(self, text):
        """Ensure that Python interactive shell sessions are put in
        code blocks -- even if not properly indented.
        """
        if ">>>" not in text:
            return text

        less_than_tab = self.tab_width - 1
        _pyshell_block_re = re.compile(r"""
            ^([ ]{0,%d})>>>[ ].*\n  # first line
            ^(\1.*\S+.*\n)*         # any number of subsequent lines
            ^\n                     # ends with a blank line
            """ % less_than_tab, re.M | re.X)

        return _pyshell_block_re.sub(self._pyshell_block_sub, text)

    def _table_sub(self, match):
        head, underline, body = match.groups()

        table_style = self._html_class_str_from_tag("table")
        td_style = self._html_class_str_from_tag("td")
        th_style = self._html_class_str_from_tag("th")
        tr_style = self._html_class_str_from_tag("tr")
        tr_odd_style = self._html_class_str_from_tag("tr:odd")
        tr_even_style = self._html_class_str_from_tag("tr:even")

        # Determine aligns for columns.
        cols = [cell.strip() for cell in underline.strip('| \t\n').split('|')]
        align_from_col_idx = {}
        for col_idx, col in enumerate(cols):
            if col[0] == ':' and col[-1] == ':':
                align_from_col_idx[col_idx] = ' align="center"'
            elif col[0] == ':':
                align_from_col_idx[col_idx] = ' align="left"'
            elif col[-1] == ':':
                align_from_col_idx[col_idx] = ' align="right"'

        # thead
        hlines = ['<table%s>' % table_style, '<thead>', '<tr%s>' % tr_style]
        cols = [cell.strip() for cell in head.strip('| \t\n').split('|')]
        for col_idx, col in enumerate(cols):
            hlines.append('  <th%s%s>%s</th>' % (
                align_from_col_idx.get(col_idx, ''),
                th_style,
                self._run_span_gamut(col)
            ))
        hlines.append('</tr>')
        hlines.append('</thead>')

        # tbody
        hlines.append('<tbody>')
        for i, line in enumerate(body.strip('\n').split('\n')):
            hlines.append('<tr%s%s>' % (tr_style,
                                        tr_even_style if i % 2 else tr_odd_style))
            cols = [cell.strip() for cell in line.strip('| \t\n').split('|')]
            for col_idx, col in enumerate(cols):
                hlines.append('  <td%s%s>%s</td>' % (
                    align_from_col_idx.get(col_idx, ''),
                    td_style,
                    self._run_span_gamut(col)
                ))
            hlines.append('</tr>')
        hlines.append('</tbody>')
        hlines.append('</table>')

        return '\n'.join(hlines) + '\n'

    def _do_tables(self, text):
        """Copying PHP-Markdown and GFM table syntax. Some regex borrowed from
        https://github.com/michelf/php-markdown/blob/lib/Michelf/Markdown.php#L2538
        """
        less_than_tab = self.tab_width - 1
        table_re = re.compile(r'''
                (?:(?<=\n\n)|\A\n?)             # leading blank line

                ^[ ]{0,%d}                      # allowed whitespace
                (.*[|].*)  \n                   # $1: header row (at least one pipe)

                ^[ ]{0,%d}                      # allowed whitespace
                (                               # $2: underline row
                    # underline row with leading bar
                    (?:  \|\ *:?-+:?\ *  )+  \|?  \n
                    |
                    # or, underline row without leading bar
                    (?:  \ *:?-+:?\ *\|  )+  (?:  \ *:?-+:?\ *  )?  \n
                )

                (                               # $3: data rows
                    (?:
                        ^[ ]{0,%d}(?!\ )        # ensure line begins with 0 to less_than_tab spaces
                        .*\|.*  \n
                    )+
                )
            ''' % (less_than_tab, less_than_tab, less_than_tab), re.M | re.X)
        return table_re.sub(self._table_sub, text)

    def _wiki_table_sub(self, match):
        ttext = match.group(0).strip()
        rows = []
        for line in ttext.splitlines(0):
            line = line.strip()[2:-2].strip()
            row = [c.strip() for c in re.split(r'(?<!\\)\|\|', line)]
            rows.append(row)

        table_style = self._html_class_str_from_tag("table")
        td_style = self._html_class_str_from_tag("td")
        tr_style = self._html_class_str_from_tag("tr")
        tr_odd_style = self._html_class_str_from_tag("tr:odd")
        tr_even_style = self._html_class_str_from_tag("tr:even")

        hlines = ['<table%s>' % table_style, '<tbody>']
        for i, row in enumerate(rows):
            hrow = ['<tr%s%s>' % (tr_style,
                                  tr_even_style if i % 2 else tr_odd_style)]
            for cell in row:
                hrow.append('<td%s>' % td_style)
                hrow.append(self._run_span_gamut(cell))
                hrow.append('</td>')
            hrow.append('</tr>')
            hlines.append(''.join(hrow))
        hlines += ['</tbody>', '</table>']
        return '\n'.join(hlines) + '\n'

    def _do_wiki_tables(self, text):
        # Optimization.
        if "||" not in text:
            return text

        less_than_tab = self.tab_width - 1
        wiki_table_re = re.compile(r'''
            (?:(?<=\n\n)|\A\n?)            # leading blank line
            ^([ ]{0,%d})\|\|.+?\|\|[ ]*\n  # first line
            (^\1\|\|.+?\|\|\n)*            # any number of subsequent lines
            ''' % less_than_tab, re.M | re.X)
        return wiki_table_re.sub(self._wiki_table_sub, text)

    def _run_span_gamut(self, text):
        # These are all the transformations that occur *within* block-level
        # tags like paragraphs, headers, and list items.

        text = self._do_code_spans(text)

        text = self._escape_special_chars(text)

        # Process anchor and image tags.
        text = self._do_links(text)

        # Make links out of things like `<http://example.com/>`
        # Must come after _do_links(), because you can use < and >
        # delimiters in inline links like [this](<url>).
        text = self._do_auto_links(text)

        if "link-patterns" in self.extras:
            text = self._do_link_patterns(text)

        text = self._encode_amps_and_angles(text)

        text = self._do_italics_and_bold(text)

        if "smarty-pants" in self.extras:
            text = self._do_smart_punctuation(text)

        # Do hard breaks:
        if "break-on-newline" in self.extras:
            text = re.sub(r" *\n", "<br%s\n" % self.empty_element_suffix, text)
        else:
            text = re.sub(r" {2,}\n", " <br%s\n" % self.empty_element_suffix, text)

        return text

    # "Sorta" because auto-links are identified as "tag" tokens.
    _sorta_html_tokenize_re = re.compile(r"""
        (
            # tag
            </?
            (?:\w+)                                     # tag name
            (?:\s+(?:[\w-]+:)?[\w-]+=(?:".*?"|'.*?'))*  # attributes
            \s*/?>
            |
            # auto-link (e.g., <http://www.activestate.com/>)
            <\w+[^>]*>
            |
            <!--.*?-->      # comment
            |
            <\?.*?\?>       # processing instruction
        )
        """, re.X)

    def _escape_special_chars(self, text):
        # Python markdown note: the HTML tokenization here differs from
        # that in Markdown.pl, hence the behaviour for subtle cases can
        # differ (I believe the tokenizer here does a better job because
        # it isn't susceptible to unmatched '<' and '>' in HTML tags).
        # Note, however, that '>' is not allowed in an auto-link URL
        # here.
        escaped = []
        is_html_markup = False
        for token in self._sorta_html_tokenize_re.split(text):
            if is_html_markup:
                # Within tags/HTML-comments/auto-links, encode * and _
                # so they don't conflict with their use in Markdown for
                # italics and strong.  We're replacing each such
                # character with its corresponding MD5 checksum value;
                # this is likely overkill, but it should prevent us from
                # colliding with the escape values by accident.
                escaped.append(token.replace('*', self._escape_table['*'])
                                    .replace('_', self._escape_table['_']))
            else:
                escaped.append(self._encode_backslash_escapes(token))
            is_html_markup = not is_html_markup
        return ''.join(escaped)

    def _hash_html_spans(self, text):
        # Used for safe_mode.

        def _is_auto_link(s):
            if ':' in s and self._auto_link_re.match(s):
                return True
            elif '@' in s and self._auto_email_link_re.match(s):
                return True
            return False

        tokens = []
        is_html_markup = False
        for token in self._sorta_html_tokenize_re.split(text):
            if is_html_markup and not _is_auto_link(token):
                sanitized = self._sanitize_html(token)
                key = _hash_text(sanitized)
                self.html_spans[key] = sanitized
                tokens.append(key)
            else:
                tokens.append(token)
            is_html_markup = not is_html_markup
        return ''.join(tokens)

    def _unhash_html_spans(self, text):
        for key, sanitized in list(self.html_spans.items()):
            text = text.replace(key, sanitized)
        return text

    def _sanitize_html(self, s):
        if self.safe_mode == "replace":
            return self.html_removed_text
        elif self.safe_mode == "escape":
            replacements = [
                ('&', '&amp;'),
                ('<', '&lt;'),
                ('>', '&gt;'),
            ]
            for before, after in replacements:
                s = s.replace(before, after)
            return s
        else:
            raise MarkdownError("invalid value for 'safe_mode': %r (must be "
                                "'escape' or 'replace')" % self.safe_mode)

    _inline_link_title = re.compile(r'''
            (                   # \1
              [ \t]+
              (['"])            # quote char = \2
              (?P<title>.*?)
              \2
            )?                  # title is optional
          \)$
        ''', re.X | re.S)
    _tail_of_reference_link_re = re.compile(r'''
          # Match tail of: [text][id]
          [ ]?          # one optional space
          (?:\n[ ]*)?   # one optional newline followed by spaces
          \[
            (?P<id>.*?)
\] ''', re.X | re.S) _whitespace = re.compile(r'\s*') _strip_anglebrackets = re.compile(r'<(.*)>.*') def _find_non_whitespace(self, text, start): """Returns the index of the first non-whitespace character in text after (and including) start """ match = self._whitespace.match(text, start) return match.end() def _find_balanced(self, text, start, open_c, close_c): """Returns the index where the open_c and close_c characters balance out - the same number of open_c and close_c are encountered - or the end of string if it's reached before the balance point is found. """ i = start l = len(text) count = 1 while count > 0 and i < l: if text[i] == open_c: count += 1 elif text[i] == close_c: count -= 1 i += 1 return i def _extract_url_and_title(self, text, start): """Extracts the url and (optional) title from the tail of a link""" # text[start] equals the opening parenthesis idx = self._find_non_whitespace(text, start+1) if idx == len(text): return None, None, None end_idx = idx has_anglebrackets = text[idx] == "<" if has_anglebrackets: end_idx = self._find_balanced(text, end_idx+1, "<", ">") end_idx = self._find_balanced(text, end_idx, "(", ")") match = self._inline_link_title.search(text, idx, end_idx) if not match: return None, None, None url, title = text[idx:match.start()], match.group("title") if has_anglebrackets: url = self._strip_anglebrackets.sub(r'\1', url) return url, title, end_idx def _do_links(self, text): """Turn Markdown link shortcuts into XHTML <a> and <img> tags. This is a combination of Markdown.pl's _DoAnchors() and _DoImages(). They are done together because that simplified the approach. It was necessary to use a different approach than Markdown.pl because of the lack of atomic matching support in Python's regex engine used in $g_nested_brackets. """ MAX_LINK_TEXT_SENTINEL = 3000 # markdown2 issue 24 # `anchor_allowed_pos` is used to support img links inside # anchors, but not anchors inside anchors. An anchor's start # pos must be `>= anchor_allowed_pos`. anchor_allowed_pos = 0 curr_pos = 0 while True: # Handle the next link. # The next '[' is the start of: # - an inline anchor: [text](url "title") # - a reference anchor: [text][id] # - an inline img: ![text](url "title") # - a reference img: ![text][id] # - a footnote ref: [^id] # (Only if 'footnotes' extra enabled) # - a footnote defn: [^id]: ... # (Only if 'footnotes' extra enabled) These have already # been stripped in _strip_footnote_definitions() so no # need to watch for them. # - a link definition: [id]: url "title" # These have already been stripped in # _strip_link_definitions() so no need to watch for them. # - not markup: [...anything else... try: start_idx = text.index('[', curr_pos) except ValueError: break text_length = len(text) # Find the matching closing ']'. # Markdown.pl allows *matching* brackets in link text so we # will here too. Markdown.pl *doesn't* currently allow # matching brackets in img alt text -- we'll differ in that # regard. bracket_depth = 0 for p in range(start_idx+1, min(start_idx+MAX_LINK_TEXT_SENTINEL, text_length)): ch = text[p] if ch == ']': bracket_depth -= 1 if bracket_depth < 0: break elif ch == '[': bracket_depth += 1 else: # Closing bracket not found within sentinel length. # This isn't markup. curr_pos = start_idx + 1 continue link_text = text[start_idx+1:p] # Possibly a footnote ref? 
if "footnotes" in self.extras and link_text.startswith("^"): normed_id = re.sub(r'\W', '-', link_text[1:]) if normed_id in self.footnotes: self.footnote_ids.append(normed_id) # result = '<sup class="footnote-ref" id="fnref-%s">' \ # result = '<sup id="fnref-%s"><a href="#fn-%s">%s</a></sup>' \ # % (normed_id, normed_id, len(self.footnote_ids)) result = '<sup title="%s"%s>%s</sup>' \ % (normed_id, self._html_class_str_from_tag("sup"), len(self.footnote_ids)) text = text[:start_idx] + result + text[p+1:] else: # This id isn't defined, leave the markup alone. curr_pos = p+1 continue # Now determine what this is by the remainder. p += 1 if p == text_length: return text # Inline anchor or img? if text[p] == '(': # attempt at perf improvement url, title, url_end_idx = self._extract_url_and_title(text, p) if url is not None: # Handle an inline anchor or img. is_img = start_idx > 0 and text[start_idx-1] == "!" if is_img: start_idx -= 1 # We've got to encode these to avoid conflicting # with italics/bold. url = url.replace('*', self._escape_table['*']) \ .replace('_', self._escape_table['_']) if title: title_str = ' title="%s"' % ( _xml_escape_attr(title) .replace('*', self._escape_table['*']) .replace('_', self._escape_table['_'])) else: title_str = '' if is_img: img_class_str = self._html_class_str_from_tag("img") result = '<img src="%s" alt="%s"%s%s%s' \ % (url.replace('"', '&quot;'), _xml_escape_attr(link_text), title_str, img_class_str, self.empty_element_suffix) if "smarty-pants" in self.extras: result = result.replace('"', self._escape_table['"']) curr_pos = start_idx + len(result) text = text[:start_idx] + result + text[url_end_idx:] elif start_idx >= anchor_allowed_pos: result_head = '<a href="%s"%s>' % (url, title_str) result = '%s%s</a>' % (result_head, link_text) if "smarty-pants" in self.extras: result = result.replace('"', self._escape_table['"']) # <img> allowed from curr_pos on, <a> from # anchor_allowed_pos on. curr_pos = start_idx + len(result_head) anchor_allowed_pos = start_idx + len(result) text = text[:start_idx] + result + text[url_end_idx:] else: # Anchor not allowed here. curr_pos = start_idx + 1 continue # Reference anchor or img? else: match = self._tail_of_reference_link_re.match(text, p) if match: # Handle a reference-style anchor or img. is_img = start_idx > 0 and text[start_idx-1] == "!" if is_img: start_idx -= 1 link_id = match.group("id").lower() if not link_id: link_id = link_text.lower() # for links like [this][] if link_id in self.urls: url = self.urls[link_id] # We've got to encode these to avoid conflicting # with italics/bold. 
url = url.replace('*', self._escape_table['*']) \ .replace('_', self._escape_table['_']) title = self.titles.get(link_id) if title: before = title title = _xml_escape_attr(title) \ .replace('*', self._escape_table['*']) \ .replace('_', self._escape_table['_']) title_str = ' title="%s"' % title else: title_str = '' if is_img: img_class_str = self._html_class_str_from_tag("img") result = '<img src="%s" alt="%s"%s%s%s' \ % (url.replace('"', '&quot;'), link_text.replace('"', '&quot;'), title_str, img_class_str, self.empty_element_suffix) if "smarty-pants" in self.extras: result = result.replace('"', self._escape_table['"']) curr_pos = start_idx + len(result) text = text[:start_idx] + result + text[match.end():] elif start_idx >= anchor_allowed_pos: result = '<a href="%s"%s>%s</a>' \ % (url, title_str, link_text) result_head = '<a href="%s"%s>' % (url, title_str) result = '%s%s</a>' % (result_head, link_text) if "smarty-pants" in self.extras: result = result.replace('"', self._escape_table['"']) # <img> allowed from curr_pos on, <a> from # anchor_allowed_pos on. curr_pos = start_idx + len(result_head) anchor_allowed_pos = start_idx + len(result) text = text[:start_idx] + result + text[match.end():] else: # Anchor not allowed here. curr_pos = start_idx + 1 else: # This id isn't defined, leave the markup alone. curr_pos = match.end() continue # Otherwise, it isn't markup. curr_pos = start_idx + 1 return text def header_id_from_text(self, text, prefix, n): """Generate a header id attribute value from the given header HTML content. This is only called if the "header-ids" extra is enabled. Subclasses may override this for different header ids. @param text {str} The text of the header tag @param prefix {str} The requested prefix for header ids. This is the value of the "header-ids" extra key, if any. Otherwise, None. @param n {int} The <hN> tag number, i.e. `1` for an <h1> tag. @returns {str} The value for the header tag's "id" attribute. Return None to not have an id attribute and to exclude this header from the TOC (if the "toc" extra is specified). """ header_id = _slugify(text) if prefix and isinstance(prefix, base_string_type): header_id = prefix + '-' + header_id if header_id in self._count_from_header_id: self._count_from_header_id[header_id] += 1 header_id += '-%s' % self._count_from_header_id[header_id] else: self._count_from_header_id[header_id] = 1 return header_id _toc = None def _toc_add_entry(self, level, id, name): if self._toc is None: self._toc = [] self._toc.append((level, id, self._unescape_special_chars(name))) _h_re = re.compile(r''' (^(.+)[ \t]*\n(=+|-+)[ \t]*\n+) | (^(\#{1,6}) # \1 = string of #'s [ \t]+ (.+?) 
# \2 = Header text [ \t]* (?<!\\) # ensure not an escaped trailing '#' \#* # optional closing #'s (not counted) \n+ ) ''', re.X | re.M) def _h_sub(self, match): if match.group(1) is not None: # Setext header n = {"=": 1, "-": 2}[match.group(3)[0]] header_group = match.group(2) else: # atx header n = len(match.group(5)) header_group = match.group(6) demote_headers = self.extras.get("demote-headers") if demote_headers: n = min(n + demote_headers, 6) header_id_attr = "" if "header-ids" in self.extras: header_id = self.header_id_from_text(header_group, self.extras["header-ids"], n) if header_id: header_id_attr = ' id="%s"' % header_id html = self._run_span_gamut(header_group) if "toc" in self.extras and header_id: self._toc_add_entry(n, header_id, html) hsty = self._html_class_str_from_tag("h%d" % n) return "<h%d%s%s>%s</h%d>\n\n" % (n, header_id_attr, hsty, html, n) def _do_headers(self, text): # Setext-style headers: # Header 1 # ======== # # Header 2 # -------- # atx-style headers: # # Header 1 # ## Header 2 # ## Header 2 with closing hashes ## # ... # ###### Header 6 text = self._h_re.sub(self._h_sub, text) return text _marker_ul_chars = '*+-' _marker_any = r'(?:[%s]|\d+\.)' % _marker_ul_chars _marker_ul = '(?:[%s])' % _marker_ul_chars _marker_ol = r'(?:\d+\.)' def _list_sub(self, match): lst = match.group(1) lst_type = match.group(3) in self._marker_ul_chars and "ul" or "ol" result = self._process_list_items(lst) if self.list_level: return "<%s>\n%s</%s>\n" % (lst_type, result, lst_type) else: return "<%s>\n%s</%s>\n\n" % (lst_type, result, lst_type) def _do_lists(self, text): # Form HTML ordered (numbered) and unordered (bulleted) lists. # Iterate over each *non-overlapping* list match. pos = 0 while True: # Find the *first* hit for either list style (ul or ol). We # match ul and ol separately to avoid adjacent lists of different # types running into each other (see issue #16). hits = [] for marker_pat in (self._marker_ul, self._marker_ol): less_than_tab = self.tab_width - 1 whole_list = r''' ( # \1 = whole list ( # \2 [ ]{0,%d} (%s) # \3 = first list item marker [ \t]+ (?!\ *\3\ ) # '- - - ...' isn't a list. See 'not_quite_a_list' test case. ) (?:.+?) ( # \4 \Z | \n{2,} (?=\S) (?! # Negative lookahead for another list item marker [ \t]* %s[ \t]+ ) ) ) ''' % (less_than_tab, marker_pat, marker_pat) if self.list_level: # sub-list list_re = re.compile("^"+whole_list, re.X | re.M | re.S) else: list_re = re.compile(r"(?:(?<=\n\n)|\A\n?)"+whole_list, re.X | re.M | re.S) match = list_re.search(text, pos) if match: hits.append((match.start(), match)) if not hits: break hits.sort() match = hits[0][1] start, end = match.span() text = text[:start] + self._list_sub(match) + text[end:] pos = end return text _list_item_re = re.compile(r''' (\n)? # leading line = \1 (^[ \t]*) # leading whitespace = \2 (?P<marker>%s) [ \t]+ # list marker = \3 ((?:.+?) 
# list item text = \4 (\n{1,2})) # eols = \5 (?= \n* (\Z | \2 (?P<next_marker>%s) [ \t]+)) ''' % (_marker_any, _marker_any), re.M | re.X | re.S) _last_li_endswith_two_eols = False def _list_item_sub(self, match): item = match.group(4) leading_line = match.group(1) # leading_space = match.group(2) marker = match.group(3) start_tag = "" if item[0:4] in ["[ ] ", "[x] "]: start_tag = '%s><en-todo%s/' % \ (' style="list-style-type: none;"' if marker in "+-*" else '', ' checked="true"' if item[1] == 'x' else '') item = item[4:] if leading_line or "\n\n" in item or self._last_li_endswith_two_eols: item = self._run_block_gamut(self._outdent(item)) else: # Recursion for sub-lists: item = self._do_lists(self._outdent(item)) if item.endswith('\n'): item = item[:-1] item = self._run_span_gamut(item) self._last_li_endswith_two_eols = (len(match.group(5)) == 2) return "<li%s>%s</li>\n" % (start_tag, item) def _process_list_items(self, list_str): # Process the contents of a single ordered or unordered list, # splitting it into individual list items. # The $g_list_level global keeps track of when we're inside a list. # Each time we enter a list, we increment it; when we leave a list, # we decrement. If it's zero, we're not in a list anymore. # # We do this because when we're not inside a list, we want to treat # something like this: # # I recommend upgrading to version # 8. Oops, now this line is treated # as a sub-list. # # As a single paragraph, despite the fact that the second line starts # with a digit-period-space sequence. # # Whereas when we're inside a list (or sub-list), that line will be # treated as the start of a sub-list. What a kludge, huh? This is # an aspect of Markdown's syntax that's hard to parse perfectly # without resorting to mind-reading. Perhaps the solution is to # change the syntax rules such that sub-lists must start with a # starting cardinal number; e.g. "1." or "a.". self.list_level += 1 self._last_li_endswith_two_eols = False list_str = list_str.rstrip('\n') + '\n' list_str = self._list_item_re.sub(self._list_item_sub, list_str) self.list_level -= 1 return list_str def _get_pygments_lexer(self, lexer_name): try: from pygments import lexers, util except ImportError: return None try: return lexers.get_lexer_by_name(lexer_name) except util.ClassNotFound: return None def _color_with_pygments(self, codeblock, lexer, **formatter_opts): import pygments import pygments.formatters pre_class_str = self._html_class_str_from_tag("pre") code_class_str = self._html_class_str_from_tag("code") class HtmlCodeFormatter(pygments.formatters.HtmlFormatter): def _wrap_code(self, inner): """A function for use in a Pygments Formatter which wraps in <code> tags. 
""" pre_style = pre_class_str lang_code = "" if hasattr(lexer, 'orig_name'): lang_code = ' title="%s"' % lexer.orig_name if self.style.background_color and pre_style.startswith(' style'): pre_style = pre_style[0:-1] + 'background-color:' + self.style.background_color + ';"' yield 0, "<pre%s%s>" % (lang_code, pre_style) yield 0, "<code%s>" % code_class_str for tup in inner: yield tup yield 0, "</code>" yield 0, "</pre>" def wrap(self, source, outfile): """Return the source with a code, pre, and div.""" # return self._wrap_div(self._wrap_pre(self._wrap_code(source))) return self._wrap_code(source) formatter_opts.setdefault("cssclass", "codehilite") formatter = HtmlCodeFormatter(**formatter_opts) return pygments.highlight(codeblock, lexer, formatter) def _code_block_sub(self, match, is_fenced_code_block=False): lexer_name = None if is_fenced_code_block: lexer_name = match.group(1) if lexer_name: formatter_opts = self.extras['fenced-code-blocks'] or {} codeblock = match.group(2) codeblock = codeblock[:-1] # drop one trailing newline else: codeblock = match.group(1) codeblock = self._outdent(codeblock) codeblock = self._detab(codeblock) codeblock = codeblock.lstrip('\n') # trim leading newlines codeblock = codeblock.rstrip() # trim trailing whitespace # Note: "code-color" extra is DEPRECATED. if "code-color" in self.extras and codeblock.startswith(":::"): lexer_name, rest = codeblock.split('\n', 1) lexer_name = lexer_name[3:].strip() codeblock = rest.lstrip("\n") # Remove lexer declaration line. formatter_opts = self.extras['code-color'] or {} if lexer_name: lexer = self._get_pygments_lexer(lexer_name) if lexer: setattr(lexer, 'orig_name', lexer_name) colored = self._color_with_pygments(codeblock, lexer, **formatter_opts) return "\n\n%s\n\n" % colored codeblock = self._encode_code(codeblock) pre_class_str = self._html_class_str_from_tag("pre") code_class_str = self._html_class_str_from_tag("code") return "\n\n<pre%s><code%s>%s\n</code></pre>\n\n" % ( pre_class_str, code_class_str, codeblock) def _html_class_str_from_tag(self, tag): """Get the appropriate ' class="..."' string (note the leading space), if any, for the given tag. """ if "inline-css" in self.extras: css_from_tag = self.extras.get("inline-css", {}) if tag in css_from_tag: return ' style="%s"' % css_from_tag[tag] if "html-classes" in self.extras: try: html_classes_from_tag = self.extras["html-classes"] except TypeError: return "" else: if tag in html_classes_from_tag: return ' class="%s"' % html_classes_from_tag[tag] return "" def _do_code_blocks(self, text): """Process Markdown `<pre><code>` blocks.""" code_block_re = re.compile(r''' (?:\n\n|\A\n?) ( # $1 = the code block -- one or more lines, starting with a space/tab (?: (?:[ ]{%d} | \t) # Lines must start with a tab or a tab-width of spaces .*\n+ )+ ) ((?=^[ ]{0,%d}\S)|\Z) # Lookahead for non-space at line-start, or end of doc # Lookahead to make sure this block isn't already in a code block. # Needed when syntax highlighting is being used. (?![^<]*\</code\>) ''' % (self.tab_width, self.tab_width), re.M | re.X) return code_block_re.sub(self._code_block_sub, text) _fenced_code_block_re = re.compile(r''' (?:\n\n|\A\n?) ^```([\w+-]+)?[ \t]*\n # opening fence, $1 = optional lang (.*?) 
                                # $2 = code block content
        ^```[ \t]*\n            # closing fence
        ''', re.M | re.X | re.S)

    def _fenced_code_block_sub(self, match):
        return self._code_block_sub(match, is_fenced_code_block=True)

    def _do_fenced_code_blocks(self, text):
        """Process ```-fenced unindented code blocks ('fenced-code-blocks' extra)."""
        return self._fenced_code_block_re.sub(self._fenced_code_block_sub, text)

    # Rules for a code span:
    # - backslash escapes are not interpreted in a code span
    # - to include one backtick, or a run of backticks, the delimiters must
    #   be a longer run of backticks
    # - cannot start or end a code span with a backtick; pad with a
    #   space and that space will be removed in the emitted HTML
    # See `test/tm-cases/escapes.text` for a number of edge-case
    # examples.
    _code_span_re = re.compile(r'''
            (?<!\\)
            (`+)        # \1 = Opening run of `
            (?!`)       # See Note A test/tm-cases/escapes.text
            (.+?)       # \2 = The code block
            (?<!`)
            \1          # Matching closer
            (?!`)
            ''', re.X | re.S)

    def _code_span_sub(self, match):
        c = match.group(2).strip(" \t")
        c = self._encode_code(c)
        return "<code%s>%s</code>" % (
            self._html_class_str_from_tag("inline-code"), c)

    def _do_code_spans(self, text):
        #   *   Backtick quotes are used for <code></code> spans.
        #
        #   *   You can use multiple backticks as the delimiters if you want to
        #       include literal backticks in the code span. So, this input:
        #
        #         Just type ``foo `bar` baz`` at the prompt.
        #
        #       Will translate to:
        #
        #         <p>Just type <code>foo `bar` baz</code> at the prompt.</p>
        #
        #       There's no arbitrary limit to the number of backticks you
        #       can use as delimiters. If you need three consecutive backticks
        #       in your code, use four for delimiters, etc.
        #
        #   *   You can use spaces to get literal backticks at the edges:
        #
        #         ... type `` `bar` `` ...
        #
        #       Turns to:
        #
        #         ... type <code>`bar`</code> ...
        return self._code_span_re.sub(self._code_span_sub, text)

    def _encode_code(self, text):
        """Encode/escape certain characters inside Markdown code runs.
        The point is that in code, these characters are literals,
        and lose their special Markdown meanings.
        """
        replacements = [
            # Encode all ampersands; HTML entities are not
            # entities within a Markdown code span.
('&', '&amp;'), # Do the angle bracket song and dance: ('<', '&lt;'), ('>', '&gt;'), ] for before, after in replacements: text = text.replace(before, after) hashed = _hash_text(text) self._escape_table[text] = hashed return hashed _strong_re = re.compile(r"(\*\*|__)(?=\S)(.+?[*_]*)(?<=\S)\1", re.S) _em_re = re.compile(r"(\*|_)(?=\S)(.+?)(?<=\S)\1", re.S) _strike_re = re.compile(r"~~(?=\S)(.+?)(?<=\S)~~", re.S) _underline_re = re.compile(r"==(?=\S)(.+?)(?<=\S)==", re.S) _code_friendly_strong_re = re.compile(r"\*\*(?=\S)(.+?[*_]*)(?<=\S)\*\*", re.S) _code_friendly_em_re = re.compile(r"\*(?=\S)(.+?)(?<=\S)\*", re.S) def _do_italics_and_bold(self, text): # <strong> must go first: if "code-friendly" in self.extras: text = self._code_friendly_strong_re.sub(r"<strong>\1</strong>", text) text = self._code_friendly_em_re.sub(r"<em>\1</em>", text) else: text = self._strong_re.sub(r"<strong>\2</strong>", text) text = self._em_re.sub(r"<em>\2</em>", text) # text = self._strike_re.sub(r"<del>\1</del>", text) # GFM way # text = self._strike_re.sub(r'<span style="text-decoration: line-through;">\1</span>', text) # Evernote way # text = self._underline_re.sub(r'<span style="text-decoration: underline;">\1</span>', text) # Evernote way text = self._strike_re.sub(r'<strike>\1</strike>', text) text = self._underline_re.sub(r'<u>\1</u>', text) return text # "smarty-pants" extra: Very liberal in interpreting a single prime as an # apostrophe; e.g. ignores the fact that "round", "bout", "twer", and # "twixt" can be written without an initial apostrophe. This is fine because # using scare quotes (single quotation marks) is rare. _apostrophe_year_re = re.compile(r"'(\d\d)(?=(\s|,|;|\.|\?|!|$))") _contractions = ["tis", "twas", "twer", "neath", "o", "n", "round", "bout", "twixt", "nuff", "fraid", "sup"] def _do_smart_contractions(self, text): text = self._apostrophe_year_re.sub(r"&#8217;\1", text) for c in self._contractions: text = text.replace("'%s" % c, "&#8217;%s" % c) text = text.replace("'%s" % c.capitalize(), "&#8217;%s" % c.capitalize()) return text # Substitute double-quotes before single-quotes. _opening_single_quote_re = re.compile(r"(?<!\S)'(?=\S)") _opening_double_quote_re = re.compile(r'(?<!\S)"(?=\S)') _closing_single_quote_re = re.compile(r"(?<=\S)'") _closing_double_quote_re = re.compile(r'(?<=\S)"(?=(\s|,|;|\.|\?|!|$))') def _do_smart_punctuation(self, text): """Fancifies 'single quotes', "double quotes", and apostrophes. Converts --, ---, and ... into en dashes, em dashes, and ellipses. Inspiration is: <http://daringfireball.net/projects/smartypants/> See "test/tm-cases/smarty_pants.text" for a full discussion of the support here and <http://code.google.com/p/python-markdown2/issues/detail?id=42> for a discussion of some diversion from the original SmartyPants. """ if "'" in text: # guard for perf text = self._do_smart_contractions(text) text = self._opening_single_quote_re.sub("&#8216;", text) text = self._closing_single_quote_re.sub("&#8217;", text) if '"' in text: # guard for perf text = self._opening_double_quote_re.sub("&#8220;", text) text = self._closing_double_quote_re.sub("&#8221;", text) text = text.replace("---", "&#8212;") text = text.replace("--", "&#8211;") text = text.replace("...", "&#8230;") text = text.replace(" . . . ", "&#8230;") text = text.replace(". . .", "&#8230;") return text _block_quote_re = re.compile(r''' ( # Wrap whole match in \1 ( ^[ \t]*>[ \t]? 
# '>' at the start of a line .+\n # rest of the first line (.+\n)* # subsequent consecutive lines \n* # blanks )+ ) ''', re.M | re.X) _bq_one_level_re = re.compile('^[ \t]*>[ \t]?', re.M); _html_pre_block_re = re.compile(r'(\s*<pre>.+?</pre>)', re.S) def _dedent_two_spaces_sub(self, match): return re.sub(r'(?m)^ ', '', match.group(1)) def _block_quote_sub(self, match): bq = match.group(1) bq = self._bq_one_level_re.sub('', bq) # trim one level of quoting bq = self._ws_only_line_re.sub('', bq) # trim whitespace-only lines bq = self._run_block_gamut(bq) # recurse bq = re.sub('(?m)^', ' ', bq) # These leading spaces screw with <pre> content, so we need to fix that: bq = self._html_pre_block_re.sub(self._dedent_two_spaces_sub, bq) bqstyle = self._html_class_str_from_tag("blockquote") return "<blockquote%s>\n%s\n</blockquote>\n\n" % (bqstyle, bq) def _do_block_quotes(self, text): if '>' not in text: return text return self._block_quote_re.sub(self._block_quote_sub, text) def _form_paragraphs(self, text): # Strip leading and trailing lines: text = text.strip('\n') # Wrap <p> tags. grafs = [] for i, graf in enumerate(re.split(r"\n{2,}", text)): if graf in self.html_blocks: # Unhashify HTML blocks grafs.append(self.html_blocks[graf]) else: cuddled_list = None if "cuddled-lists" in self.extras: # Need to put back trailing '\n' for `_list_item_re` # match at the end of the paragraph. li = self._list_item_re.search(graf + '\n') # Two of the same list marker in this paragraph: a likely # candidate for a list cuddled to preceding paragraph # text (issue 33). Note the `[-1]` is a quick way to # consider numeric bullets (e.g. "1." and "2.") to be # equal. if (li and len(li.group(2)) <= 3 and li.group("next_marker") and li.group("marker")[-1] == li.group("next_marker")[-1]): start = li.start() cuddled_list = self._do_lists(graf[start:]).rstrip("\n") assert cuddled_list.startswith("<ul>") or cuddled_list.startswith("<ol>") graf = graf[:start] # Wrap <p> tags. 
graf = self._run_span_gamut(graf) grafs.append("<p>" + graf.lstrip(" \t") + "</p>") if cuddled_list: grafs.append(cuddled_list) return "\n\n".join(grafs) def _add_footnotes(self, text): if self.footnotes: footer = [ '<div title="footnotes"%s>' % self._html_class_str_from_tag("footnotes"), # suppressing hr because it complicates parsing in html2text # ('<hr%s' % self._html_class_str_from_tag("hr")) + self.empty_element_suffix, # setting the title (id is not available in Evernote) for easy detection in html2text '<ol title="footnotes">', ] for i, id in enumerate(self.footnote_ids): if i != 0: footer.append('') # footer.append('<li>') # footer.append('<li id="fn-%s">' % id) footer.append('<li title="fn-%s">' % id) footer.append(self._run_block_gamut(self.footnotes[id])) # backlink = ('<a href="#fnref-%s" ' # # 'class="footnoteBackLink" ' # 'title="Jump back to footnote %d in the text.">' # '&#8617;</a>' % (id, i+1)) backlink = "" if footer[-1].endswith("</p>"): footer[-1] = footer[-1][:-len("</p>")] \ + '&nbsp;' + backlink + "</p>" else: footer.append("\n<p>%s</p>" % backlink) footer.append('</li>') footer.append('</ol>') footer.append('</div>') return text + '\n\n' + '\n'.join(footer) else: return text # Ampersand-encoding based entirely on Nat Irons's Amputator MT plugin: # http://bumppo.net/projects/amputator/ _ampersand_re = re.compile(r'&(?!#?[xX]?(?:[0-9a-fA-F]+|\w+);)') _naked_lt_re = re.compile(r'<(?![a-z/?\$!])', re.I) _naked_gt_re = re.compile(r'''(?<![a-z0-9?!/'"-])>''', re.I) def _encode_amps_and_angles(self, text): # Smart processing for ampersands and angle brackets that need # to be encoded. text = self._ampersand_re.sub('&amp;', text) # Encode naked <'s text = self._naked_lt_re.sub('&lt;', text) # Encode naked >'s # Note: Other markdown implementations (e.g. Markdown.pl, PHP # Markdown) don't do this. text = self._naked_gt_re.sub('&gt;', text) return text def _encode_backslash_escapes(self, text): for ch, escape in list(self._escape_table.items()): text = text.replace("\\"+ch, escape) return text _auto_link_re = re.compile(r'<((https?|ftp):[^\'">\s]+)>', re.I) def _auto_link_sub(self, match): g1 = match.group(1) return '<a href="%s">%s</a>' % (g1, g1) _auto_email_link_re = re.compile(r""" < (?:mailto:)? ( [-.\w]+ \@ [-\w]+(\.[-\w]+)*\.[a-z]+ ) > """, re.I | re.X | re.U) def _auto_email_link_sub(self, match): return self._encode_email_address( self._unescape_special_chars(match.group(1))) def _do_auto_links(self, text): text = self._auto_link_re.sub(self._auto_link_sub, text) text = self._auto_email_link_re.sub(self._auto_email_link_sub, text) return text def _encode_email_address(self, addr): # Input: an email address, e.g. "foo@example.com" # # Output: the email address as a mailto link, with each character # of the address encoded as either a decimal or hex entity, in # the hopes of foiling most address harvesting spam bots. E.g.: # # <a href="&#x6D;&#97;&#105;&#108;&#x74;&#111;:&#102;&#111;&#111;&#64;&#101; # x&#x61;&#109;&#x70;&#108;&#x65;&#x2E;&#99;&#111;&#109;">&#102;&#111;&#111; # &#64;&#101;x&#x61;&#109;&#x70;&#108;&#x65;&#x2E;&#99;&#111;&#109;</a> # # Based on a filter by Matthew Wickline, posted to the BBEdit-Talk # mailing list: <http://tinyurl.com/yu7ue> chars = [_xml_encode_email_char_at_random(ch) for ch in "mailto:" + addr] # Strip the mailto: from the visible part. 
addr = '<a href="%s">%s</a>' \ % (''.join(chars), ''.join(chars[7:])) return addr def _do_link_patterns(self, text): """Caveat emptor: there isn't much guarding against link patterns being formed inside other standard Markdown links, e.g. inside a [link def][like this]. Dev Notes: *Could* consider prefixing regexes with a negative lookbehind assertion to attempt to guard against this. """ link_from_hash = {} for regex, repl in self.link_patterns: replacements = [] for match in regex.finditer(text): if hasattr(repl, "__call__"): href = repl(match) else: href = match.expand(repl) replacements.append((match.span(), href)) for (start, end), href in reversed(replacements): escaped_href = ( href.replace('"', '&quot;') # b/c of attr quote # To avoid markdown <em> and <strong>: .replace('*', self._escape_table['*']) .replace('_', self._escape_table['_'])) link = '<a href="%s">%s</a>' % (escaped_href, text[start:end]) hash = _hash_text(link) link_from_hash[hash] = link text = text[:start] + hash + text[end:] for hash, link in list(link_from_hash.items()): text = text.replace(hash, link) return text def _unescape_special_chars(self, text): # Swap back in all the special characters we've hidden. for ch, hash in list(self._escape_table.items()): text = text.replace(hash, ch) return text def _outdent(self, text): # Remove one level of line-leading tabs or spaces return self._outdent_re.sub('', text) class MarkdownWithExtras(Markdown): """A markdowner class that enables most extras: - footnotes - code-color (only has effect if 'pygments' Python module on path) These are not included: - pyshell (specific to Python-related documenting) - code-friendly (because it *disables* part of the syntax) - link-patterns (because you need to specify some actual link-patterns anyway) """ extras = ["footnotes", "code-color"] #---- internal support functions class UnicodeWithAttrs(unicode): """A subclass of unicode used for the return value of conversion to possibly attach some attributes. E.g. the "toc_html" attribute when the "toc" extra is used. """ metadata = None _toc = None def toc_html(self): """Return the HTML for the current TOC. This expects the `_toc` attribute to have been set on this instance. """ if self._toc is None: return None def indent(): return ' ' * (len(h_stack) - 1) lines = [] h_stack = [0] # stack of header-level numbers for level, id, name in self._toc: if level > h_stack[-1]: lines.append("%s<ul>" % indent()) h_stack.append(level) elif level == h_stack[-1]: lines[-1] += "</li>" else: while level < h_stack[-1]: h_stack.pop() if not lines[-1].endswith("</li>"): lines[-1] += "</li>" lines.append("%s</ul></li>" % indent()) lines.append('%s<li><a href="#%s">%s</a>' % ( indent(), id, name)) while len(h_stack) > 1: h_stack.pop() if not lines[-1].endswith("</li>"): lines[-1] += "</li>" lines.append("%s</ul>" % indent()) return '\n'.join(lines) + '\n' toc_html = property(toc_html) ## {{{ http://code.activestate.com/recipes/577257/ (r1) _slugify_strip_re = re.compile(r'[^\w\s-]') _slugify_hyphenate_re = re.compile(r'[-\s]+') def _slugify(value): """ Normalizes string, converts to lowercase, removes non-alpha characters, and converts spaces to hyphens. From Django's "django/template/defaultfilters.py". 
""" import unicodedata value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode() value = _slugify_strip_re.sub('', value).strip().lower() return _slugify_hyphenate_re.sub('-', value) ## end of http://code.activestate.com/recipes/577257/ }}} # From http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52549 def _curry(*args, **kwargs): function, args = args[0], args[1:] def result(*rest, **kwrest): combined = kwargs.copy() combined.update(kwrest) return function(*args + rest, **combined) return result # Recipe: regex_from_encoded_pattern (1.0) def _regex_from_encoded_pattern(s): """'foo' -> re.compile(re.escape('foo')) '/foo/' -> re.compile('foo') '/foo/i' -> re.compile('foo', re.I) """ if s.startswith('/') and s.rfind('/') != 0: # Parse it: /PATTERN/FLAGS idx = s.rfind('/') pattern, flags_str = s[1:idx], s[idx+1:] flag_from_char = { "i": re.IGNORECASE, "l": re.LOCALE, "s": re.DOTALL, "m": re.MULTILINE, "u": re.UNICODE, } flags = 0 for char in flags_str: try: flags |= flag_from_char[char] except KeyError: raise ValueError("unsupported regex flag: '%s' in '%s' " "(must be one of '%s')" % (char, s, ''.join(list(flag_from_char.keys())))) return re.compile(s[1:idx], flags) else: # not an encoded regex return re.compile(re.escape(s)) # Recipe: dedent (0.1.2) def _dedentlines(lines, tabsize=8, skip_first_line=False): """_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines "lines" is a list of lines to dedent. "tabsize" is the tab width to use for indent width calculations. "skip_first_line" is a boolean indicating if the first line should be skipped for calculating the indent width and for dedenting. This is sometimes useful for docstrings and similar. Same as dedent() except operates on a sequence of lines. Note: the lines list is modified **in-place**. """ DEBUG = False if DEBUG: print("dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\ % (tabsize, skip_first_line)) indents = [] margin = None for i, line in enumerate(lines): if i == 0 and skip_first_line: continue indent = 0 for ch in line: if ch == ' ': indent += 1 elif ch == '\t': indent += tabsize - (indent % tabsize) elif ch in '\r\n': continue # skip all-whitespace lines else: break else: continue # skip all-whitespace lines if DEBUG: print("dedent: indent=%d: %r" % (indent, line)) if margin is None: margin = indent else: margin = min(margin, indent) if DEBUG: print("dedent: margin=%r" % margin) if margin is not None and margin > 0: for i, line in enumerate(lines): if i == 0 and skip_first_line: continue removed = 0 for j, ch in enumerate(line): if ch == ' ': removed += 1 elif ch == '\t': removed += tabsize - (removed % tabsize) elif ch in '\r\n': if DEBUG: print("dedent: %r: EOL -> strip up to EOL" % line) lines[i] = lines[i][j:] break else: raise ValueError("unexpected non-whitespace char %r in " "line %r while removing %d-space margin" % (ch, line, margin)) if DEBUG: print("dedent: %r: %r -> removed %d/%d"\ % (line, ch, removed, margin)) if removed == margin: lines[i] = lines[i][j+1:] break elif removed > margin: lines[i] = ' '*(removed-margin) + lines[i][j+1:] break else: if removed: lines[i] = lines[i][removed:] return lines def _dedent(text, tabsize=8, skip_first_line=False): """_dedent(text, tabsize=8, skip_first_line=False) -> dedented text "text" is the text to dedent. "tabsize" is the tab width to use for indent width calculations. "skip_first_line" is a boolean indicating if the first line should be skipped for calculating the indent width and for dedenting. 
This is sometimes useful for docstrings and similar. textwrap.dedent(s), but don't expand tabs to spaces """ lines = text.splitlines(1) _dedentlines(lines, tabsize=tabsize, skip_first_line=skip_first_line) return ''.join(lines) class _memoized(object): """Decorator that caches a function's return value each time it is called. If called later with the same arguments, the cached value is returned, and not re-evaluated. http://wiki.python.org/moin/PythonDecoratorLibrary """ def __init__(self, func): self.func = func self.cache = {} def __call__(self, *args): try: return self.cache[args] except KeyError: self.cache[args] = value = self.func(*args) return value except TypeError: # uncachable -- for instance, passing a list as an argument. # Better to not cache than to blow up entirely. return self.func(*args) def __repr__(self): """Return the function's docstring.""" return self.func.__doc__ def _xml_oneliner_re_from_tab_width(tab_width): """Standalone XML processing instruction regex.""" return re.compile(r""" (?: (?<=\n\n) # Starting after a blank line | # or \A\n? # the beginning of the doc ) ( # save in $1 [ ]{0,%d} (?: <\?\w+\b\s+.*?\?> # XML processing instruction | <\w+:\w+\b\s+.*?/> # namespaced single tag ) [ \t]* (?=\n{2,}|\Z) # followed by a blank line or end of document ) """ % (tab_width - 1), re.X) _xml_oneliner_re_from_tab_width = _memoized(_xml_oneliner_re_from_tab_width) def _hr_tag_re_from_tab_width(tab_width): return re.compile(r""" (?: (?<=\n\n) # Starting after a blank line | # or \A\n? # the beginning of the doc ) ( # save in \1 [ ]{0,%d} <(hr) # start tag = \2 \b # word break ([^<>])*? # /?> # the matching end tag [ \t]* (?=\n{2,}|\Z) # followed by a blank line or end of document ) """ % (tab_width - 1), re.X) _hr_tag_re_from_tab_width = _memoized(_hr_tag_re_from_tab_width) def _xml_escape_attr(attr, skip_single_quote=True): """Escape the given string for use in an HTML/XML tag attribute. By default this doesn't bother with escaping `'` to `&#39;`, presuming that the tag attribute is surrounded by double quotes. """ escaped = (attr .replace('&', '&amp;') .replace('"', '&quot;') .replace('<', '&lt;') .replace('>', '&gt;')) if not skip_single_quote: escaped = escaped.replace("'", "&#39;") return escaped def _xml_encode_email_char_at_random(ch): r = random() # Roughly 10% raw, 45% hex, 45% dec. # '@' *must* be encoded. I [John Gruber] insist. # Issue 26: '_' must be encoded. 
if r > 0.9 and ch not in "@_": return ch elif r < 0.45: # The [1:] is to drop leading '0': 0x63 -> x63 return '&#%s;' % hex(ord(ch))[1:] else: return '&#%s;' % ord(ch) #---- mainline class _NoReflowFormatter(optparse.IndentedHelpFormatter): """An optparse formatter that does NOT reflow the description.""" def format_description(self, description): return description or "" def _test(): import doctest doctest.testmod() def main(argv=None): if argv is None: argv = sys.argv if not logging.root.handlers: logging.basicConfig() usage = "usage: %prog [PATHS...]" version = "%prog "+__version__ parser = optparse.OptionParser(prog="markdown2", usage=usage, version=version, description=cmdln_desc, formatter=_NoReflowFormatter()) parser.add_option("-v", "--verbose", dest="log_level", action="store_const", const=logging.DEBUG, help="more verbose output") parser.add_option("--encoding", help="specify encoding of text content") parser.add_option("--html4tags", action="store_true", default=False, help="use HTML 4 style for empty element tags") parser.add_option("-s", "--safe", metavar="MODE", dest="safe_mode", help="sanitize literal HTML: 'escape' escapes " "HTML meta chars, 'replace' replaces with an " "[HTML_REMOVED] note") parser.add_option("-x", "--extras", action="append", help="Turn on specific extra features (not part of " "the core Markdown spec). See above.") parser.add_option("--use-file-vars", help="Look for and use Emacs-style 'markdown-extras' " "file var to turn on extras. See " "<https://github.com/trentm/python-markdown2/wiki/Extras>") parser.add_option("--link-patterns-file", help="path to a link pattern file") parser.add_option("--self-test", action="store_true", help="run internal self-tests (some doctests)") parser.add_option("--compare", action="store_true", help="run against Markdown.pl as well (for testing)") parser.set_defaults(log_level=logging.INFO, compare=False, encoding="utf-8", safe_mode=None, use_file_vars=False) opts, paths = parser.parse_args() log.setLevel(opts.log_level) if opts.self_test: return _test() if opts.extras: extras = {} for s in opts.extras: splitter = re.compile("[,;: ]+") for e in splitter.split(s): if '=' in e: ename, earg = e.split('=', 1) try: earg = int(earg) except ValueError: pass else: ename, earg = e, None extras[ename] = earg else: extras = None if opts.link_patterns_file: link_patterns = [] f = open(opts.link_patterns_file) try: for i, line in enumerate(f.readlines()): if not line.strip(): continue if line.lstrip().startswith("#"): continue try: pat, href = line.rstrip().rsplit(None, 1) except ValueError: raise MarkdownError("%s:%d: invalid link pattern line: %r" % (opts.link_patterns_file, i+1, line)) link_patterns.append( (_regex_from_encoded_pattern(pat), href)) finally: f.close() else: link_patterns = None from os.path import join, dirname, abspath, exists markdown_pl = join(dirname(dirname(abspath(__file__))), "test", "Markdown.pl") if not paths: paths = ['-'] for path in paths: if path == '-': text = sys.stdin.read() else: fp = codecs.open(path, 'r', opts.encoding) text = fp.read() fp.close() if opts.compare: from subprocess import Popen, PIPE print("==== Markdown.pl ====") p = Popen('perl %s' % markdown_pl, shell=True, stdin=PIPE, stdout=PIPE, close_fds=True) p.stdin.write(text.encode('utf-8')) p.stdin.close() perl_html = p.stdout.read().decode('utf-8') if py3: sys.stdout.write(perl_html) else: sys.stdout.write(perl_html.encode( sys.stdout.encoding or "utf-8", 'xmlcharrefreplace')) print("==== markdown2.py ====") html = markdown(text, 
html4tags=opts.html4tags, safe_mode=opts.safe_mode, extras=extras, link_patterns=link_patterns, use_file_vars=opts.use_file_vars) if py3: sys.stdout.write(html) else: sys.stdout.write(html.encode( sys.stdout.encoding or "utf-8", 'xmlcharrefreplace')) if extras and "toc" in extras: log.debug("toc_html: " + html.toc_html.encode(sys.stdout.encoding or "utf-8", 'xmlcharrefreplace')) if opts.compare: test_dir = join(dirname(dirname(abspath(__file__))), "test") if exists(join(test_dir, "test_markdown2.py")): sys.path.insert(0, test_dir) from test_markdown2 import norm_html_from_html norm_html = norm_html_from_html(html) norm_perl_html = norm_html_from_html(perl_html) else: norm_html = html norm_perl_html = perl_html print("==== match? %r ====" % (norm_perl_html == norm_html)) if __name__ == "__main__": sys.exit( main(sys.argv) )
mit
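The file that ends above is an Evernote-flavored fork of python-markdown2; besides the CLI in main(), it is normally driven through the markdown() helper that main() itself calls. A minimal usage sketch, assuming the file is importable as markdown2 (the extras names are ones the converter's own code checks for):

import markdown2

source = """# Title

A paragraph, then a fenced block:

```python
print("hi")
```
"""

# markdown() accepts the same extras the converter tests for above,
# e.g. "fenced-code-blocks", "header-ids" and "toc".
html = markdown2.markdown(
    source, extras=["fenced-code-blocks", "header-ids", "toc"])
print(html)           # the rendered HTML
print(html.toc_html)  # TOC markup from the "toc" extra, if any headers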
robinro/ansible
lib/ansible/modules/remote_management/hpilo/hponcfg.py
66
2841
#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2012, Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = r'''
---
module: hponcfg
author: Dag Wieers (@dagwieers)
version_added: "2.3"
short_description: Configure HP iLO interface using hponcfg
description:
- This module configures the HP iLO interface using hponcfg.
options:
  path:
    description:
    - The XML file as accepted by hponcfg
    required: true
    aliases: ['src']
  minfw:
    description:
    - The minimum firmware level needed
requirements:
- hponcfg tool
notes:
- You need a working hponcfg on the target system.
'''

EXAMPLES = r'''
- name: Example hponcfg configuration XML
  copy:
    content: |
      <ribcl VERSION="2.0">
        <login USER_LOGIN="user" PASSWORD="password">
          <rib_info MODE="WRITE">
            <mod_global_settings>
              <session_timeout value="0"/>
              <ssh_status value="Y"/>
              <ssh_port value="22"/>
              <serial_cli_status value="3"/>
              <serial_cli_speed value="5"/>
            </mod_global_settings>
          </rib_info>
        </login>
      </ribcl>
    dest: /tmp/enable-ssh.xml

- name: Configure HP iLO using enable-ssh.xml
  hponcfg:
    src: /tmp/enable-ssh.xml
'''

from ansible.module_utils.basic import AnsibleModule


def main():
    module = AnsibleModule(
        argument_spec=dict(
            src=dict(required=True, type='path', aliases=['path']),
            minfw=dict(type='str'),
        )
    )

    # Consider every action a change (not idempotent yet!)
    changed = True

    src = module.params['src']
    minfw = module.params['minfw']

    # Build the hponcfg command line.
    options = ' -f %s' % src

    # Add -v for debugging
    # options += ' -v'

    if minfw:
        options += ' -m %s' % minfw

    rc, stdout, stderr = module.run_command('hponcfg %s' % options)

    if rc != 0:
        module.fail_json(rc=rc, msg="Failed to run hponcfg", stdout=stdout, stderr=stderr)

    module.exit_json(changed=changed, stdout=stdout, stderr=stderr)


if __name__ == '__main__':
    main()
gpl-3.0
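The module above assembles the hponcfg command by string concatenation. AnsibleModule.run_command() also accepts an argv list, which sidesteps shell quoting if the XML path contains spaces. A sketch of that variant; build_hponcfg_argv is an illustrative helper, not part of the module:

def build_hponcfg_argv(src, minfw=None):
    # Same flags the module passes (-f for the XML file, -m for the
    # minimum firmware level), but as a list for module.run_command().
    argv = ['hponcfg', '-f', src]
    if minfw:
        argv += ['-m', minfw]
    return argv

# build_hponcfg_argv('/tmp/enable-ssh.xml', minfw='1.30')
# -> ['hponcfg', '-f', '/tmp/enable-ssh.xml', '-m', '1.30']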
zork9/pygame-pyMM
bombertoad.py
1
3050
# Copyright (c) 2013 Johan Ceuppens. # All rights reserved. # Redistribution and use in source and binary forms are permitted # provided that the above copyright notice and this paragraph are # duplicated in all such forms and that any documentation, # advertising materials, and other materials related to such # distribution and use acknowledge that the software was developed # by the Johan Ceuppens. The name of the # Johan Ceuppens may not be used to endorse or promote products derived # from this software without specific prior written permission. # THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. # Copyright (C) Johan Ceuppens 2010 # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import pygame from pygame.locals import * from gameobject import * from bullet import * from stateimagelibrary import * import random from time import * from math import * from random import * from rng import * class BomberToad(Gameobject): "Dude on Toad throwing Bombs" def __init__(self,xx,yy): Gameobject.__init__(self, xx, yy) self.w = 100 self.h = 100 self.hitpoints = 2 self.yy = yy self.stimlib = Stateimagelibrary() image = pygame.image.load('./pics/bomber-left-1.bmp').convert() image.set_colorkey((0,0,0)) self.stimlib.addpicture(image) image = pygame.image.load('./pics/bomber-left-2.bmp').convert() image.set_colorkey((0,0,0)) self.stimlib.addpicture(image) image = pygame.image.load('./pics/bomber-left-3.bmp').convert() image.set_colorkey((0,0,0)) self.stimlib.addpicture(image) image = pygame.image.load('./pics/bomber-left-4.bmp').convert() image.set_colorkey((0,0,0)) self.stimlib.addpicture(image) self.counter = 0 def draw(self, screen, room): if randint(0,100) != 100 and self.counter == 0: self.counter = 0 self.stimlib.drawstatic(screen, self.x-40+room.relativex,self.y+room.relativey, 0) else: self.counter += 1 self.stimlib.drawstatic(screen, self.x-40+room.relativex,self.y+room.relativey, self.counter) if self.counter >= 3: self.counter = 0 room.gameobjects.append(Bullet(self.x+room.relativex,self.y+room.relativey, "left")) def update(self,room,player): 1 def fight(self,room,player,keydown = -1): 1
gpl-2.0
djpnewton/bitcoin
contrib/linearize/linearize-hashes.py
14
3041
#!/usr/bin/env python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#

from __future__ import print_function
import json
import struct
import re
import base64
import httplib
import sys

settings = {}


class BitcoinRPC:
    def __init__(self, host, port, username, password):
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def execute(self, obj):
        self.conn.request('POST', '/', json.dumps(obj),
                          {'Authorization': self.authhdr,
                           'Content-type': 'application/json'})

        resp = self.conn.getresponse()
        if resp is None:
            print("JSON-RPC: no response", file=sys.stderr)
            return None

        body = resp.read()
        resp_obj = json.loads(body)
        return resp_obj

    @staticmethod
    def build_request(idx, method, params):
        obj = {'version': '1.1',
               'method': method,
               'id': idx}
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        return obj

    @staticmethod
    def response_is_error(resp_obj):
        return 'error' in resp_obj and resp_obj['error'] is not None


def get_block_hashes(settings, max_blocks_per_call=10000):
    rpc = BitcoinRPC(settings['host'], settings['port'],
                     settings['rpcuser'], settings['rpcpassword'])

    height = settings['min_height']
    while height < settings['max_height']+1:
        num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
        batch = []
        for x in range(num_blocks):
            batch.append(rpc.build_request(x, 'getblockhash', [height + x]))

        reply = rpc.execute(batch)

        for x, resp_obj in enumerate(reply):
            if rpc.response_is_error(resp_obj):
                print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
                exit(1)
            assert(resp_obj['id'] == x)  # assume replies are in-sequence
            print(resp_obj['result'])

        height += num_blocks


if __name__ == '__main__':
    if len(sys.argv) != 2:
        print("Usage: linearize-hashes.py CONFIG-FILE")
        sys.exit(1)

    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue

        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 8332
    if 'min_height' not in settings:
        settings['min_height'] = 0
    if 'max_height' not in settings:
        settings['max_height'] = 313000
    if 'rpcuser' not in settings or 'rpcpassword' not in settings:
        print("Missing username and/or password in cfg file", file=sys.stderr)
        sys.exit(1)

    settings['port'] = int(settings['port'])
    settings['min_height'] = int(settings['min_height'])
    settings['max_height'] = int(settings['max_height'])

    get_block_hashes(settings)
mit
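get_block_hashes() above saves round trips by packing up to 10000 getblockhash calls into one JSON-RPC batch: a JSON array of request objects, answered entry-for-entry by the server. A sketch of what one small batch payload looks like; the heights are arbitrary examples:

    import json

    def build_request(idx, method, params):
        # Same shape as BitcoinRPC.build_request above.
        return {'version': '1.1', 'method': method, 'id': idx,
                'params': params if params is not None else []}

    batch = [build_request(i, 'getblockhash', [height])
             for i, height in enumerate(range(100, 103))]
    print(json.dumps(batch))  # POSTed as a single request body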
kustodian/ansible
lib/ansible/module_utils/storage/hpe3par/hpe3par.py
40
2302
# Copyright: (c) 2018, Hewlett Packard Enterprise Development LP # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) from ansible.module_utils import basic def convert_to_binary_multiple(size_with_unit): if size_with_unit is None: return -1 valid_units = ['MiB', 'GiB', 'TiB'] valid_unit = False for unit in valid_units: if size_with_unit.strip().endswith(unit): valid_unit = True size = size_with_unit.split(unit)[0] if float(size) < 0: return -1 if not valid_unit: raise ValueError("%s does not have a valid unit. The unit must be one of %s" % (size_with_unit, valid_units)) size = size_with_unit.replace(" ", "").split('iB')[0] size_kib = basic.human_to_bytes(size) return int(size_kib / (1024 * 1024)) storage_system_spec = { "storage_system_ip": { "required": True, "type": "str" }, "storage_system_username": { "required": True, "type": "str", "no_log": True }, "storage_system_password": { "required": True, "type": "str", "no_log": True }, "secure": { "type": "bool", "default": False } } def cpg_argument_spec(): spec = { "state": { "required": True, "choices": ['present', 'absent'], "type": 'str' }, "cpg_name": { "required": True, "type": "str" }, "domain": { "type": "str" }, "growth_increment": { "type": "str", }, "growth_limit": { "type": "str", }, "growth_warning": { "type": "str", }, "raid_type": { "required": False, "type": "str", "choices": ['R0', 'R1', 'R5', 'R6'] }, "set_size": { "required": False, "type": "int" }, "high_availability": { "type": "str", "choices": ['PORT', 'CAGE', 'MAG'] }, "disk_type": { "type": "str", "choices": ['FC', 'NL', 'SSD'] } } spec.update(storage_system_spec) return spec
gpl-3.0
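convert_to_binary_multiple() above normalizes a human-entered size such as '5 GiB' to MiB so later comparisons are unit-free. A dependency-free sketch of the same conversion, without Ansible's human_to_bytes helper; the simplified unit table is the assumption here:

    UNIT_TO_MIB = {'MiB': 1, 'GiB': 1024, 'TiB': 1024 * 1024}

    def to_mib(size_with_unit):
        text = size_with_unit.strip()
        for unit, factor in UNIT_TO_MIB.items():
            if text.endswith(unit):
                size = float(text[:-len(unit)])
                return -1 if size < 0 else int(size * factor)
        raise ValueError("%s does not have a valid unit. The unit must be one of %s"
                         % (size_with_unit, list(UNIT_TO_MIB)))

    assert to_mib('5 GiB') == 5120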
MadsJensen/agency_connectivity
make_df_hilbert_data.py
1
1383
import numpy as np import pandas as pd import scipy.io as sio from my_settings import * data = sio.loadmat("/home/mje/Projects/agency_connectivity/Data/data_all.mat")[ "data_all"] column_keys = ["subject", "trial", "condition", "shift"] result_df = pd.DataFrame(columns=column_keys) for k, subject in enumerate(subjects): p8_invol_shift = data[k, 3] - np.mean(data[k, 0]) p8_vol_shift = data[k, 2] - np.mean(data[k, 0]) p8_vol_bs_shift = data[k, 1] - np.mean(data[k, 0]) for j in range(89): row = pd.DataFrame([{"trial": int(j), "subject": subject, "condition": "vol_bs", "shift": p8_vol_bs_shift[j + 1][0]}]) result_df = result_df.append(row, ignore_index=True) for j in range(89): row = pd.DataFrame([{"trial": int(j), "subject": subject, "condition": "vol", "shift": p8_vol_shift[j + 1][0]}]) result_df = result_df.append(row, ignore_index=True) for j in range(89): row = pd.DataFrame([{"trial": int(j), "subject": subject, "condition": "invol", "shift": p8_invol_shift[j][0]}]) result_df = result_df.append(row, ignore_index=True)
bsd-3-clause
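The script above grows result_df with one DataFrame.append per trial, which copies the whole frame on every call. A sketch of the usual alternative, collecting plain dicts and building the frame once; the subject id and shift values are placeholders, not the study's data:

    import pandas as pd

    rows = []
    for j in range(89):
        rows.append({"subject": "sub01", "trial": j,     # placeholder subject id
                     "condition": "vol", "shift": 0.0})  # placeholder shift value
    result_df = pd.DataFrame(rows,
                             columns=["subject", "trial", "condition", "shift"])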
disconnect3d/pwndbg
pwndbg/commands/next.py
2
3320
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Stepping until an event occurs """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import gdb import pwndbg.commands import pwndbg.next @pwndbg.commands.Command @pwndbg.commands.OnlyWhenRunning def nextjmp(*args): """Breaks at the next jump instruction""" if pwndbg.next.break_next_branch(): pwndbg.commands.context.context() @pwndbg.commands.Command @pwndbg.commands.OnlyWhenRunning def nextjump(*args): """Breaks at the next jump instruction""" nextjmp(*args) @pwndbg.commands.Command @pwndbg.commands.OnlyWhenRunning def nextcall(*args): """Breaks at the next call instruction""" if pwndbg.next.break_next_call(*args): pwndbg.commands.context.context() @pwndbg.commands.Command @pwndbg.commands.OnlyWhenRunning def nextret(*args): """Breaks at next return-like instruction""" if pwndbg.next.break_next_ret(): pwndbg.commands.context.context() @pwndbg.commands.Command @pwndbg.commands.OnlyWhenRunning def stepret(*args): """Breaks at next return-like instruction by 'stepping' to it""" while pwndbg.proc.alive and not pwndbg.next.break_next_ret() and pwndbg.next.break_next_branch(): # Here we are e.g. on a CALL instruction (temporarily breakpointed by `break_next_branch`) # We need to step so that we take this branch instead of ignoring it gdb.execute('si') continue if pwndbg.proc.alive: pwndbg.commands.context.context() @pwndbg.commands.Command @pwndbg.commands.OnlyWhenRunning def nextproginstr(*args): """Breaks at the next instruction that belongs to the running program""" if pwndbg.next.break_on_program_code(): pwndbg.commands.context.context() @pwndbg.commands.Command @pwndbg.commands.OnlyWhenRunning def stepover(*args): """Sets a breakpoint on the instruction after this one""" pwndbg.next.break_on_next(*args) @pwndbg.commands.Command @pwndbg.commands.OnlyWhenRunning def so(*args): """Alias for stepover""" stepover(*args) @pwndbg.commands.Command @pwndbg.commands.OnlyWhenRunning def nextsyscall(*args): """ Breaks at the next syscall not taking branches. """ while pwndbg.proc.alive and not pwndbg.next.break_next_interrupt() and pwndbg.next.break_next_branch(): continue if pwndbg.proc.alive: pwndbg.commands.context.context() @pwndbg.commands.Command @pwndbg.commands.OnlyWhenRunning def nextsc(*args): """ Breaks at the next syscall not taking branches. """ nextsyscall(*args) @pwndbg.commands.Command @pwndbg.commands.OnlyWhenRunning def stepsyscall(*args): """ Breaks at the next syscall by taking branches. """ while pwndbg.proc.alive and not pwndbg.next.break_next_interrupt() and pwndbg.next.break_next_branch(): # Here we are e.g. on a CALL instruction (temporarily breakpointed by `break_next_branch`) # We need to step so that we take this branch instead of ignoring it gdb.execute('si') continue if pwndbg.proc.alive: pwndbg.commands.context.context() @pwndbg.commands.Command @pwndbg.commands.OnlyWhenRunning def stepsc(*args): stepsyscall(*args)
mit
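stepret, stepsyscall and nextsyscall above share one loop shape: breakpoint the interesting event, and if only a branch is found first, single-step through it and retry. A sketch of that loop against plain GDB scripting; plant_target and plant_branch stand in for the pwndbg.next helpers (break_next_ret, break_next_branch) and are assumptions here, passed in as callables:

    import gdb  # only available inside a GDB session

    def step_until(plant_target, plant_branch):
        # plant_target() breakpoints the event we want (ret, syscall, ...);
        # plant_branch() breakpoints the next branch so we can step through it.
        while not plant_target() and plant_branch():
            gdb.execute('si')  # take the branch rather than skipping it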
bbengfort/TextBlob
textblob/nltk/parse/featurechart.py
2
22190
# -*- coding: utf-8 -*- # Natural Language Toolkit: Chart Parser for Feature-Based Grammars # # Copyright (C) 2001-2013 NLTK Project # Author: Rob Speer <rspeer@mit.edu> # Peter Ljunglöf <peter.ljunglof@heatherleaf.se> # URL: <http://www.nltk.org/> # For license information, see LICENSE.TXT """ Extension of chart parsing implementation to handle grammars with feature structures as nodes. """ from __future__ import print_function, unicode_literals from nltk.compat import xrange, python_2_unicode_compatible from nltk.featstruct import FeatStruct, unify, FeatStructParser, TYPE, find_variables from nltk.sem import logic from nltk.tree import Tree from nltk.grammar import (Nonterminal, Production, ContextFreeGrammar, FeatStructNonterminal, is_nonterminal, is_terminal) from nltk.parse.chart import (TreeEdge, Chart, ChartParser, EdgeI, FundamentalRule, LeafInitRule, EmptyPredictRule, BottomUpPredictRule, SingleEdgeFundamentalRule, BottomUpPredictCombineRule, CachedTopDownPredictRule, TopDownInitRule) #//////////////////////////////////////////////////////////// # Tree Edge #//////////////////////////////////////////////////////////// @python_2_unicode_compatible class FeatureTreeEdge(TreeEdge): """ A specialized tree edge that allows shared variable bindings between nonterminals on the left-hand side and right-hand side. Each ``FeatureTreeEdge`` contains a set of ``bindings``, i.e., a dictionary mapping from variables to values. If the edge is not complete, then these bindings are simply stored. However, if the edge is complete, then the constructor applies these bindings to every nonterminal in the edge whose symbol implements the interface ``SubstituteBindingsI``. """ def __init__(self, span, lhs, rhs, dot=0, bindings=None): """ Construct a new edge. If the edge is incomplete (i.e., if ``dot<len(rhs)``), then store the bindings as-is. If the edge is complete (i.e., if ``dot==len(rhs)``), then apply the bindings to all nonterminals in ``lhs`` and ``rhs``, and then clear the bindings. See ``TreeEdge`` for a description of the other arguments. """ if bindings is None: bindings = {} # If the edge is complete, then substitute in the bindings, # and then throw them away. (If we didn't throw them away, we # might think that 2 complete edges are different just because # they have different bindings, even though all bindings have # already been applied.) if dot == len(rhs) and bindings: lhs = self._bind(lhs, bindings) rhs = [self._bind(elt, bindings) for elt in rhs] bindings = {} # Initialize the edge. TreeEdge.__init__(self, span, lhs, rhs, dot) self._bindings = bindings self._comparison_key = (self._comparison_key, tuple(sorted(bindings.items()))) @staticmethod def from_production(production, index): """ :return: A new ``TreeEdge`` formed from the given production. The new edge's left-hand side and right-hand side will be taken from ``production``; its span will be ``(index,index)``; and its dot position will be ``0``. :rtype: TreeEdge """ return FeatureTreeEdge(span=(index, index), lhs=production.lhs(), rhs=production.rhs(), dot=0) def move_dot_forward(self, new_end, bindings=None): """ :return: A new ``FeatureTreeEdge`` formed from this edge. The new edge's dot position is increased by ``1``, and its end index will be replaced by ``new_end``. :rtype: FeatureTreeEdge :param new_end: The new end index. :type new_end: int :param bindings: Bindings for the new edge. 
:type bindings: dict """ return FeatureTreeEdge(span=(self._span[0], new_end), lhs=self._lhs, rhs=self._rhs, dot=self._dot+1, bindings=bindings) def _bind(self, nt, bindings): if not isinstance(nt, FeatStructNonterminal): return nt return nt.substitute_bindings(bindings) def next_with_bindings(self): return self._bind(self.nextsym(), self._bindings) def bindings(self): """ Return a copy of this edge's bindings dictionary. """ return self._bindings.copy() def variables(self): """ :return: The set of variables used by this edge. :rtype: set(Variable) """ return find_variables([self._lhs] + list(self._rhs) + list(self._bindings.keys()) + list(self._bindings.values()), fs_class=FeatStruct) def __str__(self): if self.is_complete(): return TreeEdge.__unicode__(self) else: bindings = '{%s}' % ', '.join('%s: %r' % item for item in sorted(self._bindings.items())) return '%s %s' % (TreeEdge.__unicode__(self), bindings) #//////////////////////////////////////////////////////////// # A specialized Chart for feature grammars #//////////////////////////////////////////////////////////// # TODO: subsumes check when adding new edges class FeatureChart(Chart): """ A Chart for feature grammars. :see: ``Chart`` for more information. """ def select(self, **restrictions): """ Returns an iterator over the edges in this chart. See ``Chart.select`` for more information about the ``restrictions`` on the edges. """ # If there are no restrictions, then return all edges. if restrictions=={}: return iter(self._edges) # Find the index corresponding to the given restrictions. restr_keys = sorted(restrictions.keys()) restr_keys = tuple(restr_keys) # If it doesn't exist, then create it. if restr_keys not in self._indexes: self._add_index(restr_keys) vals = tuple(self._get_type_if_possible(restrictions[key]) for key in restr_keys) return iter(self._indexes[restr_keys].get(vals, [])) def _add_index(self, restr_keys): """ A helper function for ``select``, which creates a new index for a given set of attributes (aka restriction keys). """ # Make sure it's a valid index. for key in restr_keys: if not hasattr(EdgeI, key): raise ValueError('Bad restriction: %s' % key) # Create the index. index = self._indexes[restr_keys] = {} # Add all existing edges to the index. for edge in self._edges: vals = tuple(self._get_type_if_possible(getattr(edge, key)()) for key in restr_keys) index.setdefault(vals, []).append(edge) def _register_with_indexes(self, edge): """ A helper function for ``insert``, which registers the new edge with all existing indexes. 
""" for (restr_keys, index) in self._indexes.items(): vals = tuple(self._get_type_if_possible(getattr(edge, key)()) for key in restr_keys) index.setdefault(vals, []).append(edge) def _get_type_if_possible(self, item): """ Helper function which returns the ``TYPE`` feature of the ``item``, if it exists, otherwise it returns the ``item`` itself """ if isinstance(item, dict) and TYPE in item: return item[TYPE] else: return item def parses(self, start, tree_class=Tree): trees = [] for edge in self.select(start=0, end=self._num_leaves): if ( (isinstance(edge, FeatureTreeEdge)) and (edge.lhs()[TYPE] == start[TYPE]) and (unify(edge.lhs(), start, rename_vars=True)) ): trees += self.trees(edge, complete=True, tree_class=tree_class) return trees #//////////////////////////////////////////////////////////// # Fundamental Rule #//////////////////////////////////////////////////////////// class FeatureFundamentalRule(FundamentalRule): """ A specialized version of the fundamental rule that operates on nonterminals whose symbols are ``FeatStructNonterminal``s. Rather tha simply comparing the nonterminals for equality, they are unified. Variable bindings from these unifications are collected and stored in the chart using a ``FeatureTreeEdge``. When a complete edge is generated, these bindings are applied to all nonterminals in the edge. The fundamental rule states that: - ``[A -> alpha \* B1 beta][i:j]`` - ``[B2 -> gamma \*][j:k]`` licenses the edge: - ``[A -> alpha B3 \* beta][i:j]`` assuming that B1 and B2 can be unified to generate B3. """ def apply_iter(self, chart, grammar, left_edge, right_edge): # Make sure the rule is applicable. if not (left_edge.end() == right_edge.start() and left_edge.is_incomplete() and right_edge.is_complete() and isinstance(left_edge, FeatureTreeEdge)): return found = right_edge.lhs() nextsym = left_edge.nextsym() if isinstance(right_edge, FeatureTreeEdge): if not is_nonterminal(nextsym): return if left_edge.nextsym()[TYPE] != right_edge.lhs()[TYPE]: return # Create a copy of the bindings. bindings = left_edge.bindings() # We rename vars here, because we don't want variables # from the two different productions to match. found = found.rename_variables(used_vars=left_edge.variables()) # Unify B1 (left_edge.nextsym) with B2 (right_edge.lhs) to # generate B3 (result). result = unify(nextsym, found, bindings, rename_vars=False) if result is None: return else: if nextsym != found: return # Create a copy of the bindings. bindings = left_edge.bindings() # Construct the new edge. new_edge = left_edge.move_dot_forward(right_edge.end(), bindings) # Add it to the chart, with appropriate child pointers. if chart.insert_with_backpointer(new_edge, left_edge, right_edge): yield new_edge class FeatureSingleEdgeFundamentalRule(SingleEdgeFundamentalRule): """ A specialized version of the completer / single edge fundamental rule that operates on nonterminals whose symbols are ``FeatStructNonterminal``s. Rather than simply comparing the nonterminals for equality, they are unified. 
""" _fundamental_rule = FeatureFundamentalRule() def _apply_complete(self, chart, grammar, right_edge): fr = self._fundamental_rule for left_edge in chart.select(end=right_edge.start(), is_complete=False, nextsym=right_edge.lhs()): for new_edge in fr.apply_iter(chart, grammar, left_edge, right_edge): yield new_edge def _apply_incomplete(self, chart, grammar, left_edge): fr = self._fundamental_rule for right_edge in chart.select(start=left_edge.end(), is_complete=True, lhs=left_edge.nextsym()): for new_edge in fr.apply_iter(chart, grammar, left_edge, right_edge): yield new_edge #//////////////////////////////////////////////////////////// # Top-Down Prediction #//////////////////////////////////////////////////////////// class FeatureTopDownInitRule(TopDownInitRule): def apply_iter(self, chart, grammar): for prod in grammar.productions(lhs=grammar.start()): new_edge = FeatureTreeEdge.from_production(prod, 0) if chart.insert(new_edge, ()): yield new_edge class FeatureTopDownPredictRule(CachedTopDownPredictRule): """ A specialized version of the (cached) top down predict rule that operates on nonterminals whose symbols are ``FeatStructNonterminal``s. Rather than simply comparing the nonterminals for equality, they are unified. The top down expand rule states that: - ``[A -> alpha \* B1 beta][i:j]`` licenses the edge: - ``[B2 -> \* gamma][j:j]`` for each grammar production ``B2 -> gamma``, assuming that B1 and B2 can be unified. """ def apply_iter(self, chart, grammar, edge): if edge.is_complete(): return nextsym, index = edge.nextsym(), edge.end() if not is_nonterminal(nextsym): return # If we've already applied this rule to an edge with the same # next & end, and the chart & grammar have not changed, then # just return (no new edges to add). done = self._done.get((nextsym, index), (None,None)) if done[0] is chart and done[1] is grammar: return for prod in grammar.productions(lhs=edge.nextsym()): # If the left corner in the predicted production is # leaf, it must match with the input. if prod.rhs(): first = prod.rhs()[0] if is_terminal(first): if index >= chart.num_leaves(): continue if first != chart.leaf(index): continue # We rename vars here, because we don't want variables # from the two different productions to match. if unify(prod.lhs(), edge.next_with_bindings(), rename_vars=True): new_edge = FeatureTreeEdge.from_production(prod, edge.end()) if chart.insert(new_edge, ()): yield new_edge # Record the fact that we've applied this rule. self._done[nextsym, index] = (chart, grammar) #//////////////////////////////////////////////////////////// # Bottom-Up Prediction #//////////////////////////////////////////////////////////// class FeatureBottomUpPredictRule(BottomUpPredictRule): def apply_iter(self, chart, grammar, edge): if edge.is_incomplete(): return for prod in grammar.productions(rhs=edge.lhs()): if isinstance(edge, FeatureTreeEdge): _next = prod.rhs()[0] if not is_nonterminal(_next): continue new_edge = FeatureTreeEdge.from_production(prod, edge.start()) if chart.insert(new_edge, ()): yield new_edge class FeatureBottomUpPredictCombineRule(BottomUpPredictCombineRule): def apply_iter(self, chart, grammar, edge): if edge.is_incomplete(): return found = edge.lhs() for prod in grammar.productions(rhs=found): bindings = {} if isinstance(edge, FeatureTreeEdge): _next = prod.rhs()[0] if not is_nonterminal(_next): continue # We rename vars here, because we don't want variables # from the two different productions to match. 
used_vars = find_variables((prod.lhs(),) + prod.rhs(), fs_class=FeatStruct) found = found.rename_variables(used_vars=used_vars) result = unify(_next, found, bindings, rename_vars=False) if result is None: continue new_edge = (FeatureTreeEdge.from_production(prod, edge.start()) .move_dot_forward(edge.end(), bindings)) if chart.insert(new_edge, (edge,)): yield new_edge class FeatureEmptyPredictRule(EmptyPredictRule): def apply_iter(self, chart, grammar): for prod in grammar.productions(empty=True): for index in xrange(chart.num_leaves() + 1): new_edge = FeatureTreeEdge.from_production(prod, index) if chart.insert(new_edge, ()): yield new_edge #//////////////////////////////////////////////////////////// # Feature Chart Parser #//////////////////////////////////////////////////////////// TD_FEATURE_STRATEGY = [LeafInitRule(), FeatureTopDownInitRule(), FeatureTopDownPredictRule(), FeatureSingleEdgeFundamentalRule()] BU_FEATURE_STRATEGY = [LeafInitRule(), FeatureEmptyPredictRule(), FeatureBottomUpPredictRule(), FeatureSingleEdgeFundamentalRule()] BU_LC_FEATURE_STRATEGY = [LeafInitRule(), FeatureEmptyPredictRule(), FeatureBottomUpPredictCombineRule(), FeatureSingleEdgeFundamentalRule()] class FeatureChartParser(ChartParser): def __init__(self, grammar, strategy=BU_LC_FEATURE_STRATEGY, trace_chart_width=20, chart_class=FeatureChart, **parser_args): ChartParser.__init__(self, grammar, strategy=strategy, trace_chart_width=trace_chart_width, chart_class=chart_class, **parser_args) class FeatureTopDownChartParser(FeatureChartParser): def __init__(self, grammar, **parser_args): FeatureChartParser.__init__(self, grammar, TD_FEATURE_STRATEGY, **parser_args) class FeatureBottomUpChartParser(FeatureChartParser): def __init__(self, grammar, **parser_args): FeatureChartParser.__init__(self, grammar, BU_FEATURE_STRATEGY, **parser_args) class FeatureBottomUpLeftCornerChartParser(FeatureChartParser): def __init__(self, grammar, **parser_args): FeatureChartParser.__init__(self, grammar, BU_LC_FEATURE_STRATEGY, **parser_args) #//////////////////////////////////////////////////////////// # Instantiate Variable Chart #//////////////////////////////////////////////////////////// class InstantiateVarsChart(FeatureChart): """ A specialized chart that 'instantiates' variables whose names start with '@', by replacing them with unique new variables. In particular, whenever a complete edge is added to the chart, any variables in the edge's ``lhs`` whose names start with '@' will be replaced by unique new ``Variable``s. """ def __init__(self, tokens): FeatureChart.__init__(self, tokens) def initialize(self): self._instantiated = set() FeatureChart.initialize(self) def insert(self, edge, child_pointer_list): if edge in self._instantiated: return False self.instantiate_edge(edge) return FeatureChart.insert(self, edge, child_pointer_list) def instantiate_edge(self, edge): """ If the edge is a ``FeatureTreeEdge``, and it is complete, then instantiate all variables whose names start with '@', by replacing them with unique new variables. Note that instantiation is done in-place, since the parsing algorithms might already hold a reference to the edge for future use. """ # If the edge is a leaf, or is not complete, or is # already in the chart, then just return it as-is. if not isinstance(edge, FeatureTreeEdge): return if not edge.is_complete(): return if edge in self._edge_to_cpls: return # Get a list of variables that need to be instantiated. # If there are none, then return as-is. 
inst_vars = self.inst_vars(edge) if not inst_vars: return # Instantiate the edge! self._instantiated.add(edge) edge._lhs = edge.lhs().substitute_bindings(inst_vars) def inst_vars(self, edge): return dict((var, logic.unique_variable()) for var in edge.lhs().variables() if var.name.startswith('@')) #//////////////////////////////////////////////////////////// # Demo #//////////////////////////////////////////////////////////// def demo_grammar(): from nltk.grammar import parse_fcfg return parse_fcfg(""" S -> NP VP PP -> Prep NP NP -> NP PP VP -> VP PP VP -> Verb NP VP -> Verb NP -> Det[pl=?x] Noun[pl=?x] NP -> "John" NP -> "I" Det -> "the" Det -> "my" Det[-pl] -> "a" Noun[-pl] -> "dog" Noun[-pl] -> "cookie" Verb -> "ate" Verb -> "saw" Prep -> "with" Prep -> "under" """) def demo(should_print_times=True, should_print_grammar=True, should_print_trees=True, should_print_sentence=True, trace=1, parser=FeatureChartParser, sent='I saw John with a dog with my cookie'): import sys, time print() grammar = demo_grammar() if should_print_grammar: print(grammar) print() print("*", parser.__name__) if should_print_sentence: print("Sentence:", sent) tokens = sent.split() t = time.clock() cp = parser(grammar, trace=trace) chart = cp.chart_parse(tokens) trees = chart.parses(grammar.start()) if should_print_times: print("Time: %s" % (time.clock() - t)) if should_print_trees: for tree in trees: print(tree) else: print("Nr trees:", len(trees)) def run_profile(): import profile profile.run('for i in range(1): demo()', '/tmp/profile.out') import pstats p = pstats.Stats('/tmp/profile.out') p.strip_dirs().sort_stats('time', 'cum').print_stats(60) p.strip_dirs().sort_stats('cum', 'time').print_stats(60) if __name__ == '__main__': from nltk.data import load demo() print() grammar = load('grammars/book_grammars/feat0.fcfg') cp = FeatureChartParser(grammar, trace=2) sent = 'Kim likes children' tokens = sent.split() trees = cp.nbest_parse(tokens) for tree in trees: print(tree)
mit
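Everything in FeatureFundamentalRule turns on unification with shared bindings: B1 from the incomplete edge is unified with B2 from the complete edge, and the collected bindings are later applied to the whole edge. A small standalone demonstration using NLTK's real featstruct API; the feature values are made up:

    from nltk.featstruct import FeatStruct, unify

    b1 = FeatStruct('[pl=?x]')    # nonterminal with an unbound variable
    b2 = FeatStruct('[pl=True]')  # what the completed edge provides
    bindings = {}
    result = unify(b1, b2, bindings, rename_vars=False)
    print(result)    # [pl=True]
    print(bindings)  # {Variable('?x'): True}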
jdf76/plugin.video.youtube
resources/lib/youtube_plugin/kodion/utils/http_server.py
1
21426
# -*- coding: utf-8 -*- """ Copyright (C) 2018-2018 plugin.video.youtube SPDX-License-Identifier: GPL-2.0-only See LICENSES/GPL-2.0-only for more information. """ from six.moves import BaseHTTPServer from six.moves.urllib.parse import parse_qs, urlparse from six.moves import range import json import os import re import requests import socket import xbmc import xbmcaddon import xbmcgui from .. import logger class YouTubeRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler): def __init__(self, request, client_address, server): self.addon_id = 'plugin.video.youtube' addon = xbmcaddon.Addon(self.addon_id) whitelist_ips = addon.getSetting('kodion.http.ip.whitelist') whitelist_ips = ''.join(whitelist_ips.split()) self.whitelist_ips = whitelist_ips.split(',') self.local_ranges = ('10.', '172.16.', '192.168.', '127.0.0.1', 'localhost', '::1') self.chunk_size = 1024 * 64 try: self.base_path = xbmc.translatePath('special://temp/%s' % self.addon_id).decode('utf-8') except AttributeError: self.base_path = xbmc.translatePath('special://temp/%s' % self.addon_id) BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, request, client_address, server) def connection_allowed(self): client_ip = self.client_address[0] log_lines = ['HTTPServer: Connection from |%s|' % client_ip] conn_allowed = client_ip.startswith(self.local_ranges) log_lines.append('Local range: |%s|' % str(conn_allowed)) if not conn_allowed: conn_allowed = client_ip in self.whitelist_ips log_lines.append('Whitelisted: |%s|' % str(conn_allowed)) if not conn_allowed: logger.log_debug('HTTPServer: Connection from |%s| not allowed' % client_ip) else: if self.path != '/ping': logger.log_debug(' '.join(log_lines)) return conn_allowed # noinspection PyPep8Naming def do_GET(self): addon = xbmcaddon.Addon('plugin.video.youtube') dash_proxy_enabled = addon.getSetting('kodion.mpd.videos') == 'true' and addon.getSetting('kodion.video.quality.mpd') == 'true' api_config_enabled = addon.getSetting('youtube.api.config.page') == 'true' if self.path == '/client_ip': client_json = json.dumps({"ip": "{ip}".format(ip=self.client_address[0])}) self.send_response(200) self.send_header('Content-Type', 'application/json; charset=utf-8') self.send_header('Content-Length', len(client_json)) self.end_headers() self.wfile.write(client_json.encode('utf-8')) if self.path != '/ping': logger.log_debug('HTTPServer: Request uri path |{proxy_path}|'.format(proxy_path=self.path)) if not self.connection_allowed(): self.send_error(403) else: if dash_proxy_enabled and self.path.endswith('.mpd'): file_path = os.path.join(self.base_path, self.path.strip('/').strip('\\')) file_chunk = True logger.log_debug('HTTPServer: Request file path |{file_path}|'.format(file_path=file_path.encode('utf-8'))) try: with open(file_path, 'rb') as f: self.send_response(200) self.send_header('Content-Type', 'application/xml+dash') self.send_header('Content-Length', os.path.getsize(file_path)) self.end_headers() while file_chunk: file_chunk = f.read(self.chunk_size) if file_chunk: self.wfile.write(file_chunk) except IOError: response = 'File Not Found: |{proxy_path}| -> |{file_path}|'.format(proxy_path=self.path, file_path=file_path.encode('utf-8')) self.send_error(404, response) elif api_config_enabled and self.path == '/api': html = self.api_config_page() html = html.encode('utf-8') self.send_response(200) self.send_header('Content-Type', 'text/html; charset=utf-8') self.send_header('Content-Length', len(html)) self.end_headers() for chunk in self.get_chunks(html): self.wfile.write(chunk) elif 
api_config_enabled and self.path.startswith('/api_submit'): addon = xbmcaddon.Addon('plugin.video.youtube') i18n = addon.getLocalizedString xbmc.executebuiltin('Dialog.Close(addonsettings,true)') old_api_key = addon.getSetting('youtube.api.key') old_api_id = addon.getSetting('youtube.api.id') old_api_secret = addon.getSetting('youtube.api.secret') query = urlparse(self.path).query params = parse_qs(query) api_key = params.get('api_key', [None])[0] api_id = params.get('api_id', [None])[0] api_secret = params.get('api_secret', [None])[0] if api_key and api_id and api_secret: footer = i18n(30638) else: footer = u'' if re.search(r'api_key=(?:&|$)', query): api_key = '' if re.search(r'api_id=(?:&|$)', query): api_id = '' if re.search(r'api_secret=(?:&|$)', query): api_secret = '' updated = [] if api_key is not None and api_key != old_api_key: addon.setSetting('youtube.api.key', api_key) updated.append(i18n(30201)) if api_id is not None and api_id != old_api_id: addon.setSetting('youtube.api.id', api_id) updated.append(i18n(30202)) if api_secret is not None and api_secret != old_api_secret: updated.append(i18n(30203)) addon.setSetting('youtube.api.secret', api_secret) if addon.getSetting('youtube.api.key') and addon.getSetting('youtube.api.id') and \ addon.getSetting('youtube.api.secret'): enabled = i18n(30636) else: enabled = i18n(30637) if not updated: updated = i18n(30635) else: updated = i18n(30631) % u', '.join(updated) html = self.api_submit_page(updated, enabled, footer) html = html.encode('utf-8') self.send_response(200) self.send_header('Content-Type', 'text/html; charset=utf-8') self.send_header('Content-Length', len(html)) self.end_headers() for chunk in self.get_chunks(html): self.wfile.write(chunk) elif self.path == '/ping': self.send_error(204) else: self.send_error(501) # noinspection PyPep8Naming def do_HEAD(self): logger.log_debug('HTTPServer: Request uri path |{proxy_path}|'.format(proxy_path=self.path)) if not self.connection_allowed(): self.send_error(403) else: addon = xbmcaddon.Addon('plugin.video.youtube') dash_proxy_enabled = addon.getSetting('kodion.mpd.videos') == 'true' and addon.getSetting('kodion.video.quality.mpd') == 'true' if dash_proxy_enabled and self.path.endswith('.mpd'): file_path = os.path.join(self.base_path, self.path.strip('/').strip('\\')) if not os.path.isfile(file_path): response = 'File Not Found: |{proxy_path}| -> |{file_path}|'.format(proxy_path=self.path, file_path=file_path.encode('utf-8')) self.send_error(404, response) else: self.send_response(200) self.send_header('Content-Type', 'application/xml+dash') self.send_header('Content-Length', os.path.getsize(file_path)) self.end_headers() else: self.send_error(501) # noinspection PyPep8Naming def do_POST(self): logger.log_debug('HTTPServer: Request uri path |{proxy_path}|'.format(proxy_path=self.path)) if not self.connection_allowed(): self.send_error(403) elif self.path.startswith('/widevine'): license_url = xbmcgui.Window(10000).getProperty('plugin.video.youtube-license_url') license_token = xbmcgui.Window(10000).getProperty('plugin.video.youtube-license_token') if not license_url: self.send_error(404) return if not license_token: self.send_error(403) return size_limit = None length = int(self.headers['Content-Length']) post_data = self.rfile.read(length) li_headers = { 'Content-Type': 'application/x-www-form-urlencoded', 'Authorization': 'Bearer %s' % license_token } result = requests.post(url=license_url, headers=li_headers, data=post_data, stream=True) response_length = 
int(result.headers.get('content-length')) content = result.raw.read(response_length) content_split = content.split('\r\n\r\n'.encode('utf-8')) response_header = content_split[0].decode('utf-8', 'ignore') response_body = content_split[1] response_length = len(response_body) match = re.search(r'^Authorized-Format-Types:\s*(?P<authorized_types>.+?)\r*$', response_header, re.MULTILINE) if match: authorized_types = match.group('authorized_types').split(',') logger.log_debug('HTTPServer: Found authorized formats |{authorized_fmts}|'.format(authorized_fmts=authorized_types)) fmt_to_px = {'SD': (1280 * 528) - 1, 'HD720': 1280 * 720, 'HD': 7680 * 4320} if 'HD' in authorized_types: size_limit = fmt_to_px['HD'] elif 'HD720' in authorized_types: if xbmc.getCondVisibility('system.platform.android') == 1: size_limit = fmt_to_px['HD720'] else: size_limit = fmt_to_px['SD'] elif 'SD' in authorized_types: size_limit = fmt_to_px['SD'] self.send_response(200) if size_limit: self.send_header('X-Limit-Video', 'max={size_limit}px'.format(size_limit=str(size_limit))) for d in list(result.headers.items()): if re.match('^[Cc]ontent-[Ll]ength$', d[0]): self.send_header(d[0], response_length) else: self.send_header(d[0], d[1]) self.end_headers() for chunk in self.get_chunks(response_body): self.wfile.write(chunk) else: self.send_error(501) # noinspection PyShadowingBuiltins def log_message(self, format, *args): return def get_chunks(self, data): for i in range(0, len(data), self.chunk_size): yield data[i:i + self.chunk_size] @staticmethod def api_config_page(): addon = xbmcaddon.Addon('plugin.video.youtube') i18n = addon.getLocalizedString api_key = addon.getSetting('youtube.api.key') api_id = addon.getSetting('youtube.api.id') api_secret = addon.getSetting('youtube.api.secret') html = Pages().api_configuration.get('html') css = Pages().api_configuration.get('css') html = html.format(css=css, title=i18n(30634), api_key_head=i18n(30201), api_id_head=i18n(30202), api_secret_head=i18n(30203), api_id_value=api_id, api_key_value=api_key, api_secret_value=api_secret, submit=i18n(30630), header=i18n(30634)) return html @staticmethod def api_submit_page(updated_keys, enabled, footer): addon = xbmcaddon.Addon('plugin.video.youtube') i18n = addon.getLocalizedString html = Pages().api_submit.get('html') css = Pages().api_submit.get('css') html = html.format(css=css, title=i18n(30634), updated=updated_keys, enabled=enabled, footer=footer, header=i18n(30634)) return html class Pages(object): api_configuration = { 'html': u'<!doctype html>\n<html>\n' u'<head>\n\t<meta charset="utf-8">\n' u'\t<title>{title}</title>\n' u'\t<style>\n{css}\t</style>\n' u'</head>\n<body>\n' u'\t<div class="center">\n' u'\t<h5>{header}</h5>\n' u'\t<form action="/api_submit" class="config_form">\n' u'\t\t<label for="api_key">\n' u'\t\t<span>{api_key_head}</span><input type="text" name="api_key" value="{api_key_value}" size="50"/>\n' u'\t\t</label>\n' u'\t\t<label for="api_id">\n' u'\t\t<span>{api_id_head}</span><input type="text" name="api_id" value="{api_id_value}" size="50"/>\n' u'\t\t</label>\n' u'\t\t<label for="api_secret">\n' u'\t\t<span>{api_secret_head}</span><input type="text" name="api_secret" value="{api_secret_value}" size="50"/>\n' u'\t\t</label>\n' u'\t\t<input type="submit" value="{submit}">\n' u'\t</form>\n' u'\t</div>\n' u'</body>\n</html>', 'css': u'body {\n' u' background: #141718;\n' u'}\n' u'.center {\n' u' margin: auto;\n' u' width: 600px;\n' u' padding: 10px;\n' u'}\n' u'.config_form {\n' u' width: 575px;\n' u' height: 145px;\n' u' 
font-size: 16px;\n' u' background: #1a2123;\n' u' padding: 30px 30px 15px 30px;\n' u' border: 5px solid #1a2123;\n' u'}\n' u'h5 {\n' u' font-family: Arial, Helvetica, sans-serif;\n' u' font-size: 16px;\n' u' color: #fff;\n' u' font-weight: 600;\n' u' width: 575px;\n' u' height: 20px;\n' u' background: #0f84a5;\n' u' padding: 5px 30px 5px 30px;\n' u' border: 5px solid #0f84a5;\n' u' margin: 0px;\n' u'}\n' u'.config_form input[type=submit],\n' u'.config_form input[type=button],\n' u'.config_form input[type=text],\n' u'.config_form textarea,\n' u'.config_form label {\n' u' font-family: Arial, Helvetica, sans-serif;\n' u' font-size: 16px;\n' u' color: #fff;\n' u'}\n' u'.config_form label {\n' u' display:block;\n' u' margin-bottom: 10px;\n' u'}\n' u'.config_form label > span {\n' u' display: inline-block;\n' u' float: left;\n' u' width: 150px;\n' u'}\n' u'.config_form input[type=text] {\n' u' background: transparent;\n' u' border: none;\n' u' border-bottom: 1px solid #147a96;\n' u' width: 400px;\n' u' outline: none;\n' u' padding: 0px 0px 0px 0px;\n' u'}\n' u'.config_form input[type=text]:focus {\n' u' border-bottom: 1px dashed #0f84a5;\n' u'}\n' u'.config_form input[type=submit],\n' u'.config_form input[type=button] {\n' u' width: 150px;\n' u' background: #141718;\n' u' border: none;\n' u' padding: 8px 0px 8px 10px;\n' u' border-radius: 5px;\n' u' color: #fff;\n' u' margin-top: 10px\n' u'}\n' u'.config_form input[type=submit]:hover,\n' u'.config_form input[type=button]:hover {\n' u' background: #0f84a5;\n' u'}\n' } api_submit = { 'html': u'<!doctype html>\n<html>\n' u'<head>\n\t<meta charset="utf-8">\n' u'\t<title>{title}</title>\n' u'\t<style>\n{css}\t</style>\n' u'</head>\n<body>\n' u'\t<div class="center">\n' u'\t<h5>{header}</h5>\n' u'\t<div class="content">\n' u'\t\t<span>{updated}</span>\n' u'\t\t<span>{enabled}</span>\n' u'\t\t<span>&nbsp;</span>\n' u'\t\t<span>&nbsp;</span>\n' u'\t\t<span>&nbsp;</span>\n' u'\t\t<span>&nbsp;</span>\n' u'\t\t<div class="textcenter">\n' u'\t\t\t<span><small>{footer}</small></span>\n' u'\t\t</div>\n' u'\t</div>\n' u'\t</div>\n' u'</body>\n</html>', 'css': u'body {\n' u' background: #141718;\n' u'}\n' u'.center {\n' u' margin: auto;\n' u' width: 600px;\n' u' padding: 10px;\n' u'}\n' u'.textcenter {\n' u' margin: auto;\n' u' width: 600px;\n' u' padding: 10px;\n' u' text-align: center;\n' u'}\n' u'.content {\n' u' width: 575px;\n' u' height: 145px;\n' u' background: #1a2123;\n' u' padding: 30px 30px 15px 30px;\n' u' border: 5px solid #1a2123;\n' u'}\n' u'h5 {\n' u' font-family: Arial, Helvetica, sans-serif;\n' u' font-size: 16px;\n' u' color: #fff;\n' u' font-weight: 600;\n' u' width: 575px;\n' u' height: 20px;\n' u' background: #0f84a5;\n' u' padding: 5px 30px 5px 30px;\n' u' border: 5px solid #0f84a5;\n' u' margin: 0px;\n' u'}\n' u'span {\n' u' font-family: Arial, Helvetica, sans-serif;\n' u' font-size: 16px;\n' u' color: #fff;\n' u' display: block;\n' u' float: left;\n' u' width: 575px;\n' u'}\n' u'small {\n' u' font-family: Arial, Helvetica, sans-serif;\n' u' font-size: 12px;\n' u' color: #fff;\n' u'}\n' } def get_http_server(address=None, port=None): addon_id = 'plugin.video.youtube' addon = xbmcaddon.Addon(addon_id) address = address if address else addon.getSetting('kodion.http.listen') address = address if re.match(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', address) else '0.0.0.0' port = int(port) if port else 50152 try: server = BaseHTTPServer.HTTPServer((address, port), YouTubeRequestHandler) return server except socket.error as e: 
logger.log_debug('HTTPServer: Failed to start |{address}:{port}| |{response}|'.format(address=address, port=port, response=str(e))) xbmcgui.Dialog().notification(addon.getAddonInfo('name'), str(e), xbmc.translatePath('special://home/addons/{0!s}/icon.png'.format(addon.getAddonInfo('id'))), 5000, False) return None def is_httpd_live(address=None, port=None): addon_id = 'plugin.video.youtube' addon = xbmcaddon.Addon(addon_id) address = address if address else addon.getSetting('kodion.http.listen') address = '127.0.0.1' if address == '0.0.0.0' else address address = address if re.match(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', address) else '127.0.0.1' port = int(port) if port else 50152 url = 'http://{address}:{port}/ping'.format(address=address, port=port) try: response = requests.get(url) result = response.status_code == 204 if not result: logger.log_debug('HTTPServer: Ping |{address}:{port}| |{response}|'.format(address=address, port=port, response=response.status_code)) return result except: logger.log_debug('HTTPServer: Ping |{address}:{port}| |{response}|'.format(address=address, port=port, response='failed')) return False def get_client_ip_address(address=None, port=None): addon_id = 'plugin.video.youtube' addon = xbmcaddon.Addon(addon_id) address = address if address else addon.getSetting('kodion.http.listen') address = '127.0.0.1' if address == '0.0.0.0' else address address = address if re.match(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', address) else '127.0.0.1' port = int(port) if port else 50152 url = 'http://{address}:{port}/client_ip'.format(address=address, port=port) response = requests.get(url) ip_address = None if response.status_code == 200: response_json = response.json() if response_json: ip_address = response_json.get('ip') return ip_address
gpl-2.0
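is_httpd_live() above decides the local server is up purely by GETting /ping and expecting the 204 that do_GET sends back. The same probe reduced to its core; the address and port are the defaults used in the module:

    import requests

    def httpd_is_live(address='127.0.0.1', port=50152):
        url = 'http://{address}:{port}/ping'.format(address=address, port=port)
        try:
            return requests.get(url).status_code == 204
        except requests.RequestException:
            return False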
Denisolt/IEEE-NYIT-MA
local/lib/python2.7/site-packages/django/contrib/gis/forms/fields.py
504
4316
from __future__ import unicode_literals from django import forms from django.contrib.gis.geos import GEOSException, GEOSGeometry from django.utils.translation import ugettext_lazy as _ from .widgets import OpenLayersWidget class GeometryField(forms.Field): """ This is the basic form field for a Geometry. Any textual input that is accepted by GEOSGeometry is accepted by this form. By default, this includes WKT, HEXEWKB, WKB (in a buffer), and GeoJSON. """ widget = OpenLayersWidget geom_type = 'GEOMETRY' default_error_messages = { 'required': _('No geometry value provided.'), 'invalid_geom': _('Invalid geometry value.'), 'invalid_geom_type': _('Invalid geometry type.'), 'transform_error': _('An error occurred when transforming the geometry ' 'to the SRID of the geometry form field.'), } def __init__(self, **kwargs): # Pop out attributes from the database field, or use sensible # defaults (e.g., allow None). self.srid = kwargs.pop('srid', None) self.geom_type = kwargs.pop('geom_type', self.geom_type) super(GeometryField, self).__init__(**kwargs) self.widget.attrs['geom_type'] = self.geom_type def to_python(self, value): """ Transforms the value to a Geometry object. """ if value in self.empty_values: return None if not isinstance(value, GEOSGeometry): try: value = GEOSGeometry(value) except (GEOSException, ValueError, TypeError): raise forms.ValidationError(self.error_messages['invalid_geom'], code='invalid_geom') # Try to set the srid if not value.srid: try: value.srid = self.widget.map_srid except AttributeError: if self.srid: value.srid = self.srid return value def clean(self, value): """ Validates that the input value can be converted to a Geometry object (which is returned). A ValidationError is raised if the value cannot be instantiated as a Geometry. """ geom = super(GeometryField, self).clean(value) if geom is None: return geom # Ensuring that the geometry is of the correct type (indicated # using the OGC string label). if str(geom.geom_type).upper() != self.geom_type and not self.geom_type == 'GEOMETRY': raise forms.ValidationError(self.error_messages['invalid_geom_type'], code='invalid_geom_type') # Transforming the geometry if the SRID was set. if self.srid and self.srid != -1 and self.srid != geom.srid: try: geom.transform(self.srid) except GEOSException: raise forms.ValidationError( self.error_messages['transform_error'], code='transform_error') return geom def has_changed(self, initial, data): """ Compare geographic value of data with its initial value. """ try: data = self.to_python(data) initial = self.to_python(initial) except forms.ValidationError: return True # Only do a geographic comparison if both values are available if initial and data: data.transform(initial.srid) # If the initial value was not added by the browser, the geometry # provided may be slightly different, the first time it is saved. # The comparison is done with a very low tolerance. return not initial.equals_exact(data, tolerance=0.000001) else: # Check for change of state of existence return bool(initial) != bool(data) class GeometryCollectionField(GeometryField): geom_type = 'GEOMETRYCOLLECTION' class PointField(GeometryField): geom_type = 'POINT' class MultiPointField(GeometryField): geom_type = 'MULTIPOINT' class LineStringField(GeometryField): geom_type = 'LINESTRING' class MultiLineStringField(GeometryField): geom_type = 'MULTILINESTRING' class PolygonField(GeometryField): geom_type = 'POLYGON' class MultiPolygonField(GeometryField): geom_type = 'MULTIPOLYGON'
gpl-3.0
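GeometryField.to_python above accepts any textual form GEOSGeometry can parse, so WKT and GeoJSON inputs end up as comparable geometry objects. A short sketch (requires GEOS, and GDAL for the GeoJSON form; the coordinates are arbitrary):

    from django.contrib.gis.geos import GEOSGeometry

    wkt = GEOSGeometry('POINT(5 23)')                                    # WKT
    gjs = GEOSGeometry('{"type": "Point", "coordinates": [5.0, 23.0]}')  # GeoJSON
    print(wkt.geom_type)                              # Point
    print(wkt.equals_exact(gjs, tolerance=0.000001))  # True; the tolerance has_changed() uses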
AlbertoPeon/invenio
modules/bibfield/lib/functions/get_number_of_comments.py
24
1054
# -*- coding: utf-8 -*- ## ## This file is part of Invenio. ## Copyright (C) 2013 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. def get_number_of_comments(recid): """ Returns number of comments for given record. @param recid: the record identifier @return: Number of comments """ from invenio.webcommentadminlib import get_nb_comments if recid: return get_nb_comments(recid)
gpl-2.0
schodge/python_koans
python3/contemplate_koans.py
95
1286
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Acknowledgment: # # Python Koans is a port of Ruby Koans originally written by Jim Weirich # and Joe O'brien of Edgecase. There are some differences and tweaks specific # to the Python language, but a great deal of it has been copied wholesale. # So thanks guys! # import sys if __name__ == '__main__': if sys.version_info < (3, 0): print("\nThis is the Python 3 version of Python Koans, but you are " + "running it with Python 2!\n\n" "Did you accidentally use the wrong python script? \nTry:\n\n" + " python3 contemplate_koans.py\n") else: if sys.version_info < (3, 3): print("\n" + "********************************************************\n" + "WARNING:\n" + "This version of Python Koans was designed for " + "Python 3.3 or greater.\n" + "Your version of Python is older, so you may run into " + "problems!\n\n" + "But let's see how far we get...\n" + "********************************************************\n") from runner.mountain import Mountain Mountain().walk_the_path(sys.argv)
mit
m0ppers/arangodb
3rdParty/boost/1.61.0/libs/python/pyste/src/Pyste/SingleCodeUnit.py
54
4355
# Copyright Bruno da Silva de Oliveira 2003. Use, modification and # distribution is subject to the Boost Software License, Version 1.0. # (See accompanying file LICENSE_1_0.txt or copy at # http://www.boost.org/LICENSE_1_0.txt) from settings import namespaces import settings from utils import remove_duplicated_lines, left_equals from SmartFile import SmartFile #============================================================================== # SingleCodeUnit #============================================================================== class SingleCodeUnit: ''' Represents a cpp file, where other objects can write in one of the predefined sections. The available sections are: pchinclude - The pre-compiled header area include - The include area of the cpp file declaration - The part before the module definition module - Inside the BOOST_PYTHON_MODULE macro ''' def __init__(self, modulename, filename): self.modulename = modulename self.filename = filename # define the available sections self.code = {} # include section self.code['pchinclude'] = '' # include section self.code['include'] = '' # declaration section (inside namespace) self.code['declaration'] = '' # declaration (outside namespace) self.code['declaration-outside'] = '' # inside BOOST_PYTHON_MACRO self.code['module'] = '' # create the default module definition self.module_definition = 'BOOST_PYTHON_MODULE(%s)' % modulename def Write(self, section, code): 'write the given code in the section of the code unit' if section not in self.code: raise RuntimeError, 'Invalid CodeUnit section: %s' % section self.code[section] += code def Merge(self, other): for section in ('include', 'declaration', 'declaration-outside', 'module'): self.code[section] = self.code[section] + other.code[section] def Section(self, section): return self.code[section] def SetCurrent(self, *args): pass def Current(self): pass def Save(self, append=False): 'Writes this code unit to the filename' space = '\n\n' if not append: flag = 'w' else: flag = 'a' fout = SmartFile(self.filename, flag) fout.write('\n') # includes # boost.python header if self.code['pchinclude']: fout.write(left_equals('PCH')) fout.write(self.code['pchinclude']+'\n') fout.write('#ifdef _MSC_VER\n') fout.write('#pragma hdrstop\n') fout.write('#endif\n') else: fout.write(left_equals('Boost Includes')) fout.write('#include <boost/python.hpp>\n') # include numerical boost for int64 definitions fout.write('#include <boost/cstdint.hpp>\n') fout.write('\n') # other includes if self.code['include']: fout.write(left_equals('Includes')) includes = remove_duplicated_lines(self.code['include']) fout.write(includes) fout.write(space) # using if settings.USING_BOOST_NS and not append: fout.write(left_equals('Using')) fout.write('using namespace boost::python;\n\n') # declarations declaration = self.code['declaration'] declaration_outside = self.code['declaration-outside'] if declaration_outside or declaration: fout.write(left_equals('Declarations')) if declaration_outside: fout.write(declaration_outside + '\n\n') if declaration: pyste_namespace = namespaces.pyste[:-2] fout.write('namespace %s {\n\n' % pyste_namespace) fout.write(declaration) fout.write('\n}// namespace %s\n' % pyste_namespace) fout.write(space) # module fout.write(left_equals('Module')) fout.write(self.module_definition + '\n') fout.write('{\n') fout.write(self.code['module']) fout.write('}\n\n') fout.close()
apache-2.0
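SingleCodeUnit above is driven by writing into named sections and then calling Save(), which wraps them in boost.python boilerplate. A sketch of that flow; the module name, file name and C++ snippets are examples:

    unit = SingleCodeUnit('example_module', 'example_module.cpp')
    unit.Write('include', '#include "example.h"\n')
    unit.Write('module', '    def("answer", &answer);\n')
    unit.Save()  # emits includes, declarations and BOOST_PYTHON_MODULE(example_module)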
kobejean/tensorflow
tensorflow/python/grappler/tf_optimizer.py
43
1840
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """Provides a proper python API for the symbols exported through swig.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.core.framework import graph_pb2 from tensorflow.python import pywrap_tensorflow as tf_opt from tensorflow.python.framework import errors from tensorflow.python.grappler import cluster as gcluster def OptimizeGraph(rewriter_config, metagraph, verbose=True, graph_id=b'graph_to_optimize', cluster=None): """Optimize the provided metagraph.""" with errors.raise_exception_on_not_ok_status() as status: if cluster is None: cluster = gcluster.Cluster() ret_from_swig = tf_opt.TF_OptimizeGraph(cluster.tf_cluster, rewriter_config.SerializeToString(), metagraph.SerializeToString(), verbose, graph_id, status) if ret_from_swig is None: return None out_graph = graph_pb2.GraphDef().FromString(ret_from_swig) return out_graph
apache-2.0
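OptimizeGraph() above takes a RewriterConfig plus a MetaGraphDef and returns an optimized GraphDef; Grappler keeps alive whatever the 'train_op' collection names. A hedged usage sketch in the style of the TF 1.x tests (assumes a TF 1.x build where these module paths exist):

    import tensorflow as tf
    from tensorflow.core.protobuf import rewriter_config_pb2
    from tensorflow.python.grappler import tf_optimizer

    with tf.Graph().as_default() as g:
        c = tf.add(tf.constant(2.0), tf.constant(3.0), name='c')
        tf.add_to_collection('train_op', c)  # mark the fetch Grappler must keep
        mg = tf.train.export_meta_graph(graph=g)

    config = rewriter_config_pb2.RewriterConfig()
    optimized = tf_optimizer.OptimizeGraph(config, mg)
    print([node.name for node in optimized.node])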
BigBrother1984/android_external_chromium_org
chrome/test/functional/perf/endure_server.py
68
2536
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Start an HTTP server which serves Chrome Endure graphs. Usage: python endure_server.py [options] To view Chrome Endure graphs from a browser, run this script to start a local HTTP server that serves the directory where graph code and test results are located. A port will be automatically picked. You can then view the graphs via http://localhost:<GIVEN_PORT>. Examples: >python endure_server.py Start a server which serves the default location <CURRENT_WORKING_DIR>/chrome_graph. >python endure_server.py --graph-dir=/home/user/Document/graph_dir Start a server which serves /home/user/Document/graph_dir which is where your graph code and test results are. """ import BaseHTTPServer import logging import optparse import os import SimpleHTTPServer import sys class HelpFormatter(optparse.IndentedHelpFormatter): """Format the help message of this script.""" def format_description(self, description): """Override to keep original format of the description.""" return description + '\n' if description else '' def _ParseArgs(argv): parser = optparse.OptionParser( usage='%prog [options]', formatter=HelpFormatter(), description=__doc__) parser.add_option( '-g', '--graph-dir', type='string', default=os.path.join(os.getcwd(), 'chrome_graph'), help='The directory that contains graph code ' \ 'and data files of test results. Default value is ' \ '<CURRENT_WORKING_DIR>/chrome_graph') return parser.parse_args(argv) def Run(argv): """Start an HTTP server which serves Chrome Endure graphs.""" logging.basicConfig(format='[%(levelname)s] %(message)s', level=logging.DEBUG) options, _ = _ParseArgs(argv) graph_dir = os.path.abspath(options.graph_dir) cur_dir = os.getcwd() os.chdir(graph_dir) httpd = BaseHTTPServer.HTTPServer( ('', 0), SimpleHTTPServer.SimpleHTTPRequestHandler) try: logging.info('Serving %s at port %d', graph_dir, httpd.server_port) logging.info('View graphs at http://localhost:%d', httpd.server_port) logging.info('Press Ctrl-C to stop the server.') httpd.serve_forever() except KeyboardInterrupt: logging.info('Shutting down ...') httpd.shutdown() finally: os.chdir(cur_dir) return 0 if '__main__' == __name__: sys.exit(Run(sys.argv[1:]))
bsd-3-clause
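The server above never hardcodes a port: binding to port 0 makes the OS pick a free one, which is then read back from server_port. That trick in isolation, using the same Python 2 module names as the script:

    import BaseHTTPServer
    import SimpleHTTPServer

    httpd = BaseHTTPServer.HTTPServer(
        ('', 0), SimpleHTTPServer.SimpleHTTPRequestHandler)
    print('View at http://localhost:%d' % httpd.server_port)  # OS-chosen port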
hellsgate1001/bookit
docs/env/Lib/site-packages/cherrypy/test/test_httpauth.py
12
5768
import cherrypy from cherrypy._cpcompat import md5, sha, ntob from cherrypy.lib import httpauth from cherrypy.test import helper class HTTPAuthTest(helper.CPWebCase): def setup_server(): class Root: def index(self): return "This is public." index.exposed = True class DigestProtected: def index(self): return "Hello %s, you've been authorized." % cherrypy.request.login index.exposed = True class BasicProtected: def index(self): return "Hello %s, you've been authorized." % cherrypy.request.login index.exposed = True class BasicProtected2: def index(self): return "Hello %s, you've been authorized." % cherrypy.request.login index.exposed = True def fetch_users(): return {'test': 'test'} def sha_password_encrypter(password): return sha(ntob(password)).hexdigest() def fetch_password(username): return sha(ntob('test')).hexdigest() conf = {'/digest': {'tools.digest_auth.on': True, 'tools.digest_auth.realm': 'localhost', 'tools.digest_auth.users': fetch_users}, '/basic': {'tools.basic_auth.on': True, 'tools.basic_auth.realm': 'localhost', 'tools.basic_auth.users': {'test': md5(ntob('test')).hexdigest()}}, '/basic2': {'tools.basic_auth.on': True, 'tools.basic_auth.realm': 'localhost', 'tools.basic_auth.users': fetch_password, 'tools.basic_auth.encrypt': sha_password_encrypter}} root = Root() root.digest = DigestProtected() root.basic = BasicProtected() root.basic2 = BasicProtected2() cherrypy.tree.mount(root, config=conf) setup_server = staticmethod(setup_server) def testPublic(self): self.getPage("/") self.assertStatus('200 OK') self.assertHeader('Content-Type', 'text/html;charset=utf-8') self.assertBody('This is public.') def testBasic(self): self.getPage("/basic/") self.assertStatus(401) self.assertHeader('WWW-Authenticate', 'Basic realm="localhost"') self.getPage('/basic/', [('Authorization', 'Basic dGVzdDp0ZX60')]) self.assertStatus(401) self.getPage('/basic/', [('Authorization', 'Basic dGVzdDp0ZXN0')]) self.assertStatus('200 OK') self.assertBody("Hello test, you've been authorized.") def testBasic2(self): self.getPage("/basic2/") self.assertStatus(401) self.assertHeader('WWW-Authenticate', 'Basic realm="localhost"') self.getPage('/basic2/', [('Authorization', 'Basic dGVzdDp0ZX60')]) self.assertStatus(401) self.getPage('/basic2/', [('Authorization', 'Basic dGVzdDp0ZXN0')]) self.assertStatus('200 OK') self.assertBody("Hello test, you've been authorized.") def testDigest(self): self.getPage("/digest/") self.assertStatus(401) value = None for k, v in self.headers: if k.lower() == "www-authenticate": if v.startswith("Digest"): value = v break if value is None: self._handlewebError("Digest authentication scheme was not found") value = value[7:] items = value.split(', ') tokens = {} for item in items: key, value = item.split('=') tokens[key.lower()] = value missing_msg = "%s is missing" bad_value_msg = "'%s' was expecting '%s' but found '%s'" nonce = None if 'realm' not in tokens: self._handlewebError(missing_msg % 'realm') elif tokens['realm'] != '"localhost"': self._handlewebError(bad_value_msg % ('realm', '"localhost"', tokens['realm'])) if 'nonce' not in tokens: self._handlewebError(missing_msg % 'nonce') else: nonce = tokens['nonce'].strip('"') if 'algorithm' not in tokens: self._handlewebError(missing_msg % 'algorithm') elif tokens['algorithm'] != '"MD5"': self._handlewebError(bad_value_msg % ('algorithm', '"MD5"', tokens['algorithm'])) if 'qop' not in tokens: self._handlewebError(missing_msg % 'qop') elif tokens['qop'] != '"auth"': self._handlewebError(bad_value_msg % ('qop', '"auth"', 
tokens['qop'])) # Test a wrong 'realm' value base_auth = 'Digest username="test", realm="wrong realm", nonce="%s", uri="/digest/", algorithm=MD5, response="%s", qop=auth, nc=%s, cnonce="1522e61005789929"' auth = base_auth % (nonce, '', '00000001') params = httpauth.parseAuthorization(auth) response = httpauth._computeDigestResponse(params, 'test') auth = base_auth % (nonce, response, '00000001') self.getPage('/digest/', [('Authorization', auth)]) self.assertStatus(401) # Test that must pass base_auth = 'Digest username="test", realm="localhost", nonce="%s", uri="/digest/", algorithm=MD5, response="%s", qop=auth, nc=%s, cnonce="1522e61005789929"' auth = base_auth % (nonce, '', '00000001') params = httpauth.parseAuthorization(auth) response = httpauth._computeDigestResponse(params, 'test') auth = base_auth % (nonce, response, '00000001') self.getPage('/digest/', [('Authorization', auth)]) self.assertStatus('200 OK') self.assertBody("Hello test, you've been authorized.")
mit
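Editor's note on the record above: the accepted `Basic dGVzdDp0ZXN0` header is just the base64 encoding of the `test:test` credentials configured in `conf`, while the rejected `dGVzdDp0ZX60` token is a corrupted variant. A minimal stand-alone sketch (not part of the original file) reproducing the accepted token:

import base64

token = base64.b64encode(b"test:test").decode("ascii")
assert token == "dGVzdDp0ZXN0"
header = ("Authorization", "Basic " + token)
print(header)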
sounay/flaminggo-test
onadata/apps/logger/migrations/0041_convert_lng_lat_to_points.py
13
12235
# -*- coding: utf-8 -*-
from south.v2 import DataMigration

from onadata.apps.logger.models.instance import Instance
from onadata.libs.utils.model_tools import queryset_iterator


class Migration(DataMigration):

    def forwards(self, orm):
        "Parse all instances to add geoms."
        for obj in queryset_iterator(
                orm['odk_logger.Instance'].objects.all()):
            instance = Instance.objects.get(pk=obj.pk)
            instance.save(force=True)

    def backwards(self, orm):
        "Write your backwards methods here."

    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'odk_logger.attachment': {
            'Meta': {'object_name': 'Attachment'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attachments'", 'to': "orm['odk_logger.Instance']"}),
            'media_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            'mimetype': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'})
        },
        'odk_logger.instance': {
            'Meta': {'object_name': 'Instance'},
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'deleted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
            'geom': ('django.contrib.gis.db.models.fields.GeometryCollectionField', [], {'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'json': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "u'submitted_via_web'", 'max_length': '20'}),
            'survey_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['odk_logger.SurveyType']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'surveys'", 'null': 'True', 'to': u"orm['auth.User']"}),
            'uuid': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '249'}),
            'xform': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'surveys'", 'null': 'True', 'to': "orm['odk_logger.XForm']"}),
            'xml': ('django.db.models.fields.TextField', [], {})
        },
        'odk_logger.instancehistory': {
            'Meta': {'object_name': 'InstanceHistory'},
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'uuid': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '249'}),
            'xform_instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'submission_history'", 'to': "orm['odk_logger.Instance']"}),
            'xml': ('django.db.models.fields.TextField', [], {})
        },
        'odk_logger.note': {
            'Meta': {'object_name': 'Note'},
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notes'", 'to': "orm['odk_logger.Instance']"}),
            'note': ('django.db.models.fields.TextField', [], {})
        },
        'odk_logger.surveytype': {
            'Meta': {'object_name': 'SurveyType'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
        },
        'odk_logger.xform': {
            'Meta': {'ordering': "('id_string',)", 'unique_together': "(('user', 'id_string'), ('user', 'sms_id_string'))", 'object_name': 'XForm'},
            'allows_sms': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'bamboo_dataset': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '60'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'null': 'True'}),
            'downloadable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'encrypted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'has_start_time': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'id_string': ('django.db.models.fields.SlugField', [], {'max_length': '100'}),
            'is_crowd_form': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'json': ('django.db.models.fields.TextField', [], {'default': "u''"}),
            'last_submission_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'num_of_submissions': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
            'shared': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'shared_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'sms_id_string': ('django.db.models.fields.SlugField', [], {'default': "''", 'max_length': '100'}),
            'surveys_with_geopoints': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'xforms'", 'null': 'True', 'to': u"orm['auth.User']"}),
            'uuid': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '32'}),
            'xls': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
            'xml': ('django.db.models.fields.TextField', [], {})
        },
        'odk_logger.ziggyinstance': {
            'Meta': {'object_name': 'ZiggyInstance'},
            'client_version': ('django.db.models.fields.BigIntegerField', [], {'default': 'None', 'null': 'True'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
            'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'entity_id': ('django.db.models.fields.CharField', [], {'max_length': '249'}),
            'form_instance': ('django.db.models.fields.TextField', [], {}),
            'form_version': ('django.db.models.fields.CharField', [], {'default': "u'1.0'", 'max_length': '10'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instance_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '249'}),
            'reporter': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ziggys'", 'to': u"orm['auth.User']"}),
            'server_version': ('django.db.models.fields.BigIntegerField', [], {}),
            'xform': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ziggy_submissions'", 'null': 'True', 'to': "orm['odk_logger.XForm']"})
        },
        u'taggit.tag': {
            'Meta': {'object_name': 'Tag'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
        },
        u'taggit.taggeditem': {
            'Meta': {'object_name': 'TaggedItem'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
        }
    }

    complete_apps = ['logger']
    symmetrical = True
bsd-2-clause
DreamerKing/LightweightHtmlWidgets
publish-rc/v1.0/files/Ipy.Lib/encodings/iso8859_14.py
593
13908
""" Python Character Mapping Codec iso8859_14 generated from 'MAPPINGS/ISO8859/8859-14.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='iso8859-14', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( u'\x00' # 0x00 -> NULL u'\x01' # 0x01 -> START OF HEADING u'\x02' # 0x02 -> START OF TEXT u'\x03' # 0x03 -> END OF TEXT u'\x04' # 0x04 -> END OF TRANSMISSION u'\x05' # 0x05 -> ENQUIRY u'\x06' # 0x06 -> ACKNOWLEDGE u'\x07' # 0x07 -> BELL u'\x08' # 0x08 -> BACKSPACE u'\t' # 0x09 -> HORIZONTAL TABULATION u'\n' # 0x0A -> LINE FEED u'\x0b' # 0x0B -> VERTICAL TABULATION u'\x0c' # 0x0C -> FORM FEED u'\r' # 0x0D -> CARRIAGE RETURN u'\x0e' # 0x0E -> SHIFT OUT u'\x0f' # 0x0F -> SHIFT IN u'\x10' # 0x10 -> DATA LINK ESCAPE u'\x11' # 0x11 -> DEVICE CONTROL ONE u'\x12' # 0x12 -> DEVICE CONTROL TWO u'\x13' # 0x13 -> DEVICE CONTROL THREE u'\x14' # 0x14 -> DEVICE CONTROL FOUR u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE u'\x16' # 0x16 -> SYNCHRONOUS IDLE u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK u'\x18' # 0x18 -> CANCEL u'\x19' # 0x19 -> END OF MEDIUM u'\x1a' # 0x1A -> SUBSTITUTE u'\x1b' # 0x1B -> ESCAPE u'\x1c' # 0x1C -> FILE SEPARATOR u'\x1d' # 0x1D -> GROUP SEPARATOR u'\x1e' # 0x1E -> RECORD SEPARATOR u'\x1f' # 0x1F -> UNIT SEPARATOR u' ' # 0x20 -> SPACE u'!' # 0x21 -> EXCLAMATION MARK u'"' # 0x22 -> QUOTATION MARK u'#' # 0x23 -> NUMBER SIGN u'$' # 0x24 -> DOLLAR SIGN u'%' # 0x25 -> PERCENT SIGN u'&' # 0x26 -> AMPERSAND u"'" # 0x27 -> APOSTROPHE u'(' # 0x28 -> LEFT PARENTHESIS u')' # 0x29 -> RIGHT PARENTHESIS u'*' # 0x2A -> ASTERISK u'+' # 0x2B -> PLUS SIGN u',' # 0x2C -> COMMA u'-' # 0x2D -> HYPHEN-MINUS u'.' # 0x2E -> FULL STOP u'/' # 0x2F -> SOLIDUS u'0' # 0x30 -> DIGIT ZERO u'1' # 0x31 -> DIGIT ONE u'2' # 0x32 -> DIGIT TWO u'3' # 0x33 -> DIGIT THREE u'4' # 0x34 -> DIGIT FOUR u'5' # 0x35 -> DIGIT FIVE u'6' # 0x36 -> DIGIT SIX u'7' # 0x37 -> DIGIT SEVEN u'8' # 0x38 -> DIGIT EIGHT u'9' # 0x39 -> DIGIT NINE u':' # 0x3A -> COLON u';' # 0x3B -> SEMICOLON u'<' # 0x3C -> LESS-THAN SIGN u'=' # 0x3D -> EQUALS SIGN u'>' # 0x3E -> GREATER-THAN SIGN u'?' 
# 0x3F -> QUESTION MARK u'@' # 0x40 -> COMMERCIAL AT u'A' # 0x41 -> LATIN CAPITAL LETTER A u'B' # 0x42 -> LATIN CAPITAL LETTER B u'C' # 0x43 -> LATIN CAPITAL LETTER C u'D' # 0x44 -> LATIN CAPITAL LETTER D u'E' # 0x45 -> LATIN CAPITAL LETTER E u'F' # 0x46 -> LATIN CAPITAL LETTER F u'G' # 0x47 -> LATIN CAPITAL LETTER G u'H' # 0x48 -> LATIN CAPITAL LETTER H u'I' # 0x49 -> LATIN CAPITAL LETTER I u'J' # 0x4A -> LATIN CAPITAL LETTER J u'K' # 0x4B -> LATIN CAPITAL LETTER K u'L' # 0x4C -> LATIN CAPITAL LETTER L u'M' # 0x4D -> LATIN CAPITAL LETTER M u'N' # 0x4E -> LATIN CAPITAL LETTER N u'O' # 0x4F -> LATIN CAPITAL LETTER O u'P' # 0x50 -> LATIN CAPITAL LETTER P u'Q' # 0x51 -> LATIN CAPITAL LETTER Q u'R' # 0x52 -> LATIN CAPITAL LETTER R u'S' # 0x53 -> LATIN CAPITAL LETTER S u'T' # 0x54 -> LATIN CAPITAL LETTER T u'U' # 0x55 -> LATIN CAPITAL LETTER U u'V' # 0x56 -> LATIN CAPITAL LETTER V u'W' # 0x57 -> LATIN CAPITAL LETTER W u'X' # 0x58 -> LATIN CAPITAL LETTER X u'Y' # 0x59 -> LATIN CAPITAL LETTER Y u'Z' # 0x5A -> LATIN CAPITAL LETTER Z u'[' # 0x5B -> LEFT SQUARE BRACKET u'\\' # 0x5C -> REVERSE SOLIDUS u']' # 0x5D -> RIGHT SQUARE BRACKET u'^' # 0x5E -> CIRCUMFLEX ACCENT u'_' # 0x5F -> LOW LINE u'`' # 0x60 -> GRAVE ACCENT u'a' # 0x61 -> LATIN SMALL LETTER A u'b' # 0x62 -> LATIN SMALL LETTER B u'c' # 0x63 -> LATIN SMALL LETTER C u'd' # 0x64 -> LATIN SMALL LETTER D u'e' # 0x65 -> LATIN SMALL LETTER E u'f' # 0x66 -> LATIN SMALL LETTER F u'g' # 0x67 -> LATIN SMALL LETTER G u'h' # 0x68 -> LATIN SMALL LETTER H u'i' # 0x69 -> LATIN SMALL LETTER I u'j' # 0x6A -> LATIN SMALL LETTER J u'k' # 0x6B -> LATIN SMALL LETTER K u'l' # 0x6C -> LATIN SMALL LETTER L u'm' # 0x6D -> LATIN SMALL LETTER M u'n' # 0x6E -> LATIN SMALL LETTER N u'o' # 0x6F -> LATIN SMALL LETTER O u'p' # 0x70 -> LATIN SMALL LETTER P u'q' # 0x71 -> LATIN SMALL LETTER Q u'r' # 0x72 -> LATIN SMALL LETTER R u's' # 0x73 -> LATIN SMALL LETTER S u't' # 0x74 -> LATIN SMALL LETTER T u'u' # 0x75 -> LATIN SMALL LETTER U u'v' # 0x76 -> LATIN SMALL LETTER V u'w' # 0x77 -> LATIN SMALL LETTER W u'x' # 0x78 -> LATIN SMALL LETTER X u'y' # 0x79 -> LATIN SMALL LETTER Y u'z' # 0x7A -> LATIN SMALL LETTER Z u'{' # 0x7B -> LEFT CURLY BRACKET u'|' # 0x7C -> VERTICAL LINE u'}' # 0x7D -> RIGHT CURLY BRACKET u'~' # 0x7E -> TILDE u'\x7f' # 0x7F -> DELETE u'\x80' # 0x80 -> <control> u'\x81' # 0x81 -> <control> u'\x82' # 0x82 -> <control> u'\x83' # 0x83 -> <control> u'\x84' # 0x84 -> <control> u'\x85' # 0x85 -> <control> u'\x86' # 0x86 -> <control> u'\x87' # 0x87 -> <control> u'\x88' # 0x88 -> <control> u'\x89' # 0x89 -> <control> u'\x8a' # 0x8A -> <control> u'\x8b' # 0x8B -> <control> u'\x8c' # 0x8C -> <control> u'\x8d' # 0x8D -> <control> u'\x8e' # 0x8E -> <control> u'\x8f' # 0x8F -> <control> u'\x90' # 0x90 -> <control> u'\x91' # 0x91 -> <control> u'\x92' # 0x92 -> <control> u'\x93' # 0x93 -> <control> u'\x94' # 0x94 -> <control> u'\x95' # 0x95 -> <control> u'\x96' # 0x96 -> <control> u'\x97' # 0x97 -> <control> u'\x98' # 0x98 -> <control> u'\x99' # 0x99 -> <control> u'\x9a' # 0x9A -> <control> u'\x9b' # 0x9B -> <control> u'\x9c' # 0x9C -> <control> u'\x9d' # 0x9D -> <control> u'\x9e' # 0x9E -> <control> u'\x9f' # 0x9F -> <control> u'\xa0' # 0xA0 -> NO-BREAK SPACE u'\u1e02' # 0xA1 -> LATIN CAPITAL LETTER B WITH DOT ABOVE u'\u1e03' # 0xA2 -> LATIN SMALL LETTER B WITH DOT ABOVE u'\xa3' # 0xA3 -> POUND SIGN u'\u010a' # 0xA4 -> LATIN CAPITAL LETTER C WITH DOT ABOVE u'\u010b' # 0xA5 -> LATIN SMALL LETTER C WITH DOT ABOVE u'\u1e0a' # 0xA6 -> LATIN CAPITAL LETTER D WITH DOT ABOVE 
u'\xa7' # 0xA7 -> SECTION SIGN u'\u1e80' # 0xA8 -> LATIN CAPITAL LETTER W WITH GRAVE u'\xa9' # 0xA9 -> COPYRIGHT SIGN u'\u1e82' # 0xAA -> LATIN CAPITAL LETTER W WITH ACUTE u'\u1e0b' # 0xAB -> LATIN SMALL LETTER D WITH DOT ABOVE u'\u1ef2' # 0xAC -> LATIN CAPITAL LETTER Y WITH GRAVE u'\xad' # 0xAD -> SOFT HYPHEN u'\xae' # 0xAE -> REGISTERED SIGN u'\u0178' # 0xAF -> LATIN CAPITAL LETTER Y WITH DIAERESIS u'\u1e1e' # 0xB0 -> LATIN CAPITAL LETTER F WITH DOT ABOVE u'\u1e1f' # 0xB1 -> LATIN SMALL LETTER F WITH DOT ABOVE u'\u0120' # 0xB2 -> LATIN CAPITAL LETTER G WITH DOT ABOVE u'\u0121' # 0xB3 -> LATIN SMALL LETTER G WITH DOT ABOVE u'\u1e40' # 0xB4 -> LATIN CAPITAL LETTER M WITH DOT ABOVE u'\u1e41' # 0xB5 -> LATIN SMALL LETTER M WITH DOT ABOVE u'\xb6' # 0xB6 -> PILCROW SIGN u'\u1e56' # 0xB7 -> LATIN CAPITAL LETTER P WITH DOT ABOVE u'\u1e81' # 0xB8 -> LATIN SMALL LETTER W WITH GRAVE u'\u1e57' # 0xB9 -> LATIN SMALL LETTER P WITH DOT ABOVE u'\u1e83' # 0xBA -> LATIN SMALL LETTER W WITH ACUTE u'\u1e60' # 0xBB -> LATIN CAPITAL LETTER S WITH DOT ABOVE u'\u1ef3' # 0xBC -> LATIN SMALL LETTER Y WITH GRAVE u'\u1e84' # 0xBD -> LATIN CAPITAL LETTER W WITH DIAERESIS u'\u1e85' # 0xBE -> LATIN SMALL LETTER W WITH DIAERESIS u'\u1e61' # 0xBF -> LATIN SMALL LETTER S WITH DOT ABOVE u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX u'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS u'\u0174' # 0xD0 -> LATIN CAPITAL LETTER W WITH CIRCUMFLEX u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS u'\u1e6a' # 0xD7 -> LATIN CAPITAL LETTER T WITH DOT ABOVE u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS u'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE u'\u0176' # 0xDE -> LATIN CAPITAL LETTER Y WITH CIRCUMFLEX u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX u'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH 
CIRCUMFLEX u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS u'\u0175' # 0xF0 -> LATIN SMALL LETTER W WITH CIRCUMFLEX u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS u'\u1e6b' # 0xF7 -> LATIN SMALL LETTER T WITH DOT ABOVE u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS u'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE u'\u0177' # 0xFE -> LATIN SMALL LETTER Y WITH CIRCUMFLEX u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS ) ### Encoding table encoding_table=codecs.charmap_build(decoding_table)
gpl-3.0
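Editor's note: the table above matches the copy of this codec that the Python standard library registers under the name 'iso8859-14', so the first few non-ASCII mappings can be spot-checked against it (a small sketch, not part of the record):

import codecs

# 0xA1 -> U+1E02, 0xA2 -> U+1E03, 0xA4 -> U+010A per the decoding table
assert codecs.decode(b'\xa1\xa2\xa4', 'iso8859-14') == u'\u1e02\u1e03\u010a'
print(codecs.decode(b'\xa1\xa2\xa4', 'iso8859-14'))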
pyIMS/pyimzML
pyimzml/ImzMLParser.py
2
24463
# -*- coding: utf-8 -*-

# Copyright 2015 Dominik Fay
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from bisect import bisect_left, bisect_right
import sys
import re
from pathlib import Path
from warnings import warn

import numpy as np

from pyimzml.metadata import Metadata, SpectrumData
from pyimzml.ontology.ontology import convert_cv_param

PRECISION_DICT = {"32-bit float": 'f', "64-bit float": 'd', "32-bit integer": 'i', "64-bit integer": 'l'}
SIZE_DICT = {'f': 4, 'd': 8, 'i': 4, 'l': 8}
INFER_IBD_FROM_IMZML = object()
XMLNS_PREFIX = "{http://psi.hupo.org/ms/mzml}"

param_group_elname = "referenceableParamGroup"
data_processing_elname = "dataProcessing"
instrument_confid_elname = "instrumentConfiguration"


def choose_iterparse(parse_lib=None):
    if parse_lib == 'ElementTree':
        from xml.etree.ElementTree import iterparse
    elif parse_lib == 'lxml':
        from lxml.etree import iterparse
    else:
        try:
            from lxml.etree import iterparse
        except ImportError:
            from xml.etree.ElementTree import iterparse
    return iterparse


def _get_cv_param(elem, accession, deep=False, convert=False):
    base = './/' if deep else ''
    node = elem.find('%s%scvParam[@accession="%s"]' % (base, XMLNS_PREFIX, accession))
    if node is not None:
        if convert:
            return convert_cv_param(accession, node.get('value'))
        return node.get('value')


class ImzMLParser:
    """
    Parser for imzML 1.1.0 files (see specification here:
    http://imzml.org/download/imzml/specifications_imzML1.1.0_RC1.pdf).

    Iteratively reads the .imzML file into memory while pruning the per-spectrum
    metadata (everything in <spectrumList> elements) during initialization. Returns
    a spectrum upon calling getspectrum(i). The binary file is read in every call
    of getspectrum(i). Use enumerate(parser.coordinates) to get all coordinates
    with their respective index. Coordinates are always 3-dimensional. If the
    third spatial dimension is not present in the data, it will be set to 1.

    The global metadata fields in the imzML file are stored in parser.metadata.
    Spectrum-specific metadata fields are not stored by default to avoid memory
    issues; use the `include_spectra_metadata` parameter if spectrum-specific
    metadata is needed.
    """

    def __init__(
        self,
        filename,
        parse_lib=None,
        ibd_file=INFER_IBD_FROM_IMZML,
        include_spectra_metadata=None,
    ):
        """
        Opens the two files corresponding to the file name, reads the entire .imzML
        file and extracts required attributes. Does not read any binary data, yet.

        :param filename:
            name of the XML file. Must end with .imzML. Binary data file must be named equally but ending with .ibd
            Alternatively an open file or Buffer Protocol object can be supplied, if ibd_file is also supplied
        :param parse_lib:
            XML-parsing library to use: 'ElementTree' or 'lxml', the latter will be used if argument not provided
        :param ibd_file:
            File or Buffer Protocol object for the .ibd file. Leave blank to infer it from the imzml filename.
            Set to None if no data from the .ibd file is needed (getspectrum calls will not work)
        :param include_spectra_metadata:
            None, 'full', or a list/set of accession IDs.
            If 'full' is given, parser.spectrum_full_metadata will be populated with a list of
            complex objects containing the full metadata for each spectrum.
            If a list or set is given, parser.spectrum_metadata_fields will be populated with a dict mapping
            accession IDs to lists. Each list will contain the values for that accession ID for
            each spectrum. Note that for performance reasons, this mode only searches the spectrum itself for
            the value. It won't check any referenced referenceable param groups if the accession ID
            isn't present in the spectrum metadata.
        """
        # ElementTree requires the schema location for finding tags (why?) but
        # fails to read it from the root element. As this should be identical
        # for all imzML files, it is hard-coded here and prepended before every tag
        self.sl = "{http://psi.hupo.org/ms/mzml}"
        # maps each imzML number format to its struct equivalent
        self.precisionDict = dict(PRECISION_DICT)
        # maps each number format character to its amount of bytes used
        self.sizeDict = dict(SIZE_DICT)
        self.filename = filename
        self.mzOffsets = []
        self.intensityOffsets = []
        self.mzLengths = []
        self.intensityLengths = []
        # list of all (x,y,z) coordinates as tuples.
        self.coordinates = []
        self.root = None
        self.metadata = None
        if include_spectra_metadata == 'full':
            self.spectrum_full_metadata = []
        elif include_spectra_metadata is not None:
            include_spectra_metadata = set(include_spectra_metadata)
            self.spectrum_metadata_fields = {
                k: [] for k in include_spectra_metadata
            }

        self.mzGroupId = self.intGroupId = self.mzPrecision = self.intensityPrecision = None
        self.iterparse = choose_iterparse(parse_lib)
        self.__iter_read_spectrum_meta(include_spectra_metadata)
        if ibd_file is INFER_IBD_FROM_IMZML:
            # name of the binary file
            ibd_filename = self._infer_bin_filename(self.filename)
            self.m = open(ibd_filename, "rb")
        else:
            self.m = ibd_file

        # Dict for basic imzML metadata other than those required for reading
        # spectra. See method __readimzmlmeta()
        self.imzmldict = self.__readimzmlmeta()
        self.imzmldict['max count of pixels z'] = np.asarray(self.coordinates)[:, 2].max()

    @staticmethod
    def _infer_bin_filename(imzml_path):
        imzml_path = Path(imzml_path)
        ibd_path = [f for f in imzml_path.parent.glob('*')
                    if re.match(r'.+\.ibd', str(f), re.IGNORECASE)
                    and f.stem == imzml_path.stem][0]
        return str(ibd_path)

    # system method for use of 'with ... as'
    def __enter__(self):
        return self

    # system method for use of 'with ... as'
    def __exit__(self, exc_t, exc_v, trace):
        if self.m is not None:
            self.m.close()

    def __iter_read_spectrum_meta(self, include_spectra_metadata):
        """
        This method should only be called by __init__. Reads the data formats, coordinates and offsets from
        the .imzML file and initializes the respective attributes. While traversing the XML tree, the per-spectrum
        metadata is pruned, i.e. the <spectrumList> element(s) are left behind empty.

        Supported accession values for the number formats: "MS:1000521", "MS:1000523", "IMS:1000141" or
        "IMS:1000142". The string values are "32-bit float", "64-bit float", "32-bit integer", "64-bit integer".
        """
        mz_group = int_group = None
        slist = None
        elem_iterator = self.iterparse(self.filename, events=("start", "end"))

        if sys.version_info > (3,):
            _, self.root = next(elem_iterator)
        else:
            _, self.root = elem_iterator.next()

        for event, elem in elem_iterator:
            if elem.tag == self.sl + "spectrumList" and event == "start":
                self.__process_metadata()
                slist = elem
            elif elem.tag == self.sl + "spectrum" and event == "end":
                self.__process_spectrum(elem, include_spectra_metadata)
                slist.remove(elem)
        self.__fix_offsets()

    def __fix_offsets(self):
        # clean up the mess after morons who use signed 32-bit where unsigned 64-bit is appropriate
        def fix(array):
            fixed = []
            delta = 0
            prev_value = float('nan')
            for value in array:
                if value < 0 and prev_value >= 0:
                    delta += 2**32
                fixed.append(value + delta)
                prev_value = value
            return fixed

        self.mzOffsets = fix(self.mzOffsets)
        self.intensityOffsets = fix(self.intensityOffsets)

    def __process_metadata(self):
        if self.metadata is None:
            self.metadata = Metadata(self.root)
            for param_id, param_group in self.metadata.referenceable_param_groups.items():
                if 'm/z array' in param_group.param_by_name:
                    self.mzGroupId = param_id
                    for name, dtype in self.precisionDict.items():
                        if name in param_group.param_by_name:
                            self.mzPrecision = dtype
                if 'intensity array' in param_group.param_by_name:
                    self.intGroupId = param_id
                    for name, dtype in self.precisionDict.items():
                        if name in param_group.param_by_name:
                            self.intensityPrecision = dtype
            if not hasattr(self, 'mzPrecision'):
                raise RuntimeError("Could not determine m/z precision")
            if not hasattr(self, 'intensityPrecision'):
                raise RuntimeError("Could not determine intensity precision")

    def __process_spectrum(self, elem, include_spectra_metadata):
        arrlistelem = elem.find('%sbinaryDataArrayList' % self.sl)
        mz_group = None
        int_group = None
        for e in arrlistelem:
            ref = e.find('%sreferenceableParamGroupRef' % self.sl).attrib["ref"]
            if ref == self.mzGroupId:
                mz_group = e
            elif ref == self.intGroupId:
                int_group = e

        self.mzOffsets.append(int(_get_cv_param(mz_group, 'IMS:1000102')))
        self.mzLengths.append(int(_get_cv_param(mz_group, 'IMS:1000103')))
        self.intensityOffsets.append(int(_get_cv_param(int_group, 'IMS:1000102')))
        self.intensityLengths.append(int(_get_cv_param(int_group, 'IMS:1000103')))

        scan_elem = elem.find('%sscanList/%sscan' % (self.sl, self.sl))
        x = _get_cv_param(scan_elem, 'IMS:1000050')
        y = _get_cv_param(scan_elem, 'IMS:1000051')
        z = _get_cv_param(scan_elem, 'IMS:1000052')
        if z is not None:
            self.coordinates.append((int(x), int(y), int(z)))
        else:
            self.coordinates.append((int(x), int(y), 1))

        if include_spectra_metadata == 'full':
            self.spectrum_full_metadata.append(
                SpectrumData(elem, self.metadata.referenceable_param_groups)
            )
        elif include_spectra_metadata:
            for param in include_spectra_metadata:
                value = _get_cv_param(elem, param, deep=True, convert=True)
                self.spectrum_metadata_fields[param].append(value)

    def __readimzmlmeta(self):
        """
        DEPRECATED - use self.metadata instead, as it has much greater detail
        and allows for multiple scan settings / instruments.

        This method should only be called by __init__. Initializes the imzmldict
        with frequently used metadata from the .imzML file.

        :return d:
            dict containing above mentioned meta data
        :rtype:
            dict
        :raises Warning:
            if an xml attribute has a number format different from the imzML specification
        """
        d = {}
        scan_settings_list_elem = self.root.find('%sscanSettingsList' % self.sl)
        instrument_config_list_elem = self.root.find('%sinstrumentConfigurationList' % self.sl)

        scan_settings_params = [
            ("max count of pixels x", "IMS:1000042"),
            ("max count of pixels y", "IMS:1000043"),
            ("max dimension x", "IMS:1000044"),
            ("max dimension y", "IMS:1000045"),
            ("pixel size x", "IMS:1000046"),
            ("pixel size y", "IMS:1000047"),
            ("matrix solution concentration", "MS:1000835"),
        ]
        instrument_config_params = [
            ("wavelength", "MS:1000843"),
            ("focus diameter x", "MS:1000844"),
            ("focus diameter y", "MS:1000845"),
            ("pulse energy", "MS:1000846"),
            ("pulse duration", "MS:1000847"),
            ("attenuation", "MS:1000848"),
        ]

        for name, accession in scan_settings_params:
            try:
                val = _get_cv_param(scan_settings_list_elem, accession, deep=True, convert=True)
                if val is not None:
                    d[name] = val
            except ValueError:
                warn(Warning('Wrong data type in XML file. Skipped attribute "%s"' % name))

        for name, accession in instrument_config_params:
            try:
                val = _get_cv_param(instrument_config_list_elem, accession, deep=True, convert=True)
                if val is not None:
                    d[name] = val
            except ValueError:
                warn(Warning('Wrong data type in XML file. Skipped attribute "%s"' % name))

        return d

    def get_physical_coordinates(self, i):
        """
        For a pixel index i, return the real-world coordinates in nanometers.

        This is equivalent to multiplying the image coordinates of the given pixel
        with the pixel size.

        :param i: the pixel index
        :return: a tuple of x and y coordinates.
        :rtype: Tuple[float]
        :raises KeyError: if the .imzML file does not specify the attributes "pixel size x" and "pixel size y"
        """
        try:
            pixel_size_x = self.imzmldict["pixel size x"]
            pixel_size_y = self.imzmldict["pixel size y"]
        except KeyError:
            raise KeyError("Could not find all pixel size attributes in imzML file")
        image_x, image_y = self.coordinates[i][:2]
        return image_x * pixel_size_x, image_y * pixel_size_y

    def getspectrum(self, index):
        """
        Reads the spectrum at specified index from the .ibd file.

        :param index:
            Index of the desired spectrum in the .imzML file

        Output:

        mz_array: numpy.ndarray
            Sequence of m/z values representing the horizontal axis of the desired mass
            spectrum
        intensity_array: numpy.ndarray
            Sequence of intensity values corresponding to mz_array
        """
        mz_bytes, intensity_bytes = self.get_spectrum_as_string(index)
        mz_array = np.frombuffer(mz_bytes, dtype=self.mzPrecision)
        intensity_array = np.frombuffer(intensity_bytes, dtype=self.intensityPrecision)
        return mz_array, intensity_array

    def get_spectrum_as_string(self, index):
        """
        Reads m/z array and intensity array of the spectrum at specified location
        from the binary file as a byte string. The string can be unpacked by the
        struct module. To get the arrays as numbers, use getspectrum

        :param index:
            Index of the desired spectrum in the .imzML file
        :rtype: Tuple[str, str]

        Output:

        mz_string: string
            where each character represents a byte of the mz array of the spectrum
        intensity_string: string
            where each character represents a byte of the intensity array of the spectrum
        """
        offsets = [self.mzOffsets[index], self.intensityOffsets[index]]
        lengths = [self.mzLengths[index], self.intensityLengths[index]]
        lengths[0] *= self.sizeDict[self.mzPrecision]
        lengths[1] *= self.sizeDict[self.intensityPrecision]
        self.m.seek(offsets[0])
        mz_string = self.m.read(lengths[0])
        self.m.seek(offsets[1])
        intensity_string = self.m.read(lengths[1])
        return mz_string, intensity_string

    def portable_spectrum_reader(self):
        """
        Builds a PortableSpectrumReader that holds the coordinates list and spectrum offsets in the .ibd file
        so that the .ibd file can be read without opening the .imzML file again.

        The PortableSpectrumReader can be safely pickled and unpickled, making it useful for reading the spectra
        in a distributed environment such as PySpark or PyWren.
        """
        return PortableSpectrumReader(self.coordinates,
                                      self.mzPrecision, self.mzOffsets, self.mzLengths,
                                      self.intensityPrecision, self.intensityOffsets,
                                      self.intensityLengths)


def getionimage(p, mz_value, tol=0.1, z=1, reduce_func=sum):
    """
    Get an image representation of the intensity distribution
    of the ion with specified m/z value.

    By default, the intensity values within the tolerance region are summed.

    :param p:
        the ImzMLParser (or anything else with similar attributes) for the desired dataset
    :param mz_value:
        m/z value for which the ion image shall be returned
    :param tol:
        Absolute tolerance for the m/z value, such that all ions with values
        mz_value-|tol| <= x <= mz_value+|tol| are included. Defaults to 0.1
    :param z:
        z Value if spectrogram is 3-dimensional.
    :param reduce_func:
        the behaviour for reducing the intensities between mz_value-|tol| and mz_value+|tol| to a single value. Must
        be a function that takes a sequence as input and outputs a number. By default, the values are summed.

    :return:
        numpy matrix with each element representing the ion intensity in this
        pixel. Can be easily plotted with matplotlib
    """
    tol = abs(tol)
    im = np.zeros((p.imzmldict["max count of pixels y"], p.imzmldict["max count of pixels x"]))
    for i, (x, y, z_) in enumerate(p.coordinates):
        if z_ == 0:
            warn(UserWarning("z coordinate = 0 present, if you're getting blank images set getionimage(.., .., z=0)"))
        if z_ == z:
            mzs, ints = map(lambda x: np.asarray(x), p.getspectrum(i))
            min_i, max_i = _bisect_spectrum(mzs, mz_value, tol)
            im[y - 1, x - 1] = reduce_func(ints[min_i:max_i+1])
    return im


def browse(p):
    """
    Create a per-spectrum metadata browser for the parser.

    Usage::

        # get a list of the instrument configurations used in the first pixel
        instrument_configurations = browse(p).for_spectrum(0).get_ids("instrumentConfiguration")

    Currently, ``instrumentConfiguration``, ``dataProcessing`` and ``referenceableParamGroup`` are supported.

    For browsing all spectra iteratively, you should by all means use **ascending** indices. Doing otherwise can result
    in quadratic runtime. The following example shows how to retrieve all unique instrumentConfigurations used::

        browser = browse(p)
        all_config_ids = set()
        for i, _ in enumerate(p.coordinates):
            all_config_ids.update(browser.for_spectrum(i).get_ids("instrumentConfiguration"))

    This is a list of ids with which you can find the corresponding ``<instrumentConfiguration>`` tag in the xml tree.

    :param p: the parser
    :return: the browser
    """
    return _ImzMLMetaDataBrowser(p.root, p.filename, p.sl)


def _bisect_spectrum(mzs, mz_value, tol):
    ix_l, ix_u = bisect_left(mzs, mz_value - tol), bisect_right(mzs, mz_value + tol) - 1
    if ix_l == len(mzs):
        return len(mzs), len(mzs)
    if ix_u < 1:
        return 0, 0
    if ix_u == len(mzs):
        ix_u -= 1
    if mzs[ix_l] < (mz_value - tol):
        ix_l += 1
    if mzs[ix_u] > (mz_value + tol):
        ix_u -= 1
    return ix_l, ix_u


class _ImzMLMetaDataBrowser(object):
    def __init__(self, root, fn, sl):
        self._root = root
        self._sl = sl
        self._fn = fn
        self._iter, self._previous, self._list_elem = None, None, None
        self.iterparse = choose_iterparse()

    def for_spectrum(self, i):
        if self._previous is None or i <= self._previous:
            self._iter = self.iterparse(self._fn, events=("start", "end"))
        for event, s in self._iter:
            if s.tag == self._sl + "spectrumList" and event == "start":
                self._list_elem = s
            elif s.tag == self._sl + "spectrum" and event == "end":
                self._list_elem.remove(s)
                if s.attrib["index"] == str(i):
                    self._previous = i
                    return _SpectrumMetaDataBrowser(self._root, self._sl, s)


class _SpectrumMetaDataBrowser(object):
    def __init__(self, root, sl, spectrum):
        self._root = root
        self._sl = sl
        self._spectrum = spectrum

    def get_ids(self, element):
        param_methods = {
            param_group_elname: self._find_referenceable_param_groups,
            data_processing_elname: self._find_data_processing,
            instrument_confid_elname: self._find_instrument_configurations,
        }
        try:
            return param_methods[element]()
        except KeyError as e:
            raise ValueError("Unsupported element: " + str(element))

    def _find_referenceable_param_groups(self):
        param_group_refs = self._spectrum.findall("%sreferenceableParamGroupRef" % self._sl)
        ids = map(lambda g: g.attrib["ref"], param_group_refs)
        return ids

    def _find_instrument_configurations(self):
        ids = None
        scan_list = self._spectrum.find("%sscanList" % self._sl)
        if scan_list:
            scans = scan_list.findall("%sscan[@instrumentConfigurationRef]" % self._sl)
            ids = map(lambda s: s.attrib["instrumentConfigurationRef"], scans)
        if not ids:
            run = self._root.find("%srun" % self._sl)
            try:
                return [run.attrib["defaultInstrumentConfigurationRef"]]
            except KeyError as _:
                return list()
        else:
            return ids

    def _find_data_processing(self):
        try:
            return self._spectrum.attrib["dataProcessingRef"]
        except KeyError as _:
            spectrum_list = self._root.find("%srun/%sspectrumList" % tuple(2 * [self._sl]))
            try:
                return [spectrum_list.attrib["defaultDataProcessingRef"]]
            except KeyError as _:
                return []


class PortableSpectrumReader(object):
    """
    A pickle-able class for holding the minimal set of data required for reading,
    without holding any references to open files that wouldn't survive pickling.
    """

    def __init__(self, coordinates, mzPrecision, mzOffsets, mzLengths,
                 intensityPrecision, intensityOffsets, intensityLengths):
        self.coordinates = coordinates
        self.mzPrecision = mzPrecision
        self.mzOffsets = mzOffsets
        self.mzLengths = mzLengths
        self.intensityPrecision = intensityPrecision
        self.intensityOffsets = intensityOffsets
        self.intensityLengths = intensityLengths

    def read_spectrum_from_file(self, file, index):
        """
        Reads the spectrum at specified index from the .ibd file.

        :param file:
            File or file-like object for the .ibd file
        :param index:
            Index of the desired spectrum in the .imzML file

        Output:

        mz_array: numpy.ndarray
            Sequence of m/z values representing the horizontal axis of the desired mass
            spectrum
        intensity_array: numpy.ndarray
            Sequence of intensity values corresponding to mz_array
        """
        file.seek(self.mzOffsets[index])
        mz_bytes = file.read(self.mzLengths[index] * SIZE_DICT[self.mzPrecision])
        file.seek(self.intensityOffsets[index])
        intensity_bytes = file.read(self.intensityLengths[index] * SIZE_DICT[self.intensityPrecision])

        mz_array = np.frombuffer(mz_bytes, dtype=self.mzPrecision)
        intensity_array = np.frombuffer(intensity_bytes, dtype=self.intensityPrecision)

        return mz_array, intensity_array
apache-2.0
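Editor's note: a minimal usage sketch for the parser record above (not from the original file). 'example.imzML' is a hypothetical path; the matching 'example.ibd' is inferred by ImzMLParser, and the m/z value and tolerance are arbitrary.

from pyimzml.ImzMLParser import ImzMLParser, getionimage

with ImzMLParser('example.imzML') as p:
    mzs, intensities = p.getspectrum(0)              # arrays for the first pixel
    img = getionimage(p, mz_value=885.55, tol=0.25)  # ion image within +/- 0.25 m/z
    print(len(p.coordinates), img.shape)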
josephsuh/extra-specs
nova/scheduler/multi.py
2
3408
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2010 OpenStack, LLC.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Scheduler that allows routing some calls to one driver and others to another.
"""

from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.scheduler import driver

multi_scheduler_opts = [
    cfg.StrOpt('compute_scheduler_driver',
               default='nova.scheduler.'
                       'filter_scheduler.FilterScheduler',
               help='Driver to use for scheduling compute calls'),
    cfg.StrOpt('volume_scheduler_driver',
               default='nova.scheduler.chance.ChanceScheduler',
               help='Driver to use for scheduling volume calls'),
]

FLAGS = flags.FLAGS
FLAGS.register_opts(multi_scheduler_opts)

# A mapping of methods to topics so we can figure out which driver to use.
# There are currently no compute methods proxied through the map
_METHOD_MAP = {'create_volume': 'volume',
               'create_volumes': 'volume'}


class MultiScheduler(driver.Scheduler):
    """A scheduler that holds multiple sub-schedulers.

    This exists to allow flag-driven composability of schedulers, allowing
    third parties to integrate custom schedulers more easily.
    """

    def __init__(self):
        super(MultiScheduler, self).__init__()
        compute_driver = importutils.import_object(
                FLAGS.compute_scheduler_driver)
        volume_driver = importutils.import_object(
                FLAGS.volume_scheduler_driver)

        self.drivers = {'compute': compute_driver,
                        'volume': volume_driver}

    def __getattr__(self, key):
        if not key.startswith('schedule_'):
            raise AttributeError(key)
        method = key[len('schedule_'):]
        if method not in _METHOD_MAP:
            raise AttributeError(key)
        return getattr(self.drivers[_METHOD_MAP[method]], key)

    def schedule(self, context, topic, method, *_args, **_kwargs):
        return self.drivers[topic].schedule(context, topic, method,
                                            *_args, **_kwargs)

    def schedule_run_instance(self, *args, **kwargs):
        return self.drivers['compute'].schedule_run_instance(*args, **kwargs)

    def schedule_prep_resize(self, *args, **kwargs):
        return self.drivers['compute'].schedule_prep_resize(*args, **kwargs)

    def update_service_capabilities(self, service_name, host, capabilities):
        # Multi scheduler is only a holder of sub-schedulers, so
        # pass the capabilities to the schedulers that matter
        for d in self.drivers.values():
            d.update_service_capabilities(service_name, host, capabilities)
apache-2.0
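Editor's note: the `__getattr__` hook above is the core trick of the record — any unknown `schedule_*` attribute is forwarded to the sub-scheduler that owns the mapped topic. A stand-alone sketch of that dispatch rule with stub drivers (the stub names are illustrative, not nova API):

_METHOD_MAP = {'create_volume': 'volume', 'create_volumes': 'volume'}

class StubDriver(object):
    def __init__(self, name):
        self.name = name
    def schedule_create_volume(self, *args):
        return '%s handled create_volume' % self.name

class MiniMulti(object):
    def __init__(self):
        self.drivers = {'compute': StubDriver('compute'),
                        'volume': StubDriver('volume')}
    def __getattr__(self, key):
        # only fires for attributes not found normally, i.e. proxied calls
        if not key.startswith('schedule_'):
            raise AttributeError(key)
        method = key[len('schedule_'):]
        if method not in _METHOD_MAP:
            raise AttributeError(key)
        return getattr(self.drivers[_METHOD_MAP[method]], key)

print(MiniMulti().schedule_create_volume())  # -> 'volume handled create_volume'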
amandersillinois/landlab
landlab/utils/jaggedarray_ma.py
3
9358
"""Store arrays of variable-length arrays implemented with masked arrays. Implements a MaskedJaggedArray class using numpy masked arrays. Examples -------- Create a MaskedJaggedArray that stores link IDs for the links attached to the nodes of a 3x3 grid. >>> from landlab.utils.jaggedarray_ma import MaskedJaggedArray >>> links_at_node = MaskedJaggedArray([ ... [0, 6], ... [1, 7, 0], ... [8, 1], ... [2, 9, 6], ... [3, 10, 2, 7], ... [11, 3, 8], ... [4, 7], ... [5, 10, 4], ... [5, 11]]) Make up some data that provides values at each of the links. >>> value_at_link = np.arange(12, dtype=float) Create another MaskedJaggedArray. Here we store the values at each of the links attached to nodes of the grid. >>> values_at_node = MaskedJaggedArray.empty_like(links_at_node, dtype=float) >>> values_at_node.array = value_at_link[links_at_node.array] Now operate on the link values for each node. >>> values_at_node.foreach_row(np.sum) array([ 6., 8., 9., 17., 22., 22., 11., 19., 16.]) >>> values_at_node.foreach_row(np.min) array([ 0., 0., 1., 2., 2., 3., 4., 4., 5.]) >>> values_at_node.foreach_row(np.ptp) array([ 6., 7., 7., 7., 8., 8., 3., 6., 6.]) """ import numpy as np class MaskedJaggedArray(object): """A container for an array of variable-length arrays. MaskedJaggedArray([row0, row1, ...]) MaskedJaggedArray(values, values_per_row) Examples -------- Create a MaskedJaggedArray with an array of arrays. >>> from landlab.utils.jaggedarray_ma import MaskedJaggedArray >>> x = MaskedJaggedArray([[0, 1, 2], [3, 4]]) >>> x.array array([0, 1, 2, 3, 4]) Create a MaskedJaggedArray as a 1D array and a list or row lengths. >>> x = MaskedJaggedArray([0, 1, 2, 3, 4], (3, 2)) >>> x.array array([0, 1, 2, 3, 4]) """ def __init__(self, *args): """MaskedJaggedArray([row0, row1, ...]) MaskedJaggedArray(values, values_per_row) Examples -------- Create a MaskedJaggedArray with an array of arrays. >>> from landlab.utils.jaggedarray_ma import MaskedJaggedArray >>> x = MaskedJaggedArray([[0, 1, 2], [3, 4]]) >>> x.array array([0, 1, 2, 3, 4]) Create a MaskedJaggedArray as a 1D array and a list or row lengths. >>> x = MaskedJaggedArray([0, 1, 2, 3, 4], (3, 2)) >>> x.array array([0, 1, 2, 3, 4]) """ if len(args) == 1: if isinstance(args[0], np.ma.core.MaskedArray): mat = args[0] else: mat = MaskedJaggedArray.ma_from_list_of_lists(args[0]) else: mat = MaskedJaggedArray.ma_from_flat_array(args[0], args[1]) self._values = mat self._number_of_rows = mat.shape[0] @staticmethod def ma_from_list_of_lists(rows, dtype=None): """Create a masked array from a list of lists. Parameters ---------- rows : array_like or array_like Rows of the jagged array. dtype : np.dtype, optional The data type of the new masked array. Returns ------- np.masked_array A new masked array. """ values_per_row = [len(row) for row in rows] mat = np.ma.masked_all((len(rows), max(values_per_row)), dtype=dtype or int) for (row_number, row) in enumerate(rows): mat[row_number, : len(row)] = row return mat @staticmethod def ma_from_flat_array(array, values_per_row): """Create a masked array from a flat array. Parameters ---------- array : array_like Values of the jagged array. values_per_row : array_like of int Number of values in each row of the jagged array. Returns ------- np.masked_array A new masked array. 
""" array = np.array(array) mat = np.ma.masked_all( (len(values_per_row), max(values_per_row)), dtype=array.dtype ) offset = 0 for row_number in range(mat.shape[0]): n_valid = values_per_row[row_number] mat[row_number, :n_valid] = array[offset : offset + n_valid] offset += n_valid return mat @property def array(self): """The jagged array as a 1D array. Returns ------- array : A view of the underlying 1D array. Examples -------- >>> from landlab.utils.jaggedarray_ma import MaskedJaggedArray >>> x = MaskedJaggedArray([[0, 1, 2], [3, 4]]) >>> x.array array([0, 1, 2, 3, 4]) >>> x.array = np.array([1, 1, 2, 3, 4]) >>> x.array array([1, 1, 2, 3, 4]) """ return self._values.compressed() @property def masked_array(self): """The jagged array as a masked array. Returns ------- np.masked_array : The underlying masked array. """ return self._values @array.setter def array(self, array): """Set the data of the jagged array from a 1D array. Parameters ---------- array : array_like The new values of the array. """ self._values[~self._values.mask] = array @property def size(self): """Number of array elements. Returns ------- int : Number of values in the array. Examples -------- >>> from landlab.utils.jaggedarray_ma import MaskedJaggedArray >>> x = MaskedJaggedArray([[0, 1, 2], [3, 4]]) >>> x.size 5 """ return self.array.size @property def number_of_rows(self): """Number of array rows. Returns ------- int : Number of rows in the array. Examples -------- >>> from landlab.utils.jaggedarray_ma import MaskedJaggedArray >>> x = MaskedJaggedArray([[0, 1, 2], [3, 4]]) >>> x.number_of_rows == 2 True """ return self._number_of_rows @staticmethod def _offsets_from_values_per_row(values_per_row): """Get offsets into the base array from array lengths. Parameters ---------- values_per_row : array of int The number of values in each row of the MaskedJaggedArray. Returns ------- ndarray An array of offsets. """ offset = np.empty(len(values_per_row) + 1, dtype=int) np.cumsum(values_per_row, out=offset[1:]) offset[0] = 0 return offset @staticmethod def empty_like(jagged, dtype=None): """Create a new MaskedJaggedArray that is like another one. Parameters ---------- jagged : MaskedJaggedArray A MaskedJaggedArray to copy. dtype : np.dtype The data type of the new MaskedJaggedArray. Returns ------- MaskedJaggedArray A new MaskedJaggedArray. """ return MaskedJaggedArray(np.ma.empty_like(jagged.masked_array, dtype=dtype)) def length_of_row(self, row): """Number of values in a given row. Parameters ---------- row : int Index to a row. Returns ------- int : Number of values in the row. Examples -------- >>> from landlab.utils.jaggedarray_ma import MaskedJaggedArray >>> x = MaskedJaggedArray([[0, 1, 2], [3, 4]]) >>> x.length_of_row(0) 3 >>> x.length_of_row(1) 2 """ return len(self.row(row)) def row(self, row): """Get the values of a row as an array. Parameters ---------- row : int Index to a row. Returns ------- array : Values in the row as a slice of the underlying array. Examples -------- >>> from landlab.utils.jaggedarray_ma import MaskedJaggedArray >>> x = MaskedJaggedArray([[0, 1, 2], [3, 4]]) >>> x.row(0) array([0, 1, 2]) >>> x.row(1) array([3, 4]) """ return self._values[row].compressed() def __iter__(self): """Iterate over the rows of the array. 
Examples -------- >>> from landlab.utils.jaggedarray_ma import MaskedJaggedArray >>> x = MaskedJaggedArray([[0, 1, 2], [3, 4]]) >>> for row in x: row array([0, 1, 2]) array([3, 4]) """ for row in self._values: yield row.compressed() def foreach_row(self, func, out=None): """Apply an operator row-by-row. Examples -------- >>> from landlab.utils.jaggedarray_ma import MaskedJaggedArray >>> x = MaskedJaggedArray([[0, 1, 2], [3, 4]]) >>> x.foreach_row(np.sum) array([3, 7]) >>> out = np.empty(2, dtype=int) >>> x.foreach_row(np.sum, out=out) is out True >>> out array([3, 7]) """ if out is None: return func(self._values, axis=1).compressed() else: return func(self._values, axis=1, out=out)
mit
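Editor's note: the private `_offsets_from_values_per_row` helper above is the only method without a doctest; a small check of its contract (assuming a landlab install that exposes this module — row lengths (3, 2) map to offsets [0, 3, 5] into the flat array):

from landlab.utils.jaggedarray_ma import MaskedJaggedArray

# cumulative row lengths, with a leading 0, give each row's start offset
print(MaskedJaggedArray._offsets_from_values_per_row([3, 2]))  # [0 3 5]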
Jollytown/Garuda
server/garuda/lib/python2.7/site-packages/django/conf/locale/pl/formats.py
82
1153
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals

# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j E Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j E Y H:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd-m-Y'
SHORT_DATETIME_FORMAT = 'd-m-Y H:i:s'
FIRST_DAY_OF_WEEK = 1  # Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%d.%m.%Y', '%d.%m.%y',  # '25.10.2006', '25.10.06'
    '%y-%m-%d',              # '06-10-25'
    # '%d. %B %Y', '%d. %b. %Y',  # '25. October 2006', '25. Oct. 2006'
)
DATETIME_INPUT_FORMATS = (
    '%d.%m.%Y %H:%M:%S',     # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M:%S.%f',  # '25.10.2006 14:30:59.000200'
    '%d.%m.%Y %H:%M',        # '25.10.2006 14:30'
    '%d.%m.%Y',              # '25.10.2006'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' '
NUMBER_GROUPING = 3
mit
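Editor's note: a hedged sketch of how these format strings are consumed. Django picks up DATE_FORMAT through `django.utils.formats.date_format` when the Polish locale is active with localization enabled; the minimal settings below and the exact localized output are indicative only.

import django
from django.conf import settings

settings.configure(USE_I18N=True, USE_L10N=True, LANGUAGE_CODE='pl')
django.setup()

import datetime
from django.utils import formats, translation

with translation.override('pl'):
    # default format name is 'DATE_FORMAT', i.e. 'j E Y' from the record above
    print(formats.date_format(datetime.date(2006, 10, 25)))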
colbyga/pychess
lib/pychess/gfx/Pieces.py
22
33741
import re
import time
import math

import cairo
from gi.repository import Rsvg

from pychess.Utils.const import *
from pychess.System import conf
from pychess.System.prefix import addDataPrefix
from pychess.System.cairoextras import create_cairo_font_face_for_file

elemExpr = re.compile(r"([a-zA-Z])\s*([0-9\.,\s]*)\s+|[z]\s+")
spaceExpr = re.compile(r"[\s,]+")

l = []

def parse(n, psize):
    yield "def f(c):"
    s = psize/size
    for cmd, points in n:
        pstr = ",".join(str(p*s) for p in points)
        if cmd == "M":
            yield "c.rel_move_to(%s)" % pstr
        elif cmd == "L":
            yield "c.rel_line_to(%s)" % pstr
        else:
            yield "c.rel_curve_to(%s)" % pstr

# This has double speed at drawing, but when generating new functions, it
# takes about ten times longer.
def drawPiece1 (piece, cc, x, y, psize, allWhite=False):
    cc.save()
    cc.move_to(x,y)
    if not psize in parsedPieces[piece.color][piece.sign]:
        exec("\n    ".join(parse(parsedPieces[piece.color][piece.sign][size], psize)))
        parsedPieces[piece.color][piece.sign][psize] = f
    cc.fill()
    cc.restore()

def drawPieceReal (piece, cc, psize, allWhite=False):
    color = WHITE if allWhite else piece.color
    # Do the actual drawing to the Cairo context
    for cmd, points in parsedPieces[color][piece.sign][psize]:
        if cmd == 'M':
            cc.rel_move_to(*points)
        elif cmd == 'L':
            cc.rel_line_to(*points)
        elif cmd == 'C':
            cc.rel_curve_to(*points)
        else:
            cc.set_source_rgb(1,1,1)
            cc.fill_preserve()
            cc.set_source_rgb(0,0,0)

def drawPiece2 (piece, cc, x, y, psize, allWhite=False):
    """Rendering pieces with draw each time method"""
    cc.save()
    cc.move_to(x,y)
    if not psize in parsedPieces[piece.color][piece.sign]:
        list = [(cmd, [(p*psize/size) for p in points])
                for cmd, points in parsedPieces[piece.color][piece.sign][size]]
        parsedPieces[piece.color][piece.sign][psize] = list
    drawPieceReal (piece, cc, psize, allWhite)
    cc.fill()
    cc.restore()

piece_ord = {KING: 0, QUEEN: 1, ROOK: 2, BISHOP: 3, KNIGHT: 4, PAWN: 5}
pnames = ('Pawn','Knight','Bishop','Rook','Queen','King')

def drawPiece3(piece, context, x, y, psize, allWhite=False):
    """Rendering pieces using .svg chess figurines"""
    color = WHITE if allWhite else piece.color
    if all_in_one:
        image = svg_pieces
        w, h = image.props.width/6, image.props.height/2
        offset_x = piece_ord[piece.sign]*psize
        offset_y = 0 if color == BLACK else psize
    else:
        image = svg_pieces[color][piece.sign]
        w, h = image.props.width, image.props.height
        offset_x = 0
        offset_y = 0

    context.save()
    context.rectangle(x, y, psize, psize)
    context.clip()
    context.translate(x-offset_x, y-offset_y)
    context.scale(1.0*psize/w, 1.0*psize/h)
    context.push_group()
    if all_in_one:
        pieceid = '#%s%s' % ('White' if color==0 else 'Black', pnames[piece.sign-1])
        image.render_cairo_sub(context, id=pieceid)
    else:
        image.render_cairo(context)
    context.pop_group_to_source()
    context.paint_with_alpha(piece.opacity)
    context.restore()

def drawPiece4(piece, context, x, y, psize, allWhite=False):
    """Rendering pieces using .ttf chessfont figurines"""
    color = WHITE if allWhite else piece.color
    context.set_font_face(chess_font_face)
    context.set_font_size(psize)
    context.move_to(x, y+psize)
    context.text_path(piece2char[color][piece.sign])

    close_path = False
    for cmd, points in context.copy_path():
        if cmd == 0:
            context.move_to(*points)
            if close_path:
                context.set_source_rgb(1,1,1)
                context.fill_preserve()
                context.set_source_rgb(0,0,0)
                close_path = False
        elif cmd == 1:
            context.line_to(*points)
        elif cmd == 2:
            context.curve_to(*points)
        else:
            close_path = True
    context.fill()

# This version has proven itself nearly three times as slow as the
# "draw each time" method. At least when drawing one path only. Might be
# useful when drawing svg
def drawPiece5 (piece, cc, x, y, psize, allWhite=False):
    """Rendering pieces from cache instead of draw each time"""
    if not piece in surfaceCache:
        s = cc.get_target().create_similar(cairo.CONTENT_COLOR_ALPHA,
                                           int(size), int(size))
        ctx = cairo.Context(s)
        ctx.move_to(0,0)
        drawPieceReal (piece, ctx, size)
        ctx.set_source_rgb(0,0,0)
        ctx.fill()
        surfaceCache[piece] = s

    cc.save()
    cc.set_source_rgb(0,0,0)
    cc.scale(psize/size, psize/size)
    cc.translate(x*size/psize, y*size/psize)
    cc.rectangle (0, 0, int(size), int(size))
    # TODO: Does this give any performance boost?
    # From cairo thread:
    # Or paint() instead of fill(). fill() needs a path, so you should do a
    # rectangle() first.
    cc.set_source_surface(surfaceCache[piece], 0, 0)
    cc.fill()
    cc.restore()

surfaceCache = {}

size = 800.0
pieces = {
    BLACK: {
        KING: "M 653.57940,730.65870 L 671.57940,613.65870 C 725.57940,577.65870 797.57940,514.65870 797.57940,397.65870 C 797.57940,325.65870 734.57940,280.65870 662.57940,280.65870 C 590.57940,280.65870 509.57940,334.65870 509.57940,334.65870 C 509.57940,334.65870 554.57940,190.65870 428.57940,154.65870 L 428.57940,118.65870 L 482.57940,118.65870 L 482.57940,64.658690 L 428.57940,64.658690 L 428.57940,10.658690 L 374.57940,10.658690 L 374.57940,64.658690 L 320.57940,64.658690 L 320.57940,118.65870 L 374.57940,118.65870 L 374.57940,154.65870 C 248.57940,190.65870 293.57940,334.65870 293.57940,334.65870 C 293.57940,334.65870 212.57940,280.65870 140.57940,280.65870 C 68.579380,280.65870 5.5793840,325.65870 5.5793840,397.65870 C 5.5793840,514.65870 77.579380,577.65870 131.57940,613.65870 L 149.57940,730.65870 C 158.57940,757.65870 221.57940,793.65870 401.57940,793.65870 C 581.57940,793.65870 644.57940,757.65870 653.57940,730.65870 z M 374.57940,541.65870 C 329.57940,541.65870 212.57940,550.65870 167.57940,568.65870 C 113.57940,541.65870 59.579380,496.65870 59.579380,406.65870 C 59.579380,352.65870 86.579380,334.65870 149.57940,334.65870 C 212.57940,334.65870 356.57940,397.65870 374.57940,541.65870 z M 428.57940,541.65870 C 446.57940,397.65870 590.57940,334.65870 662.57940,334.65870 C 716.57940,334.65870 743.57940,352.65870 743.57940,406.65870 C 743.57940,496.65870 689.57940,541.65870 635.57940,568.65870 C 590.57940,550.65870 473.57940,541.65870 428.57940,541.65870 z M 617.57940,667.65870 L 608.57940,705.90870 C 437.57940,678.90870 365.57940,678.90870 194.57940,705.90870 L 185.57940,667.65870 C 365.57940,640.65870 437.57940,640.65870 617.57940,667.65870 z M 464.57940,514.65870 C 527.57940,514.65870 581.57940,523.65870 635.57940,541.65870 C 707.57940,487.65870 716.57940,442.65870 716.57940,406.65870 C 716.57940,379.65870 698.57940,361.65870 662.57940,361.65870 C 554.57940,361.65670 473.57940,451.65870 464.57940,514.65870 z M 338.57940,514.65870 C 329.57940,451.65870 239.57940,361.65670 140.57940,361.65870 C 104.57940,361.65870 86.579380,388.65870 86.579380,415.65870 C 86.579380,442.65870 95.579380,487.65870 167.57940,541.65870 C 221.57940,523.65870 275.57940,514.65870 338.57940,514.65870 z ",
        QUEEN: "M 617.12310,626.00950 C 617.12310,599.00950 627.77310,557.68550 671.12310,536.00950 C 689.12310,527.00950 689.12310,509.00950 689.12310,500.00950 C 689.12310,471.54950 743.12310,203.00950 743.12310,203.00950 C 779.62510,198.74750 796.96610,170.47750 796.96610,144.34650 C 796.96610,112.98940 772.26810,85.907430 738.52810,85.907430 C 710.73310,85.907430 680.56310,109.18740 679.85110,143.63450 C 679.66510,152.63250 681.03910,169.05250 697.43010,186.15650
L 590.12310,392.00950 L 590.12310,158.00950 C 619.03410,151.95050 636.85210,127.48250 636.85210,99.687430 C 636.85210,69.517430 610.72110,42.199430 577.93710,42.199430 C 544.44210,42.199430 519.27910,70.470430 519.73610,102.06340 C 519.97310,118.45540 527.57510,136.03450 545.12410,149.00950 L 464.12410,383.00950 L 428.12410,131.00950 C 452.74310,117.03040 459.86810,97.075430 459.86810,80.208430 C 459.86810,41.011430 428.74910,20.581430 401.19210,20.818430 C 371.26010,21.076430 342.75310,46.950430 342.75310,77.120430 C 342.75310,107.05240 359.14410,122.73150 374.12410,131.00950 L 338.12410,383.00950 L 257.12410,149.00950 C 275.75910,134.13450 282.17310,119.64340 282.17310,98.976430 C 282.17310,77.833430 264.35910,42.199430 223.25910,42.199430 C 190.47610,42.199430 165.29410,70.944430 165.29410,99.926430 C 165.29410,134.84850 190.23910,153.37750 212.12410,158.01050 L 212.12410,392.01050 L 104.12410,185.01050 C 117.78210,170.95350 120.15710,154.06050 120.15710,145.06050 C 120.15710,114.41440 97.114060,85.807430 59.124060,86.010430 C 33.925060,86.146430 3.4010650,109.06240 3.0420650,145.06050 C 2.8040650,168.81650 20.858060,200.88650 59.124060,203.01050 C 59.124060,203.01050 113.12410,473.01050 113.12410,500.01050 C 113.12410,509.01150 113.12410,527.01050 131.12410,536.01050 C 167.12410,554.01150 185.12410,599.01050 185.12410,626.01050 C 185.12410,662.01150 158.12410,698.01150 158.12410,707.01150 C 158.12410,752.01050 320.12410,779.01150 401.12410,778.99150 C 473.12410,778.97350 644.12410,752.01050 644.12410,707.01050 C 644.12410,698.01050 617.12410,671.01050 617.12410,626.01050 L 617.12310,626.00950 z M 594.55210,537.12950 C 583.21810,553.12950 581.21810,558.46250 576.55210,575.12950 C 487.21810,553.79650 327.21810,547.79650 225.88510,575.79650 C 221.21710,557.79650 219.21710,550.46250 208.55110,537.12950 C 325.21710,508.46250 479.21710,506.46250 594.55110,537.12950 L 594.55210,537.12950 z M 570.55210,663.79650 C 555.88510,674.46250 551.21810,682.46250 542.55210,693.79650 C 434.55210,677.12950 363.88410,674.46250 255.21810,693.12950 C 246.55210,679.79650 241.88610,673.12950 229.21810,663.79650 C 341.88610,634.46250 457.88610,644.46250 570.55210,663.79650 L 570.55210,663.79650 z ", ROOK: "M 232.94440,519.29360 L 124.94440,627.29360 L 124.94440,762.29360 L 682.94440,762.29360 L 682.94440,627.29360 L 574.94440,519.29360 L 574.94440,303.29360 L 682.94440,231.29360 L 682.94440,51.293580 L 520.94440,51.293580 L 520.94440,123.29360 L 484.94440,123.29360 L 484.94440,51.293580 L 322.94440,51.293580 L 322.94440,123.29360 L 286.94440,123.29360 L 286.94440,51.293580 L 124.94440,51.293580 L 124.94440,231.29360 L 232.94440,303.29360 L 232.94440,519.29360 z M 268.94440,321.29360 L 268.94440,285.29360 L 538.94440,285.29360 L 538.94440,321.29360 L 268.94440,321.29360 z M 268.94440,537.29360 L 268.94440,501.29360 L 538.94440,501.29360 L 538.94440,537.29360 L 268.94440,537.29360 z ", BISHOP: "M 491.69440,453.44430 L 500.69440,482.69430 C 464.69440,455.69430 338.69440,455.69430 302.69440,482.69430 L 311.69440,453.44430 C 332.69440,432.44430 470.69440,432.44430 491.69440,453.44430 z M 509.69440,518.69430 L 518.69440,545.69430 C 470.69440,521.69430 332.69440,521.69430 284.69440,545.69430 L 293.69440,518.69430 C 338.69440,491.69430 464.69440,491.69430 509.69440,518.69430 z M 797.69440,653.69430 C 797.69440,653.69430 752.69440,635.69430 689.69440,626.69430 C 652.95940,621.44630 599.69440,635.69430 554.69440,626.69430 C 518.69440,617.69430 482.69440,599.69430 482.69440,599.69430 L 572.69440,554.69430 L 
545.69440,473.69430 C 545.69440,473.69430 608.69440,446.69430 608.69440,365.69430 C 608.69440,302.69430 563.69440,230.69430 500.69440,194.69430 C 455.13040,168.65830 446.69440,149.69430 446.69440,149.69430 C 446.69440,149.69430 482.69440,131.69430 482.69440,86.694330 C 482.69440,50.694330 455.69440,5.6943260 401.69440,5.6943260 C 347.69440,5.6943260 320.69440,50.694330 320.69440,86.694330 C 320.69440,131.69430 356.69440,149.69430 356.69440,149.69430 C 356.69440,149.69430 348.25840,168.65830 302.69440,194.69430 C 239.69440,230.69430 194.69440,302.69430 194.69440,365.69430 C 194.69440,446.69430 257.69440,473.69430 257.69440,473.69430 L 230.69440,554.69430 L 320.69440,599.69430 C 320.69440,599.69430 284.69440,617.69430 248.69440,626.69430 C 204.17340,637.82430 146.99540,621.93730 113.69440,626.69430 C 50.694360,635.69430 5.6943640,653.69430 5.6943640,653.69430 L 50.694360,797.69430 C 113.69440,779.69430 122.69440,779.69430 176.69440,770.69430 C 209.78640,765.17930 291.51040,774.42230 329.69440,761.69430 C 383.69440,743.69430 401.69440,716.69430 401.69440,716.69430 C 401.69440,716.69430 419.69440,743.69430 473.69440,761.69430 C 511.87840,774.42230 598.40740,767.15830 626.69440,770.69430 C 681.01640,777.48430 752.69440,797.69430 752.69440,797.69430 L 797.69440,653.69430 L 797.69440,653.69430 z M 428.69440,392.69430 L 374.69440,392.69430 L 374.69440,356.69430 L 338.69440,356.69430 L 338.69440,302.69430 L 374.69440,302.69430 L 374.69440,266.69430 L 428.69440,266.69430 L 428.69440,302.69430 L 464.69440,302.69430 L 464.69440,356.69430 L 428.69440,356.69430 L 428.69440,392.69430 z ", KNIGHT: "M 84.310370,730.48460 L 564.28850,729.48460 C 563.97550,600.58860 477.97550,556.58860 485.00550,477.74860 L 587.06050,552.58860 C 611.11150,581.44960 637.05150,594.72560 657.36750,594.91660 C 671.53450,595.04960 633.37050,547.08060 627.37050,536.08060 C 653.37050,535.08060 689.37050,585.08060 718.38750,574.11560 C 739.54850,566.12160 754.01750,540.22060 753.06850,502.24260 C 751.70850,447.81260 690.47450,367.52960 667.34250,266.83660 C 641.48850,160.69960 611.91250,147.09260 595.58450,141.64850 L 595.22350,64.085560 L 513.57950,123.95850 L 467.31450,43.675570 L 421.04950,138.92750 C 260.48350,91.300560 89.752370,428.40260 84.309370,730.48460 L 84.310370,730.48460 z M 125.87840,697.92560 C 125.87840,436.61260 289.76850,168.92760 381.72850,167.10560 C 399.37150,167.41260 415.37150,173.41260 415.32750,179.85360 C 415.24050,192.63260 399.02750,197.15260 379.90750,197.15260 C 307.97850,199.88460 158.65640,453.00260 156.83540,695.19560 C 156.83540,713.40460 127.70040,712.49460 125.87940,697.92560 L 125.87840,697.92560 z M 678.74350,471.34160 C 684.09050,477.57960 689.68150,486.16560 689.86350,492.19160 C 690.09450,499.83660 684.07150,505.86160 678.28050,503.54360 C 672.48850,501.22760 665.53850,488.25260 660.90550,485.70560 C 656.27250,483.15660 642.14050,481.30360 642.37250,474.81660 C 642.60450,468.32960 652.10250,462.53760 657.66250,462.76960 C 663.22250,463.00160 675.96450,468.09760 678.74450,471.34160 L 678.74350,471.34160 z M 520.98750,218.08460 C 534.62350,223.81160 559.71450,235.26460 577.44150,255.99260 C 594.62350,278.90060 595.98650,304.80860 596.53150,323.35560 C 566.80450,326.90060 541.87450,318.25160 529.44150,290.90060 C 521.25950,272.90060 520.98650,239.62960 520.98650,218.08460 L 520.98750,218.08460 z ", PAWN: "M 688.02380,750.97630 L 688.02380,624.97630 C 688.02380,579.97630 661.62380,452.47630 553.02380,408.97630 C 598.02380,354.97630 607.02380,255.97630 517.02380,192.97630 C 544.02380,156.97630 
517.02380,30.976220 409.02380,30.976220 C 301.02380,30.976220 274.02380,156.97630 301.02380,192.97630 C 211.02380,255.97630 220.02380,354.97630 265.02380,408.97630 C 157.02380,453.97630 130.02380,579.97630 130.02380,624.97630 L 130.02380,750.97630 L 688.02380,750.97630 z " }, WHITE: { KING: "M 648.50000,730.65870 L 666.50000,613.65870 C 720.50000,577.65870 792.50000,514.65870 792.50000,397.65870 C 792.50000,325.65870 729.50000,280.65870 657.50000,280.65870 C 585.50000,280.65870 504.50000,334.65870 504.50000,334.65870 C 504.50000,334.65870 549.50000,190.65870 423.50000,154.65870 L 423.50000,118.65870 L 477.50000,118.65870 L 477.50000,64.658690 L 423.50000,64.658690 L 423.50000,10.658690 L 369.50000,10.658690 L 369.50000,64.658690 L 315.50000,64.658690 L 315.50000,118.65870 L 369.50000,118.65870 L 369.50000,154.65870 C 243.50000,190.65870 288.50000,334.65870 288.50000,334.65870 C 288.50000,334.65870 207.50000,280.65870 135.50000,280.65870 C 63.500000,280.65870 0.50000000,325.65870 0.50000000,397.65870 C 0.50000000,514.65870 72.500000,577.65870 126.50000,613.65870 L 144.50000,730.65870 C 153.50000,757.65870 216.50000,793.65870 396.50000,793.65870 C 576.50000,793.65870 639.50000,757.65870 648.50000,730.65870 z M 396.50000,451.65870 C 396.50000,451.65870 333.50000,343.65870 333.50000,280.65870 C 333.50000,217.65870 369.50000,208.65870 396.50000,208.65870 C 423.50000,208.65870 459.50000,226.65870 459.50000,280.65870 C 459.50000,334.65870 396.50000,451.65870 396.50000,451.65870 z M 369.50000,541.65870 C 324.50000,541.65870 207.50000,550.65870 162.50000,568.65870 C 108.50000,541.65870 54.500000,496.65870 54.500000,406.65870 C 54.500000,352.65870 81.500000,334.65870 144.50000,334.65870 C 207.50000,334.65870 351.50000,397.65870 369.50000,541.65870 z M 423.50000,541.65870 C 441.50000,397.65870 585.50000,334.65870 657.50000,334.65870 C 711.50000,334.65870 738.50000,352.65870 738.50000,406.65870 C 738.50000,496.65870 684.50000,541.65870 630.50000,568.65870 C 585.50000,550.65870 468.50000,541.65870 423.50000,541.65870 z M 612.50000,613.65870 L 603.50000,685.65870 C 432.50000,658.65870 360.50000,658.65870 189.50000,685.65870 L 180.50000,613.65870 C 360.50000,586.65870 432.50000,586.65870 612.50000,613.65870 z M 549.50000,730.65870 C 468.50000,748.65870 441.50000,748.65870 396.50000,748.65870 C 351.50000,748.65870 324.50000,748.65870 243.50000,730.65870 C 324.50000,712.65870 342.50000,712.65870 396.50000,712.65870 C 450.50000,712.65870 468.50000,712.65870 549.50000,730.65870 z ", QUEEN: "M 764.60380,143.65350 C 764.80680,155.66150 755.91880,166.72550 742.61880,166.56350 C 727.46980,166.37750 719.85480,154.92450 719.70980,144.57750 C 719.52480,131.46050 729.68780,121.66950 742.24980,121.66950 C 754.99780,121.66950 764.41980,132.75350 764.60380,143.65350 L 764.60380,143.65350 z M 619.66280,626.00950 C 619.66280,599.00950 630.31280,557.68550 673.66280,536.00950 C 691.66280,527.00950 691.66280,509.00950 691.66280,500.00950 C 691.66280,471.54950 745.66280,203.00950 745.66280,203.00950 C 782.16480,198.74750 799.50580,170.47750 799.50580,144.34650 C 799.50580,112.98950 774.80780,85.907570 741.06780,85.907570 C 713.27280,85.907570 683.10280,109.18750 682.39080,143.63450 C 682.20480,152.63250 683.57880,169.05250 699.96980,186.15650 L 592.66280,392.00950 L 592.66280,158.00950 C 621.57380,151.95050 639.39180,127.48250 639.39180,99.687540 C 639.39180,69.517570 613.26080,42.199570 580.47680,42.199570 C 546.98180,42.199570 521.81880,70.470570 522.27580,102.06350 C 522.51280,118.45550 530.11480,136.03450 
547.66380,149.00950 L 466.66380,383.00950 L 430.66380,131.00950 C 455.28280,117.03050 462.40880,97.075540 462.40880,80.208570 C 462.40880,41.011570 431.28880,20.581570 403.73180,20.818570 C 373.79980,21.076570 345.29380,46.950570 345.29380,77.120570 C 345.29380,107.05250 361.68480,122.73150 376.66380,131.00950 L 340.66380,383.00950 L 259.66380,149.00950 C 278.29980,134.13450 284.71380,119.64350 284.71380,98.976540 C 284.71380,77.833570 266.89880,42.199570 225.79980,42.199570 C 193.01680,42.199570 167.83480,70.944570 167.83480,99.926540 C 167.83480,134.84850 192.77880,153.37750 214.66380,158.01050 L 214.66380,392.01050 L 106.66380,185.01050 C 120.32180,170.95350 122.69780,154.06050 122.69780,145.06050 C 122.69780,114.41450 99.654770,85.807570 61.663770,86.010570 C 36.464770,86.146570 5.9407760,109.06250 5.5817760,145.06050 C 5.3447760,168.81650 23.398770,200.88650 61.663770,203.01050 C 61.663770,203.01050 115.66380,473.01050 115.66380,500.01050 C 115.66380,509.01150 115.66380,527.01050 133.66380,536.01050 C 169.66380,554.01150 187.66380,599.01050 187.66380,626.01050 C 187.66380,662.01150 160.66380,698.01150 160.66380,707.01150 C 160.66380,752.01050 322.66380,779.01150 403.66380,778.99150 C 475.66380,778.97350 646.66380,752.01050 646.66380,707.01050 C 646.66380,698.01050 619.66380,671.01050 619.66380,626.01050 L 619.66280,626.00950 z M 87.606770,144.04950 C 87.809770,156.05650 78.921770,167.12050 65.621770,166.95850 C 50.472770,166.77350 42.857770,155.31950 42.712770,144.97350 C 42.527770,131.85650 52.690770,122.06450 65.252770,122.06450 C 78.000770,122.06450 87.422770,133.14950 87.606770,144.04950 z M 603.61080,99.656540 C 603.81380,111.66350 594.92580,122.72750 581.62580,122.56550 C 566.47680,122.38050 558.86180,110.92650 558.71680,100.58050 C 558.53180,87.463540 568.69480,77.671570 581.25680,77.671570 C 594.00480,77.671570 603.42680,88.756540 603.61080,99.656540 L 603.61080,99.656540 z M 426.61880,78.157570 C 426.82180,90.165540 417.93380,101.22950 404.63380,101.06750 C 389.48480,100.88150 381.86980,89.428540 381.72480,79.081570 C 381.53980,65.964570 391.70280,56.173570 404.26480,56.173570 C 417.01280,56.173570 426.43480,67.257570 426.61880,78.157570 z M 249.12780,100.65650 C 249.33080,112.66350 240.44280,123.72750 227.14280,123.56550 C 211.99380,123.38050 204.37880,111.92650 204.23380,101.58050 C 204.04880,88.463540 214.21180,78.671570 226.77380,78.671570 C 239.52180,78.671570 248.94380,89.756540 249.12780,100.65650 z M 578.63980,575.93450 C 569.63980,591.93450 563.63980,630.93450 573.63980,663.93450 C 467.97280,643.60050 338.63980,637.93450 231.63980,663.93450 C 238.63980,638.93450 240.63980,613.93450 227.63980,575.93450 C 320.63980,544.93450 515.63980,553.93450 578.63980,575.93450 L 578.63980,575.93450 z M 537.63980,707.93450 C 489.97280,725.93450 429.97280,726.26850 399.97280,726.26850 C 369.97280,726.26850 308.97280,723.93450 264.63980,708.93450 C 317.63980,697.93450 362.30680,695.60050 397.30680,695.60050 C 432.30680,695.60050 497.97280,700.26850 537.63980,707.93450 z M 210.32980,536.94050 C 210.32980,536.94050 205.66280,530.27350 200.99580,524.94050 C 210.32980,522.27350 232.99580,509.60650 244.32980,494.94050 C 291.66280,508.94050 316.32980,498.94050 350.99580,476.27350 C 384.32980,494.94050 417.66280,494.27350 458.99580,474.94050 C 486.32980,498.27350 515.66280,504.27350 559.66280,495.60650 C 576.32980,512.94050 586.99580,518.94050 604.32980,525.60650 L 596.32980,536.94050 C 454.32980,506.94050 358.99580,506.27350 210.32980,536.94050 L 210.32980,536.94050 z M 691.30580,290.55250 
L 654.25080,486.80250 C 626.80380,493.20650 606.21880,481.76950 593.40980,465.30150 L 691.30580,290.55250 z M 553.20180,247.31050 L 550.98680,445.24750 C 523.09080,454.98950 508.03480,450.56150 487.66580,434.17750 L 553.20180,247.31050 z M 401.76580,233.14150 L 433.64780,429.30650 C 416.82180,441.26250 388.48180,443.47650 369.44080,428.74850 L 401.76580,233.14150 z M 252.98380,254.39550 L 318.96280,441.26250 C 304.35080,457.64650 279.55280,464.28750 255.64180,452.77550 L 252.98480,254.39550 L 252.98380,254.39550 z M 116.63980,294.93450 L 212.13980,463.43450 C 201.63980,481.43450 175.13980,492.43450 151.13980,485.43450 L 116.63980,294.93450 z ", ROOK: "M 227.86510,504.05560 L 119.86510,612.05560 L 119.86510,747.05560 L 677.86510,747.05560 L 677.86510,612.05560 L 569.86510,504.05560 L 569.86510,288.05560 L 677.86510,216.05560 L 677.86510,36.055570 L 515.86510,36.055570 L 515.86510,108.05560 L 479.86510,108.05560 L 479.86510,36.055570 L 317.86510,36.055570 L 317.86510,108.05560 L 281.86510,108.05560 L 281.86510,36.055570 L 119.86510,36.055570 L 119.86510,216.05560 L 227.86510,288.05560 L 227.86510,504.05560 z M 623.86510,90.055570 L 623.86510,180.05560 L 515.86510,252.05560 L 281.86510,252.05560 L 173.86510,180.05560 L 173.86510,90.055570 L 227.86510,90.055570 L 227.86510,162.05560 L 371.86510,162.05560 L 371.86510,90.055570 L 425.86510,90.055570 L 425.86510,162.05560 L 569.86510,162.05560 L 569.86510,90.055570 L 623.86510,90.055570 z M 515.86510,315.05560 L 515.86510,468.05560 L 281.86510,468.05560 L 281.86510,315.05560 L 515.86510,315.05560 z M 623.86510,657.05560 L 623.86510,693.05560 L 173.86510,693.05560 L 173.86510,657.05560 L 623.86510,657.05560 z M 515.86510,531.05560 L 596.86510,603.05560 L 200.86510,603.05560 L 281.86510,531.05560 L 515.86510,531.05560 z ", BISHOP: "M 404.23410,59.693330 C 422.23410,59.693330 431.23410,68.693330 431.23410,86.693330 C 431.23410,104.69330 422.23410,113.69330 404.23410,113.69330 C 386.23410,113.69330 377.23410,104.69330 377.23410,86.693330 C 377.23410,68.693330 386.23410,59.693330 404.23410,59.693330 z M 404.23410,167.69330 C 440.23410,221.69330 458.23410,221.69330 503.23410,257.69330 C 548.23410,293.69330 557.23410,338.69330 557.23410,374.69430 C 557.23410,410.69330 536.23410,432.29530 512.23410,446.69430 C 512.23410,446.69430 476.23410,428.69430 404.23410,428.69430 C 332.23410,428.69430 296.23410,446.69430 296.23410,446.69430 C 296.23410,446.69430 251.23410,410.69330 251.23410,374.69430 C 251.23410,338.69330 260.23410,293.69330 305.23410,257.69330 C 350.23410,221.69330 368.23410,221.69330 404.23410,167.69330 z M 503.23410,482.69430 L 512.23410,509.69430 C 467.23410,491.69430 341.23410,491.69430 296.23410,509.69430 L 305.23410,482.69430 C 341.23410,464.69430 467.23410,464.69430 503.23410,482.69430 z M 404.23410,536.69430 C 440.23410,536.69530 494.23410,545.69430 494.23410,545.69430 C 494.23410,545.69430 440.23410,554.69430 404.23410,554.69430 C 368.23410,554.69430 314.23410,545.69530 314.23410,545.69530 C 314.23410,545.69530 368.23410,536.69330 404.23410,536.69430 z M 440.23410,635.69430 C 494.23410,671.69430 503.59610,666.60330 539.23410,671.69430 C 602.23410,680.69430 628.16110,676.01530 656.23410,680.69430 C 710.23410,689.69430 737.23410,698.69430 737.23410,698.69430 L 719.23410,743.69430 C 719.23410,743.69430 710.66410,732.18430 665.23410,725.69430 C 602.23410,716.69430 548.23410,716.69430 503.23410,707.69430 C 458.23410,698.69430 422.23410,680.69430 404.23410,662.69430 C 386.84810,680.08030 350.23410,698.69430 305.23410,707.69430 C 
260.23410,716.69430 207.48310,712.84430 143.23410,725.69430 C 98.234040,734.69430 89.234040,743.69430 89.234040,743.69430 L 71.234040,698.69430 C 71.234040,698.69430 98.234040,689.69430 152.23410,680.69430 C 176.77810,676.60330 206.23410,680.69430 269.23410,671.69430 C 305.96910,666.44630 314.23410,671.69430 368.23410,635.69430 L 440.23410,635.69430 z M 431.23410,266.69430 L 377.23410,266.69430 L 377.23410,302.69430 L 341.23410,302.69430 L 341.23410,356.69430 L 377.23410,356.69430 L 377.23410,392.69430 L 431.23410,392.69430 L 431.23410,356.69430 L 467.23410,356.69430 L 467.23410,302.69430 L 431.23410,302.69430 L 431.23410,266.69430 z M 800.23410,653.69430 C 800.23410,653.69430 755.23410,635.69430 692.23410,626.69430 C 655.49910,621.44630 602.23410,635.69430 557.23410,626.69430 C 521.23410,617.69430 485.23410,599.69430 485.23410,599.69430 L 575.23410,554.69430 L 548.23410,473.69430 C 548.23410,473.69430 611.23410,446.69430 611.23410,365.69430 C 611.23410,302.69430 566.23410,230.69430 503.23410,194.69430 C 457.67010,168.65830 449.23410,149.69430 449.23410,149.69430 C 449.23410,149.69430 485.23410,131.69430 485.23410,86.694330 C 485.23410,50.694330 458.23410,5.6943260 404.23410,5.6943260 C 350.23410,5.6943260 323.23410,50.694330 323.23410,86.694330 C 323.23410,131.69430 359.23410,149.69430 359.23410,149.69430 C 359.23410,149.69430 350.79810,168.65830 305.23410,194.69430 C 242.23410,230.69430 197.23410,302.69430 197.23410,365.69430 C 197.23410,446.69430 260.23410,473.69430 260.23410,473.69430 L 233.23410,554.69430 L 323.23410,599.69430 C 323.23410,599.69430 287.23410,617.69430 251.23410,626.69430 C 206.71310,637.82430 149.53510,621.93730 116.23410,626.69430 C 53.234040,635.69430 8.2340370,653.69430 8.2340370,653.69430 L 53.234040,797.69430 C 116.23410,779.69430 125.23410,779.69430 179.23410,770.69430 C 212.32610,765.17930 294.05010,774.42230 332.23410,761.69430 C 386.23410,743.69430 404.23410,716.69430 404.23410,716.69430 C 404.23410,716.69430 422.23410,743.69430 476.23410,761.69430 C 514.41810,774.42230 600.94710,767.15830 629.23410,770.69430 C 683.55610,777.48430 755.23410,797.69430 755.23410,797.69430 L 800.23410,653.69430 L 800.23410,653.69430 z ", KNIGHT: "M 76.688770,727.94590 L 556.66680,726.94590 C 556.35380,598.04990 470.35380,554.04990 477.38380,475.20990 L 579.43880,550.04990 C 620.25980,599.03590 666.52580,603.11790 681.49380,574.54390 C 715.51280,581.34690 746.80780,554.13390 745.44780,499.70390 C 744.08780,445.27390 682.85280,364.99090 659.72080,264.29690 C 633.86680,158.15990 604.29080,144.55290 587.96280,139.10890 L 587.60180,61.545870 L 505.95780,121.41890 L 459.69280,41.135870 L 413.42780,136.38790 C 252.86280,88.761870 82.131770,425.86390 76.688770,727.94590 z M 505.95780,677.95990 L 126.31380,677.95990 C 205.68580,187.71690 362.68580,154.71690 437.92180,193.53890 L 462.41480,144.55290 L 481.46480,178.57090 L 567.19080,198.98090 L 576.71580,189.45790 C 597.12680,205.78590 608.14980,284.03590 634.35280,350.71790 C 662.29280,421.82190 697.90980,477.31990 697.82080,496.98190 C 697.68580,526.71790 689.01880,532.71790 671.96680,525.55790 C 663.68580,512.04990 655.01880,500.71790 639.30980,499.70390 C 632.35280,500.04990 621.14380,503.00890 631.68580,509.71790 C 646.35280,519.04990 642.75180,540.16490 642.75180,540.16490 C 616.01880,518.71790 515.25280,437.01890 459.69280,401.73090 C 442.35180,390.71690 426.68480,380.71690 414.68480,350.21690 C 390.82180,377.37090 416.35180,430.71690 431.68480,444.04890 C 408.35180,536.04890 484.35180,618.71690 505.95680,677.95890 L 
505.95780,677.95990 z M 682.04780,490.00990 C 682.04780,485.18590 675.98380,472.91790 669.91880,467.95690 C 663.85480,462.99390 654.48180,460.23590 648.96880,460.37490 C 643.45580,460.51390 634.49680,465.74990 634.90980,472.36690 C 635.32280,478.98190 647.17680,480.77490 651.86280,482.56590 C 656.54880,484.35690 662.88880,496.62590 669.50480,500.75990 C 676.12080,504.89390 682.04780,496.67790 682.04780,490.00990 L 682.04780,490.00990 z M 588.45680,320.97290 C 588.11380,280.48690 578.16380,263.67390 566.49780,249.26390 C 554.83180,234.85190 512.97280,215.29490 512.97280,215.29490 C 512.97280,215.29490 508.16880,266.41790 525.66780,296.26990 C 543.16680,326.11990 570.61480,321.31690 588.45680,320.97290 L 588.45680,320.97290 z ", PAWN: "M 688.02380,753.51590 L 688.02380,627.51590 C 688.02380,582.51590 661.62380,455.01590 553.02380,411.51590 C 598.02380,357.51590 607.02380,258.51590 517.02380,195.51590 C 544.02380,159.51590 517.02380,33.515900 409.02380,33.515900 C 301.02380,33.515900 274.02380,159.51590 301.02380,195.51590 C 211.02380,258.51590 220.02380,357.51590 265.02380,411.51590 C 157.02380,456.51590 130.02380,582.51590 130.02380,627.51590 L 130.02380,753.51590 L 688.02380,753.51590 z M 409.02380,87.515900 C 490.02380,87.515900 490.02380,177.51590 454.02380,213.51590 C 562.02380,258.51590 535.02380,375.51590 481.02380,429.51590 C 571.02380,456.51590 634.02380,546.51590 634.02380,609.51590 L 634.02380,699.51590 L 184.02380,699.51590 L 184.02380,609.51590 C 184.02380,546.51590 247.02380,456.51590 337.02380,429.51590 C 283.02380,375.51590 256.02380,258.51590 364.02380,213.51590 C 328.02380,177.51590 328.02380,87.515900 409.02380,87.515900 z " } } parsedPieces = [[[], [], [], [], [], [], []], \ [[], [], [], [], [], [], []]] for color in (WHITE, BLACK): for piece in range(PAWN, KING+1): list = [] thep = [0,0] for g1, g2 in elemExpr.findall(pieces[color][piece]): if g2: points = [float(s) for s in spaceExpr.split(g2)] list += [(g1, [f-thep[i%2] for i,f in enumerate(points)])] thep = points[-2:] elif g1 == 'z': list += [('z', (0,0))] else: continue parsedPieces[color][piece] = {size:list} pieces = (PAWN, KNIGHT, BISHOP, ROOK, QUEEN, KING) def get_svg_pieces(svgdir): """Load figurines from .svg files""" if all_in_one: rsvg_handles = Rsvg.Handle.new_from_file(addDataPrefix("pieces/%s/%s.svg" % (svgdir, svgdir))) else: rsvg_handles = [[None]*7, [None]*7] for c, color in ((WHITE, 'white'), (BLACK, 'black')): for p in pieces: rsvg_handles[c][p] = Rsvg.Handle.new_from_file(addDataPrefix("pieces/%s/%s%s.svg" % (svgdir, color[0], reprSign[p].lower()))) return rsvg_handles def get_chess_font_face(name): """Set chess font and char mapping for a chess .ttf""" name = name[4:] if name in ('alpha', 'berlin', 'cheq'): char_map = ('phbrqk', 'ojntwl') else: char_map = ('pnbrqk', 'omvtwl') piece_chars = [[None]*7, [None]*7] for color in (WHITE, BLACK): for piece, char in zip(pieces, char_map[color]): piece_chars[color][piece] = char face = create_cairo_font_face_for_file(addDataPrefix("pieces/ttf/%s.ttf" % name)) return face, piece_chars all_in_one = None drawPiece = None svg_pieces = None chess_font_face = None piece2char = None def set_piece_theme(piece_set): global all_in_one global drawPiece global svg_pieces global chess_font_face global piece2char piece_set = piece_set.lower() if piece_set == 'pychess': drawPiece = drawPiece2 elif piece_set.startswith("ttf-"): drawPiece = drawPiece4 try: chess_font_face, piece2char = get_chess_font_face(piece_set) except: drawPiece = drawPiece2 elif piece_set in 
            ('celtic', 'eyes', 'fantasy', 'fantasy_alt', 'freak', 'prmi', 'skulls', 'spatial'):
        all_in_one = True
        drawPiece = drawPiece3
        svg_pieces = get_svg_pieces(piece_set)
    else:
        all_in_one = False
        drawPiece = drawPiece3
        try:
            svg_pieces = get_svg_pieces(piece_set)
        except:
            drawPiece = drawPiece2

set_piece_theme(conf.get("pieceTheme", "pychess"))
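The module-level parsing loop above converts each absolute SVG coordinate into an offset from the previous endpoint, so Cairo's rel_move_to/rel_line_to/rel_curve_to can replay the outline at any scale. A self-contained sketch of the same regex-driven tokenization step, using the elemExpr/spaceExpr patterns from the module (the tiny path string is invented for illustration):

# Minimal sketch, not part of the module: how elemExpr/spaceExpr decompose an
# SVG-style path string into (command, points) tuples.
import re

elemExpr = re.compile(r"([a-zA-Z])\s*([0-9\.,\s]*)\s+|[z]\s+")
spaceExpr = re.compile(r"[\s,]+")

path = "M 10,10 L 20,10 L 20,20 z "   # made-up path for illustration
for cmd, coords in elemExpr.findall(path):
    if coords:
        points = [float(s) for s in spaceExpr.split(coords)]
        print(cmd, points)   # e.g. M [10.0, 10.0]
    elif cmd == 'z':
        print('close path')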
gpl-3.0
InnovArul/codesmart
Assignments/Jul-Nov-2017/reinforcement_learning_udemy/rl/monte_carlo_soft_epsilon.py
1
3861
from __future__ import print_function
import numpy as np
from grid import standard_grid, negative_grid
from iterative_policy_evaluation import print_values, print_policy
import matplotlib.pyplot as plt
from monte_carlo_exploring_starts import max_dict

EPS = 1e-4
GAMMA = 0.9
ALL_POSSIBLE_ACTIONS = {'U', 'D', 'L', 'R'}

def random_action(a, eps=0.1):
    # epsilon-soft: keep the policy's action with probability 1 - eps,
    # otherwise pick uniformly among all actions
    p = np.random.random()
    if(p < 1 - eps):
        return a
    else:
        return np.random.choice(list(ALL_POSSIBLE_ACTIONS))

# monte carlo sampling - finding out the optimal policy (policy iteration)
def play_game(grid, policy):
    all_states = list(grid.actions.keys())
    state = (2, 0)
    # instead of taking a random action at the first step, take the action
    # suggested by the (epsilon-soft) policy
    a = random_action(policy[state])
    grid.set_state(state)
    # the stored action is the one about to be taken from this state
    states_actions_rewards = [(state, a, 0)]

    while True:
        r = grid.move(a)
        state = grid.current_state()
        #print(prev_state)
        if grid.game_over():
            # game over: record the terminal state with no action
            states_actions_rewards.append((state, None, r))
            break
        else:
            # collect the next action that we are going to take and insert it into the trace
            a = random_action(policy[state])
            states_actions_rewards.append((state, a, r))

    # calculate the returns by working backwards from the terminal state
    G = 0
    states_actions_returns = []
    for i, state_action_reward in enumerate(reversed(states_actions_rewards)):
        state, action, reward = state_action_reward
        if i != 0:
            states_actions_returns.append((state, action, G))
        G = reward + GAMMA * G
    states_actions_returns.reverse()
    return states_actions_returns

def max_dict(hash):
    # note: this local definition shadows the max_dict imported above
    max_key = None
    max_val = float('-inf')
    for k in hash:
        if(hash[k] > max_val):
            max_key, max_val = k, hash[k]
    return max_key, max_val

if __name__ == '__main__':
    #grid = standard_grid()
    grid = negative_grid(-0.1)
    print('grid')
    print_values(grid.rewards, grid)

    # init random policy
    policy = {}
    for s in grid.actions:
        policy[s] = np.random.choice(list(ALL_POSSIBLE_ACTIONS))
    print('policy')
    print_policy(policy, grid)

    # initialize Q(s, a)
    Q = {}
    returns = {}  # buffer to hold all the returns for a (state, action) pair during monte-carlo game plays
    for s in grid.actions:  # grid.actions only contains non-terminal states
        Q[s] = {}
        for a in ALL_POSSIBLE_ACTIONS:
            # for all the possible actions, initialize Q(s,a)
            Q[s][a] = 0
            returns[(s, a)] = []

    # deltas
    deltas = []
    for sample in range(5000):
        if sample % 500 == 0:
            print(sample)

        biggest_change = 0
        # generate an episode and adapt Q(s, a)
        states_actions_returns = play_game(grid, policy)
        seen_states_actions = set()
        for s, a, G in states_actions_returns:
            key = (s, a)
            # first-visit Monte Carlo: only the first occurrence of a
            # (state, action) pair in the episode is used
            # (bug fix: the original checked `s`, not `key`, so the set
            # membership test could never succeed)
            if key not in seen_states_actions:
                old_q = Q[s][a]
                returns[key].append(G)
                Q[s][a] = np.mean(returns[key])
                seen_states_actions.add(key)
                biggest_change = max(biggest_change, abs(G - old_q))
        deltas.append(biggest_change)

        # policy improvement
        for s in Q:
            policy[s] = max_dict(Q[s])[0]

    plt.plot(deltas)
    plt.show()

    # find V from the learned Q
    V = {}
    for s in Q:
        V[s] = max_dict(Q[s])[1]

    print('grid')
    print_values(V, grid)

    print('policy')
    print_policy(policy, grid)
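A quick way to sanity-check the epsilon-soft behaviour is to sample random_action many times and compare the empirical frequencies with the expected values: the policy's action should appear with probability about 1 - eps + eps/4, every other action with about eps/4. A small illustrative check, not part of the original script (assumes the random_action and ALL_POSSIBLE_ACTIONS definitions above):

# Illustrative check of the epsilon-soft distribution.
from collections import Counter

N = 100000
counts = Counter(random_action('U', eps=0.1) for _ in range(N))
# 'U' should come out near 0.9 + 0.1/4 = 0.925; 'D', 'L', 'R' near 0.025 each
for action in sorted(counts):
    print(action, counts[action] / float(N))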
gpl-2.0
frederick-masterton/django
tests/file_uploads/views.py
21
4851
from __future__ import unicode_literals

import hashlib
import json
import os

from django.core.files.uploadedfile import UploadedFile
from django.http import HttpResponse, HttpResponseServerError
from django.utils import six
from django.utils.encoding import force_bytes, smart_str

from .models import FileModel
from .tests import UNICODE_FILENAME, UPLOAD_TO
from .uploadhandler import QuotaUploadHandler, ErroringUploadHandler


def file_upload_view(request):
    """
    Check that a file upload can be updated into the POST dictionary without
    going pear-shaped.
    """
    form_data = request.POST.copy()
    form_data.update(request.FILES)
    if isinstance(form_data.get('file_field'), UploadedFile) and isinstance(form_data['name'], six.text_type):
        # If a file is posted, the dummy client should only post the file name,
        # not the full path.
        if os.path.dirname(form_data['file_field'].name) != '':
            return HttpResponseServerError()
        return HttpResponse('')
    else:
        return HttpResponseServerError()


def file_upload_view_verify(request):
    """
    Use the sha digest hash to verify the uploaded contents.
    """
    form_data = request.POST.copy()
    form_data.update(request.FILES)

    for key, value in form_data.items():
        if key.endswith('_hash'):
            continue
        if key + '_hash' not in form_data:
            continue
        submitted_hash = form_data[key + '_hash']
        if isinstance(value, UploadedFile):
            new_hash = hashlib.sha1(value.read()).hexdigest()
        else:
            new_hash = hashlib.sha1(force_bytes(value)).hexdigest()
        if new_hash != submitted_hash:
            return HttpResponseServerError()

    # Adding large file to the database should succeed
    largefile = request.FILES['file_field2']
    obj = FileModel()
    obj.testfile.save(largefile.name, largefile)

    return HttpResponse('')


def file_upload_unicode_name(request):
    # Check to see if unicode name came through properly.
    if not request.FILES['file_unicode'].name.endswith(UNICODE_FILENAME):
        return HttpResponseServerError()

    response = None

    # Check to make sure the exotic characters are preserved even
    # through file save.
    uni_named_file = request.FILES['file_unicode']
    obj = FileModel.objects.create(testfile=uni_named_file)
    full_name = '%s/%s' % (UPLOAD_TO, uni_named_file.name)
    if not os.path.exists(full_name):
        response = HttpResponseServerError()

    # Cleanup the object with its exotic file name immediately.
    # (shutil.rmtree used elsewhere in the tests to clean up the
    # upload directory has been seen to choke on unicode
    # filenames on Windows.)
    obj.delete()
    os.unlink(full_name)

    if response:
        return response
    else:
        return HttpResponse('')


def file_upload_echo(request):
    """
    Simple view to echo back info about uploaded files for tests.
    """
    r = dict((k, f.name) for k, f in request.FILES.items())
    return HttpResponse(json.dumps(r))


def file_upload_echo_content(request):
    """
    Simple view to echo back the content of uploaded files for tests.
    """
    r = dict((k, f.read().decode('utf-8')) for k, f in request.FILES.items())
    return HttpResponse(json.dumps(r))


def file_upload_quota(request):
    """
    Dynamically add in an upload handler.
    """
    request.upload_handlers.insert(0, QuotaUploadHandler())
    return file_upload_echo(request)


def file_upload_quota_broken(request):
    """
    You can't change handlers after reading FILES; this view shouldn't work.
    """
    response = file_upload_echo(request)
    request.upload_handlers.insert(0, QuotaUploadHandler())
    return response


def file_upload_getlist_count(request):
    """
    Check the .getlist() function to ensure we receive the correct number of files.
    """
    file_counts = {}
    for key in request.FILES.keys():
        file_counts[key] = len(request.FILES.getlist(key))
    return HttpResponse(json.dumps(file_counts))


def file_upload_errors(request):
    request.upload_handlers.insert(0, ErroringUploadHandler())
    return file_upload_echo(request)


def file_upload_filename_case_view(request):
    """
    Check adding the file to the database will preserve the filename case.
    """
    file = request.FILES['file_field']
    obj = FileModel()
    obj.testfile.save(file.name, file)
    return HttpResponse('%d' % obj.pk)


def file_upload_content_type_extra(request):
    """
    Simple view to echo back extra content-type parameters.
    """
    params = {}
    for file_name, uploadedfile in request.FILES.items():
        params[file_name] = dict([
            (k, smart_str(v)) for k, v in uploadedfile.content_type_extra.items()
        ])
    return HttpResponse(json.dumps(params))
bsd-3-clause
kdwink/intellij-community
python/lib/Lib/site-packages/django/contrib/messages/tests/base.py
73
17612
import warnings from django import http from django.test import TestCase from django.conf import settings from django.utils.translation import ugettext_lazy from django.utils.unittest import skipIf from django.contrib.messages import constants, utils, get_level, set_level from django.contrib.messages.api import MessageFailure from django.contrib.messages.storage import default_storage, base from django.contrib.messages.storage.base import Message from django.core.urlresolvers import reverse from django.contrib.auth.models import User def skipUnlessAuthIsInstalled(func): return skipIf( 'django.contrib.auth' not in settings.INSTALLED_APPS, "django.contrib.auth isn't installed")(func) def add_level_messages(storage): """ Adds 6 messages from different levels (including a custom one) to a storage instance. """ storage.add(constants.INFO, 'A generic info message') storage.add(29, 'Some custom level') storage.add(constants.DEBUG, 'A debugging message', extra_tags='extra-tag') storage.add(constants.WARNING, 'A warning') storage.add(constants.ERROR, 'An error') storage.add(constants.SUCCESS, 'This was a triumph.') class BaseTest(TestCase): storage_class = default_storage restore_settings = ['MESSAGE_LEVEL', 'MESSAGE_TAGS'] urls = 'django.contrib.messages.tests.urls' levels = { 'debug': constants.DEBUG, 'info': constants.INFO, 'success': constants.SUCCESS, 'warning': constants.WARNING, 'error': constants.ERROR, } def setUp(self): self._remembered_settings = {} for setting in self.restore_settings: if hasattr(settings, setting): self._remembered_settings[setting] = getattr(settings, setting) delattr(settings._wrapped, setting) # Backup these manually because we do not want them deleted. self._middleware_classes = settings.MIDDLEWARE_CLASSES self._template_context_processors = \ settings.TEMPLATE_CONTEXT_PROCESSORS self._installed_apps = settings.INSTALLED_APPS self._message_storage = settings.MESSAGE_STORAGE settings.MESSAGE_STORAGE = '%s.%s' % (self.storage_class.__module__, self.storage_class.__name__) self.save_warnings_state() warnings.filterwarnings('ignore', category=DeprecationWarning, module='django.contrib.auth.models') def tearDown(self): for setting in self.restore_settings: self.restore_setting(setting) # Restore these manually (see above). settings.MIDDLEWARE_CLASSES = self._middleware_classes settings.TEMPLATE_CONTEXT_PROCESSORS = \ self._template_context_processors settings.INSTALLED_APPS = self._installed_apps settings.MESSAGE_STORAGE = self._message_storage self.restore_warnings_state() def restore_setting(self, setting): if setting in self._remembered_settings: value = self._remembered_settings.pop(setting) setattr(settings, setting, value) elif hasattr(settings, setting): delattr(settings._wrapped, setting) def get_request(self): return http.HttpRequest() def get_response(self): return http.HttpResponse() def get_storage(self, data=None): """ Returns the storage backend, setting its loaded data to the ``data`` argument. This method avoids the storage ``_get`` method from getting called so that other parts of the storage backend can be tested independent of the message retrieval logic. 
""" storage = self.storage_class(self.get_request()) storage._loaded_data = data or [] return storage def test_add(self): storage = self.get_storage() self.assertFalse(storage.added_new) storage.add(constants.INFO, 'Test message 1') self.assertTrue(storage.added_new) storage.add(constants.INFO, 'Test message 2', extra_tags='tag') self.assertEqual(len(storage), 2) def test_add_lazy_translation(self): storage = self.get_storage() response = self.get_response() storage.add(constants.INFO, ugettext_lazy('lazy message')) storage.update(response) storing = self.stored_messages_count(storage, response) self.assertEqual(storing, 1) def test_no_update(self): storage = self.get_storage() response = self.get_response() storage.update(response) storing = self.stored_messages_count(storage, response) self.assertEqual(storing, 0) def test_add_update(self): storage = self.get_storage() response = self.get_response() storage.add(constants.INFO, 'Test message 1') storage.add(constants.INFO, 'Test message 1', extra_tags='tag') storage.update(response) storing = self.stored_messages_count(storage, response) self.assertEqual(storing, 2) def test_existing_add_read_update(self): storage = self.get_existing_storage() response = self.get_response() storage.add(constants.INFO, 'Test message 3') list(storage) # Simulates a read storage.update(response) storing = self.stored_messages_count(storage, response) self.assertEqual(storing, 0) def test_existing_read_add_update(self): storage = self.get_existing_storage() response = self.get_response() list(storage) # Simulates a read storage.add(constants.INFO, 'Test message 3') storage.update(response) storing = self.stored_messages_count(storage, response) self.assertEqual(storing, 1) def test_full_request_response_cycle(self): """ With the message middleware enabled, tests that messages are properly stored and then retrieved across the full request/redirect/response cycle. """ settings.MESSAGE_LEVEL = constants.DEBUG data = { 'messages': ['Test message %d' % x for x in xrange(10)], } show_url = reverse('django.contrib.messages.tests.urls.show') for level in ('debug', 'info', 'success', 'warning', 'error'): add_url = reverse('django.contrib.messages.tests.urls.add', args=(level,)) response = self.client.post(add_url, data, follow=True) self.assertRedirects(response, show_url) self.assertTrue('messages' in response.context) messages = [Message(self.levels[level], msg) for msg in data['messages']] self.assertEqual(list(response.context['messages']), messages) for msg in data['messages']: self.assertContains(response, msg) def test_with_template_response(self): settings.MESSAGE_LEVEL = constants.DEBUG data = { 'messages': ['Test message %d' % x for x in xrange(10)], } show_url = reverse('django.contrib.messages.tests.urls.show_template_response') for level in self.levels.keys(): add_url = reverse('django.contrib.messages.tests.urls.add_template_response', args=(level,)) response = self.client.post(add_url, data, follow=True) self.assertRedirects(response, show_url) self.assertTrue('messages' in response.context) for msg in data['messages']: self.assertContains(response, msg) # there shouldn't be any messages on second GET request response = self.client.get(show_url) for msg in data['messages']: self.assertNotContains(response, msg) def test_multiple_posts(self): """ Tests that messages persist properly when multiple POSTs are made before a GET. 
""" settings.MESSAGE_LEVEL = constants.DEBUG data = { 'messages': ['Test message %d' % x for x in xrange(10)], } show_url = reverse('django.contrib.messages.tests.urls.show') messages = [] for level in ('debug', 'info', 'success', 'warning', 'error'): messages.extend([Message(self.levels[level], msg) for msg in data['messages']]) add_url = reverse('django.contrib.messages.tests.urls.add', args=(level,)) self.client.post(add_url, data) response = self.client.get(show_url) self.assertTrue('messages' in response.context) self.assertEqual(list(response.context['messages']), messages) for msg in data['messages']: self.assertContains(response, msg) @skipUnlessAuthIsInstalled def test_middleware_disabled_auth_user(self): """ Tests that the messages API successfully falls back to using user.message_set to store messages directly when the middleware is disabled. """ settings.MESSAGE_LEVEL = constants.DEBUG user = User.objects.create_user('test', 'test@example.com', 'test') self.client.login(username='test', password='test') settings.INSTALLED_APPS = list(settings.INSTALLED_APPS) settings.INSTALLED_APPS.remove( 'django.contrib.messages', ) settings.MIDDLEWARE_CLASSES = list(settings.MIDDLEWARE_CLASSES) settings.MIDDLEWARE_CLASSES.remove( 'django.contrib.messages.middleware.MessageMiddleware', ) settings.TEMPLATE_CONTEXT_PROCESSORS = \ list(settings.TEMPLATE_CONTEXT_PROCESSORS) settings.TEMPLATE_CONTEXT_PROCESSORS.remove( 'django.contrib.messages.context_processors.messages', ) data = { 'messages': ['Test message %d' % x for x in xrange(10)], } show_url = reverse('django.contrib.messages.tests.urls.show') for level in ('debug', 'info', 'success', 'warning', 'error'): add_url = reverse('django.contrib.messages.tests.urls.add', args=(level,)) response = self.client.post(add_url, data, follow=True) self.assertRedirects(response, show_url) self.assertTrue('messages' in response.context) context_messages = list(response.context['messages']) for msg in data['messages']: self.assertTrue(msg in context_messages) self.assertContains(response, msg) def test_middleware_disabled_anon_user(self): """ Tests that, when the middleware is disabled and a user is not logged in, an exception is raised when one attempts to store a message. 
""" settings.MESSAGE_LEVEL = constants.DEBUG settings.INSTALLED_APPS = list(settings.INSTALLED_APPS) settings.INSTALLED_APPS.remove( 'django.contrib.messages', ) settings.MIDDLEWARE_CLASSES = list(settings.MIDDLEWARE_CLASSES) settings.MIDDLEWARE_CLASSES.remove( 'django.contrib.messages.middleware.MessageMiddleware', ) settings.TEMPLATE_CONTEXT_PROCESSORS = \ list(settings.TEMPLATE_CONTEXT_PROCESSORS) settings.TEMPLATE_CONTEXT_PROCESSORS.remove( 'django.contrib.messages.context_processors.messages', ) data = { 'messages': ['Test message %d' % x for x in xrange(10)], } show_url = reverse('django.contrib.messages.tests.urls.show') for level in ('debug', 'info', 'success', 'warning', 'error'): add_url = reverse('django.contrib.messages.tests.urls.add', args=(level,)) self.assertRaises(MessageFailure, self.client.post, add_url, data, follow=True) def test_middleware_disabled_anon_user_fail_silently(self): """ Tests that, when the middleware is disabled and a user is not logged in, an exception is not raised if 'fail_silently' = True """ settings.MESSAGE_LEVEL = constants.DEBUG settings.INSTALLED_APPS = list(settings.INSTALLED_APPS) settings.INSTALLED_APPS.remove( 'django.contrib.messages', ) settings.MIDDLEWARE_CLASSES = list(settings.MIDDLEWARE_CLASSES) settings.MIDDLEWARE_CLASSES.remove( 'django.contrib.messages.middleware.MessageMiddleware', ) settings.TEMPLATE_CONTEXT_PROCESSORS = \ list(settings.TEMPLATE_CONTEXT_PROCESSORS) settings.TEMPLATE_CONTEXT_PROCESSORS.remove( 'django.contrib.messages.context_processors.messages', ) data = { 'messages': ['Test message %d' % x for x in xrange(10)], 'fail_silently': True, } show_url = reverse('django.contrib.messages.tests.urls.show') for level in ('debug', 'info', 'success', 'warning', 'error'): add_url = reverse('django.contrib.messages.tests.urls.add', args=(level,)) response = self.client.post(add_url, data, follow=True) self.assertRedirects(response, show_url) self.assertTrue('messages' in response.context) self.assertEqual(list(response.context['messages']), []) def stored_messages_count(self, storage, response): """ Returns the number of messages being stored after a ``storage.update()`` call. """ raise NotImplementedError('This method must be set by a subclass.') def test_get(self): raise NotImplementedError('This method must be set by a subclass.') def get_existing_storage(self): return self.get_storage([Message(constants.INFO, 'Test message 1'), Message(constants.INFO, 'Test message 2', extra_tags='tag')]) def test_existing_read(self): """ Tests that reading the existing storage doesn't cause the data to be lost. """ storage = self.get_existing_storage() self.assertFalse(storage.used) # After iterating the storage engine directly, the used flag is set. data = list(storage) self.assert_(storage.used) # The data does not disappear because it has been iterated. self.assertEqual(data, list(storage)) def test_existing_add(self): storage = self.get_existing_storage() self.assertFalse(storage.added_new) storage.add(constants.INFO, 'Test message 3') self.assert_(storage.added_new) def test_default_level(self): # get_level works even with no storage on the request. request = self.get_request() self.assertEqual(get_level(request), constants.INFO) # get_level returns the default level if it hasn't been set. storage = self.get_storage() request._messages = storage self.assertEqual(get_level(request), constants.INFO) # Only messages of sufficient level get recorded. 
        add_level_messages(storage)
        self.assertEqual(len(storage), 5)

    def test_low_level(self):
        request = self.get_request()
        storage = self.storage_class(request)
        request._messages = storage

        self.assert_(set_level(request, 5))
        self.assertEqual(get_level(request), 5)

        add_level_messages(storage)
        self.assertEqual(len(storage), 6)

    def test_high_level(self):
        request = self.get_request()
        storage = self.storage_class(request)
        request._messages = storage

        self.assert_(set_level(request, 30))
        self.assertEqual(get_level(request), 30)

        add_level_messages(storage)
        self.assertEqual(len(storage), 2)

    def test_settings_level(self):
        request = self.get_request()
        storage = self.storage_class(request)

        settings.MESSAGE_LEVEL = 29
        self.assertEqual(get_level(request), 29)

        add_level_messages(storage)
        self.assertEqual(len(storage), 3)

    def test_tags(self):
        storage = self.get_storage()
        storage.level = 0
        add_level_messages(storage)
        tags = [msg.tags for msg in storage]
        self.assertEqual(tags,
                         ['info', '', 'extra-tag debug', 'warning', 'error',
                          'success'])

    def test_custom_tags(self):
        settings.MESSAGE_TAGS = {
            constants.INFO: 'info',
            constants.DEBUG: '',
            constants.WARNING: '',
            constants.ERROR: 'bad',
            29: 'custom',
        }
        # LEVEL_TAGS is a constant defined in the
        # django.contrib.messages.storage.base module, so after changing
        # settings.MESSAGE_TAGS, we need to update that constant too.
        base.LEVEL_TAGS = utils.get_level_tags()
        try:
            storage = self.get_storage()
            storage.level = 0
            add_level_messages(storage)
            tags = [msg.tags for msg in storage]
            self.assertEqual(tags,
                         ['info', 'custom', 'extra-tag', '', 'bad', 'success'])
        finally:
            # Ensure the level tags constant is put back like we found it.
            self.restore_setting('MESSAGE_TAGS')
            base.LEVEL_TAGS = utils.get_level_tags()
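The level tests above exercise the same get_level/set_level machinery that application code uses through the public messages API. A short illustrative sketch of that caller-side view of it (the view function and message strings are invented):

# Hypothetical view code using the messages API these tests cover.
from django.contrib import messages
from django.http import HttpResponse

def my_view(request):
    messages.set_level(request, messages.DEBUG)   # record every level
    messages.add_message(request, messages.INFO, 'Profile saved.')
    messages.warning(request, 'Quota almost used up.')
    return HttpResponse('')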
apache-2.0
naturalness/sensibility
sensibility/language/java/__init__.py
1
6245
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-

# Copyright 2017 Eddie Antonio Santos <easantos@ualberta.ca>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import atexit
import os
import sys
import token
from io import BytesIO
from keyword import iskeyword
from pathlib import Path
from typing import (
    Any, AnyStr, Callable, IO, Iterable, Optional, Tuple, Union, overload, cast
)

import javac_parser

from .. import Language, SourceSummary
from ...lexical_analysis import Lexeme, Location, Position, Token
from ...vocabulary import NoSourceRepresentationError, Vocabulary, Vind

here = Path(__file__).parent


class JavaVocabulary(Vocabulary):
    """
    The vocabulary, with an extra table that maps entries back to a source
    representation.
    """
    first_entry_num = len(Vocabulary.SPECIAL_ENTRIES)

    def __init__(self, entries: Iterable[str], reprs: Iterable[str]) -> None:
        super().__init__(entries)
        # Create a look-up table for source representations.
        # The special tokens <unk>, <s>, </s> have NO reprs, thus are not
        # stored.
        self._index2repr = tuple(reprs)
        assert len(self._index2text) == self.first_entry_num + len(self._index2repr)

    def to_source_text(self, idx: Vind) -> str:
        if idx < self.first_entry_num:
            raise NoSourceRepresentationError(idx)
        return self._index2repr[idx - self.first_entry_num]

    @staticmethod
    def load() -> 'JavaVocabulary':
        entries = []
        reprs = []
        # Load from a tab-separated-values file
        with open(here / 'vocabulary.tsv') as vocab_file:
            first_entry = JavaVocabulary.first_entry_num
            for expected_num, line in enumerate(vocab_file, start=first_entry):
                # src_repr -- source representation
                num, entry, src_repr = line.rstrip().split()
                assert expected_num == int(num)
                entries.append(entry)
                reprs.append(src_repr)
        return JavaVocabulary(entries, reprs)


def to_str(source: Union[str, bytes, IO[bytes]]) -> str:
    """
    Coerce an input format to a Unicode string.
    """
    if isinstance(source, str):
        return source
    elif isinstance(source, bytes):
        # XXX: Assume it's UTF-8 encoded!
        return source.decode('UTF-8')
    else:
        raise NotImplementedError


class LazyVocabulary:
    def __init__(self, fn):
        self.fn = fn

    def __get__(self, obj, value):
        if not hasattr(self, 'value'):
            self.value = self.fn()
        return self.value


class JavaToken(Token):
    """
    HACK: javac_parser has some... interesting ideas about normalization,
    so add a `_raw` field to the token.
    """
    # TODO: fix with upstream (javac_parser) to return a sensible value for the normalized value
    __slots__ = ('_raw',)

    def __init__(self, *, _raw: str, name: str, value: str,
                 start: Position, end: Position) -> None:
        super().__init__(name=name, value=value, start=start, end=end)
        self._raw = _raw

    def __repr__(self) -> str:
        cls = type(self).__name__
        # note: a separating ", " was missing between _raw and name
        return (f"{cls}(_raw={self._raw!r}, "
                f"name={self.name!r}, value={self.value!r}, "
                f"start={self.start!r}, end={self.end!r})")


class Java(Language):
    """
    Defines the Java 8 programming language.
    """
    extensions = {'.java'}
    vocabulary = cast(Vocabulary, LazyVocabulary(JavaVocabulary.load))

    @property
    def java(self):
        """
        Lazily start up the Java server.

        This decreases the chances of things going horribly wrong when two
        separate processes initialize the Java language instance around the
        same time.
        """
        if not hasattr(self, '_java_server'):
            self._java_server = javac_parser.Java()

            # Py4j usually crashes as Python is cleaning up after exit() so
            # decrement the server's reference count to lessen the chance of
            # that happening.
            @atexit.register
            def remove_reference():
                del self._java_server
        return self._java_server

    def tokenize(self, source: Union[str, bytes, IO[bytes]]) -> Iterable[Token]:
        tokens = self.java.lex(to_str(source))
        # Each token is a tuple with the following structure
        # (reproduced from javac_parser.py):
        #   1. Lexeme type
        #   2. Value (as it appears in the source file)
        #   3. A 2-tuple of start line, start column
        #   4. A 2-tuple of end line, end column
        #   5. A whitespace-free representation of the value
        for name, raw_value, start, end, normalized in tokens:
            # Omit the EOF token, as it's only useful for the parser.
            if name == 'EOF':
                continue
            # Take the NORMALIZED value, as Java allows unicode escapes in
            # ARBITRARY tokens and then things get hairy here.
            yield JavaToken(_raw=raw_value, name=name, value=normalized,
                            start=Position(line=start[0], column=start[1]),
                            end=Position(line=end[0], column=end[1]))

    def check_syntax(self, source: Union[str, bytes]) -> bool:
        return self.java.get_num_parse_errors(to_str(source)) == 0

    def summarize_tokens(self, source: Iterable[Token]) -> SourceSummary:
        toks = [tok for tok in source if tok.name != 'EOF']
        slines = set(line for tok in toks for line in tok.lines)
        return SourceSummary(n_tokens=len(toks), sloc=len(slines))

    def vocabularize_tokens(self, source: Iterable[Token]) -> Iterable[Tuple[Location, str]]:
        for token in source:
            yield token.location, token.name


java: Language = Java()
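A minimal usage sketch of the module-level java object defined above (the Java snippet string is invented; assumes javac_parser and its Java server are available in the environment):

# Hypothetical usage of the Java language object.
snippet = "public class Hello { public static void main(String[] args) {} }"

print(java.check_syntax(snippet))   # True when javac reports no parse errors
for location, name in java.vocabularize_tokens(java.tokenize(snippet)):
    print(location, name)           # lexeme types such as IDENTIFIER, LBRACE, ...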
apache-2.0
edisonlz/fruit
web_project/base/site-packages/django/db/models/__init__.py
107
1423
from functools import wraps

from django.core.exceptions import ObjectDoesNotExist, ImproperlyConfigured
from django.db.models.loading import get_apps, get_app_paths, get_app, get_models, get_model, register_models, UnavailableApp
from django.db.models.query import Q
from django.db.models.expressions import F
from django.db.models.manager import Manager
from django.db.models.base import Model
from django.db.models.aggregates import *
from django.db.models.fields import *
from django.db.models.fields.subclassing import SubfieldBase
from django.db.models.fields.files import FileField, ImageField
from django.db.models.fields.related import ForeignKey, ForeignObject, OneToOneField, ManyToManyField, ManyToOneRel, ManyToManyRel, OneToOneRel
from django.db.models.deletion import CASCADE, PROTECT, SET, SET_NULL, SET_DEFAULT, DO_NOTHING, ProtectedError
from django.db.models import signals


def permalink(func):
    """
    Decorator that calls urlresolvers.reverse() to return a URL using
    parameters returned by the decorated function "func".

    "func" should be a function that returns a tuple in one of the
    following formats:
        (viewname, viewargs)
        (viewname, viewargs, viewkwargs)
    """
    from django.core.urlresolvers import reverse

    @wraps(func)
    def inner(*args, **kwargs):
        bits = func(*args, **kwargs)
        return reverse(bits[0], None, *bits[1:3])
    return inner
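The tuple convention of the permalink decorator is easiest to see on a model's get_absolute_url; a small sketch (the model, field, and URL name are invented for illustration):

# Hypothetical model using the permalink decorator above.
from django.db import models

class Article(models.Model):
    slug = models.SlugField()

    @models.permalink
    def get_absolute_url(self):
        # equivalent to reverse('article_detail', args=(self.slug,))
        return ('article_detail', (self.slug,))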
apache-2.0
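A short, hedged sketch of how the `permalink` decorator defined above is typically applied to a model's get_absolute_url in this Django vintage; the model, field, and URL pattern name here are hypothetical.

from django.db import models
from django.db.models import permalink

class Article(models.Model):
    slug = models.SlugField()

    @permalink
    def get_absolute_url(self):
        # The (viewname, viewargs) form; equivalent to calling
        # reverse('article-detail', args=(self.slug,)).
        return ('article-detail', (self.slug,))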
brextonpham/python-Ultron
imdb/_exceptions.py
128
1663
""" _exceptions module (imdb package). This module provides the exception hierarchy used by the imdb package. Copyright 2004-2009 Davide Alberani <da@erlug.linux.it> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA """ import logging class IMDbError(Exception): """Base class for every exception raised by the imdb package.""" _logger = logging.getLogger('imdbpy') def __init__(self, *args, **kwargs): """Initialize the exception and pass the message to the log system.""" # Every raised exception also dispatch a critical log. self._logger.critical('%s exception raised; args: %s; kwds: %s', self.__class__.__name__, args, kwargs, exc_info=True) Exception.__init__(self, *args, **kwargs) class IMDbDataAccessError(IMDbError): """Exception raised when is not possible to access needed data.""" pass class IMDbParserError(IMDbError): """Exception raised when an error occurred parsing the data.""" pass
mit
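A minimal sketch of how the hierarchy above is meant to be consumed: catching IMDbError covers both subclasses, and merely constructing one emits a critical log record via IMDbError.__init__. The fetch_movie function and its argument are hypothetical; only positional args are passed, since Exception.__init__ does not accept keywords.

import logging
from imdb._exceptions import IMDbError, IMDbDataAccessError

logging.basicConfig()

def fetch_movie(movie_id):
    # Simulate a network failure; instantiation alone triggers the log.
    raise IMDbDataAccessError('network unreachable', movie_id)

try:
    fetch_movie('0133093')
except IMDbError as err:
    # err.args carries the positional arguments passed at raise time.
    print('lookup failed: %r' % (err.args,))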
brianwoo/django-tutorial
build/Django/tests/utils_tests/test_module_loading.py
7
11263
import imp import os import sys import unittest import warnings from importlib import import_module from zipimport import zipimporter from django.core.exceptions import ImproperlyConfigured from django.test import SimpleTestCase, ignore_warnings, modify_settings from django.test.utils import extend_sys_path from django.utils import six from django.utils._os import upath from django.utils.deprecation import RemovedInDjango19Warning from django.utils.module_loading import ( autodiscover_modules, import_by_path, import_string, module_has_submodule, ) class DefaultLoader(unittest.TestCase): def setUp(self): sys.meta_path.insert(0, ProxyFinder()) def tearDown(self): sys.meta_path.pop(0) def test_loader(self): "Normal module existence can be tested" test_module = import_module('utils_tests.test_module') test_no_submodule = import_module( 'utils_tests.test_no_submodule') # An importable child self.assertTrue(module_has_submodule(test_module, 'good_module')) mod = import_module('utils_tests.test_module.good_module') self.assertEqual(mod.content, 'Good Module') # A child that exists, but will generate an import error if loaded self.assertTrue(module_has_submodule(test_module, 'bad_module')) self.assertRaises(ImportError, import_module, 'utils_tests.test_module.bad_module') # A child that doesn't exist self.assertFalse(module_has_submodule(test_module, 'no_such_module')) self.assertRaises(ImportError, import_module, 'utils_tests.test_module.no_such_module') # A child that doesn't exist, but is the name of a package on the path self.assertFalse(module_has_submodule(test_module, 'django')) self.assertRaises(ImportError, import_module, 'utils_tests.test_module.django') # Don't be confused by caching of import misses import types # NOQA: causes attempted import of utils_tests.types self.assertFalse(module_has_submodule(sys.modules['utils_tests'], 'types')) # A module which doesn't have a __path__ (so no submodules) self.assertFalse(module_has_submodule(test_no_submodule, 'anything')) self.assertRaises(ImportError, import_module, 'utils_tests.test_no_submodule.anything') class EggLoader(unittest.TestCase): def setUp(self): self.egg_dir = '%s/eggs' % os.path.dirname(upath(__file__)) def tearDown(self): sys.path_importer_cache.clear() sys.modules.pop('egg_module.sub1.sub2.bad_module', None) sys.modules.pop('egg_module.sub1.sub2.good_module', None) sys.modules.pop('egg_module.sub1.sub2', None) sys.modules.pop('egg_module.sub1', None) sys.modules.pop('egg_module.bad_module', None) sys.modules.pop('egg_module.good_module', None) sys.modules.pop('egg_module', None) def test_shallow_loader(self): "Module existence can be tested inside eggs" egg_name = '%s/test_egg.egg' % self.egg_dir with extend_sys_path(egg_name): egg_module = import_module('egg_module') # An importable child self.assertTrue(module_has_submodule(egg_module, 'good_module')) mod = import_module('egg_module.good_module') self.assertEqual(mod.content, 'Good Module') # A child that exists, but will generate an import error if loaded self.assertTrue(module_has_submodule(egg_module, 'bad_module')) self.assertRaises(ImportError, import_module, 'egg_module.bad_module') # A child that doesn't exist self.assertFalse(module_has_submodule(egg_module, 'no_such_module')) self.assertRaises(ImportError, import_module, 'egg_module.no_such_module') def test_deep_loader(self): "Modules deep inside an egg can still be tested for existence" egg_name = '%s/test_egg.egg' % self.egg_dir with extend_sys_path(egg_name): egg_module = import_module('egg_module.sub1.sub2') 
# An importable child self.assertTrue(module_has_submodule(egg_module, 'good_module')) mod = import_module('egg_module.sub1.sub2.good_module') self.assertEqual(mod.content, 'Deep Good Module') # A child that exists, but will generate an import error if loaded self.assertTrue(module_has_submodule(egg_module, 'bad_module')) self.assertRaises(ImportError, import_module, 'egg_module.sub1.sub2.bad_module') # A child that doesn't exist self.assertFalse(module_has_submodule(egg_module, 'no_such_module')) self.assertRaises(ImportError, import_module, 'egg_module.sub1.sub2.no_such_module') @ignore_warnings(category=RemovedInDjango19Warning) class ModuleImportTestCase(unittest.TestCase): def test_import_by_path(self): cls = import_by_path('django.utils.module_loading.import_by_path') self.assertEqual(cls, import_by_path) # Test exceptions raised for path in ('no_dots_in_path', 'unexistent.path', 'utils_tests.unexistent'): self.assertRaises(ImproperlyConfigured, import_by_path, path) with self.assertRaises(ImproperlyConfigured) as cm: import_by_path('unexistent.module.path', error_prefix="Foo") self.assertTrue(str(cm.exception).startswith('Foo')) def test_import_error_traceback(self): """Test preserving the original traceback on an ImportError.""" try: import_by_path('test_module.bad_module.content') except ImproperlyConfigured: traceback = sys.exc_info()[2] self.assertIsNotNone(traceback.tb_next.tb_next, 'Should have more than the calling frame in the traceback.') def test_import_by_path_pending_deprecation_warning(self): with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always', category=RemovedInDjango19Warning) cls = import_by_path('django.utils.module_loading.import_by_path') self.assertEqual(cls, import_by_path) self.assertEqual(len(w), 1) self.assertTrue(issubclass(w[-1].category, RemovedInDjango19Warning)) self.assertIn('deprecated', str(w[-1].message)) def test_import_string(self): cls = import_string('django.utils.module_loading.import_string') self.assertEqual(cls, import_string) # Test exceptions raised self.assertRaises(ImportError, import_string, 'no_dots_in_path') self.assertRaises(ImportError, import_string, 'unexistent.path') msg = 'Module "utils_tests" does not define a "unexistent" attribute' with six.assertRaisesRegex(self, ImportError, msg): import_string('utils_tests.unexistent') @modify_settings(INSTALLED_APPS={'append': 'utils_tests.test_module'}) class AutodiscoverModulesTestCase(SimpleTestCase): def tearDown(self): sys.path_importer_cache.clear() sys.modules.pop('utils_tests.test_module.another_bad_module', None) sys.modules.pop('utils_tests.test_module.another_good_module', None) sys.modules.pop('utils_tests.test_module.bad_module', None) sys.modules.pop('utils_tests.test_module.good_module', None) sys.modules.pop('utils_tests.test_module', None) def test_autodiscover_modules_found(self): autodiscover_modules('good_module') def test_autodiscover_modules_not_found(self): autodiscover_modules('missing_module') def test_autodiscover_modules_found_but_bad_module(self): with six.assertRaisesRegex(self, ImportError, "No module named '?a_package_name_that_does_not_exist'?"): autodiscover_modules('bad_module') def test_autodiscover_modules_several_one_bad_module(self): with six.assertRaisesRegex(self, ImportError, "No module named '?a_package_name_that_does_not_exist'?"): autodiscover_modules('good_module', 'bad_module') def test_autodiscover_modules_several_found(self): autodiscover_modules('good_module', 'another_good_module') def 
test_autodiscover_modules_several_found_with_registry(self): from .test_module import site autodiscover_modules('good_module', 'another_good_module', register_to=site) self.assertEqual(site._registry, {'lorem': 'ipsum'}) def test_validate_registry_keeps_intact(self): from .test_module import site with six.assertRaisesRegex(self, Exception, "Some random exception."): autodiscover_modules('another_bad_module', register_to=site) self.assertEqual(site._registry, {}) def test_validate_registry_resets_after_erroneous_module(self): from .test_module import site with six.assertRaisesRegex(self, Exception, "Some random exception."): autodiscover_modules('another_good_module', 'another_bad_module', register_to=site) self.assertEqual(site._registry, {'lorem': 'ipsum'}) def test_validate_registry_resets_after_missing_module(self): from .test_module import site autodiscover_modules('does_not_exist', 'another_good_module', 'does_not_exist2', register_to=site) self.assertEqual(site._registry, {'lorem': 'ipsum'}) class ProxyFinder(object): def __init__(self): self._cache = {} def find_module(self, fullname, path=None): tail = fullname.rsplit('.', 1)[-1] try: fd, fn, info = imp.find_module(tail, path) if fullname in self._cache: old_fd = self._cache[fullname][0] if old_fd: old_fd.close() self._cache[fullname] = (fd, fn, info) except ImportError: return None else: return self # this is a loader as well def load_module(self, fullname): if fullname in sys.modules: return sys.modules[fullname] fd, fn, info = self._cache[fullname] try: return imp.load_module(fullname, fd, fn, info) finally: if fd: fd.close() class TestFinder(object): def __init__(self, *args, **kwargs): self.importer = zipimporter(*args, **kwargs) def find_module(self, path): importer = self.importer.find_module(path) if importer is None: return return TestLoader(importer) class TestLoader(object): def __init__(self, importer): self.importer = importer def load_module(self, name): mod = self.importer.load_module(name) mod.__loader__ = self return mod class CustomLoader(EggLoader): """The Custom Loader test is exactly the same as the EggLoader, but it uses a custom defined Loader and Finder that is intentionally split into two classes. Although the EggLoader combines both functions into one class, this isn't required. """ def setUp(self): super(CustomLoader, self).setUp() sys.path_hooks.insert(0, TestFinder) sys.path_importer_cache.clear() def tearDown(self): super(CustomLoader, self).tearDown() sys.path_hooks.pop(0)
gpl-3.0
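A quick illustration of the helper these tests exercise: module_has_submodule answers whether a child module exists without importing it, so a child that would raise on import is still reported as present. This sketch uses a stdlib package so it runs anywhere Django is installed.

from importlib import import_module
from django.utils.module_loading import module_has_submodule

pkg = import_module('xml')
assert module_has_submodule(pkg, 'dom')          # xml.dom exists
assert not module_has_submodule(pkg, 'no_such')  # nothing gets imported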
brenca/electron
script/dump-symbols.py
22
1970
#!/usr/bin/env python import os import sys from lib.config import PLATFORM from lib.util import electron_gyp, execute, rm_rf SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) DIST_DIR = os.path.join(SOURCE_ROOT, 'dist') OUT_DIR = os.path.join(SOURCE_ROOT, 'out', 'R') CHROMIUM_DIR = os.path.join(SOURCE_ROOT, 'vendor', 'brightray', 'vendor', 'download', 'libchromiumcontent', 'static_library') def main(destination): # if PLATFORM == 'win32': # register_required_dll() rm_rf(destination) (project_name, product_name) = get_names_from_gyp() if PLATFORM in ['darwin', 'linux']: generate_breakpad_symbols = os.path.join(SOURCE_ROOT, 'tools', 'posix', 'generate_breakpad_symbols.py') if PLATFORM == 'darwin': start = os.path.join(OUT_DIR, '{0}.app'.format(product_name), 'Contents', 'MacOS', product_name) else: start = os.path.join(OUT_DIR, project_name) args = [ '--build-dir={0}'.format(OUT_DIR), '--binary={0}'.format(start), '--symbols-dir={0}'.format(destination), '--libchromiumcontent-dir={0}'.format(CHROMIUM_DIR), '--clear', '--jobs=16', ] else: generate_breakpad_symbols = os.path.join(SOURCE_ROOT, 'tools', 'win', 'generate_breakpad_symbols.py') args = [ '--symbols-dir={0}'.format(destination), '--jobs=16', os.path.relpath(OUT_DIR), ] execute([sys.executable, generate_breakpad_symbols] + args) def register_required_dll(): register = os.path.join(SOURCE_ROOT, 'tools', 'win', 'register_msdia80_dll.js') execute(['node.exe', os.path.relpath(register)]) def get_names_from_gyp(): variables = electron_gyp() return (variables['project_name%'], variables['product_name%']) if __name__ == '__main__': sys.exit(main(sys.argv[1]))
mit
okumura/gyp
tools/pretty_gyp.py
2618
4756
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Pretty-prints the contents of a GYP file.""" import sys import re # Regex to remove comments when we're counting braces. COMMENT_RE = re.compile(r'\s*#.*') # Regex to remove quoted strings when we're counting braces. # It takes into account quoted quotes, and makes sure that the quotes match. # NOTE: It does not handle quotes that span more than one line, or # cases where an escaped quote is preceded by an escaped backslash. QUOTE_RE_STR = r'(?P<q>[\'"])(.*?)(?<![^\\][\\])(?P=q)' QUOTE_RE = re.compile(QUOTE_RE_STR) def comment_replace(matchobj): return matchobj.group(1) + matchobj.group(2) + '#' * len(matchobj.group(3)) def mask_comments(input): """Mask the comments so we skip braces inside comments.""" search_re = re.compile(r'(.*?)(#)(.*)') return [search_re.sub(comment_replace, line) for line in input] def quote_replace(matchobj): return "%s%s%s%s" % (matchobj.group(1), matchobj.group(2), 'x'*len(matchobj.group(3)), matchobj.group(2)) def mask_quotes(input): """Mask the quoted strings so we skip braces inside quoted strings.""" search_re = re.compile(r'(.*?)' + QUOTE_RE_STR) return [search_re.sub(quote_replace, line) for line in input] def do_split(input, masked_input, search_re): output = [] mask_output = [] for (line, masked_line) in zip(input, masked_input): m = search_re.match(masked_line) while m: split = len(m.group(1)) line = line[:split] + r'\n' + line[split:] masked_line = masked_line[:split] + r'\n' + masked_line[split:] m = search_re.match(masked_line) output.extend(line.split(r'\n')) mask_output.extend(masked_line.split(r'\n')) return (output, mask_output) def split_double_braces(input): """Masks out the quotes and comments, and then splits appropriate lines (lines that match the double_*_brace re's above) before indenting them below. These are used to split lines which have multiple braces on them, so that the indentation looks prettier when all laid out (e.g. closing braces make a nice diagonal line). """ double_open_brace_re = re.compile(r'(.*?[\[\{\(,])(\s*)([\[\{\(])') double_close_brace_re = re.compile(r'(.*?[\]\}\)],?)(\s*)([\]\}\)])') masked_input = mask_quotes(input) masked_input = mask_comments(masked_input) (output, mask_output) = do_split(input, masked_input, double_open_brace_re) (output, mask_output) = do_split(output, mask_output, double_close_brace_re) return output def count_braces(line): """Keeps track of the number of braces on a given line and returns the result. It starts at zero and subtracts for closed braces, and adds for open braces. """ open_braces = ['[', '(', '{'] close_braces = [']', ')', '}'] closing_prefix_re = re.compile(r'(.*?[^\s\]\}\)]+.*?)([\]\}\)],?)\s*$') cnt = 0 stripline = COMMENT_RE.sub(r'', line) stripline = QUOTE_RE.sub(r"''", stripline) for char in stripline: for brace in open_braces: if char == brace: cnt += 1 for brace in close_braces: if char == brace: cnt -= 1 after = False if cnt > 0: after = True # This catches the special case of a closing brace having something # other than just whitespace ahead of it -- we don't want to # unindent that until after this line is printed so it stays with # the previous indentation level.
if cnt < 0 and closing_prefix_re.match(stripline): after = True return (cnt, after) def prettyprint_input(lines): """Does the main work of indenting the input based on the brace counts.""" indent = 0 basic_offset = 2 last_line = "" for line in lines: if COMMENT_RE.match(line): print line else: line = line.strip('\r\n\t ') # Otherwise doesn't strip \r on Unix. if len(line) > 0: (brace_diff, after) = count_braces(line) if brace_diff != 0: if after: print " " * (basic_offset * indent) + line indent += brace_diff else: indent += brace_diff print " " * (basic_offset * indent) + line else: print " " * (basic_offset * indent) + line else: print "" last_line = line def main(): if len(sys.argv) > 1: data = open(sys.argv[1]).read().splitlines() else: data = sys.stdin.read().splitlines() # Split up the double braces. lines = split_double_braces(data) # Indent and print the output. prettyprint_input(lines) return 0 if __name__ == '__main__': sys.exit(main())
bsd-3-clause
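A worked check of count_braces from the file above: quoted strings and comments are masked before counting, so only the structural '[' and '{' survive, and `after` signals that the indent change applies after the line is printed. Behavior inferred from the code; this assumes count_braces is importable from (or copied out of) pretty_gyp.py.

line = "'targets': [ { # { and } in this comment are ignored"
cnt, after = count_braces(line)
# The '{' and '}' in the comment and the quoted string are masked out,
# leaving one '[' and one '{'.
assert (cnt, after) == (2, True)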
Dandandan/wikiprogramming
jsrepl/extern/python/closured/lib/python2.7/shlex.py
306
11137
# -*- coding: iso-8859-1 -*- """A lexical analyzer class for simple shell-like syntaxes.""" # Module and documentation by Eric S. Raymond, 21 Dec 1998 # Input stacking and error message cleanup added by ESR, March 2000 # push_source() and pop_source() made explicit by ESR, January 2001. # Posix compliance, split(), string arguments, and # iterator interface by Gustavo Niemeyer, April 2003. import os.path import sys from collections import deque try: from cStringIO import StringIO except ImportError: from StringIO import StringIO __all__ = ["shlex", "split"] class shlex: "A lexical analyzer class for simple shell-like syntaxes." def __init__(self, instream=None, infile=None, posix=False): if isinstance(instream, basestring): instream = StringIO(instream) if instream is not None: self.instream = instream self.infile = infile else: self.instream = sys.stdin self.infile = None self.posix = posix if posix: self.eof = None else: self.eof = '' self.commenters = '#' self.wordchars = ('abcdfeghijklmnopqrstuvwxyz' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_') if self.posix: self.wordchars += ('ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ' 'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ') self.whitespace = ' \t\r\n' self.whitespace_split = False self.quotes = '\'"' self.escape = '\\' self.escapedquotes = '"' self.state = ' ' self.pushback = deque() self.lineno = 1 self.debug = 0 self.token = '' self.filestack = deque() self.source = None if self.debug: print 'shlex: reading from %s, line %d' \ % (self.instream, self.lineno) def push_token(self, tok): "Push a token onto the stack popped by the get_token method" if self.debug >= 1: print "shlex: pushing token " + repr(tok) self.pushback.appendleft(tok) def push_source(self, newstream, newfile=None): "Push an input source onto the lexer's input source stack." if isinstance(newstream, basestring): newstream = StringIO(newstream) self.filestack.appendleft((self.infile, self.instream, self.lineno)) self.infile = newfile self.instream = newstream self.lineno = 1 if self.debug: if newfile is not None: print 'shlex: pushing to file %s' % (self.infile,) else: print 'shlex: pushing to stream %s' % (self.instream,) def pop_source(self): "Pop the input source stack." self.instream.close() (self.infile, self.instream, self.lineno) = self.filestack.popleft() if self.debug: print 'shlex: popping to %s, line %d' \ % (self.instream, self.lineno) self.state = ' ' def get_token(self): "Get a token from the input stream (or from stack if it's nonempty)" if self.pushback: tok = self.pushback.popleft() if self.debug >= 1: print "shlex: popping token " + repr(tok) return tok # No pushback. Get a token. raw = self.read_token() # Handle inclusions if self.source is not None: while raw == self.source: spec = self.sourcehook(self.read_token()) if spec: (newfile, newstream) = spec self.push_source(newstream, newfile) raw = self.get_token() # Maybe we got EOF instead? 
while raw == self.eof: if not self.filestack: return self.eof else: self.pop_source() raw = self.get_token() # Neither inclusion nor EOF if self.debug >= 1: if raw != self.eof: print "shlex: token=" + repr(raw) else: print "shlex: token=EOF" return raw def read_token(self): quoted = False escapedstate = ' ' while True: nextchar = self.instream.read(1) if nextchar == '\n': self.lineno = self.lineno + 1 if self.debug >= 3: print "shlex: in state", repr(self.state), \ "I see character:", repr(nextchar) if self.state is None: self.token = '' # past end of file break elif self.state == ' ': if not nextchar: self.state = None # end of file break elif nextchar in self.whitespace: if self.debug >= 2: print "shlex: I see whitespace in whitespace state" if self.token or (self.posix and quoted): break # emit current token else: continue elif nextchar in self.commenters: self.instream.readline() self.lineno = self.lineno + 1 elif self.posix and nextchar in self.escape: escapedstate = 'a' self.state = nextchar elif nextchar in self.wordchars: self.token = nextchar self.state = 'a' elif nextchar in self.quotes: if not self.posix: self.token = nextchar self.state = nextchar elif self.whitespace_split: self.token = nextchar self.state = 'a' else: self.token = nextchar if self.token or (self.posix and quoted): break # emit current token else: continue elif self.state in self.quotes: quoted = True if not nextchar: # end of file if self.debug >= 2: print "shlex: I see EOF in quotes state" # XXX what error should be raised here? raise ValueError, "No closing quotation" if nextchar == self.state: if not self.posix: self.token = self.token + nextchar self.state = ' ' break else: self.state = 'a' elif self.posix and nextchar in self.escape and \ self.state in self.escapedquotes: escapedstate = self.state self.state = nextchar else: self.token = self.token + nextchar elif self.state in self.escape: if not nextchar: # end of file if self.debug >= 2: print "shlex: I see EOF in escape state" # XXX what error should be raised here? raise ValueError, "No escaped character" # In posix shells, only the quote itself or the escape # character may be escaped within quotes. 
if escapedstate in self.quotes and \ nextchar != self.state and nextchar != escapedstate: self.token = self.token + self.state self.token = self.token + nextchar self.state = escapedstate elif self.state == 'a': if not nextchar: self.state = None # end of file break elif nextchar in self.whitespace: if self.debug >= 2: print "shlex: I see whitespace in word state" self.state = ' ' if self.token or (self.posix and quoted): break # emit current token else: continue elif nextchar in self.commenters: self.instream.readline() self.lineno = self.lineno + 1 if self.posix: self.state = ' ' if self.token or (self.posix and quoted): break # emit current token else: continue elif self.posix and nextchar in self.quotes: self.state = nextchar elif self.posix and nextchar in self.escape: escapedstate = 'a' self.state = nextchar elif nextchar in self.wordchars or nextchar in self.quotes \ or self.whitespace_split: self.token = self.token + nextchar else: self.pushback.appendleft(nextchar) if self.debug >= 2: print "shlex: I see punctuation in word state" self.state = ' ' if self.token: break # emit current token else: continue result = self.token self.token = '' if self.posix and not quoted and result == '': result = None if self.debug > 1: if result: print "shlex: raw token=" + repr(result) else: print "shlex: raw token=EOF" return result def sourcehook(self, newfile): "Hook called on a filename to be sourced." if newfile[0] == '"': newfile = newfile[1:-1] # This implements cpp-like semantics for relative-path inclusion. if isinstance(self.infile, basestring) and not os.path.isabs(newfile): newfile = os.path.join(os.path.dirname(self.infile), newfile) return (newfile, open(newfile, "r")) def error_leader(self, infile=None, lineno=None): "Emit a C-compiler-like, Emacs-friendly error-message leader." if infile is None: infile = self.infile if lineno is None: lineno = self.lineno return "\"%s\", line %d: " % (infile, lineno) def __iter__(self): return self def next(self): token = self.get_token() if token == self.eof: raise StopIteration return token def split(s, comments=False, posix=True): lex = shlex(s, posix=posix) lex.whitespace_split = True if not comments: lex.commenters = '' return list(lex) if __name__ == '__main__': if len(sys.argv) == 1: lexer = shlex() else: file = sys.argv[1] lexer = shlex(open(file), file) while 1: tt = lexer.get_token() if tt: print "Token: " + repr(tt) else: break
mit
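The module-level split() helper from the shlex file above, in a couple of lines. With posix=True (the default) quotes group words and are stripped; with comments=False (also the default) '#' is ordinary text rather than the start of a comment. Asserts are used instead of Python 2 print statements so the sketch is version-agnostic.

from shlex import split

assert split('grep -e "a b" file.txt') == ['grep', '-e', 'a b', 'file.txt']
assert split('echo one # two') == ['echo', 'one', '#', 'two']
assert split('echo one # two', comments=True) == ['echo', 'one']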
xjnny/NRPhoto
node_modules/node-gyp/gyp/pylib/gyp/generator/ninja_test.py
1843
1786
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Unit tests for the ninja.py file. """ import gyp.generator.ninja as ninja import unittest import StringIO import sys import TestCommon class TestPrefixesAndSuffixes(unittest.TestCase): def test_BinaryNamesWindows(self): # These cannot run on non-Windows as they require a VS installation to # correctly handle variable expansion. if sys.platform.startswith('win'): writer = ninja.NinjaWriter('foo', 'wee', '.', '.', 'build.ninja', '.', 'build.ninja', 'win') spec = { 'target_name': 'wee' } self.assertTrue(writer.ComputeOutputFileName(spec, 'executable'). endswith('.exe')) self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library'). endswith('.dll')) self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library'). endswith('.lib')) def test_BinaryNamesLinux(self): writer = ninja.NinjaWriter('foo', 'wee', '.', '.', 'build.ninja', '.', 'build.ninja', 'linux') spec = { 'target_name': 'wee' } self.assertTrue('.' not in writer.ComputeOutputFileName(spec, 'executable')) self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library'). startswith('lib')) self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library'). startswith('lib')) self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library'). endswith('.so')) self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library'). endswith('.a')) if __name__ == '__main__': unittest.main()
gpl-2.0
postlund/pyatv
tests/support/test_dns.py
1
10011
"""Unit tests for pyatv.support.dns""" import io import typing import pytest from pyatv.support import dns @pytest.mark.parametrize( "name,expected", ( ("_http._tcp.local", (None, "_http._tcp", "local")), ("foo._http._tcp.local", ("foo", "_http._tcp", "local")), ("foo.bar._http._tcp.local", ("foo.bar", "_http._tcp", "local")), ), ids=("ptr", "no_dot", "with_dot"), ) def test_happy_service_instance_names(name, expected): assert dns.ServiceInstanceName.split_name(name) == expected @pytest.mark.parametrize( "name", ( "_http.local", "._tcp.local", "_http.foo._tcp.local", "_tcp._http.local", ), ids=("no_proto", "no_service", "split", "reversed"), ) def test_sad_service_instance_names(name): with pytest.raises(ValueError): dns.ServiceInstanceName.split_name(name) # mapping is test_id: tuple(name, expected_raw) encode_domain_names = { "root": (".", b"\x00"), "empty": ("", b"\x00"), "example.com": ("example.com", b"\x07example\x03com\x00"), "example.com_list": (["example", "com"], b"\x07example\x03com\x00"), "unicode": ("Bücher.example", b"\x07B\xc3\xbccher\x07example\x00"), "dotted_instance": ( "Dot.Within._http._tcp.example.local", b"\x0aDot.Within\x05_http\x04_tcp\x07example\x05local\x00", ), "dotted_instance_list": ( ["Dot.Within", "_http", "_tcp", "example", "local"], b"\x0aDot.Within\x05_http\x04_tcp\x07example\x05local\x00", ), "truncated_ascii": ( ( "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" ".test" ), ( b"\x3fabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijk" b"\x04test" b"\x00" ), ), "truncated_unicode": ( ( # The 'a' is at the beginning to force the codepoints to be split at 63 # bytes. The next line is also at the right length to be below 88 characters # even if each kana is counted as a double-width character. Additionally, # this sequence is NF*D* normalized, not NFC (which is what is used for # Net-Unicode). "aがあいうえおかきくけこさしすせそたちつてとなにぬねのはひふへほまみむめも" ".test" ), ( b"\x3d" b"a\xe3\x81\x8c\xe3\x81\x82\xe3\x81\x84\xe3\x81\x86\xe3\x81\x88\xe3\x81\x8a" b"\xe3\x81\x8b\xe3\x81\x8d\xe3\x81\x8f\xe3\x81\x91\xe3\x81\x93\xe3\x81\x95" b"\xe3\x81\x97\xe3\x81\x99\xe3\x81\x9b\xe3\x81\x9d\xe3\x81\x9f\xe3\x81\xa1" b"\xe3\x81\xa4\xe3\x81\xa6" b"\x04test" b"\x00" ), ), } @pytest.mark.parametrize( "name,expected_raw", [pytest.param(*value, id=key) for key, value in encode_domain_names.items()], ) def test_qname_encode(name, expected_raw): assert dns.qname_encode(name) == expected_raw # mapping is test_id: tuple(raw_name, offset, expected_name, expected_offset) # If expected offset is None, it means len(raw_name), otherwise it's like an array index # (positive is from the beginning, negative from the end) decode_domain_names = { "simple": (b"\x03foo\x07example\x03com\x00", 0, "foo.example.com", None), "null": (b"\00", 0, "", None), "compressed": (b"aaaa\x04test\x00\x05label\xC0\x04\xAB\xCD", 10, "label.test", -2), # This case has two levels of compression "multi_compressed": ( b"aaaa\x04test\x00\x05label\xC0\x04\x03foo\xC0\x0A\xAB\xCD", 18, "foo.label.test", -2, ), # Taken straight from the Internationalized Domain name Wikipedia page "idna": (b"\x0Dxn--bcher-kva\x07example\x00", 0, "bücher.example", None), # Taken from issue #919. Apple puts a non-breaking space between "Apple" and "TV". 
"nbsp": ( b"\x10Apple\xc2\xa0TV (4167)\x05local\x00", 0, "Apple\xa0TV (4167).local", None, ), # This is a doozy of a test case; it's covering a couple different areas of Unicode, # as well as exercising that DNS-SD allows dots in instance names. "unicode": ( ( b"\x1d\xe5\xb1\x85\xe9\x96\x93 Apple\xc2\xa0TV. En Espa\xc3\xb1ol" b"\x05local" b"\x00" ), 0, "居間 Apple TV. En Español.local", None, ), } @pytest.mark.parametrize( "raw_name,offset,expected_name,expected_offset", [pytest.param(*value, id=key) for key, value in decode_domain_names.items()], ) def test_domain_name_parsing( raw_name: bytes, offset: int, expected_name: str, expected_offset: typing.Optional[int], ): with io.BytesIO(raw_name) as buffer: buffer.seek(offset) name = dns.parse_domain_name(buffer) assert name == expected_name if expected_offset is None: assert buffer.tell() == len(raw_name) else: # if expected_offset is positive, this will wrap around to the beginning, if # it's negative it won't. raw_len = len(raw_name) assert buffer.tell() == (raw_len + expected_offset) % raw_len # mapping is test_id: tuple(encoded_data, expected_data, expected_offset) # If expected offset is None, it means len(raw_name), otherwise it's like an array index # (positive is from the beginning, negative from the end) decode_strings = { "null": (b"\x00", b"", None), # 63 is significant because that's the max length for a domain label, but not a # character-string (they have similar encodings). "len_63": (b"\x3F" + (63 * b"0"), (63 * b"0"), None), # For similar reasons as 63, 64 is significant because it would set only one of the # flag bits for name compression if domain-name encoding is assumed. "len_64": (b"\x40" + (64 * b"0"), (64 * b"0"), None), # Ditto for 128, but the other flag "len_128": (b"\x80" + (128 * b"0"), (128 * b"0"), None), # ...and 192 is both flags "len_192": (b"\xC0" + (192 * b"0"), (192 * b"0"), None), # 255 is the max length a character-string can be "len_255": (b"\xFF" + (255 * b"0"), (255 * b"0"), None), "trailing": (b"\x0A" + (10 * b"2") + (17 * b"9"), (10 * b"2"), -17), } @pytest.mark.parametrize( "encoded_data,expected_data,expected_offset", [pytest.param(*value, id=key) for key, value in decode_strings.items()], ) def test_string_parsing( encoded_data: bytes, expected_data: bytes, expected_offset: typing.Optional[int], ): with io.BytesIO(encoded_data) as buffer: name = dns.parse_string(buffer) assert name == expected_data if expected_offset is None: assert buffer.tell() == len(encoded_data) else: # if expected_offset is positive, this will wrap around to the beginning, if # it's negative it won't. 
data_len = len(encoded_data) assert buffer.tell() == (data_len + expected_offset) % data_len def test_dns_sd_txt_parse_single(): """Test that a TXT RDATA section with one key can be parsed properly.""" data = b"\x07foo=bar" extra_data = data + b"\xDE\xAD\xBE\xEF" * 3 with io.BytesIO(extra_data) as buffer: txt_dict = dns.parse_txt_dict(buffer, len(data)) assert buffer.tell() == len(data) assert txt_dict == {"foo": b"bar"} def test_dns_sd_txt_parse_multiple(): """Test that a TXT RDATA section with multiple keys can be parsed properly.""" data = b"\x07foo=bar\x09spam=eggs" extra_data = data + b"\xDE\xAD\xBE\xEF" * 2 with io.BytesIO(extra_data) as buffer: txt_dict = dns.parse_txt_dict(buffer, len(data)) assert buffer.tell() == len(data) assert txt_dict == {"foo": b"bar", "spam": b"eggs"} def test_dns_sd_txt_parse_binary(): """Test that a TXT RDATA section with a binary value can be parsed properly.""" # 0xfeed can't be decoded as UTF-8 or ASCII, so it'll throw an error if it's not # being treated as binary data. data = b"\x06foo=\xFE\xED" extra_data = data + b"\xDE\xAD\xBE\xEF" * 3 with io.BytesIO(extra_data) as buffer: txt_dict = dns.parse_txt_dict(buffer, len(data)) assert buffer.tell() == len(data) assert txt_dict == {"foo": b"\xFE\xED"} def test_dns_sd_txt_parse_long(): """Test that a TXT RDATA section with a long value can be parsed properly.""" # If TXT records are being parsed the same way domain names are, this won't work as # the data is too long to fit in a label. data = b"\xCCfoo=" + b"\xCA\xFE" * 100 extra_data = data + b"\xDE\xAD\xBE\xEF" * 3 with io.BytesIO(extra_data) as buffer: txt_dict = dns.parse_txt_dict(buffer, len(data)) assert buffer.tell() == len(data) assert txt_dict == {"foo": b"\xCA\xFE" * 100} @pytest.mark.parametrize( "record_type,data,expected", [ (dns.QueryType.A, b"\x0A\x00\x00\x2A", "10.0.0.42"), (dns.QueryType.PTR, b"\x03foo\x07example\x03com\x00", "foo.example.com"), (dns.QueryType.TXT, b"\x07foo=bar", {"foo": b"bar"}), ( dns.QueryType.SRV, b"\x00\x0A\x00\x00\x00\x50\x03foo\x07example\x03com\x00", { "priority": 10, "weight": 0, "port": 80, "target": "foo.example.com", }, ), ], # Use the name of the record type as the test id ids=( t.name for t in ( dns.QueryType.A, dns.QueryType.PTR, dns.QueryType.TXT, dns.QueryType.SRV, ) ), ) def test_parse_rdata( record_type: dns.QueryType, data: bytes, expected: typing.Any, ): with io.BytesIO(data) as buffer: assert record_type.parse_rdata(buffer, len(data)) == expected assert buffer.tell() == len(data)
mit
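A worked sketch of the name-compression case from the test table above: a compression pointer is two bytes whose top two bits are set (0xC0), and the low 14 bits give the offset in the message where the name continues. It mirrors the "compressed" test entry and assumes pyatv is importable.

import io
from pyatv.support import dns

message = (
    b"aaaa"               # offsets 0-3: unrelated message bytes
    b"\x04test\x00"       # offset 4: the name "test"
    b"\x05label\xC0\x04"  # offset 10: "label" + pointer back to offset 4
)
with io.BytesIO(message) as buffer:
    buffer.seek(10)
    assert dns.parse_domain_name(buffer) == "label.test"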
Ale-/civics
apps/models/migrations/0028_auto_20170924_1153.py
1
1318
# -*- coding: utf-8 -*- # Generated by Django 1.10.6 on 2017-09-24 11:53 from __future__ import unicode_literals import apps.models.utils from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('models', '0027_auto_20170922_1554'), ] operations = [ migrations.AlterField( model_name='event', name='image', field=models.ImageField(blank=True, help_text='Sube una imagen representativa del evento haciendo click en la imagen inferior. La imagen ha de tener ancho mínimo de 300 píxeles y máximo de 1920, y altura mínima de 300 píxeles y máxima de 1280. Formatos permitidos: PNG, JPG, JPEG.', upload_to=apps.models.utils.RenameCivicsImage('images/events/'), verbose_name='Imagen'), ), migrations.AlterField( model_name='initiative', name='image', field=models.ImageField(blank=True, help_text='Sube una imagen representativa de la iniciativa haciendo click en la imagen inferior. La imagen ha de tener ancho mínimo de 300 píxeles y máximo de 1920, y altura mínima de 300 píxeles y máxima de 1280. Formatos permitidos: PNG, JPG, JPEG.', upload_to=apps.models.utils.RenameCivicsImage('images/initiatives/'), verbose_name='Imagen'), ), ]
gpl-3.0
sajithaliyanage/Travel_SriLanka
node_modules/node-gyp/gyp/pylib/gyp/generator/ninja.py
1284
100329
# Copyright (c) 2013 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import collections import copy import hashlib import json import multiprocessing import os.path import re import signal import subprocess import sys import gyp import gyp.common from gyp.common import OrderedSet import gyp.msvs_emulation import gyp.MSVSUtil as MSVSUtil import gyp.xcode_emulation from cStringIO import StringIO from gyp.common import GetEnvironFallback import gyp.ninja_syntax as ninja_syntax generator_default_variables = { 'EXECUTABLE_PREFIX': '', 'EXECUTABLE_SUFFIX': '', 'STATIC_LIB_PREFIX': 'lib', 'STATIC_LIB_SUFFIX': '.a', 'SHARED_LIB_PREFIX': 'lib', # Gyp expects the following variables to be expandable by the build # system to the appropriate locations. Ninja prefers paths to be # known at gyp time. To resolve this, introduce special # variables starting with $! and $| (which begin with a $ so gyp knows it # should be treated specially, but is otherwise an invalid # ninja/shell variable) that are passed to gyp here but expanded # before writing out into the target .ninja files; see # ExpandSpecial. # $! is used for variables that represent a path and that can only appear at # the start of a string, while $| is used for variables that can appear # anywhere in a string. 'INTERMEDIATE_DIR': '$!INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR': '$!PRODUCT_DIR/gen', 'PRODUCT_DIR': '$!PRODUCT_DIR', 'CONFIGURATION_NAME': '$|CONFIGURATION_NAME', # Special variables that may be used by gyp 'rule' targets. # We generate definitions for these variables on the fly when processing a # rule. 'RULE_INPUT_ROOT': '${root}', 'RULE_INPUT_DIRNAME': '${dirname}', 'RULE_INPUT_PATH': '${source}', 'RULE_INPUT_EXT': '${ext}', 'RULE_INPUT_NAME': '${name}', } # Placates pylint. generator_additional_non_configuration_keys = [] generator_additional_path_sections = [] generator_extra_sources_for_rules = [] generator_filelist_paths = None generator_supports_multiple_toolsets = gyp.common.CrossCompileRequested() def StripPrefix(arg, prefix): if arg.startswith(prefix): return arg[len(prefix):] return arg def QuoteShellArgument(arg, flavor): """Quote a string such that it will be interpreted as a single argument by the shell.""" # Rather than attempting to enumerate the bad shell characters, just # whitelist common OK ones and quote anything else. if re.match(r'^[a-zA-Z0-9_=.\\/-]+$', arg): return arg # No quoting necessary. if flavor == 'win': return gyp.msvs_emulation.QuoteForRspFile(arg) return "'" + arg.replace("'", "'" + '"\'"' + "'") + "'" def Define(d, flavor): """Takes a preprocessor define and returns a -D parameter that's ninja- and shell-escaped.""" if flavor == 'win': # cl.exe replaces literal # characters with = in preprocessor definitions for # some reason. Octal-encode to work around that. d = d.replace('#', '\\%03o' % ord('#')) return QuoteShellArgument(ninja_syntax.escape('-D' + d), flavor) def AddArch(output, arch): """Adds an arch string to an output path.""" output, extension = os.path.splitext(output) return '%s.%s%s' % (output, arch, extension) class Target(object): """Target represents the paths used within a single gyp target. Conceptually, building a single target A is a series of steps: 1) actions/rules/copies generates source/resources/etc. 2) compiles generates .o files 3) link generates a binary (library/executable) 4) bundle merges the above in a mac bundle (Any of these steps can be optional.)
From a build ordering perspective, a dependent target B could just depend on the last output of this series of steps. But some dependent commands sometimes need to reach inside the box. For example, when linking B it needs to get the path to the static library generated by A. This object stores those paths. To keep things simple, member variables only store concrete paths to single files, while methods compute derived values like "the last output of the target". """ def __init__(self, type): # Gyp type ("static_library", etc.) of this target. self.type = type # File representing whether any input dependencies necessary for # dependent actions have completed. self.preaction_stamp = None # File representing whether any input dependencies necessary for # dependent compiles have completed. self.precompile_stamp = None # File representing the completion of actions/rules/copies, if any. self.actions_stamp = None # Path to the output of the link step, if any. self.binary = None # Path to the file representing the completion of building the bundle, # if any. self.bundle = None # On Windows, incremental linking requires linking against all the .objs # that compose a .lib (rather than the .lib itself). That list is stored # here. In this case, we also need to save the compile_deps for the target, # so that the target that directly depends on the .objs can also depend # on those. self.component_objs = None self.compile_deps = None # Windows only. The import .lib is the output of a build step, but # because dependents only link against the lib (not both the lib and the # dll) we keep track of the import library here. self.import_lib = None def Linkable(self): """Return true if this is a target that can be linked against.""" return self.type in ('static_library', 'shared_library') def UsesToc(self, flavor): """Return true if the target should produce a restat rule based on a TOC file.""" # For bundles, the .TOC should be produced for the binary, not for # FinalOutput(). But the naive approach would put the TOC file into the # bundle, so don't do this for bundles for now. if flavor == 'win' or self.bundle: return False return self.type in ('shared_library', 'loadable_module') def PreActionInput(self, flavor): """Return the path, if any, that should be used as a dependency of any dependent action step.""" if self.UsesToc(flavor): return self.FinalOutput() + '.TOC' return self.FinalOutput() or self.preaction_stamp def PreCompileInput(self): """Return the path, if any, that should be used as a dependency of any dependent compile step.""" return self.actions_stamp or self.precompile_stamp def FinalOutput(self): """Return the last output of the target, which depends on all prior steps.""" return self.bundle or self.binary or self.actions_stamp # A small discourse on paths as used within the Ninja build: # All files we produce (both at gyp and at build time) appear in the # build directory (e.g. out/Debug). # # Paths within a given .gyp file are always relative to the directory # containing the .gyp file. Call these "gyp paths". This includes # sources as well as the starting directory a given gyp rule/action # expects to be run from. We call the path from the source root to # the gyp file the "base directory" within the per-.gyp-file # NinjaWriter code. # # All paths as written into the .ninja files are relative to the build # directory. Call these paths "ninja paths". # # We translate between these two notions of paths with two helper # functions: # # - GypPathToNinja translates a gyp path (i.e.
relative to the .gyp file) # into the equivalent ninja path. # # - GypPathToUniqueOutput translates a gyp path into a ninja path to write # an output file; the result can be namespaced such that it is unique # to the input file name as well as the output target name. class NinjaWriter(object): def __init__(self, hash_for_rules, target_outputs, base_dir, build_dir, output_file, toplevel_build, output_file_name, flavor, toplevel_dir=None): """ base_dir: path from source root to directory containing this gyp file, by gyp semantics, all input paths are relative to this build_dir: path from source root to build output toplevel_dir: path to the toplevel directory """ self.hash_for_rules = hash_for_rules self.target_outputs = target_outputs self.base_dir = base_dir self.build_dir = build_dir self.ninja = ninja_syntax.Writer(output_file) self.toplevel_build = toplevel_build self.output_file_name = output_file_name self.flavor = flavor self.abs_build_dir = None if toplevel_dir is not None: self.abs_build_dir = os.path.abspath(os.path.join(toplevel_dir, build_dir)) self.obj_ext = '.obj' if flavor == 'win' else '.o' if flavor == 'win': # See docstring of msvs_emulation.GenerateEnvironmentFiles(). self.win_env = {} for arch in ('x86', 'x64'): self.win_env[arch] = 'environment.' + arch # Relative path from build output dir to base dir. build_to_top = gyp.common.InvertRelativePath(build_dir, toplevel_dir) self.build_to_base = os.path.join(build_to_top, base_dir) # Relative path from base dir to build dir. base_to_top = gyp.common.InvertRelativePath(base_dir, toplevel_dir) self.base_to_build = os.path.join(base_to_top, build_dir) def ExpandSpecial(self, path, product_dir=None): """Expand specials like $!PRODUCT_DIR in |path|. If |product_dir| is None, assumes the cwd is already the product dir. Otherwise, |product_dir| is the relative path to the product dir. """ PRODUCT_DIR = '$!PRODUCT_DIR' if PRODUCT_DIR in path: if product_dir: path = path.replace(PRODUCT_DIR, product_dir) else: path = path.replace(PRODUCT_DIR + '/', '') path = path.replace(PRODUCT_DIR + '\\', '') path = path.replace(PRODUCT_DIR, '.') INTERMEDIATE_DIR = '$!INTERMEDIATE_DIR' if INTERMEDIATE_DIR in path: int_dir = self.GypPathToUniqueOutput('gen') # GypPathToUniqueOutput generates a path relative to the product dir, # so insert product_dir in front if it is provided. path = path.replace(INTERMEDIATE_DIR, os.path.join(product_dir or '', int_dir)) CONFIGURATION_NAME = '$|CONFIGURATION_NAME' path = path.replace(CONFIGURATION_NAME, self.config_name) return path def ExpandRuleVariables(self, path, root, dirname, source, ext, name): if self.flavor == 'win': path = self.msvs_settings.ConvertVSMacros( path, config=self.config_name) path = path.replace(generator_default_variables['RULE_INPUT_ROOT'], root) path = path.replace(generator_default_variables['RULE_INPUT_DIRNAME'], dirname) path = path.replace(generator_default_variables['RULE_INPUT_PATH'], source) path = path.replace(generator_default_variables['RULE_INPUT_EXT'], ext) path = path.replace(generator_default_variables['RULE_INPUT_NAME'], name) return path def GypPathToNinja(self, path, env=None): """Translate a gyp path to a ninja path, optionally expanding environment variable references in |path| with |env|. 
See the above discourse on path conversions.""" if env: if self.flavor == 'mac': path = gyp.xcode_emulation.ExpandEnvVars(path, env) elif self.flavor == 'win': path = gyp.msvs_emulation.ExpandMacros(path, env) if path.startswith('$!'): expanded = self.ExpandSpecial(path) if self.flavor == 'win': expanded = os.path.normpath(expanded) return expanded if '$|' in path: path = self.ExpandSpecial(path) assert '$' not in path, path return os.path.normpath(os.path.join(self.build_to_base, path)) def GypPathToUniqueOutput(self, path, qualified=True): """Translate a gyp path to a ninja path for writing output. If qualified is True, qualify the resulting filename with the name of the target. This is necessary when e.g. compiling the same path twice for two separate output targets. See the above discourse on path conversions.""" path = self.ExpandSpecial(path) assert not path.startswith('$'), path # Translate the path following this scheme: # Input: foo/bar.gyp, target targ, references baz/out.o # Output: obj/foo/baz/targ.out.o (if qualified) # obj/foo/baz/out.o (otherwise) # (and obj.host instead of obj for cross-compiles) # # Why this scheme and not some other one? # 1) for a given input, you can compute all derived outputs by matching # its path, even if the input is brought via a gyp file with '..'. # 2) simple files like libraries and stamps have a simple filename. obj = 'obj' if self.toolset != 'target': obj += '.' + self.toolset path_dir, path_basename = os.path.split(path) assert not os.path.isabs(path_dir), ( "'%s' can not be absolute path (see crbug.com/462153)." % path_dir) if qualified: path_basename = self.name + '.' + path_basename return os.path.normpath(os.path.join(obj, self.base_dir, path_dir, path_basename)) def WriteCollapsedDependencies(self, name, targets, order_only=None): """Given a list of targets, return a path for a single file representing the result of building all the targets or None. Uses a stamp file if necessary.""" assert targets == filter(None, targets), targets if len(targets) == 0: assert not order_only return None if len(targets) > 1 or order_only: stamp = self.GypPathToUniqueOutput(name + '.stamp') targets = self.ninja.build(stamp, 'stamp', targets, order_only=order_only) self.ninja.newline() return targets[0] def _SubninjaNameForArch(self, arch): output_file_base = os.path.splitext(self.output_file_name)[0] return '%s.%s.ninja' % (output_file_base, arch) def WriteSpec(self, spec, config_name, generator_flags): """The main entry point for NinjaWriter: write the build rules for a spec. Returns a Target object, which represents the output paths for this spec. Returns None if there are no outputs (e.g. a settings-only 'none' type target).""" self.config_name = config_name self.name = spec['target_name'] self.toolset = spec['toolset'] config = spec['configurations'][config_name] self.target = Target(spec['type']) self.is_standalone_static_library = bool( spec.get('standalone_static_library', 0)) # Track if this target contains any C++ files, to decide if gcc or g++ # should be used for linking. 
self.uses_cpp = False self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec) self.xcode_settings = self.msvs_settings = None if self.flavor == 'mac': self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec) if self.flavor == 'win': self.msvs_settings = gyp.msvs_emulation.MsvsSettings(spec, generator_flags) arch = self.msvs_settings.GetArch(config_name) self.ninja.variable('arch', self.win_env[arch]) self.ninja.variable('cc', '$cl_' + arch) self.ninja.variable('cxx', '$cl_' + arch) self.ninja.variable('cc_host', '$cl_' + arch) self.ninja.variable('cxx_host', '$cl_' + arch) self.ninja.variable('asm', '$ml_' + arch) if self.flavor == 'mac': self.archs = self.xcode_settings.GetActiveArchs(config_name) if len(self.archs) > 1: self.arch_subninjas = dict( (arch, ninja_syntax.Writer( OpenOutput(os.path.join(self.toplevel_build, self._SubninjaNameForArch(arch)), 'w'))) for arch in self.archs) # Compute predepends for all rules. # actions_depends is the dependencies this target depends on before running # any of its action/rule/copy steps. # compile_depends is the dependencies this target depends on before running # any of its compile steps. actions_depends = [] compile_depends = [] # TODO(evan): it is rather confusing which things are lists and which # are strings. Fix these. if 'dependencies' in spec: for dep in spec['dependencies']: if dep in self.target_outputs: target = self.target_outputs[dep] actions_depends.append(target.PreActionInput(self.flavor)) compile_depends.append(target.PreCompileInput()) actions_depends = filter(None, actions_depends) compile_depends = filter(None, compile_depends) actions_depends = self.WriteCollapsedDependencies('actions_depends', actions_depends) compile_depends = self.WriteCollapsedDependencies('compile_depends', compile_depends) self.target.preaction_stamp = actions_depends self.target.precompile_stamp = compile_depends # Write out actions, rules, and copies. These must happen before we # compile any sources, so compute a list of predependencies for sources # while we do it. extra_sources = [] mac_bundle_depends = [] self.target.actions_stamp = self.WriteActionsRulesCopies( spec, extra_sources, actions_depends, mac_bundle_depends) # If we have actions/rules/copies, we depend directly on those, but # otherwise we depend on dependent target's actions/rules/copies etc. # We never need to explicitly depend on previous target's link steps, # because no compile ever depends on them. compile_depends_stamp = (self.target.actions_stamp or compile_depends) # Write out the compilation steps, if any. link_deps = [] sources = extra_sources + spec.get('sources', []) if sources: if self.flavor == 'mac' and len(self.archs) > 1: # Write subninja file containing compile and link commands scoped to # a single arch if a fat binary is being built. for arch in self.archs: self.ninja.subninja(self._SubninjaNameForArch(arch)) pch = None if self.flavor == 'win': gyp.msvs_emulation.VerifyMissingSources( sources, self.abs_build_dir, generator_flags, self.GypPathToNinja) pch = gyp.msvs_emulation.PrecompiledHeader( self.msvs_settings, config_name, self.GypPathToNinja, self.GypPathToUniqueOutput, self.obj_ext) else: pch = gyp.xcode_emulation.MacPrefixHeader( self.xcode_settings, self.GypPathToNinja, lambda path, lang: self.GypPathToUniqueOutput(path + '-' + lang)) link_deps = self.WriteSources( self.ninja, config_name, config, sources, compile_depends_stamp, pch, spec) # Some actions/rules output 'sources' that are already object files. 
obj_outputs = [f for f in sources if f.endswith(self.obj_ext)] if obj_outputs: if self.flavor != 'mac' or len(self.archs) == 1: link_deps += [self.GypPathToNinja(o) for o in obj_outputs] else: print "Warning: Actions/rules writing object files don't work with " \ "multiarch targets, dropping. (target %s)" % spec['target_name'] elif self.flavor == 'mac' and len(self.archs) > 1: link_deps = collections.defaultdict(list) compile_deps = self.target.actions_stamp or actions_depends if self.flavor == 'win' and self.target.type == 'static_library': self.target.component_objs = link_deps self.target.compile_deps = compile_deps # Write out a link step, if needed. output = None is_empty_bundle = not link_deps and not mac_bundle_depends if link_deps or self.target.actions_stamp or actions_depends: output = self.WriteTarget(spec, config_name, config, link_deps, compile_deps) if self.is_mac_bundle: mac_bundle_depends.append(output) # Bundle all of the above together, if needed. if self.is_mac_bundle: output = self.WriteMacBundle(spec, mac_bundle_depends, is_empty_bundle) if not output: return None assert self.target.FinalOutput(), output return self.target def _WinIdlRule(self, source, prebuild, outputs): """Handle the implicit VS .idl rule for one source file. Fills |outputs| with files that are generated.""" outdir, output, vars, flags = self.msvs_settings.GetIdlBuildData( source, self.config_name) outdir = self.GypPathToNinja(outdir) def fix_path(path, rel=None): path = os.path.join(outdir, path) dirname, basename = os.path.split(source) root, ext = os.path.splitext(basename) path = self.ExpandRuleVariables( path, root, dirname, source, ext, basename) if rel: path = os.path.relpath(path, rel) return path vars = [(name, fix_path(value, outdir)) for name, value in vars] output = [fix_path(p) for p in output] vars.append(('outdir', outdir)) vars.append(('idlflags', flags)) input = self.GypPathToNinja(source) self.ninja.build(output, 'idl', input, variables=vars, order_only=prebuild) outputs.extend(output) def WriteWinIdlFiles(self, spec, prebuild): """Writes rules to match MSVS's implicit idl handling.""" assert self.flavor == 'win' if self.msvs_settings.HasExplicitIdlRulesOrActions(spec): return [] outputs = [] for source in filter(lambda x: x.endswith('.idl'), spec['sources']): self._WinIdlRule(source, prebuild, outputs) return outputs def WriteActionsRulesCopies(self, spec, extra_sources, prebuild, mac_bundle_depends): """Write out the Actions, Rules, and Copies steps. 
Return a path representing the outputs of these steps.""" outputs = [] if self.is_mac_bundle: mac_bundle_resources = spec.get('mac_bundle_resources', [])[:] else: mac_bundle_resources = [] extra_mac_bundle_resources = [] if 'actions' in spec: outputs += self.WriteActions(spec['actions'], extra_sources, prebuild, extra_mac_bundle_resources) if 'rules' in spec: outputs += self.WriteRules(spec['rules'], extra_sources, prebuild, mac_bundle_resources, extra_mac_bundle_resources) if 'copies' in spec: outputs += self.WriteCopies(spec['copies'], prebuild, mac_bundle_depends) if 'sources' in spec and self.flavor == 'win': outputs += self.WriteWinIdlFiles(spec, prebuild) stamp = self.WriteCollapsedDependencies('actions_rules_copies', outputs) if self.is_mac_bundle: xcassets = self.WriteMacBundleResources( extra_mac_bundle_resources + mac_bundle_resources, mac_bundle_depends) partial_info_plist = self.WriteMacXCassets(xcassets, mac_bundle_depends) self.WriteMacInfoPlist(partial_info_plist, mac_bundle_depends) return stamp def GenerateDescription(self, verb, message, fallback): """Generate and return a description of a build step. |verb| is the short summary, e.g. ACTION or RULE. |message| is a hand-written description, or None if not available. |fallback| is the gyp-level name of the step, usable as a fallback. """ if self.toolset != 'target': verb += '(%s)' % self.toolset if message: return '%s %s' % (verb, self.ExpandSpecial(message)) else: return '%s %s: %s' % (verb, self.name, fallback) def WriteActions(self, actions, extra_sources, prebuild, extra_mac_bundle_resources): # Actions cd into the base directory. env = self.GetToolchainEnv() all_outputs = [] for action in actions: # First write out a rule for the action. name = '%s_%s' % (action['action_name'], self.hash_for_rules) description = self.GenerateDescription('ACTION', action.get('message', None), name) is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(action) if self.flavor == 'win' else False) args = action['action'] depfile = action.get('depfile', None) if depfile: depfile = self.ExpandSpecial(depfile, self.base_to_build) pool = 'console' if int(action.get('ninja_use_console', 0)) else None rule_name, _ = self.WriteNewNinjaRule(name, args, description, is_cygwin, env, pool, depfile=depfile) inputs = [self.GypPathToNinja(i, env) for i in action['inputs']] if int(action.get('process_outputs_as_sources', False)): extra_sources += action['outputs'] if int(action.get('process_outputs_as_mac_bundle_resources', False)): extra_mac_bundle_resources += action['outputs'] outputs = [self.GypPathToNinja(o, env) for o in action['outputs']] # Then write out an edge using the rule. self.ninja.build(outputs, rule_name, inputs, order_only=prebuild) all_outputs += outputs self.ninja.newline() return all_outputs def WriteRules(self, rules, extra_sources, prebuild, mac_bundle_resources, extra_mac_bundle_resources): env = self.GetToolchainEnv() all_outputs = [] for rule in rules: # Skip a rule with no action and no inputs. if 'action' not in rule and not rule.get('rule_sources', []): continue # First write out a rule for the rule action. 
name = '%s_%s' % (rule['rule_name'], self.hash_for_rules) args = rule['action'] description = self.GenerateDescription( 'RULE', rule.get('message', None), ('%s ' + generator_default_variables['RULE_INPUT_PATH']) % name) is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(rule) if self.flavor == 'win' else False) pool = 'console' if int(rule.get('ninja_use_console', 0)) else None rule_name, args = self.WriteNewNinjaRule( name, args, description, is_cygwin, env, pool) # TODO: if the command references the outputs directly, we should # simplify it to just use $out. # Rules can potentially make use of some special variables which # must vary per source file. # Compute the list of variables we'll need to provide. special_locals = ('source', 'root', 'dirname', 'ext', 'name') needed_variables = set(['source']) for argument in args: for var in special_locals: if '${%s}' % var in argument: needed_variables.add(var) def cygwin_munge(path): # pylint: disable=cell-var-from-loop if is_cygwin: return path.replace('\\', '/') return path inputs = [self.GypPathToNinja(i, env) for i in rule.get('inputs', [])] # If there are n source files matching the rule, and m additional rule # inputs, then adding 'inputs' to each build edge written below will # write m * n inputs. Collapsing reduces this to m + n. sources = rule.get('rule_sources', []) num_inputs = len(inputs) if prebuild: num_inputs += 1 if num_inputs > 2 and len(sources) > 2: inputs = [self.WriteCollapsedDependencies( rule['rule_name'], inputs, order_only=prebuild)] prebuild = [] # For each source file, write an edge that generates all the outputs. for source in sources: source = os.path.normpath(source) dirname, basename = os.path.split(source) root, ext = os.path.splitext(basename) # Gather the list of inputs and outputs, expanding $vars if possible. outputs = [self.ExpandRuleVariables(o, root, dirname, source, ext, basename) for o in rule['outputs']] if int(rule.get('process_outputs_as_sources', False)): extra_sources += outputs was_mac_bundle_resource = source in mac_bundle_resources if was_mac_bundle_resource or \ int(rule.get('process_outputs_as_mac_bundle_resources', False)): extra_mac_bundle_resources += outputs # Note: This is n_resources * n_outputs_in_rule. Put to-be-removed # items in a set and remove them all in a single pass if this becomes # a performance issue. if was_mac_bundle_resource: mac_bundle_resources.remove(source) extra_bindings = [] for var in needed_variables: if var == 'root': extra_bindings.append(('root', cygwin_munge(root))) elif var == 'dirname': # '$dirname' is a parameter to the rule action, which means # it shouldn't be converted to a Ninja path. But we don't # want $!PRODUCT_DIR in there either. dirname_expanded = self.ExpandSpecial(dirname, self.base_to_build) extra_bindings.append(('dirname', cygwin_munge(dirname_expanded))) elif var == 'source': # '$source' is a parameter to the rule action, which means # it shouldn't be converted to a Ninja path. But we don't # want $!PRODUCT_DIR in there either. source_expanded = self.ExpandSpecial(source, self.base_to_build) extra_bindings.append(('source', cygwin_munge(source_expanded))) elif var == 'ext': extra_bindings.append(('ext', ext)) elif var == 'name': extra_bindings.append(('name', cygwin_munge(basename))) else: assert var == None, repr(var) outputs = [self.GypPathToNinja(o, env) for o in outputs] if self.flavor == 'win': # WriteNewNinjaRule uses unique_name for creating an rsp file on win. 
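          # Hashing the first output yields a short, filesystem-safe token,
          # so every build edge written for this rule gets its own response
          # file.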
          extra_bindings.append(('unique_name',
                                 hashlib.md5(outputs[0]).hexdigest()))

        self.ninja.build(outputs, rule_name, self.GypPathToNinja(source),
                         implicit=inputs,
                         order_only=prebuild,
                         variables=extra_bindings)
        all_outputs.extend(outputs)

    return all_outputs

  def WriteCopies(self, copies, prebuild, mac_bundle_depends):
    outputs = []
    env = self.GetToolchainEnv()
    for copy in copies:
      for path in copy['files']:
        # Normalize the path so trailing slashes don't confuse us.
        path = os.path.normpath(path)
        basename = os.path.split(path)[1]
        src = self.GypPathToNinja(path, env)
        dst = self.GypPathToNinja(os.path.join(copy['destination'], basename),
                                  env)
        outputs += self.ninja.build(dst, 'copy', src, order_only=prebuild)
        if self.is_mac_bundle:
          # gyp has mac_bundle_resources to copy things into a bundle's
          # Resources folder, but there's no built-in way to copy files to
          # other places in the bundle. Hence, some targets use copies for
          # this. Check if this file is copied into the current bundle, and
          # if so add it to the bundle depends so that dependent targets get
          # rebuilt if the copy input changes.
          if dst.startswith(self.xcode_settings.GetBundleContentsFolderPath()):
            mac_bundle_depends.append(dst)

    return outputs

  def WriteMacBundleResources(self, resources, bundle_depends):
    """Writes ninja edges for 'mac_bundle_resources'."""
    xcassets = []
    for output, res in gyp.xcode_emulation.GetMacBundleResources(
        generator_default_variables['PRODUCT_DIR'],
        self.xcode_settings, map(self.GypPathToNinja, resources)):
      output = self.ExpandSpecial(output)
      if os.path.splitext(output)[-1] != '.xcassets':
        isBinary = self.xcode_settings.IsBinaryOutputFormat(self.config_name)
        self.ninja.build(output, 'mac_tool', res,
                         variables=[('mactool_cmd', 'copy-bundle-resource'),
                                    ('binary', isBinary)])
        bundle_depends.append(output)
      else:
        xcassets.append(res)
    return xcassets

  def WriteMacXCassets(self, xcassets, bundle_depends):
    """Writes ninja edges for 'mac_bundle_resources' .xcassets files.

    This adds an invocation of 'actool' via the 'mac_tool.py' helper script.
    It assumes that the asset catalogs define at least one imageset and
    thus an Assets.car file will be generated in the application resources
    directory.
If this is not the case, then the build will probably be done at each invocation of ninja.""" if not xcassets: return extra_arguments = {} settings_to_arg = { 'XCASSETS_APP_ICON': 'app-icon', 'XCASSETS_LAUNCH_IMAGE': 'launch-image', } settings = self.xcode_settings.xcode_settings[self.config_name] for settings_key, arg_name in settings_to_arg.iteritems(): value = settings.get(settings_key) if value: extra_arguments[arg_name] = value partial_info_plist = None if extra_arguments: partial_info_plist = self.GypPathToUniqueOutput( 'assetcatalog_generated_info.plist') extra_arguments['output-partial-info-plist'] = partial_info_plist outputs = [] outputs.append( os.path.join( self.xcode_settings.GetBundleResourceFolder(), 'Assets.car')) if partial_info_plist: outputs.append(partial_info_plist) keys = QuoteShellArgument(json.dumps(extra_arguments), self.flavor) extra_env = self.xcode_settings.GetPerTargetSettings() env = self.GetSortedXcodeEnv(additional_settings=extra_env) env = self.ComputeExportEnvString(env) bundle_depends.extend(self.ninja.build( outputs, 'compile_xcassets', xcassets, variables=[('env', env), ('keys', keys)])) return partial_info_plist def WriteMacInfoPlist(self, partial_info_plist, bundle_depends): """Write build rules for bundle Info.plist files.""" info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist( generator_default_variables['PRODUCT_DIR'], self.xcode_settings, self.GypPathToNinja) if not info_plist: return out = self.ExpandSpecial(out) if defines: # Create an intermediate file to store preprocessed results. intermediate_plist = self.GypPathToUniqueOutput( os.path.basename(info_plist)) defines = ' '.join([Define(d, self.flavor) for d in defines]) info_plist = self.ninja.build( intermediate_plist, 'preprocess_infoplist', info_plist, variables=[('defines',defines)]) env = self.GetSortedXcodeEnv(additional_settings=extra_env) env = self.ComputeExportEnvString(env) if partial_info_plist: intermediate_plist = self.GypPathToUniqueOutput('merged_info.plist') info_plist = self.ninja.build( intermediate_plist, 'merge_infoplist', [partial_info_plist, info_plist]) keys = self.xcode_settings.GetExtraPlistItems(self.config_name) keys = QuoteShellArgument(json.dumps(keys), self.flavor) isBinary = self.xcode_settings.IsBinaryOutputFormat(self.config_name) self.ninja.build(out, 'copy_infoplist', info_plist, variables=[('env', env), ('keys', keys), ('binary', isBinary)]) bundle_depends.append(out) def WriteSources(self, ninja_file, config_name, config, sources, predepends, precompiled_header, spec): """Write build rules to compile all of |sources|.""" if self.toolset == 'host': self.ninja.variable('ar', '$ar_host') self.ninja.variable('cc', '$cc_host') self.ninja.variable('cxx', '$cxx_host') self.ninja.variable('ld', '$ld_host') self.ninja.variable('ldxx', '$ldxx_host') self.ninja.variable('nm', '$nm_host') self.ninja.variable('readelf', '$readelf_host') if self.flavor != 'mac' or len(self.archs) == 1: return self.WriteSourcesForArch( self.ninja, config_name, config, sources, predepends, precompiled_header, spec) else: return dict((arch, self.WriteSourcesForArch( self.arch_subninjas[arch], config_name, config, sources, predepends, precompiled_header, spec, arch=arch)) for arch in self.archs) def WriteSourcesForArch(self, ninja_file, config_name, config, sources, predepends, precompiled_header, spec, arch=None): """Write build rules to compile all of |sources|.""" extra_defines = [] if self.flavor == 'mac': cflags = self.xcode_settings.GetCflags(config_name, 
arch=arch) cflags_c = self.xcode_settings.GetCflagsC(config_name) cflags_cc = self.xcode_settings.GetCflagsCC(config_name) cflags_objc = ['$cflags_c'] + \ self.xcode_settings.GetCflagsObjC(config_name) cflags_objcc = ['$cflags_cc'] + \ self.xcode_settings.GetCflagsObjCC(config_name) elif self.flavor == 'win': asmflags = self.msvs_settings.GetAsmflags(config_name) cflags = self.msvs_settings.GetCflags(config_name) cflags_c = self.msvs_settings.GetCflagsC(config_name) cflags_cc = self.msvs_settings.GetCflagsCC(config_name) extra_defines = self.msvs_settings.GetComputedDefines(config_name) # See comment at cc_command for why there's two .pdb files. pdbpath_c = pdbpath_cc = self.msvs_settings.GetCompilerPdbName( config_name, self.ExpandSpecial) if not pdbpath_c: obj = 'obj' if self.toolset != 'target': obj += '.' + self.toolset pdbpath = os.path.normpath(os.path.join(obj, self.base_dir, self.name)) pdbpath_c = pdbpath + '.c.pdb' pdbpath_cc = pdbpath + '.cc.pdb' self.WriteVariableList(ninja_file, 'pdbname_c', [pdbpath_c]) self.WriteVariableList(ninja_file, 'pdbname_cc', [pdbpath_cc]) self.WriteVariableList(ninja_file, 'pchprefix', [self.name]) else: cflags = config.get('cflags', []) cflags_c = config.get('cflags_c', []) cflags_cc = config.get('cflags_cc', []) # Respect environment variables related to build, but target-specific # flags can still override them. if self.toolset == 'target': cflags_c = (os.environ.get('CPPFLAGS', '').split() + os.environ.get('CFLAGS', '').split() + cflags_c) cflags_cc = (os.environ.get('CPPFLAGS', '').split() + os.environ.get('CXXFLAGS', '').split() + cflags_cc) elif self.toolset == 'host': cflags_c = (os.environ.get('CPPFLAGS_host', '').split() + os.environ.get('CFLAGS_host', '').split() + cflags_c) cflags_cc = (os.environ.get('CPPFLAGS_host', '').split() + os.environ.get('CXXFLAGS_host', '').split() + cflags_cc) defines = config.get('defines', []) + extra_defines self.WriteVariableList(ninja_file, 'defines', [Define(d, self.flavor) for d in defines]) if self.flavor == 'win': self.WriteVariableList(ninja_file, 'asmflags', map(self.ExpandSpecial, asmflags)) self.WriteVariableList(ninja_file, 'rcflags', [QuoteShellArgument(self.ExpandSpecial(f), self.flavor) for f in self.msvs_settings.GetRcflags(config_name, self.GypPathToNinja)]) include_dirs = config.get('include_dirs', []) env = self.GetToolchainEnv() if self.flavor == 'win': include_dirs = self.msvs_settings.AdjustIncludeDirs(include_dirs, config_name) self.WriteVariableList(ninja_file, 'includes', [QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor) for i in include_dirs]) if self.flavor == 'win': midl_include_dirs = config.get('midl_include_dirs', []) midl_include_dirs = self.msvs_settings.AdjustMidlIncludeDirs( midl_include_dirs, config_name) self.WriteVariableList(ninja_file, 'midl_includes', [QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor) for i in midl_include_dirs]) pch_commands = precompiled_header.GetPchBuildCommands(arch) if self.flavor == 'mac': # Most targets use no precompiled headers, so only write these if needed. 
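      # ninja expands unbound variables to the empty string, so the compile
      # rules can reference the $cflags_pch_* variables unconditionally; they
      # are only bound here when a prefix header is actually configured.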
for ext, var in [('c', 'cflags_pch_c'), ('cc', 'cflags_pch_cc'), ('m', 'cflags_pch_objc'), ('mm', 'cflags_pch_objcc')]: include = precompiled_header.GetInclude(ext, arch) if include: ninja_file.variable(var, include) arflags = config.get('arflags', []) self.WriteVariableList(ninja_file, 'cflags', map(self.ExpandSpecial, cflags)) self.WriteVariableList(ninja_file, 'cflags_c', map(self.ExpandSpecial, cflags_c)) self.WriteVariableList(ninja_file, 'cflags_cc', map(self.ExpandSpecial, cflags_cc)) if self.flavor == 'mac': self.WriteVariableList(ninja_file, 'cflags_objc', map(self.ExpandSpecial, cflags_objc)) self.WriteVariableList(ninja_file, 'cflags_objcc', map(self.ExpandSpecial, cflags_objcc)) self.WriteVariableList(ninja_file, 'arflags', map(self.ExpandSpecial, arflags)) ninja_file.newline() outputs = [] has_rc_source = False for source in sources: filename, ext = os.path.splitext(source) ext = ext[1:] obj_ext = self.obj_ext if ext in ('cc', 'cpp', 'cxx'): command = 'cxx' self.uses_cpp = True elif ext == 'c' or (ext == 'S' and self.flavor != 'win'): command = 'cc' elif ext == 's' and self.flavor != 'win': # Doesn't generate .o.d files. command = 'cc_s' elif (self.flavor == 'win' and ext == 'asm' and not self.msvs_settings.HasExplicitAsmRules(spec)): command = 'asm' # Add the _asm suffix as msvs is capable of handling .cc and # .asm files of the same name without collision. obj_ext = '_asm.obj' elif self.flavor == 'mac' and ext == 'm': command = 'objc' elif self.flavor == 'mac' and ext == 'mm': command = 'objcxx' self.uses_cpp = True elif self.flavor == 'win' and ext == 'rc': command = 'rc' obj_ext = '.res' has_rc_source = True else: # Ignore unhandled extensions. continue input = self.GypPathToNinja(source) output = self.GypPathToUniqueOutput(filename + obj_ext) if arch is not None: output = AddArch(output, arch) implicit = precompiled_header.GetObjDependencies([input], [output], arch) variables = [] if self.flavor == 'win': variables, output, implicit = precompiled_header.GetFlagsModifications( input, output, implicit, command, cflags_c, cflags_cc, self.ExpandSpecial) ninja_file.build(output, command, input, implicit=[gch for _, _, gch in implicit], order_only=predepends, variables=variables) outputs.append(output) if has_rc_source: resource_include_dirs = config.get('resource_include_dirs', include_dirs) self.WriteVariableList(ninja_file, 'resource_includes', [QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor) for i in resource_include_dirs]) self.WritePchTargets(ninja_file, pch_commands) ninja_file.newline() return outputs def WritePchTargets(self, ninja_file, pch_commands): """Writes ninja rules to compile prefix headers.""" if not pch_commands: return for gch, lang_flag, lang, input in pch_commands: var_name = { 'c': 'cflags_pch_c', 'cc': 'cflags_pch_cc', 'm': 'cflags_pch_objc', 'mm': 'cflags_pch_objcc', }[lang] map = { 'c': 'cc', 'cc': 'cxx', 'm': 'objc', 'mm': 'objcxx', } cmd = map.get(lang) ninja_file.build(gch, cmd, input, variables=[(var_name, lang_flag)]) def WriteLink(self, spec, config_name, config, link_deps): """Write out a link step. Fills out target.binary. 
""" if self.flavor != 'mac' or len(self.archs) == 1: return self.WriteLinkForArch( self.ninja, spec, config_name, config, link_deps) else: output = self.ComputeOutput(spec) inputs = [self.WriteLinkForArch(self.arch_subninjas[arch], spec, config_name, config, link_deps[arch], arch=arch) for arch in self.archs] extra_bindings = [] build_output = output if not self.is_mac_bundle: self.AppendPostbuildVariable(extra_bindings, spec, output, output) # TODO(yyanagisawa): more work needed to fix: # https://code.google.com/p/gyp/issues/detail?id=411 if (spec['type'] in ('shared_library', 'loadable_module') and not self.is_mac_bundle): extra_bindings.append(('lib', output)) self.ninja.build([output, output + '.TOC'], 'solipo', inputs, variables=extra_bindings) else: self.ninja.build(build_output, 'lipo', inputs, variables=extra_bindings) return output def WriteLinkForArch(self, ninja_file, spec, config_name, config, link_deps, arch=None): """Write out a link step. Fills out target.binary. """ command = { 'executable': 'link', 'loadable_module': 'solink_module', 'shared_library': 'solink', }[spec['type']] command_suffix = '' implicit_deps = set() solibs = set() order_deps = set() if 'dependencies' in spec: # Two kinds of dependencies: # - Linkable dependencies (like a .a or a .so): add them to the link line. # - Non-linkable dependencies (like a rule that generates a file # and writes a stamp file): add them to implicit_deps extra_link_deps = set() for dep in spec['dependencies']: target = self.target_outputs.get(dep) if not target: continue linkable = target.Linkable() if linkable: new_deps = [] if (self.flavor == 'win' and target.component_objs and self.msvs_settings.IsUseLibraryDependencyInputs(config_name)): new_deps = target.component_objs if target.compile_deps: order_deps.add(target.compile_deps) elif self.flavor == 'win' and target.import_lib: new_deps = [target.import_lib] elif target.UsesToc(self.flavor): solibs.add(target.binary) implicit_deps.add(target.binary + '.TOC') else: new_deps = [target.binary] for new_dep in new_deps: if new_dep not in extra_link_deps: extra_link_deps.add(new_dep) link_deps.append(new_dep) final_output = target.FinalOutput() if not linkable or final_output != target.binary: implicit_deps.add(final_output) extra_bindings = [] if self.uses_cpp and self.flavor != 'win': extra_bindings.append(('ld', '$ldxx')) output = self.ComputeOutput(spec, arch) if arch is None and not self.is_mac_bundle: self.AppendPostbuildVariable(extra_bindings, spec, output, output) is_executable = spec['type'] == 'executable' # The ldflags config key is not used on mac or win. On those platforms # linker flags are set via xcode_settings and msvs_settings, respectively. 
env_ldflags = os.environ.get('LDFLAGS', '').split() if self.flavor == 'mac': ldflags = self.xcode_settings.GetLdflags(config_name, self.ExpandSpecial(generator_default_variables['PRODUCT_DIR']), self.GypPathToNinja, arch) ldflags = env_ldflags + ldflags elif self.flavor == 'win': manifest_base_name = self.GypPathToUniqueOutput( self.ComputeOutputFileName(spec)) ldflags, intermediate_manifest, manifest_files = \ self.msvs_settings.GetLdflags(config_name, self.GypPathToNinja, self.ExpandSpecial, manifest_base_name, output, is_executable, self.toplevel_build) ldflags = env_ldflags + ldflags self.WriteVariableList(ninja_file, 'manifests', manifest_files) implicit_deps = implicit_deps.union(manifest_files) if intermediate_manifest: self.WriteVariableList( ninja_file, 'intermediatemanifest', [intermediate_manifest]) command_suffix = _GetWinLinkRuleNameSuffix( self.msvs_settings.IsEmbedManifest(config_name)) def_file = self.msvs_settings.GetDefFile(self.GypPathToNinja) if def_file: implicit_deps.add(def_file) else: # Respect environment variables related to build, but target-specific # flags can still override them. ldflags = env_ldflags + config.get('ldflags', []) if is_executable and len(solibs): rpath = 'lib/' if self.toolset != 'target': rpath += self.toolset ldflags.append(r'-Wl,-rpath=\$$ORIGIN/%s' % rpath) ldflags.append('-Wl,-rpath-link=%s' % rpath) self.WriteVariableList(ninja_file, 'ldflags', map(self.ExpandSpecial, ldflags)) library_dirs = config.get('library_dirs', []) if self.flavor == 'win': library_dirs = [self.msvs_settings.ConvertVSMacros(l, config_name) for l in library_dirs] library_dirs = ['/LIBPATH:' + QuoteShellArgument(self.GypPathToNinja(l), self.flavor) for l in library_dirs] else: library_dirs = [QuoteShellArgument('-L' + self.GypPathToNinja(l), self.flavor) for l in library_dirs] libraries = gyp.common.uniquer(map(self.ExpandSpecial, spec.get('libraries', []))) if self.flavor == 'mac': libraries = self.xcode_settings.AdjustLibraries(libraries, config_name) elif self.flavor == 'win': libraries = self.msvs_settings.AdjustLibraries(libraries) self.WriteVariableList(ninja_file, 'libs', library_dirs + libraries) linked_binary = output if command in ('solink', 'solink_module'): extra_bindings.append(('soname', os.path.split(output)[1])) extra_bindings.append(('lib', gyp.common.EncodePOSIXShellArgument(output))) if self.flavor != 'win': link_file_list = output if self.is_mac_bundle: # 'Dependency Framework.framework/Versions/A/Dependency Framework' -> # 'Dependency Framework.framework.rsp' link_file_list = self.xcode_settings.GetWrapperName() if arch: link_file_list += '.' 
          link_file_list += arch
        link_file_list += '.rsp'
        # If an rspfile contains spaces, ninja surrounds the filename with
        # quotes around it and then passes it to open(), creating a file with
        # quotes in its name (and when looking for the rsp file, the name
        # makes it through bash which strips the quotes) :-/
        link_file_list = link_file_list.replace(' ', '_')
        extra_bindings.append(
          ('link_file_list',
            gyp.common.EncodePOSIXShellArgument(link_file_list)))
      if self.flavor == 'win':
        extra_bindings.append(('binary', output))
        if ('/NOENTRY' not in ldflags and
            not self.msvs_settings.GetNoImportLibrary(config_name)):
          self.target.import_lib = output + '.lib'
          extra_bindings.append(('implibflag',
                                 '/IMPLIB:%s' % self.target.import_lib))
        pdbname = self.msvs_settings.GetPDBName(
            config_name, self.ExpandSpecial, output + '.pdb')
        output = [output, self.target.import_lib]
        if pdbname:
          output.append(pdbname)
      elif not self.is_mac_bundle:
        output = [output, output + '.TOC']
      else:
        command = command + '_notoc'
    elif self.flavor == 'win':
      extra_bindings.append(('binary', output))
      pdbname = self.msvs_settings.GetPDBName(
          config_name, self.ExpandSpecial, output + '.pdb')
      if pdbname:
        output = [output, pdbname]

    if len(solibs):
      extra_bindings.append(('solibs',
          gyp.common.EncodePOSIXShellList(solibs)))

    ninja_file.build(output, command + command_suffix, link_deps,
                     implicit=list(implicit_deps),
                     order_only=list(order_deps),
                     variables=extra_bindings)
    return linked_binary

  def WriteTarget(self, spec, config_name, config, link_deps, compile_deps):
    extra_link_deps = any(self.target_outputs.get(dep).Linkable()
                          for dep in spec.get('dependencies', [])
                          if dep in self.target_outputs)
    if spec['type'] == 'none' or (not link_deps and not extra_link_deps):
      # TODO(evan): don't call this function for 'none' target types, as
      # it doesn't do anything, and we fake out a 'binary' with a stamp file.
      self.target.binary = compile_deps
      self.target.type = 'none'
    elif spec['type'] == 'static_library':
      self.target.binary = self.ComputeOutput(spec)
      if (self.flavor not in ('mac', 'openbsd', 'netbsd', 'win') and not
          self.is_standalone_static_library):
        self.ninja.build(self.target.binary, 'alink_thin', link_deps,
                         order_only=compile_deps)
      else:
        variables = []
        if self.xcode_settings:
          libtool_flags = self.xcode_settings.GetLibtoolflags(config_name)
          if libtool_flags:
            variables.append(('libtool_flags', libtool_flags))
        if self.msvs_settings:
          libflags = self.msvs_settings.GetLibFlags(config_name,
                                                    self.GypPathToNinja)
          variables.append(('libflags', libflags))

        if self.flavor != 'mac' or len(self.archs) == 1:
          self.AppendPostbuildVariable(variables, spec,
                                       self.target.binary, self.target.binary)
          self.ninja.build(self.target.binary, 'alink', link_deps,
                           order_only=compile_deps, variables=variables)
        else:
          inputs = []
          for arch in self.archs:
            output = self.ComputeOutput(spec, arch)
            self.arch_subninjas[arch].build(output, 'alink', link_deps[arch],
                                            order_only=compile_deps,
                                            variables=variables)
            inputs.append(output)
          # TODO: It's not clear if libtool_flags should be passed to the alink
          # call that combines single-arch .a files into a fat .a file.
          self.AppendPostbuildVariable(variables, spec,
                                       self.target.binary, self.target.binary)
          self.ninja.build(self.target.binary, 'alink', inputs,
                           # FIXME: test proving order_only=compile_deps isn't
                           # needed.
variables=variables) else: self.target.binary = self.WriteLink(spec, config_name, config, link_deps) return self.target.binary def WriteMacBundle(self, spec, mac_bundle_depends, is_empty): assert self.is_mac_bundle package_framework = spec['type'] in ('shared_library', 'loadable_module') output = self.ComputeMacBundleOutput() if is_empty: output += '.stamp' variables = [] self.AppendPostbuildVariable(variables, spec, output, self.target.binary, is_command_start=not package_framework) if package_framework and not is_empty: variables.append(('version', self.xcode_settings.GetFrameworkVersion())) self.ninja.build(output, 'package_framework', mac_bundle_depends, variables=variables) else: self.ninja.build(output, 'stamp', mac_bundle_depends, variables=variables) self.target.bundle = output return output def GetToolchainEnv(self, additional_settings=None): """Returns the variables toolchain would set for build steps.""" env = self.GetSortedXcodeEnv(additional_settings=additional_settings) if self.flavor == 'win': env = self.GetMsvsToolchainEnv( additional_settings=additional_settings) return env def GetMsvsToolchainEnv(self, additional_settings=None): """Returns the variables Visual Studio would set for build steps.""" return self.msvs_settings.GetVSMacroEnv('$!PRODUCT_DIR', config=self.config_name) def GetSortedXcodeEnv(self, additional_settings=None): """Returns the variables Xcode would set for build steps.""" assert self.abs_build_dir abs_build_dir = self.abs_build_dir return gyp.xcode_emulation.GetSortedXcodeEnv( self.xcode_settings, abs_build_dir, os.path.join(abs_build_dir, self.build_to_base), self.config_name, additional_settings) def GetSortedXcodePostbuildEnv(self): """Returns the variables Xcode would set for postbuild steps.""" postbuild_settings = {} # CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack. # TODO(thakis): It would be nice to have some general mechanism instead. strip_save_file = self.xcode_settings.GetPerTargetSetting( 'CHROMIUM_STRIP_SAVE_FILE') if strip_save_file: postbuild_settings['CHROMIUM_STRIP_SAVE_FILE'] = strip_save_file return self.GetSortedXcodeEnv(additional_settings=postbuild_settings) def AppendPostbuildVariable(self, variables, spec, output, binary, is_command_start=False): """Adds a 'postbuild' variable if there is a postbuild for |output|.""" postbuild = self.GetPostbuildCommand(spec, output, binary, is_command_start) if postbuild: variables.append(('postbuilds', postbuild)) def GetPostbuildCommand(self, spec, output, output_binary, is_command_start): """Returns a shell command that runs all the postbuilds, and removes |output| if any of them fails. If |is_command_start| is False, then the returned string will start with ' && '.""" if not self.xcode_settings or spec['type'] == 'none' or not output: return '' output = QuoteShellArgument(output, self.flavor) postbuilds = gyp.xcode_emulation.GetSpecPostbuildCommands(spec, quiet=True) if output_binary is not None: postbuilds = self.xcode_settings.AddImplicitPostbuilds( self.config_name, os.path.normpath(os.path.join(self.base_to_build, output)), QuoteShellArgument( os.path.normpath(os.path.join(self.base_to_build, output_binary)), self.flavor), postbuilds, quiet=True) if not postbuilds: return '' # Postbuilds expect to be run in the gyp file's directory, so insert an # implicit postbuild to cd to there. postbuilds.insert(0, gyp.common.EncodePOSIXShellList( ['cd', self.build_to_base])) env = self.ComputeExportEnvString(self.GetSortedXcodePostbuildEnv()) # G will be non-null if any postbuild fails. 
    # Run all postbuilds in a subshell.
    commands = env + ' (' + \
        ' && '.join([ninja_syntax.escape(command) for command in postbuilds])
    command_string = (commands + '); G=$$?; '
                      # Remove the final output if any postbuild failed.
                      '((exit $$G) || rm -rf %s) ' % output + '&& exit $$G)')
    if is_command_start:
      return '(' + command_string + ' && '
    else:
      return '$ && (' + command_string

  def ComputeExportEnvString(self, env):
    """Given an environment, returns a string looking like
        'export FOO=foo; export BAR="${FOO} bar";'
    that exports |env| to the shell."""
    export_str = []
    for k, v in env:
      export_str.append('export %s=%s;' %
          (k, ninja_syntax.escape(gyp.common.EncodePOSIXShellArgument(v))))
    return ' '.join(export_str)

  def ComputeMacBundleOutput(self):
    """Return the 'output' (full output path) to a bundle output directory."""
    assert self.is_mac_bundle
    path = generator_default_variables['PRODUCT_DIR']
    return self.ExpandSpecial(
        os.path.join(path, self.xcode_settings.GetWrapperName()))

  def ComputeOutputFileName(self, spec, type=None):
    """Compute the filename of the final output for the current target."""
    if not type:
      type = spec['type']

    default_variables = copy.copy(generator_default_variables)
    CalculateVariables(default_variables, {'flavor': self.flavor})

    # Compute filename prefix: the product prefix, or a default for
    # the product type.
    DEFAULT_PREFIX = {
      'loadable_module': default_variables['SHARED_LIB_PREFIX'],
      'shared_library': default_variables['SHARED_LIB_PREFIX'],
      'static_library': default_variables['STATIC_LIB_PREFIX'],
      'executable': default_variables['EXECUTABLE_PREFIX'],
    }
    prefix = spec.get('product_prefix', DEFAULT_PREFIX.get(type, ''))

    # Compute filename extension: the product extension, or a default
    # for the product type.
    DEFAULT_EXTENSION = {
      'loadable_module': default_variables['SHARED_LIB_SUFFIX'],
      'shared_library': default_variables['SHARED_LIB_SUFFIX'],
      'static_library': default_variables['STATIC_LIB_SUFFIX'],
      'executable': default_variables['EXECUTABLE_SUFFIX'],
    }
    extension = spec.get('product_extension')
    if extension:
      extension = '.' + extension
    else:
      extension = DEFAULT_EXTENSION.get(type, '')

    if 'product_name' in spec:
      # If we were given an explicit name, use that.
      target = spec['product_name']
    else:
      # Otherwise, derive a name from the target name.
      target = spec['target_name']
      if prefix == 'lib':
        # Snip out an extra 'lib' from libs if appropriate.
        target = StripPrefix(target, 'lib')

    if type in ('static_library', 'loadable_module', 'shared_library',
                'executable'):
      return '%s%s%s' % (prefix, target, extension)
    elif type == 'none':
      return '%s.stamp' % target
    else:
      raise Exception('Unhandled output type %s' % type)

  def ComputeOutput(self, spec, arch=None):
    """Compute the path for the final output of the spec."""
    type = spec['type']

    if self.flavor == 'win':
      override = self.msvs_settings.GetOutputName(self.config_name,
                                                  self.ExpandSpecial)
      if override:
        return override

    if arch is None and self.flavor == 'mac' and type in (
        'static_library', 'executable', 'shared_library', 'loadable_module'):
      filename = self.xcode_settings.GetExecutablePath()
    else:
      filename = self.ComputeOutputFileName(spec, type)

    if arch is None and 'product_dir' in spec:
      path = os.path.join(spec['product_dir'], filename)
      return self.ExpandSpecial(path)

    # Some products go into the output root, libraries go into shared library
    # dir, and everything else goes into the normal place.
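    # For example (hypothetical target names): on linux an executable 'foo'
    # typically lands at $builddir/foo, while a shared_library 'foo' lands
    # at $builddir/lib/libfoo.so.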
type_in_output_root = ['executable', 'loadable_module'] if self.flavor == 'mac' and self.toolset == 'target': type_in_output_root += ['shared_library', 'static_library'] elif self.flavor == 'win' and self.toolset == 'target': type_in_output_root += ['shared_library'] if arch is not None: # Make sure partial executables don't end up in a bundle or the regular # output directory. archdir = 'arch' if self.toolset != 'target': archdir = os.path.join('arch', '%s' % self.toolset) return os.path.join(archdir, AddArch(filename, arch)) elif type in type_in_output_root or self.is_standalone_static_library: return filename elif type == 'shared_library': libdir = 'lib' if self.toolset != 'target': libdir = os.path.join('lib', '%s' % self.toolset) return os.path.join(libdir, filename) else: return self.GypPathToUniqueOutput(filename, qualified=False) def WriteVariableList(self, ninja_file, var, values): assert not isinstance(values, str) if values is None: values = [] ninja_file.variable(var, ' '.join(values)) def WriteNewNinjaRule(self, name, args, description, is_cygwin, env, pool, depfile=None): """Write out a new ninja "rule" statement for a given command. Returns the name of the new rule, and a copy of |args| with variables expanded.""" if self.flavor == 'win': args = [self.msvs_settings.ConvertVSMacros( arg, self.base_to_build, config=self.config_name) for arg in args] description = self.msvs_settings.ConvertVSMacros( description, config=self.config_name) elif self.flavor == 'mac': # |env| is an empty list on non-mac. args = [gyp.xcode_emulation.ExpandEnvVars(arg, env) for arg in args] description = gyp.xcode_emulation.ExpandEnvVars(description, env) # TODO: we shouldn't need to qualify names; we do it because # currently the ninja rule namespace is global, but it really # should be scoped to the subninja. rule_name = self.name if self.toolset == 'target': rule_name += '.' + self.toolset rule_name += '.' + name rule_name = re.sub('[^a-zA-Z0-9_]', '_', rule_name) # Remove variable references, but not if they refer to the magic rule # variables. This is not quite right, as it also protects these for # actions, not just for rules where they are valid. Good enough. protect = [ '${root}', '${dirname}', '${source}', '${ext}', '${name}' ] protect = '(?!' + '|'.join(map(re.escape, protect)) + ')' description = re.sub(protect + r'\$', '_', description) # gyp dictates that commands are run from the base directory. # cd into the directory before running, and adjust paths in # the arguments to point to the proper locations. rspfile = None rspfile_content = None args = [self.ExpandSpecial(arg, self.base_to_build) for arg in args] if self.flavor == 'win': rspfile = rule_name + '.$unique_name.rsp' # The cygwin case handles this inside the bash sub-shell. run_in = '' if is_cygwin else ' ' + self.build_to_base if is_cygwin: rspfile_content = self.msvs_settings.BuildCygwinBashCommandLine( args, self.build_to_base) else: rspfile_content = gyp.msvs_emulation.EncodeRspFileList(args) command = ('%s gyp-win-tool action-wrapper $arch ' % sys.executable + rspfile + run_in) else: env = self.ComputeExportEnvString(env) command = gyp.common.EncodePOSIXShellList(args) command = 'cd %s; ' % self.build_to_base + env + command # GYP rules/actions express being no-ops by not touching their outputs. # Avoid executing downstream dependencies in this case by specifying # restat=1 to ninja. 
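    # With restat, ninja re-stats the declared outputs after the command
    # finishes and skips dependent edges when the outputs turn out to be
    # unchanged.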
self.ninja.rule(rule_name, command, description, depfile=depfile, restat=True, pool=pool, rspfile=rspfile, rspfile_content=rspfile_content) self.ninja.newline() return rule_name, args def CalculateVariables(default_variables, params): """Calculate additional variables for use in the build (called by gyp).""" global generator_additional_non_configuration_keys global generator_additional_path_sections flavor = gyp.common.GetFlavor(params) if flavor == 'mac': default_variables.setdefault('OS', 'mac') default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib') default_variables.setdefault('SHARED_LIB_DIR', generator_default_variables['PRODUCT_DIR']) default_variables.setdefault('LIB_DIR', generator_default_variables['PRODUCT_DIR']) # Copy additional generator configuration data from Xcode, which is shared # by the Mac Ninja generator. import gyp.generator.xcode as xcode_generator generator_additional_non_configuration_keys = getattr(xcode_generator, 'generator_additional_non_configuration_keys', []) generator_additional_path_sections = getattr(xcode_generator, 'generator_additional_path_sections', []) global generator_extra_sources_for_rules generator_extra_sources_for_rules = getattr(xcode_generator, 'generator_extra_sources_for_rules', []) elif flavor == 'win': exts = gyp.MSVSUtil.TARGET_TYPE_EXT default_variables.setdefault('OS', 'win') default_variables['EXECUTABLE_SUFFIX'] = '.' + exts['executable'] default_variables['STATIC_LIB_PREFIX'] = '' default_variables['STATIC_LIB_SUFFIX'] = '.' + exts['static_library'] default_variables['SHARED_LIB_PREFIX'] = '' default_variables['SHARED_LIB_SUFFIX'] = '.' + exts['shared_library'] # Copy additional generator configuration data from VS, which is shared # by the Windows Ninja generator. import gyp.generator.msvs as msvs_generator generator_additional_non_configuration_keys = getattr(msvs_generator, 'generator_additional_non_configuration_keys', []) generator_additional_path_sections = getattr(msvs_generator, 'generator_additional_path_sections', []) gyp.msvs_emulation.CalculateCommonVariables(default_variables, params) else: operating_system = flavor if flavor == 'android': operating_system = 'linux' # Keep this legacy behavior for now. default_variables.setdefault('OS', operating_system) default_variables.setdefault('SHARED_LIB_SUFFIX', '.so') default_variables.setdefault('SHARED_LIB_DIR', os.path.join('$!PRODUCT_DIR', 'lib')) default_variables.setdefault('LIB_DIR', os.path.join('$!PRODUCT_DIR', 'obj')) def ComputeOutputDir(params): """Returns the path from the toplevel_dir to the build output directory.""" # generator_dir: relative path from pwd to where make puts build files. # Makes migrating from make to ninja easier, ninja doesn't put anything here. generator_dir = os.path.relpath(params['options'].generator_output or '.') # output_dir: relative path from generator_dir to the build directory. output_dir = params.get('generator_flags', {}).get('output_dir', 'out') # Relative path from source root to our output files. e.g. "out" return os.path.normpath(os.path.join(generator_dir, output_dir)) def CalculateGeneratorInputInfo(params): """Called by __init__ to initialize generator values based on params.""" # E.g. 
"out/gypfiles" toplevel = params['options'].toplevel_dir qualified_out_dir = os.path.normpath(os.path.join( toplevel, ComputeOutputDir(params), 'gypfiles')) global generator_filelist_paths generator_filelist_paths = { 'toplevel': toplevel, 'qualified_out_dir': qualified_out_dir, } def OpenOutput(path, mode='w'): """Open |path| for writing, creating directories if necessary.""" gyp.common.EnsureDirExists(path) return open(path, mode) def CommandWithWrapper(cmd, wrappers, prog): wrapper = wrappers.get(cmd, '') if wrapper: return wrapper + ' ' + prog return prog def GetDefaultConcurrentLinks(): """Returns a best-guess for a number of concurrent links.""" pool_size = int(os.environ.get('GYP_LINK_CONCURRENCY', 0)) if pool_size: return pool_size if sys.platform in ('win32', 'cygwin'): import ctypes class MEMORYSTATUSEX(ctypes.Structure): _fields_ = [ ("dwLength", ctypes.c_ulong), ("dwMemoryLoad", ctypes.c_ulong), ("ullTotalPhys", ctypes.c_ulonglong), ("ullAvailPhys", ctypes.c_ulonglong), ("ullTotalPageFile", ctypes.c_ulonglong), ("ullAvailPageFile", ctypes.c_ulonglong), ("ullTotalVirtual", ctypes.c_ulonglong), ("ullAvailVirtual", ctypes.c_ulonglong), ("sullAvailExtendedVirtual", ctypes.c_ulonglong), ] stat = MEMORYSTATUSEX() stat.dwLength = ctypes.sizeof(stat) ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(stat)) # VS 2015 uses 20% more working set than VS 2013 and can consume all RAM # on a 64 GB machine. mem_limit = max(1, stat.ullTotalPhys / (5 * (2 ** 30))) # total / 5GB hard_cap = max(1, int(os.environ.get('GYP_LINK_CONCURRENCY_MAX', 2**32))) return min(mem_limit, hard_cap) elif sys.platform.startswith('linux'): if os.path.exists("/proc/meminfo"): with open("/proc/meminfo") as meminfo: memtotal_re = re.compile(r'^MemTotal:\s*(\d*)\s*kB') for line in meminfo: match = memtotal_re.match(line) if not match: continue # Allow 8Gb per link on Linux because Gold is quite memory hungry return max(1, int(match.group(1)) / (8 * (2 ** 20))) return 1 elif sys.platform == 'darwin': try: avail_bytes = int(subprocess.check_output(['sysctl', '-n', 'hw.memsize'])) # A static library debug build of Chromium's unit_tests takes ~2.7GB, so # 4GB per ld process allows for some more bloat. return max(1, avail_bytes / (4 * (2 ** 30))) # total / 4GB except: return 1 else: # TODO(scottmg): Implement this for other platforms. 
    return 1


def _GetWinLinkRuleNameSuffix(embed_manifest):
  """Returns the suffix used to select an appropriate linking rule depending on
     whether the manifest embedding is enabled."""
  return '_embed' if embed_manifest else ''


def _AddWinLinkRules(master_ninja, embed_manifest):
  """Adds link rules for Windows platform to |master_ninja|."""
  def FullLinkCommand(ldcmd, out, binary_type):
    resource_name = {
      'exe': '1',
      'dll': '2',
    }[binary_type]
    return '%(python)s gyp-win-tool link-with-manifests $arch %(embed)s ' \
           '%(out)s "%(ldcmd)s" %(resname)s $mt $rc "$intermediatemanifest" ' \
           '$manifests' % {
               'python': sys.executable,
               'out': out,
               'ldcmd': ldcmd,
               'resname': resource_name,
               'embed': embed_manifest }
  rule_name_suffix = _GetWinLinkRuleNameSuffix(embed_manifest)
  use_separate_mspdbsrv = (
      int(os.environ.get('GYP_USE_SEPARATE_MSPDBSRV', '0')) != 0)
  dlldesc = 'LINK%s(DLL) $binary' % rule_name_suffix.upper()
  dllcmd = ('%s gyp-win-tool link-wrapper $arch %s '
            '$ld /nologo $implibflag /DLL /OUT:$binary '
            '@$binary.rsp' % (sys.executable, use_separate_mspdbsrv))
  dllcmd = FullLinkCommand(dllcmd, '$binary', 'dll')
  master_ninja.rule('solink' + rule_name_suffix,
                    description=dlldesc, command=dllcmd,
                    rspfile='$binary.rsp',
                    rspfile_content='$libs $in_newline $ldflags',
                    restat=True, pool='link_pool')
  master_ninja.rule('solink_module' + rule_name_suffix,
                    description=dlldesc, command=dllcmd,
                    rspfile='$binary.rsp',
                    rspfile_content='$libs $in_newline $ldflags',
                    restat=True, pool='link_pool')
  # Note that ldflags goes at the end so that it has the option of
  # overriding default settings earlier in the command line.
  exe_cmd = ('%s gyp-win-tool link-wrapper $arch %s '
             '$ld /nologo /OUT:$binary @$binary.rsp' %
             (sys.executable, use_separate_mspdbsrv))
  exe_cmd = FullLinkCommand(exe_cmd, '$binary', 'exe')
  master_ninja.rule('link' + rule_name_suffix,
                    description='LINK%s $binary' % rule_name_suffix.upper(),
                    command=exe_cmd,
                    rspfile='$binary.rsp',
                    rspfile_content='$in_newline $libs $ldflags',
                    pool='link_pool')


def GenerateOutputForConfig(target_list, target_dicts, data, params,
                            config_name):
  options = params['options']
  flavor = gyp.common.GetFlavor(params)
  generator_flags = params.get('generator_flags', {})

  # build_dir: relative path from source root to our output files.
  # e.g. "out/Debug"
  build_dir = os.path.normpath(
      os.path.join(ComputeOutputDir(params), config_name))

  toplevel_build = os.path.join(options.toplevel_dir, build_dir)

  master_ninja_file = OpenOutput(os.path.join(toplevel_build, 'build.ninja'))
  master_ninja = ninja_syntax.Writer(master_ninja_file, width=120)

  # Put build-time support tools in out/{config_name}.
  gyp.common.CopyTool(flavor, toplevel_build)

  # Grab make settings for CC/CXX.
  # The rules are
  # - The priority from low to high is gcc/g++, the 'make_global_settings' in
  #   gyp, the environment variable.
  # - If there is no 'make_global_settings' for CC.host/CXX.host or
  #   'CC_host'/'CXX_host' environment variable, cc_host/cxx_host should be
  #   set to cc/cxx.
  if flavor == 'win':
    ar = 'lib.exe'
    # cc and cxx must be set to the correct architecture by overriding with one
    # of cl_x86 or cl_x64 below.
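    # 'UNSET' is not a real binary, so a build in which the per-arch override
    # below never took effect fails loudly instead of silently compiling with
    # the wrong toolchain.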
cc = 'UNSET' cxx = 'UNSET' ld = 'link.exe' ld_host = '$ld' else: ar = 'ar' cc = 'cc' cxx = 'c++' ld = '$cc' ldxx = '$cxx' ld_host = '$cc_host' ldxx_host = '$cxx_host' ar_host = 'ar' cc_host = None cxx_host = None cc_host_global_setting = None cxx_host_global_setting = None clang_cl = None nm = 'nm' nm_host = 'nm' readelf = 'readelf' readelf_host = 'readelf' build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0]) make_global_settings = data[build_file].get('make_global_settings', []) build_to_root = gyp.common.InvertRelativePath(build_dir, options.toplevel_dir) wrappers = {} for key, value in make_global_settings: if key == 'AR': ar = os.path.join(build_to_root, value) if key == 'AR.host': ar_host = os.path.join(build_to_root, value) if key == 'CC': cc = os.path.join(build_to_root, value) if cc.endswith('clang-cl'): clang_cl = cc if key == 'CXX': cxx = os.path.join(build_to_root, value) if key == 'CC.host': cc_host = os.path.join(build_to_root, value) cc_host_global_setting = value if key == 'CXX.host': cxx_host = os.path.join(build_to_root, value) cxx_host_global_setting = value if key == 'LD': ld = os.path.join(build_to_root, value) if key == 'LD.host': ld_host = os.path.join(build_to_root, value) if key == 'NM': nm = os.path.join(build_to_root, value) if key == 'NM.host': nm_host = os.path.join(build_to_root, value) if key == 'READELF': readelf = os.path.join(build_to_root, value) if key == 'READELF.host': readelf_host = os.path.join(build_to_root, value) if key.endswith('_wrapper'): wrappers[key[:-len('_wrapper')]] = os.path.join(build_to_root, value) # Support wrappers from environment variables too. for key, value in os.environ.iteritems(): if key.lower().endswith('_wrapper'): key_prefix = key[:-len('_wrapper')] key_prefix = re.sub(r'\.HOST$', '.host', key_prefix) wrappers[key_prefix] = os.path.join(build_to_root, value) if flavor == 'win': configs = [target_dicts[qualified_target]['configurations'][config_name] for qualified_target in target_list] shared_system_includes = None if not generator_flags.get('ninja_use_custom_environment_files', 0): shared_system_includes = \ gyp.msvs_emulation.ExtractSharedMSVSSystemIncludes( configs, generator_flags) cl_paths = gyp.msvs_emulation.GenerateEnvironmentFiles( toplevel_build, generator_flags, shared_system_includes, OpenOutput) for arch, path in cl_paths.iteritems(): if clang_cl: # If we have selected clang-cl, use that instead. path = clang_cl command = CommandWithWrapper('CC', wrappers, QuoteShellArgument(path, 'win')) if clang_cl: # Use clang-cl to cross-compile for x86 or x86_64. 
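        # clang-cl accepts the GCC-style -m32/-m64 flags to select the target
        # architecture.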
command += (' -m32' if arch == 'x86' else ' -m64') master_ninja.variable('cl_' + arch, command) cc = GetEnvironFallback(['CC_target', 'CC'], cc) master_ninja.variable('cc', CommandWithWrapper('CC', wrappers, cc)) cxx = GetEnvironFallback(['CXX_target', 'CXX'], cxx) master_ninja.variable('cxx', CommandWithWrapper('CXX', wrappers, cxx)) if flavor == 'win': master_ninja.variable('ld', ld) master_ninja.variable('idl', 'midl.exe') master_ninja.variable('ar', ar) master_ninja.variable('rc', 'rc.exe') master_ninja.variable('ml_x86', 'ml.exe') master_ninja.variable('ml_x64', 'ml64.exe') master_ninja.variable('mt', 'mt.exe') else: master_ninja.variable('ld', CommandWithWrapper('LINK', wrappers, ld)) master_ninja.variable('ldxx', CommandWithWrapper('LINK', wrappers, ldxx)) master_ninja.variable('ar', GetEnvironFallback(['AR_target', 'AR'], ar)) if flavor != 'mac': # Mac does not use readelf/nm for .TOC generation, so avoiding polluting # the master ninja with extra unused variables. master_ninja.variable( 'nm', GetEnvironFallback(['NM_target', 'NM'], nm)) master_ninja.variable( 'readelf', GetEnvironFallback(['READELF_target', 'READELF'], readelf)) if generator_supports_multiple_toolsets: if not cc_host: cc_host = cc if not cxx_host: cxx_host = cxx master_ninja.variable('ar_host', GetEnvironFallback(['AR_host'], ar_host)) master_ninja.variable('nm_host', GetEnvironFallback(['NM_host'], nm_host)) master_ninja.variable('readelf_host', GetEnvironFallback(['READELF_host'], readelf_host)) cc_host = GetEnvironFallback(['CC_host'], cc_host) cxx_host = GetEnvironFallback(['CXX_host'], cxx_host) # The environment variable could be used in 'make_global_settings', like # ['CC.host', '$(CC)'] or ['CXX.host', '$(CXX)'], transform them here. if '$(CC)' in cc_host and cc_host_global_setting: cc_host = cc_host_global_setting.replace('$(CC)', cc) if '$(CXX)' in cxx_host and cxx_host_global_setting: cxx_host = cxx_host_global_setting.replace('$(CXX)', cxx) master_ninja.variable('cc_host', CommandWithWrapper('CC.host', wrappers, cc_host)) master_ninja.variable('cxx_host', CommandWithWrapper('CXX.host', wrappers, cxx_host)) if flavor == 'win': master_ninja.variable('ld_host', ld_host) else: master_ninja.variable('ld_host', CommandWithWrapper( 'LINK', wrappers, ld_host)) master_ninja.variable('ldxx_host', CommandWithWrapper( 'LINK', wrappers, ldxx_host)) master_ninja.newline() master_ninja.pool('link_pool', depth=GetDefaultConcurrentLinks()) master_ninja.newline() deps = 'msvc' if flavor == 'win' else 'gcc' if flavor != 'win': master_ninja.rule( 'cc', description='CC $out', command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_c ' '$cflags_pch_c -c $in -o $out'), depfile='$out.d', deps=deps) master_ninja.rule( 'cc_s', description='CC $out', command=('$cc $defines $includes $cflags $cflags_c ' '$cflags_pch_c -c $in -o $out')) master_ninja.rule( 'cxx', description='CXX $out', command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_cc ' '$cflags_pch_cc -c $in -o $out'), depfile='$out.d', deps=deps) else: # TODO(scottmg) Separate pdb names is a test to see if it works around # http://crbug.com/142362. It seems there's a race between the creation of # the .pdb by the precompiled header step for .cc and the compilation of # .c files. This should be handled by mspdbsrv, but rarely errors out with # c1xx : fatal error C1033: cannot open program database # By making the rules target separate pdb files this might be avoided. 
    cc_command = ('ninja -t msvc -e $arch ' +
                  '-- '
                  '$cc /nologo /showIncludes /FC '
                  '@$out.rsp /c $in /Fo$out /Fd$pdbname_c ')
    cxx_command = ('ninja -t msvc -e $arch ' +
                   '-- '
                   '$cxx /nologo /showIncludes /FC '
                   '@$out.rsp /c $in /Fo$out /Fd$pdbname_cc ')
    master_ninja.rule(
      'cc',
      description='CC $out',
      command=cc_command,
      rspfile='$out.rsp',
      rspfile_content='$defines $includes $cflags $cflags_c',
      deps=deps)
    master_ninja.rule(
      'cxx',
      description='CXX $out',
      command=cxx_command,
      rspfile='$out.rsp',
      rspfile_content='$defines $includes $cflags $cflags_cc',
      deps=deps)
    master_ninja.rule(
      'idl',
      description='IDL $in',
      command=('%s gyp-win-tool midl-wrapper $arch $outdir '
               '$tlb $h $dlldata $iid $proxy $in '
               '$midl_includes $idlflags' % sys.executable))
    master_ninja.rule(
      'rc',
      description='RC $in',
      # Note: $in must be last otherwise rc.exe complains.
      command=('%s gyp-win-tool rc-wrapper '
               '$arch $rc $defines $resource_includes $rcflags /fo$out $in' %
               sys.executable))
    master_ninja.rule(
      'asm',
      description='ASM $out',
      command=('%s gyp-win-tool asm-wrapper '
               '$arch $asm $defines $includes $asmflags /c /Fo $out $in' %
               sys.executable))

  if flavor != 'mac' and flavor != 'win':
    master_ninja.rule(
      'alink',
      description='AR $out',
      command='rm -f $out && $ar rcs $arflags $out $in')
    master_ninja.rule(
      'alink_thin',
      description='AR $out',
      command='rm -f $out && $ar rcsT $arflags $out $in')

    # This allows targets that only need to depend on $lib's API to declare an
    # order-only dependency on $lib.TOC and avoid relinking such downstream
    # dependencies when $lib changes only in non-public ways.
    # The resulting string leaves an uninterpolated %(suffix)s which
    # is used in the final substitution below.
    mtime_preserving_solink_base = (
        'if [ ! -e $lib -o ! -e $lib.TOC ]; then '
        '%(solink)s && %(extract_toc)s > $lib.TOC; else '
        '%(solink)s && %(extract_toc)s > $lib.tmp && '
        'if ! cmp -s $lib.tmp $lib.TOC; then mv $lib.tmp $lib.TOC ; '
        'fi; fi'
        % { 'solink':
              '$ld -shared $ldflags -o $lib -Wl,-soname=$soname %(suffix)s',
            'extract_toc':
              ('{ $readelf -d $lib | grep SONAME ; '
               '$nm -gD -f p $lib | cut -f1-2 -d\' \'; }')})

    master_ninja.rule(
      'solink',
      description='SOLINK $lib',
      restat=True,
      command=mtime_preserving_solink_base % {'suffix': '@$link_file_list'},
      rspfile='$link_file_list',
      rspfile_content=
          '-Wl,--whole-archive $in $solibs -Wl,--no-whole-archive $libs',
      pool='link_pool')
    master_ninja.rule(
      'solink_module',
      description='SOLINK(module) $lib',
      restat=True,
      command=mtime_preserving_solink_base % {'suffix': '@$link_file_list'},
      rspfile='$link_file_list',
      rspfile_content='-Wl,--start-group $in -Wl,--end-group $solibs $libs',
      pool='link_pool')
    master_ninja.rule(
      'link',
      description='LINK $out',
      command=('$ld $ldflags -o $out '
               '-Wl,--start-group $in -Wl,--end-group $solibs $libs'),
      pool='link_pool')
  elif flavor == 'win':
    master_ninja.rule(
      'alink',
      description='LIB $out',
      command=('%s gyp-win-tool link-wrapper $arch False '
               '$ar /nologo /ignore:4221 /OUT:$out @$out.rsp' %
               sys.executable),
      rspfile='$out.rsp',
      rspfile_content='$in_newline $libflags')
    _AddWinLinkRules(master_ninja, embed_manifest=True)
    _AddWinLinkRules(master_ninja, embed_manifest=False)
  else:
    master_ninja.rule(
      'objc',
      description='OBJC $out',
      command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_objc '
               '$cflags_pch_objc -c $in -o $out'),
      depfile='$out.d',
      deps=deps)
    master_ninja.rule(
      'objcxx',
      description='OBJCXX $out',
      command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_objcc '
               '$cflags_pch_objcc -c $in -o $out'),
      depfile='$out.d',
      deps=deps)
    master_ninja.rule(
      'alink',
      description='LIBTOOL-STATIC $out, POSTBUILDS',
      command='rm -f $out && '
              './gyp-mac-tool filter-libtool libtool $libtool_flags '
              '-static -o $out $in'
              '$postbuilds')
    master_ninja.rule(
      'lipo',
      description='LIPO $out, POSTBUILDS',
      command='rm -f $out && lipo -create $in -output $out$postbuilds')
    master_ninja.rule(
      'solipo',
      description='SOLIPO $out, POSTBUILDS',
      command=(
          'rm -f $lib $lib.TOC && lipo -create $in -output $lib$postbuilds &&'
          '%(extract_toc)s > $lib.TOC' % {
              'extract_toc':
                  '{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
                  'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'}))

    # Record the public interface of $lib in $lib.TOC. See the corresponding
    # comment in the posix section above for details.
    solink_base = '$ld %(type)s $ldflags -o $lib %(suffix)s'
    mtime_preserving_solink_base = (
        'if [ ! -e $lib -o ! -e $lib.TOC ] || '
        # Always force dependent targets to relink if this library
        # reexports something. Handling this correctly would require
        # recursive TOC dumping but this is rare in practice, so punt.
        'otool -l $lib | grep -q LC_REEXPORT_DYLIB ; then '
        '%(solink)s && %(extract_toc)s > $lib.TOC; '
        'else '
        '%(solink)s && %(extract_toc)s > $lib.tmp && '
        'if ! cmp -s $lib.tmp $lib.TOC; then '
        'mv $lib.tmp $lib.TOC ; '
        'fi; '
        'fi'
        % { 'solink': solink_base,
            'extract_toc':
              '{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
              'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'})

    solink_suffix = '@$link_file_list$postbuilds'
    master_ninja.rule(
      'solink',
      description='SOLINK $lib, POSTBUILDS',
      restat=True,
      command=mtime_preserving_solink_base % {'suffix': solink_suffix,
                                              'type': '-shared'},
      rspfile='$link_file_list',
      rspfile_content='$in $solibs $libs',
      pool='link_pool')
    master_ninja.rule(
      'solink_notoc',
      description='SOLINK $lib, POSTBUILDS',
      restat=True,
      command=solink_base % {'suffix': solink_suffix, 'type': '-shared'},
      rspfile='$link_file_list',
      rspfile_content='$in $solibs $libs',
      pool='link_pool')
    master_ninja.rule(
      'solink_module',
      description='SOLINK(module) $lib, POSTBUILDS',
      restat=True,
      command=mtime_preserving_solink_base % {'suffix': solink_suffix,
                                              'type': '-bundle'},
      rspfile='$link_file_list',
      rspfile_content='$in $solibs $libs',
      pool='link_pool')
    master_ninja.rule(
      'solink_module_notoc',
      description='SOLINK(module) $lib, POSTBUILDS',
      restat=True,
      command=solink_base % {'suffix': solink_suffix, 'type': '-bundle'},
      rspfile='$link_file_list',
      rspfile_content='$in $solibs $libs',
      pool='link_pool')
    master_ninja.rule(
      'link',
      description='LINK $out, POSTBUILDS',
      command=('$ld $ldflags -o $out '
               '$in $solibs $libs$postbuilds'),
      pool='link_pool')
    master_ninja.rule(
      'preprocess_infoplist',
      description='PREPROCESS INFOPLIST $out',
      command=('$cc -E -P -Wno-trigraphs -x c $defines $in -o $out && '
               'plutil -convert xml1 $out $out'))
    master_ninja.rule(
      'copy_infoplist',
      description='COPY INFOPLIST $in',
      command='$env ./gyp-mac-tool copy-info-plist $in $out $binary $keys')
    master_ninja.rule(
      'merge_infoplist',
      description='MERGE INFOPLISTS $in',
      command='$env ./gyp-mac-tool merge-info-plist $out $in')
    master_ninja.rule(
      'compile_xcassets',
      description='COMPILE XCASSETS $in',
      command='$env ./gyp-mac-tool compile-xcassets $keys $in')
    master_ninja.rule(
      'mac_tool',
      description='MACTOOL $mactool_cmd $in',
      command='$env ./gyp-mac-tool $mactool_cmd $in $out $binary')
    master_ninja.rule(
      'package_framework',
      description='PACKAGE FRAMEWORK $out, POSTBUILDS',
      command='./gyp-mac-tool package-framework $out $version$postbuilds '
              '&& touch $out')
  if flavor == 'win':
    master_ninja.rule(
      'stamp',
      description='STAMP $out',
      command='%s gyp-win-tool stamp $out' % sys.executable)
    master_ninja.rule(
      'copy',
      description='COPY $in $out',
      command='%s gyp-win-tool recursive-mirror $in $out' % sys.executable)
  else:
    master_ninja.rule(
      'stamp',
      description='STAMP $out',
      command='${postbuilds}touch $out')
    master_ninja.rule(
      'copy',
      description='COPY $in $out',
      command='rm -rf $out && cp -af $in $out')
  master_ninja.newline()

  all_targets = set()
  for build_file in params['build_files']:
    for target in gyp.common.AllTargets(target_list,
                                        target_dicts,
                                        os.path.normpath(build_file)):
      all_targets.add(target)
  all_outputs = set()

  # target_outputs is a map from qualified target name to a Target object.
  target_outputs = {}
  # target_short_names is a map from target short name to a list of Target
  # objects.
  target_short_names = {}

  # Short names of targets that were skipped because they didn't contain
  # anything interesting.
  # NOTE: there may be overlap between this and non_empty_target_names.
  empty_target_names = set()

  # Set of non-empty short target names.
  # NOTE: there may be overlap between this and empty_target_names.
non_empty_target_names = set() for qualified_target in target_list: # qualified_target is like: third_party/icu/icu.gyp:icui18n#target build_file, name, toolset = \ gyp.common.ParseQualifiedTarget(qualified_target) this_make_global_settings = data[build_file].get('make_global_settings', []) assert make_global_settings == this_make_global_settings, ( "make_global_settings needs to be the same for all targets. %s vs. %s" % (this_make_global_settings, make_global_settings)) spec = target_dicts[qualified_target] if flavor == 'mac': gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec) # If build_file is a symlink, we must not follow it because there's a chance # it could point to a path above toplevel_dir, and we cannot correctly deal # with that case at the moment. build_file = gyp.common.RelativePath(build_file, options.toplevel_dir, False) qualified_target_for_hash = gyp.common.QualifiedTarget(build_file, name, toolset) hash_for_rules = hashlib.md5(qualified_target_for_hash).hexdigest() base_path = os.path.dirname(build_file) obj = 'obj' if toolset != 'target': obj += '.' + toolset output_file = os.path.join(obj, base_path, name + '.ninja') ninja_output = StringIO() writer = NinjaWriter(hash_for_rules, target_outputs, base_path, build_dir, ninja_output, toplevel_build, output_file, flavor, toplevel_dir=options.toplevel_dir) target = writer.WriteSpec(spec, config_name, generator_flags) if ninja_output.tell() > 0: # Only create files for ninja files that actually have contents. with OpenOutput(os.path.join(toplevel_build, output_file)) as ninja_file: ninja_file.write(ninja_output.getvalue()) ninja_output.close() master_ninja.subninja(output_file) if target: if name != target.FinalOutput() and spec['toolset'] == 'target': target_short_names.setdefault(name, []).append(target) target_outputs[qualified_target] = target if qualified_target in all_targets: all_outputs.add(target.FinalOutput()) non_empty_target_names.add(name) else: empty_target_names.add(name) if target_short_names: # Write a short name to build this target. This benefits both the # "build chrome" case as well as the gyp tests, which expect to be # able to run actions and build libraries by their short name. master_ninja.newline() master_ninja.comment('Short names for targets.') for short_name in target_short_names: master_ninja.build(short_name, 'phony', [x.FinalOutput() for x in target_short_names[short_name]]) # Write phony targets for any empty targets that weren't written yet. As # short names are not necessarily unique only do this for short names that # haven't already been output for another target. empty_target_names = empty_target_names - non_empty_target_names if empty_target_names: master_ninja.newline() master_ninja.comment('Empty targets (output for completeness).') for name in sorted(empty_target_names): master_ninja.build(name, 'phony') if all_outputs: master_ninja.newline() master_ninja.build('all', 'phony', list(all_outputs)) master_ninja.default(generator_flags.get('default_target', 'all')) master_ninja_file.close() def PerformBuild(data, configurations, params): options = params['options'] for config in configurations: builddir = os.path.join(options.toplevel_dir, 'out', config) arguments = ['ninja', '-C', builddir] print 'Building [%s]: %s' % (config, arguments) subprocess.check_call(arguments) def CallGenerateOutputForConfig(arglist): # Ignore the interrupt signal so that the parent process catches it and # kills all multiprocessing children. 
signal.signal(signal.SIGINT, signal.SIG_IGN) (target_list, target_dicts, data, params, config_name) = arglist GenerateOutputForConfig(target_list, target_dicts, data, params, config_name) def GenerateOutput(target_list, target_dicts, data, params): # Update target_dicts for iOS device builds. target_dicts = gyp.xcode_emulation.CloneConfigurationForDeviceAndEmulator( target_dicts) user_config = params.get('generator_flags', {}).get('config', None) if gyp.common.GetFlavor(params) == 'win': target_list, target_dicts = MSVSUtil.ShardTargets(target_list, target_dicts) target_list, target_dicts = MSVSUtil.InsertLargePdbShims( target_list, target_dicts, generator_default_variables) if user_config: GenerateOutputForConfig(target_list, target_dicts, data, params, user_config) else: config_names = target_dicts[target_list[0]]['configurations'].keys() if params['parallel']: try: pool = multiprocessing.Pool(len(config_names)) arglists = [] for config_name in config_names: arglists.append( (target_list, target_dicts, data, params, config_name)) pool.map(CallGenerateOutputForConfig, arglists) except KeyboardInterrupt, e: pool.terminate() raise e else: for config_name in config_names: GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)
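The solink templates above are plain Python %-format strings that get expanded twice: once to splice the link and TOC-extraction commands together, and once more per rule to fill in the rspfile suffix. The short sketch below is an editor's addition, not gyp code; it performs the same two-step expansion for the POSIX branch and prints the final shell command ninja would run. Combined with restat=True, the `cmp -s` guard keeps $lib.TOC's mtime unchanged whenever the library's public interface did not change, so dependents are not relinked.

# Editor's sketch: two-step expansion of the mtime-preserving solink template.
solink_demo = (
    'if [ ! -e $lib -o ! -e $lib.TOC ]; then '
    '%(solink)s && %(extract_toc)s > $lib.TOC; else '
    '%(solink)s && %(extract_toc)s > $lib.tmp && '
    'if ! cmp -s $lib.tmp $lib.TOC; then mv $lib.tmp $lib.TOC ; '
    'fi; fi'
    % { 'solink':
          '$ld -shared $ldflags -o $lib -Wl,-soname=$soname %(suffix)s',
        'extract_toc':
          ('{ $readelf -d $lib | grep SONAME ; '
           '$nm -gD -f p $lib | cut -f1-2 -d\' \'; }')})

# Second expansion, exactly as the 'solink' rule does with its rspfile:
print(solink_demo % {'suffix': '@$link_file_list'})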
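GenerateOutput's parallel path also relies on a subtle Ctrl-C pattern: each worker installs SIG_IGN for SIGINT (see CallGenerateOutputForConfig) so the interrupt reaches only the parent, whose KeyboardInterrupt handler then terminates the whole pool. Below is a minimal standalone sketch of the same pattern, an editor's addition that uses a pool initializer rather than setting the handler inside the mapped function.

# Editor's sketch: workers ignore SIGINT; the parent terminates the pool.
import multiprocessing
import signal

def _init_worker():
  signal.signal(signal.SIGINT, signal.SIG_IGN)

def _square(n):
  return n * n

if __name__ == '__main__':
  pool = multiprocessing.Pool(2, initializer=_init_worker)
  try:
    print(pool.map(_square, range(8)))
  except KeyboardInterrupt:
    pool.terminate()
    raise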
apache-2.0
baixuexue123/djmo
utils/csv_response_.py
1
2274
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import

import csv
import codecs
import cStringIO

from django.http import StreamingHttpResponse
from django.views.generic import View

"""
A StreamingHttpResponse can produce a large file quickly and with very little
memory, by generating it row by row.
"""


class Echo(object):
    """An object that implements just the write method of the file-like
    interface."""

    def write(self, value):
        """Write the value by returning it, instead of storing in a buffer."""
        return value


class UnicodeWriter(object):
    """
    A CSV writer which will write rows to CSV file "f",
    which is encoded in the given encoding.
    """

    def __init__(self, f, dialect=csv.excel, encoding='utf-8', **kwargs):
        # Redirect output to a queue
        self.queue = cStringIO.StringIO()
        self.writer = csv.writer(self.queue, dialect=dialect, **kwargs)
        self.stream = f
        self.encoder = codecs.getincrementalencoder(encoding)()

    def writerow(self, row):
        # The original called an undefined handle_column(); encode each cell
        # explicitly instead, since Python 2's csv module only accepts bytes.
        self.writer.writerow([
            s.encode('utf-8') if isinstance(s, unicode) else s for s in row
        ])
        # Fetch UTF-8 output from the queue ...
        data = self.queue.getvalue()
        data = data.decode("utf-8")
        # ... and reencode it into the target encoding
        data = self.encoder.encode(data)
        # write to the target stream
        value = self.stream.write(data)
        # empty queue
        self.queue.truncate(0)
        return value

    def writerows(self, rows):
        for row in rows:
            self.writerow(row)


class ExampleView(View):
    headers = ('一些', '表头')

    def get(self, request):
        result = (
            ('第一行', '数据1'),
            ('第二行', '数据2'),
        )
        echoer = Echo()
        writer = UnicodeWriter(echoer)

        def csv_iterator():
            yield codecs.BOM_UTF8
            yield writer.writerow(self.headers)
            for column in result:
                yield writer.writerow(column)

        response = StreamingHttpResponse(
            (row for row in csv_iterator()),
            content_type="text/csv;charset=utf-8"
        )
        response['Content-Disposition'] = 'attachment;filename="example.csv"'
        return response
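A minimal wiring sketch for the view above, as an editor's addition; the URL pattern and the import path are assumptions about the project layout, not part of the original module.

# urls.py -- hypothetical wiring for ExampleView.
from django.conf.urls import url

from utils.csv_response_ import ExampleView

urlpatterns = [
    url(r'^export\.csv$', ExampleView.as_view()),
]

Requesting /export.csv then streams the rows as they are produced; the leading codecs.BOM_UTF8 chunk is what lets Excel detect the UTF-8 encoding of the download.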
mit
gunan/tensorflow
tensorflow/python/feature_column/feature_column_v2.py
1
192558
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This API defines FeatureColumn abstraction.

FeatureColumns provide a high level abstraction for ingesting and representing
features. FeatureColumns are also the primary way of encoding features for
canned `tf.estimator.Estimator`s.

When using FeatureColumns with `Estimators`, the type of feature column you
should choose depends on (1) the feature type and (2) the model type.

1. Feature type:

  * Continuous features can be represented by `numeric_column`.
  * Categorical features can be represented by any `categorical_column_with_*`
    column:
    - `categorical_column_with_vocabulary_list`
    - `categorical_column_with_vocabulary_file`
    - `categorical_column_with_hash_bucket`
    - `categorical_column_with_identity`
    - `weighted_categorical_column`

2. Model type:

  * Deep neural network models (`DNNClassifier`, `DNNRegressor`).

    Continuous features can be directly fed into deep neural network models.

      age_column = numeric_column("age")

    To feed sparse features into DNN models, wrap the column with
    `embedding_column` or `indicator_column`. `indicator_column` is recommended
    for features with only a few possible values. For features with many
    possible values, to reduce the size of your model, `embedding_column` is
    recommended.

      embedded_dept_column = embedding_column(
          categorical_column_with_vocabulary_list(
              "department", ["math", "philosophy", ...]), dimension=10)

  * Wide (aka linear) models (`LinearClassifier`, `LinearRegressor`).

    Sparse features can be fed directly into linear models. They behave like an
    indicator column but with an efficient implementation.

      dept_column = categorical_column_with_vocabulary_list("department",
          ["math", "philosophy", "english"])

    It is recommended that continuous features be bucketized before being fed
    into linear models.

      bucketized_age_column = bucketized_column(
          source_column=age_column,
          boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])

    Sparse features can be crossed (also known as conjuncted or combined) in
    order to form non-linearities, and then fed into linear models.

      cross_dept_age_column = crossed_column(
          columns=["department", bucketized_age_column], hash_bucket_size=1000)

Example of building canned `Estimator`s using FeatureColumns:

  ```python
  # Define features and transformations
  deep_feature_columns = [age_column, embedded_dept_column]
  wide_feature_columns = [dept_column, bucketized_age_column,
                          cross_dept_age_column]

  # Build deep model
  estimator = DNNClassifier(
      feature_columns=deep_feature_columns,
      hidden_units=[500, 250, 50])
  estimator.train(...)

  # Or build a wide model
  estimator = LinearClassifier(
      feature_columns=wide_feature_columns)
  estimator.train(...)

  # Or build a wide and deep model!
  estimator = DNNLinearCombinedClassifier(
      linear_feature_columns=wide_feature_columns,
      dnn_feature_columns=deep_feature_columns,
      dnn_hidden_units=[500, 250, 50])
  estimator.train(...)
  ```

FeatureColumns can also be transformed into a generic input layer for
custom models using `input_layer`.

Example of building a model using FeatureColumns; this can be used in a
`model_fn` which is given to the {tf.estimator.Estimator}:

  ```python
  # Building model via layers

  deep_feature_columns = [age_column, embedded_dept_column]
  columns_to_tensor = parse_feature_columns_from_examples(
      serialized=my_data,
      feature_columns=deep_feature_columns)
  first_layer = input_layer(
      features=columns_to_tensor,
      feature_columns=deep_feature_columns)
  second_layer = fully_connected(first_layer, ...)
  ```

NOTE: Functions prefixed with "_" indicate experimental or private parts of
the API subject to change, and should not be relied upon!
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import abc
import collections
import math
import re

import numpy as np
import six

from tensorflow.python.eager import context
from tensorflow.python.feature_column import feature_column as fc_old
from tensorflow.python.feature_column import utils as fc_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.framework import tensor_shape
# TODO(b/118385027): Dependency on keras can be problematic if Keras moves out
# of the main repo.
from tensorflow.python.keras import initializers
from tensorflow.python.keras.engine import training as keras_training
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.training.tracking import tracking
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.tf_export import tf_export


_FEATURE_COLUMN_DEPRECATION_DATE = None
_FEATURE_COLUMN_DEPRECATION = ('The old _FeatureColumn APIs are being '
                               'deprecated. Please use the new FeatureColumn '
                               'APIs instead.')


class StateManager(object):
  """Manages the state associated with FeatureColumns.

  Some `FeatureColumn`s create variables or resources to assist their
  computation. The `StateManager` is responsible for creating and storing these
  objects since `FeatureColumn`s are supposed to be stateless configuration
  only.
  """

  def create_variable(self,
                      feature_column,
                      name,
                      shape,
                      dtype=None,
                      trainable=True,
                      use_resource=True,
                      initializer=None):
    """Creates a new variable.

    Args:
      feature_column: A `FeatureColumn` object this variable corresponds to.
      name: variable name.
      shape: variable shape.
      dtype: The type of the variable.
Defaults to `self.dtype` or `float32`. trainable: Whether this variable is trainable or not. use_resource: If true, we use resource variables. Otherwise we use RefVariable. initializer: initializer instance (callable). Returns: The created variable. """ del feature_column, name, shape, dtype, trainable, use_resource, initializer raise NotImplementedError('StateManager.create_variable') def add_variable(self, feature_column, var): """Adds an existing variable to the state. Args: feature_column: A `FeatureColumn` object to associate this variable with. var: The variable. """ del feature_column, var raise NotImplementedError('StateManager.add_variable') def get_variable(self, feature_column, name): """Returns an existing variable. Args: feature_column: A `FeatureColumn` object this variable corresponds to. name: variable name. """ del feature_column, name raise NotImplementedError('StateManager.get_var') def add_resource(self, feature_column, name, resource): """Creates a new resource. Resources can be things such as tables, variables, trackables, etc. Args: feature_column: A `FeatureColumn` object this resource corresponds to. name: Name of the resource. resource: The resource. Returns: The created resource. """ del feature_column, name, resource raise NotImplementedError('StateManager.add_resource') def has_resource(self, feature_column, name): """Returns true iff a resource with same name exists. Resources can be things such as tables, variables, trackables, etc. Args: feature_column: A `FeatureColumn` object this variable corresponds to. name: Name of the resource. """ del feature_column, name raise NotImplementedError('StateManager.has_resource') def get_resource(self, feature_column, name): """Returns an already created resource. Resources can be things such as tables, variables, trackables, etc. Args: feature_column: A `FeatureColumn` object this variable corresponds to. name: Name of the resource. """ del feature_column, name raise NotImplementedError('StateManager.get_resource') class _StateManagerImpl(StateManager): """Manages the state of DenseFeatures and LinearLayer.""" def __init__(self, layer, trainable): """Creates an _StateManagerImpl object. Args: layer: The input layer this state manager is associated with. trainable: Whether by default, variables created are trainable or not. """ self._trainable = trainable self._layer = layer if self._layer is not None and not hasattr(self._layer, '_resources'): self._layer._resources = data_structures.Mapping() # pylint: disable=protected-access self._cols_to_vars_map = collections.defaultdict(lambda: {}) self._cols_to_resources_map = collections.defaultdict(lambda: {}) def create_variable(self, feature_column, name, shape, dtype=None, trainable=True, use_resource=True, initializer=None): if name in self._cols_to_vars_map[feature_column]: raise ValueError('Variable already exists.') # We explicitly track these variables since `name` is not guaranteed to be # unique and disable manual tracking that the add_weight call does. with trackable.no_manual_dependency_tracking_scope(self._layer): var = self._layer.add_weight( name=name, shape=shape, dtype=dtype, initializer=initializer, trainable=self._trainable and trainable, use_resource=use_resource, # TODO(rohanj): Get rid of this hack once we have a mechanism for # specifying a default partitioner for an entire layer. In that case, # the default getter for Layers should work. 
getter=variable_scope.get_variable) if isinstance(var, variables.PartitionedVariable): for v in var: part_name = name + '/' + str(v._get_save_slice_info().var_offset[0]) # pylint: disable=protected-access self._layer._track_trackable(v, feature_column.name + '/' + part_name) # pylint: disable=protected-access else: if isinstance(var, trackable.Trackable): self._layer._track_trackable(var, feature_column.name + '/' + name) # pylint: disable=protected-access self._cols_to_vars_map[feature_column][name] = var return var def get_variable(self, feature_column, name): if name in self._cols_to_vars_map[feature_column]: return self._cols_to_vars_map[feature_column][name] raise ValueError('Variable does not exist.') def add_resource(self, feature_column, resource_name, resource): self._cols_to_resources_map[feature_column][resource_name] = resource # pylint: disable=protected-access if self._layer is not None and isinstance(resource, trackable.Trackable): # Add trackable resources to the layer for serialization. if feature_column.name not in self._layer._resources: self._layer._resources[feature_column.name] = data_structures.Mapping() if resource_name not in self._layer._resources[feature_column.name]: self._layer._resources[feature_column.name][resource_name] = resource # pylint: enable=protected-access def has_resource(self, feature_column, resource_name): return resource_name in self._cols_to_resources_map[feature_column] def get_resource(self, feature_column, resource_name): if (feature_column not in self._cols_to_resources_map or resource_name not in self._cols_to_resources_map[feature_column]): raise ValueError('Resource does not exist.') return self._cols_to_resources_map[feature_column][resource_name] class _StateManagerImplV2(_StateManagerImpl): """Manages the state of DenseFeatures.""" def create_variable(self, feature_column, name, shape, dtype=None, trainable=True, use_resource=True, initializer=None): if name in self._cols_to_vars_map[feature_column]: raise ValueError('Variable already exists.') # We explicitly track these variables since `name` is not guaranteed to be # unique and disable manual tracking that the add_weight call does. with trackable.no_manual_dependency_tracking_scope(self._layer): var = self._layer.add_weight( name=name, shape=shape, dtype=dtype, initializer=initializer, trainable=self._trainable and trainable, use_resource=use_resource) if isinstance(var, trackable.Trackable): self._layer._track_trackable(var, feature_column.name + '/' + name) # pylint: disable=protected-access self._cols_to_vars_map[feature_column][name] = var return var class _BaseFeaturesLayer(Layer): """Base class for DenseFeatures and SequenceFeatures. Defines common methods and helpers. Args: feature_columns: An iterable containing the FeatureColumns to use as inputs to your model. expected_column_type: Expected class for provided feature columns. trainable: Boolean, whether the layer's variables will be updated via gradient descent during training. name: Name to give to the DenseFeatures. **kwargs: Keyword arguments to construct a layer. Raises: ValueError: if an item in `feature_columns` doesn't match `expected_column_type`. 
""" def __init__(self, feature_columns, expected_column_type, trainable, name, partitioner=None, **kwargs): super(_BaseFeaturesLayer, self).__init__( name=name, trainable=trainable, **kwargs) self._feature_columns = _normalize_feature_columns(feature_columns) self._state_manager = _StateManagerImpl(self, self.trainable) self._partitioner = partitioner for column in self._feature_columns: if not isinstance(column, expected_column_type): raise ValueError( 'Items of feature_columns must be a {}. ' 'You can wrap a categorical column with an ' 'embedding_column or indicator_column. Given: {}'.format( expected_column_type, column)) def build(self, _): for column in self._feature_columns: with variable_scope._pure_variable_scope( # pylint: disable=protected-access self.name, partitioner=self._partitioner): with variable_scope._pure_variable_scope( # pylint: disable=protected-access _sanitize_column_name_for_variable_scope(column.name)): column.create_state(self._state_manager) super(_BaseFeaturesLayer, self).build(None) def _output_shape(self, input_shape, num_elements): """Computes expected output shape of the layer or a column's dense tensor. Args: input_shape: Tensor or array with batch shape. num_elements: Size of the last dimension of the output. Returns: Tuple with output shape. """ raise NotImplementedError('Calling an abstract method.') def compute_output_shape(self, input_shape): total_elements = 0 for column in self._feature_columns: total_elements += column.variable_shape.num_elements() return self._target_shape(input_shape, total_elements) def _process_dense_tensor(self, column, tensor): """Reshapes the dense tensor output of a column based on expected shape. Args: column: A DenseColumn or SequenceDenseColumn object. tensor: A dense tensor obtained from the same column. Returns: Reshaped dense tensor.""" num_elements = column.variable_shape.num_elements() target_shape = self._target_shape(array_ops.shape(tensor), num_elements) return array_ops.reshape(tensor, shape=target_shape) def _verify_and_concat_tensors(self, output_tensors): """Verifies and concatenates the dense output of several columns.""" _verify_static_batch_size_equality(output_tensors, self._feature_columns) return array_ops.concat(output_tensors, -1) def get_config(self): # Import here to avoid circular imports. from tensorflow.python.feature_column import serialization # pylint: disable=g-import-not-at-top column_configs = serialization.serialize_feature_columns( self._feature_columns) config = {'feature_columns': column_configs} config['partitioner'] = generic_utils.serialize_keras_object( self._partitioner) base_config = super( # pylint: disable=bad-super-call _BaseFeaturesLayer, self).get_config() return dict(list(base_config.items()) + list(config.items())) @classmethod def from_config(cls, config, custom_objects=None): # Import here to avoid circular imports. 
from tensorflow.python.feature_column import serialization # pylint: disable=g-import-not-at-top config_cp = config.copy() config_cp['feature_columns'] = serialization.deserialize_feature_columns( config['feature_columns'], custom_objects=custom_objects) config_cp['partitioner'] = generic_utils.deserialize_keras_object( config['partitioner'], custom_objects) return cls(**config_cp) class _LinearModelLayer(Layer): """Layer that contains logic for `LinearModel`.""" def __init__(self, feature_columns, units=1, sparse_combiner='sum', trainable=True, name=None, **kwargs): super(_LinearModelLayer, self).__init__( name=name, trainable=trainable, **kwargs) self._feature_columns = _normalize_feature_columns(feature_columns) for column in self._feature_columns: if not isinstance(column, (DenseColumn, CategoricalColumn)): raise ValueError( 'Items of feature_columns must be either a ' 'DenseColumn or CategoricalColumn. Given: {}'.format(column)) self._units = units self._sparse_combiner = sparse_combiner self._state_manager = _StateManagerImpl(self, self.trainable) self.bias = None def build(self, _): # We need variable scopes for now because we want the variable partitioning # information to percolate down. We also use _pure_variable_scope's here # since we want to open up a name_scope in the `call` method while creating # the ops. with variable_scope._pure_variable_scope(self.name): # pylint: disable=protected-access for column in self._feature_columns: with variable_scope._pure_variable_scope( # pylint: disable=protected-access _sanitize_column_name_for_variable_scope(column.name)): # Create the state for each feature column column.create_state(self._state_manager) # Create a weight variable for each column. if isinstance(column, CategoricalColumn): first_dim = column.num_buckets else: first_dim = column.variable_shape.num_elements() self._state_manager.create_variable( column, name='weights', dtype=dtypes.float32, shape=(first_dim, self._units), initializer=initializers.zeros(), trainable=self.trainable) # Create a bias variable. self.bias = self.add_variable( name='bias_weights', dtype=dtypes.float32, shape=[self._units], initializer=initializers.zeros(), trainable=self.trainable, use_resource=True, # TODO(rohanj): Get rid of this hack once we have a mechanism for # specifying a default partitioner for an entire layer. In that case, # the default getter for Layers should work. getter=variable_scope.get_variable) super(_LinearModelLayer, self).build(None) def call(self, features): if not isinstance(features, dict): raise ValueError('We expected a dictionary here. Instead we got: {}' .format(features)) with ops.name_scope(self.name): transformation_cache = FeatureTransformationCache(features) weighted_sums = [] for column in self._feature_columns: with ops.name_scope( _sanitize_column_name_for_variable_scope(column.name)): # All the weights used in the linear model are owned by the state # manager associated with this Linear Model. 
weight_var = self._state_manager.get_variable(column, 'weights') weighted_sum = _create_weighted_sum( column=column, transformation_cache=transformation_cache, state_manager=self._state_manager, sparse_combiner=self._sparse_combiner, weight_var=weight_var) weighted_sums.append(weighted_sum) _verify_static_batch_size_equality(weighted_sums, self._feature_columns) predictions_no_bias = math_ops.add_n( weighted_sums, name='weighted_sum_no_bias') predictions = nn_ops.bias_add( predictions_no_bias, self.bias, name='weighted_sum') return predictions def get_config(self): # Import here to avoid circular imports. from tensorflow.python.feature_column import serialization # pylint: disable=g-import-not-at-top column_configs = serialization.serialize_feature_columns( self._feature_columns) config = { 'feature_columns': column_configs, 'units': self._units, 'sparse_combiner': self._sparse_combiner } base_config = super( # pylint: disable=bad-super-call _LinearModelLayer, self).get_config() return dict(list(base_config.items()) + list(config.items())) @classmethod def from_config(cls, config, custom_objects=None): # Import here to avoid circular imports. from tensorflow.python.feature_column import serialization # pylint: disable=g-import-not-at-top config_cp = config.copy() columns = serialization.deserialize_feature_columns( config_cp['feature_columns'], custom_objects=custom_objects) del config_cp['feature_columns'] return cls(feature_columns=columns, **config_cp) # TODO(tanzheny): Cleanup it with respect to Premade model b/132690565. class LinearModel(keras_training.Model): """Produces a linear prediction `Tensor` based on given `feature_columns`. This layer generates a weighted sum based on output dimension `units`. Weighted sum refers to logits in classification problems. It refers to the prediction itself for linear regression problems. Note on supported columns: `LinearLayer` treats categorical columns as `indicator_column`s. To be specific, assume the input as `SparseTensor` looks like: ```python shape = [2, 2] { [0, 0]: "a" [1, 0]: "b" [1, 1]: "c" } ``` `linear_model` assigns weights for the presence of "a", "b", "c' implicitly, just like `indicator_column`, while `input_layer` explicitly requires wrapping each of categorical columns with an `embedding_column` or an `indicator_column`. Example of usage: ```python price = numeric_column('price') price_buckets = bucketized_column(price, boundaries=[0., 10., 100., 1000.]) keywords = categorical_column_with_hash_bucket("keywords", 10K) keywords_price = crossed_column('keywords', price_buckets, ...) columns = [price_buckets, keywords, keywords_price ...] linear_model = LinearLayer(columns) features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) prediction = linear_model(features) ``` """ def __init__(self, feature_columns, units=1, sparse_combiner='sum', trainable=True, name=None, **kwargs): """Constructs a LinearLayer. Args: feature_columns: An iterable containing the FeatureColumns to use as inputs to your model. All items should be instances of classes derived from `_FeatureColumn`s. units: An integer, dimensionality of the output space. Default value is 1. sparse_combiner: A string specifying how to reduce if a categorical column is multivalent. Except `numeric_column`, almost all columns passed to `linear_model` are considered as categorical columns. It combines each categorical column independently. Currently "mean", "sqrtn" and "sum" are supported, with "sum" the default for linear model. 
"sqrtn" often achieves good accuracy, in particular with bag-of-words columns. * "sum": do not normalize features in the column * "mean": do l1 normalization on features in the column * "sqrtn": do l2 normalization on features in the column For example, for two features represented as the categorical columns: ```python # Feature 1 shape = [2, 2] { [0, 0]: "a" [0, 1]: "b" [1, 0]: "c" } # Feature 2 shape = [2, 3] { [0, 0]: "d" [1, 0]: "e" [1, 1]: "f" [1, 2]: "g" } ``` with `sparse_combiner` as "mean", the linear model outputs conceptually are ``` y_0 = 1.0 / 2.0 * ( w_a + w_ b) + w_c + b_0 y_1 = w_d + 1.0 / 3.0 * ( w_e + w_ f + w_g) + b_1 ``` where `y_i` is the output, `b_i` is the bias, and `w_x` is the weight assigned to the presence of `x` in the input features. trainable: If `True` also add the variable to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: Name to give to the Linear Model. All variables and ops created will be scoped by this name. **kwargs: Keyword arguments to construct a layer. Raises: ValueError: if an item in `feature_columns` is neither a `DenseColumn` nor `CategoricalColumn`. """ super(LinearModel, self).__init__(name=name, **kwargs) self.layer = _LinearModelLayer( feature_columns, units, sparse_combiner, trainable, name=self.name, **kwargs) def call(self, features): """Returns a `Tensor` the represents the predictions of a linear model. Args: features: A mapping from key to tensors. `_FeatureColumn`s look up via these keys. For example `numeric_column('price')` will look at 'price' key in this dict. Values are `Tensor` or `SparseTensor` depending on corresponding `_FeatureColumn`. Returns: A `Tensor` which represents predictions/logits of a linear model. Its shape is (batch_size, units) and its dtype is `float32`. Raises: ValueError: If features are not a dictionary. """ return self.layer(features) @property def bias(self): return self.layer.bias def _transform_features_v2(features, feature_columns, state_manager): """Returns transformed features based on features columns passed in. Please note that most probably you would not need to use this function. Please check `input_layer` and `linear_model` to see whether they will satisfy your use case or not. Example: ```python # Define features and transformations crosses_a_x_b = crossed_column( columns=["sparse_feature_a", "sparse_feature_b"], hash_bucket_size=10000) price_buckets = bucketized_column( source_column=numeric_column("price"), boundaries=[...]) columns = [crosses_a_x_b, price_buckets] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) transformed = transform_features(features=features, feature_columns=columns) assertCountEqual(columns, transformed.keys()) ``` Args: features: A mapping from key to tensors. `FeatureColumn`s look up via these keys. For example `numeric_column('price')` will look at 'price' key in this dict. Values can be a `SparseTensor` or a `Tensor` depends on corresponding `FeatureColumn`. feature_columns: An iterable containing all the `FeatureColumn`s. state_manager: A StateManager object that holds the FeatureColumn state. Returns: A `dict` mapping `FeatureColumn` to `Tensor` and `SparseTensor` values. 
""" feature_columns = _normalize_feature_columns(feature_columns) outputs = {} with ops.name_scope( None, default_name='transform_features', values=features.values()): transformation_cache = FeatureTransformationCache(features) for column in feature_columns: with ops.name_scope( None, default_name=_sanitize_column_name_for_variable_scope(column.name)): outputs[column] = transformation_cache.get(column, state_manager) return outputs @tf_export('feature_column.make_parse_example_spec', v1=[]) def make_parse_example_spec_v2(feature_columns): """Creates parsing spec dictionary from input feature_columns. The returned dictionary can be used as arg 'features' in `tf.io.parse_example`. Typical usage example: ```python # Define features and transformations feature_a = tf.feature_column.categorical_column_with_vocabulary_file(...) feature_b = tf.feature_column.numeric_column(...) feature_c_bucketized = tf.feature_column.bucketized_column( tf.feature_column.numeric_column("feature_c"), ...) feature_a_x_feature_c = tf.feature_column.crossed_column( columns=["feature_a", feature_c_bucketized], ...) feature_columns = set( [feature_b, feature_c_bucketized, feature_a_x_feature_c]) features = tf.io.parse_example( serialized=serialized_examples, features=tf.feature_column.make_parse_example_spec(feature_columns)) ``` For the above example, make_parse_example_spec would return the dict: ```python { "feature_a": parsing_ops.VarLenFeature(tf.string), "feature_b": parsing_ops.FixedLenFeature([1], dtype=tf.float32), "feature_c": parsing_ops.FixedLenFeature([1], dtype=tf.float32) } ``` Args: feature_columns: An iterable containing all feature columns. All items should be instances of classes derived from `FeatureColumn`. Returns: A dict mapping each feature key to a `FixedLenFeature` or `VarLenFeature` value. Raises: ValueError: If any of the given `feature_columns` is not a `FeatureColumn` instance. """ result = {} for column in feature_columns: if not isinstance(column, FeatureColumn): raise ValueError('All feature_columns must be FeatureColumn instances. ' 'Given: {}'.format(column)) config = column.parse_example_spec for key, value in six.iteritems(config): if key in result and value != result[key]: raise ValueError( 'feature_columns contain different parse_spec for key ' '{}. Given {} and {}'.format(key, value, result[key])) result.update(config) return result @tf_export('feature_column.embedding_column') def embedding_column(categorical_column, dimension, combiner='mean', initializer=None, ckpt_to_load_from=None, tensor_name_in_ckpt=None, max_norm=None, trainable=True, use_safe_embedding_lookup=True): """`DenseColumn` that converts from sparse, categorical input. Use this when your inputs are sparse, but you want to convert them to a dense representation (e.g., to feed to a DNN). Inputs must be a `CategoricalColumn` created by any of the `categorical_column_*` function. Here is an example of using `embedding_column` with `DNNClassifier`: ```python video_id = categorical_column_with_identity( key='video_id', num_buckets=1000000, default_value=0) columns = [embedding_column(video_id, 9),...] estimator = tf.estimator.DNNClassifier(feature_columns=columns, ...) label_column = ... 
def input_fn(): features = tf.io.parse_example( ..., features=make_parse_example_spec(columns + [label_column])) labels = features.pop(label_column.name) return features, labels estimator.train(input_fn=input_fn, steps=100) ``` Here is an example using `embedding_column` with model_fn: ```python def model_fn(features, ...): video_id = categorical_column_with_identity( key='video_id', num_buckets=1000000, default_value=0) columns = [embedding_column(video_id, 9),...] dense_tensor = input_layer(features, columns) # Form DNN layers, calculate loss, and return EstimatorSpec. ... ``` Args: categorical_column: A `CategoricalColumn` created by a `categorical_column_with_*` function. This column produces the sparse IDs that are inputs to the embedding lookup. dimension: An integer specifying dimension of the embedding, must be > 0. combiner: A string specifying how to reduce if there are multiple entries in a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with 'mean' the default. 'sqrtn' often achieves good accuracy, in particular with bag-of-words columns. Each of this can be thought as example level normalizations on the column. For more information, see `tf.embedding_lookup_sparse`. initializer: A variable initializer function to be used in embedding variable initialization. If not specified, defaults to `truncated_normal_initializer` with mean `0.0` and standard deviation `1/sqrt(dimension)`. ckpt_to_load_from: String representing checkpoint name/pattern from which to restore column weights. Required if `tensor_name_in_ckpt` is not `None`. tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from which to restore the column weights. Required if `ckpt_to_load_from` is not `None`. max_norm: If not `None`, embedding values are l2-normalized to this value. trainable: Whether or not the embedding is trainable. Default is True. use_safe_embedding_lookup: If true, uses safe_embedding_lookup_sparse instead of embedding_lookup_sparse. safe_embedding_lookup_sparse ensures there are no empty rows and all weights and ids are positive at the expense of extra compute cost. This only applies to rank 2 (NxM) shaped input tensors. Defaults to true, consider turning off if the above checks are not needed. Note that having empty rows will not trigger any error though the output result might be 0 or omitted. Returns: `DenseColumn` that converts from sparse input. Raises: ValueError: if `dimension` not > 0. ValueError: if exactly one of `ckpt_to_load_from` and `tensor_name_in_ckpt` is specified. ValueError: if `initializer` is specified and is not callable. RuntimeError: If eager execution is enabled. """ if (dimension is None) or (dimension < 1): raise ValueError('Invalid dimension {}.'.format(dimension)) if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None): raise ValueError('Must specify both `ckpt_to_load_from` and ' '`tensor_name_in_ckpt` or none of them.') if (initializer is not None) and (not callable(initializer)): raise ValueError('initializer must be callable if specified. 
' 'Embedding of column_name: {}'.format( categorical_column.name)) if initializer is None: initializer = initializers.truncated_normal( mean=0.0, stddev=1 / math.sqrt(dimension)) return EmbeddingColumn( categorical_column=categorical_column, dimension=dimension, combiner=combiner, initializer=initializer, ckpt_to_load_from=ckpt_to_load_from, tensor_name_in_ckpt=tensor_name_in_ckpt, max_norm=max_norm, trainable=trainable, use_safe_embedding_lookup=use_safe_embedding_lookup) @tf_export(v1=['feature_column.shared_embedding_columns']) def shared_embedding_columns(categorical_columns, dimension, combiner='mean', initializer=None, shared_embedding_collection_name=None, ckpt_to_load_from=None, tensor_name_in_ckpt=None, max_norm=None, trainable=True, use_safe_embedding_lookup=True): """List of dense columns that convert from sparse, categorical input. This is similar to `embedding_column`, except that it produces a list of embedding columns that share the same embedding weights. Use this when your inputs are sparse and of the same type (e.g. watched and impression video IDs that share the same vocabulary), and you want to convert them to a dense representation (e.g., to feed to a DNN). Inputs must be a list of categorical columns created by any of the `categorical_column_*` function. They must all be of the same type and have the same arguments except `key`. E.g. they can be categorical_column_with_vocabulary_file with the same vocabulary_file. Some or all columns could also be weighted_categorical_column. Here is an example embedding of two features for a DNNClassifier model: ```python watched_video_id = categorical_column_with_vocabulary_file( 'watched_video_id', video_vocabulary_file, video_vocabulary_size) impression_video_id = categorical_column_with_vocabulary_file( 'impression_video_id', video_vocabulary_file, video_vocabulary_size) columns = shared_embedding_columns( [watched_video_id, impression_video_id], dimension=10) estimator = tf.estimator.DNNClassifier(feature_columns=columns, ...) label_column = ... def input_fn(): features = tf.io.parse_example( ..., features=make_parse_example_spec(columns + [label_column])) labels = features.pop(label_column.name) return features, labels estimator.train(input_fn=input_fn, steps=100) ``` Here is an example using `shared_embedding_columns` with model_fn: ```python def model_fn(features, ...): watched_video_id = categorical_column_with_vocabulary_file( 'watched_video_id', video_vocabulary_file, video_vocabulary_size) impression_video_id = categorical_column_with_vocabulary_file( 'impression_video_id', video_vocabulary_file, video_vocabulary_size) columns = shared_embedding_columns( [watched_video_id, impression_video_id], dimension=10) dense_tensor = input_layer(features, columns) # Form DNN layers, calculate loss, and return EstimatorSpec. ... ``` Args: categorical_columns: List of categorical columns created by a `categorical_column_with_*` function. These columns produce the sparse IDs that are inputs to the embedding lookup. All columns must be of the same type and have the same arguments except `key`. E.g. they can be categorical_column_with_vocabulary_file with the same vocabulary_file. Some or all columns could also be weighted_categorical_column. dimension: An integer specifying dimension of the embedding, must be > 0. combiner: A string specifying how to reduce if there are multiple entries in a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with 'mean' the default. 
'sqrtn' often achieves good accuracy, in particular with bag-of-words columns. Each of this can be thought as example level normalizations on the column. For more information, see `tf.embedding_lookup_sparse`. initializer: A variable initializer function to be used in embedding variable initialization. If not specified, defaults to `truncated_normal_initializer` with mean `0.0` and standard deviation `1/sqrt(dimension)`. shared_embedding_collection_name: Optional name of the collection where shared embedding weights are added. If not given, a reasonable name will be chosen based on the names of `categorical_columns`. This is also used in `variable_scope` when creating shared embedding weights. ckpt_to_load_from: String representing checkpoint name/pattern from which to restore column weights. Required if `tensor_name_in_ckpt` is not `None`. tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from which to restore the column weights. Required if `ckpt_to_load_from` is not `None`. max_norm: If not `None`, each embedding is clipped if its l2-norm is larger than this value, before combining. trainable: Whether or not the embedding is trainable. Default is True. use_safe_embedding_lookup: If true, uses safe_embedding_lookup_sparse instead of embedding_lookup_sparse. safe_embedding_lookup_sparse ensures there are no empty rows and all weights and ids are positive at the expense of extra compute cost. This only applies to rank 2 (NxM) shaped input tensors. Defaults to true, consider turning off if the above checks are not needed. Note that having empty rows will not trigger any error though the output result might be 0 or omitted. Returns: A list of dense columns that converts from sparse input. The order of results follows the ordering of `categorical_columns`. Raises: ValueError: if `dimension` not > 0. ValueError: if any of the given `categorical_columns` is of different type or has different arguments than the others. ValueError: if exactly one of `ckpt_to_load_from` and `tensor_name_in_ckpt` is specified. ValueError: if `initializer` is specified and is not callable. RuntimeError: if eager execution is enabled. """ if context.executing_eagerly(): raise RuntimeError('shared_embedding_columns are not supported when eager ' 'execution is enabled.') if (dimension is None) or (dimension < 1): raise ValueError('Invalid dimension {}.'.format(dimension)) if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None): raise ValueError('Must specify both `ckpt_to_load_from` and ' '`tensor_name_in_ckpt` or none of them.') if (initializer is not None) and (not callable(initializer)): raise ValueError('initializer must be callable if specified.') if initializer is None: initializer = initializers.truncated_normal( mean=0.0, stddev=1. / math.sqrt(dimension)) # Sort the columns so the default collection name is deterministic even if the # user passes columns from an unsorted collection, such as dict.values(). sorted_columns = sorted(categorical_columns, key=lambda x: x.name) c0 = sorted_columns[0] num_buckets = c0._num_buckets # pylint: disable=protected-access if not isinstance(c0, fc_old._CategoricalColumn): # pylint: disable=protected-access raise ValueError( 'All categorical_columns must be subclasses of _CategoricalColumn. 
' 'Given: {}, of type: {}'.format(c0, type(c0))) while isinstance( c0, (fc_old._WeightedCategoricalColumn, WeightedCategoricalColumn, # pylint: disable=protected-access fc_old._SequenceCategoricalColumn, SequenceCategoricalColumn)): # pylint: disable=protected-access c0 = c0.categorical_column for c in sorted_columns[1:]: while isinstance( c, (fc_old._WeightedCategoricalColumn, WeightedCategoricalColumn, # pylint: disable=protected-access fc_old._SequenceCategoricalColumn, SequenceCategoricalColumn)): # pylint: disable=protected-access c = c.categorical_column if not isinstance(c, type(c0)): raise ValueError( 'To use shared_embedding_column, all categorical_columns must have ' 'the same type, or be weighted_categorical_column or sequence column ' 'of the same type. Given column: {} of type: {} does not match given ' 'column: {} of type: {}'.format(c0, type(c0), c, type(c))) if num_buckets != c._num_buckets: # pylint: disable=protected-access raise ValueError( 'To use shared_embedding_column, all categorical_columns must have ' 'the same number of buckets. Given column: {} with buckets: {} does ' 'not match column: {} with buckets: {}'.format( c0, num_buckets, c, c._num_buckets)) # pylint: disable=protected-access if not shared_embedding_collection_name: shared_embedding_collection_name = '_'.join(c.name for c in sorted_columns) shared_embedding_collection_name += '_shared_embedding' result = [] for column in categorical_columns: result.append( fc_old._SharedEmbeddingColumn( # pylint: disable=protected-access categorical_column=column, initializer=initializer, dimension=dimension, combiner=combiner, shared_embedding_collection_name=shared_embedding_collection_name, ckpt_to_load_from=ckpt_to_load_from, tensor_name_in_ckpt=tensor_name_in_ckpt, max_norm=max_norm, trainable=trainable, use_safe_embedding_lookup=use_safe_embedding_lookup)) return result @tf_export('feature_column.shared_embeddings', v1=[]) def shared_embedding_columns_v2(categorical_columns, dimension, combiner='mean', initializer=None, shared_embedding_collection_name=None, ckpt_to_load_from=None, tensor_name_in_ckpt=None, max_norm=None, trainable=True, use_safe_embedding_lookup=True): """List of dense columns that convert from sparse, categorical input. This is similar to `embedding_column`, except that it produces a list of embedding columns that share the same embedding weights. Use this when your inputs are sparse and of the same type (e.g. watched and impression video IDs that share the same vocabulary), and you want to convert them to a dense representation (e.g., to feed to a DNN). Inputs must be a list of categorical columns created by any of the `categorical_column_*` function. They must all be of the same type and have the same arguments except `key`. E.g. they can be categorical_column_with_vocabulary_file with the same vocabulary_file. Some or all columns could also be weighted_categorical_column. Here is an example embedding of two features for a DNNClassifier model: ```python watched_video_id = categorical_column_with_vocabulary_file( 'watched_video_id', video_vocabulary_file, video_vocabulary_size) impression_video_id = categorical_column_with_vocabulary_file( 'impression_video_id', video_vocabulary_file, video_vocabulary_size) columns = shared_embedding_columns( [watched_video_id, impression_video_id], dimension=10) estimator = tf.estimator.DNNClassifier(feature_columns=columns, ...) label_column = ... 
def input_fn(): features = tf.io.parse_example( ..., features=make_parse_example_spec(columns + [label_column])) labels = features.pop(label_column.name) return features, labels estimator.train(input_fn=input_fn, steps=100) ``` Here is an example using `shared_embedding_columns` with model_fn: ```python def model_fn(features, ...): watched_video_id = categorical_column_with_vocabulary_file( 'watched_video_id', video_vocabulary_file, video_vocabulary_size) impression_video_id = categorical_column_with_vocabulary_file( 'impression_video_id', video_vocabulary_file, video_vocabulary_size) columns = shared_embedding_columns( [watched_video_id, impression_video_id], dimension=10) dense_tensor = input_layer(features, columns) # Form DNN layers, calculate loss, and return EstimatorSpec. ... ``` Args: categorical_columns: List of categorical columns created by a `categorical_column_with_*` function. These columns produce the sparse IDs that are inputs to the embedding lookup. All columns must be of the same type and have the same arguments except `key`. E.g. they can be categorical_column_with_vocabulary_file with the same vocabulary_file. Some or all columns could also be weighted_categorical_column. dimension: An integer specifying dimension of the embedding, must be > 0. combiner: A string specifying how to reduce if there are multiple entries in a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with 'mean' the default. 'sqrtn' often achieves good accuracy, in particular with bag-of-words columns. Each of this can be thought as example level normalizations on the column. For more information, see `tf.embedding_lookup_sparse`. initializer: A variable initializer function to be used in embedding variable initialization. If not specified, defaults to `truncated_normal_initializer` with mean `0.0` and standard deviation `1/sqrt(dimension)`. shared_embedding_collection_name: Optional collective name of these columns. If not given, a reasonable name will be chosen based on the names of `categorical_columns`. ckpt_to_load_from: String representing checkpoint name/pattern from which to restore column weights. Required if `tensor_name_in_ckpt` is not `None`. tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from which to restore the column weights. Required if `ckpt_to_load_from` is not `None`. max_norm: If not `None`, each embedding is clipped if its l2-norm is larger than this value, before combining. trainable: Whether or not the embedding is trainable. Default is True. use_safe_embedding_lookup: If true, uses safe_embedding_lookup_sparse instead of embedding_lookup_sparse. safe_embedding_lookup_sparse ensures there are no empty rows and all weights and ids are positive at the expense of extra compute cost. This only applies to rank 2 (NxM) shaped input tensors. Defaults to true, consider turning off if the above checks are not needed. Note that having empty rows will not trigger any error though the output result might be 0 or omitted. Returns: A list of dense columns that converts from sparse input. The order of results follows the ordering of `categorical_columns`. Raises: ValueError: if `dimension` not > 0. ValueError: if any of the given `categorical_columns` is of different type or has different arguments than the others. ValueError: if exactly one of `ckpt_to_load_from` and `tensor_name_in_ckpt` is specified. ValueError: if `initializer` is specified and is not callable. RuntimeError: if eager execution is enabled. 
""" if context.executing_eagerly(): raise RuntimeError('shared_embedding_columns are not supported when eager ' 'execution is enabled.') if (dimension is None) or (dimension < 1): raise ValueError('Invalid dimension {}.'.format(dimension)) if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None): raise ValueError('Must specify both `ckpt_to_load_from` and ' '`tensor_name_in_ckpt` or none of them.') if (initializer is not None) and (not callable(initializer)): raise ValueError('initializer must be callable if specified.') if initializer is None: initializer = initializers.truncated_normal( mean=0.0, stddev=1. / math.sqrt(dimension)) # Sort the columns so the default collection name is deterministic even if the # user passes columns from an unsorted collection, such as dict.values(). sorted_columns = sorted(categorical_columns, key=lambda x: x.name) c0 = sorted_columns[0] num_buckets = c0.num_buckets if not isinstance(c0, CategoricalColumn): raise ValueError( 'All categorical_columns must be subclasses of CategoricalColumn. ' 'Given: {}, of type: {}'.format(c0, type(c0))) while isinstance(c0, (WeightedCategoricalColumn, SequenceCategoricalColumn)): c0 = c0.categorical_column for c in sorted_columns[1:]: while isinstance(c, (WeightedCategoricalColumn, SequenceCategoricalColumn)): c = c.categorical_column if not isinstance(c, type(c0)): raise ValueError( 'To use shared_embedding_column, all categorical_columns must have ' 'the same type, or be weighted_categorical_column or sequence column ' 'of the same type. Given column: {} of type: {} does not match given ' 'column: {} of type: {}'.format(c0, type(c0), c, type(c))) if num_buckets != c.num_buckets: raise ValueError( 'To use shared_embedding_column, all categorical_columns must have ' 'the same number of buckets. Given column: {} with buckets: {} does ' 'not match column: {} with buckets: {}'.format( c0, num_buckets, c, c.num_buckets)) if not shared_embedding_collection_name: shared_embedding_collection_name = '_'.join(c.name for c in sorted_columns) shared_embedding_collection_name += '_shared_embedding' column_creator = SharedEmbeddingColumnCreator( dimension, initializer, ckpt_to_load_from, tensor_name_in_ckpt, num_buckets, trainable, shared_embedding_collection_name, use_safe_embedding_lookup) result = [] for column in categorical_columns: result.append( column_creator( categorical_column=column, combiner=combiner, max_norm=max_norm)) return result @tf_export('feature_column.numeric_column') def numeric_column(key, shape=(1,), default_value=None, dtype=dtypes.float32, normalizer_fn=None): """Represents real valued or numerical features. Example: ```python price = numeric_column('price') columns = [price, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) dense_tensor = input_layer(features, columns) # or bucketized_price = bucketized_column(price, boundaries=[...]) columns = [bucketized_price, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction = linear_model(features, columns) ``` Args: key: A unique string identifying the input feature. It is used as the column name and the dictionary key for feature parsing configs, feature `Tensor` objects, and feature columns. shape: An iterable of integers specifies the shape of the `Tensor`. An integer can be given which means a single dimension `Tensor` with given width. The `Tensor` representing the column will have the shape of [batch_size] + `shape`. 
default_value: A single value compatible with `dtype` or an iterable of values compatible with `dtype` which the column takes on during `tf.Example` parsing if data is missing. A default value of `None` will cause `tf.io.parse_example` to fail if an example does not contain this column. If a single value is provided, the same value will be applied as the default value for every item. If an iterable of values is provided, the shape of the `default_value` should be equal to the given `shape`. dtype: defines the type of values. Default value is `tf.float32`. Must be a non-quantized, real integer or floating point type. normalizer_fn: If not `None`, a function that can be used to normalize the value of the tensor after `default_value` is applied for parsing. Normalizer function takes the input `Tensor` as its argument, and returns the output `Tensor`. (e.g. lambda x: (x - 3.0) / 4.2). Please note that even though the most common use case of this function is normalization, it can be used for any kind of Tensorflow transformations. Returns: A `NumericColumn`. Raises: TypeError: if any dimension in shape is not an int ValueError: if any dimension in shape is not a positive integer TypeError: if `default_value` is an iterable but not compatible with `shape` TypeError: if `default_value` is not compatible with `dtype`. ValueError: if `dtype` is not convertible to `tf.float32`. """ shape = _check_shape(shape, key) if not (dtype.is_integer or dtype.is_floating): raise ValueError('dtype must be convertible to float. ' 'dtype: {}, key: {}'.format(dtype, key)) default_value = fc_utils.check_default_value( shape, default_value, dtype, key) if normalizer_fn is not None and not callable(normalizer_fn): raise TypeError( 'normalizer_fn must be a callable. Given: {}'.format(normalizer_fn)) fc_utils.assert_key_is_string(key) return NumericColumn( key, shape=shape, default_value=default_value, dtype=dtype, normalizer_fn=normalizer_fn) @tf_export('feature_column.bucketized_column') def bucketized_column(source_column, boundaries): """Represents discretized dense input bucketed by `boundaries`. Buckets include the left boundary, and exclude the right boundary. Namely, `boundaries=[0., 1., 2.]` generates buckets `(-inf, 0.)`, `[0., 1.)`, `[1., 2.)`, and `[2., +inf)`. For example, if the inputs are ```python boundaries = [0, 10, 100] input tensor = [[-5, 10000] [150, 10] [5, 100]] ``` then the output will be ```python output = [[0, 3] [3, 2] [1, 3]] ``` Example: ```python price = tf.feature_column.numeric_column('price') bucketized_price = tf.feature_column.bucketized_column( price, boundaries=[...]) columns = [bucketized_price, ...] features = tf.io.parse_example( ..., features=tf.feature_column.make_parse_example_spec(columns)) dense_tensor = tf.keras.layers.DenseFeatures(columns)(features) ``` A `bucketized_column` can also be crossed with another categorical column using `crossed_column`: ```python price = tf.feature_column.numeric_column('price') # bucketized_column converts numerical feature to a categorical one. bucketized_price = tf.feature_column.bucketized_column( price, boundaries=[...]) # 'keywords' is a string feature. price_x_keywords = tf.feature_column.crossed_column( [bucketized_price, 'keywords'], 50K) columns = [price_x_keywords, ...] 
features = tf.io.parse_example( ..., features=tf.feature_column.make_parse_example_spec(columns)) dense_tensor = tf.keras.layers.DenseFeatures(columns)(features) linear_model = tf.keras.experimental.LinearModel(units=...)(dense_tensor) ``` Args: source_column: A one-dimensional dense column which is generated with `numeric_column`. boundaries: A sorted list or tuple of floats specifying the boundaries. Returns: A `BucketizedColumn`. Raises: ValueError: If `source_column` is not a numeric column, or if it is not one-dimensional. ValueError: If `boundaries` is not a sorted list or tuple. """ if not isinstance(source_column, (NumericColumn, fc_old._NumericColumn)): # pylint: disable=protected-access raise ValueError( 'source_column must be a column generated with numeric_column(). ' 'Given: {}'.format(source_column)) if len(source_column.shape) > 1: raise ValueError( 'source_column must be one-dimensional column. ' 'Given: {}'.format(source_column)) if not boundaries: raise ValueError('boundaries must not be empty.') if not (isinstance(boundaries, list) or isinstance(boundaries, tuple)): raise ValueError('boundaries must be a sorted list.') for i in range(len(boundaries) - 1): if boundaries[i] >= boundaries[i + 1]: raise ValueError('boundaries must be a sorted list.') return BucketizedColumn(source_column, tuple(boundaries)) @tf_export('feature_column.categorical_column_with_hash_bucket') def categorical_column_with_hash_bucket(key, hash_bucket_size, dtype=dtypes.string): """Represents sparse feature where ids are set by hashing. Use this when your sparse features are in string or integer format, and you want to distribute your inputs into a finite number of buckets by hashing. output_id = Hash(input_feature_string) % bucket_size for string type input. For int type input, the value is converted to its string representation first and then hashed by the same formula. For input dictionary `features`, `features[key]` is either `Tensor` or `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int and `''` for string, which will be dropped by this feature column. Example: ```python keywords = categorical_column_with_hash_bucket("keywords", 10K) columns = [keywords, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction = linear_model(features, columns) # or keywords_embedded = embedding_column(keywords, 16) columns = [keywords_embedded, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) dense_tensor = input_layer(features, columns) ``` Args: key: A unique string identifying the input feature. It is used as the column name and the dictionary key for feature parsing configs, feature `Tensor` objects, and feature columns. hash_bucket_size: An int > 1. The number of buckets. dtype: The type of features. Only string and integer types are supported. Returns: A `HashedCategoricalColumn`. Raises: ValueError: `hash_bucket_size` is not greater than 1. ValueError: `dtype` is neither string nor integer. """ if hash_bucket_size is None: raise ValueError('hash_bucket_size must be set. ' 'key: {}'.format(key)) if hash_bucket_size < 1: raise ValueError('hash_bucket_size must be at least 1. 
' 'hash_bucket_size: {}, key: {}'.format( hash_bucket_size, key)) fc_utils.assert_key_is_string(key) fc_utils.assert_string_or_int(dtype, prefix='column_name: {}'.format(key)) return HashedCategoricalColumn(key, hash_bucket_size, dtype) @tf_export(v1=['feature_column.categorical_column_with_vocabulary_file']) def categorical_column_with_vocabulary_file(key, vocabulary_file, vocabulary_size=None, num_oov_buckets=0, default_value=None, dtype=dtypes.string): """A `CategoricalColumn` with a vocabulary file. Use this when your inputs are in string or integer format, and you have a vocabulary file that maps each value to an integer ID. By default, out-of-vocabulary values are ignored. Use either (but not both) of `num_oov_buckets` and `default_value` to specify how to include out-of-vocabulary values. For input dictionary `features`, `features[key]` is either `Tensor` or `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int and `''` for string, which will be dropped by this feature column. Example with `num_oov_buckets`: File '/us/states.txt' contains 50 lines, each with a 2-character U.S. state abbreviation. All inputs with values in that file are assigned an ID 0-49, corresponding to its line number. All other values are hashed and assigned an ID 50-54. ```python states = categorical_column_with_vocabulary_file( key='states', vocabulary_file='/us/states.txt', vocabulary_size=50, num_oov_buckets=5) columns = [states, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction = linear_model(features, columns) ``` Example with `default_value`: File '/us/states.txt' contains 51 lines - the first line is 'XX', and the other 50 each have a 2-character U.S. state abbreviation. Both a literal 'XX' in input, and other values missing from the file, will be assigned ID 0. All others are assigned the corresponding line number 1-50. ```python states = categorical_column_with_vocabulary_file( key='states', vocabulary_file='/us/states.txt', vocabulary_size=51, default_value=0) columns = [states, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction, _, _ = linear_model(features, columns) ``` And to make an embedding with either: ```python columns = [embedding_column(states, 3),...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) dense_tensor = input_layer(features, columns) ``` Args: key: A unique string identifying the input feature. It is used as the column name and the dictionary key for feature parsing configs, feature `Tensor` objects, and feature columns. vocabulary_file: The vocabulary file name. vocabulary_size: Number of the elements in the vocabulary. This must be no greater than length of `vocabulary_file`, if less than length, later values are ignored. If None, it is set to the length of `vocabulary_file`. num_oov_buckets: Non-negative integer, the number of out-of-vocabulary buckets. All out-of-vocabulary inputs will be assigned IDs in the range `[vocabulary_size, vocabulary_size+num_oov_buckets)` based on a hash of the input value. A positive `num_oov_buckets` can not be specified with `default_value`. default_value: The integer ID value to return for out-of-vocabulary feature values, defaults to `-1`. This can not be specified with a positive `num_oov_buckets`. dtype: The type of features. Only string and integer types are supported. Returns: A `CategoricalColumn` with a vocabulary file. 
Raises: ValueError: `vocabulary_file` is missing or cannot be opened. ValueError: `vocabulary_size` is missing or < 1. ValueError: `num_oov_buckets` is a negative integer. ValueError: `num_oov_buckets` and `default_value` are both specified. ValueError: `dtype` is neither string nor integer. """ return categorical_column_with_vocabulary_file_v2( key, vocabulary_file, vocabulary_size, dtype, default_value, num_oov_buckets) @tf_export('feature_column.categorical_column_with_vocabulary_file', v1=[]) def categorical_column_with_vocabulary_file_v2(key, vocabulary_file, vocabulary_size=None, dtype=dtypes.string, default_value=None, num_oov_buckets=0): """A `CategoricalColumn` with a vocabulary file. Use this when your inputs are in string or integer format, and you have a vocabulary file that maps each value to an integer ID. By default, out-of-vocabulary values are ignored. Use either (but not both) of `num_oov_buckets` and `default_value` to specify how to include out-of-vocabulary values. For input dictionary `features`, `features[key]` is either `Tensor` or `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int and `''` for string, which will be dropped by this feature column. Example with `num_oov_buckets`: File `'/us/states.txt'` contains 50 lines, each with a 2-character U.S. state abbreviation. All inputs with values in that file are assigned an ID 0-49, corresponding to its line number. All other values are hashed and assigned an ID 50-54. ```python states = categorical_column_with_vocabulary_file( key='states', vocabulary_file='/us/states.txt', vocabulary_size=50, num_oov_buckets=5) columns = [states, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction = linear_model(features, columns) ``` Example with `default_value`: File `'/us/states.txt'` contains 51 lines - the first line is `'XX'`, and the other 50 each have a 2-character U.S. state abbreviation. Both a literal `'XX'` in input, and other values missing from the file, will be assigned ID 0. All others are assigned the corresponding line number 1-50. ```python states = categorical_column_with_vocabulary_file( key='states', vocabulary_file='/us/states.txt', vocabulary_size=51, default_value=0) columns = [states, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction, _, _ = linear_model(features, columns) ``` And to make an embedding with either: ```python columns = [embedding_column(states, 3),...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) dense_tensor = input_layer(features, columns) ``` Args: key: A unique string identifying the input feature. It is used as the column name and the dictionary key for feature parsing configs, feature `Tensor` objects, and feature columns. vocabulary_file: The vocabulary file name. vocabulary_size: Number of the elements in the vocabulary. This must be no greater than length of `vocabulary_file`, if less than length, later values are ignored. If None, it is set to the length of `vocabulary_file`. dtype: The type of features. Only string and integer types are supported. default_value: The integer ID value to return for out-of-vocabulary feature values, defaults to `-1`. This can not be specified with a positive `num_oov_buckets`. num_oov_buckets: Non-negative integer, the number of out-of-vocabulary buckets. 
All out-of-vocabulary inputs will be assigned IDs in the range `[vocabulary_size, vocabulary_size+num_oov_buckets)` based on a hash of the input value. A positive `num_oov_buckets` can not be specified with `default_value`. Returns: A `CategoricalColumn` with a vocabulary file. Raises: ValueError: `vocabulary_file` is missing or cannot be opened. ValueError: `vocabulary_size` is missing or < 1. ValueError: `num_oov_buckets` is a negative integer. ValueError: `num_oov_buckets` and `default_value` are both specified. ValueError: `dtype` is neither string nor integer. """ if not vocabulary_file: raise ValueError('Missing vocabulary_file in {}.'.format(key)) if vocabulary_size is None: if not gfile.Exists(vocabulary_file): raise ValueError('vocabulary_file in {} does not exist.'.format(key)) with gfile.GFile(vocabulary_file, mode='rb') as f: vocabulary_size = sum(1 for _ in f) logging.info( 'vocabulary_size = %d in %s is inferred from the number of elements ' 'in the vocabulary_file %s.', vocabulary_size, key, vocabulary_file) # `vocabulary_size` isn't required for lookup, but it is for `_num_buckets`. if vocabulary_size < 1: raise ValueError('Invalid vocabulary_size in {}.'.format(key)) if num_oov_buckets: if default_value is not None: raise ValueError( 'Can\'t specify both num_oov_buckets and default_value in {}.'.format( key)) if num_oov_buckets < 0: raise ValueError('Invalid num_oov_buckets {} in {}.'.format( num_oov_buckets, key)) fc_utils.assert_string_or_int(dtype, prefix='column_name: {}'.format(key)) fc_utils.assert_key_is_string(key) return VocabularyFileCategoricalColumn( key=key, vocabulary_file=vocabulary_file, vocabulary_size=vocabulary_size, num_oov_buckets=0 if num_oov_buckets is None else num_oov_buckets, default_value=-1 if default_value is None else default_value, dtype=dtype) @tf_export('feature_column.categorical_column_with_vocabulary_list') def categorical_column_with_vocabulary_list(key, vocabulary_list, dtype=None, default_value=-1, num_oov_buckets=0): """A `CategoricalColumn` with in-memory vocabulary. Use this when your inputs are in string or integer format, and you have an in-memory vocabulary mapping each value to an integer ID. By default, out-of-vocabulary values are ignored. Use either (but not both) of `num_oov_buckets` and `default_value` to specify how to include out-of-vocabulary values. For input dictionary `features`, `features[key]` is either `Tensor` or `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int and `''` for string, which will be dropped by this feature column. Example with `num_oov_buckets`: In the following example, each input in `vocabulary_list` is assigned an ID 0-3 corresponding to its index (e.g., input 'B' produces output 2). All other inputs are hashed and assigned an ID 4-5. ```python colors = categorical_column_with_vocabulary_list( key='colors', vocabulary_list=('R', 'G', 'B', 'Y'), num_oov_buckets=2) columns = [colors, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction, _, _ = linear_model(features, columns) ``` Example with `default_value`: In the following example, each input in `vocabulary_list` is assigned an ID 0-4 corresponding to its index (e.g., input 'B' produces output 3). All other inputs are assigned `default_value` 0. ```python colors = categorical_column_with_vocabulary_list( key='colors', vocabulary_list=('X', 'R', 'G', 'B', 'Y'), default_value=0) columns = [colors, ...] 
features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction, _, _ = linear_model(features, columns) ``` And to make an embedding with either: ```python columns = [embedding_column(colors, 3),...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) dense_tensor = input_layer(features, columns) ``` Args: key: A unique string identifying the input feature. It is used as the column name and the dictionary key for feature parsing configs, feature `Tensor` objects, and feature columns. vocabulary_list: An ordered iterable defining the vocabulary. Each feature is mapped to the index of its value (if present) in `vocabulary_list`. Must be castable to `dtype`. dtype: The type of features. Only string and integer types are supported. If `None`, it will be inferred from `vocabulary_list`. default_value: The integer ID value to return for out-of-vocabulary feature values, defaults to `-1`. This can not be specified with a positive `num_oov_buckets`. num_oov_buckets: Non-negative integer, the number of out-of-vocabulary buckets. All out-of-vocabulary inputs will be assigned IDs in the range `[len(vocabulary_list), len(vocabulary_list)+num_oov_buckets)` based on a hash of the input value. A positive `num_oov_buckets` can not be specified with `default_value`. Returns: A `CategoricalColumn` with in-memory vocabulary. Raises: ValueError: if `vocabulary_list` is empty, or contains duplicate keys. ValueError: `num_oov_buckets` is a negative integer. ValueError: `num_oov_buckets` and `default_value` are both specified. ValueError: if `dtype` is not integer or string. """ if (vocabulary_list is None) or (len(vocabulary_list) < 1): raise ValueError( 'vocabulary_list {} must be non-empty, column_name: {}'.format( vocabulary_list, key)) if len(set(vocabulary_list)) != len(vocabulary_list): raise ValueError( 'Duplicate keys in vocabulary_list {}, column_name: {}'.format( vocabulary_list, key)) vocabulary_dtype = dtypes.as_dtype(np.array(vocabulary_list).dtype) if num_oov_buckets: if default_value != -1: raise ValueError( 'Can\'t specify both num_oov_buckets and default_value in {}.'.format( key)) if num_oov_buckets < 0: raise ValueError('Invalid num_oov_buckets {} in {}.'.format( num_oov_buckets, key)) fc_utils.assert_string_or_int( vocabulary_dtype, prefix='column_name: {} vocabulary'.format(key)) if dtype is None: dtype = vocabulary_dtype elif dtype.is_integer != vocabulary_dtype.is_integer: raise ValueError( 'dtype {} and vocabulary dtype {} do not match, column_name: {}'.format( dtype, vocabulary_dtype, key)) fc_utils.assert_string_or_int(dtype, prefix='column_name: {}'.format(key)) fc_utils.assert_key_is_string(key) return VocabularyListCategoricalColumn( key=key, vocabulary_list=tuple(vocabulary_list), dtype=dtype, default_value=default_value, num_oov_buckets=num_oov_buckets) @tf_export('feature_column.categorical_column_with_identity') def categorical_column_with_identity(key, num_buckets, default_value=None): """A `CategoricalColumn` that returns identity values. Use this when your inputs are integers in the range `[0, num_buckets)`, and you want to use the input value itself as the categorical ID. Values outside this range will result in `default_value` if specified, otherwise it will fail. Typically, this is used for contiguous ranges of integer indexes, but it doesn't have to be. This might be inefficient, however, if many of IDs are unused. Consider `categorical_column_with_hash_bucket` in that case. 
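For a rough illustration of that tradeoff (feature names and sizes below are
purely illustrative):

```python
# Dense, contiguous ID space: identity mapping is exact and collision-free.
day_of_week = categorical_column_with_identity('day_of_week', num_buckets=7)
# Huge, sparsely used ID space: hashing keeps the table small, at the cost
# of possible collisions.
user_id = categorical_column_with_hash_bucket('user_id', hash_bucket_size=10**5)
```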
For input dictionary `features`, `features[key]` is either `Tensor` or `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int and `''` for string, which will be dropped by this feature column. In the following examples, each input in the range `[0, 1000000)` is assigned the same value. All other inputs are assigned `default_value` 0. Note that a literal 0 in inputs will result in the same default ID. Linear model: ```python video_id = categorical_column_with_identity( key='video_id', num_buckets=1000000, default_value=0) columns = [video_id, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction, _, _ = linear_model(features, columns) ``` Embedding for a DNN model: ```python columns = [embedding_column(video_id, 9),...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) dense_tensor = input_layer(features, columns) ``` Args: key: A unique string identifying the input feature. It is used as the column name and the dictionary key for feature parsing configs, feature `Tensor` objects, and feature columns. num_buckets: Range of inputs and outputs is `[0, num_buckets)`. default_value: If set, values outside of range `[0, num_buckets)` will be replaced with this value. If not set, values >= num_buckets will cause a failure while values < 0 will be dropped. Returns: A `CategoricalColumn` that returns identity values. Raises: ValueError: if `num_buckets` is less than one. ValueError: if `default_value` is not in range `[0, num_buckets)`. """ if num_buckets < 1: raise ValueError( 'num_buckets {} < 1, column_name {}'.format(num_buckets, key)) if (default_value is not None) and ( (default_value < 0) or (default_value >= num_buckets)): raise ValueError( 'default_value {} not in range [0, {}), column_name {}'.format( default_value, num_buckets, key)) fc_utils.assert_key_is_string(key) return IdentityCategoricalColumn( key=key, number_buckets=num_buckets, default_value=default_value) @tf_export('feature_column.indicator_column') def indicator_column(categorical_column): """Represents multi-hot representation of given categorical column. - For DNN model, `indicator_column` can be used to wrap any `categorical_column_*` (e.g., to feed to DNN). Consider to Use `embedding_column` if the number of buckets/unique(values) are large. - For Wide (aka linear) model, `indicator_column` is the internal representation for categorical column when passing categorical column directly (as any element in feature_columns) to `linear_model`. See `linear_model` for details. ```python name = indicator_column(categorical_column_with_vocabulary_list( 'name', ['bob', 'george', 'wanda'])) columns = [name, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) dense_tensor = input_layer(features, columns) dense_tensor == [[1, 0, 0]] # If "name" bytes_list is ["bob"] dense_tensor == [[1, 0, 1]] # If "name" bytes_list is ["bob", "wanda"] dense_tensor == [[2, 0, 0]] # If "name" bytes_list is ["bob", "bob"] ``` Args: categorical_column: A `CategoricalColumn` which is created by `categorical_column_with_*` or `crossed_column` functions. Returns: An `IndicatorColumn`. Raises: ValueError: If `categorical_column` is not CategoricalColumn type. """ if not isinstance(categorical_column, (CategoricalColumn, fc_old._CategoricalColumn)): # pylint: disable=protected-access raise ValueError( 'Unsupported input type. Input must be a CategoricalColumn. 
' 'Given: {}'.format(categorical_column)) return IndicatorColumn(categorical_column) @tf_export('feature_column.weighted_categorical_column') def weighted_categorical_column(categorical_column, weight_feature_key, dtype=dtypes.float32): """Applies weight values to a `CategoricalColumn`. Use this when each of your sparse inputs has both an ID and a value. For example, if you're representing text documents as a collection of word frequencies, you can provide 2 parallel sparse input features ('terms' and 'frequencies' below). Example: Input `tf.Example` objects: ```proto [ features { feature { key: "terms" value {bytes_list {value: "very" value: "model"}} } feature { key: "frequencies" value {float_list {value: 0.3 value: 0.1}} } }, features { feature { key: "terms" value {bytes_list {value: "when" value: "course" value: "human"}} } feature { key: "frequencies" value {float_list {value: 0.4 value: 0.1 value: 0.2}} } } ] ``` ```python categorical_column = categorical_column_with_hash_bucket( column_name='terms', hash_bucket_size=1000) weighted_column = weighted_categorical_column( categorical_column=categorical_column, weight_feature_key='frequencies') columns = [weighted_column, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction, _, _ = linear_model(features, columns) ``` This assumes the input dictionary contains a `SparseTensor` for key 'terms', and a `SparseTensor` for key 'frequencies'. These 2 tensors must have the same indices and dense shape. Args: categorical_column: A `CategoricalColumn` created by `categorical_column_with_*` functions. weight_feature_key: String key for weight values. dtype: Type of weights, such as `tf.float32`. Only float and integer weights are supported. Returns: A `CategoricalColumn` composed of two sparse features: one represents id, the other represents weight (value) of the id feature in that example. Raises: ValueError: if `dtype` is not convertible to float. """ if (dtype is None) or not (dtype.is_integer or dtype.is_floating): raise ValueError('dtype {} is not convertible to float.'.format(dtype)) return WeightedCategoricalColumn( categorical_column=categorical_column, weight_feature_key=weight_feature_key, dtype=dtype) @tf_export('feature_column.crossed_column') def crossed_column(keys, hash_bucket_size, hash_key=None): """Returns a column for performing crosses of categorical features. Crossed features will be hashed according to `hash_bucket_size`. Conceptually, the transformation can be thought of as: Hash(cartesian product of features) % `hash_bucket_size` For example, if the input features are: * SparseTensor referred by first key: ```python shape = [2, 2] { [0, 0]: "a" [1, 0]: "b" [1, 1]: "c" } ``` * SparseTensor referred by second key: ```python shape = [2, 1] { [0, 0]: "d" [1, 0]: "e" } ``` then crossed feature will look like: ```python shape = [2, 2] { [0, 0]: Hash64("d", Hash64("a")) % hash_bucket_size [1, 0]: Hash64("e", Hash64("b")) % hash_bucket_size [1, 1]: Hash64("e", Hash64("c")) % hash_bucket_size } ``` Here is an example to create a linear model with crosses of string features: ```python keywords_x_doc_terms = crossed_column(['keywords', 'doc_terms'], 50K) columns = [keywords_x_doc_terms, ...] 
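# (`50K` is again doc shorthand for an int such as 50000.)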
features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction = linear_model(features, columns) ``` You could also use vocabulary lookup before crossing: ```python keywords = categorical_column_with_vocabulary_file( 'keywords', '/path/to/vocabulary/file', vocabulary_size=1K) keywords_x_doc_terms = crossed_column([keywords, 'doc_terms'], 50K) columns = [keywords_x_doc_terms, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction = linear_model(features, columns) ``` If an input feature is of numeric type, you can use `categorical_column_with_identity`, or `bucketized_column`, as in the example: ```python # vertical_id is an integer categorical feature. vertical_id = categorical_column_with_identity('vertical_id', 10K) price = numeric_column('price') # bucketized_column converts numerical feature to a categorical one. bucketized_price = bucketized_column(price, boundaries=[...]) vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50K) columns = [vertical_id_x_price, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction = linear_model(features, columns) ``` To use crossed column in DNN model, you need to add it in an embedding column as in this example: ```python vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50K) vertical_id_x_price_embedded = embedding_column(vertical_id_x_price, 10) dense_tensor = input_layer(features, [vertical_id_x_price_embedded, ...]) ``` Args: keys: An iterable identifying the features to be crossed. Each element can be either: * string: Will use the corresponding feature which must be of string type. * `CategoricalColumn`: Will use the transformed tensor produced by this column. Does not support hashed categorical column. hash_bucket_size: An int > 1. The number of buckets. hash_key: Specify the hash_key that will be used by the `FingerprintCat64` function to combine the crosses fingerprints on SparseCrossOp (optional). Returns: A `CrossedColumn`. Raises: ValueError: If `len(keys) < 2`. ValueError: If any of the keys is neither a string nor `CategoricalColumn`. ValueError: If any of the keys is `HashedCategoricalColumn`. ValueError: If `hash_bucket_size < 1`. """ if not hash_bucket_size or hash_bucket_size < 1: raise ValueError('hash_bucket_size must be > 1. ' 'hash_bucket_size: {}'.format(hash_bucket_size)) if not keys or len(keys) < 2: raise ValueError( 'keys must be a list with length > 1. Given: {}'.format(keys)) for key in keys: if (not isinstance(key, six.string_types) and not isinstance(key, (CategoricalColumn, fc_old._CategoricalColumn))): # pylint: disable=protected-access raise ValueError( 'Unsupported key type. All keys must be either string, or ' 'categorical column except HashedCategoricalColumn. ' 'Given: {}'.format(key)) if isinstance(key, (HashedCategoricalColumn, fc_old._HashedCategoricalColumn)): # pylint: disable=protected-access raise ValueError( 'categorical_column_with_hash_bucket is not supported for crossing. ' 'Hashing before crossing will increase probability of collision. ' 'Instead, use the feature name as a string. Given: {}'.format(key)) return CrossedColumn( keys=tuple(keys), hash_bucket_size=hash_bucket_size, hash_key=hash_key) @six.add_metaclass(abc.ABCMeta) class FeatureColumn(object): """Represents a feature column abstraction. WARNING: Do not subclass this layer unless you know what you are doing: the API is subject to future changes. 
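For orientation, concrete columns typically compose by wrapping one another;
a small illustrative chain (names and boundaries are made up):

```python
price = numeric_column('price')
bucketized_price = bucketized_column(price, boundaries=[10., 100.])
price_x_country = crossed_column([bucketized_price, 'country'],
                                 hash_bucket_size=1000)
price_x_country_embedded = embedding_column(price_x_country, dimension=8)
```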
To distinguish between the concept of a feature family and a specific binary feature within a family, we refer to a feature family like "country" as a feature column. For example, we can have a feature in a `tf.Example` format: {key: "country", value: [ "US" ]} In this example the value of the feature is "US" and "country" refers to the column of the feature. This class is an abstract class. Users should not create instances of this. """ @abc.abstractproperty def name(self): """Returns string. Used for naming.""" pass def __lt__(self, other): """Allows feature columns to be sorted in Python 3 as they are in Python 2. Feature columns need to occasionally be sortable, for example when used as keys in a features dictionary passed to a layer. In CPython, `__lt__` must be defined for all objects in the sequence being sorted. If any objects in the sequence being sorted do not have an `__lt__` method compatible with feature column objects (such as strings), then CPython will fall back to using the `__gt__` method below. https://docs.python.org/3/library/stdtypes.html#list.sort Args: other: The other object to compare to. Returns: True if the string representation of this object is lexicographically less than the string representation of `other`. For FeatureColumn objects, this looks like "<__main__.FeatureColumn object at 0xa>". """ return str(self) < str(other) def __gt__(self, other): """Allows feature columns to be sorted in Python 3 as they are in Python 2. Feature columns need to occasionally be sortable, for example when used as keys in a features dictionary passed to a layer. `__gt__` is called when the "other" object being compared during the sort does not have `__lt__` defined. Example: ``` # __lt__ only class class A(): def __lt__(self, other): return str(self) < str(other) a = A() a < "b" # True "0" < a # Error # __lt__ and __gt__ class class B(): def __lt__(self, other): return str(self) < str(other) def __gt__(self, other): return str(self) > str(other) b = B() b < "c" # True "0" < b # True ``` Args: other: The other object to compare to. Returns: True if the string representation of this object is lexicographically greater than the string representation of `other`. For FeatureColumn objects, this looks like "<__main__.FeatureColumn object at 0xa>". """ return str(self) > str(other) @abc.abstractmethod def transform_feature(self, transformation_cache, state_manager): """Returns intermediate representation (usually a `Tensor`). Uses `transformation_cache` to create an intermediate representation (usually a `Tensor`) that other feature columns can use. Example usage of `transformation_cache`: Let's say a Feature column depends on raw feature ('raw') and another `FeatureColumn` (input_fc). To access corresponding `Tensor`s, transformation_cache will be used as follows: ```python raw_tensor = transformation_cache.get('raw', state_manager) fc_tensor = transformation_cache.get(input_fc, state_manager) ``` Args: transformation_cache: A `FeatureTransformationCache` object to access features. state_manager: A `StateManager` to create / access resources such as lookup tables. Returns: Transformed feature `Tensor`. """ pass @abc.abstractproperty def parse_example_spec(self): """Returns a `tf.Example` parsing spec as dict. It is used for get_parsing_spec for `tf.io.parse_example`. Returned spec is a dict from keys ('string') to `VarLenFeature`, `FixedLenFeature`, and other supported objects. Please check documentation of `tf.io.parse_example` for all supported spec objects.
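For intuition, a spec covering one dense numeric feature and one sparse string
feature could look like this (keys are illustrative):

```python
{'price': tf.io.FixedLenFeature((1,), tf.float32),
 'keywords': tf.io.VarLenFeature(tf.string)}
```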
Let's say a Feature column depends on raw feature ('raw') and another `FeatureColumn` (input_fc). One possible implementation of parse_example_spec is as follows: ```python spec = {'raw': tf.io.FixedLenFeature(...)} spec.update(input_fc.parse_example_spec) return spec ``` """ pass def create_state(self, state_manager): """Uses the `state_manager` to create state for the FeatureColumn. Args: state_manager: A `StateManager` to create / access resources such as lookup tables and variables. """ pass @abc.abstractproperty def _is_v2_column(self): """Returns whether this FeatureColumn is fully conformant to the new API. This is needed for composition type cases where an EmbeddingColumn etc. might take in old categorical columns as input and then we want to use the old API. """ pass @abc.abstractproperty def parents(self): """Returns a list of immediate raw feature and FeatureColumn dependencies. For example: # For the following feature columns a = numeric_column('f1') c = crossed_column(a, 'f2') # The expected parents are: a.parents = ['f1'] c.parents = [a, 'f2'] """ pass def get_config(self): """Returns the config of the feature column. A FeatureColumn config is a Python dictionary (serializable) containing the configuration of a FeatureColumn. The same FeatureColumn can be reinstantiated later from this configuration. The config of a feature column does not include information about feature columns depending on it nor the FeatureColumn class name. Example with (de)serialization practices followed in this file: ```python class SerializationExampleFeatureColumn( FeatureColumn, collections.namedtuple( 'SerializationExampleFeatureColumn', ('dimension', 'parent', 'dtype', 'normalizer_fn'))): def get_config(self): # Create a dict from the namedtuple. # Python attribute literals can be directly copied from / to the config. # For example 'dimension', assuming it is an integer literal. config = dict(zip(self._fields, self)) # (De)serialization of parent FeatureColumns should use the provided # (de)serialize_feature_column() methods that take care of de-duping. config['parent'] = serialize_feature_column(self.parent) # Many objects provide custom (de)serialization e.g: for tf.DType # tf.DType.name, tf.as_dtype() can be used. config['dtype'] = self.dtype.name # Non-trivial dependencies should be Keras-(de)serializable. config['normalizer_fn'] = generic_utils.serialize_keras_object( self.normalizer_fn) return config @classmethod def from_config(cls, config, custom_objects=None, columns_by_name=None): # This should do the inverse transform from `get_config` and construct # the namedtuple. kwargs = config.copy() kwargs['parent'] = deserialize_feature_column( config['parent'], custom_objects, columns_by_name) kwargs['dtype'] = dtypes.as_dtype(config['dtype']) kwargs['normalizer_fn'] = generic_utils.deserialize_keras_object( config['normalizer_fn'], custom_objects=custom_objects) return cls(**kwargs) ``` Returns: A serializable Dict that can be used to deserialize the object with from_config. """ return self._get_config() def _get_config(self): raise NotImplementedError('Must be implemented in subclasses.') @classmethod def from_config(cls, config, custom_objects=None, columns_by_name=None): """Creates a FeatureColumn from its config. This method should be the reverse of `get_config`, capable of instantiating the same FeatureColumn from the config dictionary. See `get_config` for an example of common (de)serialization practices followed in this file. 
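Schematically, the intended contract is a lossless round trip, assuming every
field of `column` serializes losslessly:

```python
config = column.get_config()
restored = type(column).from_config(config)
assert restored == column
```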
TODO(b/118939620): This is a private method until consensus is reached on supporting object deserialization deduping within Keras. Args: config: A Dict config acquired with `get_config`. custom_objects: Optional dictionary mapping names (strings) to custom classes or functions to be considered during deserialization. columns_by_name: A Dict[String, FeatureColumn] of existing columns in order to avoid duplication. Should be passed to any calls to deserialize_feature_column(). Returns: A FeatureColumn for the input config. """ return cls._from_config(config, custom_objects, columns_by_name) @classmethod def _from_config(cls, config, custom_objects=None, columns_by_name=None): raise NotImplementedError('Must be implemented in subclasses.') class DenseColumn(FeatureColumn): """Represents a column which can be represented as `Tensor`. Some examples of this type are: numeric_column, embedding_column, indicator_column. """ @abc.abstractproperty def variable_shape(self): """`TensorShape` of `get_dense_tensor`, without batch dimension.""" pass @abc.abstractmethod def get_dense_tensor(self, transformation_cache, state_manager): """Returns a `Tensor`. The output of this function will be used by model-builder-functions. For example the pseudo code of `input_layer` will be like: ```python def input_layer(features, feature_columns, ...): outputs = [fc.get_dense_tensor(...) for fc in feature_columns] return tf.concat(outputs) ``` Args: transformation_cache: A `FeatureTransformationCache` object to access features. state_manager: A `StateManager` to create / access resources such as lookup tables. Returns: `Tensor` of shape [batch_size] + `variable_shape`. """ pass def is_feature_column_v2(feature_columns): """Returns True if all feature columns are V2.""" for feature_column in feature_columns: if not isinstance(feature_column, FeatureColumn): return False if not feature_column._is_v2_column: # pylint: disable=protected-access return False return True def _create_weighted_sum(column, transformation_cache, state_manager, sparse_combiner, weight_var): """Creates a weighted sum for a dense/categorical column for linear_model.""" if isinstance(column, CategoricalColumn): return _create_categorical_column_weighted_sum( column=column, transformation_cache=transformation_cache, state_manager=state_manager, sparse_combiner=sparse_combiner, weight_var=weight_var) else: return _create_dense_column_weighted_sum( column=column, transformation_cache=transformation_cache, state_manager=state_manager, weight_var=weight_var) def _create_dense_column_weighted_sum(column, transformation_cache, state_manager, weight_var): """Create a weighted sum of a dense column for linear_model.""" tensor = column.get_dense_tensor(transformation_cache, state_manager) num_elements = column.variable_shape.num_elements() batch_size = array_ops.shape(tensor)[0] tensor = array_ops.reshape(tensor, shape=(batch_size, num_elements)) return math_ops.matmul(tensor, weight_var, name='weighted_sum') class CategoricalColumn(FeatureColumn): """Represents a categorical feature. A categorical feature typically handled with a `tf.sparse.SparseTensor` of IDs. """ IdWeightPair = collections.namedtuple( # pylint: disable=invalid-name 'IdWeightPair', ('id_tensor', 'weight_tensor')) @abc.abstractproperty def num_buckets(self): """Returns number of buckets in this sparse feature.""" pass @abc.abstractmethod def get_sparse_tensors(self, transformation_cache, state_manager): """Returns an IdWeightPair. 
`IdWeightPair` is a pair of `SparseTensor`s which represents ids and weights. `IdWeightPair.id_tensor` is typically a `batch_size` x `num_buckets` `SparseTensor` of `int64`. `IdWeightPair.weight_tensor` is either a `SparseTensor` of `float` or `None` to indicate all weights should be taken to be 1. If specified, `weight_tensor` must have exactly the same shape and indices as `sp_ids`. Expected `SparseTensor` is same as parsing output of a `VarLenFeature` which is a ragged matrix. Args: transformation_cache: A `FeatureTransformationCache` object to access features. state_manager: A `StateManager` to create / access resources such as lookup tables. """ pass def _create_categorical_column_weighted_sum( column, transformation_cache, state_manager, sparse_combiner, weight_var): # pylint: disable=g-doc-return-or-yield,g-doc-args """Create a weighted sum of a categorical column for linear_model. Note to maintainer: As implementation details, the weighted sum is implemented via embedding_lookup_sparse toward efficiency. Mathematically, they are the same. To be specific, conceptually, categorical column can be treated as multi-hot vector. Say: ```python x = [0 0 1] # categorical column input w = [a b c] # weights ``` The weighted sum is `c` in this case, which is same as `w[2]`. Another example is ```python x = [0 1 1] # categorical column input w = [a b c] # weights ``` The weighted sum is `b + c` in this case, which is same as `w[2] + w[3]`. For both cases, we can implement weighted sum via embedding_lookup with sparse_combiner = "sum". """ sparse_tensors = column.get_sparse_tensors(transformation_cache, state_manager) id_tensor = sparse_ops.sparse_reshape(sparse_tensors.id_tensor, [ array_ops.shape(sparse_tensors.id_tensor)[0], -1 ]) weight_tensor = sparse_tensors.weight_tensor if weight_tensor is not None: weight_tensor = sparse_ops.sparse_reshape( weight_tensor, [array_ops.shape(weight_tensor)[0], -1]) return embedding_ops.safe_embedding_lookup_sparse( weight_var, id_tensor, sparse_weights=weight_tensor, combiner=sparse_combiner, name='weighted_sum') class SequenceDenseColumn(FeatureColumn): """Represents dense sequence data.""" TensorSequenceLengthPair = collections.namedtuple( # pylint: disable=invalid-name 'TensorSequenceLengthPair', ('dense_tensor', 'sequence_length')) @abc.abstractmethod def get_sequence_dense_tensor(self, transformation_cache, state_manager): """Returns a `TensorSequenceLengthPair`. Args: transformation_cache: A `FeatureTransformationCache` object to access features. state_manager: A `StateManager` to create / access resources such as lookup tables. """ pass class FeatureTransformationCache(object): """Handles caching of transformations while building the model. `FeatureColumn` specifies how to digest an input column to the network. Some feature columns require data transformations. This class caches those transformations. Some features may be used in more than one place. For example, one can use a bucketized feature by itself and a cross with it. In that case we should create only one bucketization op instead of creating ops for each feature column separately. To handle re-use of transformed columns, `FeatureTransformationCache` caches all previously transformed columns. Example: We're trying to use the following `FeatureColumn`s: ```python bucketized_age = fc.bucketized_column(fc.numeric_column("age"), ...) keywords = fc.categorical_column_with_hash_buckets("keywords", ...) age_X_keywords = fc.crossed_column([bucketized_age, "keywords"]) ... 
= linear_model(features, [bucketized_age, keywords, age_X_keywords]) ``` If we transform each column independently, then we'll get duplication of bucketization (one for cross, one for bucketization itself). The `FeatureTransformationCache` eliminates this duplication. """ def __init__(self, features): """Creates a `FeatureTransformationCache`. Args: features: A mapping from feature column to objects that are `Tensor` or `SparseTensor`, or can be converted to same via `sparse_tensor.convert_to_tensor_or_sparse_tensor`. A `string` key signifies a base feature (not-transformed). A `FeatureColumn` key means that this `Tensor` is the output of an existing `FeatureColumn` which can be reused. """ self._features = features.copy() self._feature_tensors = {} def get(self, key, state_manager, training=None): """Returns a `Tensor` for the given key. A `str` key is used to access a base feature (not-transformed). When a `FeatureColumn` is passed, the transformed feature is returned if it already exists, otherwise the given `FeatureColumn` is asked to provide its transformed output, which is then cached. Args: key: a `str` or a `FeatureColumn`. state_manager: A StateManager object that holds the FeatureColumn state. training: Boolean indicating whether the column is being used in training mode. This argument is passed to the transform_feature method of any `FeatureColumn` that takes a `training` argument. For example, if a `FeatureColumn` performed dropout, it could expose a `training` argument to control whether the dropout should be applied. Returns: The transformed `Tensor` corresponding to the `key`. Raises: ValueError: if key is not found or a transformed `Tensor` cannot be computed. """ if key in self._feature_tensors: # FeatureColumn is already transformed or converted. return self._feature_tensors[key] if key in self._features: feature_tensor = self._get_raw_feature_as_tensor(key) self._feature_tensors[key] = feature_tensor return feature_tensor if isinstance(key, six.string_types): raise ValueError('Feature {} is not in features dictionary.'.format(key)) if not isinstance(key, FeatureColumn): raise TypeError('"key" must be either a "str" or "FeatureColumn". ' 'Provided: {}'.format(key)) column = key logging.debug('Transforming feature_column %s.', column) # Some columns may need information about whether the transformation is # happening in training or prediction mode, but not all columns expose this # argument. try: transformed = column.transform_feature( self, state_manager, training=training) except TypeError: transformed = column.transform_feature(self, state_manager) if transformed is None: raise ValueError('Column {} is not supported.'.format(column.name)) self._feature_tensors[column] = transformed return transformed def _get_raw_feature_as_tensor(self, key): """Gets the raw_feature (keyed by `key`) as `tensor`. The raw feature is converted to a (sparse) tensor and its rank may be expanded. For both `Tensor` and `SparseTensor`, the rank will be expanded (to 2) if the rank is 1. Dynamic rank is supported as well. A rank-0 raw feature is not supported and will raise an error. Args: key: A `str` key to access the raw feature. Returns: A `Tensor` or `SparseTensor`. Raises: ValueError: if the raw feature has rank 0. """ raw_feature = self._features[key] feature_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor( raw_feature) def expand_dims(input_tensor): # Input_tensor must have rank 1.
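      # A SparseTensor is reshaped to [batch_size, 1] below; a dense Tensor
      # simply gets a trailing dimension appended via expand_dims.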
if isinstance(input_tensor, sparse_tensor_lib.SparseTensor): return sparse_ops.sparse_reshape( input_tensor, [array_ops.shape(input_tensor)[0], 1]) else: return array_ops.expand_dims(input_tensor, -1) rank = feature_tensor.get_shape().ndims if rank is not None: if rank == 0: raise ValueError( 'Feature (key: {}) cannot have rank 0. Given: {}'.format( key, feature_tensor)) return feature_tensor if rank != 1 else expand_dims(feature_tensor) # Handle dynamic rank. with ops.control_dependencies([ check_ops.assert_positive( array_ops.rank(feature_tensor), message='Feature (key: {}) cannot have rank 0. Given: {}'.format( key, feature_tensor))]): return control_flow_ops.cond( math_ops.equal(1, array_ops.rank(feature_tensor)), lambda: expand_dims(feature_tensor), lambda: feature_tensor) # TODO(ptucker): Move to third_party/tensorflow/python/ops/sparse_ops.py def _to_sparse_input_and_drop_ignore_values(input_tensor, ignore_value=None): """Converts a `Tensor` to a `SparseTensor`, dropping ignore_value cells. If `input_tensor` is already a `SparseTensor`, just return it. Args: input_tensor: A string or integer `Tensor`. ignore_value: Entries in `dense_tensor` equal to this value will be absent from the resulting `SparseTensor`. If `None`, default value of `dense_tensor`'s dtype will be used ('' for `str`, -1 for `int`). Returns: A `SparseTensor` with the same shape as `input_tensor`. Raises: ValueError: when `input_tensor`'s rank is `None`. """ input_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor( input_tensor) if isinstance(input_tensor, sparse_tensor_lib.SparseTensor): return input_tensor with ops.name_scope(None, 'to_sparse_input', (input_tensor, ignore_value,)): if ignore_value is None: if input_tensor.dtype == dtypes.string: # Exception due to TF strings are converted to numpy objects by default. ignore_value = '' elif input_tensor.dtype.is_integer: ignore_value = -1 # -1 has a special meaning of missing feature else: # NOTE: `as_numpy_dtype` is a property, so with the parentheses this is # constructing a new numpy object of the given type, which yields the # default value for that type. ignore_value = input_tensor.dtype.as_numpy_dtype() ignore_value = math_ops.cast( ignore_value, input_tensor.dtype, name='ignore_value') indices = array_ops.where_v2( math_ops.not_equal(input_tensor, ignore_value), name='indices') return sparse_tensor_lib.SparseTensor( indices=indices, values=array_ops.gather_nd(input_tensor, indices, name='values'), dense_shape=array_ops.shape( input_tensor, out_type=dtypes.int64, name='dense_shape')) def _normalize_feature_columns(feature_columns): """Normalizes the `feature_columns` input. This method converts the `feature_columns` to list type as best as it can. In addition, verifies the type and other parts of feature_columns, required by downstream library. Args: feature_columns: The raw feature columns, usually passed by users. Returns: The normalized feature column list. Raises: ValueError: for any invalid inputs, such as empty, duplicated names, etc. """ if isinstance(feature_columns, FeatureColumn): feature_columns = [feature_columns] if isinstance(feature_columns, collections_abc.Iterator): feature_columns = list(feature_columns) if isinstance(feature_columns, dict): raise ValueError('Expected feature_columns to be iterable, found dict.') for column in feature_columns: if not isinstance(column, FeatureColumn): raise ValueError('Items of feature_columns must be a FeatureColumn. 
' 'Given (type {}): {}.'.format(type(column), column)) if not feature_columns: raise ValueError('feature_columns must not be empty.') name_to_column = {} for column in feature_columns: if column.name in name_to_column: raise ValueError('Duplicate feature column name found for columns: {} ' 'and {}. This usually means that these columns refer to ' 'same base feature. Either one must be discarded or a ' 'duplicated but renamed item must be inserted in ' 'features dict.'.format(column, name_to_column[column.name])) name_to_column[column.name] = column return sorted(feature_columns, key=lambda x: x.name) class NumericColumn( DenseColumn, fc_old._DenseColumn, # pylint: disable=protected-access collections.namedtuple( 'NumericColumn', ('key', 'shape', 'default_value', 'dtype', 'normalizer_fn'))): """see `numeric_column`.""" @property def _is_v2_column(self): return True @property def name(self): """See `FeatureColumn` base class.""" return self.key @property def parse_example_spec(self): """See `FeatureColumn` base class.""" return { self.key: parsing_ops.FixedLenFeature(self.shape, self.dtype, self.default_value) } @property @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _parse_example_spec(self): return self.parse_example_spec def _transform_input_tensor(self, input_tensor): if isinstance(input_tensor, sparse_tensor_lib.SparseTensor): raise ValueError( 'The corresponding Tensor of numerical column must be a Tensor. ' 'SparseTensor is not supported. key: {}'.format(self.key)) if self.normalizer_fn is not None: input_tensor = self.normalizer_fn(input_tensor) return math_ops.cast(input_tensor, dtypes.float32) @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _transform_feature(self, inputs): input_tensor = inputs.get(self.key) return self._transform_input_tensor(input_tensor) def transform_feature(self, transformation_cache, state_manager): """See `FeatureColumn` base class. In this case, we apply the `normalizer_fn` to the input tensor. Args: transformation_cache: A `FeatureTransformationCache` object to access features. state_manager: A `StateManager` to create / access resources such as lookup tables. Returns: Normalized input tensor. Raises: ValueError: If a SparseTensor is passed in. """ input_tensor = transformation_cache.get(self.key, state_manager) return self._transform_input_tensor(input_tensor) @property def variable_shape(self): """See `DenseColumn` base class.""" return tensor_shape.TensorShape(self.shape) @property @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _variable_shape(self): return self.variable_shape def get_dense_tensor(self, transformation_cache, state_manager): """Returns dense `Tensor` representing numeric feature. Args: transformation_cache: A `FeatureTransformationCache` object to access features. state_manager: A `StateManager` to create / access resources such as lookup tables. Returns: Dense `Tensor` created within `transform_feature`. """ # Feature has been already transformed. Return the intermediate # representation created by _transform_feature. 
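    # transformation_cache.get(self, ...) keys on this column object, so a
    # previously computed transform_feature result is reused, not recomputed.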
return transformation_cache.get(self, state_manager) @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None): del weight_collections del trainable return inputs.get(self) @property def parents(self): """See 'FeatureColumn` base class.""" return [self.key] def get_config(self): """See 'FeatureColumn` base class.""" config = dict(zip(self._fields, self)) config['normalizer_fn'] = generic_utils.serialize_keras_object( self.normalizer_fn) config['dtype'] = self.dtype.name return config @classmethod def from_config(cls, config, custom_objects=None, columns_by_name=None): """See 'FeatureColumn` base class.""" _check_config_keys(config, cls._fields) kwargs = _standardize_and_copy_config(config) kwargs['normalizer_fn'] = generic_utils.deserialize_keras_object( config['normalizer_fn'], custom_objects=custom_objects) kwargs['dtype'] = dtypes.as_dtype(config['dtype']) return cls(**kwargs) class BucketizedColumn( DenseColumn, CategoricalColumn, fc_old._DenseColumn, # pylint: disable=protected-access fc_old._CategoricalColumn, # pylint: disable=protected-access collections.namedtuple('BucketizedColumn', ('source_column', 'boundaries'))): """See `bucketized_column`.""" @property def _is_v2_column(self): return (isinstance(self.source_column, FeatureColumn) and self.source_column._is_v2_column) # pylint: disable=protected-access @property def name(self): """See `FeatureColumn` base class.""" return '{}_bucketized'.format(self.source_column.name) @property def parse_example_spec(self): """See `FeatureColumn` base class.""" return self.source_column.parse_example_spec @property @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _parse_example_spec(self): return self.source_column._parse_example_spec # pylint: disable=protected-access @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _transform_feature(self, inputs): """Returns bucketized categorical `source_column` tensor.""" source_tensor = inputs.get(self.source_column) return math_ops._bucketize( # pylint: disable=protected-access source_tensor, boundaries=self.boundaries) def transform_feature(self, transformation_cache, state_manager): """Returns bucketized categorical `source_column` tensor.""" source_tensor = transformation_cache.get(self.source_column, state_manager) return math_ops._bucketize( # pylint: disable=protected-access source_tensor, boundaries=self.boundaries) @property def variable_shape(self): """See `DenseColumn` base class.""" return tensor_shape.TensorShape( tuple(self.source_column.shape) + (len(self.boundaries) + 1,)) @property @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _variable_shape(self): return self.variable_shape def _get_dense_tensor_for_input_tensor(self, input_tensor): return array_ops.one_hot( indices=math_ops.cast(input_tensor, dtypes.int64), depth=len(self.boundaries) + 1, on_value=1., off_value=0.) 
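  # Illustration (hypothetical values): boundaries=[0., 10.] yields three
  # buckets, so a bucketized value of 1 one-hot encodes to [0., 1., 0.].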
def get_dense_tensor(self, transformation_cache, state_manager): """Returns one hot encoded dense `Tensor`.""" input_tensor = transformation_cache.get(self, state_manager) return self._get_dense_tensor_for_input_tensor(input_tensor) @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None): del weight_collections del trainable input_tensor = inputs.get(self) return self._get_dense_tensor_for_input_tensor(input_tensor) @property def num_buckets(self): """See `CategoricalColumn` base class.""" # By construction, source_column is always one-dimensional. return (len(self.boundaries) + 1) * self.source_column.shape[0] @property @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _num_buckets(self): return self.num_buckets def _get_sparse_tensors_for_input_tensor(self, input_tensor): batch_size = array_ops.shape(input_tensor)[0] # By construction, source_column is always one-dimensional. source_dimension = self.source_column.shape[0] i1 = array_ops.reshape( array_ops.tile( array_ops.expand_dims(math_ops.range(0, batch_size), 1), [1, source_dimension]), (-1,)) i2 = array_ops.tile(math_ops.range(0, source_dimension), [batch_size]) # Flatten the bucket indices and unique them across dimensions # E.g. 2nd dimension indices will range from k to 2*k-1 with k buckets bucket_indices = ( array_ops.reshape(input_tensor, (-1,)) + (len(self.boundaries) + 1) * i2) indices = math_ops.cast( array_ops.transpose(array_ops.stack((i1, i2))), dtypes.int64) dense_shape = math_ops.cast( array_ops.stack([batch_size, source_dimension]), dtypes.int64) sparse_tensor = sparse_tensor_lib.SparseTensor( indices=indices, values=bucket_indices, dense_shape=dense_shape) return CategoricalColumn.IdWeightPair(sparse_tensor, None) def get_sparse_tensors(self, transformation_cache, state_manager): """Converts dense inputs to SparseTensor so downstream code can use it.""" input_tensor = transformation_cache.get(self, state_manager) return self._get_sparse_tensors_for_input_tensor(input_tensor) @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _get_sparse_tensors(self, inputs, weight_collections=None, trainable=None): """Converts dense inputs to SparseTensor so downstream code can use it.""" del weight_collections del trainable input_tensor = inputs.get(self) return self._get_sparse_tensors_for_input_tensor(input_tensor) @property def parents(self): """See 'FeatureColumn` base class.""" return [self.source_column] def get_config(self): """See 'FeatureColumn` base class.""" from tensorflow.python.feature_column.serialization import serialize_feature_column # pylint: disable=g-import-not-at-top config = dict(zip(self._fields, self)) config['source_column'] = serialize_feature_column(self.source_column) return config @classmethod def from_config(cls, config, custom_objects=None, columns_by_name=None): """See 'FeatureColumn` base class.""" from tensorflow.python.feature_column.serialization import deserialize_feature_column # pylint: disable=g-import-not-at-top _check_config_keys(config, cls._fields) kwargs = _standardize_and_copy_config(config) kwargs['source_column'] = deserialize_feature_column( config['source_column'], custom_objects, columns_by_name) return cls(**kwargs) class EmbeddingColumn( DenseColumn, SequenceDenseColumn, fc_old._DenseColumn, # pylint: disable=protected-access fc_old._SequenceDenseColumn, # pylint: disable=protected-access 
collections.namedtuple( 'EmbeddingColumn', ('categorical_column', 'dimension', 'combiner', 'initializer', 'ckpt_to_load_from', 'tensor_name_in_ckpt', 'max_norm', 'trainable', 'use_safe_embedding_lookup'))): """See `embedding_column`.""" def __new__(cls, categorical_column, dimension, combiner, initializer, ckpt_to_load_from, tensor_name_in_ckpt, max_norm, trainable, use_safe_embedding_lookup=True): return super(EmbeddingColumn, cls).__new__( cls, categorical_column=categorical_column, dimension=dimension, combiner=combiner, initializer=initializer, ckpt_to_load_from=ckpt_to_load_from, tensor_name_in_ckpt=tensor_name_in_ckpt, max_norm=max_norm, trainable=trainable, use_safe_embedding_lookup=use_safe_embedding_lookup) @property def _is_v2_column(self): return (isinstance(self.categorical_column, FeatureColumn) and self.categorical_column._is_v2_column) # pylint: disable=protected-access @property def name(self): """See `FeatureColumn` base class.""" return '{}_embedding'.format(self.categorical_column.name) @property def parse_example_spec(self): """See `FeatureColumn` base class.""" return self.categorical_column.parse_example_spec @property @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _parse_example_spec(self): return self.categorical_column._parse_example_spec # pylint: disable=protected-access def transform_feature(self, transformation_cache, state_manager): """Transforms underlying `categorical_column`.""" return transformation_cache.get(self.categorical_column, state_manager) @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _transform_feature(self, inputs): return inputs.get(self.categorical_column) @property def variable_shape(self): """See `DenseColumn` base class.""" return tensor_shape.TensorShape([self.dimension]) @property @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _variable_shape(self): return self.variable_shape def create_state(self, state_manager): """Creates the embedding lookup variable.""" default_num_buckets = (self.categorical_column.num_buckets if self._is_v2_column else self.categorical_column._num_buckets) # pylint: disable=protected-access num_buckets = getattr(self.categorical_column, 'num_buckets', default_num_buckets) embedding_shape = (num_buckets, self.dimension) state_manager.create_variable( self, name='embedding_weights', shape=embedding_shape, dtype=dtypes.float32, trainable=self.trainable, use_resource=True, initializer=self.initializer) def _get_dense_tensor_internal_helper(self, sparse_tensors, embedding_weights): sparse_ids = sparse_tensors.id_tensor sparse_weights = sparse_tensors.weight_tensor if self.ckpt_to_load_from is not None: to_restore = embedding_weights if isinstance(to_restore, variables.PartitionedVariable): to_restore = to_restore._get_variable_list() # pylint: disable=protected-access checkpoint_utils.init_from_checkpoint(self.ckpt_to_load_from, { self.tensor_name_in_ckpt: to_restore }) sparse_id_rank = tensor_shape.dimension_value( sparse_ids.dense_shape.get_shape()[0]) embedding_lookup_sparse = embedding_ops.safe_embedding_lookup_sparse if (not self.use_safe_embedding_lookup and sparse_id_rank is not None and sparse_id_rank <= 2): embedding_lookup_sparse = embedding_ops.embedding_lookup_sparse # Return embedding lookup result. 
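    # (safe_embedding_lookup_sparse is the default because it prunes invalid
    # ids and fills empty rows with zero vectors; the plain
    # embedding_lookup_sparse fast path chosen above only applies when the
    # caller set use_safe_embedding_lookup=False and the id tensor is at most
    # rank 2.)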
return embedding_lookup_sparse( embedding_weights, sparse_ids, sparse_weights, combiner=self.combiner, name='%s_weights' % self.name, max_norm=self.max_norm) def _get_dense_tensor_internal(self, sparse_tensors, state_manager): """Private method that follows the signature of get_dense_tensor.""" embedding_weights = state_manager.get_variable( self, name='embedding_weights') return self._get_dense_tensor_internal_helper(sparse_tensors, embedding_weights) def _old_get_dense_tensor_internal(self, sparse_tensors, weight_collections, trainable): """Private method that follows the signature of _get_dense_tensor.""" embedding_shape = (self.categorical_column._num_buckets, self.dimension) # pylint: disable=protected-access if (weight_collections and ops.GraphKeys.GLOBAL_VARIABLES not in weight_collections): weight_collections.append(ops.GraphKeys.GLOBAL_VARIABLES) embedding_weights = variable_scope.get_variable( name='embedding_weights', shape=embedding_shape, dtype=dtypes.float32, initializer=self.initializer, trainable=self.trainable and trainable, collections=weight_collections) return self._get_dense_tensor_internal_helper(sparse_tensors, embedding_weights) def get_dense_tensor(self, transformation_cache, state_manager): """Returns tensor after doing the embedding lookup. Args: transformation_cache: A `FeatureTransformationCache` object to access features. state_manager: A `StateManager` to create / access resources such as lookup tables. Returns: Embedding lookup tensor. Raises: ValueError: `categorical_column` is SequenceCategoricalColumn. """ if isinstance(self.categorical_column, SequenceCategoricalColumn): raise ValueError( 'In embedding_column: {}. ' 'categorical_column must not be of type SequenceCategoricalColumn. ' 'Suggested fix A: If you wish to use DenseFeatures, use a ' 'non-sequence categorical_column_with_*. ' 'Suggested fix B: If you wish to create sequence input, use ' 'SequenceFeatures instead of DenseFeatures. ' 'Given (type {}): {}'.format(self.name, type(self.categorical_column), self.categorical_column)) # Get sparse IDs and weights. sparse_tensors = self.categorical_column.get_sparse_tensors( transformation_cache, state_manager) return self._get_dense_tensor_internal(sparse_tensors, state_manager) @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None): if isinstance( self.categorical_column, (SequenceCategoricalColumn, fc_old._SequenceCategoricalColumn)): # pylint: disable=protected-access raise ValueError( 'In embedding_column: {}. ' 'categorical_column must not be of type _SequenceCategoricalColumn. ' 'Suggested fix A: If you wish to use DenseFeatures, use a ' 'non-sequence categorical_column_with_*. ' 'Suggested fix B: If you wish to create sequence input, use ' 'SequenceFeatures instead of DenseFeatures. ' 'Given (type {}): {}'.format(self.name, type(self.categorical_column), self.categorical_column)) sparse_tensors = self.categorical_column._get_sparse_tensors( # pylint: disable=protected-access inputs, weight_collections, trainable) return self._old_get_dense_tensor_internal(sparse_tensors, weight_collections, trainable) def get_sequence_dense_tensor(self, transformation_cache, state_manager): """See `SequenceDenseColumn` base class.""" if not isinstance(self.categorical_column, SequenceCategoricalColumn): raise ValueError( 'In embedding_column: {}. ' 'categorical_column must be of type SequenceCategoricalColumn ' 'to use SequenceFeatures. 
' 'Suggested fix: Use one of sequence_categorical_column_with_*. ' 'Given (type {}): {}'.format(self.name, type(self.categorical_column), self.categorical_column)) sparse_tensors = self.categorical_column.get_sparse_tensors( transformation_cache, state_manager) dense_tensor = self._get_dense_tensor_internal(sparse_tensors, state_manager) sequence_length = fc_utils.sequence_length_from_sparse_tensor( sparse_tensors.id_tensor) return SequenceDenseColumn.TensorSequenceLengthPair( dense_tensor=dense_tensor, sequence_length=sequence_length) @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _get_sequence_dense_tensor(self, inputs, weight_collections=None, trainable=None): if not isinstance( self.categorical_column, (SequenceCategoricalColumn, fc_old._SequenceCategoricalColumn)): # pylint: disable=protected-access raise ValueError( 'In embedding_column: {}. ' 'categorical_column must be of type SequenceCategoricalColumn ' 'to use SequenceFeatures. ' 'Suggested fix: Use one of sequence_categorical_column_with_*. ' 'Given (type {}): {}'.format(self.name, type(self.categorical_column), self.categorical_column)) sparse_tensors = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access dense_tensor = self._old_get_dense_tensor_internal( sparse_tensors, weight_collections=weight_collections, trainable=trainable) sequence_length = fc_utils.sequence_length_from_sparse_tensor( sparse_tensors.id_tensor) return SequenceDenseColumn.TensorSequenceLengthPair( dense_tensor=dense_tensor, sequence_length=sequence_length) @property def parents(self): """See 'FeatureColumn` base class.""" return [self.categorical_column] def get_config(self): """See 'FeatureColumn` base class.""" from tensorflow.python.feature_column.serialization import serialize_feature_column # pylint: disable=g-import-not-at-top config = dict(zip(self._fields, self)) config['categorical_column'] = serialize_feature_column( self.categorical_column) config['initializer'] = initializers.serialize(self.initializer) return config @classmethod def from_config(cls, config, custom_objects=None, columns_by_name=None): """See 'FeatureColumn` base class.""" if 'use_safe_embedding_lookup' not in config: config['use_safe_embedding_lookup'] = True from tensorflow.python.feature_column.serialization import deserialize_feature_column # pylint: disable=g-import-not-at-top _check_config_keys(config, cls._fields) kwargs = _standardize_and_copy_config(config) kwargs['categorical_column'] = deserialize_feature_column( config['categorical_column'], custom_objects, columns_by_name) kwargs['initializer'] = initializers.deserialize( config['initializer'], custom_objects=custom_objects) return cls(**kwargs) def _raise_shared_embedding_column_error(): raise ValueError('SharedEmbeddingColumns are not supported in ' '`linear_model` or `input_layer`. Please use ' '`DenseFeatures` or `LinearModel` instead.') class SharedEmbeddingColumnCreator(tracking.AutoTrackable): def __init__(self, dimension, initializer, ckpt_to_load_from, tensor_name_in_ckpt, num_buckets, trainable, name='shared_embedding_column_creator', use_safe_embedding_lookup=True): self._dimension = dimension self._initializer = initializer self._ckpt_to_load_from = ckpt_to_load_from self._tensor_name_in_ckpt = tensor_name_in_ckpt self._num_buckets = num_buckets self._trainable = trainable self._name = name self._use_safe_embedding_lookup = use_safe_embedding_lookup # Map from graph keys to embedding_weight variables. 
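    # (One entry per graph key, so a single creator can serve several graphs,
    # e.g. separate train and eval graphs, each building its own variable.)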
self._embedding_weights = {} def __call__(self, categorical_column, combiner, max_norm): return SharedEmbeddingColumn(categorical_column, self, combiner, max_norm, self._use_safe_embedding_lookup) @property def embedding_weights(self): key = ops.get_default_graph()._graph_key # pylint: disable=protected-access if key not in self._embedding_weights: embedding_shape = (self._num_buckets, self._dimension) var = variable_scope.get_variable( name=self._name, shape=embedding_shape, dtype=dtypes.float32, initializer=self._initializer, trainable=self._trainable) if self._ckpt_to_load_from is not None: to_restore = var if isinstance(to_restore, variables.PartitionedVariable): to_restore = to_restore._get_variable_list() # pylint: disable=protected-access checkpoint_utils.init_from_checkpoint( self._ckpt_to_load_from, {self._tensor_name_in_ckpt: to_restore}) self._embedding_weights[key] = var return self._embedding_weights[key] @property def dimension(self): return self._dimension class SharedEmbeddingColumn( DenseColumn, SequenceDenseColumn, fc_old._DenseColumn, # pylint: disable=protected-access fc_old._SequenceDenseColumn, # pylint: disable=protected-access collections.namedtuple( 'SharedEmbeddingColumn', ('categorical_column', 'shared_embedding_column_creator', 'combiner', 'max_norm', 'use_safe_embedding_lookup'))): """See `embedding_column`.""" def __new__(cls, categorical_column, shared_embedding_column_creator, combiner, max_norm, use_safe_embedding_lookup=True): return super(SharedEmbeddingColumn, cls).__new__( cls, categorical_column=categorical_column, shared_embedding_column_creator=shared_embedding_column_creator, combiner=combiner, max_norm=max_norm, use_safe_embedding_lookup=use_safe_embedding_lookup) @property def _is_v2_column(self): return True @property def name(self): """See `FeatureColumn` base class.""" return '{}_shared_embedding'.format(self.categorical_column.name) @property def parse_example_spec(self): """See `FeatureColumn` base class.""" return self.categorical_column.parse_example_spec @property def _parse_example_spec(self): return _raise_shared_embedding_column_error() def transform_feature(self, transformation_cache, state_manager): """See `FeatureColumn` base class.""" return transformation_cache.get(self.categorical_column, state_manager) def _transform_feature(self, inputs): return _raise_shared_embedding_column_error() @property def variable_shape(self): """See `DenseColumn` base class.""" return tensor_shape.TensorShape( [self.shared_embedding_column_creator.dimension]) @property def _variable_shape(self): return _raise_shared_embedding_column_error() def _get_dense_tensor_internal(self, transformation_cache, state_manager): """Private method that follows the signature of _get_dense_tensor.""" # This method is called from a variable_scope with name _var_scope_name, # which is shared among all shared embeddings. Open a name_scope here, so # that the ops for different columns have distinct names. with ops.name_scope(None, default_name=self.name): # Get sparse IDs and weights. 
sparse_tensors = self.categorical_column.get_sparse_tensors( transformation_cache, state_manager) sparse_ids = sparse_tensors.id_tensor sparse_weights = sparse_tensors.weight_tensor embedding_weights = self.shared_embedding_column_creator.embedding_weights sparse_id_rank = tensor_shape.dimension_value( sparse_ids.dense_shape.get_shape()[0]) embedding_lookup_sparse = embedding_ops.safe_embedding_lookup_sparse if (not self.use_safe_embedding_lookup and sparse_id_rank is not None and sparse_id_rank <= 2): embedding_lookup_sparse = (embedding_ops.embedding_lookup_sparse) # Return embedding lookup result. return embedding_lookup_sparse( embedding_weights, sparse_ids, sparse_weights, combiner=self.combiner, name='%s_weights' % self.name, max_norm=self.max_norm) def get_dense_tensor(self, transformation_cache, state_manager): """Returns the embedding lookup result.""" if isinstance(self.categorical_column, SequenceCategoricalColumn): raise ValueError( 'In embedding_column: {}. ' 'categorical_column must not be of type SequenceCategoricalColumn. ' 'Suggested fix A: If you wish to use DenseFeatures, use a ' 'non-sequence categorical_column_with_*. ' 'Suggested fix B: If you wish to create sequence input, use ' 'SequenceFeatures instead of DenseFeatures. ' 'Given (type {}): {}'.format(self.name, type(self.categorical_column), self.categorical_column)) return self._get_dense_tensor_internal(transformation_cache, state_manager) def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None): return _raise_shared_embedding_column_error() def get_sequence_dense_tensor(self, transformation_cache, state_manager): """See `SequenceDenseColumn` base class.""" if not isinstance(self.categorical_column, SequenceCategoricalColumn): raise ValueError( 'In embedding_column: {}. ' 'categorical_column must be of type SequenceCategoricalColumn ' 'to use SequenceFeatures. ' 'Suggested fix: Use one of sequence_categorical_column_with_*. ' 'Given (type {}): {}'.format(self.name, type(self.categorical_column), self.categorical_column)) dense_tensor = self._get_dense_tensor_internal(transformation_cache, state_manager) sparse_tensors = self.categorical_column.get_sparse_tensors( transformation_cache, state_manager) sequence_length = fc_utils.sequence_length_from_sparse_tensor( sparse_tensors.id_tensor) return SequenceDenseColumn.TensorSequenceLengthPair( dense_tensor=dense_tensor, sequence_length=sequence_length) def _get_sequence_dense_tensor(self, inputs, weight_collections=None, trainable=None): return _raise_shared_embedding_column_error() @property def parents(self): """See 'FeatureColumn` base class.""" return [self.categorical_column] def _check_shape(shape, key): """Returns shape if it's valid, raises error otherwise.""" assert shape is not None if not nest.is_sequence(shape): shape = [shape] shape = tuple(shape) for dimension in shape: if not isinstance(dimension, int): raise TypeError('shape dimensions must be integer. ' 'shape: {}, key: {}'.format(shape, key)) if dimension < 1: raise ValueError('shape dimensions must be greater than 0. 
' 'shape: {}, key: {}'.format(shape, key)) return shape class HashedCategoricalColumn( CategoricalColumn, fc_old._CategoricalColumn, # pylint: disable=protected-access collections.namedtuple('HashedCategoricalColumn', ('key', 'hash_bucket_size', 'dtype'))): """see `categorical_column_with_hash_bucket`.""" @property def _is_v2_column(self): return True @property def name(self): """See `FeatureColumn` base class.""" return self.key @property def parse_example_spec(self): """See `FeatureColumn` base class.""" return {self.key: parsing_ops.VarLenFeature(self.dtype)} @property @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _parse_example_spec(self): return self.parse_example_spec def _transform_input_tensor(self, input_tensor): """Hashes the values in the feature_column.""" if not isinstance(input_tensor, sparse_tensor_lib.SparseTensor): raise ValueError('SparseColumn input must be a SparseTensor.') fc_utils.assert_string_or_int( input_tensor.dtype, prefix='column_name: {} input_tensor'.format(self.key)) if self.dtype.is_integer != input_tensor.dtype.is_integer: raise ValueError( 'Column dtype and SparseTensors dtype must be compatible. ' 'key: {}, column dtype: {}, tensor dtype: {}'.format( self.key, self.dtype, input_tensor.dtype)) if self.dtype == dtypes.string: sparse_values = input_tensor.values else: sparse_values = string_ops.as_string(input_tensor.values) sparse_id_values = string_ops.string_to_hash_bucket_fast( sparse_values, self.hash_bucket_size, name='lookup') return sparse_tensor_lib.SparseTensor( input_tensor.indices, sparse_id_values, input_tensor.dense_shape) def transform_feature(self, transformation_cache, state_manager): """Hashes the values in the feature_column.""" input_tensor = _to_sparse_input_and_drop_ignore_values( transformation_cache.get(self.key, state_manager)) return self._transform_input_tensor(input_tensor) @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _transform_feature(self, inputs): input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key)) return self._transform_input_tensor(input_tensor) @property def num_buckets(self): """Returns number of buckets in this sparse feature.""" return self.hash_bucket_size @property @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _num_buckets(self): return self.num_buckets def get_sparse_tensors(self, transformation_cache, state_manager): """See `CategoricalColumn` base class.""" return CategoricalColumn.IdWeightPair( transformation_cache.get(self, state_manager), None) @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _get_sparse_tensors(self, inputs, weight_collections=None, trainable=None): del weight_collections del trainable return CategoricalColumn.IdWeightPair(inputs.get(self), None) @property def parents(self): """See 'FeatureColumn` base class.""" return [self.key] def get_config(self): """See 'FeatureColumn` base class.""" config = dict(zip(self._fields, self)) config['dtype'] = self.dtype.name return config @classmethod def from_config(cls, config, custom_objects=None, columns_by_name=None): """See 'FeatureColumn` base class.""" _check_config_keys(config, cls._fields) kwargs = _standardize_and_copy_config(config) kwargs['dtype'] = dtypes.as_dtype(config['dtype']) return cls(**kwargs) class VocabularyFileCategoricalColumn( CategoricalColumn, fc_old._CategoricalColumn, # pylint: disable=protected-access 
collections.namedtuple('VocabularyFileCategoricalColumn', ('key', 'vocabulary_file', 'vocabulary_size', 'num_oov_buckets', 'dtype', 'default_value'))): """See `categorical_column_with_vocabulary_file`.""" @property def _is_v2_column(self): return True @property def name(self): """See `FeatureColumn` base class.""" return self.key @property def parse_example_spec(self): """See `FeatureColumn` base class.""" return {self.key: parsing_ops.VarLenFeature(self.dtype)} @property @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _parse_example_spec(self): return self.parse_example_spec def _transform_input_tensor(self, input_tensor, state_manager=None): """Creates a lookup table for the vocabulary.""" if self.dtype.is_integer != input_tensor.dtype.is_integer: raise ValueError( 'Column dtype and SparseTensors dtype must be compatible. ' 'key: {}, column dtype: {}, tensor dtype: {}'.format( self.key, self.dtype, input_tensor.dtype)) fc_utils.assert_string_or_int( input_tensor.dtype, prefix='column_name: {} input_tensor'.format(self.key)) key_dtype = self.dtype if input_tensor.dtype.is_integer: # `index_table_from_file` requires 64-bit integer keys. key_dtype = dtypes.int64 input_tensor = math_ops.cast(input_tensor, dtypes.int64) name = '{}_lookup'.format(self.key) if state_manager is None or not state_manager.has_resource(self, name): with ops.init_scope(): table = lookup_ops.index_table_from_file( vocabulary_file=self.vocabulary_file, num_oov_buckets=self.num_oov_buckets, vocab_size=self.vocabulary_size, default_value=self.default_value, key_dtype=key_dtype, name=name) if state_manager is not None: state_manager.add_resource(self, name, table) else: # Reuse the table from the previous run. table = state_manager.get_resource(self, name) return table.lookup(input_tensor) def transform_feature(self, transformation_cache, state_manager): """Creates a lookup table for the vocabulary.""" input_tensor = _to_sparse_input_and_drop_ignore_values( transformation_cache.get(self.key, state_manager)) return self._transform_input_tensor(input_tensor, state_manager) @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _transform_feature(self, inputs): input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key)) return self._transform_input_tensor(input_tensor) @property def num_buckets(self): """Returns number of buckets in this sparse feature.""" return self.vocabulary_size + self.num_oov_buckets @property @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _num_buckets(self): return self.num_buckets def get_sparse_tensors(self, transformation_cache, state_manager): """See `CategoricalColumn` base class.""" return CategoricalColumn.IdWeightPair( transformation_cache.get(self, state_manager), None) @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _get_sparse_tensors(self, inputs, weight_collections=None, trainable=None): del weight_collections del trainable return CategoricalColumn.IdWeightPair(inputs.get(self), None) @property def parents(self): """See 'FeatureColumn` base class.""" return [self.key] def get_config(self): """See 'FeatureColumn` base class.""" config = dict(zip(self._fields, self)) config['dtype'] = self.dtype.name return config @classmethod def from_config(cls, config, custom_objects=None, columns_by_name=None): """See 'FeatureColumn` base class.""" _check_config_keys(config, cls._fields) kwargs = 
_standardize_and_copy_config(config) kwargs['dtype'] = dtypes.as_dtype(config['dtype']) return cls(**kwargs) class VocabularyListCategoricalColumn( CategoricalColumn, fc_old._CategoricalColumn, # pylint: disable=protected-access collections.namedtuple( 'VocabularyListCategoricalColumn', ('key', 'vocabulary_list', 'dtype', 'default_value', 'num_oov_buckets')) ): """See `categorical_column_with_vocabulary_list`.""" @property def _is_v2_column(self): return True @property def name(self): """See `FeatureColumn` base class.""" return self.key @property def parse_example_spec(self): """See `FeatureColumn` base class.""" return {self.key: parsing_ops.VarLenFeature(self.dtype)} @property @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _parse_example_spec(self): return self.parse_example_spec def _transform_input_tensor(self, input_tensor, state_manager=None): """Creates a lookup table for the vocabulary list.""" if self.dtype.is_integer != input_tensor.dtype.is_integer: raise ValueError( 'Column dtype and SparseTensors dtype must be compatible. ' 'key: {}, column dtype: {}, tensor dtype: {}'.format( self.key, self.dtype, input_tensor.dtype)) fc_utils.assert_string_or_int( input_tensor.dtype, prefix='column_name: {} input_tensor'.format(self.key)) key_dtype = self.dtype if input_tensor.dtype.is_integer: # `index_table_from_tensor` requires 64-bit integer keys. key_dtype = dtypes.int64 input_tensor = math_ops.cast(input_tensor, dtypes.int64) name = '{}_lookup'.format(self.key) if state_manager is None or not state_manager.has_resource(self, name): with ops.init_scope(): table = lookup_ops.index_table_from_tensor( vocabulary_list=tuple(self.vocabulary_list), default_value=self.default_value, num_oov_buckets=self.num_oov_buckets, dtype=key_dtype, name=name) if state_manager is not None: state_manager.add_resource(self, name, table) else: # Reuse the table from the previous run. 
table = state_manager.get_resource(self, name) return table.lookup(input_tensor) def transform_feature(self, transformation_cache, state_manager): """Creates a lookup table for the vocabulary list.""" input_tensor = _to_sparse_input_and_drop_ignore_values( transformation_cache.get(self.key, state_manager)) return self._transform_input_tensor(input_tensor, state_manager) @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _transform_feature(self, inputs): input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key)) return self._transform_input_tensor(input_tensor) @property def num_buckets(self): """Returns number of buckets in this sparse feature.""" return len(self.vocabulary_list) + self.num_oov_buckets @property @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _num_buckets(self): return self.num_buckets def get_sparse_tensors(self, transformation_cache, state_manager): """See `CategoricalColumn` base class.""" return CategoricalColumn.IdWeightPair( transformation_cache.get(self, state_manager), None) @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _get_sparse_tensors(self, inputs, weight_collections=None, trainable=None): del weight_collections del trainable return CategoricalColumn.IdWeightPair(inputs.get(self), None) @property def parents(self): """See 'FeatureColumn` base class.""" return [self.key] def get_config(self): """See 'FeatureColumn` base class.""" config = dict(zip(self._fields, self)) config['dtype'] = self.dtype.name return config @classmethod def from_config(cls, config, custom_objects=None, columns_by_name=None): """See 'FeatureColumn` base class.""" _check_config_keys(config, cls._fields) kwargs = _standardize_and_copy_config(config) kwargs['dtype'] = dtypes.as_dtype(config['dtype']) return cls(**kwargs) class IdentityCategoricalColumn( CategoricalColumn, fc_old._CategoricalColumn, # pylint: disable=protected-access collections.namedtuple('IdentityCategoricalColumn', ('key', 'number_buckets', 'default_value'))): """See `categorical_column_with_identity`.""" @property def _is_v2_column(self): return True @property def name(self): """See `FeatureColumn` base class.""" return self.key @property def parse_example_spec(self): """See `FeatureColumn` base class.""" return {self.key: parsing_ops.VarLenFeature(dtypes.int64)} @property @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _parse_example_spec(self): return self.parse_example_spec def _transform_input_tensor(self, input_tensor): """Returns a SparseTensor with identity values.""" if not input_tensor.dtype.is_integer: raise ValueError( 'Invalid input, not integer. key: {} dtype: {}'.format( self.key, input_tensor.dtype)) values = input_tensor.values if input_tensor.values.dtype != dtypes.int64: values = math_ops.cast(values, dtypes.int64, name='values') if self.default_value is not None: values = math_ops.cast(input_tensor.values, dtypes.int64, name='values') num_buckets = math_ops.cast( self.num_buckets, dtypes.int64, name='num_buckets') zero = math_ops.cast(0, dtypes.int64, name='zero') # Assign default for out-of-range values. 
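    # (Illustrative: with num_buckets=3 and default_value=0, ids [1, 5, -2]
    # become [1, 0, 0] in the where_v2 below; when default_value is None this
    # branch is skipped and out-of-range ids pass through unchanged.)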
values = array_ops.where_v2( math_ops.logical_or( values < zero, values >= num_buckets, name='out_of_range'), array_ops.fill( dims=array_ops.shape(values), value=math_ops.cast(self.default_value, dtypes.int64), name='default_values'), values) return sparse_tensor_lib.SparseTensor( indices=input_tensor.indices, values=values, dense_shape=input_tensor.dense_shape) def transform_feature(self, transformation_cache, state_manager): """Returns a SparseTensor with identity values.""" input_tensor = _to_sparse_input_and_drop_ignore_values( transformation_cache.get(self.key, state_manager)) return self._transform_input_tensor(input_tensor) @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _transform_feature(self, inputs): input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key)) return self._transform_input_tensor(input_tensor) @property def num_buckets(self): """Returns number of buckets in this sparse feature.""" return self.number_buckets @property @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _num_buckets(self): return self.num_buckets def get_sparse_tensors(self, transformation_cache, state_manager): """See `CategoricalColumn` base class.""" return CategoricalColumn.IdWeightPair( transformation_cache.get(self, state_manager), None) @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _get_sparse_tensors(self, inputs, weight_collections=None, trainable=None): del weight_collections del trainable return CategoricalColumn.IdWeightPair(inputs.get(self), None) @property def parents(self): """See 'FeatureColumn` base class.""" return [self.key] def get_config(self): """See 'FeatureColumn` base class.""" return dict(zip(self._fields, self)) @classmethod def from_config(cls, config, custom_objects=None, columns_by_name=None): """See 'FeatureColumn` base class.""" _check_config_keys(config, cls._fields) kwargs = _standardize_and_copy_config(config) return cls(**kwargs) class WeightedCategoricalColumn( CategoricalColumn, fc_old._CategoricalColumn, # pylint: disable=protected-access collections.namedtuple( 'WeightedCategoricalColumn', ('categorical_column', 'weight_feature_key', 'dtype'))): """See `weighted_categorical_column`.""" @property def _is_v2_column(self): return (isinstance(self.categorical_column, FeatureColumn) and self.categorical_column._is_v2_column) # pylint: disable=protected-access @property def name(self): """See `FeatureColumn` base class.""" return '{}_weighted_by_{}'.format( self.categorical_column.name, self.weight_feature_key) @property def parse_example_spec(self): """See `FeatureColumn` base class.""" config = self.categorical_column.parse_example_spec if self.weight_feature_key in config: raise ValueError('Parse config {} already exists for {}.'.format( config[self.weight_feature_key], self.weight_feature_key)) config[self.weight_feature_key] = parsing_ops.VarLenFeature(self.dtype) return config @property @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _parse_example_spec(self): config = self.categorical_column._parse_example_spec # pylint: disable=protected-access if self.weight_feature_key in config: raise ValueError('Parse config {} already exists for {}.'.format( config[self.weight_feature_key], self.weight_feature_key)) config[self.weight_feature_key] = parsing_ops.VarLenFeature(self.dtype) return config @property def num_buckets(self): """See `DenseColumn` base class.""" return 
self.categorical_column.num_buckets @property @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _num_buckets(self): return self.categorical_column._num_buckets # pylint: disable=protected-access def _transform_weight_tensor(self, weight_tensor): if weight_tensor is None: raise ValueError('Missing weights {}.'.format(self.weight_feature_key)) weight_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor( weight_tensor) if self.dtype != weight_tensor.dtype.base_dtype: raise ValueError('Bad dtype, expected {}, but got {}.'.format( self.dtype, weight_tensor.dtype)) if not isinstance(weight_tensor, sparse_tensor_lib.SparseTensor): # The weight tensor can be a regular Tensor. In this case, sparsify it. weight_tensor = _to_sparse_input_and_drop_ignore_values( weight_tensor, ignore_value=0.0) if not weight_tensor.dtype.is_floating: weight_tensor = math_ops.cast(weight_tensor, dtypes.float32) return weight_tensor def transform_feature(self, transformation_cache, state_manager): """Applies weights to tensor generated from `categorical_column`'.""" weight_tensor = transformation_cache.get(self.weight_feature_key, state_manager) sparse_weight_tensor = self._transform_weight_tensor(weight_tensor) sparse_categorical_tensor = _to_sparse_input_and_drop_ignore_values( transformation_cache.get(self.categorical_column, state_manager)) return (sparse_categorical_tensor, sparse_weight_tensor) @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _transform_feature(self, inputs): """Applies weights to tensor generated from `categorical_column`'.""" weight_tensor = inputs.get(self.weight_feature_key) weight_tensor = self._transform_weight_tensor(weight_tensor) return (inputs.get(self.categorical_column), weight_tensor) def get_sparse_tensors(self, transformation_cache, state_manager): """See `CategoricalColumn` base class.""" tensors = transformation_cache.get(self, state_manager) return CategoricalColumn.IdWeightPair(tensors[0], tensors[1]) @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _get_sparse_tensors(self, inputs, weight_collections=None, trainable=None): del weight_collections del trainable tensors = inputs.get(self) return CategoricalColumn.IdWeightPair(tensors[0], tensors[1]) @property def parents(self): """See 'FeatureColumn` base class.""" return [self.categorical_column, self.weight_feature_key] def get_config(self): """See 'FeatureColumn` base class.""" from tensorflow.python.feature_column.serialization import serialize_feature_column # pylint: disable=g-import-not-at-top config = dict(zip(self._fields, self)) config['categorical_column'] = serialize_feature_column( self.categorical_column) config['dtype'] = self.dtype.name return config @classmethod def from_config(cls, config, custom_objects=None, columns_by_name=None): """See 'FeatureColumn` base class.""" from tensorflow.python.feature_column.serialization import deserialize_feature_column # pylint: disable=g-import-not-at-top _check_config_keys(config, cls._fields) kwargs = _standardize_and_copy_config(config) kwargs['categorical_column'] = deserialize_feature_column( config['categorical_column'], custom_objects, columns_by_name) kwargs['dtype'] = dtypes.as_dtype(config['dtype']) return cls(**kwargs) class CrossedColumn( CategoricalColumn, fc_old._CategoricalColumn, # pylint: disable=protected-access collections.namedtuple('CrossedColumn', ('keys', 'hash_bucket_size', 'hash_key'))): """See `crossed_column`.""" 
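  # A hedged usage sketch (comments only; the 'lat_bucket' and 'lon_bucket'
  # keys are hypothetical):
  #
  #   cross = tf.feature_column.crossed_column(
  #       ['lat_bucket', 'lon_bucket'], hash_bucket_size=1000)
  #
  # transform_feature below flattens nested crosses to leaf keys and maps
  # each co-occurring combination of leaf values to
  # hash(combination) % hash_bucket_size via sparse_cross_hashed, so
  # num_buckets equals hash_bucket_size regardless of input vocabulary sizes.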
@property def _is_v2_column(self): for key in _collect_leaf_level_keys(self): if isinstance(key, six.string_types): continue if not isinstance(key, FeatureColumn): return False if not key._is_v2_column: # pylint: disable=protected-access return False return True @property def name(self): """See `FeatureColumn` base class.""" feature_names = [] for key in _collect_leaf_level_keys(self): if isinstance(key, (FeatureColumn, fc_old._FeatureColumn)): # pylint: disable=protected-access feature_names.append(key.name) else: # key must be a string feature_names.append(key) return '_X_'.join(sorted(feature_names)) @property def parse_example_spec(self): """See `FeatureColumn` base class.""" config = {} for key in self.keys: if isinstance(key, FeatureColumn): config.update(key.parse_example_spec) elif isinstance(key, fc_old._FeatureColumn): # pylint: disable=protected-access config.update(key._parse_example_spec) # pylint: disable=protected-access else: # key must be a string config.update({key: parsing_ops.VarLenFeature(dtypes.string)}) return config @property @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _parse_example_spec(self): return self.parse_example_spec def transform_feature(self, transformation_cache, state_manager): """Generates a hashed sparse cross from the input tensors.""" feature_tensors = [] for key in _collect_leaf_level_keys(self): if isinstance(key, six.string_types): feature_tensors.append(transformation_cache.get(key, state_manager)) elif isinstance(key, (fc_old._CategoricalColumn, CategoricalColumn)): # pylint: disable=protected-access ids_and_weights = key.get_sparse_tensors(transformation_cache, state_manager) if ids_and_weights.weight_tensor is not None: raise ValueError( 'crossed_column does not support weight_tensor, but the given ' 'column populates weight_tensor. ' 'Given column: {}'.format(key.name)) feature_tensors.append(ids_and_weights.id_tensor) else: raise ValueError('Unsupported column type. Given: {}'.format(key)) return sparse_ops.sparse_cross_hashed( inputs=feature_tensors, num_buckets=self.hash_bucket_size, hash_key=self.hash_key) @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _transform_feature(self, inputs): """Generates a hashed sparse cross from the input tensors.""" feature_tensors = [] for key in _collect_leaf_level_keys(self): if isinstance(key, six.string_types): feature_tensors.append(inputs.get(key)) elif isinstance(key, (CategoricalColumn, fc_old._CategoricalColumn)): # pylint: disable=protected-access ids_and_weights = key._get_sparse_tensors(inputs) # pylint: disable=protected-access if ids_and_weights.weight_tensor is not None: raise ValueError( 'crossed_column does not support weight_tensor, but the given ' 'column populates weight_tensor. ' 'Given column: {}'.format(key.name)) feature_tensors.append(ids_and_weights.id_tensor) else: raise ValueError('Unsupported column type. 
Given: {}'.format(key)) return sparse_ops.sparse_cross_hashed( inputs=feature_tensors, num_buckets=self.hash_bucket_size, hash_key=self.hash_key) @property def num_buckets(self): """Returns number of buckets in this sparse feature.""" return self.hash_bucket_size @property @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _num_buckets(self): return self.num_buckets def get_sparse_tensors(self, transformation_cache, state_manager): """See `CategoricalColumn` base class.""" return CategoricalColumn.IdWeightPair( transformation_cache.get(self, state_manager), None) @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _get_sparse_tensors(self, inputs, weight_collections=None, trainable=None): """See `CategoricalColumn` base class.""" del weight_collections del trainable return CategoricalColumn.IdWeightPair(inputs.get(self), None) @property def parents(self): """See 'FeatureColumn` base class.""" return list(self.keys) def get_config(self): """See 'FeatureColumn` base class.""" from tensorflow.python.feature_column.serialization import serialize_feature_column # pylint: disable=g-import-not-at-top config = dict(zip(self._fields, self)) config['keys'] = tuple([serialize_feature_column(fc) for fc in self.keys]) return config @classmethod def from_config(cls, config, custom_objects=None, columns_by_name=None): """See 'FeatureColumn` base class.""" from tensorflow.python.feature_column.serialization import deserialize_feature_column # pylint: disable=g-import-not-at-top _check_config_keys(config, cls._fields) kwargs = _standardize_and_copy_config(config) kwargs['keys'] = tuple([ deserialize_feature_column(c, custom_objects, columns_by_name) for c in config['keys'] ]) return cls(**kwargs) def _collect_leaf_level_keys(cross): """Collects base keys by expanding all nested crosses. Args: cross: A `CrossedColumn`. Returns: A list of strings or `CategoricalColumn` instances. """ leaf_level_keys = [] for k in cross.keys: if isinstance(k, CrossedColumn): leaf_level_keys.extend(_collect_leaf_level_keys(k)) else: leaf_level_keys.append(k) return leaf_level_keys def _prune_invalid_ids(sparse_ids, sparse_weights): """Prune invalid IDs (< 0) from the input ids and weights.""" is_id_valid = math_ops.greater_equal(sparse_ids.values, 0) if sparse_weights is not None: is_id_valid = math_ops.logical_and( is_id_valid, array_ops.ones_like(sparse_weights.values, dtype=dtypes.bool)) sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_id_valid) if sparse_weights is not None: sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_id_valid) return sparse_ids, sparse_weights def _prune_invalid_weights(sparse_ids, sparse_weights): """Prune invalid weights (< 0) from the input ids and weights.""" if sparse_weights is not None: is_weights_valid = math_ops.greater(sparse_weights.values, 0) sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_weights_valid) sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_weights_valid) return sparse_ids, sparse_weights class IndicatorColumn( DenseColumn, SequenceDenseColumn, fc_old._DenseColumn, # pylint: disable=protected-access fc_old._SequenceDenseColumn, # pylint: disable=protected-access collections.namedtuple('IndicatorColumn', ('categorical_column'))): """Represents a one-hot column for use in deep networks. Args: categorical_column: A `CategoricalColumn` which is created by `categorical_column_with_*` function. 
""" @property def _is_v2_column(self): return (isinstance(self.categorical_column, FeatureColumn) and self.categorical_column._is_v2_column) # pylint: disable=protected-access @property def name(self): """See `FeatureColumn` base class.""" return '{}_indicator'.format(self.categorical_column.name) def _transform_id_weight_pair(self, id_weight_pair, size): id_tensor = id_weight_pair.id_tensor weight_tensor = id_weight_pair.weight_tensor # If the underlying column is weighted, return the input as a dense tensor. if weight_tensor is not None: weighted_column = sparse_ops.sparse_merge( sp_ids=id_tensor, sp_values=weight_tensor, vocab_size=int(size)) # Remove (?, -1) index. weighted_column = sparse_ops.sparse_slice(weighted_column, [0, 0], weighted_column.dense_shape) # Use scatter_nd to merge duplicated indices if existed, # instead of sparse_tensor_to_dense. return array_ops.scatter_nd(weighted_column.indices, weighted_column.values, weighted_column.dense_shape) dense_id_tensor = sparse_ops.sparse_tensor_to_dense( id_tensor, default_value=-1) # One hot must be float for tf.concat reasons since all other inputs to # input_layer are float32. one_hot_id_tensor = array_ops.one_hot( dense_id_tensor, depth=size, on_value=1.0, off_value=0.0) # Reduce to get a multi-hot per example. return math_ops.reduce_sum(one_hot_id_tensor, axis=[-2]) def transform_feature(self, transformation_cache, state_manager): """Returns dense `Tensor` representing feature. Args: transformation_cache: A `FeatureTransformationCache` object to access features. state_manager: A `StateManager` to create / access resources such as lookup tables. Returns: Transformed feature `Tensor`. Raises: ValueError: if input rank is not known at graph building time. """ id_weight_pair = self.categorical_column.get_sparse_tensors( transformation_cache, state_manager) return self._transform_id_weight_pair(id_weight_pair, self.variable_shape[-1]) @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _transform_feature(self, inputs): id_weight_pair = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access return self._transform_id_weight_pair(id_weight_pair, self._variable_shape[-1]) @property def parse_example_spec(self): """See `FeatureColumn` base class.""" return self.categorical_column.parse_example_spec @property @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _parse_example_spec(self): return self.categorical_column._parse_example_spec # pylint: disable=protected-access @property def variable_shape(self): """Returns a `TensorShape` representing the shape of the dense `Tensor`.""" if isinstance(self.categorical_column, FeatureColumn): return tensor_shape.TensorShape([1, self.categorical_column.num_buckets]) else: return tensor_shape.TensorShape([1, self.categorical_column._num_buckets]) # pylint: disable=protected-access @property @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _variable_shape(self): return tensor_shape.TensorShape([1, self.categorical_column._num_buckets]) # pylint: disable=protected-access def get_dense_tensor(self, transformation_cache, state_manager): """Returns dense `Tensor` representing feature. Args: transformation_cache: A `FeatureTransformationCache` object to access features. state_manager: A `StateManager` to create / access resources such as lookup tables. Returns: Dense `Tensor` created within `transform_feature`. 
Raises: ValueError: If `categorical_column` is a `SequenceCategoricalColumn`. """ if isinstance(self.categorical_column, SequenceCategoricalColumn): raise ValueError( 'In indicator_column: {}. ' 'categorical_column must not be of type SequenceCategoricalColumn. ' 'Suggested fix A: If you wish to use DenseFeatures, use a ' 'non-sequence categorical_column_with_*. ' 'Suggested fix B: If you wish to create sequence input, use ' 'SequenceFeatures instead of DenseFeatures. ' 'Given (type {}): {}'.format(self.name, type(self.categorical_column), self.categorical_column)) # Feature has been already transformed. Return the intermediate # representation created by transform_feature. return transformation_cache.get(self, state_manager) @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None): del weight_collections del trainable if isinstance( self.categorical_column, (SequenceCategoricalColumn, fc_old._SequenceCategoricalColumn)): # pylint: disable=protected-access raise ValueError( 'In indicator_column: {}. ' 'categorical_column must not be of type _SequenceCategoricalColumn. ' 'Suggested fix A: If you wish to use DenseFeatures, use a ' 'non-sequence categorical_column_with_*. ' 'Suggested fix B: If you wish to create sequence input, use ' 'SequenceFeatures instead of DenseFeatures. ' 'Given (type {}): {}'.format(self.name, type(self.categorical_column), self.categorical_column)) # Feature has been already transformed. Return the intermediate # representation created by transform_feature. return inputs.get(self) def get_sequence_dense_tensor(self, transformation_cache, state_manager): """See `SequenceDenseColumn` base class.""" if not isinstance(self.categorical_column, SequenceCategoricalColumn): raise ValueError( 'In indicator_column: {}. ' 'categorical_column must be of type SequenceCategoricalColumn ' 'to use SequenceFeatures. ' 'Suggested fix: Use one of sequence_categorical_column_with_*. ' 'Given (type {}): {}'.format(self.name, type(self.categorical_column), self.categorical_column)) # Feature has been already transformed. Return the intermediate # representation created by transform_feature. dense_tensor = transformation_cache.get(self, state_manager) sparse_tensors = self.categorical_column.get_sparse_tensors( transformation_cache, state_manager) sequence_length = fc_utils.sequence_length_from_sparse_tensor( sparse_tensors.id_tensor) return SequenceDenseColumn.TensorSequenceLengthPair( dense_tensor=dense_tensor, sequence_length=sequence_length) @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _get_sequence_dense_tensor(self, inputs, weight_collections=None, trainable=None): # Do nothing with weight_collections and trainable since no variables are # created in this function. del weight_collections del trainable if not isinstance( self.categorical_column, (SequenceCategoricalColumn, fc_old._SequenceCategoricalColumn)): # pylint: disable=protected-access raise ValueError( 'In indicator_column: {}. ' 'categorical_column must be of type _SequenceCategoricalColumn ' 'to use SequenceFeatures. ' 'Suggested fix: Use one of sequence_categorical_column_with_*. ' 'Given (type {}): {}'.format(self.name, type(self.categorical_column), self.categorical_column)) # Feature has been already transformed. Return the intermediate # representation created by _transform_feature. 
dense_tensor = inputs.get(self) sparse_tensors = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access sequence_length = fc_utils.sequence_length_from_sparse_tensor( sparse_tensors.id_tensor) return SequenceDenseColumn.TensorSequenceLengthPair( dense_tensor=dense_tensor, sequence_length=sequence_length) @property def parents(self): """See `FeatureColumn` base class.""" return [self.categorical_column] def get_config(self): """See `FeatureColumn` base class.""" from tensorflow.python.feature_column.serialization import serialize_feature_column # pylint: disable=g-import-not-at-top config = dict(zip(self._fields, self)) config['categorical_column'] = serialize_feature_column( self.categorical_column) return config @classmethod def from_config(cls, config, custom_objects=None, columns_by_name=None): """See `FeatureColumn` base class.""" from tensorflow.python.feature_column.serialization import deserialize_feature_column # pylint: disable=g-import-not-at-top _check_config_keys(config, cls._fields) kwargs = _standardize_and_copy_config(config) kwargs['categorical_column'] = deserialize_feature_column( config['categorical_column'], custom_objects, columns_by_name) return cls(**kwargs) def _verify_static_batch_size_equality(tensors, columns): """Verify equality between static batch sizes. Args: tensors: iterable of input tensors. columns: Corresponding feature columns. Raises: ValueError: in case of mismatched batch sizes. """ # batch_size is a Dimension object. expected_batch_size = None for i in range(0, len(tensors)): batch_size = tensor_shape.Dimension(tensor_shape.dimension_value( tensors[i].shape[0])) if batch_size.value is not None: if expected_batch_size is None: batch_size_column_index = i expected_batch_size = batch_size elif not expected_batch_size.is_compatible_with(batch_size): raise ValueError( 'Batch size (first dimension) of each feature must be the same.
' 'Batch size of columns ({}, {}): ({}, {})'.format( columns[batch_size_column_index].name, columns[i].name, expected_batch_size, batch_size)) class SequenceCategoricalColumn( CategoricalColumn, fc_old._SequenceCategoricalColumn, # pylint: disable=protected-access collections.namedtuple('SequenceCategoricalColumn', ('categorical_column'))): """Represents sequences of categorical data.""" @property def _is_v2_column(self): return (isinstance(self.categorical_column, FeatureColumn) and self.categorical_column._is_v2_column) # pylint: disable=protected-access @property def name(self): """See `FeatureColumn` base class.""" return self.categorical_column.name @property def parse_example_spec(self): """See `FeatureColumn` base class.""" return self.categorical_column.parse_example_spec @property @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _parse_example_spec(self): return self.categorical_column._parse_example_spec # pylint: disable=protected-access def transform_feature(self, transformation_cache, state_manager): """See `FeatureColumn` base class.""" return self.categorical_column.transform_feature(transformation_cache, state_manager) @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _transform_feature(self, inputs): return self.categorical_column._transform_feature(inputs) # pylint: disable=protected-access @property def num_buckets(self): """Returns number of buckets in this sparse feature.""" return self.categorical_column.num_buckets @property @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _num_buckets(self): return self.categorical_column._num_buckets # pylint: disable=protected-access def _get_sparse_tensors_helper(self, sparse_tensors): id_tensor = sparse_tensors.id_tensor weight_tensor = sparse_tensors.weight_tensor # Expands third dimension, if necessary so that embeddings are not # combined during embedding lookup. If the tensor is already 3D, leave # as-is. shape = array_ops.shape(id_tensor) # Compute the third dimension explicitly instead of setting it to -1, as # that doesn't work for dynamically shaped tensors with 0-length at runtime. # This happens for empty sequences. target_shape = [shape[0], shape[1], math_ops.reduce_prod(shape[2:])] id_tensor = sparse_ops.sparse_reshape(id_tensor, target_shape) if weight_tensor is not None: weight_tensor = sparse_ops.sparse_reshape(weight_tensor, target_shape) return CategoricalColumn.IdWeightPair(id_tensor, weight_tensor) def get_sparse_tensors(self, transformation_cache, state_manager): """Returns an IdWeightPair. `IdWeightPair` is a pair of `SparseTensor`s which represents ids and weights. `IdWeightPair.id_tensor` is typically a `batch_size` x `num_buckets` `SparseTensor` of `int64`. `IdWeightPair.weight_tensor` is either a `SparseTensor` of `float` or `None` to indicate all weights should be taken to be 1. If specified, `weight_tensor` must have exactly the same shape and indices as `sp_ids`. Expected `SparseTensor` is same as parsing output of a `VarLenFeature` which is a ragged matrix. Args: transformation_cache: A `FeatureTransformationCache` object to access features. state_manager: A `StateManager` to create / access resources such as lookup tables.
""" sparse_tensors = self.categorical_column.get_sparse_tensors( transformation_cache, state_manager) return self._get_sparse_tensors_helper(sparse_tensors) @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE, _FEATURE_COLUMN_DEPRECATION) def _get_sparse_tensors(self, inputs, weight_collections=None, trainable=None): sparse_tensors = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access return self._get_sparse_tensors_helper(sparse_tensors) @property def parents(self): """See 'FeatureColumn` base class.""" return [self.categorical_column] def get_config(self): """See 'FeatureColumn` base class.""" from tensorflow.python.feature_column.serialization import serialize_feature_column # pylint: disable=g-import-not-at-top config = dict(zip(self._fields, self)) config['categorical_column'] = serialize_feature_column( self.categorical_column) return config @classmethod def from_config(cls, config, custom_objects=None, columns_by_name=None): """See 'FeatureColumn` base class.""" from tensorflow.python.feature_column.serialization import deserialize_feature_column # pylint: disable=g-import-not-at-top _check_config_keys(config, cls._fields) kwargs = _standardize_and_copy_config(config) kwargs['categorical_column'] = deserialize_feature_column( config['categorical_column'], custom_objects, columns_by_name) return cls(**kwargs) def _check_config_keys(config, expected_keys): """Checks that a config has all expected_keys.""" if set(config.keys()) != set(expected_keys): raise ValueError('Invalid config: {}, expected keys: {}'.format( config, expected_keys)) def _standardize_and_copy_config(config): """Returns a shallow copy of config with lists turned to tuples. Keras serialization uses nest to listify everything. This causes problems with the NumericColumn shape, which becomes unhashable. We could try to solve this on the Keras side, but that would require lots of tracking to avoid changing existing behavior. Instead, we ensure here that we revive correctly. Args: config: dict that will be used to revive a Feature Column Returns: Shallow copy of config with lists turned to tuples. """ kwargs = config.copy() for k, v in kwargs.items(): if isinstance(v, list): kwargs[k] = tuple(v) return kwargs def _sanitize_column_name_for_variable_scope(name): """Sanitizes user-provided feature names for use as variable scopes.""" invalid_char = re.compile('[^A-Za-z0-9_.\\-]') return invalid_char.sub('_', name)
apache-2.0
cynicaldevil/servo
tests/wpt/web-platform-tests/tools/webdriver/webdriver/client.py
17
11863
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. import urlparse import error import transport element_key = "element-6066-11e4-a52e-4f735466cecf" def command(func): def inner(self, *args, **kwargs): if hasattr(self, "session"): session = self.session else: session = self if session.session_id is None: session.start() assert session.session_id is not None return func(self, *args, **kwargs) inner.__name__ = func.__name__ inner.__doc__ = func.__doc__ return inner class Timeouts(object): def __init__(self, session): self.session = session self._script = 30 self._load = 0 self._implicit_wait = 0 def _set_timeouts(self, name, value): body = {"type": name, "ms": value * 1000} return self.session.send_command("POST", "timeouts", body) @property def script(self): return self._script @script.setter def script(self, value): self._set_timeouts("script", value) self._script = value @property def load(self): return self._load @load.setter def load(self, value): self._set_timeouts("page load", value) self._load = value @property def implicit_wait(self): return self._implicit_wait @implicit_wait.setter def implicit_wait(self, value): self._set_timeouts("implicit wait", value) self._implicit_wait = value class Window(object): def __init__(self, session): self.session = session @property @command def size(self): resp = self.session.send_command("GET", "window/size") return (resp["width"], resp["height"]) @size.setter @command def size(self, (width, height)): body = {"width": width, "height": height} self.session.send_command("POST", "window/size", body) @property @command def position(self): resp = self.session.send_command("GET", "window/position") return (resp["x"], resp["y"]) @position.setter @command def position(self, (x, y)): body = {"x": x, "y": y} self.session.send_command("POST", "window/position", body) @property @command def maximize(self): return self.session.send_command("POST", "window/maximize") class Find(object): def __init__(self, session): self.session = session @command def css(self, selector, all=True): return self._find_element("css selector", selector, all) def _find_element(self, strategy, selector, all): route = "elements" if all else "element" body = {"using": strategy, "value": selector} data = self.session.send_command("POST", route, body, key="value") if all: rv = [self.session._element(item) for item in data] else: rv = self.session._element(data) return rv class Cookies(object): def __init__(self, session): self.session = session def __getitem__(self, name): return self.session.send_command("GET", "cookie/%s" % name, {}, key="value") def __setitem__(self, name, value): cookie = {"name": name, "value": None} if isinstance(value, (str, unicode)): cookie["value"] = value elif hasattr(value, "value"): cookie["value"] = value.value self.session.send_command("POST", "cookie/%s" % name, {"cookie": cookie}, key="value") class UserPrompt(object): def __init__(self, session): self.session = session @command def dismiss(self): self.session.send_command("POST", "alert/dismiss") @command def accept(self): self.session.send_command("POST", "alert/accept") @property @command def text(self): return self.session.send_command("GET", "alert/text", key="value") @text.setter @command def text(self, value): body = {"value": list(value)} self.session.send_command("POST", "alert/text", body=body) class Session(object): def __init__(self, host, port, url_prefix="/",
desired_capabilities=None, required_capabilities=None, timeout=transport.HTTP_TIMEOUT, extension=None): self.transport = transport.HTTPWireProtocol( host, port, url_prefix, timeout=timeout) self.desired_capabilities = desired_capabilities self.required_capabilities = required_capabilities self.session_id = None self.timeouts = None self.window = None self.find = None self._element_cache = {} self.extension = None self.extension_cls = extension self.timeouts = Timeouts(self) self.window = Window(self) self.find = Find(self) self.alert = UserPrompt(self) def __enter__(self): self.start() return self def __exit__(self, *args, **kwargs): self.end() def __del__(self): self.end() def start(self): if self.session_id is not None: return body = {} caps = {} if self.desired_capabilities is not None: caps["desiredCapabilities"] = self.desired_capabilities if self.required_capabilities is not None: caps["requiredCapabilities"] = self.required_capabilities #body["capabilities"] = caps body = caps resp = self.transport.send("POST", "session", body=body) self.session_id = resp["sessionId"] if self.extension_cls: self.extension = self.extension_cls(self) return resp["value"] def end(self): if self.session_id is None: return url = "session/%s" % self.session_id self.transport.send("DELETE", url) self.session_id = None self.timeouts = None self.window = None self.find = None self.extension = None def send_command(self, method, url, body=None, key=None): if self.session_id is None: raise error.SessionNotCreatedException() url = urlparse.urljoin("session/%s/" % self.session_id, url) return self.transport.send(method, url, body, key=key) @property @command def url(self): return self.send_command("GET", "url", key="value") @url.setter @command def url(self, url): if urlparse.urlsplit(url).netloc is None: return self.url(url) body = {"url": url} return self.send_command("POST", "url", body) @command def back(self): return self.send_command("POST", "back") @command def forward(self): return self.send_command("POST", "forward") @command def refresh(self): return self.send_command("POST", "refresh") @property @command def title(self): return self.send_command("GET", "title", key="value") @property @command def window_handle(self): return self.send_command("GET", "window_handle", key="value") @window_handle.setter @command def window_handle(self, handle): body = {"handle": handle} return self.send_command("POST", "window", body=body) def switch_frame(self, frame): if frame == "parent": url = "frame/parent" body = None else: url = "frame" if isinstance(frame, Element): body = {"id": frame.json()} else: body = {"id": frame} return self.send_command("POST", url, body) @command def close(self): return self.send_command("DELETE", "window_handle") @property @command def handles(self): return self.send_command("GET", "window_handles", key="value") @property @command def active_element(self): data = self.send_command("GET", "element/active", key="value") if data is not None: return self._element(data) def _element(self, data): elem_id = data[element_key] assert elem_id if elem_id in self._element_cache: return self._element_cache[elem_id] return Element(self, elem_id) @command def cookies(self, name=None): if name is None: url = "cookie" else: url = "cookie/%s" % name return self.send_command("GET", url, {}, key="value") @command def set_cookie(self, name, value, path=None, domain=None, secure=None, expiry=None): body = {"name": name, "value": value} if path is not None: body["path"] = path if domain is not None: 
body["domain"] = domain if secure is not None: body["secure"] = secure if expiry is not None: body["expiry"] = expiry self.send_command("POST", "cookie", {"cookie": body}) def delete_cookie(self, name=None): if name is None: url = "cookie" else: url = "cookie/%s" % name self.send_command("DELETE", url, {}, key="value") #[...] @command def execute_script(self, script, args=None): if args is None: args = [] body = { "script": script, "args": args } return self.send_command("POST", "execute", body, key="value") @command def execute_async_script(self, script, args=None): if args is None: args = [] body = { "script": script, "args": args } return self.send_command("POST", "execute_async", body, key="value") #[...] @command def screenshot(self): return self.send_command("GET", "screenshot", key="value") class Element(object): def __init__(self, session, id): self.session = session self.id = id assert id not in self.session._element_cache self.session._element_cache[self.id] = self def json(self): return {element_key: self.id} @property def session_id(self): return self.session.session_id def url(self, suffix): return "element/%s/%s" % (self.id, suffix) @command def find_element(self, strategy, selector): body = {"using": strategy, "value": selector} elem = self.session.send_command("POST", self.url("element"), body, key="value") return self.session.element(elem) @command def click(self): self.session.send_command("POST", self.url("click"), {}) @command def tap(self): self.session.send_command("POST", self.url("tap"), {}) @command def clear(self): self.session.send_command("POST", self.url("clear"), {}) @command def send_keys(self, keys): if isinstance(keys, (str, unicode)): keys = [char for char in keys] body = {"value": keys} return self.session.send_command("POST", self.url("value"), body) @property @command def text(self): return self.session.send_command("GET", self.url("text")) @property @command def name(self): return self.session.send_command("GET", self.url("name")) @command def style(self, property_name): return self.session.send_command("GET", self.url("css/%s" % property_name)) @property @command def rect(self): return self.session.send_command("GET", self.url("rect")) @command def attribute(self, name): return self.session.send_command("GET", self.url("attribute/%s" % name))
mpl-2.0
megaserg/pants
src/python/pants/util/dirutil.py
4
9209
# coding=utf-8 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) import atexit import errno import os import shutil import stat import tempfile import threading import uuid from collections import defaultdict from pants.util.strutil import ensure_text def fast_relpath(path, start): """A prefix-based relpath, with no normalization or support for returning `..`.""" if not path.startswith(start): raise ValueError('{} is not a prefix of {}'.format(start, path)) if len(path) == len(start): # Items are identical: the relative path is empty. return '' elif len(start) == 0: # Empty prefix. return path elif start[-1] == '/': # The prefix indicates that it is a directory. return path[len(start):] elif path[len(start)] == '/': # The suffix indicates that the prefix is a directory. return path[len(start)+1:] else: raise ValueError('{} is not a directory containing {}'.format(start, path)) def safe_mkdir(directory, clean=False): """Ensure a directory is present. If it's not there, create it. If it is, no-op. If clean is True, ensure the dir is empty.""" if clean: safe_rmtree(directory) try: os.makedirs(directory) except OSError as e: if e.errno != errno.EEXIST: raise def safe_mkdir_for(path): """Ensure that the parent directory for a file is present. If it's not there, create it. If it is, no-op. """ safe_mkdir(os.path.dirname(path), clean=False) def safe_file_dump(path, content): safe_mkdir_for(path) with open(path, 'w') as outfile: outfile.write(content) def safe_walk(path, **kwargs): """Just like os.walk, but ensures that the returned values are unicode objects. This isn't strictly safe, in that it is possible that some paths will not be decodeable, but that case is rare, and the only alternative is to somehow avoid all interaction between paths and unicode objects, which seems especially tough in the presence of unicode_literals. See e.g. https://mail.python.org/pipermail/python-dev/2008-December/083856.html """ # If os.walk is given a text argument, it yields text values; if it # is given a binary argument, it yields binary values. return os.walk(ensure_text(path), **kwargs) _MKDTEMP_CLEANER = None _MKDTEMP_DIRS = defaultdict(set) _MKDTEMP_LOCK = threading.RLock() def _mkdtemp_atexit_cleaner(): for td in _MKDTEMP_DIRS.pop(os.getpid(), []): safe_rmtree(td) def _mkdtemp_unregister_cleaner(): global _MKDTEMP_CLEANER _MKDTEMP_CLEANER = None def _mkdtemp_register_cleaner(cleaner): global _MKDTEMP_CLEANER if not cleaner: return assert callable(cleaner) if _MKDTEMP_CLEANER is None: atexit.register(cleaner) _MKDTEMP_CLEANER = cleaner def safe_mkdtemp(cleaner=_mkdtemp_atexit_cleaner, **kw): """Create a temporary directory that is cleaned up on process exit. Arguments are as to tempfile.mkdtemp. """ # Proper lock sanitation on fork [issue 6721] would be desirable here. with _MKDTEMP_LOCK: return register_rmtree(tempfile.mkdtemp(**kw), cleaner=cleaner) def register_rmtree(directory, cleaner=_mkdtemp_atexit_cleaner): """Register an existing directory to be cleaned up at process exit.""" with _MKDTEMP_LOCK: _mkdtemp_register_cleaner(cleaner) _MKDTEMP_DIRS[os.getpid()].add(directory) return directory def safe_rmtree(directory): """Delete a directory if it's present. 
If it's not present, no-op.""" shutil.rmtree(directory, ignore_errors=True) def safe_open(filename, *args, **kwargs): """Open a file safely, ensuring that its directory exists.""" safe_mkdir(os.path.dirname(filename)) return open(filename, *args, **kwargs) def safe_delete(filename): """Delete a file safely. If it's not present, no-op.""" try: os.unlink(filename) except OSError as e: if e.errno != errno.ENOENT: raise def safe_concurrent_rename(src, dst): """Rename src to dst, ignoring errors due to dst already existing. Useful when concurrent processes may attempt to create dst, and it doesn't matter who wins. """ # Delete dst, in case it existed (with old content) even before any concurrent processes # attempted this write. This ensures that at least one process writes the new content. if os.path.isdir(src): # Note that dst may not exist, so we test for the type of src. safe_rmtree(dst) else: safe_delete(dst) try: shutil.move(src, dst) except IOError as e: if e.errno != errno.EEXIST: raise def safe_concurrent_create(func, path): """Safely execute code that creates a file at a well-known path. Useful when concurrent processes may attempt to create a file, and it doesn't matter who wins. :param func: A callable that takes a single path argument and creates a file at that path. :param path: The path to execute the callable on. :return: func(path)'s return value. """ safe_mkdir_for(path) tmp_path = '{0}.tmp.{1}'.format(path, uuid.uuid4().hex) ret = func(tmp_path) safe_concurrent_rename(tmp_path, path) return ret def chmod_plus_x(path): """Equivalent of unix `chmod a+x path`""" path_mode = os.stat(path).st_mode path_mode &= int('777', 8) if path_mode & stat.S_IRUSR: path_mode |= stat.S_IXUSR if path_mode & stat.S_IRGRP: path_mode |= stat.S_IXGRP if path_mode & stat.S_IROTH: path_mode |= stat.S_IXOTH os.chmod(path, path_mode) def relative_symlink(source_path, link_path): """Create a symlink at link_path pointing to relative source :param source_path: Absolute path to source file :param link_path: Absolute path to intended symlink :raises ValueError if source_path or link_path are not unique, absolute paths :raises OSError on failure UNLESS file already exists or no such file/directory """ if not os.path.isabs(source_path): raise ValueError("Path for source:{} must be absolute".format(source_path)) if not os.path.isabs(link_path): raise ValueError("Path for link:{} must be absolute".format(link_path)) if source_path == link_path: raise ValueError("Path for link is identical to source:{}".format(source_path)) try: if os.path.lexists(link_path): os.unlink(link_path) rel_path = os.path.relpath(source_path, os.path.dirname(link_path)) os.symlink(rel_path, link_path) except OSError as e: # Another run may beat us to deletion or creation. if not (e.errno == errno.EEXIST or e.errno == errno.ENOENT): raise def relativize_path(path, rootdir): # Note that we can't test for length and return the shorter of the two, because we need these # paths to be stable across systems (e.g., because they get embedded in analysis files), # and this choice might be inconsistent across systems. So we assume the relpath is always # shorter. We relativize because of a known case of very long full path prefixes on Mesos, # so this seems like the right heuristic. # Note also that we mustn't call realpath on the path - we need to preserve the symlink structure. return os.path.relpath(path, rootdir) # When running pants under mesos/aurora, the sandbox pathname can be very long. 
Since it gets # prepended to most components in the classpath (some from ivy, the rest from the build), # in some runs the classpath gets too big and exceeds ARG_MAX. # We prevent this by using paths relative to the current working directory. def relativize_paths(paths, rootdir): return [relativize_path(path, rootdir) for path in paths] def touch(path, times=None): """Equivalent of unix `touch path`. :path: The file to touch. :times: Either a tuple of (atime, mtime) or else a single time to use for both. If not specified both atime and mtime are updated to the current time. """ if times: if len(times) > 2: raise ValueError('times must either be a tuple of (atime, mtime) or else a single time value ' 'to use for both.') if len(times) == 1: times = (times[0], times[0]) with safe_open(path, 'a'): os.utime(path, times) def get_basedir(path): """Returns the base directory of a path. Examples: get_basedir('foo/bar/baz') --> 'foo' get_basedir('/foo/bar/baz') --> '' get_basedir('foo') --> 'foo' """ return path[:path.index(os.sep)] if os.sep in path else path def rm_rf(name): """Remove a file or a directory similarly to running `rm -rf <name>` in a UNIX shell. :param str name: the name of the file or directory to remove. :raises: OSError on error. """ if not os.path.exists(name): return try: # Avoid using safe_rmtree so we can detect failures. shutil.rmtree(name) except OSError as e: if e.errno == errno.ENOTDIR: # 'Not a directory', but a file. Attempt to os.unlink the file, raising OSError on failure. safe_delete(name) elif e.errno != errno.ENOENT: # Pass on 'No such file or directory', otherwise re-raise OSError to surface perm issues etc. raise
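`touch` accepts either a full `(atime, mtime)` pair or a 1-tuple that should apply to both fields, and `os.utime` insists on a flat pair of numbers, so the single value has to be unpacked rather than re-wrapped. A self-contained check of that behavior (plain `open` and `tempfile` in place of pants' helpers):

import os
import tempfile

def touch(path, times=None):
  # A 1-tuple is expanded to (atime, mtime); os.utime rejects nesting.
  if times:
    if len(times) > 2:
      raise ValueError('times must be (atime, mtime) or a single value')
    if len(times) == 1:
      times = (times[0], times[0])
  with open(path, 'a'):
    os.utime(path, times)

path = os.path.join(tempfile.mkdtemp(), 'touched')
touch(path, times=(1400000000,))
st = os.stat(path)
assert (st.st_atime, st.st_mtime) == (1400000000, 1400000000)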
apache-2.0
kmspriyatham/symath
scipy/scipy/stats/tests/test_morestats.py
2
20092
# Author: Travis Oliphant, 2002 # # Further enhancements and tests added by numerous SciPy developers. # from __future__ import division, print_function, absolute_import import warnings from numpy.testing import TestCase, run_module_suite, assert_array_equal, \ assert_almost_equal, assert_array_less, assert_array_almost_equal, \ assert_raises, assert_, assert_allclose, assert_equal from numpy.testing.utils import WarningManager import scipy.stats as stats import numpy as np from numpy.random import RandomState g1 = [1.006, 0.996, 0.998, 1.000, 0.992, 0.993, 1.002, 0.999, 0.994, 1.000] g2 = [0.998, 1.006, 1.000, 1.002, 0.997, 0.998, 0.996, 1.000, 1.006, 0.988] g3 = [0.991, 0.987, 0.997, 0.999, 0.995, 0.994, 1.000, 0.999, 0.996, 0.996] g4 = [1.005, 1.002, 0.994, 1.000, 0.995, 0.994, 0.998, 0.996, 1.002, 0.996] g5 = [0.998, 0.998, 0.982, 0.990, 1.002, 0.984, 0.996, 0.993, 0.980, 0.996] g6 = [1.009, 1.013, 1.009, 0.997, 0.988, 1.002, 0.995, 0.998, 0.981, 0.996] g7 = [0.990, 1.004, 0.996, 1.001, 0.998, 1.000, 1.018, 1.010, 0.996, 1.002] g8 = [0.998, 1.000, 1.006, 1.000, 1.002, 0.996, 0.998, 0.996, 1.002, 1.006] g9 = [1.002, 0.998, 0.996, 0.995, 0.996, 1.004, 1.004, 0.998, 0.999, 0.991] g10 = [0.991, 0.995, 0.984, 0.994, 0.997, 0.997, 0.991, 0.998, 1.004, 0.997] class TestShapiro(TestCase): def test_basic(self): x1 = [0.11,7.87,4.61,10.14,7.95,3.14,0.46, 4.43,0.21,4.75,0.71,1.52,3.24, 0.93,0.42,4.97,9.53,4.55,0.47,6.66] w,pw = stats.shapiro(x1) assert_almost_equal(w,0.90047299861907959,6) assert_almost_equal(pw,0.042089745402336121,6) x2 = [1.36,1.14,2.92,2.55,1.46,1.06,5.27,-1.11, 3.48,1.10,0.88,-0.51,1.46,0.52,6.20,1.69, 0.08,3.67,2.81,3.49] w,pw = stats.shapiro(x2) assert_almost_equal(w,0.9590270,6) assert_almost_equal(pw,0.52460,3) def test_bad_arg(self): # Length of x is less than 3. 
x = [1] assert_raises(ValueError, stats.shapiro, x) class TestAnderson(TestCase): def test_normal(self): rs = RandomState(1234567890) x1 = rs.standard_exponential(size=50) x2 = rs.standard_normal(size=50) A,crit,sig = stats.anderson(x1) assert_array_less(crit[:-1], A) A,crit,sig = stats.anderson(x2) assert_array_less(A, crit[-2:]) def test_expon(self): rs = RandomState(1234567890) x1 = rs.standard_exponential(size=50) x2 = rs.standard_normal(size=50) A,crit,sig = stats.anderson(x1,'expon') assert_array_less(A, crit[-2:]) olderr = np.seterr(all='ignore') try: A,crit,sig = stats.anderson(x2,'expon') finally: np.seterr(**olderr) assert_(A > crit[-1]) def test_bad_arg(self): assert_raises(ValueError, stats.anderson, [1], dist='plate_of_shrimp') class TestAnsari(TestCase): def test_small(self): x = [1,2,3,3,4] y = [3,2,6,1,6,1,4,1] W, pval = stats.ansari(x,y) assert_almost_equal(W,23.5,11) assert_almost_equal(pval,0.13499256881897437,11) def test_approx(self): ramsay = np.array((111, 107, 100, 99, 102, 106, 109, 108, 104, 99, 101, 96, 97, 102, 107, 113, 116, 113, 110, 98)) parekh = np.array((107, 108, 106, 98, 105, 103, 110, 105, 104, 100, 96, 108, 103, 104, 114, 114, 113, 108, 106, 99)) warn_ctx = WarningManager() warn_ctx.__enter__() try: warnings.filterwarnings('ignore', message="Ties preclude use of exact statistic.") W, pval = stats.ansari(ramsay, parekh) finally: warn_ctx.__exit__() assert_almost_equal(W,185.5,11) assert_almost_equal(pval,0.18145819972867083,11) def test_exact(self): W,pval = stats.ansari([1,2,3,4],[15,5,20,8,10,12]) assert_almost_equal(W,10.0,11) assert_almost_equal(pval,0.533333333333333333,7) def test_bad_arg(self): assert_raises(ValueError, stats.ansari, [], [1]) assert_raises(ValueError, stats.ansari, [1], []) class TestBartlett(TestCase): def test_data(self): args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10] T, pval = stats.bartlett(*args) assert_almost_equal(T,20.78587342806484,7) assert_almost_equal(pval,0.0136358632781,7) def test_bad_arg(self): """Too few args raises ValueError.""" assert_raises(ValueError, stats.bartlett, [1]) class TestLevene(TestCase): def test_data(self): args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10] W, pval = stats.levene(*args) assert_almost_equal(W,1.7059176930008939,7) assert_almost_equal(pval,0.0990829755522,7) def test_trimmed1(self): """Test that center='trimmed' gives the same result as center='mean' when proportiontocut=0.""" W1, pval1 = stats.levene(g1, g2, g3, center='mean') W2, pval2 = stats.levene(g1, g2, g3, center='trimmed', proportiontocut=0.0) assert_almost_equal(W1, W2) assert_almost_equal(pval1, pval2) def test_trimmed2(self): x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0] y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0] np.random.seed(1234) x2 = np.random.permutation(x) # Use center='trimmed' W0, pval0 = stats.levene(x, y, center='trimmed', proportiontocut=0.125) W1, pval1 = stats.levene(x2, y, center='trimmed', proportiontocut=0.125) # Trim the data here, and use center='mean' W2, pval2 = stats.levene(x[1:-1], y[1:-1], center='mean') # Result should be the same. 
assert_almost_equal(W0, W2) assert_almost_equal(W1, W2) assert_almost_equal(pval1, pval2) def test_equal_mean_median(self): x = np.linspace(-1,1,21) np.random.seed(1234) x2 = np.random.permutation(x) y = x**3 W1, pval1 = stats.levene(x, y, center='mean') W2, pval2 = stats.levene(x2, y, center='median') assert_almost_equal(W1, W2) assert_almost_equal(pval1, pval2) def test_bad_keyword(self): x = np.linspace(-1,1,21) assert_raises(TypeError, stats.levene, x, x, portiontocut=0.1) def test_bad_center_value(self): x = np.linspace(-1,1,21) assert_raises(ValueError, stats.levene, x, x, center='trim') def test_too_few_args(self): assert_raises(ValueError, stats.levene, [1]) class TestBinomP(TestCase): def test_data(self): pval = stats.binom_test(100,250) assert_almost_equal(pval,0.0018833009350757682,11) pval = stats.binom_test(201,405) assert_almost_equal(pval,0.92085205962670713,11) pval = stats.binom_test([682,243],p=3.0/4) assert_almost_equal(pval,0.38249155957481695,11) def test_bad_len_x(self): """Length of x must be 1 or 2.""" assert_raises(ValueError, stats.binom_test, [1,2,3]) def test_bad_n(self): """len(x) is 1, but n is invalid.""" # Missing n assert_raises(ValueError, stats.binom_test, [100]) # n less than x[0] assert_raises(ValueError, stats.binom_test, [100], n=50) def test_bad_p(self): assert_raises(ValueError, stats.binom_test, [50, 50], p=2.0) class TestFindRepeats(TestCase): def test_basic(self): a = [1,2,3,4,1,2,3,4,1,2,5] res,nums = stats.find_repeats(a) assert_array_equal(res,[1,2,3,4]) assert_array_equal(nums,[3,3,2,2]) def test_empty_result(self): # Check that empty arrays are returned when there are no repeats. a = [10, 20, 50, 30, 40] repeated, counts = stats.find_repeats(a) assert_array_equal(repeated, []) assert_array_equal(counts, []) class TestFligner(TestCase): def test_data(self): # numbers from R: fligner.test in package stats x1 = np.arange(5) assert_array_almost_equal(stats.fligner(x1,x1**2), (3.2282229927203536, 0.072379187848207877), 11) def test_trimmed1(self): """Test that center='trimmed' gives the same result as center='mean' when proportiontocut=0.""" Xsq1, pval1 = stats.fligner(g1, g2, g3, center='mean') Xsq2, pval2 = stats.fligner(g1, g2, g3, center='trimmed', proportiontocut=0.0) assert_almost_equal(Xsq1, Xsq2) assert_almost_equal(pval1, pval2) def test_trimmed2(self): x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0] y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0] # Use center='trimmed' Xsq1, pval1 = stats.fligner(x, y, center='trimmed', proportiontocut=0.125) # Trim the data here, and use center='mean' Xsq2, pval2 = stats.fligner(x[1:-1], y[1:-1], center='mean') # Result should be the same. assert_almost_equal(Xsq1, Xsq2) assert_almost_equal(pval1, pval2) # The following test looks reasonable at first, but fligner() uses the # function stats.rankdata(), and in one of the cases in this test, # there are ties, while in the other (because of normal rounding # errors) there are not. This difference leads to differences in the # third significant digit of W. 
# #def test_equal_mean_median(self): # x = np.linspace(-1,1,21) # y = x**3 # W1, pval1 = stats.fligner(x, y, center='mean') # W2, pval2 = stats.fligner(x, y, center='median') # assert_almost_equal(W1, W2) # assert_almost_equal(pval1, pval2) def test_bad_keyword(self): x = np.linspace(-1,1,21) assert_raises(TypeError, stats.fligner, x, x, portiontocut=0.1) def test_bad_center_value(self): x = np.linspace(-1,1,21) assert_raises(ValueError, stats.fligner, x, x, center='trim') def test_bad_num_args(self): """Too few args raises ValueError.""" assert_raises(ValueError, stats.fligner, [1]) class TestMood(TestCase): def test_mood(self): # numbers from R: mood.test in package stats x1 = np.arange(5) assert_array_almost_equal(stats.mood(x1, x1**2), (-1.3830857299399906, 0.16663858066771478), 11) def test_mood_order_of_args(self): # z should change sign when the order of arguments changes, pvalue # should not change np.random.seed(1234) x1 = np.random.randn(10, 1) x2 = np.random.randn(15, 1) z1, p1 = stats.mood(x1, x2) z2, p2 = stats.mood(x2, x1) assert_array_almost_equal([z1, p1], [-z2, p2]) def test_mood_with_axis_none(self): #Test with axis = None, compare with results from R x1 = [-0.626453810742332, 0.183643324222082, -0.835628612410047, 1.59528080213779, 0.329507771815361, -0.820468384118015, 0.487429052428485, 0.738324705129217, 0.575781351653492, -0.305388387156356, 1.51178116845085, 0.389843236411431, -0.621240580541804, -2.2146998871775, 1.12493091814311, -0.0449336090152309, -0.0161902630989461, 0.943836210685299, 0.821221195098089, 0.593901321217509] x2 = [-0.896914546624981, 0.184849184646742, 1.58784533120882, -1.13037567424629, -0.0802517565509893, 0.132420284381094, 0.707954729271733, -0.23969802417184, 1.98447393665293, -0.138787012119665, 0.417650750792556, 0.981752777463662, -0.392695355503813, -1.03966897694891, 1.78222896030858, -2.31106908460517, 0.878604580921265, 0.035806718015226, 1.01282869212708, 0.432265154539617, 2.09081920524915, -1.19992581964387, 1.58963820029007, 1.95465164222325, 0.00493777682814261, -2.45170638784613, 0.477237302613617, -0.596558168631403, 0.792203270299649, 0.289636710177348] x1 = np.array(x1) x2 = np.array(x2) x1.shape = (10, 2) x2.shape = (15, 2) assert_array_almost_equal(stats.mood(x1, x2, axis=None), [-1.31716607555, 0.18778296257]) def test_mood_2d(self): # Test if the results of mood test in 2-D case are consistent with the # R result for the same inputs. Numbers from R mood.test(). 
ny = 5 np.random.seed(1234) x1 = np.random.randn(10, ny) x2 = np.random.randn(15, ny) z_vectest, pval_vectest = stats.mood(x1, x2) for j in range(ny): assert_array_almost_equal([z_vectest[j], pval_vectest[j]], stats.mood(x1[:, j], x2[:, j])) # inverse order of dimensions x1 = x1.transpose() x2 = x2.transpose() z_vectest, pval_vectest = stats.mood(x1, x2, axis=1) for i in range(ny): # check axis handling is self consistent assert_array_almost_equal([z_vectest[i], pval_vectest[i]], stats.mood(x1[i, :], x2[i, :])) def test_mood_3d(self): shape = (10, 5, 6) np.random.seed(1234) x1 = np.random.randn(*shape) x2 = np.random.randn(*shape) for axis in range(3): z_vectest, pval_vectest = stats.mood(x1, x2, axis=axis) # Tests that result for 3-D arrays is equal to that for the # same calculation on a set of 1-D arrays taken from the # 3-D array axes_idx = ([1, 2], [0, 2], [0, 1]) # the two axes != axis for i in range(shape[axes_idx[axis][0]]): for j in range(shape[axes_idx[axis][1]]): if axis == 0: slice1 = x1[:, i, j] slice2 = x2[:, i, j] elif axis == 1: slice1 = x1[i, :, j] slice2 = x2[i, :, j] else: slice1 = x1[i, j, :] slice2 = x2[i, j, :] assert_array_almost_equal([z_vectest[i, j], pval_vectest[i, j]], stats.mood(slice1, slice2)) def test_mood_bad_arg(self): # Raise ValueError when the sum of the lengths of the args is less than 3 assert_raises(ValueError, stats.mood, [1], []) def test_wilcoxon_bad_arg(): """Raise ValueError when two args of different lengths are given or zero_method is unknown""" assert_raises(ValueError, stats.wilcoxon, [1], [1,2]) assert_raises(ValueError, stats.wilcoxon, [1,2], [1,2], "dummy") def test_mvsdist_bad_arg(): """Raise ValueError if fewer than two data points are given.""" data = [1] assert_raises(ValueError, stats.mvsdist, data) def test_kstat_bad_arg(): """Raise ValueError if n > 4 or n < 1.""" data = [1] n = 10 assert_raises(ValueError, stats.kstat, data, n=n) def test_kstatvar_bad_arg(): """Raise ValueError if n is not 1 or 2.""" data = [1] n = 10 assert_raises(ValueError, stats.kstatvar, data, n=n) def test_probplot_bad_arg(): """Raise ValueError when given an invalid distribution.""" data = [1] assert_raises(ValueError, stats.probplot, data, dist="plate_of_shrimp") def test_ppcc_max_bad_arg(): """Raise ValueError when given an invalid distribution.""" data = [1] assert_raises(ValueError, stats.ppcc_max, data, dist="plate_of_shrimp") def test_boxcox_bad_arg(): """Raise ValueError if any data value is negative.""" x = np.array([-1]) assert_raises(ValueError, stats.boxcox, x) class TestCircFuncs(TestCase): def test_circfuncs(self): x = np.array([355,5,2,359,10,350]) M = stats.circmean(x, high=360) Mval = 0.167690146 assert_allclose(M, Mval, rtol=1e-7) V = stats.circvar(x, high=360) Vval = 42.51955609 assert_allclose(V, Vval, rtol=1e-7) S = stats.circstd(x, high=360) Sval = 6.520702116 assert_allclose(S, Sval, rtol=1e-7) def test_circfuncs_small(self): x = np.array([20,21,22,18,19,20.5,19.2]) M1 = x.mean() M2 = stats.circmean(x, high=360) assert_allclose(M2, M1, rtol=1e-5) V1 = x.var() V2 = stats.circvar(x, high=360) assert_allclose(V2, V1, rtol=1e-4) S1 = x.std() S2 = stats.circstd(x, high=360) assert_allclose(S2, S1, rtol=1e-4) def test_circmean_axis(self): x = np.array([[355,5,2,359,10,350], [351,7,4,352,9,349], [357,9,8,358,4,356]]) M1 = stats.circmean(x, high=360) M2 = stats.circmean(x.ravel(), high=360) assert_allclose(M1, M2, rtol=1e-14) M1 = stats.circmean(x, high=360, axis=1) M2 = [stats.circmean(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(M1, M2, rtol=1e-14) M1 = stats.circmean(x, high=360, axis=0) M2 = [stats.circmean(x[:,i], high=360) for i in range(x.shape[1])] assert_allclose(M1, M2, rtol=1e-14) def test_circvar_axis(self): x = np.array([[355,5,2,359,10,350], [351,7,4,352,9,349], [357,9,8,358,4,356]]) V1 = stats.circvar(x, high=360) V2 = stats.circvar(x.ravel(), high=360) assert_allclose(V1, V2, rtol=1e-11) V1 = stats.circvar(x, high=360, axis=1) V2 = [stats.circvar(x[i], high=360) for i in range(x.shape[0])] assert_allclose(V1, V2, rtol=1e-11) V1 = stats.circvar(x, high=360, axis=0) V2 = [stats.circvar(x[:,i], high=360) for i in range(x.shape[1])] assert_allclose(V1, V2, rtol=1e-11) def test_circstd_axis(self): x = np.array([[355,5,2,359,10,350], [351,7,4,352,9,349], [357,9,8,358,4,356]]) S1 = stats.circstd(x, high=360) S2 = stats.circstd(x.ravel(), high=360) assert_allclose(S1, S2, rtol=1e-11) S1 = stats.circstd(x, high=360, axis=1) S2 = [stats.circstd(x[i], high=360) for i in range(x.shape[0])] assert_allclose(S1, S2, rtol=1e-11) S1 = stats.circstd(x, high=360, axis=0) S2 = [stats.circstd(x[:,i], high=360) for i in range(x.shape[1])] assert_allclose(S1, S2, rtol=1e-11) def test_circfuncs_array_like(self): x = [355,5,2,359,10,350] assert_allclose(stats.circmean(x, high=360), 0.167690146, rtol=1e-7) assert_allclose(stats.circvar(x, high=360), 42.51955609, rtol=1e-7) assert_allclose(stats.circstd(x, high=360), 6.520702116, rtol=1e-7) def test_empty(self): assert_(np.isnan(stats.circmean([]))) assert_(np.isnan(stats.circstd([]))) assert_(np.isnan(stats.circvar([]))) def test_accuracy_wilcoxon(): freq = [1, 4, 16, 15, 8, 4, 5, 1, 2] nums = range(-4, 5) x = np.concatenate([[u] * v for u, v in zip(nums, freq)]) y = np.zeros(x.size) T, p = stats.wilcoxon(x, y, "pratt") assert_allclose(T, 423) assert_allclose(p, 0.00197547303533107) T, p = stats.wilcoxon(x, y, "zsplit") assert_allclose(T, 441) assert_allclose(p, 0.0032145343172473055) T, p = stats.wilcoxon(x, y, "wilcox") assert_allclose(T, 327) assert_allclose(p, 0.00641346115861) # Test the 'correction' option, using values computed in R with: # > wilcox.test(x, y, paired=TRUE, exact=FALSE, correct={FALSE,TRUE}) x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112]) y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187]) T, p = stats.wilcoxon(x, y, correction=False) assert_equal(T, 34) assert_allclose(p, 0.6948866, rtol=1e-6) T, p = stats.wilcoxon(x, y, correction=True) assert_equal(T, 34) assert_allclose(p, 0.7240817, rtol=1e-6) if __name__ == "__main__": run_module_suite()
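The circular-statistics expectations above (for example, `circmean` of angles straddling 360 degrees being ~0.168 rather than ~180) follow from averaging points on the unit circle; a small NumPy re-derivation of the quantity these tests pin down (not scipy's actual implementation):

import numpy as np

def circmean(samples, high=360.0, low=0.0):
    # Map angles onto the unit circle, average, map the mean angle back.
    ang = (np.asarray(samples) - low) * 2 * np.pi / (high - low)
    res = np.angle(np.mean(np.exp(1j * ang))) % (2 * np.pi)
    return res * (high - low) / (2 * np.pi) + low

x = [355, 5, 2, 359, 10, 350]
print(circmean(x))  # ~0.16769, the Mval asserted in test_circfuncs
print(np.mean(x))   # 180.16..., the naive mean, badly wrong here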
apache-2.0
apinsard/qtile
libqtile/widget/backlight.py
10
3007
# Copyright (c) 2012 Tim Neumann # Copyright (c) 2012, 2014 Tycho Andersen # Copyright (c) 2013 Tao Sauvage # Copyright (c) 2014 Sean Vig # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import os from . import base BACKLIGHT_DIR = '/sys/class/backlight' FORMAT = '{percent: 2.0%}' class Backlight(base.InLoopPollText): """ A simple widget to show the current brightness of a monitor. """ filenames = {} orientations = base.ORIENTATION_HORIZONTAL defaults = [ ('backlight_name', 'acpi_video0', 'ACPI name of a backlight device'), ( 'brightness_file', 'brightness', 'Name of file with the ' 'current brightness in /sys/class/backlight/backlight_name' ), ( 'max_brightness_file', 'max_brightness', 'Name of file with the ' 'maximum brightness in /sys/class/backlight/backlight_name' ), ('update_interval', .2, 'The delay in seconds between updates'), ] def __init__(self, **config): base.InLoopPollText.__init__(self, **config) self.add_defaults(Backlight.defaults) def _load_file(self, name): try: path = os.path.join(BACKLIGHT_DIR, self.backlight_name, name) with open(path, 'r') as f: return f.read().strip() except IOError: return False except Exception: self.log.exception("Failed to get %s" % name) def _get_info(self): try: info = { 'brightness': float(self._load_file(self.brightness_file)), 'max': float(self._load_file(self.max_brightness_file)), } except TypeError: return False return info def poll(self): info = self._get_info() if info is False: return 'Error' percent = info['brightness'] / info['max'] return FORMAT.format(percent=percent)
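One subtlety in `_load_file`: `_get_info` relies on `float()` raising `TypeError` to signal failure, but `float(False)` is `0.0`, so an `IOError` (say, a missing sysfs node) ends up as `max == 0.0` and a `ZeroDivisionError` in `poll()` rather than the intended 'Error' text. A corrected standalone reader, assuming the intent is to funnel every failure through the `TypeError` path:

import os

BACKLIGHT_DIR = '/sys/class/backlight'

def load_file(backlight_name, fname):
    # Return None on failure: float(None) raises TypeError, which
    # _get_info() already translates into the 'Error' display.
    try:
        path = os.path.join(BACKLIGHT_DIR, backlight_name, fname)
        with open(path, 'r') as f:
            return f.read().strip()
    except IOError:
        return None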
mit
ClockworkOrigins/m2etis
configurator/quicktest/reporting/direct_null_1m_02-09-13.py
1
2085
__author__ = 'amw' from quicktest.Reporting import plot3d, plot2d, create_dataset, create_plot_matrix # Initialise database config db_config = {"hostname": "localhost", "port": "27017", "db_name": "simulations", "collection_name": "02-09-13"} # Create plot tag = "direct_null_1m_020913" x_param = "parameters.numSubs" y_param = "parameters.numToSend_4" z_param = "results.<Channel 4>: Latency - All nodes - Avgmean" filter_list = [{"parameters.packetSize": "16B"}, {"parameters.packetSize": "32B"}, {"parameters.packetSize": "64B"}, {"parameters.packetSize": "128B"}, {"parameters.packetSize": "1024B"} ] plot3d(db_config, tag, x_param, y_param, z_param, filter_list, 2, 3, "Direct Null: Average Latency to all nodes") y_param = "results.<Channel 4>: Latency - All nodes - Avgmean" filter_list = [{"parameters.packetSize": "16B", "parameters.numToSend_4": "1"}, {"parameters.packetSize": "16B", "parameters.numToSend_4": "3"}, {"parameters.packetSize": "16B", "parameters.numToSend_4": "5"}, {"parameters.packetSize": "16B", "parameters.numToSend_4": "7"}, {"parameters.packetSize": "16B", "parameters.numToSend_4": "10"}, {"parameters.packetSize": "16B", "parameters.numToSend_4": "15"}, {"parameters.packetSize": "16B", "parameters.numToSend_4": "20"}, {"parameters.packetSize": "16B", "parameters.numToSend_4": "30"} ] # Generate the 2d plots. The arguments "numCols" and "numRows" describe the layout of the resulting matrix of plots plot2d(db_config, tag, x_param, y_param, filter_list, 3, 3, "Direct Null: Average Latency to all nodes") y_param = "results.<Channel 4>: Latency - All nodes - Minmean" plot2d(db_config, tag, x_param, y_param, filter_list, 3, 3, "Direct Null: Minimum Latency to all nodes") y_param = "results.<Channel 4>: Latency - All nodes - Maxmean" plot2d(db_config, tag, x_param, y_param, filter_list, 3, 3, "Direct Null: Maximum Latency to all nodes")
apache-2.0
xujun10110/golismero
tools/sqlmap/plugins/dbms/hsqldb/fingerprint.py
7
5038
#!/usr/bin/env python """ Copyright (c) 2006-2013 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ import re from lib.core.common import Backend from lib.core.common import Format from lib.core.common import unArrayizeValue from lib.core.data import conf from lib.core.data import kb from lib.core.data import logger from lib.core.enums import DBMS from lib.core.session import setDbms from lib.core.settings import HSQLDB_ALIASES from lib.core.settings import UNKNOWN_DBMS_VERSION from lib.request import inject from plugins.generic.fingerprint import Fingerprint as GenericFingerprint class Fingerprint(GenericFingerprint): def __init__(self): GenericFingerprint.__init__(self, DBMS.HSQLDB) def getFingerprint(self): value = "" wsOsFp = Format.getOs("web server", kb.headersFp) if wsOsFp and not hasattr(conf, "api"): value += "%s\n" % wsOsFp if kb.data.banner: dbmsOsFp = Format.getOs("back-end DBMS", kb.bannerFp) if dbmsOsFp and not hasattr(conf, "api"): value += "%s\n" % dbmsOsFp value += "back-end DBMS: " actVer = Format.getDbms() if not conf.extensiveFp: value += actVer return value blank = " " * 15 value += "active fingerprint: %s" % actVer if kb.bannerFp: banVer = kb.bannerFp["dbmsVersion"] if 'dbmsVersion' in kb.bannerFp else None if re.search("-log$", kb.data.banner): banVer += ", logging enabled" banVer = Format.getDbms([banVer] if banVer else None) value += "\n%sbanner parsing fingerprint: %s" % (blank, banVer) htmlErrorFp = Format.getErrorParsedDBMSes() if htmlErrorFp: value += "\n%shtml error message fingerprint: %s" % (blank, htmlErrorFp) return value def checkDbms(self): """ References for fingerprint: DATABASE_VERSION() version 2.2.6 added two-arg REPLACE function REPLACE('a','a') compared to REPLACE('a','a','d') version 2.2.5 added SYSTIMESTAMP function version 2.2.3 added REGEXPR_SUBSTRING and REGEXPR_SUBSTRING_ARRAY functions version 2.2.0 added support for ROWNUM() function version 2.1.0 added MEDIAN aggregate function version < 2.0.1 added support for datetime ROUND and TRUNC functions version 2.0.0 added VALUES support version 1.8.0.4 Added org.hsqldb.Library function, getDatabaseFullProductVersion to return the full version string, including the 4th digit (e.g. 1.8.0.4).
version 1.7.2 CASE statements added and INFORMATION_SCHEMA """ if not conf.extensiveFp and (Backend.isDbmsWithin(HSQLDB_ALIASES) \ or conf.dbms in HSQLDB_ALIASES) and Backend.getVersion() and \ Backend.getVersion() != UNKNOWN_DBMS_VERSION: v = Backend.getVersion().replace(">", "") v = v.replace("=", "") v = v.replace(" ", "") Backend.setVersion(v) setDbms("%s %s" % (DBMS.HSQLDB, Backend.getVersion())) if Backend.isVersionGreaterOrEqualThan("1.7.2"): kb.data.has_information_schema = True self.getBanner() return True infoMsg = "testing %s" % DBMS.HSQLDB logger.info(infoMsg) result = inject.checkBooleanExpression("CASEWHEN(1=1,1,0)=1") if result: infoMsg = "confirming %s" % DBMS.HSQLDB logger.info(infoMsg) result = inject.checkBooleanExpression("ROUNDMAGIC(PI())>=3") if not result: warnMsg = "the back-end DBMS is not %s" % DBMS.HSQLDB logger.warn(warnMsg) return False else: kb.data.has_information_schema = True Backend.setVersion(">= 1.7.2") setDbms("%s 1.7.2" % DBMS.HSQLDB) banner = self.getBanner() if banner: Backend.setVersion("= %s" % banner) else: if inject.checkBooleanExpression("(SELECT [RANDNUM] FROM (VALUES(0)))=[RANDNUM]"): Backend.setVersionList([">= 2.0.0", "< 2.3.0"]) else: banner = unArrayizeValue(inject.getValue("\"org.hsqldb.Library.getDatabaseFullProductVersion\"()", safeCharEncode=True)) if banner: Backend.setVersion("= %s" % banner) else: Backend.setVersionList([">= 1.7.2", "< 1.8.0"]) return True else: warnMsg = "the back-end DBMS is not %s or is < 1.7.2" % DBMS.HSQLDB logger.warn(warnMsg) return False def getHostname(self): warnMsg = "on HSQLDB it is not possible to enumerate the hostname" logger.warn(warnMsg)
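Stripped of sqlmap's machinery, the version detection above is a cascade of boolean feature probes against HSQLDB built-ins; a sketch of the same decision logic, where `probe` is a hypothetical stand-in for `inject.checkBooleanExpression` (the banner shortcut is omitted, and a literal 1 replaces sqlmap's random marker):

def fingerprint_hsqldb(probe):
    """probe(expr) -> bool, evaluated through the injection point."""
    if not probe("CASEWHEN(1=1,1,0)=1"):
        return None                       # does not look like HSQLDB
    if not probe("ROUNDMAGIC(PI())>=3"):
        return None                       # CASEWHEN matched by accident
    # Deriving a table from VALUES(...) only parses from 2.0.0 onwards.
    if probe("(SELECT 1 FROM (VALUES(0)))=1"):
        return ">= 2.0.0, < 2.3.0"
    return ">= 1.7.2, < 1.8.0"

print(fingerprint_hsqldb(lambda expr: "VALUES" not in expr))  # >= 1.7.2, < 1.8.0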
gpl-2.0
YaoQ/faceplusplus-demo
hello.py
1
2906
#!/usr/bin/env python2 # Import system libraries and define helper functions import time import sys import os import os.path from pprint import pformat # First import the API class from the SDK from facepp import API from facepp import File def print_result(hint, result): def encode(obj): if type(obj) is unicode: return obj.encode('utf-8') if type(obj) is dict: return {encode(k): encode(v) for (k, v) in obj.iteritems()} if type(obj) is list: return [encode(i) for i in obj] return obj print hint result = encode(result) print '\n'.join([' ' + i for i in pformat(result, width = 75).split('\n')]) def init(): fdir = os.path.dirname(__file__) with open(os.path.join(fdir, 'apikey.cfg')) as f: exec(f.read()) srv = locals().get('SERVER') return API(API_KEY, API_SECRET, srv = srv) # In this tutorial, you will learn how to call Face ++ APIs and implement a # simple App which could recognize a face image in 3 candidates. api = init() # Here are the person names and their face images IMAGE_DIR = 'http://cn.faceplusplus.com/static/resources/python_demo/' PERSONS = [ ('Jim Parsons', IMAGE_DIR + '1.jpg'), ('Leonardo DiCaprio', IMAGE_DIR + '2.jpg'), ('Andy Liu', IMAGE_DIR + '3.jpg') ] TARGET_IMAGE = IMAGE_DIR + '4.jpg' # Step 1: Detect faces in the 3 pictures and find out their positions and # attributes FACES = {name: api.detection.detect(url = url) for name, url in PERSONS} for name, face in FACES.iteritems(): print_result(name, face) # Step 2: create persons using the face_id for name, face in FACES.iteritems(): rst = api.person.create( person_name = name, face_id = face['face'][0]['face_id']) print_result('create person {}'.format(name), rst) # Step 3: create a new group and add those persons in it rst = api.group.create(group_name = 'standard') print_result('create group', rst) rst = api.group.add_person(group_name = 'standard', person_name = FACES.iterkeys()) print_result('add these persons to group', rst) # Step 4: train the model rst = api.train.identify(group_name = 'standard') print_result('train', rst) # wait for training to complete rst = api.wait_async(rst['session_id']) print_result('wait async', rst) # Step 5: recognize face in a new image rst = api.recognition.identify(group_name = 'standard', url = TARGET_IMAGE) print_result('recognition result', rst) print '=' * 60 print 'The person with highest confidence:', \ rst['face'][0]['candidate'][0]['person_name'] # Finally, delete the persons and group because they are no longer needed api.group.delete(group_name = 'standard') api.person.delete(person_name = FACES.iterkeys()) # Congratulations! You have finished this tutorial, and you can continue # reading our API document and start writing your own App using Face++ API! # Enjoy :)
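The `encode` helper at the top is the one non-obvious piece of plumbing: it recursively UTF-8-encodes every unicode string inside nested dicts and lists before pretty-printing (the script is Python 2 throughout). The same pattern in isolation:

# -*- coding: utf-8 -*-
def encode(obj):
    if type(obj) is unicode:
        return obj.encode('utf-8')
    if type(obj) is dict:
        return {encode(k): encode(v) for (k, v) in obj.iteritems()}
    if type(obj) is list:
        return [encode(i) for i in obj]
    return obj

nested = {u'person_name': u'Andy Liu', u'tags': [u'actor', {u'lang': u'zh'}]}
print encode(nested)  # every unicode leaf is now a plain utf-8 str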
gpl-2.0
noskill/virt-manager
virtManager/connect.py
1
15892
# # Copyright (C) 2006, 2013 Red Hat, Inc. # Copyright (C) 2006 Daniel P. Berrange <berrange@redhat.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, # MA 02110-1301 USA. # import os import logging import socket from gi.repository import Gio from gi.repository import GObject from gi.repository import Gtk from . import uiutil from .baseclass import vmmGObjectUI (HV_QEMU, HV_XEN, HV_LXC, HV_QEMU_SESSION, HV_BHYVE) = range(5) (CONN_SSH, CONN_TCP, CONN_TLS) = range(3) def current_user(): try: import getpass return getpass.getuser() except: return "" def default_conn_user(conn): if conn == CONN_SSH: return "root" return current_user() class vmmConnect(vmmGObjectUI): __gsignals__ = { "completed": (GObject.SignalFlags.RUN_FIRST, None, [str, bool]), "cancelled": (GObject.SignalFlags.RUN_FIRST, None, []), } def __init__(self): vmmGObjectUI.__init__(self, "connect.ui", "vmm-open-connection") self.builder.connect_signals({ "on_hypervisor_changed": self.hypervisor_changed, "on_transport_changed": self.transport_changed, "on_hostname_combo_changed": self.hostname_combo_changed, "on_connect_remote_toggled": self.connect_remote_toggled, "on_username_entry_changed": self.username_changed, "on_hostname_changed": self.hostname_changed, "on_cancel_clicked": self.cancel, "on_connect_clicked": self.open_conn, "on_vmm_open_connection_delete_event": self.cancel, }) self.browser = None self.browser_sigs = [] # Set this if we can't resolve 'hostname.local': means avahi # prob isn't configured correctly, and we should strip .local self.can_resolve_local = None # Plain hostname resolve failed, means we should just use IP addr self.can_resolve_hostname = None self.set_initial_state() self.dbus = None self.avahiserver = None try: self.dbus = Gio.bus_get_sync(Gio.BusType.SYSTEM, None) self.avahiserver = Gio.DBusProxy.new_sync(self.dbus, 0, None, "org.freedesktop.Avahi", "/", "org.freedesktop.Avahi.Server", None) except Exception, e: logging.debug("Couldn't contact avahi: %s", str(e)) self.reset_state() @staticmethod def default_uri(always_system=False): if os.path.exists('/var/lib/xen'): if (os.path.exists('/dev/xen/evtchn') or os.path.exists("/proc/xen")): return 'xen:///' if (os.path.exists("/usr/bin/qemu") or os.path.exists("/usr/bin/qemu-kvm") or os.path.exists("/usr/bin/kvm") or os.path.exists("/usr/libexec/qemu-kvm")): if always_system or os.geteuid() == 0: return "qemu:///system" else: return "qemu:///session" return None def cancel(self, ignore1=None, ignore2=None): logging.debug("Cancelling open connection") self.close() self.emit("cancelled") return 1 def close(self, ignore1=None, ignore2=None): logging.debug("Closing open connection") self.topwin.hide() if self.browser: for obj, sig in self.browser_sigs: obj.disconnect(sig) self.browser_sigs = [] self.browser = None def show(self, parent, reset_state=True): logging.debug("Showing open connection") if reset_state: 
self.reset_state() self.topwin.set_transient_for(parent) self.topwin.present() self.start_browse() def _cleanup(self): pass def set_initial_state(self): self.widget("connect").grab_default() combo = self.widget("hypervisor") model = Gtk.ListStore(str) model.append(["QEMU/KVM"]) model.append(["Xen"]) model.append(["LXC (Linux Containers)"]) model.append(["QEMU/KVM user session"]) if self.config.with_bhyve: model.append(["Bhyve"]) combo.set_model(model) uiutil.set_combo_text_column(combo, 0) combo = self.widget("transport") model = Gtk.ListStore(str) model.append(["SSH"]) model.append(["TCP (SASL, Kerberos)"]) model.append(["SSL/TLS with certificates"]) combo.set_model(model) uiutil.set_combo_text_column(combo, 0) # Hostname combo box entry hostListModel = Gtk.ListStore(str, str, str) host = self.widget("hostname") host.set_model(hostListModel) host.set_entry_text_column(2) hostListModel.set_sort_column_id(2, Gtk.SortType.ASCENDING) def reset_state(self): self.set_default_hypervisor() self.widget("transport").set_active(0) self.widget("autoconnect").set_sensitive(True) self.widget("autoconnect").set_active(True) self.widget("hostname").get_model().clear() self.widget("hostname").get_child().set_text("") self.widget("connect-remote").set_active(False) self.widget("username-entry").set_text("") self.connect_remote_toggled(self.widget("connect-remote")) self.populate_uri() def is_remote(self): # Whether user is requesting a remote connection return self.widget("connect-remote").get_active() def set_default_hypervisor(self): default = self.default_uri(always_system=True) if not default or default.startswith("qemu"): self.widget("hypervisor").set_active(HV_QEMU) elif default.startswith("xen"): self.widget("hypervisor").set_active(HV_XEN) def add_service(self, interface, protocol, name, typ, domain, flags): ignore = flags try: # Async service resolving res = self.avahiserver.ServiceResolverNew("(iisssiu)", interface, protocol, name, typ, domain, -1, 0) resint = Gio.DBusProxy.new_sync(self.dbus, 0, None, "org.freedesktop.Avahi", res, "org.freedesktop.Avahi.ServiceResolver", None) def cb(proxy, sender, signal, args): ignore = proxy ignore = sender if signal == "Found": self.add_conn_to_list(*args) sig = resint.connect("g-signal", cb) self.browser_sigs.append((resint, sig)) except Exception, e: logging.exception(e) def remove_service(self, interface, protocol, name, typ, domain, flags): ignore = domain ignore = protocol ignore = flags ignore = interface ignore = typ try: model = self.widget("hostname").get_model() name = str(name) for row in model: if row[0] == name: model.remove(row.iter) except Exception, e: logging.exception(e) def add_conn_to_list(self, interface, protocol, name, typ, domain, host, aprotocol, address, port, text, flags): ignore = domain ignore = protocol ignore = flags ignore = interface ignore = typ ignore = text ignore = aprotocol ignore = port try: model = self.widget("hostname").get_model() for row in model: if row[2] == str(name): # Already present in list return host = self.sanitize_hostname(str(host)) model.append([str(address), str(host), str(name)]) except Exception, e: logging.exception(e) def start_browse(self): if self.browser or not self.avahiserver: return # Call method to create new browser, and get back an object path for it. interface = -1 # physical interface to use? -1 is unspec protocol = 0 # 0 = IPv4, 1 = IPv6, -1 = Unspecified service = '_libvirt._tcp' # Service name to poll for flags = 0 # Extra option flags domain = "" # Domain to browse in. 
NULL uses default bpath = self.avahiserver.ServiceBrowserNew("(iissu)", interface, protocol, service, domain, flags) # Create browser interface for the new object self.browser = Gio.DBusProxy.new_sync(self.dbus, 0, None, "org.freedesktop.Avahi", bpath, "org.freedesktop.Avahi.ServiceBrowser", None) def cb(proxy, sender, signal, args): ignore = proxy ignore = sender if signal == "ItemNew": self.add_service(*args) elif signal == "ItemRemove": self.remove_service(*args) self.browser_sigs.append((self.browser, self.browser.connect("g-signal", cb))) def hostname_combo_changed(self, src): model = src.get_model() txt = src.get_child().get_text() row = None for currow in model: if currow[2] == txt: row = currow break if not row: return ip = row[0] host = row[1] entry = host if not entry: entry = ip self.widget("hostname").get_child().set_text(entry) def hostname_changed(self, src_ignore): self.populate_uri() def hypervisor_changed(self, src): is_session = (src.get_active() == HV_QEMU_SESSION) uiutil.set_grid_row_visible( self.widget("session-warning-box"), is_session) uiutil.set_grid_row_visible( self.widget("connect-remote"), not is_session) uiutil.set_grid_row_visible( self.widget("username-entry"), not is_session) uiutil.set_grid_row_visible( self.widget("hostname"), not is_session) uiutil.set_grid_row_visible( self.widget("transport"), not is_session) if is_session: self.widget("connect-remote").set_active(False) self.populate_uri() def username_changed(self, src_ignore): self.populate_uri() def connect_remote_toggled(self, src_ignore): is_remote = self.is_remote() self.widget("hostname").set_sensitive(is_remote) self.widget("transport").set_sensitive(is_remote) self.widget("autoconnect").set_active(not is_remote) self.widget("username-entry").set_sensitive(is_remote) self.populate_default_user() self.populate_uri() def transport_changed(self, src_ignore): self.populate_default_user() self.populate_uri() def populate_uri(self): uri = self.generate_uri() self.widget("uri-entry").set_text(uri) def populate_default_user(self): conn = self.widget("transport").get_active() default_user = default_conn_user(conn) self.widget("username-entry").set_text(default_user) def generate_uri(self): hv = self.widget("hypervisor").get_active() conn = self.widget("transport").get_active() host = self.widget("hostname").get_child().get_text().strip() user = self.widget("username-entry").get_text() is_remote = self.is_remote() hvstr = "" if hv == HV_XEN: hvstr = "xen" elif hv == HV_QEMU or hv == HV_QEMU_SESSION: hvstr = "qemu" elif hv == HV_BHYVE: hvstr = "bhyve" else: hvstr = "lxc" addrstr = "" if user: addrstr += user + "@" addrstr += host hoststr = "" if not is_remote: hoststr = ":///" else: if conn == CONN_TLS: hoststr = "+tls://" if conn == CONN_SSH: hoststr = "+ssh://" if conn == CONN_TCP: hoststr = "+tcp://" hoststr += addrstr + "/" uri = hvstr + hoststr if hv in (HV_QEMU, HV_BHYVE): uri += "system" elif hv == HV_QEMU_SESSION: uri += "session" return uri def validate(self): is_remote = self.is_remote() host = self.widget("hostname").get_child().get_text() if is_remote and not host: return self.err.val_err(_("A hostname is required for " "remote connections.")) return True def open_conn(self, ignore): if not self.validate(): return auto = False if self.widget("autoconnect").get_sensitive(): auto = self.widget("autoconnect").get_active() uri = self.generate_uri() logging.debug("Generate URI=%s, auto=%s", uri, auto) self.close() self.emit("completed", uri, auto) def sanitize_hostname(self, host): if host == 
"linux" or host == "localhost": host = "" if host.startswith("linux-"): tmphost = host[6:] try: long(tmphost) host = "" except ValueError: pass if host: host = self.check_resolve_host(host) return host def check_resolve_host(self, host): # Try to resolve hostname # # Avahi always uses 'hostname.local', but for some reason # fedora 12 out of the box can't resolve '.local' names # Attempt to resolve the name. If it fails, remove .local # if present, and try again if host.endswith(".local"): if self.can_resolve_local is False: host = host[:-6] elif self.can_resolve_local is None: try: socket.getaddrinfo(host, None) except: logging.debug("Couldn't resolve host '%s'. Stripping " "'.local' and retrying.", host) self.can_resolve_local = False host = self.check_resolve_host(host[:-6]) else: self.can_resolve_local = True else: if self.can_resolve_hostname is False: host = "" elif self.can_resolve_hostname is None: try: socket.getaddrinfo(host, None) except: logging.debug("Couldn't resolve host '%s'. Disabling " "host name resolution, only using IP addr", host) self.can_resolve_hostname = False else: self.can_resolve_hostname = True return host
gpl-2.0
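The hostname check above caches whether the local resolver can handle mDNS ".local" names and strips the suffix when it cannot. A minimal standalone sketch of that fallback, assuming only the standard socket module (the function name and module-level cache are illustrative, not part of virt-manager):

import socket

_can_resolve_local = None  # tri-state cache: None means "not yet tested"


def resolve_with_local_fallback(host):
    # Mirrors check_resolve_host: if a '.local' (Avahi/mDNS) name cannot
    # be resolved, strip the suffix and remember the resolver's limits.
    global _can_resolve_local
    if not host.endswith(".local"):
        return host
    if _can_resolve_local is False:
        return host[:-6]
    try:
        socket.getaddrinfo(host, None)
    except socket.gaierror:
        _can_resolve_local = False
        return host[:-6]
    _can_resolve_local = True
    return host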
wesley1001/formulize
libraries/phpopenid/admin/gettlds.py
126
1061
""" Fetch the current TLD list from the IANA Web site, parse it, and print an expression suitable for direct insertion into each library's trust root validation module Usage: python gettlds.py (php|python|ruby) Then cut-n-paste. """ import urllib2 import sys langs = { 'php': (r"'/\.(", "'", "|", "|' .", r")\.?$/'"), 'python': ("['", "'", "', '", "',", "']"), 'ruby': ("%w'", "", " ", "", "'"), } lang = sys.argv[1] prefix, line_prefix, separator, line_suffix, suffix = langs[lang] f = urllib2.urlopen('http://data.iana.org/TLD/tlds-alpha-by-domain.txt') tlds = [] output_line = "" for input_line in f: if input_line.startswith('#'): continue tld = input_line.strip().lower() new_output_line = output_line + prefix + tld if len(new_output_line) > 60: print output_line + line_suffix output_line = line_prefix + tld else: output_line = new_output_line prefix = separator print output_line + suffix
gpl-2.0
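gettlds.py accumulates TLDs into an output line and flushes it once the line passes 60 characters. A rough Python 3 equivalent of that fetch-and-wrap loop, assuming the IANA URL is unchanged (the helper names are illustrative):

import urllib.request

IANA_TLDS = "https://data.iana.org/TLD/tlds-alpha-by-domain.txt"


def fetch_tlds(url=IANA_TLDS):
    # Skip the '#' comment header and normalize to lowercase.
    with urllib.request.urlopen(url) as resp:
        lines = resp.read().decode("ascii").splitlines()
    return [ln.strip().lower() for ln in lines if ln and not ln.startswith("#")]


def as_python_list(tlds, width=60):
    # Same accumulate-and-flush wrapping strategy as the script above.
    rows, line = [], ""
    for tld in tlds:
        candidate = (line + ", " if line else "") + "'%s'" % tld
        if len(candidate) > width:
            rows.append(line + ",")
            line = "'%s'" % tld
        else:
            line = candidate
    rows.append(line)
    return "[" + "\n ".join(rows) + "]"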
RPI-OPENEDX/edx-platform
common/test/acceptance/tests/studio/test_studio_container.py
45
53260
""" Acceptance tests for Studio related to the container page. The container page is used both for displaying units, and for displaying containers within units. """ from nose.plugins.attrib import attr from unittest import skip from ...fixtures.course import XBlockFixtureDesc from ...pages.studio.component_editor import ComponentEditorView, ComponentVisibilityEditorView from ...pages.studio.container import ContainerPage from ...pages.studio.html_component_editor import HtmlComponentEditorView from ...pages.studio.utils import add_discussion, drag from ...pages.lms.courseware import CoursewarePage from ...pages.lms.staff_view import StaffPage from ...tests.helpers import create_user_partition_json import datetime from bok_choy.promise import Promise, EmptyPromise from base_studio_test import ContainerBase from xmodule.partitions.partitions import Group class NestedVerticalTest(ContainerBase): def populate_course_fixture(self, course_fixture): """ Sets up a course structure with nested verticals. """ self.container_title = "" self.group_a = "Group A" self.group_b = "Group B" self.group_empty = "Group Empty" self.group_a_item_1 = "Group A Item 1" self.group_a_item_2 = "Group A Item 2" self.group_b_item_1 = "Group B Item 1" self.group_b_item_2 = "Group B Item 2" self.group_a_handle = 0 self.group_a_item_1_handle = 1 self.group_a_item_2_handle = 2 self.group_empty_handle = 3 self.group_b_handle = 4 self.group_b_item_1_handle = 5 self.group_b_item_2_handle = 6 self.group_a_item_1_action_index = 0 self.group_a_item_2_action_index = 1 self.duplicate_label = "Duplicate of '{0}'" self.discussion_label = "Discussion" course_fixture.add_children( XBlockFixtureDesc('chapter', 'Test Section').add_children( XBlockFixtureDesc('sequential', 'Test Subsection').add_children( XBlockFixtureDesc('vertical', 'Test Unit').add_children( XBlockFixtureDesc('vertical', 'Test Container').add_children( XBlockFixtureDesc('vertical', 'Group A').add_children( XBlockFixtureDesc('html', self.group_a_item_1), XBlockFixtureDesc('html', self.group_a_item_2) ), XBlockFixtureDesc('vertical', 'Group Empty'), XBlockFixtureDesc('vertical', 'Group B').add_children( XBlockFixtureDesc('html', self.group_b_item_1), XBlockFixtureDesc('html', self.group_b_item_2) ) ) ) ) ) ) @skip("Flaky: 01/16/2015") @attr('shard_1') class DragAndDropTest(NestedVerticalTest): """ Tests of reordering within the container page. """ def drag_and_verify(self, source, target, expected_ordering): self.do_action_and_verify( lambda (container): drag(container, source, target, 40), expected_ordering ) def test_reorder_in_group(self): """ Drag Group A Item 2 before Group A Item 1. """ expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]}, {self.group_a: [self.group_a_item_2, self.group_a_item_1]}, {self.group_b: [self.group_b_item_1, self.group_b_item_2]}, {self.group_empty: []}] self.drag_and_verify(self.group_a_item_2_handle, self.group_a_item_1_handle, expected_ordering) def test_drag_to_top(self): """ Drag Group A Item 1 to top level (outside of Group A). """ expected_ordering = [{self.container_title: [self.group_a_item_1, self.group_a, self.group_empty, self.group_b]}, {self.group_a: [self.group_a_item_2]}, {self.group_b: [self.group_b_item_1, self.group_b_item_2]}, {self.group_empty: []}] self.drag_and_verify(self.group_a_item_1_handle, self.group_a_handle, expected_ordering) def test_drag_into_different_group(self): """ Drag Group B Item 1 into Group A (first element). 
""" expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]}, {self.group_a: [self.group_b_item_1, self.group_a_item_1, self.group_a_item_2]}, {self.group_b: [self.group_b_item_2]}, {self.group_empty: []}] self.drag_and_verify(self.group_b_item_1_handle, self.group_a_item_1_handle, expected_ordering) def test_drag_group_into_group(self): """ Drag Group B into Group A (first element). """ expected_ordering = [{self.container_title: [self.group_a, self.group_empty]}, {self.group_a: [self.group_b, self.group_a_item_1, self.group_a_item_2]}, {self.group_b: [self.group_b_item_1, self.group_b_item_2]}, {self.group_empty: []}] self.drag_and_verify(self.group_b_handle, self.group_a_item_1_handle, expected_ordering) def test_drag_after_addition(self): """ Add some components and then verify that drag and drop still works. """ group_a_menu = 0 def add_new_components_and_rearrange(container): # Add a video component to Group 1 add_discussion(container, group_a_menu) # Duplicate the first item in Group A container.duplicate(self.group_a_item_1_action_index) first_handle = self.group_a_item_1_handle # Drag newly added video component to top. drag(container, first_handle + 3, first_handle, 40) # Drag duplicated component to top. drag(container, first_handle + 2, first_handle, 40) duplicate_label = self.duplicate_label.format(self.group_a_item_1) expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]}, {self.group_a: [duplicate_label, self.discussion_label, self.group_a_item_1, self.group_a_item_2]}, {self.group_b: [self.group_b_item_1, self.group_b_item_2]}, {self.group_empty: []}] self.do_action_and_verify(add_new_components_and_rearrange, expected_ordering) @attr('shard_1') class AddComponentTest(NestedVerticalTest): """ Tests of adding a component to the container page. """ def add_and_verify(self, menu_index, expected_ordering): self.do_action_and_verify( lambda (container): add_discussion(container, menu_index), expected_ordering ) def test_add_component_in_group(self): group_b_menu = 2 expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]}, {self.group_a: [self.group_a_item_1, self.group_a_item_2]}, {self.group_b: [self.group_b_item_1, self.group_b_item_2, self.discussion_label]}, {self.group_empty: []}] self.add_and_verify(group_b_menu, expected_ordering) def test_add_component_in_empty_group(self): group_empty_menu = 1 expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]}, {self.group_a: [self.group_a_item_1, self.group_a_item_2]}, {self.group_b: [self.group_b_item_1, self.group_b_item_2]}, {self.group_empty: [self.discussion_label]}] self.add_and_verify(group_empty_menu, expected_ordering) def test_add_component_in_container(self): container_menu = 3 expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b, self.discussion_label]}, {self.group_a: [self.group_a_item_1, self.group_a_item_2]}, {self.group_b: [self.group_b_item_1, self.group_b_item_2]}, {self.group_empty: []}] self.add_and_verify(container_menu, expected_ordering) @attr('shard_1') class DuplicateComponentTest(NestedVerticalTest): """ Tests of duplicating a component on the container page. 
""" def duplicate_and_verify(self, source_index, expected_ordering): self.do_action_and_verify( lambda (container): container.duplicate(source_index), expected_ordering ) def test_duplicate_first_in_group(self): duplicate_label = self.duplicate_label.format(self.group_a_item_1) expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]}, {self.group_a: [self.group_a_item_1, duplicate_label, self.group_a_item_2]}, {self.group_b: [self.group_b_item_1, self.group_b_item_2]}, {self.group_empty: []}] self.duplicate_and_verify(self.group_a_item_1_action_index, expected_ordering) def test_duplicate_second_in_group(self): duplicate_label = self.duplicate_label.format(self.group_a_item_2) expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]}, {self.group_a: [self.group_a_item_1, self.group_a_item_2, duplicate_label]}, {self.group_b: [self.group_b_item_1, self.group_b_item_2]}, {self.group_empty: []}] self.duplicate_and_verify(self.group_a_item_2_action_index, expected_ordering) def test_duplicate_the_duplicate(self): first_duplicate_label = self.duplicate_label.format(self.group_a_item_1) second_duplicate_label = self.duplicate_label.format(first_duplicate_label) expected_ordering = [ {self.container_title: [self.group_a, self.group_empty, self.group_b]}, {self.group_a: [self.group_a_item_1, first_duplicate_label, second_duplicate_label, self.group_a_item_2]}, {self.group_b: [self.group_b_item_1, self.group_b_item_2]}, {self.group_empty: []} ] def duplicate_twice(container): container.duplicate(self.group_a_item_1_action_index) container.duplicate(self.group_a_item_1_action_index + 1) self.do_action_and_verify(duplicate_twice, expected_ordering) @attr('shard_1') class DeleteComponentTest(NestedVerticalTest): """ Tests of deleting a component from the container page. """ def delete_and_verify(self, source_index, expected_ordering): self.do_action_and_verify( lambda (container): container.delete(source_index), expected_ordering ) def test_delete_first_in_group(self): expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]}, {self.group_a: [self.group_a_item_2]}, {self.group_b: [self.group_b_item_1, self.group_b_item_2]}, {self.group_empty: []}] # Group A itself has a delete icon now, so item_1 is index 1 instead of 0. group_a_item_1_delete_index = 1 self.delete_and_verify(group_a_item_1_delete_index, expected_ordering) @attr('shard_1') class EditContainerTest(NestedVerticalTest): """ Tests of editing a container. """ def modify_display_name_and_verify(self, component): """ Helper method for changing a display name. """ modified_name = 'modified' self.assertNotEqual(component.name, modified_name) component.edit() component_editor = ComponentEditorView(self.browser, component.locator) component_editor.set_field_value_and_save('Display Name', modified_name) self.assertEqual(component.name, modified_name) def test_edit_container_on_unit_page(self): """ Test the "edit" button on a container appearing on the unit page. """ unit = self.go_to_unit_page() component = unit.xblocks[1] self.modify_display_name_and_verify(component) def test_edit_container_on_container_page(self): """ Test the "edit" button on a container appearing on the container page. """ container = self.go_to_nested_container_page() self.modify_display_name_and_verify(container) def test_edit_raw_html(self): """ Test the raw html editing functionality. 
""" modified_content = "<p>modified content</p>" #navigate to and open the component for editing unit = self.go_to_unit_page() container = unit.xblocks[1].go_to_container() component = container.xblocks[1].children[0] component.edit() html_editor = HtmlComponentEditorView(self.browser, component.locator) html_editor.set_content_and_save(modified_content, raw=True) #note we're expecting the <p> tags to have been removed self.assertEqual(component.student_content, "modified content") @attr('shard_3') class EditVisibilityModalTest(ContainerBase): """ Tests of the visibility settings modal for components on the unit page. """ VISIBILITY_LABEL_ALL = 'All Students and Staff' VISIBILITY_LABEL_SPECIFIC = 'Specific Content Groups' MISSING_GROUP_LABEL = 'Deleted Content Group\nContent group no longer exists. Please choose another or allow access to All Students and staff' VALIDATION_ERROR_LABEL = 'This component has validation issues.' VALIDATION_ERROR_MESSAGE = 'Error:\nThis component refers to deleted or invalid content groups.' GROUP_VISIBILITY_MESSAGE = 'Some content in this unit is visible only to particular content groups' def setUp(self): super(EditVisibilityModalTest, self).setUp() # Set up a cohort-schemed user partition self.course_fixture._update_xblock(self.course_fixture._course_location, { "metadata": { u"user_partitions": [ create_user_partition_json( 0, 'Configuration Dogs, Cats', 'Content Group Partition', [Group("0", 'Dogs'), Group("1", 'Cats')], scheme="cohort" ) ], }, }) self.container_page = self.go_to_unit_page() self.html_component = self.container_page.xblocks[1] def populate_course_fixture(self, course_fixture): """ Populate a simple course a section, subsection, and unit, and HTML component. """ course_fixture.add_children( XBlockFixtureDesc('chapter', 'Test Section').add_children( XBlockFixtureDesc('sequential', 'Test Subsection').add_children( XBlockFixtureDesc('vertical', 'Test Unit').add_children( XBlockFixtureDesc('html', 'Html Component') ) ) ) ) def edit_component_visibility(self, component): """ Edit the visibility of an xblock on the container page. """ component.edit_visibility() return ComponentVisibilityEditorView(self.browser, component.locator) def verify_selected_labels(self, visibility_editor, expected_labels): """ Verify that a visibility editor's selected labels match the expected ones. """ # If anything other than 'All Students and Staff', is selected, # 'Specific Content Groups' should be selected as well. if expected_labels != [self.VISIBILITY_LABEL_ALL]: expected_labels.append(self.VISIBILITY_LABEL_SPECIFIC) self.assertItemsEqual(expected_labels, [option.text for option in visibility_editor.selected_options]) def select_and_verify_saved(self, component, labels, expected_labels=None): """ Edit the visibility of an xblock on the container page and verify that the edit persists. If provided, verify that `expected_labels` are selected after save, otherwise expect that `labels` are selected after save. Note that `labels` are labels which should be clicked, but not necessarily checked. 
""" if expected_labels is None: expected_labels = labels # Make initial edit(s) and save visibility_editor = self.edit_component_visibility(component) for label in labels: visibility_editor.select_option(label, save=False) visibility_editor.save() # Re-open the modal and inspect its selected inputs visibility_editor = self.edit_component_visibility(component) self.verify_selected_labels(visibility_editor, expected_labels) visibility_editor.save() def verify_component_validation_error(self, component): """ Verify that we see validation errors for the given component. """ self.assertTrue(component.has_validation_error) self.assertEqual(component.validation_error_text, self.VALIDATION_ERROR_LABEL) self.assertEqual([self.VALIDATION_ERROR_MESSAGE], component.validation_error_messages) def verify_visibility_set(self, component, is_set): """ Verify that the container page shows that component visibility settings have been edited if `is_set` is True; otherwise verify that the container page shows no such information. """ if is_set: self.assertIn(self.GROUP_VISIBILITY_MESSAGE, self.container_page.sidebar_visibility_message) self.assertTrue(component.has_group_visibility_set) else: self.assertNotIn(self.GROUP_VISIBILITY_MESSAGE, self.container_page.sidebar_visibility_message) self.assertFalse(component.has_group_visibility_set) def update_component(self, component, metadata): """ Update a component's metadata and refresh the page. """ self.course_fixture._update_xblock(component.locator, {'metadata': metadata}) self.browser.refresh() self.container_page.wait_for_page() def remove_missing_groups(self, visibility_editor, component): """ Deselect the missing groups for a component. After save, verify that there are no missing group messages in the modal and that there is no validation error on the component. """ for option in visibility_editor.selected_options: if option.text == self.MISSING_GROUP_LABEL: option.click() visibility_editor.save() visibility_editor = self.edit_component_visibility(component) self.assertNotIn(self.MISSING_GROUP_LABEL, [item.text for item in visibility_editor.all_options]) visibility_editor.cancel() self.assertFalse(component.has_validation_error) def test_default_selection(self): """ Scenario: The component visibility modal selects visible to all by default. Given I have a unit with one component When I go to the container page for that unit And I open the visibility editor modal for that unit's component Then the default visibility selection should be 'All Students and Staff' And the container page should not display the content visibility warning """ self.verify_selected_labels(self.edit_component_visibility(self.html_component), [self.VISIBILITY_LABEL_ALL]) self.verify_visibility_set(self.html_component, False) def test_reset_to_all_students_and_staff(self): """ Scenario: The component visibility modal can be set to be visible to all students and staff. 
Given I have a unit with one component When I go to the container page for that unit And I open the visibility editor modal for that unit's component And I select 'Dogs' And I save the modal Then the container page should display the content visibility warning And I re-open the visibility editor modal for that unit's component And I select 'All Students and Staff' And I save the modal Then the visibility selection should be 'All Students and Staff' And the container page should not display the content visibility warning """ self.select_and_verify_saved(self.html_component, ['Dogs']) self.verify_visibility_set(self.html_component, True) self.select_and_verify_saved(self.html_component, [self.VISIBILITY_LABEL_ALL]) self.verify_visibility_set(self.html_component, False) def test_select_single_content_group(self): """ Scenario: The component visibility modal can be set to be visible to one content group. Given I have a unit with one component When I go to the container page for that unit And I open the visibility editor modal for that unit's component And I select 'Dogs' And I save the modal Then the visibility selection should be 'Dogs' and 'Specific Content Groups' And the container page should display the content visibility warning """ self.select_and_verify_saved(self.html_component, ['Dogs']) self.verify_visibility_set(self.html_component, True) def test_select_multiple_content_groups(self): """ Scenario: The component visibility modal can be set to be visible to multiple content groups. Given I have a unit with one component When I go to the container page for that unit And I open the visibility editor modal for that unit's component And I select 'Dogs' and 'Cats' And I save the modal Then the visibility selection should be 'Dogs', 'Cats', and 'Specific Content Groups' And the container page should display the content visibility warning """ self.select_and_verify_saved(self.html_component, ['Dogs', 'Cats']) self.verify_visibility_set(self.html_component, True) def test_select_zero_content_groups(self): """ Scenario: The component visibility modal can not be set to be visible to 'Specific Content Groups' without selecting those specific groups. Given I have a unit with one component When I go to the container page for that unit And I open the visibility editor modal for that unit's component And I select 'Specific Content Groups' And I save the modal Then the visibility selection should be 'All Students and Staff' And the container page should not display the content visibility warning """ self.select_and_verify_saved( self.html_component, [self.VISIBILITY_LABEL_SPECIFIC], expected_labels=[self.VISIBILITY_LABEL_ALL] ) self.verify_visibility_set(self.html_component, False) def test_missing_groups(self): """ Scenario: The component visibility modal shows a validation error when visibility is set to multiple unknown group ids. 
Given I have a unit with one component And that component's group access specifies multiple invalid group ids When I go to the container page for that unit Then I should see a validation error message on that unit's component And I open the visibility editor modal for that unit's component Then I should see that I have selected multiple deleted groups And the container page should display the content visibility warning And I de-select the missing groups And I save the modal Then the visibility selection should be 'All Students and Staff' And I should not see any validation errors on the component And the container page should not display the content visibility warning """ self.update_component(self.html_component, {'group_access': {0: [2, 3]}}) self.verify_component_validation_error(self.html_component) visibility_editor = self.edit_component_visibility(self.html_component) self.verify_selected_labels(visibility_editor, [self.MISSING_GROUP_LABEL] * 2) self.remove_missing_groups(visibility_editor, self.html_component) self.verify_visibility_set(self.html_component, False) def test_found_and_missing_groups(self): """ Scenario: The component visibility modal shows a validation error when visibility is set to multiple unknown group ids and multiple known group ids. Given I have a unit with one component And that component's group access specifies multiple invalid and valid group ids When I go to the container page for that unit Then I should see a validation error message on that unit's component And I open the visibility editor modal for that unit's component Then I should see that I have selected multiple deleted groups And the container page should display the content visibility warning And I de-select the missing groups And I save the modal Then the visibility selection should be the names of the valid groups. And I should not see any validation errors on the component And the container page should display the content visibility warning """ self.update_component(self.html_component, {'group_access': {0: [0, 1, 2, 3]}}) self.verify_component_validation_error(self.html_component) visibility_editor = self.edit_component_visibility(self.html_component) self.verify_selected_labels(visibility_editor, ['Dogs', 'Cats'] + [self.MISSING_GROUP_LABEL] * 2) self.remove_missing_groups(visibility_editor, self.html_component) visibility_editor = self.edit_component_visibility(self.html_component) self.verify_selected_labels(visibility_editor, ['Dogs', 'Cats']) self.verify_visibility_set(self.html_component, True) @attr('shard_1') class UnitPublishingTest(ContainerBase): """ Tests of the publishing control and related widgets on the Unit page. """ PUBLISHED_STATUS = "Publishing Status\nPublished (not yet released)" PUBLISHED_LIVE_STATUS = "Publishing Status\nPublished and Live" DRAFT_STATUS = "Publishing Status\nDraft (Unpublished changes)" LOCKED_STATUS = "Publishing Status\nVisible to Staff Only" RELEASE_TITLE_RELEASED = "RELEASED:" RELEASE_TITLE_RELEASE = "RELEASE:" LAST_PUBLISHED = 'Last published' LAST_SAVED = 'Draft saved on' def populate_course_fixture(self, course_fixture): """ Sets up a course structure with a unit and a single HTML child. 
""" self.html_content = '<p><strong>Body of HTML Unit.</strong></p>' self.courseware = CoursewarePage(self.browser, self.course_id) past_start_date = datetime.datetime(1974, 6, 22) self.past_start_date_text = "Jun 22, 1974 at 00:00 UTC" future_start_date = datetime.datetime(2100, 9, 13) course_fixture.add_children( XBlockFixtureDesc('chapter', 'Test Section').add_children( XBlockFixtureDesc('sequential', 'Test Subsection').add_children( XBlockFixtureDesc('vertical', 'Test Unit').add_children( XBlockFixtureDesc('html', 'Test html', data=self.html_content) ) ) ), XBlockFixtureDesc( 'chapter', 'Unlocked Section', metadata={'start': past_start_date.isoformat()} ).add_children( XBlockFixtureDesc('sequential', 'Unlocked Subsection').add_children( XBlockFixtureDesc('vertical', 'Unlocked Unit').add_children( XBlockFixtureDesc('problem', '<problem></problem>', data=self.html_content) ) ) ), XBlockFixtureDesc('chapter', 'Section With Locked Unit').add_children( XBlockFixtureDesc( 'sequential', 'Subsection With Locked Unit', metadata={'start': past_start_date.isoformat()} ).add_children( XBlockFixtureDesc( 'vertical', 'Locked Unit', metadata={'visible_to_staff_only': True} ).add_children( XBlockFixtureDesc('discussion', '', data=self.html_content) ) ) ), XBlockFixtureDesc( 'chapter', 'Unreleased Section', metadata={'start': future_start_date.isoformat()} ).add_children( XBlockFixtureDesc('sequential', 'Unreleased Subsection').add_children( XBlockFixtureDesc('vertical', 'Unreleased Unit') ) ) ) def test_publishing(self): """ Scenario: The publish title changes based on whether or not draft content exists Given I have a published unit with no unpublished changes When I go to the unit page in Studio Then the title in the Publish information box is "Published and Live" And the Publish button is disabled And the last published text contains "Last published" And the last saved text contains "Last published" And when I add a component to the unit Then the title in the Publish information box is "Draft (Unpublished changes)" And the last saved text contains "Draft saved on" And the Publish button is enabled And when I click the Publish button Then the title in the Publish information box is "Published and Live" And the last published text contains "Last published" And the last saved text contains "Last published" """ unit = self.go_to_unit_page() self._verify_publish_title(unit, self.PUBLISHED_LIVE_STATUS) # Start date set in course fixture to 1970. self._verify_release_date_info( unit, self.RELEASE_TITLE_RELEASED, 'Jan 01, 1970 at 00:00 UTC\nwith Section "Test Section"' ) self._verify_last_published_and_saved(unit, self.LAST_PUBLISHED, self.LAST_PUBLISHED) # Should not be able to click on Publish action -- but I don't know how to test that it is not clickable. # TODO: continue discussion with Muhammad and Jay about this. # Add a component to the page so it will have unpublished changes. 
add_discussion(unit) self._verify_publish_title(unit, self.DRAFT_STATUS) self._verify_last_published_and_saved(unit, self.LAST_PUBLISHED, self.LAST_SAVED) unit.publish_action.click() unit.wait_for_ajax() self._verify_publish_title(unit, self.PUBLISHED_LIVE_STATUS) self._verify_last_published_and_saved(unit, self.LAST_PUBLISHED, self.LAST_PUBLISHED) def test_discard_changes(self): """ Scenario: The publish title changes after "Discard Changes" is clicked Given I have a published unit with no unpublished changes When I go to the unit page in Studio Then the Discard Changes button is disabled And I add a component to the unit Then the title in the Publish information box is "Draft (Unpublished changes)" And the Discard Changes button is enabled And when I click the Discard Changes button Then the title in the Publish information box is "Published and Live" """ unit = self.go_to_unit_page() add_discussion(unit) self._verify_publish_title(unit, self.DRAFT_STATUS) unit.discard_changes() self._verify_publish_title(unit, self.PUBLISHED_LIVE_STATUS) def test_view_live_no_changes(self): """ Scenario: "View Live" shows published content in LMS Given I have a published unit with no unpublished changes When I go to the unit page in Studio Then the View Live button is enabled And when I click on the View Live button Then I see the published content in LMS """ unit = self.go_to_unit_page() self._view_published_version(unit) self._verify_components_visible(['html']) def test_view_live_changes(self): """ Scenario: "View Live" does not show draft content in LMS Given I have a published unit with no unpublished changes When I go to the unit page in Studio And when I add a component to the unit And when I click on the View Live button Then I see the published content in LMS And I do not see the unpublished component """ unit = self.go_to_unit_page() add_discussion(unit) self._view_published_version(unit) self._verify_components_visible(['html']) self.assertEqual(self.html_content, self.courseware.xblock_component_html_content(0)) def test_view_live_after_publish(self): """ Scenario: "View Live" shows newly published content Given I have a published unit with no unpublished changes When I go to the unit page in Studio And when I add a component to the unit And when I click the Publish button And when I click on the View Live button Then I see the newly published component """ unit = self.go_to_unit_page() add_discussion(unit) unit.publish_action.click() self._view_published_version(unit) self._verify_components_visible(['html', 'discussion']) def test_initially_unlocked_visible_to_students(self): """ Scenario: An unlocked unit with release date in the past is visible to students Given I have a published unlocked unit with release date in the past When I go to the unit page in Studio Then the unit has a warning that it is visible to students And it is marked as "RELEASED" with release date in the past visible And when I click on the View Live Button And when I view the course as a student Then I see the content in the unit """ unit = self.go_to_unit_page("Unlocked Section", "Unlocked Subsection", "Unlocked Unit") self._verify_publish_title(unit, self.PUBLISHED_LIVE_STATUS) self.assertTrue(unit.currently_visible_to_students) self._verify_release_date_info( unit, self.RELEASE_TITLE_RELEASED, self.past_start_date_text + '\n' + 'with Section "Unlocked Section"' ) self._view_published_version(unit) self._verify_student_view_visible(['problem']) def test_locked_visible_to_staff_only(self): """ Scenario: After locking 
a unit with release date in the past, it is only visible to staff Given I have a published unlocked unit with release date in the past When I go to the unit page in Studio And when I select "Hide from students" Then the unit does not have a warning that it is visible to students And the unit does not display inherited staff lock And when I click on the View Live Button Then I see the content in the unit when logged in as staff And when I view the course as a student Then I do not see any content in the unit """ unit = self.go_to_unit_page("Unlocked Section", "Unlocked Subsection", "Unlocked Unit") checked = unit.toggle_staff_lock() self.assertTrue(checked) self.assertFalse(unit.currently_visible_to_students) self.assertFalse(unit.shows_inherited_staff_lock()) self._verify_publish_title(unit, self.LOCKED_STATUS) self._view_published_version(unit) # Will initially be in staff view, locked component should be visible. self._verify_components_visible(['problem']) # Switch to student view and verify not visible self._verify_student_view_locked() def test_initially_locked_not_visible_to_students(self): """ Scenario: A locked unit with release date in the past is not visible to students Given I have a published locked unit with release date in the past When I go to the unit page in Studio Then the unit does not have a warning that it is visible to students And it is marked as "RELEASE" with release date in the past visible And when I click on the View Live Button And when I view the course as a student Then I do not see any content in the unit """ unit = self.go_to_unit_page("Section With Locked Unit", "Subsection With Locked Unit", "Locked Unit") self._verify_publish_title(unit, self.LOCKED_STATUS) self.assertFalse(unit.currently_visible_to_students) self._verify_release_date_info( unit, self.RELEASE_TITLE_RELEASE, self.past_start_date_text + '\n' + 'with Subsection "Subsection With Locked Unit"' ) self._view_published_version(unit) self._verify_student_view_locked() def test_unlocked_visible_to_all(self): """ Scenario: After unlocking a unit with release date in the past, it is visible to both students and staff Given I have a published unlocked unit with release date in the past When I go to the unit page in Studio And when I deselect "Hide from students" Then the unit does have a warning that it is visible to students And when I click on the View Live Button Then I see the content in the unit when logged in as staff And when I view the course as a student Then I see the content in the unit """ unit = self.go_to_unit_page("Section With Locked Unit", "Subsection With Locked Unit", "Locked Unit") checked = unit.toggle_staff_lock() self.assertFalse(checked) self._verify_publish_title(unit, self.PUBLISHED_LIVE_STATUS) self.assertTrue(unit.currently_visible_to_students) self._view_published_version(unit) # Will initially be in staff view, components always visible. self._verify_components_visible(['discussion']) # Switch to student view and verify visible. 
self._verify_student_view_visible(['discussion']) def test_explicit_lock_overrides_implicit_subsection_lock_information(self): """ Scenario: A unit's explicit staff lock hides its inherited subsection staff lock information Given I have a course with sections, subsections, and units And I have enabled explicit staff lock on a subsection When I visit the unit page Then the unit page shows its inherited staff lock And I enable explicit staff locking Then the unit page does not show its inherited staff lock And when I disable explicit staff locking Then the unit page now shows its inherited staff lock """ self.outline.visit() self.outline.expand_all_subsections() subsection = self.outline.section_at(0).subsection_at(0) unit = subsection.unit_at(0) subsection.set_staff_lock(True) unit_page = unit.go_to() self._verify_explicit_lock_overrides_implicit_lock_information(unit_page) def test_explicit_lock_overrides_implicit_section_lock_information(self): """ Scenario: A unit's explicit staff lock hides its inherited subsection staff lock information Given I have a course with sections, subsections, and units And I have enabled explicit staff lock on a section When I visit the unit page Then the unit page shows its inherited staff lock And I enable explicit staff locking Then the unit page does not show its inherited staff lock And when I disable explicit staff locking Then the unit page now shows its inherited staff lock """ self.outline.visit() self.outline.expand_all_subsections() section = self.outline.section_at(0) unit = section.subsection_at(0).unit_at(0) section.set_staff_lock(True) unit_page = unit.go_to() self._verify_explicit_lock_overrides_implicit_lock_information(unit_page) def test_published_unit_with_draft_child(self): """ Scenario: A published unit with a draft child can be published Given I have a published unit with no unpublished changes When I go to the unit page in Studio And edit the content of the only component Then the content changes And the title in the Publish information box is "Draft (Unpublished changes)" And when I click the Publish button Then the title in the Publish information box is "Published and Live" And when I click the View Live button Then I see the changed content in LMS """ modified_content = 'modified content' unit = self.go_to_unit_page() component = unit.xblocks[1] component.edit() HtmlComponentEditorView(self.browser, component.locator).set_content_and_save(modified_content) self.assertEqual(component.student_content, modified_content) self._verify_publish_title(unit, self.DRAFT_STATUS) unit.publish_action.click() unit.wait_for_ajax() self._verify_publish_title(unit, self.PUBLISHED_LIVE_STATUS) self._view_published_version(unit) self.assertTrue(modified_content in self.courseware.xblock_component_html_content(0)) def test_cancel_does_not_create_draft(self): """ Scenario: Editing a component and then canceling does not create a draft version (TNL-399) Given I have a published unit with no unpublished changes When I go to the unit page in Studio And edit the content of an HTML component and then press cancel Then the content does not change And the title in the Publish information box is "Published and Live" And when I reload the page Then the title in the Publish information box is "Published and Live" """ unit = self.go_to_unit_page() component = unit.xblocks[1] component.edit() HtmlComponentEditorView(self.browser, component.locator).set_content_and_cancel("modified content") self.assertEqual(component.student_content, "Body of HTML Unit.") 
self._verify_publish_title(unit, self.PUBLISHED_LIVE_STATUS) self.browser.refresh() unit.wait_for_page() self._verify_publish_title(unit, self.PUBLISHED_LIVE_STATUS) def test_delete_child_in_published_unit(self): """ Scenario: A published unit can be published again after deleting a child Given I have a published unit with no unpublished changes When I go to the unit page in Studio And delete the only component Then the title in the Publish information box is "Draft (Unpublished changes)" And when I click the Publish button Then the title in the Publish information box is "Published and Live" And when I click the View Live button Then I see an empty unit in LMS """ unit = self.go_to_unit_page() unit.delete(0) self._verify_publish_title(unit, self.DRAFT_STATUS) unit.publish_action.click() unit.wait_for_ajax() self._verify_publish_title(unit, self.PUBLISHED_LIVE_STATUS) self._view_published_version(unit) self.assertEqual(0, self.courseware.num_xblock_components) def test_published_not_live(self): """ Scenario: The publish title displays correctly for units that are not live Given I have a published unit with no unpublished changes that releases in the future When I go to the unit page in Studio Then the title in the Publish information box is "Published (not yet released)" And when I add a component to the unit Then the title in the Publish information box is "Draft (Unpublished changes)" And when I click the Publish button Then the title in the Publish information box is "Published (not yet released)" """ unit = self.go_to_unit_page('Unreleased Section', 'Unreleased Subsection', 'Unreleased Unit') self._verify_publish_title(unit, self.PUBLISHED_STATUS) add_discussion(unit) self._verify_publish_title(unit, self.DRAFT_STATUS) unit.publish_action.click() unit.wait_for_ajax() self._verify_publish_title(unit, self.PUBLISHED_STATUS) def _view_published_version(self, unit): """ Goes to the published version, then waits for the browser to load the page. """ unit.view_published_version() self.assertEqual(len(self.browser.window_handles), 2) self.courseware.wait_for_page() def _verify_and_return_staff_page(self): """ Verifies that the browser is on the staff page and returns a StaffPage. """ page = StaffPage(self.browser, self.course_id) EmptyPromise(page.is_browser_on_page, 'Browser is on staff page in LMS').fulfill() return page def _verify_student_view_locked(self): """ Verifies no component is visible when viewing as a student. """ self._verify_and_return_staff_page().set_staff_view_mode('Student') self.assertEqual(0, self.courseware.num_xblock_components) def _verify_student_view_visible(self, expected_components): """ Verifies expected components are visible when viewing as a student. """ self._verify_and_return_staff_page().set_staff_view_mode('Student') self._verify_components_visible(expected_components) def _verify_components_visible(self, expected_components): """ Verifies the expected components are visible (and there are no extras). """ self.assertEqual(len(expected_components), self.courseware.num_xblock_components) for index, component in enumerate(expected_components): self.assertEqual(component, self.courseware.xblock_component_type(index)) def _verify_release_date_info(self, unit, expected_title, expected_date): """ Verifies how the release date is displayed in the publishing sidebar. 
""" self.assertEqual(expected_title, unit.release_title) self.assertEqual(expected_date, unit.release_date) def _verify_publish_title(self, unit, expected_title): """ Waits for the publish title to change to the expected value. """ def wait_for_title_change(): return (unit.publish_title == expected_title, unit.publish_title) Promise(wait_for_title_change, "Publish title incorrect. Found '" + unit.publish_title + "'").fulfill() def _verify_last_published_and_saved(self, unit, expected_published_prefix, expected_saved_prefix): """ Verifies that last published and last saved messages respectively contain the given strings. """ self.assertTrue(expected_published_prefix in unit.last_published_text) self.assertTrue(expected_saved_prefix in unit.last_saved_text) def _verify_explicit_lock_overrides_implicit_lock_information(self, unit_page): """ Verifies that a unit with inherited staff lock does not display inherited information when explicitly locked. """ self.assertTrue(unit_page.shows_inherited_staff_lock()) unit_page.toggle_staff_lock(inherits_staff_lock=True) self.assertFalse(unit_page.shows_inherited_staff_lock()) unit_page.toggle_staff_lock(inherits_staff_lock=True) self.assertTrue(unit_page.shows_inherited_staff_lock()) # TODO: need to work with Jay/Christine to get testing of "Preview" working. # def test_preview(self): # unit = self.go_to_unit_page() # add_discussion(unit) # unit.preview() # self.assertEqual(2, self.courseware.num_xblock_components) # self.assertEqual('html', self.courseware.xblock_component_type(0)) # self.assertEqual('discussion', self.courseware.xblock_component_type(1)) @attr('shard_3') class DisplayNameTest(ContainerBase): """ Test consistent use of display_name_with_default """ def populate_course_fixture(self, course_fixture): """ Sets up a course structure with nested verticals. """ course_fixture.add_children( XBlockFixtureDesc('chapter', 'Test Section').add_children( XBlockFixtureDesc('sequential', 'Test Subsection').add_children( XBlockFixtureDesc('vertical', 'Test Unit').add_children( XBlockFixtureDesc('vertical', None) ) ) ) ) def test_display_name_default(self): """ Scenario: Given that an XBlock with a dynamic display name has been added to the course, When I view the unit page and note the display name of the block, Then I see the dynamically generated display name, And when I then go to the container page for that same block, Then I see the same generated display name. """ # Unfortunately no blocks in the core platform implement display_name_with_default # in an interesting way for this test, so we are just testing for consistency and not # the actual value. unit = self.go_to_unit_page() test_block = unit.xblocks[1] title_on_unit_page = test_block.name container = test_block.go_to_container() self.assertEqual(container.name, title_on_unit_page) @attr('shard_3') class ProblemCategoryTabsTest(ContainerBase): """ Test to verify tabs in problem category. """ def setUp(self, is_staff=True): super(ProblemCategoryTabsTest, self).setUp(is_staff=is_staff) def populate_course_fixture(self, course_fixture): """ Sets up course structure. """ course_fixture.add_children( XBlockFixtureDesc('chapter', 'Test Section').add_children( XBlockFixtureDesc('sequential', 'Test Subsection').add_children( XBlockFixtureDesc('vertical', 'Test Unit') ) ) ) def test_correct_tabs_present(self): """ Scenario: Verify that correct tabs are present in problem category. 
Given I am a staff user When I go to unit page Then I only see `Common Problem Types` and `Advanced` tabs in `problem` category """ self.go_to_unit_page() page = ContainerPage(self.browser, None) self.assertEqual(page.get_category_tab_names('problem'), ['Common Problem Types', 'Advanced']) def test_common_problem_types_tab(self): """ Scenario: Verify that correct components are present in Common Problem Types tab. Given I am a staff user When I go to unit page Then I see correct components under `Common Problem Types` tab in `problem` category """ self.go_to_unit_page() page = ContainerPage(self.browser, None) expected_components = [ "Blank Common Problem", "Checkboxes", "Dropdown", "Multiple Choice", "Numerical Input", "Text Input", "Checkboxes with Hints and Feedback", "Dropdown with Hints and Feedback", "Multiple Choice with Hints and Feedback", "Numerical Input with Hints and Feedback", "Text Input with Hints and Feedback", ] self.assertEqual(page.get_category_tab_components('problem', 1), expected_components)
agpl-3.0
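The drag, duplicate, and delete tests above all express their post-condition as a list of single-key dicts mapping a container to its expected child ordering. A small sketch of how such an assertion could be checked against a plain dict tree (this helper is hypothetical, not part of the edx page objects):

def assert_ordering(tree, expected_ordering):
    # `tree` maps container name -> ordered list of child names;
    # `expected_ordering` uses the [{parent: [children]}, ...] shape
    # seen in the tests above.
    for entry in expected_ordering:
        (parent, children), = entry.items()
        actual = tree.get(parent, [])
        assert actual == children, (
            "%s: expected %r, got %r" % (parent, children, actual))

# e.g. assert_ordering({"Group A": ["Item 1"]}, [{"Group A": ["Item 1"]}])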
sravangottapu/Ip_Scanner
ip_scanner.py
1
1187
import threading
import time
import shlex
import sys
import subprocess

alive = True
f = open("list.txt", "w")


class myThread(threading.Thread):
    """Pings a single IP in the background and records reachable hosts."""

    def __init__(self, var, ip):
        threading.Thread.__init__(self)
        self.var = var
        self.ip = ip

    def run(self):
        if alive:
            ping_ip(self.var, self.ip)
        print("Thread Exited")


def ping_ip(cmd, ip):
    """Run the ping command; on success append the IP to list.txt."""
    try:
        subprocess.check_output(cmd)
        f.write(ip + "\n")
        print(ip + " Reachable")
    except subprocess.CalledProcessError:
        print(ip + " Not Reachable")


first = int(input("Enter the first Ip"))
second = int(input("Enter the second Ip"))

cmd_no_ip = "ping -c1 "  # one echo request per host
start = time.time()

for i in range(first, second):
    ip = "172.16.114." + str(i)
    cmd = shlex.split(cmd_no_ip + ip)
    try:
        thread1 = myThread(cmd, ip)
        thread1.start()
        thread1.join(1)
    except RuntimeError:
        print("Could not start thread for " + ip)

alive = False
f.close()
print("Total Time " + str(time.time() - start))
sys.exit()
gpl-3.0
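The one-thread-per-address pattern above works, but a pool keeps the same ping-sweep idea bounded and collects results cleanly. A sketch using only the standard library, assuming a Linux ping that accepts -c1 and -W1:

import subprocess
from concurrent.futures import ThreadPoolExecutor


def ping(ip):
    # One echo request, one-second timeout; discard ping's own output.
    rc = subprocess.run(["ping", "-c1", "-W1", ip],
                        stdout=subprocess.DEVNULL,
                        stderr=subprocess.DEVNULL).returncode
    return ip if rc == 0 else None


def sweep(prefix="172.16.114.", first=1, last=255, workers=32):
    ips = [prefix + str(i) for i in range(first, last)]
    with ThreadPoolExecutor(max_workers=workers) as pool:
        return [ip for ip in pool.map(ping, ips) if ip]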
mbj4668/pyang
pyang/plugins/threegpp.py
1
11115
"""3GPP usage guidelines plugin See 3GPP TS 32.160 clause 6.2 Copyright Ericsson 2020 Author balazs.lengyel@ericsson.com Revision 2020-11-25 Checks implemented 6.2.1.2 Module name starts with _3gpp- 6.2.1.3 namespace pattern urn:3gpp:sa5:<module-name> 6.2.1.4-a prefix ends with 3gpp 6.2.1.4-b prefix.length <= 10 char 6.2.1.5 yang 1.1 missing 6.2.1.5 yang 1.1 incorrect 6.2.1.6-a anydata 6.2.1.6-b anyxml 6.2.1.6-c rpc 6.2.1.6-d deviation 6.2.1.9 description not needed for enum, bit, choice, container, leaf-list, leaf, typedef, grouping, augment, uses 6.2.1.b-a module-description-missing 6.2.1.b-b module-organization-missing 6.2.1.b-c module-organization includes 3gpp 6.2.1.b-d module-contact-missing 6.2.1.b-d module-contact-incorrect 6.2.1.c module-reference-missing 6.2.1.c module-reference-incorrect 6.2.1.d-a module-revision-missing 6.2.1.d-a module-revision-reference-missing 6.2.1.e default meaning 6.2.1.f-a linelength > 80 6.2.1.f-b no-tabs 6.2.1.f-c no-strange-chars 6.2.1.f-d no-CR-chars 6.2-a no-containers """ import optparse import re import io import sys from pyang import plugin from pyang import statements from pyang import error from pyang.error import err_add from pyang.plugins import lint def pyang_plugin_init(): plugin.register_plugin(THREEGPPlugin()) class THREEGPPlugin(lint.LintPlugin): def __init__(self): lint.LintPlugin.__init__(self) self.modulename_prefixes = ['_3gpp'] def add_opts(self, optparser): optlist = [ optparse.make_option("--3gpp", dest="threegpp", action="store_true", help="Validate the module(s) according to " \ "3GPP rules."), ] optparser.add_options(optlist) def setup_ctx(self, ctx): if not ctx.opts.threegpp: return self._setup_ctx(ctx) error.add_error_code( '3GPP_BAD_NAMESPACE_VALUE', 3, '3GPP: the namespace should be urn:3gpp:sa5:%s') statements.add_validation_fun( 'grammar', ['namespace'], lambda ctx, s: self.v_chk_namespace(ctx, s)) error.add_error_code( '3GPP_BAD_PREFIX_VALUE', 3, '3GPP: the prefix should end with 3gpp') error.add_error_code( '3GPP_TOO_LONG_PREFIX', 3, '3GPP: the prefix should not be longer than 13 characters') statements.add_validation_fun( 'grammar', ['prefix'], lambda ctx, s: self.v_chk_prefix(ctx, s)) error.add_error_code( '3GPP_BAD_YANG_VERSION', 3, '3GPP: the yang-version should be 1.1') statements.add_validation_fun( 'grammar', ['yang-version'], lambda ctx, s: self.v_chk_yang_version(ctx, s)) # check that yang-version is present. If not, # it defaults to 1. 
which is bad for 3GPP statements.add_validation_fun( 'grammar', ['module'], lambda ctx, s: self.v_chk_yang_version_present(ctx, s)) error.add_error_code( '3GPP_STATEMENT_NOT_ALLOWED', 3, ('3GPP: YANG statements anydata, anyxml, deviation, rpc ' 'should not be used')) statements.add_validation_fun( 'grammar', ['anydata' , 'anyxml' , 'deviation' , 'rpc'], lambda ctx, s: self.v_chk_not_allowed_statements(ctx, s)) error.add_error_code( '3GPP_BAD_ORGANIZATION', 3, '3GPP: organization statement must include 3GPP') statements.add_validation_fun( 'grammar', ['organization'], lambda ctx, s: self.v_chk_organization(ctx, s)) error.add_error_code( '3GPP_BAD_CONTACT', 3, '3GPP: incorrect contact statement') statements.add_validation_fun( 'grammar', ['contact'], lambda ctx, s: self.v_chk_contact(ctx, s)) error.add_error_code( '3GPP_MISSING_MODULE_REFERENCE', 3, '3GPP: the module should have a reference substatement') statements.add_validation_fun( 'grammar', ['module'], lambda ctx, s: self.v_chk_module_reference_present(ctx, s)) error.add_error_code( '3GPP_BAD_MODULE_REFERENCE', 3, '3GPP: the module\'s reference substatement is incorrect') statements.add_validation_fun( 'grammar', ['reference'], lambda ctx, s: self.v_chk_module_reference(ctx, s)) error.add_error_code( '3GPP_TAB_IN_FILE', 3, '3GPP: tab characters should not be used in YANG modules') error.add_error_code( '3GPP_WHITESPACE_AT_END_OF_LINE', 3, '3GPP: extra whitespace should not be added at the end of the line') error.add_error_code( '3GPP_LONG_LINE', 3, '3GPP: line longer than 80 characters') error.add_error_code( '3GPP_CR_IN_FILE', 3, ('3GPP: Carriage-return characters should not be used. ' 'End-of-line should be just one LF character')) error.add_error_code( '3GPP_NON_ASCII', 4, '3GPP: the module should only use ASCII characters') statements.add_validation_fun( 'grammar', ['module'], lambda ctx, s: self.v_chk_3gpp_format(ctx, s)) error.add_error_code( '3GPP_LIMITED_CONTAINER_USE', 4, ('3GPP: containers should only be used to contain the attributes ' 'of a class')) statements.add_validation_fun( 'grammar', ['container'], lambda ctx, s: self.v_chk_limited_container_use(ctx, s)) def pre_validate_ctx(self, ctx, modules): if ctx.opts.threegpp: ctx.canonical = False return def v_chk_namespace(self, ctx, stmt): r = 'urn:3gpp:sa5:' + stmt.i_module.arg +'$' if re.match(r, stmt.arg) is None: err_add(ctx.errors, stmt.pos, '3GPP_BAD_NAMESPACE_VALUE', stmt.i_module.arg) def v_chk_prefix(self, ctx, stmt): if stmt.parent.keyword != 'module' : return r = '.+3gpp$' if re.match(r, stmt.arg) is None: err_add(ctx.errors, stmt.pos, '3GPP_BAD_PREFIX_VALUE',()) if len(stmt.arg) > 13 : err_add(ctx.errors, stmt.pos, '3GPP_TOO_LONG_PREFIX',()) def v_chk_yang_version_present(self, ctx, stmt): yang_version_present = False for stmt in stmt.substmts: if stmt.keyword == 'yang-version' : yang_version_present = True if not(yang_version_present) : err_add(ctx.errors, stmt.pos, '3GPP_BAD_YANG_VERSION',()) def v_chk_yang_version(self, ctx, stmt): r = '1.1' if re.match(r, stmt.arg) is None: err_add(ctx.errors, stmt.pos, '3GPP_BAD_YANG_VERSION',()) def v_chk_not_allowed_statements(self, ctx, stmt): err_add(ctx.errors, stmt.pos, '3GPP_STATEMENT_NOT_ALLOWED',()) def v_chk_organization(self, ctx, stmt): r = '3GPP' if re.search(r, stmt.arg, re.IGNORECASE) is None: err_add(ctx.errors, stmt.pos, '3GPP_BAD_ORGANIZATION',()) def v_chk_contact(self, ctx, stmt): if stmt.arg != ('https://www.3gpp.org/DynaReport/' 'TSG-WG--S5--officials.htm?Itemid=464'): err_add(ctx.errors, stmt.pos, 
'3GPP_BAD_CONTACT',()) def v_chk_module_reference_present(self, ctx, stmt): module_reference_present = False for stmt in stmt.substmts: if stmt.keyword == 'reference' : module_reference_present = True if not(module_reference_present) : err_add(ctx.errors, stmt.pos, '3GPP_MISSING_MODULE_REFERENCE',()) def v_chk_module_reference(self, ctx, stmt): if stmt.parent.keyword != 'module' : return if not(stmt.arg.startswith('3GPP TS ')) : err_add(ctx.errors, stmt.pos, '3GPP_BAD_MODULE_REFERENCE',()) def v_chk_3gpp_format(self, ctx, stmt): if (not(stmt.arg.startswith("_3gpp"))): return filename = stmt.pos.ref try: fd = io.open(filename, "r", encoding="utf-8", newline='') pos = error.Position(stmt.pos.ref) pos.top = stmt lineno = 0 for line in fd: lineno += 1 pos.line = lineno # no tabs if (line.find('\t') != -1 ): err_add(ctx.errors, pos, '3GPP_TAB_IN_FILE',()) # no whitespace after the line # removed for now as there are just too many of these # errors # if (re.search('.*\s+\n',line) != None ): # err_add(ctx.errors, self.pos, # '3GPP_WHITESPACE_AT_END_OF_LINE',()) # lines shorter then 80 char if (len(line) > 82 ): err_add(ctx.errors, pos, '3GPP_LONG_LINE',()) # EOL should be just NL no CR if (line.find('\r') != -1 ): err_add(ctx.errors, pos, '3GPP_CR_IN_FILE',()) # only us-ascii chars try: line.encode('ascii') except UnicodeEncodeError: err_add(ctx.errors, pos, '3GPP_NON_ASCII',()) except IOError as ex: sys.stderr.write("error %s: %s\n" % (filename, ex)) sys.exit(1) except UnicodeDecodeError as ex: s = str(ex).replace('utf-8', 'utf8') sys.stderr.write("%s: unicode error: %s\n" % (filename, s)) sys.exit(1) def v_chk_limited_container_use(self, ctx, stmt): if stmt.arg != 'attributes' or stmt.parent.keyword != 'list' : err_add(ctx.errors, stmt.pos, '3GPP_LIMITED_CONTAINER_USE',()) def post_validate_ctx(self, ctx, modules): if not ctx.opts.threegpp: return """Remove some lint errors that 3GPP considers acceptable""" for ctx_error in ctx.errors[:]: if ((ctx_error[1] == "LINT_MISSING_REQUIRED_SUBSTMT" or ctx_error[1] == "LINT_MISSING_RECOMMENDED_SUBSTMT") and ctx_error[2][2] == 'description' and (ctx_error[2][1] == 'enum' or ctx_error[2][1] == 'bit' or ctx_error[2][1] == 'choice' or ctx_error[2][1] == 'container' or ctx_error[2][1] == 'leaf-list' or ctx_error[2][1] == 'leaf' or ctx_error[2][1] == 'typedef' or ctx_error[2][1] == 'grouping' or ctx_error[2][1] == 'augment' or ctx_error[2][1] == 'uses')): # remove error from ctx ctx.errors.remove(ctx_error) return
isc
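Every check in the plugin follows the same two-step registration: declare an error code, then attach a validation function to a grammar phase. A minimal sketch of one more check in that style, using the same pyang calls the plugin itself uses (the rule itself is invented purely for illustration):

from pyang import error, statements
from pyang.error import err_add


def register_example_check():
    # Step 1: declare the error code (level 4 = warning).
    error.add_error_code(
        '3GPP_EXAMPLE_LEAF_NAME', 4,
        '3GPP example: leaf should not be named "id"')
    # Step 2: hook a checker into the 'grammar' validation phase.
    statements.add_validation_fun(
        'grammar', ['leaf'],
        lambda ctx, s: _chk_leaf_name(ctx, s))


def _chk_leaf_name(ctx, stmt):
    if stmt.arg == 'id':
        err_add(ctx.errors, stmt.pos, '3GPP_EXAMPLE_LEAF_NAME', ())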
danfairs/django-modelmerge
testproject/testproject/wsgi.py
6
1144
""" WSGI config for testproject project. This module contains the WSGI application used by Django's development server and any production WSGI deployments. It should expose a module-level variable named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover this application via the ``WSGI_APPLICATION`` setting. Usually you will have the standard Django WSGI application here, but it also might make sense to replace the whole Django WSGI application with a custom one that later delegates to the Django one. For example, you could introduce WSGI middleware here, or combine a Django application with an application of another framework. """ import os os.environ.setdefault("DJANGO_SETTINGS_MODULE", "testproject.settings") # This application object is used by any WSGI server configured to use this # file. This includes Django's development server, if the WSGI_APPLICATION # setting points here. from django.core.wsgi import get_wsgi_application application = get_wsgi_application() # Apply WSGI middleware here. # from helloworld.wsgi import HelloWorldApplication # application = HelloWorldApplication(application)
bsd-3-clause
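The commented-out lines above show where middleware would wrap the Django application. A minimal sketch of such a wrapper, adding one response header (the names are illustrative):

def add_header_middleware(app, header=("X-Example", "1")):
    # Standard WSGI wrapping: intercept start_response to extend headers.
    def wrapped(environ, start_response):
        def custom_start(status, headers, exc_info=None):
            return start_response(status, headers + [header], exc_info)
        return app(environ, custom_start)
    return wrapped

# application = add_header_middleware(application)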
burzillibus/RobHome
venv/lib/python2.7/site-packages/docutils/utils/math/tex2mathml_extern.py
16
5634
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# :Id: $Id: tex2mathml_extern.py 7861 2015-04-10 23:48:51Z milde $
# :Copyright: © 2015 Günter Milde.
# :License: Released under the terms of the `2-Clause BSD license`_, in short:
#
#    Copying and distribution of this file, with or without modification,
#    are permitted in any medium without royalty provided the copyright
#    notice and this notice are preserved.
#    This file is offered as-is, without any warranty.
#
# .. _2-Clause BSD license: http://www.spdx.org/licenses/BSD-2-Clause

# Wrappers for TeX->MathML conversion by external tools
# =====================================================

import subprocess

document_template = r"""\documentclass{article}
\usepackage{amsmath}
\begin{document}
%s
\end{document}
"""

def latexml(math_code, reporter=None):
    """Convert LaTeX math code to MathML with LaTeXML_

    .. _LaTeXML: http://dlmf.nist.gov/LaTeXML/
    """
    p = subprocess.Popen(['latexml',
                          '-',  # read from stdin
                          # '--preload=amsmath',
                          '--inputencoding=utf8',
                         ],
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         close_fds=True)
    p.stdin.write((document_template % math_code).encode('utf8'))
    p.stdin.close()
    latexml_code = p.stdout.read()
    latexml_err = p.stderr.read().decode('utf8')
    # parenthesize so a missing reporter cannot trigger reporter.error()
    if reporter and (latexml_err.find('Error') >= 0 or not latexml_code):
        reporter.error(latexml_err)

    post_p = subprocess.Popen(['latexmlpost',
                               '-',
                               '--nonumbersections',
                               '--format=xhtml',
                               # '--linelength=78',  # experimental
                               '--'
                              ],
                              stdin=subprocess.PIPE,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE,
                              close_fds=True)
    post_p.stdin.write(latexml_code)
    post_p.stdin.close()
    result = post_p.stdout.read().decode('utf8')
    post_p_err = post_p.stderr.read().decode('utf8')
    if reporter and (post_p_err.find('Error') >= 0 or not result):
        reporter.error(post_p_err)

    # extract MathML code:
    start, end = result.find('<math'), result.find('</math>') + 7
    result = result[start:end]
    if 'class="ltx_ERROR' in result:
        raise SyntaxError(result)
    return result

def ttm(math_code, reporter=None):
    """Convert LaTeX math code to MathML with TtM_

    .. _TtM: http://hutchinson.belmont.ma.us/tth/mml/
    """
    p = subprocess.Popen(['ttm',
                          # '-i',  # italic font for equations. Default roman.
                          '-u',  # unicode character encoding. (Default iso-8859-1).
                          '-r',  # output raw MathML (no preamble or postlude)
                         ],
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         close_fds=True)
    p.stdin.write((document_template % math_code).encode('utf8'))
    p.stdin.close()
    result = p.stdout.read()
    err = p.stderr.read().decode('utf8')
    if err.find('**** Unknown') >= 0:
        msg = '\n'.join([line for line in err.splitlines()
                         if line.startswith('****')])
        raise SyntaxError('\nMessage from external converter TtM:\n' + msg)
    if reporter and (err.find('**** Error') >= 0 or not result):
        reporter.error(err)
    start, end = result.find('<math'), result.find('</math>') + 7
    result = result[start:end]
    return result

def blahtexml(math_code, inline=True, reporter=None):
    """Convert LaTeX math code to MathML with blahtexml_

    .. _blahtexml: http://gva.noekeon.org/blahtexml/
    """
    options = ['--mathml',
               '--indented',
               '--spacing', 'moderate',
               '--mathml-encoding', 'raw',
               '--other-encoding', 'raw',
               '--doctype-xhtml+mathml',
               '--annotate-TeX',
              ]
    if inline:
        mathmode_arg = ''
    else:
        mathmode_arg = 'mode="display"'
        options.append('--displaymath')
    p = subprocess.Popen(['blahtexml'] + options,
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         close_fds=True)
    p.stdin.write(math_code.encode('utf8'))
    p.stdin.close()
    result = p.stdout.read().decode('utf8')
    err = p.stderr.read().decode('utf8')
    if result.find('<error>') >= 0:
        raise SyntaxError('\nMessage from external converter blahtexml:\n'
                          + result[result.find('<message>')+9:
                                   result.find('</message>')])
    if reporter and (err.find('**** Error') >= 0 or not result):
        reporter.error(err)
    start, end = result.find('<markup>') + 9, result.find('</markup>')
    result = ('<math xmlns="http://www.w3.org/1998/Math/MathML"%s>\n'
              '%s</math>\n') % (mathmode_arg, result[start:end])
    return result

# self-test
if __name__ == "__main__":
    example = ur'\frac{\partial \sin^2(\alpha)}{\partial \vec r} \varpi \, \text{Grüße}'
    # print latexml(example).encode('utf8')
    # print ttm(example)#.encode('utf8')
    print blahtexml(example).encode('utf8')
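All three wrappers share one calling convention: pass LaTeX math source in, get a MathML fragment back, and optionally supply a docutils reporter for error handling. A usage sketch, assuming the corresponding converter binary is installed and on PATH:

# Requires the external converter (latexml/ttm/blahtexml) on PATH.
math = r'\frac{a}{b} + \sqrt{2}'
try:
    mathml = blahtexml(math, inline=True)
except SyntaxError as details:
    # every wrapper raises SyntaxError on converter-reported errors
    print 'conversion failed:', details
else:
    print mathml.encode('utf8')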
mit
shrimpboyho/git.js
emscript/python/2.7.5.1_32bit/Lib/test/test_plistlib.py
79
7403
# Copyright (C) 2003 Python Software Foundation

import unittest
import plistlib
import os
import datetime
from test import test_support


# This test data was generated through Cocoa's NSDictionary class
TESTDATA = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" \
"http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
        <key>aDate</key>
        <date>2004-10-26T10:33:33Z</date>
        <key>aDict</key>
        <dict>
                <key>aFalseValue</key>
                <false/>
                <key>aTrueValue</key>
                <true/>
                <key>aUnicodeValue</key>
                <string>M\xc3\xa4ssig, Ma\xc3\x9f</string>
                <key>anotherString</key>
                <string>&lt;hello &amp; 'hi' there!&gt;</string>
                <key>deeperDict</key>
                <dict>
                        <key>a</key>
                        <integer>17</integer>
                        <key>b</key>
                        <real>32.5</real>
                        <key>c</key>
                        <array>
                                <integer>1</integer>
                                <integer>2</integer>
                                <string>text</string>
                        </array>
                </dict>
        </dict>
        <key>aFloat</key>
        <real>0.5</real>
        <key>aList</key>
        <array>
                <string>A</string>
                <string>B</string>
                <integer>12</integer>
                <real>32.5</real>
                <array>
                        <integer>1</integer>
                        <integer>2</integer>
                        <integer>3</integer>
                </array>
        </array>
        <key>aString</key>
        <string>Doodah</string>
        <key>anInt</key>
        <integer>728</integer>
        <key>nestedData</key>
        <array>
                <data>
                PGxvdHMgb2YgYmluYXJ5IGd1bms+AAECAzxsb3RzIG9mIGJpbmFyeSBndW5r
                PgABAgM8bG90cyBvZiBiaW5hcnkgZ3Vuaz4AAQIDPGxvdHMgb2YgYmluYXJ5
                IGd1bms+AAECAzxsb3RzIG9mIGJpbmFyeSBndW5rPgABAgM8bG90cyBvZiBi
                aW5hcnkgZ3Vuaz4AAQIDPGxvdHMgb2YgYmluYXJ5IGd1bms+AAECAzxsb3Rz
                IG9mIGJpbmFyeSBndW5rPgABAgM8bG90cyBvZiBiaW5hcnkgZ3Vuaz4AAQID
                PGxvdHMgb2YgYmluYXJ5IGd1bms+AAECAw==
                </data>
        </array>
        <key>someData</key>
        <data>
        PGJpbmFyeSBndW5rPg==
        </data>
        <key>someMoreData</key>
        <data>
        PGxvdHMgb2YgYmluYXJ5IGd1bms+AAECAzxsb3RzIG9mIGJpbmFyeSBndW5rPgABAgM8
        bG90cyBvZiBiaW5hcnkgZ3Vuaz4AAQIDPGxvdHMgb2YgYmluYXJ5IGd1bms+AAECAzxs
        b3RzIG9mIGJpbmFyeSBndW5rPgABAgM8bG90cyBvZiBiaW5hcnkgZ3Vuaz4AAQIDPGxv
        dHMgb2YgYmluYXJ5IGd1bms+AAECAzxsb3RzIG9mIGJpbmFyeSBndW5rPgABAgM8bG90
        cyBvZiBiaW5hcnkgZ3Vuaz4AAQIDPGxvdHMgb2YgYmluYXJ5IGd1bms+AAECAw==
        </data>
        <key>\xc3\x85benraa</key>
        <string>That was a unicode key.</string>
</dict>
</plist>
""".replace(" " * 8, "\t")  # Apple as well as plistlib.py output hard tabs


class TestPlistlib(unittest.TestCase):

    def tearDown(self):
        try:
            os.unlink(test_support.TESTFN)
        except:
            pass

    def _create(self):
        pl = dict(
            aString="Doodah",
            aList=["A", "B", 12, 32.5, [1, 2, 3]],
            aFloat = 0.5,
            anInt = 728,
            aDict=dict(
                anotherString="<hello & 'hi' there!>",
                aUnicodeValue=u'M\xe4ssig, Ma\xdf',
                aTrueValue=True,
                aFalseValue=False,
                deeperDict=dict(a=17, b=32.5, c=[1, 2, "text"]),
            ),
            someData = plistlib.Data("<binary gunk>"),
            someMoreData = plistlib.Data("<lots of binary gunk>\0\1\2\3" * 10),
            nestedData = [plistlib.Data("<lots of binary gunk>\0\1\2\3" * 10)],
            aDate = datetime.datetime(2004, 10, 26, 10, 33, 33),
        )
        pl[u'\xc5benraa'] = "That was a unicode key."
        return pl

    def test_create(self):
        pl = self._create()
        self.assertEqual(pl["aString"], "Doodah")
        self.assertEqual(pl["aDict"]["aFalseValue"], False)

    def test_io(self):
        pl = self._create()
        plistlib.writePlist(pl, test_support.TESTFN)
        pl2 = plistlib.readPlist(test_support.TESTFN)
        self.assertEqual(dict(pl), dict(pl2))

    def test_string(self):
        pl = self._create()
        data = plistlib.writePlistToString(pl)
        pl2 = plistlib.readPlistFromString(data)
        self.assertEqual(dict(pl), dict(pl2))
        data2 = plistlib.writePlistToString(pl2)
        self.assertEqual(data, data2)

    def test_indentation_array(self):
        data = [[[[[[[[{'test': plistlib.Data(b'aaaaaa')}]]]]]]]]
        self.assertEqual(plistlib.readPlistFromString(plistlib.writePlistToString(data)), data)

    def test_indentation_dict(self):
        data = {'1': {'2': {'3': {'4': {'5': {'6': {'7': {'8': {'9': plistlib.Data(b'aaaaaa')}}}}}}}}}
        self.assertEqual(plistlib.readPlistFromString(plistlib.writePlistToString(data)), data)

    def test_indentation_dict_mix(self):
        data = {'1': {'2': [{'3': [[[[[{'test': plistlib.Data(b'aaaaaa')}]]]]]}]}}
        self.assertEqual(plistlib.readPlistFromString(plistlib.writePlistToString(data)), data)

    def test_appleformatting(self):
        pl = plistlib.readPlistFromString(TESTDATA)
        data = plistlib.writePlistToString(pl)
        self.assertEqual(data, TESTDATA,
                         "generated data was not identical to Apple's output")

    def test_appleformattingfromliteral(self):
        pl = self._create()
        pl2 = plistlib.readPlistFromString(TESTDATA)
        self.assertEqual(dict(pl), dict(pl2),
                         "generated data was not identical to Apple's output")

    def test_stringio(self):
        from StringIO import StringIO
        f = StringIO()
        pl = self._create()
        plistlib.writePlist(pl, f)
        pl2 = plistlib.readPlist(StringIO(f.getvalue()))
        self.assertEqual(dict(pl), dict(pl2))

    def test_cstringio(self):
        from cStringIO import StringIO
        f = StringIO()
        pl = self._create()
        plistlib.writePlist(pl, f)
        pl2 = plistlib.readPlist(StringIO(f.getvalue()))
        self.assertEqual(dict(pl), dict(pl2))

    def test_controlcharacters(self):
        for i in range(128):
            c = chr(i)
            testString = "string containing %s" % c
            if i >= 32 or c in "\r\n\t":
                # \r, \n and \t are the only legal control chars in XML
                plistlib.writePlistToString(testString)
            else:
                self.assertRaises(ValueError,
                                  plistlib.writePlistToString,
                                  testString)

    def test_nondictroot(self):
        test1 = "abc"
        test2 = [1, 2, 3, "abc"]
        result1 = plistlib.readPlistFromString(plistlib.writePlistToString(test1))
        result2 = plistlib.readPlistFromString(plistlib.writePlistToString(test2))
        self.assertEqual(test1, result1)
        self.assertEqual(test2, result2)


def test_main():
    test_support.run_unittest(TestPlistlib)


if __name__ == '__main__':
    test_main()
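A quick illustration of the Python 2 plistlib round trip these tests exercise; writePlistToString and readPlistFromString serialize a dict to, and parse it back from, the XML plist format:

import plistlib

pl = {'name': 'example', 'count': 3, 'enabled': True}
xml = plistlib.writePlistToString(pl)        # dict -> XML plist string
restored = plistlib.readPlistFromString(xml)  # XML plist string -> dict
assert restored == pl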
gpl-2.0
seibert/numba
numba/core/typeconv/castgraph.py
7
4075
from collections import defaultdict
from functools import total_ordering
import enum


class Conversion(enum.IntEnum):
    """
    A conversion kind from one type to the other.

    The enum members are ordered from stricter to looser.
    """
    # The two types are identical
    exact = 1
    # The two types are of the same kind, the destination type has more
    # extension or precision than the source type (e.g. float32 -> float64,
    # or int32 -> int64)
    promote = 2
    # The source type can be converted to the destination type without loss
    # of information (e.g. int32 -> int64).  Note that the conversion may
    # still fail explicitly at runtime (e.g. Optional(int32) -> int32)
    safe = 3
    # The conversion may appear to succeed at runtime while losing information
    # or precision (e.g. int32 -> uint32, float64 -> float32, int64 -> int32,
    # etc.)
    unsafe = 4

    # This value is only used internally
    nil = 99


class CastSet(object):
    """A set of casting rules.

    There is at most one rule per target type.
    """

    def __init__(self):
        self._rels = {}

    def insert(self, to, rel):
        old = self.get(to)
        setrel = min(rel, old)
        self._rels[to] = setrel
        return old != setrel

    def items(self):
        return self._rels.items()

    def get(self, item):
        return self._rels.get(item, Conversion.nil)

    def __len__(self):
        return len(self._rels)

    def __repr__(self):
        body = ["{rel}({ty})".format(rel=rel, ty=ty)
                for ty, rel in self._rels.items()]
        return "{" + ', '.join(body) + "}"

    def __contains__(self, item):
        return item in self._rels

    def __iter__(self):
        return iter(self._rels.keys())

    def __getitem__(self, item):
        return self._rels[item]


class TypeGraph(object):
    """A graph that maintains the casting relationship of all types.

    This simplifies the definition of casting rules by automatically
    propagating the rules.
    """

    def __init__(self, callback=None):
        """
        Args
        ----
        - callback: callable or None
            It is called for each new casting rule with
            (from_type, to_type, castrel).
        """
        assert callback is None or callable(callback)
        self._forwards = defaultdict(CastSet)
        self._backwards = defaultdict(set)
        self._callback = callback

    def get(self, ty):
        return self._forwards[ty]

    def propagate(self, a, b, baserel):
        backset = self._backwards[a]

        # Forward propagate the relationship to all nodes that b leads to
        for child in self._forwards[b]:
            rel = max(baserel, self._forwards[b][child])
            if a != child:
                if self._forwards[a].insert(child, rel):
                    self._callback(a, child, rel)
                self._backwards[child].add(a)

            # Propagate the relationship from nodes that connects to a
            for backnode in backset:
                if backnode != child:
                    backrel = max(rel, self._forwards[backnode][a])
                    if self._forwards[backnode].insert(child, backrel):
                        self._callback(backnode, child, backrel)
                    self._backwards[child].add(backnode)

        # Every node that leads to a connects to b
        for child in self._backwards[a]:
            rel = max(baserel, self._forwards[child][a])
            if b != child:
                if self._forwards[child].insert(b, rel):
                    self._callback(child, b, rel)
                self._backwards[b].add(child)

    def insert_rule(self, a, b, rel):
        self._forwards[a].insert(b, rel)
        self._callback(a, b, rel)
        self._backwards[b].add(a)
        self.propagate(a, b, rel)

    def promote(self, a, b):
        self.insert_rule(a, b, Conversion.promote)

    def safe(self, a, b):
        self.insert_rule(a, b, Conversion.safe)

    def unsafe(self, a, b):
        self.insert_rule(a, b, Conversion.unsafe)
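To see the propagation in action: each insert_rule fires the callback for the direct rule and for every transitive rule it implies, keeping the loosest (max) conversion along a path and the strictest (min) per target. A small sketch using plain strings as stand-in types:

def on_rule(src, dst, rel):
    print("%s -> %s: %s" % (src, dst, rel.name))

tg = TypeGraph(callback=on_rule)
tg.promote('int32', 'int64')   # prints: int32 -> int64: promote
tg.safe('int64', 'float64')    # prints: int64 -> float64: safe
                               # and, by propagation:
                               # int32 -> float64: safe
assert tg.get('int32')['float64'] == Conversion.safe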
bsd-2-clause
silvio/elbe
elbepack/xmldefaults.py
1
4019
import random
import string
import sys

armel_defaults = {
    "arch":        "armel",
    "size":        "20G",
    "mem":         "256",
    "interpreter": "qemu-system-arm",
    "userinterpr": "qemu-arm-static",
    "console":     "ttyAMA0,115200n1",
    "machine":     "versatilepb",
    "nicmodel":    "smc91c111"
}

armel_virtio_defaults = {
    "arch":        "armel",
    "size":        "20G",
    "mem":         "256",
    "interpreter": "qemu-system-arm-virtio",
    "userinterpr": "qemu-arm-static",
    "console":     "ttyAMA0,115200n1",
    "machine":     "versatilepb",
    "nicmodel":    "smc91c111"
}

armhf_defaults = {
    "arch":        "armhf",
    "size":        "20G",
    "mem":         "256",
    "interpreter": "qemu-system-arm",
    "userinterpr": "qemu-arm-static",
    "console":     "ttyAMA0,115200n1",
    "machine":     "versatilepb -cpu cortex-a9",
    "nicmodel":    "smc91c111"
}

armhf_virtio_defaults = {
    "arch":        "armhf",
    "size":        "20G",
    "mem":         "256",
    "interpreter": "qemu-system-arm-virtio",
    "userinterpr": "qemu-arm-static",
    "console":     "ttyAMA0,115200n1",
    "machine":     "versatilepb -cpu cortex-a9",
    "nicmodel":    "virtio"
}

ppc_defaults = {
    "arch":        "powerpc",
    "size":        "20G",
    "mem":         "256",
    "interpreter": "qemu-system-ppc",
    "userinterpr": "qemu-ppc-static",
    "console":     "ttyPZ0,115200n1",
    "machine":     "mac99",
    "nicmodel":    "rtl8139"
}

amd64_defaults = {
    "arch":        "amd64",
    "size":        "20G",
    "mem":         "1024",
    "interpreter": "kvm",
    "console":     "ttyS0,115200n1",
    "machine":     "pc",
    "nicmodel":    "virtio"
}

i386_defaults = {
    "arch":        "i386",
    "size":        "20G",
    "mem":         "1024",
    "interpreter": "kvm",
    "console":     "ttyS0,115200n1",
    "machine":     "pc",
    "nicmodel":    "virtio"
}

defaults = {
    "armel":        armel_defaults,
    "armel-virtio": armel_virtio_defaults,
    "armhf":        armhf_defaults,
    "armhf-virtio": armhf_virtio_defaults,
    "ppc":          ppc_defaults,
    "amd64":        amd64_defaults,
    "i386":         i386_defaults,
    "nodefaults":   {}
}

xml_field_path = {
    "arch":        "project/buildimage/arch",
    "size":        "project/buildimage/size",
    "mem":         "project/buildimage/mem",
    "interpreter": "project/buildimage/interpreter",
    "console":     "project/buildimage/console",
    "machine":     "project/buildimage/machine",
    "nicmodel":    "project/buildimage/NIC/model"
}


def get_random_mac():
    # randint() bounds are inclusive, so 255 keeps each octet within one byte
    binaddr = [random.randint(0, 255) for i in range(6)]
    # clear the multicast bit and set the locally-administered bit
    binaddr[0] &= 0xfe
    binaddr[0] |= 0x02
    s = map(lambda x: "%02x" % x, binaddr)
    return string.join(s, ":")


class ElbeDefaults(object):

    def __init__(self, build_type):
        if not defaults.has_key(build_type):
            print "Please specify a valid buildtype."
            print "Valid buildtypes:"
            print defaults.keys()
            sys.exit(20)

        self.defaults = defaults[build_type]
        self.defaults["nicmac"] = get_random_mac()

    def __getitem__(self, key):
        if self.defaults.has_key(key):
            return self.defaults[key]

        print "No default value has been provided."
        print "Either use a valid buildtype, or provide the field in the XML file."
        print "The location in the XML is here:"
        print xml_field_path[key]
        sys.exit(20)
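Usage is a plain dictionary lookup keyed by build type, with a locally administered MAC address generated per instance. A short Python 2 sketch:

d = ElbeDefaults("armhf")
print d["arch"]      # -> armhf
print d["machine"]   # -> versatilepb -cpu cortex-a9
print d["nicmac"]    # e.g. 02:xx:xx:xx:xx:xx (locally administered, unicast)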
gpl-3.0
malayaleecoder/servo
tests/wpt/css-tests/tools/manifest/manifest.py
89
12720
import json
import os
from collections import defaultdict

from item import item_types, ManualTest, WebdriverSpecTest, Stub, RefTest, TestharnessTest
from log import get_logger
from sourcefile import SourceFile
from utils import from_os_path, to_os_path

CURRENT_VERSION = 2


class ManifestError(Exception):
    pass


class ManifestVersionMismatch(ManifestError):
    pass


class Manifest(object):
    def __init__(self, git_rev=None, url_base="/"):
        # Dict of item_type: {path: set(manifest_items)}
        self._data = dict((item_type, defaultdict(set))
                          for item_type in item_types)
        self.rev = git_rev
        self.url_base = url_base
        self.local_changes = LocalChanges(self)
        # reftest nodes arranged as {path: set(manifest_items)}
        self.reftest_nodes = defaultdict(set)
        self.reftest_nodes_by_url = {}

    def _included_items(self, include_types=None):
        if include_types is None:
            include_types = item_types
        for item_type in include_types:
            paths = self._data[item_type].copy()
            for local_types, local_paths in self.local_changes.itertypes(item_type):
                for path, items in local_paths.iteritems():
                    paths[path] = items
            for path in self.local_changes.iterdeleted():
                if path in paths:
                    del paths[path]
            yield item_type, paths

    def contains_path(self, path):
        return any(path in paths for _, paths in self._included_items())

    def add(self, item):
        if item is None:
            return

        is_reference = False
        if isinstance(item, RefTest):
            self.reftest_nodes[item.path].add(item)
            self.reftest_nodes_by_url[item.url] = item
            is_reference = item.is_reference

        if not is_reference:
            self._add(item)

        item.manifest = self

    def _add(self, item):
        self._data[item.item_type][item.path].add(item)

    def extend(self, items):
        for item in items:
            self.add(item)

    def remove_path(self, path):
        for item_type in item_types:
            if path in self._data[item_type]:
                del self._data[item_type][path]

    def itertypes(self, *types):
        if not types:
            types = None
        for item_type, items in self._included_items(types):
            for item in sorted(items.items()):
                yield item

    def __iter__(self):
        for item in self.itertypes():
            yield item

    def __getitem__(self, path):
        for _, paths in self._included_items():
            if path in paths:
                return paths[path]
        raise KeyError

    def get_reference(self, url):
        if url in self.local_changes.reftest_nodes_by_url:
            return self.local_changes.reftest_nodes_by_url[url]

        if url in self.reftest_nodes_by_url:
            return self.reftest_nodes_by_url[url]

        return None

    def _committed_with_path(self, rel_path):
        rv = set()

        for paths_items in self._data.itervalues():
            rv |= paths_items.get(rel_path, set())

        if rel_path in self.reftest_nodes:
            rv |= self.reftest_nodes[rel_path]

        return rv

    def _committed_paths(self):
        rv = set()
        for paths_items in self._data.itervalues():
            rv |= set(paths_items.keys())
        return rv

    def update(self, tests_root, url_base, new_rev,
               committed_changes=None, local_changes=None,
               remove_missing_local=False):

        if local_changes is None:
            local_changes = {}

        if committed_changes is not None:
            for rel_path, status in committed_changes:
                self.remove_path(rel_path)
                if status == "modified":
                    use_committed = rel_path in local_changes
                    source_file = SourceFile(tests_root,
                                             rel_path,
                                             url_base,
                                             use_committed=use_committed)
                    self.extend(source_file.manifest_items())

        self.local_changes = LocalChanges(self)

        local_paths = set()
        for rel_path, status in local_changes.iteritems():
            local_paths.add(rel_path)

            if status == "modified":
                existing_items = self._committed_with_path(rel_path)
                source_file = SourceFile(tests_root,
                                         rel_path,
                                         url_base,
                                         use_committed=False)
                local_items = set(source_file.manifest_items())

                updated_items = local_items - existing_items
                self.local_changes.extend(updated_items)
            else:
                self.local_changes.add_deleted(rel_path)

        if remove_missing_local:
            for path in self._committed_paths() - local_paths:
                self.local_changes.add_deleted(path)

        self.update_reftests()

        if new_rev is not None:
            self.rev = new_rev
        self.url_base = url_base

    def update_reftests(self):
        reftest_nodes = self.reftest_nodes.copy()
        for path, items in self.local_changes.reftest_nodes.iteritems():
            reftest_nodes[path] |= items

        #TODO: remove locally deleted files
        tests = set()
        for items in reftest_nodes.values():
            tests |= set(item for item in items if not item.is_reference)

        has_inbound = set()
        for path, items in reftest_nodes.iteritems():
            for item in items:
                for ref_url, ref_type in item.references:
                    has_inbound.add(ref_url)

        if self.local_changes.reftest_nodes:
            target = self.local_changes
        else:
            target = self

        #TODO: Warn if there exist unreachable reftest nodes
        for path, items in reftest_nodes.iteritems():
            for item in items:
                if item.url in has_inbound:
                    continue
                target._data["reftest"][path].add(item)

    def to_json(self):
        out_items = {
            item_type: sorted(
                test.to_json()
                for _, tests in items.iteritems()
                for test in tests
            )
            for item_type, items in self._data.iteritems()
        }

        reftest_nodes = {from_os_path(key): [v.to_json() for v in value]
                         for key, value in self.reftest_nodes.iteritems()}

        rv = {"url_base": self.url_base,
              "rev": self.rev,
              "local_changes": self.local_changes.to_json(),
              "items": out_items,
              "reftest_nodes": reftest_nodes,
              "version": CURRENT_VERSION}
        return rv

    @classmethod
    def from_json(cls, tests_root, obj):
        version = obj.get("version")
        if version != CURRENT_VERSION:
            raise ManifestVersionMismatch

        self = cls(git_rev=obj["rev"],
                   url_base=obj.get("url_base", "/"))
        if not hasattr(obj, "iteritems"):
            raise ManifestError

        item_classes = {"testharness": TestharnessTest,
                        "reftest": RefTest,
                        "manual": ManualTest,
                        "stub": Stub,
                        "wdspec": WebdriverSpecTest}

        source_files = {}

        for k, values in obj["items"].iteritems():
            if k not in item_types:
                raise ManifestError
            for v in values:
                manifest_item = item_classes[k].from_json(self, tests_root, v,
                                                          source_files=source_files)
                self._add(manifest_item)

        for path, values in obj["reftest_nodes"].iteritems():
            path = to_os_path(path)
            for v in values:
                item = RefTest.from_json(self, tests_root, v,
                                         source_files=source_files)
                self.reftest_nodes[path].add(item)
                self.reftest_nodes_by_url[v["url"]] = item

        self.local_changes = LocalChanges.from_json(self,
                                                    tests_root,
                                                    obj["local_changes"],
                                                    source_files=source_files)

        return self


class LocalChanges(object):
    def __init__(self, manifest):
        self.manifest = manifest
        self._data = dict((item_type, defaultdict(set)) for item_type in item_types)
        self._deleted = set()
        self.reftest_nodes = defaultdict(set)
        self.reftest_nodes_by_url = {}

    def add(self, item):
        if item is None:
            return

        is_reference = False
        if isinstance(item, RefTest):
            self.reftest_nodes[item.path].add(item)
            self.reftest_nodes_by_url[item.url] = item
            is_reference = item.is_reference

        if not is_reference:
            self._add(item)

        item.manifest = self.manifest

    def _add(self, item):
        self._data[item.item_type][item.path].add(item)

    def extend(self, items):
        for item in items:
            self.add(item)

    def add_deleted(self, path):
        self._deleted.add(path)

    def is_deleted(self, path):
        return path in self._deleted

    def itertypes(self, *types):
        for item_type in types:
            yield item_type, self._data[item_type]

    def iterdeleted(self):
        for item in self._deleted:
            yield item

    def __getitem__(self, item_type):
        return self._data[item_type]

    def to_json(self):
        reftest_nodes = {from_os_path(key): [v.to_json() for v in value]
                         for key, value in self.reftest_nodes.iteritems()}

        rv = {"items": defaultdict(dict),
              "reftest_nodes": reftest_nodes,
              "deleted": [from_os_path(path) for path in self._deleted]}

        for test_type, paths in self._data.iteritems():
            for path, tests in paths.iteritems():
                path = from_os_path(path)
                rv["items"][test_type][path] = [test.to_json() for test in tests]

        return rv

    @classmethod
    def from_json(cls, manifest, tests_root, obj, source_files=None):
        self = cls(manifest)
        if not hasattr(obj, "iteritems"):
            raise ManifestError

        item_classes = {"testharness": TestharnessTest,
                        "reftest": RefTest,
                        "manual": ManualTest,
                        "stub": Stub,
                        "wdspec": WebdriverSpecTest}

        for test_type, paths in obj["items"].iteritems():
            for path, tests in paths.iteritems():
                for test in tests:
                    manifest_item = item_classes[test_type].from_json(manifest, tests_root, test,
                                                                      source_files=source_files)
                    self.add(manifest_item)

        for path, values in obj["reftest_nodes"].iteritems():
            path = to_os_path(path)
            for v in values:
                item = RefTest.from_json(self.manifest, tests_root, v,
                                         source_files=source_files)
                self.reftest_nodes[path].add(item)
                self.reftest_nodes_by_url[item.url] = item

        for item in obj["deleted"]:
            self.add_deleted(to_os_path(item))

        return self


def load(tests_root, manifest):
    logger = get_logger()

    # "manifest" is a path or file-like object.
    if isinstance(manifest, basestring):
        if os.path.exists(manifest):
            logger.debug("Opening manifest at %s" % manifest)
        else:
            logger.debug("Creating new manifest at %s" % manifest)
        try:
            with open(manifest) as f:
                rv = Manifest.from_json(tests_root, json.load(f))
        except IOError:
            rv = Manifest(None)
        return rv

    return Manifest.from_json(tests_root, json.load(manifest))


def write(manifest, manifest_path):
    with open(manifest_path, "wb") as f:
        json.dump(manifest.to_json(), f, sort_keys=True, indent=2, separators=(',', ': '))
        f.write("\n")
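The module-level load/write pair is the intended entry point: load parses a MANIFEST.json (or starts a fresh manifest if the file is missing), and write serializes it back. A sketch with hypothetical paths, assuming the accompanying item/sourcefile modules are importable:

manifest = load("/path/to/tests", "/path/to/tests/MANIFEST.json")

# itertypes() yields (path, set_of_items) pairs, optionally filtered by type
for path, tests in manifest.itertypes("testharness"):
    print path, len(tests)

write(manifest, "/path/to/tests/MANIFEST.json")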
mpl-2.0
FreekingDean/home-assistant
homeassistant/components/switch/hook.py
12
4951
""" Support Hook, available at hooksmarthome.com. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/switch.hook/ """ import logging import asyncio import voluptuous as vol import async_timeout import aiohttp from homeassistant.components.switch import (SwitchDevice, PLATFORM_SCHEMA) from homeassistant.const import CONF_PASSWORD, CONF_USERNAME from homeassistant.helpers.aiohttp_client import async_get_clientsession import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) HOOK_ENDPOINT = 'https://api.gethook.io/v1/' TIMEOUT = 10 PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_USERNAME): cv.string, vol.Required(CONF_PASSWORD): cv.string, }) @asyncio.coroutine def async_setup_platform(hass, config, async_add_devices, discovery_info=None): """Set up Hook by getting the access token and list of actions.""" username = config.get(CONF_USERNAME) password = config.get(CONF_PASSWORD) websession = async_get_clientsession(hass) response = None try: with async_timeout.timeout(TIMEOUT, loop=hass.loop): response = yield from websession.post( '{}{}'.format(HOOK_ENDPOINT, 'user/login'), data={ 'username': username, 'password': password}) data = yield from response.json() except (asyncio.TimeoutError, aiohttp.errors.ClientError, aiohttp.errors.ClientDisconnectedError) as error: _LOGGER.error("Failed authentication API call: %s", error) return False finally: if response is not None: yield from response.release() try: token = data['data']['token'] except KeyError: _LOGGER.error("No token. Check username and password") return False response = None try: with async_timeout.timeout(TIMEOUT, loop=hass.loop): response = yield from websession.get( '{}{}'.format(HOOK_ENDPOINT, 'device'), params={"token": data['data']['token']}) data = yield from response.json() except (asyncio.TimeoutError, aiohttp.errors.ClientError, aiohttp.errors.ClientDisconnectedError) as error: _LOGGER.error("Failed getting devices: %s", error) return False finally: if response is not None: yield from response.release() yield from async_add_devices( HookSmartHome( hass, token, d['device_id'], d['device_name']) for lst in data['data'] for d in lst) class HookSmartHome(SwitchDevice): """Representation of a Hook device, allowing on and off commands.""" def __init__(self, hass, token, device_id, device_name): """Initialize the switch.""" self.hass = hass self._token = token self._state = False self._id = device_id self._name = device_name _LOGGER.debug( "Creating Hook object: ID: %s Name: %s", self._id, self._name) @property def name(self): """Return the name of the switch.""" return self._name @property def is_on(self): """Return true if device is on.""" return self._state @asyncio.coroutine def _send(self, url): """Send the url to the Hook API.""" response = None try: _LOGGER.debug("Sending: %s", url) websession = async_get_clientsession(self.hass) with async_timeout.timeout(TIMEOUT, loop=self.hass.loop): response = yield from websession.get( url, params={"token": self._token}) data = yield from response.json() except (asyncio.TimeoutError, aiohttp.errors.ClientError, aiohttp.errors.ClientDisconnectedError) as error: _LOGGER.error("Failed setting state: %s", error) return False finally: if response is not None: yield from response.release() _LOGGER.debug("Got: %s", data) return data['return_value'] == '1' @asyncio.coroutine def async_turn_on(self): """Turn the device on asynchronously.""" _LOGGER.debug("Turning on: %s", self._name) url = 
'{}{}{}{}'.format( HOOK_ENDPOINT, 'device/trigger/', self._id, '/On') success = yield from self._send(url) self._state = success @asyncio.coroutine def async_turn_off(self): """Turn the device off asynchronously.""" _LOGGER.debug("Turning off: %s", self._name) url = '{}{}{}{}'.format( HOOK_ENDPOINT, 'device/trigger/', self._id, '/Off') success = yield from self._send(url) # If it wasn't successful, keep state as true self._state = not success
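For reference, the REST calls the switch performs follow directly from the URL construction above; the device id here is illustrative only:

# POST https://api.gethook.io/v1/user/login        -> yields data['data']['token']
# GET  https://api.gethook.io/v1/device?token=...  -> lists devices
# For a hypothetical device_id '42' the trigger URLs become:
base = 'https://api.gethook.io/v1/'
on_url = '{}{}{}{}'.format(base, 'device/trigger/', '42', '/On')
off_url = '{}{}{}{}'.format(base, 'device/trigger/', '42', '/Off')
# -> https://api.gethook.io/v1/device/trigger/42/On (token sent as query param)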
mit
bramalingam/bioformats
components/xsd-fu/python/configobj.py
10
85768
# configobj.py # A config file reader/writer that supports nested sections in config files. # Copyright (C) 2005-2009 Michael Foord, Nicola Larosa # E-mail: fuzzyman AT voidspace DOT org DOT uk # nico AT tekNico DOT net # ConfigObj 4 # http://www.voidspace.org.uk/python/configobj.html # Released subject to the BSD License # Please see http://www.voidspace.org.uk/python/license.shtml # Scripts maintained at http://www.voidspace.org.uk/python/index.shtml # For information about bugfixes, updates and support, please join the # ConfigObj mailing list: # http://lists.sourceforge.net/lists/listinfo/configobj-develop # Comments, suggestions and bug reports welcome. from __future__ import generators import sys import os import re compiler = None try: import compiler except ImportError: # for IronPython pass try: from codecs import BOM_UTF8, BOM_UTF16, BOM_UTF16_BE, BOM_UTF16_LE except ImportError: # Python 2.2 does not have these # UTF-8 BOM_UTF8 = '\xef\xbb\xbf' # UTF-16, little endian BOM_UTF16_LE = '\xff\xfe' # UTF-16, big endian BOM_UTF16_BE = '\xfe\xff' if sys.byteorder == 'little': # UTF-16, native endianness BOM_UTF16 = BOM_UTF16_LE else: # UTF-16, native endianness BOM_UTF16 = BOM_UTF16_BE # A dictionary mapping BOM to # the encoding to decode with, and what to set the # encoding attribute to. BOMS = { BOM_UTF8: ('utf_8', None), BOM_UTF16_BE: ('utf16_be', 'utf_16'), BOM_UTF16_LE: ('utf16_le', 'utf_16'), BOM_UTF16: ('utf_16', 'utf_16'), } # All legal variants of the BOM codecs. # TODO: the list of aliases is not meant to be exhaustive, is there a # better way ? BOM_LIST = { 'utf_16': 'utf_16', 'u16': 'utf_16', 'utf16': 'utf_16', 'utf-16': 'utf_16', 'utf16_be': 'utf16_be', 'utf_16_be': 'utf16_be', 'utf-16be': 'utf16_be', 'utf16_le': 'utf16_le', 'utf_16_le': 'utf16_le', 'utf-16le': 'utf16_le', 'utf_8': 'utf_8', 'u8': 'utf_8', 'utf': 'utf_8', 'utf8': 'utf_8', 'utf-8': 'utf_8', } # Map of encodings to the BOM to write. 
BOM_SET = { 'utf_8': BOM_UTF8, 'utf_16': BOM_UTF16, 'utf16_be': BOM_UTF16_BE, 'utf16_le': BOM_UTF16_LE, None: BOM_UTF8 } def match_utf8(encoding): return BOM_LIST.get(encoding.lower()) == 'utf_8' # Quote strings used for writing values squot = "'%s'" dquot = '"%s"' noquot = "%s" wspace_plus = ' \r\n\v\t\'"' tsquot = '"""%s"""' tdquot = "'''%s'''" try: enumerate except NameError: def enumerate(obj): """enumerate for Python 2.2.""" i = -1 for item in obj: i += 1 yield i, item # Sentinel for use in getattr calls to replace hasattr MISSING = object() __version__ = '4.6.0' __revision__ = '$Id: configobj.py 156 2006-01-31 14:57:08Z fuzzyman $' __docformat__ = "restructuredtext en" __all__ = ( '__version__', 'DEFAULT_INDENT_TYPE', 'DEFAULT_INTERPOLATION', 'ConfigObjError', 'NestingError', 'ParseError', 'DuplicateError', 'ConfigspecError', 'ConfigObj', 'SimpleVal', 'InterpolationError', 'InterpolationLoopError', 'MissingInterpolationOption', 'RepeatSectionError', 'ReloadError', 'UnreprError', 'UnknownType', '__docformat__', 'flatten_errors', ) DEFAULT_INTERPOLATION = 'configparser' DEFAULT_INDENT_TYPE = ' ' MAX_INTERPOL_DEPTH = 10 OPTION_DEFAULTS = { 'interpolation': True, 'raise_errors': False, 'list_values': True, 'create_empty': False, 'file_error': False, 'configspec': None, 'stringify': True, # option may be set to one of ('', ' ', '\t') 'indent_type': None, 'encoding': None, 'default_encoding': None, 'unrepr': False, 'write_empty_values': False, } def getObj(s): s = "a=" + s if compiler is None: raise ImportError('compiler module not available') p = compiler.parse(s) return p.getChildren()[1].getChildren()[0].getChildren()[1] class UnknownType(Exception): pass class Builder(object): def build(self, o): m = getattr(self, 'build_' + o.__class__.__name__, None) if m is None: raise UnknownType(o.__class__.__name__) return m(o) def build_List(self, o): return map(self.build, o.getChildren()) def build_Const(self, o): return o.value def build_Dict(self, o): d = {} i = iter(map(self.build, o.getChildren())) for el in i: d[el] = i.next() return d def build_Tuple(self, o): return tuple(self.build_List(o)) def build_Name(self, o): if o.name == 'None': return None if o.name == 'True': return True if o.name == 'False': return False # An undefined Name raise UnknownType('Undefined Name') def build_Add(self, o): real, imag = map(self.build_Const, o.getChildren()) try: real = float(real) except TypeError: raise UnknownType('Add') if not isinstance(imag, complex) or imag.real != 0.0: raise UnknownType('Add') return real+imag def build_Getattr(self, o): parent = self.build(o.expr) return getattr(parent, o.attrname) def build_UnarySub(self, o): return -self.build_Const(o.getChildren()[0]) def build_UnaryAdd(self, o): return self.build_Const(o.getChildren()[0]) _builder = Builder() def unrepr(s): if not s: return s return _builder.build(getObj(s)) class ConfigObjError(SyntaxError): """ This is the base class for all errors that ConfigObj raises. It is a subclass of SyntaxError. """ def __init__(self, message='', line_number=None, line=''): self.line = line self.line_number = line_number SyntaxError.__init__(self, message) class NestingError(ConfigObjError): """ This error indicates a level of nesting that doesn't match. """ class ParseError(ConfigObjError): """ This error indicates that a line is badly written. It is neither a valid ``key = value`` line, nor a valid section marker line. """ class ReloadError(IOError): """ A 'reload' operation failed. This exception is a subclass of ``IOError``. 
""" def __init__(self): IOError.__init__(self, 'reload failed, filename is not set.') class DuplicateError(ConfigObjError): """ The keyword or section specified already exists. """ class ConfigspecError(ConfigObjError): """ An error occured whilst parsing a configspec. """ class InterpolationError(ConfigObjError): """Base class for the two interpolation errors.""" class InterpolationLoopError(InterpolationError): """Maximum interpolation depth exceeded in string interpolation.""" def __init__(self, option): InterpolationError.__init__( self, 'interpolation loop detected in value "%s".' % option) class RepeatSectionError(ConfigObjError): """ This error indicates additional sections in a section with a ``__many__`` (repeated) section. """ class MissingInterpolationOption(InterpolationError): """A value specified for interpolation was missing.""" def __init__(self, option): InterpolationError.__init__( self, 'missing option "%s" in interpolation.' % option) class UnreprError(ConfigObjError): """An error parsing in unrepr mode.""" class InterpolationEngine(object): """ A helper class to help perform string interpolation. This class is an abstract base class; its descendants perform the actual work. """ # compiled regexp to use in self.interpolate() _KEYCRE = re.compile(r"%\(([^)]*)\)s") def __init__(self, section): # the Section instance that "owns" this engine self.section = section def interpolate(self, key, value): def recursive_interpolate(key, value, section, backtrail): """The function that does the actual work. ``value``: the string we're trying to interpolate. ``section``: the section in which that string was found ``backtrail``: a dict to keep track of where we've been, to detect and prevent infinite recursion loops This is similar to a depth-first-search algorithm. """ # Have we been here already? if (key, section.name) in backtrail: # Yes - infinite loop detected raise InterpolationLoopError(key) # Place a marker on our backtrail so we won't come back here again backtrail[(key, section.name)] = 1 # Now start the actual work match = self._KEYCRE.search(value) while match: # The actual parsing of the match is implementation-dependent, # so delegate to our helper function k, v, s = self._parse_match(match) if k is None: # That's the signal that no further interpolation is needed replacement = v else: # Further interpolation may be needed to obtain final value replacement = recursive_interpolate(k, v, s, backtrail) # Replace the matched string with its final value start, end = match.span() value = ''.join((value[:start], replacement, value[end:])) new_search_start = start + len(replacement) # Pick up the next interpolation key, if any, for next time # through the while loop match = self._KEYCRE.search(value, new_search_start) # Now safe to come back here again; remove marker from backtrail del backtrail[(key, section.name)] return value # Back in interpolate(), all we have to do is kick off the recursive # function with appropriate starting values value = recursive_interpolate(key, value, self.section, {}) return value def _fetch(self, key): """Helper function to fetch values from owning section. Returns a 2-tuple: the value, and the section where it was found. """ # switch off interpolation before we try and fetch anything ! 
save_interp = self.section.main.interpolation self.section.main.interpolation = False # Start at section that "owns" this InterpolationEngine current_section = self.section while True: # try the current section first val = current_section.get(key) if val is not None: break # try "DEFAULT" next val = current_section.get('DEFAULT', {}).get(key) if val is not None: break # move up to parent and try again # top-level's parent is itself if current_section.parent is current_section: # reached top level, time to give up break current_section = current_section.parent # restore interpolation to previous value before returning self.section.main.interpolation = save_interp if val is None: raise MissingInterpolationOption(key) return val, current_section def _parse_match(self, match): """Implementation-dependent helper function. Will be passed a match object corresponding to the interpolation key we just found (e.g., "%(foo)s" or "$foo"). Should look up that key in the appropriate config file section (using the ``_fetch()`` helper function) and return a 3-tuple: (key, value, section) ``key`` is the name of the key we're looking for ``value`` is the value found for that key ``section`` is a reference to the section where it was found ``key`` and ``section`` should be None if no further interpolation should be performed on the resulting value (e.g., if we interpolated "$$" and returned "$"). """ raise NotImplementedError() class ConfigParserInterpolation(InterpolationEngine): """Behaves like ConfigParser.""" _KEYCRE = re.compile(r"%\(([^)]*)\)s") def _parse_match(self, match): key = match.group(1) value, section = self._fetch(key) return key, value, section class TemplateInterpolation(InterpolationEngine): """Behaves like string.Template.""" _delimiter = '$' _KEYCRE = re.compile(r""" \$(?: (?P<escaped>\$) | # Two $ signs (?P<named>[_a-z][_a-z0-9]*) | # $name format {(?P<braced>[^}]*)} # ${name} format ) """, re.IGNORECASE | re.VERBOSE) def _parse_match(self, match): # Valid name (in or out of braces): fetch value from section key = match.group('named') or match.group('braced') if key is not None: value, section = self._fetch(key) return key, value, section # Escaped delimiter (e.g., $$): return single delimiter if match.group('escaped') is not None: # Return None for key and section to indicate it's time to stop return None, self._delimiter, None # Anything else: ignore completely, just return it unchanged return None, match.group(), None interpolation_engines = { 'configparser': ConfigParserInterpolation, 'template': TemplateInterpolation, } def __newobj__(cls, *args): # Hack for pickle return cls.__new__(cls, *args) class Section(dict): """ A dictionary-like object that represents a section in a config file. It does string interpolation if the 'interpolation' attribute of the 'main' object is set to True. Interpolation is tried first from this object, then from the 'DEFAULT' section of this object, next from the parent and its 'DEFAULT' section, and so on until the main object is reached. A Section will behave like an ordered dictionary - following the order of the ``scalars`` and ``sections`` attributes. You can use this to change the order of members. Iteration follows the order: scalars, then sections. 
""" def __setstate__(self, state): dict.update(self, state[0]) self.__dict__.update(state[1]) def __reduce__(self): state = (dict(self), self.__dict__) return (__newobj__, (self.__class__,), state) def __init__(self, parent, depth, main, indict=None, name=None): """ * parent is the section above * depth is the depth level of this section * main is the main ConfigObj * indict is a dictionary to initialise the section with """ if indict is None: indict = {} dict.__init__(self) # used for nesting level *and* interpolation self.parent = parent # used for the interpolation attribute self.main = main # level of nesting depth of this Section self.depth = depth # purely for information self.name = name # self._initialise() # we do this explicitly so that __setitem__ is used properly # (rather than just passing to ``dict.__init__``) for entry, value in indict.iteritems(): self[entry] = value def _initialise(self): # the sequence of scalar values in this Section self.scalars = [] # the sequence of sections in this Section self.sections = [] # for comments :-) self.comments = {} self.inline_comments = {} # the configspec self.configspec = None # for defaults self.defaults = [] self.default_values = {} def _interpolate(self, key, value): try: # do we already have an interpolation engine? engine = self._interpolation_engine except AttributeError: # not yet: first time running _interpolate(), so pick the engine name = self.main.interpolation if name: # note that "if name:" would be incorrect here # backwards-compatibility: interpolation=True means use # default name = DEFAULT_INTERPOLATION # so that "Template", "template", etc. all work name = name.lower() class_ = interpolation_engines.get(name, None) if class_ is None: # invalid value for self.main.interpolation self.main.interpolation = False return value else: # save reference to engine so we don't have to do this again engine = self._interpolation_engine = class_(self) # let the engine do the actual work return engine.interpolate(key, value) def __getitem__(self, key): """Fetch the item and do string interpolation.""" val = dict.__getitem__(self, key) if self.main.interpolation and isinstance(val, basestring): return self._interpolate(key, val) return val def __setitem__(self, key, value, unrepr=False): """ Correctly set a value. Making dictionary values Section instances. (We have to special case 'Section' instances - which are also dicts) Keys must be strings. Values need only be strings (or lists of strings) if ``main.stringify`` is set. ``unrepr`` must be set when setting a value to a dictionary, without creating a new sub-section. """ if not isinstance(key, basestring): raise ValueError('The key "%s" is not a string.' % key) # add the comment if key not in self.comments: self.comments[key] = [] self.inline_comments[key] = '' # remove the entry from defaults if key in self.defaults: self.defaults.remove(key) # if isinstance(value, Section): if key not in self: self.sections.append(key) dict.__setitem__(self, key, value) elif isinstance(value, dict) and not unrepr: # First create the new depth level, # then create the section if key not in self: self.sections.append(key) new_depth = self.depth + 1 dict.__setitem__( self, key, Section( self, new_depth, self.main, indict=value, name=key)) else: if key not in self: self.scalars.append(key) if not self.main.stringify: if isinstance(value, basestring): pass elif isinstance(value, (list, tuple)): for entry in value: if not isinstance(entry, basestring): raise TypeError( 'Value is not a string "%s".' 
% entry) else: raise TypeError('Value is not a string "%s".' % value) dict.__setitem__(self, key, value) def __delitem__(self, key): """Remove items from the sequence when deleting.""" dict. __delitem__(self, key) if key in self.scalars: self.scalars.remove(key) else: self.sections.remove(key) del self.comments[key] del self.inline_comments[key] def get(self, key, default=None): """A version of ``get`` that doesn't bypass string interpolation.""" try: return self[key] except KeyError: return default def update(self, indict): """ A version of update that uses our ``__setitem__``. """ for entry in indict: self[entry] = indict[entry] def pop(self, key, *args): """ 'D.pop(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised' """ val = dict.pop(self, key, *args) if key in self.scalars: del self.comments[key] del self.inline_comments[key] self.scalars.remove(key) elif key in self.sections: del self.comments[key] del self.inline_comments[key] self.sections.remove(key) if self.main.interpolation and isinstance(val, basestring): return self._interpolate(key, val) return val def popitem(self): """Pops the first (key,val)""" sequence = (self.scalars + self.sections) if not sequence: raise KeyError(": 'popitem(): dictionary is empty'") key = sequence[0] val = self[key] del self[key] return key, val def clear(self): """ A version of clear that also affects scalars/sections Also clears comments and configspec. Leaves other attributes alone : depth/main/parent are not affected """ dict.clear(self) self.scalars = [] self.sections = [] self.comments = {} self.inline_comments = {} self.configspec = None def setdefault(self, key, default=None): """A version of setdefault that sets sequence if appropriate.""" try: return self[key] except KeyError: self[key] = default return self[key] def items(self): """D.items() -> list of D's (key, value) pairs, as 2-tuples""" return zip((self.scalars + self.sections), self.values()) def keys(self): """D.keys() -> list of D's keys""" return (self.scalars + self.sections) def values(self): """D.values() -> list of D's values""" return [self[key] for key in (self.scalars + self.sections)] def iteritems(self): """D.iteritems() -> an iterator over the (key, value) items of D""" return iter(self.items()) def iterkeys(self): """D.iterkeys() -> an iterator over the keys of D""" return iter((self.scalars + self.sections)) __iter__ = iterkeys def itervalues(self): """D.itervalues() -> an iterator over the values of D""" return iter(self.values()) def __repr__(self): """x.__repr__() <==> repr(x)""" return '{%s}' % ', '.join([ ('%s: %s' % (repr(key), repr(self[key]))) for key in (self.scalars + self.sections)]) __str__ = __repr__ __str__.__doc__ = "x.__str__() <==> str(x)" # Extra methods - not in a normal dictionary def dict(self): """ Return a deepcopy of self as a dictionary. All members that are ``Section`` instances are recursively turned to ordinary dictionaries - by calling their ``dict`` method. 
>>> n = a.dict() >>> n == a 1 >>> n is a 0 """ newdict = {} for entry in self: this_entry = self[entry] if isinstance(this_entry, Section): this_entry = this_entry.dict() elif isinstance(this_entry, list): # create a copy rather than a reference this_entry = list(this_entry) elif isinstance(this_entry, tuple): # create a copy rather than a reference this_entry = tuple(this_entry) newdict[entry] = this_entry return newdict def merge(self, indict): """ A recursive update - useful for merging config files. >>> a = '''[section1] ... option1 = True ... [[subsection]] ... more_options = False ... # end of file'''.splitlines() >>> b = '''# File is user.ini ... [section1] ... option1 = False ... # end of file'''.splitlines() >>> c1 = ConfigObj(b) >>> c2 = ConfigObj(a) >>> c2.merge(c1) >>> c2 ConfigObj({'section1': {'option1': 'False', 'subsection': {'more_options': 'False'}}}) """ for key, val in indict.items(): if (key in self and isinstance(self[key], dict) and isinstance(val, dict)): self[key].merge(val) else: self[key] = val def rename(self, oldkey, newkey): """ Change a keyname to another, without changing position in sequence. Implemented so that transformations can be made on keys, as well as on values. (used by encode and decode) Also renames comments. """ if oldkey in self.scalars: the_list = self.scalars elif oldkey in self.sections: the_list = self.sections else: raise KeyError('Key "%s" not found.' % oldkey) pos = the_list.index(oldkey) # val = self[oldkey] dict.__delitem__(self, oldkey) dict.__setitem__(self, newkey, val) the_list.remove(oldkey) the_list.insert(pos, newkey) comm = self.comments[oldkey] inline_comment = self.inline_comments[oldkey] del self.comments[oldkey] del self.inline_comments[oldkey] self.comments[newkey] = comm self.inline_comments[newkey] = inline_comment def walk(self, function, raise_errors=True, call_on_sections=False, **keywargs): """ Walk every member and call a function on the keyword and value. Return a dictionary of the return values If the function raises an exception, raise the errror unless ``raise_errors=False``, in which case set the return value to ``False``. Any unrecognised keyword arguments you pass to walk, will be pased on to the function you pass in. Note: if ``call_on_sections`` is ``True`` then - on encountering a subsection, *first* the function is called for the *whole* subsection, and then recurses into it's members. This means your function must be able to handle strings, dictionaries and lists. This allows you to change the key of subsections as well as for ordinary members. The return value when called on the whole subsection has to be discarded. See the encode and decode methods for examples, including functions. .. admonition:: caution You can use ``walk`` to transform the names of members of a section but you mustn't add or delete members. >>> config = '''[XXXXsection] ... XXXXkey = XXXXvalue'''.splitlines() >>> cfg = ConfigObj(config) >>> cfg ConfigObj({'XXXXsection': {'XXXXkey': 'XXXXvalue'}}) >>> def transform(section, key): ... val = section[key] ... newkey = key.replace('XXXX', 'CLIENT1') ... section.rename(key, newkey) ... if isinstance(val, (tuple, list, dict)): ... pass ... else: ... val = val.replace('XXXX', 'CLIENT1') ... 
section[newkey] = val >>> cfg.walk(transform, call_on_sections=True) {'CLIENT1section': {'CLIENT1key': None}} >>> cfg ConfigObj({'CLIENT1section': {'CLIENT1key': 'CLIENT1value'}}) """ out = {} # scalars first for i in range(len(self.scalars)): entry = self.scalars[i] try: val = function(self, entry, **keywargs) # bound again in case name has changed entry = self.scalars[i] out[entry] = val except Exception: if raise_errors: raise else: entry = self.scalars[i] out[entry] = False # then sections for i in range(len(self.sections)): entry = self.sections[i] if call_on_sections: try: function(self, entry, **keywargs) except Exception: if raise_errors: raise else: entry = self.sections[i] out[entry] = False # bound again in case name has changed entry = self.sections[i] # previous result is discarded out[entry] = self[entry].walk( function, raise_errors=raise_errors, call_on_sections=call_on_sections, **keywargs) return out def as_bool(self, key): """ Accepts a key as input. The corresponding value must be a string or the objects (``True`` or 1) or (``False`` or 0). We allow 0 and 1 to retain compatibility with Python 2.2. If the string is one of ``True``, ``On``, ``Yes``, or ``1`` it returns ``True``. If the string is one of ``False``, ``Off``, ``No``, or ``0`` it returns ``False``. ``as_bool`` is not case sensitive. Any other input will raise a ``ValueError``. >>> a = ConfigObj() >>> a['a'] = 'fish' >>> a.as_bool('a') Traceback (most recent call last): ValueError: Value "fish" is neither True nor False >>> a['b'] = 'True' >>> a.as_bool('b') 1 >>> a['b'] = 'off' >>> a.as_bool('b') 0 """ val = self[key] if val: return True elif not val: return False else: try: if not isinstance(val, basestring): # TODO: Why do we raise a KeyError here? raise KeyError() else: return self.main._bools[val.lower()] except KeyError: raise ValueError('Value "%s" is neither True nor False' % val) def as_int(self, key): """ A convenience method which coerces the specified value to an integer. If the value is an invalid literal for ``int``, a ``ValueError`` will be raised. >>> a = ConfigObj() >>> a['a'] = 'fish' >>> a.as_int('a') Traceback (most recent call last): ValueError: invalid literal for int() with base 10: 'fish' >>> a['b'] = '1' >>> a.as_int('b') 1 >>> a['b'] = '3.2' >>> a.as_int('b') Traceback (most recent call last): ValueError: invalid literal for int() with base 10: '3.2' """ return int(self[key]) def as_float(self, key): """ A convenience method which coerces the specified value to a float. If the value is an invalid literal for ``float``, a ``ValueError`` will be raised. >>> a = ConfigObj() >>> a['a'] = 'fish' >>> a.as_float('a') Traceback (most recent call last): ValueError: invalid literal for float(): fish >>> a['b'] = '1' >>> a.as_float('b') 1.0 >>> a['b'] = '3.2' >>> a.as_float('b') 3.2000000000000002 """ return float(self[key]) def as_list(self, key): """ A convenience method which fetches the specified value, guaranteeing that it is a list. >>> a = ConfigObj() >>> a['a'] = 1 >>> a.as_list('a') [1] >>> a['a'] = (1,) >>> a.as_list('a') [1] >>> a['a'] = [1] >>> a.as_list('a') [1] """ result = self[key] if isinstance(result, (tuple, list)): return list(result) return [result] def restore_default(self, key): """ Restore (and return) default value for the specified key. This method will only work for a ConfigObj that was created with a configspec and has been validated. If there is no default value for this key, ``KeyError`` is raised. 
""" default = self.default_values[key] dict.__setitem__(self, key, default) if key not in self.defaults: self.defaults.append(key) return default def restore_defaults(self): """ Recursively restore default values to all members that have them. This method will only work for a ConfigObj that was created with a configspec and has been validated. It doesn't delete or modify entries without default values. """ for key in self.default_values: self.restore_default(key) for section in self.sections: self[section].restore_defaults() class ConfigObj(Section): """An object to read, create, and write config files.""" _keyword = re.compile( r'''^ # line start (\s*) # indentation ( # keyword (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'"=].*?) # no quotes ) \s*=\s* # divider (.*) # value (including list values and comments) $ # line end ''', re.VERBOSE) _sectionmarker = re.compile( r'''^ (\s*) # 1: indentation ((?:\[\s*)+) # 2: section marker open ( # 3: section name open (?:"\s*\S.*?\s*")| # at least one non-space with double quotes (?:'\s*\S.*?\s*')| # at least one non-space with single quotes (?:[^'"\s].*?) # at least one non-space unquoted ) # section name close ((?:\s*\])+) # 4: section marker close \s*(\#.*)? # 5: optional comment $''', re.VERBOSE) # this regexp pulls list values out as a single string # or single values and comments # FIXME: this regex adds a '' to the end of comma terminated lists # workaround in ``_handle_value`` _valueexp = re.compile( r'''^ (?: (?: ( (?: (?: (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'",\#][^,\#]*?) # unquoted ) \s*,\s* # comma )* # match all list items ending in a comma (if any) ) ( (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'",\#\s][^,]*?)| # unquoted (?:(?<!,)) # Empty value )? # last item in a list - or string value )| (,) # alternatively a single comma - empty list ) \s*(\#.*)? # optional comment $''', re.VERBOSE) # use findall to get the members of a list value _listvalueexp = re.compile( r''' ( (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'",\#].*?) # unquoted ) \s*,\s* # comma ''', re.VERBOSE) # this regexp is used for the value # when lists are switched off _nolistvalue = re.compile( r'''^ ( (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'"\#].*?)| # unquoted (?:) # Empty value ) \s*(\#.*)? # optional comment $''', re.VERBOSE) # regexes for finding triple quoted values on one line _single_line_single = re.compile(r"^'''(.*?)'''\s*(#.*)?$") _single_line_double = re.compile(r'^"""(.*?)"""\s*(#.*)?$') _multi_line_single = re.compile(r"^(.*?)'''\s*(#.*)?$") _multi_line_double = re.compile(r'^(.*?)"""\s*(#.*)?$') _triple_quote = { "'''": (_single_line_single, _multi_line_single), '"""': (_single_line_double, _multi_line_double), } # Used by the ``istrue`` Section method _bools = { 'yes': True, 'no': False, 'on': True, 'off': False, '1': True, '0': False, 'true': True, 'false': False, } def __init__(self, infile=None, options=None, _inspec=False, **kwargs): """ Parse a config file or create a config file object. ``ConfigObj(infile=None, options=None, **kwargs)`` """ self._inspec = _inspec # init the superclass Section.__init__(self, self, 0, self) infile = infile or [] options = dict(options or {}) # keyword arguments take precedence over an options dictionary options.update(kwargs) if _inspec: options['list_values'] = False defaults = OPTION_DEFAULTS.copy() # TODO: check the values too. for entry in options: if entry not in defaults: raise TypeError('Unrecognised option "%s".' 
% entry) # Add any explicit options to the defaults defaults.update(options) self._initialise(defaults) configspec = defaults['configspec'] self._original_configspec = configspec self._load(infile, configspec) def _load(self, infile, configspec): if isinstance(infile, basestring): self.filename = infile if os.path.isfile(infile): h = open(infile, 'rb') infile = h.read() or [] h.close() elif self.file_error: # raise an error if the file doesn't exist raise IOError('Config file not found: "%s".' % self.filename) else: # file doesn't already exist if self.create_empty: # this is a good test that the filename specified # isn't impossible - like on a non-existent device h = open(infile, 'w') h.write('') h.close() infile = [] elif isinstance(infile, (list, tuple)): infile = list(infile) elif isinstance(infile, dict): # initialise self # the Section class handles creating subsections if isinstance(infile, ConfigObj): # get a copy of our ConfigObj infile = infile.dict() for entry in infile: self[entry] = infile[entry] del self._errors if configspec is not None: self._handle_configspec(configspec) else: self.configspec = None return elif getattr(infile, 'read', MISSING) is not MISSING: # This supports file like objects infile = infile.read() or [] # needs splitting into lines - but needs doing *after* decoding # in case it's not an 8 bit encoding else: raise TypeError('infile must be a filename, file like object,' ' or list of lines.') if infile: # don't do it for the empty ConfigObj infile = self._handle_bom(infile) # infile is now *always* a list # # Set the newlines attribute (first line ending it finds) # and strip trailing '\n' or '\r' from lines for line in infile: if (not line) or (line[-1] not in ('\r', '\n', '\r\n')): continue for end in ('\r\n', '\n', '\r'): if line.endswith(end): self.newlines = end break break infile = [line.rstrip('\r\n') for line in infile] self._parse(infile) # if we had any errors, now is the time to raise them if self._errors: info = "at line %s." 
% self._errors[0].line_number if len(self._errors) > 1: msg = ("Parsing failed with several errors.\nFirst error %s" % info) error = ConfigObjError(msg) else: error = self._errors[0] # set the errors attribute; it's a list of tuples: # (error_type, message, line_number) error.errors = self._errors # set the config attribute error.config = self raise error # delete private attributes del self._errors if configspec is None: self.configspec = None else: self._handle_configspec(configspec) def _initialise(self, options=None): if options is None: options = OPTION_DEFAULTS # initialise a few variables self.filename = None self._errors = [] self.raise_errors = options['raise_errors'] self.interpolation = options['interpolation'] self.list_values = options['list_values'] self.create_empty = options['create_empty'] self.file_error = options['file_error'] self.stringify = options['stringify'] self.indent_type = options['indent_type'] self.encoding = options['encoding'] self.default_encoding = options['default_encoding'] self.BOM = False self.newlines = None self.write_empty_values = options['write_empty_values'] self.unrepr = options['unrepr'] self.initial_comment = [] self.final_comment = [] self.configspec = None if self._inspec: self.list_values = False # Clear section attributes as well Section._initialise(self) def __repr__(self): return ('ConfigObj({%s})' % ', '.join([('%s: %s' % (repr(key), repr(self[key]))) for key in (self.scalars + self.sections)])) def _handle_bom(self, infile): """ Handle any BOM, and decode if necessary. If an encoding is specified, that *must* be used - but the BOM should still be removed (and the BOM attribute set). (If the encoding is wrongly specified, then a BOM for an alternative encoding won't be discovered or removed.) If an encoding is not specified, UTF8 or UTF16 BOM will be detected and removed. The BOM attribute will be set. UTF16 will be decoded to unicode. NOTE: This method must not be called with an empty ``infile``. Specifying the *wrong* encoding is likely to cause a ``UnicodeDecodeError``. ``infile`` must always be returned as a list of lines, but may be passed in as a single string. """ if ((self.encoding is not None) and (self.encoding.lower() not in BOM_LIST)): # No need to check for a BOM # the encoding specified doesn't have one # just decode return self._decode(infile, self.encoding) if isinstance(infile, (list, tuple)): line = infile[0] else: line = infile if self.encoding is not None: # encoding explicitly supplied # And it could have an associated BOM # TODO: if encoding is just UTF16 - we ought to check for both # TODO: big endian and little endian versions. 
enc = BOM_LIST[self.encoding.lower()] if enc == 'utf_16': # For UTF16 we try big endian and little endian for BOM, (encoding, final_encoding) in BOMS.items(): if not final_encoding: # skip UTF8 continue if infile.startswith(BOM): # BOM discovered # self.BOM = True # Don't need to remove BOM return self._decode(infile, encoding) # If we get this far, will *probably* raise a DecodeError # As it doesn't appear to start with a BOM return self._decode(infile, self.encoding) # Must be UTF8 BOM = BOM_SET[enc] if not line.startswith(BOM): return self._decode(infile, self.encoding) newline = line[len(BOM):] # BOM removed if isinstance(infile, (list, tuple)): infile[0] = newline else: infile = newline self.BOM = True return self._decode(infile, self.encoding) # No encoding specified - so we need to check for UTF8/UTF16 for BOM, (encoding, final_encoding) in BOMS.items(): if not line.startswith(BOM): continue else: # BOM discovered self.encoding = final_encoding if not final_encoding: self.BOM = True # UTF8 # remove BOM newline = line[len(BOM):] if isinstance(infile, (list, tuple)): infile[0] = newline else: infile = newline # UTF8 - don't decode if isinstance(infile, basestring): return infile.splitlines(True) else: return infile # UTF16 - have to decode return self._decode(infile, encoding) # No BOM discovered and no encoding specified, just return if isinstance(infile, basestring): # infile read from a file will be a single string return infile.splitlines(True) return infile def _a_to_u(self, aString): """Decode ASCII strings to unicode if a self.encoding is specified.""" if self.encoding: return aString.decode('ascii') else: return aString def _decode(self, infile, encoding): """ Decode infile to unicode. Using the specified encoding. if is a string, it also needs converting to a list. """ if isinstance(infile, basestring): # can't be unicode # NOTE: Could raise a ``UnicodeDecodeError`` return infile.decode(encoding).splitlines(True) for i, line in enumerate(infile): if not isinstance(line, unicode): # NOTE: The isinstance test here handles mixed lists of # unicode/string # NOTE: But the decode will break on any non-string values # NOTE: Or could raise a ``UnicodeDecodeError`` infile[i] = line.decode(encoding) return infile def _decode_element(self, line): """Decode element to unicode if necessary.""" if not self.encoding: return line if isinstance(line, str) and self.default_encoding: return line.decode(self.default_encoding) return line def _str(self, value): """ Used by ``stringify`` within validate, to turn non-string values into strings. """ if not isinstance(value, basestring): return str(value) else: return value def _parse(self, infile): """Actually parse the config file.""" temp_list_values = self.list_values if self.unrepr: self.list_values = False comment_list = [] done_start = False this_section = self maxline = len(infile) - 1 cur_index = -1 reset_comment = False while cur_index < maxline: if reset_comment: comment_list = [] cur_index += 1 line = infile[cur_index] sline = line.strip() # do we have anything on the line ? 
if not sline or sline.startswith('#'): reset_comment = False comment_list.append(line) continue if not done_start: # preserve initial comment self.initial_comment = comment_list comment_list = [] done_start = True reset_comment = True # first we check if it's a section marker mat = self._sectionmarker.match(line) if mat is not None: # is a section line (indent, sect_open, sect_name, sect_close, comment) = \ mat.groups() if indent and (self.indent_type is None): self.indent_type = indent cur_depth = sect_open.count('[') if cur_depth != sect_close.count(']'): self._handle_error( "Cannot compute the section depth at line %s.", NestingError, infile, cur_index) continue if cur_depth < this_section.depth: # the new section is dropping back to a previous level try: parent = self._match_depth(this_section, cur_depth).parent except SyntaxError: self._handle_error( "Cannot compute nesting level at line %s.", NestingError, infile, cur_index) continue elif cur_depth == this_section.depth: # the new section is a sibling of the current section parent = this_section.parent elif cur_depth == this_section.depth + 1: # the new section is a child the current section parent = this_section else: self._handle_error("Section too nested at line %s.", NestingError, infile, cur_index) sect_name = self._unquote(sect_name) if sect_name in parent: self._handle_error('Duplicate section name at line %s.', DuplicateError, infile, cur_index) continue # create the new section this_section = Section( parent, cur_depth, self, name=sect_name) parent[sect_name] = this_section parent.inline_comments[sect_name] = comment parent.comments[sect_name] = comment_list continue # # it's not a section marker, # so it should be a valid ``key = value`` line mat = self._keyword.match(line) if mat is None: # it neither matched as a keyword # or a section marker self._handle_error( 'Invalid line at line "%s".', ParseError, infile, cur_index) else: # is a keyword value # value will include any inline comment (indent, key, value) = mat.groups() if indent and (self.indent_type is None): self.indent_type = indent # check for a multiline value if value[:3] in ['"""', "'''"]: try: (value, comment, cur_index) = self._multiline( value, infile, cur_index, maxline) except SyntaxError: self._handle_error( 'Parse error in value at line %s.', ParseError, infile, cur_index) continue else: if self.unrepr: comment = '' try: value = unrepr(value) except Exception, e: if type(e) == UnknownType: msg = ('Unknown name or type in value at' ' line %s.') else: msg = 'Parse error in value at line %s.' self._handle_error(msg, UnreprError, infile, cur_index) continue else: if self.unrepr: comment = '' try: value = unrepr(value) except Exception, e: if isinstance(e, UnknownType): msg = ('Unknown name or type in value at line' ' %s.') else: msg = 'Parse error in value at line %s.' self._handle_error(msg, UnreprError, infile, cur_index) continue else: # extract comment and lists try: (value, comment) = self._handle_value(value) except SyntaxError: self._handle_error( 'Parse error in value at line %s.', ParseError, infile, cur_index) continue # key = self._unquote(key) if key in this_section: self._handle_error( 'Duplicate keyword name at line %s.', DuplicateError, infile, cur_index) continue # add the key. 
# we set unrepr because if we have got this far we will never # be creating a new section this_section.__setitem__(key, value, unrepr=True) this_section.inline_comments[key] = comment this_section.comments[key] = comment_list continue # if self.indent_type is None: # no indentation used, set the type accordingly self.indent_type = '' # preserve the final comment if not self and not self.initial_comment: self.initial_comment = comment_list elif not reset_comment: self.final_comment = comment_list self.list_values = temp_list_values def _match_depth(self, sect, depth): """ Given a section and a depth level, walk back through the sections parents to see if the depth level matches a previous section. Return a reference to the right section, or raise a SyntaxError. """ while depth < sect.depth: if sect is sect.parent: # we've reached the top level already raise SyntaxError() sect = sect.parent if sect.depth == depth: return sect # shouldn't get here raise SyntaxError() def _handle_error(self, text, ErrorClass, infile, cur_index): """ Handle an error according to the error settings. Either raise the error or store it. The error will have occured at ``cur_index`` """ line = infile[cur_index] cur_index += 1 message = text % cur_index error = ErrorClass(message, cur_index, line) if self.raise_errors: # raise the error - parsing stops here raise error # store the error # reraise when parsing has finished self._errors.append(error) def _unquote(self, value): """Return an unquoted version of a value""" if (value[0] == value[-1]) and (value[0] in ('"', "'")): value = value[1:-1] return value def _quote(self, value, multiline=True): """ Return a safely quoted version of a value. Raise a ConfigObjError if the value cannot be safely quoted. If multiline is ``True`` (default) then use triple quotes if necessary. * Don't quote values that don't need it. * Recursively quote members of a list and return a comma joined list. * Multiline is ``False`` for lists. * Obey list syntax for empty and single member lists. If ``list_values=False`` then the value is only quoted if it contains a ``\\n`` (is multiline) or '#'. If ``write_empty_values`` is set, and the value is an empty string, it won't be quoted. """ if multiline and self.write_empty_values and value == '': # Only if multiline is set, so that it is used for values not # keys, and not values that are part of a list return '' if multiline and isinstance(value, (list, tuple)): if not value: return ',' elif len(value) == 1: return self._quote(value[0], multiline=False) + ',' return ', '.join([self._quote(val, multiline=False) for val in value]) if not isinstance(value, basestring): if self.stringify: value = str(value) else: raise TypeError('Value "%s" is not a string.' % value) if not value: return '""' no_lists_no_quotes = (not self.list_values and '\n' not in value and '#' not in value) need_triple = multiline and ((("'" in value) and ('"' in value)) or ('\n' in value)) hash_triple_quote = (multiline and not need_triple and ("'" in value) and ('"' in value) and ('#' in value)) check_for_single = ((no_lists_no_quotes or not need_triple) and not hash_triple_quote) if check_for_single: if not self.list_values: # we don't quote if ``list_values=False`` quot = noquot # for normal values either single or double quotes will do elif '\n' in value: # will only happen if multiline is off - e.g. '\n' in key raise ConfigObjError('Value "%s" cannot be safely quoted.' 
% value) elif ((value[0] not in wspace_plus) and (value[-1] not in wspace_plus) and (',' not in value)): quot = noquot else: quot = self._get_single_quote(value) else: # if value has '\n' or "'" *and* '"', it will need triple quotes quot = self._get_triple_quote(value) if quot == noquot and '#' in value and self.list_values: quot = self._get_single_quote(value) return quot % value def _get_single_quote(self, value): if ("'" in value) and ('"' in value): raise ConfigObjError('Value "%s" cannot be safely quoted.' % value) elif '"' in value: quot = squot else: quot = dquot return quot def _get_triple_quote(self, value): if (value.find('"""') != -1) and (value.find("'''") != -1): raise ConfigObjError('Value "%s" cannot be safely quoted.' % value) if value.find('"""') == -1: quot = tdquot else: quot = tsquot return quot def _handle_value(self, value): """ Given a value string, unquote, remove comment, handle lists. (including empty and single member lists) """ if self._inspec: # Parsing a configspec so don't handle comments return (value, '') # do we look for lists in values ? if not self.list_values: mat = self._nolistvalue.match(value) if mat is None: raise SyntaxError() # NOTE: we don't unquote here return mat.groups() # mat = self._valueexp.match(value) if mat is None: # the value is badly constructed, probably badly quoted, # or an invalid list raise SyntaxError() (list_values, single, empty_list, comment) = mat.groups() if (list_values == '') and (single is None): # change this if you want to accept empty values raise SyntaxError() # NOTE: note there is no error handling from here if the regex # is wrong: then incorrect values will slip through if empty_list is not None: # the single comma - meaning an empty list return ([], comment) if single is not None: # handle empty values if list_values and not single: # FIXME: the '' is a workaround because our regex now matches # '' at the end of a list if it has a trailing comma single = None else: single = single or '""' single = self._unquote(single) if list_values == '': # not a list value return (single, comment) the_list = self._listvalueexp.findall(list_values) the_list = [self._unquote(val) for val in the_list] if single is not None: the_list += [single] return (the_list, comment) def _multiline(self, value, infile, cur_index, maxline): """Extract the value, where we are in a multiline situation.""" quot = value[:3] newvalue = value[3:] single_line = self._triple_quote[quot][0] multi_line = self._triple_quote[quot][1] mat = single_line.match(value) if mat is not None: retval = list(mat.groups()) retval.append(cur_index) return retval elif newvalue.find(quot) != -1: # somehow the triple quote is missing raise SyntaxError() # while cur_index < maxline: cur_index += 1 newvalue += '\n' line = infile[cur_index] if line.find(quot) == -1: newvalue += line else: # end of multiline, process it break else: # we've got to the end of the config, oops... raise SyntaxError() mat = multi_line.match(line) if mat is None: # a badly formed line raise SyntaxError() (value, comment) = mat.groups() return (newvalue + value, comment, cur_index) def _handle_configspec(self, configspec): """Parse the configspec.""" # FIXME: Should we check that the configspec was created with the # correct settings ? (i.e. 
``list_values=False``) if not isinstance(configspec, ConfigObj): try: configspec = ConfigObj(configspec, raise_errors=True, file_error=True, _inspec=True) except ConfigObjError, e: # FIXME: Should these errors have a reference # to the already parsed ConfigObj ? raise ConfigspecError('Parsing configspec failed: %s' % e) except IOError, e: raise IOError('Reading configspec failed: %s' % e) self.configspec = configspec def _set_configspec(self, section, copy): """ Called by validate. Handles setting the configspec on subsections including sections to be validated by __many__ """ configspec = section.configspec many = configspec.get('__many__') if isinstance(many, dict): for entry in section.sections: if entry not in configspec: section[entry].configspec = many for entry in configspec.sections: if entry == '__many__': continue if entry not in section: section[entry] = {} if copy: # copy comments section.comments[entry] = configspec.comments.get(entry, []) section.inline_comments[entry] = \ configspec.inline_comments.get(entry, '') # Could be a scalar when we expect a section if isinstance(section[entry], Section): section[entry].configspec = configspec[entry] def _write_line(self, indent_string, entry, this_entry, comment): """Write an individual line, for the write method""" # NOTE: the calls to self._quote here handles non-StringType values. if not self.unrepr: val = self._decode_element(self._quote(this_entry)) else: val = repr(this_entry) return '%s%s%s%s%s' % (indent_string, self._decode_element(self._quote(entry, multiline=False)), self._a_to_u(' = '), val, self._decode_element(comment)) def _write_marker(self, indent_string, depth, entry, comment): """Write a section marker line""" return '%s%s%s%s%s' % (indent_string, self._a_to_u('[' * depth), self._quote(self._decode_element(entry), multiline=False), self._a_to_u(']' * depth), self._decode_element(comment)) def _handle_comment(self, comment): """Deal with a comment.""" if not comment: return '' start = self.indent_type if not comment.startswith('#'): start += self._a_to_u(' # ') return (start + comment) # Public methods def write(self, outfile=None, section=None): """ Write the current ConfigObj as a file tekNico: FIXME: use StringIO instead of real files >>> filename = a.filename >>> a.filename = 'test.ini' >>> a.write() >>> a.filename = filename >>> a == ConfigObj('test.ini', raise_errors=True) 1 """ if self.indent_type is None: # this can be true if initialised from a dictionary self.indent_type = DEFAULT_INDENT_TYPE out = [] cs = self._a_to_u('#') csp = self._a_to_u('# ') if section is None: int_val = self.interpolation self.interpolation = False section = self for line in self.initial_comment: line = self._decode_element(line) stripped_line = line.strip() if stripped_line and not stripped_line.startswith(cs): line = csp + line out.append(line) indent_string = self.indent_type * section.depth for entry in (section.scalars + section.sections): if entry in section.defaults: # don't write out default values continue for comment_line in section.comments[entry]: comment_line = self._decode_element(comment_line.lstrip()) if comment_line and not comment_line.startswith(cs): comment_line = csp + comment_line out.append(indent_string + comment_line) this_entry = section[entry] comment = self._handle_comment(section.inline_comments[entry]) if isinstance(this_entry, dict): # a section out.append(self._write_marker( indent_string, this_entry.depth, entry, comment)) out.extend(self.write(section=this_entry)) else: 
out.append(self._write_line( indent_string, entry, this_entry, comment)) if section is self: for line in self.final_comment: line = self._decode_element(line) stripped_line = line.strip() if stripped_line and not stripped_line.startswith(cs): line = csp + line out.append(line) self.interpolation = int_val if section is not self: return out if (self.filename is None) and (outfile is None): # output a list of lines # might need to encode # NOTE: This will *screw* UTF16, each line will start with the BOM if self.encoding: out = [l.encode(self.encoding) for l in out] if (self.BOM and ( (self.encoding is None) or (BOM_LIST.get(self.encoding.lower()) == 'utf_8'))): # Add the UTF8 BOM if not out: out.append('') out[0] = BOM_UTF8 + out[0] return out # Turn the list to a string, joined with correct newlines newline = self.newlines or os.linesep output = self._a_to_u(newline).join(out) if self.encoding: output = output.encode(self.encoding) if self.BOM and ((self.encoding is None) or match_utf8(self.encoding)): # Add the UTF8 BOM output = BOM_UTF8 + output if not output.endswith(newline): output += newline if outfile is not None: outfile.write(output) else: h = open(self.filename, 'wb') h.write(output) h.close() def validate(self, validator, preserve_errors=False, copy=False, section=None): """ Test the ConfigObj against a configspec. It uses the ``validator`` object from *validate.py*. To run ``validate`` on the current ConfigObj, call: :: test = config.validate(validator) (Normally having previously passed in the configspec when the ConfigObj was created - you can dynamically assign a dictionary of checks to the ``configspec`` attribute of a section though). It returns ``True`` if everything passes, or a dictionary of pass/fails (True/False). If every member of a subsection passes, it will just have the value ``True``. (It also returns ``False`` if all members fail). In addition, it converts the values from strings to their native types if their checks pass (and ``stringify`` is set). If ``preserve_errors`` is ``True`` (``False`` is default) then instead of a marking a fail with a ``False``, it will preserve the actual exception object. This can contain info about the reason for failure. For example the ``VdtValueTooSmallError`` indicates that the value supplied was too small. If a value (or section) is missing it will still be marked as ``False``. You must have the validate module to use ``preserve_errors=True``. You can then use the ``flatten_errors`` function to turn your nested results dictionary into a flattened list of failures - useful for displaying meaningful error messages. 
""" if section is None: if self.configspec is None: raise ValueError('No configspec supplied.') if preserve_errors: # We do this once to remove a top level dependency on the # validate module which makes importing configobj faster from validate import VdtMissingValue self._vdtMissingValue = VdtMissingValue section = self if copy: section.initial_comment = section.configspec.initial_comment section.final_comment = section.configspec.final_comment section.encoding = section.configspec.encoding section.BOM = section.configspec.BOM section.newlines = section.configspec.newlines section.indent_type = section.configspec.indent_type # configspec = section.configspec self._set_configspec(section, copy) def validate_entry(entry, spec, val, missing, ret_true, ret_false): try: check = validator.check(spec, val, missing=missing ) except validator.baseErrorClass, e: if not preserve_errors or isinstance(e, self._vdtMissingValue): out[entry] = False else: # preserve the error out[entry] = e ret_false = False ret_true = False else: try: section.default_values.pop(entry, None) except AttributeError: # For Python 2.2 compatibility try: del section.default_values[entry] except KeyError: pass try: section.default_values[entry] = \ validator.get_default_value(configspec[entry]) except (KeyError, AttributeError): # No default or validator has no 'get_default_value' (e.g. # SimpleVal) pass ret_false = False out[entry] = True if self.stringify or missing: # if we are doing type conversion # or the value is a supplied default if not self.stringify: if isinstance(check, (list, tuple)): # preserve lists check = [self._str(item) for item in check] elif missing and check is None: # convert the None from a default to a '' check = '' else: check = self._str(check) if (check != val) or missing: section[entry] = check if not copy and missing and entry not in section.defaults: section.defaults.append(entry) return ret_true, ret_false # out = {} ret_true = True ret_false = True unvalidated = [k for k in section.scalars if k not in configspec] incorrect_sections = [k for k in configspec.sections if k in section.scalars] incorrect_scalars = [k for k in configspec.scalars if k in section.sections] for entry in configspec.scalars: if entry in ('__many__', '___many___'): # reserved names continue if (entry not in section.scalars) or (entry in section.defaults): # missing entries # or entries from defaults missing = True val = None if copy and entry not in section.scalars: # copy comments section.comments[entry] = ( configspec.comments.get(entry, [])) section.inline_comments[entry] = ( configspec.inline_comments.get(entry, '')) # else: missing = False val = section[entry] ret_true, ret_false = validate_entry(entry, configspec[entry], val, missing, ret_true, ret_false) many = None if '__many__' in configspec.scalars: many = configspec['__many__'] elif '___many___' in configspec.scalars: many = configspec['___many___'] if many is not None: for entry in unvalidated: val = section[entry] ret_true, ret_false = validate_entry(entry, many, val, False, ret_true, ret_false) for entry in incorrect_scalars: ret_true = False if not preserve_errors: out[entry] = False else: ret_false = False msg = 'Value %r was provided as a section' % entry out[entry] = validator.baseErrorClass(msg) for entry in incorrect_sections: ret_true = False if not preserve_errors: out[entry] = False else: ret_false = False msg = 'Section %r was provided as a single value' % entry out[entry] = validator.baseErrorClass(msg) # Missing sections will have been created as 
empty ones when the # configspec was read. for entry in section.sections: # FIXME: this means DEFAULT is not copied in copy mode if section is self and entry == 'DEFAULT': continue if section[entry].configspec is None: continue if copy: section.comments[entry] = configspec.comments.get(entry, []) section.inline_comments[entry] = \ configspec.inline_comments.get(entry, '') check = self.validate(validator, preserve_errors=preserve_errors, copy=copy, section=section[entry]) out[entry] = check if not check: ret_true = False elif check: ret_false = False else: ret_true = False ret_false = False # if ret_true: return True elif ret_false: return False return out def reset(self): """Clear ConfigObj instance and restore to 'freshly created' state.""" self.clear() self._initialise() # FIXME: Should be done by '_initialise', but ConfigObj constructor # (and reload) requires an empty dictionary self.configspec = None # Just to be sure ;-) self._original_configspec = None def reload(self): """ Reload a ConfigObj from file. This method raises a ``ReloadError`` if the ConfigObj doesn't have a filename attribute pointing to a file. """ if not isinstance(self.filename, basestring): raise ReloadError() filename = self.filename current_options = {} for entry in OPTION_DEFAULTS: if entry == 'configspec': continue current_options[entry] = getattr(self, entry) configspec = self._original_configspec current_options['configspec'] = configspec self.clear() self._initialise(current_options) self._load(filename, configspec) class SimpleVal(object): """ A simple validator. Can be used to check that all members expected are present. To use it, provide a configspec with all your members in (the value given will be ignored). Pass an instance of ``SimpleVal`` to the ``validate`` method of your ``ConfigObj``. ``validate`` will return ``True`` if all members are present, or a dictionary with True/False meaning present/missing. (Whole missing sections will be replaced with ``False``) """ def __init__(self): self.baseErrorClass = ConfigObjError def check(self, check, member, missing=False): """A dummy check method, always returns the value unchanged.""" if missing: raise self.baseErrorClass() return member # Check / processing functions for options def flatten_errors(cfg, res, levels=None, results=None): """ An example function that will turn a nested dictionary of results (as returned by ``ConfigObj.validate``) into a flat list. ``cfg`` is the ConfigObj instance being checked, ``res`` is the results dictionary returned by ``validate``. (This is a recursive function, so you shouldn't use the ``levels`` or ``results`` arguments - they are used by the function.) Returns a list of keys that failed. Each member of the list is a tuple : :: ([list of sections...], key, result) If ``validate`` was called with ``preserve_errors=False`` (the default) then ``result`` will always be ``False``. *list of sections* is a flattened list of sections that the key was found in. If the section was missing (or a section was expected and a scalar provided - or vice-versa) then key will be ``None``. If the value (or section) was missing then ``result`` will be ``False``. If ``validate`` was called with ``preserve_errors=True`` and a value was present, but failed the check, then ``result`` will be the exception object returned. You can use this as a string that describes the failure. For example *The value "3" is of the wrong type*. >>> import validate >>> vtor = validate.Validator() >>> my_ini = ''' ... option1 = True ... [section1] ... 
option1 = True ... [section2] ... another_option = Probably ... [section3] ... another_option = True ... [[section3b]] ... value = 3 ... value2 = a ... value3 = 11 ... ''' >>> my_cfg = ''' ... option1 = boolean() ... option2 = boolean() ... option3 = boolean(default=Bad_value) ... [section1] ... option1 = boolean() ... option2 = boolean() ... option3 = boolean(default=Bad_value) ... [section2] ... another_option = boolean() ... [section3] ... another_option = boolean() ... [[section3b]] ... value = integer ... value2 = integer ... value3 = integer(0, 10) ... [[[section3b-sub]]] ... value = string ... [section4] ... another_option = boolean() ... ''' >>> cs = my_cfg.split('\\n') >>> ini = my_ini.split('\\n') >>> cfg = ConfigObj(ini, configspec=cs) >>> res = cfg.validate(vtor, preserve_errors=True) >>> errors = [] >>> for entry in flatten_errors(cfg, res): ... section_list, key, error = entry ... section_list.insert(0, '[root]') ... if key is not None: ... section_list.append(key) ... else: ... section_list.append('[missing]') ... section_string = ', '.join(section_list) ... errors.append((section_string, ' = ', error)) >>> errors.sort() >>> for entry in errors: ... print entry[0], entry[1], (entry[2] or 0) [root], option2 = 0 [root], option3 = the value "Bad_value" is of the wrong type. [root], section1, option2 = 0 [root], section1, option3 = the value "Bad_value" is of the wrong type. [root], section2, another_option = the value "Probably" is of the wrong type. [root], section3, section3b, section3b-sub, [missing] = 0 [root], section3, section3b, value2 = the value "a" is of the wrong type. [root], section3, section3b, value3 = the value "11" is too big. [root], section4, [missing] = 0 """ if levels is None: # first time called levels = [] results = [] if res is True: return results if res is False or isinstance(res, Exception): results.append((levels[:], None, res)) if levels: levels.pop() return results for (key, val) in res.items(): if val: continue if isinstance(cfg.get(key), dict): # Go down one level levels.append(key) flatten_errors(cfg[key], val, levels, results) continue results.append((levels[:], key, val)) # # Go up one level if levels: levels.pop() # return results """*A programming language is a medium of expression.* - Paul Graham"""
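# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original configobj distribution): a minimal
# end-to-end example of the validate()/flatten_errors() workflow documented
# above. The filename 'app.ini' and the spec entries are hypothetical, and the
# companion ``validate`` module is assumed to be importable.
if __name__ == '__main__':
    from validate import Validator
    _spec = [
        'port = integer(1, 65535, default=8080)',
        'debug = boolean(default=False)',
    ]
    _cfg = ConfigObj('app.ini', configspec=_spec)
    _result = _cfg.validate(Validator(), preserve_errors=True)
    if _result is not True:
        # flatten the nested result dictionary into (sections, key, error)
        for _sections, _key, _error in flatten_errors(_cfg, _result):
            print _sections, _key, _error or 'missing'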
gpl-2.0
gotomypc/bigcouch
couchjs/scons/scons-local-2.0.1/SCons/Tool/sunc++.py
61
4752
"""SCons.Tool.sunc++ Tool-specific initialization for C++ on SunOS / Solaris. There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Tool.Tool() selection method. """ # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "src/engine/SCons/Tool/sunc++.py 5134 2010/08/16 23:02:40 bdeegan" import SCons import os import re import subprocess cplusplus = __import__('c++', globals(), locals(), []) package_info = {} def get_package_info(package_name, pkginfo, pkgchk): try: return package_info[package_name] except KeyError: version = None pathname = None try: sadm_contents = open('/var/sadm/install/contents', 'r').read() except EnvironmentError: pass else: sadm_re = re.compile('^(\S*/bin/CC)(=\S*)? 
%s$' % package_name, re.M) sadm_match = sadm_re.search(sadm_contents) if sadm_match: pathname = os.path.dirname(sadm_match.group(1)) try: p = subprocess.Popen([pkginfo, '-l', package_name], stdout=subprocess.PIPE, stderr=open('/dev/null', 'w')) except EnvironmentError: pass else: pkginfo_contents = p.communicate()[0] version_re = re.compile('^ *VERSION:\s*(.*)$', re.M) version_match = version_re.search(pkginfo_contents) if version_match: version = version_match.group(1) if pathname is None: try: p = subprocess.Popen([pkgchk, '-l', package_name], stdout=subprocess.PIPE, stderr=open('/dev/null', 'w')) except EnvironmentError: pass else: pkgchk_contents = p.communicate()[0] pathname_re = re.compile(r'^Pathname:\s*(.*/bin/CC)$', re.M) pathname_match = pathname_re.search(pkgchk_contents) if pathname_match: pathname = os.path.dirname(pathname_match.group(1)) package_info[package_name] = (pathname, version) return package_info[package_name] # use the package installer tool lslpp to figure out where cppc and what # version of it is installed def get_cppc(env): cxx = env.subst('$CXX') if cxx: cppcPath = os.path.dirname(cxx) else: cppcPath = None cppcVersion = None pkginfo = env.subst('$PKGINFO') pkgchk = env.subst('$PKGCHK') for package in ['SPROcpl']: path, version = get_package_info(package, pkginfo, pkgchk) if path and version: cppcPath, cppcVersion = path, version break return (cppcPath, 'CC', 'CC', cppcVersion) def generate(env): """Add Builders and construction variables for SunPRO C++.""" path, cxx, shcxx, version = get_cppc(env) if path: cxx = os.path.join(path, cxx) shcxx = os.path.join(path, shcxx) cplusplus.generate(env) env['CXX'] = cxx env['SHCXX'] = shcxx env['CXXVERSION'] = version env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS -KPIC') env['SHOBJPREFIX'] = 'so_' env['SHOBJSUFFIX'] = '.o' def exists(env): path, cxx, shcxx, version = get_cppc(env) if path and cxx: cppc = os.path.join(path, cxx) if os.path.exists(cppc): return cppc return None # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
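# ---------------------------------------------------------------------------
# Editor's sketch (assumption, not part of the SCons source): how a tool
# module like this is typically exercised from an SConstruct. Environment()
# loads the tool by name, which invokes exists()/generate() above; the
# construction variables then hold whatever get_cppc() detected. The target
# and source names are hypothetical.
#
#     env = Environment(tools=['default', 'sunc++'])
#     print env.get('CXX'), env.get('CXXVERSION')
#     env.SharedLibrary('example', ['example.cpp'])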
apache-2.0
project-magpie/enigma2-openpli
lib/python/Screens/InstallWizard.py
11
5928
from Screens.Screen import Screen
from Components.ConfigList import ConfigListScreen, ConfigList
from Components.ActionMap import ActionMap
from Components.Sources.StaticText import StaticText
from Components.config import config, ConfigSubsection, ConfigBoolean, getConfigListEntry, ConfigSelection, ConfigYesNo, ConfigIP
from Components.Network import iNetwork
from Components.Ipkg import IpkgComponent
from enigma import eDVBDB

config.misc.installwizard = ConfigSubsection()
config.misc.installwizard.hasnetwork = ConfigBoolean(default = False)
config.misc.installwizard.ipkgloaded = ConfigBoolean(default = False)
config.misc.installwizard.channellistdownloaded = ConfigBoolean(default = False)

class InstallWizard(Screen, ConfigListScreen):

    STATE_UPDATE = 0
    STATE_CHOISE_CHANNELLIST = 1
    STATE_CHOISE_SOFTCAM = 2

    def __init__(self, session, args = None):
        Screen.__init__(self, session)

        self.index = args
        self.list = []
        ConfigListScreen.__init__(self, self.list)

        if self.index == self.STATE_UPDATE:
            config.misc.installwizard.hasnetwork.value = False
            config.misc.installwizard.ipkgloaded.value = False
            modes = {0: " "}
            self.enabled = ConfigSelection(choices = modes, default = 0)
            self.adapters = [(iNetwork.getFriendlyAdapterName(x), x) for x in iNetwork.getAdapterList()]
            is_found = False
            for x in self.adapters:
                if x[1] == 'eth0':
                    if iNetwork.getAdapterAttribute(x[1], 'up'):
                        self.ipConfigEntry = ConfigIP(default = iNetwork.getAdapterAttribute(x[1], "ip"))
                        iNetwork.checkNetworkState(self.checkNetworkCB)
                        is_found = True  # wired adapter is up; wait for the async network check
                    else:
                        iNetwork.restartNetwork(self.checkNetworkLinkCB)
                    break
            if is_found is False:
                self.createMenu()
        elif self.index == self.STATE_CHOISE_CHANNELLIST:
            self.enabled = ConfigYesNo(default = True)
            modes = {"19e": "Astra 1", "23e": "Astra 3", "19e-23e": "Astra 1 Astra 3", "19e-23e-28e": "Astra 1 Astra 2 Astra 3", "13e-19e-23e-28e": "Astra 1 Astra 2 Astra 3 Hotbird"}
            self.channellist_type = ConfigSelection(choices = modes, default = "19e")
            self.createMenu()
        elif self.index == self.STATE_CHOISE_SOFTCAM:
            self.enabled = ConfigYesNo(default = True)
            modes = {"cccam": _("default") + " (CCcam)", "scam": "scam"}
            self.softcam_type = ConfigSelection(choices = modes, default = "cccam")
            self.createMenu()

    def checkNetworkCB(self, data):
        if data < 3:
            config.misc.installwizard.hasnetwork.value = True
        self.createMenu()

    def checkNetworkLinkCB(self, retval):
        if retval:
            iNetwork.checkNetworkState(self.checkNetworkCB)
        else:
            self.createMenu()

    def createMenu(self):
        try:
            test = self.index
        except:
            return
        self.list = []
        if self.index == self.STATE_UPDATE:
            if config.misc.installwizard.hasnetwork.value:
                self.list.append(getConfigListEntry(_("Your internet connection is working (ip: %s)") % (self.ipConfigEntry.getText()), self.enabled))
            else:
                self.list.append(getConfigListEntry(_("Your receiver does not have an internet connection"), self.enabled))
        elif self.index == self.STATE_CHOISE_CHANNELLIST:
            self.list.append(getConfigListEntry(_("Install channel list"), self.enabled))
            if self.enabled.value:
                self.list.append(getConfigListEntry(_("Channel list type"), self.channellist_type))
        elif self.index == self.STATE_CHOISE_SOFTCAM:
            self.list.append(getConfigListEntry(_("Install softcam"), self.enabled))
            if self.enabled.value:
                self.list.append(getConfigListEntry(_("Softcam type"), self.softcam_type))
        self["config"].list = self.list
        self["config"].l.setList(self.list)

    def keyLeft(self):
        if self.index == 0:
            return
        ConfigListScreen.keyLeft(self)
        self.createMenu()

    def keyRight(self):
        if self.index == 0:
            return
        ConfigListScreen.keyRight(self)
        self.createMenu()

    def run(self):
        if self.index == self.STATE_UPDATE:
            if config.misc.installwizard.hasnetwork.value:
                self.session.open(InstallWizardIpkgUpdater, self.index, _('Please wait (updating packages)'), IpkgComponent.CMD_UPDATE)
        elif self.index == self.STATE_CHOISE_CHANNELLIST and self.enabled.value:
            self.session.open(InstallWizardIpkgUpdater, self.index, _('Please wait (downloading channel list)'), IpkgComponent.CMD_REMOVE, {'package': 'enigma2-plugin-settings-henksat-' + self.channellist_type.value})
        elif self.index == self.STATE_CHOISE_SOFTCAM and self.enabled.value:
            self.session.open(InstallWizardIpkgUpdater, self.index, _('Please wait (downloading softcam)'), IpkgComponent.CMD_INSTALL, {'package': 'enigma2-plugin-softcams-' + self.softcam_type.value})
        return

class InstallWizardIpkgUpdater(Screen):
    skin = """
        <screen position="c-300,c-25" size="600,50" title=" ">
            <widget source="statusbar" render="Label" position="10,5" zPosition="10" size="e-10,30" halign="center" valign="center" font="Regular;22" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
        </screen>"""

    def __init__(self, session, index, info, cmd, pkg = None):
        self.skin = InstallWizardIpkgUpdater.skin
        Screen.__init__(self, session)

        self["statusbar"] = StaticText(info)

        self.pkg = pkg
        self.index = index
        self.state = 0

        self.ipkg = IpkgComponent()
        self.ipkg.addCallback(self.ipkgCallback)

        if self.index == InstallWizard.STATE_CHOISE_CHANNELLIST:
            self.ipkg.startCmd(cmd, {'package': 'enigma2-plugin-settings-*'})
        else:
            self.ipkg.startCmd(cmd, pkg)

    def ipkgCallback(self, event, param):
        if event == IpkgComponent.EVENT_DONE:
            if self.index == InstallWizard.STATE_UPDATE:
                config.misc.installwizard.ipkgloaded.value = True
            elif self.index == InstallWizard.STATE_CHOISE_CHANNELLIST:
                if self.state == 0:
                    self.ipkg.startCmd(IpkgComponent.CMD_INSTALL, self.pkg)
                    self.state = 1
                    return
                else:
                    config.misc.installwizard.channellistdownloaded.value = True
                    eDVBDB.getInstance().reloadBouquets()
                    eDVBDB.getInstance().reloadServicelist()
            self.close()
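# ---------------------------------------------------------------------------
# Editor's note (summary, not part of the original module): the channel-list
# branch drives ipkg in two phases. InstallWizardIpkgUpdater first runs
# CMD_REMOVE against 'enigma2-plugin-settings-*' to clear any previously
# installed settings package; when that finishes (EVENT_DONE, state 0 -> 1)
# it runs CMD_INSTALL for the selected
# 'enigma2-plugin-settings-henksat-<type>' package and only then closes.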
gpl-2.0
TeamLocker/Server
TeamLocker_Server/protobufs/Libsodium_pb2.py
1
3197
# Generated by the protocol buffer compiler.  DO NOT EDIT!
# source: protobufs/Libsodium.proto

import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


DESCRIPTOR = _descriptor.FileDescriptor(
  name='protobufs/Libsodium.proto',
  package='',
  syntax='proto3',
  serialized_pb=_b('\n\x19protobufs/Libsodium.proto\"R\n\rLibsodiumItem\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\x0c\x12\r\n\x05nonce\x18\x02 \x01(\x0c\x12\x11\n\tops_limit\x18\x03 \x01(\x03\x12\x11\n\tmem_limit\x18\x04 \x01(\x03\x42,\n*me.camerongray.teamlocker.client.protobufsb\x06proto3')
)


_LIBSODIUMITEM = _descriptor.Descriptor(
  name='LibsodiumItem',
  full_name='LibsodiumItem',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='data', full_name='LibsodiumItem.data', index=0,
      number=1, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='nonce', full_name='LibsodiumItem.nonce', index=1,
      number=2, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='ops_limit', full_name='LibsodiumItem.ops_limit', index=2,
      number=3, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='mem_limit', full_name='LibsodiumItem.mem_limit', index=3,
      number=4, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=29,
  serialized_end=111,
)

DESCRIPTOR.message_types_by_name['LibsodiumItem'] = _LIBSODIUMITEM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

LibsodiumItem = _reflection.GeneratedProtocolMessageType('LibsodiumItem', (_message.Message,), dict(
  DESCRIPTOR = _LIBSODIUMITEM,
  __module__ = 'protobufs.Libsodium_pb2'
  # @@protoc_insertion_point(class_scope:LibsodiumItem)
  ))
_sym_db.RegisterMessage(LibsodiumItem)


DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n*me.camerongray.teamlocker.client.protobufs'))
# @@protoc_insertion_point(module_scope)
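# ---------------------------------------------------------------------------
# Editor's sketch (assumption, not emitted by protoc): round-tripping the
# generated message with the standard protobuf message API. The field values
# below are placeholders.
#
#     item = LibsodiumItem(data=b'ciphertext', nonce=b'24-byte-nonce',
#                          ops_limit=4, mem_limit=33554432)
#     wire = item.SerializeToString()
#     decoded = LibsodiumItem.FromString(wire)
#     assert decoded.ops_limit == 4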
gpl-3.0
ghchinoy/tensorflow
tensorflow/contrib/distribute/python/tpu_strategy.py
4
1079
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TPU Distribution Strategy.

This is experimental. It's not ready for general use.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# pylint: disable=unused-import
from tensorflow.python.distribute.tpu_strategy import TPUStrategyV1 as TPUStrategy
from tensorflow.python.tpu.tpu_strategy_util import initialize_tpu_system
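# ---------------------------------------------------------------------------
# Editor's sketch (assumption, not part of the TensorFlow source): typical
# usage of the two re-exported symbols in the TF 1.x contrib era. The TPU
# address and the model-building helper are placeholders.
#
#     from tensorflow.python.distribute.cluster_resolver import TPUClusterResolver
#     resolver = TPUClusterResolver(tpu='grpc://10.0.0.1:8470')
#     initialize_tpu_system(resolver)
#     strategy = TPUStrategy(resolver)
#     with strategy.scope():
#         model = build_model()  # hypothetical model-building function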
apache-2.0
Zhongqilong/mykbengineer
kbe/res/scripts/common/Lib/multiprocessing/pool.py
79
24643
# # Module providing the `Pool` class for managing a process pool # # multiprocessing/pool.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = ['Pool', 'ThreadPool'] # # Imports # import threading import queue import itertools import collections import os import time import traceback # If threading is available then ThreadPool should be provided. Therefore # we avoid top-level imports which are liable to fail on some systems. from . import util from . import get_context, TimeoutError # # Constants representing the state of a pool # RUN = 0 CLOSE = 1 TERMINATE = 2 # # Miscellaneous # job_counter = itertools.count() def mapstar(args): return list(map(*args)) def starmapstar(args): return list(itertools.starmap(args[0], args[1])) # # Hack to embed stringification of remote traceback in local traceback # class RemoteTraceback(Exception): def __init__(self, tb): self.tb = tb def __str__(self): return self.tb class ExceptionWithTraceback: def __init__(self, exc, tb): tb = traceback.format_exception(type(exc), exc, tb) tb = ''.join(tb) self.exc = exc self.tb = '\n"""\n%s"""' % tb def __reduce__(self): return rebuild_exc, (self.exc, self.tb) def rebuild_exc(exc, tb): exc.__cause__ = RemoteTraceback(tb) return exc # # Code run by worker processes # class MaybeEncodingError(Exception): """Wraps possible unpickleable errors, so they can be safely sent through the socket.""" def __init__(self, exc, value): self.exc = repr(exc) self.value = repr(value) super(MaybeEncodingError, self).__init__(self.exc, self.value) def __str__(self): return "Error sending result: '%s'. Reason: '%s'" % (self.value, self.exc) def __repr__(self): return "<MaybeEncodingError: %s>" % str(self) def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None, wrap_exception=False): assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0) put = outqueue.put get = inqueue.get if hasattr(inqueue, '_writer'): inqueue._writer.close() outqueue._reader.close() if initializer is not None: initializer(*initargs) completed = 0 while maxtasks is None or (maxtasks and completed < maxtasks): try: task = get() except (EOFError, OSError): util.debug('worker got EOFError or OSError -- exiting') break if task is None: util.debug('worker got sentinel -- exiting') break job, i, func, args, kwds = task try: result = (True, func(*args, **kwds)) except Exception as e: if wrap_exception: e = ExceptionWithTraceback(e, e.__traceback__) result = (False, e) try: put((job, i, result)) except Exception as e: wrapped = MaybeEncodingError(e, result[1]) util.debug("Possible encoding error while sending result: %s" % ( wrapped)) put((job, i, (False, wrapped))) completed += 1 util.debug('worker exiting after %d tasks' % completed) # # Class representing a process pool # class Pool(object): ''' Class which supports an async version of applying functions to arguments. 
''' _wrap_exception = True def Process(self, *args, **kwds): return self._ctx.Process(*args, **kwds) def __init__(self, processes=None, initializer=None, initargs=(), maxtasksperchild=None, context=None): self._ctx = context or get_context() self._setup_queues() self._taskqueue = queue.Queue() self._cache = {} self._state = RUN self._maxtasksperchild = maxtasksperchild self._initializer = initializer self._initargs = initargs if processes is None: processes = os.cpu_count() or 1 if processes < 1: raise ValueError("Number of processes must be at least 1") if initializer is not None and not callable(initializer): raise TypeError('initializer must be a callable') self._processes = processes self._pool = [] self._repopulate_pool() self._worker_handler = threading.Thread( target=Pool._handle_workers, args=(self, ) ) self._worker_handler.daemon = True self._worker_handler._state = RUN self._worker_handler.start() self._task_handler = threading.Thread( target=Pool._handle_tasks, args=(self._taskqueue, self._quick_put, self._outqueue, self._pool, self._cache) ) self._task_handler.daemon = True self._task_handler._state = RUN self._task_handler.start() self._result_handler = threading.Thread( target=Pool._handle_results, args=(self._outqueue, self._quick_get, self._cache) ) self._result_handler.daemon = True self._result_handler._state = RUN self._result_handler.start() self._terminate = util.Finalize( self, self._terminate_pool, args=(self._taskqueue, self._inqueue, self._outqueue, self._pool, self._worker_handler, self._task_handler, self._result_handler, self._cache), exitpriority=15 ) def _join_exited_workers(self): """Cleanup after any worker processes which have exited due to reaching their specified lifetime. Returns True if any workers were cleaned up. """ cleaned = False for i in reversed(range(len(self._pool))): worker = self._pool[i] if worker.exitcode is not None: # worker exited util.debug('cleaning up worker %d' % i) worker.join() cleaned = True del self._pool[i] return cleaned def _repopulate_pool(self): """Bring the number of pool processes up to the specified number, for use after reaping workers which have exited. """ for i in range(self._processes - len(self._pool)): w = self.Process(target=worker, args=(self._inqueue, self._outqueue, self._initializer, self._initargs, self._maxtasksperchild, self._wrap_exception) ) self._pool.append(w) w.name = w.name.replace('Process', 'PoolWorker') w.daemon = True w.start() util.debug('added worker') def _maintain_pool(self): """Clean up any exited workers and start replacements for them. """ if self._join_exited_workers(): self._repopulate_pool() def _setup_queues(self): self._inqueue = self._ctx.SimpleQueue() self._outqueue = self._ctx.SimpleQueue() self._quick_put = self._inqueue._writer.send self._quick_get = self._outqueue._reader.recv def apply(self, func, args=(), kwds={}): ''' Equivalent of `func(*args, **kwds)`. ''' assert self._state == RUN return self.apply_async(func, args, kwds).get() def map(self, func, iterable, chunksize=None): ''' Apply `func` to each element in `iterable`, collecting the results in a list that is returned. ''' return self._map_async(func, iterable, mapstar, chunksize).get() def starmap(self, func, iterable, chunksize=None): ''' Like `map()` method but the elements of the `iterable` are expected to be iterables as well and will be unpacked as arguments. Hence `func` and (a, b) becomes func(a, b). 
''' return self._map_async(func, iterable, starmapstar, chunksize).get() def starmap_async(self, func, iterable, chunksize=None, callback=None, error_callback=None): ''' Asynchronous version of `starmap()` method. ''' return self._map_async(func, iterable, starmapstar, chunksize, callback, error_callback) def imap(self, func, iterable, chunksize=1): ''' Equivalent of `map()` -- can be MUCH slower than `Pool.map()`. ''' if self._state != RUN: raise ValueError("Pool not running") if chunksize == 1: result = IMapIterator(self._cache) self._taskqueue.put((((result._job, i, func, (x,), {}) for i, x in enumerate(iterable)), result._set_length)) return result else: assert chunksize > 1 task_batches = Pool._get_tasks(func, iterable, chunksize) result = IMapIterator(self._cache) self._taskqueue.put((((result._job, i, mapstar, (x,), {}) for i, x in enumerate(task_batches)), result._set_length)) return (item for chunk in result for item in chunk) def imap_unordered(self, func, iterable, chunksize=1): ''' Like `imap()` method but ordering of results is arbitrary. ''' if self._state != RUN: raise ValueError("Pool not running") if chunksize == 1: result = IMapUnorderedIterator(self._cache) self._taskqueue.put((((result._job, i, func, (x,), {}) for i, x in enumerate(iterable)), result._set_length)) return result else: assert chunksize > 1 task_batches = Pool._get_tasks(func, iterable, chunksize) result = IMapUnorderedIterator(self._cache) self._taskqueue.put((((result._job, i, mapstar, (x,), {}) for i, x in enumerate(task_batches)), result._set_length)) return (item for chunk in result for item in chunk) def apply_async(self, func, args=(), kwds={}, callback=None, error_callback=None): ''' Asynchronous version of `apply()` method. ''' if self._state != RUN: raise ValueError("Pool not running") result = ApplyResult(self._cache, callback, error_callback) self._taskqueue.put(([(result._job, None, func, args, kwds)], None)) return result def map_async(self, func, iterable, chunksize=None, callback=None, error_callback=None): ''' Asynchronous version of `map()` method. ''' return self._map_async(func, iterable, mapstar, chunksize, callback, error_callback) def _map_async(self, func, iterable, mapper, chunksize=None, callback=None, error_callback=None): ''' Helper function to implement map, starmap and their async counterparts. ''' if self._state != RUN: raise ValueError("Pool not running") if not hasattr(iterable, '__len__'): iterable = list(iterable) if chunksize is None: chunksize, extra = divmod(len(iterable), len(self._pool) * 4) if extra: chunksize += 1 if len(iterable) == 0: chunksize = 0 task_batches = Pool._get_tasks(func, iterable, chunksize) result = MapResult(self._cache, chunksize, len(iterable), callback, error_callback=error_callback) self._taskqueue.put((((result._job, i, mapper, (x,), {}) for i, x in enumerate(task_batches)), None)) return result @staticmethod def _handle_workers(pool): thread = threading.current_thread() # Keep maintaining workers until the cache gets drained, unless the pool # is terminated. 
while thread._state == RUN or (pool._cache and thread._state != TERMINATE): pool._maintain_pool() time.sleep(0.1) # send sentinel to stop workers pool._taskqueue.put(None) util.debug('worker handler exiting') @staticmethod def _handle_tasks(taskqueue, put, outqueue, pool, cache): thread = threading.current_thread() for taskseq, set_length in iter(taskqueue.get, None): i = -1 for i, task in enumerate(taskseq): if thread._state: util.debug('task handler found thread._state != RUN') break try: put(task) except Exception as e: job, ind = task[:2] try: cache[job]._set(ind, (False, e)) except KeyError: pass else: if set_length: util.debug('doing set_length()') set_length(i+1) continue break else: util.debug('task handler got sentinel') try: # tell result handler to finish when cache is empty util.debug('task handler sending sentinel to result handler') outqueue.put(None) # tell workers there is no more work util.debug('task handler sending sentinel to workers') for p in pool: put(None) except OSError: util.debug('task handler got OSError when sending sentinels') util.debug('task handler exiting') @staticmethod def _handle_results(outqueue, get, cache): thread = threading.current_thread() while 1: try: task = get() except (OSError, EOFError): util.debug('result handler got EOFError/OSError -- exiting') return if thread._state: assert thread._state == TERMINATE util.debug('result handler found thread._state=TERMINATE') break if task is None: util.debug('result handler got sentinel') break job, i, obj = task try: cache[job]._set(i, obj) except KeyError: pass while cache and thread._state != TERMINATE: try: task = get() except (OSError, EOFError): util.debug('result handler got EOFError/OSError -- exiting') return if task is None: util.debug('result handler ignoring extra sentinel') continue job, i, obj = task try: cache[job]._set(i, obj) except KeyError: pass if hasattr(outqueue, '_reader'): util.debug('ensuring that outqueue is not full') # If we don't make room available in outqueue then # attempts to add the sentinel (None) to outqueue may # block. There is guaranteed to be no more than 2 sentinels. 
try: for i in range(10): if not outqueue._reader.poll(): break get() except (OSError, EOFError): pass util.debug('result handler exiting: len(cache)=%s, thread._state=%s', len(cache), thread._state) @staticmethod def _get_tasks(func, it, size): it = iter(it) while 1: x = tuple(itertools.islice(it, size)) if not x: return yield (func, x) def __reduce__(self): raise NotImplementedError( 'pool objects cannot be passed between processes or pickled' ) def close(self): util.debug('closing pool') if self._state == RUN: self._state = CLOSE self._worker_handler._state = CLOSE def terminate(self): util.debug('terminating pool') self._state = TERMINATE self._worker_handler._state = TERMINATE self._terminate() def join(self): util.debug('joining pool') assert self._state in (CLOSE, TERMINATE) self._worker_handler.join() self._task_handler.join() self._result_handler.join() for p in self._pool: p.join() @staticmethod def _help_stuff_finish(inqueue, task_handler, size): # task_handler may be blocked trying to put items on inqueue util.debug('removing tasks from inqueue until task handler finished') inqueue._rlock.acquire() while task_handler.is_alive() and inqueue._reader.poll(): inqueue._reader.recv() time.sleep(0) @classmethod def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool, worker_handler, task_handler, result_handler, cache): # this is guaranteed to only be called once util.debug('finalizing pool') worker_handler._state = TERMINATE task_handler._state = TERMINATE util.debug('helping task handler/workers to finish') cls._help_stuff_finish(inqueue, task_handler, len(pool)) assert result_handler.is_alive() or len(cache) == 0 result_handler._state = TERMINATE outqueue.put(None) # sentinel # We must wait for the worker handler to exit before terminating # workers because we don't want workers to be restarted behind our back. util.debug('joining worker handler') if threading.current_thread() is not worker_handler: worker_handler.join() # Terminate workers which haven't already finished. 
if pool and hasattr(pool[0], 'terminate'): util.debug('terminating workers') for p in pool: if p.exitcode is None: p.terminate() util.debug('joining task handler') if threading.current_thread() is not task_handler: task_handler.join() util.debug('joining result handler') if threading.current_thread() is not result_handler: result_handler.join() if pool and hasattr(pool[0], 'terminate'): util.debug('joining pool workers') for p in pool: if p.is_alive(): # worker has not yet exited util.debug('cleaning up worker %d' % p.pid) p.join() def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.terminate() # # Class whose instances are returned by `Pool.apply_async()` # class ApplyResult(object): def __init__(self, cache, callback, error_callback): self._event = threading.Event() self._job = next(job_counter) self._cache = cache self._callback = callback self._error_callback = error_callback cache[self._job] = self def ready(self): return self._event.is_set() def successful(self): assert self.ready() return self._success def wait(self, timeout=None): self._event.wait(timeout) def get(self, timeout=None): self.wait(timeout) if not self.ready(): raise TimeoutError if self._success: return self._value else: raise self._value def _set(self, i, obj): self._success, self._value = obj if self._callback and self._success: self._callback(self._value) if self._error_callback and not self._success: self._error_callback(self._value) self._event.set() del self._cache[self._job] AsyncResult = ApplyResult # create alias -- see #17805 # # Class whose instances are returned by `Pool.map_async()` # class MapResult(ApplyResult): def __init__(self, cache, chunksize, length, callback, error_callback): ApplyResult.__init__(self, cache, callback, error_callback=error_callback) self._success = True self._value = [None] * length self._chunksize = chunksize if chunksize <= 0: self._number_left = 0 self._event.set() del cache[self._job] else: self._number_left = length//chunksize + bool(length % chunksize) def _set(self, i, success_result): success, result = success_result if success: self._value[i*self._chunksize:(i+1)*self._chunksize] = result self._number_left -= 1 if self._number_left == 0: if self._callback: self._callback(self._value) del self._cache[self._job] self._event.set() else: self._success = False self._value = result if self._error_callback: self._error_callback(self._value) del self._cache[self._job] self._event.set() # # Class whose instances are returned by `Pool.imap()` # class IMapIterator(object): def __init__(self, cache): self._cond = threading.Condition(threading.Lock()) self._job = next(job_counter) self._cache = cache self._items = collections.deque() self._index = 0 self._length = None self._unsorted = {} cache[self._job] = self def __iter__(self): return self def next(self, timeout=None): self._cond.acquire() try: try: item = self._items.popleft() except IndexError: if self._index == self._length: raise StopIteration self._cond.wait(timeout) try: item = self._items.popleft() except IndexError: if self._index == self._length: raise StopIteration raise TimeoutError finally: self._cond.release() success, value = item if success: return value raise value __next__ = next # XXX def _set(self, i, obj): self._cond.acquire() try: if self._index == i: self._items.append(obj) self._index += 1 while self._index in self._unsorted: obj = self._unsorted.pop(self._index) self._items.append(obj) self._index += 1 self._cond.notify() else: self._unsorted[i] = obj if self._index == 
self._length: del self._cache[self._job] finally: self._cond.release() def _set_length(self, length): self._cond.acquire() try: self._length = length if self._index == self._length: self._cond.notify() del self._cache[self._job] finally: self._cond.release() # # Class whose instances are returned by `Pool.imap_unordered()` # class IMapUnorderedIterator(IMapIterator): def _set(self, i, obj): self._cond.acquire() try: self._items.append(obj) self._index += 1 self._cond.notify() if self._index == self._length: del self._cache[self._job] finally: self._cond.release() # # # class ThreadPool(Pool): _wrap_exception = False @staticmethod def Process(*args, **kwds): from .dummy import Process return Process(*args, **kwds) def __init__(self, processes=None, initializer=None, initargs=()): Pool.__init__(self, processes, initializer, initargs) def _setup_queues(self): self._inqueue = queue.Queue() self._outqueue = queue.Queue() self._quick_put = self._inqueue.put self._quick_get = self._outqueue.get @staticmethod def _help_stuff_finish(inqueue, task_handler, size): # put sentinels at head of inqueue to make workers finish inqueue.not_empty.acquire() try: inqueue.queue.clear() inqueue.queue.extend([None] * size) inqueue.not_empty.notify_all() finally: inqueue.not_empty.release()
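# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It contrasts
# imap(), which streams results lazily and in order, with map(), which blocks
# until every chunk has completed -- the trade-off the docstrings above warn
# about. The worker function name `_example_square` is our own invention.

def _example_square(x):
    return x * x


if __name__ == '__main__':
    from multiprocessing import Pool as _ExamplePool
    with _ExamplePool(2) as pool:
        # One task per item (chunksize=1): results arrive in order as soon
        # as each finishes, but dispatch overhead dominates for cheap work.
        for value in pool.imap(_example_square, range(5)):
            print(value)
        # Batching into chunks of 4 amortizes the per-item IPC cost.
        print(pool.map(_example_square, range(12), chunksize=4))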
lgpl-3.0
ThinkingBridge/platform_external_chromium_org
content/browser/gpu/generate_webgl_conformance_test_list.py
38
3326
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Auto-generates the WebGL conformance test list header file.

Parses the WebGL conformance test *.txt file, which contains a list of URLs
for individual conformance tests (each on a new line). It recursively parses
*.txt files. For each test URL, the matching gtest call is created and sent
to the C++ header file.
"""

import getopt
import os
import re
import sys

COPYRIGHT = """\
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
"""
WARNING = """\
// DO NOT EDIT! This file is auto-generated by
// generate_webgl_conformance_test_list.py
// It is included by webgl_conformance_test.cc
"""
HEADER_GUARD = """\
#ifndef CONTENT_TEST_GPU_WEBGL_CONFORMANCE_TEST_LIST_AUTOGEN_H_
#define CONTENT_TEST_GPU_WEBGL_CONFORMANCE_TEST_LIST_AUTOGEN_H_
"""
HEADER_GUARD_END = """
#endif // CONTENT_TEST_GPU_WEBGL_CONFORMANCE_TEST_LIST_AUTOGEN_H_
"""

# Assume this script is run from the src/content/test/gpu directory.
INPUT_DIR = "../../../third_party/webgl_conformance"
INPUT_FILE = "00_test_list.txt"
OUTPUT_FILE = "webgl_conformance_test_list_autogen.h"

def main(argv):
  """Main function for the WebGL conformance test list generator.
  """
  if not os.path.exists(os.path.join(INPUT_DIR, INPUT_FILE)):
    print >> sys.stderr, "ERROR: WebGL conformance tests do not exist."
    print >> sys.stderr, "Run the script from the directory containing it."
    return 1

  output = open(OUTPUT_FILE, "w")
  output.write(COPYRIGHT)
  output.write(WARNING)
  output.write(HEADER_GUARD)

  test_prefix = {}
  unparsed_files = [INPUT_FILE]
  while unparsed_files:
    filename = unparsed_files.pop(0)
    try:
      input = open(os.path.join(INPUT_DIR, filename))
    except IOError:
      print >> sys.stderr, "WARNING: %s does not exist (skipped)." % filename
      continue

    for url in input:
      url = re.sub("//.*", "", url)
      url = re.sub("#.*", "", url)
      url = url.strip()
      # Some filenames have options before them, for example:
      # --min-version 1.0.2 testname.html
      pos = url.rfind(" ")
      if pos != -1:
        url = url[pos+1:]

      if not url:
        continue

      # Cannot use os.path.join() because Windows would use "\\", but this
      # path is sent through JavaScript.
      if os.path.dirname(filename):
        url = "%s/%s" % (os.path.dirname(filename), url)

      # Queue all text files for parsing, because test list URLs are nested
      # through .txt files.
      if re.match(".+\.txt\s*$", url):
        unparsed_files.append(url)

      # Convert the filename to a valid test name and output the gtest code.
      else:
        name = os.path.splitext(url)[0]
        name = re.sub("\W+", "_", name)
        if os.path.exists(os.path.join(INPUT_DIR, url)):
          output.write('CONFORMANCE_TEST(%s,\n "%s");\n' % (name, url))
        else:
          print >> sys.stderr, "WARNING: %s does not exist (skipped)." % url
    input.close()

  output.write(HEADER_GUARD_END)
  output.close()
  return 0

if __name__ == "__main__":
  sys.exit(main(sys.argv[1:]))
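# ---------------------------------------------------------------------------
# Worked example (hypothetical input line, for illustration only): a test
# list entry such as
#
#   --min-version 1.0.2 conformance/attribs/gl-enable-vertex-attrib.html
#
# first has its "--min-version 1.0.2" prefix stripped by the rfind(" ") step
# above; splitext() plus the \W+ substitution then turn the URL into a valid
# test name, so the generated header would contain:
#
#   CONFORMANCE_TEST(conformance_attribs_gl_enable_vertex_attrib,
#    "conformance/attribs/gl-enable-vertex-attrib.html");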
bsd-3-clause
toanalien/phantomjs
src/qt/qtwebkit/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/__init__.py
129
7351
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


"""WebSocket extension for Apache HTTP Server.

mod_pywebsocket is a WebSocket extension for Apache HTTP Server
intended for testing or experimental purposes. mod_python is required.


Installation
============

0. Prepare an Apache HTTP Server for which mod_python is enabled.

1. Specify the following Apache HTTP Server directives to suit your
   configuration.

   If mod_pywebsocket is not in the Python path, specify the following.
   <websock_lib> is the directory where mod_pywebsocket is installed.

       PythonPath "sys.path+['<websock_lib>']"

   Always specify the following. <websock_handlers> is the directory where
   user-written WebSocket handlers are placed.

       PythonOption mod_pywebsocket.handler_root <websock_handlers>
       PythonHeaderParserHandler mod_pywebsocket.headerparserhandler

   To limit the search for WebSocket handlers to a directory <scan_dir>
   under <websock_handlers>, configure as follows:

       PythonOption mod_pywebsocket.handler_scan <scan_dir>

   <scan_dir> is useful in saving scan time when <websock_handlers>
   contains many non-WebSocket handler files.

   If you want to allow handlers whose canonical path is not under the root
   directory (i.e. symbolic link is in root directory but its target is
   not), configure as follows:

       PythonOption mod_pywebsocket.allow_handlers_outside_root_dir On

   Example snippet of httpd.conf:
   (mod_pywebsocket is in /websock_lib, WebSocket handlers are in
   /websock_handlers, port is 80 for ws, 443 for wss.)

       <IfModule python_module>
         PythonPath "sys.path+['/websock_lib']"
         PythonOption mod_pywebsocket.handler_root /websock_handlers
         PythonHeaderParserHandler mod_pywebsocket.headerparserhandler
       </IfModule>

2. Tune Apache parameters for serving WebSocket. Note that at least the
   TimeOut directive (a core feature) and the RequestReadTimeout directive
   (mod_reqtimeout) should be adjusted so that they do not kill connections
   after only a few seconds of idle time.

3. Verify installation. You can use example/console.html to poke the server.


Writing WebSocket handlers
==========================

When a WebSocket request comes in, the resource name specified in the
handshake is considered as if it is a file path under <websock_handlers>
and the handler defined in <websock_handlers>/<resource_name>_wsh.py is
invoked.

For example, if the resource name is /example/chat, the handler defined in
<websock_handlers>/example/chat_wsh.py is invoked.

A WebSocket handler is composed of the following three functions:

    web_socket_do_extra_handshake(request)
    web_socket_transfer_data(request)
    web_socket_passive_closing_handshake(request)

where:
    request: mod_python request.

web_socket_do_extra_handshake is called during the handshake after the
headers are successfully parsed and WebSocket properties (ws_location,
ws_origin, and ws_resource) are added to request. A handler can reject the
request by raising an exception.

A request object has the following properties that you can use during the
extra handshake (web_socket_do_extra_handshake):
- ws_resource
- ws_origin
- ws_version
- ws_location (HyBi 00 only)
- ws_extensions (HyBi 06 and later)
- ws_deflate (HyBi 06 and later)
- ws_protocol
- ws_requested_protocols (HyBi 06 and later)

The last two are a bit tricky. See the next subsection.


Subprotocol Negotiation
-----------------------

For HyBi 06 and later, ws_protocol is always set to None when
web_socket_do_extra_handshake is called. If ws_requested_protocols is not
None, you must choose one subprotocol from this list and set it to
ws_protocol.

For HyBi 00, when web_socket_do_extra_handshake is called, ws_protocol is
set to the value given by the client in the Sec-WebSocket-Protocol header,
or None if no such header was found in the opening handshake request.
Finish the extra handshake with ws_protocol untouched to accept the
requested subprotocol. Then, the Sec-WebSocket-Protocol header will be sent
to the client in the response with the same value as requested. Raise an
exception in web_socket_do_extra_handshake to reject the requested
subprotocol.


Data Transfer
-------------

web_socket_transfer_data is called after the handshake completed
successfully. A handler can receive/send messages from/to the client using
request. The mod_pywebsocket.msgutil module provides utilities for data
transfer.

You can receive a message by the following statement.

    message = request.ws_stream.receive_message()

This call blocks until any complete text frame arrives, and the payload
data of the incoming frame will be stored into message. When you're using
the IETF HyBi 00 or later protocol, receive_message() will return None when
it receives a client-initiated closing handshake. When any error occurs,
receive_message() will raise some exception.

You can send a message by the following statement.

    request.ws_stream.send_message(message)


Closing Connection
------------------

Executing the following statement, or simply returning from
web_socket_transfer_data, causes the connection to close.

    request.ws_stream.close_connection()

close_connection will wait for a closing handshake acknowledgement from
the client. If it cannot receive a valid acknowledgement, it raises an
exception.

web_socket_passive_closing_handshake is called immediately after the server
receives an incoming closing frame from the client peer. You can specify
the code and reason by return values. They are sent as an outgoing closing
frame from the server. A request object has the following properties that
you can use in web_socket_passive_closing_handshake.
- ws_close_code
- ws_close_reason


Threading
---------

A WebSocket handler must be thread-safe if the server (Apache or
standalone.py) is configured to use threads.
"""


# vi:sts=4 sw=4 et tw=72
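# ---------------------------------------------------------------------------
# Illustrative handler sketch (our own example; it is not shipped with
# mod_pywebsocket). A minimal echo handler following the three-function
# contract documented above would live in a separate file, e.g.
# <websock_handlers>/echo_wsh.py, with contents like:
#
#     def web_socket_do_extra_handshake(request):
#         pass  # accept any origin; raise an exception to reject
#
#     def web_socket_transfer_data(request):
#         while True:
#             message = request.ws_stream.receive_message()
#             if message is None:
#                 return  # client started the closing handshake
#             request.ws_stream.send_message(message)
#
#     def web_socket_passive_closing_handshake(request):
#         return 1000, ''  # code and reason for the outgoing closing frame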
bsd-3-clause
macks22/scikit-learn
sklearn/externals/joblib/_memory_helpers.py
303
3605
try:
    # Available in Python 3
    from tokenize import open as open_py_source
except ImportError:
    # Copied from python3 tokenize
    from codecs import lookup, BOM_UTF8
    import re
    from io import TextIOWrapper, open

    cookie_re = re.compile("coding[:=]\s*([-\w.]+)")

    def _get_normal_name(orig_enc):
        """Imitates get_normal_name in tokenizer.c."""
        # Only care about the first 12 characters.
        enc = orig_enc[:12].lower().replace("_", "-")
        if enc == "utf-8" or enc.startswith("utf-8-"):
            return "utf-8"
        if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
           enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
            return "iso-8859-1"
        return orig_enc

    def _detect_encoding(readline):
        """
        The detect_encoding() function is used to detect the encoding that
        should be used to decode a Python source file. It requires one
        argument, readline, in the same way as the tokenize() generator.

        It will call readline a maximum of twice, and return the encoding
        used (as a string) and a list of any lines (left as bytes) it has
        read in.

        It detects the encoding from the presence of a utf-8 bom or an
        encoding cookie as specified in pep-0263. If both a bom and a cookie
        are present, but disagree, a SyntaxError will be raised. If the
        encoding cookie is an invalid charset, raise a SyntaxError. Note
        that if a utf-8 bom is found, 'utf-8-sig' is returned.

        If no encoding is specified, then the default of 'utf-8' will be
        returned.
        """
        bom_found = False
        encoding = None
        default = 'utf-8'

        def read_or_stop():
            try:
                return readline()
            except StopIteration:
                return b''

        def find_cookie(line):
            try:
                line_string = line.decode('ascii')
            except UnicodeDecodeError:
                return None

            matches = cookie_re.findall(line_string)
            if not matches:
                return None
            encoding = _get_normal_name(matches[0])
            try:
                codec = lookup(encoding)
            except LookupError:
                # This behaviour mimics the Python interpreter
                raise SyntaxError("unknown encoding: " + encoding)

            if bom_found:
                if codec.name != 'utf-8':
                    # This behaviour mimics the Python interpreter
                    raise SyntaxError('encoding problem: utf-8')
                encoding += '-sig'
            return encoding

        first = read_or_stop()
        if first.startswith(BOM_UTF8):
            bom_found = True
            first = first[3:]
            default = 'utf-8-sig'
        if not first:
            return default, []

        encoding = find_cookie(first)
        if encoding:
            return encoding, [first]

        second = read_or_stop()
        if not second:
            return default, [first]

        encoding = find_cookie(second)
        if encoding:
            return encoding, [first, second]

        return default, [first, second]

    def open_py_source(filename):
        """Open a file in read only mode using the encoding detected by
        detect_encoding().
        """
        buffer = open(filename, 'rb')
        encoding, lines = _detect_encoding(buffer.readline)
        buffer.seek(0)
        text = TextIOWrapper(buffer, encoding, line_buffering=True)
        text.mode = 'r'
        return text
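# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of joblib): open_py_source() honours a
# PEP 263 coding cookie or a UTF-8 BOM, where a plain open() under Python 2
# would not. Reading this very file keeps the example self-contained.

if __name__ == '__main__':
    with open_py_source(__file__) as source:
        print(source.readline().rstrip())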
bsd-3-clause
atsao72/sympy
sympy/ntheory/egyptian_fraction.py
63
6082
from __future__ import print_function, division

import sympy.polys

from sympy import Integer
from sympy.core.compatibility import range
from fractions import gcd


def egyptian_fraction(r, algorithm="Greedy"):
    """
    Return the list of denominators of an Egyptian fraction
    expansion [1]_ of the given rational `r`.

    Parameters
    ==========

    r : Rational
        a positive rational number.
    algorithm : { "Greedy", "Graham Jewett", "Takenouchi", "Golomb" }, optional
        Denotes the algorithm to be used (the default is "Greedy").

    Examples
    ========

    >>> from sympy import Rational
    >>> from sympy.ntheory.egyptian_fraction import egyptian_fraction
    >>> egyptian_fraction(Rational(3, 7))
    [3, 11, 231]
    >>> egyptian_fraction(Rational(3, 7), "Graham Jewett")
    [7, 8, 9, 56, 57, 72, 3192]
    >>> egyptian_fraction(Rational(3, 7), "Takenouchi")
    [4, 7, 28]
    >>> egyptian_fraction(Rational(3, 7), "Golomb")
    [3, 15, 35]
    >>> egyptian_fraction(Rational(11, 5), "Golomb")
    [1, 2, 3, 4, 9, 234, 1118, 2580]

    See Also
    ========

    sympy.core.numbers.Rational

    Notes
    =====

    Currently the following algorithms are supported:

    1) Greedy Algorithm

       Also called the Fibonacci-Sylvester algorithm [2]_. At each step,
       extract the largest unit fraction less than the target and replace
       the target with the remainder. It has some distinct properties:

       a) Given `p/q` in lowest terms, generates an expansion of maximum
          length `p`. Even as the numerators get large, the number of terms
          is seldom more than a handful.

       b) Uses minimal memory.

       c) The terms can blow up (standard examples of this are 5/121 and
          31/311). The denominator is at most squared at each step
          (doubly-exponential growth) and typically exhibits
          singly-exponential growth.

    2) Graham Jewett Algorithm

       The algorithm suggested by the result of Graham and Jewett. Note
       that this has a tendency to blow up: the length of the resulting
       expansion is always ``2**(x/gcd(x, y)) - 1``. See [3]_.

    3) Takenouchi Algorithm

       The algorithm suggested by Takenouchi (1921). Differs from the
       Graham-Jewett algorithm only in the handling of duplicates. See [3]_.

    4) Golomb's Algorithm

       A method given by Golomb (1962), using modular arithmetic and
       inverses. It yields the same results as a method using continued
       fractions proposed by Bleicher (1972). See [4]_.

    If the given rational is greater than or equal to 1, a greedy algorithm
    of summing the harmonic sequence 1/1 + 1/2 + 1/3 + ... is used, taking
    all the unit fractions of this sequence until adding one more would be
    greater than the given number. This list of denominators is prefixed to
    the result from the requested algorithm used on the remainder. For
    example, if r is 8/3, using the Greedy algorithm, we get
    [1, 2, 3, 4, 5, 6, 7, 14, 420], where the beginning of the sequence,
    [1, 2, 3, 4, 5, 6, 7], is part of the harmonic sequence summing to
    363/140, leaving a remainder of 31/420, which yields [14, 420] by the
    Greedy algorithm. The result of
    egyptian_fraction(Rational(8, 3), "Golomb") is
    [1, 2, 3, 4, 5, 6, 7, 14, 574, 2788, 6460, 11590, 33062, 113820],
    and so on.

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Egyptian_fraction
    .. [2] https://en.wikipedia.org/wiki/Greedy_algorithm_for_Egyptian_fractions
    .. [3] http://www.ics.uci.edu/~eppstein/numth/egypt/conflict.html
    .. [4] http://ami.ektf.hu/uploads/papers/finalpdf/AMI_42_from129to134.pdf

    """
    if r <= 0:
        raise ValueError("Value must be positive")

    prefix, rem = egypt_harmonic(r)
    if rem == 0:
        return prefix
    x, y = rem.as_numer_denom()

    if algorithm == "Greedy":
        return prefix + egypt_greedy(x, y)
    elif algorithm == "Graham Jewett":
        return prefix + egypt_graham_jewett(x, y)
    elif algorithm == "Takenouchi":
        return prefix + egypt_takenouchi(x, y)
    elif algorithm == "Golomb":
        return prefix + egypt_golomb(x, y)
    else:
        raise ValueError("Entered invalid algorithm")


def egypt_greedy(x, y):
    if x == 1:
        return [y]
    else:
        a = (-y) % (x)
        b = y*(y//x + 1)
        c = gcd(a, b)
        if c > 1:
            num, denom = a//c, b//c
        else:
            num, denom = a, b
        return [y//x + 1] + egypt_greedy(num, denom)


def egypt_graham_jewett(x, y):
    l = [y] * x

    # l is now a list of integers whose reciprocals sum to x/y.
    # we shall now proceed to manipulate the elements of l without
    # changing the reciprocated sum until all elements are unique.

    while len(l) != len(set(l)):
        l.sort()  # so the list has duplicates. find a smallest pair
        for i in range(len(l) - 1):
            if l[i] == l[i + 1]:
                break
        # we have now identified a pair of identical
        # elements: l[i] and l[i + 1].
        # now comes the application of the result of graham and jewett:
        l[i + 1] = l[i] + 1
        # and we just iterate that until the list has no duplicates.
        l.append(l[i]*(l[i] + 1))
    return sorted(l)


def egypt_takenouchi(x, y):
    l = [y] * x
    while len(l) != len(set(l)):
        l.sort()
        for i in range(len(l) - 1):
            if l[i] == l[i + 1]:
                break
        k = l[i]
        if k % 2 == 0:
            l[i] = l[i] // 2
            del l[i + 1]
        else:
            l[i], l[i + 1] = (k + 1)//2, k*(k + 1)//2
    return sorted(l)


def egypt_golomb(x, y):
    if x == 1:
        return [y]
    xp = sympy.polys.ZZ.invert(int(x), int(y))
    rv = [Integer(xp*y)]
    rv.extend(egypt_golomb((x*xp - 1)//y, xp))
    return sorted(rv)


def egypt_harmonic(r):
    rv = []
    d = Integer(1)
    acc = Integer(0)
    while acc + 1/d <= r:
        acc += 1/d
        rv.append(d)
        d += 1
    return (rv, r - acc)
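# ---------------------------------------------------------------------------
# Illustrative sketch (our own example): the classic greedy blow-up case
# 5/121 from the notes above, where the denominator can roughly square at
# each step.

if __name__ == '__main__':
    from sympy import Rational
    print(egyptian_fraction(Rational(5, 121)))
    # expected: [25, 757, 763309, 873960180913, 1527612795642093418846225]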
bsd-3-clause
Abi1ity/uniclust2.0
flask/lib/python2.7/site-packages/sqlalchemy/util/queue.py
18
7472
# util/queue.py # Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """An adaptation of Py2.3/2.4's Queue module which supports reentrant behavior, using RLock instead of Lock for its mutex object. This is to support the connection pool's usage of weakref callbacks to return connections to the underlying Queue, which can in extremely rare cases be invoked within the ``get()`` method of the Queue itself, producing a ``put()`` inside the ``get()`` and therefore a reentrant condition.""" from collections import deque from time import time as _time from sqlalchemy.util import threading import sys if sys.version_info < (2, 6): def notify_all(condition): condition.notify() else: def notify_all(condition): condition.notify_all() __all__ = ['Empty', 'Full', 'Queue'] class Empty(Exception): "Exception raised by Queue.get(block=0)/get_nowait()." pass class Full(Exception): "Exception raised by Queue.put(block=0)/put_nowait()." pass class SAAbort(Exception): "Special SQLA exception to abort waiting" def __init__(self, context): self.context = context class Queue: def __init__(self, maxsize=0): """Initialize a queue object with a given maximum size. If `maxsize` is <= 0, the queue size is infinite. """ self._init(maxsize) # mutex must be held whenever the queue is mutating. All methods # that acquire mutex must release it before returning. mutex # is shared between the two conditions, so acquiring and # releasing the conditions also acquires and releases mutex. self.mutex = threading.RLock() # Notify not_empty whenever an item is added to the queue; a # thread waiting to get is notified then. self.not_empty = threading.Condition(self.mutex) # Notify not_full whenever an item is removed from the queue; # a thread waiting to put is notified then. self.not_full = threading.Condition(self.mutex) # when this is set, SAAbort is raised within get(). self._sqla_abort_context = False def qsize(self): """Return the approximate size of the queue (not reliable!).""" self.mutex.acquire() n = self._qsize() self.mutex.release() return n def empty(self): """Return True if the queue is empty, False otherwise (not reliable!).""" self.mutex.acquire() n = self._empty() self.mutex.release() return n def full(self): """Return True if the queue is full, False otherwise (not reliable!).""" self.mutex.acquire() n = self._full() self.mutex.release() return n def put(self, item, block=True, timeout=None): """Put an item into the queue. If optional args `block` is True and `timeout` is None (the default), block if necessary until a free slot is available. If `timeout` is a positive number, it blocks at most `timeout` seconds and raises the ``Full`` exception if no free slot was available within that time. Otherwise (`block` is false), put an item on the queue if a free slot is immediately available, else raise the ``Full`` exception (`timeout` is ignored in that case). """ self.not_full.acquire() try: if not block: if self._full(): raise Full elif timeout is None: while self._full(): self.not_full.wait() else: if timeout < 0: raise ValueError("'timeout' must be a positive number") endtime = _time() + timeout while self._full(): remaining = endtime - _time() if remaining <= 0.0: raise Full self.not_full.wait(remaining) self._put(item) self.not_empty.notify() finally: self.not_full.release() def put_nowait(self, item): """Put an item into the queue without blocking. 
Only enqueue the item if a free slot is immediately available. Otherwise raise the ``Full`` exception. """ return self.put(item, False) def get(self, block=True, timeout=None): """Remove and return an item from the queue. If optional args `block` is True and `timeout` is None (the default), block if necessary until an item is available. If `timeout` is a positive number, it blocks at most `timeout` seconds and raises the ``Empty`` exception if no item was available within that time. Otherwise (`block` is false), return an item if one is immediately available, else raise the ``Empty`` exception (`timeout` is ignored in that case). """ self.not_empty.acquire() try: if not block: if self._empty(): raise Empty elif timeout is None: while self._empty(): self.not_empty.wait() if self._sqla_abort_context: raise SAAbort(self._sqla_abort_context) else: if timeout < 0: raise ValueError("'timeout' must be a positive number") endtime = _time() + timeout while self._empty(): remaining = endtime - _time() if remaining <= 0.0: raise Empty self.not_empty.wait(remaining) if self._sqla_abort_context: raise SAAbort(self._sqla_abort_context) item = self._get() self.not_full.notify() return item finally: self.not_empty.release() def abort(self, context): """Issue an 'abort', will force any thread waiting on get() to stop waiting and raise SAAbort. """ self._sqla_abort_context = context if not self.not_full.acquire(False): return try: notify_all(self.not_empty) finally: self.not_full.release() def get_nowait(self): """Remove and return an item from the queue without blocking. Only get an item if one is immediately available. Otherwise raise the ``Empty`` exception. """ return self.get(False) # Override these methods to implement other queue organizations # (e.g. stack or priority queue). # These will only be called with appropriate locks held # Initialize the queue representation def _init(self, maxsize): self.maxsize = maxsize self.queue = deque() def _qsize(self): return len(self.queue) # Check whether the queue is empty def _empty(self): return not self.queue # Check whether the queue is full def _full(self): return self.maxsize > 0 and len(self.queue) == self.maxsize # Put a new item in the queue def _put(self, item): self.queue.append(item) # Get an item from the queue def _get(self): return self.queue.popleft()
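# ---------------------------------------------------------------------------
# Illustrative sketch (not part of SQLAlchemy): how abort() interacts with a
# blocked get(). The waiter thread blocks on the empty queue; abort() wakes
# it, and get() raises SAAbort carrying the supplied context object.

if __name__ == '__main__':
    import time

    q = Queue()

    def _waiter():
        try:
            q.get(timeout=5)
        except SAAbort as exc:
            print('get() aborted, context: %r' % (exc.context,))

    t = threading.Thread(target=_waiter)
    t.start()
    time.sleep(0.2)  # let the waiter block inside get()
    q.abort('pool is shutting down')
    t.join()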
bsd-3-clause
NeCTAR-RC/python-neutronclient
neutronclient/neutron/v2_0/subnetpool.py
2
5010
# Copyright 2015 OpenStack Foundation. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from neutronclient.i18n import _ from neutronclient.neutron import v2_0 as neutronV20 def add_updatable_arguments(parser): parser.add_argument( '--min-prefixlen', type=int, help=_('Subnetpool minimum prefix length.')) parser.add_argument( '--max-prefixlen', type=int, help=_('Subnetpool maximum prefix length.')) parser.add_argument( '--default-prefixlen', type=int, help=_('Subnetpool default prefix length.')) parser.add_argument( '--pool-prefix', action='append', dest='prefixes', help=_('Subnetpool prefixes (This option can be repeated).')) def updatable_args2body(parsed_args, body, for_create=True): neutronV20.update_dict(parsed_args, body['subnetpool'], ['name', 'prefixes', 'default_prefixlen', 'min_prefixlen', 'max_prefixlen']) class ListSubnetPool(neutronV20.ListCommand): """List subnetpools that belong to a given tenant.""" resource = 'subnetpool' list_columns = ['id', 'name', 'prefixes', 'default_prefixlen', 'address-scope'] pagination_support = True sorting_support = True class ShowSubnetPool(neutronV20.ShowCommand): """Show information of a given subnetpool.""" resource = 'subnetpool' class CreateSubnetPool(neutronV20.CreateCommand): """Create a subnetpool for a given tenant.""" resource = 'subnetpool' def add_known_arguments(self, parser): add_updatable_arguments(parser) parser.add_argument( '--shared', action='store_true', help=_('Set the subnetpool as shared.')) parser.add_argument( 'name', help=_('Name of subnetpool to create.')) parser.add_argument( '--address-scope', metavar='ADDRSCOPE', help=_('ID or name of the address scope with which the subnetpool ' 'is associated. Prefixes must be unique across address ' 'scopes')) def args2body(self, parsed_args): body = {'subnetpool': {'prefixes': parsed_args.prefixes}} updatable_args2body(parsed_args, body) if parsed_args.shared: body['subnetpool']['shared'] = True # Parse and update for "address-scope" option if parsed_args.address_scope: _addrscope_id = neutronV20.find_resourceid_by_name_or_id( self.get_client(), 'address-scope', parsed_args.address_scope) body['subnetpool']['address_scope_id'] = _addrscope_id return body class DeleteSubnetPool(neutronV20.DeleteCommand): """Delete a given subnetpool.""" resource = 'subnetpool' class UpdateSubnetPool(neutronV20.UpdateCommand): """Update subnetpool's information.""" resource = 'subnetpool' def add_known_arguments(self, parser): add_updatable_arguments(parser) parser.add_argument('--name', help=_('Name of subnetpool to update.')) addrscope_args = parser.add_mutually_exclusive_group() addrscope_args.add_argument('--address-scope', metavar='ADDRSCOPE', help=_('ID or name of the address scope ' 'with which the subnetpool is ' 'associated. 
Prefixes must be ' 'unique across address scopes')) addrscope_args.add_argument('--no-address-scope', action='store_true', help=_('Detach subnetpool from the ' 'address scope')) def args2body(self, parsed_args): body = {'subnetpool': {}} updatable_args2body(parsed_args, body, for_create=False) # Parse and update for "address-scope" option/s if parsed_args.no_address_scope: body['subnetpool']['address_scope_id'] = None elif parsed_args.address_scope: _addrscope_id = neutronV20.find_resourceid_by_name_or_id( self.get_client(), 'address-scope', parsed_args.address_scope) body['subnetpool']['address_scope_id'] = _addrscope_id return body
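# ---------------------------------------------------------------------------
# Illustrative CLI usage (hypothetical names and prefixes, shown only to make
# the argument wiring above concrete):
#
#   neutron subnetpool-create --pool-prefix 10.10.0.0/16 \
#       --min-prefixlen 24 --shared pool1
#   neutron subnetpool-update --no-address-scope pool1
#
# For the create call, args2body() above would produce a request body like:
#
#   {'subnetpool': {'name': 'pool1', 'prefixes': ['10.10.0.0/16'],
#                   'min_prefixlen': 24, 'shared': True}}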
apache-2.0
nichung/wwwflaskBlogrevA
venv/lib/python2.7/site-packages/flask/testsuite/__init__.py
564
7022
# -*- coding: utf-8 -*- """ flask.testsuite ~~~~~~~~~~~~~~~ Tests Flask itself. The majority of Flask is already tested as part of Werkzeug. :copyright: (c) 2011 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ from __future__ import print_function import os import sys import flask import warnings import unittest from functools import update_wrapper from contextlib import contextmanager from werkzeug.utils import import_string, find_modules from flask._compat import reraise, StringIO def add_to_path(path): """Adds an entry to sys.path if it's not already there. This does not append it but moves it to the front so that we can be sure it is loaded. """ if not os.path.isdir(path): raise RuntimeError('Tried to add nonexisting path') def _samefile(x, y): if x == y: return True try: return os.path.samefile(x, y) except (IOError, OSError, AttributeError): # Windows has no samefile return False sys.path[:] = [x for x in sys.path if not _samefile(path, x)] sys.path.insert(0, path) def iter_suites(): """Yields all testsuites.""" for module in find_modules(__name__): mod = import_string(module) if hasattr(mod, 'suite'): yield mod.suite() def find_all_tests(suite): """Yields all the tests and their names from a given suite.""" suites = [suite] while suites: s = suites.pop() try: suites.extend(s) except TypeError: yield s, '%s.%s.%s' % ( s.__class__.__module__, s.__class__.__name__, s._testMethodName ) @contextmanager def catch_warnings(): """Catch warnings in a with block in a list""" # make sure deprecation warnings are active in tests warnings.simplefilter('default', category=DeprecationWarning) filters = warnings.filters warnings.filters = filters[:] old_showwarning = warnings.showwarning log = [] def showwarning(message, category, filename, lineno, file=None, line=None): log.append(locals()) try: warnings.showwarning = showwarning yield log finally: warnings.filters = filters warnings.showwarning = old_showwarning @contextmanager def catch_stderr(): """Catch stderr in a StringIO""" old_stderr = sys.stderr sys.stderr = rv = StringIO() try: yield rv finally: sys.stderr = old_stderr def emits_module_deprecation_warning(f): def new_f(self, *args, **kwargs): with catch_warnings() as log: f(self, *args, **kwargs) self.assert_true(log, 'expected deprecation warning') for entry in log: self.assert_in('Modules are deprecated', str(entry['message'])) return update_wrapper(new_f, f) class FlaskTestCase(unittest.TestCase): """Baseclass for all the tests that Flask uses. Use these methods for testing instead of the camelcased ones in the baseclass for consistency. 
""" def ensure_clean_request_context(self): # make sure we're not leaking a request context since we are # testing flask internally in debug mode in a few cases leaks = [] while flask._request_ctx_stack.top is not None: leaks.append(flask._request_ctx_stack.pop()) self.assert_equal(leaks, []) def setup(self): pass def teardown(self): pass def setUp(self): self.setup() def tearDown(self): unittest.TestCase.tearDown(self) self.ensure_clean_request_context() self.teardown() def assert_equal(self, x, y): return self.assertEqual(x, y) def assert_raises(self, exc_type, callable=None, *args, **kwargs): catcher = _ExceptionCatcher(self, exc_type) if callable is None: return catcher with catcher: callable(*args, **kwargs) def assert_true(self, x, msg=None): self.assertTrue(x, msg) def assert_false(self, x, msg=None): self.assertFalse(x, msg) def assert_in(self, x, y): self.assertIn(x, y) def assert_not_in(self, x, y): self.assertNotIn(x, y) if sys.version_info[:2] == (2, 6): def assertIn(self, x, y): assert x in y, "%r unexpectedly not in %r" % (x, y) def assertNotIn(self, x, y): assert x not in y, "%r unexpectedly in %r" % (x, y) class _ExceptionCatcher(object): def __init__(self, test_case, exc_type): self.test_case = test_case self.exc_type = exc_type def __enter__(self): return self def __exit__(self, exc_type, exc_value, tb): exception_name = self.exc_type.__name__ if exc_type is None: self.test_case.fail('Expected exception of type %r' % exception_name) elif not issubclass(exc_type, self.exc_type): reraise(exc_type, exc_value, tb) return True class BetterLoader(unittest.TestLoader): """A nicer loader that solves two problems. First of all we are setting up tests from different sources and we're doing this programmatically which breaks the default loading logic so this is required anyways. Secondly this loader has a nicer interpolation for test names than the default one so you can just do ``run-tests.py ViewTestCase`` and it will work. """ def getRootSuite(self): return suite() def loadTestsFromName(self, name, module=None): root = self.getRootSuite() if name == 'suite': return root all_tests = [] for testcase, testname in find_all_tests(root): if testname == name or \ testname.endswith('.' + name) or \ ('.' + name + '.') in testname or \ testname.startswith(name + '.'): all_tests.append(testcase) if not all_tests: raise LookupError('could not find test case for "%s"' % name) if len(all_tests) == 1: return all_tests[0] rv = unittest.TestSuite() for test in all_tests: rv.addTest(test) return rv def setup_path(): add_to_path(os.path.abspath(os.path.join( os.path.dirname(__file__), 'test_apps'))) def suite(): """A testsuite that has all the Flask tests. You can use this function to integrate the Flask tests into your own testsuite in case you want to test that monkeypatches to Flask do not break it. """ setup_path() suite = unittest.TestSuite() for other_suite in iter_suites(): suite.addTest(other_suite) return suite def main(): """Runs the testsuite as command line application.""" try: unittest.main(testLoader=BetterLoader(), defaultTest='suite') except Exception as e: print('Error: %s' % e)
mit
ingokegel/intellij-community
python/helpers/py2only/docutils/writers/manpage.py
113
35660
# -*- coding: utf-8 -*- # $Id: manpage.py 7628 2013-03-09 10:19:35Z grubert $ # Author: Engelbert Gruber <grubert@users.sourceforge.net> # Copyright: This module is put into the public domain. """ Simple man page writer for reStructuredText. Man pages (short for "manual pages") contain system documentation on unix-like systems. The pages are grouped in numbered sections: 1 executable programs and shell commands 2 system calls 3 library functions 4 special files 5 file formats 6 games 7 miscellaneous 8 system administration Man pages are written in *troff*, a text file formatting system. See http://www.tldp.org/HOWTO/Man-Page for a start. Man pages have no subsections, only parts. Standard parts NAME , SYNOPSIS , DESCRIPTION , OPTIONS , FILES , SEE ALSO , BUGS , and AUTHOR . A unix-like system keeps an index of the DESCRIPTIONs, which is accessible by the command whatis or apropos. """ __docformat__ = 'reStructuredText' import re import docutils from docutils import nodes, writers, languages try: import roman except ImportError: import docutils.utils.roman as roman FIELD_LIST_INDENT = 7 DEFINITION_LIST_INDENT = 7 OPTION_LIST_INDENT = 7 BLOCKQOUTE_INDENT = 3.5 LITERAL_BLOCK_INDENT = 3.5 # Define two macros so man/roff can calculate the # indent/unindent margins by itself MACRO_DEF = (r""". .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. """) class Writer(writers.Writer): supported = ('manpage',) """Formats this writer supports.""" output = None """Final translated form of `document`.""" def __init__(self): writers.Writer.__init__(self) self.translator_class = Translator def translate(self): visitor = self.translator_class(self.document) self.document.walkabout(visitor) self.output = visitor.astext() class Table(object): def __init__(self): self._rows = [] self._options = ['center'] self._tab_char = '\t' self._coldefs = [] def new_row(self): self._rows.append([]) def append_separator(self, separator): """Append the separator for table head.""" self._rows.append([separator]) def append_cell(self, cell_lines): """cell_lines is an array of lines""" start = 0 if len(cell_lines) > 0 and cell_lines[0] == '.sp\n': start = 1 self._rows[-1].append(cell_lines[start:]) if len(self._coldefs) < len(self._rows[-1]): self._coldefs.append('l') def _minimize_cell(self, cell_lines): """Remove leading and trailing blank and ``.sp`` lines""" while (cell_lines and cell_lines[0] in ('\n', '.sp\n')): del cell_lines[0] while (cell_lines and cell_lines[-1] in ('\n', '.sp\n')): del cell_lines[-1] def as_list(self): text = ['.TS\n'] text.append(' '.join(self._options) + ';\n') text.append('|%s|.\n' % ('|'.join(self._coldefs))) for row in self._rows: # row = array of cells. cell = array of lines.
text.append('_\n') # line above text.append('T{\n') for i in range(len(row)): cell = row[i] self._minimize_cell(cell) text.extend(cell) if not text[-1].endswith('\n'): text[-1] += '\n' if i < len(row)-1: text.append('T}'+self._tab_char+'T{\n') else: text.append('T}\n') text.append('_\n') text.append('.TE\n') return text class Translator(nodes.NodeVisitor): """""" words_and_spaces = re.compile(r'\S+| +|\n') possibly_a_roff_command = re.compile(r'\.\w') document_start = """Man page generated from reStructuredText.""" def __init__(self, document): nodes.NodeVisitor.__init__(self, document) self.settings = settings = document.settings lcode = settings.language_code self.language = languages.get_language(lcode, document.reporter) self.head = [] self.body = [] self.foot = [] self.section_level = 0 self.context = [] self.topic_class = '' self.colspecs = [] self.compact_p = 1 self.compact_simple = None # the list style "*" bullet or "#" numbered self._list_char = [] # writing the header .TH and .SH NAME is postboned after # docinfo. self._docinfo = { "title" : "", "title_upper": "", "subtitle" : "", "manual_section" : "", "manual_group" : "", "author" : [], "date" : "", "copyright" : "", "version" : "", } self._docinfo_keys = [] # a list to keep the sequence as in source. self._docinfo_names = {} # to get name from text not normalized. self._in_docinfo = None self._active_table = None self._in_literal = False self.header_written = 0 self._line_block = 0 self.authors = [] self.section_level = 0 self._indent = [0] # central definition of simple processing rules # what to output on : visit, depart # Do not use paragraph requests ``.PP`` because these set indentation. # use ``.sp``. Remove superfluous ``.sp`` in ``astext``. # # Fonts are put on a stack, the top one is used. # ``.ft P`` or ``\\fP`` pop from stack. # ``B`` bold, ``I`` italic, ``R`` roman should be available. # Hopefully ``C`` courier too. self.defs = { 'indent' : ('.INDENT %.1f\n', '.UNINDENT\n'), 'definition_list_item' : ('.TP', ''), 'field_name' : ('.TP\n.B ', '\n'), 'literal' : ('\\fB', '\\fP'), 'literal_block' : ('.sp\n.nf\n.ft C\n', '\n.ft P\n.fi\n'), 'option_list_item' : ('.TP\n', ''), 'reference' : (r'\fI\%', r'\fP'), 'emphasis': ('\\fI', '\\fP'), 'strong' : ('\\fB', '\\fP'), 'term' : ('\n.B ', '\n'), 'title_reference' : ('\\fI', '\\fP'), 'topic-title' : ('.SS ',), 'sidebar-title' : ('.SS ',), 'problematic' : ('\n.nf\n', '\n.fi\n'), } # NOTE do not specify the newline before a dot-command, but ensure # it is there. def comment_begin(self, text): """Return commented version of the passed text WITHOUT end of line/comment.""" prefix = '.\\" ' out_text = ''.join( [(prefix + in_line + '\n') for in_line in text.split('\n')]) return out_text def comment(self, text): """Return commented version of the passed text.""" return self.comment_begin(text)+'.\n' def ensure_eol(self): """Ensure the last line in body is terminated by new line.""" if len(self.body) > 0 and self.body[-1][-1] != '\n': self.body.append('\n') def astext(self): """Return the final formatted document as a string.""" if not self.header_written: # ensure we get a ".TH" as viewers require it. self.append_header() # filter body for i in xrange(len(self.body)-1, 0, -1): # remove superfluous vertical gaps. 
if self.body[i] == '.sp\n': if self.body[i - 1][:4] in ('.BI ','.IP '): self.body[i] = '.\n' elif (self.body[i - 1][:3] == '.B ' and self.body[i - 2][:4] == '.TP\n'): self.body[i] = '.\n' elif (self.body[i - 1] == '\n' and not self.possibly_a_roff_command.match(self.body[i - 2]) and (self.body[i - 3][:7] == '.TP\n.B ' or self.body[i - 3][:4] == '\n.B ') ): self.body[i] = '.\n' return ''.join(self.head + self.body + self.foot) def deunicode(self, text): text = text.replace(u'\xa0', '\\ ') text = text.replace(u'\u2020', '\\(dg') return text def visit_Text(self, node): text = node.astext() text = text.replace('\\','\\e') replace_pairs = [ (u'-', ur'\-'), (u'\'', ur'\(aq'), (u'´', ur'\''), (u'`', ur'\(ga'), ] for (in_char, out_markup) in replace_pairs: text = text.replace(in_char, out_markup) # unicode text = self.deunicode(text) # prevent interpretation of "." at line start if text.startswith('.'): text = '\\&' + text if self._in_literal: text = text.replace('\n.', '\n\\&.') self.body.append(text) def depart_Text(self, node): pass def list_start(self, node): class enum_char(object): enum_style = { 'bullet' : '\\(bu', 'emdash' : '\\(em', } def __init__(self, style): self._style = style if node.has_key('start'): self._cnt = node['start'] - 1 else: self._cnt = 0 self._indent = 2 if style == 'arabic': # indentation depends on number of childrens # and start value. self._indent = len(str(len(node.children))) self._indent += len(str(self._cnt)) + 1 elif style == 'loweralpha': self._cnt += ord('a') - 1 self._indent = 3 elif style == 'upperalpha': self._cnt += ord('A') - 1 self._indent = 3 elif style.endswith('roman'): self._indent = 5 def next(self): if self._style == 'bullet': return self.enum_style[self._style] elif self._style == 'emdash': return self.enum_style[self._style] self._cnt += 1 # TODO add prefix postfix if self._style == 'arabic': return "%d." % self._cnt elif self._style in ('loweralpha', 'upperalpha'): return "%c." % self._cnt elif self._style.endswith('roman'): res = roman.toRoman(self._cnt) + '.' if self._style.startswith('upper'): return res.upper() return res.lower() else: return "%d." % self._cnt def get_width(self): return self._indent def __repr__(self): return 'enum_style-%s' % list(self._style) if node.has_key('enumtype'): self._list_char.append(enum_char(node['enumtype'])) else: self._list_char.append(enum_char('bullet')) if len(self._list_char) > 1: # indent nested lists self.indent(self._list_char[-2].get_width()) else: self.indent(self._list_char[-1].get_width()) def list_end(self): self.dedent() self._list_char.pop() def header(self): tmpl = (".TH %(title_upper)s %(manual_section)s" " \"%(date)s\" \"%(version)s\" \"%(manual_group)s\"\n" ".SH NAME\n" "%(title)s \- %(subtitle)s\n") return tmpl % self._docinfo def append_header(self): """append header with .TH and .SH NAME""" # NOTE before everything # .TH title_upper section date source manual if self.header_written: return self.head.append(self.header()) self.head.append(MACRO_DEF) self.header_written = 1 def visit_address(self, node): self.visit_docinfo_item(node, 'address') def depart_address(self, node): pass def visit_admonition(self, node, name=None): # # Make admonitions a simple block quote # with a strong heading # # Using .IP/.RE doesn't preserve indentation # when admonitions contain bullets, literal, # and/or block quotes. # if name: # .. 
admonition:: has no name self.body.append('.sp\n') name = '%s%s:%s\n' % ( self.defs['strong'][0], self.language.labels.get(name, name).upper(), self.defs['strong'][1], ) self.body.append(name) self.visit_block_quote(node) def depart_admonition(self, node): self.depart_block_quote(node) def visit_attention(self, node): self.visit_admonition(node, 'attention') depart_attention = depart_admonition def visit_docinfo_item(self, node, name): if name == 'author': self._docinfo[name].append(node.astext()) else: self._docinfo[name] = node.astext() self._docinfo_keys.append(name) raise nodes.SkipNode def depart_docinfo_item(self, node): pass def visit_author(self, node): self.visit_docinfo_item(node, 'author') depart_author = depart_docinfo_item def visit_authors(self, node): # _author is called anyway. pass def depart_authors(self, node): pass def visit_block_quote(self, node): # BUG/HACK: indent alway uses the _last_ indention, # thus we need two of them. self.indent(BLOCKQOUTE_INDENT) self.indent(0) def depart_block_quote(self, node): self.dedent() self.dedent() def visit_bullet_list(self, node): self.list_start(node) def depart_bullet_list(self, node): self.list_end() def visit_caption(self, node): pass def depart_caption(self, node): pass def visit_caution(self, node): self.visit_admonition(node, 'caution') depart_caution = depart_admonition def visit_citation(self, node): num, text = node.astext().split(None, 1) num = num.strip() self.body.append('.IP [%s] 5\n' % num) def depart_citation(self, node): pass def visit_citation_reference(self, node): self.body.append('['+node.astext()+']') raise nodes.SkipNode def visit_classifier(self, node): pass def depart_classifier(self, node): pass def visit_colspec(self, node): self.colspecs.append(node) def depart_colspec(self, node): pass def write_colspecs(self): self.body.append("%s.\n" % ('L '*len(self.colspecs))) def visit_comment(self, node, sub=re.compile('-(?=-)').sub): self.body.append(self.comment(node.astext())) raise nodes.SkipNode def visit_contact(self, node): self.visit_docinfo_item(node, 'contact') depart_contact = depart_docinfo_item def visit_container(self, node): pass def depart_container(self, node): pass def visit_compound(self, node): pass def depart_compound(self, node): pass def visit_copyright(self, node): self.visit_docinfo_item(node, 'copyright') def visit_danger(self, node): self.visit_admonition(node, 'danger') depart_danger = depart_admonition def visit_date(self, node): self.visit_docinfo_item(node, 'date') def visit_decoration(self, node): pass def depart_decoration(self, node): pass def visit_definition(self, node): pass def depart_definition(self, node): pass def visit_definition_list(self, node): self.indent(DEFINITION_LIST_INDENT) def depart_definition_list(self, node): self.dedent() def visit_definition_list_item(self, node): self.body.append(self.defs['definition_list_item'][0]) def depart_definition_list_item(self, node): self.body.append(self.defs['definition_list_item'][1]) def visit_description(self, node): pass def depart_description(self, node): pass def visit_docinfo(self, node): self._in_docinfo = 1 def depart_docinfo(self, node): self._in_docinfo = None # NOTE nothing should be written before this self.append_header() def visit_doctest_block(self, node): self.body.append(self.defs['literal_block'][0]) self._in_literal = True def depart_doctest_block(self, node): self._in_literal = False self.body.append(self.defs['literal_block'][1]) def visit_document(self, node): # no blank line between comment and header. 
self.head.append(self.comment(self.document_start).rstrip()+'\n') # writing header is postboned self.header_written = 0 def depart_document(self, node): if self._docinfo['author']: self.body.append('.SH AUTHOR\n%s\n' % ', '.join(self._docinfo['author'])) skip = ('author', 'copyright', 'date', 'manual_group', 'manual_section', 'subtitle', 'title', 'title_upper', 'version') for name in self._docinfo_keys: if name == 'address': self.body.append("\n%s:\n%s%s.nf\n%s\n.fi\n%s%s" % ( self.language.labels.get(name, name), self.defs['indent'][0] % 0, self.defs['indent'][0] % BLOCKQOUTE_INDENT, self._docinfo[name], self.defs['indent'][1], self.defs['indent'][1])) elif not name in skip: if name in self._docinfo_names: label = self._docinfo_names[name] else: label = self.language.labels.get(name, name) self.body.append("\n%s: %s\n" % (label, self._docinfo[name])) if self._docinfo['copyright']: self.body.append('.SH COPYRIGHT\n%s\n' % self._docinfo['copyright']) self.body.append(self.comment( 'Generated by docutils manpage writer.')) def visit_emphasis(self, node): self.body.append(self.defs['emphasis'][0]) def depart_emphasis(self, node): self.body.append(self.defs['emphasis'][1]) def visit_entry(self, node): # a cell in a table row if 'morerows' in node: self.document.reporter.warning('"table row spanning" not supported', base_node=node) if 'morecols' in node: self.document.reporter.warning( '"table cell spanning" not supported', base_node=node) self.context.append(len(self.body)) def depart_entry(self, node): start = self.context.pop() self._active_table.append_cell(self.body[start:]) del self.body[start:] def visit_enumerated_list(self, node): self.list_start(node) def depart_enumerated_list(self, node): self.list_end() def visit_error(self, node): self.visit_admonition(node, 'error') depart_error = depart_admonition def visit_field(self, node): pass def depart_field(self, node): pass def visit_field_body(self, node): if self._in_docinfo: name_normalized = self._field_name.lower().replace(" ","_") self._docinfo_names[name_normalized] = self._field_name self.visit_docinfo_item(node, name_normalized) raise nodes.SkipNode def depart_field_body(self, node): pass def visit_field_list(self, node): self.indent(FIELD_LIST_INDENT) def depart_field_list(self, node): self.dedent() def visit_field_name(self, node): if self._in_docinfo: self._field_name = node.astext() raise nodes.SkipNode else: self.body.append(self.defs['field_name'][0]) def depart_field_name(self, node): self.body.append(self.defs['field_name'][1]) def visit_figure(self, node): self.indent(2.5) self.indent(0) def depart_figure(self, node): self.dedent() self.dedent() def visit_footer(self, node): self.document.reporter.warning('"footer" not supported', base_node=node) def depart_footer(self, node): pass def visit_footnote(self, node): num, text = node.astext().split(None, 1) num = num.strip() self.body.append('.IP [%s] 5\n' % self.deunicode(num)) def depart_footnote(self, node): pass def footnote_backrefs(self, node): self.document.reporter.warning('"footnote_backrefs" not supported', base_node=node) def visit_footnote_reference(self, node): self.body.append('['+self.deunicode(node.astext())+']') raise nodes.SkipNode def depart_footnote_reference(self, node): pass def visit_generated(self, node): pass def depart_generated(self, node): pass def visit_header(self, node): raise NotImplementedError, node.astext() def depart_header(self, node): pass def visit_hint(self, node): self.visit_admonition(node, 'hint') depart_hint = depart_admonition 
    def visit_subscript(self, node):
        self.body.append('\\s-2\\d')

    def depart_subscript(self, node):
        self.body.append('\\u\\s0')

    def visit_superscript(self, node):
        self.body.append('\\s-2\\u')

    def depart_superscript(self, node):
        self.body.append('\\d\\s0')

    def visit_attribution(self, node):
        self.body.append('\\(em ')

    def depart_attribution(self, node):
        self.body.append('\n')

    def visit_image(self, node):
        self.document.reporter.warning('"image" not supported',
                                       base_node=node)
        text = []
        if 'alt' in node.attributes:
            text.append(node.attributes['alt'])
        if 'uri' in node.attributes:
            text.append(node.attributes['uri'])
        self.body.append('[image: %s]\n' % ('/'.join(text)))
        raise nodes.SkipNode

    def visit_important(self, node):
        self.visit_admonition(node, 'important')

    depart_important = depart_admonition

    def visit_label(self, node):
        # footnote and citation
        if (isinstance(node.parent, nodes.footnote)
            or isinstance(node.parent, nodes.citation)):
            raise nodes.SkipNode
        self.document.reporter.warning('unsupported "label"',
                                       base_node=node)
        self.body.append('[')

    def depart_label(self, node):
        self.body.append(']\n')

    def visit_legend(self, node):
        pass

    def depart_legend(self, node):
        pass

    # WHAT should we use .INDENT, .UNINDENT ?
    def visit_line_block(self, node):
        self._line_block += 1
        if self._line_block == 1:
            # TODO: separate inline blocks from previous paragraphs
            # see http://hg.intevation.org/mercurial/crew/rev/9c142ed9c405
            # self.body.append('.sp\n')
            # but it does not work for me.
            self.body.append('.nf\n')
        else:
            self.body.append('.in +2\n')

    def depart_line_block(self, node):
        self._line_block -= 1
        if self._line_block == 0:
            self.body.append('.fi\n')
            self.body.append('.sp\n')
        else:
            self.body.append('.in -2\n')

    def visit_line(self, node):
        pass

    def depart_line(self, node):
        self.body.append('\n')

    def visit_list_item(self, node):
        # man 7 man argues to use ".IP" instead of ".TP"
        self.body.append('.IP %s %d\n' % (
                self._list_char[-1].next(),
                self._list_char[-1].get_width(),))

    def depart_list_item(self, node):
        pass

    def visit_literal(self, node):
        self.body.append(self.defs['literal'][0])

    def depart_literal(self, node):
        self.body.append(self.defs['literal'][1])

    def visit_literal_block(self, node):
        # BUG/HACK: indent always uses the _last_ indentation,
        # thus we need two of them.
        self.indent(LITERAL_BLOCK_INDENT)
        self.indent(0)
        self.body.append(self.defs['literal_block'][0])
        self._in_literal = True

    def depart_literal_block(self, node):
        self._in_literal = False
        self.body.append(self.defs['literal_block'][1])
        self.dedent()
        self.dedent()

    def visit_math(self, node):
        self.document.reporter.warning('"math" role not supported',
                                       base_node=node)
        self.visit_literal(node)

    def depart_math(self, node):
        self.depart_literal(node)

    def visit_math_block(self, node):
        self.document.reporter.warning('"math" directive not supported',
                                       base_node=node)
        self.visit_literal_block(node)

    def depart_math_block(self, node):
        self.depart_literal_block(node)

    def visit_meta(self, node):
        raise NotImplementedError, node.astext()

    def depart_meta(self, node):
        pass

    def visit_note(self, node):
        self.visit_admonition(node, 'note')

    depart_note = depart_admonition

    def indent(self, by=0.5):
        # if we are in a section ".SH" there already is a .RS
        step = self._indent[-1]
        self._indent.append(by)
        self.body.append(self.defs['indent'][0] % step)

    def dedent(self):
        self._indent.pop()
        self.body.append(self.defs['indent'][1])

    def visit_option_list(self, node):
        self.indent(OPTION_LIST_INDENT)

    def depart_option_list(self, node):
        self.dedent()

    def visit_option_list_item(self, node):
        # one item of the list
        self.body.append(self.defs['option_list_item'][0])

    def depart_option_list_item(self, node):
        self.body.append(self.defs['option_list_item'][1])

    def visit_option_group(self, node):
        # as one option could have several forms it is a group
        # options without parameter bold only, .B, -v
        # options with parameter bold italic, .BI, -f file
        #
        # we do not know if .B or .BI
        self.context.append('.B')            # blind guess
        self.context.append(len(self.body))  # to be able to insert later
        self.context.append(0)               # option counter

    def depart_option_group(self, node):
        self.context.pop()  # the counter
        start_position = self.context.pop()
        text = self.body[start_position:]
        del self.body[start_position:]
        self.body.append('%s%s\n' % (self.context.pop(), ''.join(text)))

    def visit_option(self, node):
        # each form of the option will be presented separately
        if self.context[-1] > 0:
            self.body.append('\\fP,\\fB ')
        if self.context[-3] == '.BI':
            self.body.append('\\')
        self.body.append(' ')

    def depart_option(self, node):
        self.context[-1] += 1

    def visit_option_string(self, node):
        # do not know if .B or .BI
        pass

    def depart_option_string(self, node):
        pass

    def visit_option_argument(self, node):
        self.context[-3] = '.BI'  # bold/italic alternate
        if node['delimiter'] != ' ':
            self.body.append('\\fB%s ' % node['delimiter'])
        elif self.body[len(self.body)-1].endswith('='):
            # a blank only means no blank in output, just changing font
            self.body.append(' ')
        else:
            # blank backslash blank, switch font then a blank
            self.body.append(' \\ ')

    def depart_option_argument(self, node):
        pass

    def visit_organization(self, node):
        self.visit_docinfo_item(node, 'organization')

    def depart_organization(self, node):
        pass

    def first_child(self, node):
        first = isinstance(node.parent[0], nodes.label)  # skip label
        for child in node.parent.children[first:]:
            if isinstance(child, nodes.Invisible):
                continue
            if child is node:
                return 1
            break
        return 0

    def visit_paragraph(self, node):
        # ``.PP`` : Start standard indented paragraph.
        # ``.LP`` : Start block paragraph, all except the first.
        # ``.P [type]`` : Start paragraph type.
        # NOTE don't use paragraph starts because they reset indentation.
        # ``.sp`` is only vertical space
        self.ensure_eol()
        if not self.first_child(node):
            self.body.append('.sp\n')

    def depart_paragraph(self, node):
        self.body.append('\n')

    def visit_problematic(self, node):
        self.body.append(self.defs['problematic'][0])

    def depart_problematic(self, node):
        self.body.append(self.defs['problematic'][1])

    def visit_raw(self, node):
        if node.get('format') == 'manpage':
            self.body.append(node.astext() + "\n")
        # Keep non-manpage raw text out of output:
        raise nodes.SkipNode

    def visit_reference(self, node):
        """E.g. link or email address."""
        self.body.append(self.defs['reference'][0])

    def depart_reference(self, node):
        self.body.append(self.defs['reference'][1])

    def visit_revision(self, node):
        self.visit_docinfo_item(node, 'revision')

    depart_revision = depart_docinfo_item

    def visit_row(self, node):
        self._active_table.new_row()

    def depart_row(self, node):
        pass

    def visit_section(self, node):
        self.section_level += 1

    def depart_section(self, node):
        self.section_level -= 1

    def visit_status(self, node):
        self.visit_docinfo_item(node, 'status')

    depart_status = depart_docinfo_item

    def visit_strong(self, node):
        self.body.append(self.defs['strong'][0])

    def depart_strong(self, node):
        self.body.append(self.defs['strong'][1])

    def visit_substitution_definition(self, node):
        """Internal only."""
        raise nodes.SkipNode

    def visit_substitution_reference(self, node):
        self.document.reporter.warning(
                '"substitution_reference" not supported', base_node=node)

    def visit_subtitle(self, node):
        if isinstance(node.parent, nodes.sidebar):
            self.body.append(self.defs['strong'][0])
        elif isinstance(node.parent, nodes.document):
            self.visit_docinfo_item(node, 'subtitle')
        elif isinstance(node.parent, nodes.section):
            self.body.append(self.defs['strong'][0])

    def depart_subtitle(self, node):
        # document subtitle calls SkipNode
        self.body.append(self.defs['strong'][1]+'\n.PP\n')

    def visit_system_message(self, node):
        # TODO add report_level
        #if node['level'] < self.document.reporter['writer'].report_level:
        #    Level is too low to display:
        #    raise nodes.SkipNode
        attr = {}
        backref_text = ''
        if node.hasattr('id'):
            attr['name'] = node['id']
        if node.hasattr('line'):
            line = ', line %s' % node['line']
        else:
            line = ''
        self.body.append('.IP "System Message: %s/%s (%s:%s)"\n'
                         % (node['type'], node['level'],
                            node['source'], line))

    def depart_system_message(self, node):
        pass

    def visit_table(self, node):
        self._active_table = Table()

    def depart_table(self, node):
        self.ensure_eol()
        self.body.extend(self._active_table.as_list())
        self._active_table = None

    def visit_target(self, node):
        # targets are in-document hyper targets,
        # without any use for man-pages.
        raise nodes.SkipNode

    def visit_tbody(self, node):
        pass

    def depart_tbody(self, node):
        pass

    def visit_term(self, node):
        self.body.append(self.defs['term'][0])

    def depart_term(self, node):
        self.body.append(self.defs['term'][1])

    def visit_tgroup(self, node):
        pass

    def depart_tgroup(self, node):
        pass

    def visit_thead(self, node):
        # MAYBE double line '='
        pass

    def depart_thead(self, node):
        # MAYBE double line '='
        pass

    def visit_tip(self, node):
        self.visit_admonition(node, 'tip')

    depart_tip = depart_admonition

    def visit_title(self, node):
        if isinstance(node.parent, nodes.topic):
            self.body.append(self.defs['topic-title'][0])
        elif isinstance(node.parent, nodes.sidebar):
            self.body.append(self.defs['sidebar-title'][0])
        elif isinstance(node.parent, nodes.admonition):
            self.body.append('.IP "')
        elif self.section_level == 0:
            self._docinfo['title'] = node.astext()
            # document title for .TH
            self._docinfo['title_upper'] = node.astext().upper()
            raise nodes.SkipNode
        elif self.section_level == 1:
            self.body.append('.SH %s\n'
                             % self.deunicode(node.astext().upper()))
            raise nodes.SkipNode
        else:
            self.body.append('.SS ')

    def depart_title(self, node):
        if isinstance(node.parent, nodes.admonition):
            self.body.append('"')
        self.body.append('\n')

    def visit_title_reference(self, node):
        """inline citation reference"""
        self.body.append(self.defs['title_reference'][0])

    def depart_title_reference(self, node):
        self.body.append(self.defs['title_reference'][1])

    def visit_topic(self, node):
        pass

    def depart_topic(self, node):
        pass

    def visit_sidebar(self, node):
        pass

    def depart_sidebar(self, node):
        pass

    def visit_rubric(self, node):
        pass

    def depart_rubric(self, node):
        pass

    def visit_transition(self, node):
        # .PP    Begin a new paragraph and reset prevailing indent.
        # .sp N  leaves N lines of blank space.
        # .ce    centers the next line
        self.body.append('\n.sp\n.ce\n----\n')

    def depart_transition(self, node):
        self.body.append('\n.ce 0\n.sp\n')

    def visit_version(self, node):
        self.visit_docinfo_item(node, 'version')

    def visit_warning(self, node):
        self.visit_admonition(node, 'warning')

    depart_warning = depart_admonition

    def unimplemented_visit(self, node):
        raise NotImplementedError('visiting unimplemented node type: %s'
                                  % node.__class__.__name__)

# vim: set fileencoding=utf-8 et ts=4 ai :
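
# Hedged usage sketch (not part of the original module): a translator like
# the one above is normally driven through the docutils publisher, which is
# assumed here to have this writer registered under the standard name
# 'manpage'. The sample document and author name are hypothetical.
if __name__ == '__main__':
    from docutils.core import publish_string

    sample = (
        "my-tool\n"
        "=======\n"
        "\n"
        ":Author: Jane Doe\n"
        "\n"
        "A short description of my-tool.\n"
    )
    # publish_string runs the full reader/parser/writer pipeline and
    # returns the rendered man page as troff source.
    print(publish_string(source=sample, writer_name='manpage'))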
apache-2.0
farjump/gnu-binutils
gdb/testsuite/gdb.perf/single-step.py
46
1269
# Copyright (C) 2013-2015 Free Software Foundation, Inc.

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

from perftest import perftest

class SingleStep (perftest.TestCaseWithBasicMeasurements):

    def __init__(self, step):
        super (SingleStep, self).__init__ ("single-step")
        self.step = step

    def warm_up(self):
        for _ in range(0, self.step):
            gdb.execute("stepi", False, True)

    def _run(self, r):
        for _ in range(0, r):
            gdb.execute("stepi", False, True)

    def execute_test(self):
        for i in range(1, 5):
            func = lambda: self._run(i * self.step)
            self.measure.measure(func, i * self.step)
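
# Hedged usage note (not part of the original test): in the gdb testsuite
# this class is driven by a companion single-step.exp file rather than run
# directly, and the gdb module used above is provided by gdb's embedded
# Python interpreter. From a live gdb session stopped in the test program,
# the equivalent manual invocation would look roughly like
#
#   (gdb) python SingleStep(1000).run()
#
# where run() is assumed to come from perftest.TestCaseWithBasicMeasurements
# and to call warm_up() once before timing execute_test().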
gpl-2.0
Hao-Liu/tp-libvirt
libguestfs/tests/guestmount.py
8
2688
import logging
import os

from autotest.client.shared import error, utils
from virttest import data_dir, utils_test


def umount_fs(mountpoint):
    if os.path.ismount(mountpoint):
        result = utils.run("umount -l %s" % mountpoint, ignore_status=True)
        if result.exit_status:
            logging.debug("Umount %s failed", mountpoint)
            return False
    logging.debug("Umounted %s successfully", mountpoint)
    return True


def run(test, params, env):
    """
    Test libguestfs tool guestmount.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    start_vm = "yes" == params.get("start_vm", "no")
    if vm.is_alive() and not start_vm:
        vm.destroy()
    elif vm.is_dead() and start_vm:
        vm.start()

    # Create a file in the vm with guestmount
    content = "This is file for guestmount test."
    path = params.get("gm_tempfile", "/home/gm_tmp")
    mountpoint = os.path.join(data_dir.get_tmp_dir(), "mountpoint")
    status_error = "yes" == params.get("status_error", "yes")
    readonly = "yes" == params.get("gm_readonly", "no")
    special_mount = "yes" == params.get("gm_mount", "no")
    vt = utils_test.libguestfs.VirtTools(vm, params)

    vm_ref = params.get("gm_vm_ref")
    is_disk = "yes" == params.get("gm_is_disk", "no")
    # Automatically get disk if no disk specified.
    if is_disk and vm_ref is None:
        vm_ref = utils_test.libguestfs.get_primary_disk(vm)

    if special_mount:
        # Get root filesystem before test
        params['libvirt_domain'] = params.get("main_vm")
        params['gf_inspector'] = True
        gf = utils_test.libguestfs.GuestfishTools(params)
        roots, rootfs = gf.get_root()
        gf.close_session()
        if roots is False:
            raise error.TestError("Can not get root filesystem "
                                  "in guestfish before test")
        logging.info("Root filesystem is:%s", rootfs)
        params['special_mountpoints'] = [rootfs]

    writes, writeo = vt.write_file_with_guestmount(mountpoint, path,
                                                   content, vm_ref)
    if umount_fs(mountpoint) is False:
        logging.error("Umount vm's filesystem failed.")

    if status_error:
        if writes:
            if readonly:
                raise error.TestFail("Write file to readonly mounted "
                                     "filesystem successfully. Not expected.")
            else:
                raise error.TestFail("Write file with guestmount "
                                     "successfully. Not expected.")
    else:
        if not writes:
            raise error.TestFail("Write file to mounted filesystem failed.")
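
# Hedged illustration (not part of the original test): the guestmount
# invocation that VirtTools.write_file_with_guestmount wraps, reduced to a
# standalone sketch. The domain name "demo-vm" and the mounted device are
# hypothetical; adjust them for a real host.
if __name__ == '__main__':
    import subprocess
    import tempfile

    demo_mountpoint = tempfile.mkdtemp(prefix="gm_")
    # Mount the guest's root device read-only; guestmount exits non-zero
    # on failure, so the return code is enough for a smoke check.
    ret = subprocess.call(["guestmount", "-d", "demo-vm",
                           "-m", "/dev/sda1", "--ro", demo_mountpoint])
    if ret == 0:
        logging.info("Mounted demo-vm at %s", demo_mountpoint)
        umount_fs(demo_mountpoint)  # reuse the helper defined above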
gpl-2.0
JieC/StockCheck
lib/bottle.py
30
149580
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Bottle is a fast and simple micro-framework for small web applications. It offers request dispatching (Routes) with url parameter support, templates, a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and template engines - all in a single file and with no dependencies other than the Python Standard Library. Homepage and documentation: http://bottlepy.org/ Copyright (c) 2013, Marcel Hellkamp. License: MIT (see LICENSE for details) """ from __future__ import with_statement __author__ = 'Marcel Hellkamp' __version__ = '0.12.9' __license__ = 'MIT' # The gevent server adapter needs to patch some modules before they are imported # This is why we parse the commandline parameters here but handle them later if __name__ == '__main__': from optparse import OptionParser _cmd_parser = OptionParser(usage="usage: %prog [options] package.module:app") _opt = _cmd_parser.add_option _opt("--version", action="store_true", help="show version number.") _opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.") _opt("-s", "--server", default='wsgiref', help="use SERVER as backend.") _opt("-p", "--plugin", action="append", help="install additional plugin/s.") _opt("--debug", action="store_true", help="start server in debug mode.") _opt("--reload", action="store_true", help="auto-reload on file changes.") _cmd_options, _cmd_args = _cmd_parser.parse_args() if _cmd_options.server and _cmd_options.server.startswith('gevent'): import gevent.monkey; gevent.monkey.patch_all() import base64, cgi, email.utils, functools, hmac, imp, itertools, mimetypes,\ os, re, subprocess, sys, tempfile, threading, time, warnings from datetime import date as datedate, datetime, timedelta from tempfile import TemporaryFile from traceback import format_exc, print_exc from inspect import getargspec from unicodedata import normalize try: from simplejson import dumps as json_dumps, loads as json_lds except ImportError: # pragma: no cover try: from json import dumps as json_dumps, loads as json_lds except ImportError: try: from django.utils.simplejson import dumps as json_dumps, loads as json_lds except ImportError: def json_dumps(data): raise ImportError("JSON support requires Python 2.6 or simplejson.") json_lds = json_dumps # We now try to fix 2.5/2.6/3.1/3.2 incompatibilities. # It ain't pretty but it works... Sorry for the mess. py = sys.version_info py3k = py >= (3, 0, 0) py25 = py < (2, 6, 0) py31 = (3, 1, 0) <= py < (3, 2, 0) # Workaround for the missing "as" keyword in py3k. def _e(): return sys.exc_info()[1] # Workaround for the "print is a keyword/function" Python 2/3 dilemma # and a fallback for mod_wsgi (resticts stdout/err attribute access) try: _stdout, _stderr = sys.stdout.write, sys.stderr.write except IOError: _stdout = lambda x: sys.stdout.write(x) _stderr = lambda x: sys.stderr.write(x) # Lots of stdlib and builtin differences. 
if py3k: import http.client as httplib import _thread as thread from urllib.parse import urljoin, SplitResult as UrlSplitResult from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote urlunquote = functools.partial(urlunquote, encoding='latin1') from http.cookies import SimpleCookie from collections import MutableMapping as DictMixin import pickle from io import BytesIO from configparser import ConfigParser basestring = str unicode = str json_loads = lambda s: json_lds(touni(s)) callable = lambda x: hasattr(x, '__call__') imap = map def _raise(*a): raise a[0](a[1]).with_traceback(a[2]) else: # 2.x import httplib import thread from urlparse import urljoin, SplitResult as UrlSplitResult from urllib import urlencode, quote as urlquote, unquote as urlunquote from Cookie import SimpleCookie from itertools import imap import cPickle as pickle from StringIO import StringIO as BytesIO from ConfigParser import SafeConfigParser as ConfigParser if py25: msg = "Python 2.5 support may be dropped in future versions of Bottle." warnings.warn(msg, DeprecationWarning) from UserDict import DictMixin def next(it): return it.next() bytes = str else: # 2.6, 2.7 from collections import MutableMapping as DictMixin unicode = unicode json_loads = json_lds eval(compile('def _raise(*a): raise a[0], a[1], a[2]', '<py3fix>', 'exec')) # Some helpers for string/byte handling def tob(s, enc='utf8'): return s.encode(enc) if isinstance(s, unicode) else bytes(s) def touni(s, enc='utf8', err='strict'): return s.decode(enc, err) if isinstance(s, bytes) else unicode(s) tonat = touni if py3k else tob # 3.2 fixes cgi.FieldStorage to accept bytes (which makes a lot of sense). # 3.1 needs a workaround. if py31: from io import TextIOWrapper class NCTextIOWrapper(TextIOWrapper): def close(self): pass # Keep wrapped buffer open. # A bug in functools causes it to break if the wrapper is an instance method def update_wrapper(wrapper, wrapped, *a, **ka): try: functools.update_wrapper(wrapper, wrapped, *a, **ka) except AttributeError: pass # These helpers are used at module level and need to be defined first. # And yes, I know PEP-8, but sometimes a lower-case classname makes more sense. def depr(message, hard=False): warnings.warn(message, DeprecationWarning, stacklevel=3) def makelist(data): # This is just to handy if isinstance(data, (tuple, list, set, dict)): return list(data) elif data: return [data] else: return [] class DictProperty(object): ''' Property that maps to a key in a local dict-like attribute. ''' def __init__(self, attr, key=None, read_only=False): self.attr, self.key, self.read_only = attr, key, read_only def __call__(self, func): functools.update_wrapper(self, func, updated=[]) self.getter, self.key = func, self.key or func.__name__ return self def __get__(self, obj, cls): if obj is None: return self key, storage = self.key, getattr(obj, self.attr) if key not in storage: storage[key] = self.getter(obj) return storage[key] def __set__(self, obj, value): if self.read_only: raise AttributeError("Read-Only property.") getattr(obj, self.attr)[self.key] = value def __delete__(self, obj): if self.read_only: raise AttributeError("Read-Only property.") del getattr(obj, self.attr)[self.key] class cached_property(object): ''' A property that is only computed once per instance and then replaces itself with an ordinary attribute. Deleting the attribute resets the property. 
''' def __init__(self, func): self.__doc__ = getattr(func, '__doc__') self.func = func def __get__(self, obj, cls): if obj is None: return self value = obj.__dict__[self.func.__name__] = self.func(obj) return value class lazy_attribute(object): ''' A property that caches itself to the class object. ''' def __init__(self, func): functools.update_wrapper(self, func, updated=[]) self.getter = func def __get__(self, obj, cls): value = self.getter(cls) setattr(cls, self.__name__, value) return value ############################################################################### # Exceptions and Events ######################################################## ############################################################################### class BottleException(Exception): """ A base class for exceptions used by bottle. """ pass ############################################################################### # Routing ###################################################################### ############################################################################### class RouteError(BottleException): """ This is a base class for all routing related exceptions """ class RouteReset(BottleException): """ If raised by a plugin or request handler, the route is reset and all plugins are re-applied. """ class RouterUnknownModeError(RouteError): pass class RouteSyntaxError(RouteError): """ The route parser found something not supported by this router. """ class RouteBuildError(RouteError): """ The route could not be built. """ def _re_flatten(p): ''' Turn all capturing groups in a regular expression pattern into non-capturing groups. ''' if '(' not in p: return p return re.sub(r'(\\*)(\(\?P<[^>]+>|\((?!\?))', lambda m: m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:', p) class Router(object): ''' A Router is an ordered collection of route->target pairs. It is used to efficiently match WSGI requests against a number of routes and return the first target that satisfies the request. The target may be anything, usually a string, ID or callable object. A route consists of a path-rule and a HTTP method. The path-rule is either a static path (e.g. `/contact`) or a dynamic path that contains wildcards (e.g. `/wiki/<page>`). The wildcard syntax and details on the matching order are described in docs:`routing`. ''' default_pattern = '[^/]+' default_filter = 're' #: The current CPython regexp implementation does not allow more #: than 99 matching groups per regular expression. _MAX_GROUPS_PER_PATTERN = 99 def __init__(self, strict=False): self.rules = [] # All rules in order self._groups = {} # index of regexes to find them in dyna_routes self.builder = {} # Data structure for the url builder self.static = {} # Search structure for static routes self.dyna_routes = {} self.dyna_regexes = {} # Search structure for dynamic routes #: If true, static routes are no longer checked first. self.strict_order = strict self.filters = { 're': lambda conf: (_re_flatten(conf or self.default_pattern), None, None), 'int': lambda conf: (r'-?\d+', int, lambda x: str(int(x))), 'float': lambda conf: (r'-?[\d.]+', float, lambda x: str(float(x))), 'path': lambda conf: (r'.+?', None, None)} def add_filter(self, name, func): ''' Add a filter. The provided function is called with the configuration string as parameter and must return a (regexp, to_python, to_url) tuple. The first element is a string, the last two are callables or None. 
''' self.filters[name] = func rule_syntax = re.compile('(\\\\*)'\ '(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)'\ '|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)'\ '(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))') def _itertokens(self, rule): offset, prefix = 0, '' for match in self.rule_syntax.finditer(rule): prefix += rule[offset:match.start()] g = match.groups() if len(g[0])%2: # Escaped wildcard prefix += match.group(0)[len(g[0]):] offset = match.end() continue if prefix: yield prefix, None, None name, filtr, conf = g[4:7] if g[2] is None else g[1:4] yield name, filtr or 'default', conf or None offset, prefix = match.end(), '' if offset <= len(rule) or prefix: yield prefix+rule[offset:], None, None def add(self, rule, method, target, name=None): ''' Add a new rule or replace the target for an existing rule. ''' anons = 0 # Number of anonymous wildcards found keys = [] # Names of keys pattern = '' # Regular expression pattern with named groups filters = [] # Lists of wildcard input filters builder = [] # Data structure for the URL builder is_static = True for key, mode, conf in self._itertokens(rule): if mode: is_static = False if mode == 'default': mode = self.default_filter mask, in_filter, out_filter = self.filters[mode](conf) if not key: pattern += '(?:%s)' % mask key = 'anon%d' % anons anons += 1 else: pattern += '(?P<%s>%s)' % (key, mask) keys.append(key) if in_filter: filters.append((key, in_filter)) builder.append((key, out_filter or str)) elif key: pattern += re.escape(key) builder.append((None, key)) self.builder[rule] = builder if name: self.builder[name] = builder if is_static and not self.strict_order: self.static.setdefault(method, {}) self.static[method][self.build(rule)] = (target, None) return try: re_pattern = re.compile('^(%s)$' % pattern) re_match = re_pattern.match except re.error: raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, _e())) if filters: def getargs(path): url_args = re_match(path).groupdict() for name, wildcard_filter in filters: try: url_args[name] = wildcard_filter(url_args[name]) except ValueError: raise HTTPError(400, 'Path has wrong format.') return url_args elif re_pattern.groupindex: def getargs(path): return re_match(path).groupdict() else: getargs = None flatpat = _re_flatten(pattern) whole_rule = (rule, flatpat, target, getargs) if (flatpat, method) in self._groups: if DEBUG: msg = 'Route <%s %s> overwrites a previously defined route' warnings.warn(msg % (method, rule), RuntimeWarning) self.dyna_routes[method][self._groups[flatpat, method]] = whole_rule else: self.dyna_routes.setdefault(method, []).append(whole_rule) self._groups[flatpat, method] = len(self.dyna_routes[method]) - 1 self._compile(method) def _compile(self, method): all_rules = self.dyna_routes[method] comborules = self.dyna_regexes[method] = [] maxgroups = self._MAX_GROUPS_PER_PATTERN for x in range(0, len(all_rules), maxgroups): some = all_rules[x:x+maxgroups] combined = (flatpat for (_, flatpat, _, _) in some) combined = '|'.join('(^%s$)' % flatpat for flatpat in combined) combined = re.compile(combined).match rules = [(target, getargs) for (_, _, target, getargs) in some] comborules.append((combined, rules)) def build(self, _name, *anons, **query): ''' Build an URL by filling the wildcards in a rule. 
''' builder = self.builder.get(_name) if not builder: raise RouteBuildError("No route with that name.", _name) try: for i, value in enumerate(anons): query['anon%d'%i] = value url = ''.join([f(query.pop(n)) if n else f for (n,f) in builder]) return url if not query else url+'?'+urlencode(query) except KeyError: raise RouteBuildError('Missing URL argument: %r' % _e().args[0]) def match(self, environ): ''' Return a (target, url_agrs) tuple or raise HTTPError(400/404/405). ''' verb = environ['REQUEST_METHOD'].upper() path = environ['PATH_INFO'] or '/' target = None if verb == 'HEAD': methods = ['PROXY', verb, 'GET', 'ANY'] else: methods = ['PROXY', verb, 'ANY'] for method in methods: if method in self.static and path in self.static[method]: target, getargs = self.static[method][path] return target, getargs(path) if getargs else {} elif method in self.dyna_regexes: for combined, rules in self.dyna_regexes[method]: match = combined(path) if match: target, getargs = rules[match.lastindex - 1] return target, getargs(path) if getargs else {} # No matching route found. Collect alternative methods for 405 response allowed = set([]) nocheck = set(methods) for method in set(self.static) - nocheck: if path in self.static[method]: allowed.add(verb) for method in set(self.dyna_regexes) - allowed - nocheck: for combined, rules in self.dyna_regexes[method]: match = combined(path) if match: allowed.add(method) if allowed: allow_header = ",".join(sorted(allowed)) raise HTTPError(405, "Method not allowed.", Allow=allow_header) # No matching route and no alternative method found. We give up raise HTTPError(404, "Not found: " + repr(path)) class Route(object): ''' This class wraps a route callback along with route specific metadata and configuration and applies Plugins on demand. It is also responsible for turing an URL path rule into a regular expression usable by the Router. ''' def __init__(self, app, rule, method, callback, name=None, plugins=None, skiplist=None, **config): #: The application this route is installed to. self.app = app #: The path-rule string (e.g. ``/wiki/:page``). self.rule = rule #: The HTTP method as a string (e.g. ``GET``). self.method = method #: The original callback with no plugins applied. Useful for introspection. self.callback = callback #: The name of the route (if specified) or ``None``. self.name = name or None #: A list of route-specific plugins (see :meth:`Bottle.route`). self.plugins = plugins or [] #: A list of plugins to not apply to this route (see :meth:`Bottle.route`). self.skiplist = skiplist or [] #: Additional keyword arguments passed to the :meth:`Bottle.route` #: decorator are stored in this dictionary. Used for route-specific #: plugin configuration and meta-data. self.config = ConfigDict().load_dict(config, make_namespaces=True) def __call__(self, *a, **ka): depr("Some APIs changed to return Route() instances instead of"\ " callables. Make sure to use the Route.call method and not to"\ " call Route instances directly.") #0.12 return self.call(*a, **ka) @cached_property def call(self): ''' The route callback with all plugins applied. This property is created on demand and then cached to speed up subsequent requests.''' return self._make_callback() def reset(self): ''' Forget any cached values. The next time :attr:`call` is accessed, all plugins are re-applied. 
''' self.__dict__.pop('call', None) def prepare(self): ''' Do all on-demand work immediately (useful for debugging).''' self.call @property def _context(self): depr('Switch to Plugin API v2 and access the Route object directly.') #0.12 return dict(rule=self.rule, method=self.method, callback=self.callback, name=self.name, app=self.app, config=self.config, apply=self.plugins, skip=self.skiplist) def all_plugins(self): ''' Yield all Plugins affecting this route. ''' unique = set() for p in reversed(self.app.plugins + self.plugins): if True in self.skiplist: break name = getattr(p, 'name', False) if name and (name in self.skiplist or name in unique): continue if p in self.skiplist or type(p) in self.skiplist: continue if name: unique.add(name) yield p def _make_callback(self): callback = self.callback for plugin in self.all_plugins(): try: if hasattr(plugin, 'apply'): api = getattr(plugin, 'api', 1) context = self if api > 1 else self._context callback = plugin.apply(callback, context) else: callback = plugin(callback) except RouteReset: # Try again with changed configuration. return self._make_callback() if not callback is self.callback: update_wrapper(callback, self.callback) return callback def get_undecorated_callback(self): ''' Return the callback. If the callback is a decorated function, try to recover the original function. ''' func = self.callback func = getattr(func, '__func__' if py3k else 'im_func', func) closure_attr = '__closure__' if py3k else 'func_closure' while hasattr(func, closure_attr) and getattr(func, closure_attr): func = getattr(func, closure_attr)[0].cell_contents return func def get_callback_args(self): ''' Return a list of argument names the callback (most likely) accepts as keyword arguments. If the callback is a decorated function, try to recover the original function before inspection. ''' return getargspec(self.get_undecorated_callback())[0] def get_config(self, key, default=None): ''' Lookup a config field and return its value, first checking the route.config, then route.app.config.''' for conf in (self.config, self.app.conifg): if key in conf: return conf[key] return default def __repr__(self): cb = self.get_undecorated_callback() return '<%s %r %r>' % (self.method, self.rule, cb) ############################################################################### # Application Object ########################################################### ############################################################################### class Bottle(object): """ Each Bottle object represents a single, distinct web application and consists of routes, callbacks, plugins, resources and configuration. Instances are callable WSGI applications. :param catchall: If true (default), handle all exceptions. Turn off to let debugging middleware handle exceptions. """ def __init__(self, catchall=True, autojson=True): #: A :class:`ConfigDict` for app specific configuration. self.config = ConfigDict() self.config._on_change = functools.partial(self.trigger_hook, 'config') self.config.meta_set('autojson', 'validate', bool) self.config.meta_set('catchall', 'validate', bool) self.config['catchall'] = catchall self.config['autojson'] = autojson #: A :class:`ResourceManager` for application files self.resources = ResourceManager() self.routes = [] # List of installed :class:`Route` instances. self.router = Router() # Maps requests to :class:`Route` instances. self.error_handler = {} # Core plugins self.plugins = [] # List of installed plugins. 
if self.config['autojson']: self.install(JSONPlugin()) self.install(TemplatePlugin()) #: If true, most exceptions are caught and returned as :exc:`HTTPError` catchall = DictProperty('config', 'catchall') __hook_names = 'before_request', 'after_request', 'app_reset', 'config' __hook_reversed = 'after_request' @cached_property def _hooks(self): return dict((name, []) for name in self.__hook_names) def add_hook(self, name, func): ''' Attach a callback to a hook. Three hooks are currently implemented: before_request Executed once before each request. The request context is available, but no routing has happened yet. after_request Executed once after each request regardless of its outcome. app_reset Called whenever :meth:`Bottle.reset` is called. ''' if name in self.__hook_reversed: self._hooks[name].insert(0, func) else: self._hooks[name].append(func) def remove_hook(self, name, func): ''' Remove a callback from a hook. ''' if name in self._hooks and func in self._hooks[name]: self._hooks[name].remove(func) return True def trigger_hook(self, __name, *args, **kwargs): ''' Trigger a hook and return a list of results. ''' return [hook(*args, **kwargs) for hook in self._hooks[__name][:]] def hook(self, name): """ Return a decorator that attaches a callback to a hook. See :meth:`add_hook` for details.""" def decorator(func): self.add_hook(name, func) return func return decorator def mount(self, prefix, app, **options): ''' Mount an application (:class:`Bottle` or plain WSGI) to a specific URL prefix. Example:: root_app.mount('/admin/', admin_app) :param prefix: path prefix or `mount-point`. If it ends in a slash, that slash is mandatory. :param app: an instance of :class:`Bottle` or a WSGI application. All other parameters are passed to the underlying :meth:`route` call. ''' if isinstance(app, basestring): depr('Parameter order of Bottle.mount() changed.', True) # 0.10 segments = [p for p in prefix.split('/') if p] if not segments: raise ValueError('Empty path prefix.') path_depth = len(segments) def mountpoint_wrapper(): try: request.path_shift(path_depth) rs = HTTPResponse([]) def start_response(status, headerlist, exc_info=None): if exc_info: try: _raise(*exc_info) finally: exc_info = None rs.status = status for name, value in headerlist: rs.add_header(name, value) return rs.body.append body = app(request.environ, start_response) if body and rs.body: body = itertools.chain(rs.body, body) rs.body = body or rs.body return rs finally: request.path_shift(-path_depth) options.setdefault('skip', True) options.setdefault('method', 'PROXY') options.setdefault('mountpoint', {'prefix': prefix, 'target': app}) options['callback'] = mountpoint_wrapper self.route('/%s/<:re:.*>' % '/'.join(segments), **options) if not prefix.endswith('/'): self.route('/' + '/'.join(segments), **options) def merge(self, routes): ''' Merge the routes of another :class:`Bottle` application or a list of :class:`Route` objects into this application. The routes keep their 'owner', meaning that the :data:`Route.app` attribute is not changed. ''' if isinstance(routes, Bottle): routes = routes.routes for route in routes: self.add_route(route) def install(self, plugin): ''' Add a plugin to the list of plugins and prepare it for being applied to all routes of this application. A plugin may be a simple decorator or an object that implements the :class:`Plugin` API. 
''' if hasattr(plugin, 'setup'): plugin.setup(self) if not callable(plugin) and not hasattr(plugin, 'apply'): raise TypeError("Plugins must be callable or implement .apply()") self.plugins.append(plugin) self.reset() return plugin def uninstall(self, plugin): ''' Uninstall plugins. Pass an instance to remove a specific plugin, a type object to remove all plugins that match that type, a string to remove all plugins with a matching ``name`` attribute or ``True`` to remove all plugins. Return the list of removed plugins. ''' removed, remove = [], plugin for i, plugin in list(enumerate(self.plugins))[::-1]: if remove is True or remove is plugin or remove is type(plugin) \ or getattr(plugin, 'name', True) == remove: removed.append(plugin) del self.plugins[i] if hasattr(plugin, 'close'): plugin.close() if removed: self.reset() return removed def reset(self, route=None): ''' Reset all routes (force plugins to be re-applied) and clear all caches. If an ID or route object is given, only that specific route is affected. ''' if route is None: routes = self.routes elif isinstance(route, Route): routes = [route] else: routes = [self.routes[route]] for route in routes: route.reset() if DEBUG: for route in routes: route.prepare() self.trigger_hook('app_reset') def close(self): ''' Close the application and all installed plugins. ''' for plugin in self.plugins: if hasattr(plugin, 'close'): plugin.close() self.stopped = True def run(self, **kwargs): ''' Calls :func:`run` with the same parameters. ''' run(self, **kwargs) def match(self, environ): """ Search for a matching route and return a (:class:`Route` , urlargs) tuple. The second value is a dictionary with parameters extracted from the URL. Raise :exc:`HTTPError` (404/405) on a non-match.""" return self.router.match(environ) def get_url(self, routename, **kargs): """ Return a string that matches a named route """ scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/' location = self.router.build(routename, **kargs).lstrip('/') return urljoin(urljoin('/', scriptname), location) def add_route(self, route): ''' Add a route object, but do not change the :data:`Route.app` attribute.''' self.routes.append(route) self.router.add(route.rule, route.method, route, name=route.name) if DEBUG: route.prepare() def route(self, path=None, method='GET', callback=None, name=None, apply=None, skip=None, **config): """ A decorator to bind a function to a request URL. Example:: @app.route('/hello/:name') def hello(name): return 'Hello %s' % name The ``:name`` part is a wildcard. See :class:`Router` for syntax details. :param path: Request path or a list of paths to listen to. If no path is specified, it is automatically generated from the signature of the function. :param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of methods to listen to. (default: `GET`) :param callback: An optional shortcut to avoid the decorator syntax. ``route(..., callback=func)`` equals ``route(...)(func)`` :param name: The name for this route. (default: None) :param apply: A decorator or plugin or a list of plugins. These are applied to the route callback in addition to installed plugins. :param skip: A list of plugins, plugin classes or names. Matching plugins are not installed to this route. ``True`` skips all. Any additional keyword arguments are stored as route-specific configuration and passed to plugins (see :meth:`Plugin.apply`). 
""" if callable(path): path, callback = None, path plugins = makelist(apply) skiplist = makelist(skip) def decorator(callback): # TODO: Documentation and tests if isinstance(callback, basestring): callback = load(callback) for rule in makelist(path) or yieldroutes(callback): for verb in makelist(method): verb = verb.upper() route = Route(self, rule, verb, callback, name=name, plugins=plugins, skiplist=skiplist, **config) self.add_route(route) return callback return decorator(callback) if callback else decorator def get(self, path=None, method='GET', **options): """ Equals :meth:`route`. """ return self.route(path, method, **options) def post(self, path=None, method='POST', **options): """ Equals :meth:`route` with a ``POST`` method parameter. """ return self.route(path, method, **options) def put(self, path=None, method='PUT', **options): """ Equals :meth:`route` with a ``PUT`` method parameter. """ return self.route(path, method, **options) def delete(self, path=None, method='DELETE', **options): """ Equals :meth:`route` with a ``DELETE`` method parameter. """ return self.route(path, method, **options) def error(self, code=500): """ Decorator: Register an output handler for a HTTP error code""" def wrapper(handler): self.error_handler[int(code)] = handler return handler return wrapper def default_error_handler(self, res): return tob(template(ERROR_PAGE_TEMPLATE, e=res)) def _handle(self, environ): path = environ['bottle.raw_path'] = environ['PATH_INFO'] if py3k: try: environ['PATH_INFO'] = path.encode('latin1').decode('utf8') except UnicodeError: return HTTPError(400, 'Invalid path string. Expected UTF-8') try: environ['bottle.app'] = self request.bind(environ) response.bind() try: self.trigger_hook('before_request') route, args = self.router.match(environ) environ['route.handle'] = route environ['bottle.route'] = route environ['route.url_args'] = args return route.call(**args) finally: self.trigger_hook('after_request') except HTTPResponse: return _e() except RouteReset: route.reset() return self._handle(environ) except (KeyboardInterrupt, SystemExit, MemoryError): raise except Exception: if not self.catchall: raise stacktrace = format_exc() environ['wsgi.errors'].write(stacktrace) return HTTPError(500, "Internal Server Error", _e(), stacktrace) def _cast(self, out, peek=None): """ Try to convert the parameter into something WSGI compatible and set correct HTTP headers when possible. Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like, iterable of strings and iterable of unicodes """ # Empty output is done here if not out: if 'Content-Length' not in response: response['Content-Length'] = 0 return [] # Join lists of byte or unicode strings. Mixed lists are NOT supported if isinstance(out, (tuple, list))\ and isinstance(out[0], (bytes, unicode)): out = out[0][0:0].join(out) # b'abc'[0:0] -> b'' # Encode unicode strings if isinstance(out, unicode): out = out.encode(response.charset) # Byte Strings are just returned if isinstance(out, bytes): if 'Content-Length' not in response: response['Content-Length'] = len(out) return [out] # HTTPError or HTTPException (recursive, because they may wrap anything) # TODO: Handle these explicitly in handle() or make them iterable. if isinstance(out, HTTPError): out.apply(response) out = self.error_handler.get(out.status_code, self.default_error_handler)(out) return self._cast(out) if isinstance(out, HTTPResponse): out.apply(response) return self._cast(out.body) # File-like objects. 
if hasattr(out, 'read'): if 'wsgi.file_wrapper' in request.environ: return request.environ['wsgi.file_wrapper'](out) elif hasattr(out, 'close') or not hasattr(out, '__iter__'): return WSGIFileWrapper(out) # Handle Iterables. We peek into them to detect their inner type. try: iout = iter(out) first = next(iout) while not first: first = next(iout) except StopIteration: return self._cast('') except HTTPResponse: first = _e() except (KeyboardInterrupt, SystemExit, MemoryError): raise except Exception: if not self.catchall: raise first = HTTPError(500, 'Unhandled exception', _e(), format_exc()) # These are the inner types allowed in iterator or generator objects. if isinstance(first, HTTPResponse): return self._cast(first) elif isinstance(first, bytes): new_iter = itertools.chain([first], iout) elif isinstance(first, unicode): encoder = lambda x: x.encode(response.charset) new_iter = imap(encoder, itertools.chain([first], iout)) else: msg = 'Unsupported response type: %s' % type(first) return self._cast(HTTPError(500, msg)) if hasattr(out, 'close'): new_iter = _closeiter(new_iter, out.close) return new_iter def wsgi(self, environ, start_response): """ The bottle WSGI-interface. """ try: out = self._cast(self._handle(environ)) # rfc2616 section 4.3 if response._status_code in (100, 101, 204, 304)\ or environ['REQUEST_METHOD'] == 'HEAD': if hasattr(out, 'close'): out.close() out = [] start_response(response._status_line, response.headerlist) return out except (KeyboardInterrupt, SystemExit, MemoryError): raise except Exception: if not self.catchall: raise err = '<h1>Critical error while processing request: %s</h1>' \ % html_escape(environ.get('PATH_INFO', '/')) if DEBUG: err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \ '<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \ % (html_escape(repr(_e())), html_escape(format_exc())) environ['wsgi.errors'].write(err) headers = [('Content-Type', 'text/html; charset=UTF-8')] start_response('500 INTERNAL SERVER ERROR', headers, sys.exc_info()) return [tob(err)] def __call__(self, environ, start_response): ''' Each instance of :class:'Bottle' is a WSGI application. ''' return self.wsgi(environ, start_response) ############################################################################### # HTTP and WSGI Tools ########################################################## ############################################################################### class BaseRequest(object): """ A wrapper for WSGI environment dictionaries that adds a lot of convenient access methods and properties. Most of them are read-only. Adding new attributes to a request actually adds them to the environ dictionary (as 'bottle.request.ext.<name>'). This is the recommended way to store and access request-specific data. """ __slots__ = ('environ') #: Maximum size of memory buffer for :attr:`body` in bytes. MEMFILE_MAX = 102400 def __init__(self, environ=None): """ Wrap a WSGI environ dictionary. """ #: The wrapped WSGI environ dictionary. This is the only real attribute. #: All other attributes actually are read-only properties. self.environ = {} if environ is None else environ self.environ['bottle.request'] = self @DictProperty('environ', 'bottle.app', read_only=True) def app(self): ''' Bottle application handling this request. ''' raise RuntimeError('This request is not connected to an application.') @DictProperty('environ', 'bottle.route', read_only=True) def route(self): """ The bottle :class:`Route` object that matches this request. 
""" raise RuntimeError('This request is not connected to a route.') @DictProperty('environ', 'route.url_args', read_only=True) def url_args(self): """ The arguments extracted from the URL. """ raise RuntimeError('This request is not connected to a route.') @property def path(self): ''' The value of ``PATH_INFO`` with exactly one prefixed slash (to fix broken clients and avoid the "empty path" edge case). ''' return '/' + self.environ.get('PATH_INFO','').lstrip('/') @property def method(self): ''' The ``REQUEST_METHOD`` value as an uppercase string. ''' return self.environ.get('REQUEST_METHOD', 'GET').upper() @DictProperty('environ', 'bottle.request.headers', read_only=True) def headers(self): ''' A :class:`WSGIHeaderDict` that provides case-insensitive access to HTTP request headers. ''' return WSGIHeaderDict(self.environ) def get_header(self, name, default=None): ''' Return the value of a request header, or a given default value. ''' return self.headers.get(name, default) @DictProperty('environ', 'bottle.request.cookies', read_only=True) def cookies(self): """ Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT decoded. Use :meth:`get_cookie` if you expect signed cookies. """ cookies = SimpleCookie(self.environ.get('HTTP_COOKIE','')).values() return FormsDict((c.key, c.value) for c in cookies) def get_cookie(self, key, default=None, secret=None): """ Return the content of a cookie. To read a `Signed Cookie`, the `secret` must match the one used to create the cookie (see :meth:`BaseResponse.set_cookie`). If anything goes wrong (missing cookie or wrong signature), return a default value. """ value = self.cookies.get(key) if secret and value: dec = cookie_decode(value, secret) # (key, value) tuple or None return dec[1] if dec and dec[0] == key else default return value or default @DictProperty('environ', 'bottle.request.query', read_only=True) def query(self): ''' The :attr:`query_string` parsed into a :class:`FormsDict`. These values are sometimes called "URL arguments" or "GET parameters", but not to be confused with "URL wildcards" as they are provided by the :class:`Router`. ''' get = self.environ['bottle.get'] = FormsDict() pairs = _parse_qsl(self.environ.get('QUERY_STRING', '')) for key, value in pairs: get[key] = value return get @DictProperty('environ', 'bottle.request.forms', read_only=True) def forms(self): """ Form values parsed from an `url-encoded` or `multipart/form-data` encoded POST or PUT request body. The result is returned as a :class:`FormsDict`. All keys and values are strings. File uploads are stored separately in :attr:`files`. """ forms = FormsDict() for name, item in self.POST.allitems(): if not isinstance(item, FileUpload): forms[name] = item return forms @DictProperty('environ', 'bottle.request.params', read_only=True) def params(self): """ A :class:`FormsDict` with the combined values of :attr:`query` and :attr:`forms`. File uploads are stored in :attr:`files`. """ params = FormsDict() for key, value in self.query.allitems(): params[key] = value for key, value in self.forms.allitems(): params[key] = value return params @DictProperty('environ', 'bottle.request.files', read_only=True) def files(self): """ File uploads parsed from `multipart/form-data` encoded POST or PUT request body. The values are instances of :class:`FileUpload`. 
""" files = FormsDict() for name, item in self.POST.allitems(): if isinstance(item, FileUpload): files[name] = item return files @DictProperty('environ', 'bottle.request.json', read_only=True) def json(self): ''' If the ``Content-Type`` header is ``application/json``, this property holds the parsed content of the request body. Only requests smaller than :attr:`MEMFILE_MAX` are processed to avoid memory exhaustion. ''' ctype = self.environ.get('CONTENT_TYPE', '').lower().split(';')[0] if ctype == 'application/json': b = self._get_body_string() if not b: return None return json_loads(b) return None def _iter_body(self, read, bufsize): maxread = max(0, self.content_length) while maxread: part = read(min(maxread, bufsize)) if not part: break yield part maxread -= len(part) def _iter_chunked(self, read, bufsize): err = HTTPError(400, 'Error while parsing chunked transfer body.') rn, sem, bs = tob('\r\n'), tob(';'), tob('') while True: header = read(1) while header[-2:] != rn: c = read(1) header += c if not c: raise err if len(header) > bufsize: raise err size, _, _ = header.partition(sem) try: maxread = int(tonat(size.strip()), 16) except ValueError: raise err if maxread == 0: break buff = bs while maxread > 0: if not buff: buff = read(min(maxread, bufsize)) part, buff = buff[:maxread], buff[maxread:] if not part: raise err yield part maxread -= len(part) if read(2) != rn: raise err @DictProperty('environ', 'bottle.request.body', read_only=True) def _body(self): body_iter = self._iter_chunked if self.chunked else self._iter_body read_func = self.environ['wsgi.input'].read body, body_size, is_temp_file = BytesIO(), 0, False for part in body_iter(read_func, self.MEMFILE_MAX): body.write(part) body_size += len(part) if not is_temp_file and body_size > self.MEMFILE_MAX: body, tmp = TemporaryFile(mode='w+b'), body body.write(tmp.getvalue()) del tmp is_temp_file = True self.environ['wsgi.input'] = body body.seek(0) return body def _get_body_string(self): ''' read body until content-length or MEMFILE_MAX into a string. Raise HTTPError(413) on requests that are to large. ''' clen = self.content_length if clen > self.MEMFILE_MAX: raise HTTPError(413, 'Request to large') if clen < 0: clen = self.MEMFILE_MAX + 1 data = self.body.read(clen) if len(data) > self.MEMFILE_MAX: # Fail fast raise HTTPError(413, 'Request to large') return data @property def body(self): """ The HTTP request body as a seek-able file-like object. Depending on :attr:`MEMFILE_MAX`, this is either a temporary file or a :class:`io.BytesIO` instance. Accessing this property for the first time reads and replaces the ``wsgi.input`` environ variable. Subsequent accesses just do a `seek(0)` on the file object. """ self._body.seek(0) return self._body @property def chunked(self): ''' True if Chunked transfer encoding was. ''' return 'chunked' in self.environ.get('HTTP_TRANSFER_ENCODING', '').lower() #: An alias for :attr:`query`. GET = query @DictProperty('environ', 'bottle.request.post', read_only=True) def POST(self): """ The values of :attr:`forms` and :attr:`files` combined into a single :class:`FormsDict`. Values are either strings (form values) or instances of :class:`cgi.FieldStorage` (file uploads). 
""" post = FormsDict() # We default to application/x-www-form-urlencoded for everything that # is not multipart and take the fast path (also: 3.1 workaround) if not self.content_type.startswith('multipart/'): pairs = _parse_qsl(tonat(self._get_body_string(), 'latin1')) for key, value in pairs: post[key] = value return post safe_env = {'QUERY_STRING':''} # Build a safe environment for cgi for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'): if key in self.environ: safe_env[key] = self.environ[key] args = dict(fp=self.body, environ=safe_env, keep_blank_values=True) if py31: args['fp'] = NCTextIOWrapper(args['fp'], encoding='utf8', newline='\n') elif py3k: args['encoding'] = 'utf8' data = cgi.FieldStorage(**args) self['_cgi.FieldStorage'] = data #http://bugs.python.org/issue18394#msg207958 data = data.list or [] for item in data: if item.filename: post[item.name] = FileUpload(item.file, item.name, item.filename, item.headers) else: post[item.name] = item.value return post @property def url(self): """ The full request URI including hostname and scheme. If your app lives behind a reverse proxy or load balancer and you get confusing results, make sure that the ``X-Forwarded-Host`` header is set correctly. """ return self.urlparts.geturl() @DictProperty('environ', 'bottle.request.urlparts', read_only=True) def urlparts(self): ''' The :attr:`url` string as an :class:`urlparse.SplitResult` tuple. The tuple contains (scheme, host, path, query_string and fragment), but the fragment is always empty because it is not visible to the server. ''' env = self.environ http = env.get('HTTP_X_FORWARDED_PROTO') or env.get('wsgi.url_scheme', 'http') host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST') if not host: # HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients. host = env.get('SERVER_NAME', '127.0.0.1') port = env.get('SERVER_PORT') if port and port != ('80' if http == 'http' else '443'): host += ':' + port path = urlquote(self.fullpath) return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '') @property def fullpath(self): """ Request path including :attr:`script_name` (if present). """ return urljoin(self.script_name, self.path.lstrip('/')) @property def query_string(self): """ The raw :attr:`query` part of the URL (everything in between ``?`` and ``#``) as a string. """ return self.environ.get('QUERY_STRING', '') @property def script_name(self): ''' The initial portion of the URL's `path` that was removed by a higher level (server or routing middleware) before the application was called. This script path is returned with leading and tailing slashes. ''' script_name = self.environ.get('SCRIPT_NAME', '').strip('/') return '/' + script_name + '/' if script_name else '/' def path_shift(self, shift=1): ''' Shift path segments from :attr:`path` to :attr:`script_name` and vice versa. :param shift: The number of path segments to shift. May be negative to change the shift direction. (default: 1) ''' script = self.environ.get('SCRIPT_NAME','/') self['SCRIPT_NAME'], self['PATH_INFO'] = path_shift(script, self.path, shift) @property def content_length(self): ''' The request body length as an integer. The client is responsible to set this header. Otherwise, the real length of the body is unknown and -1 is returned. In this case, :attr:`body` will be empty. ''' return int(self.environ.get('CONTENT_LENGTH') or -1) @property def content_type(self): ''' The Content-Type header as a lowercase-string (default: empty). 
'''
        return self.environ.get('CONTENT_TYPE', '').lower()

    @property
    def is_xhr(self):
        ''' True if the request was triggered by an XMLHttpRequest. This only
            works with JavaScript libraries that support the `X-Requested-With`
            header (most of the popular libraries do). '''
        requested_with = self.environ.get('HTTP_X_REQUESTED_WITH','')
        return requested_with.lower() == 'xmlhttprequest'

    @property
    def is_ajax(self):
        ''' Alias for :attr:`is_xhr`. "Ajax" is not the right term. '''
        return self.is_xhr

    @property
    def auth(self):
        """ HTTP authentication data as a (user, password) tuple. This
            implementation currently supports basic (not digest) authentication
            only. If the authentication happened at a higher level (e.g. in the
            front web-server or a middleware), the password field is None, but
            the user field is looked up from the ``REMOTE_USER`` environ
            variable. On any errors, None is returned. """
        basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION',''))
        if basic: return basic
        ruser = self.environ.get('REMOTE_USER')
        if ruser: return (ruser, None)
        return None

    @property
    def remote_route(self):
        """ A list of all IPs that were involved in this request, starting with
            the client IP and followed by zero or more proxies. This only works
            if all proxies support the ``X-Forwarded-For`` header. Note that
            this information can be forged by malicious clients. """
        proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
        if proxy: return [ip.strip() for ip in proxy.split(',')]
        remote = self.environ.get('REMOTE_ADDR')
        return [remote] if remote else []

    @property
    def remote_addr(self):
        """ The client IP as a string. Note that this information can be forged
            by malicious clients. """
        route = self.remote_route
        return route[0] if route else None

    def copy(self):
        """ Return a new :class:`Request` with a shallow :attr:`environ` copy. """
        return Request(self.environ.copy())

    def get(self, value, default=None): return self.environ.get(value, default)
    def __getitem__(self, key): return self.environ[key]
    def __delitem__(self, key): self[key] = ""; del(self.environ[key])
    def __iter__(self): return iter(self.environ)
    def __len__(self): return len(self.environ)
    def keys(self): return self.environ.keys()
    def __setitem__(self, key, value):
        """ Change an environ value and clear all caches that depend on it. """

        if self.environ.get('bottle.request.readonly'):
            raise KeyError('The environ dictionary is read-only.')

        self.environ[key] = value
        todelete = ()

        if key == 'wsgi.input':
            todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
        elif key == 'QUERY_STRING':
            todelete = ('query', 'params')
        elif key.startswith('HTTP_'):
            todelete = ('headers', 'cookies')

        for key in todelete:
            self.environ.pop('bottle.request.'+key, None)

    def __repr__(self):
        return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url)

    def __getattr__(self, name):
        ''' Search in self.environ for additional user defined attributes. '''
        try:
            var = self.environ['bottle.request.ext.%s'%name]
            return var.__get__(self) if hasattr(var, '__get__') else var
        except KeyError:
            raise AttributeError('Attribute %r not defined.' % name)

    def __setattr__(self, name, value):
        if name == 'environ': return object.__setattr__(self, name, value)
        self.environ['bottle.request.ext.%s'%name] = value


def _hkey(s):
    return s.title().replace('_','-')


class HeaderProperty(object):
    def __init__(self, name, reader=None, writer=str, default=''):
        self.name, self.default = name, default
        self.reader, self.writer = reader, writer
        self.__doc__ = 'Current value of the %r header.'
% name.title() def __get__(self, obj, cls): if obj is None: return self value = obj.headers.get(self.name, self.default) return self.reader(value) if self.reader else value def __set__(self, obj, value): obj.headers[self.name] = self.writer(value) def __delete__(self, obj): del obj.headers[self.name] class BaseResponse(object): """ Storage class for a response body as well as headers and cookies. This class does support dict-like case-insensitive item-access to headers, but is NOT a dict. Most notably, iterating over a response yields parts of the body and not the headers. :param body: The response body as one of the supported types. :param status: Either an HTTP status code (e.g. 200) or a status line including the reason phrase (e.g. '200 OK'). :param headers: A dictionary or a list of name-value pairs. Additional keyword arguments are added to the list of headers. Underscores in the header name are replaced with dashes. """ default_status = 200 default_content_type = 'text/html; charset=UTF-8' # Header blacklist for specific response codes # (rfc2616 section 10.2.3 and 10.3.5) bad_headers = { 204: set(('Content-Type',)), 304: set(('Allow', 'Content-Encoding', 'Content-Language', 'Content-Length', 'Content-Range', 'Content-Type', 'Content-Md5', 'Last-Modified'))} def __init__(self, body='', status=None, headers=None, **more_headers): self._cookies = None self._headers = {} self.body = body self.status = status or self.default_status if headers: if isinstance(headers, dict): headers = headers.items() for name, value in headers: self.add_header(name, value) if more_headers: for name, value in more_headers.items(): self.add_header(name, value) def copy(self, cls=None): ''' Returns a copy of self. ''' cls = cls or BaseResponse assert issubclass(cls, BaseResponse) copy = cls() copy.status = self.status copy._headers = dict((k, v[:]) for (k, v) in self._headers.items()) if self._cookies: copy._cookies = SimpleCookie() copy._cookies.load(self._cookies.output(header='')) return copy def __iter__(self): return iter(self.body) def close(self): if hasattr(self.body, 'close'): self.body.close() @property def status_line(self): ''' The HTTP status line as a string (e.g. ``404 Not Found``).''' return self._status_line @property def status_code(self): ''' The HTTP status code as an integer (e.g. 404).''' return self._status_code def _set_status(self, status): if isinstance(status, int): code, status = status, _HTTP_STATUS_LINES.get(status) elif ' ' in status: status = status.strip() code = int(status.split()[0]) else: raise ValueError('String status line without a reason phrase.') if not 100 <= code <= 999: raise ValueError('Status code out of range.') self._status_code = code self._status_line = str(status or ('%d Unknown' % code)) def _get_status(self): return self._status_line status = property(_get_status, _set_status, None, ''' A writeable property to change the HTTP response status. It accepts either a numeric code (100-999) or a string with a custom reason phrase (e.g. "404 Brain not found"). Both :data:`status_line` and :data:`status_code` are updated accordingly. The return value is always a status string. ''') del _get_status, _set_status @property def headers(self): ''' An instance of :class:`HeaderDict`, a case-insensitive dict-like view on the response headers. 
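            Writes through this view modify the response directly; a short
            sketch with illustrative header values::

                response.headers['X-Frame-Options'] = 'DENY'
                ctype = response.headers.get('Content-Type')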
''' hdict = HeaderDict() hdict.dict = self._headers return hdict def __contains__(self, name): return _hkey(name) in self._headers def __delitem__(self, name): del self._headers[_hkey(name)] def __getitem__(self, name): return self._headers[_hkey(name)][-1] def __setitem__(self, name, value): self._headers[_hkey(name)] = [str(value)] def get_header(self, name, default=None): ''' Return the value of a previously defined header. If there is no header with that name, return a default value. ''' return self._headers.get(_hkey(name), [default])[-1] def set_header(self, name, value): ''' Create a new response header, replacing any previously defined headers with the same name. ''' self._headers[_hkey(name)] = [str(value)] def add_header(self, name, value): ''' Add an additional response header, not removing duplicates. ''' self._headers.setdefault(_hkey(name), []).append(str(value)) def iter_headers(self): ''' Yield (header, value) tuples, skipping headers that are not allowed with the current response status code. ''' return self.headerlist @property def headerlist(self): ''' WSGI conform list of (header, value) tuples. ''' out = [] headers = list(self._headers.items()) if 'Content-Type' not in self._headers: headers.append(('Content-Type', [self.default_content_type])) if self._status_code in self.bad_headers: bad_headers = self.bad_headers[self._status_code] headers = [h for h in headers if h[0] not in bad_headers] out += [(name, val) for name, vals in headers for val in vals] if self._cookies: for c in self._cookies.values(): out.append(('Set-Cookie', c.OutputString())) return out content_type = HeaderProperty('Content-Type') content_length = HeaderProperty('Content-Length', reader=int) expires = HeaderProperty('Expires', reader=lambda x: datetime.utcfromtimestamp(parse_date(x)), writer=lambda x: http_date(x)) @property def charset(self, default='UTF-8'): """ Return the charset specified in the content-type header (default: utf8). """ if 'charset=' in self.content_type: return self.content_type.split('charset=')[-1].split(';')[0].strip() return default def set_cookie(self, name, value, secret=None, **options): ''' Create a new cookie or replace an old one. If the `secret` parameter is set, create a `Signed Cookie` (described below). :param name: the name of the cookie. :param value: the value of the cookie. :param secret: a signature key required for signed cookies. Additionally, this method accepts all RFC 2109 attributes that are supported by :class:`cookie.Morsel`, including: :param max_age: maximum age in seconds. (default: None) :param expires: a datetime object or UNIX timestamp. (default: None) :param domain: the domain that is allowed to read the cookie. (default: current domain) :param path: limits the cookie to a given path (default: current path) :param secure: limit the cookie to HTTPS connections (default: off). :param httponly: prevents client-side javascript to read this cookie (default: off, requires Python 2.6 or newer). If neither `expires` nor `max_age` is set (default), the cookie will expire at the end of the browser session (as soon as the browser window is closed). Signed cookies may store any pickle-able object and are cryptographically signed to prevent manipulation. Keep in mind that cookies are limited to 4kb in most browsers. Warning: Signed cookies are not encrypted (the client can still see the content) and not copy-protected (the client can restore an old cookie). 
The main intention is to make pickling and unpickling safe, not to
            store secret information at the client side.
        '''
        if not self._cookies:
            self._cookies = SimpleCookie()

        if secret:
            value = touni(cookie_encode((name, value), secret))
        elif not isinstance(value, basestring):
            raise TypeError('Secret key missing for non-string Cookie.')

        if len(value) > 4096: raise ValueError('Cookie value too long.')
        self._cookies[name] = value

        for key, value in options.items():
            if key == 'max_age':
                if isinstance(value, timedelta):
                    value = value.seconds + value.days * 24 * 3600
            if key == 'expires':
                if isinstance(value, (datedate, datetime)):
                    value = value.timetuple()
                elif isinstance(value, (int, float)):
                    value = time.gmtime(value)
                value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
            self._cookies[name][key.replace('_', '-')] = value

    def delete_cookie(self, key, **kwargs):
        ''' Delete a cookie. Be sure to use the same `domain` and `path`
            settings as used to create the cookie. '''
        kwargs['max_age'] = -1
        kwargs['expires'] = 0
        self.set_cookie(key, '', **kwargs)

    def __repr__(self):
        out = ''
        for name, value in self.headerlist:
            out += '%s: %s\n' % (name.title(), value.strip())
        return out


def local_property(name=None):
    if name: depr('local_property() is deprecated and will be removed.') #0.12
    ls = threading.local()
    def fget(self):
        try: return ls.var
        except AttributeError:
            raise RuntimeError("Request context not initialized.")
    def fset(self, value): ls.var = value
    def fdel(self): del ls.var
    return property(fget, fset, fdel, 'Thread-local property')


class LocalRequest(BaseRequest):
    ''' A thread-local subclass of :class:`BaseRequest` with a different
        set of attributes for each thread. There is usually only one global
        instance of this class (:data:`request`). If accessed during a
        request/response cycle, this instance always refers to the *current*
        request (even on a multithreaded server). '''
    bind = BaseRequest.__init__
    environ = local_property()


class LocalResponse(BaseResponse):
    ''' A thread-local subclass of :class:`BaseResponse` with a different
        set of attributes for each thread. There is usually only one global
        instance of this class (:data:`response`). Its attributes are used
        to build the HTTP response at the end of the request/response cycle.
''' bind = BaseResponse.__init__ _status_line = local_property() _status_code = local_property() _cookies = local_property() _headers = local_property() body = local_property() Request = BaseRequest Response = BaseResponse class HTTPResponse(Response, BottleException): def __init__(self, body='', status=None, headers=None, **more_headers): super(HTTPResponse, self).__init__(body, status, headers, **more_headers) def apply(self, response): response._status_code = self._status_code response._status_line = self._status_line response._headers = self._headers response._cookies = self._cookies response.body = self.body class HTTPError(HTTPResponse): default_status = 500 def __init__(self, status=None, body=None, exception=None, traceback=None, **options): self.exception = exception self.traceback = traceback super(HTTPError, self).__init__(body, status, **options) ############################################################################### # Plugins ###################################################################### ############################################################################### class PluginError(BottleException): pass class JSONPlugin(object): name = 'json' api = 2 def __init__(self, json_dumps=json_dumps): self.json_dumps = json_dumps def apply(self, callback, route): dumps = self.json_dumps if not dumps: return callback def wrapper(*a, **ka): try: rv = callback(*a, **ka) except HTTPError: rv = _e() if isinstance(rv, dict): #Attempt to serialize, raises exception on failure json_response = dumps(rv) #Set content type only if serialization succesful response.content_type = 'application/json' return json_response elif isinstance(rv, HTTPResponse) and isinstance(rv.body, dict): rv.body = dumps(rv.body) rv.content_type = 'application/json' return rv return wrapper class TemplatePlugin(object): ''' This plugin applies the :func:`view` decorator to all routes with a `template` config parameter. If the parameter is a tuple, the second element must be a dict with additional options (e.g. `template_engine`) or default variables for the template. ''' name = 'template' api = 2 def apply(self, callback, route): conf = route.config.get('template') if isinstance(conf, (tuple, list)) and len(conf) == 2: return view(conf[0], **conf[1])(callback) elif isinstance(conf, str): return view(conf)(callback) else: return callback #: Not a plugin, but part of the plugin API. TODO: Find a better place. class _ImportRedirect(object): def __init__(self, name, impmask): ''' Create a virtual package that redirects imports (see PEP 302). ''' self.name = name self.impmask = impmask self.module = sys.modules.setdefault(name, imp.new_module(name)) self.module.__dict__.update({'__file__': __file__, '__path__': [], '__all__': [], '__loader__': self}) sys.meta_path.append(self) def find_module(self, fullname, path=None): if '.' 
not in fullname: return packname = fullname.rsplit('.', 1)[0] if packname != self.name: return return self def load_module(self, fullname): if fullname in sys.modules: return sys.modules[fullname] modname = fullname.rsplit('.', 1)[1] realname = self.impmask % modname __import__(realname) module = sys.modules[fullname] = sys.modules[realname] setattr(self.module, modname, module) module.__loader__ = self return module ############################################################################### # Common Utilities ############################################################# ############################################################################### class MultiDict(DictMixin): """ This dict stores multiple values per key, but behaves exactly like a normal dict in that it returns only the newest value for any given key. There are special methods available to access the full list of values. """ def __init__(self, *a, **k): self.dict = dict((k, [v]) for (k, v) in dict(*a, **k).items()) def __len__(self): return len(self.dict) def __iter__(self): return iter(self.dict) def __contains__(self, key): return key in self.dict def __delitem__(self, key): del self.dict[key] def __getitem__(self, key): return self.dict[key][-1] def __setitem__(self, key, value): self.append(key, value) def keys(self): return self.dict.keys() if py3k: def values(self): return (v[-1] for v in self.dict.values()) def items(self): return ((k, v[-1]) for k, v in self.dict.items()) def allitems(self): return ((k, v) for k, vl in self.dict.items() for v in vl) iterkeys = keys itervalues = values iteritems = items iterallitems = allitems else: def values(self): return [v[-1] for v in self.dict.values()] def items(self): return [(k, v[-1]) for k, v in self.dict.items()] def iterkeys(self): return self.dict.iterkeys() def itervalues(self): return (v[-1] for v in self.dict.itervalues()) def iteritems(self): return ((k, v[-1]) for k, v in self.dict.iteritems()) def iterallitems(self): return ((k, v) for k, vl in self.dict.iteritems() for v in vl) def allitems(self): return [(k, v) for k, vl in self.dict.iteritems() for v in vl] def get(self, key, default=None, index=-1, type=None): ''' Return the most recent value for a key. :param default: The default value to be returned if the key is not present or the type conversion fails. :param index: An index for the list of available values. :param type: If defined, this callable is used to cast the value into a specific type. Exception are suppressed and result in the default value to be returned. ''' try: val = self.dict[key][index] return type(val) if type else val except Exception: pass return default def append(self, key, value): ''' Add a new value to the list of values for this key. ''' self.dict.setdefault(key, []).append(value) def replace(self, key, value): ''' Replace the list of values with a single value. ''' self.dict[key] = [value] def getall(self, key): ''' Return a (possibly empty) list of values for a key. ''' return self.dict.get(key) or [] #: Aliases for WTForms to mimic other multi-dict APIs (Django) getone = get getlist = getall class FormsDict(MultiDict): ''' This :class:`MultiDict` subclass is used to store request form data. Additionally to the normal dict-like item access methods (which return unmodified data as native strings), this container also supports attribute-like access to its values. Attributes are automatically de- or recoded to match :attr:`input_encoding` (default: 'utf8'). Missing attributes default to an empty string. 
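        A short attribute-access sketch (``name`` is an illustrative field)::

            value = request.forms.name                  # decoded, '' if missing
            raw   = request.forms.get('name')           # unmodified native string
            safe  = request.forms.getunicode('name', default='')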
''' #: Encoding used for attribute values. input_encoding = 'utf8' #: If true (default), unicode strings are first encoded with `latin1` #: and then decoded to match :attr:`input_encoding`. recode_unicode = True def _fix(self, s, encoding=None): if isinstance(s, unicode) and self.recode_unicode: # Python 3 WSGI return s.encode('latin1').decode(encoding or self.input_encoding) elif isinstance(s, bytes): # Python 2 WSGI return s.decode(encoding or self.input_encoding) else: return s def decode(self, encoding=None): ''' Returns a copy with all keys and values de- or recoded to match :attr:`input_encoding`. Some libraries (e.g. WTForms) want a unicode dictionary. ''' copy = FormsDict() enc = copy.input_encoding = encoding or self.input_encoding copy.recode_unicode = False for key, value in self.allitems(): copy.append(self._fix(key, enc), self._fix(value, enc)) return copy def getunicode(self, name, default=None, encoding=None): ''' Return the value as a unicode string, or the default. ''' try: return self._fix(self[name], encoding) except (UnicodeError, KeyError): return default def __getattr__(self, name, default=unicode()): # Without this guard, pickle generates a cryptic TypeError: if name.startswith('__') and name.endswith('__'): return super(FormsDict, self).__getattr__(name) return self.getunicode(name, default=default) class HeaderDict(MultiDict): """ A case-insensitive version of :class:`MultiDict` that defaults to replace the old value instead of appending it. """ def __init__(self, *a, **ka): self.dict = {} if a or ka: self.update(*a, **ka) def __contains__(self, key): return _hkey(key) in self.dict def __delitem__(self, key): del self.dict[_hkey(key)] def __getitem__(self, key): return self.dict[_hkey(key)][-1] def __setitem__(self, key, value): self.dict[_hkey(key)] = [str(value)] def append(self, key, value): self.dict.setdefault(_hkey(key), []).append(str(value)) def replace(self, key, value): self.dict[_hkey(key)] = [str(value)] def getall(self, key): return self.dict.get(_hkey(key)) or [] def get(self, key, default=None, index=-1): return MultiDict.get(self, _hkey(key), default, index) def filter(self, names): for name in [_hkey(n) for n in names]: if name in self.dict: del self.dict[name] class WSGIHeaderDict(DictMixin): ''' This dict-like class wraps a WSGI environ dict and provides convenient access to HTTP_* fields. Keys and values are native strings (2.x bytes or 3.x unicode) and keys are case-insensitive. If the WSGI environment contains non-native string values, these are de- or encoded using a lossless 'latin1' character set. The API will remain stable even on changes to the relevant PEPs. Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one that uses non-native strings.) ''' #: List of keys that do not have a ``HTTP_`` prefix. cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH') def __init__(self, environ): self.environ = environ def _ekey(self, key): ''' Translate header field name to CGI/WSGI environ key. ''' key = key.replace('-','_').upper() if key in self.cgikeys: return key return 'HTTP_' + key def raw(self, key, default=None): ''' Return the header value as is (may be bytes or unicode). ''' return self.environ.get(self._ekey(key), default) def __getitem__(self, key): return tonat(self.environ[self._ekey(key)], 'latin1') def __setitem__(self, key, value): raise TypeError("%s is read-only." % self.__class__) def __delitem__(self, key): raise TypeError("%s is read-only." 
% self.__class__) def __iter__(self): for key in self.environ: if key[:5] == 'HTTP_': yield key[5:].replace('_', '-').title() elif key in self.cgikeys: yield key.replace('_', '-').title() def keys(self): return [x for x in self] def __len__(self): return len(self.keys()) def __contains__(self, key): return self._ekey(key) in self.environ class ConfigDict(dict): ''' A dict-like configuration storage with additional support for namespaces, validators, meta-data, on_change listeners and more. This storage is optimized for fast read access. Retrieving a key or using non-altering dict methods (e.g. `dict.get()`) has no overhead compared to a native dict. ''' __slots__ = ('_meta', '_on_change') class Namespace(DictMixin): def __init__(self, config, namespace): self._config = config self._prefix = namespace def __getitem__(self, key): depr('Accessing namespaces as dicts is discouraged. ' 'Only use flat item access: ' 'cfg["names"]["pace"]["key"] -> cfg["name.space.key"]') #0.12 return self._config[self._prefix + '.' + key] def __setitem__(self, key, value): self._config[self._prefix + '.' + key] = value def __delitem__(self, key): del self._config[self._prefix + '.' + key] def __iter__(self): ns_prefix = self._prefix + '.' for key in self._config: ns, dot, name = key.rpartition('.') if ns == self._prefix and name: yield name def keys(self): return [x for x in self] def __len__(self): return len(self.keys()) def __contains__(self, key): return self._prefix + '.' + key in self._config def __repr__(self): return '<Config.Namespace %s.*>' % self._prefix def __str__(self): return '<Config.Namespace %s.*>' % self._prefix # Deprecated ConfigDict features def __getattr__(self, key): depr('Attribute access is deprecated.') #0.12 if key not in self and key[0].isupper(): self[key] = ConfigDict.Namespace(self._config, self._prefix + '.' + key) if key not in self and key.startswith('__'): raise AttributeError(key) return self.get(key) def __setattr__(self, key, value): if key in ('_config', '_prefix'): self.__dict__[key] = value return depr('Attribute assignment is deprecated.') #0.12 if hasattr(DictMixin, key): raise AttributeError('Read-only attribute.') if key in self and self[key] and isinstance(self[key], self.__class__): raise AttributeError('Non-empty namespace attribute.') self[key] = value def __delattr__(self, key): if key in self: val = self.pop(key) if isinstance(val, self.__class__): prefix = key + '.' for key in self: if key.startswith(prefix): del self[prefix+key] def __call__(self, *a, **ka): depr('Calling ConfDict is deprecated. Use the update() method.') #0.12 self.update(*a, **ka) return self def __init__(self, *a, **ka): self._meta = {} self._on_change = lambda name, value: None if a or ka: depr('Constructor does no longer accept parameters.') #0.12 self.update(*a, **ka) def load_config(self, filename): ''' Load values from an *.ini style config file. If the config file contains sections, their names are used as namespaces for the values within. The two special sections ``DEFAULT`` and ``bottle`` refer to the root namespace (no prefix). ''' conf = ConfigParser() conf.read(filename) for section in conf.sections(): for key, value in conf.items(section): if section not in ('DEFAULT', 'bottle'): key = section + '.' + key self[key] = value return self def load_dict(self, source, namespace='', make_namespaces=False): ''' Import values from a dictionary structure. Nesting can be used to represent namespaces. 
>>> ConfigDict().load_dict({'name': {'space': {'key': 'value'}}})
            {'name.space.key': 'value'}
        '''
        stack = [(namespace, source)]
        while stack:
            prefix, source = stack.pop()
            if not isinstance(source, dict):
                raise TypeError('Source is not a dict (%r)' % type(source))
            for key, value in source.items():
                if not isinstance(key, basestring):
                    raise TypeError('Key is not a string (%r)' % type(key))
                full_key = prefix + '.' + key if prefix else key
                if isinstance(value, dict):
                    stack.append((full_key, value))
                    if make_namespaces:
                        self[full_key] = self.Namespace(self, full_key)
                else:
                    self[full_key] = value
        return self

    def update(self, *a, **ka):
        ''' If the first parameter is a string, all keys are prefixed with this
            namespace. Apart from that it works just as the usual dict.update().
            Example: ``update('some.namespace', key='value')`` '''
        prefix = ''
        if a and isinstance(a[0], basestring):
            prefix = a[0].strip('.') + '.'
            a = a[1:]
        for key, value in dict(*a, **ka).items():
            self[prefix+key] = value

    def setdefault(self, key, value):
        if key not in self:
            self[key] = value
        return self[key]

    def __setitem__(self, key, value):
        if not isinstance(key, basestring):
            raise TypeError('Key has type %r (not a string)' % type(key))
        value = self.meta_get(key, 'filter', lambda x: x)(value)
        if key in self and self[key] is value:
            return
        self._on_change(key, value)
        dict.__setitem__(self, key, value)

    def __delitem__(self, key):
        dict.__delitem__(self, key)

    def clear(self):
        for key in list(self): # copy the keys: we delete while iterating
            del self[key]

    def meta_get(self, key, metafield, default=None):
        ''' Return the value of a meta field for a key. '''
        return self._meta.get(key, {}).get(metafield, default)

    def meta_set(self, key, metafield, value):
        ''' Set the meta field for a key to a new value. This triggers the
            on-change handler for existing keys. '''
        self._meta.setdefault(key, {})[metafield] = value
        if key in self:
            self[key] = self[key]

    def meta_list(self, key):
        ''' Return an iterable of meta field names defined for a key. '''
        return self._meta.get(key, {}).keys()

    # Deprecated ConfigDict features
    def __getattr__(self, key):
        depr('Attribute access is deprecated.') #0.12
        if key not in self and key[0].isupper():
            self[key] = self.Namespace(self, key)
        if key not in self and key.startswith('__'):
            raise AttributeError(key)
        return self.get(key)

    def __setattr__(self, key, value):
        if key in self.__slots__:
            return dict.__setattr__(self, key, value)
        depr('Attribute assignment is deprecated.') #0.12
        if hasattr(dict, key):
            raise AttributeError('Read-only attribute.')
        if key in self and self[key] and isinstance(self[key], self.Namespace):
            raise AttributeError('Non-empty namespace attribute.')
        self[key] = value

    def __delattr__(self, key):
        if key in self:
            val = self.pop(key)
            if isinstance(val, self.Namespace):
                prefix = key + '.'
                for key in self:
                    if key.startswith(prefix):
                        del self[prefix+key]

    def __call__(self, *a, **ka):
        depr('Calling ConfDict is deprecated. Use the update() method.') #0.12
        self.update(*a, **ka)
        return self


class AppStack(list):
    """ A stack-like list. Calling it returns the head of the stack. """

    def __call__(self):
        """ Return the current default application.
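            Usage sketch (assuming ``app`` is the module-level
            :class:`AppStack` instance)::

                current = app()    # same object as app[-1]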
""" return self[-1] def push(self, value=None): """ Add a new :class:`Bottle` instance to the stack """ if not isinstance(value, Bottle): value = Bottle() self.append(value) return value class WSGIFileWrapper(object): def __init__(self, fp, buffer_size=1024*64): self.fp, self.buffer_size = fp, buffer_size for attr in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'): if hasattr(fp, attr): setattr(self, attr, getattr(fp, attr)) def __iter__(self): buff, read = self.buffer_size, self.read while True: part = read(buff) if not part: return yield part class _closeiter(object): ''' This only exists to be able to attach a .close method to iterators that do not support attribute assignment (most of itertools). ''' def __init__(self, iterator, close=None): self.iterator = iterator self.close_callbacks = makelist(close) def __iter__(self): return iter(self.iterator) def close(self): for func in self.close_callbacks: func() class ResourceManager(object): ''' This class manages a list of search paths and helps to find and open application-bound resources (files). :param base: default value for :meth:`add_path` calls. :param opener: callable used to open resources. :param cachemode: controls which lookups are cached. One of 'all', 'found' or 'none'. ''' def __init__(self, base='./', opener=open, cachemode='all'): self.opener = open self.base = base self.cachemode = cachemode #: A list of search paths. See :meth:`add_path` for details. self.path = [] #: A cache for resolved paths. ``res.cache.clear()`` clears the cache. self.cache = {} def add_path(self, path, base=None, index=None, create=False): ''' Add a new path to the list of search paths. Return False if the path does not exist. :param path: The new search path. Relative paths are turned into an absolute and normalized form. If the path looks like a file (not ending in `/`), the filename is stripped off. :param base: Path used to absolutize relative search paths. Defaults to :attr:`base` which defaults to ``os.getcwd()``. :param index: Position within the list of search paths. Defaults to last index (appends to the list). The `base` parameter makes it easy to reference files installed along with a python module or package:: res.add_path('./resources/', __file__) ''' base = os.path.abspath(os.path.dirname(base or self.base)) path = os.path.abspath(os.path.join(base, os.path.dirname(path))) path += os.sep if path in self.path: self.path.remove(path) if create and not os.path.isdir(path): os.makedirs(path) if index is None: self.path.append(path) else: self.path.insert(index, path) self.cache.clear() return os.path.exists(path) def __iter__(self): ''' Iterate over all existing files in all registered paths. ''' search = self.path[:] while search: path = search.pop() if not os.path.isdir(path): continue for name in os.listdir(path): full = os.path.join(path, name) if os.path.isdir(full): search.append(full) else: yield full def lookup(self, name): ''' Search for a resource and return an absolute file path, or `None`. The :attr:`path` list is searched in order. The first match is returend. Symlinks are followed. The result is cached to speed up future lookups. ''' if name not in self.cache or DEBUG: for path in self.path: fpath = os.path.join(path, name) if os.path.isfile(fpath): if self.cachemode in ('all', 'found'): self.cache[name] = fpath return fpath if self.cachemode == 'all': self.cache[name] = None return self.cache[name] def open(self, name, mode='r', *args, **kwargs): ''' Find a resource and return a file object, or raise IOError. 
''' fname = self.lookup(name) if not fname: raise IOError("Resource %r not found." % name) return self.opener(fname, mode=mode, *args, **kwargs) class FileUpload(object): def __init__(self, fileobj, name, filename, headers=None): ''' Wrapper for file uploads. ''' #: Open file(-like) object (BytesIO buffer or temporary file) self.file = fileobj #: Name of the upload form field self.name = name #: Raw filename as sent by the client (may contain unsafe characters) self.raw_filename = filename #: A :class:`HeaderDict` with additional headers (e.g. content-type) self.headers = HeaderDict(headers) if headers else HeaderDict() content_type = HeaderProperty('Content-Type') content_length = HeaderProperty('Content-Length', reader=int, default=-1) @cached_property def filename(self): ''' Name of the file on the client file system, but normalized to ensure file system compatibility. An empty filename is returned as 'empty'. Only ASCII letters, digits, dashes, underscores and dots are allowed in the final filename. Accents are removed, if possible. Whitespace is replaced by a single dash. Leading or tailing dots or dashes are removed. The filename is limited to 255 characters. ''' fname = self.raw_filename if not isinstance(fname, unicode): fname = fname.decode('utf8', 'ignore') fname = normalize('NFKD', fname).encode('ASCII', 'ignore').decode('ASCII') fname = os.path.basename(fname.replace('\\', os.path.sep)) fname = re.sub(r'[^a-zA-Z0-9-_.\s]', '', fname).strip() fname = re.sub(r'[-\s]+', '-', fname).strip('.-') return fname[:255] or 'empty' def _copy_file(self, fp, chunk_size=2**16): read, write, offset = self.file.read, fp.write, self.file.tell() while 1: buf = read(chunk_size) if not buf: break write(buf) self.file.seek(offset) def save(self, destination, overwrite=False, chunk_size=2**16): ''' Save file to disk or copy its content to an open file(-like) object. If *destination* is a directory, :attr:`filename` is added to the path. Existing files are not overwritten by default (IOError). :param destination: File path, directory or file(-like) object. :param overwrite: If True, replace existing files. (default: False) :param chunk_size: Bytes to read at a time. (default: 64kb) ''' if isinstance(destination, basestring): # Except file-likes here if os.path.isdir(destination): destination = os.path.join(destination, self.filename) if not overwrite and os.path.exists(destination): raise IOError('File exists.') with open(destination, 'wb') as fp: self._copy_file(fp, chunk_size) else: self._copy_file(destination, chunk_size) ############################################################################### # Application Helper ########################################################### ############################################################################### def abort(code=500, text='Unknown Error.'): """ Aborts execution and causes a HTTP error. """ raise HTTPError(code, text) def redirect(url, code=None): """ Aborts execution and causes a 303 or 302 redirect, depending on the HTTP protocol version. """ if not code: code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302 res = response.copy(cls=HTTPResponse) res.status = code res.body = "" res.set_header('Location', urljoin(request.url, url)) raise res def _file_iter_range(fp, offset, bytes, maxread=1024*1024): ''' Yield chunks from a range in a file. 
No chunk is bigger than maxread.'''
    fp.seek(offset)
    while bytes > 0:
        part = fp.read(min(bytes, maxread))
        if not part: break
        bytes -= len(part)
        yield part


def static_file(filename, root, mimetype='auto', download=False, charset='UTF-8'):
    """ Open a file in a safe way and return :exc:`HTTPResponse` with status
        code 200, 304, 403 or 404. The ``Content-Type``, ``Content-Encoding``,
        ``Content-Length`` and ``Last-Modified`` headers are set if possible.
        Special support for ``If-Modified-Since``, ``Range`` and ``HEAD``
        requests.

        :param filename: Name or path of the file to send.
        :param root: Root path for file lookups. Should be an absolute directory
            path.
        :param mimetype: Defines the content-type header (default: guess from
            file extension)
        :param download: If True, ask the browser to open a `Save as...` dialog
            instead of opening the file with the associated program. You can
            specify a custom filename as a string. If not specified, the
            original filename is used (default: False).
        :param charset: The charset to use for files with a ``text/*``
            mime-type. (default: UTF-8)
    """

    root = os.path.abspath(root) + os.sep
    filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
    headers = dict()

    if not filename.startswith(root):
        return HTTPError(403, "Access denied.")
    if not os.path.exists(filename) or not os.path.isfile(filename):
        return HTTPError(404, "File does not exist.")
    if not os.access(filename, os.R_OK):
        return HTTPError(403, "You do not have permission to access this file.")

    if mimetype == 'auto':
        mimetype, encoding = mimetypes.guess_type(filename)
        if encoding: headers['Content-Encoding'] = encoding

    if mimetype:
        if mimetype[:5] == 'text/' and charset and 'charset' not in mimetype:
            mimetype += '; charset=%s' % charset
        headers['Content-Type'] = mimetype

    if download:
        download = os.path.basename(filename if download == True else download)
        headers['Content-Disposition'] = 'attachment; filename="%s"' % download

    stats = os.stat(filename)
    headers['Content-Length'] = clen = stats.st_size
    lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
    headers['Last-Modified'] = lm

    ims = request.environ.get('HTTP_IF_MODIFIED_SINCE')
    if ims:
        ims = parse_date(ims.split(";")[0].strip())
    if ims is not None and ims >= int(stats.st_mtime):
        headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
        return HTTPResponse(status=304, **headers)

    body = '' if request.method == 'HEAD' else open(filename, 'rb')

    headers["Accept-Ranges"] = "bytes"
    ranges = request.environ.get('HTTP_RANGE')
    if 'HTTP_RANGE' in request.environ:
        ranges = list(parse_range_header(request.environ['HTTP_RANGE'], clen))
        if not ranges:
            return HTTPError(416, "Requested Range Not Satisfiable")
        offset, end = ranges[0]
        headers["Content-Range"] = "bytes %d-%d/%d" % (offset, end-1, clen)
        headers["Content-Length"] = str(end-offset)
        if body: body = _file_iter_range(body, offset, end-offset)
        return HTTPResponse(body, status=206, **headers)
    return HTTPResponse(body, **headers)


###############################################################################
# HTTP Utilities and MISC (TODO) ###############################################
###############################################################################


def debug(mode=True):
    """ Change the debug level.
There is only one debug level supported at the moment.""" global DEBUG if mode: warnings.simplefilter('default') DEBUG = bool(mode) def http_date(value): if isinstance(value, (datedate, datetime)): value = value.utctimetuple() elif isinstance(value, (int, float)): value = time.gmtime(value) if not isinstance(value, basestring): value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value) return value def parse_date(ims): """ Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """ try: ts = email.utils.parsedate_tz(ims) return time.mktime(ts[:8] + (0,)) - (ts[9] or 0) - time.timezone except (TypeError, ValueError, IndexError, OverflowError): return None def parse_auth(header): """ Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None""" try: method, data = header.split(None, 1) if method.lower() == 'basic': user, pwd = touni(base64.b64decode(tob(data))).split(':',1) return user, pwd except (KeyError, ValueError): return None def parse_range_header(header, maxlen=0): ''' Yield (start, end) ranges parsed from a HTTP Range header. Skip unsatisfiable ranges. The end index is non-inclusive.''' if not header or header[:6] != 'bytes=': return ranges = [r.split('-', 1) for r in header[6:].split(',') if '-' in r] for start, end in ranges: try: if not start: # bytes=-100 -> last 100 bytes start, end = max(0, maxlen-int(end)), maxlen elif not end: # bytes=100- -> all but the first 99 bytes start, end = int(start), maxlen else: # bytes=100-200 -> bytes 100-200 (inclusive) start, end = int(start), min(int(end)+1, maxlen) if 0 <= start < end <= maxlen: yield start, end except ValueError: pass def _parse_qsl(qs): r = [] for pair in qs.replace(';','&').split('&'): if not pair: continue nv = pair.split('=', 1) if len(nv) != 2: nv.append('') key = urlunquote(nv[0].replace('+', ' ')) value = urlunquote(nv[1].replace('+', ' ')) r.append((key, value)) return r def _lscmp(a, b): ''' Compares two strings in a cryptographically safe way: Runtime is not affected by length of common prefix. ''' return not sum(0 if x==y else 1 for x, y in zip(a, b)) and len(a) == len(b) def cookie_encode(data, key): ''' Encode and sign a pickle-able object. Return a (byte) string ''' msg = base64.b64encode(pickle.dumps(data, -1)) sig = base64.b64encode(hmac.new(tob(key), msg).digest()) return tob('!') + sig + tob('?') + msg def cookie_decode(data, key): ''' Verify and decode an encoded string. Return an object or None.''' data = tob(data) if cookie_is_encoded(data): sig, msg = data.split(tob('?'), 1) if _lscmp(sig[1:], base64.b64encode(hmac.new(tob(key), msg).digest())): return pickle.loads(base64.b64decode(msg)) return None def cookie_is_encoded(data): ''' Return True if the argument looks like a encoded cookie.''' return bool(data.startswith(tob('!')) and tob('?') in data) def html_escape(string): ''' Escape HTML special characters ``&<>`` and quotes ``'"``. ''' return string.replace('&','&amp;').replace('<','&lt;').replace('>','&gt;')\ .replace('"','&quot;').replace("'",'&#039;') def html_quote(string): ''' Escape and quote a string to be used as an HTTP attribute.''' return '"%s"' % html_escape(string).replace('\n','&#10;')\ .replace('\r','&#13;').replace('\t','&#9;') def yieldroutes(func): """ Return a generator for routes that match the signature (name, args) of the func parameter. This may yield more than one route if the function takes optional keyword arguments. 
The output is best described by example::

            a()         -> '/a'
            b(x, y)     -> '/b/<x>/<y>'
            c(x, y=5)   -> '/c/<x>' and '/c/<x>/<y>'
            d(x=5, y=6) -> '/d' and '/d/<x>' and '/d/<x>/<y>'
    """
    path = '/' + func.__name__.replace('__','/').lstrip('/')
    spec = getargspec(func)
    argc = len(spec[0]) - len(spec[3] or [])
    path += ('/<%s>' * argc) % tuple(spec[0][:argc])
    yield path
    for arg in spec[0][argc:]:
        path += '/<%s>' % arg
        yield path


def path_shift(script_name, path_info, shift=1):
    ''' Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.

        :return: The modified paths.
        :param script_name: The SCRIPT_NAME path.
        :param path_info: The PATH_INFO path.
        :param shift: The number of path fragments to shift. May be negative
            to change the shift direction. (default: 1)
    '''
    if shift == 0: return script_name, path_info
    pathlist = path_info.strip('/').split('/')
    scriptlist = script_name.strip('/').split('/')
    if pathlist and pathlist[0] == '': pathlist = []
    if scriptlist and scriptlist[0] == '': scriptlist = []
    if shift > 0 and shift <= len(pathlist):
        moved = pathlist[:shift]
        scriptlist = scriptlist + moved
        pathlist = pathlist[shift:]
    elif shift < 0 and shift >= -len(scriptlist):
        moved = scriptlist[shift:]
        pathlist = moved + pathlist
        scriptlist = scriptlist[:shift]
    else:
        empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
        raise AssertionError("Cannot shift. Nothing left from %s" % empty)
    new_script_name = '/' + '/'.join(scriptlist)
    new_path_info = '/' + '/'.join(pathlist)
    if path_info.endswith('/') and pathlist: new_path_info += '/'
    return new_script_name, new_path_info


def auth_basic(check, realm="private", text="Access denied"):
    ''' Callback decorator to require HTTP auth (basic).
        TODO: Add route(check_auth=...) parameter. '''
    def decorator(func):
        def wrapper(*a, **ka):
            user, password = request.auth or (None, None)
            if user is None or not check(user, password):
                err = HTTPError(401, text)
                err.add_header('WWW-Authenticate', 'Basic realm="%s"' % realm)
                return err
            return func(*a, **ka)
        return wrapper
    return decorator


# Shortcuts for common Bottle methods.
# They all refer to the current default application.

def make_default_app_wrapper(name):
    ''' Return a callable that relays calls to the current default app.
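        For example, the module-level ``route`` shortcut defined below relays
        to the default app's ``route`` method (the handler is illustrative)::

            @route('/hello')
            def hello():
                return 'Hello World!'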
''' @functools.wraps(getattr(Bottle, name)) def wrapper(*a, **ka): return getattr(app(), name)(*a, **ka) return wrapper route = make_default_app_wrapper('route') get = make_default_app_wrapper('get') post = make_default_app_wrapper('post') put = make_default_app_wrapper('put') delete = make_default_app_wrapper('delete') error = make_default_app_wrapper('error') mount = make_default_app_wrapper('mount') hook = make_default_app_wrapper('hook') install = make_default_app_wrapper('install') uninstall = make_default_app_wrapper('uninstall') url = make_default_app_wrapper('get_url') ############################################################################### # Server Adapter ############################################################### ############################################################################### class ServerAdapter(object): quiet = False def __init__(self, host='127.0.0.1', port=8080, **options): self.options = options self.host = host self.port = int(port) def run(self, handler): # pragma: no cover pass def __repr__(self): args = ', '.join(['%s=%s'%(k,repr(v)) for k, v in self.options.items()]) return "%s(%s)" % (self.__class__.__name__, args) class CGIServer(ServerAdapter): quiet = True def run(self, handler): # pragma: no cover from wsgiref.handlers import CGIHandler def fixed_environ(environ, start_response): environ.setdefault('PATH_INFO', '') return handler(environ, start_response) CGIHandler().run(fixed_environ) class FlupFCGIServer(ServerAdapter): def run(self, handler): # pragma: no cover import flup.server.fcgi self.options.setdefault('bindAddress', (self.host, self.port)) flup.server.fcgi.WSGIServer(handler, **self.options).run() class WSGIRefServer(ServerAdapter): def run(self, app): # pragma: no cover from wsgiref.simple_server import WSGIRequestHandler, WSGIServer from wsgiref.simple_server import make_server import socket class FixedHandler(WSGIRequestHandler): def address_string(self): # Prevent reverse DNS lookups please. return self.client_address[0] def log_request(*args, **kw): if not self.quiet: return WSGIRequestHandler.log_request(*args, **kw) handler_cls = self.options.get('handler_class', FixedHandler) server_cls = self.options.get('server_class', WSGIServer) if ':' in self.host: # Fix wsgiref for IPv6 addresses. 
if getattr(server_cls, 'address_family') == socket.AF_INET: class server_cls(server_cls): address_family = socket.AF_INET6 srv = make_server(self.host, self.port, app, server_cls, handler_cls) srv.serve_forever() class CherryPyServer(ServerAdapter): def run(self, handler): # pragma: no cover from cherrypy import wsgiserver self.options['bind_addr'] = (self.host, self.port) self.options['wsgi_app'] = handler certfile = self.options.get('certfile') if certfile: del self.options['certfile'] keyfile = self.options.get('keyfile') if keyfile: del self.options['keyfile'] server = wsgiserver.CherryPyWSGIServer(**self.options) if certfile: server.ssl_certificate = certfile if keyfile: server.ssl_private_key = keyfile try: server.start() finally: server.stop() class WaitressServer(ServerAdapter): def run(self, handler): from waitress import serve serve(handler, host=self.host, port=self.port) class PasteServer(ServerAdapter): def run(self, handler): # pragma: no cover from paste import httpserver from paste.translogger import TransLogger handler = TransLogger(handler, setup_console_handler=(not self.quiet)) httpserver.serve(handler, host=self.host, port=str(self.port), **self.options) class MeinheldServer(ServerAdapter): def run(self, handler): from meinheld import server server.listen((self.host, self.port)) server.run(handler) class FapwsServer(ServerAdapter): """ Extremely fast webserver using libev. See http://www.fapws.org/ """ def run(self, handler): # pragma: no cover import fapws._evwsgi as evwsgi from fapws import base, config port = self.port if float(config.SERVER_IDENT[-2:]) > 0.4: # fapws3 silently changed its API in 0.5 port = str(port) evwsgi.start(self.host, port) # fapws3 never releases the GIL. Complain upstream. I tried. No luck. if 'BOTTLE_CHILD' in os.environ and not self.quiet: _stderr("WARNING: Auto-reloading does not work with Fapws3.\n") _stderr(" (Fapws3 breaks python thread support)\n") evwsgi.set_base_module(base) def app(environ, start_response): environ['wsgi.multiprocess'] = False return handler(environ, start_response) evwsgi.wsgi_cb(('', app)) evwsgi.run() class TornadoServer(ServerAdapter): """ The super hyped asynchronous server by facebook. Untested. """ def run(self, handler): # pragma: no cover import tornado.wsgi, tornado.httpserver, tornado.ioloop container = tornado.wsgi.WSGIContainer(handler) server = tornado.httpserver.HTTPServer(container) server.listen(port=self.port,address=self.host) tornado.ioloop.IOLoop.instance().start() class AppEngineServer(ServerAdapter): """ Adapter for Google App Engine. """ quiet = True def run(self, handler): from google.appengine.ext.webapp import util # A main() function in the handler script enables 'App Caching'. # Lets makes sure it is there. This _really_ improves performance. module = sys.modules.get('__main__') if module and not hasattr(module, 'main'): module.main = lambda: util.run_wsgi_app(handler) util.run_wsgi_app(handler) class TwistedServer(ServerAdapter): """ Untested. """ def run(self, handler): from twisted.web import server, wsgi from twisted.python.threadpool import ThreadPool from twisted.internet import reactor thread_pool = ThreadPool() thread_pool.start() reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop) factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler)) reactor.listenTCP(self.port, factory, interface=self.host) reactor.run() class DieselServer(ServerAdapter): """ Untested. 
""" def run(self, handler): from diesel.protocols.wsgi import WSGIApplication app = WSGIApplication(handler, port=self.port) app.run() class GeventServer(ServerAdapter): """ Untested. Options: * `fast` (default: False) uses libevent's http server, but has some issues: No streaming, no pipelining, no SSL. * See gevent.wsgi.WSGIServer() documentation for more options. """ def run(self, handler): from gevent import wsgi, pywsgi, local if not isinstance(threading.local(), local.local): msg = "Bottle requires gevent.monkey.patch_all() (before import)" raise RuntimeError(msg) if not self.options.pop('fast', None): wsgi = pywsgi self.options['log'] = None if self.quiet else 'default' address = (self.host, self.port) server = wsgi.WSGIServer(address, handler, **self.options) if 'BOTTLE_CHILD' in os.environ: import signal signal.signal(signal.SIGINT, lambda s, f: server.stop()) server.serve_forever() class GeventSocketIOServer(ServerAdapter): def run(self,handler): from socketio import server address = (self.host, self.port) server.SocketIOServer(address, handler, **self.options).serve_forever() class GunicornServer(ServerAdapter): """ Untested. See http://gunicorn.org/configure.html for options. """ def run(self, handler): from gunicorn.app.base import Application config = {'bind': "%s:%d" % (self.host, int(self.port))} config.update(self.options) class GunicornApplication(Application): def init(self, parser, opts, args): return config def load(self): return handler GunicornApplication().run() class EventletServer(ServerAdapter): """ Untested """ def run(self, handler): from eventlet import wsgi, listen try: wsgi.server(listen((self.host, self.port)), handler, log_output=(not self.quiet)) except TypeError: # Fallback, if we have old version of eventlet wsgi.server(listen((self.host, self.port)), handler) class RocketServer(ServerAdapter): """ Untested. """ def run(self, handler): from rocket import Rocket server = Rocket((self.host, self.port), 'wsgi', { 'wsgi_app' : handler }) server.start() class BjoernServer(ServerAdapter): """ Fast server written in C: https://github.com/jonashaag/bjoern """ def run(self, handler): from bjoern import run run(handler, self.host, self.port) class AutoServer(ServerAdapter): """ Untested. """ adapters = [WaitressServer, PasteServer, TwistedServer, CherryPyServer, WSGIRefServer] def run(self, handler): for sa in self.adapters: try: return sa(self.host, self.port, **self.options).run(handler) except ImportError: pass server_names = { 'cgi': CGIServer, 'flup': FlupFCGIServer, 'wsgiref': WSGIRefServer, 'waitress': WaitressServer, 'cherrypy': CherryPyServer, 'paste': PasteServer, 'fapws3': FapwsServer, 'tornado': TornadoServer, 'gae': AppEngineServer, 'twisted': TwistedServer, 'diesel': DieselServer, 'meinheld': MeinheldServer, 'gunicorn': GunicornServer, 'eventlet': EventletServer, 'gevent': GeventServer, 'geventSocketIO':GeventSocketIOServer, 'rocket': RocketServer, 'bjoern' : BjoernServer, 'auto': AutoServer, } ############################################################################### # Application Control ########################################################## ############################################################################### def load(target, **namespace): """ Import a module or fetch an object from a module. * ``package.module`` returns `module` as a module object. * ``pack.mod:name`` returns the module variable `name` from `pack.mod`. * ``pack.mod:func()`` calls `pack.mod.func()` and returns the result. 
The last form accepts not only function calls, but any type of
        expression. Keyword arguments passed to this function are available as
        local variables. Example: ``load('re:compile(x)', x='[a-z]')``
    """
    module, target = target.split(":", 1) if ':' in target else (target, None)
    if module not in sys.modules: __import__(module)
    if not target: return sys.modules[module]
    if target.isalnum(): return getattr(sys.modules[module], target)
    package_name = module.split('.')[0]
    namespace[package_name] = sys.modules[package_name]
    return eval('%s.%s' % (module, target), namespace)


def load_app(target):
    """ Load a bottle application from a module and make sure that the import
        does not affect the current default application, but returns a separate
        application object. See :func:`load` for the target parameter. """
    global NORUN; NORUN, nr_old = True, NORUN
    try:
        tmp = default_app.push() # Create a new "default application"
        rv = load(target) # Import the target module
        return rv if callable(rv) else tmp
    finally:
        default_app.remove(tmp) # Remove the temporarily added default application
        NORUN = nr_old

_debug = debug
def run(app=None, server='wsgiref', host='127.0.0.1', port=8080,
        interval=1, reloader=False, quiet=False, plugins=None,
        debug=None, **kargs):
    """ Start a server instance. This method blocks until the server terminates.

        :param app: WSGI application or target string supported by
               :func:`load_app`. (default: :func:`default_app`)
        :param server: Server adapter to use. See :data:`server_names` keys
               for valid names or pass a :class:`ServerAdapter` subclass.
               (default: `wsgiref`)
        :param host: Server address to bind to. Pass ``0.0.0.0`` to listen on
               all interfaces including the external one. (default: 127.0.0.1)
        :param port: Server port to bind to. Values below 1024 require root
               privileges. (default: 8080)
        :param reloader: Start auto-reloading server? (default: False)
        :param interval: Auto-reloader interval in seconds (default: 1)
        :param quiet: Suppress output to stdout and stderr? (default: False)
        :param options: Options passed to the server adapter.
    """
    if NORUN: return
    if reloader and not os.environ.get('BOTTLE_CHILD'):
        try:
            lockfile = None
            fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock')
            os.close(fd) # We only need this file to exist. We never write to it
            while os.path.exists(lockfile):
                args = [sys.executable] + sys.argv
                environ = os.environ.copy()
                environ['BOTTLE_CHILD'] = 'true'
                environ['BOTTLE_LOCKFILE'] = lockfile
                p = subprocess.Popen(args, env=environ)
                while p.poll() is None: # Busy wait...
                    os.utime(lockfile, None) # I am alive!
                    time.sleep(interval)
                if p.poll() != 3:
                    if os.path.exists(lockfile): os.unlink(lockfile)
                    sys.exit(p.poll())
        except KeyboardInterrupt:
            pass
        finally:
            if os.path.exists(lockfile):
                os.unlink(lockfile)
        return

    try:
        if debug is not None: _debug(debug)
        app = app or default_app()
        if isinstance(app, basestring): app = load_app(app)
        if not callable(app):
            raise ValueError("Application is not callable: %r" % app)

        for plugin in plugins or []:
            app.install(plugin)

        if server in server_names:
            server = server_names.get(server)
        if isinstance(server, basestring):
            server = load(server)
        if isinstance(server, type):
            server = server(host=host, port=port, **kargs)
        if not isinstance(server, ServerAdapter):
            raise ValueError("Unknown or unsupported server: %r" % server)

        server.quiet = server.quiet or quiet
        if not server.quiet:
            _stderr("Bottle v%s server starting up (using %s)...\n" % (__version__, repr(server)))
            _stderr("Listening on http://%s:%d/\n" % (server.host, server.port))
            _stderr("Hit Ctrl-C to quit.\n\n")

        if reloader:
            lockfile = os.environ.get('BOTTLE_LOCKFILE')
            bgcheck = FileCheckerThread(lockfile, interval)
            with bgcheck:
                server.run(app)
            if bgcheck.status == 'reload':
                sys.exit(3)
        else:
            server.run(app)
    except KeyboardInterrupt:
        pass
    except (SystemExit, MemoryError):
        raise
    except:
        if not reloader: raise
        if not getattr(server, 'quiet', quiet):
            print_exc()
        time.sleep(interval)
        sys.exit(3)


class FileCheckerThread(threading.Thread):
    ''' Interrupt the main thread as soon as a changed module file is detected,
        the lockfile gets deleted, or the lockfile gets too old. '''

    def __init__(self, lockfile, interval):
        threading.Thread.__init__(self)
        self.lockfile, self.interval = lockfile, interval
        #: Is one of 'reload', 'error' or 'exit'
        self.status = None

    def run(self):
        exists = os.path.exists
        mtime = lambda path: os.stat(path).st_mtime
        files = dict()

        for module in list(sys.modules.values()):
            path = getattr(module, '__file__', '')
            if path[-4:] in ('.pyo', '.pyc'): path = path[:-1]
            if path and exists(path): files[path] = mtime(path)

        while not self.status:
            if not exists(self.lockfile)\
            or mtime(self.lockfile) < time.time() - self.interval - 5:
                self.status = 'error'
                thread.interrupt_main()
            for path, lmtime in list(files.items()):
                if not exists(path) or mtime(path) > lmtime:
                    self.status = 'reload'
                    thread.interrupt_main()
                    break
            time.sleep(self.interval)

    def __enter__(self):
        self.start()

    def __exit__(self, exc_type, exc_val, exc_tb):
        if not self.status: self.status = 'exit' # silent exit
        self.join()
        return exc_type is not None and issubclass(exc_type, KeyboardInterrupt)


###############################################################################
# Template Adapters ############################################################
###############################################################################


class TemplateError(HTTPError):
    def __init__(self, message):
        HTTPError.__init__(self, 500, message)


class BaseTemplate(object):
    """ Base class and minimal API for template adapters """
    extensions = ['tpl','html','thtml','stpl']
    settings = {} #used in prepare()
    defaults = {} #used in render()

    def __init__(self, source=None, name=None, lookup=[], encoding='utf8', **settings):
        """ Create a new template.
            If the source parameter (str or buffer) is missing, the name argument
            is used to guess a template filename. Subclasses can assume that
            self.source and/or self.filename are set. Both are strings.
            The lookup, encoding and settings parameters are stored as instance
            variables.
            The lookup parameter stores a list containing directory paths.
The encoding parameter should be used to decode byte strings or files. The settings parameter contains a dict for engine-specific settings. """ self.name = name self.source = source.read() if hasattr(source, 'read') else source self.filename = source.filename if hasattr(source, 'filename') else None self.lookup = [os.path.abspath(x) for x in lookup] self.encoding = encoding self.settings = self.settings.copy() # Copy from class variable self.settings.update(settings) # Apply if not self.source and self.name: self.filename = self.search(self.name, self.lookup) if not self.filename: raise TemplateError('Template %s not found.' % repr(name)) if not self.source and not self.filename: raise TemplateError('No template specified.') self.prepare(**self.settings) @classmethod def search(cls, name, lookup=[]): """ Search name in all directories specified in lookup. First without, then with common extensions. Return first hit. """ if not lookup: depr('The template lookup path list should not be empty.') #0.12 lookup = ['.'] if os.path.isabs(name) and os.path.isfile(name): depr('Absolute template path names are deprecated.') #0.12 return os.path.abspath(name) for spath in lookup: spath = os.path.abspath(spath) + os.sep fname = os.path.abspath(os.path.join(spath, name)) if not fname.startswith(spath): continue if os.path.isfile(fname): return fname for ext in cls.extensions: if os.path.isfile('%s.%s' % (fname, ext)): return '%s.%s' % (fname, ext) @classmethod def global_config(cls, key, *args): ''' This reads or sets the global settings stored in class.settings. ''' if args: cls.settings = cls.settings.copy() # Make settings local to class cls.settings[key] = args[0] else: return cls.settings[key] def prepare(self, **options): """ Run preparations (parsing, caching, ...). It should be possible to call this again to refresh a template or to update settings. """ raise NotImplementedError def render(self, *args, **kwargs): """ Render the template with the specified local variables and return a single byte or unicode string. If it is a byte string, the encoding must match self.encoding. This method must be thread-safe! Local variables may be provided in dictionaries (args) or directly, as keywords (kwargs). 
""" raise NotImplementedError class MakoTemplate(BaseTemplate): def prepare(self, **options): from mako.template import Template from mako.lookup import TemplateLookup options.update({'input_encoding':self.encoding}) options.setdefault('format_exceptions', bool(DEBUG)) lookup = TemplateLookup(directories=self.lookup, **options) if self.source: self.tpl = Template(self.source, lookup=lookup, **options) else: self.tpl = Template(uri=self.name, filename=self.filename, lookup=lookup, **options) def render(self, *args, **kwargs): for dictarg in args: kwargs.update(dictarg) _defaults = self.defaults.copy() _defaults.update(kwargs) return self.tpl.render(**_defaults) class CheetahTemplate(BaseTemplate): def prepare(self, **options): from Cheetah.Template import Template self.context = threading.local() self.context.vars = {} options['searchList'] = [self.context.vars] if self.source: self.tpl = Template(source=self.source, **options) else: self.tpl = Template(file=self.filename, **options) def render(self, *args, **kwargs): for dictarg in args: kwargs.update(dictarg) self.context.vars.update(self.defaults) self.context.vars.update(kwargs) out = str(self.tpl) self.context.vars.clear() return out class Jinja2Template(BaseTemplate): def prepare(self, filters=None, tests=None, globals={}, **kwargs): from jinja2 import Environment, FunctionLoader if 'prefix' in kwargs: # TODO: to be removed after a while raise RuntimeError('The keyword argument `prefix` has been removed. ' 'Use the full jinja2 environment name line_statement_prefix instead.') self.env = Environment(loader=FunctionLoader(self.loader), **kwargs) if filters: self.env.filters.update(filters) if tests: self.env.tests.update(tests) if globals: self.env.globals.update(globals) if self.source: self.tpl = self.env.from_string(self.source) else: self.tpl = self.env.get_template(self.filename) def render(self, *args, **kwargs): for dictarg in args: kwargs.update(dictarg) _defaults = self.defaults.copy() _defaults.update(kwargs) return self.tpl.render(**_defaults) def loader(self, name): fname = self.search(name, self.lookup) if not fname: return with open(fname, "rb") as f: return f.read().decode(self.encoding) class SimpleTemplate(BaseTemplate): def prepare(self, escape_func=html_escape, noescape=False, syntax=None, **ka): self.cache = {} enc = self.encoding self._str = lambda x: touni(x, enc) self._escape = lambda x: escape_func(touni(x, enc)) self.syntax = syntax if noescape: self._str, self._escape = self._escape, self._str @cached_property def co(self): return compile(self.code, self.filename or '<string>', 'exec') @cached_property def code(self): source = self.source if not source: with open(self.filename, 'rb') as f: source = f.read() try: source, encoding = touni(source), 'utf8' except UnicodeError: depr('Template encodings other than utf8 are no longer supported.') #0.11 source, encoding = touni(source, 'latin1'), 'latin1' parser = StplParser(source, encoding=encoding, syntax=self.syntax) code = parser.translate() self.encoding = parser.encoding return code def _rebase(self, _env, _name=None, **kwargs): if _name is None: depr('Rebase function called without arguments.' ' You were probably looking for {{base}}?', True) #0.12 _env['_rebase'] = (_name, kwargs) def _include(self, _env, _name=None, **kwargs): if _name is None: depr('Rebase function called without arguments.' 
' You were probably looking for {{base}}?', True) #0.12 env = _env.copy() env.update(kwargs) if _name not in self.cache: self.cache[_name] = self.__class__(name=_name, lookup=self.lookup) return self.cache[_name].execute(env['_stdout'], env) def execute(self, _stdout, kwargs): env = self.defaults.copy() env.update(kwargs) env.update({'_stdout': _stdout, '_printlist': _stdout.extend, 'include': functools.partial(self._include, env), 'rebase': functools.partial(self._rebase, env), '_rebase': None, '_str': self._str, '_escape': self._escape, 'get': env.get, 'setdefault': env.setdefault, 'defined': env.__contains__ }) eval(self.co, env) if env.get('_rebase'): subtpl, rargs = env.pop('_rebase') rargs['base'] = ''.join(_stdout) #copy stdout del _stdout[:] # clear stdout return self._include(env, subtpl, **rargs) return env def render(self, *args, **kwargs): """ Render the template using keyword arguments as local variables. """ env = {}; stdout = [] for dictarg in args: env.update(dictarg) env.update(kwargs) self.execute(stdout, env) return ''.join(stdout) class StplSyntaxError(TemplateError): pass class StplParser(object): ''' Parser for stpl templates. ''' _re_cache = {} #: Cache for compiled re patterns # This huge pile of voodoo magic splits python code into 8 different tokens. # 1: All kinds of python strings (trust me, it works) _re_tok = '((?m)[urbURB]?(?:\'\'(?!\')|""(?!")|\'{6}|"{6}' \ '|\'(?:[^\\\\\']|\\\\.)+?\'|"(?:[^\\\\"]|\\\\.)+?"' \ '|\'{3}(?:[^\\\\]|\\\\.|\\n)+?\'{3}' \ '|"{3}(?:[^\\\\]|\\\\.|\\n)+?"{3}))' _re_inl = _re_tok.replace('|\\n','') # We re-use this string pattern later # 2: Comments (until end of line, but not the newline itself) _re_tok += '|(#.*)' # 3,4: Open and close grouping tokens _re_tok += '|([\[\{\(])' _re_tok += '|([\]\}\)])' # 5,6: Keywords that start or continue a python block (only start of line) _re_tok += '|^([ \\t]*(?:if|for|while|with|try|def|class)\\b)' \ '|^([ \\t]*(?:elif|else|except|finally)\\b)' # 7: Our special 'end' keyword (but only if it stands alone) _re_tok += '|((?:^|;)[ \\t]*end[ \\t]*(?=(?:%(block_close)s[ \\t]*)?\\r?$|;|#))' # 8: A customizable end-of-code-block template token (only end of line) _re_tok += '|(%(block_close)s[ \\t]*(?=$))' # 9: And finally, a single newline. 
The 10th token is 'everything else' _re_tok += '|(\\r?\\n)' # Match the start tokens of code areas in a template _re_split = '(?m)^[ \t]*(\\\\?)((%(line_start)s)|(%(block_start)s))(%%?)' # Match inline statements (may contain python strings) _re_inl = '%%(inline_start)s((?:%s|[^\'"\n]*?)+)%%(inline_end)s' % _re_inl default_syntax = '<% %> % {{ }}' def __init__(self, source, syntax=None, encoding='utf8'): self.source, self.encoding = touni(source, encoding), encoding self.set_syntax(syntax or self.default_syntax) self.code_buffer, self.text_buffer = [], [] self.lineno, self.offset = 1, 0 self.indent, self.indent_mod = 0, 0 self.paren_depth = 0 def get_syntax(self): ''' Tokens as a space separated string (default: <% %> % {{ }}) ''' return self._syntax def set_syntax(self, syntax): self._syntax = syntax self._tokens = syntax.split() if not syntax in self._re_cache: names = 'block_start block_close line_start inline_start inline_end' etokens = map(re.escape, self._tokens) pattern_vars = dict(zip(names.split(), etokens)) patterns = (self._re_split, self._re_tok, self._re_inl) patterns = [re.compile(p%pattern_vars) for p in patterns] self._re_cache[syntax] = patterns self.re_split, self.re_tok, self.re_inl = self._re_cache[syntax] syntax = property(get_syntax, set_syntax) def translate(self): if self.offset: raise RuntimeError('Parser is a one time instance.') while True: m = self.re_split.search(self.source[self.offset:]) if m: text = self.source[self.offset:self.offset+m.start()] self.text_buffer.append(text) self.offset += m.end() if m.group(1): # New escape syntax line, sep, _ = self.source[self.offset:].partition('\n') self.text_buffer.append(m.group(2)+m.group(5)+line+sep) self.offset += len(line+sep)+1 continue elif m.group(5): # Old escape syntax depr('Escape code lines with a backslash.') #0.12 line, sep, _ = self.source[self.offset:].partition('\n') self.text_buffer.append(m.group(2)+line+sep) self.offset += len(line+sep)+1 continue self.flush_text() self.read_code(multiline=bool(m.group(4))) else: break self.text_buffer.append(self.source[self.offset:]) self.flush_text() return ''.join(self.code_buffer) def read_code(self, multiline): code_line, comment = '', '' while True: m = self.re_tok.search(self.source[self.offset:]) if not m: code_line += self.source[self.offset:] self.offset = len(self.source) self.write_code(code_line.strip(), comment) return code_line += self.source[self.offset:self.offset+m.start()] self.offset += m.end() _str, _com, _po, _pc, _blk1, _blk2, _end, _cend, _nl = m.groups() if (code_line or self.paren_depth > 0) and (_blk1 or _blk2): # a if b else c code_line += _blk1 or _blk2 continue if _str: # Python string code_line += _str elif _com: # Python comment (up to EOL) comment = _com if multiline and _com.strip().endswith(self._tokens[1]): multiline = False # Allow end-of-block in comments elif _po: # open parenthesis self.paren_depth += 1 code_line += _po elif _pc: # close parenthesis if self.paren_depth > 0: # we could check for matching parentheses here, but it's # easier to leave that to python - just check counts self.paren_depth -= 1 code_line += _pc elif _blk1: # Start-block keyword (if/for/while/def/try/...) code_line, self.indent_mod = _blk1, -1 self.indent += 1 elif _blk2: # Continue-block keyword (else/elif/except/...) 
code_line, self.indent_mod = _blk2, -1 elif _end: # The non-standard 'end'-keyword (ends a block) self.indent -= 1 elif _cend: # The end-code-block template token (usually '%>') if multiline: multiline = False else: code_line += _cend else: # \n self.write_code(code_line.strip(), comment) self.lineno += 1 code_line, comment, self.indent_mod = '', '', 0 if not multiline: break def flush_text(self): text = ''.join(self.text_buffer) del self.text_buffer[:] if not text: return parts, pos, nl = [], 0, '\\\n'+' '*self.indent for m in self.re_inl.finditer(text): prefix, pos = text[pos:m.start()], m.end() if prefix: parts.append(nl.join(map(repr, prefix.splitlines(True)))) if prefix.endswith('\n'): parts[-1] += nl parts.append(self.process_inline(m.group(1).strip())) if pos < len(text): prefix = text[pos:] lines = prefix.splitlines(True) if lines[-1].endswith('\\\\\n'): lines[-1] = lines[-1][:-3] elif lines[-1].endswith('\\\\\r\n'): lines[-1] = lines[-1][:-4] parts.append(nl.join(map(repr, lines))) code = '_printlist((%s,))' % ', '.join(parts) self.lineno += code.count('\n')+1 self.write_code(code) def process_inline(self, chunk): if chunk[0] == '!': return '_str(%s)' % chunk[1:] return '_escape(%s)' % chunk def write_code(self, line, comment=''): line, comment = self.fix_backward_compatibility(line, comment) code = ' ' * (self.indent+self.indent_mod) code += line.lstrip() + comment + '\n' self.code_buffer.append(code) def fix_backward_compatibility(self, line, comment): parts = line.strip().split(None, 2) if parts and parts[0] in ('include', 'rebase'): depr('The include and rebase keywords are functions now.') #0.12 if len(parts) == 1: return "_printlist([base])", comment elif len(parts) == 2: return "_=%s(%r)" % tuple(parts), comment else: return "_=%s(%r, %s)" % tuple(parts), comment if self.lineno <= 2 and not line.strip() and 'coding' in comment: m = re.match(r"#.*coding[:=]\s*([-\w.]+)", comment) if m: depr('PEP263 encoding strings in templates are deprecated.') #0.12 enc = m.group(1) self.source = self.source.encode(self.encoding).decode(enc) self.encoding = enc return line, comment.replace('coding','coding*') return line, comment def template(*args, **kwargs): ''' Get a rendered template as a string iterator. You can use a name, a filename or a template string as first parameter. Template rendering arguments can be passed as dictionaries or directly (as keyword arguments). ''' tpl = args[0] if args else None adapter = kwargs.pop('template_adapter', SimpleTemplate) lookup = kwargs.pop('template_lookup', TEMPLATE_PATH) tplid = (id(lookup), tpl) if tplid not in TEMPLATES or DEBUG: settings = kwargs.pop('template_settings', {}) if isinstance(tpl, adapter): TEMPLATES[tplid] = tpl if settings: TEMPLATES[tplid].prepare(**settings) elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl: TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings) else: TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings) if not TEMPLATES[tplid]: abort(500, 'Template (%s) not found' % tpl) for dictarg in args[1:]: kwargs.update(dictarg) return TEMPLATES[tplid].render(kwargs) mako_template = functools.partial(template, template_adapter=MakoTemplate) cheetah_template = functools.partial(template, template_adapter=CheetahTemplate) jinja2_template = functools.partial(template, template_adapter=Jinja2Template) def view(tpl_name, **defaults): ''' Decorator: renders a template for a handler. 
The handler can control its behavior like that: - return a dict of template vars to fill out the template - return something other than a dict and the view decorator will not process the template, but return the handler result as is. This includes returning a HTTPResponse(dict) to get, for instance, JSON with autojson or other castfilters. ''' def decorator(func): @functools.wraps(func) def wrapper(*args, **kwargs): result = func(*args, **kwargs) if isinstance(result, (dict, DictMixin)): tplvars = defaults.copy() tplvars.update(result) return template(tpl_name, **tplvars) elif result is None: return template(tpl_name, defaults) return result return wrapper return decorator mako_view = functools.partial(view, template_adapter=MakoTemplate) cheetah_view = functools.partial(view, template_adapter=CheetahTemplate) jinja2_view = functools.partial(view, template_adapter=Jinja2Template) ############################################################################### # Constants and Globals ######################################################## ############################################################################### TEMPLATE_PATH = ['./', './views/'] TEMPLATES = {} DEBUG = False NORUN = False # If set, run() does nothing. Used by load_app() #: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found') HTTP_CODES = httplib.responses HTTP_CODES[418] = "I'm a teapot" # RFC 2324 HTTP_CODES[422] = "Unprocessable Entity" # RFC 4918 HTTP_CODES[428] = "Precondition Required" HTTP_CODES[429] = "Too Many Requests" HTTP_CODES[431] = "Request Header Fields Too Large" HTTP_CODES[511] = "Network Authentication Required" _HTTP_STATUS_LINES = dict((k, '%d %s'%(k,v)) for (k,v) in HTTP_CODES.items()) #: The default template used for error pages. Override with @error() ERROR_PAGE_TEMPLATE = """ %%try: %%from %s import DEBUG, HTTP_CODES, request, touni <!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN"> <html> <head> <title>Error: {{e.status}}</title> <style type="text/css"> html {background-color: #eee; font-family: sans;} body {background-color: #fff; border: 1px solid #ddd; padding: 15px; margin: 15px;} pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;} </style> </head> <body> <h1>Error: {{e.status}}</h1> <p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt> caused an error:</p> <pre>{{e.body}}</pre> %%if DEBUG and e.exception: <h2>Exception:</h2> <pre>{{repr(e.exception)}}</pre> %%end %%if DEBUG and e.traceback: <h2>Traceback:</h2> <pre>{{e.traceback}}</pre> %%end </body> </html> %%except ImportError: <b>ImportError:</b> Could not generate the error page. Please add bottle to the import path. %%end """ % __name__ #: A thread-safe instance of :class:`LocalRequest`. If accessed from within a #: request callback, this instance always refers to the *current* request #: (even on a multithreaded server). request = LocalRequest() #: A thread-safe instance of :class:`LocalResponse`. It is used to change the #: HTTP response for the *current* request. response = LocalResponse() #: A thread-safe namespace. Not used by Bottle. local = threading.local() # Initialize app stack (create first empty Bottle app) # BC: 0.6.4 and needed for run() app = default_app = AppStack() app.push() #: A virtual package that redirects import statements. #: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`. 
ext = _ImportRedirect('bottle.ext' if __name__ == '__main__' else __name__+".ext", 'bottle_%s').module if __name__ == '__main__': opt, args, parser = _cmd_options, _cmd_args, _cmd_parser if opt.version: _stdout('Bottle %s\n'%__version__) sys.exit(0) if not args: parser.print_help() _stderr('\nError: No application specified.\n') sys.exit(1) sys.path.insert(0, '.') sys.modules.setdefault('bottle', sys.modules['__main__']) host, port = (opt.bind or 'localhost'), 8080 if ':' in host and host.rfind(']') < host.rfind(':'): host, port = host.rsplit(':', 1) host = host.strip('[]') run(args[0], host=host, port=int(port), server=opt.server, reloader=opt.reload, plugins=opt.plugin, debug=opt.debug) # THE END
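A quick, hedged illustration of the stpl syntax that StplParser translates above: `%` starts a code line, the non-standard `end` keyword closes a block (token 7 in the token regex), `{{expr}}` is emitted through `_escape`, and `{{!expr}}` is emitted raw through `_str`. The sketch assumes this file is importable as `bottle`; the template text and variable names are illustrative only.

from bottle import SimpleTemplate

tpl = SimpleTemplate(source="""<ul>
% for item in items:
  <li>{{item}}</li>
% end
</ul>
{{!footer}}
""")
# '<b>' comes out HTML-escaped; the raw {{!footer}} markup does not.
print(tpl.render(items=['a', '<b>'], footer='<hr/>'))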
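And a minimal end-to-end sketch tying `route`, the `view` decorator, and `run()` with the auto-reloader together. Assumptions, not facts from this excerpt: the module is importable as `bottle`, and a `views/hello.tpl` template exists; the route path and template name are made up for illustration.

from bottle import route, run, view

@route('/hello/<name>')
@view('hello')  # a dict return value is fed to template('hello', **result)
def hello(name):
    return {'name': name}

if __name__ == '__main__':
    # With reloader=True, run() re-executes itself in a child process
    # (BOTTLE_CHILD) and restarts it whenever FileCheckerThread makes the
    # child exit with status 3.
    run(host='localhost', port=8080, reloader=True, interval=1)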
apache-2.0
manipopopo/tensorflow
tensorflow/contrib/autograph/core/naming.py
14
4248
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Symbol naming utilities.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib.autograph.pyct import qual_names class Namer(object): """Implementation of the namer interfaces required by various converters. This implementation performs additional tasks like keeping track of the function calls that have been encountered and replaced with calls to their corresponding compiled counterparts. Interfaces currently implemented: * call_trees.FunctionNamer * control_flow.SymbolNamer * side_effect_guards.SymbolNamer """ def __init__(self, global_namespace, recursive, name_map, partial_types): self.global_namespace = global_namespace self.recursive = recursive self.partial_types = partial_types self.renamed_calls = {} if name_map is not None: self.renamed_calls.update(name_map) self.generated_names = set() def compiled_class_name(self, original_fqn, live_entity=None): """See call_trees.FunctionNamer.compiled_class_name.""" if live_entity is not None and live_entity in self.renamed_calls: return self.renamed_calls[live_entity] if isinstance(original_fqn, tuple): original_name = '__'.join(original_fqn) else: original_name = original_fqn new_name_root = 'Tf%s' % original_name new_name = new_name_root n = 0 while new_name in self.global_namespace: n += 1 new_name = '%s_%d' % (new_name_root, n) self.generated_names.add(new_name) if live_entity is not None: self.renamed_calls[live_entity] = new_name return new_name def compiled_function_name(self, original_fqn, live_entity=None, owner_type=None): """See call_trees.FunctionNamer.compiled_function_name.""" if not self.recursive: return None, False if owner_type is not None and owner_type not in self.partial_types: # Members are not renamed when part of an entire converted class. return None, False if isinstance(original_fqn, tuple): original_name = '__'.join(original_fqn) else: original_name = original_fqn if live_entity is not None and live_entity in self.renamed_calls: return self.renamed_calls[live_entity], True new_name_root = 'tf__%s' % original_name new_name = new_name_root n = 0 while new_name in self.global_namespace: n += 1 new_name = '%s_%d' % (new_name_root, n) if live_entity is not None: self.renamed_calls[live_entity] = new_name self.generated_names.add(new_name) return new_name, True def new_symbol(self, name_root, reserved_locals): """See control_flow.SymbolNamer.new_symbol.""" # reserved_locals may contain QNs. 
all_reserved_locals = set() for s in reserved_locals: if isinstance(s, qual_names.QN): all_reserved_locals.update(s.qn) elif isinstance(s, str): all_reserved_locals.add(s) else: raise ValueError('Unexpected symbol type "%s"' % type(s)) pieces = name_root.split('_') if pieces[-1].isdigit(): name_root = '_'.join(pieces[:-1]) n = int(pieces[-1]) else: n = 0 new_name = name_root while (new_name in self.global_namespace or new_name in all_reserved_locals or new_name in self.generated_names): n += 1 new_name = '%s_%d' % (name_root, n) self.generated_names.add(new_name) return new_name
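Because the uniquifying loop in `compiled_function_name` and `new_symbol` is the heart of this module, here is a standalone sketch of it; `uniquify` is a name of our own that restates the loop rather than importing the module.

def uniquify(name_root, taken):
    # Mirrors the collision loops above: try name_root, then name_root_1,
    # name_root_2, ... until the candidate is absent from `taken`.
    new_name, n = name_root, 0
    while new_name in taken:
        n += 1
        new_name = '%s_%d' % (name_root, n)
    return new_name

print(uniquify('tf__fn', {'tf__fn', 'tf__fn_1'}))  # -> tf__fn_2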
apache-2.0
scs/uclinux
user/python/python-2.4.4/Lib/plat-mac/lib-scriptpackages/_builtinSuites/builtin_Suite.py
8
4589
"""Suite builtin_Suite: Every application supports open, reopen, print, run, and quit Level 1, version 1 """ import aetools import MacOS _code = 'aevt' class builtin_Suite_Events: def open(self, _object, _attributes={}, **_arguments): """open: Open the specified object(s) Required argument: list of objects to open Keyword argument _attributes: AppleEvent attribute dictionary """ _code = 'aevt' _subcode = 'odoc' if _arguments: raise TypeError, 'No optional args expected' _arguments['----'] = _object _reply, _arguments, _attributes = self.send(_code, _subcode, _arguments, _attributes) if _arguments.get('errn', 0): raise aetools.Error, aetools.decodeerror(_arguments) # XXXX Optionally decode result if _arguments.has_key('----'): return _arguments['----'] def run(self, _no_object=None, _attributes={}, **_arguments): """run: Run an application. Most applications will open an empty, untitled window. Keyword argument _attributes: AppleEvent attribute dictionary """ _code = 'aevt' _subcode = 'oapp' if _arguments: raise TypeError, 'No optional args expected' if _no_object != None: raise TypeError, 'No direct arg expected' _reply, _arguments, _attributes = self.send(_code, _subcode, _arguments, _attributes) if _arguments.get('errn', 0): raise aetools.Error, aetools.decodeerror(_arguments) # XXXX Optionally decode result if _arguments.has_key('----'): return _arguments['----'] def reopen(self, _no_object=None, _attributes={}, **_arguments): """reopen: Reactivate a running application. Some applications will open a new untitled window if no window is open. Keyword argument _attributes: AppleEvent attribute dictionary """ _code = 'aevt' _subcode = 'rapp' if _arguments: raise TypeError, 'No optional args expected' if _no_object != None: raise TypeError, 'No direct arg expected' _reply, _arguments, _attributes = self.send(_code, _subcode, _arguments, _attributes) if _arguments.get('errn', 0): raise aetools.Error, aetools.decodeerror(_arguments) # XXXX Optionally decode result if _arguments.has_key('----'): return _arguments['----'] def _print(self, _object, _attributes={}, **_arguments): """print: Print the specified object(s) Required argument: list of objects to print Keyword argument _attributes: AppleEvent attribute dictionary """ _code = 'aevt' _subcode = 'pdoc' if _arguments: raise TypeError, 'No optional args expected' _arguments['----'] = _object _reply, _arguments, _attributes = self.send(_code, _subcode, _arguments, _attributes) if _arguments.get('errn', 0): raise aetools.Error, aetools.decodeerror(_arguments) # XXXX Optionally decode result if _arguments.has_key('----'): return _arguments['----'] _argmap_quit = { 'saving' : 'savo', } def quit(self, _no_object=None, _attributes={}, **_arguments): """quit: Quit an application Keyword argument saving: specifies whether to save currently open documents Keyword argument _attributes: AppleEvent attribute dictionary """ _code = 'aevt' _subcode = 'quit' aetools.keysubst(_arguments, self._argmap_quit) if _no_object != None: raise TypeError, 'No direct arg expected' aetools.enumsubst(_arguments, 'savo', _Enum_savo) _reply, _arguments, _attributes = self.send(_code, _subcode, _arguments, _attributes) if _arguments.get('errn', 0): raise aetools.Error, aetools.decodeerror(_arguments) # XXXX Optionally decode result if _arguments.has_key('----'): return _arguments['----'] _argmap_close = { 'saving' : 'savo', 'saving_in' : 'kfil', } _Enum_savo = { 'yes' : 'yes ', # Save objects now 'no' : 'no ', # Do not save objects 'ask' : 'ask ', # Ask the user whether to 
save } # # Indices of types declared in this module # _classdeclarations = { } _propdeclarations = { } _compdeclarations = { } _enumdeclarations = { 'savo' : _Enum_savo, }
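A small hedged note on the four-character codes: `aetools.keysubst` maps the friendly keyword (`saving`) to its event key (`'savo'`), and `aetools.enumsubst` maps the user-facing value through `_Enum_savo` to the padded code actually sent. The snippet below merely restates that substitution with plain dict operations; it does not reproduce the real `aetools` implementation.

# quit(saving='ask') conceptually becomes:
_arguments = {'saving': 'ask'}
_arguments['savo'] = _Enum_savo[_arguments.pop('saving')]
assert _arguments == {'savo': 'ask '}  # note the padding to four characters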
gpl-2.0
dan-zilla/upfirdn
upfirdn/test_upfirdn.py
11
8668
# Copyright (c) 2009, Motorola, Inc # # All Rights Reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # * Neither the name of Motorola nor the names of its contributors may be # used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy as np import upfirdn from nose.tools import assert_raises import sys import time random_state = np.random.RandomState(17) def tic(): global tictime tictime = time.time() def toc(): global tictime return time.time() - tictime def resample(x, h, p, q): """This is the 'slow' version of upsampling, FIR filtering, and downsampling.""" X = np.zeros((len(x), p), x.dtype) X[:, 0] = x X.shape = (-1,) Y = np.convolve(h, X) y = Y[::q] return y class ResamplerCase(object): """Test C++ Resampler object Mainly tests that "apply" outputs correct values and that state-retention is correct. """ def __init__(self, p, q, coefs): """ A random signal input type is chosen (real or complex). 
Inputs: p - a single integer upsampling factor q - a single integer downsampling factor coefs - real or complex coefficients array """ self.p = p self.q = q self.coefs = coefs self.output_type = float if random_state.rand()>.5: self.signal_type = complex self.output_type = complex else: self.signal_type = float if np.iscomplexobj(coefs): self.coef_type = complex self.output_type = complex else: self.coef_type = float self.klass = upfirdn.klass_lookup(self.signal_type(), self.coefs) def __str__(self): return 'ResamplerCase(%d, %d, %d, %s, %s)'%(self.p, self.q, \ len(self.coefs), self.signal_type.__name__, \ self.coef_type.__name__) def __call__(self): print self self.scrub(np.ones(100), 'ones') if self.signal_type == float: x = random_state.randn(1000) else: x = random_state.randn(1000) + 1.j*random_state.randn(1000) self.scrub(x, 'randn') self.scrub(np.arange(10000), 'ramp') resampler = self.klass(self.p, self.q, self.coefs) assert_raises(ValueError, resampler.apply, \ np.ones(200), np.ones(1, dtype=self.output_type)) def scrub(self, x, name): tic() yr = resample(x, self.coefs, self.p, self.q) resample_time = toc() out_count = np.ceil(float(self.p) / self.q * len(x)) yr = yr[:out_count] for test in ['oneshot', 'persample', 'randomsteps']: tic() y = self.__getattribute__(test)(x, out_count) test_time = toc() nmse = np.sum(abs(y - yr)**2) / np.sum(abs(yr)**2) print '%10s(%5d) %12s nmse = %10f %10fx' % \ (name, len(x), test, nmse, resample_time/test_time) assert nmse < 1e-10 def oneshot(self, x, out_count): self.resampler = self.klass(self.p, self.q, self.coefs) y = np.zeros((out_count,), dtype=self.output_type) actual_out_count = self.resampler.apply(x, y) return y def persample(self, x, out_count): self.resampler = self.klass(self.p, self.q, self.coefs) y = np.zeros((out_count,), dtype=self.output_type) out_ptr = 0 for xi in x: count = self.resampler.apply([xi], y[out_ptr:]) out_ptr += count return y def randomsteps(self, x, out_count): self.resampler = self.klass(self.p, self.q, self.coefs) y = np.zeros((out_count,), dtype=self.output_type) base_step_size = len(x) / float(40) in_ptr = 0 out_ptr = 0 while in_ptr < len(x): step = int(base_step_size + random_state.randint(100) + 1) count = self.resampler.apply(x[in_ptr:in_ptr+step], y[out_ptr:]) in_ptr += step out_ptr += count return y def random_array(shape): a = random_state.randn(*shape) if random_state.rand() > .5: a = a + 1.j*random_state.randn(*shape) return a class UpfirdnNdCase(object): """ Test that broadcasting is working as expected for upfirdn function. 
""" def __init__(self, p, q): self.p = p self.q = q def __str__(self): return 'UpfirdnNdCase (%d, %d)'%(self.p, self.q) def __call__(self): print self ndims = random_state.randint(1,5) shape = tuple([random_state.randint(2, 5) for i in range(ndims)]) xdim = random_state.randint(ndims+1) hdim = random_state.randint(ndims+1) coefCount = random_state.randint(10, 20) inCount = random_state.randint(50, 100) h_singleton = [random_state.randint(2) for s in shape] hshape = tuple(np.where(h_singleton, 1, shape)) x_singleton = [random_state.randint(2) for s in shape] # Avoid singleton dims in both arrays at the same place x_singleton = np.where(np.array(h_singleton) & x_singleton, 0, x_singleton) xshape = tuple(np.where(x_singleton, 1, shape)) h = random_array(hshape[:hdim] + (coefCount,) + hshape[hdim:]) x = random_array(xshape[:xdim] + (inCount,) + xshape[xdim:]) print ' xshape =', xshape, '(%s)'%x.dtype print ' hshape =', hshape, '(%s)'%h.dtype y = upfirdn.upfirdn(x, h, self.p, self.q, xdim=xdim, hdim=hdim) for idx, yi in upfirdn.enumdims(y, (xdim,), complement=True): x_idx = tuple(np.where(x_singleton, 0, idx)) x_idx = x_idx[:xdim] + (range(inCount),) + x_idx[xdim:] xi = x[x_idx] h_idx = tuple(np.where(h_singleton, 0, idx)) h_idx = h_idx[:hdim] + (range(coefCount),) + h_idx[hdim:] hi = h[h_idx] y_expected = upfirdn.upfirdn(xi, hi, self.p, self.q) #print idx, np.mean(abs(yi - y_expected)**2) assert np.allclose(yi, y_expected, 1e-10) def random_coefs(max_n): """Returns random length vector of normal random variables, with a 50/50 chance of complex.""" n = random_state.randint(max_n) + 1 coefs = random_state.randn(n) if random_state.rand() > .5: coefs = coefs + 1.j*random_state.randn(n) return coefs def test(): yield ResamplerCase(1, 1, [1.]), yield ResamplerCase(3, 2, [1.]), yield ResamplerCase(2, 3, [1.]), for i in range(10): p = random_state.randint(200)+1 q = random_state.randint(200)+1 coefs = random_coefs(200) yield ResamplerCase(p, q, coefs), for i in range(10): p = random_state.randint(10)+1 q = random_state.randint(200)+1 coefs = random_coefs(200) yield ResamplerCase(p, q, coefs), for i in range(10): p = random_state.randint(200)+1 q = random_state.randint(10)+1 coefs = random_coefs(200) yield ResamplerCase(p, q, coefs), for i in range(20): p = random_state.randint(10)+1 q = random_state.randint(10)+1 yield UpfirdnNdCase(p, q), if __name__ == '__main__': # Execute the test suite for t in test(): t[0]()
bsd-3-clause
infobloxopen/infoblox-netmri
infoblox_netmri/api/broker/v2_7_0/device_group_member_broker.py
17
41061
from ..broker import Broker class DeviceGroupMemberBroker(Broker): controller = "device_group_members" def show(self, **kwargs): """Shows the details for the specified device group member. **Inputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` True | ``default:`` None :param DeviceGroupMemberID: The internal NetMRI identifier for this device group membership record. :type DeviceGroupMemberID: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param methods: A list of device group member methods. The listed methods will be called on each device group member returned and included in the output. Available methods are: device, infradevice. :type methods: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device. :type include: Array of String **Outputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :return device_group_member: The device group member identified by the specified DeviceGroupMemberID. :rtype device_group_member: DeviceGroupMember """ return self.api_request(self._get_method_fullname("show"), kwargs) def index(self, **kwargs): """Lists the available device group members. Any of the inputs listed may be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient. **Inputs** | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param DeviceGroupMemberID: The internal NetMRI identifier for this device group membership record. :type DeviceGroupMemberID: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param DeviceGroupMemberID: The internal NetMRI identifier for this device group membership record. :type DeviceGroupMemberID: Array of Integer | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param DeviceID: The internal NetMRI identifier for the device associated with this membership record. :type DeviceID: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param DeviceID: The internal NetMRI identifier for the device associated with this membership record. :type DeviceID: Array of Integer | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param GroupID: The internal NetMRI identifier for the group associated with this membership record. :type GroupID: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param GroupID: The internal NetMRI identifier for the group associated with this membership record. :type GroupID: Array of Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results. :type DeviceGroupID: Array of Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param timestamp: The data returned will represent the device group members as of this date and time.
If omitted, the result will indicate the most recently collected data. :type timestamp: DateTime | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param methods: A list of device group member methods. The listed methods will be called on each device group member returned and included in the output. Available methods are: device, infradevice. :type methods: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device. :type include: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` 0 :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information. :type start: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` 1000 :param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000. :type limit: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` DeviceGroupMemberID :param sort: The data field(s) to use for sorting the output. Default is DeviceGroupMemberID. Valid values are DeviceGroupMemberID, GroupID, DeviceID, DeviceGroupMemberStartTime, DeviceGroupMemberEndTime, DeviceGroupMemberChangedCols, DeviceGroupMemberTimestamp, DataSourceID. :type sort: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` asc :param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'. :type dir: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param select: The list of attributes to return for each DeviceGroupMember. Valid values are DeviceGroupMemberID, GroupID, DeviceID, DeviceGroupMemberStartTime, DeviceGroupMemberEndTime, DeviceGroupMemberChangedCols, DeviceGroupMemberTimestamp, DataSourceID. If empty or omitted, all attributes will be returned. :type select: Array | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param goto_field: The field name for NIOS GOTO that is used for locating a row position of records. :type goto_field: String | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records. :type goto_value: String **Outputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :return device_group_members: An array of the DeviceGroupMember objects that match the specified input criteria. :rtype device_group_members: Array of DeviceGroupMember """ return self.api_list_request(self._get_method_fullname("index"), kwargs) def search(self, **kwargs): """Lists the available device group members matching the input criteria. 
This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below. **Inputs** | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record. :type DataSourceID: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record. :type DataSourceID: Array of Integer | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param DeviceGroupMemberChangedCols: The fields that changed between this revision of the record and the previous revision. :type DeviceGroupMemberChangedCols: String | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param DeviceGroupMemberChangedCols: The fields that changed between this revision of the record and the previous revision. :type DeviceGroupMemberChangedCols: Array of String | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param DeviceGroupMemberEndTime: The ending effective time of this record, or empty if still in effect. :type DeviceGroupMemberEndTime: DateTime | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param DeviceGroupMemberEndTime: The ending effective time of this record, or empty if still in effect. :type DeviceGroupMemberEndTime: Array of DateTime | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param DeviceGroupMemberID: The internal NetMRI identifier for this device group membership record. :type DeviceGroupMemberID: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param DeviceGroupMemberID: The internal NetMRI identifier for this device group membership record. :type DeviceGroupMemberID: Array of Integer | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param DeviceGroupMemberStartTime: The starting effective time of this record. :type DeviceGroupMemberStartTime: DateTime | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param DeviceGroupMemberStartTime: The starting effective time of this record. :type DeviceGroupMemberStartTime: Array of DateTime | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param DeviceGroupMemberTimestamp: The date and time this record was collected or calculated. :type DeviceGroupMemberTimestamp: DateTime | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param DeviceGroupMemberTimestamp: The date and time this record was collected or calculated. :type DeviceGroupMemberTimestamp: Array of DateTime | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param DeviceID: The internal NetMRI identifier for the device associated with this membership record. 
:type DeviceID: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param DeviceID: The internal NetMRI identifier for the device associated with this membership record. :type DeviceID: Array of Integer | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param GroupID: The internal NetMRI identifier for the group associated with this membership record. :type GroupID: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param GroupID: The internal NetMRI identifier for the group associated with this membership record. :type GroupID: Array of Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results. :type DeviceGroupID: Array of Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param timestamp: The data returned will represent the device group members as of this date and time. If omitted, the result will indicate the most recently collected data. :type timestamp: DateTime | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param methods: A list of device group member methods. The listed methods will be called on each device group member returned and included in the output. Available methods are: device, infradevice. :type methods: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device. :type include: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` 0 :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information. :type start: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` 1000 :param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000. :type limit: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` DeviceGroupMemberID :param sort: The data field(s) to use for sorting the output. Default is DeviceGroupMemberID. Valid values are DeviceGroupMemberID, GroupID, DeviceID, DeviceGroupMemberStartTime, DeviceGroupMemberEndTime, DeviceGroupMemberChangedCols, DeviceGroupMemberTimestamp, DataSourceID. :type sort: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` asc :param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'. :type dir: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param select: The list of attributes to return for each DeviceGroupMember. 
Valid values are DeviceGroupMemberID, GroupID, DeviceID, DeviceGroupMemberStartTime, DeviceGroupMemberEndTime, DeviceGroupMemberChangedCols, DeviceGroupMemberTimestamp, DataSourceID. If empty or omitted, all attributes will be returned. :type select: Array | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param goto_field: The field name for NIOS GOTO that is used for locating a row position of records. :type goto_field: String | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records. :type goto_value: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param query: This value will be matched against device group members, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: DataSourceID, DeviceGroupMemberChangedCols, DeviceGroupMemberEndTime, DeviceGroupMemberID, DeviceGroupMemberStartTime, DeviceGroupMemberTimestamp, DeviceID, GroupID. :type query: String | ``api version min:`` 2.3 | ``api version max:`` None | ``required:`` False | ``default:`` None :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if not associated with database filtering. :type xml_filter: String **Outputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :return device_group_members: An array of the DeviceGroupMember objects that match the specified input criteria. :rtype device_group_members: Array of DeviceGroupMember """ return self.api_list_request(self._get_method_fullname("search"), kwargs) def find(self, **kwargs): """Lists the available device group members matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: DataSourceID, DeviceGroupMemberChangedCols, DeviceGroupMemberEndTime, DeviceGroupMemberID, DeviceGroupMemberStartTime, DeviceGroupMemberTimestamp, DeviceID, GroupID. **Inputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_DataSourceID: The operator to apply to the field DataSourceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DataSourceID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_DataSourceID: If op_DataSourceID is specified, the field named in this input will be compared to the value in DataSourceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DataSourceID must be specified if op_DataSourceID is specified. :type val_f_DataSourceID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_DataSourceID: If op_DataSourceID is specified, this value will be compared to the value in DataSourceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DataSourceID must be specified if op_DataSourceID is specified. :type val_c_DataSourceID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_DeviceGroupMemberChangedCols: The operator to apply to the field DeviceGroupMemberChangedCols. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceGroupMemberChangedCols: The fields that changed between this revision of the record and the previous revision. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_DeviceGroupMemberChangedCols: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_DeviceGroupMemberChangedCols: If op_DeviceGroupMemberChangedCols is specified, the field named in this input will be compared to the value in DeviceGroupMemberChangedCols using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceGroupMemberChangedCols must be specified if op_DeviceGroupMemberChangedCols is specified. :type val_f_DeviceGroupMemberChangedCols: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_DeviceGroupMemberChangedCols: If op_DeviceGroupMemberChangedCols is specified, this value will be compared to the value in DeviceGroupMemberChangedCols using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceGroupMemberChangedCols must be specified if op_DeviceGroupMemberChangedCols is specified. :type val_c_DeviceGroupMemberChangedCols: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_DeviceGroupMemberEndTime: The operator to apply to the field DeviceGroupMemberEndTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceGroupMemberEndTime: The ending effective time of this record, or empty if still in effect. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. 
:type op_DeviceGroupMemberEndTime: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_DeviceGroupMemberEndTime: If op_DeviceGroupMemberEndTime is specified, the field named in this input will be compared to the value in DeviceGroupMemberEndTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceGroupMemberEndTime must be specified if op_DeviceGroupMemberEndTime is specified. :type val_f_DeviceGroupMemberEndTime: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_DeviceGroupMemberEndTime: If op_DeviceGroupMemberEndTime is specified, this value will be compared to the value in DeviceGroupMemberEndTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceGroupMemberEndTime must be specified if op_DeviceGroupMemberEndTime is specified. :type val_c_DeviceGroupMemberEndTime: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_DeviceGroupMemberID: The operator to apply to the field DeviceGroupMemberID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceGroupMemberID: The internal NetMRI identifier for this device group membership record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_DeviceGroupMemberID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_DeviceGroupMemberID: If op_DeviceGroupMemberID is specified, the field named in this input will be compared to the value in DeviceGroupMemberID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceGroupMemberID must be specified if op_DeviceGroupMemberID is specified. :type val_f_DeviceGroupMemberID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_DeviceGroupMemberID: If op_DeviceGroupMemberID is specified, this value will be compared to the value in DeviceGroupMemberID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceGroupMemberID must be specified if op_DeviceGroupMemberID is specified. :type val_c_DeviceGroupMemberID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_DeviceGroupMemberStartTime: The operator to apply to the field DeviceGroupMemberStartTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceGroupMemberStartTime: The starting effective time of this record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. 
:type op_DeviceGroupMemberStartTime: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_DeviceGroupMemberStartTime: If op_DeviceGroupMemberStartTime is specified, the field named in this input will be compared to the value in DeviceGroupMemberStartTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceGroupMemberStartTime must be specified if op_DeviceGroupMemberStartTime is specified. :type val_f_DeviceGroupMemberStartTime: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_DeviceGroupMemberStartTime: If op_DeviceGroupMemberStartTime is specified, this value will be compared to the value in DeviceGroupMemberStartTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceGroupMemberStartTime must be specified if op_DeviceGroupMemberStartTime is specified. :type val_c_DeviceGroupMemberStartTime: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_DeviceGroupMemberTimestamp: The operator to apply to the field DeviceGroupMemberTimestamp. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceGroupMemberTimestamp: The date and time this record was collected or calculated. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_DeviceGroupMemberTimestamp: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_DeviceGroupMemberTimestamp: If op_DeviceGroupMemberTimestamp is specified, the field named in this input will be compared to the value in DeviceGroupMemberTimestamp using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceGroupMemberTimestamp must be specified if op_DeviceGroupMemberTimestamp is specified. :type val_f_DeviceGroupMemberTimestamp: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_DeviceGroupMemberTimestamp: If op_DeviceGroupMemberTimestamp is specified, this value will be compared to the value in DeviceGroupMemberTimestamp using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceGroupMemberTimestamp must be specified if op_DeviceGroupMemberTimestamp is specified. :type val_c_DeviceGroupMemberTimestamp: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_DeviceID: The operator to apply to the field DeviceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceID: The internal NetMRI identifier for the device associated with this membership record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. 
:type op_DeviceID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_DeviceID: If op_DeviceID is specified, the field named in this input will be compared to the value in DeviceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceID must be specified if op_DeviceID is specified. :type val_f_DeviceID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_DeviceID: If op_DeviceID is specified, this value will be compared to the value in DeviceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceID must be specified if op_DeviceID is specified. :type val_c_DeviceID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_GroupID: The operator to apply to the field GroupID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. GroupID: The internal NetMRI identifier for the group associated with this membership record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_GroupID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_GroupID: If op_GroupID is specified, the field named in this input will be compared to the value in GroupID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_GroupID must be specified if op_GroupID is specified. :type val_f_GroupID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_GroupID: If op_GroupID is specified, this value will be compared to the value in GroupID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_GroupID must be specified if op_GroupID is specified. :type val_c_GroupID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results. :type DeviceGroupID: Array of Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param timestamp: The data returned will represent the device group members as of this date and time. If omitted, the result will indicate the most recently collected data. :type timestamp: DateTime | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param methods: A list of device group member methods. The listed methods will be called on each device group member returned and included in the output. Available methods are: device, infradevice. :type methods: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device. 
:type include: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` 0 :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit parameter for more information. :type start: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` 1000 :param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000. :type limit: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` DeviceGroupMemberID :param sort: The data field(s) to use for sorting the output. Default is DeviceGroupMemberID. Valid values are DeviceGroupMemberID, GroupID, DeviceID, DeviceGroupMemberStartTime, DeviceGroupMemberEndTime, DeviceGroupMemberChangedCols, DeviceGroupMemberTimestamp, DataSourceID. :type sort: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` asc :param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'. :type dir: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param select: The list of attributes to return for each DeviceGroupMember. Valid values are DeviceGroupMemberID, GroupID, DeviceID, DeviceGroupMemberStartTime, DeviceGroupMemberEndTime, DeviceGroupMemberChangedCols, DeviceGroupMemberTimestamp, DataSourceID. If empty or omitted, all attributes will be returned. :type select: Array | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param goto_field: The field name for NIOS GOTO that is used for locating a row position of records. :type goto_field: String | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records. :type goto_value: String | ``api version min:`` 2.3 | ``api version max:`` None | ``required:`` False | ``default:`` None :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if not combined with database-level filtering. :type xml_filter: String **Outputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :return device_group_members: An array of the DeviceGroupMember objects that match the specified input criteria. :rtype device_group_members: Array of DeviceGroupMember """ return self.api_list_request(self._get_method_fullname("find"), kwargs)
apache-2.0
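The op_*/val_c_*/val_f_* convention documented above is easier to see in use. Below is a minimal sketch, assuming the infoblox-netmri client package; the hostname, credentials, and group ID are placeholders, and the exact client entry point may differ between client versions.

# Hedged sketch: querying device group members through the documented find() filters.
# Host, credentials, and group ID 42 are placeholders.
from infoblox_netmri.client import InfobloxNetMRI

client = InfobloxNetMRI("netmri.example.com", "admin", "secret")
broker = client.get_broker("DeviceGroupMember")

members = broker.find(
    op_GroupID="=",        # operator applied to the GroupID field
    val_c_GroupID="42",    # explicit constant compared against GroupID
    sort=["DeviceGroupMemberTimestamp"],
    dir=["desc"],          # newest revisions first
    limit=100,
)
for member in members:
    print(member.DeviceID, member.DeviceGroupMemberStartTime)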
kenshay/ImageScript
ProgramData/SystemFiles/Python/Lib/site-packages/jedi/evaluate/representation.py
3
22114
""" Like described in the :mod:`jedi.parser.tree` module, there's a need for an ast like module to represent the states of parsed modules. But now there are also structures in Python that need a little bit more than that. An ``Instance`` for example is only a ``Class`` before it is instantiated. This class represents these cases. So, why is there also a ``Class`` class here? Well, there are decorators and they change classes in Python 3. Representation modules also define "magic methods". Those methods look like ``py__foo__`` and are typically mappable to the Python equivalents ``__call__`` and others. Here's a list: ====================================== ======================================== **Method** **Description** -------------------------------------- ---------------------------------------- py__call__(params: Array) On callable objects, returns types. py__bool__() Returns True/False/None; None means that there's no certainty. py__bases__() Returns a list of base classes. py__mro__() Returns a list of classes (the mro). py__iter__() Returns a generator of a set of types. py__class__() Returns the class of an instance. py__getitem__(index: int/str) Returns a a set of types of the index. Can raise an IndexError/KeyError. py__file__() Only on modules. Returns None if does not exist. py__package__() Only on modules. For the import system. py__path__() Only on modules. For the import system. py__get__(call_object) Only on instances. Simulates descriptors. ====================================== ======================================== """ import os import pkgutil import imp import re from jedi._compatibility import use_metaclass from jedi.parser import tree from jedi import debug from jedi import common from jedi.evaluate.cache import memoize_default, CachedMetaClass, NO_DEFAULT from jedi.evaluate import compiled from jedi.evaluate import recursion from jedi.evaluate import iterable from jedi.evaluate import docstrings from jedi.evaluate import pep0484 from jedi.evaluate import param from jedi.evaluate import flow_analysis from jedi.evaluate import imports from jedi.evaluate import helpers from jedi.evaluate.filters import ParserTreeFilter, FunctionExecutionFilter, \ GlobalNameFilter, DictFilter, ContextName, AbstractNameDefinition, \ ParamName, AnonymousInstanceParamName, TreeNameDefinition from jedi.evaluate.dynamic import search_params from jedi.evaluate import context def apply_py__get__(context, base_context): try: method = context.py__get__ except AttributeError: yield context else: for descriptor_context in method(base_context): yield descriptor_context class ClassName(TreeNameDefinition): def infer(self): for result_context in super(ClassName, self).infer(): for c in apply_py__get__(result_context, self.parent_context): yield c class ClassFilter(ParserTreeFilter): name_class = ClassName class ClassContext(use_metaclass(CachedMetaClass, context.TreeContext)): """ This class is not only important to extend `tree.Class`, it is also a important for descriptors (if the descriptor methods are evaluated or not). """ api_type = 'class' def __init__(self, evaluator, classdef, parent_context): super(ClassContext, self).__init__(evaluator, parent_context=parent_context) self.tree_node = classdef @memoize_default(default=()) def py__mro__(self): def add(cls): if cls not in mro: mro.append(cls) mro = [self] # TODO Do a proper mro resolution. Currently we are just listing # classes. However, it's a complicated algorithm. 
for lazy_cls in self.py__bases__(): # TODO there's multiple different mro paths possible if this yields # multiple possibilities. Could be changed to be more correct. for cls in lazy_cls.infer(): # TODO detect for TypeError: duplicate base class str, # e.g. `class X(str, str): pass` try: mro_method = cls.py__mro__ except AttributeError: # TODO add a TypeError like: """ >>> class Y(lambda: test): pass Traceback (most recent call last): File "<stdin>", line 1, in <module> TypeError: function() argument 1 must be code, not str >>> class Y(1): pass Traceback (most recent call last): File "<stdin>", line 1, in <module> TypeError: int() takes at most 2 arguments (3 given) """ pass else: add(cls) for cls_new in mro_method(): add(cls_new) return tuple(mro) @memoize_default(default=()) def py__bases__(self): arglist = self.tree_node.get_super_arglist() if arglist: args = param.TreeArguments(self.evaluator, self, arglist) return [value for key, value in args.unpack() if key is None] else: return [context.LazyKnownContext(compiled.create(self.evaluator, object))] def py__call__(self, params): from jedi.evaluate.instance import TreeInstance return set([TreeInstance(self.evaluator, self.parent_context, self, params)]) def py__class__(self): return compiled.create(self.evaluator, type) def get_params(self): from jedi.evaluate.instance import AnonymousInstance anon = AnonymousInstance(self.evaluator, self.parent_context, self) return [AnonymousInstanceParamName(anon, param.name) for param in self.funcdef.params] def get_filters(self, search_global, until_position=None, origin_scope=None, is_instance=False): if search_global: yield ParserTreeFilter( self.evaluator, context=self, until_position=until_position, origin_scope=origin_scope ) else: for scope in self.py__mro__(): if isinstance(scope, compiled.CompiledObject): for filter in scope.get_filters(is_instance=is_instance): yield filter else: yield ClassFilter( self.evaluator, self, node_context=scope, origin_scope=origin_scope) def is_class(self): return True def get_subscope_by_name(self, name): raise DeprecationWarning for s in self.py__mro__(): for sub in reversed(s.subscopes): if sub.name.value == name: return sub raise KeyError("Couldn't find subscope.") def get_function_slot_names(self, name): for filter in self.get_filters(search_global=False): names = filter.get(name) if names: return names return [] def get_param_names(self): for name in self.get_function_slot_names('__init__'): for context_ in name.infer(): try: method = context_.get_param_names except AttributeError: pass else: return list(method())[1:] return [] @property def name(self): return ContextName(self, self.tree_node.name) class FunctionContext(use_metaclass(CachedMetaClass, context.TreeContext)): """ Needed because of decorators. Decorators are evaluated here. """ api_type = 'function' def __init__(self, evaluator, parent_context, funcdef): """ This should not be called directly """ super(FunctionContext, self).__init__(evaluator, parent_context) self.tree_node = funcdef def get_filters(self, search_global, until_position=None, origin_scope=None): if search_global: yield ParserTreeFilter( self.evaluator, context=self, until_position=until_position, origin_scope=origin_scope ) else: scope = self.py__class__() for filter in scope.get_filters(search_global=False, origin_scope=origin_scope): yield filter def infer_function_execution(self, function_execution): """ Created to be used by inheritance. 
""" if self.tree_node.is_generator(): return set([iterable.Generator(self.evaluator, function_execution)]) else: return function_execution.get_return_values() def get_function_execution(self, arguments=None): e = self.evaluator if arguments is None: return AnonymousFunctionExecution(e, self.parent_context, self) else: return FunctionExecutionContext(e, self.parent_context, self, arguments) def py__call__(self, arguments): function_execution = self.get_function_execution(arguments) return self.infer_function_execution(function_execution) def py__class__(self): # This differentiation is only necessary for Python2. Python3 does not # use a different method class. if isinstance(self.tree_node.get_parent_scope(), tree.Class): name = 'METHOD_CLASS' else: name = 'FUNCTION_CLASS' return compiled.get_special_object(self.evaluator, name) @property def name(self): return ContextName(self, self.tree_node.name) def get_param_names(self): function_execution = self.get_function_execution() return [ParamName(function_execution, param.name) for param in self.tree_node.params] class FunctionExecutionContext(context.TreeContext): """ This class is used to evaluate functions and their returns. This is the most complicated class, because it contains the logic to transfer parameters. It is even more complicated, because there may be multiple calls to functions and recursion has to be avoided. But this is responsibility of the decorators. """ function_execution_filter = FunctionExecutionFilter def __init__(self, evaluator, parent_context, function_context, var_args): super(FunctionExecutionContext, self).__init__(evaluator, parent_context) self.function_context = function_context self.tree_node = function_context.tree_node self.var_args = var_args @memoize_default(default=set()) @recursion.execution_recursion_decorator() def get_return_values(self, check_yields=False): funcdef = self.tree_node if funcdef.type == 'lambda': return self.evaluator.eval_element(self, funcdef.children[-1]) if check_yields: types = set() returns = funcdef.yields else: returns = funcdef.returns types = set(docstrings.find_return_types(self.get_root_context(), funcdef)) types |= set(pep0484.find_return_types(self.get_root_context(), funcdef)) for r in returns: check = flow_analysis.reachability_check(self, funcdef, r) if check is flow_analysis.UNREACHABLE: debug.dbg('Return unreachable: %s', r) else: if check_yields: types |= set(self._eval_yield(r)) else: types |= self.eval_node(r.children[1]) if check is flow_analysis.REACHABLE: debug.dbg('Return reachable: %s', r) break return types def _eval_yield(self, yield_expr): node = yield_expr.children[1] if node.type == 'yield_arg': # It must be a yield from. yield_from_types = self.eval_node(node.children[1]) for lazy_context in iterable.py__iter__(self.evaluator, yield_from_types, node): yield lazy_context else: yield context.LazyTreeContext(self, node) @recursion.execution_recursion_decorator(default=iter([])) def get_yield_values(self): for_parents = [(y, tree.search_ancestor(y, ('for_stmt', 'funcdef', 'while_stmt', 'if_stmt'))) for y in self.tree_node.yields] # Calculate if the yields are placed within the same for loop. yields_order = [] last_for_stmt = None for yield_, for_stmt in for_parents: # For really simple for loops we can predict the order. Otherwise # we just ignore it. parent = for_stmt.parent if parent.type == 'suite': parent = parent.parent if for_stmt.type == 'for_stmt' and parent == self.tree_node \ and for_stmt.defines_one_name(): # Simplicity for now. 
if for_stmt == last_for_stmt: yields_order[-1][1].append(yield_) else: yields_order.append((for_stmt, [yield_])) elif for_stmt == self.tree_node: yields_order.append((None, [yield_])) else: types = self.get_return_values(check_yields=True) if types: yield context.get_merged_lazy_context(list(types)) return last_for_stmt = for_stmt evaluator = self.evaluator for for_stmt, yields in yields_order: if for_stmt is None: # No for_stmt, just normal yields. for yield_ in yields: for result in self._eval_yield(yield_): yield result else: input_node = for_stmt.get_input_node() for_types = self.eval_node(input_node) ordered = iterable.py__iter__(evaluator, for_types, input_node) ordered = list(ordered) for lazy_context in ordered: dct = {str(for_stmt.children[1]): lazy_context.infer()} with helpers.predefine_names(self, for_stmt, dct): for yield_in_same_for_stmt in yields: for result in self._eval_yield(yield_in_same_for_stmt): yield result def get_filters(self, search_global, until_position=None, origin_scope=None): yield self.function_execution_filter(self.evaluator, self, until_position=until_position, origin_scope=origin_scope) @memoize_default(default=NO_DEFAULT) def get_params(self): return param.get_params(self.evaluator, self.parent_context, self.tree_node, self.var_args) class AnonymousFunctionExecution(FunctionExecutionContext): def __init__(self, evaluator, parent_context, function_context): super(AnonymousFunctionExecution, self).__init__( evaluator, parent_context, function_context, var_args=None) @memoize_default(default=NO_DEFAULT) def get_params(self): # We need to do a dynamic search here. return search_params(self.evaluator, self.parent_context, self.tree_node) class ModuleAttributeName(AbstractNameDefinition): """ For module attributes like __file__, __str__ and so on. """ api_type = 'instance' def __init__(self, parent_module, string_name): self.parent_context = parent_module self.string_name = string_name def infer(self): return compiled.create(self.parent_context.evaluator, str).execute( param.ValuesArguments([]) ) class ModuleContext(use_metaclass(CachedMetaClass, context.TreeContext)): api_type = 'module' parent_context = None def __init__(self, evaluator, module_node): super(ModuleContext, self).__init__(evaluator, parent_context=None) self.tree_node = module_node def get_filters(self, search_global, until_position=None, origin_scope=None): yield ParserTreeFilter( self.evaluator, context=self, until_position=until_position, origin_scope=origin_scope ) yield GlobalNameFilter(self, self.tree_node) yield DictFilter(self._sub_modules_dict()) yield DictFilter(self._module_attributes_dict()) for star_module in self.star_imports(): yield next(star_module.get_filters(search_global)) # I'm not sure if the star import cache is really that effective anymore # with all the other really fast import caches. Recheck. Also we would need # to push the star imports into Evaluator.modules, if we reenable this. @memoize_default([]) def star_imports(self): modules = [] for i in self.tree_node.imports: if i.is_star_import(): name = i.star_import_name() new = imports.infer_import(self, name) for module in new: if isinstance(module, ModuleContext): modules += module.star_imports() modules += new return modules @memoize_default() def _module_attributes_dict(self): names = ['__file__', '__package__', '__doc__', '__name__'] # All the additional module attributes are strings. 
return dict((n, ModuleAttributeName(self, n)) for n in names) @property @memoize_default() def name(self): return ContextName(self, self.tree_node.name) def _get_init_directory(self): """ :return: The path to the directory of a package. None in case it's not a package. """ for suffix, _, _ in imp.get_suffixes(): ending = '__init__' + suffix py__file__ = self.py__file__() if py__file__ is not None and py__file__.endswith(ending): # Remove the ending, including the separator. return self.py__file__()[:-len(ending) - 1] return None def py__name__(self): for name, module in self.evaluator.modules.items(): if module == self and name != '': return name return '__main__' def py__file__(self): """ In contrast to Python's __file__ can be None. """ if self.tree_node.path is None: return None return os.path.abspath(self.tree_node.path) def py__package__(self): if self._get_init_directory() is None: return re.sub(r'\.?[^\.]+$', '', self.py__name__()) else: return self.py__name__() def _py__path__(self): search_path = self.evaluator.sys_path init_path = self.py__file__() if os.path.basename(init_path) == '__init__.py': with open(init_path, 'rb') as f: content = common.source_to_unicode(f.read()) # these are strings that need to be used for namespace packages, # the first one is ``pkgutil``, the second ``pkg_resources``. options = ('declare_namespace(__name__)', 'extend_path(__path__') if options[0] in content or options[1] in content: # It is a namespace, now try to find the rest of the # modules on sys_path or whatever the search_path is. paths = set() for s in search_path: other = os.path.join(s, self.name.string_name) if os.path.isdir(other): paths.add(other) if paths: return list(paths) # TODO I'm not sure if this is how nested namespace # packages work. The tests are not really good enough to # show that. # Default to this. return [self._get_init_directory()] @property def py__path__(self): """ Not seen here, since it's a property. The callback actually uses a variable, so use it like:: foo.py__path__(sys_path) In case of a package, this returns Python's __path__ attribute, which is a list of paths (strings). Raises an AttributeError if the module is not a package. """ path = self._get_init_directory() if path is None: raise AttributeError('Only packages have __path__ attributes.') else: return self._py__path__ @memoize_default() def _sub_modules_dict(self): """ Lists modules in the directory of this module (if this module is a package). """ path = self.tree_node.path names = {} if path is not None and path.endswith(os.path.sep + '__init__.py'): mods = pkgutil.iter_modules([os.path.dirname(path)]) for module_loader, name, is_pkg in mods: # It's obviously a relative import to the current module. names[name] = imports.SubModuleName(self, name) # TODO add something like this in the future, its cleaner than the # import hacks. # ``os.path`` is a hardcoded exception, because it's a # ``sys.modules`` modification. # if str(self.name) == 'os': # names.append(Name('path', parent_context=self)) return names def py__class__(self): return compiled.get_special_object(self.evaluator, 'MODULE_CLASS')
gpl-3.0
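The py__mro__ method above deliberately skips a full C3 linearization (see its TODO) and simply walks base classes depth-first, deduplicating as it goes. Below is an illustrative sketch of that same simplification applied to ordinary Python classes, not jedi contexts, showing where it diverges from the real MRO on a diamond hierarchy.

# Illustrative only: the "append bases depth-first, skip duplicates" strategy
# that ClassContext.py__mro__ uses, contrasted with Python's actual C3 result.
def naive_mro(cls):
    mro = [cls]
    def add(c):
        if c not in mro:
            mro.append(c)
    for base in cls.__bases__:
        for c in naive_mro(base):
            add(c)
    return mro

class A(object): pass
class B(A): pass
class C(A): pass
class D(B, C): pass

print([c.__name__ for c in naive_mro(D)])   # ['D', 'B', 'A', 'object', 'C']
print([c.__name__ for c in D.__mro__])      # ['D', 'B', 'C', 'A', 'object']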
pwoodworth/intellij-community
python/lib/Lib/site-packages/django/contrib/auth/tests/permissions.py
231
1654
try:
    from cStringIO import StringIO
except ImportError:
    from StringIO import StringIO

from django.contrib.auth.management import create_permissions
from django.contrib.auth import models as auth_models
from django.contrib.contenttypes import models as contenttypes_models
from django.core.management import call_command
from django.test import TestCase


class TestAuthPermissions(TestCase):
    def tearDown(self):
        # These tests mess with content types, but content type lookups
        # are cached, so we need to make sure the effects of this test
        # are cleaned up.
        contenttypes_models.ContentType.objects.clear_cache()

    def test_permission_register_order(self):
        """Test that the order of registered permissions doesn't break"""
        # Changeset 14413 introduced a regression in the ordering of
        # newly created permissions for objects. When loading a fixture
        # after the initial creation (such as during unit tests), the
        # expected IDs for the permissions may not match up, leading to
        # SQL errors. This is ticket 14731

        # Start with a clean slate and build the permissions as we
        # expect to see them in the fixtures.
        auth_models.Permission.objects.all().delete()
        contenttypes_models.ContentType.objects.all().delete()
        create_permissions(auth_models, [], verbosity=0)
        create_permissions(contenttypes_models, [], verbosity=0)

        stderr = StringIO()
        call_command('loaddata', 'test_permissions.json',
                     verbosity=0, commit=False, stderr=stderr)
        self.assertEqual(stderr.getvalue(), '')
apache-2.0
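The tearDown above exists because ContentType lookups are cached process-wide, so deleting rows does not invalidate what get_for_model() already handed out. A minimal sketch of the same cleanup, assuming a configured Django project with django.contrib.contenttypes installed:

# Hedged sketch: why the test clears the ContentType cache after deleting rows.
from django.contrib.contenttypes import models as contenttypes_models

def reset_content_types():
    # Deleting rows alone leaves stale objects in the in-memory cache, so a
    # later get_for_model() could return a ContentType whose row is gone.
    contenttypes_models.ContentType.objects.all().delete()
    contenttypes_models.ContentType.objects.clear_cache()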
pipsiscool/audacity
lib-src/lv2/sratom/waflib/Tools/compiler_cxx.py
343
1762
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file

import os,sys,imp,types
from waflib.Tools import ccroot
from waflib import Utils,Configure
from waflib.Logs import debug
cxx_compiler={'win32':['msvc','g++'],'cygwin':['g++'],'darwin':['g++'],'aix':['xlc++','g++'],'linux':['g++','icpc'],'sunos':['sunc++','g++'],'irix':['g++'],'hpux':['g++'],'gnu':['g++'],'java':['g++','msvc','icpc'],'default':['g++']}
def configure(conf):
	try:test_for_compiler=conf.options.check_cxx_compiler
	except AttributeError:conf.fatal("Add options(opt): opt.load('compiler_cxx')")
	for compiler in test_for_compiler.split():
		conf.env.stash()
		conf.start_msg('Checking for %r (c++ compiler)'%compiler)
		try:
			conf.load(compiler)
		except conf.errors.ConfigurationError ,e:
			conf.env.revert()
			conf.end_msg(False)
			debug('compiler_cxx: %r'%e)
		else:
			if conf.env['CXX']:
				conf.end_msg(conf.env.get_flat('CXX'))
				conf.env['COMPILER_CXX']=compiler
				break
			conf.end_msg(False)
	else:
		conf.fatal('could not configure a c++ compiler!')
def options(opt):
	opt.load_special_tools('cxx_*.py')
	global cxx_compiler
	build_platform=Utils.unversioned_sys_platform()
	possible_compiler_list=cxx_compiler[build_platform in cxx_compiler and build_platform or'default']
	test_for_compiler=' '.join(possible_compiler_list)
	cxx_compiler_opts=opt.add_option_group('C++ Compiler Options')
	cxx_compiler_opts.add_option('--check-cxx-compiler',default="%s"%test_for_compiler,help='On this platform (%s) the following C++ Compiler will be checked by default: "%s"'%(build_platform,test_for_compiler),dest="check_cxx_compiler")
	for x in test_for_compiler.split():
		opt.load('%s'%x)
mit
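The platform lookup in options() above predates Python's conditional expression, so it uses the old and/or idiom: cxx_compiler[build_platform in cxx_compiler and build_platform or'default']. A short sketch of the same logic in the two modern spellings:

# Equivalent, clearer spellings of the and/or platform lookup used above.
cxx_compiler = {'linux': ['g++', 'icpc'], 'darwin': ['g++'], 'default': ['g++']}
build_platform = 'linux'

# Conditional expression (Python 2.5+):
compilers = cxx_compiler[build_platform if build_platform in cxx_compiler else 'default']

# dict.get with a fallback, the most idiomatic form:
compilers = cxx_compiler.get(build_platform, cxx_compiler['default'])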
bobcyw/django
django/db/models/sql/subqueries.py
93
8020
""" Query subclasses which provide extra functionality beyond simple data retrieval. """ from django.core.exceptions import FieldError from django.db import connections from django.db.models.query_utils import Q from django.db.models.sql.constants import ( CURSOR, GET_ITERATOR_CHUNK_SIZE, NO_RESULTS, ) from django.db.models.sql.query import Query from django.utils import six __all__ = ['DeleteQuery', 'UpdateQuery', 'InsertQuery', 'AggregateQuery'] class DeleteQuery(Query): """ Delete queries are done through this class, since they are more constrained than general queries. """ compiler = 'SQLDeleteCompiler' def do_query(self, table, where, using): self.tables = [table] self.where = where cursor = self.get_compiler(using).execute_sql(CURSOR) return cursor.rowcount if cursor else 0 def delete_batch(self, pk_list, using, field=None): """ Set up and execute delete queries for all the objects in pk_list. More than one physical query may be executed if there are a lot of values in pk_list. """ # number of objects deleted num_deleted = 0 if not field: field = self.get_meta().pk for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE): self.where = self.where_class() self.add_q(Q( **{field.attname + '__in': pk_list[offset:offset + GET_ITERATOR_CHUNK_SIZE]})) num_deleted += self.do_query(self.get_meta().db_table, self.where, using=using) return num_deleted def delete_qs(self, query, using): """ Delete the queryset in one SQL query (if possible). For simple queries this is done by copying the query.query.where to self.query, for complex queries by using subquery. """ innerq = query.query # Make sure the inner query has at least one table in use. innerq.get_initial_alias() # The same for our new query. self.get_initial_alias() innerq_used_tables = [t for t in innerq.tables if innerq.alias_refcount[t]] if not innerq_used_tables or innerq_used_tables == self.tables: # There is only the base table in use in the query. self.where = innerq.where else: pk = query.model._meta.pk if not connections[using].features.update_can_self_select: # We can't do the delete using subquery. values = list(query.values_list('pk', flat=True)) if not values: return return self.delete_batch(values, using) else: innerq.clear_select_clause() innerq.select = [ pk.get_col(self.get_initial_alias()) ] values = innerq self.where = self.where_class() self.add_q(Q(pk__in=values)) cursor = self.get_compiler(using).execute_sql(CURSOR) return cursor.rowcount if cursor else 0 class UpdateQuery(Query): """ Represents an "update" SQL query. """ compiler = 'SQLUpdateCompiler' def __init__(self, *args, **kwargs): super(UpdateQuery, self).__init__(*args, **kwargs) self._setup_query() def _setup_query(self): """ Runs on initialization and after cloning. Any attributes that would normally be set in __init__ should go in here, instead, so that they are also set up after a clone() call. """ self.values = [] self.related_ids = None if not hasattr(self, 'related_updates'): self.related_updates = {} def clone(self, klass=None, **kwargs): return super(UpdateQuery, self).clone(klass, related_updates=self.related_updates.copy(), **kwargs) def update_batch(self, pk_list, values, using): self.add_update_values(values) for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE): self.where = self.where_class() self.add_q(Q(pk__in=pk_list[offset: offset + GET_ITERATOR_CHUNK_SIZE])) self.get_compiler(using).execute_sql(NO_RESULTS) def add_update_values(self, values): """ Convert a dictionary of field name to value mappings into an update query. 
This is the entry point for the public update() method on querysets. """ values_seq = [] for name, val in six.iteritems(values): field = self.get_meta().get_field(name) direct = not (field.auto_created and not field.concrete) or not field.concrete model = field.model._meta.concrete_model if not direct or (field.is_relation and field.many_to_many): raise FieldError( 'Cannot update model field %r (only non-relations and ' 'foreign keys permitted).' % field ) if model is not self.get_meta().model: self.add_related_update(model, field, val) continue values_seq.append((field, model, val)) return self.add_update_fields(values_seq) def add_update_fields(self, values_seq): """ Append a sequence of (field, model, value) triples to the internal list that will be used to generate the UPDATE query. Might be more usefully called add_update_targets() to hint at the extra information here. """ self.values.extend(values_seq) def add_related_update(self, model, field, value): """ Adds (name, value) to an update query for an ancestor model. Updates are coalesced so that we only run one update query per ancestor. """ self.related_updates.setdefault(model, []).append((field, None, value)) def get_related_updates(self): """ Returns a list of query objects: one for each update required to an ancestor model. Each query will have the same filtering conditions as the current query but will only update a single table. """ if not self.related_updates: return [] result = [] for model, values in six.iteritems(self.related_updates): query = UpdateQuery(model) query.values = values if self.related_ids is not None: query.add_filter(('pk__in', self.related_ids)) result.append(query) return result class InsertQuery(Query): compiler = 'SQLInsertCompiler' def __init__(self, *args, **kwargs): super(InsertQuery, self).__init__(*args, **kwargs) self.fields = [] self.objs = [] def clone(self, klass=None, **kwargs): extras = { 'fields': self.fields[:], 'objs': self.objs[:], 'raw': self.raw, } extras.update(kwargs) return super(InsertQuery, self).clone(klass, **extras) def insert_values(self, fields, objs, raw=False): """ Set up the insert query from the 'insert_values' dictionary. The dictionary gives the model field names and their target values. If 'raw_values' is True, the values in the 'insert_values' dictionary are inserted directly into the query, rather than passed as SQL parameters. This provides a way to insert NULL and DEFAULT keywords into the query, for example. """ self.fields = fields self.objs = objs self.raw = raw class AggregateQuery(Query): """ An AggregateQuery takes another query as a parameter to the FROM clause and only selects the elements in the provided list. """ compiler = 'SQLAggregateCompiler' def add_subquery(self, query, using): self.subquery, self.sub_params = query.get_compiler(using).as_sql( with_col_aliases=True, subquery=True, )
bsd-3-clause
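delete_batch() and update_batch() above both cap each statement at GET_ITERATOR_CHUNK_SIZE primary keys so a huge pk_list never produces an oversized IN clause. The offset slicing they use is a general pattern; here is a standalone sketch (the chunk size value is illustrative, Django's actual constant lives in django.db.models.sql.constants):

# The same offset-based chunking the batch methods use, as a reusable helper.
GET_ITERATOR_CHUNK_SIZE = 100  # illustrative value

def chunked(seq, size=GET_ITERATOR_CHUNK_SIZE):
    for offset in range(0, len(seq), size):
        yield seq[offset:offset + size]

pk_list = list(range(250))
for chunk in chunked(pk_list):
    # each chunk would become one "WHERE pk IN (...)" query
    print(len(chunk))   # 100, 100, 50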
shaarli/python-shaarli-client
setup.py
1
2012
#!/usr/bin/env python3
"""Setup script for shaarli-client"""
import codecs
import os
import re

from setuptools import find_packages, setup


def get_long_description():
    """Reads the main README.rst to get the program's long description"""
    with codecs.open('README.rst', 'r', 'utf-8') as f_readme:
        return f_readme.read()


def get_package_metadata(attribute):
    """Reads metadata from the main package's __init__"""
    with open(os.path.join('shaarli_client', '__init__.py'), 'r') as f_init:
        return re.search(
            r'^__{attr}__\s*=\s*[\'"]([^\'"]*)[\'"]'.format(attr=attribute),
            f_init.read(),
            re.MULTILINE
        ).group(1)


setup(
    name=get_package_metadata('title'),
    version=get_package_metadata('version'),
    description=get_package_metadata('brief'),
    long_description=get_long_description(),
    author=get_package_metadata('author'),
    maintainer='VirtualTam',
    maintainer_email='virtualtam@flibidi.net',
    license='MIT',
    url='https://github.com/shaarli/python-shaarli-client',
    keywords='bookmark bookmarking shaarli social',
    packages=find_packages(exclude=['tests.*', 'tests']),
    entry_points={
        'console_scripts': [
            'shaarli = shaarli_client.main:main',
        ],
    },
    install_requires=[
        'requests >= 2.25',
        'pyjwt == 2.0.1'
    ],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: End Users/Desktop',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Topic :: Utilities',
    ]
)
mit
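get_package_metadata() above reads dunder assignments straight out of the package's __init__.py with a regex rather than importing the package, which could fail before its dependencies are installed. A sketch of the same regex run against an in-memory sample (the sample text and version number are invented for illustration):

# Sketch of the metadata regex from setup.py, applied to a sample string.
import re

SAMPLE_INIT = '''"""shaarli_client"""
__title__ = 'shaarli-client'
__version__ = '0.4.1'
'''

def get_metadata(attribute, source):
    return re.search(
        r'^__{attr}__\s*=\s*[\'"]([^\'"]*)[\'"]'.format(attr=attribute),
        source,
        re.MULTILINE
    ).group(1)

print(get_metadata('version', SAMPLE_INIT))   # 0.4.1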
linkedin/indextank-service
storefront/thrift/transport/THttpClient.py
62
3603
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

from TTransport import *
from cStringIO import StringIO

import urlparse
import httplib
import warnings
import socket

class THttpClient(TTransportBase):

  """Http implementation of TTransport base."""

  def __init__(self, uri_or_host, port=None, path=None):
    """THttpClient supports two different types of constructor parameters.

    THttpClient(host, port, path) - deprecated
    THttpClient(uri)

    Only the second supports https."""
    if port is not None:
      warnings.warn("Please use the THttpClient('http://host:port/path') syntax", DeprecationWarning, stacklevel=2)
      self.host = uri_or_host
      self.port = port
      assert path
      self.path = path
      self.scheme = 'http'
    else:
      parsed = urlparse.urlparse(uri_or_host)
      self.scheme = parsed.scheme
      assert self.scheme in ('http', 'https')
      if self.scheme == 'http':
        self.port = parsed.port or httplib.HTTP_PORT
      elif self.scheme == 'https':
        self.port = parsed.port or httplib.HTTPS_PORT
      self.host = parsed.hostname
      self.path = parsed.path
      if parsed.query:
        self.path += '?%s' % parsed.query
    self.__wbuf = StringIO()
    self.__http = None
    self.__timeout = None

  def open(self):
    if self.scheme == 'http':
      self.__http = httplib.HTTP(self.host, self.port)
    else:
      self.__http = httplib.HTTPS(self.host, self.port)

  def close(self):
    self.__http.close()
    self.__http = None

  def isOpen(self):
    return self.__http is not None

  def setTimeout(self, ms):
    if not hasattr(socket, 'getdefaulttimeout'):
      raise NotImplementedError

    if ms is None:
      self.__timeout = None
    else:
      self.__timeout = ms/1000.0

  def read(self, sz):
    return self.__http.file.read(sz)

  def write(self, buf):
    self.__wbuf.write(buf)

  def __withTimeout(f):
    def _f(*args, **kwargs):
      orig_timeout = socket.getdefaulttimeout()
      socket.setdefaulttimeout(args[0].__timeout)
      result = f(*args, **kwargs)
      socket.setdefaulttimeout(orig_timeout)
      return result
    return _f

  def flush(self):
    if self.isOpen():
      self.close()
    self.open()

    # Pull data out of buffer
    data = self.__wbuf.getvalue()
    self.__wbuf = StringIO()

    # HTTP request
    self.__http.putrequest('POST', self.path)

    # Write headers
    self.__http.putheader('Host', self.host)
    self.__http.putheader('Content-Type', 'application/x-thrift')
    self.__http.putheader('Content-Length', str(len(data)))
    self.__http.endheaders()

    # Write payload
    self.__http.send(data)

    # Get reply to flush the request
    self.code, self.message, self.headers = self.__http.getreply()

  # Decorate if we know how to timeout
  if hasattr(socket, 'getdefaulttimeout'):
    flush = __withTimeout(flush)
apache-2.0
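For orientation, this transport is normally wrapped in a protocol and a generated client stub. A minimal sketch using the Python 2-era Thrift API this file targets; MyService is a placeholder for whatever thrift-generated client module you actually have, and ping() is a placeholder RPC:

# Hedged sketch: wiring THttpClient into a Thrift client stack.
from thrift.transport import THttpClient
from thrift.protocol import TBinaryProtocol
from myservice import MyService  # hypothetical generated module

transport = THttpClient.THttpClient('http://rpc.example.com:8080/thrift')
transport.setTimeout(5000)                      # milliseconds, see setTimeout()
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = MyService.Client(protocol)

transport.open()
try:
    result = client.ping()                      # placeholder RPC method
finally:
    transport.close()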
cnoviello/micropython
tests/bytecode/pylib-tests/tabnanny.py
28
11399
#! /usr/bin/env python3 """The Tab Nanny despises ambiguous indentation. She knows no mercy. tabnanny -- Detection of ambiguous indentation For the time being this module is intended to be called as a script. However it is possible to import it into an IDE and use the function check() described below. Warning: The API provided by this module is likely to change in future releases; such changes may not be backward compatible. """ # Released to the public domain, by Tim Peters, 15 April 1998. # XXX Note: this is now a standard library module. # XXX The API needs to undergo changes however; the current code is too # XXX script-like. This will be addressed later. __version__ = "6" import os import sys import getopt import tokenize if not hasattr(tokenize, 'NL'): raise ValueError("tokenize.NL doesn't exist -- tokenize module too old") __all__ = ["check", "NannyNag", "process_tokens"] verbose = 0 filename_only = 0 def errprint(*args): sep = "" for arg in args: sys.stderr.write(sep + str(arg)) sep = " " sys.stderr.write("\n") def main(): global verbose, filename_only try: opts, args = getopt.getopt(sys.argv[1:], "qv") except getopt.error as msg: errprint(msg) return for o, a in opts: if o == '-q': filename_only = filename_only + 1 if o == '-v': verbose = verbose + 1 if not args: errprint("Usage:", sys.argv[0], "[-v] file_or_directory ...") return for arg in args: check(arg) class NannyNag(Exception): """ Raised by tokeneater() if detecting an ambiguous indent. Captured and handled in check(). """ def __init__(self, lineno, msg, line): self.lineno, self.msg, self.line = lineno, msg, line def get_lineno(self): return self.lineno def get_msg(self): return self.msg def get_line(self): return self.line def check(file): """check(file_or_dir) If file_or_dir is a directory and not a symbolic link, then recursively descend the directory tree named by file_or_dir, checking all .py files along the way. If file_or_dir is an ordinary Python source file, it is checked for whitespace related problems. The diagnostic messages are written to standard output using the print statement. """ if os.path.isdir(file) and not os.path.islink(file): if verbose: print("%r: listing directory" % (file,)) names = os.listdir(file) for name in names: fullname = os.path.join(file, name) if (os.path.isdir(fullname) and not os.path.islink(fullname) or os.path.normcase(name[-3:]) == ".py"): check(fullname) return try: f = tokenize.open(file) except IOError as msg: errprint("%r: I/O Error: %s" % (file, msg)) return if verbose > 1: print("checking %r ..." % file) try: process_tokens(tokenize.generate_tokens(f.readline)) except tokenize.TokenError as msg: errprint("%r: Token Error: %s" % (file, msg)) return except IndentationError as msg: errprint("%r: Indentation Error: %s" % (file, msg)) return except NannyNag as nag: badline = nag.get_lineno() line = nag.get_line() if verbose: print("%r: *** Line %d: trouble in tab city! ***" % (file, badline)) print("offending line: %r" % (line,)) print(nag.get_msg()) else: if ' ' in file: file = '"' + file + '"' if filename_only: print(file) else: print(file, badline, repr(line)) return finally: f.close() if verbose: print("%r: Clean bill of health." 
% (file,)) class Whitespace: # the characters used for space and tab S, T = ' \t' # members: # raw # the original string # n # the number of leading whitespace characters in raw # nt # the number of tabs in raw[:n] # norm # the normal form as a pair (count, trailing), where: # count # a tuple such that raw[:n] contains count[i] # instances of S * i + T # trailing # the number of trailing spaces in raw[:n] # It's A Theorem that m.indent_level(t) == # n.indent_level(t) for all t >= 1 iff m.norm == n.norm. # is_simple # true iff raw[:n] is of the form (T*)(S*) def __init__(self, ws): self.raw = ws S, T = Whitespace.S, Whitespace.T count = [] b = n = nt = 0 for ch in self.raw: if ch == S: n = n + 1 b = b + 1 elif ch == T: n = n + 1 nt = nt + 1 if b >= len(count): count = count + [0] * (b - len(count) + 1) count[b] = count[b] + 1 b = 0 else: break self.n = n self.nt = nt self.norm = tuple(count), b self.is_simple = len(count) <= 1 # return length of longest contiguous run of spaces (whether or not # preceding a tab) def longest_run_of_spaces(self): count, trailing = self.norm return max(len(count)-1, trailing) def indent_level(self, tabsize): # count, il = self.norm # for i in range(len(count)): # if count[i]: # il = il + (i//tabsize + 1)*tabsize * count[i] # return il # quicker: # il = trailing + sum (i//ts + 1)*ts*count[i] = # trailing + ts * sum (i//ts + 1)*count[i] = # trailing + ts * sum i//ts*count[i] + count[i] = # trailing + ts * [(sum i//ts*count[i]) + (sum count[i])] = # trailing + ts * [(sum i//ts*count[i]) + num_tabs] # and note that i//ts*count[i] is 0 when i < ts count, trailing = self.norm il = 0 for i in range(tabsize, len(count)): il = il + i//tabsize * count[i] return trailing + tabsize * (il + self.nt) # return true iff self.indent_level(t) == other.indent_level(t) # for all t >= 1 def equal(self, other): return self.norm == other.norm # return a list of tuples (ts, i1, i2) such that # i1 == self.indent_level(ts) != other.indent_level(ts) == i2. # Intended to be used after not self.equal(other) is known, in which # case it will return at least one witnessing tab size. def not_equal_witness(self, other): n = max(self.longest_run_of_spaces(), other.longest_run_of_spaces()) + 1 a = [] for ts in range(1, n+1): if self.indent_level(ts) != other.indent_level(ts): a.append( (ts, self.indent_level(ts), other.indent_level(ts)) ) return a # Return True iff self.indent_level(t) < other.indent_level(t) # for all t >= 1. # The algorithm is due to Vincent Broman. # Easy to prove it's correct. # XXXpost that. # Trivial to prove n is sharp (consider T vs ST). # Unknown whether there's a faster general way. I suspected so at # first, but no longer. # For the special (but common!) case where M and N are both of the # form (T*)(S*), M.less(N) iff M.len() < N.len() and # M.num_tabs() <= N.num_tabs(). Proof is easy but kinda long-winded. # XXXwrite that up. # Note that M is of the form (T*)(S*) iff len(M.norm[0]) <= 1. def less(self, other): if self.n >= other.n: return False if self.is_simple and other.is_simple: return self.nt <= other.nt n = max(self.longest_run_of_spaces(), other.longest_run_of_spaces()) + 1 # the self.n >= other.n test already did it for ts=1 for ts in range(2, n+1): if self.indent_level(ts) >= other.indent_level(ts): return False return True # return a list of tuples (ts, i1, i2) such that # i1 == self.indent_level(ts) >= other.indent_level(ts) == i2. # Intended to be used after not self.less(other) is known, in which # case it will return at least one witnessing tab size. 
def not_less_witness(self, other): n = max(self.longest_run_of_spaces(), other.longest_run_of_spaces()) + 1 a = [] for ts in range(1, n+1): if self.indent_level(ts) >= other.indent_level(ts): a.append( (ts, self.indent_level(ts), other.indent_level(ts)) ) return a def format_witnesses(w): firsts = (str(tup[0]) for tup in w) prefix = "at tab size" if len(w) > 1: prefix = prefix + "s" return prefix + " " + ', '.join(firsts) def process_tokens(tokens): INDENT = tokenize.INDENT DEDENT = tokenize.DEDENT NEWLINE = tokenize.NEWLINE JUNK = tokenize.COMMENT, tokenize.NL indents = [Whitespace("")] check_equal = 0 for (type, token, start, end, line) in tokens: if type == NEWLINE: # a program statement, or ENDMARKER, will eventually follow, # after some (possibly empty) run of tokens of the form # (NL | COMMENT)* (INDENT | DEDENT+)? # If an INDENT appears, setting check_equal is wrong, and will # be undone when we see the INDENT. check_equal = 1 elif type == INDENT: check_equal = 0 thisguy = Whitespace(token) if not indents[-1].less(thisguy): witness = indents[-1].not_less_witness(thisguy) msg = "indent not greater e.g. " + format_witnesses(witness) raise NannyNag(start[0], msg, line) indents.append(thisguy) elif type == DEDENT: # there's nothing we need to check here! what's important is # that when the run of DEDENTs ends, the indentation of the # program statement (or ENDMARKER) that triggered the run is # equal to what's left at the top of the indents stack # Ouch! This assert triggers if the last line of the source # is indented *and* lacks a newline -- then DEDENTs pop out # of thin air. # assert check_equal # else no earlier NEWLINE, or an earlier INDENT check_equal = 1 del indents[-1] elif check_equal and type not in JUNK: # this is the first "real token" following a NEWLINE, so it # must be the first token of the next program statement, or an # ENDMARKER; the "line" argument exposes the leading whitespace # for this statement; in the case of ENDMARKER, line is an empty # string, so will properly match the empty string with which the # "indents" stack was seeded check_equal = 0 thisguy = Whitespace(line) if not indents[-1].equal(thisguy): witness = indents[-1].not_equal_witness(thisguy) msg = "indent not equal e.g. " + format_witnesses(witness) raise NannyNag(start[0], msg, line) if __name__ == '__main__': main()
mit
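tabnanny is primarily a command-line tool (python -m tabnanny -v file_or_dir), but as the docstring above notes, check() can also be imported and called directly. A minimal sketch with a placeholder path:

# Driving the checker above from Python rather than the CLI.
import tabnanny

tabnanny.verbose = 1              # module-level flag read by check()
tabnanny.check('some_file.py')    # placeholder; a directory is walked recursively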
pandeyop/rally
tests/unit/test_osclients.py
2
22887
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from keystoneclient import exceptions as keystone_exceptions
import mock
from oslo_config import cfg

from rally import consts
from rally import exceptions
from rally import objects
from rally import osclients
from tests.unit import fakes
from tests.unit import test


class TestCreateKeystoneClient(test.TestCase):

    def setUp(self):
        super(TestCreateKeystoneClient, self).setUp()
        self.kwargs = {"auth_url": "http://auth_url", "username": "user",
                       "password": "password", "tenant_name": "tenant",
                       "https_insecure": False, "https_cacert": None}

    def test_create_keystone_client_v2(self):
        mock_keystone = mock.MagicMock()
        fake_keystoneclient = mock.MagicMock()
        mock_keystone.v2_0.client.Client.return_value = fake_keystoneclient
        mock_discover = mock.MagicMock(
            version_data=mock.MagicMock(return_value=[{"version": [2]}]))
        mock_keystone.discover.Discover.return_value = mock_discover
        with mock.patch.dict("sys.modules",
                             {"keystoneclient": mock_keystone,
                              "keystoneclient.v2_0": mock_keystone.v2_0}):
            client = osclients.create_keystone_client(self.kwargs)
            mock_discover.version_data.assert_called_once_with()
            self.assertEqual(fake_keystoneclient, client)
            mock_keystone.v2_0.client.Client.assert_called_once_with(
                **self.kwargs)

    def test_create_keystone_client_v3(self):
        mock_keystone = mock.MagicMock()
        fake_keystoneclient = mock.MagicMock()
        mock_keystone.v3.client.Client.return_value = fake_keystoneclient
        mock_discover = mock.MagicMock(
            version_data=mock.MagicMock(return_value=[{"version": [3]}]))
        mock_keystone.discover.Discover.return_value = mock_discover
        with mock.patch.dict("sys.modules",
                             {"keystoneclient": mock_keystone,
                              "keystoneclient.v3": mock_keystone.v3}):
            client = osclients.create_keystone_client(self.kwargs)
            mock_discover.version_data.assert_called_once_with()
            self.assertEqual(fake_keystoneclient, client)
            mock_keystone.v3.client.Client.assert_called_once_with(
                **self.kwargs)

    def test_create_keystone_client_version_not_found(self):
        mock_keystone = mock.MagicMock()
        mock_discover = mock.MagicMock(
            version_data=mock.MagicMock(return_value=[{"version": [100500]}]))
        mock_keystone.discover.Discover.return_value = mock_discover
        with mock.patch.dict("sys.modules",
                             {"keystoneclient": mock_keystone}):
            self.assertRaises(exceptions.RallyException,
                              osclients.create_keystone_client, self.kwargs)
            mock_discover.version_data.assert_called_once_with()


class OSClientsTestCase(test.TestCase):

    def setUp(self):
        super(OSClientsTestCase, self).setUp()
        self.endpoint = objects.Endpoint("http://auth_url", "use", "pass",
                                         "tenant")
        self.clients = osclients.Clients(self.endpoint)

        self.fake_keystone = fakes.FakeKeystoneClient()
        self.fake_keystone.auth_token = mock.MagicMock()
        self.service_catalog = self.fake_keystone.service_catalog
        self.service_catalog.url_for = mock.MagicMock()

        keystone_patcher = mock.patch(
            "rally.osclients.create_keystone_client")
        self.mock_create_keystone_client = keystone_patcher.start()
        self.addCleanup(keystone_patcher.stop)
        self.mock_create_keystone_client.return_value = self.fake_keystone

    def tearDown(self):
        super(OSClientsTestCase, self).tearDown()

    def test_create_from_env(self):
        with mock.patch.dict("os.environ",
                             {"OS_AUTH_URL": "foo_auth_url",
                              "OS_USERNAME": "foo_username",
                              "OS_PASSWORD": "foo_password",
                              "OS_TENANT_NAME": "foo_tenant_name",
                              "OS_REGION_NAME": "foo_region_name"}):
            clients = osclients.Clients.create_from_env()

        self.assertEqual("foo_auth_url", clients.endpoint.auth_url)
        self.assertEqual("foo_username", clients.endpoint.username)
        self.assertEqual("foo_password", clients.endpoint.password)
        self.assertEqual("foo_tenant_name", clients.endpoint.tenant_name)
        self.assertEqual("foo_region_name", clients.endpoint.region_name)

    def test_keystone(self):
        self.assertNotIn("keystone", self.clients.cache)
        client = self.clients.keystone()
        self.assertEqual(client, self.fake_keystone)
        endpoint = {"timeout": cfg.CONF.openstack_client_http_timeout,
                    "insecure": False, "cacert": None}
        kwargs = self.endpoint.to_dict()
        kwargs.update(endpoint.items())
        self.mock_create_keystone_client.assert_called_once_with(kwargs)
        self.assertEqual(self.fake_keystone, self.clients.cache["keystone"])

    @mock.patch("rally.osclients.Clients.keystone")
    def test_verified_keystone_user_not_admin(self, mock_keystone):
        mock_keystone.return_value = fakes.FakeKeystoneClient()
        mock_keystone.return_value.auth_ref.role_names = ["notadmin"]
        self.assertRaises(exceptions.InvalidAdminException,
                          self.clients.verified_keystone)

    @mock.patch("rally.osclients.Clients.keystone")
    def test_verified_keystone_unauthorized(self, mock_keystone):
        mock_keystone.return_value = fakes.FakeKeystoneClient()
        mock_keystone.side_effect = keystone_exceptions.Unauthorized
        self.assertRaises(exceptions.InvalidEndpointsException,
                          self.clients.verified_keystone)

    @mock.patch("rally.osclients.Clients.keystone")
    def test_verified_keystone_unreachable(self, mock_keystone):
        mock_keystone.return_value = fakes.FakeKeystoneClient()
        mock_keystone.side_effect = keystone_exceptions.AuthorizationFailure
        self.assertRaises(exceptions.HostUnreachableException,
                          self.clients.verified_keystone)

    def test_nova(self):
        fake_nova = fakes.FakeNovaClient()
        mock_nova = mock.MagicMock()
        mock_nova.client.Client.return_value = fake_nova
        self.assertNotIn("nova", self.clients.cache)
        with mock.patch.dict("sys.modules", {"novaclient": mock_nova}):
            client = self.clients.nova()
            self.assertEqual(fake_nova, client)
            self.service_catalog.url_for.assert_called_once_with(
                service_type="compute",
                endpoint_type=consts.EndpointType.PUBLIC,
                region_name=self.endpoint.region_name)
            mock_nova.client.Client.assert_called_once_with(
                "2",
                auth_token=self.fake_keystone.auth_token,
                http_log_debug=False,
                timeout=cfg.CONF.openstack_client_http_timeout,
                insecure=False, cacert=None,
                username=self.endpoint.username,
                api_key=self.endpoint.password,
                project_id=self.endpoint.tenant_name,
                auth_url=self.endpoint.auth_url)
            client.set_management_url.assert_called_once_with(
                self.service_catalog.url_for.return_value)
            self.assertEqual(fake_nova, self.clients.cache["nova"])

    def test_neutron(self):
        fake_neutron = fakes.FakeNeutronClient()
        mock_neutron = mock.MagicMock()
        mock_neutron.client.Client.return_value = fake_neutron
        self.assertNotIn("neutron", self.clients.cache)
        with mock.patch.dict("sys.modules",
                             {"neutronclient.neutron": mock_neutron}):
            client = self.clients.neutron()
            self.assertEqual(fake_neutron, client)
            kw = {
                "token": self.fake_keystone.auth_token,
                "endpoint_url": self.service_catalog.url_for.return_value,
                "timeout": cfg.CONF.openstack_client_http_timeout,
                "insecure": self.endpoint.insecure,
                "ca_cert": self.endpoint.cacert,
                "username": self.endpoint.username,
                "password": self.endpoint.password,
                "tenant_name": self.endpoint.tenant_name,
                "auth_url": self.endpoint.auth_url
            }
            self.service_catalog.url_for.assert_called_once_with(
                service_type="network",
                endpoint_type=consts.EndpointType.PUBLIC,
                region_name=self.endpoint.region_name)
            mock_neutron.client.Client.assert_called_once_with("2.0", **kw)
            self.assertEqual(fake_neutron, self.clients.cache["neutron"])

    def test_glance(self):
        fake_glance = fakes.FakeGlanceClient()
        mock_glance = mock.MagicMock()
        mock_glance.Client = mock.MagicMock(return_value=fake_glance)
        with mock.patch.dict("sys.modules", {"glanceclient": mock_glance}):
            self.assertNotIn("glance", self.clients.cache)
            client = self.clients.glance()
            self.assertEqual(fake_glance, client)
            kw = {"endpoint": self.service_catalog.url_for.return_value,
                  "token": self.fake_keystone.auth_token,
                  "timeout": cfg.CONF.openstack_client_http_timeout,
                  "insecure": False, "cacert": None}
            self.service_catalog.url_for.assert_called_once_with(
                service_type="image",
                endpoint_type=consts.EndpointType.PUBLIC,
                region_name=self.endpoint.region_name)
            mock_glance.Client.assert_called_once_with("1", **kw)
            self.assertEqual(fake_glance, self.clients.cache["glance"])

    def test_cinder(self):
        fake_cinder = mock.MagicMock(client=fakes.FakeCinderClient())
        mock_cinder = mock.MagicMock()
        mock_cinder.client.Client.return_value = fake_cinder
        self.assertNotIn("cinder", self.clients.cache)
        with mock.patch.dict("sys.modules", {"cinderclient": mock_cinder}):
            client = self.clients.cinder()
            self.assertEqual(fake_cinder, client)
            self.service_catalog.url_for.assert_called_once_with(
                service_type="volume",
                endpoint_type=consts.EndpointType.PUBLIC,
                region_name=self.endpoint.region_name)
            mock_cinder.client.Client.assert_called_once_with(
                "1", http_log_debug=False,
                timeout=cfg.CONF.openstack_client_http_timeout,
                insecure=False, cacert=None,
                username=self.endpoint.username,
                api_key=self.endpoint.password,
                project_id=self.endpoint.tenant_name,
                auth_url=self.endpoint.auth_url)
            self.assertEqual(fake_cinder.client.management_url,
                             self.service_catalog.url_for.return_value)
            self.assertEqual(fake_cinder.client.auth_token,
                             self.fake_keystone.auth_token)
            self.assertEqual(fake_cinder, self.clients.cache["cinder"])

    def test_ceilometer(self):
        fake_ceilometer = fakes.FakeCeilometerClient()
        mock_ceilometer = mock.MagicMock()
        mock_ceilometer.client.get_client = mock.MagicMock(
            return_value=fake_ceilometer)
        self.assertNotIn("ceilometer", self.clients.cache)
        with mock.patch.dict("sys.modules",
                             {"ceilometerclient": mock_ceilometer}):
            client = self.clients.ceilometer()
            self.assertEqual(fake_ceilometer, client)
            self.service_catalog.url_for.assert_called_once_with(
                service_type="metering",
                endpoint_type=consts.EndpointType.PUBLIC,
                region_name=self.endpoint.region_name)
            kw = {"os_endpoint": self.service_catalog.url_for.return_value,
                  "token": self.fake_keystone.auth_token,
                  "timeout": cfg.CONF.openstack_client_http_timeout,
                  "insecure": False, "cacert": None,
                  "username": self.endpoint.username,
                  "password": self.endpoint.password,
                  "tenant_name": self.endpoint.tenant_name,
                  "auth_url": self.endpoint.auth_url
                  }
            mock_ceilometer.client.get_client.assert_called_once_with(
                "2", **kw)
            self.assertEqual(fake_ceilometer,
                             self.clients.cache["ceilometer"])

    def test_ironic(self):
        fake_ironic = fakes.FakeIronicClient()
        mock_ironic = mock.MagicMock()
        mock_ironic.client.get_client = mock.MagicMock(
            return_value=fake_ironic)
        self.assertNotIn("ironic", self.clients.cache)
        with mock.patch.dict("sys.modules", {"ironicclient": mock_ironic}):
            client = self.clients.ironic()
            self.assertEqual(fake_ironic, client)
            self.service_catalog.url_for.assert_called_once_with(
                service_type="baremetal",
                endpoint_type=consts.EndpointType.PUBLIC,
                region_name=self.endpoint.region_name)
            kw = {
                "os_auth_token": self.fake_keystone.auth_token,
                "ironic_url": self.service_catalog.url_for.return_value,
                "timeout": cfg.CONF.openstack_client_http_timeout,
                "insecure": self.endpoint.insecure,
                "cacert": self.endpoint.cacert
            }
            mock_ironic.client.get_client.assert_called_once_with("1.0", **kw)
            self.assertEqual(fake_ironic, self.clients.cache["ironic"])

    def test_sahara(self):
        fake_sahara = fakes.FakeSaharaClient()
        mock_sahara = mock.MagicMock()
        mock_sahara.client.Client = mock.MagicMock(return_value=fake_sahara)
        self.assertNotIn("sahara", self.clients.cache)
        with mock.patch.dict("sys.modules", {"saharaclient": mock_sahara}):
            client = self.clients.sahara()
            self.assertEqual(fake_sahara, client)
            kw = {
                "username": self.endpoint.username,
                "api_key": self.endpoint.password,
                "project_name": self.endpoint.tenant_name,
                "auth_url": self.endpoint.auth_url
            }
            mock_sahara.client.Client.assert_called_once_with("1.1", **kw)
            self.assertEqual(fake_sahara, self.clients.cache["sahara"])

    def test_zaqar(self):
        fake_zaqar = fakes.FakeZaqarClient()
        mock_zaqar = mock.MagicMock()
        mock_zaqar.client.Client = mock.MagicMock(return_value=fake_zaqar)
        self.assertNotIn("zaqar", self.clients.cache)
        with mock.patch.dict("sys.modules",
                             {"zaqarclient.queues": mock_zaqar}):
            client = self.clients.zaqar()
            self.assertEqual(fake_zaqar, client)
            self.service_catalog.url_for.assert_called_once_with(
                service_type="messaging",
                endpoint_type=consts.EndpointType.PUBLIC,
                region_name=self.endpoint.region_name)
            fake_zaqar_url = self.service_catalog.url_for.return_value
            conf = {"auth_opts": {"backend": "keystone", "options": {
                "os_username": self.endpoint.username,
                "os_password": self.endpoint.password,
                "os_project_name": self.endpoint.tenant_name,
                "os_project_id": self.fake_keystone.auth_tenant_id,
                "os_auth_url": self.endpoint.auth_url,
                "insecure": self.endpoint.insecure,
            }}}
            mock_zaqar.client.Client.assert_called_once_with(
                url=fake_zaqar_url, version=1.1, conf=conf)
            self.assertEqual(fake_zaqar, self.clients.cache["zaqar"])

    def test_trove(self):
        fake_trove = fakes.FakeTroveClient()
        mock_trove = mock.MagicMock()
        mock_trove.client.Client = mock.MagicMock(return_value=fake_trove)
        self.assertNotIn("trove", self.clients.cache)
        with mock.patch.dict("sys.modules", {"troveclient": mock_trove}):
            client = self.clients.trove()
            self.assertEqual(fake_trove, client)
            kw = {
                "username": self.endpoint.username,
                "api_key": self.endpoint.password,
                "project_id": self.endpoint.tenant_name,
                "auth_url": self.endpoint.auth_url,
                "region_name": self.endpoint.region_name,
                "timeout": cfg.CONF.openstack_client_http_timeout,
                "insecure": self.endpoint.insecure,
                "cacert": self.endpoint.cacert
            }
            mock_trove.client.Client.assert_called_once_with("1.0", **kw)
            self.assertEqual(fake_trove, self.clients.cache["trove"])

    def test_mistral(self):
        fake_mistral = fakes.FakeMistralClient()
        mock_mistral = mock.Mock()
        mock_mistral.client.client.return_value = fake_mistral

        self.assertNotIn("mistral", self.clients.cache)
        with mock.patch.dict(
                "sys.modules", {"mistralclient": mock_mistral,
                                "mistralclient.api": mock_mistral}):
            client = self.clients.mistral()
            self.assertEqual(fake_mistral, client)
            self.service_catalog.url_for.assert_called_once_with(
                service_type="workflowv2",
                endpoint_type=consts.EndpointType.PUBLIC,
                region_name=self.endpoint.region_name
            )
            fake_mistral_url = self.service_catalog.url_for.return_value
            mock_mistral.client.client.assert_called_once_with(
                mistral_url=fake_mistral_url,
                service_type="workflowv2",
                auth_token=self.fake_keystone.auth_token
            )
            self.assertEqual(fake_mistral, self.clients.cache["mistral"])

    def test_swift(self):
        fake_swift = fakes.FakeSwiftClient()
        mock_swift = mock.MagicMock()
        mock_swift.client.Connection = mock.MagicMock(
            return_value=fake_swift)
        self.assertNotIn("swift", self.clients.cache)
        with mock.patch.dict("sys.modules", {"swiftclient": mock_swift}):
            client = self.clients.swift()
            self.assertEqual(client, fake_swift)
            self.service_catalog.url_for.assert_called_once_with(
                service_type="object-store",
                endpoint_type=consts.EndpointType.PUBLIC,
                region_name=self.endpoint.region_name)
            kw = {"retries": 1,
                  "preauthurl": self.service_catalog.url_for.return_value,
                  "preauthtoken": self.fake_keystone.auth_token,
                  "insecure": False, "cacert": None,
                  "user": self.endpoint.username,
                  "key": self.endpoint.password,
                  "tenant_name": self.endpoint.tenant_name,
                  "auth_url": self.endpoint.auth_url
                  }
            mock_swift.client.Connection.assert_called_once_with(**kw)
            self.assertEqual(self.clients.cache["swift"], fake_swift)

    def test_ec2(self):
        mock_boto = mock.Mock()
        self.service_catalog.url_for.return_value = "http://fake.to:1/fake"
        self.fake_keystone.ec2 = mock.Mock()
        self.fake_keystone.ec2.create.return_value = mock.Mock(
            access="fake_access", secret="fake_secret")
        fake_ec2 = fakes.FakeEC2Client()
        mock_boto.connect_ec2_endpoint.return_value = fake_ec2

        self.assertNotIn("ec2", self.clients.cache)
        with mock.patch.dict("sys.modules", {"boto": mock_boto}):
            client = self.clients.ec2()
            self.assertEqual(fake_ec2, client)
            self.service_catalog.url_for.assert_called_once_with(
                service_type="ec2",
                endpoint_type=consts.EndpointType.PUBLIC,
                region_name=self.endpoint.region_name)
            kw = {
                "url": "http://fake.to:1/fake",
                "aws_access_key_id": "fake_access",
                "aws_secret_access_key": "fake_secret",
                "is_secure": self.endpoint.insecure,
            }
            mock_boto.connect_ec2_endpoint.assert_called_once_with(**kw)
            self.assertEqual(fake_ec2, self.clients.cache["ec2"])

    @mock.patch("rally.osclients.Clients.keystone")
    def test_services(self, mock_keystone):
        available_services = {consts.ServiceType.IDENTITY: {},
                              consts.ServiceType.COMPUTE: {},
                              "unknown_service": {}}
        mock_keystone.return_value = mock.Mock(service_catalog=mock.Mock(
            get_endpoints=lambda: available_services))
        clients = osclients.Clients(self.endpoint)

        self.assertEqual(
            {consts.ServiceType.IDENTITY: consts.Service.KEYSTONE,
             consts.ServiceType.COMPUTE: consts.Service.NOVA},
            clients.services())

    def test_murano(self):
        fake_murano = fakes.FakeMuranoClient()
        mock_murano = mock.Mock()
        mock_murano.client.Client.return_value = fake_murano
        self.assertNotIn("murano", self.clients.cache)
        with mock.patch.dict("sys.modules", {"muranoclient": mock_murano}):
            client = self.clients.murano()
            self.assertEqual(fake_murano, client)
            self.service_catalog.url_for.assert_called_once_with(
                service_type="application_catalog",
                endpoint_type=consts.EndpointType.PUBLIC,
                region_name=self.endpoint.region_name
            )
            kw = {"endpoint": self.service_catalog.url_for.return_value,
                  "token": self.fake_keystone.auth_token}
            mock_murano.client.Client.assert_called_once_with("1", **kw)
            self.assertEqual(fake_murano, self.clients.cache["murano"])
apache-2.0
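A minimal sketch of the sys.modules patching technique the tests above rely on: while the patch is active, any import of the named module resolves to the supplied MagicMock, so the code under test builds its client against the mock instead of a real library. The module name fakelib is invented purely for illustration.

import importlib
from unittest import mock

mock_lib = mock.MagicMock()
mock_lib.client.Client.return_value = "fake client"

with mock.patch.dict("sys.modules", {"fakelib": mock_lib}):
    # The import resolves straight from sys.modules; no real package needed.
    fakelib = importlib.import_module("fakelib")
    client = fakelib.client.Client("2.0", auth_url="http://auth_url")

assert client == "fake client"
mock_lib.client.Client.assert_called_once_with("2.0",
                                               auth_url="http://auth_url")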
LeZuse/psd-tools
tests/test_pixels.py
8
5675
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals

import pytest

from psd_tools import PSDImage, Layer, Group
from .utils import full_name

PIXEL_COLORS = (
    # filename, probe point, pixel value
    ('1layer.psd', (5, 5), (0x27, 0xBA, 0x0F)),
    ('group.psd', (10, 20), (0xFF, 0xFF, 0xFF)),
    ('hidden-groups.psd', (60, 100), (0xE1, 0x0B, 0x0B)),
    ('hidden-layer.psd', (0, 0), (0xFF, 0xFF, 0xFF)),
    # ('note.psd', (30, 30), (0, 0, 0)),  # what is it?
    ('smart-object-slice.psd', (70, 80), (0xAC, 0x19, 0x19)),  # XXX: what is this test about?
)

TRANSPARENCY_PIXEL_COLORS = (
    ('transparentbg-gimp.psd', (14, 14), (0xFF, 0xFF, 0xFF, 0x13)),
    ('2layers.psd', (70, 30), (0xF1, 0xF3, 0xC1)),  # why gimp shows it as F2F4C2 ?
)

MASK_PIXEL_COLORS = (
    ('clipping-mask.psd', (182, 68), (0xDA, 0xE6, 0xF7)),  # this is a clipped point
    ('mask.psd', (87, 7), (0xFF, 0xFF, 0xFF)),  # mask truncates the layer here
)

NO_LAYERS_PIXEL_COLORS = (
    ('history.psd', (70, 85), (0x24, 0x26, 0x29)),
)

PIXEL_COLORS_8BIT = (PIXEL_COLORS + NO_LAYERS_PIXEL_COLORS +
                     MASK_PIXEL_COLORS + TRANSPARENCY_PIXEL_COLORS)

PIXEL_COLORS_32BIT = (
    ('32bit.psd', (75, 15), (136, 139, 145)),
    ('32bit.psd', (95, 15), (0, 0, 0)),
    ('300dpi.psd', (70, 30), (0, 0, 0)),
    ('300dpi.psd', (50, 60), (214, 59, 59)),
    ('gradient fill.psd', (10, 15), (235, 241, 250)),  # background
    ('gradient fill.psd', (70, 50), (0, 0, 0)),  # black circle
    ('gradient fill.psd', (50, 50), (205, 144, 110)),  # filled ellipse
    ('pen-text.psd', (50, 50), (229, 93, 93)),
    ('pen-text.psd', (170, 40), (0, 0, 0)),
    ('vector mask.psd', (10, 15), (255, 255, 255)),
    ('vector mask.psd', (50, 90), (221, 227, 236)),
    ('transparentbg.psd', (0, 0), (255, 255, 255, 0)),
    ('transparentbg.psd', (50, 50), (0, 0, 0, 255)),
    ('32bit5x5.psd', (0, 0), (235, 241, 250)),  # why not equal to 16bit5x5.psd?
    ('32bit5x5.psd', (4, 0), (0, 0, 0)),
    ('32bit5x5.psd', (1, 3), (46, 196, 104)),
)

PIXEL_COLORS_16BIT = (
    ('16bit5x5.psd', (0, 0), (236, 242, 251)),
    ('16bit5x5.psd', (4, 0), (0, 0, 0)),
    ('16bit5x5.psd', (1, 3), (46, 196, 104)),
)

LAYER_COLORS = (
    ('1layer.psd', 0, (5, 5), (0x27, 0xBA, 0x0F)),
    ('2layers.psd', 1, (5, 5), (0x27, 0xBA, 0x0F)),
    ('2layers.psd', 1, (70, 30), (0x27, 0xBA, 0x0F)),
    ('2layers.psd', 0, (0, 0), (0, 0, 0, 0)),
    ('2layers.psd', 0, (62, 26), (0xF2, 0xF4, 0xC2, 0xFE)),
)

LAYER_COLORS_MULTIBYTE = (
    ('16bit5x5.psd', 1, (0, 0), (236, 242, 251, 255)),
    ('16bit5x5.psd', 1, (1, 3), (46, 196, 104, 255)),
    ('32bit5x5.psd', 1, (0, 0), (235, 241, 250, 255)),  # why not equal to 16bit5x5.psd?
    ('32bit5x5.psd', 1, (1, 3), (46, 196, 104, 255)),
)


def color_PIL(psd, point):
    im = psd.as_PIL()
    return im.getpixel(point)


def color_pymaging(psd, point):
    im = psd.as_pymaging()
    return tuple(im.get_pixel(*point))


BACKENDS = [[color_PIL], [color_pymaging]]


@pytest.mark.parametrize(["get_color"], BACKENDS)
@pytest.mark.parametrize(["filename", "point", "color"], PIXEL_COLORS_8BIT)
def test_composite(filename, point, color, get_color):
    psd = PSDImage.load(full_name(filename))
    assert color == get_color(psd, point)


@pytest.mark.parametrize(["filename", "point", "color"], PIXEL_COLORS_32BIT)
def test_composite_32bit(filename, point, color):
    psd = PSDImage.load(full_name(filename))
    assert color == color_PIL(psd, point)


@pytest.mark.parametrize(["filename", "point", "color"], PIXEL_COLORS_16BIT)
def test_composite_16bit(filename, point, color):
    psd = PSDImage.load(full_name(filename))
    assert color == color_PIL(psd, point)


@pytest.mark.parametrize(["filename", "layer_num", "point", "color"],
                         LAYER_COLORS_MULTIBYTE)
def test_layer_colors_multibyte(filename, layer_num, point, color):
    psd = PSDImage.load(full_name(filename))
    layer = psd.layers[layer_num]
    assert color == color_PIL(layer, point)


@pytest.mark.parametrize(["get_color"], BACKENDS)
@pytest.mark.parametrize(["filename", "layer_num", "point", "color"],
                         LAYER_COLORS)
def test_layer_colors(filename, layer_num, point, color, get_color):
    psd = PSDImage.load(full_name(filename))
    layer = psd.layers[layer_num]
    assert color == get_color(layer, point)


@pytest.mark.parametrize(["filename", "point", "color"],
                         PIXEL_COLORS + MASK_PIXEL_COLORS +
                         TRANSPARENCY_PIXEL_COLORS)
def test_layer_merging_size(filename, point, color):
    psd = PSDImage.load(full_name(filename))
    merged_image = psd.as_PIL_merged()
    assert merged_image.size == psd.as_PIL().size


@pytest.mark.parametrize(["filename", "point", "color"], PIXEL_COLORS)
def test_layer_merging_pixels(filename, point, color):
    psd = PSDImage.load(full_name(filename))
    merged_image = psd.as_PIL_merged()
    assert color[:3] == merged_image.getpixel(point)[:3]
    assert merged_image.getpixel(point)[3] == 255  # alpha channel


@pytest.mark.xfail
@pytest.mark.parametrize(["filename", "point", "color"],
                         TRANSPARENCY_PIXEL_COLORS)
def test_layer_merging_pixels_transparency(filename, point, color):
    psd = PSDImage.load(full_name(filename))
    merged_image = psd.as_PIL_merged()
    assert color == merged_image.getpixel(point)
mit
makielab/django-oscar
sites/demo/apps/bigbang/utils.py
8
3186
from decimal import Decimal as D
import datetime

from django.db.transaction import commit_on_success

from oscar.apps.dashboard.reports.csv_utils import CsvUnicodeReader
from oscar.apps.catalogue import models, categories
from oscar.apps.partner import models as partner_models


class Importer(object):
    """
    Quick and dirty catalogue importer
    """

    def __init__(self, logger):
        self.logger = logger

    @commit_on_success
    def handle(self, product_class_name, filepath):
        product_class = models.ProductClass.objects.get(
            name=product_class_name)
        attribute_codes = []
        for row in CsvUnicodeReader(open(filepath, 'r')):
            if row[1] == 'UPC':
                attribute_codes = row[9:]
                continue
            self.create_product(product_class, attribute_codes, row)

    def create_product(self, product_class, attribute_codes, row):
        ptype, upc, title, description, category, partner, sku, price, stock = row[0:9]

        # Create product
        is_variant = ptype.lower() == 'variant'
        is_group = ptype.lower() == 'group'
        if upc:
            try:
                product = models.Product.objects.get(upc=upc)
            except models.Product.DoesNotExist:
                product = models.Product(upc=upc)
        else:
            product = models.Product()

        if not is_variant:
            product.title = title
            product.description = description
            product.product_class = product_class

        # Attributes
        if not is_group:
            for code, value in zip(attribute_codes, row[9:]):
                # Need to check if the attribute requires an Option instance
                attr = product_class.attributes.get(code=code)
                if attr.is_option:
                    value = attr.option_group.options.get(option=value)
                if attr.type == 'date':
                    value = datetime.datetime.strptime(
                        value, "%d/%m/%Y").date()
                setattr(product.attr, code, value)

        # Assign parent for variants
        if is_variant:
            product.parent = self.parent

        product.save()

        # Save a reference to last group product
        if is_group:
            self.parent = product

        # Category information
        if category:
            leaf = categories.create_from_breadcrumbs(category)
            models.ProductCategory.objects.get_or_create(
                product=product, category=leaf)

        # Stock record
        if partner:
            partner, __ = partner_models.Partner.objects.get_or_create(
                name=partner)
            try:
                record = partner_models.StockRecord.objects.get(
                    product=product)
            except partner_models.StockRecord.DoesNotExist:
                record = partner_models.StockRecord(product=product)
            record.partner = partner
            record.partner_sku = sku
            record.price_excl_tax = D(price)
            if stock != 'NULL':
                record.num_in_stock = stock
            record.save()
bsd-3-clause
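A hedged sketch of driving the Importer above; the product class name, logger, and CSV path are invented for illustration. The importer expects nine fixed columns (type, UPC, title, description, category breadcrumb, partner, SKU, price, stock) followed by one column per attribute code, with a header row whose second cell is 'UPC'.

import logging

importer = Importer(logger=logging.getLogger(__name__))
importer.handle('Books', '/tmp/books.csv')  # hypothetical catalogue file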
theguardian/JIRA-APPy
cherrypy/test/modpy.py
28
5021
"""Wrapper for mod_python, for use as a CherryPy HTTP server when testing. To autostart modpython, the "apache" executable or script must be on your system path, or you must override the global APACHE_PATH. On some platforms, "apache" may be called "apachectl" or "apache2ctl"-- create a symlink to them if needed. If you wish to test the WSGI interface instead of our _cpmodpy interface, you also need the 'modpython_gateway' module at: http://projects.amor.org/misc/wiki/ModPythonGateway KNOWN BUGS ========== 1. Apache processes Range headers automatically; CherryPy's truncated output is then truncated again by Apache. See test_core.testRanges. This was worked around in http://www.cherrypy.org/changeset/1319. 2. Apache does not allow custom HTTP methods like CONNECT as per the spec. See test_core.testHTTPMethods. 3. Max request header and body settings do not work with Apache. 4. Apache replaces status "reason phrases" automatically. For example, CherryPy may set "304 Not modified" but Apache will write out "304 Not Modified" (capital "M"). 5. Apache does not allow custom error codes as per the spec. 6. Apache (or perhaps modpython, or modpython_gateway) unquotes %xx in the Request-URI too early. 7. mod_python will not read request bodies which use the "chunked" transfer-coding (it passes REQUEST_CHUNKED_ERROR to ap_setup_client_block instead of REQUEST_CHUNKED_DECHUNK, see Apache2's http_protocol.c and mod_python's requestobject.c). 8. Apache will output a "Content-Length: 0" response header even if there's no response entity body. This isn't really a bug; it just differs from the CherryPy default. """ import os curdir = os.path.join(os.getcwd(), os.path.dirname(__file__)) import re import time from cherrypy.test import helper def read_process(cmd, args=""): pipein, pipeout = os.popen4("%s %s" % (cmd, args)) try: firstline = pipeout.readline() if (re.search(r"(not recognized|No such file|not found)", firstline, re.IGNORECASE)): raise IOError('%s must be on your system path.' % cmd) output = firstline + pipeout.read() finally: pipeout.close() return output APACHE_PATH = "httpd" CONF_PATH = "test_mp.conf" conf_modpython_gateway = """ # Apache2 server conf file for testing CherryPy with modpython_gateway. ServerName 127.0.0.1 DocumentRoot "/" Listen %(port)s LoadModule python_module modules/mod_python.so SetHandler python-program PythonFixupHandler cherrypy.test.modpy::wsgisetup PythonOption testmod %(modulename)s PythonHandler modpython_gateway::handler PythonOption wsgi.application cherrypy::tree PythonOption socket_host %(host)s PythonDebug On """ conf_cpmodpy = """ # Apache2 server conf file for testing CherryPy with _cpmodpy. 
ServerName 127.0.0.1 DocumentRoot "/" Listen %(port)s LoadModule python_module modules/mod_python.so SetHandler python-program PythonFixupHandler cherrypy.test.modpy::cpmodpysetup PythonHandler cherrypy._cpmodpy::handler PythonOption cherrypy.setup cherrypy.test.%(modulename)s::setup_server PythonOption socket_host %(host)s PythonDebug On """ class ModPythonSupervisor(helper.Supervisor): using_apache = True using_wsgi = False template = None def __str__(self): return "ModPython Server on %s:%s" % (self.host, self.port) def start(self, modulename): mpconf = CONF_PATH if not os.path.isabs(mpconf): mpconf = os.path.join(curdir, mpconf) f = open(mpconf, 'wb') try: f.write(self.template % {'port': self.port, 'modulename': modulename, 'host': self.host}) finally: f.close() result = read_process(APACHE_PATH, "-k start -f %s" % mpconf) if result: print(result) def stop(self): """Gracefully shutdown a server that is serving forever.""" read_process(APACHE_PATH, "-k stop") loaded = False def wsgisetup(req): global loaded if not loaded: loaded = True options = req.get_options() import cherrypy cherrypy.config.update({ "log.error_file": os.path.join(curdir, "test.log"), "environment": "test_suite", "server.socket_host": options['socket_host'], }) modname = options['testmod'] mod = __import__(modname, globals(), locals(), ['']) mod.setup_server() cherrypy.server.unsubscribe() cherrypy.engine.start() from mod_python import apache return apache.OK def cpmodpysetup(req): global loaded if not loaded: loaded = True options = req.get_options() import cherrypy cherrypy.config.update({ "log.error_file": os.path.join(curdir, "test.log"), "environment": "test_suite", "server.socket_host": options['socket_host'], }) from mod_python import apache return apache.OK
gpl-2.0
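The conf strings above are plain %-format templates; ModPythonSupervisor.start() renders them with the port, module name, and host before handing the file to Apache. A hedged sketch of that rendering step in isolation (all values are examples only):

rendered = conf_cpmodpy % {'port': 8080,
                           'modulename': 'test_states',  # hypothetical test module
                           'host': '127.0.0.1'}
with open('test_mp.conf', 'w') as f:
    f.write(rendered)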
phoebusliang/parallel-lettuce
tests/integration/lib/Django-1.3/django/dispatch/dispatcher.py
315
9292
import weakref
import threading

from django.dispatch import saferef

WEAKREF_TYPES = (weakref.ReferenceType, saferef.BoundMethodWeakref)


def _make_id(target):
    if hasattr(target, 'im_func'):
        return (id(target.im_self), id(target.im_func))
    return id(target)


class Signal(object):
    """
    Base class for all signals

    Internal attributes:

        receivers
            { receiverkey (id) : weakref(receiver) }
    """

    def __init__(self, providing_args=None):
        """
        Create a new signal.

        providing_args
            A list of the arguments this signal can pass along in a send()
            call.
        """
        self.receivers = []
        if providing_args is None:
            providing_args = []
        self.providing_args = set(providing_args)
        self.lock = threading.Lock()

    def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
        """
        Connect receiver to sender for signal.

        Arguments:

            receiver
                A function or an instance method which is to receive signals.
                Receivers must be hashable objects.

                If weak is True, then receiver must be weak-referencable (more
                precisely saferef.safeRef() must be able to create a reference
                to the receiver).

                Receivers must be able to accept keyword arguments.

                If receivers have a dispatch_uid attribute, the receiver will
                not be added if another receiver already exists with that
                dispatch_uid.

            sender
                The sender to which the receiver should respond. Must either
                be of type Signal, or None to receive events from any sender.

            weak
                Whether to use weak references to the receiver. By default,
                the module will attempt to use weak references to the
                receiver objects. If this parameter is false, then strong
                references will be used.

            dispatch_uid
                An identifier used to uniquely identify a particular instance
                of a receiver. This will usually be a string, though it may
                be anything hashable.
        """
        from django.conf import settings

        # If DEBUG is on, check that we got a good receiver
        if settings.DEBUG:
            import inspect
            assert callable(receiver), "Signal receivers must be callable."

            # Check for **kwargs
            # Not all callables are inspectable with getargspec, so we'll
            # try a couple different ways but in the end fall back on assuming
            # it is -- we don't want to prevent registration of valid but
            # weird callables.
            try:
                argspec = inspect.getargspec(receiver)
            except TypeError:
                try:
                    argspec = inspect.getargspec(receiver.__call__)
                except (TypeError, AttributeError):
                    argspec = None
            if argspec:
                assert argspec[2] is not None, \
                    "Signal receivers must accept keyword arguments (**kwargs)."

        if dispatch_uid:
            lookup_key = (dispatch_uid, _make_id(sender))
        else:
            lookup_key = (_make_id(receiver), _make_id(sender))

        if weak:
            receiver = saferef.safeRef(receiver,
                                       onDelete=self._remove_receiver)

        self.lock.acquire()
        try:
            for r_key, _ in self.receivers:
                if r_key == lookup_key:
                    break
            else:
                self.receivers.append((lookup_key, receiver))
        finally:
            self.lock.release()

    def disconnect(self, receiver=None, sender=None, weak=True,
                   dispatch_uid=None):
        """
        Disconnect receiver from sender for signal.

        If weak references are used, disconnect need not be called. The
        receiver will be removed from dispatch automatically.

        Arguments:

            receiver
                The registered receiver to disconnect. May be None if
                dispatch_uid is specified.

            sender
                The registered sender to disconnect

            weak
                The weakref state to disconnect

            dispatch_uid
                the unique identifier of the receiver to disconnect
        """
        if dispatch_uid:
            lookup_key = (dispatch_uid, _make_id(sender))
        else:
            lookup_key = (_make_id(receiver), _make_id(sender))

        self.lock.acquire()
        try:
            for index in xrange(len(self.receivers)):
                (r_key, _) = self.receivers[index]
                if r_key == lookup_key:
                    del self.receivers[index]
                    break
        finally:
            self.lock.release()

    def send(self, sender, **named):
        """
        Send signal from sender to all connected receivers.

        If any receiver raises an error, the error propagates back through
        send, terminating the dispatch loop, so it is quite possible to not
        have all receivers called if a receiver raises an error.

        Arguments:

            sender
                The sender of the signal. Either a specific object or None.

            named
                Named arguments which will be passed to receivers.

        Returns a list of tuple pairs [(receiver, response), ... ].
        """
        responses = []
        if not self.receivers:
            return responses

        for receiver in self._live_receivers(_make_id(sender)):
            response = receiver(signal=self, sender=sender, **named)
            responses.append((receiver, response))
        return responses

    def send_robust(self, sender, **named):
        """
        Send signal from sender to all connected receivers catching errors.

        Arguments:

            sender
                The sender of the signal. Can be any python object (normally
                one registered with a connect if you actually want something
                to occur).

            named
                Named arguments which will be passed to receivers. These
                arguments must be a subset of the argument names defined in
                providing_args.

        Return a list of tuple pairs [(receiver, response), ... ]. May raise
        DispatcherKeyError.

        If any receiver raises an error (specifically any subclass of
        Exception), the error instance is returned as the result for that
        receiver.
        """
        responses = []
        if not self.receivers:
            return responses

        # Call each receiver with whatever arguments it can accept.
        # Return a list of tuple pairs [(receiver, response), ... ].
        for receiver in self._live_receivers(_make_id(sender)):
            try:
                response = receiver(signal=self, sender=sender, **named)
            except Exception, err:
                responses.append((receiver, err))
            else:
                responses.append((receiver, response))
        return responses

    def _live_receivers(self, senderkey):
        """
        Filter sequence of receivers to get resolved, live receivers.

        This checks for weak references and resolves them, then returns only
        live receivers.
        """
        none_senderkey = _make_id(None)
        receivers = []

        for (receiverkey, r_senderkey), receiver in self.receivers:
            if r_senderkey == none_senderkey or r_senderkey == senderkey:
                if isinstance(receiver, WEAKREF_TYPES):
                    # Dereference the weak reference.
                    receiver = receiver()
                    if receiver is not None:
                        receivers.append(receiver)
                else:
                    receivers.append(receiver)
        return receivers

    def _remove_receiver(self, receiver):
        """
        Remove dead receivers from connections.
        """
        self.lock.acquire()
        try:
            to_remove = []
            for key, connected_receiver in self.receivers:
                if connected_receiver == receiver:
                    to_remove.append(key)
            for key in to_remove:
                last_idx = len(self.receivers) - 1
                # enumerate in reverse order so that indexes are valid even
                # after we delete some items
                for idx, (r_key, _) in enumerate(reversed(self.receivers)):
                    if r_key == key:
                        del self.receivers[last_idx - idx]
        finally:
            self.lock.release()


def receiver(signal, **kwargs):
    """
    A decorator for connecting receivers to signals. Used by passing in the
    signal and keyword arguments to connect::

        @receiver(post_save, sender=MyModel)
        def signal_receiver(sender, **kwargs):
            ...

    """
    def _decorator(func):
        signal.connect(func, **kwargs)
        return func
    return _decorator
gpl-3.0
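For orientation, the Signal class above is typically driven like this; a hedged sketch with invented names (pizza_done, crust_callback), assuming Django settings are configured, since connect() consults settings.DEBUG:

from django.dispatch.dispatcher import Signal

# Declare a signal and document the arguments it sends.
pizza_done = Signal(providing_args=["toppings", "size"])

def crust_callback(sender, **kwargs):
    # Receivers must accept **kwargs; connect() asserts this when DEBUG is on.
    return "baked %s with %s" % (kwargs["size"], kwargs["toppings"])

# weak=False keeps a strong reference so this local function is not
# garbage-collected out of the receiver list.
pizza_done.connect(crust_callback, weak=False)

# send() returns [(receiver, response), ...]; sender=None matches receivers
# registered without a specific sender.
responses = pizza_done.send(sender=None, toppings="cheese", size="large")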
kritzware/PyBot
modules/timer.py
2
1722
import logging, coloredlogs
from threading import Thread
import time
from random import choice

from modules.config import *
from modules.commandtext import auto_messages
from modules.database import Database
from modules.api import API

database = Database(db_host, db_user, db_pass, db_name, db_autocommit)
database.database_connection()


class Timer(Thread):

    def __init__(self, user, time, timer_list, name):
        # Seconds
        self.user = user
        self.time = time
        super(Timer, self).__init__()
        self.temp_message = ''
        self.timer_list = timer_list
        self.name = name
        self.api = API(1)

    def run(self):
        if self.api.check_stream_online():
            logging.info("Stream detected as online")
            for viewers in self.api.get_viewers_json('viewers'):
                print(viewers)
                if database.db_check_user_exists(viewers) == False:
                    database.db_add_user(viewers)
                database.db_add_points_user(viewers, VIEWER_POINT_GAIN)
            for viewers in self.api.get_viewers_json('moderators'):
                print(viewers)
                if database.db_check_user_exists(viewers) == False:
                    database.db_add_user(viewers)
                database.db_add_points_user(viewers, VIEWER_POINT_GAIN)
        time.sleep(self.time)
        self.auto()

    def auto(self):
        self.run()

    def auto_message(self):
        output = choice(auto_messages)
        print("OUT: ", output)
        self.temp_message = output
        print("TEMP: ", self.temp_message)
        self.get_message()
        time.sleep(self.time)
        self.auto_message_run()

    def get_message(self):
        return self.temp_message

    def cooldown(self):
        time.sleep(self.time)
        logging.info("{} removed from list {}".format(self.timer_list[0],
                                                      self.name))
        self.timer_list.pop(0)

    def cooldown_run(self):
        self.timer_list.append(self.user)
        self.cooldown()
mit
viralpandey/kivy
examples/settings/main.py
18
3788
""" Config Example ============== This file contains a simple example of how the use the Kivy settings classes in a real app. It allows the user to change the caption and font_size of the label and stores these changes. When the user next runs the programs, their changes are restored. """ from kivy.app import App from kivy.uix.settings import SettingsWithTabbedPanel from kivy.logger import Logger from kivy.lang import Builder # We first define our GUI kv = ''' BoxLayout: orientation: 'vertical' Button: text: 'Configure app (or press F1)' on_release: app.open_settings() Label: id: label text: 'Hello' ''' # This JSON defines entries we want to appear in our App configuration screen json = ''' [ { "type": "string", "title": "Label caption", "desc": "Choose the text that appears in the label", "section": "My Label", "key": "text" }, { "type": "numeric", "title": "Label font size", "desc": "Choose the font size the label", "section": "My Label", "key": "font_size" } ] ''' class MyApp(App): def build(self): """ Build and return the root widget. """ # The line below is optional. You could leave it out or use one of the # standard options, such as SettingsWithSidebar, SettingsWithSpinner # etc. self.settings_cls = MySettingsWithTabbedPanel # We apply the saved configuration settings or the defaults root = Builder.load_string(kv) label = root.ids.label label.text = self.config.get('My Label', 'text') label.font_size = float(self.config.get('My Label', 'font_size')) return root def build_config(self, config): """ Set the default values for the configs sections. """ config.setdefaults('My Label', {'text': 'Hello', 'font_size': 20}) def build_settings(self, settings): """ Add our custom section to the default configuration object. """ # We use the string defined above for our JSON, but it could also be # loaded from a file as follows: # settings.add_json_panel('My Label', self.config, 'settings.json') settings.add_json_panel('My Label', self.config, data=json) def on_config_change(self, config, section, key, value): """ Respond to changes in the configuration. """ Logger.info("main.py: App.on_config_change: {0}, {1}, {2}, {3}".format( config, section, key, value)) if section == "My Label": if key == "text": self.root.ids.label.text = value elif key == 'font_size': self.root.ids.label.font_size = float(value) def close_settings(self, settings): """ The settings panel has been closed. """ Logger.info("main.py: App.close_settings: {0}".format(settings)) super(MyApp, self).close_settings(settings) class MySettingsWithTabbedPanel(SettingsWithTabbedPanel): """ It is not usually necessary to create subclass of a settings panel. There are many built-in types that you can use out of the box (SettingsWithSidebar, SettingsWithSpinner etc.). You would only want to create a Settings subclass like this if you want to change the behavior or appearance of an existing Settings class. """ def on_close(self): Logger.info("main.py: MySettingsWithTabbedPanel.on_close") def on_config_change(self, config, section, key, value): Logger.info( "main.py: MySettingsWithTabbedPanel.on_config_change: " "{0}, {1}, {2}, {3}".format(config, section, key, value)) MyApp().run()
mit
de-tour/detour
server/handling.py
1
6094
import cherrypy
from cherrypy.lib.static import serve_file
from cherrypy.process.plugins import SimplePlugin

from queue import Queue, Empty
from collections import namedtuple
from concurrent import Crawler

import parsing
import json
import traceback
import random

from urllib.parse import unquote

from ws4py.websocket import WebSocket
from ws4py.messaging import TextMessage

PoolItem = namedtuple('PoolItem', ['verb', 'args', 'output'])


class Search:
    def __init__(self):
        self.engines_suggest = []
        self.engines_search = []
        self.add_engines(parsing.sites)
        self.pool_suggest = Crawler(cls_list=self.engines_suggest)
        self.pool_search = Crawler(cls_list=self.engines_search)

    def start(self):
        self.pool_suggest.start()
        self.pool_search.start()

    def add_engines(self, engines):
        for Engine in engines:
            if parsing.is_balancer(Engine):
                self.add_engines(Engine.balance())
            else:
                if parsing.can_suggest(Engine):
                    self.engines_suggest.append(Engine)
                if parsing.can_search(Engine):
                    self.engines_search.append(Engine)

    def stop(self):
        self.pool_suggest.stop()
        self.pool_search.stop()

    def suggest(self, keyword):
        if not keyword:
            yield []
            return

        output = Queue()
        k = len(self.engines_suggest) // 2
        for engine in random.sample(self.engines_suggest, k):
            self.pool_suggest.put(engine,
                                  PoolItem('suggest', (keyword,), output))

        failure = 0
        result_set = set()
        while failure < 1:
            try:
                result_set.update(output.get(timeout=1))
            except Empty:
                failure += 1

        ordered_results = parsing.rank_list(result_set, keyword)[0:10]
        result_set = set(ordered_results)
        yield ordered_results

    def search(self, keyword, from_id):
        if not keyword:
            yield []
            return

        output = Queue()
        for engine in self.engines_search:
            if not parsing.is_meta(engine):
                self.pool_search.put(
                    engine,
                    PoolItem('search', (keyword, from_id + 1, None), output))
            else:
                for site in parsing.domains:
                    filtered = engine.site_filter(site, keyword)
                    self.pool_search.put(
                        engine,
                        PoolItem('search', (filtered, from_id + 1, None),
                                 output))

        failure = 0
        result_set = set()
        while failure < 5:
            try:
                new_results = set(output.get(timeout=1))
                print('Search %s: %d unique results' %
                      (repr(keyword), len(result_set)))
                yield parsing.rank_list(new_results - result_set, keyword)
                result_set.update(new_results)
            except Empty:
                failure += 1


class WSHandler(WebSocket):
    def opened(self):
        cherrypy.engine.log('WebSocket opened')

    def received_message(self, msg):
        cherrypy.engine.log('Received ' + str(msg))
        try:
            params = json.loads(str(msg))
            verb = params['verb']
            if verb == 'suggest':
                self.ws_suggest(unquote(params['keyword']))
            elif verb == 'search':
                self.ws_search(unquote(params['keyword']), params['from_id'])
            else:
                raise ValueError('Unknown verb. (suggest, search)')
        except (KeyError, AttributeError, TypeError, ValueError) as e:
            cherrypy.engine.log('Handler Exception - %s' % repr(e))
            cherrypy.engine.log(traceback.format_exc())

    def closed(self, code, reason):
        cherrypy.engine.log('A client left')

    def ws_suggest(self, keyword):
        results = Queue()
        cherrypy.engine.publish('detour_suggest', keyword, results)
        generator = results.get()
        for item in generator:
            if item:
                msg = json.dumps({'from': keyword, 'results': item})
                cherrypy.engine.publish('websocket-broadcast', msg)

    def ws_search(self, keyword, from_id):
        results = Queue()
        cherrypy.engine.publish('detour_search', keyword, from_id, results)
        generator = results.get()
        for r_list in generator:
            if r_list:
                d = {
                    'results': [r.items() for r in r_list],
                    'keyword': keyword,
                    'from_id': from_id,
                }
                cherrypy.engine.publish('websocket-broadcast', json.dumps(d))


class Daemon(SimplePlugin):
    def __init__(self, bus):
        SimplePlugin.__init__(self, bus)

    def start(self):
        self.bus.log('Daemon plugin starts')
        self.priority = 70
        self.search_daemon = Search()
        self.search_daemon.start()
        self.bus.subscribe('detour_suggest', self.suggest_handler)
        self.bus.subscribe('detour_search', self.search_handler)

    def stop(self):
        self.bus.unsubscribe('detour_suggest', self.suggest_handler)
        self.bus.unsubscribe('detour_search', self.search_handler)
        self.search_daemon.stop()
        self.bus.log('Daemon plugin stops')

    def suggest_handler(self, keyword, bucket):
        self.bus.log('Suggest ' + repr(keyword))
        generator = self.search_daemon.suggest(keyword)
        print("suggest_handler: got generator")
        bucket.put(generator)

    def search_handler(self, keyword, from_id, bucket):
        self.bus.log('Search ' + repr(keyword) + ' from ID ' + repr(from_id))
        generator = self.search_daemon.search(keyword, from_id)
        print("search_handler: got generator")
        bucket.put(generator)


class Detour:
    def __init__(self, public):
        self.public = public

    @cherrypy.expose
    def index(self, q=None):
        return serve_file(self.public + '/index.html')

    @cherrypy.expose
    def ws(self):
        handler = cherrypy.request.ws_handler
        cherrypy.log("Handler created: %s" % repr(handler))
gpl-3.0
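WSHandler above speaks a small JSON protocol: a verb of suggest or search, a URL-quoted keyword, and for searches a from_id. A hedged client sketch using ws4py; the ws://localhost:8080/ws endpoint is an assumption, since the actual mount point depends on the CherryPy configuration:

import json
from urllib.parse import quote
from ws4py.client.threadedclient import WebSocketClient

class DetourClient(WebSocketClient):
    def opened(self):
        # Keywords go over the wire URL-quoted; the server unquotes them.
        self.send(json.dumps({"verb": "suggest",
                              "keyword": quote("hello world")}))
        self.send(json.dumps({"verb": "search",
                              "keyword": quote("hello world"),
                              "from_id": 0}))

    def received_message(self, message):
        # Suggest replies carry 'from'/'results'; search replies add 'from_id'.
        print(json.loads(str(message)))

ws = DetourClient("ws://localhost:8080/ws")  # assumed mount point
ws.connect()
ws.run_forever()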
probablytom/tomwallis.net
venv/lib/python2.7/site-packages/setuptools/command/setopt.py
458
5080
from distutils.util import convert_path
from distutils import log
from distutils.errors import DistutilsOptionError
import distutils
import os

from setuptools import Command

__all__ = ['config_file', 'edit_config', 'option_base', 'setopt']


def config_file(kind="local"):
    """Get the filename of the distutils, local, global, or per-user config

    `kind` must be one of "local", "global", or "user"
    """
    if kind == 'local':
        return 'setup.cfg'
    if kind == 'global':
        return os.path.join(
            os.path.dirname(distutils.__file__), 'distutils.cfg'
        )
    if kind == 'user':
        dot = os.name == 'posix' and '.' or ''
        return os.path.expanduser(convert_path("~/%spydistutils.cfg" % dot))
    raise ValueError(
        "config_file() type must be 'local', 'global', or 'user'", kind
    )


def edit_config(filename, settings, dry_run=False):
    """Edit a configuration file to include `settings`

    `settings` is a dictionary of dictionaries or ``None`` values, keyed by
    command/section name. A ``None`` value means to delete the entire section,
    while a dictionary lists settings to be changed or deleted in that
    section. A setting of ``None`` means to delete that setting.
    """
    from setuptools.compat import ConfigParser

    log.debug("Reading configuration from %s", filename)
    opts = ConfigParser.RawConfigParser()
    opts.read([filename])
    for section, options in settings.items():
        if options is None:
            log.info("Deleting section [%s] from %s", section, filename)
            opts.remove_section(section)
        else:
            if not opts.has_section(section):
                log.debug("Adding new section [%s] to %s", section, filename)
                opts.add_section(section)
            for option, value in options.items():
                if value is None:
                    log.debug(
                        "Deleting %s.%s from %s", section, option, filename
                    )
                    opts.remove_option(section, option)
                    if not opts.options(section):
                        log.info("Deleting empty [%s] section from %s",
                                 section, filename)
                        opts.remove_section(section)
                else:
                    log.debug(
                        "Setting %s.%s to %r in %s",
                        section, option, value, filename
                    )
                    opts.set(section, option, value)

    log.info("Writing %s", filename)
    if not dry_run:
        with open(filename, 'w') as f:
            opts.write(f)


class option_base(Command):
    """Abstract base class for commands that mess with config files"""

    user_options = [
        ('global-config', 'g',
         "save options to the site-wide distutils.cfg file"),
        ('user-config', 'u',
         "save options to the current user's pydistutils.cfg file"),
        ('filename=', 'f',
         "configuration file to use (default=setup.cfg)"),
    ]

    boolean_options = [
        'global-config', 'user-config',
    ]

    def initialize_options(self):
        self.global_config = None
        self.user_config = None
        self.filename = None

    def finalize_options(self):
        filenames = []
        if self.global_config:
            filenames.append(config_file('global'))
        if self.user_config:
            filenames.append(config_file('user'))
        if self.filename is not None:
            filenames.append(self.filename)
        if not filenames:
            filenames.append(config_file('local'))
        if len(filenames) > 1:
            raise DistutilsOptionError(
                "Must specify only one configuration file option",
                filenames
            )
        self.filename, = filenames


class setopt(option_base):
    """Save command-line options to a file"""

    description = "set an option in setup.cfg or another config file"

    user_options = [
        ('command=', 'c', 'command to set an option for'),
        ('option=', 'o', 'option to set'),
        ('set-value=', 's', 'value of the option'),
        ('remove', 'r', 'remove (unset) the value'),
    ] + option_base.user_options

    boolean_options = option_base.boolean_options + ['remove']

    def initialize_options(self):
        option_base.initialize_options(self)
        self.command = None
        self.option = None
        self.set_value = None
        self.remove = None

    def finalize_options(self):
        option_base.finalize_options(self)
        if self.command is None or self.option is None:
            raise DistutilsOptionError("Must specify --command *and* --option")
        if self.set_value is None and not self.remove:
            raise DistutilsOptionError("Must specify --set-value or --remove")

    def run(self):
        edit_config(
            self.filename, {
                self.command: {self.option.replace('-', '_'): self.set_value}
            },
            self.dry_run
        )
artistic-2.0
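edit_config above can also be called directly; a hedged sketch of setting, clearing, and removing values in a local setup.cfg, following the semantics its docstring describes (section and option names are arbitrary examples):

from setuptools.command.setopt import edit_config

# Create/extend [aliases] and set test = pytest.
edit_config('setup.cfg', {'aliases': {'test': 'pytest'}})

# None as a value deletes the option; an emptied section is dropped too.
edit_config('setup.cfg', {'aliases': {'test': None}})

# None as a section value removes the whole section.
edit_config('setup.cfg', {'aliases': None})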
sander76/home-assistant
tests/components/control4/test_config_flow.py
5
6182
"""Test the Control4 config flow.""" import datetime from unittest.mock import AsyncMock, patch from pyControl4.account import C4Account from pyControl4.director import C4Director from pyControl4.error_handling import Unauthorized from homeassistant import config_entries, setup from homeassistant.components.control4.const import DEFAULT_SCAN_INTERVAL, DOMAIN from homeassistant.const import ( CONF_HOST, CONF_PASSWORD, CONF_SCAN_INTERVAL, CONF_USERNAME, ) from tests.common import MockConfigEntry def _get_mock_c4_account( getAccountControllers={ "controllerCommonName": "control4_model_00AA00AA00AA", "href": "https://apis.control4.com/account/v3/rest/accounts/000000", "name": "Name", }, getDirectorBearerToken={ "token": "token", "token_expiration": datetime.datetime(2020, 7, 15, 13, 50, 15, 26940), }, ): c4_account_mock = AsyncMock(C4Account) c4_account_mock.getAccountControllers.return_value = getAccountControllers c4_account_mock.getDirectorBearerToken.return_value = getDirectorBearerToken return c4_account_mock def _get_mock_c4_director(getAllItemInfo={}): c4_director_mock = AsyncMock(C4Director) c4_director_mock.getAllItemInfo.return_value = getAllItemInfo return c4_director_mock async def test_form(hass): """Test we get the form.""" await setup.async_setup_component(hass, "persistent_notification", {}) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) assert result["type"] == "form" assert result["errors"] == {} c4_account = _get_mock_c4_account() c4_director = _get_mock_c4_director() with patch( "homeassistant.components.control4.config_flow.C4Account", return_value=c4_account, ), patch( "homeassistant.components.control4.config_flow.C4Director", return_value=c4_director, ), patch( "homeassistant.components.control4.async_setup_entry", return_value=True, ) as mock_setup_entry: result2 = await hass.config_entries.flow.async_configure( result["flow_id"], { CONF_HOST: "1.1.1.1", CONF_USERNAME: "test-username", CONF_PASSWORD: "test-password", }, ) await hass.async_block_till_done() assert result2["type"] == "create_entry" assert result2["title"] == "control4_model_00AA00AA00AA" assert result2["data"] == { CONF_HOST: "1.1.1.1", CONF_USERNAME: "test-username", CONF_PASSWORD: "test-password", "controller_unique_id": "control4_model_00AA00AA00AA", } assert len(mock_setup_entry.mock_calls) == 1 async def test_form_invalid_auth(hass): """Test we handle invalid auth.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) with patch( "homeassistant.components.control4.config_flow.C4Account", side_effect=Unauthorized("message"), ): result2 = await hass.config_entries.flow.async_configure( result["flow_id"], { CONF_HOST: "1.1.1.1", CONF_USERNAME: "test-username", CONF_PASSWORD: "test-password", }, ) assert result2["type"] == "form" assert result2["errors"] == {"base": "invalid_auth"} async def test_form_unexpected_exception(hass): """Test we handle an unexpected exception.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) with patch( "homeassistant.components.control4.config_flow.C4Account", side_effect=ValueError("message"), ): result2 = await hass.config_entries.flow.async_configure( result["flow_id"], { CONF_HOST: "1.1.1.1", CONF_USERNAME: "test-username", CONF_PASSWORD: "test-password", }, ) assert result2["type"] == "form" assert result2["errors"] == {"base": "unknown"} async def test_form_cannot_connect(hass): """Test we 
handle cannot connect error.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) with patch( "homeassistant.components.control4.config_flow.Control4Validator.authenticate", return_value=True, ), patch( "homeassistant.components.control4.config_flow.C4Director", side_effect=Unauthorized("message"), ): result2 = await hass.config_entries.flow.async_configure( result["flow_id"], { CONF_HOST: "1.1.1.1", CONF_USERNAME: "test-username", CONF_PASSWORD: "test-password", }, ) assert result2["type"] == "form" assert result2["errors"] == {"base": "cannot_connect"} async def test_option_flow(hass): """Test config flow options.""" entry = MockConfigEntry(domain=DOMAIN, data={}, options=None) entry.add_to_hass(hass) result = await hass.config_entries.options.async_init(entry.entry_id) assert result["type"] == "form" assert result["step_id"] == "init" result = await hass.config_entries.options.async_configure( result["flow_id"], user_input={CONF_SCAN_INTERVAL: 4}, ) assert result["type"] == "create_entry" assert result["data"] == { CONF_SCAN_INTERVAL: 4, } async def test_option_flow_defaults(hass): """Test config flow options.""" entry = MockConfigEntry(domain=DOMAIN, data={}, options=None) entry.add_to_hass(hass) result = await hass.config_entries.options.async_init(entry.entry_id) assert result["type"] == "form" assert result["step_id"] == "init" result = await hass.config_entries.options.async_configure( result["flow_id"], user_input={} ) assert result["type"] == "create_entry" assert result["data"] == { CONF_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL, }
apache-2.0