repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/lettuce/terrain.py | 12 | 2199 | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falcão <gabriel@nacaolivre.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lettuce.registry import world
from lettuce.registry import CALLBACK_REGISTRY
world._set = True
def absorb(thing, name=None):
    """Attach *thing* to the shared ``world`` object and return it.

    When *name* is not a string (e.g. the default ``None``), the object's
    own ``__name__`` is used instead, which lets ``absorb`` double as a
    decorator for functions and classes.
    """
    if isinstance(name, basestring):
        attr_name = name
    else:
        attr_name = thing.__name__
    setattr(world, attr_name, thing)
    return thing
world.absorb = absorb
@world.absorb
def spew(name):
    """Detach attribute *name* from ``world`` and return its value.

    Returns ``None`` implicitly when ``world`` has no such attribute.
    """
    try:
        item = getattr(world, name)
    except AttributeError:
        return None
    delattr(world, name)
    return item
class Main(object):
    # Callback namespace: the two instances below ("before"/"after") gain one
    # registration method per hook point via _add_method.
    def __init__(self, callback):
        # callback is the hook prefix, e.g. 'before' or 'after'
        self.name = callback
    @classmethod
    def _add_method(cls, name, where, when):
        # Dynamically attach a registration method named *name* to the class.
        # *where* is the CALLBACK_REGISTRY group; *when* is a template whose
        # '%(0)s' placeholder is filled with the instance prefix at call time.
        def method(self, fn):
            CALLBACK_REGISTRY.append_to(where, when % {'0': self.name}, fn)
            return fn
        method.__name__ = method.fn_name = name
        setattr(cls, name, method)
# Register one hook-registration method per (method name, registry group,
# callback-name template) triple.
for name, where, when in (
        ('all', 'all', '%(0)s'),
        ('each_step', 'step', '%(0)s_each'),
        ('step_output', 'step', '%(0)s_output'),
        ('each_scenario', 'scenario', '%(0)s_each'),
        ('each_background', 'background', '%(0)s_each'),
        ('each_feature', 'feature', '%(0)s_each'),
        ('harvest', 'harvest', '%(0)s'),
        ('each_app', 'app', '%(0)s_each'),
        ('runserver', 'runserver', '%(0)s'),
        ('handle_request', 'handle_request', '%(0)s'),
        ('outline', 'scenario', 'outline')):
    Main._add_method(name, where, when)
# Public entry points: lettuce users write e.g. @before.each_scenario.
before = Main('before')
after = Main('after')
| agpl-3.0 |
Jurevic/BB-8_droid | bb8/users/migrations/0001_initial.py | 60 | 3032 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-23 04:36
from __future__ import unicode_literals
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial migration: creates the project's custom ``User`` model.

    Auto-generated by Django 1.10.1; the field set mirrors
    ``django.contrib.auth.models.AbstractUser`` plus an extra ``name`` field.
    Avoid hand-editing field definitions — generate a follow-up migration
    instead.
    """
    initial = True
    dependencies = [
        ('auth', '0008_alter_user_username_max_length'),
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('name', models.CharField(blank=True, max_length=255, verbose_name='Name of User')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name_plural': 'users',
                'verbose_name': 'user',
                'abstract': False,
            },
            managers=[
                # Reuse the stock auth manager so create_user/create_superuser work.
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
| mit |
chouseknecht/ansible | lib/ansible/modules/cloud/azure/azure_rm_aks.py | 14 | 33985 | #!/usr/bin/python
#
# Copyright (c) 2018 Sertac Ozercan, <seozerca@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_aks
version_added: "2.6"
short_description: Manage a managed Azure Container Service (AKS) instance
description:
- Create, update and delete a managed Azure Container Service (AKS) instance.
options:
resource_group:
description:
- Name of a resource group where the managed Azure Container Services (AKS) exists or will be created.
required: true
name:
description:
- Name of the managed Azure Container Services (AKS) instance.
required: true
state:
description:
- Assert the state of the AKS. Use C(present) to create or update an AKS and C(absent) to delete it.
default: present
choices:
- absent
- present
location:
description:
- Valid azure location. Defaults to location of the resource group.
dns_prefix:
description:
- DNS prefix specified when creating the managed cluster.
kubernetes_version:
description:
- Version of Kubernetes specified when creating the managed cluster.
linux_profile:
description:
- The Linux profile suboptions.
suboptions:
admin_username:
description:
- The Admin Username for the cluster.
required: true
ssh_key:
description:
- The Public SSH Key used to access the cluster.
required: true
agent_pool_profiles:
description:
- The agent pool profile suboptions.
suboptions:
name:
description:
- Unique name of the agent pool profile in the context of the subscription and resource group.
required: true
count:
description:
- Number of agents (VMs) to host docker containers.
- Allowed values must be in the range of C(1) to C(100) (inclusive).
required: true
vm_size:
description:
- The VM Size of each of the Agent Pool VM's (e.g. C(Standard_F1) / C(Standard_D2v2)).
required: true
os_disk_size_gb:
description:
- Size of the OS disk.
service_principal:
description:
- The service principal suboptions.
suboptions:
client_id:
description:
- The ID for the Service Principal.
required: true
client_secret:
description:
- The secret password associated with the service principal.
required: true
enable_rbac:
description:
- Enable RBAC.
- Existing non-RBAC enabled AKS clusters cannot currently be updated for RBAC use.
type: bool
default: no
version_added: "2.8"
network_profile:
description:
- Profile of network configuration.
suboptions:
network_plugin:
description:
- Network plugin used for building Kubernetes network.
                    - This property cannot be changed.
- With C(kubenet), nodes get an IP address from the Azure virtual network subnet.
- AKS features such as Virtual Nodes or network policies aren't supported with C(kubenet).
- C(azure) enables Azure Container Networking Interface(CNI), every pod gets an IP address from the subnet and can be accessed directly.
default: kubenet
choices:
- azure
- kubenet
network_policy:
description: Network policy used for building Kubernetes network.
choices:
- azure
- calico
pod_cidr:
description:
- A CIDR notation IP range from which to assign pod IPs when I(network_plugin=kubenet) is used.
- It should be a large address space that isn't in use elsewhere in your network environment.
- This address range must be large enough to accommodate the number of nodes that you expect to scale up to.
default: "10.244.0.0/16"
service_cidr:
description:
- A CIDR notation IP range from which to assign service cluster IPs.
- It must not overlap with any Subnet IP ranges.
- It should be the *.10 address of your service IP address range.
default: "10.0.0.0/16"
dns_service_ip:
description:
- An IP address assigned to the Kubernetes DNS service.
- It must be within the Kubernetes service address range specified in serviceCidr.
default: "10.0.0.10"
docker_bridge_cidr:
description:
- A CIDR notation IP range assigned to the Docker bridge network.
- It must not overlap with any Subnet IP ranges or the Kubernetes service address range.
default: "172.17.0.1/16"
version_added: "2.8"
aad_profile:
description:
- Profile of Azure Active Directory configuration.
suboptions:
client_app_id:
description: The client AAD application ID.
server_app_id:
description: The server AAD application ID.
server_app_secret:
description: The server AAD application secret.
tenant_id:
description:
- The AAD tenant ID to use for authentication.
- If not specified, will use the tenant of the deployment subscription.
version_added: "2.8"
addon:
description:
- Profile of managed cluster add-on.
- Key can be C(http_application_routing), C(monitoring), C(virtual_node).
- Value must be a dict contains a bool variable C(enabled).
type: dict
suboptions:
http_application_routing:
description:
- The HTTP application routing solution makes it easy to access applications that are deployed to your cluster.
type: dict
suboptions:
enabled:
description:
- Whether the solution enabled.
type: bool
monitoring:
description:
- It gives you performance visibility by collecting memory and processor metrics from controllers, nodes,
and containers that are available in Kubernetes through the Metrics API.
type: dict
suboptions:
enabled:
description:
- Whether the solution enabled.
type: bool
log_analytics_workspace_resource_id:
description:
- Where to store the container metrics.
virtual_node:
description:
- With virtual nodes, you have quick provisioning of pods, and only pay per second for their execution time.
- You don't need to wait for Kubernetes cluster autoscaler to deploy VM compute nodes to run the additional pods.
type: dict
suboptions:
enabled:
description:
- Whether the solution enabled.
type: bool
subnet_resource_id:
description:
- Subnet associated to the cluster.
version_added: "2.8"
extends_documentation_fragment:
- azure
- azure_tags
author:
- Sertac Ozercan (@sozercan)
- Yuwei Zhou (@yuwzho)
'''
EXAMPLES = '''
- name: Create a managed Azure Container Services (AKS) instance
azure_rm_aks:
name: myAKS
location: eastus
resource_group: myResourceGroup
dns_prefix: akstest
kubernetes_version: 1.14.6
linux_profile:
admin_username: azureuser
ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAA...
service_principal:
client_id: "cf72ca99-f6b9-4004-b0e0-bee10c521948"
client_secret: "mySPNp@ssw0rd!"
agent_pool_profiles:
- name: default
count: 5
vm_size: Standard_D2_v2
tags:
Environment: Production
- name: Remove a managed Azure Container Services (AKS) instance
azure_rm_aks:
name: myAKS
resource_group: myResourceGroup
state: absent
'''
RETURN = '''
state:
description: Current state of the Azure Container Service (AKS).
returned: always
type: dict
example:
agent_pool_profiles:
- count: 1
dns_prefix: Null
name: default
os_disk_size_gb: Null
os_type: Linux
ports: Null
storage_profile: ManagedDisks
vm_size: Standard_DS1_v2
vnet_subnet_id: Null
changed: false
dns_prefix: aks9860bdcd89
id: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/Microsoft.ContainerService/managedClusters/aks9860bdc"
kube_config: "......"
kubernetes_version: 1.14.6
linux_profile:
admin_username: azureuser
ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADA.....
location: eastus
name: aks9860bdc
provisioning_state: Succeeded
service_principal_profile:
client_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
tags: {}
type: Microsoft.ContainerService/ManagedClusters
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
except ImportError:
# This is handled in azure_rm_common
pass
def create_aks_dict(aks):
    '''
    Helper method to deserialize a ContainerService to a dict

    :param: aks: ContainerService or AzureOperationPoller with the Azure callback object
    :return: dict with the state on Azure
    '''
    return {
        'id': aks.id,
        'name': aks.name,
        'location': aks.location,
        'dns_prefix': aks.dns_prefix,
        'kubernetes_version': aks.kubernetes_version,
        'tags': aks.tags,
        'linux_profile': create_linux_profile_dict(aks.linux_profile),
        # The service principal secret is deliberately not exposed here.
        'service_principal_profile': create_service_principal_profile_dict(
            aks.service_principal_profile),
        'provisioning_state': aks.provisioning_state,
        'agent_pool_profiles': create_agent_pool_profiles_dict(
            aks.agent_pool_profiles),
        'type': aks.type,
        'kube_config': aks.kube_config,
        'enable_rbac': aks.enable_rbac,
        'network_profile': create_network_profiles_dict(aks.network_profile),
        'aad_profile': create_aad_profiles_dict(aks.aad_profile),
        'addon': create_addon_dict(aks.addon_profiles),
        'fqdn': aks.fqdn,
        'node_resource_group': aks.node_resource_group,
    }
def create_network_profiles_dict(network):
    '''
    Helper method to deserialize a ContainerServiceNetworkProfile to a dict

    :param: network: ContainerServiceNetworkProfile object, or None
    :return: dict with the network profile state (empty dict when unset)
    '''
    if not network:
        return dict()
    fields = ('network_plugin', 'network_policy', 'pod_cidr',
              'service_cidr', 'dns_service_ip', 'docker_bridge_cidr')
    return {name: getattr(network, name) for name in fields}
def create_aad_profiles_dict(aad):
    '''
    Helper method to deserialize a ManagedClusterAADProfile to a dict

    :param: aad: ManagedClusterAADProfile object, or None
    :return: dict with the AAD profile state (empty dict when unset)
    '''
    if aad:
        return aad.as_dict()
    return dict()
def create_addon_dict(addon):
    '''
    Helper method to deserialize a map of ManagedClusterAddonProfile to a dict

    :param: addon: dict mapping addon name to ManagedClusterAddonProfile, or None
    :return: dict mapping addon name to its config dict plus an 'enabled' key
    '''
    result = dict()
    addon = addon or dict()
    for key, profile in addon.items():
        # Copy the config so the caller's profile object is not mutated, and
        # tolerate a None config (the service may omit it for an add-on).
        config = dict(profile.config or {})
        config['enabled'] = profile.enabled
        result[key] = config
    return result
def create_linux_profile_dict(linuxprofile):
    '''
    Helper method to deserialize a ContainerServiceLinuxProfile to a dict

    :param: linuxprofile: ContainerServiceLinuxProfile with the Azure callback object
    :return: dict with the state on Azure
    '''
    # Only the first public key is surfaced; AKS accepts a single SSH key.
    first_key = linuxprofile.ssh.public_keys[0]
    return {
        'ssh_key': first_key.key_data,
        'admin_username': linuxprofile.admin_username,
    }
def create_service_principal_profile_dict(serviceprincipalprofile):
    '''
    Helper method to deserialize a ContainerServiceServicePrincipalProfile to a dict
    Note: For security reason, the service principal secret is skipped on purpose.

    :param: serviceprincipalprofile: ContainerServiceServicePrincipalProfile with the Azure callback object
    :return: dict with the state on Azure
    '''
    return {'client_id': serviceprincipalprofile.client_id}
def create_agent_pool_profiles_dict(agentpoolprofiles):
    '''
    Helper method to deserialize a list of ContainerServiceAgentPoolProfile to dicts

    :param: agentpoolprofiles: list of ContainerServiceAgentPoolProfile objects, or None
    :return: list of dicts with the state on Azure, or None when no profiles exist
    '''
    if not agentpoolprofiles:
        return None
    profiles = []
    for pool in agentpoolprofiles:
        profiles.append({
            'count': pool.count,
            'vm_size': pool.vm_size,
            'name': pool.name,
            'os_disk_size_gb': pool.os_disk_size_gb,
            'storage_profile': pool.storage_profile,
            'vnet_subnet_id': pool.vnet_subnet_id,
            'os_type': pool.os_type,
        })
    return profiles
def create_addon_profiles_spec():
    '''
    Helper method to parse the ADDONS dictionary and generate the addon spec
    '''
    spec = dict()
    for addon_key, meta in ADDONS.items():
        # Every add-on accepts an 'enabled' flag ...
        sub_spec = {'enabled': dict(type='bool', default=True)}
        # ... plus any add-on specific config keys, aliased to their
        # camelCase Azure names.
        for option, camel_name in (meta.get('config') or {}).items():
            sub_spec[option] = dict(type='str', aliases=[camel_name], required=True)
        spec[addon_key] = dict(type='dict', options=sub_spec, aliases=[meta['name']])
    return spec
# Supported AKS add-ons: module option name -> the Azure addon profile name
# plus the snake_case option -> camelCase Azure config-key mapping.
ADDONS = {
    'http_application_routing': dict(name='httpApplicationRouting'),
    'monitoring': dict(name='omsagent', config={'log_analytics_workspace_resource_id': 'logAnalyticsWorkspaceResourceID'}),
    'virtual_node': dict(name='aciConnector', config={'subnet_resource_id': 'SubnetName'})
}
# Sub-option spec for the linux_profile module option.
linux_profile_spec = dict(
    admin_username=dict(type='str', required=True),
    ssh_key=dict(type='str', required=True)
)
# Sub-option spec for the service_principal module option.
service_principal_spec = dict(
    client_id=dict(type='str', required=True),
    client_secret=dict(type='str', no_log=True)
)
# Sub-option spec for each entry of agent_pool_profiles.
# NOTE(review): this key is 'storage_profiles' (plural) while the result
# dicts use 'storage_profile' — confirm which spelling callers rely on.
agent_pool_profile_spec = dict(
    name=dict(type='str', required=True),
    count=dict(type='int', required=True),
    vm_size=dict(type='str', required=True),
    os_disk_size_gb=dict(type='int'),
    dns_prefix=dict(type='str'),
    ports=dict(type='list', elements='int'),
    storage_profiles=dict(type='str', choices=[
        'StorageAccount', 'ManagedDisks']),
    vnet_subnet_id=dict(type='str'),
    os_type=dict(type='str', choices=['Linux', 'Windows'])
)
# Sub-option spec for the network_profile module option.
network_profile_spec = dict(
    network_plugin=dict(type='str', choices=['azure', 'kubenet']),
    network_policy=dict(type='str'),
    pod_cidr=dict(type='str'),
    service_cidr=dict(type='str'),
    dns_service_ip=dict(type='str'),
    docker_bridge_cidr=dict(type='str')
)
# Sub-option spec for the aad_profile module option.
aad_profile_spec = dict(
    client_app_id=dict(type='str'),
    server_app_id=dict(type='str'),
    server_app_secret=dict(type='str', no_log=True),
    tenant_id=dict(type='str')
)
class AzureRMManagedCluster(AzureRMModuleBase):
    """Configuration class for an Azure RM container service (AKS) resource"""
    def __init__(self):
        # Argument spec for the module; sub-option specs are module-level constants.
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str',
                required=True
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            ),
            location=dict(
                type='str'
            ),
            dns_prefix=dict(
                type='str'
            ),
            kubernetes_version=dict(
                type='str'
            ),
            linux_profile=dict(
                type='dict',
                options=linux_profile_spec
            ),
            agent_pool_profiles=dict(
                type='list',
                elements='dict',
                options=agent_pool_profile_spec
            ),
            service_principal=dict(
                type='dict',
                options=service_principal_spec
            ),
            enable_rbac=dict(
                type='bool',
                default=False
            ),
            network_profile=dict(
                type='dict',
                options=network_profile_spec
            ),
            aad_profile=dict(
                type='dict',
                options=aad_profile_spec
            ),
            addon=dict(
                type='dict',
                options=create_addon_profiles_spec()
            )
        )
        # Placeholders; populated from module parameters in exec_module().
        self.resource_group = None
        self.name = None
        self.location = None
        self.dns_prefix = None
        self.kubernetes_version = None
        self.tags = None
        self.state = None
        self.linux_profile = None
        self.agent_pool_profiles = None
        self.service_principal = None
        self.enable_rbac = False
        self.network_profile = None
        self.aad_profile = None
        self.addon = None
        # These options are only mandatory when creating or updating a cluster.
        required_if = [
            ('state', 'present', [
                'dns_prefix', 'linux_profile', 'agent_pool_profiles', 'service_principal'])
        ]
        self.results = dict(changed=False)
        super(AzureRMManagedCluster, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                    supports_check_mode=True,
                                                    supports_tags=True,
                                                    required_if=required_if)
    def exec_module(self, **kwargs):
        """Main module execution method"""
        for key in list(self.module_arg_spec.keys()) + ['tags']:
            setattr(self, key, kwargs[key])
        resource_group = None
        to_be_updated = False
        update_tags = False
        resource_group = self.get_resource_group(self.resource_group)
        if not self.location:
            # Default the cluster location to the resource group's location.
            self.location = resource_group.location
        response = self.get_aks()
        # Check if the AKS instance already present in the RG
        if self.state == 'present':
            # For now Agent Pool cannot be more than 1; remove this check in the future if that changes
            agentpoolcount = len(self.agent_pool_profiles)
            if agentpoolcount > 1:
                self.fail('You cannot specify more than one agent_pool_profiles currently')
            available_versions = self.get_all_versions()
            if not response:
                to_be_updated = True
                if self.kubernetes_version not in available_versions.keys():
                    self.fail("Unsupported kubernetes version. Expected one of {0} but got {1}".format(available_versions.keys(), self.kubernetes_version))
            else:
                self.results = response
                self.results['changed'] = False
                self.log('Results : {0}'.format(response))
                update_tags, response['tags'] = self.update_tags(response['tags'])
                # Only diff against a cluster that finished provisioning.
                if response['provisioning_state'] == "Succeeded":
                    def is_property_changed(profile, property, ignore_case=False):
                        # Compare one sub-option between the Azure state and the
                        # requested module parameters.
                        base = response[profile].get(property)
                        new = getattr(self, profile).get(property)
                        if ignore_case:
                            return base.lower() != new.lower()
                        else:
                            return base != new
                    # Cannot Update the SSH Key for now // Let service to handle it
                    if is_property_changed('linux_profile', 'ssh_key'):
                        self.log(("Linux Profile Diff SSH, Was {0} / Now {1}"
                                  .format(response['linux_profile']['ssh_key'], self.linux_profile.get('ssh_key'))))
                        to_be_updated = True
                        # self.module.warn("linux_profile.ssh_key cannot be updated")
                    # self.log("linux_profile response : {0}".format(response['linux_profile'].get('admin_username')))
                    # self.log("linux_profile self : {0}".format(self.linux_profile[0].get('admin_username')))
                    # Cannot Update the Username for now // Let service to handle it
                    if is_property_changed('linux_profile', 'admin_username'):
                        self.log(("Linux Profile Diff User, Was {0} / Now {1}"
                                  .format(response['linux_profile']['admin_username'], self.linux_profile.get('admin_username'))))
                        to_be_updated = True
                        # self.module.warn("linux_profile.admin_username cannot be updated")
                    # Cannot have more than one agent pool profile for now
                    if len(response['agent_pool_profiles']) != len(self.agent_pool_profiles):
                        self.log("Agent Pool count is diff, need to updated")
                        to_be_updated = True
                    if response['kubernetes_version'] != self.kubernetes_version:
                        # Only allow an upgrade the service reports as valid from the current version.
                        upgrade_versions = available_versions.get(response['kubernetes_version']) or available_versions.keys()
                        if upgrade_versions and self.kubernetes_version not in upgrade_versions:
                            self.fail('Cannot upgrade kubernetes version to {0}, supported value are {1}'.format(self.kubernetes_version, upgrade_versions))
                        to_be_updated = True
                    if response['enable_rbac'] != self.enable_rbac:
                        to_be_updated = True
                    if self.network_profile:
                        # Case-insensitive comparison; unset remote values compare as ''.
                        for key in self.network_profile.keys():
                            original = response['network_profile'].get(key) or ''
                            if self.network_profile[key] and self.network_profile[key].lower() != original.lower():
                                to_be_updated = True
                    def compare_addon(origin, patch, config):
                        # True when the requested addon state (patch) matches the
                        # remote state (origin); config maps option -> Azure key.
                        if not patch:
                            return True
                        if not origin:
                            return False
                        if origin['enabled'] != patch['enabled']:
                            return False
                        config = config or dict()
                        for key in config.keys():
                            if origin.get(config[key]) != patch.get(key):
                                return False
                        return True
                    if self.addon:
                        for key in ADDONS.keys():
                            addon_name = ADDONS[key]['name']
                            if not compare_addon(response['addon'].get(addon_name), self.addon.get(key), ADDONS[key].get('config')):
                                to_be_updated = True
                    for profile_result in response['agent_pool_profiles']:
                        matched = False
                        for profile_self in self.agent_pool_profiles:
                            if profile_result['name'] == profile_self['name']:
                                matched = True
                                # Fall back to the remote disk size when the option is unset.
                                os_disk_size_gb = profile_self.get('os_disk_size_gb') or profile_result['os_disk_size_gb']
                                if profile_result['count'] != profile_self['count'] \
                                        or profile_result['vm_size'] != profile_self['vm_size'] \
                                        or profile_result['os_disk_size_gb'] != os_disk_size_gb \
                                        or profile_result['vnet_subnet_id'] != profile_self.get('vnet_subnet_id', profile_result['vnet_subnet_id']):
                                    self.log(("Agent Profile Diff - Origin {0} / Update {1}".format(str(profile_result), str(profile_self))))
                                    to_be_updated = True
                        if not matched:
                            self.log("Agent Pool not found")
                            to_be_updated = True
            if to_be_updated:
                self.log("Need to Create / Update the AKS instance")
                if not self.check_mode:
                    self.results = self.create_update_aks()
                    self.log("Creation / Update done")
                # In check mode we still report changed without touching Azure.
                self.results['changed'] = True
            elif update_tags:
                self.log("Need to Update the AKS tags")
                if not self.check_mode:
                    self.results['tags'] = self.update_aks_tags()
                self.results['changed'] = True
            return self.results
        elif self.state == 'absent' and response:
            self.log("Need to Delete the AKS instance")
            self.results['changed'] = True
            if self.check_mode:
                return self.results
            self.delete_aks()
            self.log("AKS instance deleted")
        return self.results
    def create_update_aks(self):
        '''
        Creates or updates a managed Azure container service (AKS) with the specified configuration of agents.

        :return: deserialized AKS instance state dictionary
        '''
        self.log("Creating / Updating the AKS instance {0}".format(self.name))
        agentpools = []
        if self.agent_pool_profiles:
            agentpools = [self.create_agent_pool_profile_instance(profile) for profile in self.agent_pool_profiles]
        service_principal_profile = self.create_service_principal_profile_instance(self.service_principal)
        parameters = self.managedcluster_models.ManagedCluster(
            location=self.location,
            dns_prefix=self.dns_prefix,
            kubernetes_version=self.kubernetes_version,
            tags=self.tags,
            service_principal_profile=service_principal_profile,
            agent_pool_profiles=agentpools,
            linux_profile=self.create_linux_profile_instance(self.linux_profile),
            enable_rbac=self.enable_rbac,
            network_profile=self.create_network_profile_instance(self.network_profile),
            aad_profile=self.create_aad_profile_instance(self.aad_profile),
            addon_profiles=self.create_addon_profile_instance(self.addon)
        )
        # self.log("service_principal_profile : {0}".format(parameters.service_principal_profile))
        # self.log("linux_profile : {0}".format(parameters.linux_profile))
        # self.log("ssh from yaml : {0}".format(results.get('linux_profile')[0]))
        # self.log("ssh : {0}".format(parameters.linux_profile.ssh))
        # self.log("agent_pool_profiles : {0}".format(parameters.agent_pool_profiles))
        try:
            poller = self.managedcluster_client.managed_clusters.create_or_update(self.resource_group, self.name, parameters)
            response = self.get_poller_result(poller)
            # kube_config is not part of the cluster resource; fetch it separately.
            response.kube_config = self.get_aks_kubeconfig()
            return create_aks_dict(response)
        except CloudError as exc:
            self.log('Error attempting to create the AKS instance.')
            self.fail("Error creating the AKS instance: {0}".format(exc.message))
    def update_aks_tags(self):
        '''
        Updates only the tags of the AKS instance.

        :return: the updated tags dict
        '''
        try:
            poller = self.managedcluster_client.managed_clusters.update_tags(self.resource_group, self.name, self.tags)
            response = self.get_poller_result(poller)
            return response.tags
        except CloudError as exc:
            self.fail("Error attempting to update AKS tags: {0}".format(exc.message))
    def delete_aks(self):
        '''
        Deletes the specified managed container service (AKS) in the specified subscription and resource group.

        :return: True
        '''
        self.log("Deleting the AKS instance {0}".format(self.name))
        try:
            poller = self.managedcluster_client.managed_clusters.delete(self.resource_group, self.name)
            self.get_poller_result(poller)
            return True
        except CloudError as e:
            self.log('Error attempting to delete the AKS instance.')
            self.fail("Error deleting the AKS instance: {0}".format(e.message))
            return False
    def get_aks(self):
        '''
        Gets the properties of the specified container service.

        :return: deserialized AKS instance state dictionary, or False when not found
        '''
        self.log("Checking if the AKS instance {0} is present".format(self.name))
        try:
            response = self.managedcluster_client.managed_clusters.get(self.resource_group, self.name)
            self.log("Response : {0}".format(response))
            self.log("AKS instance : {0} found".format(response.name))
            response.kube_config = self.get_aks_kubeconfig()
            return create_aks_dict(response)
        except CloudError:
            self.log('Did not find the AKS instance.')
            return False
    def get_all_versions(self):
        '''
        Maps each orchestrator version available at self.location to the list
        of versions it may be upgraded to.
        '''
        try:
            result = dict()
            response = self.containerservice_client.container_services.list_orchestrators(self.location, resource_type='managedClusters')
            orchestrators = response.orchestrators
            for item in orchestrators:
                result[item.orchestrator_version] = [x.orchestrator_version for x in item.upgrades] if item.upgrades else []
            return result
        except Exception as exc:
            self.fail('Error when getting AKS supported kubernetes version list for location {0} - {1}'.format(self.location, exc.message or str(exc)))
    def get_aks_kubeconfig(self):
        '''
        Gets kubeconfig for the specified AKS instance.

        :return: AKS instance kubeconfig
        '''
        access_profile = self.managedcluster_client.managed_clusters.get_access_profile(resource_group_name=self.resource_group,
                                                                                        resource_name=self.name,
                                                                                        role_name="clusterUser")
        return access_profile.kube_config.decode('utf-8')
    def create_agent_pool_profile_instance(self, agentpoolprofile):
        '''
        Helper method to serialize a dict to a ManagedClusterAgentPoolProfile

        :param: agentpoolprofile: dict with the parameters to setup the ManagedClusterAgentPoolProfile
        :return: ManagedClusterAgentPoolProfile
        '''
        return self.managedcluster_models.ManagedClusterAgentPoolProfile(**agentpoolprofile)
    def create_service_principal_profile_instance(self, spnprofile):
        '''
        Helper method to serialize a dict to a ManagedClusterServicePrincipalProfile

        :param: spnprofile: dict with the parameters to setup the ManagedClusterServicePrincipalProfile
        :return: ManagedClusterServicePrincipalProfile
        '''
        return self.managedcluster_models.ManagedClusterServicePrincipalProfile(
            client_id=spnprofile['client_id'],
            secret=spnprofile['client_secret']
        )
    def create_linux_profile_instance(self, linuxprofile):
        '''
        Helper method to serialize a dict to a ContainerServiceLinuxProfile

        :param: linuxprofile: dict with the parameters to setup the ContainerServiceLinuxProfile
        :return: ContainerServiceLinuxProfile
        '''
        return self.managedcluster_models.ContainerServiceLinuxProfile(
            admin_username=linuxprofile['admin_username'],
            ssh=self.managedcluster_models.ContainerServiceSshConfiguration(public_keys=[
                self.managedcluster_models.ContainerServiceSshPublicKey(key_data=str(linuxprofile['ssh_key']))])
        )
    def create_network_profile_instance(self, network):
        # Serialize the network_profile option dict to a ContainerServiceNetworkProfile.
        return self.managedcluster_models.ContainerServiceNetworkProfile(**network) if network else None
    def create_aad_profile_instance(self, aad):
        # Serialize the aad_profile option dict to a ManagedClusterAADProfile.
        return self.managedcluster_models.ManagedClusterAADProfile(**aad) if aad else None
    def create_addon_profile_instance(self, addon):
        '''
        Helper method to serialize the addon option dict to a map of Azure addon
        name -> ManagedClusterAddonProfile.
        '''
        result = dict()
        addon = addon or {}
        for key in addon.keys():
            if not ADDONS.get(key):
                self.fail('Unsupported addon {0}'.format(key))
            if addon.get(key):
                name = ADDONS[key]['name']
                config_spec = ADDONS[key].get('config') or dict()
                config = addon[key]
                # Duplicate each snake_case option under its camelCase Azure key.
                for v in config_spec.keys():
                    config[config_spec[v]] = config[v]
                result[name] = self.managedcluster_models.ManagedClusterAddonProfile(config=config, enabled=config['enabled'])
        return result
def main():
    """Main execution"""
    # Instantiating the module class runs exec_module via AzureRMModuleBase.
    AzureRMManagedCluster()
if __name__ == '__main__':
    main()
| gpl-3.0 |
googleapis/googleapis-gen | google/cloud/aiplatform/v1/aiplatform-v1-py/google/cloud/aiplatform_v1/types/model_evaluation.py | 1 | 2961 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
# Register this module's messages with the proto-plus runtime; the manifest
# also drives the ``__all__`` export at the bottom of the file.
__protobuf__ = proto.module(
    package='google.cloud.aiplatform.v1',
    manifest={
        'ModelEvaluation',
    },
)
class ModelEvaluation(proto.Message):
    r"""A collection of metrics calculated by comparing Model's
    predictions on all of the test data against annotations from the
    test data.

    Attributes:
        name (str):
            Output only. The resource name of the
            ModelEvaluation.
        metrics_schema_uri (str):
            Output only. Points to a YAML file stored on Google Cloud
            Storage describing the
            [metrics][google.cloud.aiplatform.v1.ModelEvaluation.metrics]
            of this ModelEvaluation. The schema is defined as an OpenAPI
            3.0.2 `Schema
            Object <https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject>`__.
        metrics (google.protobuf.struct_pb2.Value):
            Output only. Evaluation metrics of the Model. The schema of
            the metrics is stored in
            [metrics_schema_uri][google.cloud.aiplatform.v1.ModelEvaluation.metrics_schema_uri]
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. Timestamp when this
            ModelEvaluation was created.
        slice_dimensions (Sequence[str]):
            Output only. All possible
            [dimensions][ModelEvaluationSlice.slice.dimension] of
            ModelEvaluationSlices. The dimensions can be used as the
            filter of the
            [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices]
            request, in the form of ``slice.dimension = <dimension>``.
    """
    # proto field 1: resource name string.
    name = proto.Field(
        proto.STRING,
        number=1,
    )
    # proto field 2: GCS URI of the metrics schema.
    metrics_schema_uri = proto.Field(
        proto.STRING,
        number=2,
    )
    # proto field 3: free-form metrics value (google.protobuf.Value).
    metrics = proto.Field(
        proto.MESSAGE,
        number=3,
        message=struct_pb2.Value,
    )
    # proto field 4: creation timestamp.
    create_time = proto.Field(
        proto.MESSAGE,
        number=4,
        message=timestamp_pb2.Timestamp,
    )
    # proto field 5: repeated slice dimension names.
    slice_dimensions = proto.RepeatedField(
        proto.STRING,
        number=5,
    )
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 |
dset0x/invenio | invenio/modules/formatter/upgrades/formatter_2014_10_29_add_mime_type.py | 15 | 2136 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import warnings
from invenio.ext.sqlalchemy import db
from invenio.modules.upgrader.api import op
from invenio.legacy.dbquery import run_sql
depends_on = [u'formatter_2014_08_01_recjson']
def info():
    """Return the short, human-readable description shown by the upgrader."""
    description = "Adds a mime_type column to format table"
    return description
def do_upgrade():
    """Add the ``mime_type`` column and backfill it for the known format codes."""
    op.add_column('format', db.Column('mime_type',
                  db.String(length=255), unique=True, nullable=True))
    # Mapping of output-format code -> MIME type for the formats shipped
    # with Invenio.
    mime_type_dict = {
        'xm': 'application/marcxml+xml',
        'hm': 'application/marc',
        'recjson': 'application/json',
        'hx': 'application/x-bibtex',
        'xn': 'application/x-nlm',
    }
    query = "UPDATE format SET mime_type=%s WHERE code=%s"
    for code, mime in mime_type_dict.items():
        try:
            run_sql(query, (mime, code))
        except Exception as e:
            # Best effort: a failed backfill row should not abort the upgrade.
            warnings.warn("Failed to execute query {0}: {1}".format(query, e))
def estimate():
    """Return the estimated running time of this upgrade, in seconds."""
    return 1
def pre_upgrade():
    """Run pre-upgrade checks (optional).

    This upgrade has no preconditions; raise RuntimeError here to abort
    the upgrade if any were needed.
    """
    # Example of raising errors:
    # raise RuntimeError("Description of error 1", "Description of error 2")
def post_upgrade():
    """Run post-upgrade checks (optional).

    Nothing to verify for this upgrade; emit warnings here for
    continuable problems if any were needed.
    """
    # Example of issuing warnings:
    # warnings.warn("A continuable error occurred")
| gpl-2.0 |
LalatenduMohanty/imagefactory | windows-proxy-code/consumer-service.py | 5 | 9811 | # Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import win32serviceutil
import win32service
import win32event
import win32api
import win32security
import win32con
import win32process
import win32pipe
import win32file
import win32net
import win32netcon
import msvcrt
import os
import threading
import servicemanager
import socket
import platform
from qpid.messaging import *
from qpid.util import URL
import base64
import random
import string
class AppServerSvc (win32serviceutil.ServiceFramework):
    """Windows service that consumes commands from a local Qpid broker.

    Each message on ``amq.topic`` is expected to carry a base64 encoded
    ``winrs``/``winrm`` command line targeting a *remote* host.  The command
    is executed under a freshly created local administrator account
    ("RHAdmin") and its stdout/stderr/exit code are published, base64
    encoded, on the message's ``reply_to`` queue.  (Python 2 / pywin32.)
    """
    _svc_name_ = "StartConsumer"
    _svc_display_name_ = "Consumer Service"
    _svc_description_ = "Consumer service to process Qpid commands"

    def __init__(self, args):
        win32serviceutil.ServiceFramework.__init__(self, args)
        self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
        socket.setdefaulttimeout(60)

    def SvcStop(self):
        """Signal the service loop that a stop was requested."""
        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
        win32event.SetEvent(self.hWaitStop)

    def SvcDoRun(self):
        """Service entry point: (re)create the RHAdmin account, impersonate it
        and enter the consumer loop."""
        servicemanager.LogMsg(servicemanager.EVENTLOG_INFORMATION_TYPE,
                              servicemanager.PYS_SERVICE_STARTED,
                              (self._svc_name_, ''))

        # Create an Administrator account to be impersonated and used for
        # the spawned processes.
        def create_user():
            params = {}
            params['name'] = 'RHAdmin'
            # Use OS entropy for the password: the default ``random``
            # generator is predictable and unsuitable for credentials.
            rng = random.SystemRandom()
            digits = "".join([rng.choice(string.digits) for i in range(10)])
            chars_lower = ''.join([rng.choice(string.ascii_lowercase) for i in range(10)])
            chars_upper = ''.join([rng.choice(string.ascii_uppercase) for i in range(10)])
            params['password'] = digits + chars_lower + chars_upper
            # Shuffle so the three character classes are not grouped.
            params['password'] = ''.join(rng.sample(list(params['password']),
                                                    len(params['password'])))
            params['flags'] = win32netcon.UF_NORMAL_ACCOUNT | win32netcon.UF_SCRIPT
            params['priv'] = win32netcon.USER_PRIV_USER
            user = win32net.NetUserAdd(None, 1, params)
            domain = socket.gethostname()
            data = [{'domainandname': domain + '\\RHAdmin'}]
            win32net.NetLocalGroupAddMembers(None, 'Administrators', 3, data)
            return params['password']

        # Remove a stale account from a previous run (ignoring "does not
        # exist"), then create it exactly once.  The original wrapped both
        # calls in one bare ``try/except`` and re-ran create_user() on any
        # failure, which retried even when create_user itself had failed.
        try:
            win32net.NetUserDel(None, 'RHAdmin')
        except Exception:
            pass  # the account did not exist yet
        Password = create_user()

        token = win32security.LogonUser('RHAdmin', None, Password,
                                        win32con.LOGON32_LOGON_INTERACTIVE,
                                        win32con.LOGON32_PROVIDER_DEFAULT)
        win32security.ImpersonateLoggedOnUser(token)
        self.main(token)

    def main(self, token):
        """Consume commands from the broker forever and run each one as the
        user represented by *token*."""
        connection = Connection('localhost', port=5672)
        connection.open()
        session = connection.session(str(uuid4()))
        receiver = session.receiver('amq.topic')
        local_ip = socket.gethostbyname(socket.gethostname())
        localhost_name = platform.uname()[1]

        def make_inheritable(token):
            """Return a duplicate of handle, which is inheritable"""
            return win32api.DuplicateHandle(win32api.GetCurrentProcess(), token,
                                            win32api.GetCurrentProcess(), 0, 1,
                                            win32con.DUPLICATE_SAME_ACCESS)

        while True:
            message = receiver.fetch()
            session.acknowledge()
            sender = session.sender(message.reply_to)
            command = base64.b64decode(message.content)
            # Accept only winrs/winrm invocations that explicitly target a
            # remote host (-r:) and do not point back at this proxy.
            # BUGFIX: the original tested ``startswith('winrs' or 'winrm')``,
            # which evaluates to ``startswith('winrs')`` and never matched
            # 'winrm'; a tuple of prefixes checks both.
            if (not command.startswith(('winrs', 'winrm'))
                    or command.find('-r:') == -1
                    or command.find('localhost') != -1
                    or command.find(localhost_name) != -1
                    or command.find(local_ip) != -1):
                sender.send(Message(base64.b64encode('Commands against the proxy are not accepted')))
            else:
                # Start the process:
                # First let's create the communication pipes used by the
                # process; the child-facing ends must be inheritable.
                stdin_read, stdin_write = win32pipe.CreatePipe(None, 0)
                stdin_read = make_inheritable(stdin_read)
                stdout_read, stdout_write = win32pipe.CreatePipe(None, 0)
                stdout_write = make_inheritable(stdout_write)
                stderr_read, stderr_write = win32pipe.CreatePipe(None, 0)
                stderr_write = make_inheritable(stderr_write)

                # Set start-up parameters the process will use.
                # Here we specify the pipes for input, output and error.
                si = win32process.STARTUPINFO()
                si.dwFlags = win32con.STARTF_USESTDHANDLES
                si.hStdInput = stdin_read
                si.hStdOutput = stdout_write
                si.hStdError = stderr_write

                procArgs = (None,     # appName
                            command,  # commandLine
                            None,     # processAttributes
                            None,     # threadAttributes
                            1,        # bInheritHandles
                            0,        # dwCreationFlags
                            None,     # newEnvironment
                            None,     # currentDirectory
                            si)       # startupinfo

                # CreateProcessAsUser takes the token as first parameter,
                # this way the process will impersonate a user.
                try:
                    hProcess, hThread, PId, TId = win32process.CreateProcessAsUser(token, *procArgs)
                    hThread.Close()
                    # Close the child's ends in this process so EOF is seen.
                    if stdin_read is not None:
                        stdin_read.Close()
                    if stdout_write is not None:
                        stdout_write.Close()
                    if stderr_write is not None:
                        stderr_write.Close()
                    # Wrap the raw handles into CRT fds, then file objects.
                    stdin_write = msvcrt.open_osfhandle(stdin_write.Detach(), 0)
                    stdout_read = msvcrt.open_osfhandle(stdout_read.Detach(), 0)
                    stderr_read = msvcrt.open_osfhandle(stderr_read.Detach(), 0)
                    stdin_file = os.fdopen(stdin_write, 'wb', 0)
                    stdout_file = os.fdopen(stdout_read, 'rU', 0)
                    stderr_file = os.fdopen(stderr_read, 'rU', 0)

                    def readerthread(fh, buffer):
                        """Drain *fh* completely into *buffer* (one append)."""
                        buffer.append(fh.read())

                    def translate_newlines(data):
                        """Normalise CRLF / lone CR to LF."""
                        data = data.replace("\r\n", "\n")
                        data = data.replace("\r", "\n")
                        return data

                    def wait():
                        """Wait for child process to terminate. Returns returncode
                        attribute."""
                        win32event.WaitForSingleObject(hProcess,
                                                       win32event.INFINITE)
                        returncode = win32process.GetExitCodeProcess(hProcess)
                        return returncode

                    def communicate():
                        """Read stdout/stderr concurrently (avoiding pipe
                        deadlock) and return (stdout, stderr, returncode)."""
                        if stdout_file:
                            stdout = []
                            stdout_thread = threading.Thread(target=readerthread,
                                                             args=(stdout_file, stdout))
                            stdout_thread.setDaemon(True)
                            stdout_thread.start()
                        if stderr_file:
                            stderr = []
                            stderr_thread = threading.Thread(target=readerthread,
                                                             args=(stderr_file, stderr))
                            stderr_thread.setDaemon(True)
                            stderr_thread.start()
                        stdin_file.close()
                        if stdout_file:
                            stdout_thread.join()
                        if stderr_file:
                            stderr_thread.join()
                        if stdout is not None:
                            stdout = stdout[0]
                        if stderr is not None:
                            stderr = stderr[0]
                        if stdout:
                            stdout = translate_newlines(stdout)
                        if stderr:
                            stderr = translate_newlines(stderr)
                        return_code = wait()
                        return (stdout, stderr, return_code)

                    ret_stdout, ret_stderr, retcode = communicate()
                    result = Message(base64.b64encode(str(ret_stdout)))
                    result.properties["retcode"] = base64.b64encode(str(retcode))
                    if ret_stderr:
                        result.properties["stderr"] = base64.b64encode(str(ret_stderr))
                    else:
                        result.properties["stderr"] = base64.b64encode('')
                    sender.send(result)
                except Exception as exception_message:
                    # pywintypes.error is indexable under Python 2 as
                    # (winerror, funcname, message), hence [0] and [2].
                    # NOTE(review): other exception types reaching here would
                    # raise on the indexing — confirm only pywin32 errors can
                    # occur in this try block.
                    result = Message(base64.b64encode(''))
                    result.properties["retcode"] = base64.b64encode(str(exception_message[0]))
                    result.properties["stderr"] = base64.b64encode(str(exception_message[2]))
                    sender.send(result)
| apache-2.0 |
mdanielwork/intellij-community | python/lib/Lib/encodings/cp1253.py | 593 | 13350 | """ Python Character Mapping Codec cp1253 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1253.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless cp1253 codec backed by the charmap tables defined below."""
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental charmap encoder; the mapping is stateless so ``final`` is ignored."""
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental charmap decoder; the mapping is stateless so ``final`` is ignored."""
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    """Stream writer: inherits encode() from Codec, buffering from StreamWriter."""
    pass
class StreamReader(Codec,codecs.StreamReader):
    """Stream reader: inherits decode() from Codec, buffering from StreamReader."""
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo entry that registers this module as 'cp1253'."""
    return codecs.CodecInfo(
        name='cp1253',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u20ac' # 0x80 -> EURO SIGN
u'\ufffe' # 0x81 -> UNDEFINED
u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
u'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
u'\u2020' # 0x86 -> DAGGER
u'\u2021' # 0x87 -> DOUBLE DAGGER
u'\ufffe' # 0x88 -> UNDEFINED
u'\u2030' # 0x89 -> PER MILLE SIGN
u'\ufffe' # 0x8A -> UNDEFINED
u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\ufffe' # 0x8C -> UNDEFINED
u'\ufffe' # 0x8D -> UNDEFINED
u'\ufffe' # 0x8E -> UNDEFINED
u'\ufffe' # 0x8F -> UNDEFINED
u'\ufffe' # 0x90 -> UNDEFINED
u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
u'\u2022' # 0x95 -> BULLET
u'\u2013' # 0x96 -> EN DASH
u'\u2014' # 0x97 -> EM DASH
u'\ufffe' # 0x98 -> UNDEFINED
u'\u2122' # 0x99 -> TRADE MARK SIGN
u'\ufffe' # 0x9A -> UNDEFINED
u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\ufffe' # 0x9C -> UNDEFINED
u'\ufffe' # 0x9D -> UNDEFINED
u'\ufffe' # 0x9E -> UNDEFINED
u'\ufffe' # 0x9F -> UNDEFINED
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u0385' # 0xA1 -> GREEK DIALYTIKA TONOS
u'\u0386' # 0xA2 -> GREEK CAPITAL LETTER ALPHA WITH TONOS
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\xa5' # 0xA5 -> YEN SIGN
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\ufffe' # 0xAA -> UNDEFINED
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\u2015' # 0xAF -> HORIZONTAL BAR
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\u0384' # 0xB4 -> GREEK TONOS
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\u0388' # 0xB8 -> GREEK CAPITAL LETTER EPSILON WITH TONOS
u'\u0389' # 0xB9 -> GREEK CAPITAL LETTER ETA WITH TONOS
u'\u038a' # 0xBA -> GREEK CAPITAL LETTER IOTA WITH TONOS
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u038c' # 0xBC -> GREEK CAPITAL LETTER OMICRON WITH TONOS
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\u038e' # 0xBE -> GREEK CAPITAL LETTER UPSILON WITH TONOS
u'\u038f' # 0xBF -> GREEK CAPITAL LETTER OMEGA WITH TONOS
u'\u0390' # 0xC0 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
u'\u0391' # 0xC1 -> GREEK CAPITAL LETTER ALPHA
u'\u0392' # 0xC2 -> GREEK CAPITAL LETTER BETA
u'\u0393' # 0xC3 -> GREEK CAPITAL LETTER GAMMA
u'\u0394' # 0xC4 -> GREEK CAPITAL LETTER DELTA
u'\u0395' # 0xC5 -> GREEK CAPITAL LETTER EPSILON
u'\u0396' # 0xC6 -> GREEK CAPITAL LETTER ZETA
u'\u0397' # 0xC7 -> GREEK CAPITAL LETTER ETA
u'\u0398' # 0xC8 -> GREEK CAPITAL LETTER THETA
u'\u0399' # 0xC9 -> GREEK CAPITAL LETTER IOTA
u'\u039a' # 0xCA -> GREEK CAPITAL LETTER KAPPA
u'\u039b' # 0xCB -> GREEK CAPITAL LETTER LAMDA
u'\u039c' # 0xCC -> GREEK CAPITAL LETTER MU
u'\u039d' # 0xCD -> GREEK CAPITAL LETTER NU
u'\u039e' # 0xCE -> GREEK CAPITAL LETTER XI
u'\u039f' # 0xCF -> GREEK CAPITAL LETTER OMICRON
u'\u03a0' # 0xD0 -> GREEK CAPITAL LETTER PI
u'\u03a1' # 0xD1 -> GREEK CAPITAL LETTER RHO
u'\ufffe' # 0xD2 -> UNDEFINED
u'\u03a3' # 0xD3 -> GREEK CAPITAL LETTER SIGMA
u'\u03a4' # 0xD4 -> GREEK CAPITAL LETTER TAU
u'\u03a5' # 0xD5 -> GREEK CAPITAL LETTER UPSILON
u'\u03a6' # 0xD6 -> GREEK CAPITAL LETTER PHI
u'\u03a7' # 0xD7 -> GREEK CAPITAL LETTER CHI
u'\u03a8' # 0xD8 -> GREEK CAPITAL LETTER PSI
u'\u03a9' # 0xD9 -> GREEK CAPITAL LETTER OMEGA
u'\u03aa' # 0xDA -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
u'\u03ab' # 0xDB -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
u'\u03ac' # 0xDC -> GREEK SMALL LETTER ALPHA WITH TONOS
u'\u03ad' # 0xDD -> GREEK SMALL LETTER EPSILON WITH TONOS
u'\u03ae' # 0xDE -> GREEK SMALL LETTER ETA WITH TONOS
u'\u03af' # 0xDF -> GREEK SMALL LETTER IOTA WITH TONOS
u'\u03b0' # 0xE0 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
u'\u03b1' # 0xE1 -> GREEK SMALL LETTER ALPHA
u'\u03b2' # 0xE2 -> GREEK SMALL LETTER BETA
u'\u03b3' # 0xE3 -> GREEK SMALL LETTER GAMMA
u'\u03b4' # 0xE4 -> GREEK SMALL LETTER DELTA
u'\u03b5' # 0xE5 -> GREEK SMALL LETTER EPSILON
u'\u03b6' # 0xE6 -> GREEK SMALL LETTER ZETA
u'\u03b7' # 0xE7 -> GREEK SMALL LETTER ETA
u'\u03b8' # 0xE8 -> GREEK SMALL LETTER THETA
u'\u03b9' # 0xE9 -> GREEK SMALL LETTER IOTA
u'\u03ba' # 0xEA -> GREEK SMALL LETTER KAPPA
u'\u03bb' # 0xEB -> GREEK SMALL LETTER LAMDA
u'\u03bc' # 0xEC -> GREEK SMALL LETTER MU
u'\u03bd' # 0xED -> GREEK SMALL LETTER NU
u'\u03be' # 0xEE -> GREEK SMALL LETTER XI
u'\u03bf' # 0xEF -> GREEK SMALL LETTER OMICRON
u'\u03c0' # 0xF0 -> GREEK SMALL LETTER PI
u'\u03c1' # 0xF1 -> GREEK SMALL LETTER RHO
u'\u03c2' # 0xF2 -> GREEK SMALL LETTER FINAL SIGMA
u'\u03c3' # 0xF3 -> GREEK SMALL LETTER SIGMA
u'\u03c4' # 0xF4 -> GREEK SMALL LETTER TAU
u'\u03c5' # 0xF5 -> GREEK SMALL LETTER UPSILON
u'\u03c6' # 0xF6 -> GREEK SMALL LETTER PHI
u'\u03c7' # 0xF7 -> GREEK SMALL LETTER CHI
u'\u03c8' # 0xF8 -> GREEK SMALL LETTER PSI
u'\u03c9' # 0xF9 -> GREEK SMALL LETTER OMEGA
u'\u03ca' # 0xFA -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
u'\u03cb' # 0xFB -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
u'\u03cc' # 0xFC -> GREEK SMALL LETTER OMICRON WITH TONOS
u'\u03cd' # 0xFD -> GREEK SMALL LETTER UPSILON WITH TONOS
u'\u03ce' # 0xFE -> GREEK SMALL LETTER OMEGA WITH TONOS
u'\ufffe' # 0xFF -> UNDEFINED
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| apache-2.0 |
chen0031/rekall | rekall-core/rekall/plugins/windows/gui/clipboard.py | 4 | 6252 | # Rekall Memory Forensics
# Copyright (C) 2007,2008 Volatile Systems
# Copyright (C) 2010,2011,2012 Michael Hale Ligh <michael.ligh@mnin.org>
# Copyright 2013 Google Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from rekall import obj
from rekall.plugins.windows import common
from rekall.plugins.windows.gui import sessions
from rekall.plugins.windows.gui import windowstations
from rekall.plugins.windows.gui import win32k_core
from rekall.plugins.windows.gui import constants
class Clipboard(common.WinProcessFilter):
    """Extract the contents of the windows clipboard"""

    __name = "clipboard"

    @classmethod
    def args(cls, parser):
        """Declare the plugin's command line arguments."""
        parser.add_argument("-v", "--verbose", default=False,
                            type="Boolean",
                            help="Dump more information")

    def __init__(self, verbose=False, **kwargs):
        super(Clipboard, self).__init__(**kwargs)
        self.verbose = verbose
        self.profile = win32k_core.Win32GUIProfile(self.profile)

    def calculate(self):
        """Yield (session, window station, tagCLIP, tagCLIPDATA handle) tuples."""
        session_plugin = self.session.plugins.sessions()

        # Dictionary of MM_SESSION_SPACEs by ID.  Named ``session_spaces``
        # so it no longer shadows the imported ``sessions`` module.
        session_spaces = dict((int(session.SessionId), session)
                              for session in session_plugin.session_spaces())

        # Dictionary of session USER objects by handle
        session_handles = {}

        # If various objects cannot be found or associated,
        # we'll return none objects
        e0 = obj.NoneObject("Unknown tagCLIPDATA")
        e1 = obj.NoneObject("Unknown tagWINDOWSTATION")
        e2 = obj.NoneObject("Unknown tagCLIP")

        # Handle type filter
        filters = [lambda x: str(x.bType) == "TYPE_CLIPDATA"]

        # Load tagCLIPDATA handles from all sessions.
        # (BUGFIX: a leftover ``import pdb; pdb.set_trace()`` breakpoint was
        # removed from this spot.)
        for sid, session in session_spaces.items():
            handles = {}
            shared_info = session.find_shared_info()
            if not shared_info:
                self.session.logging.debug(
                    "No shared info for session {0}".format(sid))
                continue
            for handle in shared_info.handles(filters):
                handles[int(handle.phead.h)] = handle
            session_handles[sid] = handles

        # Scan for Each WindowStation
        windowstations_plugin = self.session.plugins.wndscan()
        for wndsta, station_as in windowstations_plugin.generate_hits():
            session = session_spaces.get(int(wndsta.dwSessionId), None)
            # The session is unknown
            if not session:
                continue
            handles = session_handles.get(int(session.SessionId), None)
            # No handles in the session
            if not handles:
                continue
            clip_array = wndsta.pClipBase.dereference(vm=station_as)
            # The tagCLIP array is empty or the pointer is invalid
            if not clip_array:
                continue
            # Resolve tagCLIPDATA from tagCLIP.hData
            for clip in clip_array:
                handle = handles.get(int(clip.hData), e0)
                # Remove this handle from the list
                if handle:
                    handles.pop(int(clip.hData))
                yield session, wndsta, clip, handle

        # Any remaining tagCLIPDATA not matched. This allows us
        # to still find clipboard data if a window station is not
        # found or if pClipData or cNumClipFormats were corrupt
        for sid in session_spaces.keys():
            handles = session_handles.get(sid, None)
            # No handles in the session
            if not handles:
                continue
            for handle in handles.values():
                yield session_spaces[sid], e1, e2, handle

    def render(self, renderer):
        """Render one table row per clipboard entry; hexdump when --verbose."""
        renderer.table_header([("Session", "session", "10"),
                               ("WindowStation", "window_station", "12"),
                               ("Format", "format", "18"),
                               ("Handle", "handle", "[addr]"),
                               ("Object", "object", "[addrpad]"),
                               ("Data", "data", "50"),
                               ])

        for session, wndsta, clip, handle in self.calculate():
            # If no tagCLIP is provided, we do not know the format
            if not clip:
                fmt = obj.NoneObject("Format unknown")
            else:
                # Try to get the format name, but failing that, print
                # the format number in hex instead.
                if clip.fmt.v() in constants.CLIPBOARD_FORMAT_ENUM:
                    fmt = str(clip.fmt)
                else:
                    fmt = hex(clip.fmt.v())

            # Try to get the handle from tagCLIP first, but
            # fall back to using _HANDLEENTRY.phead. Note: this can
            # be a value like DUMMY_TEXT_HANDLE (1) etc.
            handle_value = clip.hData or handle.phead.h

            clip_data = ""
            if handle and "TEXT" in fmt:
                clip_data = handle.reference_object().as_string(fmt)

            renderer.table_row(session.SessionId,
                               wndsta.Name,
                               fmt,
                               handle_value,
                               handle.phead.v(),
                               clip_data)

            # Print an additional hexdump if --verbose is specified.
            if self.verbose and handle:
                hex_dump = handle.reference_object().as_hex()
                # BUGFIX: the original wrote to an undefined ``outfd``
                # (NameError whenever --verbose); route the dump through the
                # renderer instead.  TODO(review): confirm ``format`` is the
                # right free-text output call for this Rekall renderer API.
                renderer.format("{0}", hex_dump)
| gpl-2.0 |
sekikn/ambari | ambari-server/src/test/resources/TestAmbaryServer.samples/dummy_stack/HIVE/package/scripts/params.py | 4 | 4514 | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
import status_params
# server configurations
config = Script.get_config()

# Hive metastore JDBC connection settings (from hive-site).
hive_metastore_user_name = config['configurations']['hive-site']['javax.jdo.option.ConnectionUserName']
hive_server_conf_dir = "/etc/hive/conf.server"
hive_jdbc_connection_url = config['configurations']['hive-site']['javax.jdo.option.ConnectionURL']
hive_metastore_user_passwd = config['configurations']['hive-site']['javax.jdo.option.ConnectionPassword']
#users
hive_user = config['configurations']['global']['hive_user']
hive_lib = '/usr/lib/hive/lib/'
#JDBC driver jar name
hive_jdbc_driver = default('hive_jdbc_driver', 'com.mysql.jdbc.Driver')
# NOTE(review): jdbc_jar_name stays undefined when the driver is neither
# MySQL nor Oracle, which would raise NameError at the uses below — confirm
# upstream guarantees one of the two drivers.
if hive_jdbc_driver == "com.mysql.jdbc.Driver":
  jdbc_jar_name = "mysql-connector-java.jar"
elif hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
  jdbc_jar_name = "ojdbc6.jar"
check_db_connection_jar_name = "DBConnectionVerification.jar"
check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
#common
hive_metastore_port = config['configurations']['global']['hive_metastore_port']
hive_var_lib = '/var/lib/hive'
hive_server_host = config['clusterHostInfo']['hive_server_host']
hive_url = format("jdbc:hive2://{hive_server_host}:10000")

# Smoke-test and Kerberos settings.
smokeuser = config['configurations']['global']['smokeuser']
smoke_test_sql = "/tmp/hiveserver2.sql"
smoke_test_path = "/tmp/hiveserver2Smoke.sh"
smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
security_enabled = config['configurations']['global']['security_enabled']
kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
hive_metastore_keytab_path = config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
#hive_env
hive_conf_dir = "/etc/hive/conf"
hive_dbroot = config['configurations']['global']['hive_dbroot']
hive_log_dir = config['configurations']['global']['hive_log_dir']
hive_pid_dir = status_params.hive_pid_dir
hive_pid = status_params.hive_pid
#hive-site
hive_database_name = config['configurations']['global']['hive_database_name']
#Starting hiveserver2
start_hiveserver2_script = 'startHiveserver2.sh'
hadoop_home = '/usr'
##Starting metastore
start_metastore_script = 'startMetastore.sh'
hive_metastore_pid = status_params.hive_metastore_pid
java_share_dir = '/usr/share/java'
driver_curl_target = format("{java_share_dir}/{jdbc_jar_name}")
hdfs_user = config['configurations']['global']['hdfs_user']
user_group = config['configurations']['global']['user_group']
artifact_dir = "/tmp/HDP-artifacts/"
target = format("{hive_lib}/{jdbc_jar_name}")
# JDBC driver jar is downloaded from the Ambari server's resource dir.
jdk_location = config['ambariLevelParams']['jdk_location']
driver_curl_source = format("{jdk_location}/{jdbc_jar_name}")
start_hiveserver2_path = "/tmp/start_hiveserver2_script"
start_metastore_path = "/tmp/start_metastore_script"
hive_aux_jars_path = config['configurations']['global']['hive_aux_jars_path']
hadoop_heapsize = config['configurations']['global']['hadoop_heapsize']
java64_home = config['ambariLevelParams']['java_home']
##### MYSQL
db_name = config['configurations']['global']['hive_database_name']
mysql_user = "mysql"
mysql_group = 'mysql'
mysql_host = config['clusterHostInfo']['hive_mysql_host']
mysql_adduser_path = "/tmp/addMysqlUser.sh"
########## HCAT
hcat_conf_dir = '/etc/hcatalog/conf'
metastore_port = 9933
hcat_lib = '/usr/lib/hcatalog/share/hcatalog'
hcat_dbroot = hcat_lib
hcat_user = config['configurations']['global']['hcat_user']
webhcat_user = config['configurations']['global']['webhcat_user']
hcat_pid_dir = status_params.hcat_pid_dir
hcat_log_dir = config['configurations']['global']['hcat_log_dir'] #hcat_log_dir
hadoop_conf_dir = '/etc/hadoop/conf'
| apache-2.0 |
core-bitcoin/bitcoin | qa/pull-tester/rpc-tests.py | 3 | 12041 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Run Regression Test Suite
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts, other
than:
- `-extended`: run the "extended" test suite in addition to the basic one.
- `-win`: signal that this is running in a Windows environment, and we
should run the tests.
- `--coverage`: this generates a basic coverage report for the RPC
interface.
For a description of arguments recognized by test scripts, see
`qa/pull-tester/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
sys.path.append("qa/pull-tester/")
from tests_config import *
# (reset, bold) ANSI escape pairs; empty on non-POSIX so output stays plain.
BOLD = ("","")
if os.name == 'posix':
    # primitive formatting on supported
    # terminal via ANSI escape sequences:
    BOLD = ('\033[0m', '\033[1m')

# SRCDIR comes from the star-import of tests_config above.
RPC_TESTS_DIR = SRCDIR + '/qa/rpc-tests/'

# If imported values are not defined then set to zero (or disabled)
if 'ENABLE_WALLET' not in vars():
    ENABLE_WALLET=0
if 'ENABLE_BITCOIND' not in vars():
    ENABLE_BITCOIND=0
if 'ENABLE_UTILS' not in vars():
    ENABLE_UTILS=0
if 'ENABLE_ZMQ' not in vars():
    ENABLE_ZMQ=0

ENABLE_COVERAGE=0

# Create a set to store arguments and create the passon string
opts = set()
passon_args = []
# Double-dash arguments are forwarded verbatim to each test script.
PASSON_REGEX = re.compile("^--")
PARALLEL_REGEX = re.compile('^-parallel=')

print_help = False
run_parallel = 4  # default number of concurrently running test scripts

for arg in sys.argv[1:]:
    if arg == "--help" or arg == "-h" or arg == "-?":
        print_help = True
        break
    if arg == '--coverage':
        ENABLE_COVERAGE = 1
    elif PASSON_REGEX.match(arg):
        passon_args.append(arg)
    elif PARALLEL_REGEX.match(arg):
        run_parallel = int(arg.split(sep='=', maxsplit=1)[1])
    else:
        # Anything else selects tests (or is a flag like -extended / -win).
        opts.add(arg)

# Set env vars
if "BITCOIND" not in os.environ:
    os.environ["BITCOIND"] = BUILDDIR + '/src/bitcoind' + EXEEXT

if EXEEXT == ".exe" and "-win" not in opts:
    # https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
    # https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
    print("Win tests currently disabled by default. Use -win option to enable")
    sys.exit(0)

if not (ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_BITCOIND == 1):
    print("No rpc tests to run. Wallet, utils, and bitcoind must all be enabled")
    sys.exit(0)

# python3-zmq may not be installed. Handle this gracefully and with some helpful info
if ENABLE_ZMQ:
    try:
        import zmq
    except ImportError:
        print("ERROR: \"import zmq\" failed. Set ENABLE_ZMQ=0 or "
              "to run zmq tests, see dependency info in /qa/README.md.")
        # ENABLE_ZMQ=0
        raise
# Default test suite; each entry is a script name plus optional arguments.
testScripts = [
    # longest test should go first, to favor running tests in parallel
    'wallet-hd.py',
    'walletbackup.py',
    # vv Tests less than 5m vv
    'p2p-fullblocktest.py',
    'fundrawtransaction.py',
    'p2p-compactblocks.py',
    'segwit.py',
    # vv Tests less than 2m vv
    'wallet.py',
    'wallet-accounts.py',
    'p2p-segwit.py',
    'wallet-dump.py',
    'listtransactions.py',
    # vv Tests less than 60s vv
    'sendheaders.py',
    'zapwallettxes.py',
    'importmulti.py',
    'mempool_limit.py',
    'merkle_blocks.py',
    'receivedby.py',
    'abandonconflict.py',
    'bip68-112-113-p2p.py',
    'rawtransactions.py',
    'reindex.py',
    # vv Tests less than 30s vv
    'mempool_resurrect_test.py',
    'txn_doublespend.py --mineblock',
    'txn_clone.py',
    'getchaintips.py',
    'rest.py',
    'mempool_spendcoinbase.py',
    'mempool_reorg.py',
    'httpbasics.py',
    'multi_rpc.py',
    'proxy_test.py',
    'signrawtransactions.py',
    'nodehandling.py',
    'decodescript.py',
    'blockchain.py',
    'disablewallet.py',
    'keypool.py',
    'p2p-mempool.py',
    'prioritise_transaction.py',
    'invalidblockrequest.py',
    'invalidtxrequest.py',
    'p2p-versionbits-warning.py',
    'preciousblock.py',
    'importprunedfunds.py',
    'signmessages.py',
    'nulldummy.py',
    'import-rescan.py',
    'bumpfee.py',
    'rpcnamedargs.py',
    'listsinceblock.py',
    'p2p-leaktests.py',
    'bip91.py',
    'fork-large-block.py',
]
if ENABLE_ZMQ:
    testScripts.append('zmq_test.py')

# Additional (slower) tests, run only with the -extended flag.
testScriptsExt = [
    'pruning.py',
    # vv Tests less than 20m vv
    'smartfees.py',
    # vv Tests less than 5m vv
    'maxuploadtarget.py',
    'mempool_packages.py',
    # vv Tests less than 2m vv
    'bip68-sequence.py',
    'getblocktemplate_longpoll.py',
    'p2p-timeouts.py',
    # vv Tests less than 60s vv
    'bip9-softforks.py',
    'p2p-feefilter.py',
    'rpcbind_test.py',
    # vv Tests less than 30s vv
    'bip65-cltv.py',
    'bip65-cltv-p2p.py',
    'bipdersig-p2p.py',
    'bipdersig.py',
    'getblocktemplate_proposals.py',
    'txn_doublespend.py',
    'txn_clone.py --mineblock',
    'forknotify.py',
    'invalidateblock.py',
    'maxblocksinflight.py',
    'p2p-acceptblock.py',
    'replace-by-fee.py',
]
def runtests():
    """Select, launch, and summarize the RPC test scripts.

    Reads module globals populated during argument parsing (`opts`,
    `passon_args`, `run_parallel`, `print_help`, `ENABLE_COVERAGE`),
    dispatches the selected scripts through RPCTestHandler, prints a
    per-test and aggregate result table, and exits the process with a
    nonzero status if any test failed.
    """
    test_list = []
    if '-extended' in opts:
        test_list = testScripts + testScriptsExt
    elif len(opts) == 0 or (len(opts) == 1 and "-win" in opts):
        test_list = testScripts
    else:
        # Explicit selection: a script matches by full name or name sans '.py'.
        for t in testScripts + testScriptsExt:
            if t in opts or re.sub(".py$", "", t) in opts:
                test_list.append(t)

    if print_help:
        # Only print help of the first script and exit
        subprocess.check_call((RPC_TESTS_DIR + test_list[0]).split() + ['-h'])
        sys.exit(0)

    coverage = None

    if ENABLE_COVERAGE:
        coverage = RPCCoverage()
        print("Initializing coverage directory at %s\n" % coverage.dir)
    # Flags shared by every test script invocation.
    flags = ["--srcdir=%s/src" % BUILDDIR] + passon_args
    flags.append("--cachedir=%s/qa/cache" % BUILDDIR)
    if coverage:
        flags.append(coverage.flag)

    if len(test_list) > 1 and run_parallel > 1:
        # Populate cache once up front so parallel runs don't race to build it.
        subprocess.check_output([RPC_TESTS_DIR + 'create_cache.py'] + flags)

    # Run Tests
    max_len_name = len(max(test_list, key=len))
    time_sum = 0
    time0 = time.time()
    job_queue = RPCTestHandler(run_parallel, test_list, flags)
    results = BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "PASSED", "DURATION") + BOLD[0]
    all_passed = True
    for _ in range(len(test_list)):
        (name, stdout, stderr, passed, duration) = job_queue.get_next()
        all_passed = all_passed and passed
        time_sum += duration
        print('\n' + BOLD[1] + name + BOLD[0] + ":")
        # Only dump a test's stdout when it failed; always surface stderr.
        print('' if passed else stdout + '\n', end='')
        print('' if stderr == '' else 'stderr:\n' + stderr + '\n', end='')
        results += "%s | %s | %s s\n" % (name.ljust(max_len_name), str(passed).ljust(6), duration)
        print("Pass: %s%s%s, Duration: %s s\n" % (BOLD[1], passed, BOLD[0], duration))
    results += BOLD[1] + "\n%s | %s | %s s (accumulated)" % ("ALL".ljust(max_len_name), str(all_passed).ljust(6), time_sum) + BOLD[0]
    print(results)
    print("\nRuntime: %s s" % (int(time.time() - time0)))

    if coverage:
        coverage.report_rpc_coverage()
        print("Cleaning up coverage data")
        coverage.cleanup()

    sys.exit(not all_passed)
class RPCTestHandler:
    """
    Trigger the test scripts passed in via the list.

    Keeps up to `num_tests_parallel` test-script subprocesses running at
    once; `get_next()` tops up the pool and blocks until one finishes.
    """

    def __init__(self, num_tests_parallel, test_list=None, flags=None):
        assert(num_tests_parallel >= 1)
        self.num_jobs = num_tests_parallel
        self.test_list = test_list
        self.flags = flags
        self.num_running = 0
        # In case there is a graveyard of zombie bitcoinds, we can apply a
        # pseudorandom offset to hopefully jump over them.
        # (625 is PORT_RANGE/MAX_NODES)
        self.portseed_offset = int(time.time() * 1000) % 625
        # Each job is (name, start_time, Popen, stdout_file, stderr_file).
        self.jobs = []

    def get_next(self):
        """Block until a test finishes; return (name, stdout, stderr, passed, duration_s)."""
        while self.num_running < self.num_jobs and self.test_list:
            # Add tests
            self.num_running += 1
            t = self.test_list.pop(0)
            # Unique port seed per test so parallel nodes don't collide.
            port_seed = ["--portseed={}".format(len(self.test_list) + self.portseed_offset)]
            log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
            log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
            self.jobs.append((t,
                              time.time(),
                              subprocess.Popen((RPC_TESTS_DIR + t).split() + self.flags + port_seed,
                                               universal_newlines=True,
                                               stdout=log_stdout,
                                               stderr=log_stderr),
                              log_stdout,
                              log_stderr))
        if not self.jobs:
            raise IndexError('pop from empty list')
        while True:
            # Return first proc that finishes
            time.sleep(.5)
            for j in self.jobs:
                (name, time0, proc, log_out, log_err) = j
                if proc.poll() is not None:
                    log_out.seek(0), log_err.seek(0)
                    [stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)]
                    log_out.close(), log_err.close()
                    # A test passes only with a clean exit AND silent stderr.
                    passed = stderr == "" and proc.returncode == 0
                    self.num_running -= 1
                    self.jobs.remove(j)
                    return name, stdout, stderr, passed, int(time.time() - time0)
            # Progress indicator while polling.
            print('.', end='', flush=True)
class RPCCoverage(object):
    """
    Coverage reporting utilities for pull-tester.

    Each test-script subprocess writes coverage files into ``self.dir``:
    per-process files listing the RPC commands it invoked, plus one
    reference listing of all RPC commands per `bitcoin-cli help`
    (`rpc_interface.txt`).  After all tests complete, the invoked
    commands are unioned and diffed against the reference to find RPC
    commands that were never exercised.

    See also: qa/rpc-tests/test_framework/coverage.py
    """

    def __init__(self):
        # Scratch directory handed to the test subprocesses via self.flag.
        self.dir = tempfile.mkdtemp(prefix="coverage")
        self.flag = '--coveragedir=%s' % self.dir

    def report_rpc_coverage(self):
        """
        Print out RPC commands that were unexercised by tests.
        """
        uncovered = self._get_uncovered_rpc_commands()

        if not uncovered:
            print("All RPC commands covered.")
        else:
            print("Uncovered RPC commands:")
            print("".join((" - %s\n" % i) for i in sorted(uncovered)))

    def cleanup(self):
        """Delete the coverage directory and everything inside it."""
        return shutil.rmtree(self.dir)

    def _get_uncovered_rpc_commands(self):
        """
        Return a set of currently untested RPC commands.
        """
        # This is shared from `qa/rpc-tests/test-framework/coverage.py`
        reference_filename = 'rpc_interface.txt'
        coverage_file_prefix = 'coverage.'

        coverage_ref_filename = os.path.join(self.dir, reference_filename)
        if not os.path.isfile(coverage_ref_filename):
            raise RuntimeError("No coverage reference found")

        # Reference list: every RPC command known to bitcoin-cli.
        with open(coverage_ref_filename, 'r') as ref_file:
            all_cmds = set(line.strip() for line in ref_file)

        # Union of commands recorded by any test subprocess.
        covered_cmds = set()
        for root, _, files in os.walk(self.dir):
            for fname in files:
                if fname.startswith(coverage_file_prefix):
                    with open(os.path.join(root, fname), 'r') as cov_file:
                        covered_cmds.update(line.strip() for line in cov_file)

        return all_cmds - covered_cmds
# Entry point; the module-level argument parsing above has already run.
if __name__ == '__main__':
    runtests()
| mit |
Mistobaan/tensorflow | tensorflow/python/kernel_tests/reader_ops_test.py | 11 | 38883 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Reader ops from io_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import gzip
import os
import shutil
import threading
import zlib
import six
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.util import compat
# Path prefix for test data inside the TensorFlow source tree.
prefix_path = "tensorflow/core/lib"

# pylint: disable=invalid-name
TFRecordCompressionType = tf_record.TFRecordCompressionType
# pylint: enable=invalid-name

# Edgar Allan Poe's 'Eldorado' -- bulk text payload for compression tests.
_TEXT = b"""Gaily bedight,
A gallant knight,
In sunshine and in shadow,
Had journeyed long,
Singing a song,
In search of Eldorado.
But he grew old
This knight so bold
And o'er his heart a shadow
Fell as he found
No spot of ground
That looked like Eldorado.
And, as his strength
Failed him at length,
He met a pilgrim shadow
'Shadow,' said he,
'Where can it be
This land of Eldorado?'
'Over the Mountains
Of the Moon'
Down the Valley of the Shadow,
Ride, boldly ride,'
The shade replied,
'If you seek for Eldorado!'
"""
class IdentityReaderTest(test.TestCase):
  """Tests for IdentityReader: every read returns key == value == work item."""

  def _ExpectRead(self, sess, key, value, expected):
    # One read: both key and value must equal the queued string.
    k, v = sess.run([key, value])
    self.assertAllEqual(expected, k)
    self.assertAllEqual(expected, v)

  def testOneEpoch(self):
    with self.test_session() as sess:
      reader = io_ops.IdentityReader("test_reader")
      work_completed = reader.num_work_units_completed()
      produced = reader.num_records_produced()
      queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
      queued_length = queue.size()
      key, value = reader.read(queue)

      # Counters start at zero before any work is enqueued.
      self.assertAllEqual(0, work_completed.eval())
      self.assertAllEqual(0, produced.eval())
      self.assertAllEqual(0, queued_length.eval())

      queue.enqueue_many([["A", "B", "C"]]).run()
      queue.close().run()
      self.assertAllEqual(3, queued_length.eval())

      self._ExpectRead(sess, key, value, b"A")
      self.assertAllEqual(1, produced.eval())

      self._ExpectRead(sess, key, value, b"B")

      self._ExpectRead(sess, key, value, b"C")
      self.assertAllEqual(3, produced.eval())
      self.assertAllEqual(0, queued_length.eval())

      # Reading past the closed, drained queue raises OutOfRange.
      with self.assertRaisesOpError("is closed and has insufficient elements "
                                    "\\(requested 1, current size 0\\)"):
        sess.run([key, value])

      self.assertAllEqual(3, work_completed.eval())
      self.assertAllEqual(3, produced.eval())
      self.assertAllEqual(0, queued_length.eval())

  def testMultipleEpochs(self):
    # The same queue can be refilled repeatedly; the reader keeps going.
    with self.test_session() as sess:
      reader = io_ops.IdentityReader("test_reader")
      queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
      enqueue = queue.enqueue_many([["DD", "EE"]])
      key, value = reader.read(queue)

      enqueue.run()
      self._ExpectRead(sess, key, value, b"DD")
      self._ExpectRead(sess, key, value, b"EE")
      enqueue.run()
      self._ExpectRead(sess, key, value, b"DD")
      self._ExpectRead(sess, key, value, b"EE")
      enqueue.run()
      self._ExpectRead(sess, key, value, b"DD")
      self._ExpectRead(sess, key, value, b"EE")
      queue.close().run()
      with self.assertRaisesOpError("is closed and has insufficient elements "
                                    "\\(requested 1, current size 0\\)"):
        sess.run([key, value])

  def testSerializeRestore(self):
    # serialize_state/restore_state must round-trip, rewinding the
    # produced-records counter; malformed state must be rejected.
    with self.test_session() as sess:
      reader = io_ops.IdentityReader("test_reader")
      produced = reader.num_records_produced()
      queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
      queue.enqueue_many([["X", "Y", "Z"]]).run()
      key, value = reader.read(queue)

      self._ExpectRead(sess, key, value, b"X")
      self.assertAllEqual(1, produced.eval())
      state = reader.serialize_state().eval()

      self._ExpectRead(sess, key, value, b"Y")
      self._ExpectRead(sess, key, value, b"Z")
      self.assertAllEqual(3, produced.eval())

      queue.enqueue_many([["Y", "Z"]]).run()
      queue.close().run()
      reader.restore_state(state).run()
      self.assertAllEqual(1, produced.eval())
      self._ExpectRead(sess, key, value, b"Y")
      self._ExpectRead(sess, key, value, b"Z")
      with self.assertRaisesOpError("is closed and has insufficient elements "
                                    "\\(requested 1, current size 0\\)"):
        sess.run([key, value])
      self.assertAllEqual(3, produced.eval())

      self.assertEqual(bytes, type(state))

      # Non-scalar state inputs are rejected at graph-construction time.
      with self.assertRaises(ValueError):
        reader.restore_state([])
      with self.assertRaises(ValueError):
        reader.restore_state([state, state])

      # Any corruption of the serialized bytes must fail to parse.
      with self.assertRaisesOpError(
          "Could not parse state for IdentityReader 'test_reader'"):
        reader.restore_state(state[1:]).run()
      with self.assertRaisesOpError(
          "Could not parse state for IdentityReader 'test_reader'"):
        reader.restore_state(state[:-1]).run()
      with self.assertRaisesOpError(
          "Could not parse state for IdentityReader 'test_reader'"):
        reader.restore_state(state + b"ExtraJunk").run()
      with self.assertRaisesOpError(
          "Could not parse state for IdentityReader 'test_reader'"):
        reader.restore_state(b"PREFIX" + state).run()
      with self.assertRaisesOpError(
          "Could not parse state for IdentityReader 'test_reader'"):
        reader.restore_state(b"BOGUS" + state[5:]).run()

  def testReset(self):
    # reset() zeroes both counters but does not touch the input queue.
    with self.test_session() as sess:
      reader = io_ops.IdentityReader("test_reader")
      work_completed = reader.num_work_units_completed()
      produced = reader.num_records_produced()
      queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
      queued_length = queue.size()
      key, value = reader.read(queue)

      queue.enqueue_many([["X", "Y", "Z"]]).run()
      self._ExpectRead(sess, key, value, b"X")
      self.assertLess(0, queued_length.eval())
      self.assertAllEqual(1, produced.eval())

      self._ExpectRead(sess, key, value, b"Y")
      self.assertLess(0, work_completed.eval())
      self.assertAllEqual(2, produced.eval())

      reader.reset().run()
      self.assertAllEqual(0, work_completed.eval())
      self.assertAllEqual(0, produced.eval())
      self.assertAllEqual(1, queued_length.eval())
      self._ExpectRead(sess, key, value, b"Z")

      queue.enqueue_many([["K", "L"]]).run()
      self._ExpectRead(sess, key, value, b"K")
class WholeFileReaderTest(test.TestCase):
  """Tests for WholeFileReader: key is the filename, value the full contents."""

  def setUp(self):
    super(WholeFileReaderTest, self).setUp()
    # Three small files with known contents in the test temp dir.
    self._filenames = [
        os.path.join(self.get_temp_dir(), "whole_file.%d.txt" % i)
        for i in range(3)
    ]
    self._content = [b"One\na\nb\n", b"Two\nC\nD", b"Three x, y, z"]
    for fn, c in zip(self._filenames, self._content):
      with open(fn, "wb") as h:
        h.write(c)

  def tearDown(self):
    for fn in self._filenames:
      os.remove(fn)
    super(WholeFileReaderTest, self).tearDown()

  def _ExpectRead(self, sess, key, value, index):
    # One read: key == filename bytes, value == entire file contents.
    k, v = sess.run([key, value])
    self.assertAllEqual(compat.as_bytes(self._filenames[index]), k)
    self.assertAllEqual(self._content[index], v)

  def testOneEpoch(self):
    with self.test_session() as sess:
      reader = io_ops.WholeFileReader("test_reader")
      queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
      queue.enqueue_many([self._filenames]).run()
      queue.close().run()
      key, value = reader.read(queue)

      self._ExpectRead(sess, key, value, 0)
      self._ExpectRead(sess, key, value, 1)
      self._ExpectRead(sess, key, value, 2)

      with self.assertRaisesOpError("is closed and has insufficient elements "
                                    "\\(requested 1, current size 0\\)"):
        sess.run([key, value])

  def testInfiniteEpochs(self):
    # Re-enqueueing filenames lets the reader cycle through them repeatedly.
    with self.test_session() as sess:
      reader = io_ops.WholeFileReader("test_reader")
      queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
      enqueue = queue.enqueue_many([self._filenames])
      key, value = reader.read(queue)

      enqueue.run()
      self._ExpectRead(sess, key, value, 0)
      self._ExpectRead(sess, key, value, 1)
      enqueue.run()
      self._ExpectRead(sess, key, value, 2)
      self._ExpectRead(sess, key, value, 0)
      self._ExpectRead(sess, key, value, 1)
      enqueue.run()
      self._ExpectRead(sess, key, value, 2)
      self._ExpectRead(sess, key, value, 0)
class TextLineReaderTest(test.TestCase):
  """Tests for TextLineReader: one record per line, keys are 'file:lineno'."""

  def setUp(self):
    super(TextLineReaderTest, self).setUp()
    self._num_files = 2
    self._num_lines = 5

  def _LineText(self, f, l):
    # Deterministic payload for line `l` of file `f`.
    return compat.as_bytes("%d: %d" % (f, l))

  def _CreateFiles(self, crlf=False):
    filenames = []
    for i in range(self._num_files):
      fn = os.path.join(self.get_temp_dir(), "text_line.%d.txt" % i)
      filenames.append(fn)
      with open(fn, "wb") as f:
        for j in range(self._num_lines):
          f.write(self._LineText(i, j))
          # Always include a newline after the record unless it is
          # at the end of the file, in which case we include it sometimes.
          if j + 1 != self._num_lines or i == 0:
            f.write(b"\r\n" if crlf else b"\n")
    return filenames

  def _testOneEpoch(self, files):
    with self.test_session() as sess:
      reader = io_ops.TextLineReader(name="test_reader")
      queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
      key, value = reader.read(queue)

      queue.enqueue_many([files]).run()
      queue.close().run()
      for i in range(self._num_files):
        for j in range(self._num_lines):
          k, v = sess.run([key, value])
          # Keys use 1-based line numbers.
          self.assertAllEqual("%s:%d" % (files[i], j + 1), compat.as_text(k))
          self.assertAllEqual(self._LineText(i, j), v)

      with self.assertRaisesOpError("is closed and has insufficient elements "
                                    "\\(requested 1, current size 0\\)"):
        k, v = sess.run([key, value])

  def testOneEpochLF(self):
    self._testOneEpoch(self._CreateFiles(crlf=False))

  def testOneEpochCRLF(self):
    self._testOneEpoch(self._CreateFiles(crlf=True))

  def testSkipHeaderLines(self):
    # skip_header_lines=1 drops the first line of each file.
    files = self._CreateFiles()
    with self.test_session() as sess:
      reader = io_ops.TextLineReader(skip_header_lines=1, name="test_reader")
      queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
      key, value = reader.read(queue)

      queue.enqueue_many([files]).run()
      queue.close().run()
      for i in range(self._num_files):
        for j in range(self._num_lines - 1):
          k, v = sess.run([key, value])
          self.assertAllEqual("%s:%d" % (files[i], j + 2), compat.as_text(k))
          self.assertAllEqual(self._LineText(i, j + 1), v)

      with self.assertRaisesOpError("is closed and has insufficient elements "
                                    "\\(requested 1, current size 0\\)"):
        k, v = sess.run([key, value])
class FixedLengthRecordReaderTest(test.TestCase):
  """Tests for FixedLengthRecordReader, plain and GZIP/ZLIB-encoded.

  File layout used throughout: a header of `_header_bytes` 'H's, records of
  `_record_bytes` bytes (optionally separated by gap bytes or overlapped via
  `hop_bytes`), and a footer of `_footer_bytes` 'F's.
  """

  def setUp(self):
    super(FixedLengthRecordReaderTest, self).setUp()
    self._num_files = 2
    self._header_bytes = 5
    self._record_bytes = 3
    self._footer_bytes = 2
    self._hop_bytes = 2

  def _Record(self, f, r):
    # Record r of file f: the digit f*2+r repeated record_bytes times.
    return compat.as_bytes(str(f * 2 + r) * self._record_bytes)

  def _OverlappedRecord(self, f, r):
    # Expected record when records overlap: bytes [r*hop, r*hop+record_bytes).
    record_str = "".join([
        str(i)[0]
        for i in range(r * self._hop_bytes,
                       r * self._hop_bytes + self._record_bytes)
    ])
    return compat.as_bytes(record_str)

  # gap_bytes=hop_bytes-record_bytes
  def _CreateFiles(self, num_records, gap_bytes):
    filenames = []
    for i in range(self._num_files):
      fn = os.path.join(self.get_temp_dir(), "fixed_length_record.%d.txt" % i)
      filenames.append(fn)
      with open(fn, "wb") as f:
        f.write(b"H" * self._header_bytes)
        if num_records > 0:
          f.write(self._Record(i, 0))
        for j in range(1, num_records):
          if gap_bytes > 0:
            f.write(b"G" * gap_bytes)
          f.write(self._Record(i, j))
        f.write(b"F" * self._footer_bytes)
    return filenames

  def _CreateOverlappedRecordFiles(self, num_overlapped_records):
    filenames = []
    for i in range(self._num_files):
      fn = os.path.join(self.get_temp_dir(),
                        "fixed_length_overlapped_record.%d.txt" % i)
      filenames.append(fn)
      with open(fn, "wb") as f:
        f.write(b"H" * self._header_bytes)
        if num_overlapped_records > 0:
          # One contiguous digit run; overlapping windows are read out of it.
          all_records_str = "".join([
              str(i)[0]
              for i in range(self._record_bytes + self._hop_bytes *
                             (num_overlapped_records - 1))
          ])
          f.write(compat.as_bytes(all_records_str))
        f.write(b"F" * self._footer_bytes)
    return filenames

  # gap_bytes=hop_bytes-record_bytes
  def _CreateGzipFiles(self, num_records, gap_bytes):
    # Same layout as _CreateFiles, but gzip-compressed on disk.
    filenames = []
    for i in range(self._num_files):
      fn = os.path.join(self.get_temp_dir(), "fixed_length_record.%d.txt" % i)
      filenames.append(fn)
      with gzip.GzipFile(fn, "wb") as f:
        f.write(b"H" * self._header_bytes)
        if num_records > 0:
          f.write(self._Record(i, 0))
        for j in range(1, num_records):
          if gap_bytes > 0:
            f.write(b"G" * gap_bytes)
          f.write(self._Record(i, j))
        f.write(b"F" * self._footer_bytes)
    return filenames

  # gap_bytes=hop_bytes-record_bytes
  def _CreateZlibFiles(self, num_records, gap_bytes):
    # Same layout as _CreateFiles, then zlib-compressed via a .tmp staging file.
    filenames = []
    for i in range(self._num_files):
      fn = os.path.join(self.get_temp_dir(), "fixed_length_record.%d.txt" % i)
      filenames.append(fn)
      with open(fn+".tmp", "wb") as f:
        f.write(b"H" * self._header_bytes)
        if num_records > 0:
          f.write(self._Record(i, 0))
        for j in range(1, num_records):
          if gap_bytes > 0:
            f.write(b"G" * gap_bytes)
          f.write(self._Record(i, j))
        f.write(b"F" * self._footer_bytes)
      with open(fn+".tmp", "rb") as f:
        cdata = zlib.compress(f.read())
        with open(fn, "wb") as zf:
          zf.write(cdata)
    return filenames

  def _CreateGzipOverlappedRecordFiles(self, num_overlapped_records):
    # Gzip variant of _CreateOverlappedRecordFiles.
    filenames = []
    for i in range(self._num_files):
      fn = os.path.join(self.get_temp_dir(),
                        "fixed_length_overlapped_record.%d.txt" % i)
      filenames.append(fn)
      with gzip.GzipFile(fn, "wb") as f:
        f.write(b"H" * self._header_bytes)
        if num_overlapped_records > 0:
          all_records_str = "".join([
              str(i)[0]
              for i in range(self._record_bytes + self._hop_bytes *
                             (num_overlapped_records - 1))
          ])
          f.write(compat.as_bytes(all_records_str))
        f.write(b"F" * self._footer_bytes)
    return filenames

  def _CreateZlibOverlappedRecordFiles(self, num_overlapped_records):
    # Zlib variant of _CreateOverlappedRecordFiles (staged via .tmp file).
    filenames = []
    for i in range(self._num_files):
      fn = os.path.join(self.get_temp_dir(),
                        "fixed_length_overlapped_record.%d.txt" % i)
      filenames.append(fn)
      with open(fn+".tmp", "wb") as f:
        f.write(b"H" * self._header_bytes)
        if num_overlapped_records > 0:
          all_records_str = "".join([
              str(i)[0]
              for i in range(self._record_bytes + self._hop_bytes *
                             (num_overlapped_records - 1))
          ])
          f.write(compat.as_bytes(all_records_str))
        f.write(b"F" * self._footer_bytes)
      with open(fn+".tmp", "rb") as f:
        cdata = zlib.compress(f.read())
        with open(fn, "wb") as zf:
          zf.write(cdata)
    return filenames

  # gap_bytes=hop_bytes-record_bytes
  def _TestOneEpoch(self, files, num_records, gap_bytes, encoding=None):
    hop_bytes = 0 if gap_bytes == 0 else self._record_bytes + gap_bytes
    with self.test_session() as sess:
      reader = io_ops.FixedLengthRecordReader(
          header_bytes=self._header_bytes,
          record_bytes=self._record_bytes,
          footer_bytes=self._footer_bytes,
          hop_bytes=hop_bytes,
          encoding=encoding,
          name="test_reader")
      queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
      key, value = reader.read(queue)

      queue.enqueue_many([files]).run()
      queue.close().run()
      for i in range(self._num_files):
        for j in range(num_records):
          k, v = sess.run([key, value])
          # Keys are 'filename:record_index' (0-based).
          self.assertAllEqual("%s:%d" % (files[i], j), compat.as_text(k))
          self.assertAllEqual(self._Record(i, j), v)

      with self.assertRaisesOpError("is closed and has insufficient elements "
                                    "\\(requested 1, current size 0\\)"):
        k, v = sess.run([key, value])

  def _TestOneEpochWithHopBytes(self, files, num_overlapped_records, encoding=None):
    with self.test_session() as sess:
      reader = io_ops.FixedLengthRecordReader(
          header_bytes=self._header_bytes,
          record_bytes=self._record_bytes,
          footer_bytes=self._footer_bytes,
          hop_bytes=self._hop_bytes,
          encoding=encoding,
          name="test_reader")
      queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
      key, value = reader.read(queue)

      queue.enqueue_many([files]).run()
      queue.close().run()
      for i in range(self._num_files):
        for j in range(num_overlapped_records):
          k, v = sess.run([key, value])
          # NOTE(review): leftover debug print; consider removing.
          print(v)
          self.assertAllEqual("%s:%d" % (files[i], j), compat.as_text(k))
          self.assertAllEqual(self._OverlappedRecord(i, j), v)

      with self.assertRaisesOpError("is closed and has insufficient elements "
                                    "\\(requested 1, current size 0\\)"):
        k, v = sess.run([key, value])

  def testOneEpoch(self):
    for num_records in [0, 7]:
      # gap_bytes=0: hop_bytes=0
      # gap_bytes=1: hop_bytes=record_bytes+1
      for gap_bytes in [0, 1]:
        files = self._CreateFiles(num_records, gap_bytes)
        self._TestOneEpoch(files, num_records, gap_bytes)

  def testGzipOneEpoch(self):
    for num_records in [0, 7]:
      # gap_bytes=0: hop_bytes=0
      # gap_bytes=1: hop_bytes=record_bytes+1
      for gap_bytes in [0, 1]:
        files = self._CreateGzipFiles(num_records, gap_bytes)
        self._TestOneEpoch(files, num_records, gap_bytes, encoding="GZIP")

  def testZlibOneEpoch(self):
    for num_records in [0, 7]:
      # gap_bytes=0: hop_bytes=0
      # gap_bytes=1: hop_bytes=record_bytes+1
      for gap_bytes in [0, 1]:
        files = self._CreateZlibFiles(num_records, gap_bytes)
        self._TestOneEpoch(files, num_records, gap_bytes, encoding="ZLIB")

  def testOneEpochWithHopBytes(self):
    for num_overlapped_records in [0, 2]:
      files = self._CreateOverlappedRecordFiles(num_overlapped_records)
      self._TestOneEpochWithHopBytes(files, num_overlapped_records)

  def testGzipOneEpochWithHopBytes(self):
    for num_overlapped_records in [0, 2]:
      # NOTE(review): stray trailing comma in this call; harmless but odd.
      files = self._CreateGzipOverlappedRecordFiles(num_overlapped_records, )
      self._TestOneEpochWithHopBytes(files, num_overlapped_records, encoding="GZIP")

  def testZlibOneEpochWithHopBytes(self):
    for num_overlapped_records in [0, 2]:
      files = self._CreateZlibOverlappedRecordFiles(num_overlapped_records)
      self._TestOneEpochWithHopBytes(files, num_overlapped_records, encoding="ZLIB")
class TFRecordReaderTest(test.TestCase):
  """Tests for TFRecordReader, including read_up_to and compressed inputs."""

  def setUp(self):
    super(TFRecordReaderTest, self).setUp()
    self._num_files = 2
    self._num_records = 7

  def _Record(self, f, r):
    return compat.as_bytes("Record %d of file %d" % (r, f))

  def _CreateFiles(self):
    # NOTE(review): the writer is never closed/flushed explicitly here;
    # presumably finalization happens when it is garbage-collected -- confirm.
    filenames = []
    for i in range(self._num_files):
      fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i)
      filenames.append(fn)
      writer = tf_record.TFRecordWriter(fn)
      for j in range(self._num_records):
        writer.write(self._Record(i, j))
    return filenames

  def testOneEpoch(self):
    files = self._CreateFiles()
    with self.test_session() as sess:
      reader = io_ops.TFRecordReader(name="test_reader")
      queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
      key, value = reader.read(queue)

      queue.enqueue_many([files]).run()
      queue.close().run()
      for i in range(self._num_files):
        for j in range(self._num_records):
          k, v = sess.run([key, value])
          self.assertTrue(compat.as_text(k).startswith("%s:" % files[i]))
          self.assertAllEqual(self._Record(i, j), v)

      with self.assertRaisesOpError("is closed and has insufficient elements "
                                    "\\(requested 1, current size 0\\)"):
        k, v = sess.run([key, value])

  def testReadUpTo(self):
    files = self._CreateFiles()
    with self.test_session() as sess:
      reader = io_ops.TFRecordReader(name="test_reader")
      queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
      batch_size = 3
      key, value = reader.read_up_to(queue, batch_size)

      queue.enqueue_many([files]).run()
      queue.close().run()
      num_k = 0
      num_v = 0

      while True:
        try:
          k, v = sess.run([key, value])
          # Test reading *up to* batch_size records
          self.assertLessEqual(len(k), batch_size)
          self.assertLessEqual(len(v), batch_size)
          num_k += len(k)
          num_v += len(v)
        except errors_impl.OutOfRangeError:
          break

      # Test that we have read everything
      self.assertEqual(self._num_files * self._num_records, num_k)
      self.assertEqual(self._num_files * self._num_records, num_v)

  def testReadZlibFiles(self):
    # Compress plain record files with raw zlib and read with ZLIB option.
    files = self._CreateFiles()
    zlib_files = []
    for i, fn in enumerate(files):
      with open(fn, "rb") as f:
        cdata = zlib.compress(f.read())

        zfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.z" % i)
        with open(zfn, "wb") as f:
          f.write(cdata)
        zlib_files.append(zfn)

    with self.test_session() as sess:
      options = tf_record.TFRecordOptions(TFRecordCompressionType.ZLIB)
      reader = io_ops.TFRecordReader(name="test_reader", options=options)
      queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
      key, value = reader.read(queue)

      queue.enqueue_many([zlib_files]).run()
      queue.close().run()
      for i in range(self._num_files):
        for j in range(self._num_records):
          k, v = sess.run([key, value])
          self.assertTrue(compat.as_text(k).startswith("%s:" % zlib_files[i]))
          self.assertAllEqual(self._Record(i, j), v)

  def testReadGzipFiles(self):
    # Re-compress plain record files with gzip and read with GZIP option.
    files = self._CreateFiles()
    gzip_files = []
    for i, fn in enumerate(files):
      with open(fn, "rb") as f:
        cdata = f.read()

        zfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.gz" % i)
        with gzip.GzipFile(zfn, "wb") as f:
          f.write(cdata)
        gzip_files.append(zfn)

    with self.test_session() as sess:
      options = tf_record.TFRecordOptions(TFRecordCompressionType.GZIP)
      reader = io_ops.TFRecordReader(name="test_reader", options=options)
      queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
      key, value = reader.read(queue)

      queue.enqueue_many([gzip_files]).run()
      queue.close().run()
      for i in range(self._num_files):
        for j in range(self._num_records):
          k, v = sess.run([key, value])
          self.assertTrue(compat.as_text(k).startswith("%s:" % gzip_files[i]))
          self.assertAllEqual(self._Record(i, j), v)
class TFRecordWriterZlibTest(test.TestCase):
  """Tests for TFRecordWriter with ZLIB compression and zlib interop."""

  def setUp(self):
    super(TFRecordWriterZlibTest, self).setUp()
    self._num_files = 2
    self._num_records = 7

  def _Record(self, f, r):
    return compat.as_bytes("Record %d of file %d" % (r, f))

  def _CreateFiles(self):
    # Write ZLIB-compressed TFRecord files directly via writer options.
    filenames = []
    for i in range(self._num_files):
      fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i)
      filenames.append(fn)
      options = tf_record.TFRecordOptions(
          compression_type=TFRecordCompressionType.ZLIB)
      writer = tf_record.TFRecordWriter(fn, options=options)
      for j in range(self._num_records):
        writer.write(self._Record(i, j))
      writer.close()
      del writer
    return filenames

  def _WriteRecordsToFile(self, records, name="tf_record"):
    # Write uncompressed records; returns the file path.
    fn = os.path.join(self.get_temp_dir(), name)
    writer = tf_record.TFRecordWriter(fn, options=None)
    for r in records:
      writer.write(r)
    writer.close()
    del writer
    return fn

  def _ZlibCompressFile(self, infile, name="tfrecord.z"):
    # zlib compress the file and write compressed contents to file.
    with open(infile, "rb") as f:
      cdata = zlib.compress(f.read())

    zfn = os.path.join(self.get_temp_dir(), name)
    with open(zfn, "wb") as f:
      f.write(cdata)
    return zfn

  def testOneEpoch(self):
    files = self._CreateFiles()
    with self.test_session() as sess:
      options = tf_record.TFRecordOptions(
          compression_type=TFRecordCompressionType.ZLIB)
      reader = io_ops.TFRecordReader(name="test_reader", options=options)
      queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
      key, value = reader.read(queue)

      queue.enqueue_many([files]).run()
      queue.close().run()
      for i in range(self._num_files):
        for j in range(self._num_records):
          k, v = sess.run([key, value])
          self.assertTrue(compat.as_text(k).startswith("%s:" % files[i]))
          self.assertAllEqual(self._Record(i, j), v)

      with self.assertRaisesOpError("is closed and has insufficient elements "
                                    "\\(requested 1, current size 0\\)"):
        k, v = sess.run([key, value])

  def testZLibFlushRecord(self):
    # Re-compress one record byte-by-byte with repeated Z_FULL_FLUSH blocks;
    # the reader must tolerate extra flush points and trailing blocks.
    fn = self._WriteRecordsToFile([b"small record"], "small_record")
    with open(fn, "rb") as h:
      buff = h.read()

    # creating more blocks and trailing blocks shouldn't break reads
    compressor = zlib.compressobj(9, zlib.DEFLATED, zlib.MAX_WBITS)

    output = b""
    for c in buff:
      if isinstance(c, int):
        # Python 3 iterates bytes as ints; convert back to a single byte.
        c = six.int2byte(c)
      output += compressor.compress(c)
      output += compressor.flush(zlib.Z_FULL_FLUSH)

    output += compressor.flush(zlib.Z_FULL_FLUSH)
    output += compressor.flush(zlib.Z_FULL_FLUSH)
    output += compressor.flush(zlib.Z_FINISH)

    # overwrite the original file with the compressed data
    with open(fn, "wb") as h:
      h.write(output)

    with self.test_session() as sess:
      options = tf_record.TFRecordOptions(
          compression_type=TFRecordCompressionType.ZLIB)
      reader = io_ops.TFRecordReader(name="test_reader", options=options)
      queue = data_flow_ops.FIFOQueue(1, [dtypes.string], shapes=())
      key, value = reader.read(queue)
      queue.enqueue(fn).run()
      queue.close().run()
      k, v = sess.run([key, value])
      self.assertTrue(compat.as_text(k).startswith("%s:" % fn))
      self.assertAllEqual(b"small record", v)

  def testZlibReadWrite(self):
    """Verify that files produced are zlib compatible."""
    original = [b"foo", b"bar"]
    fn = self._WriteRecordsToFile(original, "zlib_read_write.tfrecord")
    zfn = self._ZlibCompressFile(fn, "zlib_read_write.tfrecord.z")

    # read the compressed contents and verify.
    actual = []
    for r in tf_record.tf_record_iterator(
        zfn,
        options=tf_record.TFRecordOptions(
            tf_record.TFRecordCompressionType.ZLIB)):
      actual.append(r)
    self.assertEqual(actual, original)
def testZlibReadWriteLarge(self):
"""Verify that writing large contents also works."""
# Make it large (about 5MB)
original = [_TEXT * 10240]
fn = self._WriteRecordsToFile(original, "zlib_read_write_large.tfrecord")
zfn = self._ZlibCompressFile(fn, "zlib_read_write_large.tfrecord.z")
# read the compressed contents and verify.
actual = []
for r in tf_record.tf_record_iterator(
zfn,
options=tf_record.TFRecordOptions(
tf_record.TFRecordCompressionType.ZLIB)):
actual.append(r)
self.assertEqual(actual, original)
def testGzipReadWrite(self):
"""Verify that files produced are gzip compatible."""
original = [b"foo", b"bar"]
fn = self._WriteRecordsToFile(original, "gzip_read_write.tfrecord")
# gzip compress the file and write compressed contents to file.
with open(fn, "rb") as f:
cdata = f.read()
gzfn = os.path.join(self.get_temp_dir(), "tf_record.gz")
with gzip.GzipFile(gzfn, "wb") as f:
f.write(cdata)
actual = []
for r in tf_record.tf_record_iterator(
gzfn, options=tf_record.TFRecordOptions(TFRecordCompressionType.GZIP)):
actual.append(r)
self.assertEqual(actual, original)
class TFRecordIteratorTest(test.TestCase):
  """Tests for the pure-Python `tf_record.tf_record_iterator`."""

  def setUp(self):
    super(TFRecordIteratorTest, self).setUp()
    # Number of records written by testIterator's fixture.
    self._num_records = 7

  def _Record(self, r):
    # Deterministic payload for record `r`.
    return compat.as_bytes("Record %d" % r)

  def _WriteCompressedRecordsToFile(
      self,
      records,
      name="tfrecord.z",
      compression_type=tf_record.TFRecordCompressionType.ZLIB):
    """Writes `records` to a compressed TFRecord file; returns its path."""
    fn = os.path.join(self.get_temp_dir(), name)
    options = tf_record.TFRecordOptions(compression_type=compression_type)
    writer = tf_record.TFRecordWriter(fn, options=options)
    for r in records:
      writer.write(r)
    writer.close()
    # Drop the reference so the writer (and its file handle) is released.
    del writer
    return fn

  def _ZlibDecompressFile(self, infile, name="tfrecord", wbits=zlib.MAX_WBITS):
    """Decompresses `infile` with raw zlib; returns the decompressed path."""
    with open(infile, "rb") as f:
      cdata = zlib.decompress(f.read(), wbits)
    zfn = os.path.join(self.get_temp_dir(), name)
    with open(zfn, "wb") as f:
      f.write(cdata)
    return zfn

  def testIterator(self):
    """Iterator yields every record, then raises StopIteration."""
    fn = self._WriteCompressedRecordsToFile(
        [self._Record(i) for i in range(self._num_records)],
        "compressed_records")
    options = tf_record.TFRecordOptions(
        compression_type=TFRecordCompressionType.ZLIB)
    reader = tf_record.tf_record_iterator(fn, options)
    for i in range(self._num_records):
      record = next(reader)
      self.assertAllEqual(self._Record(i), record)
    with self.assertRaises(StopIteration):
      record = next(reader)

  def testWriteZlibRead(self):
    """Verify compression with TFRecordWriter is zlib library compatible."""
    original = [b"foo", b"bar"]
    fn = self._WriteCompressedRecordsToFile(original,
                                            "write_zlib_read.tfrecord.z")
    zfn = self._ZlibDecompressFile(fn, "write_zlib_read.tfrecord")
    actual = []
    for r in tf_record.tf_record_iterator(zfn):
      actual.append(r)
    self.assertEqual(actual, original)

  def testWriteZlibReadLarge(self):
    """Verify compression for large records is zlib library compatible."""
    # Make it large (about 5MB)
    original = [_TEXT * 10240]
    fn = self._WriteCompressedRecordsToFile(original,
                                            "write_zlib_read_large.tfrecord.z")
    zfn = self._ZlibDecompressFile(fn, "write_zlib_read_large.tf_record")
    actual = []
    for r in tf_record.tf_record_iterator(zfn):
      actual.append(r)
    self.assertEqual(actual, original)

  def testWriteGzipRead(self):
    """Verify compression with TFRecordWriter is gzip library compatible."""
    original = [b"foo", b"bar"]
    fn = self._WriteCompressedRecordsToFile(
        original,
        "write_gzip_read.tfrecord.gz",
        compression_type=TFRecordCompressionType.GZIP)
    # Decompress with the stdlib gzip module and re-read uncompressed.
    with gzip.GzipFile(fn, "rb") as f:
      cdata = f.read()
    zfn = os.path.join(self.get_temp_dir(), "tf_record")
    with open(zfn, "wb") as f:
      f.write(cdata)
    actual = []
    for r in tf_record.tf_record_iterator(zfn):
      actual.append(r)
    self.assertEqual(actual, original)

  def testBadFile(self):
    """Verify that tf_record_iterator throws an exception on bad TFRecords."""
    fn = os.path.join(self.get_temp_dir(), "bad_file")
    with tf_record.TFRecordWriter(fn) as writer:
      writer.write(b"123")
    fn_truncated = os.path.join(self.get_temp_dir(), "bad_file_truncated")
    with open(fn, "rb") as f:
      with open(fn_truncated, "wb") as f2:
        # DataLossError requires that we've written the header, so this must
        # be at least 12 bytes.
        f2.write(f.read(14))
    with self.assertRaises(errors_impl.DataLossError):
      for _ in tf_record.tf_record_iterator(fn_truncated):
        pass
class AsyncReaderTest(test.TestCase):
  """Checks that blocked reader ops do not deadlock the session."""

  def testNoDeadlockFromQueue(self):
    """Tests that reading does not block main execution threads."""
    # Force single-threaded op scheduling so a deadlock would be observable.
    config = config_pb2.ConfigProto(
        inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
    with self.test_session(config=config) as sess:
      thread_data_t = collections.namedtuple("thread_data_t",
                                             ["thread", "queue", "output"])
      thread_data = []
      # Create different readers, each with its own queue.
      for i in range(3):
        queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
        reader = io_ops.TextLineReader()
        _, line = reader.read(queue)
        output = []
        t = threading.Thread(
            target=AsyncReaderTest._RunSessionAndSave,
            args=(sess, [line], output))
        thread_data.append(thread_data_t(t, queue, output))
      # Start all readers. They are all blocked waiting for queue entries.
      sess.run(variables.global_variables_initializer())
      for d in thread_data:
        d.thread.start()
      # Unblock the readers.
      # NOTE(review): iteration is in *reverse* creation order — presumably to
      # show completion does not depend on earlier readers; confirm intent.
      for i, d in enumerate(reversed(thread_data)):
        fname = os.path.join(self.get_temp_dir(), "deadlock.%s.txt" % i)
        with open(fname, "wb") as f:
          f.write(("file-%s" % i).encode())
        d.queue.enqueue_many([[fname]]).run()
        d.thread.join()
        self.assertEqual([[("file-%s" % i).encode()]], d.output)

  @staticmethod
  def _RunSessionAndSave(sess, args, output):
    # Runs in a worker thread; appends the fetched values to `output`.
    output.append(sess.run(args))
class LMDBReaderTest(test.TestCase):
  """Tests for LMDBReader against a 10-entry test database."""

  def setUp(self):
    super(LMDBReaderTest, self).setUp()
    # Copy database out because we need the path to be writable to use locks.
    path = os.path.join(prefix_path, "lmdb", "testdata", "data.mdb")
    self.db_path = os.path.join(self.get_temp_dir(), "data.mdb")
    shutil.copy(path, self.db_path)

  def testReadFromFile(self):
    """Reads all entries: keys "0".."9" map to values "a".."j"."""
    with self.test_session() as sess:
      reader = io_ops.LMDBReader(name="test_read_from_file")
      queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
      key, value = reader.read(queue)
      queue.enqueue([self.db_path]).run()
      queue.close().run()
      for i in range(10):
        k, v = sess.run([key, value])
        self.assertAllEqual(compat.as_bytes(k), compat.as_bytes(str(i)))
        self.assertAllEqual(
            compat.as_bytes(v), compat.as_bytes(str(chr(ord("a") + i))))
      # Database exhausted and queue closed: one more read must fail.
      with self.assertRaisesOpError("is closed and has insufficient elements "
                                    "\\(requested 1, current size 0\\)"):
        k, v = sess.run([key, value])

  def testReadFromSameFile(self):
    """Two readers on the same database yield identical key/value streams."""
    with self.test_session() as sess:
      reader1 = io_ops.LMDBReader(name="test_read_from_same_file1")
      reader2 = io_ops.LMDBReader(name="test_read_from_same_file2")
      filename_queue = input_lib.string_input_producer(
          [self.db_path], num_epochs=None)
      key1, value1 = reader1.read(filename_queue)
      key2, value2 = reader2.read(filename_queue)
      coord = coordinator.Coordinator()
      threads = queue_runner_impl.start_queue_runners(sess, coord=coord)
      for _ in range(3):
        for _ in range(10):
          k1, v1, k2, v2 = sess.run([key1, value1, key2, value2])
          self.assertAllEqual(compat.as_bytes(k1), compat.as_bytes(k2))
          self.assertAllEqual(compat.as_bytes(v1), compat.as_bytes(v2))
      coord.request_stop()
      coord.join(threads)

  def testReadFromFolder(self):
    # NOTE(review): despite the name, this enqueues the same single .mdb file
    # path as testReadFromFile — it never points the reader at a directory.
    # Confirm whether a folder path was intended here.
    with self.test_session() as sess:
      reader = io_ops.LMDBReader(name="test_read_from_folder")
      queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
      key, value = reader.read(queue)
      queue.enqueue([self.db_path]).run()
      queue.close().run()
      for i in range(10):
        k, v = sess.run([key, value])
        self.assertAllEqual(compat.as_bytes(k), compat.as_bytes(str(i)))
        self.assertAllEqual(
            compat.as_bytes(v), compat.as_bytes(str(chr(ord("a") + i))))
      with self.assertRaisesOpError("is closed and has insufficient elements "
                                    "\\(requested 1, current size 0\\)"):
        k, v = sess.run([key, value])

  def testReadFromFileRepeatedly(self):
    """With num_epochs=None the reader cycles through the database forever."""
    with self.test_session() as sess:
      reader = io_ops.LMDBReader(name="test_read_from_file_repeated")
      filename_queue = input_lib.string_input_producer(
          [self.db_path], num_epochs=None)
      key, value = reader.read(filename_queue)
      coord = coordinator.Coordinator()
      threads = queue_runner_impl.start_queue_runners(sess, coord=coord)
      # Iterate over the lmdb 3 times.
      for _ in range(3):
        # Go over all 10 records each time.
        for j in range(10):
          k, v = sess.run([key, value])
          self.assertAllEqual(compat.as_bytes(k), compat.as_bytes(str(j)))
          self.assertAllEqual(
              compat.as_bytes(v), compat.as_bytes(str(chr(ord("a") + j))))
      coord.request_stop()
      coord.join(threads)
# Standard TensorFlow test entry point: runs all TestCase classes above.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
liangazhou/django-rdp | packages/PyDev/plugins/org.python.pydev.jython_4.4.0.201510052309/Lib/encodings/cp437.py | 593 | 34820 | """ Python Character Mapping Codec cp437 generated from 'VENDORS/MICSFT/PC/CP437.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless cp437 codec built on the generic charmap machinery."""

    def encode(self, input, errors='strict'):
        """Encode text as cp437 bytes; returns (bytes, length consumed)."""
        result = codecs.charmap_encode(input, errors, encoding_map)
        return result

    def decode(self, input, errors='strict'):
        """Decode cp437 bytes to text; returns (text, length consumed)."""
        result = codecs.charmap_decode(input, errors, decoding_table)
        return result
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental cp437 encoder; the charmap encoding keeps no state."""

    def encode(self, input, final=False):
        # charmap_encode returns (bytes, consumed); only the bytes matter here.
        encoded, _consumed = codecs.charmap_encode(input, self.errors, encoding_map)
        return encoded
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental cp437 decoder; the charmap decoding keeps no state."""

    def decode(self, input, final=False):
        # charmap_decode returns (text, consumed); only the text matters here.
        decoded, _consumed = codecs.charmap_decode(input, self.errors, decoding_table)
        return decoded
class StreamWriter(Codec,codecs.StreamWriter):
    # Inherits encode() from Codec and stream buffering from codecs.StreamWriter.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Inherits decode() from Codec and stream buffering from codecs.StreamReader.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo used to register the 'cp437' encoding."""
    # One shared codec instance supplies both bound methods; the incremental
    # and stream classes are passed as classes, per the codec registry API.
    codec = Codec()
    return codecs.CodecInfo(
        name='cp437',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x0098: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00a2, # CENT SIGN
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00a5, # YEN SIGN
0x009e: 0x20a7, # PESETA SIGN
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x2310, # REVERSED NOT SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
u'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
u'\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xec' # 0x008d -> LATIN SMALL LETTER I WITH GRAVE
u'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
u'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
u'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE
u'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
u'\xff' # 0x0098 -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xa2' # 0x009b -> CENT SIGN
u'\xa3' # 0x009c -> POUND SIGN
u'\xa5' # 0x009d -> YEN SIGN
u'\u20a7' # 0x009e -> PESETA SIGN
u'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
u'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
u'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
u'\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
u'\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR
u'\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR
u'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
u'\u2310' # 0x00a9 -> REVERSED NOT SIGN
u'\xac' # 0x00aa -> NOT SIGN
u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
u'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
u'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
u'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
u'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
u'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
u'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
u'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
u'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
u'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
u'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
u'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
u'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
u'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
u'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
u'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\u258c' # 0x00dd -> LEFT HALF BLOCK
u'\u2590' # 0x00de -> RIGHT HALF BLOCK
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
u'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
u'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
u'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
u'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
u'\xb5' # 0x00e6 -> MICRO SIGN
u'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
u'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
u'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
u'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
u'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
u'\u221e' # 0x00ec -> INFINITY
u'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
u'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
u'\u2229' # 0x00ef -> INTERSECTION
u'\u2261' # 0x00f0 -> IDENTICAL TO
u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
u'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
u'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
u'\u2320' # 0x00f4 -> TOP HALF INTEGRAL
u'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
u'\xf7' # 0x00f6 -> DIVISION SIGN
u'\u2248' # 0x00f7 -> ALMOST EQUAL TO
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\u2219' # 0x00f9 -> BULLET OPERATOR
u'\xb7' # 0x00fa -> MIDDLE DOT
u'\u221a' # 0x00fb -> SQUARE ROOT
u'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
0x00a2: 0x009b, # CENT SIGN
0x00a3: 0x009c, # POUND SIGN
0x00a5: 0x009d, # YEN SIGN
0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b5: 0x00e6, # MICRO SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00bf: 0x00a8, # INVERTED QUESTION MARK
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ec: 0x008d, # LATIN SMALL LETTER I WITH GRAVE
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x00ff: 0x0098, # LATIN SMALL LETTER Y WITH DIAERESIS
0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
0x03c0: 0x00e3, # GREEK SMALL LETTER PI
0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
0x20a7: 0x009e, # PESETA SIGN
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x221e: 0x00ec, # INFINITY
0x2229: 0x00ef, # INTERSECTION
0x2248: 0x00f7, # ALMOST EQUAL TO
0x2261: 0x00f0, # IDENTICAL TO
0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
0x2310: 0x00a9, # REVERSED NOT SIGN
0x2320: 0x00f4, # TOP HALF INTEGRAL
0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| apache-2.0 |
qinjian623/emacs-config | .python-environments/default/lib/python2.7/site-packages/pkg_resources.py | 134 | 99605 | """
Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
import sys
import os
import time
import re
import imp
import zipfile
import zipimport
import warnings
import stat
import functools
import pkgutil
import token
import symbol
import operator
import platform
from pkgutil import get_importer
try:
    # Python 2 location first, then the Python 3 location.
    from urlparse import urlparse, urlunparse
except ImportError:
    from urllib.parse import urlparse, urlunparse
try:
    # ``frozenset`` is a builtin from Python 2.4 on; fall back to the
    # legacy ``sets`` module otherwise.
    frozenset
except NameError:
    from sets import ImmutableSet as frozenset
try:
    # Python 2 branch: ``basestring`` exists and iterators use ``.next()``.
    basestring
    next = lambda o: o.next()
    from cStringIO import StringIO as BytesIO
except NameError:
    # Python 3 branch: emulate the missing 2.x names.
    basestring = str
    from io import BytesIO
    def execfile(fn, globs=None, locs=None):
        # Re-implementation of the Python 2 builtin removed in Python 3.
        if globs is None:
            globs = globals()
        if locs is None:
            locs = globs
        exec(compile(open(fn).read(), fn, 'exec'), globs, locs)
# capture these to bypass sandboxing
from os import utime
try:
    from os import mkdir, rename, unlink
    WRITE_SUPPORT = True
except ImportError:
    # no write support, probably under GAE
    WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
# Avoid try/except due to potential problems with delayed import mechanisms.
if sys.version_info >= (3, 3) and sys.implementation.name == "cpython":
    import importlib._bootstrap as importlib_bootstrap
else:
    importlib_bootstrap = None
try:
    # Optional module; tolerate its absence on some implementations.
    import parser
except ImportError:
    pass
def _bypass_ensure_directory(name, mode=0x1FF):  # 0777
    """Sandbox-bypassing version of ensure_directory().

    Recursively creates the parent directory of `name` (and its parents)
    using the raw ``os.mkdir`` captured at import time.
    """
    if not WRITE_SUPPORT:
        raise IOError('"os.mkdir" not supported on this platform.')
    parent, leaf = split(name)
    # Nothing to do for a bare name, a root, or an already-existing parent.
    if not (parent and leaf) or isdir(parent):
        return
    _bypass_ensure_directory(parent)
    mkdir(parent, mode)
_state_vars = {}
def _declare_state(vartype, **kw):
g = globals()
for name, val in kw.items():
g[name] = val
_state_vars[name] = vartype
def __getstate__():
    """Snapshot every registered module-state variable.

    Each variable is captured via its type-specific getter (e.g.
    ``_sget_dict`` for variables declared as ``'dict'``).
    """
    module_globals = globals()
    return dict(
        (name, module_globals['_sget_' + kind](module_globals[name]))
        for name, kind in _state_vars.items()
    )
def __setstate__(state):
    """Restore module-state variables from a ``__getstate__`` snapshot."""
    module_globals = globals()
    for name, value in state.items():
        # Dispatch to the type-specific setter (e.g. _sset_dict).
        setter = module_globals['_sset_' + _state_vars[name]]
        setter(name, module_globals[name], value)
    return state
def _sget_dict(val):
    # State getter for 'dict' variables: snapshot via shallow copy.
    return val.copy()
def _sset_dict(key, ob, state):
    # State setter for 'dict' variables: replace contents in place so
    # existing references to the dict stay valid.
    ob.clear()
    ob.update(state)
def _sget_object(val):
    # State getter for 'object' variables: delegate to __getstate__.
    return val.__getstate__()
def _sset_object(key, ob, state):
    # State setter for 'object' variables: delegate to __setstate__.
    ob.__setstate__(state)
# 'none' variables are neither saved nor restored.
_sget_none = _sset_none = lambda *args: None
def get_supported_platform():
    """Return this platform's maximum compatible version.

    ``distutils.util.get_platform()`` normally reports the minimum Mac OS X
    version required to *use* built extensions; for compatibility checks we
    instead want the version we are *running*, so on darwin the version
    component is replaced with the live OS version.
    """
    plat = get_build_platform()
    match = macosVersionString.match(plat)
    if match is None or sys.platform != "darwin":
        return plat
    try:
        major_minor = '.'.join(_macosx_vers()[:2])
    except ValueError:
        # not Mac OS X
        return plat
    return 'macosx-%s-%s' % (major_minor, match.group(3))
# Explicit public API of the pkg_resources module.
__all__ = [
    # Basic resource access and distribution/entry point discovery
    'require', 'run_script', 'get_provider', 'get_distribution',
    'load_entry_point', 'get_entry_map', 'get_entry_info', 'iter_entry_points',
    'resource_string', 'resource_stream', 'resource_filename',
    'resource_listdir', 'resource_exists', 'resource_isdir',
    # Environmental control
    'declare_namespace', 'working_set', 'add_activation_listener',
    'find_distributions', 'set_extraction_path', 'cleanup_resources',
    'get_default_cache',
    # Primary implementation classes
    'Environment', 'WorkingSet', 'ResourceManager',
    'Distribution', 'Requirement', 'EntryPoint',
    # Exceptions
    'ResolutionError','VersionConflict','DistributionNotFound','UnknownExtra',
    'ExtractionError',
    # Parsing functions and string utilities
    'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
    'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
    'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
    # filesystem utilities
    'ensure_directory', 'normalize_path',
    # Distribution "precedence" constants
    'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
    # "Provider" interfaces, implementations, and registration/lookup APIs
    'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
    'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
    'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
    'register_finder', 'register_namespace_handler', 'register_loader_type',
    'fixup_namespace_packages', 'get_importer',
    # Deprecated/backward compatibility only
    'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
    """Abstract base for dependency resolution errors"""
    def __repr__(self):
        # e.g. ResolutionError('reason',) -- class name + raw args tuple.
        return '%s%r' % (self.__class__.__name__, self.args)
class VersionConflict(ResolutionError):
    """An already-installed version conflicts with the requested version"""
    # Raised e.g. by WorkingSet.find()/resolve() when the active
    # distribution for a project does not satisfy the requirement.
class DistributionNotFound(ResolutionError):
    """A requested distribution was not found"""
    # Raised by WorkingSet.resolve() when no environment or installer can
    # supply a requirement.
class UnknownExtra(ResolutionError):
    """Distribution doesn't have an "extra feature" of the given name"""
# Maps PEP 302 loader types to IResourceProvider factories; populated via
# register_loader_type() and consulted by get_provider().
_provider_factories = {}
# Running Python's major.minor as a string, e.g. '2.7'.
PY_MAJOR = sys.version[:3]
# Distribution "precedence" constants: higher values are preferred when
# several distributions of the same project are available.
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
    """Register `provider_factory` to make providers for `loader_type`
    `loader_type` is the type or class of a PEP 302 ``module.__loader__``,
    and `provider_factory` is a function that, passed a *module* object,
    returns an ``IResourceProvider`` for that module.
    """
    # Later registrations for the same loader type overwrite earlier ones.
    _provider_factories[loader_type] = provider_factory
def get_provider(moduleOrReq):
    """Return an IResourceProvider for the named module or requirement"""
    if isinstance(moduleOrReq, Requirement):
        # Requirements are resolved through the global working set,
        # activating the distribution on demand if necessary.
        dist = working_set.find(moduleOrReq)
        return dist or require(str(moduleOrReq))[0]
    # Otherwise treat the argument as a module name: import it if needed,
    # then adapt its PEP 302 loader to a provider.
    if moduleOrReq not in sys.modules:
        __import__(moduleOrReq)
    module = sys.modules[moduleOrReq]
    loader = getattr(module, '__loader__', None)
    return _find_adapter(_provider_factories, loader)(module)
def _macosx_vers(_cache=[]):
if not _cache:
import platform
version = platform.mac_ver()[0]
# fallback for MacPorts
if version == '':
import plistlib
plist = '/System/Library/CoreServices/SystemVersion.plist'
if os.path.exists(plist):
if hasattr(plistlib, 'readPlist'):
plist_content = plistlib.readPlist(plist)
if 'ProductVersion' in plist_content:
version = plist_content['ProductVersion']
_cache.append(version.split('.'))
return _cache[0]
def _macosx_arch(machine):
return {'PowerPC':'ppc', 'Power_Macintosh':'ppc'}.get(machine,machine)
def get_build_platform():
    """Return this platform's string for platform-specific distributions.

    Same as ``distutils.util.get_platform()`` apart from the Mac OS X
    special case below, which normalizes legacy darwin platform strings
    to the ``macosx-<major>.<minor>-<arch>`` form.
    """
    try:
        # Python 2.7 or >=3.2
        from sysconfig import get_platform
    except ImportError:
        from distutils.util import get_platform
    plat = get_platform()
    if sys.platform != "darwin" or plat.startswith('macosx-'):
        return plat
    try:
        version = _macosx_vers()
        machine = os.uname()[4].replace(" ", "_")
        return "macosx-%d.%d-%s" % (
            int(version[0]), int(version[1]), _macosx_arch(machine))
    except ValueError:
        # Someone is running a non-Mac darwin system; fall back to the
        # generic platform string.
        return plat
# Matches e.g. "macosx-10.6-intel" -> groups (major, minor, arch).
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
# Matches legacy pre-setuptools-0.6 strings, e.g. "darwin-8.11.1-i386".
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
get_platform = get_build_platform # XXX backward compat
def compatible_platforms(provided,required):
    """Can code for the `provided` platform run on the `required` platform?
    Returns true if either platform is ``None``, or the platforms are equal.
    XXX Needs compatibility checks for Linux and other unixy OSes.
    """
    if provided is None or required is None or provided==required:
        return True # easy case
    # Mac OS X special cases
    reqMac = macosVersionString.match(required)
    if reqMac:
        provMac = macosVersionString.match(provided)
        # is this a Mac package?
        if not provMac:
            # this is backwards compatibility for packages built before
            # setuptools 0.6. All packages built after this point will
            # use the new macosx designation.
            provDarwin = darwinVersionString.match(provided)
            if provDarwin:
                dversion = int(provDarwin.group(1))
                macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
                # NOTE(review): lexicographic string comparison -- fine for
                # the 10.3/10.4 era this branch targets, but e.g.
                # "10.10" >= "10.4" is False; confirm intent if this legacy
                # path still matters.
                if dversion == 7 and macosversion >= "10.3" or \
                    dversion == 8 and macosversion >= "10.4":
                    #import warnings
                    #warnings.warn("Mac eggs should be rebuilt to "
                    #    "use the macosx designation instead of darwin.",
                    #    category=DeprecationWarning)
                    return True
            return False # egg isn't macosx or legacy darwin
        # are they the same major version and machine type?
        if provMac.group(1) != reqMac.group(1) or \
            provMac.group(3) != reqMac.group(3):
            return False
        # is the required OS major update >= the provided one?
        if int(provMac.group(2)) > int(reqMac.group(2)):
            return False
        return True
    # XXX Linux and other platforms' special cases should go here
    return False
def run_script(dist_spec, script_name):
    """Locate distribution `dist_spec` and run its `script_name` script"""
    # Grab the *caller's* global namespace, wipe it, and keep only
    # __name__, so the script executes as if it were that module.
    ns = sys._getframe(1).f_globals
    name = ns['__name__']
    ns.clear()
    ns['__name__'] = name
    require(dist_spec)[0].run_script(script_name, ns)
run_main = run_script # backward compatibility
def get_distribution(dist):
    """Return a current distribution object for a Requirement or string"""
    # Progressively coerce: string -> Requirement -> Distribution.
    if isinstance(dist, basestring):
        dist = Requirement.parse(dist)
    if isinstance(dist, Requirement):
        dist = get_provider(dist)
    if not isinstance(dist, Distribution):
        raise TypeError("Expected string, Requirement, or Distribution", dist)
    return dist
def load_entry_point(dist, group, name):
    """Return `name` entry point of `group` for `dist` or raise ImportError"""
    # Thin wrapper over Distribution.load_entry_point().
    return get_distribution(dist).load_entry_point(group, name)
def get_entry_map(dist, group=None):
    """Return the entry point map for `group`, or the full entry map"""
    # Thin wrapper over Distribution.get_entry_map().
    return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist, group, name):
    """Return the EntryPoint object for `group`+`name`, or ``None``"""
    # Thin wrapper over Distribution.get_entry_info().
    return get_distribution(dist).get_entry_info(group, name)
class IMetadataProvider:
    """Interface: objects able to supply a distribution's metadata.

    NOTE: the methods below are interface *specifications* only; they
    deliberately have no ``self`` parameter and no body beyond a docstring.
    """
    def has_metadata(name):
        """Does the package's distribution contain the named metadata?"""
    def get_metadata(name):
        """The named metadata resource as a string"""
    def get_metadata_lines(name):
        """Yield named metadata resource as list of non-blank non-comment lines
        Leading and trailing whitespace is stripped from each line, and lines
        with ``#`` as the first non-blank character are omitted."""
    def metadata_isdir(name):
        """Is the named metadata a directory? (like ``os.path.isdir()``)"""
    def metadata_listdir(name):
        """List of metadata names in the directory (like ``os.listdir()``)"""
    def run_script(script_name, namespace):
        """Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
    """An object that provides access to package resources"""
    # NOTE: as in IMetadataProvider, the methods below are interface
    # specifications only -- no ``self``, no implementation.
    def get_resource_filename(manager, resource_name):
        """Return a true filesystem path for `resource_name`
        `manager` must be an ``IResourceManager``"""
    def get_resource_stream(manager, resource_name):
        """Return a readable file-like object for `resource_name`
        `manager` must be an ``IResourceManager``"""
    def get_resource_string(manager, resource_name):
        """Return a string containing the contents of `resource_name`
        `manager` must be an ``IResourceManager``"""
    def has_resource(resource_name):
        """Does the package contain the named resource?"""
    def resource_isdir(resource_name):
        """Is the named resource a directory? (like ``os.path.isdir()``)"""
    def resource_listdir(resource_name):
        """List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet(object):
    """A collection of active distributions on sys.path (or a similar list)"""
    def __init__(self, entries=None):
        """Create working set from list of path entries (default=sys.path)"""
        # entries: ordered path items; entry_keys: path item -> project keys
        # found there; by_key: project key -> active Distribution;
        # callbacks: subscribers notified when a distribution is added.
        self.entries = []
        self.entry_keys = {}
        self.by_key = {}
        self.callbacks = []
        if entries is None:
            entries = sys.path
        for entry in entries:
            self.add_entry(entry)
    def add_entry(self, entry):
        """Add a path item to ``.entries``, finding any distributions on it
        ``find_distributions(entry, True)`` is used to find distributions
        corresponding to the path entry, and they are added. `entry` is
        always appended to ``.entries``, even if it is already present.
        (This is because ``sys.path`` can contain the same value more than
        once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
        equal ``sys.path``.)
        """
        self.entry_keys.setdefault(entry, [])
        self.entries.append(entry)
        for dist in find_distributions(entry, True):
            self.add(dist, entry, False)
    def __contains__(self,dist):
        """True if `dist` is the active distribution for its project"""
        return self.by_key.get(dist.key) == dist
    def find(self, req):
        """Find a distribution matching requirement `req`
        If there is an active distribution for the requested project, this
        returns it as long as it meets the version requirement specified by
        `req`. But, if there is an active distribution for the project and it
        does *not* meet the `req` requirement, ``VersionConflict`` is raised.
        If there is no active distribution for the requested project, ``None``
        is returned.
        """
        dist = self.by_key.get(req.key)
        if dist is not None and dist not in req:
            raise VersionConflict(dist,req) # XXX add more info
        else:
            return dist
    def iter_entry_points(self, group, name=None):
        """Yield entry point objects from `group` matching `name`
        If `name` is None, yields all entry points in `group` from all
        distributions in the working set, otherwise only ones matching
        both `group` and `name` are yielded (in distribution order).
        """
        for dist in self:
            entries = dist.get_entry_map(group)
            if name is None:
                for ep in entries.values():
                    yield ep
            elif name in entries:
                yield entries[name]
    def run_script(self, requires, script_name):
        """Locate distribution for `requires` and run `script_name` script"""
        # See the module-level run_script(): the caller's globals are
        # cleared and reused as the script's namespace.
        ns = sys._getframe(1).f_globals
        name = ns['__name__']
        ns.clear()
        ns['__name__'] = name
        self.require(requires)[0].run_script(script_name, ns)
    def __iter__(self):
        """Yield distributions for non-duplicate projects in the working set
        The yield order is the order in which the items' path entries were
        added to the working set.
        """
        seen = {}
        for item in self.entries:
            if item not in self.entry_keys:
                # workaround a cache issue
                continue
            for key in self.entry_keys[item]:
                if key not in seen:
                    seen[key]=1
                    yield self.by_key[key]
    def add(self, dist, entry=None, insert=True, replace=False):
        """Add `dist` to working set, associated with `entry`
        If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
        On exit from this routine, `entry` is added to the end of the working
        set's ``.entries`` (if it wasn't already present).
        `dist` is only added to the working set if it's for a project that
        doesn't already have a distribution in the set, unless `replace=True`.
        If it's added, any callbacks registered with the ``subscribe()`` method
        will be called.
        """
        if insert:
            dist.insert_on(self.entries, entry)
        if entry is None:
            entry = dist.location
        # Record the project key under both the given entry and the
        # distribution's own location.
        keys = self.entry_keys.setdefault(entry,[])
        keys2 = self.entry_keys.setdefault(dist.location,[])
        if not replace and dist.key in self.by_key:
            return # ignore hidden distros
        self.by_key[dist.key] = dist
        if dist.key not in keys:
            keys.append(dist.key)
        if dist.key not in keys2:
            keys2.append(dist.key)
        # Notify subscribers registered via subscribe().
        self._added_new(dist)
    def resolve(self, requirements, env=None, installer=None,
            replace_conflicting=False):
        """List all distributions needed to (recursively) meet `requirements`
        `requirements` must be a sequence of ``Requirement`` objects. `env`,
        if supplied, should be an ``Environment`` instance. If
        not supplied, it defaults to all distributions available within any
        entry or distribution in the working set. `installer`, if supplied,
        will be invoked with each requirement that cannot be met by an
        already-installed distribution; it should return a ``Distribution`` or
        ``None``.
        Unless `replace_conflicting=True`, raises a VersionConflict exception if
        any requirements are found on the path that have the correct name but
        the wrong version. Otherwise, if an `installer` is supplied it will be
        invoked to obtain the correct version of the requirement and activate
        it.
        """
        requirements = list(requirements)[::-1] # set up the stack
        processed = {} # set of processed requirements
        best = {} # key -> dist
        to_activate = []
        while requirements:
            req = requirements.pop(0) # process dependencies breadth-first
            if req in processed:
                # Ignore cyclic or redundant dependencies
                continue
            dist = best.get(req.key)
            if dist is None:
                # Find the best distribution and add it to the map
                dist = self.by_key.get(req.key)
                if dist is None or (dist not in req and replace_conflicting):
                    ws = self
                    if env is None:
                        if dist is None:
                            env = Environment(self.entries)
                        else:
                            # Use an empty environment and workingset to avoid
                            # any further conflicts with the conflicting
                            # distribution
                            env = Environment([])
                            ws = WorkingSet([])
                    dist = best[req.key] = env.best_match(req, ws, installer)
                    if dist is None:
                        #msg = ("The '%s' distribution was not found on this "
                        #    "system, and is required by this application.")
                        #raise DistributionNotFound(msg % req)
                        # unfortunately, zc.buildout uses a str(err)
                        # to get the name of the distribution here..
                        raise DistributionNotFound(req)
                to_activate.append(dist)
            if dist not in req:
                # Oops, the "best" so far conflicts with a dependency
                raise VersionConflict(dist,req) # XXX put more info here
            # Push the chosen distribution's own requirements onto the stack.
            requirements.extend(dist.requires(req.extras)[::-1])
            processed[req] = True
        return to_activate # return list of distros to activate
    def find_plugins(self, plugin_env, full_env=None, installer=None,
            fallback=True):
        """Find all activatable distributions in `plugin_env`
        Example usage::
            distributions, errors = working_set.find_plugins(
                Environment(plugin_dirlist)
            )
            map(working_set.add, distributions) # add plugins+libs to sys.path
            print 'Could not load', errors # display errors
        The `plugin_env` should be an ``Environment`` instance that contains
        only distributions that are in the project's "plugin directory" or
        directories. The `full_env`, if supplied, should be an ``Environment``
        contains all currently-available distributions. If `full_env` is not
        supplied, one is created automatically from the ``WorkingSet`` this
        method is called on, which will typically mean that every directory on
        ``sys.path`` will be scanned for distributions.
        `installer` is a standard installer callback as used by the
        ``resolve()`` method. The `fallback` flag indicates whether we should
        attempt to resolve older versions of a plugin if the newest version
        cannot be resolved.
        This method returns a 2-tuple: (`distributions`, `error_info`), where
        `distributions` is a list of the distributions found in `plugin_env`
        that were loadable, along with any other distributions that are needed
        to resolve their dependencies. `error_info` is a dictionary mapping
        unloadable plugin distributions to an exception instance describing the
        error that occurred. Usually this will be a ``DistributionNotFound`` or
        ``VersionConflict`` instance.
        """
        plugin_projects = list(plugin_env)
        plugin_projects.sort()  # scan project names in alphabetic order
        error_info = {}
        distributions = {}
        if full_env is None:
            env = Environment(self.entries)
            env += plugin_env
        else:
            env = full_env + plugin_env
        # Resolve plugins against a private copy of this working set so
        # failures don't disturb the real one.
        shadow_set = self.__class__([])
        list(map(shadow_set.add, self)) # put all our entries in shadow_set
        for project_name in plugin_projects:
            for dist in plugin_env[project_name]:
                req = [dist.as_requirement()]
                try:
                    resolvees = shadow_set.resolve(req, env, installer)
                except ResolutionError:
                    v = sys.exc_info()[1]
                    error_info[dist] = v # save error info
                    if fallback:
                        continue # try the next older version of project
                    else:
                        break # give up on this project, keep going
                else:
                    list(map(shadow_set.add, resolvees))
                    distributions.update(dict.fromkeys(resolvees))
                    # success, no need to try any more versions of this project
                    break
        distributions = list(distributions)
        distributions.sort()
        return distributions, error_info
    def require(self, *requirements):
        """Ensure that distributions matching `requirements` are activated
        `requirements` must be a string or a (possibly-nested) sequence
        thereof, specifying the distributions and versions required. The
        return value is a sequence of the distributions that needed to be
        activated to fulfill the requirements; all relevant distributions are
        included, even if they were already activated in this working set.
        """
        needed = self.resolve(parse_requirements(requirements))
        for dist in needed:
            self.add(dist)
        return needed
    def subscribe(self, callback):
        """Invoke `callback` for all distributions (including existing ones)"""
        if callback in self.callbacks:
            return
        self.callbacks.append(callback)
        for dist in self:
            callback(dist)
    def _added_new(self, dist):
        # Fire all subscribe() callbacks for a newly activated distribution.
        for callback in self.callbacks:
            callback(dist)
    def __getstate__(self):
        # Copy each container so the pickled state is independent of
        # later mutations of this working set.
        return (
            self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
            self.callbacks[:]
        )
    def __setstate__(self, e_k_b_c):
        entries, keys, by_key, callbacks = e_k_b_c
        self.entries = entries[:]
        self.entry_keys = keys.copy()
        self.by_key = by_key.copy()
        self.callbacks = callbacks[:]
class Environment(object):
"""Searchable snapshot of distributions on a search path"""
def __init__(self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR):
"""Snapshot distributions available on a search path
Any distributions found on `search_path` are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used.
`platform` is an optional string specifying the name of the platform
that platform-specific distributions must be compatible with. If
unspecified, it defaults to the current platform. `python` is an
optional string naming the desired version of Python (e.g. ``'3.3'``);
it defaults to the current version.
You may explicitly set `platform` (and/or `python`) to ``None`` if you
wish to map *all* distributions, not just those compatible with the
running platform or Python version.
"""
self._distmap = {}
self._cache = {}
self.platform = platform
self.python = python
self.scan(search_path)
def can_add(self, dist):
"""Is distribution `dist` acceptable for this environment?
The distribution must match the platform and python version
requirements specified when this environment was created, or False
is returned.
"""
return (self.python is None or dist.py_version is None
or dist.py_version==self.python) \
and compatible_platforms(dist.platform,self.platform)
def remove(self, dist):
"""Remove `dist` from the environment"""
self._distmap[dist.key].remove(dist)
def scan(self, search_path=None):
"""Scan `search_path` for distributions usable in this environment
Any distributions found are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used. Only distributions conforming to
the platform/python version defined at initialization are added.
"""
if search_path is None:
search_path = sys.path
for item in search_path:
for dist in find_distributions(item):
self.add(dist)
def __getitem__(self,project_name):
"""Return a newest-to-oldest list of distributions for `project_name`
"""
try:
return self._cache[project_name]
except KeyError:
project_name = project_name.lower()
if project_name not in self._distmap:
return []
if project_name not in self._cache:
dists = self._cache[project_name] = self._distmap[project_name]
_sort_dists(dists)
return self._cache[project_name]
def add(self,dist):
"""Add `dist` if we ``can_add()`` it and it isn't already added"""
if self.can_add(dist) and dist.has_version():
dists = self._distmap.setdefault(dist.key,[])
if dist not in dists:
dists.append(dist)
if dist.key in self._cache:
_sort_dists(self._cache[dist.key])
def best_match(self, req, working_set, installer=None):
"""Find distribution best matching `req` and usable on `working_set`
This calls the ``find(req)`` method of the `working_set` to see if a
suitable distribution is already active. (This may raise
``VersionConflict`` if an unsuitable version of the project is already
active in the specified `working_set`.) If a suitable distribution
isn't active, this method returns the newest distribution in the
environment that meets the ``Requirement`` in `req`. If no suitable
distribution is found, and `installer` is supplied, then the result of
calling the environment's ``obtain(req, installer)`` method will be
returned.
"""
dist = working_set.find(req)
if dist is not None:
return dist
for dist in self[req.key]:
if dist in req:
return dist
return self.obtain(req, installer) # try and download/install
def obtain(self, requirement, installer=None):
"""Obtain a distribution matching `requirement` (e.g. via download)
Obtain a distro that matches requirement (e.g. via download). In the
base ``Environment`` class, this routine just returns
``installer(requirement)``, unless `installer` is None, in which case
None is returned instead. This method is a hook that allows subclasses
to attempt other ways of obtaining a distribution before falling back
to the `installer` argument."""
if installer is not None:
return installer(requirement)
def __iter__(self):
"""Yield the unique project names of the available distributions"""
for key in self._distmap.keys():
if self[key]: yield key
def __iadd__(self, other):
"""In-place addition of a distribution or environment"""
if isinstance(other,Distribution):
self.add(other)
elif isinstance(other,Environment):
for project in other:
for dist in other[project]:
self.add(dist)
else:
raise TypeError("Can't add %r to environment" % (other,))
return self
def __add__(self, other):
"""Add an environment or distribution to an environment"""
new = self.__class__([], platform=None, python=None)
for env in self, other:
new += env
return new
# Legacy name: older releases called this class AvailableDistributions.
AvailableDistributions = Environment # XXX backward compatibility
class ExtractionError(RuntimeError):
    """An error occurred extracting a resource
    The following attributes are available from instances of this exception:
    manager
        The resource manager that raised this exception
    cache_path
        The base directory for resource extraction
    original_error
        The exception instance that caused extraction to fail
    """
    # The attributes above are assigned by ResourceManager.extraction_error()
    # just before the exception is raised; the class itself adds no behavior.
class ResourceManager:
    """Manage resource extraction and packages"""
    # Base directory for extracted resources; None means "use
    # get_default_cache()".  Class-level so it acts as a shared default.
    extraction_path = None
    def __init__(self):
        # Maps extraction target path -> 1; consulted by cleanup_resources()
        # and used to refuse late changes in set_extraction_path().
        self.cached_files = {}
    def resource_exists(self, package_or_requirement, resource_name):
        """Does the named resource exist?"""
        return get_provider(package_or_requirement).has_resource(resource_name)
    def resource_isdir(self, package_or_requirement, resource_name):
        """Is the named resource an existing directory?"""
        return get_provider(package_or_requirement).resource_isdir(
            resource_name
        )
    def resource_filename(self, package_or_requirement, resource_name):
        """Return a true filesystem path for specified resource"""
        return get_provider(package_or_requirement).get_resource_filename(
            self, resource_name
        )
    def resource_stream(self, package_or_requirement, resource_name):
        """Return a readable file-like object for specified resource"""
        return get_provider(package_or_requirement).get_resource_stream(
            self, resource_name
        )
    def resource_string(self, package_or_requirement, resource_name):
        """Return specified resource as a string"""
        return get_provider(package_or_requirement).get_resource_string(
            self, resource_name
        )
    def resource_listdir(self, package_or_requirement, resource_name):
        """List the contents of the named resource directory"""
        return get_provider(package_or_requirement).resource_listdir(
            resource_name
        )
    def extraction_error(self):
        """Give an error message for problems extracting file(s)"""
        # Wrap the currently-active exception in an ExtractionError carrying
        # enough context (manager, cache path, original error) for the caller.
        old_exc = sys.exc_info()[1]
        cache_path = self.extraction_path or get_default_cache()
        err = ExtractionError("""Can't extract file(s) to egg cache
The following error occurred while trying to extract file(s) to the Python egg
cache:
%s
The Python egg cache directory is currently set to:
%s
Perhaps your account does not have write access to this directory? You can
change the cache directory by setting the PYTHON_EGG_CACHE environment
variable to point to an accessible directory.
""" % (old_exc, cache_path)
        )
        err.manager = self
        err.cache_path = cache_path
        err.original_error = old_exc
        raise err
    def get_cache_path(self, archive_name, names=()):
        """Return absolute location in cache for `archive_name` and `names`
        The parent directory of the resulting path will be created if it does
        not already exist. `archive_name` should be the base filename of the
        enclosing egg (which may not be the name of the enclosing zipfile!),
        including its ".egg" extension. `names`, if provided, should be a
        sequence of path name parts "under" the egg's extraction location.
        This method should only be called by resource providers that need to
        obtain an extraction location, and only for names they intend to
        extract, as it tracks the generated names for possible cleanup later.
        """
        extract_path = self.extraction_path or get_default_cache()
        target_path = os.path.join(extract_path, archive_name+'-tmp', *names)
        try:
            _bypass_ensure_directory(target_path)
        except:
            # Any failure creating the directory becomes an ExtractionError.
            self.extraction_error()
        self._warn_unsafe_extraction_path(extract_path)
        # Remember the path so cleanup_resources() knows what was handed out.
        self.cached_files[target_path] = 1
        return target_path
    @staticmethod
    def _warn_unsafe_extraction_path(path):
        """
        If the default extraction path is overridden and set to an insecure
        location, such as /tmp, it opens up an opportunity for an attacker to
        replace an extracted file with an unauthorized payload. Warn the user
        if a known insecure location is used.
        See Distribute #375 for more details.
        """
        if os.name == 'nt' and not path.startswith(os.environ['windir']):
            # On Windows, permissions are generally restrictive by default
            # and temp directories are not writable by other users, so
            # bypass the warning.
            return
        mode = os.stat(path).st_mode
        if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
            msg = ("%s is writable by group/others and vulnerable to attack "
                "when "
                "used with get_resource_filename. Consider a more secure "
                "location (set with .set_extraction_path or the "
                "PYTHON_EGG_CACHE environment variable)." % path)
            warnings.warn(msg, UserWarning)
    def postprocess(self, tempname, filename):
        """Perform any platform-specific postprocessing of `tempname`
        This is where Mac header rewrites should be done; other platforms don't
        have anything special they should do.
        Resource providers should call this method ONLY after successfully
        extracting a compressed resource. They must NOT call it on resources
        that are already in the filesystem.
        `tempname` is the current (temporary) name of the file, and `filename`
        is the name it will be renamed to by the caller after this routine
        returns.
        """
        if os.name == 'posix':
            # Make the resource executable
            mode = ((os.stat(tempname).st_mode) | 0x16D) & 0xFFF # 0555, 07777
            os.chmod(tempname, mode)
    def set_extraction_path(self, path):
        """Set the base path where resources will be extracted to, if needed.
        If you do not call this routine before any extractions take place, the
        path defaults to the return value of ``get_default_cache()``. (Which
        is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
        platform-specific fallbacks. See that routine's documentation for more
        details.)
        Resources are extracted to subdirectories of this path based upon
        information given by the ``IResourceProvider``. You may set this to a
        temporary directory, but then you must call ``cleanup_resources()`` to
        delete the extracted files when done. There is no guarantee that
        ``cleanup_resources()`` will be able to remove all extracted files.
        (Note: you may not change the extraction path for a given resource
        manager once resources have been extracted, unless you first call
        ``cleanup_resources()``.)
        """
        if self.cached_files:
            raise ValueError(
                "Can't change extraction path, files already extracted"
            )
        self.extraction_path = path
    def cleanup_resources(self, force=False):
        """
        Delete all extracted resource files and directories, returning a list
        of the file and directory names that could not be successfully removed.
        This function does not have any concurrency protection, so it should
        generally only be called when the extraction path is a temporary
        directory exclusive to a single process. This method is not
        automatically called; you must call it explicitly or register it as an
        ``atexit`` function if you wish to ensure cleanup of a temporary
        directory used for extractions.
        """
        # XXX
        # NOTE(review): not implemented -- despite the docstring, this method
        # currently does nothing and returns None.
def get_default_cache():
    """Determine the default cache location
    This returns the ``PYTHON_EGG_CACHE`` environment variable, if set.
    Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the
    "Application Data" directory. On all other systems, it's "~/.python-eggs".

    Raises RuntimeError on Windows when no usable home directory can be
    derived from the environment.
    """
    try:
        return os.environ['PYTHON_EGG_CACHE']
    except KeyError:
        pass
    if os.name != 'nt':
        return os.path.expanduser('~/.python-eggs')
    # Windows: probe progressively less reliable environment variables for a
    # per-user directory to hold the "Python-Eggs" cache.
    app_data = 'Application Data'   # XXX this may be locale-specific!
    app_homes = [
        (('APPDATA',), None),       # best option, should be locale-safe
        (('USERPROFILE',), app_data),
        (('HOMEDRIVE', 'HOMEPATH'), app_data),
        (('HOMEPATH',), app_data),
        (('HOME',), None),
        (('WINDIR',), app_data),    # 95/98/ME
    ]
    for keys, subdir in app_homes:
        dirname = ''
        for key in keys:
            if key in os.environ:
                dirname = os.path.join(dirname, os.environ[key])
            else:
                # A required variable is missing; try the next candidate.
                break
        else:
            if subdir:
                dirname = os.path.join(dirname, subdir)
            return os.path.join(dirname, 'Python-Eggs')
    else:
        # BUGFIX: error message previously misspelled "environment".
        raise RuntimeError(
            "Please set the PYTHON_EGG_CACHE environment variable"
        )
def safe_name(name):
    """Convert an arbitrary string to a standard distribution name
    Any runs of non-alphanumeric/. characters are replaced with a single '-'.
    """
    illegal_run = re.compile('[^A-Za-z0-9.]+')
    return illegal_run.sub('-', name)
def safe_version(version):
    """Convert an arbitrary string to a standard version string
    Spaces become dots, and all other non-alphanumeric characters become
    dashes, with runs of multiple dashes condensed to a single dash.
    """
    dotted = version.replace(' ', '.')
    return re.sub('[^A-Za-z0-9.]+', '-', dotted)
def safe_extra(extra):
    """Convert an arbitrary string to a standard 'extra' name
    Any runs of non-alphanumeric characters are replaced with a single '_',
    and the result is always lowercased.
    """
    underscored = re.sub('[^A-Za-z0-9.]+', '_', extra)
    return underscored.lower()
def to_filename(name):
    """Convert a project or version name to its filename-escaped form
    Any '-' characters are currently replaced with '_'.
    """
    return '_'.join(name.split('-'))
class MarkerEvaluation(object):
    # Marker variable name -> zero-argument callable producing its value in
    # the running interpreter.
    values = {
        'os_name': lambda: os.name,
        'sys_platform': lambda: sys.platform,
        'python_full_version': lambda: sys.version.split()[0],
        'python_version': lambda:'%s.%s' % (sys.version_info[0], sys.version_info[1]),
        'platform_version': platform.version,
        'platform_machine': platform.machine,
        'python_implementation': platform.python_implementation,
    }
    @classmethod
    def is_invalid_marker(cls, text):
        """
        Validate text as a PEP 426 environment marker; return an exception
        if invalid or False otherwise.
        """
        try:
            cls.evaluate_marker(text)
        except SyntaxError:
            return cls.normalize_exception(sys.exc_info()[1])
        return False
    @staticmethod
    def normalize_exception(exc):
        """
        Given a SyntaxError from a marker evaluation, normalize the error message:
        - Remove indications of filename and line number.
        - Replace platform-specific error messages with standard error messages.
        """
        subs = {
            'unexpected EOF while parsing': 'invalid syntax',
            'parenthesis is never closed': 'invalid syntax',
        }
        exc.filename = None
        exc.lineno = None
        exc.msg = subs.get(exc.msg, exc.msg)
        return exc
    @classmethod
    def and_test(cls, nodelist):
        # MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
        return functools.reduce(operator.and_, [cls.interpret(nodelist[i]) for i in range(1,len(nodelist),2)])
    @classmethod
    def test(cls, nodelist):
        # MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
        return functools.reduce(operator.or_, [cls.interpret(nodelist[i]) for i in range(1,len(nodelist),2)])
    @classmethod
    def atom(cls, nodelist):
        # Only parenthesized sub-expressions are permitted at the atom level.
        t = nodelist[1][0]
        if t == token.LPAR:
            if nodelist[2][0] == token.RPAR:
                raise SyntaxError("Empty parentheses")
            return cls.interpret(nodelist[2])
        raise SyntaxError("Language feature not supported in environment markers")
    @classmethod
    def comparison(cls, nodelist):
        if len(nodelist)>4:
            raise SyntaxError("Chained comparison not allowed in environment markers")
        comp = nodelist[2][1]
        cop = comp[1]
        if comp[0] == token.NAME:
            # Two-token operators ("not in" / "is not") arrive as a NAME node
            # of length 3; reconstruct the full operator string.
            if len(nodelist[2]) == 3:
                if cop == 'not':
                    cop = 'not in'
                else:
                    cop = 'is not'
        try:
            cop = cls.get_op(cop)
        except KeyError:
            raise SyntaxError(repr(cop)+" operator not allowed in environment markers")
        return cop(cls.evaluate(nodelist[1]), cls.evaluate(nodelist[3]))
    @classmethod
    def get_op(cls, op):
        # `op` is either a grammar symbol id (mapped to a node handler) or a
        # comparison-operator string (mapped to a binary predicate).
        ops = {
            symbol.test: cls.test,
            symbol.and_test: cls.and_test,
            symbol.atom: cls.atom,
            symbol.comparison: cls.comparison,
            'not in': lambda x, y: x not in y,
            'in': lambda x, y: x in y,
            '==': operator.eq,
            '!=': operator.ne,
        }
        if hasattr(symbol, 'or_test'):
            ops[symbol.or_test] = cls.test
        return ops[op]
    @classmethod
    def evaluate_marker(cls, text, extra=None):
        """
        Evaluate a PEP 426 environment marker on CPython 2.4+.
        Return a boolean indicating the marker result in this environment.
        Raise SyntaxError if marker is invalid.
        This implementation uses the 'parser' module, which is not implemented on
        Jython and has been superseded by the 'ast' module in Python 2.6 and
        later.
        """
        return cls.interpret(parser.expr(text).totuple(1)[1])
    @classmethod
    def _markerlib_evaluate(cls, text):
        """
        Evaluate a PEP 426 environment marker using markerlib.
        Return a boolean indicating the marker result in this environment.
        Raise SyntaxError if marker is invalid.
        """
        import _markerlib
        # markerlib implements Metadata 1.2 (PEP 345) environment markers.
        # Translate the variables to Metadata 2.0 (PEP 426).
        env = _markerlib.default_environment()
        for key in env.keys():
            new_key = key.replace('.', '_')
            env[new_key] = env.pop(key)
        try:
            result = _markerlib.interpret(text, env)
        except NameError:
            e = sys.exc_info()[1]
            raise SyntaxError(e.args[0])
        return result
    if 'parser' not in globals():
        # Fall back to less-complete _markerlib implementation if 'parser' module
        # is not available.
        evaluate_marker = _markerlib_evaluate
    @classmethod
    def interpret(cls, nodelist):
        # Collapse single-child grammar nodes before dispatching on the node
        # type via get_op().
        while len(nodelist)==2: nodelist = nodelist[1]
        try:
            op = cls.get_op(nodelist[0])
        except KeyError:
            raise SyntaxError("Comparison or logical expression expected")
        return op(nodelist)
    @classmethod
    def evaluate(cls, nodelist):
        # Evaluate a leaf: either a known marker variable or a plain string
        # literal; anything else is rejected.
        while len(nodelist)==2: nodelist = nodelist[1]
        kind = nodelist[0]
        name = nodelist[1]
        if kind==token.NAME:
            try:
                op = cls.values[name]
            except KeyError:
                raise SyntaxError("Unknown name %r" % name)
            return op()
        if kind==token.STRING:
            s = nodelist[1]
            if s[:1] not in "'\"" or s.startswith('"""') or s.startswith("'''") \
                    or '\\' in s:
                raise SyntaxError(
                    "Only plain strings allowed in environment markers")
            return s[1:-1]
        raise SyntaxError("Language feature not supported in environment markers")
# Module-level convenience aliases for the MarkerEvaluation entry points.
invalid_marker = MarkerEvaluation.is_invalid_marker
evaluate_marker = MarkerEvaluation.evaluate_marker
class NullProvider:
    """Try to implement resources and metadata for arbitrary PEP 302 loaders"""
    # egg_name/egg_info stay None here; EggProvider subclasses fill them in
    # when an enclosing .egg is located.
    egg_name = None
    egg_info = None
    loader = None
    def __init__(self, module):
        self.loader = getattr(module, '__loader__', None)
        self.module_path = os.path.dirname(getattr(module, '__file__', ''))
    def get_resource_filename(self, manager, resource_name):
        return self._fn(self.module_path, resource_name)
    def get_resource_stream(self, manager, resource_name):
        return BytesIO(self.get_resource_string(manager, resource_name))
    def get_resource_string(self, manager, resource_name):
        return self._get(self._fn(self.module_path, resource_name))
    def has_resource(self, resource_name):
        return self._has(self._fn(self.module_path, resource_name))
    def has_metadata(self, name):
        return self.egg_info and self._has(self._fn(self.egg_info,name))
    # On Python 3, metadata bytes must be decoded to text; the Python 2
    # variant returns the raw string unchanged.
    if sys.version_info <= (3,):
        def get_metadata(self, name):
            if not self.egg_info:
                return ""
            return self._get(self._fn(self.egg_info,name))
    else:
        def get_metadata(self, name):
            if not self.egg_info:
                return ""
            return self._get(self._fn(self.egg_info,name)).decode("utf-8")
    def get_metadata_lines(self, name):
        return yield_lines(self.get_metadata(name))
    def resource_isdir(self,resource_name):
        return self._isdir(self._fn(self.module_path, resource_name))
    def metadata_isdir(self,name):
        return self.egg_info and self._isdir(self._fn(self.egg_info,name))
    def resource_listdir(self,resource_name):
        return self._listdir(self._fn(self.module_path,resource_name))
    def metadata_listdir(self,name):
        if self.egg_info:
            return self._listdir(self._fn(self.egg_info,name))
        return []
    def run_script(self,script_name,namespace):
        # Scripts live under the egg's metadata as 'scripts/<name>'.
        script = 'scripts/'+script_name
        if not self.has_metadata(script):
            raise ResolutionError("No script named %r" % script_name)
        # Normalize all line endings to '\n' before compiling.
        script_text = self.get_metadata(script).replace('\r\n','\n')
        script_text = script_text.replace('\r','\n')
        script_filename = self._fn(self.egg_info,script)
        namespace['__file__'] = script_filename
        if os.path.exists(script_filename):
            execfile(script_filename, namespace, namespace)
        else:
            # No real file (e.g. zipped egg): seed linecache so tracebacks
            # can still show source lines for the script.
            from linecache import cache
            cache[script_filename] = (
                len(script_text), 0, script_text.split('\n'), script_filename
            )
            script_code = compile(script_text,script_filename,'exec')
            exec(script_code, namespace, namespace)
    # The _has/_isdir/_listdir primitives must be supplied by a subclass
    # registered for the concrete loader type.
    def _has(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )
    def _isdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )
    def _listdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )
    def _fn(self, base, resource_name):
        # Resource names always use '/' separators; join onto `base` using
        # the platform's path rules.
        if resource_name:
            return os.path.join(base, *resource_name.split('/'))
        return base
    def _get(self, path):
        if hasattr(self.loader, 'get_data'):
            return self.loader.get_data(path)
        raise NotImplementedError(
            "Can't perform this operation for loaders without 'get_data()'"
        )
# Fallback: loader types without a more specific provider get NullProvider.
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
    """Provider based on a virtual filesystem"""

    def __init__(self, module):
        NullProvider.__init__(self, module)
        self._setup_prefix()

    def _setup_prefix(self):
        # we assume here that our metadata may be nested inside a "basket"
        # of multiple eggs; that's why we use module_path instead of .archive
        # Walk upward until a ".egg" component is found or the path stops
        # changing (i.e. the filesystem root was reached).
        current = self.module_path
        previous = None
        while current != previous:
            if current.lower().endswith('.egg'):
                self.egg_name = os.path.basename(current)
                self.egg_info = os.path.join(current, 'EGG-INFO')
                self.egg_root = current
                break
            previous = current
            current, _tail = os.path.split(current)
class DefaultProvider(EggProvider):
    """Provides access to package resources in the filesystem"""

    def _has(self, path):
        return os.path.exists(path)

    def _isdir(self, path):
        return os.path.isdir(path)

    def _listdir(self, path):
        return os.listdir(path)

    def get_resource_stream(self, manager, resource_name):
        # Binary mode: resources are raw bytes, not decoded text.
        return open(self._fn(self.module_path, resource_name), 'rb')

    def _get(self, path):
        stream = open(path, 'rb')
        try:
            data = stream.read()
        finally:
            stream.close()
        return data
# Ordinary filesystem modules have no __loader__, so NullProvider.__init__
# records their loader as None; map NoneType to the filesystem provider.
register_loader_type(type(None), DefaultProvider)
if importlib_bootstrap is not None:
    register_loader_type(importlib_bootstrap.SourceFileLoader, DefaultProvider)
class EmptyProvider(NullProvider):
    """Provider that returns nothing for all requests"""

    # There is no backing module/filesystem location.
    module_path = None

    def _has(self, path):
        return False

    def _isdir(self, path):
        return False

    def _get(self, path):
        return ''

    def _listdir(self, path):
        return []

    def __init__(self):
        # Deliberately skip NullProvider.__init__: there is no module to wrap.
        pass
# EmptyProvider is stateless, so a single shared instance suffices.
empty_provider = EmptyProvider()
def build_zipmanifest(path):
    """
    Build a dictionary similar to the zipimport directory caches, except
    that ZipInfo objects are stored instead of tuples. The tuple fields map
    to ZipInfo as follows:
    * [0] - zipinfo.filename on stock pythons this needs "/" --> os.sep
            on pypy it is the same (one reason why distribute did work
            in some cases on pypy and win32).
    * [1] - zipinfo.compress_type
    * [2] - zipinfo.compress_size
    * [3] - zipinfo.file_size
    * [4] - len(utf-8 encoding of filename) if zipinfo & 0x800
            len(ascii encoding of filename) otherwise
    * [5] - (zipinfo.date_time[0] - 1980) << 9 |
             zipinfo.date_time[1] << 5 | zipinfo.date_time[2]
    * [6] - (zipinfo.date_time[3] - 1980) << 11 |
             zipinfo.date_time[4] << 5 | (zipinfo.date_time[5] // 2)
    * [7] - zipinfo.CRC
    """
    manifest = dict()
    zfile = zipfile.ZipFile(path)
    # ZipFile has no __exit__ on Python 3.1, so use try/finally, not "with".
    try:
        for member in zfile.namelist():
            # Keys use the platform separator to match zipimport's cache.
            key = member.replace('/', os.sep)
            manifest[key] = zfile.getinfo(member)
            assert manifest[key] is not None
    finally:
        zfile.close()
    return manifest
class ZipProvider(EggProvider):
    """Resource support for zips and eggs"""
    # Lazily-built list of resources that must be extracted eagerly
    # (native libs etc.); see _get_eager_resources().
    eagers = None
    def __init__(self, module):
        EggProvider.__init__(self,module)
        self.zipinfo = build_zipmanifest(self.loader.archive)
        # Prefix every virtual path shares; stripping it yields the
        # archive-relative subpath.
        self.zip_pre = self.loader.archive+os.sep
    def _zipinfo_name(self, fspath):
        # Convert a virtual filename (full path to file) into a zipfile subpath
        # usable with the zipimport directory cache for our target archive
        if fspath.startswith(self.zip_pre):
            return fspath[len(self.zip_pre):]
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath,self.zip_pre)
        )
    def _parts(self,zip_path):
        # Convert a zipfile subpath into an egg-relative path part list
        fspath = self.zip_pre+zip_path # pseudo-fs path
        if fspath.startswith(self.egg_root+os.sep):
            return fspath[len(self.egg_root)+1:].split(os.sep)
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath,self.egg_root)
        )
    def get_resource_filename(self, manager, resource_name):
        if not self.egg_name:
            raise NotImplementedError(
                "resource_filename() only supported for .egg, not .zip"
            )
        # no need to lock for extraction, since we use temp names
        zip_path = self._resource_to_zip(resource_name)
        eagers = self._get_eager_resources()
        if '/'.join(self._parts(zip_path)) in eagers:
            # Requesting any eager resource extracts all of them.
            for name in eagers:
                self._extract_resource(manager, self._eager_to_zip(name))
        return self._extract_resource(manager, zip_path)
    @staticmethod
    def _get_date_and_size(zip_stat):
        size = zip_stat.file_size
        date_time = zip_stat.date_time + (0, 0, -1) # ymdhms+wday, yday, dst
        #1980 offset already done
        timestamp = time.mktime(date_time)
        return timestamp, size
    def _extract_resource(self, manager, zip_path):
        # A directory entry: recursively extract its children and return
        # the directory path.
        if zip_path in self._index():
            for name in self._index()[zip_path]:
                last = self._extract_resource(
                    manager, os.path.join(zip_path, name)
                )
            return os.path.dirname(last) # return the extracted directory name
        timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
        if not WRITE_SUPPORT:
            raise IOError('"os.rename" and "os.unlink" are not supported '
                'on this platform')
        try:
            real_path = manager.get_cache_path(
                self.egg_name, self._parts(zip_path)
            )
            if self._is_current(real_path, zip_path):
                return real_path
            # Write to a temp name first, then rename into place; the temp
            # name makes concurrent extraction safe without locking.
            outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path))
            os.write(outf, self.loader.get_data(zip_path))
            os.close(outf)
            # Stamp the file with the archive's mtime so _is_current() works.
            utime(tmpnam, (timestamp,timestamp))
            manager.postprocess(tmpnam, real_path)
            try:
                rename(tmpnam, real_path)
            except os.error:
                if os.path.isfile(real_path):
                    if self._is_current(real_path, zip_path):
                        # the file became current since it was checked above,
                        # so proceed.
                        return real_path
                    elif os.name=='nt': # Windows, del old file and retry
                        unlink(real_path)
                        rename(tmpnam, real_path)
                        return real_path
                raise
        except os.error:
            manager.extraction_error() # report a user-friendly error
        return real_path
    def _is_current(self, file_path, zip_path):
        """
        Return True if the file_path is current for this zip_path
        """
        timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
        if not os.path.isfile(file_path):
            return False
        stat = os.stat(file_path)
        if stat.st_size!=size or stat.st_mtime!=timestamp:
            return False
        # check that the contents match
        zip_contents = self.loader.get_data(zip_path)
        f = open(file_path, 'rb')
        file_contents = f.read()
        f.close()
        return zip_contents == file_contents
    def _get_eager_resources(self):
        if self.eagers is None:
            eagers = []
            for name in ('native_libs.txt', 'eager_resources.txt'):
                if self.has_metadata(name):
                    eagers.extend(self.get_metadata_lines(name))
            self.eagers = eagers
        return self.eagers
    def _index(self):
        # Build (once) a mapping of directory subpath -> list of child names,
        # derived from the flat zip manifest.
        try:
            return self._dirindex
        except AttributeError:
            ind = {}
            for path in self.zipinfo:
                parts = path.split(os.sep)
                while parts:
                    parent = os.sep.join(parts[:-1])
                    if parent in ind:
                        ind[parent].append(parts[-1])
                        break
                    else:
                        ind[parent] = [parts.pop()]
            self._dirindex = ind
            return ind
    def _has(self, fspath):
        zip_path = self._zipinfo_name(fspath)
        return zip_path in self.zipinfo or zip_path in self._index()
    def _isdir(self,fspath):
        return self._zipinfo_name(fspath) in self._index()
    def _listdir(self,fspath):
        return list(self._index().get(self._zipinfo_name(fspath), ()))
    def _eager_to_zip(self,resource_name):
        return self._zipinfo_name(self._fn(self.egg_root,resource_name))
    def _resource_to_zip(self,resource_name):
        return self._zipinfo_name(self._fn(self.module_path,resource_name))
# Zipped packages (including eggs) are served by ZipProvider.
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
    """Metadata handler for standalone PKG-INFO files
    Usage::
        metadata = FileMetadata("/path/to/PKG-INFO")
    This provider rejects all data and metadata requests except for PKG-INFO,
    which is treated as existing, and will be the contents of the file at
    the provided location.
    """

    def __init__(self, path):
        self.path = path

    def has_metadata(self, name):
        return name == 'PKG-INFO'

    def get_metadata(self, name):
        # Guard clause: PKG-INFO is the only metadata this provider serves.
        if name != 'PKG-INFO':
            raise KeyError("No metadata except PKG-INFO is available")
        f = open(self.path,'rU')
        contents = f.read()
        f.close()
        return contents

    def get_metadata_lines(self, name):
        return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
    """Metadata provider for egg directories
    Usage::
        # Development eggs:
        egg_info = "/path/to/PackageName.egg-info"
        base_dir = os.path.dirname(egg_info)
        metadata = PathMetadata(base_dir, egg_info)
        dist_name = os.path.splitext(os.path.basename(egg_info))[0]
        dist = Distribution(basedir,project_name=dist_name,metadata=metadata)
        # Unpacked egg directories:
        egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
        metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
        dist = Distribution.from_filename(egg_path, metadata=metadata)
    """
    def __init__(self, path, egg_info):
        # Unlike DefaultProvider, both locations are supplied directly
        # rather than derived from a module, so no parent __init__ is run.
        self.module_path = path
        self.egg_info = egg_info
class EggMetadata(ZipProvider):
    """Metadata provider for .egg files"""

    def __init__(self, importer):
        """Create a metadata provider from a zipimporter"""
        # Mirror ZipProvider.__init__, but source everything from the
        # zipimporter instead of a loaded module.
        self.loader = importer
        self.zipinfo = build_zipmanifest(importer.archive)
        self.zip_pre = importer.archive + os.sep
        prefix = importer.prefix
        self.module_path = (
            os.path.join(importer.archive, prefix) if prefix
            else importer.archive
        )
        self._setup_prefix()
# Registry mapping importer types to distribution-finder callables.
_declare_state('dict', _distribution_finders = {})
def register_finder(importer_type, distribution_finder):
    """Register `distribution_finder` to find distributions in sys.path items
    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
    handler), and `distribution_finder` is a callable that, passed a path
    item and the importer instance, yields ``Distribution`` instances found on
    that path item. See ``pkg_resources.find_on_path`` for an example."""
    # A later registration for the same importer type replaces the earlier one.
    _distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
    """Yield distributions accessible via `path_item`"""
    loader = get_importer(path_item)
    # Dispatch to the finder registered for this importer's type.
    locate = _find_adapter(_distribution_finders, loader)
    return locate(loader, path_item, only)
def find_eggs_in_zip(importer, path_item, only=False):
    """
    Find eggs in zip files; possibly multiple nested eggs.
    """
    # Wheels carry no PKG-INFO metadata and never contain nested eggs, so
    # this finder has nothing to report for them.
    if importer.archive.endswith('.whl'):
        return
    metadata = EggMetadata(importer)
    if metadata.has_metadata('PKG-INFO'):
        yield Distribution.from_filename(path_item, metadata=metadata)
    if only:
        # Caller wants the top-level distribution only, not nested ones.
        return
    # Recurse into eggs stored inside this archive ("basket" layout).
    for entry in metadata.resource_listdir('/'):
        if not entry.endswith('.egg'):
            continue
        nested_path = os.path.join(path_item, entry)
        nested_importer = zipimport.zipimporter(nested_path)
        for dist in find_eggs_in_zip(nested_importer, nested_path):
            yield dist
# Zip archives on sys.path are searched for (possibly nested) eggs.
register_finder(zipimport.zipimporter, find_eggs_in_zip)
def find_nothing(importer, path_item, only=False):
    """Finder that never yields distributions (for unhandled importer types)."""
    return ()
# Fallback finder: importer types with no specific finder yield nothing.
register_finder(object,find_nothing)
def find_on_path(importer, path_item, only=False):
    """Yield distributions accessible on a sys.path directory"""
    path_item = _normalize_cached(path_item)
    if os.path.isdir(path_item) and os.access(path_item, os.R_OK):
        if path_item.lower().endswith('.egg'):
            # unpacked egg
            yield Distribution.from_filename(
                path_item, metadata=PathMetadata(
                    path_item, os.path.join(path_item,'EGG-INFO')
                )
            )
        else:
            # scan for .egg and .egg-info in directory
            for entry in os.listdir(path_item):
                lower = entry.lower()
                if lower.endswith('.egg-info') or lower.endswith('.dist-info'):
                    fullpath = os.path.join(path_item, entry)
                    if os.path.isdir(fullpath):
                        # egg-info directory, allow getting metadata
                        metadata = PathMetadata(path_item, fullpath)
                    else:
                        # standalone metadata file (e.g. a PKG-INFO copy)
                        metadata = FileMetadata(fullpath)
                    yield Distribution.from_location(
                        path_item,entry,metadata,precedence=DEVELOP_DIST
                    )
                elif not only and lower.endswith('.egg'):
                    # nested egg (file or dir); recurse via the finders
                    for dist in find_distributions(os.path.join(path_item, entry)):
                        yield dist
                elif not only and lower.endswith('.egg-link'):
                    # .egg-link: first non-blank line names the real location
                    entry_file = open(os.path.join(path_item, entry))
                    try:
                        entry_lines = entry_file.readlines()
                    finally:
                        entry_file.close()
                    for line in entry_lines:
                        if not line.strip(): continue
                        for item in find_distributions(os.path.join(path_item,line.rstrip())):
                            yield item
                        break
# Plain filesystem directories on sys.path are scanned by find_on_path.
register_finder(pkgutil.ImpImporter,find_on_path)
if importlib_bootstrap is not None:
    register_finder(importlib_bootstrap.FileFinder, find_on_path)
# Importer type -> namespace-handler callable (see register_namespace_handler).
_declare_state('dict', _namespace_handlers={})
# Namespace package name -> list of its declared child namespace packages.
_declare_state('dict', _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
    """Register `namespace_handler` to declare namespace packages
    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
    handler), and `namespace_handler` is a callable like this::
        def namespace_handler(importer,path_entry,moduleName,module):
            # return a path_entry to use for child packages
    Namespace handlers are only called if the importer object has already
    agreed that it can handle the relevant path item, and they should only
    return a subpath if the module __path__ does not already contain an
    equivalent subpath. For an example namespace handler, see
    ``pkg_resources.file_ns_handler``.
    """
    # A later registration for the same importer type replaces the earlier one.
    _namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
    """Ensure that named package includes a subpath of path_item (if needed)"""
    importer = get_importer(path_item)
    if importer is None:
        # Not a supported sys.path entry type.
        return None
    loader = importer.find_module(packageName)
    if loader is None:
        # This path item does not contain the package at all.
        return None
    module = sys.modules.get(packageName)
    if module is None:
        # Create an empty package shell that __path__ entries accumulate on.
        module = sys.modules[packageName] = imp.new_module(packageName)
        module.__path__ = []
        _set_parent_ns(packageName)
    elif not hasattr(module,'__path__'):
        raise TypeError("Not a package:", packageName)
    handler = _find_adapter(_namespace_handlers, importer)
    subpath = handler(importer, path_item, packageName, module)
    if subpath is not None:
        # load_module may replace __path__, so re-merge any entries that
        # were present before loading.
        path = module.__path__
        path.append(subpath)
        loader.load_module(packageName)
        for path_item in path:
            if path_item not in module.__path__:
                module.__path__.append(path_item)
    return subpath
def declare_namespace(packageName):
    """Declare that package 'packageName' is a namespace package"""
    # Hold the import lock: we mutate sys.modules and package __path__ lists.
    imp.acquire_lock()
    try:
        if packageName in _namespace_packages:
            # Already declared; nothing to do.
            return
        path, parent = sys.path, None
        if '.' in packageName:
            # Recursively declare the parent first, then search the
            # parent's __path__ rather than sys.path.
            parent = '.'.join(packageName.split('.')[:-1])
            declare_namespace(parent)
            if parent not in _namespace_packages:
                __import__(parent)
            try:
                path = sys.modules[parent].__path__
            except AttributeError:
                raise TypeError("Not a package:", parent)
        # Track what packages are namespaces, so when new path items are added,
        # they can be updated
        _namespace_packages.setdefault(parent,[]).append(packageName)
        _namespace_packages.setdefault(packageName,[])
        for path_item in path:
            # Ensure all the parent's path items are reflected in the child,
            # if they apply
            _handle_ns(packageName, path_item)
    finally:
        imp.release_lock()
def fixup_namespace_packages(path_item, parent=None):
    """Ensure that previously-declared namespace packages include path_item"""
    imp.acquire_lock()
    try:
        # Walk the namespace-package tree rooted at `parent` (None means all
        # top-level namespace packages), grafting path_item into each one.
        for package in _namespace_packages.get(parent,()):
            subpath = _handle_ns(package, path_item)
            # A newly-added subpath may itself contain sub-namespace-packages.
            if subpath: fixup_namespace_packages(subpath,package)
    finally:
        imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
    """Compute an ns-package subpath for a filesystem or zipfile importer"""
    candidate = os.path.join(path_item, packageName.split('.')[-1])
    normalized = _normalize_cached(candidate)
    # Only report the subpath if an equivalent entry isn't already present.
    already_present = any(
        _normalize_cached(entry) == normalized for entry in module.__path__
    )
    if already_present:
        return None
    return candidate
# Wire up the default namespace-package handlers for the importer types
# that ship with the stdlib (and importlib's FileFinder when available).
register_namespace_handler(pkgutil.ImpImporter,file_ns_handler)
register_namespace_handler(zipimport.zipimporter,file_ns_handler)
if importlib_bootstrap is not None:
    register_namespace_handler(importlib_bootstrap.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
    """Fallback handler: importers with no specific adapter contribute no subpath."""
    return None
# Catch-all registration: any importer type without a more specific adapter
# falls back to the null handler, so _handle_ns finds no subpath for it.
register_namespace_handler(object,null_ns_handler)
def normalize_path(filename):
    """Normalize a file/dir name for comparison purposes"""
    # Resolve symlinks/relative segments first, then apply the platform's
    # case folding so equal paths compare equal as strings.
    resolved = os.path.realpath(filename)
    return os.path.normcase(resolved)
def _normalize_cached(filename,_cache={}):
    # Memoized wrapper around normalize_path(); the shared default dict is
    # the (deliberate) per-process cache, keyed by the raw filename string.
    if filename not in _cache:
        _cache[filename] = normalize_path(filename)
    return _cache[filename]
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
    """Yield non-empty/non-comment lines of a ``basestring`` or sequence"""
    if isinstance(strs, basestring):
        for raw in strs.splitlines():
            line = raw.strip()
            # skip blank lines and comment-only lines
            if line and not line.startswith('#'):
                yield line
    else:
        # Nested sequences flatten recursively.
        for element in strs:
            for line in yield_lines(element):
                yield line
# Scanner primitives for parse_requirements(): each name is a compiled
# pattern's bound .match method, callable as NAME(line, pos).
LINE_END = re.compile(r"\s*(#.*)?$").match # whitespace and comment
CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match # line continuation
DISTRO = re.compile(r"\s*((\w|[-.])+)").match # Distribution or extra
VERSION = re.compile(r"\s*(<=?|>=?|==|!=)\s*((\w|[-.])+)").match # ver. info
COMMA = re.compile(r"\s*,").match # comma between items
OBRACKET = re.compile(r"\s*\[").match
CBRACKET = re.compile(r"\s*\]").match
MODULE = re.compile(r"\w+(\.\w+)*$").match
# Parses egg basenames of the form name[-version[-pyX.Y[-platform]]].
EGG_NAME = re.compile(
    r"(?P<name>[^-]+)"
    r"( -(?P<ver>[^-]+) (-py(?P<pyver>[^-]+) (-(?P<plat>.+))? )? )?",
    re.VERBOSE | re.IGNORECASE
).match
# Version-string tokenizer and alias table used by _parse_version_parts().
component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
replace = {'pre':'c', 'preview':'c','-':'final-','rc':'c','dev':'@'}.get
def _parse_version_parts(s):
    # Yield comparable chunks of a version string: numeric runs are
    # zero-padded to 8 digits, alpha runs get a '*' prefix (so they sort
    # before any padded number), and known pre-release aliases are first
    # canonicalized via `replace` ('pre'/'preview'/'rc' -> 'c', 'dev' -> '@').
    for part in component_re.split(s):
        part = replace(part,part)
        if not part or part=='.':
            continue
        if part[:1] in '0123456789':
            yield part.zfill(8)    # pad for numeric comparison
        else:
            yield '*'+part
    yield '*final'  # ensure that alpha/beta/candidate are before final
def parse_version(s):
    """Convert a version string to a chronologically-sortable key
    This is a rough cross between distutils' StrictVersion and LooseVersion;
    if you give it versions that would work with StrictVersion, then it behaves
    the same; otherwise it acts like a slightly-smarter LooseVersion. It is
    *possible* to create pathological version coding schemes that will fool
    this parser, but they should be very rare in practice.
    The returned value will be a tuple of strings.  Numeric portions of the
    version are padded to 8 digits so they will compare numerically, but
    without relying on how numbers compare relative to strings.  Dots are
    dropped, but dashes are retained.  Trailing zeros between alpha segments
    or dashes are suppressed, so that e.g. "2.4.0" is considered the same as
    "2.4". Alphanumeric parts are lower-cased.
    The algorithm assumes that strings like "-" and any alpha string that
    alphabetically follows "final"  represents a "patch level".  So, "2.4-1"
    is assumed to be a branch or patch of "2.4", and therefore "2.4.1" is
    considered newer than "2.4-1", which in turn is newer than "2.4".
    Strings like "a", "b", "c", "alpha", "beta", "candidate" and so on (that
    come before "final" alphabetically) are assumed to be pre-release versions,
    so that the version "2.4" is considered newer than "2.4a1".
    Finally, to handle miscellaneous cases, the strings "pre", "preview", and
    "rc" are treated as if they were "c", i.e. as though they were release
    candidates, and therefore are not as new as a version string that does not
    contain them, and "dev" is replaced with an '@' so that it sorts lower
    than any other pre-release tag.
    """
    parts = []
    for part in _parse_version_parts(s.lower()):
        if part.startswith('*'):
            # Alpha chunk: first normalize away separators that precede it.
            if part<'*final':   # remove '-' before a prerelease tag
                while parts and parts[-1]=='*final-': parts.pop()
            # remove trailing zeros from each series of numeric parts
            while parts and parts[-1]=='00000000':
                parts.pop()
        parts.append(part)
    return tuple(parts)
class EntryPoint(object):
    """Object representing an advertised importable object"""
    def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
        if not MODULE(module_name):
            raise ValueError("Invalid module name", module_name)
        self.name = name
        self.module_name = module_name
        self.attrs = tuple(attrs)
        # Validate the extras list by round-tripping it through a dummy
        # requirement spec ("x[extra1,extra2]").
        self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras
        self.dist = dist
    def __str__(self):
        # Render back to the "name = module:attrs [extras]" source format.
        s = "%s = %s" % (self.name, self.module_name)
        if self.attrs:
            s += ':' + '.'.join(self.attrs)
        if self.extras:
            s += ' [%s]' % ','.join(self.extras)
        return s
    def __repr__(self):
        return "EntryPoint.parse(%r)" % str(self)
    def load(self, require=True, env=None, installer=None):
        """Import and return the advertised object, optionally resolving the
        owning distribution's requirements first (see require())."""
        if require: self.require(env, installer)
        # Non-empty fromlist makes __import__ return the leaf module rather
        # than the top-level package.
        entry = __import__(self.module_name, globals(),globals(), ['__name__'])
        for attr in self.attrs:
            try:
                entry = getattr(entry,attr)
            except AttributeError:
                raise ImportError("%r has no %r attribute" % (entry,attr))
        return entry
    def require(self, env=None, installer=None):
        # Resolve and activate everything this entry point's extras need.
        if self.extras and not self.dist:
            raise UnknownExtra("Can't require() without a distribution", self)
        list(map(working_set.add,
            working_set.resolve(self.dist.requires(self.extras),env,installer)))
    @classmethod
    def parse(cls, src, dist=None):
        """Parse a single entry point from string `src`
        Entry point syntax follows the form::
            name = some.module:some.attr [extra1,extra2]
        The entry name and module name are required, but the ``:attrs`` and
        ``[extras]`` parts are optional
        """
        try:
            attrs = extras = ()
            name,value = src.split('=',1)
            if '[' in value:
                value,extras = value.split('[',1)
                # Reuse requirement parsing to validate the extras list; a
                # version spec in there would be a syntax error.
                req = Requirement.parse("x["+extras)
                if req.specs: raise ValueError
                extras = req.extras
            if ':' in value:
                value,attrs = value.split(':',1)
                if not MODULE(attrs.rstrip()):
                    raise ValueError
                attrs = attrs.rstrip().split('.')
        except ValueError:
            raise ValueError(
                "EntryPoint must be in 'name=module:attrs [extras]' format",
                src
            )
        else:
            return cls(name.strip(), value.strip(), attrs, extras, dist)
    @classmethod
    def parse_group(cls, group, lines, dist=None):
        """Parse an entry point group"""
        if not MODULE(group):
            raise ValueError("Invalid group name", group)
        this = {}
        for line in yield_lines(lines):
            ep = cls.parse(line, dist)
            if ep.name in this:
                raise ValueError("Duplicate entry point", group, ep.name)
            this[ep.name]=ep
        return this
    @classmethod
    def parse_map(cls, data, dist=None):
        """Parse a map of entry point groups"""
        if isinstance(data,dict):
            data = data.items()
        else:
            data = split_sections(data)
        maps = {}
        for group, lines in data:
            if group is None:
                # Lines before any [section] header are only legal if empty.
                if not lines:
                    continue
                raise ValueError("Entry points must be listed in groups")
            group = group.strip()
            if group in maps:
                raise ValueError("Duplicate group name", group)
            maps[group] = cls.parse_group(group, lines, dist)
        return maps
def _remove_md5_fragment(location):
if not location:
return ''
parsed = urlparse(location)
if parsed[-1].startswith('md5='):
return urlunparse(parsed[:-1] + ('',))
return location
class Distribution(object):
    """Wrap an actual or potential sys.path entry w/metadata"""
    # Name of the metadata file holding the PKG-INFO-style headers.
    PKG_INFO = 'PKG-INFO'
    def __init__(self, location=None, metadata=None, project_name=None,
            version=None, py_version=PY_MAJOR, platform=None,
            precedence=EGG_DIST):
        self.project_name = safe_name(project_name or 'Unknown')
        if version is not None:
            self._version = safe_version(version)
        self.py_version = py_version
        self.platform = platform
        self.location = location
        self.precedence = precedence
        # Metadata provider; empty_provider keeps attribute delegation
        # (see __getattr__) working even when no metadata is available.
        self._provider = metadata or empty_provider
    @classmethod
    def from_location(cls,location,basename,metadata=None,**kw):
        # Build a (possibly subclassed) Distribution from a path entry's
        # basename, parsing name/version/pyver/platform out of egg names.
        project_name, version, py_version, platform = [None]*4
        basename, ext = os.path.splitext(basename)
        if ext.lower() in _distributionImpl:
            # .dist-info gets much metadata differently
            match = EGG_NAME(basename)
            if match:
                project_name, version, py_version, platform = match.group(
                    'name','ver','pyver','plat'
                )
            cls = _distributionImpl[ext.lower()]
        return cls(
            location, metadata, project_name=project_name, version=version,
            py_version=py_version, platform=platform, **kw
        )
    # Ordering/equality key: version first, then precedence, name, etc.
    hashcmp = property(
        lambda self: (
            getattr(self,'parsed_version',()),
            self.precedence,
            self.key,
            _remove_md5_fragment(self.location),
            self.py_version,
            self.platform
        )
    )
    def __hash__(self): return hash(self.hashcmp)
    def __lt__(self, other):
        return self.hashcmp < other.hashcmp
    def __le__(self, other):
        return self.hashcmp <= other.hashcmp
    def __gt__(self, other):
        return self.hashcmp > other.hashcmp
    def __ge__(self, other):
        return self.hashcmp >= other.hashcmp
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            # It's not a Distribution, so they are not equal
            return False
        return self.hashcmp == other.hashcmp
    def __ne__(self, other):
        return not self == other
    # These properties have to be lazy so that we don't have to load any
    # metadata until/unless it's actually needed.  (i.e., some distributions
    # may not know their name or version without loading PKG-INFO)
    @property
    def key(self):
        # Lower-cased project name, cached on first access.
        try:
            return self._key
        except AttributeError:
            self._key = key = self.project_name.lower()
            return key
    @property
    def parsed_version(self):
        # Sortable tuple form of self.version, cached on first access.
        try:
            return self._parsed_version
        except AttributeError:
            self._parsed_version = pv = parse_version(self.version)
            return pv
    @property
    def version(self):
        try:
            return self._version
        except AttributeError:
            # No version given at construction: scan the metadata for a
            # 'Version:' header (for/else raises when none is found).
            for line in self._get_metadata(self.PKG_INFO):
                if line.lower().startswith('version:'):
                    self._version = safe_version(line.split(':',1)[1].strip())
                    return self._version
            else:
                raise ValueError(
                    "Missing 'Version:' header and/or %s file" % self.PKG_INFO, self
                )
    @property
    def _dep_map(self):
        # Map of extra name (or None for base requirements) -> [Requirement].
        try:
            return self.__dep_map
        except AttributeError:
            dm = self.__dep_map = {None: []}
            for name in 'requires.txt', 'depends.txt':
                for extra,reqs in split_sections(self._get_metadata(name)):
                    if extra:
                        if ':' in extra:
                            # "extra:marker" section: drop the reqs when the
                            # environment marker is invalid or evaluates false.
                            extra, marker = extra.split(':',1)
                            if invalid_marker(marker):
                                reqs=[] # XXX warn
                            elif not evaluate_marker(marker):
                                reqs=[]
                        extra = safe_extra(extra) or None
                    dm.setdefault(extra,[]).extend(parse_requirements(reqs))
            return dm
    def requires(self,extras=()):
        """List of Requirements needed for this distro if `extras` are used"""
        dm = self._dep_map
        deps = []
        deps.extend(dm.get(None,()))
        for ext in extras:
            try:
                deps.extend(dm[safe_extra(ext)])
            except KeyError:
                raise UnknownExtra(
                    "%s has no such extra feature %r" % (self, ext)
                )
        return deps
    def _get_metadata(self,name):
        # Yield the lines of the named metadata file if it exists;
        # a missing file simply yields nothing.
        if self.has_metadata(name):
            for line in self.get_metadata_lines(name):
                yield line
    def activate(self,path=None):
        """Ensure distribution is importable on `path` (default=sys.path)"""
        if path is None: path = sys.path
        self.insert_on(path)
        if path is sys.path:
            fixup_namespace_packages(self.location)
            list(map(declare_namespace, self._get_metadata('namespace_packages.txt')))
    def egg_name(self):
        """Return what this distribution's standard .egg filename should be"""
        filename = "%s-%s-py%s" % (
            to_filename(self.project_name), to_filename(self.version),
            self.py_version or PY_MAJOR
        )
        if self.platform:
            filename += '-'+self.platform
        return filename
    def __repr__(self):
        if self.location:
            return "%s (%s)" % (self,self.location)
        else:
            return str(self)
    def __str__(self):
        # version may raise ValueError when metadata is missing (see above).
        try: version = getattr(self,'version',None)
        except ValueError: version = None
        version = version or "[unknown version]"
        return "%s %s" % (self.project_name,version)
    def __getattr__(self,attr):
        """Delegate all unrecognized public attributes to .metadata provider"""
        if attr.startswith('_'):
            raise AttributeError(attr)
        return getattr(self._provider, attr)
    @classmethod
    def from_filename(cls,filename,metadata=None, **kw):
        return cls.from_location(
            _normalize_cached(filename), os.path.basename(filename), metadata,
            **kw
        )
    def as_requirement(self):
        """Return a ``Requirement`` that matches this distribution exactly"""
        return Requirement.parse('%s==%s' % (self.project_name, self.version))
    def load_entry_point(self, group, name):
        """Return the `name` entry point of `group` or raise ImportError"""
        ep = self.get_entry_info(group,name)
        if ep is None:
            raise ImportError("Entry point %r not found" % ((group,name),))
        return ep.load()
    def get_entry_map(self, group=None):
        """Return the entry point map for `group`, or the full entry map"""
        try:
            ep_map = self._ep_map
        except AttributeError:
            ep_map = self._ep_map = EntryPoint.parse_map(
                self._get_metadata('entry_points.txt'), self
            )
        if group is not None:
            return ep_map.get(group,{})
        return ep_map
    def get_entry_info(self, group, name):
        """Return the EntryPoint object for `group`+`name`, or ``None``"""
        return self.get_entry_map(group).get(name)
    def insert_on(self, path, loc = None):
        """Insert self.location in path before its nearest parent directory"""
        loc = loc or self.location
        if not loc:
            return
        nloc = _normalize_cached(loc)
        bdir = os.path.dirname(nloc)
        npath= [(p and _normalize_cached(p) or p) for p in path]
        for p, item in enumerate(npath):
            if item==nloc:
                # Already on the path; fall through to dedup below.
                break
            elif item==bdir and self.precedence==EGG_DIST:
                # if it's an .egg, give it precedence over its directory
                if path is sys.path:
                    self.check_version_conflict()
                path.insert(p, loc)
                npath.insert(p, nloc)
                break
        else:
            if path is sys.path:
                self.check_version_conflict()
            path.append(loc)
            return
        # p is the spot where we found or inserted loc; now remove duplicates
        while 1:
            try:
                np = npath.index(nloc, p+1)
            except ValueError:
                break
            else:
                del npath[np], path[np]
                p = np # ha!
        return
    def check_version_conflict(self):
        # Warn if a module this distro provides was already imported from
        # somewhere other than this distro's own location.
        if self.key=='setuptools':
            return # ignore the inevitable setuptools self-conflicts :(
        nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
        loc = normalize_path(self.location)
        for modname in self._get_metadata('top_level.txt'):
            if (modname not in sys.modules or modname in nsp
                or modname in _namespace_packages):
                continue
            if modname in ('pkg_resources', 'setuptools', 'site'):
                continue
            fn = getattr(sys.modules[modname], '__file__', None)
            if fn and (normalize_path(fn).startswith(loc) or
                fn.startswith(self.location)):
                continue
            issue_warning(
                "Module %s was already imported from %s, but %s is being added"
                " to sys.path" % (modname, fn, self.location),
            )
    def has_version(self):
        # True if metadata yields a usable version; warns on unbuilt eggs.
        try:
            self.version
        except ValueError:
            issue_warning("Unbuilt egg for "+repr(self))
            return False
        return True
    def clone(self,**kw):
        """Copy this distribution, substituting in any changed keyword args"""
        for attr in (
            'project_name', 'version', 'py_version', 'platform', 'location',
            'precedence'
        ):
            kw.setdefault(attr, getattr(self,attr,None))
        kw.setdefault('metadata', self._provider)
        return self.__class__(**kw)
    @property
    def extras(self):
        # Names of all declared extras (the None key holds base requirements).
        return [dep for dep in self._dep_map if dep]
class DistInfoDistribution(Distribution):
    """Wrap an actual or potential sys.path entry w/metadata, .dist-info style"""
    PKG_INFO = 'METADATA'
    # Matches "(1.0" / ",1.0" etc., so bare versions can get an '==' prefix.
    EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
    @property
    def _parsed_pkg_info(self):
        """Parse and cache metadata"""
        try:
            return self._pkg_info
        except AttributeError:
            from email.parser import Parser
            self._pkg_info = Parser().parsestr(self.get_metadata(self.PKG_INFO))
            return self._pkg_info
    @property
    def _dep_map(self):
        # NOTE: name mangling makes this __dep_map a different attribute from
        # the parent class's __dep_map, so the two caches never collide.
        try:
            return self.__dep_map
        except AttributeError:
            self.__dep_map = self._compute_dependencies()
            return self.__dep_map
    def _preparse_requirement(self, requires_dist):
        """Convert 'Foobar (1); baz' to ('Foobar ==1', 'baz')
        Split environment marker, add == prefix to version specifiers as
        necessary, and remove parenthesis.
        """
        parts = requires_dist.split(';', 1) + ['']
        distvers = parts[0].strip()
        mark = parts[1].strip()
        distvers = re.sub(self.EQEQ, r"\1==\2\3", distvers)
        distvers = distvers.replace('(', '').replace(')', '')
        return (distvers, mark)
    def _compute_dependencies(self):
        """Recompute this distribution's dependencies."""
        from _markerlib import compile as compile_marker
        dm = self.__dep_map = {None: []}
        reqs = []
        # Including any condition expressions
        for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
            distvers, mark = self._preparse_requirement(req)
            parsed = next(parse_requirements(distvers))
            # Attach the compiled environment-marker predicate to the req.
            parsed.marker_fn = compile_marker(mark)
            reqs.append(parsed)
        def reqs_for_extra(extra):
            for req in reqs:
                if req.marker_fn(override={'extra':extra}):
                    yield req
        common = frozenset(reqs_for_extra(None))
        dm[None].extend(common)
        for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
            extra = safe_extra(extra.strip())
            # Per-extra deps exclude those already in the common set.
            dm[extra] = list(frozenset(reqs_for_extra(extra)) - common)
        return dm
# Maps metadata-directory extension -> Distribution subclass used to wrap it
# (consulted by Distribution.from_location).
_distributionImpl = {
    '.egg': Distribution,
    '.egg-info': Distribution,
    '.dist-info': DistInfoDistribution,
    }
def issue_warning(*args, **kw):
    """Issue a warning, attributed to the first caller outside this module."""
    here = globals()
    depth = 1
    try:
        # find the first stack frame that is *not* code in
        # the pkg_resources module, to use for the warning
        while sys._getframe(depth).f_globals is here:
            depth += 1
    except ValueError:
        # Ran off the top of the stack; warn from wherever we stopped.
        pass
    from warnings import warn
    warn(stacklevel=depth + 1, *args, **kw)
def parse_requirements(strs):
    """Yield ``Requirement`` objects for each specification in `strs`
    `strs` must be an instance of ``basestring``, or a (possibly-nested)
    iterable thereof.
    """
    # create a steppable iterator, so we can handle \-continuations
    lines = iter(yield_lines(strs))
    def scan_list(ITEM,TERMINATOR,line,p,groups,item_name):
        # Generic comma-separated-list scanner: consume items matched by
        # regex `ITEM` starting at position `p` until `TERMINATOR` matches;
        # returns the (possibly advanced) current line, position, and items.
        items = []
        while not TERMINATOR(line,p):
            if CONTINUE(line,p):
                # Backslash continuation: pull in the next physical line.
                try:
                    line = next(lines)
                    p = 0
                except StopIteration:
                    raise ValueError(
                        "\\ must not appear on the last nonblank line"
                    )
            match = ITEM(line,p)
            if not match:
                raise ValueError("Expected "+item_name+" in",line,"at",line[p:])
            items.append(match.group(*groups))
            p = match.end()
            match = COMMA(line,p)
            if match:
                p = match.end() # skip the comma
            elif not TERMINATOR(line,p):
                raise ValueError(
                    "Expected ',' or end-of-list in",line,"at",line[p:]
                )
        match = TERMINATOR(line,p)
        if match: p = match.end()   # skip the terminator, if any
        return line, p, items
    for line in lines:
        match = DISTRO(line)
        if not match:
            raise ValueError("Missing distribution spec", line)
        project_name = match.group(1)
        p = match.end()
        extras = []
        match = OBRACKET(line,p)
        if match:
            # Optional "[extra1,extra2]" list after the project name.
            p = match.end()
            line, p, extras = scan_list(
                DISTRO, CBRACKET, line, p, (1,), "'extra' name"
            )
        # Whatever remains must be a comma-separated list of version specs.
        line, p, specs = scan_list(VERSION,LINE_END,line,p,(1,2),"version spec")
        specs = [(op,safe_version(val)) for op,val in specs]
        yield Requirement(project_name, specs, extras)
def _sort_dists(dists):
tmp = [(dist.hashcmp,dist) for dist in dists]
tmp.sort()
dists[::-1] = [d for hc,d in tmp]
class Requirement:
    def __init__(self, project_name, specs, extras):
        """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
        self.unsafe_name, project_name = project_name, safe_name(project_name)
        self.project_name, self.key = project_name, project_name.lower()
        # Decorate each spec with its parsed version and the state-machine
        # row for its operator, then sort by parsed version.
        index = [(parse_version(v),state_machine[op],op,v) for op,v in specs]
        index.sort()
        self.specs = [(op,ver) for parsed,trans,op,ver in index]
        self.index, self.extras = index, tuple(map(safe_extra,extras))
        # Equality/hash key: name, (op, parsed-version) pairs, and extras.
        self.hashCmp = (
            self.key, tuple([(op,parsed) for parsed,trans,op,ver in index]),
            frozenset(self.extras)
        )
        self.__hash = hash(self.hashCmp)
    def __str__(self):
        specs = ','.join([''.join(s) for s in self.specs])
        extras = ','.join(self.extras)
        if extras: extras = '[%s]' % extras
        return '%s%s%s' % (self.project_name, extras, specs)
    def __eq__(self,other):
        return isinstance(other,Requirement) and self.hashCmp==other.hashCmp
    def __contains__(self,item):
        # Accepts a Distribution (matched by key + version) or a version
        # string / pre-parsed version tuple.
        if isinstance(item,Distribution):
            if item.key != self.key: return False
            if self.index: item = item.parsed_version # only get if we need it
        elif isinstance(item,basestring):
            item = parse_version(item)
        last = None
        compare = lambda a, b: (a > b) - (a < b) # -1, 0, 1
        # Run the sorted specs through the operator state machine; 'T'/'F'
        # decide immediately, '+'/'-' leave a provisional answer in `last`.
        for parsed,trans,op,ver in self.index:
            action = trans[compare(item,parsed)] # Indexing: 0, 1, -1
            if action=='F':
                return False
            elif action=='T':
                return True
            elif action=='+':
                last = True
            elif action=='-' or last is None:   last = False
        if last is None:  last = True # no rules encountered
        return last
    def __hash__(self):
        return self.__hash
    def __repr__(self): return "Requirement.parse(%r)" % str(self)
    @staticmethod
    def parse(s):
        reqs = list(parse_requirements(s))
        if reqs:
            if len(reqs)==1:
                return reqs[0]
            raise ValueError("Expected only one requirement", s)
        raise ValueError("No requirements found", s)
# Action table for Requirement.__contains__: for each operator, a 3-char
# string of actions indexed by cmp(candidate, spec_version) -> 0 (equal),
# 1 (greater), -1 (less).  'T'/'F' decide immediately; '+'/'-' record a
# provisional pass/fail that stands if no later rule decides.
state_machine = {
    # =><
    '<' : '--T',
    '<=': 'T-T',
    '>' : 'F+F',
    '>=': 'T+F',
    '==': 'T..',
    '!=': 'F++',
}
def _get_mro(cls):
"""Get an mro for a type or classic class"""
if not isinstance(cls,type):
class cls(cls,object): pass
return cls.__mro__[1:]
return cls.__mro__
def _find_adapter(registry, ob):
    """Return an adapter factory for `ob` from `registry`"""
    # Walk the MRO of ob's class and return the first registered factory;
    # falls back to None when nothing (not even `object`) is registered.
    mro = _get_mro(getattr(ob, '__class__', type(ob)))
    return next((registry[t] for t in mro if t in registry), None)
def ensure_directory(path):
    """Ensure that the parent directory of `path` exists"""
    parent = os.path.dirname(path)
    if not os.path.isdir(parent):
        os.makedirs(parent)
def split_sections(s):
    """Split a string or iterable thereof into (section,content) pairs
    Each ``section`` is a stripped version of the section header ("[section]")
    and each ``content`` is a list of stripped lines excluding blank lines and
    comment-only lines.  If there are any such lines before the first section
    header, they're returned in a first ``section`` of ``None``.
    """
    current_section = None
    current_lines = []
    for line in yield_lines(s):
        if not line.startswith("["):
            current_lines.append(line)
            continue
        if not line.endswith("]"):
            raise ValueError("Invalid section heading", line)
        # Starting a new section: emit the previous segment first, but
        # suppress an empty leading `None` section.
        if current_section or current_lines:
            yield current_section, current_lines
        current_section = line[1:-1].strip()
        current_lines = []
    # wrap up last segment
    yield current_section, current_lines
def _mkstemp(*args,**kw):
    # mkstemp() calls os.open(), which the sandboxing machinery replaces;
    # swap the real opener (os_open) back in only for the duration of the
    # call so temp-file creation isn't blocked, then restore the sandbox.
    from tempfile import mkstemp
    old_open = os.open
    try:
        os.open = os_open   # temporarily bypass sandboxing
        return mkstemp(*args,**kw)
    finally:
        os.open = old_open  # and then put it back
# Set up global resource manager (deliberately not state-saved)
_manager = ResourceManager()
def _initialize(g):
    # Re-export every public ResourceManager method as a module-level
    # function bound to the `_manager` singleton.
    for name in dir(_manager):
        if not name.startswith('_'):
            g[name] = getattr(_manager, name)
_initialize(globals())
# Prepare the master working set and make the ``require()`` API available
_declare_state('object', working_set = WorkingSet())
try:
    # Does the main program list any requirements?
    from __main__ import __requires__
except ImportError:
    pass # No: just use the default working set based on sys.path
else:
    # Yes: ensure the requirements are met, by prefixing sys.path if necessary
    try:
        working_set.require(__requires__)
    except VersionConflict:     # try it without defaults already on sys.path
        working_set = WorkingSet([]) # by starting with an empty path
        for dist in working_set.resolve(
            parse_requirements(__requires__), Environment()
        ):
            working_set.add(dist)
        for entry in sys.path:  # add any missing entries from sys.path
            if entry not in working_set.entries:
                working_set.add_entry(entry)
        sys.path[:] = working_set.entries # then copy back to sys.path
# Module-level convenience aliases bound to the global working set.
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
run_main = run_script   # backward compatibility
# Activate all distributions already on sys.path, and ensure that
# all distributions added to the working set in the future (e.g. by
# calling ``require()``) will get activated as well.
add_activation_listener(lambda dist: dist.activate())
working_set.entries=[]
list(map(working_set.add_entry,sys.path)) # match order
| gpl-3.0 |
MostafaGazar/tensorflow | tensorflow/contrib/bayesflow/python/ops/entropy.py | 3 | 15139 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Entropy Ops.
## Background
Common Shannon entropy, the Evidence Lower BOund (ELBO), KL divergence, and more
all have information theoretic use and interpretations. They are also often
used in variational inference. This library brings together `Ops` for
estimating them, e.g. using Monte Carlo expectations.
## Examples
Example of fitting a variational posterior with the ELBO.
```python
# We start by assuming knowledge of the log of a joint density p(z, x) over
# latent variable z and fixed measurement x. Since x is fixed, the Python
# function does not take x as an argument.
def log_joint(z):
theta = tf.Variable(0.) # Trainable variable that helps define log_joint.
...
# Next, define a Normal distribution with trainable parameters.
q = distributions.Normal(mu=tf.Variable(0.), sigma=tf.Variable(1.))
# Now, define a loss function (negative ELBO) that, when minimized, will adjust
# mu, sigma, and theta, increasing the ELBO, which we hope will both reduce the
# KL divergence between q(z) and p(z | x), and increase p(x). Note that we
# cannot guarantee both, but in general we expect both to happen.
elbo = entropy.elbo_ratio(log_p, q, n=10)
loss = -elbo
# Minimize the loss
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
tf.initialize_all_variables().run()
for step in range(100):
train_op.run()
```
## Ops
@@elbo_ratio
@@entropy_shannon
@@renyi_ratio
@@renyi_alpha
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.contrib.bayesflow.python.ops import monte_carlo
from tensorflow.contrib.bayesflow.python.ops import variational_inference
from tensorflow.python.framework import ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
# Make utility functions from monte_carlo available.
# pylint: disable=protected-access
_get_samples = monte_carlo._get_samples
_logspace_mean = monte_carlo._logspace_mean
_sample_mean = monte_carlo._sample_mean
# pylint: enable=protected-access
# Public API of this module.
__all__ = [
    'elbo_ratio',
    'entropy_shannon',
    'renyi_ratio',
    'renyi_alpha',
]
# Re-export the ELBO computation-form enum for callers of this module.
ELBOForms = variational_inference.ELBOForms # pylint: disable=invalid-name
def elbo_ratio(log_p,
               q,
               z=None,
               n=None,
               seed=None,
               form=None,
               name='elbo_ratio'):
  r"""Estimate of the ratio appearing in the `ELBO` and `KL` divergence.
  With `p(z) := exp{log_p(z)}`, this `Op` returns an approximation of
  ```
  E_q[ Log[p(Z) / q(Z)] ]
  ```
  The term `E_q[ Log[p(Z)] ]` is always computed as a sample mean.
  The term `E_q[ Log[q(z)] ]` can be computed with samples, or an exact formula
  if `q.entropy()` is defined.  This is controlled with the kwarg `form`.
  This log-ratio appears in different contexts:
  #### `KL[q || p]`
  If `log_p(z) = Log[p(z)]` for distribution `p`, this `Op` approximates
  the negative Kullback-Leibler divergence.
  ```
  elbo_ratio(log_p, q, n=100) = -1 * KL[q || p],
  KL[q || p] = E[ Log[q(Z)] - Log[p(Z)] ]
  ```
  Note that if `p` is a `Distribution`, then `distributions.kl(q, p)` may be
  defined and available as an exact result.
  #### ELBO
  If `log_p(z) = Log[p(z, x)]` is the log joint of a distribution `p`, this is
  the Evidence Lower BOund (ELBO):
  ```
  ELBO ~= E[ Log[p(Z, x)] - Log[q(Z)] ]
      = Log[p(x)] - KL[q || p]
     <= Log[p(x)]
  ```
  User supplies either `Tensor` of samples `z`, or number of samples to draw `n`
  Args:
    log_p:  Callable mapping samples from `q` to `Tensors` with
      shape broadcastable to `q.batch_shape`.
      For example, `log_p` works "just like" `q.log_prob`.
    q:  `tf.contrib.distributions.BaseDistribution`.
    z:  `Tensor` of samples from `q`, produced by `q.sample_n`.
    n:  Integer `Tensor`.  Number of samples to generate if `z` is not provided.
    seed:  Python integer to seed the random number generator.
    form:  Either `ELBOForms.analytic_entropy` (use formula for entropy of `q`)
      or `ELBOForms.sample` (sample estimate of entropy), or `ELBOForms.default`
      (attempt analytic entropy, fallback on sample).
      Default value is `ELBOForms.default`.
    name:  A name to give this `Op`.
  Returns:
    Scalar `Tensor` holding sample mean KL divergence.  `shape` is the batch
      shape of `q`, and `dtype` is the same as `q`.
  Raises:
    ValueError:  If `form` is not handled by this function.
  """
  form = ELBOForms.default if form is None else form
  with ops.name_scope(name, values=[n, z]):
    z = _get_samples(q, z, n, seed)
    # E_q[ Log[q(Z)] ], analytic or sampled according to `form`.
    entropy = entropy_shannon(q, z=z, form=form)
    # If log_p(z) = Log[p(z)], cross entropy = -E_q[log(p(Z))]
    negative_cross_entropy = _sample_mean(log_p(z))
    return entropy + negative_cross_entropy
def entropy_shannon(p,
                    z=None,
                    n=None,
                    seed=None,
                    form=None,
                    name='entropy_shannon'):
  r"""Monte Carlo or deterministic computation of Shannon's entropy.
  Depending on the kwarg `form`, this `Op` returns either the analytic entropy
  of the distribution `p`, or the sampled entropy:
  ```
  -n^{-1} sum_{i=1}^n p.log_prob(z_i),  where z_i ~ p,
      \approx - E_p[ Log[p(Z)] ]
      = Entropy[p]
  ```
  User supplies either `Tensor` of samples `z`, or number of samples to draw `n`
  Args:
    p:  `tf.contrib.distributions.BaseDistribution`
    z:  `Tensor` of samples from `p`, produced by `p.sample_n(n)` for some `n`.
    n:  Integer `Tensor`.  Number of samples to generate if `z` is not provided.
    seed:  Python integer to seed the random number generator.
    form:  Either `ELBOForms.analytic_entropy` (use formula for entropy of `p`)
      or `ELBOForms.sample` (sample estimate of entropy), or `ELBOForms.default`
      (attempt analytic entropy, fallback on sample).
      Default value is `ELBOForms.default`.
    name:  A name to give this `Op`.
  Returns:
    A `Tensor` with same `dtype` as `p`, and shape equal to `p.batch_shape`.
  Raises:
    ValueError:  If `form` not handled by this function.
    ValueError:  If `form` is `ELBOForms.analytic_entropy` and `n` was provided.
  """
  form = ELBOForms.default if form is None else form
  if n is not None and form == ELBOForms.analytic_entropy:
    raise ValueError('If form == ELBOForms.analytic_entropy, n must be None.')
  with ops.name_scope(name, values=[n, z]):
    # Entropy: -E_p[log(p(Z))].
    entropy = None
    # Try analytic path
    if form in [ELBOForms.default, ELBOForms.analytic_entropy]:
      try:
        entropy = p.entropy()
        logging.info('Using analytic entropy(p:%s)', p)
      except NotImplementedError as e:
        # Only fatal when the caller insisted on the analytic form;
        # otherwise fall through to the sampled estimate below.
        if form == ELBOForms.analytic_entropy:
          raise e
    elif form != ELBOForms.sample:
      raise ValueError('ELBOForm not handled by this function: %s' % form)
    # Sample path
    if entropy is None:
      logging.info('Using sampled entropy(p:%s)', p)
      entropy = -1. * monte_carlo.expectation(
          p.log_prob, p, z=z, n=n, seed=seed)
    return entropy
def renyi_ratio(log_p, q, alpha, z=None, n=None, seed=None, name='renyi_ratio'):
  r"""Monte Carlo estimate of the ratio appearing in Renyi divergence.

  With `z_i` iid samples from `q`, and `exp{log_p(z)} = p(z)`, this `Op`
  returns the (biased for finite `n`) estimate:

  ```
  (1 - alpha)^{-1} Log[ n^{-1} sum_{i=1}^n ( p(z_i) / q(z_i) )^{1 - alpha},
  \approx (1 - alpha)^{-1} Log[ E_q[ (p(Z) / q(Z))^{1 - alpha} ] ]
  ```

  When `log_p(z) = Log[p(z)]` for a normalized distribution `p` and
  `alpha > 0`, `alpha != 1`, this approximates `-1` times the Renyi
  divergence `D_alpha[q || p]` (see "Renyi Divergence Variational
  Inference", Li and Turner).  When `log_p(z) = Log[p(z, x)]` is a joint
  log prob, it yields a Renyi-based log evidence approximation
  `L_alpha(q, p) = Log[p(x)] - D_alpha[q || p]`, smoothly interpolating
  between the ELBO and the true evidence for `0 < alpha < 1`.

  Stability: when `1 - alpha` is not small, `(p(z) / q(z))^{1 - alpha}` can
  under/overflow, so the ratio is evaluated in log-space after centering.
  Infinite/NaN results may still occur; consider shrinking `alpha`
  gradually (see `renyi_alpha`) and using `float64`.

  Bias: by nonlinearity of `Log`, this estimate is biased for finite `n`.
  For `alpha < 1` it is non-decreasing with `n` in expectation; at `n = 1`
  it coincides with `elbo_ratio`.

  User supplies either `Tensor` of samples `z`, or number of samples to draw `n`

  Args:
    log_p: Callable mapping samples from `q` to `Tensors` with
      shape broadcastable to `q.batch_shape`.
      For example, `log_p` works "just like" `q.log_prob`.
    q: `tf.contrib.distributions.BaseDistribution`.
      `float64` `dtype` recommended.
      `log_p` and `q` should be supported on the same set.
    alpha: `Tensor` with shape `q.batch_shape` and values not equal to 1.
    z: `Tensor` of samples from `q`, produced by `q.sample_n`.
    n: Integer `Tensor`. The number of samples to use if `z` is not provided.
      Note that this can be highly biased for small `n`, see docstring.
    seed: Python integer to seed the random number generator.
    name: A name to give this `Op`.

  Returns:
    renyi_result: The scaled log of sample mean. `Tensor` with `shape` equal
      to batch shape of `q`, and `dtype` = `q.dtype`.
  """
  with ops.name_scope(name, values=[alpha, n, z]):
    z = _get_samples(q, z, n, seed)

    # log_ratios_i = (1 - alpha) * Log[p(z_i) / q(z_i)].
    # Note that the mean of q.log_prob(z) is NOT replaced by the analytic
    # q.entropy(): doing so increases variance and can yield NaN/Inf in this
    # sensitive term.
    one_minus_alpha = 1. - alpha
    log_ratios = one_minus_alpha * (log_p(z) - q.log_prob(z))

    # Log[ E_q[ (p / q)^{1 - alpha} ] ], evaluated stably in log-space.
    log_mean = _logspace_mean(log_ratios)

    return log_mean / one_minus_alpha
def renyi_alpha(step,
                decay_time,
                alpha_min,
                alpha_max=0.99999,
                name='renyi_alpha'):
  r"""Exponentially decaying `Tensor` appropriate for Renyi ratios.

  Minimizing the Renyi divergence for `0 <= alpha < 1` (or maximizing its
  ELBO analogue) in high dimensions commonly produces `NaN`/`inf` when
  `alpha` is far from `1`, so it is often desirable to begin optimization
  with `alpha` close to 1 and decay it to a final `alpha_min` on a schedule.

  This `Op` returns an `alpha` decaying exponentially with step:

  ```
  s(step) = (exp{step / decay_time} - 1) / (e - 1)
  t(s) = max(0, min(s, 1)), (smooth growth from 0 to 1)
  alpha(t) = (1 - t) alpha_min + t alpha_max
  ```

  Args:
    step: Non-negative scalar `Tensor`. Typically the global step or an
      offset version thereof.
    decay_time: Positive scalar `Tensor`.
    alpha_min: `float` or `double` `Tensor`.
      The minimal, final value of `alpha`, achieved when `step >= decay_time`
    alpha_max: `Tensor` of same `dtype` as `alpha_min`.
      The maximal, beginning value of `alpha`, achieved when `step == 0`
    name: A name to give this `Op`.

  Returns:
    alpha: A `Tensor` of same `dtype` as `alpha_min`.
  """
  with ops.name_scope(name, values=[step, decay_time, alpha_min, alpha_max]):
    # alpha_min anchors the dtype for every other input.
    alpha_min = ops.convert_to_tensor(alpha_min, name='alpha_min')
    dtype = alpha_min.dtype
    alpha_max = ops.convert_to_tensor(alpha_max, dtype=dtype, name='alpha_max')
    decay_time = math_ops.cast(decay_time, dtype)
    step = math_ops.cast(step, dtype)

    # All inputs must be scalars, and step/decay_time correctly signed.
    assertions = [
        check_ops.assert_rank(step, 0, message='step must be scalar'),
        check_ops.assert_rank(
            decay_time, 0, message='decay_time must be scalar'),
        check_ops.assert_rank(alpha_min, 0, message='alpha_min must be scalar'),
        check_ops.assert_rank(alpha_max, 0, message='alpha_max must be scalar'),
        check_ops.assert_non_negative(
            step, message='step must be non-negative'),
        check_ops.assert_positive(
            decay_time, message='decay_time must be positive'),
    ]
    with ops.control_dependencies(assertions):
      # Exponential growth from 0 (step=0) to 1 (step=decay_time), clipped.
      growth = (math_ops.exp(step / decay_time) - 1.) / (math.e - 1.)
      t = math_ops.minimum(math_ops.maximum(growth, 0.), 1.)
      # Linear interpolation from alpha_max down to alpha_min.
      return alpha_max * (1. - t) + alpha_min * t
| apache-2.0 |
yoer/hue | desktop/core/ext-py/python-openid-2.2.5/openid/test/test_discover.py | 65 | 26516 | import sys
import unittest
import datadriven
import os.path
from openid import fetchers
from openid.fetchers import HTTPResponse
from openid.yadis.discover import DiscoveryFailure
from openid.consumer import discover
from openid.yadis import xrires
from openid.yadis.xri import XRI
from urlparse import urlsplit
from openid import message
### Tests for conditions that trigger DiscoveryFailure
class SimpleMockFetcher(object):
    """Replays a fixed queue of canned HTTP responses.

    Each fetch() must be a GET (no body) for the URL of the next queued
    response; that response is popped off the queue and returned.
    """

    def __init__(self, responses):
        # Copy into a list we can consume without mutating the caller's.
        self.responses = list(responses)

    def fetch(self, url, body=None, headers=None):
        canned = self.responses.pop(0)
        assert body is None
        assert canned.final_url == url
        return canned
class TestDiscoveryFailure(datadriven.DataDrivenTestCase):
    """Each case is a list of canned HTTP responses; discovering the URL of
    the first response must raise DiscoveryFailure carrying the status of
    the *last* response in the chain."""
    cases = [
        [HTTPResponse('http://network.error/', None)],
        [HTTPResponse('http://not.found/', 404)],
        [HTTPResponse('http://bad.request/', 400)],
        [HTTPResponse('http://server.error/', 500)],
        # A successful page whose X-XRDS-Location header points at a
        # missing XRDS document.
        [HTTPResponse('http://header.found/', 200,
                      headers={'x-xrds-location':'http://xrds.missing/'}),
         HTTPResponse('http://xrds.missing/', 404)],
        ]
    def __init__(self, responses):
        # The URL under test is the first response's final URL.
        self.url = responses[0].final_url
        datadriven.DataDrivenTestCase.__init__(self, self.url)
        self.responses = responses
    def setUp(self):
        fetcher = SimpleMockFetcher(self.responses)
        fetchers.setDefaultFetcher(fetcher)
    def tearDown(self):
        # Restore the real default fetcher for other tests.
        fetchers.setDefaultFetcher(None)
    def runOneTest(self):
        expected_status = self.responses[-1].status
        try:
            discover.discover(self.url)
        except DiscoveryFailure, why:
            self.failUnlessEqual(why.http_response.status, expected_status)
        else:
            self.fail('Did not raise DiscoveryFailure')
### Tests for raising/catching exceptions from the fetcher through the
### discover function
# Python 2.5 displays a message when running this test, which is
# testing the behaviour in the presence of string exceptions,
# deprecated or not, so tell it no to complain when this particular
# string exception is raised.
import warnings
# Suppress only the DeprecationWarning for the deliberate string exception
# raised by TestFetchException below; the filter is pinned to this module
# (and line 77 in the original file layout).
warnings.filterwarnings('ignore', 'raising a string.*', DeprecationWarning,
                        r'^openid\.test\.test_discover$', 77)
class ErrorRaisingFetcher(object):
    """Fetcher stub whose fetch() always raises the configured object."""

    def __init__(self, thing_to_raise):
        self._to_raise = thing_to_raise

    def fetch(self, url, body=None, headers=None):
        # May be an Exception instance or (on old Pythons) a plain string.
        raise self._to_raise
class DidFetch(Exception):
    """Marker exception type, distinct from the builtins, used to prove
    that fetcher errors propagate through discover() unchanged."""
class TestFetchException(datadriven.DataDrivenTestCase):
    """Make sure exceptions get passed through discover function from
    fetcher."""
    cases = [
        Exception(),
        DidFetch(),
        ValueError(),
        RuntimeError(),
        ]
    # String exceptions are finally gone from Python 2.6.
    if sys.version_info[:2] < (2, 6):
        cases.append('oi!')
    def __init__(self, exc):
        datadriven.DataDrivenTestCase.__init__(self, repr(exc))
        self.exc = exc
    def setUp(self):
        # wrap_exceptions=False so the fetcher's exception reaches us raw
        # instead of being converted to a fetcher error.
        fetcher = ErrorRaisingFetcher(self.exc)
        fetchers.setDefaultFetcher(fetcher, wrap_exceptions=False)
    def tearDown(self):
        fetchers.setDefaultFetcher(None)
    def runOneTest(self):
        try:
            discover.discover('http://doesnt.matter/')
        except:
            # Bare except on purpose: py2 string exceptions have no value,
            # only a type, so both slots of exc_info must be inspected.
            exc = sys.exc_info()[1]
            if exc is None:
                # str exception
                self.failUnless(self.exc is sys.exc_info()[0])
            else:
                self.failUnless(self.exc is exc, exc)
        else:
            self.fail('Expected %r', self.exc)
### Tests for openid.consumer.discover.discover
class TestNormalization(unittest.TestCase):
    """URL normalization performed by discover() before fetching."""
    def testAddingProtocol(self):
        # A scheme-less URL with a port must be parsed (and http:// added)
        # before any fetch happens; the RuntimeError from the fetcher
        # proves parsing succeeded and the fetch was attempted.
        f = ErrorRaisingFetcher(RuntimeError())
        fetchers.setDefaultFetcher(f, wrap_exceptions=False)
        try:
            discover.discover('users.stompy.janrain.com:8000/x')
        except DiscoveryFailure, why:
            self.fail('failed to parse url with port correctly')
        except RuntimeError:
            pass #expected
        fetchers.setDefaultFetcher(None)
class DiscoveryMockFetcher(object):
    """Serves canned documents keyed by URL and records every request."""

    # When set, every response reports this URL as its final (post-redirect)
    # URL instead of the requested one.
    redirect = None

    def __init__(self, documents):
        self.documents = documents
        self.fetchlog = []

    def fetch(self, url, body=None, headers=None):
        self.fetchlog.append((url, body, headers))
        final_url = self.redirect or url
        if url in self.documents:
            status = 200
            ctype, body = self.documents[url]
        else:
            # Unknown URL: emulate a plain 404 with an empty body.
            status = 404
            ctype, body = 'text/plain', ''
        return HTTPResponse(final_url, status, {'content-type': ctype}, body)
# from twisted.trial import unittest as trialtest
class BaseTestDiscovery(unittest.TestCase):
    """Shared fixture for discovery tests: installs a mock fetcher over a
    per-test copy of `documents` and provides `_checkService` to verify a
    discovered OpenID service endpoint field by field."""
    id_url = "http://someuser.unittest/"
    documents = {}
    fetcherClass = DiscoveryMockFetcher
    def _checkService(self, s,
                      server_url,
                      claimed_id=None,
                      local_id=None,
                      canonical_id=None,
                      types=None,
                      used_yadis=False,
                      display_identifier=None
                      ):
        # `s` is an OpenIDServiceEndpoint; every keyword is an expected value.
        self.failUnlessEqual(server_url, s.server_url)
        if types == ['2.0 OP']:
            # OP Identifier entries carry no user identifiers at all and
            # are always OpenID 2.0.
            self.failIf(claimed_id)
            self.failIf(local_id)
            self.failIf(s.claimed_id)
            self.failIf(s.local_id)
            self.failIf(s.getLocalID())
            self.failIf(s.compatibilityMode())
            self.failUnless(s.isOPIdentifier())
            self.failUnlessEqual(s.preferredNamespace(),
                                 discover.OPENID_2_0_MESSAGE_NS)
        else:
            self.failUnlessEqual(claimed_id, s.claimed_id)
            self.failUnlessEqual(local_id, s.getLocalID())
        if used_yadis:
            self.failUnless(s.used_yadis, "Expected to use Yadis")
        else:
            self.failIf(s.used_yadis,
                        "Expected to use old-style discovery")
        # Map the short labels used by callers onto the real type URIs.
        openid_types = {
            '1.1': discover.OPENID_1_1_TYPE,
            '1.0': discover.OPENID_1_0_TYPE,
            '2.0': discover.OPENID_2_0_TYPE,
            '2.0 OP': discover.OPENID_IDP_2_0_TYPE,
            }
        type_uris = [openid_types[t] for t in types]
        self.failUnlessEqual(type_uris, s.type_uris)
        self.failUnlessEqual(canonical_id, s.canonicalID)
        if s.canonicalID:
            # XRI endpoints: the display identifier is the user-facing XRI,
            # never the canonical i-number that becomes the claimed_id.
            self.failUnless(s.getDisplayIdentifier() != claimed_id)
            self.failUnless(s.getDisplayIdentifier() is not None)
            self.failUnlessEqual(display_identifier, s.getDisplayIdentifier())
            self.failUnlessEqual(s.claimed_id, s.canonicalID)
        self.failUnlessEqual(s.display_identifier or s.claimed_id, s.getDisplayIdentifier())
    def setUp(self):
        # Copy so tests can add documents without polluting the class attr.
        self.documents = self.documents.copy()
        self.fetcher = self.fetcherClass(self.documents)
        fetchers.setDefaultFetcher(self.fetcher)
    def tearDown(self):
        fetchers.setDefaultFetcher(None)
def readDataFile(filename):
    """Return the contents of a fixture file from data/test_discover/.

    The path is resolved relative to this module's directory.  The handle is
    closed explicitly: the original `file(filename).read()` relied on
    CPython refcounting and leaked the descriptor on other runtimes.
    """
    module_directory = os.path.dirname(os.path.abspath(__file__))
    filename = os.path.join(
        module_directory, 'data', 'test_discover', filename)
    f = open(filename)
    try:
        return f.read()
    finally:
        f.close()
class TestDiscovery(BaseTestDiscovery):
    """URL-based discovery over every supported document flavor: plain HTML
    link-rel markup (OpenID 1.x and 2.0), Yadis/XRDS documents, and the
    combinations and failure modes between them."""
    def _discover(self, content_type, data,
                  expected_services, expected_id=None):
        # Serve `data` at self.id_url, run discovery, and check the number
        # of services and the resulting claimed identifier.
        if expected_id is None:
            expected_id = self.id_url
        self.documents[self.id_url] = (content_type, data)
        id_url, services = discover.discover(self.id_url)
        self.failUnlessEqual(expected_services, len(services))
        self.failUnlessEqual(expected_id, id_url)
        return services
    def test_404(self):
        self.failUnlessRaises(DiscoveryFailure,
                              discover.discover, self.id_url + '/404')
    def test_noOpenID(self):
        # Non-HTML junk yields no services; HTML without a delegate yields
        # a single 1.1 service whose local ID is the claimed ID itself.
        services = self._discover(content_type='text/plain',
                                  data="junk",
                                  expected_services=0)
        services = self._discover(
            content_type='text/html',
            data=readDataFile('openid_no_delegate.html'),
            expected_services=1,
            )
        self._checkService(
            services[0],
            used_yadis=False,
            types=['1.1'],
            server_url="http://www.myopenid.com/server",
            claimed_id=self.id_url,
            local_id=self.id_url,
            )
    def test_html1(self):
        services = self._discover(
            content_type='text/html',
            data=readDataFile('openid.html'),
            expected_services=1)
        self._checkService(
            services[0],
            used_yadis=False,
            types=['1.1'],
            server_url="http://www.myopenid.com/server",
            claimed_id=self.id_url,
            local_id='http://smoker.myopenid.com/',
            display_identifier=self.id_url,
            )
    def test_html1Fragment(self):
        """Ensure that the Claimed Identifier does not have a fragment
        if one is supplied in the User Input."""
        content_type = 'text/html'
        data = readDataFile('openid.html')
        expected_services = 1
        self.documents[self.id_url] = (content_type, data)
        expected_id = self.id_url
        self.id_url = self.id_url + '#fragment'
        id_url, services = discover.discover(self.id_url)
        self.failUnlessEqual(expected_services, len(services))
        self.failUnlessEqual(expected_id, id_url)
        self._checkService(
            services[0],
            used_yadis=False,
            types=['1.1'],
            server_url="http://www.myopenid.com/server",
            claimed_id=expected_id,
            local_id='http://smoker.myopenid.com/',
            display_identifier=expected_id,
            )
    def test_html2(self):
        services = self._discover(
            content_type='text/html',
            data=readDataFile('openid2.html'),
            expected_services=1,
            )
        self._checkService(
            services[0],
            used_yadis=False,
            types=['2.0'],
            server_url="http://www.myopenid.com/server",
            claimed_id=self.id_url,
            local_id='http://smoker.myopenid.com/',
            display_identifier=self.id_url,
            )
    def test_html1And2(self):
        # A page advertising both versions yields two services, 2.0 first.
        services = self._discover(
            content_type='text/html',
            data=readDataFile('openid_1_and_2.html'),
            expected_services=2,
            )
        for t, s in zip(['2.0', '1.1'], services):
            self._checkService(
                s,
                used_yadis=False,
                types=[t],
                server_url="http://www.myopenid.com/server",
                claimed_id=self.id_url,
                local_id='http://smoker.myopenid.com/',
                display_identifier=self.id_url,
                )
    def test_yadisEmpty(self):
        services = self._discover(content_type='application/xrds+xml',
                                  data=readDataFile('yadis_0entries.xml'),
                                  expected_services=0)
    def test_htmlEmptyYadis(self):
        """HTML document has discovery information, but points to an
        empty Yadis document."""
        # The XRDS document pointed to by "openid_and_yadis.html"
        self.documents[self.id_url + 'xrds'] = (
            'application/xrds+xml', readDataFile('yadis_0entries.xml'))
        services = self._discover(content_type='text/html',
                                  data=readDataFile('openid_and_yadis.html'),
                                  expected_services=1)
        self._checkService(
            services[0],
            used_yadis=False,
            types=['1.1'],
            server_url="http://www.myopenid.com/server",
            claimed_id=self.id_url,
            local_id='http://smoker.myopenid.com/',
            display_identifier=self.id_url,
            )
    def test_yadis1NoDelegate(self):
        services = self._discover(content_type='application/xrds+xml',
                                  data=readDataFile('yadis_no_delegate.xml'),
                                  expected_services=1)
        self._checkService(
            services[0],
            used_yadis=True,
            types=['1.0'],
            server_url="http://www.myopenid.com/server",
            claimed_id=self.id_url,
            local_id=self.id_url,
            display_identifier=self.id_url,
            )
    def test_yadis2NoLocalID(self):
        services = self._discover(
            content_type='application/xrds+xml',
            data=readDataFile('openid2_xrds_no_local_id.xml'),
            expected_services=1,
            )
        self._checkService(
            services[0],
            used_yadis=True,
            types=['2.0'],
            server_url="http://www.myopenid.com/server",
            claimed_id=self.id_url,
            local_id=self.id_url,
            display_identifier=self.id_url,
            )
    def test_yadis2(self):
        services = self._discover(
            content_type='application/xrds+xml',
            data=readDataFile('openid2_xrds.xml'),
            expected_services=1,
            )
        self._checkService(
            services[0],
            used_yadis=True,
            types=['2.0'],
            server_url="http://www.myopenid.com/server",
            claimed_id=self.id_url,
            local_id='http://smoker.myopenid.com/',
            display_identifier=self.id_url,
            )
    def test_yadis2OP(self):
        services = self._discover(
            content_type='application/xrds+xml',
            data=readDataFile('yadis_idp.xml'),
            expected_services=1,
            )
        self._checkService(
            services[0],
            used_yadis=True,
            types=['2.0 OP'],
            server_url="http://www.myopenid.com/server",
            display_identifier=self.id_url,
            )
    def test_yadis2OPDelegate(self):
        """The delegate tag isn't meaningful for OP entries."""
        services = self._discover(
            content_type='application/xrds+xml',
            data=readDataFile('yadis_idp_delegate.xml'),
            expected_services=1,
            )
        self._checkService(
            services[0],
            used_yadis=True,
            types=['2.0 OP'],
            server_url="http://www.myopenid.com/server",
            display_identifier=self.id_url,
            )
    def test_yadis2BadLocalID(self):
        self.failUnlessRaises(DiscoveryFailure, self._discover,
                              content_type='application/xrds+xml',
                              data=readDataFile('yadis_2_bad_local_id.xml'),
                              expected_services=1,
                              )
    def test_yadis1And2(self):
        # Unlike the HTML case, a single XRDS service element may declare
        # both protocol versions and still count as one service.
        services = self._discover(
            content_type='application/xrds+xml',
            data=readDataFile('openid_1_and_2_xrds.xml'),
            expected_services=1,
            )
        self._checkService(
            services[0],
            used_yadis=True,
            types=['2.0', '1.1'],
            server_url="http://www.myopenid.com/server",
            claimed_id=self.id_url,
            local_id='http://smoker.myopenid.com/',
            display_identifier=self.id_url,
            )
    def test_yadis1And2BadLocalID(self):
        self.failUnlessRaises(DiscoveryFailure, self._discover,
                              content_type='application/xrds+xml',
                              data=readDataFile('openid_1_and_2_xrds_bad_delegate.xml'),
                              expected_services=1,
                              )
class MockFetcherForXRIProxy(object):
    """Fake fetcher emulating an XRI proxy resolver.

    Canned documents are keyed by the XRI, which arrives as the request
    path (without its leading slash); unknown XRIs get a 404 text/plain
    response.
    """

    def __init__(self, documents, proxy_url=xrires.DEFAULT_PROXY):
        self.documents = documents
        self.fetchlog = []
        # BUG FIX: this previously assigned None, silently discarding both
        # the explicit proxy_url argument and the default.  Keep the
        # configured value instead.
        self.proxy_url = proxy_url

    def fetch(self, url, body=None, headers=None):
        """Record the request and serve the canned document for its XRI."""
        self.fetchlog.append((url, body, headers))

        u = urlsplit(url)
        proxy_host = u[1]
        xri = u[2]   # path component carries the XRI
        query = u[3]

        # A genuine proxy-resolution request carries its parameters either
        # in headers or in the query string; require at least one.
        if not headers and not query:
            raise ValueError("No headers or query; you probably didn't "
                             "mean to do that.")

        if xri.startswith('/'):
            xri = xri[1:]

        try:
            ctype, body = self.documents[xri]
        except KeyError:
            status = 404
            ctype = 'text/plain'
            body = ''
        else:
            status = 200

        return HTTPResponse(url, status, {'content-type': ctype}, body)
class TestXRIDiscovery(BaseTestDiscovery):
    """XRI-based discovery through the mock proxy resolver: delegate
    handling, xri:// prefix normalization, and the no-CanonicalID case."""
    fetcherClass = MockFetcherForXRIProxy
    documents = {'=smoker': ('application/xrds+xml',
                             readDataFile('yadis_2entries_delegate.xml')),
                 '=smoker*bad': ('application/xrds+xml',
                                 readDataFile('yadis_another_delegate.xml')) }
    def test_xri(self):
        # The claimed ID becomes the canonical i-number, while the
        # user-entered XRI is kept as the display identifier.
        user_xri, services = discover.discoverXRI('=smoker')
        self._checkService(
            services[0],
            used_yadis=True,
            types=['1.0'],
            server_url="http://www.myopenid.com/server",
            claimed_id=XRI("=!1000"),
            canonical_id=XRI("=!1000"),
            local_id='http://smoker.myopenid.com/',
            display_identifier='=smoker'
            )
        self._checkService(
            services[1],
            used_yadis=True,
            types=['1.0'],
            server_url="http://www.livejournal.com/openid/server.bml",
            claimed_id=XRI("=!1000"),
            canonical_id=XRI("=!1000"),
            local_id='http://frank.livejournal.com/',
            display_identifier='=smoker'
            )
    def test_xri_normalize(self):
        # The xri:// scheme prefix must be stripped before resolution.
        user_xri, services = discover.discoverXRI('xri://=smoker')
        self._checkService(
            services[0],
            used_yadis=True,
            types=['1.0'],
            server_url="http://www.myopenid.com/server",
            claimed_id=XRI("=!1000"),
            canonical_id=XRI("=!1000"),
            local_id='http://smoker.myopenid.com/',
            display_identifier='=smoker'
            )
        self._checkService(
            services[1],
            used_yadis=True,
            types=['1.0'],
            server_url="http://www.livejournal.com/openid/server.bml",
            claimed_id=XRI("=!1000"),
            canonical_id=XRI("=!1000"),
            local_id='http://frank.livejournal.com/',
            display_identifier='=smoker'
            )
    def test_xriNoCanonicalID(self):
        # Without a CanonicalID the XRDS entries are unusable: no services.
        user_xri, services = discover.discoverXRI('=smoker*bad')
        self.failIf(services)
    def test_useCanonicalID(self):
        """When there is no delegate, the CanonicalID should be used with XRI.
        """
        endpoint = discover.OpenIDServiceEndpoint()
        endpoint.claimed_id = XRI("=!1000")
        endpoint.canonicalID = XRI("=!1000")
        self.failUnlessEqual(endpoint.getLocalID(), XRI("=!1000"))
class TestXRIDiscoveryIDP(BaseTestDiscovery):
    """XRI discovery against an XRDS containing OP Identifier entries."""
    fetcherClass = MockFetcherForXRIProxy
    documents = {'=smoker': ('application/xrds+xml',
                             readDataFile('yadis_2entries_idp.xml')) }
    def test_xri(self):
        user_xri, services = discover.discoverXRI('=smoker')
        self.failUnless(services, "Expected services, got zero")
        self.failUnlessEqual(services[0].server_url,
                             "http://www.livejournal.com/openid/server.bml")
class TestPreferredNamespace(datadriven.DataDrivenTestCase):
    """preferredNamespace() picks OpenID 2 when any 2.0 type URI is
    present, otherwise falls back to OpenID 1 — regardless of order."""
    def __init__(self, expected_ns, type_uris):
        datadriven.DataDrivenTestCase.__init__(
            self, 'Expecting %s from %s' % (expected_ns, type_uris))
        self.expected_ns = expected_ns
        self.type_uris = type_uris
    def runOneTest(self):
        endpoint = discover.OpenIDServiceEndpoint()
        endpoint.type_uris = self.type_uris
        actual_ns = endpoint.preferredNamespace()
        self.failUnlessEqual(actual_ns, self.expected_ns)
    cases = [
        (message.OPENID1_NS, []),
        (message.OPENID1_NS, ['http://jyte.com/']),
        (message.OPENID1_NS, [discover.OPENID_1_0_TYPE]),
        (message.OPENID1_NS, [discover.OPENID_1_1_TYPE]),
        (message.OPENID2_NS, [discover.OPENID_2_0_TYPE]),
        (message.OPENID2_NS, [discover.OPENID_IDP_2_0_TYPE]),
        (message.OPENID2_NS, [discover.OPENID_2_0_TYPE,
                              discover.OPENID_1_0_TYPE]),
        (message.OPENID2_NS, [discover.OPENID_1_0_TYPE,
                              discover.OPENID_2_0_TYPE]),
        ]
class TestIsOPIdentifier(unittest.TestCase):
    """isOPIdentifier() is true iff the OP-Identifier 2.0 type URI appears
    anywhere in the endpoint's type_uris."""
    def setUp(self):
        self.endpoint = discover.OpenIDServiceEndpoint()
    def test_none(self):
        # Freshly-constructed endpoint: no type URIs at all.
        self.failIf(self.endpoint.isOPIdentifier())
    def test_openid1_0(self):
        self.endpoint.type_uris = [discover.OPENID_1_0_TYPE]
        self.failIf(self.endpoint.isOPIdentifier())
    def test_openid1_1(self):
        self.endpoint.type_uris = [discover.OPENID_1_1_TYPE]
        self.failIf(self.endpoint.isOPIdentifier())
    def test_openid2(self):
        # A plain 2.0 signon type is not an OP Identifier.
        self.endpoint.type_uris = [discover.OPENID_2_0_TYPE]
        self.failIf(self.endpoint.isOPIdentifier())
    def test_openid2OP(self):
        self.endpoint.type_uris = [discover.OPENID_IDP_2_0_TYPE]
        self.failUnless(self.endpoint.isOPIdentifier())
    def test_multipleMissing(self):
        self.endpoint.type_uris = [discover.OPENID_2_0_TYPE,
                                   discover.OPENID_1_0_TYPE]
        self.failIf(self.endpoint.isOPIdentifier())
    def test_multiplePresent(self):
        self.endpoint.type_uris = [discover.OPENID_2_0_TYPE,
                                   discover.OPENID_1_0_TYPE,
                                   discover.OPENID_IDP_2_0_TYPE]
        self.failUnless(self.endpoint.isOPIdentifier())
class TestFromOPEndpointURL(unittest.TestCase):
    """An endpoint built via fromOPEndpointURL is a pure OP Identifier:
    only the server URL is set, no user identifiers."""
    def setUp(self):
        self.op_endpoint_url = 'http://example.com/op/endpoint'
        self.endpoint = discover.OpenIDServiceEndpoint.fromOPEndpointURL(
            self.op_endpoint_url)
    def test_isOPEndpoint(self):
        self.failUnless(self.endpoint.isOPIdentifier())
    def test_noIdentifiers(self):
        self.failUnlessEqual(self.endpoint.getLocalID(), None)
        self.failUnlessEqual(self.endpoint.claimed_id, None)
    def test_compatibility(self):
        # OP Identifier flow is OpenID 2.0 only, never 1.x compatibility.
        self.failIf(self.endpoint.compatibilityMode())
    def test_canonicalID(self):
        self.failUnlessEqual(self.endpoint.canonicalID, None)
    def test_serverURL(self):
        self.failUnlessEqual(self.endpoint.server_url, self.op_endpoint_url)
class TestDiscoverFunction(unittest.TestCase):
    """discover() dispatches to discoverXRI for XRI-looking identifiers
    (xri:// scheme or a global context symbol like '=') and to discoverURI
    for everything else.  The real implementations are monkeypatched with
    markers so only the dispatch decision is tested."""
    def setUp(self):
        self._old_discoverURI = discover.discoverURI
        self._old_discoverXRI = discover.discoverXRI
        discover.discoverXRI = self.discoverXRI
        discover.discoverURI = self.discoverURI
    def tearDown(self):
        # Restore the module-level functions for other tests.
        discover.discoverURI = self._old_discoverURI
        discover.discoverXRI = self._old_discoverXRI
    def discoverXRI(self, identifier):
        return 'XRI'
    def discoverURI(self, identifier):
        return 'URI'
    def test_uri(self):
        self.failUnlessEqual('URI', discover.discover('http://woo!'))
    def test_uriForBogus(self):
        self.failUnlessEqual('URI', discover.discover('not a URL or XRI'))
    def test_xri(self):
        self.failUnlessEqual('XRI', discover.discover('xri://=something'))
    def test_xriChar(self):
        self.failUnlessEqual('XRI', discover.discover('=something'))
class TestEndpointSupportsType(unittest.TestCase):
    """supportsType() matrix: an endpoint supports exactly its declared
    type URIs, except that the OP-Identifier type implies plain 2.0."""
    def setUp(self):
        self.endpoint = discover.OpenIDServiceEndpoint()
    def failUnlessSupportsOnly(self, *types):
        # Probe every known type URI (plus a bogus one) and require that
        # exactly the listed ones are supported.
        for t in [
            'foo',
            discover.OPENID_1_1_TYPE,
            discover.OPENID_1_0_TYPE,
            discover.OPENID_2_0_TYPE,
            discover.OPENID_IDP_2_0_TYPE,
            ]:
            if t in types:
                self.failUnless(self.endpoint.supportsType(t),
                                "Must support %r" % (t,))
            else:
                self.failIf(self.endpoint.supportsType(t),
                            "Shouldn't support %r" % (t,))
    def test_supportsNothing(self):
        self.failUnlessSupportsOnly()
    def test_openid2(self):
        self.endpoint.type_uris = [discover.OPENID_2_0_TYPE]
        self.failUnlessSupportsOnly(discover.OPENID_2_0_TYPE)
    def test_openid2provider(self):
        # Declaring the OP-Identifier type also implies 2.0 signon support.
        self.endpoint.type_uris = [discover.OPENID_IDP_2_0_TYPE]
        self.failUnlessSupportsOnly(discover.OPENID_IDP_2_0_TYPE,
                                    discover.OPENID_2_0_TYPE)
    def test_openid1_0(self):
        self.endpoint.type_uris = [discover.OPENID_1_0_TYPE]
        self.failUnlessSupportsOnly(discover.OPENID_1_0_TYPE)
    def test_openid1_1(self):
        self.endpoint.type_uris = [discover.OPENID_1_1_TYPE]
        self.failUnlessSupportsOnly(discover.OPENID_1_1_TYPE)
    def test_multiple(self):
        self.endpoint.type_uris = [discover.OPENID_1_1_TYPE,
                                   discover.OPENID_2_0_TYPE]
        self.failUnlessSupportsOnly(discover.OPENID_1_1_TYPE,
                                    discover.OPENID_2_0_TYPE)
    def test_multipleWithProvider(self):
        self.endpoint.type_uris = [discover.OPENID_1_1_TYPE,
                                   discover.OPENID_2_0_TYPE,
                                   discover.OPENID_IDP_2_0_TYPE]
        self.failUnlessSupportsOnly(discover.OPENID_1_1_TYPE,
                                    discover.OPENID_2_0_TYPE,
                                    discover.OPENID_IDP_2_0_TYPE,
                                    )
class TestEndpointDisplayIdentifier(unittest.TestCase):
    """getDisplayIdentifier() must strip any URL fragment from claimed_id."""
    def test_strip_fragment(self):
        endpoint = discover.OpenIDServiceEndpoint()
        endpoint.claimed_id = 'http://recycled.invalid/#123'
        self.failUnlessEqual('http://recycled.invalid/', endpoint.getDisplayIdentifier())
def pyUnitTests():
    # Build the suite, expanding the datadriven classes' `cases` lists into
    # individual parameterized tests alongside the plain TestCase classes.
    return datadriven.loadTests(__name__)
if __name__ == '__main__':
    # Run the whole module's suite with the plain-text test runner.
    suite = pyUnitTests()
    runner = unittest.TextTestRunner()
    runner.run(suite)
| apache-2.0 |
stannynuytkens/youtube-dl | youtube_dl/extractor/stitcher.py | 60 | 2947 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
int_or_none,
js_to_json,
unescapeHTML,
)
class StitcherIE(InfoExtractor):
    """Extractor for Stitcher podcast episode pages.

    The episode metadata is embedded in the page as a JavaScript object
    literal (`var stitcher = {...}` or `var stitcherConfig = {...}`), which
    is converted to JSON and parsed.
    """
    # The numeric episode id at the end of the path is mandatory; the slug
    # preceding it (captured as display_id) is optional.
    _VALID_URL = r'https?://(?:www\.)?stitcher\.com/podcast/(?:[^/]+/)+e/(?:(?P<display_id>[^/#?&]+?)-)?(?P<id>\d+)(?:[/#?&]|$)'
    _TESTS = [{
        'url': 'http://www.stitcher.com/podcast/the-talking-machines/e/40789481?autoplay=true',
        'md5': '391dd4e021e6edeb7b8e68fbf2e9e940',
        'info_dict': {
            'id': '40789481',
            'ext': 'mp3',
            'title': 'Machine Learning Mastery and Cancer Clusters',
            'description': 'md5:55163197a44e915a14a1ac3a1de0f2d3',
            'duration': 1604,
            'thumbnail': r're:^https?://.*\.jpg',
        },
    }, {
        'url': 'http://www.stitcher.com/podcast/panoply/vulture-tv/e/the-rare-hourlong-comedy-plus-40846275?autoplay=true',
        'info_dict': {
            'id': '40846275',
            'display_id': 'the-rare-hourlong-comedy-plus',
            'ext': 'mp3',
            'title': "The CW's 'Crazy Ex-Girlfriend'",
            'description': 'md5:04f1e2f98eb3f5cbb094cea0f9e19b17',
            'duration': 2235,
            'thumbnail': r're:^https?://.*\.jpg',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # escaped title
        'url': 'http://www.stitcher.com/podcast/marketplace-on-stitcher/e/40910226?autoplay=true',
        'only_matching': True,
    }, {
        'url': 'http://www.stitcher.com/podcast/panoply/getting-in/e/episode-2a-how-many-extracurriculars-should-i-have-40876278?autoplay=true',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        audio_id = mobj.group('id')
        # Fall back to the numeric id when the URL carries no slug.
        display_id = mobj.group('display_id') or audio_id
        webpage = self._download_webpage(url, display_id)
        # Translate the embedded JS object literal to JSON, then drill down
        # to the episode sub-object.
        episode = self._parse_json(
            js_to_json(self._search_regex(
                r'(?s)var\s+stitcher(?:Config)?\s*=\s*({.+?});\n', webpage, 'episode config')),
            display_id)['config']['episode']
        # Titles may be HTML-escaped in the page source.
        title = unescapeHTML(episode['title'])
        # Audio-only: a single format from episodeURL, defaulting to mp3
        # when the URL carries no recognizable extension.
        formats = [{
            'url': episode[episode_key],
            'ext': determine_ext(episode[episode_key]) or 'mp3',
            'vcodec': 'none',
        } for episode_key in ('episodeURL',) if episode.get(episode_key)]
        # The description lives in the HTML, not in the config object;
        # non-fatal since some pages omit it.
        description = self._search_regex(
            r'Episode Info:\s*</span>([^<]+)<', webpage, 'description', fatal=False)
        duration = int_or_none(episode.get('duration'))
        thumbnail = episode.get('episodeImage')
        return {
            'id': audio_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'duration': duration,
            'thumbnail': thumbnail,
            'formats': formats,
        }
| unlicense |
nazeehshoura/crawler | env/lib/python2.7/site-packages/pip/util.py | 343 | 24172 | import sys
import shutil
import os
import stat
import re
import posixpath
import zipfile
import tarfile
import subprocess
import textwrap
from pip.exceptions import InstallationError, BadCommand, PipError
from pip.backwardcompat import(WindowsError, string_types, raw_input,
console_to_str, user_site, PermissionError)
from pip.locations import site_packages, running_under_virtualenv, virtualenv_no_global
from pip.log import logger
from pip._vendor import pkg_resources
from pip._vendor.distlib import version
# Explicit public API of this module for `from pip.util import *`.
__all__ = ['rmtree', 'display_path', 'backup_dir',
           'find_command', 'ask', 'Inf',
           'normalize_name', 'splitext',
           'format_size', 'is_installable_dir',
           'is_svn_page', 'file_contents',
           'split_leading_dir', 'has_leading_dir',
           'make_path_relative', 'normalize_path',
           'renames', 'get_terminal_size', 'get_prog',
           'unzip_file', 'untar_file', 'create_download_cache_folder',
           'cache_download', 'unpack_file', 'call_subprocess']
def get_prog():
    """Return how the user should invoke pip on the command line.

    When running as ``python -m pip`` (argv[0] is '__main__.py' or '-c'),
    reconstruct that invocation; otherwise just 'pip'.
    """
    try:
        prog_name = os.path.basename(sys.argv[0])
    except (AttributeError, TypeError, IndexError):
        # argv missing or malformed (e.g. embedded interpreters).
        return 'pip'
    if prog_name in ('__main__.py', '-c'):
        return "%s -m pip" % sys.executable
    return 'pip'
def rmtree(dir, ignore_errors=False):
    # Thin wrapper over shutil.rmtree that routes failures through
    # rmtree_errorhandler (retries Windows read-only files; see below).
    shutil.rmtree(dir, ignore_errors=ignore_errors,
                  onerror=rmtree_errorhandler)
def rmtree_errorhandler(func, path, exc_info):
    """On Windows, the files in .svn are read-only, so when rmtree() tries to
    remove them, an exception is thrown. We catch that here, remove the
    read-only attribute, and hopefully continue without problems."""
    exctype, value = exc_info[:2]
    # Only handle "access denied"; the error code lives in a different slot
    # depending on Python version and platform.
    if not ((exctype is WindowsError and value.args[0] == 5) or #others
            (exctype is OSError and value.args[0] == 13) or #python2.4
            (exctype is PermissionError and value.args[3] == 5) #python3.3
            ):
        raise
    # file type should currently be read only
    if ((os.stat(path).st_mode & stat.S_IREAD) != stat.S_IREAD):
        raise
    # convert to read/write
    os.chmod(path, stat.S_IWRITE)
    # use the original function to repeat the operation
    func(path)
def display_path(path):
    """Gives the display value for a given path: absolute and normcased,
    shortened to a './'-relative form when it lies under the current
    working directory."""
    path = os.path.normcase(os.path.abspath(path))
    cwd_prefix = os.getcwd() + os.path.sep
    if path.startswith(cwd_prefix):
        path = '.' + path[len(os.getcwd()):]
    return path
def backup_dir(dir, ext='.bak'):
    """Figure out the name of a directory to back up the given dir to:
    dir + '.bak', or '.bak2', '.bak3', ... until a non-existing name is
    found."""
    n = 1
    candidate = dir + ext
    while os.path.exists(candidate):
        n += 1
        candidate = dir + ext + str(n)
    return candidate
def find_command(cmd, paths=None, pathext=None):
    """Searches the PATH for the given command and returns its path"""
    if paths is None:
        paths = os.environ.get('PATH', '').split(os.pathsep)
    if isinstance(paths, string_types):
        paths = [paths]
    # On Windows executables may carry one of several extensions (PATHEXT).
    if pathext is None:
        pathext = get_pathext()
    extensions = [e for e in pathext.lower().split(os.pathsep) if e]
    # A command that already ends in a recognized extension is matched as-is.
    if os.path.splitext(cmd)[1].lower() in extensions:
        extensions = ['']
    for directory in paths:
        base = os.path.join(directory, cmd)
        # Probe the extended names first, then the bare name, mirroring
        # the Windows command-resolution order.
        for candidate in [base + e for e in extensions] + [base]:
            if os.path.isfile(candidate):
                return candidate
    raise BadCommand('Cannot find command %r' % cmd)
def get_pathext(default_pathext=None):
    """Returns the path extensions from environment or a default"""
    if default_pathext is None:
        # Classic Windows executable extensions.
        default_pathext = os.pathsep.join(['.COM', '.EXE', '.BAT', '.CMD'])
    return os.environ.get('PATHEXT', default_pathext)
def ask_path_exists(message, options):
    """Like ask(), but honour a pre-seeded answer from $PIP_EXISTS_ACTION
    before falling back to interactive prompting."""
    preset = os.environ.get('PIP_EXISTS_ACTION', '').split()
    for choice in preset:
        if choice in options:
            return choice
    return ask(message, options)
def ask(message, options):
    """Ask the message interactively, with the given possible responses"""
    # Loop until the user supplies one of the accepted answers.
    while 1:
        # $PIP_NO_INPUT is set in non-interactive runs (CI, scripts);
        # any prompt there is a programming error, so fail loudly.
        if os.environ.get('PIP_NO_INPUT'):
            raise Exception('No input was expected ($PIP_NO_INPUT set); question: %s' % message)
        response = raw_input(message)  # Python 2 input primitive
        response = response.strip().lower()
        if response not in options:
            print('Your response (%r) was not one of the expected responses: %s' % (
                response, ', '.join(options)))
        else:
            return response
class _Inf(object):
    """A sentinel that compares strictly greater than every other object
    (and equal only to itself)."""
    def __eq__(self, other):
        # Identity is the only equality.
        return self is other
    def __ne__(self, other):
        return not self.__eq__(other)
    def __lt__(self, other):
        return False
    def __le__(self, other):
        # Note: deliberately False even against itself, like __lt__.
        return False
    def __gt__(self, other):
        return True
    def __ge__(self, other):
        return True
    def __repr__(self):
        return 'Inf'
Inf = _Inf() #this object is not currently used as a sortable in our code
del _Inf
# Matches any character that is not a letter (case-insensitively) —
# note this includes digits and dots, which also get replaced.
_normalize_re = re.compile(r'[^a-z]', re.I)
def normalize_name(name):
    # Lower-case the name and collapse each non-letter character to '-'.
    return _normalize_re.sub('-', name.lower())
def format_size(bytes):
    """Render a byte count as a short human-readable string using
    decimal (1000-based) units."""
    MB = 1000 * 1000
    if bytes > MB:
        return '%.1fMB' % (bytes / 1000.0 / 1000)
    if bytes > 10 * 1000:
        return '%ikB' % (bytes / 1000)
    if bytes > 1000:
        return '%.1fkB' % (bytes / 1000.0)
    return '%ibytes' % bytes
def is_installable_dir(path):
    """Return True if `path` is a directory containing a setup.py file."""
    return (os.path.isdir(path)
            and os.path.isfile(os.path.join(path, 'setup.py')))
def is_svn_page(html):
    """Returns true if the page appears to be the index page of an svn repository"""
    # Both the "<title>... Revision N:" header and the "Powered by
    # Subversion" footer must be present.
    title = re.search(r'<title>[^<]*Revision \d+:', html)
    footer = re.search(r'Powered by (?:<a[^>]*?>)?Subversion', html, re.I)
    return title and footer
def file_contents(filename):
    """Read *filename* as binary and return its content decoded as UTF-8.

    Raises IOError/OSError if the file cannot be opened and
    UnicodeDecodeError if the content is not valid UTF-8.
    """
    # `with` guarantees the handle is closed, replacing the manual
    # try/finally of the original.
    with open(filename, 'rb') as fp:
        return fp.read().decode('utf-8')
def split_leading_dir(path):
    """Split off the first path component; returns (first, rest).

    Splits on whichever of '/' or '\\' occurs first in the string."""
    path = str(path).lstrip('/').lstrip('\\')
    slash = path.find('/')
    backslash = path.find('\\')
    if slash != -1 and (backslash == -1 or slash < backslash):
        return path.split('/', 1)
    if backslash != -1:
        return path.split('\\', 1)
    return path, ''
def has_leading_dir(paths):
    """Returns true if all the paths have the same leading path name
    (i.e., everything is in one subdirectory in an archive)"""
    common = None
    for candidate in paths:
        first, _rest = split_leading_dir(candidate)
        if not first:
            # A path with no leading component means a file at the root.
            return False
        if common is None:
            common = first
        elif first != common:
            return False
    return True
def make_path_relative(path, rel_to):
"""
Make a filename relative, where the filename path, and it is
relative to rel_to
>>> make_relative_path('/usr/share/something/a-file.pth',
... '/usr/share/another-place/src/Directory')
'../../../something/a-file.pth'
>>> make_relative_path('/usr/share/something/a-file.pth',
... '/home/user/src/Directory')
'../../../usr/share/something/a-file.pth'
>>> make_relative_path('/usr/share/a-file.pth', '/usr/share/')
'a-file.pth'
"""
path_filename = os.path.basename(path)
path = os.path.dirname(path)
path = os.path.normpath(os.path.abspath(path))
rel_to = os.path.normpath(os.path.abspath(rel_to))
path_parts = path.strip(os.path.sep).split(os.path.sep)
rel_to_parts = rel_to.strip(os.path.sep).split(os.path.sep)
while path_parts and rel_to_parts and path_parts[0] == rel_to_parts[0]:
path_parts.pop(0)
rel_to_parts.pop(0)
full_parts = ['..']*len(rel_to_parts) + path_parts + [path_filename]
if full_parts == ['']:
return '.' + os.path.sep
return os.path.sep.join(full_parts)
def normalize_path(path):
    """
    Convert a path to its canonical, case-normalized, absolute version.
    """
    # Expand '~' first, then resolve symlinks, then normalize case
    # (case-folding only matters on case-insensitive filesystems).
    expanded = os.path.expanduser(path)
    return os.path.normcase(os.path.realpath(expanded))
def splitext(path):
    """Like os.path.splitext, but take off .tar too"""
    base, ext = posixpath.splitext(path)
    # Fold a trailing '.tar' into the extension so 'x.tar.gz' splits
    # as ('x', '.tar.gz').
    if base.lower().endswith('.tar'):
        return base[:-4], base[-4:] + ext
    return base, ext
def renames(old, new):
    """Like os.renames(), but handles renaming across devices."""
    # Implementation borrowed from os.renames().
    # Ensure the destination's parent directories exist.
    new_dir, new_name = os.path.split(new)
    if new_dir and new_name and not os.path.exists(new_dir):
        os.makedirs(new_dir)
    # shutil.move falls back to copy+delete when crossing devices,
    # which plain os.rename cannot do.
    shutil.move(old, new)
    # Prune now-empty directories left behind at the source.
    old_dir, old_name = os.path.split(old)
    if old_dir and old_name:
        try:
            os.removedirs(old_dir)
        except OSError:
            pass
def is_local(path):
    """
    Return True if path is within sys.prefix, if we're running in a virtualenv.
    If we're not in a virtualenv, all paths are considered "local."
    """
    # running_under_virtualenv is imported elsewhere in this module
    # (from pip.locations, presumably — confirm against the full file).
    if not running_under_virtualenv():
        return True
    return normalize_path(path).startswith(normalize_path(sys.prefix))
def dist_is_local(dist):
    """
    Return True if given Distribution object is installed locally
    (i.e. within current virtualenv).
    Always True if we're not in a virtualenv.
    """
    # Delegates to is_local() using the distribution's effective
    # install location (egg-link aware, see dist_location()).
    return is_local(dist_location(dist))
def dist_in_usersite(dist):
    """
    Return True if given Distribution is installed in user site.
    """
    # No user site configured means nothing can live there.
    if not user_site:
        return False
    return normalize_path(dist_location(dist)).startswith(
        normalize_path(user_site))
def dist_in_site_packages(dist):
    """
    Return True if given Distribution is installed in distutils.sysconfig.get_python_lib().
    """
    # site_packages is a module-level path defined elsewhere in this file.
    return normalize_path(dist_location(dist)).startswith(normalize_path(site_packages))
def dist_is_editable(dist):
    """Is distribution an editable install?"""
    #TODO: factor out determining editableness out of FrozenRequirement
    # Local import avoids a circular dependency with the pip package root.
    from pip import FrozenRequirement
    req = FrozenRequirement.from_dist(dist, [])
    return req.editable
def get_installed_distributions(local_only=True,
                                skip=('setuptools', 'pip', 'python', 'distribute'),
                                include_editables=True,
                                editables_only=False):
    """
    Return a list of installed Distribution objects.

    If ``local_only`` is True (default), only return installations
    local to the current virtualenv, if in a virtualenv.

    ``skip`` is an iterable of lower-case project names to ignore.

    If ``include_editables`` is False, editables are not reported;
    if ``editables_only`` is True, only editables are reported.
    """
    def _accept(dist):
        # All requested filters folded into a single predicate.
        if local_only and not dist_is_local(dist):
            return False
        if dist.key in skip:
            return False
        if not include_editables and dist_is_editable(dist):
            return False
        if editables_only and not dist_is_editable(dist):
            return False
        return True
    return [dist for dist in pkg_resources.working_set if _accept(dist)]
def egg_link_path(dist):
    """
    Return the path for the .egg-link file if it exists, otherwise, None.

    There's 3 scenarios:
    1) not in a virtualenv:
       try site.USER_SITE first, then site_packages
    2) in a no-global virtualenv:
       try site_packages only
    3) in a yes-global virtualenv:
       try site_packages first, then site.USER_SITE (never the global
       location)

    In cases #1 and #3 an egg-link may exist in both locations; the
    first hit wins.
    """
    if running_under_virtualenv():
        search_dirs = [site_packages]
        if not virtualenv_no_global() and user_site:
            search_dirs.append(user_site)
    else:
        search_dirs = []
        if user_site:
            search_dirs.append(user_site)
        search_dirs.append(site_packages)
    link_name = dist.project_name + '.egg-link'
    for directory in search_dirs:
        candidate = os.path.join(directory, link_name)
        if os.path.isfile(candidate):
            return candidate
def dist_location(dist):
    """
    Get the site-packages location of this distribution. Generally
    this is dist.location, except in the case of develop-installed
    packages, where dist.location is the source code location, and we
    want to know where the egg-link file is.
    """
    # egg_link_path() yields a path string (truthy) or None.
    return egg_link_path(dist) or dist.location
def get_terminal_size():
    """Returns a tuple (x, y) representing the width(x) and the height(y)
    in characters of the terminal window."""
    def ioctl_GWINSZ(fd):
        # Ask the kernel for the window size of the tty on *fd*;
        # returns (rows, cols) or None when fd is not a usable terminal.
        try:
            import fcntl
            import termios
            import struct
            cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ,
                                                 '1234'))
        except Exception:
            return None
        # A (0, 0) answer means the size is unknown.
        # (The original code repeated this check twice; once suffices.)
        if cr == (0, 0):
            return None
        return cr
    cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
    if not cr:
        # stdio may be redirected; try the controlling terminal directly.
        try:
            fd = os.open(os.ctermid(), os.O_RDONLY)
            cr = ioctl_GWINSZ(fd)
            os.close(fd)
        except Exception:
            pass
    if not cr:
        # Last resort: environment overrides, then the classic 80x25.
        cr = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80))
    return int(cr[1]), int(cr[0])
def current_umask():
    """Get the current umask which involves having to set it temporarily."""
    # os.umask() both sets a new mask and returns the previous one, so
    # set a throwaway value and immediately restore the original.
    original = os.umask(0)
    os.umask(original)
    return original
def unzip_file(filename, location, flatten=True):
    """
    Unzip the file (with path `filename`) to the destination `location`. All
    files are written based on system defaults and umask (i.e. permissions are
    not preserved), except that regular file members with any execute
    permissions (user, group, or world) have "chmod +x" applied after being
    written. Note that for windows, any execute changes using os.chmod are
    no-ops per the python docs.
    """
    if not os.path.exists(location):
        os.makedirs(location)
    zipfp = open(filename, 'rb')
    try:
        zip = zipfile.ZipFile(zipfp)
        # When every member shares one top-level directory, strip it
        # (unless the caller asked not to flatten).
        leading = has_leading_dir(zip.namelist()) and flatten
        for info in zip.infolist():
            name = info.filename
            data = zip.read(name)
            fn = name
            if leading:
                fn = split_leading_dir(name)[1]
            fn = os.path.join(location, fn)
            dir = os.path.dirname(fn)
            if not os.path.exists(dir):
                os.makedirs(dir)
            if fn.endswith('/') or fn.endswith('\\'):
                # A directory
                if not os.path.exists(fn):
                    os.makedirs(fn)
            else:
                fp = open(fn, 'wb')
                try:
                    fp.write(data)
                finally:
                    fp.close()
                # The upper 16 bits of external_attr hold the Unix mode
                # for archives built on Unix; 0 when unavailable.
                mode = info.external_attr >> 16
                # if mode and regular file and any execute permissions for user/group/world?
                if mode and stat.S_ISREG(mode) and mode & 0o111:
                    # make dest file have execute for user/group/world (chmod +x)
                    # no-op on windows per python docs
                    os.chmod(fn, (0o777-current_umask() | 0o111))
    finally:
        zipfp.close()
def untar_file(filename, location):
    """
    Untar the file (with path `filename`) to the destination `location`.
    All files are written based on system defaults and umask (i.e. permissions
    are not preserved), except that regular file members with any execute
    permissions (user, group, or world) have "chmod +x" applied after being
    written. Note that for windows, any execute changes using os.chmod are
    no-ops per the python docs.
    """
    if not os.path.exists(location):
        os.makedirs(location)
    # Pick the tarfile mode from the filename suffix; 'r:*' lets tarfile
    # autodetect when the suffix is unrecognized.
    if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'):
        mode = 'r:gz'
    elif filename.lower().endswith('.bz2') or filename.lower().endswith('.tbz'):
        mode = 'r:bz2'
    elif filename.lower().endswith('.tar'):
        mode = 'r'
    else:
        logger.warn('Cannot determine compression type for file %s' % filename)
        mode = 'r:*'
    tar = tarfile.open(filename, mode)
    try:
        # note: python<=2.5 doesnt seem to know about pax headers, filter them
        leading = has_leading_dir([
            member.name for member in tar.getmembers()
            if member.name != 'pax_global_header'
        ])
        for member in tar.getmembers():
            fn = member.name
            if fn == 'pax_global_header':
                continue
            if leading:
                # Strip the single shared top-level directory.
                fn = split_leading_dir(fn)[1]
            path = os.path.join(location, fn)
            if member.isdir():
                if not os.path.exists(path):
                    os.makedirs(path)
            elif member.issym():
                try:
                    # _extract_member is a private tarfile API used here
                    # to materialize the symlink at the stripped path.
                    tar._extract_member(member, path)
                except:
                    e = sys.exc_info()[1]
                    # Some corrupt tar files seem to produce this
                    # (specifically bad symlinks)
                    logger.warn(
                        'In the tar file %s the member %s is invalid: %s'
                        % (filename, member.name, e))
                    continue
            else:
                try:
                    fp = tar.extractfile(member)
                except (KeyError, AttributeError):
                    e = sys.exc_info()[1]
                    # Some corrupt tar files seem to produce this
                    # (specifically bad symlinks)
                    logger.warn(
                        'In the tar file %s the member %s is invalid: %s'
                        % (filename, member.name, e))
                    continue
                if not os.path.exists(os.path.dirname(path)):
                    os.makedirs(os.path.dirname(path))
                destfp = open(path, 'wb')
                try:
                    shutil.copyfileobj(fp, destfp)
                finally:
                    destfp.close()
                fp.close()
                # member have any execute permissions for user/group/world?
                if member.mode & 0o111:
                    # make dest file have execute for user/group/world
                    # no-op on windows per python docs
                    os.chmod(path, (0o777-current_umask() | 0o111))
    finally:
        tar.close()
def create_download_cache_folder(folder):
    """Create the download-cache directory, announcing it in the log."""
    # Temporarily reduce the logger's indentation so the notice lines up
    # with top-level progress output, then restore it.
    logger.indent -= 2
    logger.notify('Creating supposed download cache at %s' % folder)
    logger.indent += 2
    os.makedirs(folder)
def cache_download(target_file, temp_location, content_type):
    """Store a freshly-downloaded file in the download cache.

    Copies *temp_location* to *target_file* and records the server's
    Content-Type in a ``<target_file>.content-type`` sidecar so later
    cache hits can replay it.
    """
    logger.notify('Storing download in cache at %s' % display_path(target_file))
    shutil.copyfile(temp_location, target_file)
    # `with` guarantees the sidecar handle is closed even if the write
    # fails (the original leaked it on error).
    with open(target_file + '.content-type', 'w') as fp:
        fp.write(content_type)
def unpack_file(filename, location, content_type, link):
    """Dispatch an archive to the right extractor based on content type,
    file extension and magic-number sniffing."""
    filename = os.path.realpath(filename)
    if (content_type == 'application/zip'
        or filename.endswith('.zip')
        or filename.endswith('.pybundle')
        or filename.endswith('.whl')
        or zipfile.is_zipfile(filename)):
        # Bundles and wheels must keep their internal layout intact,
        # hence flatten=False for those.
        unzip_file(filename, location, flatten=not filename.endswith(('.pybundle', '.whl')))
    elif (content_type == 'application/x-gzip'
          or tarfile.is_tarfile(filename)
          or splitext(filename)[1].lower() in ('.tar', '.tar.gz', '.tar.bz2', '.tgz', '.tbz')):
        untar_file(filename, location)
    elif (content_type and content_type.startswith('text/html')
          and is_svn_page(file_contents(filename))):
        # An svn index page: check the repository out instead.
        # We don't really care about this
        from pip.vcs.subversion import Subversion
        Subversion('svn+' + link.url).unpack(location)
    else:
        ## FIXME: handle?
        ## FIXME: magic signatures?
        logger.fatal('Cannot unpack file %s (downloaded from %s, content-type: %s); cannot detect archive format'
                     % (filename, location, content_type))
        raise InstallationError('Cannot determine archive format of %s' % location)
def call_subprocess(cmd, show_stdout=True,
                    filter_stdout=None, cwd=None,
                    raise_on_returncode=True,
                    command_level=logger.DEBUG, command_desc=None,
                    extra_environ=None):
    """Run *cmd* (a list of arguments), logging its output.

    With show_stdout=True the child inherits the terminal; otherwise its
    combined stdout+stderr is captured, optionally filtered line by line
    through *filter_stdout*, and returned as a single string. A non-zero
    exit status raises InstallationError unless raise_on_returncode is
    False (then it is only logged as a warning).
    """
    if command_desc is None:
        # Build a shell-like rendering of the command for log messages.
        cmd_parts = []
        for part in cmd:
            if ' ' in part or '\n' in part or '"' in part or "'" in part:
                part = '"%s"' % part.replace('"', '\\"')
            cmd_parts.append(part)
        command_desc = ' '.join(cmd_parts)
    if show_stdout:
        stdout = None
    else:
        stdout = subprocess.PIPE
    logger.log(command_level, "Running command %s" % command_desc)
    env = os.environ.copy()
    if extra_environ:
        env.update(extra_environ)
    try:
        proc = subprocess.Popen(
            cmd, stderr=subprocess.STDOUT, stdin=None, stdout=stdout,
            cwd=cwd, env=env)
    except Exception:
        e = sys.exc_info()[1]
        logger.fatal(
            "Error %s while executing command %s" % (e, command_desc))
        raise
    all_output = []
    if stdout is not None:
        # Capturing: stream the child's output line by line so progress
        # can be shown/filtered while it runs.
        stdout = proc.stdout
        while 1:
            line = console_to_str(stdout.readline())
            if not line:
                break
            line = line.rstrip()
            all_output.append(line + '\n')
            if filter_stdout:
                # filter_stdout may return a level or a (level, line) pair.
                level = filter_stdout(line)
                if isinstance(level, tuple):
                    level, line = level
                logger.log(level, line)
                if not logger.stdout_level_matches(level):
                    logger.show_progress()
            else:
                logger.info(line)
    else:
        # Not capturing: just wait for completion.
        returned_stdout, returned_stderr = proc.communicate()
        all_output = [returned_stdout or '']
    proc.wait()
    if proc.returncode:
        if raise_on_returncode:
            if all_output:
                logger.notify('Complete output from command %s:' % command_desc)
                logger.notify('\n'.join(all_output) + '\n----------------------------------------')
            raise InstallationError(
                "Command %s failed with error code %s in %s"
                % (command_desc, proc.returncode, cwd))
        else:
            logger.warn(
                "Command %s had error code %s in %s"
                % (command_desc, proc.returncode, cwd))
    if stdout is not None:
        return ''.join(all_output)
def is_prerelease(vers):
    """
    Attempt to determine if this is a pre-release using PEP386/PEP426 rules.
    Will return True if it is a pre-release and False if not. Versions are
    assumed to be a pre-release if they cannot be parsed.
    """
    # _suggest_normalized_version/_normalized_key come from the vendored
    # distlib `version` module imported at the top of this file.
    normalized = version._suggest_normalized_version(vers)
    if normalized is None:
        # Cannot normalize, assume it is a pre-release
        return True
    parsed = version._normalized_key(normalized)
    # The key is a tuple of tuples; any pre-release marker anywhere in it
    # ("a", "b", "c", "rc", "dev") flags the version as a pre-release.
    return any([any([y in set(["a", "b", "c", "rc", "dev"]) for y in x]) for x in parsed])
| mit |
sillydan1/WhatEverEngine | packages/IronPython.StdLib.2.7.5/content/Lib/distutils/command/install_egg_info.py | 438 | 2587 | """distutils.command.install_egg_info
Implements the Distutils 'install_egg_info' command, for installing
a package's PKG-INFO metadata."""
from distutils.cmd import Command
from distutils import log, dir_util
import os, sys, re
class install_egg_info(Command):
    """Install an .egg-info file for the package"""
    description = "Install package's PKG-INFO metadata as an .egg-info file"
    user_options = [
        ('install-dir=', 'd', "directory to install to"),
    ]
    def initialize_options(self):
        # Filled in by finalize_options() from the 'install_lib' command.
        self.install_dir = None
    def finalize_options(self):
        self.set_undefined_options('install_lib',('install_dir','install_dir'))
        # e.g. "my_dist-1.0-py2.7.egg-info"
        basename = "%s-%s-py%s.egg-info" % (
            to_filename(safe_name(self.distribution.get_name())),
            to_filename(safe_version(self.distribution.get_version())),
            sys.version[:3]
        )
        self.target = os.path.join(self.install_dir, basename)
        self.outputs = [self.target]
    def run(self):
        target = self.target
        # Remove any stale .egg-info (directory or plain file) first,
        # or create the install dir if it doesn't exist yet.
        if os.path.isdir(target) and not os.path.islink(target):
            dir_util.remove_tree(target, dry_run=self.dry_run)
        elif os.path.exists(target):
            self.execute(os.unlink,(self.target,),"Removing "+target)
        elif not os.path.isdir(self.install_dir):
            self.execute(os.makedirs, (self.install_dir,),
                         "Creating "+self.install_dir)
        log.info("Writing %s", target)
        if not self.dry_run:
            f = open(target, 'w')
            self.distribution.metadata.write_pkg_file(f)
            f.close()
    def get_outputs(self):
        # Used by the 'install' command to record what was written.
        return self.outputs
# The following routines are taken from setuptools' pkg_resources module and
# can be replaced by importing them from pkg_resources once it is included
# in the stdlib.
def safe_name(name):
    """Convert an arbitrary string to a standard distribution name
    Any runs of non-alphanumeric/. characters are replaced with a single '-'.
    """
    # Collapse each run of characters outside [A-Za-z0-9.] into one dash.
    pattern = re.compile(r'[^A-Za-z0-9.]+')
    return pattern.sub('-', name)
def safe_version(version):
    """Convert an arbitrary string to a standard version string
    Spaces become dots, and all other non-alphanumeric characters become
    dashes, with runs of multiple dashes condensed to a single dash.
    """
    dotted = version.replace(' ', '.')
    return re.sub(r'[^A-Za-z0-9.]+', '-', dotted)
def to_filename(name):
    """Convert a project or version name to its filename-escaped form
    Any '-' characters are currently replaced with '_'.
    """
    return '_'.join(name.split('-'))
| apache-2.0 |
QinerTech/QinerApps | openerp/addons/test_impex/tests/test_load.py | 34 | 44524 | # -*- coding: utf-8 -*-
import json
import pkgutil
import unittest
import openerp.modules.registry
import openerp
from openerp.tests import common
from openerp.tools.misc import mute_logger
def message(msg, type='error', from_=0, to_=0, record=0, field='value', **kwargs):
    """Build the error-message dict the import machinery is expected to
    produce; extra keyword arguments are merged in (fixed keys win)."""
    result = dict(kwargs)
    result.update(
        type=type, rows={'from': from_, 'to': to_}, record=record,
        field=field, message=msg)
    return result
def moreaction(**kwargs):
    """Build the "see all possible values" window-action payload; caller
    keys are merged in (fixed action keys win, matching dict(kwargs, ...))."""
    result = dict(kwargs)
    result.update({
        'type': 'ir.actions.act_window',
        'target': 'new',
        'view_mode': 'tree,form',
        'view_type': 'form',
        'views': [(False, 'tree'), (False, 'form')],
        'help': u"See all possible values",
    })
    return result
def values(seq, field='value'):
    """Extract *field* from every record dict in *seq*."""
    return [record[field] for record in seq]
class ImporterCase(common.TransactionCase):
    """Base class for import tests: wraps Model.load/read/browse for the
    model named by ``model_name`` and provides xid/translation helpers."""
    # Subclasses set this to the model they exercise.
    model_name = False
    def __init__(self, *args, **kwargs):
        super(ImporterCase, self).__init__(*args, **kwargs)
        self.model = None
    def setUp(self):
        super(ImporterCase, self).setUp()
        self.model = self.registry(self.model_name)
        # Stale xid caches would leak between tests otherwise.
        self.registry('ir.model.data').clear_caches()
    def import_(self, fields, rows, context=None):
        # Thin wrapper over Model.load as superuser.
        return self.model.load(
            self.cr, openerp.SUPERUSER_ID, fields, rows, context=context)
    def read(self, fields=('value',), domain=(), context=None):
        return self.model.read(
            self.cr, openerp.SUPERUSER_ID,
            self.model.search(self.cr, openerp.SUPERUSER_ID, domain, context=context),
            fields=fields, context=context)
    def browse(self, domain=(), context=None):
        return self.model.browse(
            self.cr, openerp.SUPERUSER_ID,
            self.model.search(self.cr, openerp.SUPERUSER_ID, domain, context=context),
            context=context)
    def xid(self, record):
        """Return the record's external id, minting one under the
        '__test__' module if it doesn't have one yet."""
        ModelData = self.registry('ir.model.data')
        ids = ModelData.search(
            self.cr, openerp.SUPERUSER_ID,
            [('model', '=', record._name), ('res_id', '=', record.id)])
        if ids:
            d = ModelData.read(
                self.cr, openerp.SUPERUSER_ID, ids, ['name', 'module'])[0]
            if d['module']:
                return '%s.%s' % (d['module'], d['name'])
            return d['name']
        name = record.name_get()[0][1]
        # fix dotted name_get results, otherwise xid lookups blow up
        name = name.replace('.', '-')
        ModelData.create(self.cr, openerp.SUPERUSER_ID, {
            'name': name,
            'model': record._name,
            'res_id': record.id,
            'module': '__test__'
        })
        return '__test__.' + name
    def add_translations(self, name, type, code, *tnx):
        """Install (source, value) translation pairs for language *code*,
        creating the res.lang record on first use."""
        Lang = self.registry('res.lang')
        if not Lang.search(self.cr, openerp.SUPERUSER_ID, [('code', '=', code)]):
            Lang.create(self.cr, openerp.SUPERUSER_ID, {
                'name': code,
                'code': code,
                'translatable': True,
                'date_format': '%d.%m.%Y',
                'decimal_point': ',',
            })
        Translations = self.registry('ir.translation')
        for source, value in tnx:
            Translations.create(self.cr, openerp.SUPERUSER_ID, {
                'name': name,
                'lang': code,
                'type': type,
                'src': source,
                'value': value,
                'state': 'translated',
            })
class test_ids_stuff(ImporterCase):
    """Importing via the database-id ('.id') and external-id ('id') columns,
    both for creation and for updates."""
    model_name = 'export.integer'
    def test_create_with_id(self):
        # A '.id' that doesn't exist in the database is a hard error.
        result = self.import_(['.id', 'value'], [['42', '36']])
        self.assertIs(result['ids'], False)
        self.assertEqual(result['messages'], [{
            'type': 'error',
            'rows': {'from': 0, 'to': 0},
            'record': 0,
            'field': '.id',
            'message': u"Unknown database identifier '42'",
        }])
    def test_create_with_xid(self):
        # A fresh external id is minted on the created record.
        result = self.import_(['id', 'value'], [['somexmlid', '42']])
        self.assertEqual(len(result['ids']), 1)
        self.assertFalse(result['messages'])
        self.assertEqual(
            'somexmlid',
            self.xid(self.browse()[0]))
    def test_update_with_id(self):
        # Importing an existing '.id' updates the record in place.
        id = self.model.create(self.cr, openerp.SUPERUSER_ID, {'value': 36})
        self.assertEqual(
            36,
            self.model.browse(self.cr, openerp.SUPERUSER_ID, id).value)
        result = self.import_(['.id', 'value'], [[str(id), '42']])
        self.assertEqual(len(result['ids']), 1)
        self.assertFalse(result['messages'])
        self.assertEqual(
            [42], # updated value to imported
            values(self.read()))
    def test_update_with_xid(self):
        # Re-importing the same external id updates rather than duplicates.
        self.import_(['id', 'value'], [['somexmlid', '36']])
        self.assertEqual([36], values(self.read()))
        self.import_(['id', 'value'], [['somexmlid', '1234567']])
        self.assertEqual([1234567], values(self.read()))
class test_boolean_field(ImporterCase):
    """Parsing of boolean cells, including localized yes/no words."""
    model_name = 'export.boolean'
    def test_empty(self):
        self.assertEqual(
            self.import_(['value'], []),
            {'ids': [], 'messages': []})
    def test_exported(self):
        # The exporter's own 'True'/'False' labels round-trip.
        result = self.import_(['value'], [['False'], ['True'], ])
        self.assertEqual(len(result['ids']), 2)
        self.assertFalse(result['messages'])
        records = self.read()
        self.assertEqual([
            False,
            True,
        ], values(records))
    def test_falses(self):
        # Localized "no"/"false" words (installed as code translations)
        # must be recognized as False without warnings.
        for lang, source, value in [('fr_FR', 'no', u'non'),
                                    ('de_DE', 'no', u'nein'),
                                    ('ru_RU', 'no', u'нет'),
                                    ('nl_BE', 'false', u'vals'),
                                    ('lt_LT', 'false', u'klaidingas')]:
            self.add_translations('test_import.py', 'code', lang, (source, value))
        falses = [[u'0'], [u'no'], [u'false'], [u'FALSE'], [u''],
                  [u'non'], # no, fr
                  [u'nein'], # no, de
                  [u'нет'], # no, ru
                  [u'vals'], # false, nl
                  [u'klaidingas'], # false, lt,
                  ]
        result = self.import_(['value'], falses)
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), len(falses))
        self.assertEqual([False] * len(falses), values(self.read()))
    def test_trues(self):
        # Anything unrecognized is treated as True but warned about.
        trues = [['None'], ['nil'], ['()'], ['f'], ['#f'],
                 # Problem: OpenOffice (and probably excel) output localized booleans
                 ['VRAI'], ['ok'], ['true'], ['yes'], ['1'], ]
        result = self.import_(['value'], trues)
        self.assertEqual(len(result['ids']), 10)
        self.assertEqual(result['messages'], [
            message(u"Unknown value '%s' for boolean field 'unknown', assuming 'yes'" % v[0],
                    moreinfo=u"Use '1' for yes and '0' for no",
                    type='warning', from_=i, to_=i, record=i)
            for i, v in enumerate(trues)
            if v[0] not in ('true', 'yes', '1')
        ])
        self.assertEqual(
            [True] * 10,
            values(self.read()))
class test_integer_field(ImporterCase):
    """Parsing and range handling of integer cells."""
    model_name = 'export.integer'
    def test_none(self):
        self.assertEqual(
            self.import_(['value'], []),
            {'ids': [], 'messages': []})
    def test_empty(self):
        # An empty cell stores False (OpenERP's "no value").
        result = self.import_(['value'], [['']])
        self.assertEqual(len(result['ids']), 1)
        self.assertFalse(result['messages'])
        self.assertEqual(
            [False],
            values(self.read()))
    def test_zero(self):
        # Zero is also read back as False by the ORM.
        result = self.import_(['value'], [['0']])
        self.assertEqual(len(result['ids']), 1)
        self.assertFalse(result['messages'])
        result = self.import_(['value'], [['-0']])
        self.assertEqual(len(result['ids']), 1)
        self.assertFalse(result['messages'])
        self.assertEqual([False, False], values(self.read()))
    def test_positives(self):
        result = self.import_(['value'], [
            ['1'],
            ['42'],
            [str(2**31-1)],
            ['12345678']
        ])
        self.assertEqual(len(result['ids']), 4)
        self.assertFalse(result['messages'])
        self.assertEqual([
            1, 42, 2**31-1, 12345678
        ], values(self.read()))
    def test_negatives(self):
        result = self.import_(['value'], [
            ['-1'],
            ['-42'],
            [str(-(2**31 - 1))],
            [str(-(2**31))],
            ['-12345678']
        ])
        self.assertEqual(len(result['ids']), 5)
        self.assertFalse(result['messages'])
        self.assertEqual([
            -1, -42, -(2**31 - 1), -(2**31), -12345678
        ], values(self.read()))
    @mute_logger('openerp.sql_db', 'openerp.models')
    def test_out_of_range(self):
        # Values beyond the 32-bit column range surface the database error.
        result = self.import_(['value'], [[str(2**31)]])
        self.assertIs(result['ids'], False)
        self.assertEqual(result['messages'], [{
            'type': 'error',
            'rows': {'from': 0, 'to': 0},
            'record': 0,
            'message': "integer out of range\n"
        }])
        result = self.import_(['value'], [[str(-2**32)]])
        self.assertIs(result['ids'], False)
        self.assertEqual(result['messages'], [{
            'type': 'error',
            'rows': {'from': 0, 'to': 0},
            'record': 0,
            'message': "integer out of range\n"
        }])
    def test_nonsense(self):
        result = self.import_(['value'], [['zorglub']])
        self.assertIs(result['ids'], False)
        self.assertEqual(result['messages'], [{
            'type': 'error',
            'rows': {'from': 0, 'to': 0},
            'record': 0,
            'field': 'value',
            'message': u"'zorglub' does not seem to be an integer for field 'unknown'",
        }])
class test_float_field(ImporterCase):
    """Parsing of float cells, including values beyond 32-bit range."""
    model_name = 'export.float'
    def test_none(self):
        self.assertEqual(
            self.import_(['value'], []),
            {'ids': [], 'messages': []})
    def test_empty(self):
        result = self.import_(['value'], [['']])
        self.assertEqual(len(result['ids']), 1)
        self.assertFalse(result['messages'])
        self.assertEqual(
            [False],
            values(self.read()))
    def test_zero(self):
        # Zero reads back as False, same as the integer field.
        result = self.import_(['value'], [['0']])
        self.assertEqual(len(result['ids']), 1)
        self.assertFalse(result['messages'])
        result = self.import_(['value'], [['-0']])
        self.assertEqual(len(result['ids']), 1)
        self.assertFalse(result['messages'])
        self.assertEqual([False, False], values(self.read()))
    def test_positives(self):
        result = self.import_(['value'], [
            ['1'],
            ['42'],
            [str(2**31-1)],
            ['12345678'],
            [str(2**33)],
            ['0.000001'],
        ])
        self.assertEqual(len(result['ids']), 6)
        self.assertFalse(result['messages'])
        self.assertEqual([
            1, 42, 2**31-1, 12345678, 2.0**33, .000001
        ], values(self.read()))
    def test_negatives(self):
        result = self.import_(['value'], [
            ['-1'],
            ['-42'],
            [str(-2**31 + 1)],
            [str(-2**31)],
            ['-12345678'],
            [str(-2**33)],
            ['-0.000001'],
        ])
        self.assertEqual(len(result['ids']), 7)
        self.assertFalse(result['messages'])
        self.assertEqual([
            -1, -42, -(2**31 - 1), -(2**31), -12345678, -2.0**33, -.000001
        ], values(self.read()))
    def test_nonsense(self):
        result = self.import_(['value'], [['foobar']])
        self.assertIs(result['ids'], False)
        self.assertEqual(result['messages'], [
            message(u"'foobar' does not seem to be a number for field 'unknown'")])
class test_string_field(ImporterCase):
    """Size-bounded char field: imported values get truncated."""
    model_name = 'export.string.bounded'
    def test_empty(self):
        result = self.import_(['value'], [['']])
        self.assertEqual(len(result['ids']), 1)
        self.assertFalse(result['messages'])
        self.assertEqual([False], values(self.read()))
    def test_imported(self):
        result = self.import_(['value'], [
            [u'foobar'],
            [u'foobarbaz'],
            [u'Með suð í eyrum við spilum endalaust'],
            [u"People 'get' types. They use them all the time. Telling "
             u"someone he can't pound a nail with a banana doesn't much "
             u"surprise him."]
        ])
        self.assertEqual(len(result['ids']), 4)
        self.assertFalse(result['messages'])
        # Values longer than the field size are cut (here at 16 chars).
        self.assertEqual([
            u"foobar",
            u"foobarbaz",
            u"Með suð í eyrum ",
            u"People 'get' typ",
        ], values(self.read()))
class test_unbound_string_field(ImporterCase):
    """Unbounded char field: imported values are kept intact."""
    model_name = 'export.string'
    def test_imported(self):
        result = self.import_(['value'], [
            [u'í dag viðrar vel til loftárása'],
            # ackbar.jpg
            [u"If they ask you about fun, you tell them – fun is a filthy"
             u" parasite"]
        ])
        self.assertEqual(len(result['ids']), 2)
        self.assertFalse(result['messages'])
        self.assertEqual([
            u"í dag viðrar vel til loftárása",
            u"If they ask you about fun, you tell them – fun is a filthy parasite"
        ], values(self.read()))
class test_required_string_field(ImporterCase):
    """Required char field: missing values abort the import."""
    model_name = 'export.string.required'
    @mute_logger('openerp.sql_db', 'openerp.models')
    def test_empty(self):
        result = self.import_(['value'], [[]])
        self.assertEqual(result['messages'], [message(
            u"Missing required value for the field 'unknown' (value)")])
        self.assertIs(result['ids'], False)
    @mute_logger('openerp.sql_db', 'openerp.models')
    def test_not_provided(self):
        # Omitting the required column entirely fails the same way.
        result = self.import_(['const'], [['12']])
        self.assertEqual(result['messages'], [message(
            u"Missing required value for the field 'unknown' (value)")])
        self.assertIs(result['ids'], False)
class test_text(ImporterCase):
    """Text field: long multi-line unicode content round-trips."""
    model_name = 'export.text'
    def test_empty(self):
        result = self.import_(['value'], [['']])
        self.assertEqual(len(result['ids']), 1)
        self.assertFalse(result['messages'])
        self.assertEqual([False], values(self.read()))
    def test_imported(self):
        s = (u"Breiðskífa er notað um útgefna hljómplötu sem inniheldur "
             u"stúdíóupptökur frá einum flytjanda. Breiðskífur eru oftast "
             u"milli 25-80 mínútur og er lengd þeirra oft miðuð við 33⅓ "
             u"snúninga 12 tommu vínylplötur (sem geta verið allt að 30 mín "
             u"hvor hlið).\n\nBreiðskífur eru stundum tvöfaldar og eru þær þá"
             u" gefnar út á tveimur geisladiskum eða tveimur vínylplötum.")
        result = self.import_(['value'], [[s]])
        self.assertEqual(len(result['ids']), 1)
        self.assertFalse(result['messages'])
        self.assertEqual([s], values(self.read()))
class test_selection(ImporterCase):
    """Static selection field: import by label, by value, and by
    translated label."""
    model_name = 'export.selection'
    # Label translations installed by test_imported_translated.
    translations_fr = [
        ("Foo", "tete"),
        ("Bar", "titi"),
        ("Qux", "toto"),
    ]
    def test_imported(self):
        # Labels and raw values are both accepted.
        result = self.import_(['value'], [
            ['Qux'],
            ['Bar'],
            ['Foo'],
            ['2'],
        ])
        self.assertEqual(len(result['ids']), 4)
        self.assertFalse(result['messages'])
        self.assertEqual([3, 2, 1, 2], values(self.read()))
    def test_imported_translated(self):
        self.add_translations(
            'export.selection,value', 'selection', 'fr_FR', *self.translations_fr)
        result = self.import_(['value'], [
            ['toto'],
            ['tete'],
            ['titi'],
        ], context={'lang': 'fr_FR'})
        self.assertEqual(len(result['ids']), 3)
        self.assertFalse(result['messages'])
        self.assertEqual([3, 1, 2], values(self.read()))
        # The untranslated label still works under the foreign lang.
        result = self.import_(['value'], [['Foo']], context={'lang': 'fr_FR'})
        self.assertEqual(len(result['ids']), 1)
        self.assertFalse(result['messages'])
    def test_invalid(self):
        # Unknown labels/values fail with the list of valid choices.
        result = self.import_(['value'], [['Baz']])
        self.assertIs(result['ids'], False)
        self.assertEqual(result['messages'], [message(
            u"Value 'Baz' not found in selection field 'unknown'",
            moreinfo="Foo Bar Qux 4".split())])
        result = self.import_(['value'], [[42]])
        self.assertIs(result['ids'], False)
        self.assertEqual(result['messages'], [message(
            u"Value '42' not found in selection field 'unknown'",
            moreinfo="Foo Bar Qux 4".split())])
class test_selection_with_default(ImporterCase):
    """Selection field carrying a default: an explicitly-empty cell
    overrides the default with False, while a column that is simply not
    provided leaves the default in place.
    """
    model_name = 'export.selection.withdefault'
    def test_empty(self):
        """ Empty cells should set corresponding field to False
        """
        result = self.import_(['value'], [['']])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 1)
        self.assertEqual(
            values(self.read()),
            [False])
    def test_default(self):
        """ Non-provided cells should set corresponding field to default
        """
        result = self.import_(['const'], [['42']])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 1)
        self.assertEqual(
            values(self.read()),
            [2])
class test_selection_function(ImporterCase):
    """Selection field whose options come from a function: import goes
    through fields_get, so it only ever sees the serialized (and possibly
    translated) labels.
    """
    model_name = 'export.selection.function'
    # (source label, fr_FR translation) pairs for the translated test.
    translations_fr = [
        ("Corge", "toto"),
        ("Grault", "titi"),
        ("Wheee", "tete"),
        ("Moog", "tutu"),
    ]
    def test_imported(self):
        """ import uses fields_get, so translates import label (may or may not
        be good news) *and* serializes the selection function to reverse it:
        import does not actually know that the selection field uses a function
        """
        # NOTE: conflict between a value and a label => pick first
        result = self.import_(['value'], [
            ['3'],
            ["Grault"],
        ])
        self.assertEqual(len(result['ids']), 2)
        self.assertFalse(result['messages'])
        self.assertEqual(
            [3, 1],
            values(self.read()))
    def test_translated(self):
        """ Expects output of selection function returns translated labels
        """
        self.add_translations(
            'export.selection,value', 'selection', 'fr_FR', *self.translations_fr)
        result = self.import_(['value'], [
            ['titi'],
            ['tete'],
        ], context={'lang': 'fr_FR'})
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 2)
        self.assertEqual(values(self.read()), [1, 2])
        result = self.import_(['value'], [['Wheee']], context={'lang': 'fr_FR'})
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 1)
class test_m2o(ImporterCase):
    """Import of many2one fields: resolution by display name, by external
    id (``field/id``) and by database id (``field/.id``), plus the
    corresponding ambiguity and failure diagnostics.
    """
    model_name = 'export.many2one'
    def test_by_name(self):
        """Cells holding a record's display name link to that record."""
        # create integer objects
        integer_id1 = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 42})
        integer_id2 = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 36})
        # get its name
        name1 = dict(self.registry('export.integer').name_get(
            self.cr, openerp.SUPERUSER_ID,[integer_id1]))[integer_id1]
        name2 = dict(self.registry('export.integer').name_get(
            self.cr, openerp.SUPERUSER_ID,[integer_id2]))[integer_id2]
        result = self.import_(['value'], [
            # import by name_get
            [name1],
            [name1],
            [name2],
        ])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 3)
        # correct ids assigned to corresponding records
        self.assertEqual([
            (integer_id1, name1),
            (integer_id1, name1),
            (integer_id2, name2),],
            values(self.read()))
    def test_by_xid(self):
        """``value/id`` columns resolve via external (XML) ids."""
        ExportInteger = self.registry('export.integer')
        integer_id = ExportInteger.create(
            self.cr, openerp.SUPERUSER_ID, {'value': 42})
        xid = self.xid(ExportInteger.browse(
            self.cr, openerp.SUPERUSER_ID, [integer_id])[0])
        result = self.import_(['value/id'], [[xid]])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 1)
        b = self.browse()
        self.assertEqual(42, b[0].value.value)
    def test_by_id(self):
        """``value/.id`` columns resolve via raw database ids."""
        integer_id = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 42})
        result = self.import_(['value/.id'], [[integer_id]])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 1)
        b = self.browse()
        self.assertEqual(42, b[0].value.value)
    def test_by_names(self):
        """Two records sharing a display name: import warns about the
        multiple matches and links the first one.
        """
        integer_id1 = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 42})
        integer_id2 = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 42})
        name1 = dict(self.registry('export.integer').name_get(
            self.cr, openerp.SUPERUSER_ID,[integer_id1]))[integer_id1]
        name2 = dict(self.registry('export.integer').name_get(
            self.cr, openerp.SUPERUSER_ID,[integer_id2]))[integer_id2]
        # names should be the same
        self.assertEqual(name1, name2)
        result = self.import_(['value'], [[name2]])
        self.assertEqual(
            result['messages'],
            [message(u"Found multiple matches for field 'unknown' (2 matches)",
                     type='warning')])
        self.assertEqual(len(result['ids']), 1)
        self.assertEqual([
            (integer_id1, name1)
        ], values(self.read()))
    def test_fail_by_implicit_id(self):
        """ Can't implicitly import records by id
        """
        # create integer objects
        integer_id1 = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 42})
        integer_id2 = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 36})
        # Because name_search all the things. Fallback schmallback
        result = self.import_(['value'], [
            # import by id, without specifying it
            [integer_id1],
            [integer_id2],
            [integer_id1],
        ])
        self.assertEqual(result['messages'], [
            message(u"No matching record found for name '%s' in field 'unknown'" % id,
                    from_=index, to_=index, record=index,
                    moreinfo=moreaction(res_model='export.integer'))
            for index, id in enumerate([integer_id1, integer_id2, integer_id1])])
        self.assertIs(result['ids'], False)
    @mute_logger('openerp.sql_db')
    def test_fail_id_mistype(self):
        """A non-integer ``/.id`` cell is reported as an invalid db id."""
        result = self.import_(['value/.id'], [["foo"]])
        self.assertEqual(result['messages'], [
            message(u"Invalid database id 'foo' for the field 'unknown'",
                    moreinfo=moreaction(res_model='ir.model.data',
                                        domain=[('model','=','export.integer')]))
        ])
        self.assertIs(result['ids'], False)
    def test_sub_field(self):
        """ Does not implicitly create the record, does not warn that you can't
        import m2o subfields (at all)...
        """
        result = self.import_(['value/value'], [['42']])
        self.assertEqual(result['messages'], [
            message(u"Can not create Many-To-One records indirectly, import "
                    u"the field separately")])
        self.assertIs(result['ids'], False)
    def test_fail_noids(self):
        """Unresolvable name / external id / database id each abort the
        import with a targeted 'no matching record' error.
        """
        result = self.import_(['value'], [['nameisnoexist:3']])
        self.assertEqual(result['messages'], [message(
            u"No matching record found for name 'nameisnoexist:3' "
            u"in field 'unknown'", moreinfo=moreaction(
                res_model='export.integer'))])
        self.assertIs(result['ids'], False)
        result = self.import_(['value/id'], [['noxidhere']])
        self.assertEqual(result['messages'], [message(
            u"No matching record found for external id 'noxidhere' "
            u"in field 'unknown'", moreinfo=moreaction(
                res_model='ir.model.data', domain=[('model','=','export.integer')]))])
        self.assertIs(result['ids'], False)
        result = self.import_(['value/.id'], [['66']])
        self.assertEqual(result['messages'], [message(
            u"No matching record found for database id '66' "
            u"in field 'unknown'", moreinfo=moreaction(
                res_model='ir.model.data', domain=[('model','=','export.integer')]))])
        self.assertIs(result['ids'], False)
    def test_fail_multiple(self):
        """Providing both a name and an external id for the same m2o is
        rejected as ambiguous.
        """
        result = self.import_(
            ['value', 'value/id'],
            [['somename', 'somexid']])
        self.assertEqual(result['messages'], [message(
            u"Ambiguous specification for field 'unknown', only provide one of "
            u"name, external id or database id")])
        self.assertIs(result['ids'], False)
class test_m2m(ImporterCase):
    """Import of many2many fields from comma-separated cell values."""
    model_name = 'export.many2many'
    # apparently, one and only thing which works is a
    # csv_internal_sep-separated list of ids, xids, or names (depending if
    # m2m/.id, m2m/id or m2m[/anythingelse]
    def test_ids(self):
        """``/.id`` cells holding 'id,id,...' lists link those records."""
        id1 = self.registry('export.many2many.other').create(
                self.cr, openerp.SUPERUSER_ID, {'value': 3, 'str': 'record0'})
        id2 = self.registry('export.many2many.other').create(
                self.cr, openerp.SUPERUSER_ID, {'value': 44, 'str': 'record1'})
        id3 = self.registry('export.many2many.other').create(
                self.cr, openerp.SUPERUSER_ID, {'value': 84, 'str': 'record2'})
        id4 = self.registry('export.many2many.other').create(
                self.cr, openerp.SUPERUSER_ID, {'value': 9, 'str': 'record3'})
        id5 = self.registry('export.many2many.other').create(
                self.cr, openerp.SUPERUSER_ID, {'value': 99, 'str': 'record4'})
        result = self.import_(['value/.id'], [
            ['%d,%d' % (id1, id2)],
            ['%d,%d,%d' % (id1, id3, id4)],
            ['%d,%d,%d' % (id1, id2, id3)],
            ['%d' % id5]
        ])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 4)
        ids = lambda records: [record.id for record in records]
        b = self.browse()
        self.assertEqual(ids(b[0].value), [id1, id2])
        self.assertEqual(values(b[0].value), [3, 44])
        self.assertEqual(ids(b[2].value), [id1, id2, id3])
        self.assertEqual(values(b[2].value), [3, 44, 84])
    def test_noids(self):
        """An unknown database id aborts the import."""
        result = self.import_(['value/.id'], [['42']])
        self.assertEqual(result['messages'], [message(
            u"No matching record found for database id '42' in field "
            u"'unknown'", moreinfo=moreaction(
                res_model='ir.model.data', domain=[('model','=','export.many2many.other')]))])
        self.assertIs(result['ids'], False)
    def test_xids(self):
        """``/id`` cells holding 'xid,xid,...' lists link by external id."""
        M2O_o = self.registry('export.many2many.other')
        id1 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 3, 'str': 'record0'})
        id2 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 44, 'str': 'record1'})
        id3 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 84, 'str': 'record2'})
        id4 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 9, 'str': 'record3'})
        records = M2O_o.browse(self.cr, openerp.SUPERUSER_ID, [id1, id2, id3, id4])
        result = self.import_(['value/id'], [
            ['%s,%s' % (self.xid(records[0]), self.xid(records[1]))],
            ['%s' % self.xid(records[3])],
            ['%s,%s' % (self.xid(records[2]), self.xid(records[1]))],
        ])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 3)
        b = self.browse()
        self.assertEqual(values(b[0].value), [3, 44])
        self.assertEqual(values(b[2].value), [44, 84])
    def test_noxids(self):
        """An unknown external id aborts the import."""
        result = self.import_(['value/id'], [['noxidforthat']])
        self.assertEqual(result['messages'], [message(
            u"No matching record found for external id 'noxidforthat' in field"
            u" 'unknown'", moreinfo=moreaction(
                res_model='ir.model.data', domain=[('model','=','export.many2many.other')]))])
        self.assertIs(result['ids'], False)
    def test_names(self):
        """Plain m2m cells holding 'name,name,...' lists link by name."""
        M2O_o = self.registry('export.many2many.other')
        id1 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 3, 'str': 'record0'})
        id2 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 44, 'str': 'record1'})
        id3 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 84, 'str': 'record2'})
        id4 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 9, 'str': 'record3'})
        records = M2O_o.browse(self.cr, openerp.SUPERUSER_ID, [id1, id2, id3, id4])
        name = lambda record: record.name_get()[0][1]
        result = self.import_(['value'], [
            ['%s,%s' % (name(records[1]), name(records[2]))],
            ['%s,%s,%s' % (name(records[0]), name(records[1]), name(records[2]))],
            ['%s,%s' % (name(records[0]), name(records[3]))],
        ])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 3)
        b = self.browse()
        self.assertEqual(values(b[1].value), [3, 44, 84])
        self.assertEqual(values(b[2].value), [3, 9])
    def test_nonames(self):
        """An unresolvable name aborts the import."""
        result = self.import_(['value'], [['wherethem2mhavenonames']])
        self.assertEqual(result['messages'], [message(
            u"No matching record found for name 'wherethem2mhavenonames' in "
            u"field 'unknown'", moreinfo=moreaction(
                res_model='export.many2many.other'))])
        self.assertIs(result['ids'], False)
    def test_import_to_existing(self):
        """Re-importing an existing xid record replaces (not extends) the
        record's m2m links.
        """
        M2O_o = self.registry('export.many2many.other')
        id1 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 3, 'str': 'record0'})
        id2 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 44, 'str': 'record1'})
        id3 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 84, 'str': 'record2'})
        id4 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 9, 'str': 'record3'})
        xid = 'myxid'
        result = self.import_(['id', 'value/.id'], [[xid, '%d,%d' % (id1, id2)]])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 1)
        result = self.import_(['id', 'value/.id'], [[xid, '%d,%d' % (id3, id4)]])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 1)
        b = self.browse()
        self.assertEqual(len(b), 1)
        # TODO: replacement of existing m2m values is correct?
        self.assertEqual(values(b[0].value), [84, 9])
class test_o2m(ImporterCase):
    """Import of one2many fields: subfield columns create child records,
    continuation rows (empty parent columns) extend the current record,
    and m2m-style id lists link existing children.
    """
    model_name = 'export.one2many'
    def test_name_get(self):
        """A bare o2m column is treated as a name lookup, which fails for
        arbitrary text.
        """
        s = u'Java is a DSL for taking large XML files and converting them ' \
            u'to stack traces'
        result = self.import_(
            ['const', 'value'],
            [['5', s]])
        self.assertEqual(result['messages'], [message(
            u"No matching record found for name '%s' in field 'unknown'" % s,
            moreinfo=moreaction(res_model='export.one2many.child'))])
        self.assertIs(result['ids'], False)
    def test_single(self):
        """One row, one child record created through ``value/value``."""
        result = self.import_(['const', 'value/value'], [
            ['5', '63']
        ])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 1)
        (b,) = self.browse()
        self.assertEqual(b.const, 5)
        self.assertEqual(values(b.value), [63])
    def test_multicore(self):
        """Rows with a non-empty parent column each start a new record."""
        result = self.import_(['const', 'value/value'], [
            ['5', '63'],
            ['6', '64'],
        ])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 2)
        b1, b2 = self.browse()
        self.assertEqual(b1.const, 5)
        self.assertEqual(values(b1.value), [63])
        self.assertEqual(b2.const, 6)
        self.assertEqual(values(b2.value), [64])
    def test_multisub(self):
        """Continuation rows (empty 'const') add children to the current
        record instead of creating new parents.
        """
        result = self.import_(['const', 'value/value'], [
            ['5', '63'],
            ['', '64'],
            ['', '65'],
            ['', '66'],
        ])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 1)
        (b,) = self.browse()
        self.assertEqual(values(b.value), [63, 64, 65, 66])
    def test_multi_subfields(self):
        """Several subfield columns of the same o2m are zipped together
        into the same child records.
        """
        result = self.import_(['value/str', 'const', 'value/value'], [
            ['this', '5', '63'],
            ['is', '', '64'],
            ['the', '', '65'],
            ['rhythm', '', '66'],
        ])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 1)
        (b,) = self.browse()
        self.assertEqual(values(b.value), [63, 64, 65, 66])
        self.assertEqual(
            values(b.value, 'str'),
            'this is the rhythm'.split())
    def test_link_inline(self):
        """ m2m-style specification for o2ms
        """
        id1 = self.registry('export.one2many.child').create(self.cr, openerp.SUPERUSER_ID, {
            'str': 'Bf', 'value': 109
        })
        id2 = self.registry('export.one2many.child').create(self.cr, openerp.SUPERUSER_ID, {
            'str': 'Me', 'value': 262
        })
        result = self.import_(['const', 'value/.id'], [
            ['42', '%d,%d' % (id1, id2)]
        ])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 1)
        [b] = self.browse()
        self.assertEqual(b.const, 42)
        # automatically forces link between core record and o2ms
        self.assertEqual(values(b.value), [109, 262])
        self.assertEqual(values(b.value, field='parent_id'), [b, b])
    def test_link(self):
        """ O2M relating to an existing record (update) force a LINK_TO as well
        """
        O2M = self.registry('export.one2many.child')
        id1 = O2M.create(self.cr, openerp.SUPERUSER_ID, {
            'str': 'Bf', 'value': 109
        })
        id2 = O2M.create(self.cr, openerp.SUPERUSER_ID, {
            'str': 'Me', 'value': 262
        })
        result = self.import_(['const', 'value/.id'], [
            ['42', str(id1)],
            ['', str(id2)],
        ])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 1)
        [b] = self.browse()
        self.assertEqual(b.const, 42)
        # automatically forces link between core record and o2ms
        self.assertEqual(values(b.value), [109, 262])
        self.assertEqual(values(b.value, field='parent_id'), [b, b])
    def test_link_2(self):
        """Linking by db id while also updating a subfield of the linked
        child records.
        """
        O2M_c = self.registry('export.one2many.child')
        id1 = O2M_c.create(self.cr, openerp.SUPERUSER_ID, {
            'str': 'Bf', 'value': 109
        })
        id2 = O2M_c.create(self.cr, openerp.SUPERUSER_ID, {
            'str': 'Me', 'value': 262
        })
        result = self.import_(['const', 'value/.id', 'value/value'], [
            ['42', str(id1), '1'],
            ['', str(id2), '2'],
        ])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 1)
        [b] = self.browse()
        self.assertEqual(b.const, 42)
        self.assertEqual(values(b.value), [1, 2])
        self.assertEqual(values(b.value, field='parent_id'), [b, b])
class test_o2m_multiple(ImporterCase):
    """Two one2many fields imported side by side: continuation rows
    (empty 'const') attach to the current parent, and each o2m collects
    its own non-empty cells independently of the other.
    """
    model_name = 'export.one2many.multiple'
    def test_multi_mixed(self):
        """Both o2m columns populated on the same rows."""
        result = self.import_(['const', 'child1/value', 'child2/value'], [
            ['5', '11', '21'],
            ['', '12', '22'],
            ['', '13', '23'],
            ['', '14', ''],
        ])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 1)
        [b] = self.browse()
        self.assertEqual(values(b.child1), [11, 12, 13, 14])
        self.assertEqual(values(b.child2), [21, 22, 23])
    def test_multi(self):
        """One o2m partially interleaved with the other."""
        result = self.import_(['const', 'child1/value', 'child2/value'], [
            ['5', '11', '21'],
            ['', '12', ''],
            ['', '13', ''],
            ['', '14', ''],
            ['', '', '22'],
            ['', '', '23'],
        ])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 1)
        [b] = self.browse()
        self.assertEqual(values(b.child1), [11, 12, 13, 14])
        self.assertEqual(values(b.child2), [21, 22, 23])
    def test_multi_fullsplit(self):
        """The two o2ms fully separated into disjoint row ranges."""
        result = self.import_(['const', 'child1/value', 'child2/value'], [
            ['5', '11', ''],
            ['', '12', ''],
            ['', '13', ''],
            ['', '14', ''],
            ['', '', '21'],
            ['', '', '22'],
            ['', '', '23'],
        ])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 1)
        [b] = self.browse()
        self.assertEqual(b.const, 5)
        self.assertEqual(values(b.child1), [11, 12, 13, 14])
        self.assertEqual(values(b.child2), [21, 22, 23])
class test_realworld(common.TransactionCase):
    """End-to-end ``load()`` runs against larger JSON fixtures shipped
    next to this test module.
    """
    def test_bigfile(self):
        """Bulk-load a large partner fixture without errors."""
        data = json.loads(pkgutil.get_data(self.__module__, 'contacts_big.json'))
        result = self.registry('res.partner').load(
            self.cr, openerp.SUPERUSER_ID,
            ['name', 'mobile', 'email', 'image'],
            data)
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), len(data))
    def test_backlink(self):
        """Load partners that reference each other via parent_id."""
        data = json.loads(pkgutil.get_data(self.__module__, 'contacts.json'))
        result = self.registry('res.partner').load(
            self.cr, openerp.SUPERUSER_ID,
            ["name", "type", "street", "city", "country_id", "category_id",
             "supplier", "customer", "is_company", "parent_id"],
            data)
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), len(data))
    def test_recursive_o2m(self):
        """ The content of the o2m field's dict needs to go through conversion
        as it may be composed of convertables or other relational fields
        """
        self.registry('ir.model.data').clear_caches()
        Model = self.registry('export.one2many.recursive')
        result = Model.load(self.cr, openerp.SUPERUSER_ID,
            ['value', 'child/const', 'child/child1/str', 'child/child2/value'],
            [
                ['4', '42', 'foo', '55'],
                ['', '43', 'bar', '56'],
                ['', '', 'baz', ''],
                ['', '55', 'qux', '57'],
                ['5', '99', 'wheee', ''],
                ['', '98', '', '12'],
            ],
            context=None)
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 2)
        b = Model.browse(self.cr, openerp.SUPERUSER_ID, result['ids'], context=None)
        self.assertEqual((b[0].value, b[1].value), (4, 5))
        self.assertEqual([child.str for child in b[0].child[1].child1],
                         ['bar', 'baz'])
        self.assertFalse(len(b[1].child[1].child1))
        self.assertEqual([child.value for child in b[1].child[1].child2],
                         [12])
class test_date(ImporterCase):
    """Import behaviour for date fields: no rows, a well-formed ISO
    date, and a value that cannot be parsed as a date.
    """
    model_name = 'export.date'

    def test_empty(self):
        """Importing zero rows succeeds and creates nothing."""
        outcome = self.import_(['value'], [])
        self.assertEqual(outcome, {'ids': [], 'messages': []})

    def test_basic(self):
        """A YYYY-MM-DD value imports cleanly as a single record."""
        outcome = self.import_(['value'], [['2012-02-03']])
        self.assertFalse(outcome['messages'])
        self.assertEqual(len(outcome['ids']), 1)

    def test_invalid(self):
        """A non-date string aborts the import with a parse error."""
        outcome = self.import_(['value'], [['not really a date']])
        expected_messages = [
            message(u"'not really a date' does not seem to be a valid date "
                    u"for field 'unknown'",
                    moreinfo=u"Use the format '2012-12-31'")]
        self.assertEqual(outcome['messages'], expected_messages)
        self.assertIs(outcome['ids'], False)
class test_datetime(ImporterCase):
    """Import of datetime fields, including which timezone interprets the
    incoming naive values: context tz wins over the importing user's tz,
    which itself wins over the UTC fallback.
    """
    model_name = 'export.datetime'
    def test_empty(self):
        result = self.import_(['value'], [])
        self.assertEqual(
            self.import_(['value'], []),
            {'ids': [], 'messages': []})
    def test_basic(self):
        result = self.import_(['value'], [['2012-02-03 11:11:11']])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 1)
    def test_invalid(self):
        """A non-datetime string aborts the import with a parse error."""
        result = self.import_(['value'], [['not really a datetime']])
        self.assertEqual(result['messages'], [
            message(u"'not really a datetime' does not seem to be a valid "
                    u"datetime for field 'unknown'",
                    moreinfo=u"Use the format '2012-12-31 23:59:59'")])
        self.assertIs(result['ids'], False)
    def test_checktz1(self):
        """ Imported date should be interpreted as being in the tz provided by
        the context
        """
        # write dummy tz in user (Asia/Hovd UTC+0700), should be superseded by
        # context
        self.registry('res.users').write(
            self.cr, openerp.SUPERUSER_ID, [openerp.SUPERUSER_ID],
            {'tz': 'Asia/Hovd'})
        # UTC+1400
        result = self.import_(
            ['value'], [['2012-02-03 11:11:11']], {'tz': 'Pacific/Kiritimati'})
        self.assertFalse(result['messages'])
        self.assertEqual(
            values(self.read(domain=[('id', 'in', result['ids'])])),
            ['2012-02-02 21:11:11'])
        # UTC-0930
        result = self.import_(
            ['value'], [['2012-02-03 11:11:11']], {'tz': 'Pacific/Marquesas'})
        self.assertFalse(result['messages'])
        self.assertEqual(
            values(self.read(domain=[('id', 'in', result['ids'])])),
            ['2012-02-03 20:41:11'])
    def test_usertz(self):
        """ If the context does not hold a timezone, the importing user's tz
        should be used
        """
        # UTC +1000
        self.registry('res.users').write(
            self.cr, openerp.SUPERUSER_ID, [openerp.SUPERUSER_ID],
            {'tz': 'Asia/Yakutsk'})
        result = self.import_(
            ['value'], [['2012-02-03 11:11:11']])
        self.assertFalse(result['messages'])
        self.assertEqual(
            values(self.read(domain=[('id', 'in', result['ids'])])),
            ['2012-02-03 01:11:11'])
    def test_notz(self):
        """ If there is no tz either in the context or on the user, falls back
        to UTC
        """
        self.registry('res.users').write(
            self.cr, openerp.SUPERUSER_ID, [openerp.SUPERUSER_ID],
            {'tz': False})
        result = self.import_(['value'], [['2012-02-03 11:11:11']])
        self.assertFalse(result['messages'])
        self.assertEqual(
            values(self.read(domain=[('id', 'in', result['ids'])])),
            ['2012-02-03 11:11:11'])
class test_unique(ImporterCase):
    """Violations of a SQL unique constraint: every offending row is
    reported with its row range, and no ids are returned.
    """
    model_name = 'export.unique'
    @mute_logger('openerp.sql_db')
    def test_unique(self):
        # rows 1 and 4 (0-based) duplicate the values of rows 0 and 3
        result = self.import_(['value'], [
            ['1'],
            ['1'],
            ['2'],
            ['3'],
            ['3'],
        ])
        self.assertFalse(result['ids'])
        self.assertEqual(result['messages'], [
            dict(message=u"The value for the field 'value' already exists. "
                         u"This might be 'unknown' in the current model, "
                         u"or a field of the same name in an o2m.",
                 type='error', rows={'from': 1, 'to': 1},
                 record=1, field='value'),
            dict(message=u"The value for the field 'value' already exists. "
                         u"This might be 'unknown' in the current model, "
                         u"or a field of the same name in an o2m.",
                 type='error', rows={'from': 4, 'to': 4},
                 record=4, field='value'),
        ])
| gpl-3.0 |
mars-knowsnothing/amos-bot | src/Lib/encodings/shift_jis.py | 816 | 1039 | #
# shift_jis.py: Python Unicode Codec for SHIFT_JIS
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_jp, codecs
import _multibytecodec as mbc
# The C-level codec implementation for SHIFT_JIS; every class below
# delegates to this object.
codec = _codecs_jp.getcodec('shift_jis')
class Codec(codecs.Codec):
    # Stateless encode/decode entry points, delegated to the C codec.
    encode = codec.encode
    decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    # The multibyte base class implements the behaviour; it only needs
    # the C codec as a class attribute.
    codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    # Mirror of IncrementalEncoder for the decoding direction.
    codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    # Stream-oriented reader built on the same C codec.
    codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    # Stream-oriented writer built on the same C codec.
    codec = codec
def getregentry():
    """Build the codecs.CodecInfo entry used to register this codec."""
    stateless = Codec()
    return codecs.CodecInfo(
        name='shift_jis',
        encode=stateless.encode,
        decode=stateless.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
| gpl-3.0 |
lucafavatella/intellij-community | python/helpers/epydoc/markup/plaintext.py | 100 | 2934 | #
# plaintext.py: plaintext docstring parsing
# Edward Loper
#
# Created [04/10/01 12:00 AM]
# $Id: plaintext.py 1574 2007-03-07 02:55:14Z dvarrazzo $
#
"""
Parser for plaintext docstrings. Plaintext docstrings are rendered as
verbatim output, preserving all whitespace.
"""
__docformat__ = 'epytext en'
from epydoc.markup import *
from epydoc.util import plaintext_to_html, plaintext_to_latex
def parse_docstring(docstring, errors, **options):
    """
    Parse the given plaintext docstring.

    @param docstring: The docstring to parse.
    @param errors: A list where C{ParseError}s would be accumulated.
        Plaintext parsing cannot fail, so this function never appends to
        it; the parameter exists to match the signature shared by the
        markup parsers.
    @return: A C{ParsedPlaintextDocstring} that encodes the contents of
        the given plaintext docstring.
    @rtype: C{L{ParsedPlaintextDocstring}}
    """
    return ParsedPlaintextDocstring(docstring, **options)
class ParsedPlaintextDocstring(ParsedDocstring):
    """
    An encoded version of a plaintext docstring.  With C{verbatim=1}
    (the default) output is rendered preformatted; with C{verbatim=0}
    it is escaped as ordinary inline text instead.
    """
    def __init__(self, text, **options):
        # verbatim flag controls the HTML/LaTeX rendering mode (see
        # class docstring); can be overridden per-call via **options.
        self._verbatim = options.get('verbatim', 1)
        if text is None: raise ValueError, 'Bad text value (expected a str)'
        self._text = text
    def to_html(self, docstring_linker, **options):
        if options.get('verbatim', self._verbatim) == 0:
            return plaintext_to_html(self.to_plaintext(docstring_linker))
        else:
            # Base class wraps to_plaintext() output in preformatted markup.
            return ParsedDocstring.to_html(self, docstring_linker, **options)
    def to_latex(self, docstring_linker, **options):
        if options.get('verbatim', self._verbatim) == 0:
            return plaintext_to_latex(self.to_plaintext(docstring_linker))
        else:
            return ParsedDocstring.to_latex(self, docstring_linker, **options)
    def to_plaintext(self, docstring_linker, **options):
        # Optional 'indent' prefixes every line with that many spaces;
        # a trailing newline is always appended.
        if 'indent' in options:
            indent = options['indent']
            lines = self._text.split('\n')
            return '\n'.join([' '*indent+l for l in lines])+'\n'
        return self._text+'\n'
    # Matches an initial summary: everything up to the first period
    # followed by whitespace/end-of-string, or up to the first blank line.
    _SUMMARY_RE = re.compile(r'(\s*[\w\W]*?(?:\.(\s|$)|[\n][\t ]*[\n]))')
    def summary(self):
        # Returns (summary_docstring, other) where `other` flags whether
        # non-whitespace content follows the summary.
        m = self._SUMMARY_RE.match(self._text)
        if m:
            other = self._text[m.end():]
            return (ParsedPlaintextDocstring(m.group(1), verbatim=0),
                    other != '' and not other.isspace())
        else:
            # No sentence boundary: fall back to the first line, with an
            # ellipsis appended when more lines follow.
            parts = self._text.strip('\n').split('\n', 1)
            if len(parts) == 1:
                summary = parts[0]
                other = False
            else:
                summary = parts[0] + '...'
                other = True
            return ParsedPlaintextDocstring(summary, verbatim=0), other
    # def concatenate(self, other):
    #     if not isinstance(other, ParsedPlaintextDocstring):
    #         raise ValueError, 'Could not concatenate docstrings'
    #     text = self._text+other._text
    #     options = self._options.copy()
    #     options.update(other._options)
    #     return ParsedPlaintextDocstring(text, options)
| apache-2.0 |
HomuHomu/GT-N7000-ICS-kernel | tools/perf/scripts/python/futex-contention.py | 11261 | 1486 | # futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
# Per-thread state for FUTEX_WAITs in flight, plus long-lived statistics.
# NOTE: the original initialized process_names twice; a single
# initialization is equivalent and keeps one comment per name.
process_names = {}    # long-lived pid-to-execname mapping
thread_thislock = {}  # tid -> futex uaddr currently being waited on
thread_blocktime = {} # tid -> timestamp (ns) when the wait began
lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
	nr, uaddr, op, val, utime, uaddr2, val3):
	# Record the start of a FUTEX_WAIT so the matching sys_exit_futex
	# handler can compute how long the thread stayed blocked.
	cmd = op & FUTEX_CMD_MASK
	if cmd != FUTEX_WAIT:
		return # we don't care about originators of WAKE events

	process_names[tid] = comm
	thread_thislock[tid] = uaddr
	thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
	nr, ret):
	# Only threads previously seen entering FUTEX_WAIT are tracked.
	# NOTE: dict.has_key() is Python-2-only (this script targets the
	# py2 perf-script interpreter).
	if thread_blocktime.has_key(tid):
		elapsed = nsecs(s, ns) - thread_blocktime[tid]
		add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
		del thread_blocktime[tid]
		del thread_thislock[tid]
def trace_begin():
	# Called by perf before the first event; py2 print statement.
	print "Press control+C to stop and show the summary"
def trace_end():
	# Dump a contention summary for every (thread, futex) pair observed.
	# NOTE: min/max shadow the builtins; harmless here since they are
	# plain locals holding unpacked stats.
	for (tid, lock) in lock_waits:
		min, max, avg, count = lock_waits[tid, lock]
		print "%s[%d] lock %x contended %d times, %d avg ns" % \
		      (process_names[tid], tid, lock, count, avg)
| gpl-2.0 |
KingPsychopath/namebench | nb_third_party/jinja2/visitor.py | 1401 | 3316 | # -*- coding: utf-8 -*-
"""
jinja2.visitor
~~~~~~~~~~~~~~
This module implements a visitor for the nodes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from jinja2.nodes import Node
class NodeVisitor(object):
    """Walks the abstract syntax tree and calls a visitor function for
    every node found.  The visitor functions may return values, which
    are forwarded by the `visit` method.

    By default the visitor function for a node is ``'visit_'`` plus the
    node's class name, so a `TryFinally` node would be handled by
    `visit_TryFinally`.  Override `get_visitor` to change that mapping.
    When no visitor function exists for a node (`get_visitor` returns
    `None`), `generic_visit` is used instead.
    """

    def get_visitor(self, node):
        """Return the visitor function for this node, or `None` if no
        visitor exists for it (in which case the generic visit function
        is used instead).
        """
        return getattr(self, 'visit_' + node.__class__.__name__, None)

    def visit(self, node, *args, **kwargs):
        """Visit a node, dispatching to its specific visitor if any."""
        visitor = self.get_visitor(node)
        if visitor is None:
            return self.generic_visit(node, *args, **kwargs)
        return visitor(node, *args, **kwargs)

    def generic_visit(self, node, *args, **kwargs):
        """Called if no explicit visitor function exists for a node:
        visits all child nodes and returns `None`.
        """
        for child in node.iter_child_nodes():
            self.visit(child, *args, **kwargs)
class NodeTransformer(NodeVisitor):
    """Walks the abstract syntax tree and allows modifications of nodes.

    The `NodeTransformer` will walk the AST and use the return value of the
    visitor functions to replace or remove the old node.  If the return
    value of the visitor function is `None` the node will be removed
    from the previous location otherwise it's replaced with the return
    value.  The return value may be the original node in which case no
    replacement takes place.
    """

    def generic_visit(self, node, *args, **kwargs):
        for field, old_value in node.iter_fields():
            if isinstance(old_value, list):
                # Rebuild list fields: a visited child may be dropped
                # (visitor returned None), replaced 1:1 (a Node), or
                # expanded (a non-Node iterable of replacements).
                # Non-Node entries are kept untouched.
                new_values = []
                for value in old_value:
                    if isinstance(value, Node):
                        value = self.visit(value, *args, **kwargs)
                        if value is None:
                            continue
                        elif not isinstance(value, Node):
                            new_values.extend(value)
                            continue
                    new_values.append(value)
                # Mutate the original list in place so other references
                # to it stay valid.
                old_value[:] = new_values
            elif isinstance(old_value, Node):
                new_node = self.visit(old_value, *args, **kwargs)
                if new_node is None:
                    # Visitor requested removal: delete the attribute.
                    delattr(node, field)
                else:
                    setattr(node, field, new_node)
        return node

    def visit_list(self, node, *args, **kwargs):
        """As transformers may return lists in some places this method
        can be used to enforce a list as return value.
        """
        rv = self.visit(node, *args, **kwargs)
        if not isinstance(rv, list):
            rv = [rv]
        return rv
| apache-2.0 |
wgcv/SWW-Crashphone | lib/python2.7/site-packages/PIL/ImagePath.py | 41 | 1231 | #
# The Python Imaging Library
# $Id$
#
# path interface
#
# History:
# 1996-11-04 fl Created
# 2002-04-14 fl Added documentation stub class
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
from PIL import Image
# the Python class below is overridden by the C implementation.
class Path:
    """
    Documentation stub for the path type.  The Python class defined
    here is replaced at the bottom of this module by the C
    implementation (``Image.core.path``); these methods exist only to
    document the interface.
    """
    def __init__(self, xy):
        pass

##
# Compacts the path, by removing points that are close to each
# other.  This method modifies the path in place.

    def compact(self, distance=2):
        pass

##
# Gets the bounding box.

    def getbbox(self):
        pass

##
# Maps the path through a function.

    def map(self, function):
        pass

##
# Converts the path to Python list.
#
# @param flat By default, this function returns a list of 2-tuples
#     [(x, y), ...].  If this argument is true, it returns a flat
#     list [x, y, ...] instead.
# @return A list of coordinates.

    def tolist(self, flat=0):
        pass

##
# Transforms the path.

    def transform(self, matrix):
        pass


# override with C implementation
Path = Image.core.path
| apache-2.0 |
tschneidereit/servo | python/mozlog/mozlog/structured/formatters/xunit.py | 46 | 3804 | import types
from xml.etree import ElementTree
import base
def format_test_id(test_id):
    """Take a test id and return something that looks a bit like
    a class path.

    Dots are folded into underscores and path separators become dots,
    so a path-style id such as ``a/b/c.html`` becomes ``a.b.c_html``.
    """
    # NOTE(review): types.StringTypes is Python-2-only; under Python 3
    # this line would raise AttributeError instead of NotImplementedError.
    if type(test_id) not in types.StringTypes:
        #Not sure how to deal with reftests yet
        raise NotImplementedError

    #Turn a path into something like a class hierarchy
    return test_id.replace('.', '_').replace('/', ".")
class XUnitFormatter(base.BaseFormatter):
    """Formatter that produces XUnit-style XML output.

    The tree is created in-memory so this formatter may be problematic
    with very large log files.

    Note that the data model isn't a perfect match. In
    particular XUnit assumes that each test has a unittest-style
    class name and function name, which isn't the case for us. The
    implementation currently replaces path names with something that
    looks like class names, but this doesn't work for test types that
    actually produce class names, or for test types that have multiple
    components in their test id (e.g. reftests)."""

    def __init__(self):
        self.tree = ElementTree.ElementTree()
        self.root = None  # the <testsuite> element; created in suite_start()
        self.suite_start_time = None  # "time" values are in milliseconds
        self.test_start_time = None
        self.tests_run = 0
        self.errors = 0
        self.failures = 0
        self.skips = 0

    def suite_start(self, data):
        self.root = ElementTree.Element("testsuite")
        # NOTE(review): this sets an ad-hoc 'root' attribute on the
        # ElementTree instance; the supported way to attach a root is
        # tree._setroot().  Harmless here because serialization in
        # suite_end() uses self.root directly — confirm the intent.
        self.tree.root = self.root
        self.suite_start_time = data["time"]

    def test_start(self, data):
        self.tests_run += 1
        self.test_start_time = data["time"]

    def _create_result(self, data):
        # Append one <testcase> element for this result, updating the
        # error/failure/skip counters as a side effect.
        test = ElementTree.SubElement(self.root, "testcase")
        name = format_test_id(data["test"])
        extra = data.get('extra') or {}
        test.attrib["classname"] = extra.get('class_name') or name
        if "subtest" in data:
            test.attrib["name"] = data["subtest"]
            # We generally don't know how long subtests take
            test.attrib["time"] = "0"
        else:
            if "." in name:
                test_name = name.rsplit(".", 1)[1]
            else:
                test_name = name
            test.attrib["name"] = extra.get('method_name') or test_name
            # Input timestamps are milliseconds; XUnit wants seconds.
            test.attrib["time"] = "%.2f" % ((data["time"] - self.test_start_time) / 1000.0)
        if ("expected" in data and data["expected"] != data["status"]):
            # Unexpected results map to <error> for harness-level statuses
            # and <failure> for ordinary test failures.
            if data["status"] in ("NOTRUN", "ASSERT", "ERROR"):
                result = ElementTree.SubElement(test, "error")
                self.errors += 1
            else:
                result = ElementTree.SubElement(test, "failure")
                self.failures += 1
            result.attrib["message"] = "Expected %s, got %s" % (data["expected"], data["status"])
            result.text = '%s\n%s' % (data.get('stack', ''), data.get('message', ''))
        elif data["status"] == "SKIP":
            result = ElementTree.SubElement(test, "skipped")
            self.skips += 1

    def test_status(self, data):
        self._create_result(data)

    def test_end(self, data):
        self._create_result(data)

    def suite_end(self, data):
        # Stamp the suite-level summary attributes, then serialize.
        self.root.attrib.update({"tests": str(self.tests_run),
                                 "errors": str(self.errors),
                                 "failures": str(self.failures),
                                 "skips": str(self.skips),
                                 "time": "%.2f" % (
                                     (data["time"] - self.suite_start_time) / 1000.0)})
        xml_string = ElementTree.tostring(self.root, encoding="utf8")
        # pretty printing can not be done from xml.etree
        from xml.dom import minidom
        return minidom.parseString(xml_string).toprettyxml(encoding="utf8")
| mpl-2.0 |
WaveBlocks/WaveBlocks | src/WaveBlocks/QuadratureRule.py | 1 | 1677 | """The WaveBlocks Project
This file contains the interface for general quadrature rules.
Do not confuse quadratures with quadrature rules! Quadrature rules
are structs containing just nodes and weights and some convenience
methods. Quadratures are classes that really can compute things
like inner products (brakets) etc.
@author: R. Bourquin
@copyright: Copyright (C) 2010, 2011 R. Bourquin
@license: Modified BSD License
"""
class QuadratureRule:
    r"""Abstract base interface for quadrature rules.

    A quadrature rule is a plain container of nodes and weights together
    with a few convenience accessors.  Every method here raises
    :py:class:`NotImplementedError`; concrete rules must override all of
    them.
    """

    # Message shared by every abstract method below.
    _ABSTRACT_MSG = "'QuadratureRule' is an abstract interface."

    def __init__(self):
        r"""
        General interface for quadrature rules.

        :raise NotImplementedError: Abstract interface.
        """
        raise NotImplementedError(self._ABSTRACT_MSG)

    def __str__(self):
        raise NotImplementedError(self._ABSTRACT_MSG)

    def get_order(self):
        r"""
        :return: The order :math:`R` of the quadrature.
        """
        raise NotImplementedError(self._ABSTRACT_MSG)

    def get_number_nodes(self):
        r"""
        :return: The number of quadrature nodes.
        """
        raise NotImplementedError(self._ABSTRACT_MSG)

    def get_nodes(self):
        r"""
        :return: An array containing the quadrature nodes :math:`\gamma_i`.
        """
        raise NotImplementedError(self._ABSTRACT_MSG)

    def get_weights(self):
        r"""
        :return: An array containing the quadrature weights :math:`\omega_i`.
        """
        raise NotImplementedError(self._ABSTRACT_MSG)
| bsd-3-clause |
ychen820/microblog | y/google-cloud-sdk/.install/.backup/platform/gsutil/third_party/oauth2client/tests/test_oauth2client.py | 2 | 43193 | #!/usr/bin/python2.4
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Oauth2client tests
Unit tests for oauth2client.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import base64
import datetime
import json
try:
from mox3 import mox
except ImportError:
import mox
import os
import time
import unittest
import six
from six.moves import urllib
from .http_mock import HttpMock
from .http_mock import HttpMockSequence
from oauth2client import GOOGLE_REVOKE_URI
from oauth2client import GOOGLE_TOKEN_URI
from oauth2client import client
from oauth2client.client import AccessTokenCredentials
from oauth2client.client import AccessTokenCredentialsError
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import ADC_HELP_MSG
from oauth2client.client import AssertionCredentials
from oauth2client.client import AUTHORIZED_USER
from oauth2client.client import Credentials
from oauth2client.client import DEFAULT_ENV_NAME
from oauth2client.client import ApplicationDefaultCredentialsError
from oauth2client.client import FlowExchangeError
from oauth2client.client import GoogleCredentials
from oauth2client.client import GOOGLE_APPLICATION_CREDENTIALS
from oauth2client.client import MemoryCache
from oauth2client.client import NonAsciiHeaderError
from oauth2client.client import OAuth2Credentials
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.client import OOB_CALLBACK_URN
from oauth2client.client import REFRESH_STATUS_CODES
from oauth2client.client import SERVICE_ACCOUNT
from oauth2client.client import Storage
from oauth2client.client import TokenRevokeError
from oauth2client.client import VerifyJwtTokenError
from oauth2client.client import _extract_id_token
from oauth2client.client import _get_application_default_credential_from_file
from oauth2client.client import _get_environment
from oauth2client.client import _get_environment_variable_file
from oauth2client.client import _get_well_known_file
from oauth2client.client import _raise_exception_for_missing_fields
from oauth2client.client import _raise_exception_for_reading_json
from oauth2client.client import _update_query_params
from oauth2client.client import credentials_from_clientsecrets_and_code
from oauth2client.client import credentials_from_code
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import save_to_well_known_file
from oauth2client.clientsecrets import _loadfile
from oauth2client.service_account import _ServiceAccountCredentials
# Directory holding the JSON credential fixture files used by these tests.
DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
# TODO(craigcitro): This is duplicated from
# googleapiclient.test_discovery; consolidate these definitions.
def assertUrisEqual(testcase, expected, actual):
    """Test that URIs are the same, up to reordering of query parameters.

    :param testcase: a unittest.TestCase used for its assert* methods.
    :param expected: the expected URI string.
    :param actual: the URI string under test.
    """
    expected = urllib.parse.urlparse(expected)
    actual = urllib.parse.urlparse(actual)
    testcase.assertEqual(expected.scheme, actual.scheme)
    testcase.assertEqual(expected.netloc, actual.netloc)
    testcase.assertEqual(expected.path, actual.path)
    testcase.assertEqual(expected.params, actual.params)
    testcase.assertEqual(expected.fragment, actual.fragment)
    expected_query = urllib.parse.parse_qs(expected.query)
    actual_query = urllib.parse.parse_qs(actual.query)
    # Comparing the parsed dicts directly checks both directions at once.
    # The previous pair of per-key loops raised KeyError (a test *error*,
    # not a clean failure) when one URI had a parameter the other lacked.
    testcase.assertEqual(expected_query, actual_query)
def datafile(filename):
    """Return the path of *filename* inside the tests' data directory."""
    return os.path.join(DATA_DIR, filename)
def load_and_cache(existing_file, fakename, cache_mock):
    """Load client secrets from *existing_file* and store them in
    *cache_mock* under the key *fakename*."""
    client_type, client_info = _loadfile(datafile(existing_file))
    cache_mock.cache[fakename] = {client_type: client_info}
class CacheMock(object):
    """Minimal in-memory stand-in for a clientsecrets cache.

    The *namespace* argument accepted by the real cache API is
    deliberately ignored to keep the tests simple.
    """

    def __init__(self):
        self.cache = {}

    def get(self, key, namespace=''):
        # namespace intentionally ignored for easier testing
        return self.cache.get(key)

    def set(self, key, value, namespace=''):
        # namespace intentionally ignored for easier testing
        self.cache[key] = value
class CredentialsTests(unittest.TestCase):
    """Round-trip the base Credentials class through JSON."""

    def test_to_from_json(self):
        original = Credentials()
        serialized = original.to_json()
        restored = Credentials.new_from_json(serialized)
class MockResponse(object):
    """Mock of the response object returned by a urllib2.urlopen() call."""

    class _Info:
        """Bare holder mimicking the object returned by info()."""

        def __init__(self, headers):
            self.headers = headers

    def __init__(self, headers):
        self._headers = headers

    def info(self):
        return MockResponse._Info(self._headers)
class GoogleCredentialsTests(unittest.TestCase):
    """Tests for GoogleCredentials and application-default credential discovery.

    setUp snapshots every piece of process state the discovery logic reads
    (SERVER_SOFTWARE, GOOGLE_APPLICATION_CREDENTIALS, APPDATA, os.name) and
    tearDown restores it, so these tests do not leak environment changes.
    """

    def setUp(self):
        # Snapshot environment state that the tests below mutate.
        self.env_server_software = os.environ.get('SERVER_SOFTWARE', None)
        self.env_google_application_credentials = (
            os.environ.get(GOOGLE_APPLICATION_CREDENTIALS, None))
        self.env_appdata = os.environ.get('APPDATA', None)
        self.os_name = os.name
        from oauth2client import client
        client.SETTINGS.env_name = None

    def tearDown(self):
        # Restore everything captured in setUp.
        self.reset_env('SERVER_SOFTWARE', self.env_server_software)
        self.reset_env(GOOGLE_APPLICATION_CREDENTIALS,
                       self.env_google_application_credentials)
        self.reset_env('APPDATA', self.env_appdata)
        os.name = self.os_name

    def reset_env(self, env, value):
        """Set the environment variable 'env' to 'value' (unset if None)."""
        if value is not None:
            os.environ[env] = value
        else:
            os.environ.pop(env, '')

    def validate_service_account_credentials(self, credentials):
        # Field values mirror the tests/data/gcloud fixture files.
        self.assertTrue(isinstance(credentials, _ServiceAccountCredentials))
        self.assertEqual('123', credentials._service_account_id)
        self.assertEqual('dummy@google.com', credentials._service_account_email)
        self.assertEqual('ABCDEF', credentials._private_key_id)
        self.assertEqual('', credentials._scopes)

    def validate_google_credentials(self, credentials):
        # Field values mirror the authorized-user fixture file.
        self.assertTrue(isinstance(credentials, GoogleCredentials))
        self.assertEqual(None, credentials.access_token)
        self.assertEqual('123', credentials.client_id)
        self.assertEqual('secret', credentials.client_secret)
        self.assertEqual('alabalaportocala', credentials.refresh_token)
        self.assertEqual(None, credentials.token_expiry)
        self.assertEqual(GOOGLE_TOKEN_URI, credentials.token_uri)
        self.assertEqual('Python client library', credentials.user_agent)

    def get_a_google_credentials_object(self):
        # A bare GoogleCredentials with every constructor argument None.
        return GoogleCredentials(None, None, None, None, None, None, None, None)

    def test_create_scoped_required(self):
        self.assertFalse(
            self.get_a_google_credentials_object().create_scoped_required())

    def test_create_scoped(self):
        # create_scoped() is a no-op for plain GoogleCredentials.
        credentials = self.get_a_google_credentials_object()
        self.assertEqual(credentials, credentials.create_scoped(None))
        self.assertEqual(credentials,
                         credentials.create_scoped(['dummy_scope']))

    def test_get_environment_gae_production(self):
        os.environ['SERVER_SOFTWARE'] = 'Google App Engine/XYZ'
        self.assertEqual('GAE_PRODUCTION', _get_environment())

    def test_get_environment_gae_local(self):
        os.environ['SERVER_SOFTWARE'] = 'Development/XYZ'
        self.assertEqual('GAE_LOCAL', _get_environment())

    def test_get_environment_gce_production(self):
        # GCE detection probes the metadata server; mock out urlopen.
        os.environ['SERVER_SOFTWARE'] = ''
        mockResponse = MockResponse(['Metadata-Flavor: Google\r\n'])
        m = mox.Mox()
        urllib2_urlopen = m.CreateMock(object)
        urllib2_urlopen.__call__(
            ('http://metadata.google.internal')).AndReturn(mockResponse)
        m.ReplayAll()
        self.assertEqual('GCE_PRODUCTION', _get_environment(urllib2_urlopen))
        m.UnsetStubs()
        m.VerifyAll()

    def test_get_environment_unknown(self):
        # No GAE marker and no metadata header -> default environment.
        os.environ['SERVER_SOFTWARE'] = ''
        mockResponse = MockResponse([])
        m = mox.Mox()
        urllib2_urlopen = m.CreateMock(object)
        urllib2_urlopen.__call__(
            ('http://metadata.google.internal')).AndReturn(mockResponse)
        m.ReplayAll()
        self.assertEqual(DEFAULT_ENV_NAME, _get_environment(urllib2_urlopen))
        m.UnsetStubs()
        m.VerifyAll()

    def test_get_environment_variable_file(self):
        environment_variable_file = datafile(
            os.path.join('gcloud', 'application_default_credentials.json'))
        os.environ[GOOGLE_APPLICATION_CREDENTIALS] = environment_variable_file
        self.assertEqual(environment_variable_file,
                         _get_environment_variable_file())

    def test_get_environment_variable_file_error(self):
        nonexistent_file = datafile('nonexistent')
        os.environ[GOOGLE_APPLICATION_CREDENTIALS] = nonexistent_file
        # we can't use self.assertRaisesRegexp() because it is only in Python 2.7+
        try:
            _get_environment_variable_file()
            self.fail(nonexistent_file + ' should not exist.')
        except ApplicationDefaultCredentialsError as error:
            self.assertEqual('File ' + nonexistent_file +
                             ' (pointed by ' + GOOGLE_APPLICATION_CREDENTIALS +
                             ' environment variable) does not exist!',
                             str(error))

    def test_get_well_known_file_on_windows(self):
        # On Windows (os.name == 'nt') the well-known file lives under %APPDATA%.
        well_known_file = datafile(
            os.path.join('gcloud', 'application_default_credentials.json'))
        os.name = 'nt'
        os.environ['APPDATA'] = DATA_DIR
        self.assertEqual(well_known_file, _get_well_known_file())

    def test_get_application_default_credential_from_file_service_account(self):
        credentials_file = datafile(
            os.path.join('gcloud', 'application_default_credentials.json'))
        credentials = _get_application_default_credential_from_file(
            credentials_file)
        self.validate_service_account_credentials(credentials)

    def test_save_to_well_known_file_service_account(self):
        credential_file = datafile(
            os.path.join('gcloud', 'application_default_credentials.json'))
        credentials = _get_application_default_credential_from_file(
            credential_file)
        temp_credential_file = datafile(
            os.path.join('gcloud', 'temp_well_known_file_service_account.json'))
        save_to_well_known_file(credentials, temp_credential_file)
        with open(temp_credential_file) as f:
            d = json.load(f)
        self.assertEqual('service_account', d['type'])
        self.assertEqual('123', d['client_id'])
        self.assertEqual('dummy@google.com', d['client_email'])
        self.assertEqual('ABCDEF', d['private_key_id'])
        # Clean up the file written by save_to_well_known_file().
        os.remove(temp_credential_file)

    def test_get_application_default_credential_from_file_authorized_user(self):
        credentials_file = datafile(
            os.path.join('gcloud',
                         'application_default_credentials_authorized_user.json'))
        credentials = _get_application_default_credential_from_file(
            credentials_file)
        self.validate_google_credentials(credentials)

    def test_save_to_well_known_file_authorized_user(self):
        credentials_file = datafile(
            os.path.join('gcloud',
                         'application_default_credentials_authorized_user.json'))
        credentials = _get_application_default_credential_from_file(
            credentials_file)
        temp_credential_file = datafile(
            os.path.join('gcloud', 'temp_well_known_file_authorized_user.json'))
        save_to_well_known_file(credentials, temp_credential_file)
        with open(temp_credential_file) as f:
            d = json.load(f)
        self.assertEqual('authorized_user', d['type'])
        self.assertEqual('123', d['client_id'])
        self.assertEqual('secret', d['client_secret'])
        self.assertEqual('alabalaportocala', d['refresh_token'])
        os.remove(temp_credential_file)

    def test_get_application_default_credential_from_malformed_file_1(self):
        credentials_file = datafile(
            os.path.join('gcloud',
                         'application_default_credentials_malformed_1.json'))
        # we can't use self.assertRaisesRegexp() because it is only in Python 2.7+
        try:
            _get_application_default_credential_from_file(credentials_file)
            self.fail('An exception was expected!')
        except ApplicationDefaultCredentialsError as error:
            self.assertEqual("'type' field should be defined "
                             "(and have one of the '" + AUTHORIZED_USER +
                             "' or '" + SERVICE_ACCOUNT + "' values)",
                             str(error))

    def test_get_application_default_credential_from_malformed_file_2(self):
        credentials_file = datafile(
            os.path.join('gcloud',
                         'application_default_credentials_malformed_2.json'))
        # we can't use self.assertRaisesRegexp() because it is only in Python 2.7+
        try:
            _get_application_default_credential_from_file(credentials_file)
            self.fail('An exception was expected!')
        except ApplicationDefaultCredentialsError as error:
            self.assertEqual('The following field(s) must be defined: private_key_id',
                             str(error))

    def test_get_application_default_credential_from_malformed_file_3(self):
        # Malformed JSON propagates as a plain ValueError.
        credentials_file = datafile(
            os.path.join('gcloud',
                         'application_default_credentials_malformed_3.json'))
        self.assertRaises(ValueError, _get_application_default_credential_from_file,
                          credentials_file)

    def test_raise_exception_for_missing_fields(self):
        missing_fields = ['first', 'second', 'third']
        # we can't use self.assertRaisesRegexp() because it is only in Python 2.7+
        try:
            _raise_exception_for_missing_fields(missing_fields)
            self.fail('An exception was expected!')
        except ApplicationDefaultCredentialsError as error:
            self.assertEqual('The following field(s) must be defined: ' +
                             ', '.join(missing_fields),
                             str(error))

    def test_raise_exception_for_reading_json(self):
        credential_file = 'any_file'
        extra_help = ' be good'
        error = ApplicationDefaultCredentialsError('stuff happens')
        # we can't use self.assertRaisesRegexp() because it is only in Python 2.7+
        try:
            _raise_exception_for_reading_json(credential_file, extra_help, error)
            self.fail('An exception was expected!')
        except ApplicationDefaultCredentialsError as ex:
            self.assertEqual('An error was encountered while reading '
                             'json file: ' + credential_file +
                             extra_help + ': ' + str(error),
                             str(ex))

    def test_get_application_default_from_environment_variable_service_account(
            self):
        os.environ['SERVER_SOFTWARE'] = ''
        environment_variable_file = datafile(
            os.path.join('gcloud', 'application_default_credentials.json'))
        os.environ[GOOGLE_APPLICATION_CREDENTIALS] = environment_variable_file
        self.validate_service_account_credentials(
            GoogleCredentials.get_application_default())

    def test_env_name(self):
        # SETTINGS.env_name is lazily populated by get_application_default().
        from oauth2client import client
        self.assertEqual(None, client.SETTINGS.env_name)
        self.test_get_application_default_from_environment_variable_service_account()
        self.assertEqual(DEFAULT_ENV_NAME, client.SETTINGS.env_name)

    def test_get_application_default_from_environment_variable_authorized_user(
            self):
        os.environ['SERVER_SOFTWARE'] = ''
        environment_variable_file = datafile(
            os.path.join('gcloud',
                         'application_default_credentials_authorized_user.json'))
        os.environ[GOOGLE_APPLICATION_CREDENTIALS] = environment_variable_file
        self.validate_google_credentials(
            GoogleCredentials.get_application_default())

    def test_get_application_default_from_environment_variable_malformed_file(
            self):
        os.environ['SERVER_SOFTWARE'] = ''
        environment_variable_file = datafile(
            os.path.join('gcloud',
                         'application_default_credentials_malformed_3.json'))
        os.environ[GOOGLE_APPLICATION_CREDENTIALS] = environment_variable_file
        # we can't use self.assertRaisesRegexp() because it is only in Python 2.7+
        try:
            GoogleCredentials.get_application_default()
            self.fail('An exception was expected!')
        except ApplicationDefaultCredentialsError as error:
            self.assertTrue(str(error).startswith(
                'An error was encountered while reading json file: ' +
                environment_variable_file + ' (pointed to by ' +
                GOOGLE_APPLICATION_CREDENTIALS + ' environment variable):'))

    def test_get_application_default_environment_not_set_up(self):
        # It is normal for this test to fail if run inside
        # a Google Compute Engine VM or after 'gcloud auth login' command
        # has been executed on a non Windows machine.
        os.environ['SERVER_SOFTWARE'] = ''
        os.environ[GOOGLE_APPLICATION_CREDENTIALS] = ''
        os.environ['APPDATA'] = ''
        # we can't use self.assertRaisesRegexp() because it is only in Python 2.7+
        try:
            GoogleCredentials.get_application_default()
            self.fail('An exception was expected!')
        except ApplicationDefaultCredentialsError as error:
            self.assertEqual(ADC_HELP_MSG, str(error))

    def test_from_stream_service_account(self):
        credentials_file = datafile(
            os.path.join('gcloud', 'application_default_credentials.json'))
        credentials = (
            self.get_a_google_credentials_object().from_stream(credentials_file))
        self.validate_service_account_credentials(credentials)

    def test_from_stream_authorized_user(self):
        credentials_file = datafile(
            os.path.join('gcloud',
                         'application_default_credentials_authorized_user.json'))
        credentials = (
            self.get_a_google_credentials_object().from_stream(credentials_file))
        self.validate_google_credentials(credentials)

    def test_from_stream_malformed_file_1(self):
        credentials_file = datafile(
            os.path.join('gcloud',
                         'application_default_credentials_malformed_1.json'))
        # we can't use self.assertRaisesRegexp() because it is only in Python 2.7+
        try:
            self.get_a_google_credentials_object().from_stream(credentials_file)
            self.fail('An exception was expected!')
        except ApplicationDefaultCredentialsError as error:
            self.assertEqual("An error was encountered while reading json file: " +
                             credentials_file +
                             " (provided as parameter to the from_stream() method): "
                             "'type' field should be defined (and have one of the '" +
                             AUTHORIZED_USER + "' or '" + SERVICE_ACCOUNT +
                             "' values)",
                             str(error))

    def test_from_stream_malformed_file_2(self):
        credentials_file = datafile(
            os.path.join('gcloud',
                         'application_default_credentials_malformed_2.json'))
        # we can't use self.assertRaisesRegexp() because it is only in Python 2.7+
        try:
            self.get_a_google_credentials_object().from_stream(credentials_file)
            self.fail('An exception was expected!')
        except ApplicationDefaultCredentialsError as error:
            self.assertEqual('An error was encountered while reading json file: ' +
                             credentials_file +
                             ' (provided as parameter to the from_stream() method): '
                             'The following field(s) must be defined: '
                             'private_key_id',
                             str(error))

    def test_from_stream_malformed_file_3(self):
        credentials_file = datafile(
            os.path.join('gcloud',
                         'application_default_credentials_malformed_3.json'))
        self.assertRaises(
            ApplicationDefaultCredentialsError,
            self.get_a_google_credentials_object().from_stream, credentials_file)
class DummyDeleteStorage(Storage):
    """Storage stub that only records whether locked_delete() was invoked."""
    # Flag flipped to True by locked_delete(); class-level default is False.
    delete_called = False

    def locked_delete(self):
        self.delete_called = True
def _token_revoke_test_helper(testcase, status, revoke_raise,
                              valid_bool_value, token_attr):
    """Shared driver for the credentials revoke() tests.

    :param testcase: the TestCase supplying `credentials` and assertions.
    :param status: HTTP status string the mocked revoke endpoint returns.
    :param revoke_raise: True if revoke() is expected to raise
        TokenRevokeError for that status.
    :param valid_bool_value: expected value of both credentials.invalid and
        "the store's delete was called" after the revoke attempt.
    :param token_attr: name of the credentials attribute whose value should
        have been sent to the revoke endpoint (e.g. 'refresh_token').
    """
    current_store = getattr(testcase.credentials, 'store', None)
    dummy_store = DummyDeleteStorage()
    testcase.credentials.set_store(dummy_store)
    actual_do_revoke = testcase.credentials._do_revoke
    testcase.token_from_revoke = None
    def do_revoke_stub(http_request, token):
        # Capture the token passed to _do_revoke, then delegate.
        testcase.token_from_revoke = token
        return actual_do_revoke(http_request, token)
    testcase.credentials._do_revoke = do_revoke_stub
    http = HttpMock(headers={'status': status})
    if revoke_raise:
        testcase.assertRaises(TokenRevokeError, testcase.credentials.revoke, http)
    else:
        testcase.credentials.revoke(http)
    testcase.assertEqual(getattr(testcase.credentials, token_attr),
                         testcase.token_from_revoke)
    testcase.assertEqual(valid_bool_value, testcase.credentials.invalid)
    testcase.assertEqual(valid_bool_value, dummy_store.delete_called)
    # Restore the original store so later tests see unmodified credentials.
    testcase.credentials.set_store(current_store)
class BasicCredentialsTests(unittest.TestCase):
    """Tests for OAuth2Credentials: refresh, revoke, (de)serialization and
    header sanitization."""

    def setUp(self):
        access_token = 'foo'
        client_id = 'some_client_id'
        client_secret = 'cOuDdkfjxxnv+'
        refresh_token = '1/0/a.df219fjls0'
        token_expiry = datetime.datetime.utcnow()
        user_agent = 'refresh_checker/1.0'
        self.credentials = OAuth2Credentials(
            access_token, client_id, client_secret,
            refresh_token, token_expiry, GOOGLE_TOKEN_URI,
            user_agent, revoke_uri=GOOGLE_REVOKE_URI)

    def test_token_refresh_success(self):
        # Any status in REFRESH_STATUS_CODES must trigger a token refresh
        # followed by a retry of the original request.
        for status_code in REFRESH_STATUS_CODES:
            token_response = {'access_token': '1/3w', 'expires_in': 3600}
            http = HttpMockSequence([
                ({'status': status_code}, b''),
                ({'status': '200'}, json.dumps(token_response).encode('utf-8')),
                ({'status': '200'}, 'echo_request_headers'),
            ])
            http = self.credentials.authorize(http)
            resp, content = http.request('http://example.com')
            self.assertEqual(b'Bearer 1/3w', content[b'Authorization'])
            self.assertFalse(self.credentials.access_token_expired)
            self.assertEqual(token_response, self.credentials.token_response)

    def test_token_refresh_failure(self):
        for status_code in REFRESH_STATUS_CODES:
            http = HttpMockSequence([
                ({'status': status_code}, b''),
                ({'status': '400'}, b'{"error":"access_denied"}'),
            ])
            http = self.credentials.authorize(http)
            try:
                http.request('http://example.com')
                self.fail('should raise AccessTokenRefreshError exception')
            except AccessTokenRefreshError:
                pass
            self.assertTrue(self.credentials.access_token_expired)
            self.assertEqual(None, self.credentials.token_response)

    def test_token_revoke_success(self):
        _token_revoke_test_helper(
            self, '200', revoke_raise=False,
            valid_bool_value=True, token_attr='refresh_token')

    def test_token_revoke_failure(self):
        _token_revoke_test_helper(
            self, '400', revoke_raise=True,
            valid_bool_value=False, token_attr='refresh_token')

    def test_non_401_error_response(self):
        # Non-401 errors must be passed through without a refresh attempt.
        http = HttpMockSequence([
            ({'status': '400'}, b''),
        ])
        http = self.credentials.authorize(http)
        resp, content = http.request('http://example.com')
        self.assertEqual(400, resp.status)
        self.assertEqual(None, self.credentials.token_response)

    def test_to_from_json(self):
        # NOTE: the local name 'json' shadows the json module within
        # this method only.
        json = self.credentials.to_json()
        instance = OAuth2Credentials.from_json(json)
        self.assertEqual(OAuth2Credentials, type(instance))
        instance.token_expiry = None
        self.credentials.token_expiry = None
        self.assertEqual(instance.__dict__, self.credentials.__dict__)

    def test_from_json_token_expiry(self):
        data = json.loads(self.credentials.to_json())
        data['token_expiry'] = None
        instance = OAuth2Credentials.from_json(json.dumps(data))
        self.assertTrue(isinstance(instance, OAuth2Credentials))

    def test_unicode_header_checks(self):
        access_token = u'foo'
        client_id = u'some_client_id'
        client_secret = u'cOuDdkfjxxnv+'
        refresh_token = u'1/0/a.df219fjls0'
        token_expiry = str(datetime.datetime.utcnow())
        token_uri = str(GOOGLE_TOKEN_URI)
        revoke_uri = str(GOOGLE_REVOKE_URI)
        user_agent = u'refresh_checker/1.0'
        credentials = OAuth2Credentials(access_token, client_id, client_secret,
                                        refresh_token, token_expiry, token_uri,
                                        user_agent, revoke_uri=revoke_uri)
        # First, test that we correctly encode basic objects, making sure
        # to include a bytes object. Note that oauth2client will normalize
        # everything to bytes, no matter what python version we're in.
        http = credentials.authorize(HttpMock(headers={'status': '200'}))
        headers = {u'foo': 3, b'bar': True, 'baz': b'abc'}
        cleaned_headers = {b'foo': b'3', b'bar': b'True', b'baz': b'abc'}
        http.request(u'http://example.com', method=u'GET', headers=headers)
        for k, v in cleaned_headers.items():
            self.assertTrue(k in http.headers)
            self.assertEqual(v, http.headers[k])
        # Next, test that we do fail on unicode.
        unicode_str = six.unichr(40960) + 'abcd'
        self.assertRaises(
            NonAsciiHeaderError,
            http.request,
            u'http://example.com', method=u'GET', headers={u'foo': unicode_str})

    def test_no_unicode_in_request_params(self):
        access_token = u'foo'
        client_id = u'some_client_id'
        client_secret = u'cOuDdkfjxxnv+'
        refresh_token = u'1/0/a.df219fjls0'
        token_expiry = str(datetime.datetime.utcnow())
        token_uri = str(GOOGLE_TOKEN_URI)
        revoke_uri = str(GOOGLE_REVOKE_URI)
        user_agent = u'refresh_checker/1.0'
        credentials = OAuth2Credentials(access_token, client_id, client_secret,
                                        refresh_token, token_expiry, token_uri,
                                        user_agent, revoke_uri=revoke_uri)
        http = HttpMock(headers={'status': '200'})
        http = credentials.authorize(http)
        http.request(u'http://example.com', method=u'GET', headers={u'foo': u'bar'})
        for k, v in six.iteritems(http.headers):
            self.assertEqual(six.binary_type, type(k))
            self.assertEqual(six.binary_type, type(v))
        # Test again with unicode strings that can't simply be converted to ASCII.
        try:
            http.request(
                u'http://example.com', method=u'GET', headers={u'foo': u'\N{COMET}'})
            self.fail('Expected exception to be raised.')
        except NonAsciiHeaderError:
            pass
        # NOTE(review): the following three lines exercise token_response
        # JSON round-tripping and look unrelated to this test's unicode
        # focus; they may belong in a separate test method — confirm
        # against upstream oauth2client.
        self.credentials.token_response = 'foobar'
        instance = OAuth2Credentials.from_json(self.credentials.to_json())
        self.assertEqual('foobar', instance.token_response)

    def test_get_access_token(self):
        S = 2  # number of seconds in which the token expires
        token_response_first = {'access_token': 'first_token', 'expires_in': S}
        token_response_second = {'access_token': 'second_token', 'expires_in': S}
        http = HttpMockSequence([
            ({'status': '200'}, json.dumps(token_response_first).encode('utf-8')),
            ({'status': '200'}, json.dumps(token_response_second).encode('utf-8')),
        ])
        # First fetch obtains and caches the token...
        token = self.credentials.get_access_token(http=http)
        self.assertEqual('first_token', token.access_token)
        self.assertEqual(S - 1, token.expires_in)
        self.assertFalse(self.credentials.access_token_expired)
        self.assertEqual(token_response_first, self.credentials.token_response)
        # ...a second fetch before expiry reuses the cached token...
        token = self.credentials.get_access_token(http=http)
        self.assertEqual('first_token', token.access_token)
        self.assertEqual(S - 1, token.expires_in)
        self.assertFalse(self.credentials.access_token_expired)
        self.assertEqual(token_response_first, self.credentials.token_response)
        time.sleep(S + 0.5)  # some margin to avoid flakiness
        self.assertTrue(self.credentials.access_token_expired)
        # ...and after expiry a fresh token is requested.
        token = self.credentials.get_access_token(http=http)
        self.assertEqual('second_token', token.access_token)
        self.assertEqual(S - 1, token.expires_in)
        self.assertFalse(self.credentials.access_token_expired)
        self.assertEqual(token_response_second, self.credentials.token_response)
class AccessTokenCredentialsTests(unittest.TestCase):
    """Tests for AccessTokenCredentials, which carry a fixed token and
    cannot be refreshed."""

    def setUp(self):
        access_token = 'foo'
        user_agent = 'refresh_checker/1.0'
        self.credentials = AccessTokenCredentials(access_token, user_agent,
                                                  revoke_uri=GOOGLE_REVOKE_URI)

    def test_token_refresh_success(self):
        # There is no refresh token, so any refresh-triggering status must
        # surface as AccessTokenCredentialsError instead of refreshing.
        for status_code in REFRESH_STATUS_CODES:
            http = HttpMockSequence([
                ({'status': status_code}, b''),
            ])
            http = self.credentials.authorize(http)
            try:
                resp, content = http.request('http://example.com')
                self.fail('should throw exception if token expires')
            except AccessTokenCredentialsError:
                pass
            except Exception:
                self.fail('should only throw AccessTokenCredentialsError')

    def test_token_revoke_success(self):
        _token_revoke_test_helper(
            self, '200', revoke_raise=False,
            valid_bool_value=True, token_attr='access_token')

    def test_token_revoke_failure(self):
        _token_revoke_test_helper(
            self, '400', revoke_raise=True,
            valid_bool_value=False, token_attr='access_token')

    def test_non_401_error_response(self):
        http = HttpMockSequence([
            ({'status': '400'}, b''),
        ])
        http = self.credentials.authorize(http)
        resp, content = http.request('http://example.com')
        self.assertEqual(400, resp.status)

    def test_auth_header_sent(self):
        http = HttpMockSequence([
            ({'status': '200'}, 'echo_request_headers'),
        ])
        http = self.credentials.authorize(http)
        resp, content = http.request('http://example.com')
        self.assertEqual(b'Bearer foo', content[b'Authorization'])
class TestAssertionCredentials(unittest.TestCase):
    """Tests for AssertionCredentials via a minimal concrete subclass."""
    # Fixed assertion payload and type used by the test implementation.
    assertion_text = 'This is the assertion'
    assertion_type = 'http://www.google.com/assertionType'

    class AssertionCredentialsTestImpl(AssertionCredentials):
        # Concrete subclass returning a canned assertion.
        def _generate_assertion(self):
            return TestAssertionCredentials.assertion_text

    def setUp(self):
        user_agent = 'fun/2.0'
        self.credentials = self.AssertionCredentialsTestImpl(self.assertion_type,
                                                             user_agent=user_agent)

    def test_assertion_body(self):
        # The refresh request body must carry the assertion and the
        # JWT-bearer grant type.
        body = urllib.parse.parse_qs(
            self.credentials._generate_refresh_request_body())
        self.assertEqual(self.assertion_text, body['assertion'][0])
        self.assertEqual('urn:ietf:params:oauth:grant-type:jwt-bearer',
                         body['grant_type'][0])

    def test_assertion_refresh(self):
        http = HttpMockSequence([
            ({'status': '200'}, b'{"access_token":"1/3w"}'),
            ({'status': '200'}, 'echo_request_headers'),
        ])
        http = self.credentials.authorize(http)
        resp, content = http.request('http://example.com')
        self.assertEqual(b'Bearer 1/3w', content[b'Authorization'])

    def test_token_revoke_success(self):
        _token_revoke_test_helper(
            self, '200', revoke_raise=False,
            valid_bool_value=True, token_attr='access_token')

    def test_token_revoke_failure(self):
        _token_revoke_test_helper(
            self, '400', revoke_raise=True,
            valid_bool_value=False, token_attr='access_token')
class UpdateQueryParamsTest(unittest.TestCase):
    """Tests _update_query_params() URI query-string merging."""

    def test_update_query_params_no_params(self):
        uri = 'http://www.google.com'
        updated = _update_query_params(uri, {'a': 'b'})
        self.assertEqual(updated, uri + '?a=b')

    def test_update_query_params_existing_params(self):
        """New params are appended and values are percent-encoded ('&' -> %26)."""
        uri = 'http://www.google.com?x=y'
        updated = _update_query_params(uri, {'a': 'b', 'c': 'd&'})
        hardcoded_update = uri + '&a=b&c=d%26'
        # Query param order is not guaranteed, so compare semantically.
        assertUrisEqual(self, updated, hardcoded_update)
class ExtractIdTokenTest(unittest.TestCase):
    """Tests _extract_id_token()."""

    def test_extract_success(self):
        """A well-formed three-segment JWT yields its decoded payload."""
        body = {'foo': 'bar'}
        body_json = json.dumps(body).encode('ascii')
        # Strip base64 padding the way real JWTs do.
        payload = base64.urlsafe_b64encode(body_json).strip(b'=')
        jwt = b'stuff.' + payload + b'.signature'
        extracted = _extract_id_token(jwt)
        self.assertEqual(extracted, body)

    def test_extract_failure(self):
        """A JWT without exactly three segments must be rejected."""
        body = {'foo': 'bar'}
        body_json = json.dumps(body).encode('ascii')
        payload = base64.urlsafe_b64encode(body_json).strip(b'=')
        jwt = b'stuff.' + payload  # missing the signature segment
        self.assertRaises(VerifyJwtTokenError, _extract_id_token, jwt)
class OAuth2WebServerFlowTest(unittest.TestCase):
    """Tests the two-step OAuth2WebServerFlow (authorize URL + code exchange)."""

    def setUp(self):
        self.flow = OAuth2WebServerFlow(
            client_id='client_id+1',
            client_secret='secret+1',
            scope='foo',
            redirect_uri=OOB_CALLBACK_URN,
            user_agent='unittest-sample/1.0',
            revoke_uri='dummy_revoke_uri',
        )

    def test_construct_authorize_url(self):
        """Step 1 URL carries the default query parameters."""
        authorize_url = self.flow.step1_get_authorize_url()
        parsed = urllib.parse.urlparse(authorize_url)
        q = urllib.parse.parse_qs(parsed[4])  # parsed[4] is the query string
        self.assertEqual('client_id+1', q['client_id'][0])
        self.assertEqual('code', q['response_type'][0])
        self.assertEqual('foo', q['scope'][0])
        self.assertEqual(OOB_CALLBACK_URN, q['redirect_uri'][0])
        self.assertEqual('offline', q['access_type'][0])

    def test_override_flow_via_kwargs(self):
        """Passing kwargs to override defaults."""
        flow = OAuth2WebServerFlow(
            client_id='client_id+1',
            client_secret='secret+1',
            scope='foo',
            redirect_uri=OOB_CALLBACK_URN,
            user_agent='unittest-sample/1.0',
            access_type='online',
            response_type='token'
        )
        authorize_url = flow.step1_get_authorize_url()
        parsed = urllib.parse.urlparse(authorize_url)
        q = urllib.parse.parse_qs(parsed[4])
        self.assertEqual('client_id+1', q['client_id'][0])
        self.assertEqual('token', q['response_type'][0])
        self.assertEqual('foo', q['scope'][0])
        self.assertEqual(OOB_CALLBACK_URN, q['redirect_uri'][0])
        self.assertEqual('online', q['access_type'][0])

    def test_exchange_failure(self):
        """A non-200 exchange response raises FlowExchangeError."""
        http = HttpMockSequence([
            ({'status': '400'}, b'{"error":"invalid_request"}'),
        ])
        try:
            credentials = self.flow.step2_exchange('some random code', http=http)
            self.fail('should raise exception if exchange doesn\'t get 200')
        except FlowExchangeError:
            pass

    def test_urlencoded_exchange_failure(self):
        """Urlencoded (non-JSON) error bodies still surface the error string."""
        http = HttpMockSequence([
            ({'status': '400'}, b'error=invalid_request'),
        ])
        try:
            credentials = self.flow.step2_exchange('some random code', http=http)
            self.fail('should raise exception if exchange doesn\'t get 200')
        except FlowExchangeError as e:
            self.assertEqual('invalid_request', str(e))

    def test_exchange_failure_with_json_error(self):
        # Some providers have 'error' attribute as a JSON object
        # in place of regular string.
        # This test makes sure no strange object-to-string coversion
        # exceptions are being raised instead of FlowExchangeError.
        http = HttpMockSequence([
            ({'status': '400'},
             b""" {"error": {
                    "type": "OAuthException",
                    "message": "Error validating verification code."} }"""),
        ])
        try:
            credentials = self.flow.step2_exchange('some random code', http=http)
            self.fail('should raise exception if exchange doesn\'t get 200')
        except FlowExchangeError as e:
            pass

    def test_exchange_success(self):
        """A 200 JSON response yields credentials with token, expiry, refresh."""
        http = HttpMockSequence([
            ({'status': '200'},
             b"""{ "access_token":"SlAV32hkKG",
                   "expires_in":3600,
                   "refresh_token":"8xLOxBtZp8" }"""),
        ])
        credentials = self.flow.step2_exchange('some random code', http=http)
        self.assertEqual('SlAV32hkKG', credentials.access_token)
        self.assertNotEqual(None, credentials.token_expiry)
        self.assertEqual('8xLOxBtZp8', credentials.refresh_token)
        self.assertEqual('dummy_revoke_uri', credentials.revoke_uri)

    def test_exchange_dictlike(self):
        """step2_exchange accepts any dict-like object carrying a 'code' key."""
        class FakeDict(object):
            def __init__(self, d):
                self.d = d

            def __getitem__(self, name):
                return self.d[name]

            def __contains__(self, name):
                return name in self.d

        code = 'some random code'
        not_a_dict = FakeDict({'code': code})
        payload = (b'{'
                   b'  "access_token":"SlAV32hkKG",'
                   b'  "expires_in":3600,'
                   b'  "refresh_token":"8xLOxBtZp8"'
                   b'}')
        http = HttpMockSequence([({'status': '200'}, payload),])

        credentials = self.flow.step2_exchange(not_a_dict, http=http)
        self.assertEqual('SlAV32hkKG', credentials.access_token)
        self.assertNotEqual(None, credentials.token_expiry)
        self.assertEqual('8xLOxBtZp8', credentials.refresh_token)
        self.assertEqual('dummy_revoke_uri', credentials.revoke_uri)
        # Verify the code extracted from the dict-like made it into the request.
        request_code = urllib.parse.parse_qs(http.requests[0]['body'])['code'][0]
        self.assertEqual(code, request_code)

    def test_urlencoded_exchange_success(self):
        http = HttpMockSequence([
            ({'status': '200'}, b'access_token=SlAV32hkKG&expires_in=3600'),
        ])
        credentials = self.flow.step2_exchange('some random code', http=http)
        self.assertEqual('SlAV32hkKG', credentials.access_token)
        self.assertNotEqual(None, credentials.token_expiry)

    def test_urlencoded_expires_param(self):
        http = HttpMockSequence([
            # Note the 'expires=3600' where you'd normally
            # have if named 'expires_in'
            ({'status': '200'}, b'access_token=SlAV32hkKG&expires=3600'),
        ])
        credentials = self.flow.step2_exchange('some random code', http=http)
        self.assertNotEqual(None, credentials.token_expiry)

    def test_exchange_no_expires_in(self):
        http = HttpMockSequence([
            ({'status': '200'}, b"""{ "access_token":"SlAV32hkKG",
                                      "refresh_token":"8xLOxBtZp8" }"""),
        ])
        credentials = self.flow.step2_exchange('some random code', http=http)
        self.assertEqual(None, credentials.token_expiry)

    def test_urlencoded_exchange_no_expires_in(self):
        http = HttpMockSequence([
            # This might be redundant but just to make sure
            # urlencoded access_token gets parsed correctly
            ({'status': '200'}, b'access_token=SlAV32hkKG'),
        ])
        credentials = self.flow.step2_exchange('some random code', http=http)
        self.assertEqual(None, credentials.token_expiry)

    def test_exchange_fails_if_no_code(self):
        """A dict without a 'code' key raises, surfacing any 'error' value."""
        http = HttpMockSequence([
            ({'status': '200'}, b"""{ "access_token":"SlAV32hkKG",
                                      "refresh_token":"8xLOxBtZp8" }"""),
        ])
        code = {'error': 'thou shall not pass'}
        try:
            credentials = self.flow.step2_exchange(code, http=http)
            self.fail('should raise exception if no code in dictionary.')
        except FlowExchangeError as e:
            self.assertTrue('shall not pass' in str(e))

    def test_exchange_id_token_fail(self):
        """A malformed id_token (two segments) raises VerifyJwtTokenError."""
        http = HttpMockSequence([
            ({'status': '200'}, b"""{ "access_token":"SlAV32hkKG",
                                      "refresh_token":"8xLOxBtZp8",
                                      "id_token": "stuff.payload"}"""),
        ])
        self.assertRaises(VerifyJwtTokenError, self.flow.step2_exchange,
                          'some random code', http=http)

    def test_exchange_id_token(self):
        """A well-formed id_token is decoded onto credentials.id_token."""
        body = {'foo': 'bar'}
        body_json = json.dumps(body).encode('ascii')
        payload = base64.urlsafe_b64encode(body_json).strip(b'=')
        jwt = (base64.urlsafe_b64encode(b'stuff') + b'.' + payload + b'.' +
               base64.urlsafe_b64encode(b'signature'))
        http = HttpMockSequence([
            ({'status': '200'}, ("""{ "access_token":"SlAV32hkKG",
                                      "refresh_token":"8xLOxBtZp8",
                                      "id_token": "%s"}""" % jwt).encode('utf-8')),
        ])
        credentials = self.flow.step2_exchange('some random code', http=http)
        self.assertEqual(credentials.id_token, body)
class FlowFromCachedClientsecrets(unittest.TestCase):
    """Tests flow_from_clientsecrets() reading secrets from a cache."""

    def test_flow_from_clientsecrets_cached(self):
        cache_mock = CacheMock()
        load_and_cache('client_secrets.json', 'some_secrets', cache_mock)
        # With the cache primed, no filesystem read should be needed.
        flow = flow_from_clientsecrets(
            'some_secrets', '', redirect_uri='oob', cache=cache_mock)
        self.assertEqual('foo_client_secret', flow.client_secret)
class CredentialsFromCodeTests(unittest.TestCase):
    """Tests the one-shot credentials_from_code() helper family."""

    def setUp(self):
        self.client_id = 'client_id_abc'
        self.client_secret = 'secret_use_code'
        self.scope = 'foo'
        self.code = '12345abcde'
        self.redirect_uri = 'postmessage'

    def test_exchange_code_for_token(self):
        token = 'asdfghjkl'
        payload = json.dumps({'access_token': token, 'expires_in': 3600})
        http = HttpMockSequence([
            ({'status': '200'}, payload.encode('utf-8')),
        ])
        credentials = credentials_from_code(self.client_id, self.client_secret,
            self.scope, self.code, redirect_uri=self.redirect_uri,
            http=http)
        self.assertEqual(credentials.access_token, token)
        self.assertNotEqual(None, credentials.token_expiry)

    def test_exchange_code_for_token_fail(self):
        """A non-200 exchange response raises FlowExchangeError."""
        http = HttpMockSequence([
            ({'status': '400'}, b'{"error":"invalid_request"}'),
        ])
        try:
            credentials = credentials_from_code(self.client_id, self.client_secret,
                self.scope, self.code, redirect_uri=self.redirect_uri,
                http=http)
            self.fail('should raise exception if exchange doesn\'t get 200')
        except FlowExchangeError:
            pass

    def test_exchange_code_and_file_for_token(self):
        """Secrets loaded from a clientsecrets file on disk."""
        http = HttpMockSequence([
            ({'status': '200'},
             b"""{ "access_token":"asdfghjkl",
                   "expires_in":3600 }"""),
        ])
        credentials = credentials_from_clientsecrets_and_code(
            datafile('client_secrets.json'), self.scope,
            self.code, http=http)
        self.assertEqual(credentials.access_token, 'asdfghjkl')
        self.assertNotEqual(None, credentials.token_expiry)

    def test_exchange_code_and_cached_file_for_token(self):
        """Secrets loaded from the cache instead of the filesystem."""
        http = HttpMockSequence([
            ({'status': '200'}, b'{ "access_token":"asdfghjkl"}'),
        ])
        cache_mock = CacheMock()
        load_and_cache('client_secrets.json', 'some_secrets', cache_mock)
        credentials = credentials_from_clientsecrets_and_code(
            'some_secrets', self.scope,
            self.code, http=http, cache=cache_mock)
        self.assertEqual(credentials.access_token, 'asdfghjkl')

    def test_exchange_code_and_file_for_token_fail(self):
        http = HttpMockSequence([
            ({'status': '400'}, b'{"error":"invalid_request"}'),
        ])
        try:
            credentials = credentials_from_clientsecrets_and_code(
                datafile('client_secrets.json'), self.scope,
                self.code, http=http)
            self.fail('should raise exception if exchange doesn\'t get 200')
        except FlowExchangeError:
            pass
class MemoryCacheTests(unittest.TestCase):
    """Tests the in-memory MemoryCache get/set/delete contract."""

    def test_get_set_delete(self):
        m = MemoryCache()
        # Missing keys return None from both get() and delete().
        self.assertEqual(None, m.get('foo'))
        self.assertEqual(None, m.delete('foo'))
        m.set('foo', 'bar')
        self.assertEqual('bar', m.get('foo'))
        m.delete('foo')
        self.assertEqual(None, m.get('foo'))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
Jammink2/DeepDreamAnim | dreamer.py | 3 | 10389 | #!/usr/bin/python
__author__ = 'samim'
# Imports
import argparse
import time
import os
import errno
import subprocess
from cStringIO import StringIO
import numpy as np
import scipy.ndimage as nd
import PIL.Image
from google.protobuf import text_format
import caffe
#Loading DNN model
model_name = 'bvlc_googlenet'
#model_name = 'age_net'
# model_name = 'gender_net'
# model_name = 'hybridCNN+'
# model_name = 'placesCNN+'
# model_name = 'vggf'
# model_name = 'flowers'

# NOTE(review): hard-coded to one user's home directory; adjust for your own
# Caffe installation before running.
model_path = '/Users/samim/caffe/models/' + model_name + '/'
net_fn = model_path + 'deploy.prototxt'
param_fn = model_path + 'net.caffemodel'

# Patching model to be able to compute gradients.
# Note that you can also manually add "force_backward: true" line to "deploy.prototxt".
model = caffe.io.caffe_pb2.NetParameter()
text_format.Merge(open(net_fn).read(), model)
model.force_backward = True
# Writes the patched prototxt next to the script (cwd) as 'tmp.prototxt'.
open('tmp.prototxt', 'w').write(str(model))

net = caffe.Classifier('tmp.prototxt', param_fn,
                       mean = np.float32([104.0, 116.0, 122.0]), # ImageNet mean, training set dependent
                       channel_swap = (2,1,0)) # the reference model has channels in BGR order instead of RGB
def showarray(a, fmt='jpeg'):
    """Clip *a* into [0, 255] and encode it as *fmt* into an in-memory buffer."""
    pixels = np.uint8(np.clip(a, 0, 255))
    buf = StringIO()
    PIL.Image.fromarray(pixels).save(buf, fmt)
    #display(Image(data=buf.getvalue()))
def blur(img, sigma):
    """Gaussian-blur each of the three leading channels of *img* in place.

    A non-positive *sigma* leaves the image untouched. Returns *img*.
    """
    if sigma > 0:
        for channel in (0, 1, 2):
            img[channel] = nd.filters.gaussian_filter(img[channel], sigma, order=0)
    return img
# a couple of utility functions for converting to and from Caffe's input image layout
def preprocess(net, img):
    """Convert an HxWxC RGB image into Caffe's CxHxW BGR float32 layout,
    with the network's training mean subtracted."""
    chw_bgr = np.transpose(img, (2, 0, 1))[::-1]
    return np.float32(chw_bgr) - net.transformer.mean['data']
def deprocess(net, img):
    """Invert preprocess(): add the mean back and return an HxWxC array
    (channel order reversed back from BGR)."""
    restored = (img + net.transformer.mean['data'])[::-1]
    return np.stack(restored, axis=-1)
# First we implement a basic gradient ascent step function, applying the first two tricks // 32:
def make_step(net, step_size=1.5, end='inception_4c/output', jitter=32, clip=True):
    '''Basic gradient ascent step.

    Mutates net.blobs['data'] in place: shifts the image by a random jitter,
    runs forward to *end*, backpropagates the layer's own activations as the
    objective, takes a normalized ascent step, then undoes the shift and
    optionally clips to the displayable range.
    '''
    src = net.blobs['data'] # input image is stored in Net's 'data' blob
    dst = net.blobs[end]

    ox, oy = np.random.randint(-jitter, jitter+1, 2)
    # ox, oy = np.random.normal(0, max(1, jitter), 2).astype(int) # use gaussian distribution
    src.data[0] = np.roll(np.roll(src.data[0], ox, -1), oy, -2) # apply jitter shift
    # src.data[0] += np.random.normal(0, 1, (3, 224, 224)) # add some noise

    net.forward(end=end)
    dst.diff[:] = dst.data  # specify the optimization objective
    net.backward(start=end)
    g = src.diff[0]
    # apply normalized ascent step to the input image
    src.data[:] += step_size/np.abs(g).mean() * g

    src.data[0] = np.roll(np.roll(src.data[0], -ox, -1), -oy, -2) # unshift image

    if clip:
        # Clip in mean-subtracted space so the deprocessed image stays in [0, 255].
        bias = net.transformer.mean['data']
        src.data[:] = np.clip(src.data, -bias, 255-bias)
# Next we implement an ascent through different scales. We call these scales "octaves".
# Next we implement an ascent through different scales. We call these scales "octaves".
def deepdream(net, base_img, iter_n=10, octave_n=4, step_size=1.5, octave_scale=1.4, jitter=32, end='inception_4c/output', clip=True, **step_params):
    """Run *iter_n* gradient-ascent steps at each of *octave_n* scales of
    *base_img*, carrying the generated detail up from coarse to fine, and
    return the resulting image in HxWxC layout."""
    # prepare base images for all octaves (octaves[-1] is the smallest)
    octaves = [preprocess(net, base_img)]
    for i in xrange(octave_n-1):
        octaves.append(nd.zoom(octaves[-1], (1, 1.0/octave_scale,1.0/octave_scale), order=1))

    src = net.blobs['data']
    detail = np.zeros_like(octaves[-1]) # allocate image for network-produced details
    for octave, octave_base in enumerate(octaves[::-1]):
        h, w = octave_base.shape[-2:]
        if octave > 0:
            # upscale details from the previous octave
            h1, w1 = detail.shape[-2:]
            detail = nd.zoom(detail, (1, 1.0*h/h1,1.0*w/w1), order=1)

        src.reshape(1,3,h,w) # resize the network's input image size
        src.data[0] = octave_base+detail
        for i in xrange(iter_n):
            make_step(net, end=end,step_size=step_size, jitter=jitter, clip=clip, **step_params)

            # visualization
            vis = deprocess(net, src.data[0])
            if not clip: # adjust image contrast if clipping is disabled
                vis = vis*(255.0/np.percentile(vis, 99.98))
            #showarray(vis)
            print octave, i, end, vis.shape

        # extract details produced on the current octave
        detail = src.data[0]-octave_base
    # returning the resulting image
    return deprocess(net, src.data[0])
# Animaton functions
def resizePicture(image, width):
    """Open the file at *image* and return a copy scaled to *width* pixels
    wide, preserving the aspect ratio."""
    img = PIL.Image.open(image)
    scale = width / float(img.size[0])
    height = int(float(img.size[1]) * scale)
    return img.resize((width, height), PIL.Image.ANTIALIAS)
def morphPicture(filename1, filename2, blend, width):
    """Cross-fade between two image files.

    Opens both files, resizes the second one to *width* (keeping aspect
    ratio) when width is non-zero, and returns PIL's alpha blend of the two
    (*blend* = 0.0 gives the first image, 1.0 the second).
    """
    img1 = PIL.Image.open(filename1)
    img2 = PIL.Image.open(filename2)
    # BUG FIX: 'width is not 0' compared object identity, not value; it only
    # worked because CPython caches small ints. Use a value comparison.
    if width != 0:
        img2 = resizePicture(filename2, width)
    return PIL.Image.blend(img1, img2, blend)
def make_sure_path_exists(path):
    """Create *path* (including parents) if it does not exist.

    An already-existing path is fine; any other OS error (e.g. permission
    denied) is re-raised.
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            return
        raise
def main(inputdir,outputdir,preview,octaves,octave_scale,iterations,jitter,zoom,stepsize,blend,layers):
make_sure_path_exists(inputdir)
make_sure_path_exists(outputdir)
if preview is None: preview = 0
if octaves is None: octaves = 4
if octave_scale is None: octave_scale = 1.5
if iterations is None: iterations = 10
if jitter is None: jitter = 32
if jitter is None: jitter = 32
if zoom is None: zoom = 1
if stepsize is None: stepsize = 1.5
if blend is None: blend = 0.5
if layers is None: layers = ['inception_4c/output']
var_counter = 1
vidinput = os.listdir(inputdir)
vids = [];
for frame in vidinput:
if not ".jpeg" in frame: continue
vids.append(frame)
img = PIL.Image.open(inputdir+'/'+vids[0])
if preview is not 0:
img = resizePicture(inputdir+'/'+vids[0],preview)
frame = np.float32(img)
for v in range(len(vids)):
vid = vids[v]
now = time.time()
#net.blobs.keys()
h, w = frame.shape[:2]
s = 0.05 # scale coefficient
for i in xrange(zoom):
print 'Processing: ' + inputdir+'/'+ vid
endparam = layers[var_counter % len(layers)]
var_end = endparam.replace("/", "-");
frame = deepdream(net, frame, iter_n=iterations,step_size=stepsize, octave_n=octaves, octave_scale=octave_scale, jitter=jitter, end=endparam)
later = time.time()
difference = int(later - now)
filenameCounter = 10000 + var_counter
saveframe = outputdir+"/"+str(filenameCounter) + '_' + "_octaves"+str(octaves)+"_iterations"+str(iterations)+"_octavescale"+str(octave_scale)+'_net'+var_end+ '_jitter' + str(jitter) + '_stepsize' + str(stepsize) + '_blend' + str(blend) + '_renderTime' + str(difference) + 's' + '_filename'+ vid
# Stats
print '***************************************'
print 'Saving Image As: ' + saveframe
print 'Frame ' + str(var_counter) + ' of ' + str(len(vids))
print 'Frame Time: ' + str(difference) + 's'
timeleft = difference * (len(vids) - var_counter)
m, s = divmod(timeleft, 60)
h, m = divmod(m, 60)
print 'Estimated Total Time Remaining: ' + str(timeleft) + 's (' + "%d:%02d:%02d" % (h, m, s) + ')'
print '***************************************'
PIL.Image.fromarray(np.uint8(frame)).save(saveframe)
newframe = inputdir + '/' + vids[v+1]
print blend
if blend == 0:
newimg = PIL.Image.open(newframe)
if preview is not 0:
newimg = resizePicture(newframe,preview)
frame = newimg
else:
frame = morphPicture(saveframe,newframe,blend,preview)
frame = np.float32(frame)
var_counter += 1
def extractVideo(inputdir, outputdir):
    """Split the video file *inputdir* into numbered JPEG frames in *outputdir*.

    SECURITY NOTE: the command is built by string concatenation and run with
    shell=True, so paths containing shell metacharacters are interpreted by
    the shell (command injection risk for untrusted paths).
    """
    print subprocess.Popen('ffmpeg -i '+inputdir+' -f image2 '+outputdir+'/image-%3d.jpeg', shell=True, stdout=subprocess.PIPE).stdout.read()
def createVideo(inputdir, outputdir, framerate):
    """Assemble the JPEG frames in *inputdir* into a video at *outputdir*.

    SECURITY NOTE: the command is built by string concatenation and run with
    shell=True (the glob pattern is expanded by ffmpeg itself via
    -pattern_type glob, but the concatenated paths are still shell-parsed).
    """
    print subprocess.Popen('ffmpeg -r '+str(framerate)+' -f image2 -pattern_type glob -i "'+inputdir+'/*.jpeg" '+outputdir, shell=True, stdout=subprocess.PIPE).stdout.read()
if __name__ == "__main__":
    # Command-line entry point: parse the options and dispatch to frame
    # extraction, video assembly, or the dreaming loop.
    parser = argparse.ArgumentParser(description='DeepDreamAnim')
    parser.add_argument('-i','--input', help='Input directory',required=True)
    parser.add_argument('-o','--output',help='Output directory', required=True)
    parser.add_argument('-p','--preview',help='Preview image width. Default: 0', type=int, required=False)
    parser.add_argument('-oct','--octaves',help='Octaves. Default: 4', type=int, required=False)
    parser.add_argument('-octs','--octavescale',help='Octave Scale. Default: 1.4', type=float, required=False)
    parser.add_argument('-itr','--iterations',help='Iterations. Default: 10', type=int, required=False)
    parser.add_argument('-j','--jitter',help='Jitter. Default: 32', type=int, required=False)
    parser.add_argument('-z','--zoom',help='Zoom in Amount. Default: 1', type=int, required=False)
    parser.add_argument('-s','--stepsize',help='Step Size. Default: 1.5', type=float, required=False)
    parser.add_argument('-b','--blend',help='Blend Amount. Default: 0.5', type=float, required=False)
    parser.add_argument('-l','--layers',help='Layers Loop. Default: inception_4c/output', nargs="+", type=str, required=False)
    parser.add_argument('-e','--extract',help='Extract Frames From Video.', type=int, required=False)
    parser.add_argument('-c','--create',help='Create Video From Frames.', type=int, required=False)

    args = parser.parse_args()

    # BUG FIX: 'args.extract is 1' compared object identity with an int
    # literal; it only worked because CPython caches small ints. Use ==.
    if args.extract == 1:
        extractVideo(args.input, args.output)
    elif args.create == 1:
        createVideo(args.input, args.output, 24)
    else:
        main(args.input, args.output, args.preview, args.octaves, args.octavescale, args.iterations, args.jitter, args.zoom, args.stepsize, args.blend, args.layers)
| mit |
dablak/boto | boto/pyami/installers/ubuntu/apache.py | 205 | 1929 | # Copyright (c) 2008 Chris Moyer http://coredumped.org
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.pyami.installers.ubuntu.installer import Installer
class Apache(Installer):
    """
    Install apache2, mod_python, and libapache2-svn
    """
    def install(self):
        """Install Apache 2 with mod_python and enable the standard modules.

        Runs apt-get and a2enmod via the base Installer's run(); notify=True
        reports each step and exit_on_error=True aborts on the first failure.
        """
        self.run("apt-get update")
        self.run('apt-get -y install apache2', notify=True, exit_on_error=True)
        self.run('apt-get -y install libapache2-mod-python', notify=True, exit_on_error=True)
        self.run('a2enmod rewrite', notify=True, exit_on_error=True)
        self.run('a2enmod ssl', notify=True, exit_on_error=True)
        self.run('a2enmod proxy', notify=True, exit_on_error=True)
        self.run('a2enmod proxy_ajp', notify=True, exit_on_error=True)

        # Hard reboot the apache2 server to enable these module
        self.stop("apache2")
        self.start("apache2")

    def main(self):
        self.install()
| mit |
tardis-sn/tardis | tardis/montecarlo/tests/test_base.py | 1 | 1085 | import os
import pandas as pd
import numpy as np
import pytest
from astropy import units as u
from numpy.testing import assert_almost_equal
###
# Save and Load
###
@pytest.fixture(scope="module", autouse=True)
def to_hdf_buffer(hdf_file_path, simulation_verysimple):
    """Write the runner of the simple simulation fixture to HDF once per module."""
    simulation_verysimple.runner.to_hdf(
        hdf_file_path, name="runner", overwrite=True
    )
# Runner attributes expected to round-trip through the HDF file; each becomes
# one parametrized case of test_hdf_runner below.
runner_properties = [
    "output_nu",
    "output_energy",
    "nu_bar_estimator",
    "j_estimator",
    "montecarlo_virtual_luminosity",
    "last_interaction_in_nu",
    "last_interaction_type",
    "last_line_interaction_in_id",
    "last_line_interaction_out_id",
    "last_line_interaction_shell_id",
    "packet_luminosity",
]
@pytest.mark.parametrize("attr", runner_properties)
def test_hdf_runner(hdf_file_path, simulation_verysimple, attr):
    """Each runner attribute read back from HDF matches the live value."""
    actual = getattr(simulation_verysimple.runner, attr)
    # Astropy quantities are compared in CGS so units cancel out.
    if hasattr(actual, "cgs"):
        actual = actual.cgs.value
    path = os.path.join("runner", attr)
    expected = pd.read_hdf(hdf_file_path, path)
    assert_almost_equal(actual, expected.values)
| bsd-3-clause |
kevinmarks/mentiontech | requests/packages/urllib3/util/connection.py | 679 | 3293 | import socket
try:
from select import poll, POLLIN
except ImportError: # `poll` doesn't exist on OSX and other platforms
poll = False
try:
from select import select
except ImportError: # `select` doesn't exist on AppEngine.
select = False
def is_connection_dropped(conn):  # Platform-specific
    """
    Returns True if the connection is dropped and should be closed.

    :param conn:
        :class:`httplib.HTTPConnection` object.

    Note: For platforms like AppEngine, this will always return ``False`` to
    let the platform handle connection recycling transparently for us.
    """
    sock = getattr(conn, 'sock', False)
    if sock is False:  # Platform-specific: AppEngine
        return False
    if sock is None:  # Connection already closed (such as by httplib).
        return True

    if not poll:
        # Fallback: use select() where poll() is unavailable (e.g. OSX).
        if not select:  # Platform-specific: AppEngine
            return False
        try:
            return select([sock], [], [], 0.0)[0]
        except socket.error:
            return True

    # Preferred path on platforms that support poll(). Readability here means
    # either buffered data (bad for a keep-alive connection) or a peer hang-up.
    poller = poll()
    poller.register(sock, POLLIN)
    fileno = sock.fileno()
    for descriptor, _event in poller.poll(0.0):
        if descriptor == fileno:
            return True
# This function is copied from socket.py in the Python 2.7 standard
# library test suite. Added to its signature is only `socket_options`.
def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None, socket_options=None):
"""Connect to *address* and return the socket object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used. If *source_address* is set it must be a tuple of (host, port)
for the socket to bind as a source address before making the connection.
An host of '' or port 0 tells the OS to use the default.
"""
host, port = address
err = None
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
# If provided, set socket level options before connecting.
# This is the only addition urllib3 makes to this function.
_set_socket_options(sock, socket_options)
if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
if source_address:
sock.bind(source_address)
sock.connect(sa)
return sock
except socket.error as _:
err = _
if sock is not None:
sock.close()
sock = None
if err is not None:
raise err
else:
raise socket.error("getaddrinfo returns an empty list")
def _set_socket_options(sock, options):
if options is None:
return
for opt in options:
sock.setsockopt(*opt)
| apache-2.0 |
nazo/ansible | test/units/plugins/action/test_action.py | 34 | 26342 | # -*- coding: utf-8 -*-
# (c) 2015, Florian Apolloner <florian@apolloner.eu>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible import constants as C
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock, mock_open
from ansible.errors import AnsibleError
from ansible.module_utils.six import text_type
from ansible.module_utils.six.moves import shlex_quote, builtins
from ansible.module_utils._text import to_bytes
from ansible.playbook.play_context import PlayContext
from ansible.plugins.action import ActionBase
from ansible.template import Templar
from units.mock.loader import DictDataLoader
python_module_replacers = b"""
#!/usr/bin/python
#ANSIBLE_VERSION = "<<ANSIBLE_VERSION>>"
#MODULE_COMPLEX_ARGS = "<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>"
#SELINUX_SPECIAL_FS="<<SELINUX_SPECIAL_FILESYSTEMS>>"
test = u'Toshio \u304f\u3089\u3068\u307f'
from ansible.module_utils.basic import *
"""
powershell_module_replacers = b"""
WINDOWS_ARGS = "<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>"
# POWERSHELL_COMMON
"""
class DerivedActionBase(ActionBase):
    """Minimal concrete ActionBase so the abstract base's helpers can be tested."""

    TRANSFERS_FILES = False

    def run(self, tmp=None, task_vars=None):
        # We're not testing the plugin run() method, just the helper
        # methods ActionBase defines
        return super(DerivedActionBase, self).run(tmp=tmp, task_vars=task_vars)
class TestActionBase(unittest.TestCase):
    def test_action_base_run(self):
        """run() returns an empty result dict for both async settings."""
        mock_task = MagicMock()
        mock_task.action = "foo"
        mock_task.args = dict(a=1, b=2, c=3)

        mock_connection = MagicMock()

        play_context = PlayContext()

        # NOTE(review): 'async' became a reserved word in Python 3.7+; this
        # attribute access only parses on older interpreters.
        mock_task.async = None
        action_base = DerivedActionBase(mock_task, mock_connection, play_context, None, None, None)
        results = action_base.run()
        self.assertEqual(results, dict())

        mock_task.async = 0
        action_base = DerivedActionBase(mock_task, mock_connection, play_context, None, None, None)
        results = action_base.run()
        self.assertEqual(results, {})
    def test_action_base__configure_module(self):
        """_configure_module() formats Python and PowerShell modules and
        raises AnsibleError for unknown module names."""
        fake_loader = DictDataLoader({
        })

        # create our fake task
        mock_task = MagicMock()
        mock_task.action = "copy"

        # create a mock connection, so we don't actually try and connect to things
        mock_connection = MagicMock()

        # create a mock shared loader object
        def mock_find_plugin(name, options):
            # 'badmodule' simulates a module that cannot be found on disk.
            if name == 'badmodule':
                return None
            elif '.ps1' in options:
                return '/fake/path/to/%s.ps1' % name
            else:
                return '/fake/path/to/%s' % name

        mock_module_loader = MagicMock()
        mock_module_loader.find_plugin.side_effect = mock_find_plugin
        mock_shared_obj_loader = MagicMock()
        mock_shared_obj_loader.module_loader = mock_module_loader

        # we're using a real play context here
        play_context = PlayContext()

        # our test class
        action_base = DerivedActionBase(
            task=mock_task,
            connection=mock_connection,
            play_context=play_context,
            loader=fake_loader,
            templar=None,
            shared_loader_obj=mock_shared_obj_loader,
        )

        # test python module formatting
        with patch.object(builtins, 'open', mock_open(read_data=to_bytes(python_module_replacers.strip(), encoding='utf-8'))):
            with patch.object(os, 'rename'):
                mock_task.args = dict(a=1, foo='fö〩')  # non-ASCII arg exercises encoding
                mock_connection.module_implementation_preferences = ('',)
                (style, shebang, data, path) = action_base._configure_module(mock_task.action, mock_task.args)
                self.assertEqual(style, "new")
                self.assertEqual(shebang, u"#!/usr/bin/python")

                # test module not found
                self.assertRaises(AnsibleError, action_base._configure_module, 'badmodule', mock_task.args)

        # test powershell module formatting
        with patch.object(builtins, 'open', mock_open(read_data=to_bytes(powershell_module_replacers.strip(), encoding='utf-8'))):
            mock_task.action = 'win_copy'
            mock_task.args = dict(b=2)
            mock_connection.module_implementation_preferences = ('.ps1',)
            (style, shebang, data, path) = action_base._configure_module('stat', mock_task.args)
            self.assertEqual(style, "new")
            self.assertEqual(shebang, u'#!powershell')

            # test module not found
            self.assertRaises(AnsibleError, action_base._configure_module, 'badmodule', mock_task.args)
    def test_action_base__compute_environment_string(self):
        """_compute_environment_string() handles list/dict envs, templating,
        and rejects non-dict entries."""
        fake_loader = DictDataLoader({
        })

        # create our fake task
        mock_task = MagicMock()
        mock_task.action = "copy"
        mock_task.args = dict(a=1)

        # create a mock connection, so we don't actually try and connect to things
        def env_prefix(**args):
            return ' '.join(['%s=%s' % (k, shlex_quote(text_type(v))) for k, v in args.items()])
        mock_connection = MagicMock()
        mock_connection._shell.env_prefix.side_effect = env_prefix

        # we're using a real play context here
        play_context = PlayContext()

        # and we're using a real templar here too
        templar = Templar(loader=fake_loader)

        # our test class
        action_base = DerivedActionBase(
            task=mock_task,
            connection=mock_connection,
            play_context=play_context,
            loader=fake_loader,
            templar=templar,
            shared_loader_obj=None,
        )

        # test standard environment setup (None entries are ignored)
        mock_task.environment = [dict(FOO='foo'), None]
        env_string = action_base._compute_environment_string()
        self.assertEqual(env_string, "FOO=foo")

        # test where environment is not a list
        mock_task.environment = dict(FOO='foo')
        env_string = action_base._compute_environment_string()
        self.assertEqual(env_string, "FOO=foo")

        # test environment with a variable in it
        templar.set_available_variables(variables=dict(the_var='bar'))
        mock_task.environment = [dict(FOO='{{the_var}}')]
        env_string = action_base._compute_environment_string()
        self.assertEqual(env_string, "FOO=bar")

        # test with a bad environment set
        mock_task.environment = dict(FOO='foo')
        mock_task.environment = ['hi there']
        self.assertRaises(AnsibleError, action_base._compute_environment_string)
def test_action_base__early_needs_tmp_path(self):
# create our fake task
mock_task = MagicMock()
# create a mock connection, so we don't actually try and connect to things
mock_connection = MagicMock()
# we're using a real play context here
play_context = PlayContext()
# our test class
action_base = DerivedActionBase(
task=mock_task,
connection=mock_connection,
play_context=play_context,
loader=None,
templar=None,
shared_loader_obj=None,
)
self.assertFalse(action_base._early_needs_tmp_path())
action_base.TRANSFERS_FILES = True
self.assertTrue(action_base._early_needs_tmp_path())
    def test_action_base__make_tmp_path(self):
        """Exercise _make_tmp_path(): the success path plus each failure mode
        (empty path, auth failure rc=5, ssh error rc=255, generic rc=1).
        """
        # create our fake task
        mock_task = MagicMock()
        # create a mock connection, so we don't actually try and connect to things
        mock_connection = MagicMock()
        mock_connection.transport = 'ssh'
        mock_connection._shell.mkdtemp.return_value = 'mkdir command'
        mock_connection._shell.join_path.side_effect = os.path.join
        # we're using a real play context here
        play_context = PlayContext()
        play_context.become = True
        play_context.become_user = 'foo'
        # our test class
        action_base = DerivedActionBase(
            task=mock_task,
            connection=mock_connection,
            play_context=play_context,
            loader=None,
            templar=None,
            shared_loader_obj=None,
        )
        action_base._low_level_execute_command = MagicMock()
        # success: stdout of the mkdir command becomes the tmp dir, with a
        # trailing separator appended
        action_base._low_level_execute_command.return_value = dict(rc=0, stdout='/some/path')
        self.assertEqual(action_base._make_tmp_path('root'), '/some/path/')
        # empty path fails
        action_base._low_level_execute_command.return_value = dict(rc=0, stdout='')
        self.assertRaises(AnsibleError, action_base._make_tmp_path, 'root')
        # authentication failure
        action_base._low_level_execute_command.return_value = dict(rc=5, stdout='')
        self.assertRaises(AnsibleError, action_base._make_tmp_path, 'root')
        # ssh error
        action_base._low_level_execute_command.return_value = dict(rc=255, stdout='', stderr='')
        self.assertRaises(AnsibleError, action_base._make_tmp_path, 'root')
        # same ssh error, this time with verbose output enabled
        play_context.verbosity = 5
        self.assertRaises(AnsibleError, action_base._make_tmp_path, 'root')
        # general error
        action_base._low_level_execute_command.return_value = dict(rc=1, stdout='some stuff here', stderr='')
        self.assertRaises(AnsibleError, action_base._make_tmp_path, 'root')
        # general error with a 'no space left on device' stderr
        action_base._low_level_execute_command.return_value = dict(rc=1, stdout='some stuff here', stderr='No space left on device')
        self.assertRaises(AnsibleError, action_base._make_tmp_path, 'root')
def test_action_base__remove_tmp_path(self):
# create our fake task
mock_task = MagicMock()
# create a mock connection, so we don't actually try and connect to things
mock_connection = MagicMock()
mock_connection._shell.remove.return_value = 'rm some stuff'
# we're using a real play context here
play_context = PlayContext()
# our test class
action_base = DerivedActionBase(
task=mock_task,
connection=mock_connection,
play_context=play_context,
loader=None,
templar=None,
shared_loader_obj=None,
)
action_base._low_level_execute_command = MagicMock()
# these don't really return anything or raise errors, so
# we're pretty much calling these for coverage right now
action_base._remove_tmp_path('/bad/path/dont/remove')
action_base._remove_tmp_path('/good/path/to/ansible-tmp-thing')
    @patch('os.unlink')
    @patch('os.fdopen')
    @patch('tempfile.mkstemp')
    def test_action_base__transfer_data(self, mock_mkstemp, mock_fdopen, mock_unlink):
        """Verify _transfer_data() accepts str/unicode/dict payloads, returns
        the remote path unchanged, and wraps local write failures in
        AnsibleError.  tempfile/os are patched so no real file is created.
        """
        # create our fake task
        mock_task = MagicMock()
        # create a mock connection, so we don't actually try and connect to things
        mock_connection = MagicMock()
        mock_connection.put_file.return_value = None
        # we're using a real play context here
        play_context = PlayContext()
        # our test class
        action_base = DerivedActionBase(
            task=mock_task,
            connection=mock_connection,
            play_context=play_context,
            loader=None,
            templar=None,
            shared_loader_obj=None,
        )
        # fake out tempfile.mkstemp() and the file object it hands back
        mock_afd = MagicMock()
        mock_afile = MagicMock()
        mock_mkstemp.return_value = (mock_afd, mock_afile)
        mock_unlink.return_value = None
        mock_afo = MagicMock()
        mock_afo.write.return_value = None
        mock_afo.flush.return_value = None
        mock_afo.close.return_value = None
        mock_fdopen.return_value = mock_afo
        # plain, mixed-unicode and dict payloads all succeed and echo the path
        self.assertEqual(action_base._transfer_data('/path/to/remote/file', 'some data'), '/path/to/remote/file')
        self.assertEqual(action_base._transfer_data('/path/to/remote/file', 'some mixed data: fö〩'), '/path/to/remote/file')
        self.assertEqual(action_base._transfer_data('/path/to/remote/file', dict(some_key='some value')), '/path/to/remote/file')
        self.assertEqual(action_base._transfer_data('/path/to/remote/file', dict(some_key='fö〩')), '/path/to/remote/file')
        # a failure while writing the local temp file must surface as AnsibleError
        mock_afo.write.side_effect = Exception()
        self.assertRaises(AnsibleError, action_base._transfer_data, '/path/to/remote/file', '')
    def test_action_base__execute_remote_stat(self):
        """Validate _execute_remote_stat()'s handling of the stat module
        result: normal stat, missing file, stat without checksum, and a
        failed module run.
        """
        # create our fake task
        mock_task = MagicMock()
        # create a mock connection, so we don't actually try and connect to things
        mock_connection = MagicMock()
        # we're using a real play context here
        play_context = PlayContext()
        # our test class
        action_base = DerivedActionBase(
            task=mock_task,
            connection=mock_connection,
            play_context=play_context,
            loader=None,
            templar=None,
            shared_loader_obj=None,
        )
        action_base._execute_module = MagicMock()
        # test normal case
        action_base._execute_module.return_value = dict(stat=dict(checksum='1111111111111111111111111111111111', exists=True))
        res = action_base._execute_remote_stat(path='/path/to/file', all_vars=dict(), follow=False)
        self.assertEqual(res['checksum'], '1111111111111111111111111111111111')
        # test does not exist: a missing file reports the special checksum '1'
        action_base._execute_module.return_value = dict(stat=dict(exists=False))
        res = action_base._execute_remote_stat(path='/path/to/file', all_vars=dict(), follow=False)
        self.assertFalse(res['exists'])
        self.assertEqual(res['checksum'], '1')
        # test no checksum in result from _execute_module: empty string expected
        action_base._execute_module.return_value = dict(stat=dict(exists=True))
        res = action_base._execute_remote_stat(path='/path/to/file', all_vars=dict(), follow=False)
        self.assertTrue(res['exists'])
        self.assertEqual(res['checksum'], '')
        # test stat call failed: the module failure becomes an AnsibleError
        action_base._execute_module.return_value = dict(failed=True, msg="because I said so")
        self.assertRaises(AnsibleError, action_base._execute_remote_stat, path='/path/to/file', all_vars=dict(), follow=False)
    def test_action_base__execute_module(self):
        """End-to-end check of _execute_module() with its helpers mocked out.

        Covers the 'new', 'old' and 'non_native_want_json' module styles,
        become, an invalid (empty) shebang, and check mode with and without
        module support for it.
        """
        # create our fake task
        mock_task = MagicMock()
        mock_task.action = 'copy'
        mock_task.args = dict(a=1, b=2, c=3)
        # create a mock connection, so we don't actually try and connect to things
        def build_module_command(env_string, shebang, cmd, arg_path=None, rm_tmp=None):
            # stand-in for the shell plugin: join whichever pieces are present
            to_run = [env_string, cmd]
            if arg_path:
                to_run.append(arg_path)
            if rm_tmp:
                to_run.append(rm_tmp)
            return " ".join(to_run)
        mock_connection = MagicMock()
        mock_connection.build_module_command.side_effect = build_module_command
        mock_connection._shell.get_remote_filename.return_value = 'copy.py'
        mock_connection._shell.join_path.side_effect = os.path.join
        # we're using a real play context here
        play_context = PlayContext()
        # our test class
        action_base = DerivedActionBase(
            task=mock_task,
            connection=mock_connection,
            play_context=play_context,
            loader=None,
            templar=None,
            shared_loader_obj=None,
        )
        # fake a lot of methods as we test those elsewhere
        action_base._configure_module = MagicMock()
        action_base._supports_check_mode = MagicMock()
        action_base._is_pipelining_enabled = MagicMock()
        action_base._make_tmp_path = MagicMock()
        action_base._transfer_data = MagicMock()
        action_base._compute_environment_string = MagicMock()
        action_base._low_level_execute_command = MagicMock()
        action_base._fixup_perms2 = MagicMock()
        action_base._configure_module.return_value = ('new', '#!/usr/bin/python', 'this is the module data', 'path')
        action_base._is_pipelining_enabled.return_value = False
        action_base._compute_environment_string.return_value = ''
        action_base._connection.has_pipelining = False
        action_base._make_tmp_path.return_value = '/the/tmp/path'
        # the faked remote execution always emits this JSON on stdout
        action_base._low_level_execute_command.return_value = dict(stdout='{"rc": 0, "stdout": "ok"}')
        # default and explicit module name/args both parse the faked output
        self.assertEqual(action_base._execute_module(module_name=None, module_args=None), dict(_ansible_parsed=True, rc=0, stdout="ok", stdout_lines=['ok']))
        self.assertEqual(action_base._execute_module(module_name='foo',
                                                     module_args=dict(z=9, y=8, x=7), task_vars=dict(a=1)),
                         dict(_ansible_parsed=True, rc=0, stdout="ok",
                              stdout_lines=['ok']))
        # test with needing/removing a remote tmp path
        action_base._configure_module.return_value = ('old', '#!/usr/bin/python', 'this is the module data', 'path')
        action_base._is_pipelining_enabled.return_value = False
        action_base._make_tmp_path.return_value = '/the/tmp/path'
        self.assertEqual(action_base._execute_module(), dict(_ansible_parsed=True, rc=0, stdout="ok", stdout_lines=['ok']))
        action_base._configure_module.return_value = ('non_native_want_json', '#!/usr/bin/python', 'this is the module data', 'path')
        self.assertEqual(action_base._execute_module(), dict(_ansible_parsed=True, rc=0, stdout="ok", stdout_lines=['ok']))
        # enabling become must not change the (mocked) result
        play_context.become = True
        play_context.become_user = 'foo'
        self.assertEqual(action_base._execute_module(), dict(_ansible_parsed=True, rc=0, stdout="ok", stdout_lines=['ok']))
        # test an invalid shebang return
        action_base._configure_module.return_value = ('new', '', 'this is the module data', 'path')
        action_base._is_pipelining_enabled.return_value = False
        action_base._make_tmp_path.return_value = '/the/tmp/path'
        self.assertRaises(AnsibleError, action_base._execute_module)
        # test with check mode enabled, once with support for check
        # mode and once with support disabled to raise an error
        play_context.check_mode = True
        action_base._configure_module.return_value = ('new', '#!/usr/bin/python', 'this is the module data', 'path')
        self.assertEqual(action_base._execute_module(), dict(_ansible_parsed=True, rc=0, stdout="ok", stdout_lines=['ok']))
        action_base._supports_check_mode = False
        self.assertRaises(AnsibleError, action_base._execute_module)
    def test_action_base_sudo_only_if_user_differs(self):
        """become should only wrap the command when the become user differs
        from the remote user — unless C.BECOME_ALLOW_SAME_USER is set.
        """
        fake_loader = MagicMock()
        fake_loader.get_basedir.return_value = os.getcwd()
        play_context = PlayContext()
        action_base = DerivedActionBase(None, None, play_context, fake_loader, None, None)
        action_base._connection = MagicMock(exec_command=MagicMock(return_value=(0, '', '')))
        action_base._connection._shell = MagicMock(append_command=MagicMock(return_value=('JOINED CMD')))
        play_context.become = True
        play_context.become_user = play_context.remote_user = 'root'
        play_context.make_become_cmd = MagicMock(return_value='CMD')
        # same user: no become wrapping expected
        action_base._low_level_execute_command('ECHO', sudoable=True)
        play_context.make_become_cmd.assert_not_called()
        # different user: the command must be wrapped via make_become_cmd
        play_context.remote_user = 'apo'
        action_base._low_level_execute_command('ECHO', sudoable=True, executable='/bin/csh')
        play_context.make_become_cmd.assert_called_once_with("ECHO", executable='/bin/csh')
        play_context.make_become_cmd.reset_mock()
        # with BECOME_ALLOW_SAME_USER enabled even the same user is wrapped;
        # restore the original setting afterwards so other tests are unaffected
        become_allow_same_user = C.BECOME_ALLOW_SAME_USER
        C.BECOME_ALLOW_SAME_USER = True
        try:
            play_context.remote_user = 'root'
            action_base._low_level_execute_command('ECHO SAME', sudoable=True)
            play_context.make_become_cmd.assert_called_once_with("ECHO SAME", executable=None)
        finally:
            C.BECOME_ALLOW_SAME_USER = become_allow_same_user
class TestActionBaseCleanReturnedData(unittest.TestCase):
    """Tests for ActionBase._clean_returned_data()."""
    def test(self):
        """Internal keys (ansible_playbook_python, ansible_python_interpreter)
        must be stripped from the returned data, while ssh host keys and
        ordinary variables are kept.
        """
        fake_loader = DictDataLoader({
        })
        mock_module_loader = MagicMock()
        mock_shared_loader_obj = MagicMock()
        mock_shared_loader_obj.module_loader = mock_module_loader
        # assorted paths fed to the fake connection loader below
        connection_loader_paths = ['/tmp/asdfadf', '/usr/lib64/whatever',
                                   'dfadfasf',
                                   'foo.py',
                                   '.*',
                                   # FIXME: a path with parans breaks the regex
                                   # '(.*)',
                                   '/path/to/ansible/lib/ansible/plugins/connection/custom_connection.py',
                                   '/path/to/ansible/lib/ansible/plugins/connection/ssh.py']
        def fake_all(path_only=None):
            # mimic the loader's all(): yield each known plugin path
            for path in connection_loader_paths:
                yield path
        mock_connection_loader = MagicMock()
        mock_connection_loader.all = fake_all
        mock_shared_loader_obj.connection_loader = mock_connection_loader
        mock_connection = MagicMock()
        action_base = DerivedActionBase(task=None,
                                        connection=mock_connection,
                                        play_context=None,
                                        loader=fake_loader,
                                        templar=None,
                                        shared_loader_obj=mock_shared_loader_obj)
        data = {'ansible_playbook_python': '/usr/bin/python',
                #'ansible_rsync_path': '/usr/bin/rsync',
                'ansible_python_interpreter': '/usr/bin/python',
                'ansible_ssh_some_var': 'whatever',
                'ansible_ssh_host_key_somehost': 'some key here',
                'some_other_var': 'foo bar'}
        # cleaning happens in place
        action_base._clean_returned_data(data)
        self.assertNotIn('ansible_playbook_python', data)
        self.assertNotIn('ansible_python_interpreter', data)
        self.assertIn('ansible_ssh_host_key_somehost', data)
        self.assertIn('some_other_var', data)
class TestActionBaseParseReturnedData(unittest.TestCase):
    """Tests for ActionBase._parse_returned_data() JSON handling."""
    def _action_base(self):
        """Build a DerivedActionBase wired up with mock loaders/connection."""
        fake_loader = DictDataLoader({
        })
        mock_module_loader = MagicMock()
        mock_shared_loader_obj = MagicMock()
        mock_shared_loader_obj.module_loader = mock_module_loader
        mock_connection_loader = MagicMock()
        mock_shared_loader_obj.connection_loader = mock_connection_loader
        mock_connection = MagicMock()
        action_base = DerivedActionBase(task=None,
                                        connection=mock_connection,
                                        play_context=None,
                                        loader=fake_loader,
                                        templar=None,
                                        shared_loader_obj=mock_shared_loader_obj)
        return action_base
    def test_fail_no_json(self):
        """Non-JSON stdout is flagged as failed/unparsed with stderr kept."""
        action_base = self._action_base()
        rc = 0
        stdout = 'foo\nbar\n'
        err = 'oopsy'
        returned_data = {'rc': rc,
                         'stdout': stdout,
                         'stdout_lines': stdout.splitlines(),
                         'stderr': err}
        res = action_base._parse_returned_data(returned_data)
        self.assertFalse(res['_ansible_parsed'])
        self.assertTrue(res['failed'])
        self.assertEqual(res['module_stderr'], err)
    def test_json_empty(self):
        """An empty JSON object parses to an (almost) empty result."""
        action_base = self._action_base()
        rc = 0
        stdout = '{}\n'
        err = ''
        returned_data = {'rc': rc,
                         'stdout': stdout,
                         'stdout_lines': stdout.splitlines(),
                         'stderr': err}
        res = action_base._parse_returned_data(returned_data)
        del res['_ansible_parsed']  # we always have _ansible_parsed
        self.assertEqual(len(res), 0)
        self.assertFalse(res)
    def test_json_facts(self):
        """ansible_facts in the module output survive parsing."""
        action_base = self._action_base()
        rc = 0
        stdout = '{"ansible_facts": {"foo": "bar", "ansible_blip": "blip_value"}}\n'
        err = ''
        returned_data = {'rc': rc,
                         'stdout': stdout,
                         'stdout_lines': stdout.splitlines(),
                         'stderr': err}
        res = action_base._parse_returned_data(returned_data)
        self.assertTrue(res['ansible_facts'])
        self.assertIn('ansible_blip', res['ansible_facts'])
        # TODO: Should this be an AnsibleUnsafe?
        #self.assertIsInstance(res['ansible_facts'], AnsibleUnsafe)
    def test_json_facts_add_host(self):
        """add_host data is preserved alongside ansible_facts."""
        action_base = self._action_base()
        rc = 0
        stdout = '''{"ansible_facts": {"foo": "bar", "ansible_blip": "blip_value"},
        "add_host": {"host_vars": {"some_key": ["whatever the add_host object is"]}
        }
        }\n'''
        err = ''
        returned_data = {'rc': rc,
                         'stdout': stdout,
                         'stdout_lines': stdout.splitlines(),
                         'stderr': err}
        res = action_base._parse_returned_data(returned_data)
        self.assertTrue(res['ansible_facts'])
        self.assertIn('ansible_blip', res['ansible_facts'])
        self.assertIn('add_host', res)
        # TODO: Should this be an AnsibleUnsafe?
        #self.assertIsInstance(res['ansible_facts'], AnsibleUnsafe)
| gpl-3.0 |
gregpuzzles1/Sandbox | Example Programs/Ch_16_Student_Files/Case Study/indexedlist.py | 1 | 5229 | """
File: indexedlist.py
Indexed lists include the index-based operations, append,
and index.
"""
from arrays import Array
class ArrayIndexedList(object):
    """Array implementation of an indexed list.

    Items occupy the first self._size slots of a fixed-capacity Array;
    growing the array when it fills up is left as an exercise.
    """

    DEFAULT_SIZE = 10

    def __init__(self):
        self._items = Array(ArrayIndexedList.DEFAULT_SIZE)
        self._size = 0

    def __len__(self):
        """Returns the number of items in the list."""
        return self._size

    def isEmpty(self):
        """Returns True if the list holds no items."""
        return len(self) == 0

    def __str__(self):
        """Returns the items as a string, each followed by a space."""
        result = ""
        for item in self:
            result += str(item) + " "
        return result

    def append(self, item):
        """Inserts item after the tail of the list."""
        self._items[self._size] = item
        self._size += 1

    def __getitem__(self, index):
        """Returns the item at index.  Preconditions left as an exercise."""
        return self._items[index]

    def __setitem__(self, index, item):
        """Replaces the item at index.  Preconditions left as an exercise."""
        self._items[index] = item

    def insert(self, index, item):
        """Puts item at index, shifting items to the right if
        necessary."""
        # Resizing array left as an exercise.
        # Open a hole for the new item by shifting items to the right by
        # one position.  range (not the Python 2-only xrange) is used so
        # the code also runs under Python 3.
        for probe in range(len(self), index, -1):
            self._items[probe] = self._items[probe - 1]
        self._items[index] = item
        self._size += 1

    def remove(self, index):
        """Deletes and returns item at index, shifting items
        to the left if necessary."""
        # Preconditions left as an exercise.
        # BUG FIX: the original called the nonexistent self.get(index),
        # which raised AttributeError; index access goes through __getitem__.
        oldItem = self[index]
        for probe in range(index, len(self) - 1):
            self._items[probe] = self._items[probe + 1]
        self._size -= 1
        # Resizing array left as an exercise
        return oldItem

    def index(self, item):
        """Returns the index of item if found or -1 otherwise."""
        for position, data in enumerate(self):
            if data == item:
                return position
        return -1

    def __iter__(self):
        """An iterator for an array indexed list."""
        # A plain loop exit ends the generator; raising StopIteration
        # inside a generator is an error under PEP 479 (Python 3.7+).
        cursor = 0
        while cursor < len(self):
            yield self._items[cursor]
            cursor += 1
from node import Node
class LinkedIndexedList(object):
    """Linked implementation of an indexed list.

    Instance variables _head and _tail reference the first
    and the last nodes, respectively.
    """

    def __init__(self):
        self._head = None
        self._tail = None
        self._size = 0

    def __len__(self):
        """Returns the number of items in the list."""
        return self._size

    def isEmpty(self):
        """Returns True if the list holds no items."""
        return len(self) == 0

    def __str__(self):
        """Returns the items as a bracketed, space-separated string."""
        result = "["
        for item in self:
            result += str(item) + " "
        return result.strip() + "]"

    def append(self, item):
        """Inserts item after the tail of the list."""
        newNode = Node(item, None)
        if self.isEmpty():
            self._head = newNode
        else:
            self._tail.next = newNode
        self._tail = newNode
        self._size += 1

    def _locate(self, index):
        """Searches for the node at position index.
        Postconditions: _currentNode refers to the ith node, if
                        there is one, or None if not.
                        _previousNode refers to the previous
                        node, if there is one, or None if not"""
        self._currentNode = self._head
        self._previousNode = None
        while index > 0:
            self._previousNode = self._currentNode
            self._currentNode = self._currentNode.next
            index -= 1

    def __getitem__(self, index):
        """Returns the item at index.
        Precondition: 0 <= index < len(list)"""
        # Added for symmetry with __setitem__: the class supported item
        # assignment but had no item access.
        if index < 0 or index >= len(self):
            raise Exception("Index out of range")
        self._locate(index)
        return self._currentNode.data

    def __setitem__(self, index, item):
        """Precondition: 0 <= index < len(list)"""
        if index < 0 or index >= len(self):
            # The call form of raise is valid in both Python 2 and 3;
            # the original 'raise Exception, "..."' is a Python 3 syntax error.
            raise Exception("Index out of range")
        self._locate(index)
        self._currentNode.data = item

    def insert(self, index, item):
        """Puts item at index, shifting items to the right if
        necessary."""
        if index >= len(self):
            self.append(item)
        else:
            self._locate(index)
            newNode = Node(item, self._currentNode)
            if self._previousNode is None:
                self._head = newNode
            else:
                self._previousNode.next = newNode
            self._size += 1

    def remove(self, index):
        """Deletes and returns the item at index by unlinking its node.
        Precondition: 0 <= index < len(list)"""
        if index < 0 or index >= len(self):
            raise Exception("Index out of range")
        self._locate(index)
        removed = self._currentNode.data
        if self._previousNode is None:
            self._head = self._currentNode.next
        else:
            self._previousNode.next = self._currentNode.next
        if self._currentNode is self._tail:
            # removed the last node, so the tail moves back one position
            self._tail = self._previousNode
        self._size -= 1
        return removed

    def index(self, item):
        """Returns the index of item if found or -1 otherwise."""
        position = 0
        for data in self:
            if data == item:
                return position
            position += 1
        return -1

    def __iter__(self):
        """An iterator for a linked indexed list."""
        # A plain loop exit ends the generator; raising StopIteration
        # inside a generator is an error under PEP 479 (Python 3.7+).
        cursor = self._head
        while cursor is not None:
            yield cursor.data
            cursor = cursor.next
def main():
    """Smoke test exercising either implementation through the same code.

    NOTE: written with Python 2 print statements; this module will not
    parse under Python 3.
    """
    # Test either implementation with same code
    a = ArrayIndexedList()
    #a = LinkedIndexedList()
    print "Length:", len(a)
    print "Empty:", a.isEmpty()
    print "Append 1-9"
    for i in xrange(9):
        a.append(i + 1)
    print "Items (first to last):", a
    print "Iterating with a for loop:"
    for item in a: print item,
    print "\nLength:", len(a)
    print "Empty:", a.isEmpty()
    print "Insert 10 at position 2:"
    a.insert(2, 10)
    print a
if __name__ == '__main__':
main()
| gpl-3.0 |
chrippa/python-flashmedia | src/flashmedia/ordereddict.py | 43 | 8877 | # Source: http://code.activestate.com/recipes/576693/
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
    'Dictionary that remembers insertion order'
    # An inherited dict maps keys to values.
    # The inherited dict provides __getitem__, __len__, __contains__, and get.
    # The remaining methods are order-aware.
    # Big-O running times for all methods are the same as for regular dictionaries.
    # The internal self.__map dictionary maps keys to links in a doubly linked list.
    # The circular doubly linked list starts and ends with a sentinel element.
    # The sentinel element never gets deleted (this simplifies the algorithm).
    # Each link is stored as a list of length three: [PREV, NEXT, KEY].
    def __init__(self, *args, **kwds):
        '''Initialize an ordered dictionary. Signature is the same as for
        regular dictionaries, but keyword arguments are not recommended
        because their insertion order is arbitrary.
        '''
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            self.__root
        except AttributeError:
            # first initialization: create the sentinel node and the key map
            self.__root = root = []  # sentinel node
            root[:] = [root, root, None]
            self.__map = {}
        self.__update(*args, **kwds)
    def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link which goes at the end of the linked
        # list, and the inherited dictionary is updated with the new key/value pair.
        if key not in self:
            root = self.__root
            last = root[0]
            last[1] = root[0] = self.__map[key] = [last, root, key]
        dict_setitem(self, key, value)
    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which is
        # then removed by updating the links in the predecessor and successor nodes.
        dict_delitem(self, key)
        link_prev, link_next, key = self.__map.pop(key)
        link_prev[1] = link_next
        link_next[0] = link_prev
    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        # Traverse the linked list forward; root[1] is the oldest link.
        root = self.__root
        curr = root[1]
        while curr is not root:
            yield curr[2]
            curr = curr[1]
    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        # Traverse the linked list backward; root[0] is the newest link.
        root = self.__root
        curr = root[0]
        while curr is not root:
            yield curr[2]
            curr = curr[0]
    def clear(self):
        'od.clear() -> None. Remove all items from od.'
        try:
            # Break the reference cycles held by the links so they can be
            # collected, then reset the sentinel and the key map.
            for node in self.__map.itervalues():
                del node[:]
            root = self.__root
            root[:] = [root, root, None]
            self.__map.clear()
        except AttributeError:
            pass
        dict.clear(self)
    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if false.
        '''
        if not self:
            raise KeyError('dictionary is empty')
        root = self.__root
        if last:
            # unlink the newest link (root's predecessor)
            link = root[0]
            link_prev = link[0]
            link_prev[1] = root
            root[0] = link_prev
        else:
            # unlink the oldest link (root's successor)
            link = root[1]
            link_next = link[1]
            root[1] = link_next
            link_next[0] = root
        key = link[2]
        del self.__map[key]
        value = dict.pop(self, key)
        return key, value
    # -- the following methods do not depend on the internal structure --
    def keys(self):
        'od.keys() -> list of keys in od'
        return list(self)
    def values(self):
        'od.values() -> list of values in od'
        return [self[key] for key in self]
    def items(self):
        'od.items() -> list of (key, value) pairs in od'
        return [(key, self[key]) for key in self]
    def iterkeys(self):
        'od.iterkeys() -> an iterator over the keys in od'
        return iter(self)
    def itervalues(self):
        'od.itervalues() -> an iterator over the values in od'
        for k in self:
            yield self[k]
    def iteritems(self):
        'od.iteritems() -> an iterator over the (key, value) items in od'
        for k in self:
            yield (k, self[k])
    def update(*args, **kwds):
        '''od.update(E, **F) -> None. Update od from dict/iterable E and F.
        If E is a dict instance, does: for k in E: od[k] = E[k]
        If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
        Or if E is an iterable of items, does: for k, v in E: od[k] = v
        In either case, this is followed by: for k, v in F.items(): od[k] = v
        '''
        # NOTE: 'self' is pulled out of *args so that a key literally named
        # 'self' can still be passed via **kwds.
        if len(args) > 2:
            raise TypeError('update() takes at most 2 positional '
                            'arguments (%d given)' % (len(args),))
        elif not args:
            raise TypeError('update() takes at least 1 argument (0 given)')
        self = args[0]
        # Make progressively weaker assumptions about "other"
        other = ()
        if len(args) == 2:
            other = args[1]
        if isinstance(other, dict):
            for key in other:
                self[key] = other[key]
        elif hasattr(other, 'keys'):
            for key in other.keys():
                self[key] = other[key]
        else:
            for key, value in other:
                self[key] = value
        for key, value in kwds.items():
            self[key] = value
    __update = update  # let subclasses override update without breaking __init__
    __marker = object()
    def pop(self, key, default=__marker):
        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
        If key is not found, d is returned if given, otherwise KeyError is raised.
        '''
        if key in self:
            result = self[key]
            del self[key]
            return result
        # the private sentinel distinguishes "no default given" from default=None
        if default is self.__marker:
            raise KeyError(key)
        return default
    def setdefault(self, key, default=None):
        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
        if key in self:
            return self[key]
        self[key] = default
        return default
    def __repr__(self, _repr_running={}):
        'od.__repr__() <==> repr(od)'
        # _repr_running guards against infinite recursion when an
        # OrderedDict (directly or indirectly) contains itself; the key is
        # per-object and per-thread.
        call_key = id(self), _get_ident()
        if call_key in _repr_running:
            return '...'
        _repr_running[call_key] = 1
        try:
            if not self:
                return '%s()' % (self.__class__.__name__,)
            return '%s(%r)' % (self.__class__.__name__, self.items())
        finally:
            del _repr_running[call_key]
    def __reduce__(self):
        'Return state information for pickling'
        items = [[k, self[k]] for k in self]
        inst_dict = vars(self).copy()
        # drop attributes every plain OrderedDict has, so only
        # subclass-added state is pickled
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)
    def copy(self):
        'od.copy() -> a shallow copy of od'
        return self.__class__(self)
    @classmethod
    def fromkeys(cls, iterable, value=None):
        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
        and values equal to v (which defaults to None).
        '''
        d = cls()
        for key in iterable:
            d[key] = value
        return d
    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.
        '''
        if isinstance(other, OrderedDict):
            return len(self)==len(other) and self.items() == other.items()
        return dict.__eq__(self, other)
    def __ne__(self, other):
        return not self == other
    # -- the following methods are only used in Python 2.7 --
    def viewkeys(self):
        "od.viewkeys() -> a set-like object providing a view on od's keys"
        return KeysView(self)
    def viewvalues(self):
        "od.viewvalues() -> an object providing a view on od's values"
        return ValuesView(self)
    def viewitems(self):
        "od.viewitems() -> a set-like object providing a view on od's items"
        return ItemsView(self)
| bsd-2-clause |
yhe39/crosswalk-test-suite | misc/sampleapp-android-tests/sampleapp/spacedodgegame_manifestorientationresize.py | 14 | 4199 | #!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Li, Hao<haox.li@intel.com>
import unittest
import os
import sys
import commands
import comm
from TestApp import *
app_name = "Spacedodgegame"
# Android package name and activity derived from the sample app name
package_name = "org.xwalk." + app_name.lower()
active_name = app_name + "Activity"
# location of the sample sources to pack, under the shared sample prefix
sample_src = comm.sample_src_pref + "space-dodge-game/manifest-orientation-resize/"
# module-global TestApp handle, populated by Spacedodgegame.test_2_install
testapp = None
comm.setUp()
class Spacedodgegame(unittest.TestCase):
    """Packs, installs and exercises the Spacedodgegame Crosswalk sample.

    The test methods are numbered because they must run in order: packing
    produces the apk that the later install/launch/uninstall steps consume
    through the module-global ``testapp``.
    """
    def test_1_pack(self):
        """Pack the sample app into an apk with the crosswalk app tools."""
        # clean up old apk
        commands.getstatusoutput("rm %s%s*" % (comm.build_app_dest, "org.xwalk." + app_name.lower()))
        cmd = "%s --crosswalk=%s --platforms=android --android=%s --targets=%s --enable-remote-debugging %s" % \
              (comm.apptools,
               comm.crosswalkzip,
               comm.MODE,
               comm.ARCH,
               sample_src)
        comm.pack(cmd, app_name.lower(), self)
    def test_2_install(self):
        """Install the packed apk, replacing any existing installation."""
        apk_file = commands.getstatusoutput("ls %s| grep %s" % (comm.build_app_dest, app_name.lower()))[1]
        if apk_file.endswith(".apk"):
            global testapp
            testapp = TestApp(comm.device, comm.build_app_dest + apk_file, package_name, active_name)
            if testapp.isInstalled():
                testapp.uninstall()
            self.assertTrue(testapp.install())
        else:
            print("-->> No packed %s apk in %s" % (app_name, comm.build_app_dest))
            self.assertTrue(False)
    def test_3_launch(self):
        """Launch the installed app via TestApp.launch()."""
        if testapp is not None:
            self.assertTrue(testapp.launch())
        else:
            print("-->> Fail to pack %s apk" % app_name)
            self.assertTrue(False)
    def test_4_switch(self):
        """Exercise TestApp.switch() on the app."""
        if testapp is not None:
            self.assertTrue(testapp.switch())
        else:
            print("-->> Fail to pack %s apk" % app_name)
            self.assertTrue(False)
    def test_5_stop(self):
        """Stop the app via TestApp.stop()."""
        if testapp is not None:
            self.assertTrue(testapp.stop())
        else:
            print("-->> Fail to pack %s apk" % app_name)
            self.assertTrue(False)
    def test_6_uninstall(self):
        """Uninstall the app."""
        if testapp is not None:
            self.assertTrue(testapp.uninstall())
        else:
            print("-->> Fail to pack %s apk" % app_name)
            self.assertTrue(False)
    def test_7_uninstall_when_app_running(self):
        """Uninstalling must also succeed while the app is running."""
        if testapp is not None:
            # make sure the app is installed and running before uninstalling
            if not testapp.isInstalled():
                testapp.install()
            if not testapp.isRunning():
                testapp.launch()
            self.assertTrue(testapp.uninstall())
        else:
            print("-->> Fail to pack %s apk" % app_name)
            self.assertTrue(False)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
SteveHNH/ansible | lib/ansible/modules/cloud/rackspace/rax_network.py | 49 | 3726 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rax_network
short_description: create / delete an isolated network in Rackspace Public Cloud
description:
- creates / deletes a Rackspace Public Cloud isolated network.
version_added: "1.4"
options:
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
label:
description:
- Label (name) to give the network
default: null
cidr:
description:
- cidr of the network being created
default: null
author:
- "Christopher H. Laco (@claco)"
- "Jesse Keating (@j2sol)"
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Build an Isolated Network
gather_facts: False
tasks:
- name: Network create request
local_action:
module: rax_network
credentials: ~/.raxpub
label: my-net
cidr: 192.168.3.0/24
state: present
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
def cloud_network(module, state, label, cidr):
    """Create or delete a Rackspace isolated network, then exit the module.

    :param module: AnsibleModule instance used for exit_json/fail_json
    :param state: desired state, 'present' or 'absent'
    :param label: network label (name) to look up or create
    :param cidr: CIDR block for the network (required when state=present)
    """
    changed = False
    network = None
    networks = []

    if not pyrax.cloud_networks:
        module.fail_json(msg='Failed to instantiate client. This '
                             'typically indicates an invalid region or an '
                             'incorrectly capitalized region name.')

    if state == 'present':
        if not cidr:
            module.fail_json(msg='missing required arguments: cidr')

        try:
            network = pyrax.cloud_networks.find_network_by_label(label)
        except pyrax.exceptions.NetworkNotFound:
            try:
                network = pyrax.cloud_networks.create(label, cidr=cidr)
                changed = True
            except Exception as e:
                # '%s' % e (i.e. str(e)) instead of e.message:
                # BaseException.message does not exist on Python 3 and is
                # deprecated since Python 2.6, so the old code would raise
                # AttributeError instead of reporting the real failure.
                module.fail_json(msg='%s' % e)
        except Exception as e:
            module.fail_json(msg='%s' % e)

    elif state == 'absent':
        try:
            network = pyrax.cloud_networks.find_network_by_label(label)
            network.delete()
            changed = True
        except pyrax.exceptions.NetworkNotFound:
            # Already gone; deleting an absent network is not a change.
            pass
        except Exception as e:
            module.fail_json(msg='%s' % e)

    if network:
        instance = dict(id=network.id,
                        label=network.label,
                        cidr=network.cidr)
        networks.append(instance)

    module.exit_json(changed=changed, networks=networks)
def main():
    # Build the argument spec from the shared Rackspace options plus this
    # module's own parameters.
    argument_spec = rax_argument_spec()
    argument_spec.update(
        dict(
            state=dict(default='present',
                       choices=['present', 'absent']),
            label=dict(required=True),
            cidr=dict()
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together(),
    )

    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')

    state = module.params.get('state')
    label = module.params.get('label')
    cidr = module.params.get('cidr')

    # Authenticate/configure the pyrax client before touching the API.
    setup_rax_module(module, pyrax)

    # cloud_network() calls exit_json/fail_json, so it does not return.
    cloud_network(module, state, label, cidr)


if __name__ == '__main__':
    main()
| gpl-3.0 |
x3ro/RIOT | tests/test_tools/tests/01-run.py | 7 | 2270 | #!/usr/bin/env python3
"""Test behaviour of the test running and the term program interaction."""
import sys
import pexpect
from testrunner import run
def _shellping(child, timeout=1):
"""Issue a 'shellping' command.
Raises a pexpect exception on failure.
:param timeout: timeout for the answer
"""
child.sendline('shellping')
child.expect_exact('shellpong\r\n', timeout=timeout)
def _wait_shell_ready(child, numtries=5):
    """Wait until the shell is ready by using 'shellping'."""
    # Swallow TIMEOUTs for the first numtries-1 attempts while the shell
    # is still starting up; the for/else falls through to the final,
    # unguarded attempt only when every earlier attempt timed out.
    for _ in range(numtries - 1):
        try:
            _shellping(child)
        except pexpect.TIMEOUT:
            pass
        else:
            break
    else:
        # This one should fail
        # (last attempt: let the exception propagate so the caller fails
        # loudly if the shell never became ready)
        _shellping(child)
def _test_no_local_echo(child):
    """Verify that there is not local echo while testing."""
    msg = 'true this should not be echoed'
    child.sendline(msg)
    # Index 0 (pexpect.TIMEOUT) is the expected outcome: the message we
    # sent must NOT come back to us.
    res = child.expect_exact([pexpect.TIMEOUT, msg], timeout=1)
    assert res == 0, "There should have been a timeout and not match stdin"
def _test_sending_newline(child):
"""Verify that a empty line can be send to the node.
The local terminal must NOT repeat the previous command.
"""
child.sendline('getchar')
child.sendline('') # send only one newline character
child.expect_exact('getchar 0x0a\r\n')
def _test_clean_output(child):
"""Verify that only what the node sends is received."""
child.sendline('toupper lowercase')
retline = child.readline()
assert retline.strip() == 'LOWERCASE'
def testfunc(child):
    """Run some tests to verify the board under test behaves correctly.

    It currently tests:

    * local echo
    * getting some test output without other messages
    * sending empty lines

    :param child: spawned terminal of the node, provided by testrunner.run
    """
    child.expect_exact("Running 'tests_tools' application")

    _wait_shell_ready(child)

    # Verify there is no local and remote echo as it is disabled
    _test_no_local_echo(child)

    # The node should still answer after the previous one
    _shellping(child)

    # Check that the output is clean without extra terminal output
    _test_clean_output(child)

    # It is possible to send an empty newline
    _test_sending_newline(child)


if __name__ == "__main__":
    sys.exit(run(testfunc))
| lgpl-2.1 |
bartscheers/tkp | tests/test_database/test_sql/test_view.py | 2 | 3897 | import unittest
import tkp.db
from tkp.testutil import db_subs
class TestAugmentedRunningcatalog(unittest.TestCase):
    # Integration test for the `augmented_runningcatalog` database view:
    # inserts a simulated transient over four images, then checks the
    # extra variability/sigma/lightcurve columns the view exposes.

    def setUp(self):
        """
        create a fake transient. Taken from the transient test.

        :return:
        """
        self.database = tkp.db.Database()
        self.dataset = tkp.db.DataSet(data={'description':
                                            "Augmented Runningcatalog test"},
                                      database=self.database)

        self.n_images = 4
        self.new_source_sigma_margin = 3
        image_rms = 1e-3
        detection_thresh = 10
        self.search_params = {'eta_min': 1, 'v_min': 0.1}

        # Flux just above threshold, and flux clearing threshold plus the
        # new-source margin (the latter is used for the transient below).
        self.barely_detectable_flux = 1.01 * image_rms * detection_thresh
        self.reliably_detectable_flux = 1.01 * image_rms * (
            detection_thresh + self.new_source_sigma_margin)

        # 1mJy image RMS, 10-sigma detection threshold = 10mJy threshold.
        test_specific_img_params = {'rms_qc': image_rms, 'rms_min': image_rms,
                                    'rms_max': image_rms,
                                    'detection_thresh': detection_thresh}

        self.im_params = db_subs.generate_timespaced_dbimages_data(
            self.n_images, **test_specific_img_params)
        im_params = self.im_params

        src_tuple = db_subs.example_extractedsource_tuple(
            ra=im_params[0]['centre_ra'],
            dec=im_params[0]['centre_decl'],)
        # A transient that only flares up in the third image.
        transient_src = db_subs.MockSource(
            template_extractedsource=src_tuple,
            lightcurve={im_params[2]['taustart_ts']:
                        self.reliably_detectable_flux}
        )

        for img_pars in im_params:
            db_subs.insert_image_and_simulated_sources(
                self.dataset, img_pars,
                [transient_src],
                self.new_source_sigma_margin)

    def tearDown(self):
        # Discard all database changes made by the test.
        tkp.db.rollback()

    def test_extra_columns(self):
        # The single simulated transient should show up as exactly one
        # row with the expected variability statistics.
        query = """
        SELECT v_int, eta_int,
               sigma_rms_max, sigma_rms_min,
               lightcurve_max, lightcurve_avg
        FROM augmented_runningcatalog
        WHERE dataset = %s
        ORDER BY id
        """ % self.dataset.id
        cursor = tkp.db.execute(query)
        rows = cursor.fetchall()
        self.assertEqual(len(rows), 1)
        v_int, eta_int, sigma_max, sigma_min, lightcurve_max, lightcurve_avg = rows[0]
        self.assertAlmostEqual(v_int, 1.41421356237309)
        self.assertAlmostEqual(eta_int, 344.7938)
        self.assertAlmostEqual(sigma_max, 13.13)
        self.assertAlmostEqual(sigma_min, 13.13)
        self.assertAlmostEqual(lightcurve_max, 0.01313)
        self.assertAlmostEqual(lightcurve_avg, 0.006565)

    @unittest.skip(
        """
        This test fails when we mix the old "augmented runningcatalog" and
        the new SQLAlchemy code. It's unclear why it's suddenly borked, but
        since the relevant query is about to be reimplemented we skip it for
        now and will debug the new version.
        """)
    def test_count(self):
        """
        make sure the augmented view has a reasonable number of rows.
        """
        n_runcats_qry = "select count(id) from runningcatalog"
        n_runcat_flux_qry = "select count(id) from runningcatalog_flux"
        n_in_view_qry = "select count(id) from augmented_runningcatalog"
        n_runcats = tkp.db.execute(n_runcats_qry).fetchall()[0][0]
        n_runcat_flux = tkp.db.execute(n_runcat_flux_qry).fetchall()[0][0]
        n_in_view = tkp.db.execute(n_in_view_qry).fetchall()[0][0]
        # Every runningcatalog entry should appear in the view, and the
        # view should not exceed the per-band flux entry count.
        self.assertGreaterEqual(n_in_view, n_runcats)
        self.assertGreaterEqual(n_runcat_flux, n_in_view)
lcplj123/video-get | extractors/wole.py | 2 | 1929 | #!/usr/bin/env python3
import re
import sys
import json
sys.path.append('..')
from define import *
from utils import *
from extractor import BasicExtractor
from extractors import sohu
class WoLeExtractor(BasicExtractor):
    '''
    56视频下载器 (56.com video downloader).

    56.com pages embed the player through Sohu, so after scraping the
    page this extractor only finds the Sohu URL and delegates the actual
    download to the sohu extractor.
    '''

    def __init__(self, c):
        super(WoLeExtractor, self).__init__(c, WOLE)

    def download(self):
        print('56:start downloading ...')
        # Fetch the page, retrying up to 3 times on transient failures.
        retry = 3
        while retry > 0:
            self.page = get_html(self.c.url)
            if self.page:
                break
            retry -= 1
        if not self.page:
            print('error: request video info error,check url. %s' % (self.c.url,))
            sys.exit(0)
        self.i.vid = self.getVid()
        if not self.i.vid:
            print('error: not find vid! exit...')
            sys.exit(0)
        # 跳转到sohu下载 -- locate the embedded Sohu page URL and hand
        # the real download off to the sohu extractor.
        sohu_url = ''
        r = re.search(r'url\s*\:\s*\'(http\://my\.tv\.sohu\.com/.*?\.shtml)\'', self.page)
        if r:
            sohu_url = r.groups()[0]
        else:
            # Message typo fixed: used to read 'exut...' (siblings print
            # 'exit...').
            print('error: cannot download video.exit...')
            sys.exit(0)
        self.c.url = sohu_url
        sohu.download(self.c)

    # The metadata/query hooks below are unused for 56.com because the
    # download is delegated to sohu; they remain as no-op stubs to
    # satisfy the BasicExtractor interface.
    def query_m3u8(self, *args, **kwargs):
        pass

    def query_real(self, *args, **kwargs):
        pass

    def getVid(self, *args, **kwargs):
        # Placeholder vid: only needs to be truthy for the check above.
        return '000000'

    def getFsize(self, *args, **kwargs):
        pass

    def getTitle(self, *args, **kwargs):
        pass

    def getDesc(self, *args, **kwargs):
        pass

    def getKeywords(self, *args, **kwargs):
        pass

    def getCategory(self, *args, **kwargs):
        pass

    def getDuration(self, *args, **kwargs):
        pass

    def getUptime(self, *args, **kwargs):
        pass
def download(c):
    """Module-level entry point: run a WoLeExtractor for config *c*."""
    return WoLeExtractor(c).download()
Srisai85/scikit-learn | sklearn/base.py | 65 | 17384 | """Base classes for all estimators."""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import copy
import inspect
import warnings
import numpy as np
from scipy import sparse
from .externals import six
class ChangedBehaviorWarning(UserWarning):
    """Warning category used to flag that an estimator's behaviour changed."""
    pass
##############################################################################
def clone(estimator, safe=True):
    """Constructs a new estimator with the same parameters.

    Clone does a deep copy of the model in an estimator
    without actually copying attached data. It yields a new estimator
    with the same parameters that has not been fit on any data.

    Parameters
    ----------
    estimator: estimator object, or list, tuple or set of objects
        The estimator or group of estimators to be cloned

    safe: boolean, optional
        If safe is false, clone will fall back to a deepcopy on objects
        that are not estimators.

    Raises
    ------
    TypeError
        If ``safe`` is true and ``estimator`` has no ``get_params``.
    RuntimeError
        If the clone's constructor did not faithfully store a parameter.
    """
    estimator_type = type(estimator)
    # XXX: not handling dictionaries
    if estimator_type in (list, tuple, set, frozenset):
        # Clone each member, preserving the container type.
        return estimator_type([clone(e, safe=safe) for e in estimator])
    elif not hasattr(estimator, 'get_params'):
        if not safe:
            return copy.deepcopy(estimator)
        else:
            raise TypeError("Cannot clone object '%s' (type %s): "
                            "it does not seem to be a scikit-learn estimator "
                            "as it does not implement a 'get_params' methods."
                            % (repr(estimator), type(estimator)))
    klass = estimator.__class__
    new_object_params = estimator.get_params(deep=False)
    for name, param in six.iteritems(new_object_params):
        new_object_params[name] = clone(param, safe=False)
    new_object = klass(**new_object_params)
    params_set = new_object.get_params(deep=False)

    # quick sanity check of the parameters of the clone
    for name in new_object_params:
        param1 = new_object_params[name]
        param2 = params_set[name]
        if isinstance(param1, np.ndarray):
            # For most ndarrays, we do not test for complete equality
            # (too expensive); instead compare shape/dtype and the first
            # and last elements.
            if not isinstance(param2, type(param1)):
                equality_test = False
            elif (param1.ndim > 0
                    and param1.shape[0] > 0
                    and isinstance(param2, np.ndarray)
                    and param2.ndim > 0
                    and param2.shape[0] > 0):
                equality_test = (
                    param1.shape == param2.shape
                    and param1.dtype == param2.dtype
                    # We have to use '.flat' for 2D arrays
                    and param1.flat[0] == param2.flat[0]
                    and param1.flat[-1] == param2.flat[-1]
                )
            else:
                equality_test = np.all(param1 == param2)
        elif sparse.issparse(param1):
            # For sparse matrices equality doesn't work
            if not sparse.issparse(param2):
                equality_test = False
            elif param1.size == 0 or param2.size == 0:
                equality_test = (
                    param1.__class__ == param2.__class__
                    and param1.size == 0
                    and param2.size == 0
                )
            else:
                # Spot-check first/last stored values plus structure.
                equality_test = (
                    param1.__class__ == param2.__class__
                    and param1.data[0] == param2.data[0]
                    and param1.data[-1] == param2.data[-1]
                    and param1.nnz == param2.nnz
                    and param1.shape == param2.shape
                )
        else:
            new_obj_val = new_object_params[name]
            params_set_val = params_set[name]
            # The following construct is required to check equality on special
            # singletons such as np.nan that are not equal to them-selves:
            equality_test = (new_obj_val == params_set_val or
                             new_obj_val is params_set_val)
        if not equality_test:
            raise RuntimeError('Cannot clone object %s, as the constructor '
                               'does not seem to set parameter %s' %
                               (estimator, name))

    return new_object
###############################################################################
def _pprint(params, offset=0, printer=repr):
    """Pretty print the dictionary 'params'

    Parameters
    ----------
    params: dict
        The dictionary to pretty print

    offset: int
        The offset in characters to add at the begin of each line.

    printer:
        The function to convert entries to strings, typically
        the builtin str or repr
    """
    # Do a multi-line justified repr:
    # temporarily shrink numpy's printoptions so embedded arrays stay
    # short; restored at the end of the function.
    options = np.get_printoptions()
    np.set_printoptions(precision=5, threshold=64, edgeitems=2)
    params_list = list()
    this_line_length = offset
    line_sep = ',\n' + (1 + offset // 2) * ' '
    for i, (k, v) in enumerate(sorted(six.iteritems(params))):
        if type(v) is float:
            # use str for representing floating point numbers
            # this way we get consistent representation across
            # architectures and versions.
            this_repr = '%s=%s' % (k, str(v))
        else:
            # use repr of the rest
            this_repr = '%s=%s' % (k, printer(v))
        if len(this_repr) > 500:
            # Truncate huge reprs, keeping head and tail.
            this_repr = this_repr[:300] + '...' + this_repr[-100:]
        if i > 0:
            # Break the line when it would exceed ~75 chars or the repr
            # itself is multi-line.
            if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr):
                params_list.append(line_sep)
                this_line_length = len(line_sep)
            else:
                params_list.append(', ')
                this_line_length += 2
        params_list.append(this_repr)
        this_line_length += len(this_repr)

    np.set_printoptions(**options)
    lines = ''.join(params_list)
    # Strip trailing space to avoid nightmare in doctests
    lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n'))
    return lines
###############################################################################
class BaseEstimator(object):
    """Base class for all estimators in scikit-learn

    Notes
    -----
    All estimators should specify all the parameters that can be set
    at the class level in their ``__init__`` as explicit keyword
    arguments (no ``*args`` or ``**kwargs``).
    """

    @classmethod
    def _get_param_names(cls):
        """Get parameter names for the estimator"""
        # fetch the constructor or the original constructor before
        # deprecation wrapping if any
        init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
        if init is object.__init__:
            # No explicit constructor to introspect
            return []

        # introspect the constructor arguments to find the model parameters
        # to represent
        # NOTE(review): inspect.getargspec is removed in Python 3.11+;
        # fine for the py2/py3 era this code targets.
        args, varargs, kw, default = inspect.getargspec(init)
        if varargs is not None:
            raise RuntimeError("scikit-learn estimators should always "
                               "specify their parameters in the signature"
                               " of their __init__ (no varargs)."
                               " %s doesn't follow this convention."
                               % (cls, ))
        # Remove 'self'
        # XXX: This is going to fail if the init is a staticmethod, but
        # who would do this?
        args.pop(0)
        args.sort()
        return args

    def get_params(self, deep=True):
        """Get parameters for this estimator.

        Parameters
        ----------
        deep: boolean, optional
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.

        Returns
        -------
        params : mapping of string to any
            Parameter names mapped to their values.
        """
        out = dict()
        for key in self._get_param_names():
            # We need deprecation warnings to always be on in order to
            # catch deprecated param values.
            # This is set in utils/__init__.py but it gets overwritten
            # when running under python3 somehow.
            warnings.simplefilter("always", DeprecationWarning)
            try:
                with warnings.catch_warnings(record=True) as w:
                    value = getattr(self, key, None)
                if len(w) and w[0].category == DeprecationWarning:
                    # if the parameter is deprecated, don't show it
                    continue
            finally:
                # Remove the filter installed by simplefilter above.
                warnings.filters.pop(0)

            # XXX: should we rather test if instance of estimator?
            if deep and hasattr(value, 'get_params'):
                # Flatten nested estimator params as '<name>__<subname>'.
                deep_items = value.get_params().items()
                out.update((key + '__' + k, val) for k, val in deep_items)
            out[key] = value
        return out

    def set_params(self, **params):
        """Set the parameters of this estimator.

        The method works on simple estimators as well as on nested objects
        (such as pipelines). The former have parameters of the form
        ``<component>__<parameter>`` so that it's possible to update each
        component of a nested object.

        Returns
        -------
        self
        """
        if not params:
            # Simple optimisation to gain speed (inspect is slow)
            return self
        valid_params = self.get_params(deep=True)
        for key, value in six.iteritems(params):
            split = key.split('__', 1)
            if len(split) > 1:
                # nested objects case: delegate to the sub-estimator.
                name, sub_name = split
                if name not in valid_params:
                    raise ValueError('Invalid parameter %s for estimator %s. '
                                     'Check the list of available parameters '
                                     'with `estimator.get_params().keys()`.' %
                                     (name, self))
                sub_object = valid_params[name]
                sub_object.set_params(**{sub_name: value})
            else:
                # simple objects case
                if key not in valid_params:
                    raise ValueError('Invalid parameter %s for estimator %s. '
                                     'Check the list of available parameters '
                                     'with `estimator.get_params().keys()`.' %
                                     (key, self.__class__.__name__))
                setattr(self, key, value)
        return self

    def __repr__(self):
        class_name = self.__class__.__name__
        return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False),
                                               offset=len(class_name),),)
###############################################################################
class ClassifierMixin(object):
    """Mixin class for all classifiers in scikit-learn."""
    _estimator_type = "classifier"

    def score(self, X, y, sample_weight=None):
        """Returns the mean accuracy on the given test data and labels.

        In multi-label classification, this is the subset accuracy
        which is a harsh metric since you require for each sample that
        each label set be correctly predicted.

        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)
            Test samples.

        y : array-like, shape = (n_samples) or (n_samples, n_outputs)
            True labels for X.

        sample_weight : array-like, shape = [n_samples], optional
            Sample weights.

        Returns
        -------
        score : float
            Mean accuracy of self.predict(X) wrt. y.
        """
        # Imported locally, presumably to avoid a circular import between
        # base and metrics at module load time -- verify before moving.
        from .metrics import accuracy_score
        return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
###############################################################################
class RegressorMixin(object):
    """Mixin class for all regression estimators in scikit-learn."""
    _estimator_type = "regressor"

    def score(self, X, y, sample_weight=None):
        """Returns the coefficient of determination R^2 of the prediction.

        The coefficient R^2 is defined as (1 - u/v), where u is the regression
        sum of squares ((y_true - y_pred) ** 2).sum() and v is the residual
        sum of squares ((y_true - y_true.mean()) ** 2).sum().
        Best possible score is 1.0 and it can be negative (because the
        model can be arbitrarily worse). A constant model that always
        predicts the expected value of y, disregarding the input features,
        would get a R^2 score of 0.0.

        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)
            Test samples.

        y : array-like, shape = (n_samples) or (n_samples, n_outputs)
            True values for X.

        sample_weight : array-like, shape = [n_samples], optional
            Sample weights.

        Returns
        -------
        score : float
            R^2 of self.predict(X) wrt. y.
        """
        # Imported locally, presumably to avoid a circular import between
        # base and metrics at module load time -- verify before moving.
        from .metrics import r2_score
        return r2_score(y, self.predict(X), sample_weight=sample_weight)
###############################################################################
class ClusterMixin(object):
    """Mixin class for all cluster estimators in scikit-learn."""
    _estimator_type = "clusterer"

    def fit_predict(self, X, y=None):
        """Performs clustering on X and returns cluster labels.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            Input data.

        Returns
        -------
        y : ndarray, shape (n_samples,)
            cluster labels
        """
        # non-optimized default implementation; override when a better
        # method is possible for a given clustering algorithm
        # (fit, then expose the labels computed during fitting; fit's
        # return value is deliberately ignored since it need not be self)
        self.fit(X)
        return self.labels_
class BiclusterMixin(object):
    """Mixin class for all bicluster estimators in scikit-learn"""

    @property
    def biclusters_(self):
        """Convenient way to get row and column indicators together.

        Returns the ``rows_`` and ``columns_`` members.
        """
        return self.rows_, self.columns_

    def get_indices(self, i):
        """Row and column indices of the i'th bicluster.

        Only works if ``rows_`` and ``columns_`` attributes exist.

        Returns
        -------
        row_ind : np.array, dtype=np.intp
            Indices of rows in the dataset that belong to the bicluster.
        col_ind : np.array, dtype=np.intp
            Indices of columns in the dataset that belong to the bicluster.
        """
        # rows_/columns_ hold boolean indicator vectors; nonzero turns
        # them into integer index arrays.
        rows = self.rows_[i]
        columns = self.columns_[i]
        return np.nonzero(rows)[0], np.nonzero(columns)[0]

    def get_shape(self, i):
        """Shape of the i'th bicluster.

        Returns
        -------
        shape : (int, int)
            Number of rows and columns (resp.) in the bicluster.
        """
        indices = self.get_indices(i)
        return tuple(len(i) for i in indices)

    def get_submatrix(self, i, data):
        """Returns the submatrix corresponding to bicluster `i`.

        Works with sparse matrices. Only works if ``rows_`` and
        ``columns_`` attributes exist.
        """
        from .utils.validation import check_array
        data = check_array(data, accept_sparse='csr')
        row_ind, col_ind = self.get_indices(i)
        # Outer-product style indexing selects the row/column crossing.
        return data[row_ind[:, np.newaxis], col_ind]
###############################################################################
class TransformerMixin(object):
    """Mixin class for all transformers in scikit-learn."""

    def fit_transform(self, X, y=None, **fit_params):
        """Fit to data, then transform it.

        Fits transformer to X and y with optional parameters fit_params
        and returns a transformed version of X.

        Parameters
        ----------
        X : numpy array of shape [n_samples, n_features]
            Training set.

        y : numpy array of shape [n_samples]
            Target values.

        Returns
        -------
        X_new : numpy array of shape [n_samples, n_features_new]
            Transformed array.
        """
        # Default implementation chains fit and transform; estimators
        # with a more efficient combined path should override this.
        if y is None:
            # fit method of arity 1 (unsupervised transformation)
            fitted = self.fit(X, **fit_params)
        else:
            # fit method of arity 2 (supervised transformation)
            fitted = self.fit(X, y, **fit_params)
        return fitted.transform(X)
###############################################################################
class MetaEstimatorMixin(object):
    """Mixin class for all meta estimators in scikit-learn."""
    # this is just a tag for the moment -- no behaviour is added.
###############################################################################
def is_classifier(estimator):
    """Returns True if the given estimator is (probably) a classifier."""
    # Estimators advertise their role through the _estimator_type tag.
    estimator_type = getattr(estimator, "_estimator_type", None)
    return estimator_type == "classifier"
def is_regressor(estimator):
    """Returns True if the given estimator is (probably) a regressor."""
    # Estimators advertise their role through the _estimator_type tag.
    estimator_type = getattr(estimator, "_estimator_type", None)
    return estimator_type == "regressor"
| bsd-3-clause |
abhyasi/euler_py | euler/sol_6.py | 1 | 1480 | #The sum of the squares of the first ten natural numbers is,
# 1^2 + 2^2 + ... + 10^2 = 385
# The square of the sum of the first ten natural numbers is,
# (1 + 2 + ... + 10)^2 = 552 = 3025
# Hence the difference between the sum of the squares of the first
# ten natural numbers and the square of the sum is 3025 - 385 = 2640.
# Find the difference between the sum of the squares of the first
# one hundred natural numbers and the square of the sum.
import math
def sol_6():
    """
    Find the difference between the sum of the squares of the first
    one hundred natural numbers and the square of the sum.
    """
    # Project Euler problem 6: delegate to the generic range helper.
    return square_diff(1, 100)
def sum_squares(start, end):
    """Return the sum of k**2 for k in [start, end], inclusive.

    Rewritten with exact integer arithmetic: the original used the bare
    ``reduce`` builtin (a NameError on Python 3) and ``math.pow`` floats,
    which lose precision for large ranges. Also tolerates an empty range
    (returns 0) where the original reduce() raised TypeError.
    """
    return sum(k * k for k in range(start, end + 1))
def square_sum(start, end):
    """Return the square of the sum of the integers in [start, end].

    Rewritten with exact integer arithmetic: the original used the bare
    ``reduce`` builtin (a NameError on Python 3) and ``math.pow`` floats.
    """
    total = sum(range(start, end + 1))
    return total * total
def square_diff(start, end):
    """Difference between square-of-sum and sum-of-squares over the range."""
    sum_then_square = square_sum(start, end)
    square_then_sum = sum_squares(start, end)
    return sum_then_square - square_then_sum
def square_diff_progression(start, end):
    """Closed-form variant of square_diff using progression formulas.

    An implicit assumption of this function is that we are talking
    about the first n numbers, that is start = 1, end = n.

    Rewritten with exact integer arithmetic (the original went through
    ``math.pow`` floats and true division, which loses precision for
    large n).
    """
    n = end - start + 1
    # Sum via arithmetic progression: (n*(n+1)/2) squared; n*(n+1) is
    # always even, so // is exact.
    square_of_sum = (n * (n + 1) // 2) ** 2
    # Sum-of-squares formula (derivable by induction):
    # n*(n+1)*(2n+1)/6, always divisible by 6.
    sum_of_squares = (n * (2 * n + 1) * (n + 1)) // 6
    return square_of_sum - sum_of_squares
| gpl-2.0 |
laumann/servo | tests/wpt/css-tests/tools/html5lib/html5lib/tests/test_tokenizer.py | 420 | 6544 | from __future__ import absolute_import, division, unicode_literals
import json
import warnings
import re
from .support import get_data_files
from html5lib.tokenizer import HTMLTokenizer
from html5lib import constants
class TokenizerTestParser(object):
    # Drives HTMLTokenizer over an input string and records the emitted
    # tokens in the list-based format used by the tokenizer test fixtures.

    def __init__(self, initialState, lastStartTag=None):
        self.tokenizer = HTMLTokenizer
        self._state = initialState
        self._lastStartTag = lastStartTag

    def parse(self, stream, encoding=None, innerHTML=False):
        # NOTE(review): innerHTML is accepted but unused here.
        tokenizer = self.tokenizer(stream, encoding)
        self.outputTokens = []
        tokenizer.state = getattr(tokenizer, self._state)
        if self._lastStartTag is not None:
            tokenizer.currentToken = {"type": "startTag",
                                      "name": self._lastStartTag}

        # Map numeric token-type ids back to their names so we can
        # dispatch to the process<Name> handlers below.
        types = dict((v, k) for k, v in constants.tokenTypes.items())
        for token in tokenizer:
            getattr(self, 'process%s' % types[token["type"]])(token)

        return self.outputTokens

    def processDoctype(self, token):
        self.outputTokens.append(["DOCTYPE", token["name"], token["publicId"],
                                  token["systemId"], token["correct"]])

    def processStartTag(self, token):
        # dict() keeps the last duplicate key, so reversing first means
        # the FIRST occurrence of a duplicated attribute wins.
        self.outputTokens.append(["StartTag", token["name"],
                                  dict(token["data"][::-1]), token["selfClosing"]])

    def processEmptyTag(self, token):
        if token["name"] not in constants.voidElements:
            self.outputTokens.append("ParseError")
        self.outputTokens.append(["StartTag", token["name"], dict(token["data"][::-1])])

    def processEndTag(self, token):
        self.outputTokens.append(["EndTag", token["name"],
                                  token["selfClosing"]])

    def processComment(self, token):
        self.outputTokens.append(["Comment", token["data"]])

    def processSpaceCharacters(self, token):
        self.outputTokens.append(["Character", token["data"]])
        # After the first space token, handle further ones exactly like
        # ordinary characters (rebinds the instance attribute).
        self.processSpaceCharacters = self.processCharacters

    def processCharacters(self, token):
        self.outputTokens.append(["Character", token["data"]])

    def processEOF(self, token):
        # EOF produces no output token.
        pass

    def processParseError(self, token):
        self.outputTokens.append(["ParseError", token["data"]])
def concatenateCharacterTokens(tokens):
    """Merge runs of adjacent Character tokens into single tokens."""
    merged = []
    for tok in tokens:
        is_char = "ParseError" not in tok and tok[0] == "Character"
        if (is_char and merged and "ParseError" not in merged[-1]
                and merged[-1][0] == "Character"):
            # Extend the previous character run in place.
            merged[-1][1] += tok[1]
        else:
            merged.append(tok)
    return merged
def normalizeTokens(tokens):
    """Collapse ['ParseError', ...] entries to the bare 'ParseError' string."""
    # TODO: convert tests to reflect arrays
    for idx, tok in enumerate(tokens):
        if tok[0] == 'ParseError':
            tokens[idx] = 'ParseError'
    return tokens
def tokensMatch(expectedTokens, receivedTokens, ignoreErrorOrder,
                ignoreErrors=False):
    """Test whether the test has passed or failed

    If the ignoreErrorOrder flag is set to true we don't test the relative
    positions of parse errors and non parse errors
    """
    # Only compare the selfClosing flag when the expectations carry it
    # (StartTag tokens of length 4 / EndTag tokens of length 3).
    checkSelfClosing = False
    for token in expectedTokens:
        if (token[0] == "StartTag" and len(token) == 4
                or token[0] == "EndTag" and len(token) == 3):
            checkSelfClosing = True
            break

    if not checkSelfClosing:
        # NOTE: mutates receivedTokens by stripping the trailing
        # selfClosing flag from each tag token.
        for token in receivedTokens:
            if token[0] == "StartTag" or token[0] == "EndTag":
                token.pop()

    if not ignoreErrorOrder and not ignoreErrors:
        # Strict comparison: order and errors both significant.
        return expectedTokens == receivedTokens
    else:
        # Sort the tokens into two groups; non-parse errors and parse errors
        tokens = {"expected": [[], []], "received": [[], []]}
        for tokenType, tokenList in zip(list(tokens.keys()),
                                        (expectedTokens, receivedTokens)):
            for token in tokenList:
                if token != "ParseError":
                    tokens[tokenType][0].append(token)
                else:
                    if not ignoreErrors:
                        tokens[tokenType][1].append(token)
        return tokens["expected"] == tokens["received"]
def unescape(test):
    """Decode unicode-escape sequences in a doubleEscaped fixture, in place."""
    def _unescape_str(text):
        return text.encode("utf-8").decode("unicode-escape")

    test["input"] = _unescape_str(test["input"])
    for token in test["output"]:
        if token == "ParseError":
            continue
        token[1] = _unescape_str(token[1])
        if len(token) > 2:
            # Re-key the attribute mapping with unescaped names/values.
            for key, value in token[2]:
                del token[2][key]
                token[2][_unescape_str(key)] = _unescape_str(value)
    return test
def runTokenizerTest(test):
    """Run one fixture dict against the tokenizer and assert the emitted
    tokens match the expectation.

    :param test: fixture dict with at least 'input', 'output' and
        'initialState' keys; 'lastStartTag' and 'ignoreErrorOrder' are
        optional.
    """
    # Turn all warnings into errors so deprecations surface as failures.
    warnings.resetwarnings()
    warnings.simplefilter("error")

    expected = concatenateCharacterTokens(test['output'])
    if 'lastStartTag' not in test:
        test['lastStartTag'] = None
    parser = TokenizerTestParser(test['initialState'],
                                 test['lastStartTag'])
    tokens = parser.parse(test['input'])
    tokens = concatenateCharacterTokens(tokens)
    received = normalizeTokens(tokens)
    errorMsg = "\n".join(["\n\nInitial state:",
                          test['initialState'],
                          "\nInput:", test['input'],
                          "\nExpected:", repr(expected),
                          "\nreceived:", repr(tokens)])
    # (removed a no-op ``errorMsg = errorMsg`` self-assignment)
    ignoreErrorOrder = test.get('ignoreErrorOrder', False)
    assert tokensMatch(expected, received, ignoreErrorOrder, True), errorMsg
def _doCapitalize(match):
    """Uppercase the word character captured after a non-word run."""
    return match.group(1).upper()

_capitalizeRe = re.compile(r"\W+(\w)").sub


def capitalize(s):
    """Lower-case *s*, then camel-case it at non-word boundaries.

    e.g. 'Data state' -> 'dataState'.
    """
    return _capitalizeRe(_doCapitalize, s.lower())
def testTokenizer():
    # Nose-style test generator: yields a (runTokenizerTest, fixture)
    # pair for every test in every tokenizer data file.
    for filename in get_data_files('tokenizer', '*.test'):
        with open(filename) as fp:
            tests = json.load(fp)
            if 'tests' in tests:
                for index, test in enumerate(tests['tests']):
                    if 'initialStates' not in test:
                        # Default starting state per the fixture format.
                        test["initialStates"] = ["Data state"]
                    if 'doubleEscaped' in test:
                        test = unescape(test)
                    for initialState in test["initialStates"]:
                        # Convert e.g. 'Data state' -> 'dataState' to match
                        # the tokenizer's attribute names.
                        test["initialState"] = capitalize(initialState)
                        yield runTokenizerTest, test
| mpl-2.0 |
Oteng/youtube-dl | youtube_dl/extractor/varzesh3.py | 120 | 1696 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class Varzesh3IE(InfoExtractor):
    # Extractor for video.varzesh3.com (Iranian sports video portal).
    _VALID_URL = r'https?://(?:www\.)?video\.varzesh3\.com/(?:[^/]+/)+(?P<id>[^/]+)/?'
    _TEST = {
        'url': 'http://video.varzesh3.com/germany/bundesliga/5-%D9%88%D8%A7%DA%A9%D9%86%D8%B4-%D8%A8%D8%B1%D8%AA%D8%B1-%D8%AF%D8%B1%D9%88%D8%A7%D8%B2%D9%87%E2%80%8C%D8%A8%D8%A7%D9%86%D8%A7%D9%86%D8%9B%D9%87%D9%81%D8%AA%D9%87-26-%D8%A8%D9%88%D9%86%D8%AF%D8%B3/',
        'md5': '2a933874cb7dce4366075281eb49e855',
        'info_dict': {
            'id': '76337',
            'ext': 'mp4',
            'title': '۵ واکنش برتر دروازهبانان؛هفته ۲۶ بوندسلیگا',
            'description': 'فصل ۲۰۱۵-۲۰۱۴',
            'thumbnail': 're:^https?://.*\.jpg$',
        }
    }

    def _real_extract(self, url):
        display_id = self._match_id(url)

        webpage = self._download_webpage(url, display_id)

        # Direct media URL from the page's <source> element.
        video_url = self._search_regex(
            r'<source[^>]+src="([^"]+)"', webpage, 'video url')

        title = self._og_search_title(webpage)
        description = self._html_search_regex(
            r'(?s)<div class="matn">(.+?)</div>',
            webpage, 'description', fatal=False)
        thumbnail = self._og_search_thumbnail(webpage)

        # Prefer the numeric post id from the canonical/shortlink tag;
        # fall back to the URL slug when absent.
        video_id = self._search_regex(
            r"<link[^>]+rel='(?:canonical|shortlink)'[^>]+href='/\?p=([^']+)'",
            webpage, display_id, default=display_id)

        return {
            'url': video_url,
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
        }
| unlicense |
stackforge/wsme | wsme/utils.py | 1 | 3304 | import decimal
import datetime
import pytz
import re
from six.moves import builtins, http_client
try:
import dateutil.parser
except ImportError:
dateutil = None # noqa
# ISO 8601 component patterns.  The *_re names are first built as plain
# strings so they can be composed into datetime_re, then rebound to
# compiled pattern objects below.
date_re = r'(?P<year>-?\d{4,})-(?P<month>\d{2})-(?P<day>\d{2})'
time_re = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})' + \
          r'(\.(?P<sec_frac>\d+))?'
tz_re = r'((?P<tz_sign>[+-])(?P<tz_hour>\d{2}):(?P<tz_min>\d{2}))' + \
        r'|(?P<tz_z>Z)'

datetime_re = re.compile(
    '%sT%s(%s)?' % (date_re, time_re, tz_re))
date_re = re.compile(date_re)
time_re = re.compile('%s(%s)?' % (time_re, tz_re))
# Use the gettext-installed ``_`` if the runtime provides one; otherwise
# fall back to a no-op translation function.
if hasattr(builtins, '_'):
    _ = builtins._
else:
    def _(s):
        return s
def parse_isodate(value):
    """Parse an ISO 8601 date string into a ``datetime.date``.

    Raises ValueError for strings that do not match or are out of range.
    """
    match = date_re.match(value)
    if match is None:
        raise ValueError("'%s' is not a legal date value" % (value))
    year, month, day = (int(match.group(part))
                        for part in ('year', 'month', 'day'))
    try:
        return datetime.date(year, month, day)
    except ValueError:
        raise ValueError("'%s' is a out-of-range date" % (value))
def parse_isotime(value):
    """Parse an ISO 8601 time string into a ``datetime.time``.

    Raises ValueError for strings that do not match or are out of range.
    """
    m = time_re.match(value)
    if m is None:
        raise ValueError("'%s' is not a legal time value" % (value))
    try:
        ms = 0
        if m.group('sec_frac') is not None:
            # Round the fractional-second digits to microseconds via
            # Decimal to avoid float artifacts, then drop the '0.' prefix.
            f = decimal.Decimal('0.' + m.group('sec_frac'))
            f = str(f.quantize(decimal.Decimal('0.000001')))
            ms = int(f[2:])
        tz = _parse_tzparts(m.groupdict())
        return datetime.time(
            int(m.group('hour')),
            int(m.group('min')),
            int(m.group('sec')),
            ms,
            tz)
    except ValueError:
        raise ValueError("'%s' is a out-of-range time" % (value))
def parse_isodatetime(value):
    """Parse an ISO 8601 datetime string into a ``datetime.datetime``.

    Delegates to ``dateutil`` when it is installed; otherwise falls back
    to this module's regular-expression based parser.  Raises ValueError
    on malformed or out-of-range input.
    """
    if dateutil:
        return dateutil.parser.parse(value)
    match = datetime_re.match(value)
    if match is None:
        raise ValueError("'%s' is not a legal datetime value" % (value))
    try:
        microseconds = 0
        frac = match.group('sec_frac')
        if frac is not None:
            # Quantize the fraction to six decimal places, then read the
            # digits after "0." as the microsecond count.
            quantized = decimal.Decimal('0.' + frac).quantize(
                decimal.Decimal('0.000001'))
            microseconds = int(str(quantized)[2:])
        tzinfo = _parse_tzparts(match.groupdict())
        return datetime.datetime(
            int(match.group('year')),
            int(match.group('month')),
            int(match.group('day')),
            int(match.group('hour')),
            int(match.group('min')),
            int(match.group('sec')),
            microseconds,
            tzinfo)
    except ValueError:
        raise ValueError("'%s' is a out-of-range datetime" % (value))
def _parse_tzparts(parts):
    """Build a tzinfo from the named timezone groups of a regex match.

    *parts* is the ``groupdict()`` of ``time_re``/``datetime_re``.
    Returns ``pytz.UTC`` for a literal 'Z', a ``pytz.FixedOffset`` for a
    numeric offset, or None when no timezone was given.
    """
    if parts.get('tz_z') == 'Z':
        return pytz.UTC
    if not parts.get('tz_min'):
        # No numeric offset captured -> naive time.
        return None
    offset_minutes = int(parts['tz_hour']) * 60 + int(parts['tz_min'])
    if parts['tz_sign'] == '-':
        offset_minutes = -offset_minutes
    return pytz.FixedOffset(offset_minutes)
def is_valid_code(code_value):
    """Tell whether *code_value* is a registered HTTP response status code."""
    return code_value in http_client.responses
def is_client_error(code):
    """Tell whether *code* is an HTTP client-error status (RFC 2616, 4xx)."""
    return 400 <= code and code < 500
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict # noqa
| mit |
orangeduck/PyAutoC | Python27/Lib/test/test_stringprep.py | 114 | 3245 | # To fully test this module, we would need a copy of the stringprep tables.
# Since we don't have them, this test checks only a few codepoints.
import unittest
from test import test_support
from stringprep import *
class StringprepTests(unittest.TestCase):
    """Spot-check stringprep's RFC 3454 tables on a few known codepoints."""

    def test(self):
        # Table A.1: codepoints unassigned in Unicode 3.2.
        self.assertTrue(in_table_a1(u"\u0221"))
        self.assertFalse(in_table_a1(u"\u0222"))
        # Table B.1: characters commonly mapped to nothing.
        self.assertTrue(in_table_b1(u"\u00ad"))
        self.assertFalse(in_table_b1(u"\u00ae"))
        # Tables B.2/B.3: case-folding maps.  The original used
        # assertTrue(actual, expected), which treats the second argument
        # as a failure *message* and never compares anything; assertEqual
        # actually verifies the mapping.
        self.assertEqual(map_table_b2(u"\u0041"), u"\u0061")
        self.assertEqual(map_table_b2(u"\u0061"), u"\u0061")
        self.assertEqual(map_table_b3(u"\u0041"), u"\u0061")
        self.assertEqual(map_table_b3(u"\u0061"), u"\u0061")
        # Tables C.1.1/C.1.2: space characters.
        self.assertTrue(in_table_c11(u"\u0020"))
        self.assertFalse(in_table_c11(u"\u0021"))
        self.assertTrue(in_table_c12(u"\u00a0"))
        self.assertFalse(in_table_c12(u"\u00a1"))
        self.assertTrue(in_table_c11_c12(u"\u00a0"))
        self.assertFalse(in_table_c11_c12(u"\u00a1"))
        # Tables C.2.1/C.2.2: control characters.
        self.assertTrue(in_table_c21(u"\u001f"))
        self.assertFalse(in_table_c21(u"\u0020"))
        self.assertTrue(in_table_c22(u"\u009f"))
        self.assertFalse(in_table_c22(u"\u00a0"))
        self.assertTrue(in_table_c21_c22(u"\u009f"))
        self.assertFalse(in_table_c21_c22(u"\u00a0"))
        # Tables C.3-C.8: private use, non-characters, surrogates,
        # inappropriate / change-display / tagging codepoints.
        self.assertTrue(in_table_c3(u"\ue000"))
        self.assertFalse(in_table_c3(u"\uf900"))
        self.assertTrue(in_table_c4(u"\uffff"))
        self.assertFalse(in_table_c4(u"\u0000"))
        self.assertTrue(in_table_c5(u"\ud800"))
        self.assertFalse(in_table_c5(u"\ud7ff"))
        self.assertTrue(in_table_c6(u"\ufff9"))
        self.assertFalse(in_table_c6(u"\ufffe"))
        self.assertTrue(in_table_c7(u"\u2ff0"))
        self.assertFalse(in_table_c7(u"\u2ffc"))
        self.assertTrue(in_table_c8(u"\u0340"))
        self.assertFalse(in_table_c8(u"\u0342"))
        # C.9 is not in the bmp
        # self.assertTrue(in_table_c9(u"\U000E0001"))
        # self.assertFalse(in_table_c8(u"\U000E0002"))
        # Tables D.1/D.2: bidi R/AL and L categories.
        self.assertTrue(in_table_d1(u"\u05be"))
        self.assertFalse(in_table_d1(u"\u05bf"))
        self.assertTrue(in_table_d2(u"\u0041"))
        self.assertFalse(in_table_d2(u"\u0040"))

        # This would generate a hash of all predicates. However, running
        # it is quite expensive, and only serves to detect changes in the
        # unicode database. Instead, stringprep.py asserts the version of
        # the database.

        # import hashlib
        # predicates = [k for k in dir(stringprep) if k.startswith("in_table")]
        # predicates.sort()
        # for p in predicates:
        #     f = getattr(stringprep, p)
        #     # Collect all BMP code points
        #     data = ["0"] * 0x10000
        #     for i in range(0x10000):
        #         if f(unichr(i)):
        #             data[i] = "1"
        #     data = "".join(data)
        #     h = hashlib.sha1()
        #     h.update(data)
        #     print p, h.hexdigest()
def test_main():
    """Entry point used by the regrtest driver to run this file's suite."""
    test_support.run_unittest(StringprepTests)
# Allow running this test file directly, outside of regrtest.
if __name__ == '__main__':
    test_main()
| bsd-2-clause |
BlackSmith/selenium | py/test/selenium/webdriver/firefox/ff_select_support_class_tests.py | 63 | 1419 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium import webdriver
from selenium.test.selenium.webdriver.common import select_class_tests
from selenium.test.selenium.webdriver.common.webserver import SimpleWebServer
def setup_module(module):
    """Start the fixture web server and launch Firefox for this module.

    Both fixtures are attached to the test class so that
    ``teardown_module`` can dispose of them afterwards.
    """
    webserver = SimpleWebServer()
    webserver.start()
    try:
        FirefoxSelectElementHandlingTests.webserver = webserver
        FirefoxSelectElementHandlingTests.driver = webdriver.Firefox()
    except Exception:
        # Don't leak a running web server when the browser fails to start.
        webserver.stop()
        raise
# Runs the shared <select> support test suite against Firefox; the `driver`
# and `webserver` class attributes are attached by setup_module above.
class FirefoxSelectElementHandlingTests(select_class_tests.WebDriverSelectSupportTests):
    pass
def teardown_module(module):
    """Quit the browser and stop the web server started in ``setup_module``."""
    try:
        FirefoxSelectElementHandlingTests.driver.quit()
    finally:
        # Stop the server even if quitting the driver raises.
        FirefoxSelectElementHandlingTests.webserver.stop()
| apache-2.0 |
joariasl/odoo | addons/point_of_sale/report/pos_invoice.py | 317 | 2393 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
class PosInvoiceReport(osv.AbstractModel):
    """QWeb wrapper that renders the invoices linked to POS orders."""
    _name = 'report.point_of_sale.report_invoice'

    def render_html(self, cr, uid, ids, data=None, context=None):
        """Render ``account.report_invoice`` for the given pos.order ids.

        :param ids: ids of ``pos.order`` records to print
        :raises osv.except_osv: if any selected order has no linked invoice
        """
        report_obj = self.pool['report']
        posorder_obj = self.pool['pos.order']
        report = report_obj._get_report_from_name(cr, uid, 'account.report_invoice')
        selected_orders = posorder_obj.browse(cr, uid, ids, context=context)

        # Collect the invoices to print and remember which orders had one.
        ids_to_print = []
        invoiced_posorders_ids = []
        for order in selected_orders:
            if order.invoice_id:
                ids_to_print.append(order.invoice_id.id)
                invoiced_posorders_ids.append(order.id)

        not_invoiced_orders_ids = list(set(ids) - set(invoiced_posorders_ids))
        if not_invoiced_orders_ids:
            not_invoiced_posorders = posorder_obj.browse(cr, uid, not_invoiced_orders_ids, context=context)
            not_invoiced_orders_names = [order.name for order in not_invoiced_posorders]
            # Interpolate *after* the _() call: the original formatted the
            # string first, so gettext looked up the already-expanded text
            # and the message could never be translated.
            raise osv.except_osv(_('Error!'), _('No link to an invoice for %s.') % ', '.join(not_invoiced_orders_names))

        docargs = {
            'doc_ids': ids_to_print,
            'doc_model': report.model,
            'docs': selected_orders,
        }
        return report_obj.render(cr, uid, ids, 'account.report_invoice', docargs, context=context)
| agpl-3.0 |
mayankcu/Django-social | venv/Lib/encodings/cp865.py | 593 | 34874 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP865.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless CP865 codec built on the C-level charmap machinery."""

    def encode(self, input, errors='strict'):
        # unicode -> CP865 bytes, driven by the module-level encoding_map.
        return codecs.charmap_encode(input, errors, encoding_map)

    def decode(self, input, errors='strict'):
        # CP865 bytes -> unicode, driven by the module-level decoding_table.
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental CP865 encoder; the codec is stateless so `final` is moot."""

    def encode(self, input, final=False):
        encoded, _consumed = codecs.charmap_encode(input, self.errors, encoding_map)
        return encoded
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental CP865 decoder; the codec is stateless so `final` is moot."""

    def decode(self, input, final=False):
        decoded, _consumed = codecs.charmap_decode(input, self.errors, decoding_table)
        return decoded
# Stream-oriented writer: encoding comes from Codec, buffering from codecs.
class StreamWriter(Codec,codecs.StreamWriter):
    pass
# Stream-oriented reader: decoding comes from Codec, buffering from codecs.
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record the codec registry uses for 'cp865'."""
    codec = Codec()
    return codecs.CodecInfo(
        name='cp865',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
# Machine-generated by gencodec.py (see module docstring) -- do not hand-edit.
# Start from the 256-entry identity map, then override the high half
# (0x80-0xFF) where CP865 differs from Latin-1.
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
    0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
    0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
    0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
    0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
    0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
    0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
    0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
    0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
    0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
    0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
    0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
    0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
    0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
    0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
    0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
    0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
    0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
    0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
    0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
    0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
    0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
    0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
    0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
    0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
    0x0098: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
    0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
    0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
    0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
    0x009c: 0x00a3, # POUND SIGN
    0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
    0x009e: 0x20a7, # PESETA SIGN
    0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
    0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
    0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
    0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
    0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
    0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
    0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
    0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
    0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
    0x00a8: 0x00bf, # INVERTED QUESTION MARK
    0x00a9: 0x2310, # REVERSED NOT SIGN
    0x00aa: 0x00ac, # NOT SIGN
    0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
    0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
    0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
    0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00af: 0x00a4, # CURRENCY SIGN
    0x00b0: 0x2591, # LIGHT SHADE
    0x00b1: 0x2592, # MEDIUM SHADE
    0x00b2: 0x2593, # DARK SHADE
    0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
    0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
    0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
    0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
    0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
    0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
    0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
    0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
    0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
    0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
    0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
    0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
    0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
    0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
    0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
    0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
    0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
    0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
    0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
    0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
    0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
    0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
    0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
    0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
    0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
    0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
    0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
    0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
    0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
    0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
    0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
    0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
    0x00db: 0x2588, # FULL BLOCK
    0x00dc: 0x2584, # LOWER HALF BLOCK
    0x00dd: 0x258c, # LEFT HALF BLOCK
    0x00de: 0x2590, # RIGHT HALF BLOCK
    0x00df: 0x2580, # UPPER HALF BLOCK
    0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
    0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
    0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
    0x00e3: 0x03c0, # GREEK SMALL LETTER PI
    0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
    0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
    0x00e6: 0x00b5, # MICRO SIGN
    0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
    0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
    0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
    0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
    0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
    0x00ec: 0x221e, # INFINITY
    0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
    0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
    0x00ef: 0x2229, # INTERSECTION
    0x00f0: 0x2261, # IDENTICAL TO
    0x00f1: 0x00b1, # PLUS-MINUS SIGN
    0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
    0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
    0x00f4: 0x2320, # TOP HALF INTEGRAL
    0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
    0x00f6: 0x00f7, # DIVISION SIGN
    0x00f7: 0x2248, # ALMOST EQUAL TO
    0x00f8: 0x00b0, # DEGREE SIGN
    0x00f9: 0x2219, # BULLET OPERATOR
    0x00fa: 0x00b7, # MIDDLE DOT
    0x00fb: 0x221a, # SQUARE ROOT
    0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
    0x00fd: 0x00b2, # SUPERSCRIPT TWO
    0x00fe: 0x25a0, # BLACK SQUARE
    0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
# Machine-generated by gencodec.py (see module docstring) -- do not hand-edit.
# Full 256-entry decode table: index = CP865 byte value, value = the unicode
# character it decodes to (implicit adjacent-string concatenation).
decoding_table = (
    u'\x00'     #  0x0000 -> NULL
    u'\x01'     #  0x0001 -> START OF HEADING
    u'\x02'     #  0x0002 -> START OF TEXT
    u'\x03'     #  0x0003 -> END OF TEXT
    u'\x04'     #  0x0004 -> END OF TRANSMISSION
    u'\x05'     #  0x0005 -> ENQUIRY
    u'\x06'     #  0x0006 -> ACKNOWLEDGE
    u'\x07'     #  0x0007 -> BELL
    u'\x08'     #  0x0008 -> BACKSPACE
    u'\t'       #  0x0009 -> HORIZONTAL TABULATION
    u'\n'       #  0x000a -> LINE FEED
    u'\x0b'     #  0x000b -> VERTICAL TABULATION
    u'\x0c'     #  0x000c -> FORM FEED
    u'\r'       #  0x000d -> CARRIAGE RETURN
    u'\x0e'     #  0x000e -> SHIFT OUT
    u'\x0f'     #  0x000f -> SHIFT IN
    u'\x10'     #  0x0010 -> DATA LINK ESCAPE
    u'\x11'     #  0x0011 -> DEVICE CONTROL ONE
    u'\x12'     #  0x0012 -> DEVICE CONTROL TWO
    u'\x13'     #  0x0013 -> DEVICE CONTROL THREE
    u'\x14'     #  0x0014 -> DEVICE CONTROL FOUR
    u'\x15'     #  0x0015 -> NEGATIVE ACKNOWLEDGE
    u'\x16'     #  0x0016 -> SYNCHRONOUS IDLE
    u'\x17'     #  0x0017 -> END OF TRANSMISSION BLOCK
    u'\x18'     #  0x0018 -> CANCEL
    u'\x19'     #  0x0019 -> END OF MEDIUM
    u'\x1a'     #  0x001a -> SUBSTITUTE
    u'\x1b'     #  0x001b -> ESCAPE
    u'\x1c'     #  0x001c -> FILE SEPARATOR
    u'\x1d'     #  0x001d -> GROUP SEPARATOR
    u'\x1e'     #  0x001e -> RECORD SEPARATOR
    u'\x1f'     #  0x001f -> UNIT SEPARATOR
    u' '        #  0x0020 -> SPACE
    u'!'        #  0x0021 -> EXCLAMATION MARK
    u'"'        #  0x0022 -> QUOTATION MARK
    u'#'        #  0x0023 -> NUMBER SIGN
    u'$'        #  0x0024 -> DOLLAR SIGN
    u'%'        #  0x0025 -> PERCENT SIGN
    u'&'        #  0x0026 -> AMPERSAND
    u"'"        #  0x0027 -> APOSTROPHE
    u'('        #  0x0028 -> LEFT PARENTHESIS
    u')'        #  0x0029 -> RIGHT PARENTHESIS
    u'*'        #  0x002a -> ASTERISK
    u'+'        #  0x002b -> PLUS SIGN
    u','        #  0x002c -> COMMA
    u'-'        #  0x002d -> HYPHEN-MINUS
    u'.'        #  0x002e -> FULL STOP
    u'/'        #  0x002f -> SOLIDUS
    u'0'        #  0x0030 -> DIGIT ZERO
    u'1'        #  0x0031 -> DIGIT ONE
    u'2'        #  0x0032 -> DIGIT TWO
    u'3'        #  0x0033 -> DIGIT THREE
    u'4'        #  0x0034 -> DIGIT FOUR
    u'5'        #  0x0035 -> DIGIT FIVE
    u'6'        #  0x0036 -> DIGIT SIX
    u'7'        #  0x0037 -> DIGIT SEVEN
    u'8'        #  0x0038 -> DIGIT EIGHT
    u'9'        #  0x0039 -> DIGIT NINE
    u':'        #  0x003a -> COLON
    u';'        #  0x003b -> SEMICOLON
    u'<'        #  0x003c -> LESS-THAN SIGN
    u'='        #  0x003d -> EQUALS SIGN
    u'>'        #  0x003e -> GREATER-THAN SIGN
    u'?'        #  0x003f -> QUESTION MARK
    u'@'        #  0x0040 -> COMMERCIAL AT
    u'A'        #  0x0041 -> LATIN CAPITAL LETTER A
    u'B'        #  0x0042 -> LATIN CAPITAL LETTER B
    u'C'        #  0x0043 -> LATIN CAPITAL LETTER C
    u'D'        #  0x0044 -> LATIN CAPITAL LETTER D
    u'E'        #  0x0045 -> LATIN CAPITAL LETTER E
    u'F'        #  0x0046 -> LATIN CAPITAL LETTER F
    u'G'        #  0x0047 -> LATIN CAPITAL LETTER G
    u'H'        #  0x0048 -> LATIN CAPITAL LETTER H
    u'I'        #  0x0049 -> LATIN CAPITAL LETTER I
    u'J'        #  0x004a -> LATIN CAPITAL LETTER J
    u'K'        #  0x004b -> LATIN CAPITAL LETTER K
    u'L'        #  0x004c -> LATIN CAPITAL LETTER L
    u'M'        #  0x004d -> LATIN CAPITAL LETTER M
    u'N'        #  0x004e -> LATIN CAPITAL LETTER N
    u'O'        #  0x004f -> LATIN CAPITAL LETTER O
    u'P'        #  0x0050 -> LATIN CAPITAL LETTER P
    u'Q'        #  0x0051 -> LATIN CAPITAL LETTER Q
    u'R'        #  0x0052 -> LATIN CAPITAL LETTER R
    u'S'        #  0x0053 -> LATIN CAPITAL LETTER S
    u'T'        #  0x0054 -> LATIN CAPITAL LETTER T
    u'U'        #  0x0055 -> LATIN CAPITAL LETTER U
    u'V'        #  0x0056 -> LATIN CAPITAL LETTER V
    u'W'        #  0x0057 -> LATIN CAPITAL LETTER W
    u'X'        #  0x0058 -> LATIN CAPITAL LETTER X
    u'Y'        #  0x0059 -> LATIN CAPITAL LETTER Y
    u'Z'        #  0x005a -> LATIN CAPITAL LETTER Z
    u'['        #  0x005b -> LEFT SQUARE BRACKET
    u'\\'       #  0x005c -> REVERSE SOLIDUS
    u']'        #  0x005d -> RIGHT SQUARE BRACKET
    u'^'        #  0x005e -> CIRCUMFLEX ACCENT
    u'_'        #  0x005f -> LOW LINE
    u'`'        #  0x0060 -> GRAVE ACCENT
    u'a'        #  0x0061 -> LATIN SMALL LETTER A
    u'b'        #  0x0062 -> LATIN SMALL LETTER B
    u'c'        #  0x0063 -> LATIN SMALL LETTER C
    u'd'        #  0x0064 -> LATIN SMALL LETTER D
    u'e'        #  0x0065 -> LATIN SMALL LETTER E
    u'f'        #  0x0066 -> LATIN SMALL LETTER F
    u'g'        #  0x0067 -> LATIN SMALL LETTER G
    u'h'        #  0x0068 -> LATIN SMALL LETTER H
    u'i'        #  0x0069 -> LATIN SMALL LETTER I
    u'j'        #  0x006a -> LATIN SMALL LETTER J
    u'k'        #  0x006b -> LATIN SMALL LETTER K
    u'l'        #  0x006c -> LATIN SMALL LETTER L
    u'm'        #  0x006d -> LATIN SMALL LETTER M
    u'n'        #  0x006e -> LATIN SMALL LETTER N
    u'o'        #  0x006f -> LATIN SMALL LETTER O
    u'p'        #  0x0070 -> LATIN SMALL LETTER P
    u'q'        #  0x0071 -> LATIN SMALL LETTER Q
    u'r'        #  0x0072 -> LATIN SMALL LETTER R
    u's'        #  0x0073 -> LATIN SMALL LETTER S
    u't'        #  0x0074 -> LATIN SMALL LETTER T
    u'u'        #  0x0075 -> LATIN SMALL LETTER U
    u'v'        #  0x0076 -> LATIN SMALL LETTER V
    u'w'        #  0x0077 -> LATIN SMALL LETTER W
    u'x'        #  0x0078 -> LATIN SMALL LETTER X
    u'y'        #  0x0079 -> LATIN SMALL LETTER Y
    u'z'        #  0x007a -> LATIN SMALL LETTER Z
    u'{'        #  0x007b -> LEFT CURLY BRACKET
    u'|'        #  0x007c -> VERTICAL LINE
    u'}'        #  0x007d -> RIGHT CURLY BRACKET
    u'~'        #  0x007e -> TILDE
    u'\x7f'     #  0x007f -> DELETE
    u'\xc7'     #  0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
    u'\xfc'     #  0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
    u'\xe9'     #  0x0082 -> LATIN SMALL LETTER E WITH ACUTE
    u'\xe2'     #  0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
    u'\xe4'     #  0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
    u'\xe0'     #  0x0085 -> LATIN SMALL LETTER A WITH GRAVE
    u'\xe5'     #  0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
    u'\xe7'     #  0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
    u'\xea'     #  0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
    u'\xeb'     #  0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
    u'\xe8'     #  0x008a -> LATIN SMALL LETTER E WITH GRAVE
    u'\xef'     #  0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
    u'\xee'     #  0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
    u'\xec'     #  0x008d -> LATIN SMALL LETTER I WITH GRAVE
    u'\xc4'     #  0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
    u'\xc5'     #  0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
    u'\xc9'     #  0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
    u'\xe6'     #  0x0091 -> LATIN SMALL LIGATURE AE
    u'\xc6'     #  0x0092 -> LATIN CAPITAL LIGATURE AE
    u'\xf4'     #  0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
    u'\xf6'     #  0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
    u'\xf2'     #  0x0095 -> LATIN SMALL LETTER O WITH GRAVE
    u'\xfb'     #  0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
    u'\xf9'     #  0x0097 -> LATIN SMALL LETTER U WITH GRAVE
    u'\xff'     #  0x0098 -> LATIN SMALL LETTER Y WITH DIAERESIS
    u'\xd6'     #  0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
    u'\xdc'     #  0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
    u'\xf8'     #  0x009b -> LATIN SMALL LETTER O WITH STROKE
    u'\xa3'     #  0x009c -> POUND SIGN
    u'\xd8'     #  0x009d -> LATIN CAPITAL LETTER O WITH STROKE
    u'\u20a7'   #  0x009e -> PESETA SIGN
    u'\u0192'   #  0x009f -> LATIN SMALL LETTER F WITH HOOK
    u'\xe1'     #  0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
    u'\xed'     #  0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
    u'\xf3'     #  0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
    u'\xfa'     #  0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
    u'\xf1'     #  0x00a4 -> LATIN SMALL LETTER N WITH TILDE
    u'\xd1'     #  0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
    u'\xaa'     #  0x00a6 -> FEMININE ORDINAL INDICATOR
    u'\xba'     #  0x00a7 -> MASCULINE ORDINAL INDICATOR
    u'\xbf'     #  0x00a8 -> INVERTED QUESTION MARK
    u'\u2310'   #  0x00a9 -> REVERSED NOT SIGN
    u'\xac'     #  0x00aa -> NOT SIGN
    u'\xbd'     #  0x00ab -> VULGAR FRACTION ONE HALF
    u'\xbc'     #  0x00ac -> VULGAR FRACTION ONE QUARTER
    u'\xa1'     #  0x00ad -> INVERTED EXCLAMATION MARK
    u'\xab'     #  0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    u'\xa4'     #  0x00af -> CURRENCY SIGN
    u'\u2591'   #  0x00b0 -> LIGHT SHADE
    u'\u2592'   #  0x00b1 -> MEDIUM SHADE
    u'\u2593'   #  0x00b2 -> DARK SHADE
    u'\u2502'   #  0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
    u'\u2524'   #  0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
    u'\u2561'   #  0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
    u'\u2562'   #  0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
    u'\u2556'   #  0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
    u'\u2555'   #  0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
    u'\u2563'   #  0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    u'\u2551'   #  0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
    u'\u2557'   #  0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
    u'\u255d'   #  0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
    u'\u255c'   #  0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
    u'\u255b'   #  0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
    u'\u2510'   #  0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
    u'\u2514'   #  0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
    u'\u2534'   #  0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
    u'\u252c'   #  0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    u'\u251c'   #  0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    u'\u2500'   #  0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
    u'\u253c'   #  0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    u'\u255e'   #  0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
    u'\u255f'   #  0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
    u'\u255a'   #  0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
    u'\u2554'   #  0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
    u'\u2569'   #  0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    u'\u2566'   #  0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    u'\u2560'   #  0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    u'\u2550'   #  0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
    u'\u256c'   #  0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    u'\u2567'   #  0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
    u'\u2568'   #  0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
    u'\u2564'   #  0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
    u'\u2565'   #  0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
    u'\u2559'   #  0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
    u'\u2558'   #  0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
    u'\u2552'   #  0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
    u'\u2553'   #  0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
    u'\u256b'   #  0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
    u'\u256a'   #  0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
    u'\u2518'   #  0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
    u'\u250c'   #  0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
    u'\u2588'   #  0x00db -> FULL BLOCK
    u'\u2584'   #  0x00dc -> LOWER HALF BLOCK
    u'\u258c'   #  0x00dd -> LEFT HALF BLOCK
    u'\u2590'   #  0x00de -> RIGHT HALF BLOCK
    u'\u2580'   #  0x00df -> UPPER HALF BLOCK
    u'\u03b1'   #  0x00e0 -> GREEK SMALL LETTER ALPHA
    u'\xdf'     #  0x00e1 -> LATIN SMALL LETTER SHARP S
    u'\u0393'   #  0x00e2 -> GREEK CAPITAL LETTER GAMMA
    u'\u03c0'   #  0x00e3 -> GREEK SMALL LETTER PI
    u'\u03a3'   #  0x00e4 -> GREEK CAPITAL LETTER SIGMA
    u'\u03c3'   #  0x00e5 -> GREEK SMALL LETTER SIGMA
    u'\xb5'     #  0x00e6 -> MICRO SIGN
    u'\u03c4'   #  0x00e7 -> GREEK SMALL LETTER TAU
    u'\u03a6'   #  0x00e8 -> GREEK CAPITAL LETTER PHI
    u'\u0398'   #  0x00e9 -> GREEK CAPITAL LETTER THETA
    u'\u03a9'   #  0x00ea -> GREEK CAPITAL LETTER OMEGA
    u'\u03b4'   #  0x00eb -> GREEK SMALL LETTER DELTA
    u'\u221e'   #  0x00ec -> INFINITY
    u'\u03c6'   #  0x00ed -> GREEK SMALL LETTER PHI
    u'\u03b5'   #  0x00ee -> GREEK SMALL LETTER EPSILON
    u'\u2229'   #  0x00ef -> INTERSECTION
    u'\u2261'   #  0x00f0 -> IDENTICAL TO
    u'\xb1'     #  0x00f1 -> PLUS-MINUS SIGN
    u'\u2265'   #  0x00f2 -> GREATER-THAN OR EQUAL TO
    u'\u2264'   #  0x00f3 -> LESS-THAN OR EQUAL TO
    u'\u2320'   #  0x00f4 -> TOP HALF INTEGRAL
    u'\u2321'   #  0x00f5 -> BOTTOM HALF INTEGRAL
    u'\xf7'     #  0x00f6 -> DIVISION SIGN
    u'\u2248'   #  0x00f7 -> ALMOST EQUAL TO
    u'\xb0'     #  0x00f8 -> DEGREE SIGN
    u'\u2219'   #  0x00f9 -> BULLET OPERATOR
    u'\xb7'     #  0x00fa -> MIDDLE DOT
    u'\u221a'   #  0x00fb -> SQUARE ROOT
    u'\u207f'   #  0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
    u'\xb2'     #  0x00fd -> SUPERSCRIPT TWO
    u'\u25a0'   #  0x00fe -> BLACK SQUARE
    u'\xa0'     #  0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
0x00a3: 0x009c, # POUND SIGN
0x00a4: 0x00af, # CURRENCY SIGN
0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b5: 0x00e6, # MICRO SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00bf: 0x00a8, # INVERTED QUESTION MARK
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ec: 0x008d, # LATIN SMALL LETTER I WITH GRAVE
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x00ff: 0x0098, # LATIN SMALL LETTER Y WITH DIAERESIS
0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
0x03c0: 0x00e3, # GREEK SMALL LETTER PI
0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
0x20a7: 0x009e, # PESETA SIGN
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x221e: 0x00ec, # INFINITY
0x2229: 0x00ef, # INTERSECTION
0x2248: 0x00f7, # ALMOST EQUAL TO
0x2261: 0x00f0, # IDENTICAL TO
0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
0x2310: 0x00a9, # REVERSED NOT SIGN
0x2320: 0x00f4, # TOP HALF INTEGRAL
0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| bsd-3-clause |
alfredodeza/boto | tests/integration/redshift/test_cert_verification.py | 126 | 1447 | # Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import unittest
from tests.integration import ServiceCertVerificationTest
import boto.redshift
class RedshiftCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest):
    """Exercise SSL certificate verification against live Redshift endpoints."""
    # Service marker attribute -- presumably read by ServiceCertVerificationTest
    # to identify the service under test; confirm against the base class.
    redshift = True
    # Every available Redshift region; evaluated once, at class-creation time.
    regions = boto.redshift.regions()
    def sample_service_call(self, conn):
        """Issue one read-only API call so the TLS handshake is exercised."""
        conn.describe_cluster_versions()
| mit |
amenonsen/ansible | lib/ansible/modules/system/open_iscsi.py | 22 | 11413 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Serge van Ginderachter <serge@vanginderachter.be>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: open_iscsi
author:
- Serge van Ginderachter (@srvg)
version_added: "1.4"
short_description: Manage iSCSI targets with Open-iSCSI
description:
- Discover targets on given portal, (dis)connect targets, mark targets to
manually or auto start, return device nodes of connected targets.
requirements:
- open_iscsi library and tools (iscsiadm)
options:
portal:
description:
- The IP address of the iSCSI target.
type: str
aliases: [ ip ]
port:
description:
- The port on which the iSCSI target process listens.
type: str
default: 3260
target:
description:
- The iSCSI target name.
type: str
aliases: [ name, targetname ]
login:
description:
- Whether the target node should be connected.
type: bool
aliases: [ state ]
node_auth:
description:
- The value for C(discovery.sendtargets.auth.authmethod).
type: str
default: CHAP
node_user:
description:
- The value for C(discovery.sendtargets.auth.username).
type: str
node_pass:
description:
- The value for C(discovery.sendtargets.auth.password).
type: str
auto_node_startup:
description:
- Whether the target node should be automatically connected at startup.
type: bool
aliases: [ automatic ]
discover:
description:
- Whether the list of target nodes on the portal should be
(re)discovered and added to the persistent iSCSI database.
- Keep in mind that C(iscsiadm) discovery resets configuration, like C(node.startup)
to manual, hence combined with C(auto_node_startup=yes) will always return
a changed state.
type: bool
show_nodes:
description:
- Whether the list of nodes in the persistent iSCSI database should be returned by the module.
type: bool
'''
EXAMPLES = r'''
- name: Perform a discovery on 10.1.2.3 and show available target nodes
open_iscsi:
show_nodes: yes
discover: yes
portal: 10.1.2.3
# NOTE: Only works if exactly one target is exported to the initiator
- name: Discover targets on portal and login to the one available
open_iscsi:
portal: '{{ iscsi_target }}'
login: yes
discover: yes
- name: Connect to the named target, after updating the local persistent database (cache)
open_iscsi:
login: yes
target: iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d
- name: Disconnect from the cached named target
open_iscsi:
login: no
target: iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d
'''
import glob
import os
import time
from ansible.module_utils.basic import AnsibleModule
ISCSIADM = 'iscsiadm'
def compare_nodelists(l1, l2):
    """Return True if the two node-name lists hold the same entries.

    Order is ignored.  The original implementation called ``list.sort()``
    on both arguments, silently reordering the caller's lists as a side
    effect; ``sorted()`` compares copies and leaves the inputs untouched.
    """
    return sorted(l1) == sorted(l2)
def iscsi_get_cached_nodes(module, portal=None):
    """Return target names from the persistent iSCSI node database.

    When *portal* is given, only targets whose record lists that portal IP
    are returned.  Fails the module on any unexpected iscsiadm error.
    """
    cmd = '{0} --mode node'.format(iscsiadm_cmd)
    rc, out, err = module.run_command(cmd)
    nodes = []
    if rc == 0:
        # each record reads "ip:port,target_portal_group_tag targetname"
        for line in out.splitlines():
            fields = line.split()
            if len(fields) > 2:
                module.fail_json(msg='error parsing output', cmd=cmd)
            target_name = fields[1]
            record_portal = fields[0].split(':')[0]
            if portal is None or portal == record_portal:
                nodes.append(target_name)
    elif rc == 21 or (rc == 255 and "o records found" in err):
        # older iscsiadm releases lack clean return codes; newer ones use 21
        # for "no records", very old ones print "[N|n]o records found" on
        # stderr with rc 255 -- both simply mean the cache is empty
        pass
    else:
        module.fail_json(cmd=cmd, rc=rc, msg=err)
    return nodes
def iscsi_discover(module, portal, port):
    """Run sendtargets discovery against portal:port, updating the node db."""
    cmd = ('{0} --mode discovery --type sendtargets --portal {1}:{2}'
           .format(iscsiadm_cmd, portal, port))
    rc, out, err = module.run_command(cmd)
    if rc > 0:
        module.fail_json(cmd=cmd, rc=rc, msg=err)
def target_loggedon(module, target):
    """Return True when an active session exists for *target*."""
    cmd = '{0} --mode session'.format(iscsiadm_cmd)
    rc, out, err = module.run_command(cmd)
    # rc 21 means "no active sessions"
    if rc == 21:
        return False
    if rc == 0:
        return target in out
    module.fail_json(cmd=cmd, rc=rc, msg=err)
def target_login(module, target, portal=None, port=None):
    """Log in to *target*, pushing CHAP credentials into the node record first.

    When both *portal* and *port* are supplied the login is restricted to
    that portal; otherwise all portals of the node record are used.
    """
    node_auth = module.params['node_auth']
    node_user = module.params['node_user']
    node_pass = module.params['node_pass']
    if node_user:
        # persist authentication settings on the node record before login
        for name, value in (('node.session.auth.authmethod', node_auth),
                            ('node.session.auth.username', node_user),
                            ('node.session.auth.password', node_pass)):
            cmd = ('{0} --mode node --targetname {1} --op=update'
                   ' --name {2} --value {3}'.format(iscsiadm_cmd, target, name, value))
            rc, out, err = module.run_command(cmd)
            if rc > 0:
                module.fail_json(cmd=cmd, rc=rc, msg=err)
    cmd = '{0} --mode node --targetname {1} --login'.format(iscsiadm_cmd, target)
    if portal is not None and port is not None:
        cmd += ' --portal {0}:{1}'.format(portal, port)
    rc, out, err = module.run_command(cmd)
    if rc > 0:
        module.fail_json(cmd=cmd, rc=rc, msg=err)
def target_logout(module, target):
    """Close any session to *target*."""
    cmd = '{0} --mode node --targetname {1} --logout'.format(iscsiadm_cmd, target)
    rc, out, err = module.run_command(cmd)
    if rc > 0:
        module.fail_json(cmd=cmd, rc=rc, msg=err)
def target_device_node(module, target):
    """Return the unique real disk device paths udev created for *target*.

    Partition nodes are excluded and multipath duplicates collapsed.
    *module* is accepted for signature symmetry with the other helpers
    but is not used.  (There may well be a better way to find which
    device nodes get created for a given target.)
    """
    devdisks = []
    for link in glob.glob('/dev/disk/by-path/*%s*' % target):
        if "-part" in link:
            continue  # skip partition entries
        real = os.path.realpath(link)
        if real not in devdisks:  # only add once (multi-path?)
            devdisks.append(real)
    return devdisks
def target_isauto(module, target):
    """Return True when node.startup is configured 'automatic' for *target*."""
    cmd = '{0} --mode node --targetname {1}'.format(iscsiadm_cmd, target)
    rc, out, err = module.run_command(cmd)
    if rc != 0:
        module.fail_json(cmd=cmd, rc=rc, msg=err)
    for line in out.splitlines():
        if 'node.startup' in line:
            # first matching setting line decides
            return 'automatic' in line
    return False
def target_setauto(module, target):
    """Mark *target*'s node record to log in automatically at startup."""
    cmd = ('{0} --mode node --targetname {1} --op=update'
           ' --name node.startup --value automatic'.format(iscsiadm_cmd, target))
    rc, out, err = module.run_command(cmd)
    if rc > 0:
        module.fail_json(cmd=cmd, rc=rc, msg=err)
def target_setmanual(module, target):
    """Mark *target*'s node record for manual (non-automatic) startup."""
    cmd = ('{0} --mode node --targetname {1} --op=update'
           ' --name node.startup --value manual'.format(iscsiadm_cmd, target))
    rc, out, err = module.run_command(cmd)
    if rc > 0:
        module.fail_json(cmd=cmd, rc=rc, msg=err)
def main():
    """Module entry point: optional discovery, then login/startup management."""
    # load ansible module object
    module = AnsibleModule(
        argument_spec=dict(
            # target
            portal=dict(type='str', aliases=['ip']),
            port=dict(type='str', default=3260),
            target=dict(type='str', aliases=['name', 'targetname']),
            node_auth=dict(type='str', default='CHAP'),
            node_user=dict(type='str'),
            node_pass=dict(type='str', no_log=True),
            # actions
            login=dict(type='bool', aliases=['state']),
            auto_node_startup=dict(type='bool', aliases=['automatic']),
            discover=dict(type='bool', default=False),
            show_nodes=dict(type='bool', default=False),
        ),
        # NOTE(review): 'discover_user'/'discover_pass' are not declared in
        # argument_spec above, so that pairing can never trigger -- looks
        # like a leftover; confirm before removing.
        required_together=[['discover_user', 'discover_pass'],
                           ['node_user', 'node_pass']],
        supports_check_mode=True,
    )
    # every iscsiadm helper in this module reads this global command path
    global iscsiadm_cmd
    iscsiadm_cmd = module.get_bin_path('iscsiadm', required=True)
    # parameters
    portal = module.params['portal']
    target = module.params['target']
    port = module.params['port']
    login = module.params['login']
    automatic = module.params['auto_node_startup']
    discover = module.params['discover']
    show_nodes = module.params['show_nodes']
    check = module.check_mode
    # snapshot of the node db before any discovery, used for change detection
    cached = iscsi_get_cached_nodes(module, portal)
    # return json dict
    result = {}
    result['changed'] = False
    if discover:
        if portal is None:
            module.fail_json(msg="Need to specify at least the portal (ip) to discover")
        elif check:
            # check mode: pretend discovery found exactly the cached nodes
            nodes = cached
        else:
            iscsi_discover(module, portal, port)
            nodes = iscsi_get_cached_nodes(module, portal)
        if not compare_nodelists(cached, nodes):
            result['changed'] |= True
            result['cache_updated'] = True
    else:
        nodes = cached
    if login is not None or automatic is not None:
        if target is None:
            if len(nodes) > 1:
                module.fail_json(msg="Need to specify a target")
            else:
                # exactly one node cached: implicitly operate on it
                # (IndexError if the cache is empty -- TODO confirm intended)
                target = nodes[0]
        else:
            # check given target is in cache
            check_target = False
            for node in nodes:
                if node == target:
                    check_target = True
                    break
            if not check_target:
                module.fail_json(msg="Specified target not found")
    if show_nodes:
        result['nodes'] = nodes
    if login is not None:
        loggedon = target_loggedon(module, target)
        if (login and loggedon) or (not login and not loggedon):
            # already in the requested session state: no change
            result['changed'] |= False
            if login:
                result['devicenodes'] = target_device_node(module, target)
        elif not check:
            if login:
                target_login(module, target, portal, port)
                # give udev some time
                time.sleep(1)
                result['devicenodes'] = target_device_node(module, target)
            else:
                target_logout(module, target)
            result['changed'] |= True
            result['connection_changed'] = True
        else:
            # check mode: report the pending change without performing it
            result['changed'] |= True
            result['connection_changed'] = True
    if automatic is not None:
        isauto = target_isauto(module, target)
        if (automatic and isauto) or (not automatic and not isauto):
            # startup mode already as requested
            result['changed'] |= False
            result['automatic_changed'] = False
        elif not check:
            if automatic:
                target_setauto(module, target)
            else:
                target_setmanual(module, target)
            result['changed'] |= True
            result['automatic_changed'] = True
        else:
            # check mode: report the pending change without performing it
            result['changed'] |= True
            result['automatic_changed'] = True
    module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
lancezlin/pyjs | examples/gcharttestapp/GChartExample20.py | 6 | 17303 |
from pyjamas import DOM
from pyjamas import Window
from pyjamas.ui import Event
from pyjamas.ui.Button import Button
from pyjamas.ui.DialogBox import DialogBox
from pyjamas.ui.DockPanel import DockPanel
from pyjamas.ui.FlexTable import FlexTable
from pyjamas.ui import HasHorizontalAlignment
from pyjamas.ui.HorizontalPanel import HorizontalPanel
from pyjamas.ui.ListBox import ListBox
from pyjamas.ui.VerticalPanel import VerticalPanel
from pyjamas.chart.GChart import GChart
from pyjamas.chart import AnnotationLocation
from pyjamas.chart import SymbolType
from pyjamas.chart import TouchedPointUpdateOption
# Layout of each [label, object] row handed to ObjectSelectorDropdownList:
LABEL_COL = 0; # index of the display label
OBJECT_COL = 1; # index of the associated object
N_COLS = 2; # columns per row of the editing form
# number of pie slices (one curve per slice) on the example chart
N_SLICES = 5
"""*
* This example displays a pie chart that, when you click on any slice,
* opens a dialog that lets you modify the color, shading pattern, and
* size of that slice. That dialog also contains "Prev Slice" and "Next
* Slice" buttons that, by invoking GChart's <tt>touch</tt> method,
* programmatically emulate the user sequentially "touching" (selecting)
* slices with their mouse.
*
* <p>
*
* The slice properties dialog in this example is an ordinary GWT modal
* dialog (a <tt>DialogBox</tt> with modal=True, autoHide=True). It gets
* GChart to inform it of click events on the chart by implementing the
* standard GWT <tt>ClickHandler</tt> interface, and then passing
* itself to GChart's <tt>addClickHandler</tt> method. The dialog's
* <tt>onClick</tt> method shows itself (via <tt>DialogBox.show</tt>)
* and then uses GChart's <tt>getTouchedPoint</tt> method to get a
* reference to the clicked-on slice that it uses to load that slice's
* properties into the form. As the user makes changes via the form,
* they are copied back into the chart and the chart's <tt>update</tt>
* method is invoked to immediately show the changes on the chart.
* <p>
*
* GChart's "currently touched point" (available via
* <tt>getTouchedPoint</tt>) ordinarily moves in lock-step with current
* mouse location, and thus falls short of a True point selection
* capability. This example works around this limitation by exploiting
* the fact that a GWT modal dialog "eats" all mouse events while it is
* open. So, when the modal dialog is opened, the mouse location seen by
* GChart, and hence the "currently touched" point is frozen. This lets
* us use GChart's currently touched point as if it were the "selected"
* point in this example.
*
"""
"""
* A helper class to facilitate property editing via drop-down
* lists in this example (there's nothing GChart-specific here):
"""
class ObjectSelectorDropdownList(ListBox):
    """Drop-down list that maps each visible label to an arbitrary object.

    ``labelObjectPairs`` is a sequence of ``[label, object]`` rows; the
    labels become the drop-down items and the paired objects can then be
    looked up by index or by the current selection.  (There is nothing
    GChart-specific here.)
    """
    def __init__(self, labelObjectPairs):
        ListBox.__init__(self)
        self.labelObjectPairs = labelObjectPairs
        self.setVisibleItemCount(1); # makes it a drop-down list
        # add each label as an item on the drop-down list
        for i in range(len(labelObjectPairs)):
            self.addItem( labelObjectPairs[i][LABEL_COL])
    # returns object at given index
    def getObject(self, index):
        return self.labelObjectPairs[index][OBJECT_COL]
    # returns the currently selected object in the drop-down list
    def getSelectedObject(self):
        return self.getObject(self.getSelectedIndex())
    # makes given object the selected one (assumes it's on list--once)
    def setSelectedObject(self, selectedObject):
        for i in range(len(self.labelObjectPairs)):
            if selectedObject == self.labelObjectPairs[i][OBJECT_COL]:
                self.setSelectedIndex(i)
                return
        # BUGFIX: was ``raise IllegalArgumentException`` -- a Java-ism; that
        # name does not exist in Python and would itself raise NameError.
        raise ValueError(
            "selectedObject specified was not found on the labelObjectPairs list.")
    # number of label, object pairs
    def getNObjects(self):
        return len(self.labelObjectPairs)
# class ObjectSelectorDropdownList
#
# holds color information associated with color selection drop-down
class ColorSpec:
    """Pairs a slice's fill (background) color with its border color.

    Instances back the entries of the color-selection drop-down list.
    """
    def __init__(self, backgroundColor, borderColor):
        # stored verbatim; read back when copying form settings to the chart
        self.borderColor = borderColor
        self.backgroundColor = backgroundColor
# the modal dialog that pops up when they click on a slice to edit it
class SliceEditor(DialogBox):
def __init__(self, chart):
""" DialogBox CSS Style self.settings used with this example for reference:
Note: These simplified CSS styles make the dialog's title bar behave a
little quirkily in IE6 when dragging. For more sophisticated CSS that
fixes this problem (and also provides a more professional look) see the
CSS tab of the DialogBox example in the GWT <a href="xxx"> Showcase of
Features</a> (I just didn't want to copy 5 pages of obscure DialogBox
CSS into what is after all a Client-side GChart example).
.gwt-DialogBox .Caption {
font-size: 18
color: #eef
background: #00f repeat-x 0px -2003px
padding: 4px 4px 4px 8px
cursor: default
border-bottom: 2px solid #008
border-top: 3px solid #448
.gwt-DialogBox .dialogContent {
border: 1px solid #008
background: #ddd
padding: 3px
"""
DialogBox.__init__(self, autoHide=True, modal=True)
self.chart = chart
self.isFirstTime = True
mainPanel = VerticalPanel()
propertyForm = FlexTable()
commandBar = DockPanel()
sliceSwitcher = HorizontalPanel()
self.prevSlice = Button("<Prev Slice", self)
self.nextSlice = Button("Next Slice>", self)
self.closeButton = Button("Close", self)
self.chart.colorSelector.addChangeListener(self)
self.chart.sliceSizeSelector.addChangeListener(self)
self.chart.shadingSelector.addChangeListener(self)
#self.prevSlice.addClickListener(self)
#self.nextSlice.addClickListener(self)
#self.closeButton.addClickListener(self)
# slice properties table (slice color, shading and size)
propertyForm.setSize(3, 2)
propertyForm.setText( 0, 0, "Color:")
propertyForm.setWidget(0, 1, self.chart.colorSelector)
propertyForm.setText( 1, 0, "Shading Pattern:")
propertyForm.setWidget(1, 1, self.chart.shadingSelector)
propertyForm.setText( 2, 0, "Slice Size:")
propertyForm.setWidget(2, 1, self.chart.sliceSizeSelector)
# add additional properties here, if desired
# buttons for changing the selected slice from the form
sliceSwitcher.add(self.prevSlice)
sliceSwitcher.add(self.nextSlice)
commandBar.add(sliceSwitcher, DockPanel.WEST)
commandBar.add(self.closeButton, DockPanel.EAST)
commandBar.setCellHorizontalAlignment(self.closeButton,
HasHorizontalAlignment.ALIGN_RIGHT)
commandBar.setWidth("100%"); # pushes close button to right edge
# create main form and place it in DialogBox
mainPanel.add(propertyForm)
mainPanel.add(commandBar)
self.setWidget(mainPanel); # add the DialogBox' single, defining, widget
def onChange(self, sender):
self.copyFormPropertiesIntoChart(self.chart.getTouchedPoint())
# Changes in slice size can place a different, or no, slice under
# GChart's "current mouse position". Such chart changes "underneath the
# mouse" would normally result in a change in the touched point; the
# TOUCHED_POINT_LOCKED update option keeps that from happening.
self.chart.update(TouchedPointUpdateOption.TOUCHED_POINT_LOCKED)
# loads properties associated with point/slice into form
def copyChartPropertiesIntoForm(self, p):
# dialog title bar caption:
self.setText("Slice %d Properties " % \
self.chart.getCurveIndex(p.getParent()))
self.chart.colorSelector.setSelectedObject(
self.getColorSpec( p.getParent().getSymbol().getBackgroundColor(),
p.getParent().getSymbol().getBorderColor()))
self.chart.shadingSelector.setSelectedObject(
p.getParent().getSymbol().getSymbolType())
sliceSize = round(100*p.getParent().getSymbol().getPieSliceSize())
self.chart.sliceSizeSelector.setSelectedObject( int(sliceSize) )
# saves current form self.settings into associated point/slice of chart
def copyFormPropertiesIntoChart(self, p):
p.getParent().getSymbol().setBackgroundColor(
self.chart.colorSelector.getSelectedObject().backgroundColor)
p.getParent().getSymbol().setBorderColor(
self.chart.colorSelector.getSelectedObject().borderColor)
# selection flips border and background colors
p.getParent().getSymbol().setHoverSelectionBorderColor(
self.chart.colorSelector.getSelectedObject().backgroundColor)
p.getParent().getSymbol().setHoverSelectionBackgroundColor(
self.chart.colorSelector.getSelectedObject().borderColor)
p.getParent().getSymbol().setSymbolType(
self.chart.shadingSelector.getSelectedObject())
sliceSize = int(self.chart.sliceSizeSelector.getSelectedObject())
p.getParent().getSymbol().setPieSliceSize(sliceSize/100.)
# Retrieves an existing ColorSpec object reference, given its colors
def getColorSpec(self, backgroundColor, borderColor):
for i in range(self.chart.colorSelector.getNObjects()):
cs = self.chart.colorSelector.getObject(i)
if (backgroundColor == cs.backgroundColor and
borderColor == cs.borderColor):
return cs
raise IllegalArgumentException(
"Attempt to retrieve a non-existing color combination.")
def onClick(self, sender):
print "onClick", sender
if sender == self.prevSlice:
self.chart.onClickPrevSlice(sender)
elif sender == self.nextSlice:
self.chart.onClickNextSlice(sender)
elif sender == self.closeButton:
self.onClickClose(sender)
else:
self.onClickDefault(sender)
def onClickClose(self, event):
self.hide()
self.chart.touch(None); # clears any selected slice
self.chart.update(TouchedPointUpdateOption.TOUCHED_POINT_LOCKED)
def onClickDefault(self, sender):
# don't shown property editor if they clicked on nothing
if None == self.chart.getTouchedPoint():
return
event = DOM.eventGetCurrentEvent()
# load properties of clicked-on slice into form
self.copyChartPropertiesIntoForm(self.chart.getTouchedPoint())
if self.isFirstTime:
# initially put upper left corner wherever they clicked...
self.setPopupPosition(
Window.getScrollLeft()+ DOM.eventGetClientX(event),
Window.getScrollTop() + DOM.eventGetClientX(event))
self.show()
self.isFirstTime = False
else:
# ...thereafter, just stay whereever they dragged it to
self.show()
class GChartExample20 (GChart):
    """Five-slice pie chart whose slices are edited via a modal SliceEditor."""
    # the single dialog box that gets used to edit any slice
    def __init__(self):
        GChart.__init__(self)
        # labels/values for color selection drop-down list:
        self.colorSelector = ObjectSelectorDropdownList( \
          [["Red", ColorSpec("red", "#F88")],
          ["Fuchsia", ColorSpec("#F0F", "#F8F")],
          ["Lime", ColorSpec("#0F0", "#8F8")],
          ["Blue", ColorSpec("#00F", "#88F")],
          ["Aqua", ColorSpec("#0FF", "#8FF")],
          ["Maroon", ColorSpec("#800", "#C88")],
          ["Purple", ColorSpec("#808", "#C8C")],
          ["Green", ColorSpec("#080", "#8C8")],
          ["Olive", ColorSpec("#880", "#CC8")],
          ["Navy", ColorSpec("#008", "#88C")],
          ["Teal", ColorSpec("#088", "#8CC")]])
        # labels/values for slice shading pattern drop-down list
        self.shadingSelector = ObjectSelectorDropdownList( \
          [["Vertical shading", SymbolType.PIE_SLICE_VERTICAL_SHADING],
          ["Horizontal shading", SymbolType.PIE_SLICE_HORIZONTAL_SHADING],
          ["Optimal shading", SymbolType.PIE_SLICE_OPTIMAL_SHADING]])
        # labels/values for pie slice size (as percentage) drop-down list
        self.sliceSizeSelector = ObjectSelectorDropdownList([
          ["0%", int(0)],
          ["5%", int(5)],
          ["10%", int(10)],
          ["15%", int(15)],
          ["20%", int(20)],
          ["25%", int(25)],
          ["30%", int(30)],
          ["35%", int(35)],
          ["40%", int(40)],
          ["45%", int(45)],
          ["50%", int(50)],
          ["55%", int(55)],
          ["60%", int(60)],
          ["65%", int(65)],
          ["70%", int(70)],
          ["75%", int(75)],
          ["80%", int(80)],
          ["85%", int(85)],
          ["90%", int(90)],
          ["95%", int(95)],
          ["100%", int(100)]])
        # the slice-editing dialog; also serves as the chart's click listener
        self.theSliceEditor = SliceEditor(self)
        SOURCE_CODE_LINK = \
          "<a href='GChartExample20.txt' target='_blank'>Source code</a>"
        self.setChartSize(100, 100)
        self.setBorderStyle("none")
        self.setChartTitle("<big>Click pie to edit!</big>")
        self.setChartTitleThickness(20)
        self.setChartFootnotes(SOURCE_CODE_LINK)
        self.setChartFootnotesThickness(20)
        # initial slice sizes
        initSliceSize = [0.3, 0.2, 0.1, 0.2, 0.2]
        self.addClickListener(self.theSliceEditor)
        # one curve per slice; each curve holds a single pie-slice point
        for iCurve in range(N_SLICES):
            self.addCurve()
            self.getCurve().getSymbol().setBorderWidth(1)
            self.getCurve().getSymbol().setFillThickness(4)
            self.getCurve().getSymbol().setFillSpacing(4)
            self.getCurve().getSymbol().setHoverLocation(
                AnnotationLocation.ON_PIE_ARC)
            self.getCurve().getSymbol().setBorderColor(
                self.colorSelector.getObject(iCurve).borderColor)
            self.getCurve().getSymbol().setBackgroundColor(
                self.colorSelector.getObject(iCurve).backgroundColor)
            # selection flips border and background colors
            self.getCurve().getSymbol().setHoverSelectionBackgroundColor(
                self.colorSelector.getObject(iCurve).borderColor)
            self.getCurve().getSymbol().setHoverSelectionBorderColor(
                self.colorSelector.getObject(iCurve).backgroundColor)
            self.getCurve().getSymbol().setSymbolType(
                SymbolType.PIE_SLICE_HORIZONTAL_SHADING)
            self.getCurve().getSymbol().setPieSliceSize(initSliceSize[iCurve])
            self.getCurve().getSymbol().setModelHeight(1.0); #diameter = yMax-yMin
            self.getCurve().getSymbol().setModelWidth(0)
            self.getCurve().addPoint(0.5, 0.5); # pie centered in world units
        self.getXAxis().setAxisMin(0); # so 0.5,0.5 (see above) centers pie
        self.getXAxis().setAxisMax(1)
        self.getYAxis().setAxisMin(0)
        self.getYAxis().setAxisMax(1)
        self.getXAxis().setHasGridlines(False); # hides axes, ticks, etc.
        self.getXAxis().setAxisVisible(False); # (not needed for the pie)
        self.getXAxis().setTickCount(0)
        self.getYAxis().setHasGridlines(False)
        self.getYAxis().setAxisVisible(False)
        self.getYAxis().setTickCount(0)
        self.update()
    def onClickPrevSlice(self, event):
        """Select the previous slice (index-1), wrapping to the last one."""
        iCurve = self.getCurveIndex(self.getTouchedCurve())
        if (iCurve == 0) :
            iPrev = self.getNCurves()-1
        else:
            iPrev = (iCurve-1)
        self.touch(self.getCurve(iPrev).getPoint(0))
        self.theSliceEditor.copyChartPropertiesIntoForm(self.getTouchedPoint())
        self.update(TouchedPointUpdateOption.TOUCHED_POINT_LOCKED)
    def onClickNextSlice(self, event):
        """Select the next slice (index+1), wrapping back to the first one."""
        iCurve = self.getCurveIndex(self.getTouchedCurve())
        if (iCurve+1 == self.getNCurves()) :
            iNext = 0
        else:
            iNext = (iCurve+1)
        self.touch(self.getCurve(iNext).getPoint(0))
        self.theSliceEditor.copyChartPropertiesIntoForm(self.getTouchedPoint())
        self.update(TouchedPointUpdateOption.TOUCHED_POINT_LOCKED)
| apache-2.0 |
yvaucher/oerpscenario | features/steps/module_config.py | 7 | 2250 | from support.tools import puts, set_trace, model, assert_true, assert_less
import pprint
# Dead-code shim: never executed; presumably exists only so static analysis
# sees a ``given`` name (the real @given/@when/@then decorators appear to be
# provided by the BDD runner at collection time) -- TODO confirm.
if False:
    def given(str):
        return
@given(u'/I install the following languages?/')
def impl(ctx):
    """Install every language listed in the step table, skipping existing ones."""
    # record only the codes this step actually installs, so the matching
    # "then" step can verify them later
    ctx.data['lang'] = cfglang = set()
    for (lang,) in ctx.table:
        # language already present on the server: nothing to do
        if model('res.lang').search([('code', '=', lang)]):
            continue
        res = model('base.language.install').create({'lang': lang})
        model('base.language.install').lang_install([res.id])
        cfglang.add(lang)
@then('these languages should be available')
def impl(ctx):
    """Assert every language installed by the "given" step now exists."""
    for lang in ctx.data['lang']:
        assert_true(model('res.lang').search([('code', '=', lang)]))
@then('the language should be available')
def impl(ctx):
    # No-op placeholder: the step currently verifies nothing -- presumably
    # kept for scenario readability; confirm before relying on it.
    pass
@when('I update the following languages')
def impl(ctx):
    """Refresh translations for all installed modules, after checking that the
    requested language codes form a proper subset of the translatable ones."""
    translatable = model('res.lang').browse([('translatable', '=', True)])
    requested = set(lang for (lang,) in ctx.table)
    installed = model('ir.module.module').browse(['state = installed'])
    assert_true(requested)
    assert_less(requested, set(translatable.code))
    installed.button_update_translations()
@given('I update the module list')
def impl(ctx):
    """Refresh the server's list of available addon modules."""
    model('ir.module.module').update_list()
@given('I install the required modules with dependencies')
def impl(ctx):
    """Install the modules named in the step table, upgrading any that are
    already installed, and remember all touched module names in ctx.data."""
    client = ctx.client
    already = client.modules(installed=True)['installed']
    names = [row['name'] for row in ctx.table]
    # split the requested modules by current installation state
    to_upgrade = [n for n in names if n in already]
    to_install = [n for n in names if n not in already]
    client.upgrade(*to_upgrade)
    client.install(*to_install)
    ctx.data.setdefault('modules', set()).update(to_upgrade + to_install)
@given('I uninstall the following modules')
def impl(ctx):
    """Uninstall the table-listed modules that are currently installed."""
    client = ctx.client
    already = client.modules(installed=True)['installed']
    # silently skip names that are not installed anyway
    to_uninstall = [row['name'] for row in ctx.table if row['name'] in already]
    client.uninstall(*to_uninstall)
@then('my modules should have been installed and models reloaded')
def impl(ctx):
pass # XXX
@then('execute the setup')
def impl(ctx):
    """Run the setup wizard item located by a previous step."""
    # found_item is expected to have been set by an earlier lookup step
    assert ctx.found_item
    ctx.found_item.execute()
| gpl-2.0 |
maxvogel/NetworKit-mirror2 | Doc/docs/python/source/ext/numpydoc/compiler_unparse.py | 67 | 24740 | """ Turn compiler.ast structures back into executable python code.
The unparse method takes a compiler.ast tree and transforms it back into
valid python code. It is incomplete and currently only works for
import statements, function calls, function definitions, assignments, and
basic expressions.
Inspired by python-2.5-svn/Demo/parser/unparse.py
fixme: We may want to move to using _ast trees because the compiler for
them is about 6 times faster than compiler.compile.
"""
from __future__ import division, absolute_import, print_function
import sys
from compiler.ast import Const, Name, Tuple, Div, Mul, Sub, Add
if sys.version_info[0] >= 3:
from io import StringIO
else:
from StringIO import StringIO
def unparse(ast, single_line_functions=False):
    """Render a compiler.ast tree back into (left-stripped) source text."""
    buf = StringIO()
    UnparseCompilerAst(ast, buf, single_line_functions)
    return buf.getvalue().lstrip()
# Binary-operator precedence, keyed by str(node.__class__) of compiler.ast
# nodes.  Used by UnparseCompilerAst to decide when a sub-expression needs
# parentheses (lower value binds less tightly).
op_precedence = { 'compiler.ast.Power':3, 'compiler.ast.Mul':2, 'compiler.ast.Div':2,
                  'compiler.ast.Add':1, 'compiler.ast.Sub':1 }
class UnparseCompilerAst:
    """ Methods in this class recursively traverse an AST and
        output source code for the abstract syntax; original formatting
        is disregarded.
    """
    #########################################################################
    # object interface.
    #########################################################################
    def __init__(self, tree, file = sys.stdout, single_line_functions=False):
        """ Unparser(tree, file=sys.stdout) -> None.
            Print the source for tree to file.
        """
        self.f = file
        self._single_func = single_line_functions
        self._do_indent = True
        self._indent = 0
        # Unparsing happens entirely at construction time: the whole tree is
        # dispatched, written to `file`, and flushed before __init__ returns.
        self._dispatch(tree)
        self._write("\n")
        self.f.flush()
    #########################################################################
    # Unparser private interface.
    #########################################################################
    ### format, output, and dispatch methods ################################
    def _fill(self, text = ""):
        "Indent a piece of text, according to the current indentation level"
        if self._do_indent:
            self._write("\n"+" "*self._indent + text)
        else:
            self._write(text)
    def _write(self, text):
        "Append a piece of text to the current line."
        self.f.write(text)
    def _enter(self):
        "Print ':', and increase the indentation."
        self._write(": ")
        self._indent += 1
    def _leave(self):
        "Decrease the indentation level."
        self._indent -= 1
    def _dispatch(self, tree):
        "_dispatcher function, _dispatching tree type T to method _T."
        if isinstance(tree, list):
            for t in tree:
                self._dispatch(t)
            return
        # Dynamic dispatch: a node of class `Name` is handled by `_Name`.
        # An AttributeError here means the node type is not yet supported.
        meth = getattr(self, "_"+tree.__class__.__name__)
        if tree.__class__.__name__ == 'NoneType' and not self._do_indent:
            return
        meth(tree)
    #########################################################################
    # compiler.ast unparsing methods.
    #
    # There should be one method per concrete grammar type. They are
    # organized in alphabetical order.
    #########################################################################
    def _Add(self, t):
        self.__binary_op(t, '+')
    def _And(self, t):
        # Every operand is parenthesised to sidestep precedence issues.
        self._write(" (")
        for i, node in enumerate(t.nodes):
            self._dispatch(node)
            if i != len(t.nodes)-1:
                self._write(") and (")
        self._write(")")
    def _AssAttr(self, t):
        """ Handle assigning an attribute of an object
        """
        self._dispatch(t.expr)
        self._write('.'+t.attrname)
    def _Assign(self, t):
        """ Expression Assignment such as "a = 1".
            This only handles assignment in expressions. Keyword assignment
            is handled separately.
        """
        self._fill()
        # Chained assignment (a = b = expr) emits one " = " per target.
        for target in t.nodes:
            self._dispatch(target)
            self._write(" = ")
        self._dispatch(t.expr)
        if not self._do_indent:
            self._write('; ')
    def _AssName(self, t):
        """ Name on left hand side of expression.
            Treat just like a name on the right side of an expression.
        """
        self._Name(t)
    def _AssTuple(self, t):
        """ Tuple on left hand side of an expression.
        """
        # _write each elements, separated by a comma.
        for element in t.nodes[:-1]:
            self._dispatch(element)
            self._write(", ")
        # Handle the last one without writing comma
        last_element = t.nodes[-1]
        self._dispatch(last_element)
    def _AugAssign(self, t):
        """ +=,-=,*=,/=,**=, etc. operations
        """
        self._fill()
        self._dispatch(t.node)
        self._write(' '+t.op+' ')
        self._dispatch(t.expr)
        if not self._do_indent:
            self._write(';')
    def _Bitand(self, t):
        """ Bit and operation.
        """
        for i, node in enumerate(t.nodes):
            self._write("(")
            self._dispatch(node)
            self._write(")")
            if i != len(t.nodes)-1:
                self._write(" & ")
    def _Bitor(self, t):
        """ Bit or operation
        """
        for i, node in enumerate(t.nodes):
            self._write("(")
            self._dispatch(node)
            self._write(")")
            if i != len(t.nodes)-1:
                self._write(" | ")
    def _CallFunc(self, t):
        """ Function call.
        """
        self._dispatch(t.node)
        self._write("(")
        comma = False
        for e in t.args:
            if comma: self._write(", ")
            else: comma = True
            self._dispatch(e)
        if t.star_args:
            if comma: self._write(", ")
            else: comma = True
            self._write("*")
            self._dispatch(t.star_args)
        if t.dstar_args:
            if comma: self._write(", ")
            else: comma = True
            self._write("**")
            self._dispatch(t.dstar_args)
        self._write(")")
    def _Compare(self, t):
        self._dispatch(t.expr)
        for op, expr in t.ops:
            self._write(" " + op + " ")
            self._dispatch(expr)
    def _Const(self, t):
        """ A constant value such as an integer value, 3, or a string, "hello".
        """
        self._dispatch(t.value)
    def _Decorators(self, t):
        """ Handle function decorators (eg. @has_units)
        """
        for node in t.nodes:
            self._dispatch(node)
    def _Dict(self, t):
        self._write("{")
        for i, (k, v) in enumerate(t.items):
            self._dispatch(k)
            self._write(": ")
            self._dispatch(v)
            if i < len(t.items)-1:
                self._write(", ")
        self._write("}")
    def _Discard(self, t):
        """ Node for when return value is ignored such as in "foo(a)".
        """
        self._fill()
        self._dispatch(t.expr)
    def _Div(self, t):
        self.__binary_op(t, '/')
    def _Ellipsis(self, t):
        self._write("...")
    def _From(self, t):
        """ Handle "from xyz import foo, bar as baz".
        """
        # fixme: Are From and ImportFrom handled differently?
        self._fill("from ")
        self._write(t.modname)
        self._write(" import ")
        for i, (name,asname) in enumerate(t.names):
            if i != 0:
                self._write(", ")
            self._write(name)
            if asname is not None:
                self._write(" as "+asname)
    def _Function(self, t):
        """ Handle function definitions
        """
        if t.decorators is not None:
            self._fill("@")
            self._dispatch(t.decorators)
        self._fill("def "+t.name + "(")
        # Left-pad defaults with None so it lines up 1:1 with argnames;
        # only trailing arguments can carry defaults in compiler.ast.
        defaults = [None] * (len(t.argnames) - len(t.defaults)) + list(t.defaults)
        for i, arg in enumerate(zip(t.argnames, defaults)):
            self._write(arg[0])
            if arg[1] is not None:
                self._write('=')
                self._dispatch(arg[1])
            if i < len(t.argnames)-1:
                self._write(', ')
        self._write(")")
        if self._single_func:
            self._do_indent = False
        self._enter()
        self._dispatch(t.code)
        self._leave()
        self._do_indent = True
    def _Getattr(self, t):
        """ Handle getting an attribute of an object
        """
        # Arithmetic sub-expressions need parens: (a+b).attr, not a+b.attr.
        if isinstance(t.expr, (Div, Mul, Sub, Add)):
            self._write('(')
            self._dispatch(t.expr)
            self._write(')')
        else:
            self._dispatch(t.expr)
        self._write('.'+t.attrname)
    def _If(self, t):
        self._fill()
        for i, (compare,code) in enumerate(t.tests):
            if i == 0:
                self._write("if ")
            else:
                self._write("elif ")
            self._dispatch(compare)
            self._enter()
            self._fill()
            self._dispatch(code)
            self._leave()
            self._write("\n")
        if t.else_ is not None:
            self._write("else")
            self._enter()
            self._fill()
            self._dispatch(t.else_)
            self._leave()
            self._write("\n")
    def _IfExp(self, t):
        # Conditional expression: <then> if <test> else (<else>).
        self._dispatch(t.then)
        self._write(" if ")
        self._dispatch(t.test)
        if t.else_ is not None:
            self._write(" else (")
            self._dispatch(t.else_)
            self._write(")")
    def _Import(self, t):
        """ Handle "import xyz.foo".
        """
        self._fill("import ")
        for i, (name,asname) in enumerate(t.names):
            if i != 0:
                self._write(", ")
            self._write(name)
            if asname is not None:
                self._write(" as "+asname)
    def _Keyword(self, t):
        """ Keyword value assignment within function calls and definitions.
        """
        self._write(t.name)
        self._write("=")
        self._dispatch(t.expr)
    def _List(self, t):
        self._write("[")
        for i,node in enumerate(t.nodes):
            self._dispatch(node)
            if i < len(t.nodes)-1:
                self._write(", ")
        self._write("]")
    def _Module(self, t):
        if t.doc is not None:
            self._dispatch(t.doc)
        self._dispatch(t.node)
    def _Mul(self, t):
        self.__binary_op(t, '*')
    def _Name(self, t):
        self._write(t.name)
    def _NoneType(self, t):
        self._write("None")
    def _Not(self, t):
        self._write('not (')
        self._dispatch(t.expr)
        self._write(')')
    def _Or(self, t):
        self._write(" (")
        for i, node in enumerate(t.nodes):
            self._dispatch(node)
            if i != len(t.nodes)-1:
                self._write(") or (")
        self._write(")")
    def _Pass(self, t):
        self._write("pass\n")
    def _Printnl(self, t):
        # Python 2 `print` statement, including the `print >> dest` form.
        self._fill("print ")
        if t.dest:
            self._write(">> ")
            self._dispatch(t.dest)
            self._write(", ")
        comma = False
        for node in t.nodes:
            if comma: self._write(', ')
            else: comma = True
            self._dispatch(node)
    def _Power(self, t):
        self.__binary_op(t, '**')
    def _Return(self, t):
        self._fill("return ")
        if t.value:
            if isinstance(t.value, Tuple):
                text = ', '.join([ name.name for name in t.value.asList() ])
                self._write(text)
            else:
                self._dispatch(t.value)
            if not self._do_indent:
                self._write('; ')
    def _Slice(self, t):
        self._dispatch(t.expr)
        self._write("[")
        if t.lower:
            self._dispatch(t.lower)
        self._write(":")
        if t.upper:
            self._dispatch(t.upper)
        #if t.step:
        #    self._write(":")
        #    self._dispatch(t.step)
        self._write("]")
    def _Sliceobj(self, t):
        # Extended slice a[i:j:k]; omitted parts are Const(None) and skipped.
        for i, node in enumerate(t.nodes):
            if i != 0:
                self._write(":")
            if not (isinstance(node, Const) and node.value is None):
                self._dispatch(node)
    def _Stmt(self, tree):
        for node in tree.nodes:
            self._dispatch(node)
    def _Sub(self, t):
        self.__binary_op(t, '-')
    def _Subscript(self, t):
        self._dispatch(t.expr)
        self._write("[")
        for i, value in enumerate(t.subs):
            if i != 0:
                self._write(",")
            self._dispatch(value)
        self._write("]")
    def _TryExcept(self, t):
        self._fill("try")
        self._enter()
        self._dispatch(t.body)
        self._leave()
        # handler is a (exc_type, target, body) triple; type/target may be None.
        for handler in t.handlers:
            self._fill('except ')
            self._dispatch(handler[0])
            if handler[1] is not None:
                self._write(', ')
                self._dispatch(handler[1])
            self._enter()
            self._dispatch(handler[2])
            self._leave()
        if t.else_:
            self._fill("else")
            self._enter()
            self._dispatch(t.else_)
            self._leave()
    def _Tuple(self, t):
        if not t.nodes:
            # Empty tuple.
            self._write("()")
        else:
            self._write("(")
            # _write each elements, separated by a comma.
            for element in t.nodes[:-1]:
                self._dispatch(element)
                self._write(", ")
            # Handle the last one without writing comma
            last_element = t.nodes[-1]
            self._dispatch(last_element)
            self._write(")")
    def _UnaryAdd(self, t):
        self._write("+")
        self._dispatch(t.expr)
    def _UnarySub(self, t):
        self._write("-")
        self._dispatch(t.expr)
    def _With(self, t):
        self._fill('with ')
        self._dispatch(t.expr)
        if t.vars:
            self._write(' as ')
            # NOTE(review): t.vars.name looks like a plain str here, so this
            # dispatches to _str and writes repr() — i.e. the target would be
            # emitted quoted ('x' instead of x).  Pre-existing behaviour;
            # confirm against compiler.ast.With before changing.
            self._dispatch(t.vars.name)
        self._enter()
        self._dispatch(t.body)
        self._leave()
        self._write('\n')
    def _int(self, t):
        self._write(repr(t))
    def __binary_op(self, t, symbol):
        # Parenthesise either operand whose operator binds less tightly than
        # this node's, per the module-level op_precedence table.
        # Check if parenthesis are needed on left side and then dispatch
        has_paren = False
        left_class = str(t.left.__class__)
        if (left_class in op_precedence.keys() and
            op_precedence[left_class] < op_precedence[str(t.__class__)]):
            has_paren = True
        if has_paren:
            self._write('(')
        self._dispatch(t.left)
        if has_paren:
            self._write(')')
        # Write the appropriate symbol for operator
        self._write(symbol)
        # Check if parenthesis are needed on the right side and then dispatch
        has_paren = False
        right_class = str(t.right.__class__)
        if (right_class in op_precedence.keys() and
            op_precedence[right_class] < op_precedence[str(t.__class__)]):
            has_paren = True
        if has_paren:
            self._write('(')
        self._dispatch(t.right)
        if has_paren:
            self._write(')')
    def _float(self, t):
        # if t is 0.1, str(t)->'0.1' while repr(t)->'0.1000000000001'
        # We prefer str here.
        self._write(str(t))
    def _str(self, t):
        self._write(repr(t))
    def _tuple(self, t):
        self._write(str(t))
#########################################################################
# These are the methods from the _ast modules unparse.
#
# As our needs to handle more advanced code increase, we may want to
# modify some of the methods below so that they work for compiler.ast.
#########################################################################
# # stmt
# def _Expr(self, tree):
# self._fill()
# self._dispatch(tree.value)
#
# def _Import(self, t):
# self._fill("import ")
# first = True
# for a in t.names:
# if first:
# first = False
# else:
# self._write(", ")
# self._write(a.name)
# if a.asname:
# self._write(" as "+a.asname)
#
## def _ImportFrom(self, t):
## self._fill("from ")
## self._write(t.module)
## self._write(" import ")
## for i, a in enumerate(t.names):
## if i == 0:
## self._write(", ")
## self._write(a.name)
## if a.asname:
## self._write(" as "+a.asname)
## # XXX(jpe) what is level for?
##
#
# def _Break(self, t):
# self._fill("break")
#
# def _Continue(self, t):
# self._fill("continue")
#
# def _Delete(self, t):
# self._fill("del ")
# self._dispatch(t.targets)
#
# def _Assert(self, t):
# self._fill("assert ")
# self._dispatch(t.test)
# if t.msg:
# self._write(", ")
# self._dispatch(t.msg)
#
# def _Exec(self, t):
# self._fill("exec ")
# self._dispatch(t.body)
# if t.globals:
# self._write(" in ")
# self._dispatch(t.globals)
# if t.locals:
# self._write(", ")
# self._dispatch(t.locals)
#
# def _Print(self, t):
# self._fill("print ")
# do_comma = False
# if t.dest:
# self._write(">>")
# self._dispatch(t.dest)
# do_comma = True
# for e in t.values:
# if do_comma:self._write(", ")
# else:do_comma=True
# self._dispatch(e)
# if not t.nl:
# self._write(",")
#
# def _Global(self, t):
# self._fill("global")
# for i, n in enumerate(t.names):
# if i != 0:
# self._write(",")
# self._write(" " + n)
#
# def _Yield(self, t):
# self._fill("yield")
# if t.value:
# self._write(" (")
# self._dispatch(t.value)
# self._write(")")
#
# def _Raise(self, t):
# self._fill('raise ')
# if t.type:
# self._dispatch(t.type)
# if t.inst:
# self._write(", ")
# self._dispatch(t.inst)
# if t.tback:
# self._write(", ")
# self._dispatch(t.tback)
#
#
# def _TryFinally(self, t):
# self._fill("try")
# self._enter()
# self._dispatch(t.body)
# self._leave()
#
# self._fill("finally")
# self._enter()
# self._dispatch(t.finalbody)
# self._leave()
#
# def _excepthandler(self, t):
# self._fill("except ")
# if t.type:
# self._dispatch(t.type)
# if t.name:
# self._write(", ")
# self._dispatch(t.name)
# self._enter()
# self._dispatch(t.body)
# self._leave()
#
# def _ClassDef(self, t):
# self._write("\n")
# self._fill("class "+t.name)
# if t.bases:
# self._write("(")
# for a in t.bases:
# self._dispatch(a)
# self._write(", ")
# self._write(")")
# self._enter()
# self._dispatch(t.body)
# self._leave()
#
# def _FunctionDef(self, t):
# self._write("\n")
# for deco in t.decorators:
# self._fill("@")
# self._dispatch(deco)
# self._fill("def "+t.name + "(")
# self._dispatch(t.args)
# self._write(")")
# self._enter()
# self._dispatch(t.body)
# self._leave()
#
# def _For(self, t):
# self._fill("for ")
# self._dispatch(t.target)
# self._write(" in ")
# self._dispatch(t.iter)
# self._enter()
# self._dispatch(t.body)
# self._leave()
# if t.orelse:
# self._fill("else")
# self._enter()
# self._dispatch(t.orelse)
# self._leave
#
# def _While(self, t):
# self._fill("while ")
# self._dispatch(t.test)
# self._enter()
# self._dispatch(t.body)
# self._leave()
# if t.orelse:
# self._fill("else")
# self._enter()
# self._dispatch(t.orelse)
# self._leave
#
# # expr
# def _Str(self, tree):
# self._write(repr(tree.s))
##
# def _Repr(self, t):
# self._write("`")
# self._dispatch(t.value)
# self._write("`")
#
# def _Num(self, t):
# self._write(repr(t.n))
#
# def _ListComp(self, t):
# self._write("[")
# self._dispatch(t.elt)
# for gen in t.generators:
# self._dispatch(gen)
# self._write("]")
#
# def _GeneratorExp(self, t):
# self._write("(")
# self._dispatch(t.elt)
# for gen in t.generators:
# self._dispatch(gen)
# self._write(")")
#
# def _comprehension(self, t):
# self._write(" for ")
# self._dispatch(t.target)
# self._write(" in ")
# self._dispatch(t.iter)
# for if_clause in t.ifs:
# self._write(" if ")
# self._dispatch(if_clause)
#
# def _IfExp(self, t):
# self._dispatch(t.body)
# self._write(" if ")
# self._dispatch(t.test)
# if t.orelse:
# self._write(" else ")
# self._dispatch(t.orelse)
#
# unop = {"Invert":"~", "Not": "not", "UAdd":"+", "USub":"-"}
# def _UnaryOp(self, t):
# self._write(self.unop[t.op.__class__.__name__])
# self._write("(")
# self._dispatch(t.operand)
# self._write(")")
#
# binop = { "Add":"+", "Sub":"-", "Mult":"*", "Div":"/", "Mod":"%",
# "LShift":">>", "RShift":"<<", "BitOr":"|", "BitXor":"^", "BitAnd":"&",
# "FloorDiv":"//", "Pow": "**"}
# def _BinOp(self, t):
# self._write("(")
# self._dispatch(t.left)
# self._write(")" + self.binop[t.op.__class__.__name__] + "(")
# self._dispatch(t.right)
# self._write(")")
#
# boolops = {_ast.And: 'and', _ast.Or: 'or'}
# def _BoolOp(self, t):
# self._write("(")
# self._dispatch(t.values[0])
# for v in t.values[1:]:
# self._write(" %s " % self.boolops[t.op.__class__])
# self._dispatch(v)
# self._write(")")
#
# def _Attribute(self,t):
# self._dispatch(t.value)
# self._write(".")
# self._write(t.attr)
#
## def _Call(self, t):
## self._dispatch(t.func)
## self._write("(")
## comma = False
## for e in t.args:
## if comma: self._write(", ")
## else: comma = True
## self._dispatch(e)
## for e in t.keywords:
## if comma: self._write(", ")
## else: comma = True
## self._dispatch(e)
## if t.starargs:
## if comma: self._write(", ")
## else: comma = True
## self._write("*")
## self._dispatch(t.starargs)
## if t.kwargs:
## if comma: self._write(", ")
## else: comma = True
## self._write("**")
## self._dispatch(t.kwargs)
## self._write(")")
#
# # slice
# def _Index(self, t):
# self._dispatch(t.value)
#
# def _ExtSlice(self, t):
# for i, d in enumerate(t.dims):
# if i != 0:
# self._write(': ')
# self._dispatch(d)
#
# # others
# def _arguments(self, t):
# first = True
# nonDef = len(t.args)-len(t.defaults)
# for a in t.args[0:nonDef]:
# if first:first = False
# else: self._write(", ")
# self._dispatch(a)
# for a,d in zip(t.args[nonDef:], t.defaults):
# if first:first = False
# else: self._write(", ")
# self._dispatch(a),
# self._write("=")
# self._dispatch(d)
# if t.vararg:
# if first:first = False
# else: self._write(", ")
# self._write("*"+t.vararg)
# if t.kwarg:
# if first:first = False
# else: self._write(", ")
# self._write("**"+t.kwarg)
#
## def _keyword(self, t):
## self._write(t.arg)
## self._write("=")
## self._dispatch(t.value)
#
# def _Lambda(self, t):
# self._write("lambda ")
# self._dispatch(t.args)
# self._write(": ")
# self._dispatch(t.body)
| mit |
campbe13/openhatch | vendor/packages/scrapy/scrapy/commands/startproject.py | 16 | 1767 | import sys
import string
import re
import shutil
from os.path import join, exists
import scrapy
from scrapy.command import ScrapyCommand
from scrapy.utils.template import render_templatefile, string_camelcase
from scrapy.utils.py26 import ignore_patterns, copytree
from scrapy.exceptions import UsageError
# Location of the bundled project skeleton shipped inside the scrapy package.
TEMPLATES_PATH = join(scrapy.__path__[0], 'templates', 'project')
# Template files rendered through string.Template; '${project_name}' path
# segments are substituted with the new project's name at creation time.
TEMPLATES_TO_RENDER = (
    ('scrapy.cfg',),
    ('${project_name}', 'settings.py.tmpl'),
    ('${project_name}', 'items.py.tmpl'),
    ('${project_name}', 'pipelines.py.tmpl'),
)
# Files never copied into a fresh project.
IGNORE = ignore_patterns('*.pyc', '.svn')
class Command(ScrapyCommand):
    """`scrapy startproject <name>`: create a new project from the bundled
    skeleton.  (Python 2 source — uses print statements.)"""
    requires_project = False
    def syntax(self):
        return "<project_name>"
    def short_desc(self):
        return "Create new project"
    def run(self, args, opts):
        # Exactly one positional argument (the project name) is required.
        if len(args) != 1:
            raise UsageError()
        project_name = args[0]
        # The name becomes a Python package, so it must be an identifier.
        if not re.search(r'^[_a-zA-Z]\w*$', project_name):
            print 'Error: Project names must begin with a letter and contain only\n' \
                  'letters, numbers and underscores'
            sys.exit(1)
        elif exists(project_name):
            print "Error: directory %r already exists" % project_name
            sys.exit(1)
        # Copy the skeleton, then render each template in place.
        moduletpl = join(TEMPLATES_PATH, 'module')
        copytree(moduletpl, join(project_name, project_name), ignore=IGNORE)
        shutil.copy(join(TEMPLATES_PATH, 'scrapy.cfg'), project_name)
        for paths in TEMPLATES_TO_RENDER:
            path = join(*paths)
            tplfile = join(project_name,
                string.Template(path).substitute(project_name=project_name))
            render_templatefile(tplfile, project_name=project_name,
                ProjectName=string_camelcase(project_name))
| agpl-3.0 |
pwong-mapr/private-hue | desktop/core/ext-py/Pygments-1.3.1/pygments/lexers/asm.py | 72 | 11831 | # -*- coding: utf-8 -*-
"""
pygments.lexers.asm
~~~~~~~~~~~~~~~~~~~
Lexers for assembly languages.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, using, DelegatingLexer
from pygments.lexers.compiled import DLexer, CppLexer, CLexer
from pygments.token import *
# Public API of this module: the lexer classes exported to pygments.
__all__ = ['GasLexer', 'ObjdumpLexer','DObjdumpLexer', 'CppObjdumpLexer',
           'CObjdumpLexer', 'LlvmLexer', 'NasmLexer']
class GasLexer(RegexLexer):
    """
    For Gas (AT&T) assembly code.
    """
    name = 'GAS'
    aliases = ['gas']
    filenames = ['*.s', '*.S']
    mimetypes = ['text/x-gas']
    # Reusable regex fragments for the token tables below.
    string = r'"(\\"|[^"])*"'
    char = r'[a-zA-Z$._0-9@]'
    identifier = r'(?:[a-zA-Z$_]' + char + '*|\.' + char + '+)'
    number = r'(?:0[xX][a-zA-Z0-9]+|\d+)'
    tokens = {
        'root': [
            include('whitespace'),
            (identifier + ':', Name.Label),
            # Assembler directives (.text, .globl, ...) switch state.
            (r'\.' + identifier, Name.Attribute, 'directive-args'),
            (r'lock|rep(n?z)?|data\d+', Name.Attribute),
            (identifier, Name.Function, 'instruction-args'),
            (r'[\r\n]+', Text)
        ],
        'directive-args': [
            (identifier, Name.Constant),
            (string, String),
            ('@' + identifier, Name.Attribute),
            (number, Number.Integer),
            (r'[\r\n]+', Text, '#pop'),
            (r'#.*?$', Comment, '#pop'),
            include('punctuation'),
            include('whitespace')
        ],
        'instruction-args': [
            # For objdump-disassembled code, shouldn't occur in
            # actual assembler input
            ('([a-z0-9]+)( )(<)('+identifier+')(>)',
                bygroups(Number.Hex, Text, Punctuation, Name.Constant,
                         Punctuation)),
            ('([a-z0-9]+)( )(<)('+identifier+')([-+])('+number+')(>)',
                bygroups(Number.Hex, Text, Punctuation, Name.Constant,
                         Punctuation, Number.Integer, Punctuation)),
            # Address constants
            (identifier, Name.Constant),
            (number, Number.Integer),
            # Registers
            ('%' + identifier, Name.Variable),
            # Numeric constants
            ('$'+number, Number.Integer),
            (r'[\r\n]+', Text, '#pop'),
            (r'#.*?$', Comment, '#pop'),
            include('punctuation'),
            include('whitespace')
        ],
        'whitespace': [
            (r'\n', Text),
            (r'\s+', Text),
            (r'#.*?\n', Comment)
        ],
        'punctuation': [
            (r'[-*,.():]+', Punctuation)
        ]
    }
    def analyse_text(text):
        # Heuristic: a leading directive (".section", ".globl", ...) at any
        # line start marks the text as GAS source.
        return re.match(r'^\.\w+', text, re.M)
class ObjdumpLexer(RegexLexer):
    """
    For the output of 'objdump -dr'
    """
    name = 'objdump'
    aliases = ['objdump']
    filenames = ['*.objdump']
    mimetypes = ['text/x-objdump']
    # NOTE(review): named "hex" but matches all alphanumerics, not only
    # hex digits — pre-existing; confirm before narrowing to [0-9a-fA-F].
    hex = r'[0-9A-Za-z]'
    tokens = {
        'root': [
            # File name & format:
            ('(.*?)(:)( +file format )(.*?)$',
                bygroups(Name.Label, Punctuation, Text, String)),
            # Section header
            ('(Disassembly of section )(.*?)(:)$',
                bygroups(Text, Name.Label, Punctuation)),
            # Function labels
            # (With offset)
            ('('+hex+'+)( )(<)(.*?)([-+])(0[xX][A-Za-z0-9]+)(>:)$',
                bygroups(Number.Hex, Text, Punctuation, Name.Function,
                         Punctuation, Number.Hex, Punctuation)),
            # (Without offset)
            ('('+hex+'+)( )(<)(.*?)(>:)$',
                bygroups(Number.Hex, Text, Punctuation, Name.Function,
                         Punctuation)),
            # Code line with disassembled instructions
            ('( *)('+hex+r'+:)(\t)((?:'+hex+hex+' )+)( *\t)([a-zA-Z].*?)$',
                bygroups(Text, Name.Label, Text, Number.Hex, Text,
                         using(GasLexer))),
            # Code line with ascii
            ('( *)('+hex+r'+:)(\t)((?:'+hex+hex+' )+)( *)(.*?)$',
                bygroups(Text, Name.Label, Text, Number.Hex, Text, String)),
            # Continued code line, only raw opcodes without disassembled
            # instruction
            ('( *)('+hex+r'+:)(\t)((?:'+hex+hex+' )+)$',
                bygroups(Text, Name.Label, Text, Number.Hex)),
            # Skipped a few bytes
            ('\t\.\.\.$', Text),
            # Relocation line
            # (With offset)
            ('(\t\t\t)('+hex+'+:)( )([^\t]+)(\t)(.*?)([-+])(0x' + hex + '+)$',
                bygroups(Text, Name.Label, Text, Name.Property, Text,
                         Name.Constant, Punctuation, Number.Hex)),
            # (Without offset)
            ('(\t\t\t)('+hex+'+:)( )([^\t]+)(\t)(.*?)$',
                bygroups(Text, Name.Label, Text, Name.Property, Text,
                         Name.Constant)),
            ('[^\n]+\n', Other)
        ]
    }
class DObjdumpLexer(DelegatingLexer):
    """
    For the output of 'objdump -Sr on compiled D files'
    """
    name = 'd-objdump'
    aliases = ['d-objdump']
    filenames = ['*.d-objdump']
    mimetypes = ['text/x-d-objdump']
    def __init__(self, **options):
        # Delegate: D source lines go to DLexer, the rest to ObjdumpLexer.
        super(DObjdumpLexer, self).__init__(DLexer, ObjdumpLexer, **options)
class CppObjdumpLexer(DelegatingLexer):
    """
    For the output of 'objdump -Sr on compiled C++ files'
    """
    name = 'cpp-objdump'
    aliases = ['cpp-objdump', 'c++-objdumb', 'cxx-objdump']
    filenames = ['*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump']
    mimetypes = ['text/x-cpp-objdump']
    def __init__(self, **options):
        # Delegate: C++ source lines go to CppLexer, the rest to ObjdumpLexer.
        super(CppObjdumpLexer, self).__init__(CppLexer, ObjdumpLexer, **options)
class CObjdumpLexer(DelegatingLexer):
    """
    For the output of 'objdump -Sr on compiled C files'
    """
    name = 'c-objdump'
    aliases = ['c-objdump']
    filenames = ['*.c-objdump']
    mimetypes = ['text/x-c-objdump']
    def __init__(self, **options):
        # Delegate: C source lines go to CLexer, the rest to ObjdumpLexer.
        super(CObjdumpLexer, self).__init__(CLexer, ObjdumpLexer, **options)
class LlvmLexer(RegexLexer):
    """
    For LLVM assembly code.
    """
    name = 'LLVM'
    aliases = ['llvm']
    filenames = ['*.ll']
    mimetypes = ['text/x-llvm']
    # Reusable regex fragments for the token tables below.
    string = r'"[^"]*?"'
    identifier = r'([-a-zA-Z$._][-a-zA-Z$._0-9]*|' + string + ')'
    tokens = {
        'root': [
            include('whitespace'),
            # Before keywords, because keywords are valid label names :(...
            (r'^\s*' + identifier + '\s*:', Name.Label),
            include('keyword'),
            (r'%' + identifier, Name.Variable),#Name.Identifier.Local),
            (r'@' + identifier, Name.Variable.Global),#Name.Identifier.Global),
            (r'%\d+', Name.Variable.Anonymous),#Name.Identifier.Anonymous),
            (r'@\d+', Name.Variable.Global),#Name.Identifier.Anonymous),
            (r'!' + identifier, Name.Variable),
            (r'!\d+', Name.Variable.Anonymous),
            (r'c?' + string, String),
            (r'0[xX][a-fA-F0-9]+', Number),
            (r'-?\d+(?:[.]\d+)?(?:[eE][-+]?\d+(?:[.]\d+)?)?', Number),
            (r'[=<>{}\[\]()*.,!]|x\b', Punctuation)
        ],
        'whitespace': [
            (r'(\n|\s)+', Text),
            (r';.*?\n', Comment)
        ],
        'keyword': [
            # Regular keywords
            (r'(begin|end'
             r'|true|false'
             r'|declare|define'
             r'|global|constant'
             r'|private|linker_private|internal|available_externally|linkonce'
             r'|linkonce_odr|weak|weak_odr|appending|dllimport|dllexport'
             r'|common|default|hidden|protected|extern_weak|external'
             r'|thread_local|zeroinitializer|undef|null|to|tail|target|triple'
             r'|deplibs|datalayout|volatile|nuw|nsw|exact|inbounds|align'
             r'|addrspace|section|alias|module|asm|sideeffect|gc|dbg'
             r'|ccc|fastcc|coldcc|x86_stdcallcc|x86_fastcallcc|arm_apcscc'
             r'|arm_aapcscc|arm_aapcs_vfpcc'
             r'|cc|c'
             r'|signext|zeroext|inreg|sret|nounwind|noreturn|noalias|nocapture'
             r'|byval|nest|readnone|readonly'
             r'|inlinehint|noinline|alwaysinline|optsize|ssp|sspreq|noredzone'
             r'|noimplicitfloat|naked'
             r'|type|opaque'
             r'|eq|ne|slt|sgt|sle'
             r'|sge|ult|ugt|ule|uge'
             r'|oeq|one|olt|ogt|ole'
             r'|oge|ord|uno|ueq|une'
             r'|x'
             # instructions
             r'|add|fadd|sub|fsub|mul|fmul|udiv|sdiv|fdiv|urem|srem|frem|shl'
             r'|lshr|ashr|and|or|xor|icmp|fcmp'
             r'|phi|call|trunc|zext|sext|fptrunc|fpext|uitofp|sitofp|fptoui'
             r'fptosi|inttoptr|ptrtoint|bitcast|select|va_arg|ret|br|switch'
             r'|invoke|unwind|unreachable'
             r'|malloc|alloca|free|load|store|getelementptr'
             r'|extractelement|insertelement|shufflevector|getresult'
             r'|extractvalue|insertvalue'
             r')\b', Keyword),
            # Types
            (r'void|float|double|x86_fp80|fp128|ppc_fp128|label|metadata',
             Keyword.Type),
            # Integer types
            (r'i[1-9]\d*', Keyword)
        ]
    }
class NasmLexer(RegexLexer):
    """
    For Nasm (Intel) assembly code.
    """
    name = 'NASM'
    aliases = ['nasm']
    filenames = ['*.asm', '*.ASM']
    mimetypes = ['text/x-nasm']
    # Reusable regex fragments: identifiers, the NASM numeric literal
    # spellings (hex/octal/binary/decimal/float), strings, and keywords.
    identifier = r'[a-zA-Z$._?][a-zA-Z0-9$._?#@~]*'
    hexn = r'(?:0[xX][0-9a-fA-F]+|$0[0-9a-fA-F]*|[0-9]+[0-9a-fA-F]*h)'
    octn = r'[0-7]+q'
    binn = r'[01]+b'
    decn = r'[0-9]+'
    floatn = decn + r'\.e?' + decn
    string = r'"(\\"|[^"])*"|' + r"'(\\'|[^'])*'"
    declkw = r'(?:res|d)[bwdqt]|times'
    register = (r'[a-d][lh]|e?[a-d]x|e?[sb]p|e?[sd]i|[c-gs]s|st[0-7]|'
                r'mm[0-7]|cr[0-4]|dr[0-367]|tr[3-7]')
    wordop = r'seg|wrt|strict'
    type = r'byte|[dq]?word'
    directives = (r'BITS|USE16|USE32|SECTION|SEGMENT|ABSOLUTE|EXTERN|GLOBAL|'
                  r'ORG|ALIGN|STRUC|ENDSTRUC|COMMON|CPU|GROUP|UPPERCASE|IMPORT|'
                  r'EXPORT|LIBRARY|MODULE')
    flags = re.IGNORECASE | re.MULTILINE
    tokens = {
        'root': [
            include('whitespace'),
            # Preprocessor lines (%define, %macro, ...) get their own state.
            (r'^\s*%', Comment.Preproc, 'preproc'),
            (identifier + ':', Name.Label),
            (r'(%s)(\s+)(equ)' % identifier,
                bygroups(Name.Constant, Keyword.Declaration, Keyword.Declaration),
                'instruction-args'),
            (directives, Keyword, 'instruction-args'),
            (declkw, Keyword.Declaration, 'instruction-args'),
            (identifier, Name.Function, 'instruction-args'),
            (r'[\r\n]+', Text)
        ],
        'instruction-args': [
            (string, String),
            (hexn, Number.Hex),
            (octn, Number.Oct),
            (binn, Number),
            (floatn, Number.Float),
            (decn, Number.Integer),
            include('punctuation'),
            (register, Name.Builtin),
            (identifier, Name.Variable),
            (r'[\r\n]+', Text, '#pop'),
            include('whitespace')
        ],
        'preproc': [
            (r'[^;\n]+', Comment.Preproc),
            (r';.*?\n', Comment.Single, '#pop'),
            (r'\n', Comment.Preproc, '#pop'),
        ],
        'whitespace': [
            (r'\n', Text),
            (r'[ \t]+', Text),
            (r';.*', Comment.Single)
        ],
        'punctuation': [
            (r'[,():\[\]]+', Punctuation),
            (r'[&|^<>+*/%~-]+', Operator),
            (r'[$]+', Keyword.Constant),
            (wordop, Operator.Word),
            (type, Keyword.Type)
        ],
    }
| apache-2.0 |
indexofire/mars | src/apps/entrez/views.py | 2 | 3401 | # -*- coding: utf-8 -*-
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.template import RequestContext
from django.views.decorators.csrf import csrf_exempt
from entrez.models import EntrezEntry, EntrezTerm
from entrez.forms import AddTermForm
from entrez.utils import get_date
def get_user_all_terms(request):
    # All EntrezTerm rows owned by the requesting user, with related
    # objects prefetched for template access.
    return EntrezTerm.objects.filter(owner=request.user).select_related()
def get_user_all_entries(request):
    # All EntrezEntry rows owned by the requesting user, with related
    # objects prefetched for template access.
    return EntrezEntry.objects.filter(owner=request.user).select_related()
@login_required()
def index(request):
    """Entrez dashboard: the user's entries, term sidebar, and add-term form."""
    context = {
        "objects": get_user_all_entries(request),
        "terms": get_user_all_terms(request),
        "form": AddTermForm(),
    }
    return render_to_response('entrez/entrez_index.html', context,
                              context_instance=RequestContext(request))
@login_required()
def term_list(request, pk):
    """List all entries collected under the term identified by ``pk``.

    TODO: ownership of ``pk`` is not verified, so any logged-in user can
    view another user's term (pre-existing behaviour, flagged for review).
    """
    current = EntrezTerm.objects.get(pk=pk)
    context = {
        "objects": EntrezEntry.objects.filter(term=current).select_related(),
        "terms": EntrezTerm.objects.filter(owner=request.user).select_related(),
        "form": AddTermForm(),
        "current_term": current,
    }
    return render_to_response('entrez/entrez_term_list.html', context,
                              context_instance=RequestContext(request))
@login_required()
def entry_detail(request, pk):
    """Render the detail page for one entrez entry, restricted to its owner.

    Non-owners receive an empty 403 (Forbidden) response.
    """
    tp = 'entrez/entrez_entry_detail.html'
    terms = EntrezTerm.objects.filter(owner=request.user).select_related()
    entry = EntrezEntry.objects.get(pk=pk)
    if entry.owner == request.user:
        ct = {
            "entry": entry,
            "terms": terms,
        }
        return render_to_response(tp, ct, context_instance=RequestContext(request))
    # Bug fix: the original did `return HttpResponse` — returning the class
    # object itself, which Django cannot serve.  Return a real response, and
    # use 403 since this branch means the entry belongs to someone else.
    return HttpResponse(status=403)
@csrf_exempt
def add_term(request):
    """Create an EntrezTerm for the current user from POSTed form data.

    Always redirects back to the entrez index.  The original returned
    ``None`` for GET requests and for invalid forms, which makes Django
    raise a 500 error; now every code path yields an HttpResponse.
    Invalid submissions are silently dropped (original behaviour had no
    error reporting either).
    """
    if request.method == 'POST':
        form = AddTermForm(request.POST)
        if form.is_valid():
            # create() already persists the row; no extra save() needed.
            EntrezTerm.objects.create(
                name=form.cleaned_data["name"],
                #slug=form.cleaned_data["slug"],
                db=form.cleaned_data["db"],
                period=form.cleaned_data["period"],
                owner=request.user,
                term=form.cleaned_data["term"],
                creation_date=get_date(),
                lastedit_date=get_date(),
            )
    return HttpResponseRedirect(reverse('entrez-index', ))
@csrf_exempt
def remove_term(request):
    """Soft-delete a term by flagging ``status = False`` (AJAX endpoint).

    Bug fix: the original guarded the body with ``if form.is_valid():``
    although no ``form`` was ever defined, so every POST raised a
    NameError.  The dead check is removed; the term id still comes
    straight from POST data and missing ids yield a 404.
    """
    if request.method == 'POST':
        term = get_object_or_404(EntrezTerm, pk=request.POST.get('term_id'))
        term.status = False
        term.save()
    return HttpResponse()
@csrf_exempt
def mark_as_read(request):
    """Flag one feed entry as read (AJAX endpoint); returns an empty 200."""
    if request.method == "POST":
        item = get_object_or_404(EntrezEntry, pk=request.POST.get('feed_item_id'))
        item.read = True
        item.save()
    return HttpResponse()
@csrf_exempt
def mark_as_unread(request):
    """Flag one feed entry as unread (AJAX endpoint); returns an empty 200."""
    if request.method == "POST":
        item = get_object_or_404(EntrezEntry, pk=request.POST.get('feed_item_id'))
        item.read = False
        item.save()
    return HttpResponse()
| bsd-3-clause |
chengdh/openerp-ktv | openerp/addons/report_webkit_sample/report/__init__.py | 19 | 1485 | #-*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2010 Camptocamp SA (http://www.camptocamp.com)
# All Right Reserved
#
# Author : Nicolas Bessi (Camptocamp)
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import report_webkit_html
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Letractively/kay-framework | kay/registration/views.py | 10 | 2075 | # -*- coding: utf-8 -*-
"""
Kay authentication views.
:Copyright: (c) 2009 Takashi Matsuo <tmatsuo@candit.jp> All rights reserved.
:license: BSD, see LICENSE for more details.
"""
from werkzeug import (
unescape, redirect, Response,
)
from kay.i18n import lazy_gettext as _
from kay.cache import NoCacheMixin
from kay.cache.decorators import no_cache
from kay.handlers import BaseHandler
from kay.utils import (
render_to_response, url_for
)
from kay.conf import settings
from kay.registration.forms import RegistrationForm
from kay.registration.models import RegistrationProfile
class ActivateHandler(BaseHandler, NoCacheMixin):
    """Handler that activates a pending registration from its activation key."""

    def __init__(self, template_name='registration/activate.html',
                 extra_context=None):
        self.template_name = template_name
        self.extra_context = extra_context or {}

    def get(self, activation_key):
        account = RegistrationProfile.activate_user(activation_key)
        context = {'account': account,
                   'expiration_duration': settings.ACCOUNT_ACTIVATION_DURATION}
        for key, value in self.extra_context.items():
            # Bug fix: the old `callable(value) and value() or value` idiom
            # put the callable itself into the context whenever value()
            # returned a falsy result (0, '', None, ...).
            context[key] = value() if callable(value) else value
        return render_to_response(self.template_name, context)
class RegisterHandler(BaseHandler, NoCacheMixin):
    """Handler that presents and processes the user registration form."""

    def __init__(self, next_url=None, form_cls=None,
                 template_name='registration/registration_form.html',
                 extra_context=None):
        self.next_url = next_url or url_for('registration/registration_complete')
        self.form_cls = form_cls or RegistrationForm
        self.template_name = template_name
        self.extra_context = extra_context or {}
        self.form = self.form_cls()

    def get(self):
        # Render the (possibly invalid) form plus any extra context entries.
        context = {'form': self.form.as_widget()}
        context.update(self.extra_context)
        return render_to_response(self.template_name, context)

    def post(self):
        # Guard clause: re-render the form with errors when validation fails.
        if not self.form.validate(self.request.form):
            return self.get()
        self.form.save()
        return redirect(self.next_url)
@no_cache
def registration_complete(request):
    """Render the static 'registration complete' page (never cached)."""
    return render_to_response('registration/registration_complete.html')
| bsd-3-clause |
hydratk/hydratk-lib-network | src/hydratk/lib/network/soap/client.py | 1 | 11609 | # -*- coding: utf-8 -*-
"""Generic SOAP client
.. module:: network.soap.client
:platform: Unix
:synopsis: Generic SOAP client
.. moduleauthor:: Petr Rašek <bowman@hydratk.org>
"""
"""
Events:
-------
soap_before_load_wsdl
soap_after_load_wsdl
soap_before_request
soap_after_request
"""
from hydratk.core.masterhead import MasterHead
from hydratk.core import event
from suds import client, WebFault, MethodNotFound
from suds.transport import Reply, TransportError
from suds.transport.https import HttpAuthenticated, WindowsHttpAuthenticated
from suds.cache import NoCache
from suds.wsse import UsernameToken
from hashlib import sha1
from base64 import encodestring
from datetime import datetime
from requests import post
from lxml.etree import Element, SubElement, fromstring, tostring, XMLSyntaxError
from logging import getLogger, StreamHandler, CRITICAL, DEBUG
from sys import stderr
try:
from urllib2 import URLError
except ImportError:
from urllib.error import URLError
getLogger('suds.client').setLevel(CRITICAL)
class SOAPClient(object):
    """Class SOAPClient

    Thin wrapper around a suds client that integrates with the hydratk
    event system: every WSDL load and request fires before/after events
    that listeners can observe or rewrite.
    """

    # Class-level defaults; real values are set in __init__/load_wsdl.
    _mh = None        # MasterHead singleton (hydratk core)
    _client = None    # underlying suds client
    _wsdl = None      # parsed WSDL object from suds
    _url = None
    _proxies = None
    _location = None  # 'remote' or 'local'
    _user = None
    _passw = None
    _cert = None
    _endpoint = None
    _headers = None
    _verbose = None

    def __init__(self, verbose=False):
        """Class constructor

        Called when the object is initialized

        Args:
           verbose (bool): verbose mode

        """

        self._mh = MasterHead.get_head()
        self._mh.find_module('hydratk.lib.network.soap.client', None)

        self._verbose = verbose
        if (self._verbose):
            # Route suds HTTP transport debug output to stderr.
            handler = StreamHandler(stderr)
            logger = getLogger('suds.transport.http')
            logger.setLevel(DEBUG), handler.setLevel(DEBUG)
            logger.addHandler(handler)

    @property
    def client(self):
        """ SOAP client property getter """
        return self._client

    @property
    def wsdl(self):
        """ WSDL property getter """
        return self._wsdl

    @property
    def url(self):
        """ URL property getter """
        return self._url

    @property
    def proxies(self):
        """ proxies property getter """
        return self._proxies

    @property
    def location(self):
        """ WSDL location property getter """
        return self._location

    @property
    def user(self):
        """ username property getter """
        return self._user

    @property
    def passw(self):
        """ user password property getter """
        return self._passw

    @property
    def cert(self):
        """ cert property getter """
        return self._cert

    @property
    def endpoint(self):
        """ service endpoint property getter """
        return self._endpoint

    @property
    def headers(self):
        """ HTTP headers property getter """
        return self._headers

    @property
    def verbose(self):
        """ verbose mode property getter """
        return self._verbose

    def load_wsdl(self, url, proxies=None, location='remote', user=None, passw=None, auth='Basic', cert=None,
                  endpoint=None, headers=None, use_cache=True, timeout=10):
        """Method loads wsdl

        Args:
           url (str): WSDL URL, URL for remote, file path for local
           proxies (dict): HTTP proxies {http: url, https: url}
           location (str): WSDL location, remote|local
           user (str): username
           passw (str): password
           auth (str): HTTP authentication, Basic|NTLM
           cert (obj): str (path to cert.perm path), tuple (path to cert.pem path, path to key.pem path)
           endpoint (str): service endpoint, default endpoint from WSDL
           headers (dict): HTTP headers
           use_cache (bool): load WSDL from cache
           timeout (int): timeout

        Returns:
           bool: result

        Raises:
           event: soap_before_load_wsdl
           event: soap_after_load_wsdl

        """

        try:
            self._mh.demsg('htk_on_debug_info', self._mh._trn.msg('htk_soap_loading_wsdl', url,
                           user, passw, endpoint, headers), self._mh.fromhere())

            # Fire pre-event; listeners may rewrite any of the arguments.
            ev = event.Event('soap_before_load_wsdl', url, proxies, location,
                             user, passw, auth, endpoint, headers, use_cache, timeout)
            if (self._mh.fire_event(ev) > 0):
                url = ev.argv(0)
                proxies = ev.argv(1)
                location = ev.argv(2)
                user = ev.argv(3)
                passw = ev.argv(4)
                auth = ev.argv(5)
                endpoint = ev.argv(6)
                headers = ev.argv(7)
                use_cache = ev.argv(8)
                timeout = ev.argv(9)

            self._url = url
            self._proxies = proxies
            self._location = location
            self._user = user
            self._passw = passw
            self._endpoint = endpoint
            self._headers = headers

            # A listener may suppress the default load entirely.
            if (ev.will_run_default()):
                # Translate the configuration into suds client options.
                options = {}
                if (self._location == 'local'):
                    self._url = 'file://' + self._url
                if (self._proxies != None):
                    options['proxy'] = self._proxies
                if (self._user != None):
                    if (auth == 'Basic'):
                        options['username'] = self._user
                        options['password'] = self._passw
                    elif (auth == 'NTLM'):
                        options['transport'] = WindowsHttpAuthenticated(
                            username=self._user, password=self._passw)
                    else:
                        # Unknown auth scheme falls back to Basic-style options.
                        options['username'] = self._user
                        options['password'] = self._passw
                if (cert != None):
                    # Client certificates need the requests-based transport.
                    # NOTE(review): this overwrites the NTLM transport if both
                    # were requested — confirm that combination is unsupported.
                    options['transport'] = RequestsTransport(cert=cert)
                if (self._endpoint != None):
                    options['location'] = self._endpoint
                if (self._headers != None):
                    options['headers'] = self._headers
                if (timeout != None):
                    options['timeout'] = timeout

                self._client = client.Client(
                    self._url, **options) if (use_cache) else client.Client(self._url, cache=NoCache(), **options)
                self._wsdl = self._client.wsdl

            self._mh.demsg('htk_on_debug_info', self._mh._trn.msg(
                'htk_soap_wsdl_loaded'), self._mh.fromhere())
            ev = event.Event('soap_after_load_wsdl')
            self._mh.fire_event(ev)

            return True
        except (WebFault, TransportError, URLError, ValueError) as ex:
            self._mh.demsg(
                'htk_on_error', 'error: {0}'.format(ex), self._mh.fromhere())
            return False

    def get_operations(self):
        """Method returns service operations

        Args:
           none

        Returns:
           list: operations, None when no WSDL is loaded

        """

        if (self._wsdl != None):
            # Operations of the first service/port only.
            operations = []
            for operation in self._wsdl.services[0].ports[0].methods.values():
                operations.append(operation.name)
            return operations
        else:
            self._mh.demsg('htk_on_warning', self._mh._trn.msg(
                'htk_soap_wsdl_not_loaded'), self._mh.fromhere())
            return None

    def send_request(self, operation, body=None, request=None, headers=None):
        """Method sends request

        Args:
           operation (str): operation name
           body (str|xml): request body
           request (str|xml): SOAP request, use body or request
           headers (dict): HTTP headers, SOAPAction, Content-Type are set automatically

        Returns:
           obj: response body, objectified xml

        Raises:
           event: soap_before_request
           event: soap_after_request

        """

        try:
            self._mh.demsg('htk_on_debug_info', self._mh._trn.msg('htk_soap_request', operation, body, request, headers),
                           self._mh.fromhere())

            if (self._wsdl == None):
                self._mh.demsg('htk_on_warning', self._mh._trn.msg(
                    'htk_soap_wsdl_not_loaded'), self._mh.fromhere())
                return None

            # Fire pre-event; listeners may rewrite the arguments.
            ev = event.Event(
                'soap_before_request', operation, body, request, headers)
            if (self._mh.fire_event(ev) > 0):
                operation = ev.argv(0)
                body = ev.argv(1)
                request = ev.argv(2)
                headers = ev.argv(3)

            # NOTE(review): if a listener suppresses the default run,
            # `response` below is never bound and the return raises
            # UnboundLocalError (caught and reported as an error).
            if (ev.will_run_default()):
                if (headers != None):
                    self._client.set_options(headers=headers)

                if (body != None):
                    # Wrap the given body fragment in a full SOAP envelope.
                    nsmap = {
                        'soapenv': 'http://schemas.xmlsoap.org/soap/envelope/'}
                    ns = '{%s}' % nsmap['soapenv']
                    root = Element(ns + 'Envelope', nsmap=nsmap)
                    elem = SubElement(root, ns + 'Header')
                    elem = SubElement(root, ns + 'Body')
                    if (isinstance(body, str)):
                        body = fromstring(body)
                    elif (isinstance(body, bytes)):
                        body = fromstring(body.decode())
                    elem.append(body)
                    request = tostring(root)

                request = request if (request != None) else ''
                # Inject the raw XML so suds sends it verbatim.
                response = getattr(self._client.service, operation)(
                    __inject={'msg': request})
                self._mh.demsg('htk_on_debug_info', self._mh._trn.msg(
                    'htk_soap_response', response), self._mh.fromhere())

            ev = event.Event('soap_after_request')
            self._mh.fire_event(ev)

            return response
        except (WebFault, TransportError, URLError, ValueError, XMLSyntaxError, MethodNotFound) as ex:
            self._mh.demsg(
                'htk_on_error', 'error: {0}'.format(ex), self._mh.fromhere())
            return None
def gen_wss_token(user, passw, method='Digest', nonce=True, created=True):
    """Method generates WSS token

    Args:
       user (str): username
       passw (str): password
       method (str): token method Text|Digest
       nonce (bool): include nonce
       created (bool): include created

    Returns:
       dict: username, password, nonce, created

    """

    token = UsernameToken(user, passw)
    if (nonce):
        token.setnonce()
    if (created):
        token.setcreated()
    token = {'username': user, 'password': passw, 'nonce': str(
        token.nonce), 'created': str(token.created)}

    if (method == 'Digest'):
        token['created'] = datetime.utcnow().isoformat()[:-3] + 'Z'
        # Bug fix: sha1()/encodestring() require bytes on Python 3; the
        # original concatenated str values and raised TypeError. Digest is
        # Base64(SHA-1(nonce + created + password)) per WS-Security.
        digest_input = (token['nonce'] + token['created'] + passw).encode('utf-8')
        token['password'] = encodestring(sha1(digest_input).digest())[:-1]
        token['nonce'] = encodestring(token['nonce'].encode('utf-8'))[:-1]

    return token
class RequestsTransport(HttpAuthenticated):
    """suds transport backed by `requests`, adding client-certificate support."""

    def __init__(self, **kwargs):
        # Pop our extra option before the base class sees the kwargs.
        self.cert = kwargs.pop('cert', None)
        HttpAuthenticated.__init__(self, **kwargs)

    def send(self, request):
        self.addcredentials(request)
        http_response = post(request.url,
                             data=request.message,
                             headers=request.headers,
                             cert=self.cert,
                             verify=True)
        return Reply(http_response.status_code,
                     http_response.headers,
                     http_response.content)
| bsd-3-clause |
kaplun/Invenio-OpenAIRE | modules/bibformat/lib/elements/bfe_additional_report_numbers.py | 35 | 1804 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints additional report numbers
"""
__revision__ = "$Id$"
from invenio.bibformat_elements.bfe_report_numbers import \
build_report_number_link
def format_element(bfo, limit, separator=" ", link='yes'):
    """
    Prints the additional report numbers of the record
    @param separator: the separator between report numbers.
    @param limit: the max number of report numbers to display
    @param link: if 'yes', display report number with corresponding link when possible
    """
    numbers = bfo.fields("088__a")
    # Truncate only when the (numeric) limit does not exceed the list length.
    if limit.isdigit() and int(limit) <= len(numbers):
        numbers = numbers[:int(limit)]
    rendered = [build_report_number_link(number, link == 'yes')
                for number in numbers]
    return separator.join(rendered)
def escape_values(bfo):
    """
    Called by BibFormat in order to check if output of this element
    should be escaped.
    """
    # This element's output never needs escaping.
    return 0
| gpl-2.0 |
mxjl620/scikit-learn | examples/neighbors/plot_digits_kde_sampling.py | 251 | 2022 | """
=========================
Kernel Density Estimation
=========================
This example shows how kernel density estimation (KDE), a powerful
non-parametric density estimation technique, can be used to learn
a generative model for a dataset. With this generative model in place,
new samples can be drawn. These new samples reflect the underlying model
of the data.
"""
import numpy as np
import matplotlib.pyplot as plt

from sklearn.datasets import load_digits
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
from sklearn.grid_search import GridSearchCV

# load the data
digits = load_digits()
data = digits.data

# project the 64-dimensional data to a lower dimension
pca = PCA(n_components=15, whiten=False)
data = pca.fit_transform(digits.data)

# use grid search cross-validation to optimize the bandwidth
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(data)

print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth))

# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_

# sample 44 new points from the data (in PCA space), then map them
# back to the original 64-dimensional pixel space
new_data = kde.sample(44, random_state=0)
new_data = pca.inverse_transform(new_data)

# turn data into a 4x11 grid
new_data = new_data.reshape((4, 11, -1))
real_data = digits.data[:44].reshape((4, 11, -1))

# plot real digits and resampled digits
# (9 rows: 0-3 real digits, row 4 hidden as a separator, 5-8 sampled digits)
fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[]))
for j in range(11):
    ax[4, j].set_visible(False)
    for i in range(4):
        im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)),
                             cmap=plt.cm.binary, interpolation='nearest')
        im.set_clim(0, 16)
        im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)),
                                 cmap=plt.cm.binary, interpolation='nearest')
        im.set_clim(0, 16)

ax[0, 5].set_title('Selection from the input data')
ax[5, 5].set_title('"New" digits drawn from the kernel density model')

plt.show()
| bsd-3-clause |
tangmi360/googletest | test/gtest_break_on_failure_unittest.py | 2140 | 7339 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's break-on-failure mode.
A user can ask Google Test to seg-fault when an assertion fails, using
either the GTEST_BREAK_ON_FAILURE environment variable or the
--gtest_break_on_failure flag. This script tests such functionality
by invoking gtest_break_on_failure_unittest_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import gtest_test_utils
import os
import sys
# Constants.
IS_WINDOWS = os.name == 'nt'
# The environment variable for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_ENV_VAR = 'GTEST_BREAK_ON_FAILURE'
# The command line flag for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_FLAG = 'gtest_break_on_failure'
# The environment variable for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE_ENV_VAR = 'GTEST_THROW_ON_FAILURE'
# The environment variable for enabling/disabling the catch-exceptions mode.
CATCH_EXCEPTIONS_ENV_VAR = 'GTEST_CATCH_EXCEPTIONS'
# Path to the gtest_break_on_failure_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_break_on_failure_unittest_')
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
def Run(command):
  """Runs a command; returns 1 if it was killed by a signal, or 0 otherwise."""

  proc = gtest_test_utils.Subprocess(command, env=environ)
  return 1 if proc.terminated_by_signal else 0
# The tests.
class GTestBreakOnFailureUnitTest(gtest_test_utils.TestCase):
  """Tests using the GTEST_BREAK_ON_FAILURE environment variable or
  the --gtest_break_on_failure flag to turn assertion failures into
  segmentation faults.
  """

  def RunAndVerify(self, env_var_value, flag_value, expect_seg_fault):
    """Runs gtest_break_on_failure_unittest_ and verifies that it does
    (or does not) have a seg-fault.

    Args:
      env_var_value:    value of the GTEST_BREAK_ON_FAILURE environment
                        variable; None if the variable should be unset.
      flag_value:       value of the --gtest_break_on_failure flag;
                        None if the flag should not be present.
      expect_seg_fault: 1 if the program is expected to generate a seg-fault;
                        0 otherwise.
    """

    SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, env_var_value)

    # Build a human-readable description of the env var setting for the
    # failure message.
    if env_var_value is None:
      env_var_value_msg = ' is not set'
    else:
      env_var_value_msg = '=' + env_var_value

    # Translate the requested flag value into the actual command-line flag.
    if flag_value is None:
      flag = ''
    elif flag_value == '0':
      flag = '--%s=0' % BREAK_ON_FAILURE_FLAG
    else:
      flag = '--%s' % BREAK_ON_FAILURE_FLAG

    command = [EXE_PATH]
    if flag:
      command.append(flag)

    if expect_seg_fault:
      should_or_not = 'should'
    else:
      should_or_not = 'should not'

    has_seg_fault = Run(command)

    # Restore the environment so later tests start from a clean state.
    SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, None)

    msg = ('when %s%s, an assertion failure in "%s" %s cause a seg-fault.' %
           (BREAK_ON_FAILURE_ENV_VAR, env_var_value_msg, ' '.join(command),
            should_or_not))
    self.assert_(has_seg_fault == expect_seg_fault, msg)

  def testDefaultBehavior(self):
    """Tests the behavior of the default mode."""

    self.RunAndVerify(env_var_value=None,
                      flag_value=None,
                      expect_seg_fault=0)

  def testEnvVar(self):
    """Tests using the GTEST_BREAK_ON_FAILURE environment variable."""

    self.RunAndVerify(env_var_value='0',
                      flag_value=None,
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='1',
                      flag_value=None,
                      expect_seg_fault=1)

  def testFlag(self):
    """Tests using the --gtest_break_on_failure flag."""

    self.RunAndVerify(env_var_value=None,
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value=None,
                      flag_value='1',
                      expect_seg_fault=1)

  def testFlagOverridesEnvVar(self):
    """Tests that the flag overrides the environment variable."""

    # All four combinations: the flag value alone decides the outcome.
    self.RunAndVerify(env_var_value='0',
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='0',
                      flag_value='1',
                      expect_seg_fault=1)
    self.RunAndVerify(env_var_value='1',
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='1',
                      flag_value='1',
                      expect_seg_fault=1)

  def testBreakOnFailureOverridesThrowOnFailure(self):
    """Tests that gtest_break_on_failure overrides gtest_throw_on_failure."""

    SetEnvVar(THROW_ON_FAILURE_ENV_VAR, '1')
    try:
      self.RunAndVerify(env_var_value=None,
                        flag_value='1',
                        expect_seg_fault=1)
    finally:
      SetEnvVar(THROW_ON_FAILURE_ENV_VAR, None)

  if IS_WINDOWS:
    def testCatchExceptionsDoesNotInterfere(self):
      """Tests that gtest_catch_exceptions doesn't interfere."""

      SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, '1')
      try:
        self.RunAndVerify(env_var_value='1',
                          flag_value='1',
                          expect_seg_fault=1)
      finally:
        SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, None)
# Delegate to Google Test's shared Python test runner.
if __name__ == '__main__':
  gtest_test_utils.Main()
| bsd-3-clause |
fjorba/invenio | po/i18n_extract_from_wml_source.py | 35 | 4836 | ## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
This tool extracts sentences to be translated from HTML / WML source
files.
The sentences to translate are marked with the following tag:
Blah blah _(To be translated)_ blah.
These tags can span several lines. Extra whitespace is discarded.
"""
import sys, re, os
_tag_re = re.compile(r'_\((.*?)\)_', re.M)
_nl_re = re.compile('\n')
_ws_re = re.compile('\s+')
def print_usage():
"""Print usage info."""
print """Usage: %s <dirname> <potfiles-filename>
Description: Extract translatable strings from the list of files read
from potfiles-filename. The files specified there are
relative to dirname. Print results on stdout.
"""
return
def quote(text):
    """Normalize and quote a string for inclusion in the po file."""
    # Order matters: escape backslashes first so later escapes are not
    # double-processed.
    replacements = (('\\', '\\\\'),
                    ('\n', '\\\\n'),
                    ('\t', '\\\\t'),
                    ('"', '\\"'))
    for old, new in replacements:
        text = text.replace(old, new)
    return text
def extract_from_wml_files(dirname, potfiles_filename):
    """Extract translatable strings from the list of files read from
    potfiles_filename. The files specified there are relative to
    dirname. Print results on stdout.
    """
    ## extract messages and fill db:
    # db maps each normalized translatable string to the list of
    # "file:line" references where it appears.
    db = {}
    for f in [ f.strip() for f in open(potfiles_filename) ]:
        # Skip blank lines and comments in the POTFILES list.
        if not f or f.startswith('#'):
            continue
        # Entries may end with a Makefile-style line continuation.
        f = f.rstrip(' \\')
        data = open(dirname + "/" + f).read()
        # Record the offset of every newline so match positions can be
        # converted to line numbers.
        lines = [0]
        for m in _nl_re.finditer(data):
            lines.append(m.end())
        # Search for _( ... )_ tags; newlines are flattened so a tag may
        # span several lines.
        for m in _tag_re.finditer(data.replace('\n', ' ')):
            word = m.group(1)
            pos = m.start()
            line = len([x for x in lines if x < pos])
            ref = '%s:%d' % (f, line)
            # normalize the word a bit, as it comes from a file where
            # whitespace is not too significant.
            word = _ws_re.sub(' ', word.strip())
            db.setdefault(word, []).append(ref)
    ## print po header:
    print r'''
# # This file is part of Invenio.
# # Copyright (C) 2006, 2007, 2008, 2010, 2011 CERN.
# #
# # Invenio is free software; you can redistribute it and/or
# # modify it under the terms of the GNU General Public License as
# # published by the Free Software Foundation; either version 2 of the
# # License, or (at your option) any later version.
# #
# # Invenio is distributed in the hope that it will be useful, but
# # WITHOUT ANY WARRANTY; without even the implied warranty of
# # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# # General Public License for more details.
# #
# # You should have received a copy of the GNU General Public License
# # along with Invenio; if not, write to the Free Software Foundation, Inc.,
# # 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
msgid ""
msgstr ""
"Project-Id-Version: Invenio 0.7\n"
"POT-Creation-Date: Tue Nov 22 16:44:03 2005\n"
"PO-Revision-Date: 2005-11-22 11:20+0100\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: pygettext.py 1.5\n"
'''
    ## print po content from db:
    for original, refs in db.items():
        for ref in refs:
            print "#: %s" % ref
        print 'msgid "%s"' % quote(original)
        print 'msgstr ""'
        print
    return
# Command-line entry point: expects exactly two arguments
# (<dirname> <potfiles-filename>), validated before extraction runs.
if __name__ == "__main__":
    if len(sys.argv) == 3:
        dirname = sys.argv[1]
        potfiles_filename = sys.argv[2]
        if not os.path.isdir(dirname):
            print "ERROR: %s is not a directory." % dirname
            print_usage()
            sys.exit(1)
        elif not os.path.isfile(potfiles_filename):
            print "ERROR: %s is not a file." % potfiles_filename
            print_usage()
            sys.exit(1)
        else:
            extract_from_wml_files(sys.argv[1], sys.argv[2])
    else:
        print_usage()
| gpl-2.0 |
Richard-Mathie/cassandra_benchmark | vendor/github.com/datastax/python-driver/cassandra/cqlengine/statements.py | 5 | 28541 | # Copyright 2013-2016 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime, timedelta
import time
import six
from six.moves import filter
from cassandra.query import FETCH_SIZE_UNSET
from cassandra.cqlengine import columns
from cassandra.cqlengine import UnicodeMixin
from cassandra.cqlengine.functions import QueryValue
from cassandra.cqlengine.operators import BaseWhereOperator, InOperator, EqualsOperator
class StatementException(Exception):
    """Raised when a CQL statement or clause cannot be constructed."""
class ValueQuoter(UnicodeMixin):
    """Wraps a Python value and renders it as a CQL literal."""

    def __init__(self, value):
        self.value = value

    def __unicode__(self):
        from cassandra.encoder import cql_quote
        value = self.value
        if isinstance(value, (list, tuple)):
            return '[' + ', '.join(cql_quote(item) for item in value) + ']'
        if isinstance(value, dict):
            pairs = [cql_quote(k) + ':' + cql_quote(v)
                     for k, v in value.items()]
            return '{' + ', '.join(pairs) + '}'
        if isinstance(value, set):
            return '{' + ', '.join(cql_quote(item) for item in value) + '}'
        return cql_quote(value)

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self.value == other.value
        return False
class InQuoter(ValueQuoter):
    """Renders the wrapped collection as a parenthesized CQL IN tuple."""

    def __unicode__(self):
        from cassandra.encoder import cql_quote
        quoted = [cql_quote(item) for item in self.value]
        return '(' + ', '.join(quoted) + ')'
class BaseClause(UnicodeMixin):
    """Common behaviour for query clauses: a field paired with a value and
    a placeholder id into the query context."""

    def __init__(self, field, value):
        self.field = field
        self.value = value
        self.context_id = None

    def __unicode__(self):
        raise NotImplementedError

    def __hash__(self):
        return hash(self.field) ^ hash(self.value)

    def __eq__(self, other):
        return (isinstance(other, self.__class__)
                and self.field == other.field
                and self.value == other.value)

    def __ne__(self, other):
        return not self.__eq__(other)

    def get_context_size(self):
        """ returns the number of entries this clause will add to the query context """
        return 1

    def set_context_id(self, i):
        """ sets the value placeholder that will be used in the query """
        self.context_id = i

    def update_context(self, ctx):
        """ updates the query context with this clauses values """
        assert isinstance(ctx, dict)
        ctx[str(self.context_id)] = self.value
class WhereClause(BaseClause):
    """ a single where statement used in queries """

    def __init__(self, field, operator, value, quote_field=True):
        """
        :param field: column name the clause filters on
        :param operator: a BaseWhereOperator instance (EQ, IN, ...)
        :param value: comparison value (plain value or QueryValue)
        :param quote_field: hack to get the token function rendering properly
        :return:
        """
        if not isinstance(operator, BaseWhereOperator):
            raise StatementException(
                "operator must be of type {0}, got {1}".format(BaseWhereOperator, type(operator))
            )

        super(WhereClause, self).__init__(field, value)
        self.operator = operator
        # Wrap plain values in QueryValue so context-id bookkeeping is uniform.
        self.query_value = self.value if isinstance(self.value, QueryValue) else QueryValue(self.value)
        self.quote_field = quote_field

    def __unicode__(self):
        # Field is normally double-quoted; token() clauses opt out.
        field = ('"{0}"' if self.quote_field else '{0}').format(self.field)
        return u'{0} {1} {2}'.format(field, self.operator, six.text_type(self.query_value))

    def __hash__(self):
        return super(WhereClause, self).__hash__() ^ hash(self.operator)

    def __eq__(self, other):
        # Equal only when field/value match AND the operator class matches.
        if super(WhereClause, self).__eq__(other):
            return self.operator.__class__ == other.operator.__class__
        return False

    def get_context_size(self):
        # Delegate to the wrapped QueryValue (it may occupy several slots).
        return self.query_value.get_context_size()

    def set_context_id(self, i):
        super(WhereClause, self).set_context_id(i)
        self.query_value.set_context_id(i)

    def update_context(self, ctx):
        if isinstance(self.operator, InOperator):
            # IN needs its collection rendered as a parenthesized tuple.
            ctx[str(self.context_id)] = InQuoter(self.value)
        else:
            self.query_value.update_context(ctx)
class AssignmentClause(BaseClause):
    """ a single variable set statement """

    def __unicode__(self):
        return u'"{field}" = %({ctx})s'.format(field=self.field,
                                               ctx=self.context_id)

    def insert_tuple(self):
        # (column name, placeholder id) pair used when building INSERTs.
        return (self.field, self.context_id)
class ConditionalClause(BaseClause):
    """ A single variable iff statement """

    def __unicode__(self):
        return u'"{field}" = %({ctx})s'.format(field=self.field,
                                               ctx=self.context_id)

    def insert_tuple(self):
        # (column name, placeholder id) pair used when building INSERTs.
        return (self.field, self.context_id)
class ContainerUpdateTypeMapMeta(type):
    # Metaclass that maintains a shared registry (`type_map`) mapping each
    # column type to the ContainerUpdateClause subclass that handles it.

    def __init__(cls, name, bases, dct):
        if not hasattr(cls, 'type_map'):
            # First (base) class seen: create the shared registry.
            cls.type_map = {}
        else:
            # Subclass: register it under its declared `col_type`.
            cls.type_map[cls.col_type] = cls
        super(ContainerUpdateTypeMapMeta, cls).__init__(name, bases, dct)
@six.add_metaclass(ContainerUpdateTypeMapMeta)
class ContainerUpdateClause(AssignmentClause):
    # Abstract base for collection-column update clauses; subclasses are
    # auto-registered by column type via ContainerUpdateTypeMapMeta.

    def __init__(self, field, value, operation=None, previous=None):
        super(ContainerUpdateClause, self).__init__(field, value)
        self.previous = previous      # prior value, used to derive partial updates
        self._assignments = None      # full reassignment payload, if any
        self._operation = operation   # explicit operation hint (e.g. "add"/"remove")
        self._analyzed = False        # True once _analyze() has decided the update plan

    def _analyze(self):
        raise NotImplementedError

    def get_context_size(self):
        raise NotImplementedError

    def update_context(self, ctx):
        raise NotImplementedError
class SetUpdateClause(ContainerUpdateClause):
    """Updates a set collection column.

    After ``_analyze`` runs, ``_assignments`` (full overwrite),
    ``_additions`` and ``_removals`` drive the rendered fragments.  The
    all-``None`` case (no-op update on a fresh row) binds an empty set so
    the column still gets created.
    """

    col_type = columns.Set

    _additions = None
    _removals = None

    def __unicode__(self):
        # BUGFIX: analyze before rendering (consistent with
        # ListUpdateClause.__unicode__); previously rendering before
        # get_context_size()/update_context() produced an empty string.
        if not self._analyzed:
            self._analyze()
        qs = []
        ctx_id = self.context_id
        if (self.previous is None and
                self._assignments is None and
                self._additions is None and
                self._removals is None):
            qs += ['"{0}" = %({1})s'.format(self.field, ctx_id)]
        if self._assignments is not None:
            qs += ['"{0}" = %({1})s'.format(self.field, ctx_id)]
            ctx_id += 1
        if self._additions is not None:
            qs += ['"{0}" = "{0}" + %({1})s'.format(self.field, ctx_id)]
            ctx_id += 1
        if self._removals is not None:
            qs += ['"{0}" = "{0}" - %({1})s'.format(self.field, ctx_id)]

        return ', '.join(qs)

    def _analyze(self):
        """ works out the updates to be performed """
        if self.value is None or self.value == self.previous:
            pass
        elif self._operation == "add":
            self._additions = self.value
        elif self._operation == "remove":
            self._removals = self.value
        elif self.previous is None:
            self._assignments = self.value
        else:
            # partial update: only ship the symmetric difference
            self._additions = (self.value - self.previous) or None
            self._removals = (self.previous - self.value) or None
        self._analyzed = True

    def get_context_size(self):
        if not self._analyzed:
            self._analyze()
        if (self.previous is None and
                self._assignments is None and
                self._additions is None and
                self._removals is None):
            return 1
        # BUGFIX: use ``is not None`` to match update_context() and
        # __unicode__; the previous truthiness checks undercounted when an
        # empty container was bound (e.g. an explicit "add" of an empty
        # set), desynchronizing placeholder numbering from the rendered CQL.
        return (int(self._assignments is not None) +
                int(self._additions is not None) +
                int(self._removals is not None))

    def update_context(self, ctx):
        if not self._analyzed:
            self._analyze()
        ctx_id = self.context_id
        if (self.previous is None and
                self._assignments is None and
                self._additions is None and
                self._removals is None):
            # no-op update on a fresh row: bind an empty set
            ctx[str(ctx_id)] = set()
        if self._assignments is not None:
            ctx[str(ctx_id)] = self._assignments
            ctx_id += 1
        if self._additions is not None:
            ctx[str(ctx_id)] = self._additions
            ctx_id += 1
        if self._removals is not None:
            ctx[str(ctx_id)] = self._removals
class ListUpdateClause(ContainerUpdateClause):
    """ updates a list collection """

    # registered for List columns via ContainerUpdateTypeMapMeta
    col_type = columns.List

    _append = None
    _prepend = None

    def __unicode__(self):
        """Render up to three SET fragments: full assignment, prepend
        (value + field) and append (field + value), in that order."""
        if not self._analyzed:
            self._analyze()
        qs = []
        ctx_id = self.context_id
        if self._assignments is not None:
            qs += ['"{0}" = %({1})s'.format(self.field, ctx_id)]
            ctx_id += 1
        if self._prepend is not None:
            qs += ['"{0}" = %({1})s + "{0}"'.format(self.field, ctx_id)]
            ctx_id += 1
        if self._append is not None:
            qs += ['"{0}" = "{0}" + %({1})s'.format(self.field, ctx_id)]

        return ', '.join(qs)

    def get_context_size(self):
        """Number of placeholders this clause will bind.

        NOTE(review): ``_assignments`` is tested with ``is not None`` but
        append/prepend use truthiness -- presumably ``_analyze`` never
        leaves them as empty-but-not-None; confirm before relying on it.
        """
        if not self._analyzed:
            self._analyze()
        return int(self._assignments is not None) + int(bool(self._append)) + int(bool(self._prepend))

    def update_context(self, ctx):
        # binds values in the same order __unicode__ renders fragments
        if not self._analyzed:
            self._analyze()
        ctx_id = self.context_id
        if self._assignments is not None:
            ctx[str(ctx_id)] = self._assignments
            ctx_id += 1
        if self._prepend is not None:
            ctx[str(ctx_id)] = self._prepend
            ctx_id += 1
        if self._append is not None:
            ctx[str(ctx_id)] = self._append

    def _analyze(self):
        """ works out the updates to be performed """
        if self.value is None or self.value == self.previous:
            # nothing changed
            pass
        elif self._operation == "append":
            self._append = self.value
        elif self._operation == "prepend":
            self._prepend = self.value
        elif self.previous is None:
            self._assignments = self.value
        elif len(self.value) < len(self.previous):
            # if elements have been removed,
            # rewrite the whole list
            self._assignments = self.value
        elif len(self.previous) == 0:
            # if we're updating from an empty
            # list, do a complete insert
            self._assignments = self.value
        else:
            # Try to locate the old list as a contiguous run inside the new
            # one; if found, everything before it becomes a prepend and
            # everything after it an append.
            # the max start idx we want to compare
            search_space = len(self.value) - max(0, len(self.previous) - 1)

            # the size of the sub lists we want to look at
            search_size = len(self.previous)

            for i in range(search_space):
                # slice boundary
                j = i + search_size
                sub = self.value[i:j]
                # cheap first/last element checks before the full comparison
                idx_cmp = lambda idx: self.previous[idx] == sub[idx]
                if idx_cmp(0) and idx_cmp(-1) and self.previous == sub:
                    self._prepend = self.value[:i] or None
                    self._append = self.value[j:] or None
                    break

            # if both append and prepend are still None after looking
            # at both lists, an insert statement will be created
            if self._prepend is self._append is None:
                self._assignments = self.value

        self._analyzed = True
class MapUpdateClause(ContainerUpdateClause):
    """Updates a map collection column, key by key.

    ``__unicode__`` and ``update_context`` must iterate the keys in the
    same order, since each key consumes two consecutive placeholders
    (key, value).
    """

    col_type = columns.Map

    _updates = None

    def _analyze(self):
        if self._operation == "update":
            # BUGFIX: sort for a deterministic key order (and to get a real
            # list on Python 3, where dict.keys() is a lazy view), matching
            # the diffing branches below.
            self._updates = sorted(self.value.keys())
        else:
            if self.previous is None:
                self._updates = sorted([k for k, v in self.value.items()])
            else:
                # only keys whose value actually changed
                self._updates = sorted([k for k, v in self.value.items() if v != self.previous.get(k)]) or None
        self._analyzed = True

    def get_context_size(self):
        if self.is_assignment:
            return 1
        # one placeholder for the key plus one for the value
        return len(self._updates or []) * 2

    def update_context(self, ctx):
        ctx_id = self.context_id
        if self.is_assignment:
            # wholesale overwrite with an empty map
            ctx[str(ctx_id)] = {}
        else:
            for key in self._updates or []:
                val = self.value.get(key)
                ctx[str(ctx_id)] = key
                ctx[str(ctx_id + 1)] = val
                ctx_id += 2

    @property
    def is_assignment(self):
        # lazily analyzes; True when there was no previous value and no
        # per-key updates remain
        if not self._analyzed:
            self._analyze()
        return self.previous is None and not self._updates

    def __unicode__(self):
        qs = []

        ctx_id = self.context_id
        if self.is_assignment:
            qs += ['"{0}" = %({1})s'.format(self.field, ctx_id)]
        else:
            for _ in self._updates or []:
                qs += ['"{0}"[%({1})s] = %({2})s'.format(self.field, ctx_id, ctx_id + 1)]
                ctx_id += 2

        return ', '.join(qs)
class CounterUpdateClause(AssignmentClause):
    """Increments or decrements a counter column by the delta between the
    new value and the previous one."""

    col_type = columns.Counter

    def __init__(self, field, value, previous=None):
        super(CounterUpdateClause, self).__init__(field, value)
        # counters with no prior value start from zero
        self.previous = previous or 0

    def get_context_size(self):
        # a single placeholder: the magnitude of the delta
        return 1

    def update_context(self, ctx):
        ctx[str(self.context_id)] = abs(self.value - self.previous)

    def __unicode__(self):
        # the sign of the delta selects + or -; the bound value is |delta|
        if self.value - self.previous < 0:
            sign = '-'
        else:
            sign = '+'
        return '"{0}" = "{0}" {1} %({2})s'.format(self.field, sign, self.context_id)
class BaseDeleteClause(BaseClause):
    """Marker base class for clauses used by DELETE statements."""
    pass
class FieldDeleteClause(BaseDeleteClause):
    """Deletes a single named field (column) from a row."""

    def __init__(self, field):
        # a field deletion carries no bound value
        super(FieldDeleteClause, self).__init__(field, None)

    def __unicode__(self):
        return '"{0}"'.format(self.field)

    def update_context(self, ctx):
        # nothing to bind
        pass

    def get_context_size(self):
        # consumes no placeholders
        return 0
class MapDeleteClause(BaseDeleteClause):
    """Removes a set of keys from a map column."""

    def __init__(self, field, value, previous=None):
        super(MapDeleteClause, self).__init__(field, value)
        self.value = self.value or {}
        self.previous = previous or {}
        self._analyzed = False
        self._removals = None

    def _analyze(self):
        # keys present before but absent from the new value get removed;
        # sorted for deterministic placeholder assignment
        gone = (k for k in self.previous if k not in self.value)
        self._removals = sorted(gone)
        self._analyzed = True

    def _ensure_analyzed(self):
        # lazily run the diff exactly once
        if not self._analyzed:
            self._analyze()

    def update_context(self, ctx):
        self._ensure_analyzed()
        position = self.context_id
        for key in self._removals:
            ctx[str(position)] = key
            position += 1

    def get_context_size(self):
        self._ensure_analyzed()
        return len(self._removals)

    def __unicode__(self):
        self._ensure_analyzed()
        fragments = []
        for offset in range(len(self._removals)):
            fragments.append('"{0}"[%({1})s]'.format(self.field, self.context_id + offset))
        return ', '.join(fragments)
class BaseCQLStatement(UnicodeMixin):
    """ The base cql statement class """

    def __init__(self, table, timestamp=None, where=None, fetch_size=None, conditionals=None):
        """
        :param table: table name the statement runs against
        :param timestamp: optional write time (int/long, datetime, or
            timedelta -- see ``timestamp_normalized``)
        :param where: iterable of WHERE clauses
        :param fetch_size: paging size; the FETCH_SIZE_UNSET sentinel when
            absent or falsy
        :param conditionals: iterable of clauses rendered in the IF block
        """
        super(BaseCQLStatement, self).__init__()
        self.table = table
        self.context_id = 0
        # context_counter hands out placeholder ids; clauses are numbered
        # in the order they are added
        self.context_counter = self.context_id
        self.timestamp = timestamp
        self.fetch_size = fetch_size if fetch_size else FETCH_SIZE_UNSET

        self.where_clauses = []
        for clause in where or []:
            self._add_where_clause(clause)

        self.conditionals = []
        for conditional in conditionals or []:
            self.add_conditional_clause(conditional)

    def _update_part_key_values(self, field_index_map, clauses, parts):
        # fill ``parts`` (positional partition key values) from any clause
        # whose field belongs to the partition key
        for clause in filter(lambda c: c.field in field_index_map, clauses):
            parts[field_index_map[clause.field]] = clause.value

    def partition_key_values(self, field_index_map):
        """Return partition key values by position, taken from WHERE
        clauses; positions without an equality restriction stay None."""
        parts = [None] * len(field_index_map)
        # only equality restrictions identify a single partition
        self._update_part_key_values(field_index_map, (w for w in self.where_clauses if w.operator.__class__ == EqualsOperator), parts)
        return parts

    def add_where(self, column, operator, value, quote_field=True):
        """Build and append a WhereClause for ``column <operator> value``."""
        value = column.to_database(value)
        clause = WhereClause(column.db_field_name, operator, value, quote_field)
        self._add_where_clause(clause)

    def _add_where_clause(self, clause):
        # assign the clause its placeholder range before storing it
        clause.set_context_id(self.context_counter)
        self.context_counter += clause.get_context_size()
        self.where_clauses.append(clause)

    def get_context(self):
        """
        returns the context dict for this statement

        :rtype: dict

        NOTE(review): only WHERE clauses contribute here; subclasses
        override to add assignments/conditionals.
        """
        ctx = {}
        for clause in self.where_clauses or []:
            clause.update_context(ctx)
        return ctx

    def add_conditional_clause(self, clause):
        """
        Adds a iff clause to this statement

        :param clause: The clause that will be added to the iff statement
        :type clause: ConditionalClause
        """
        clause.set_context_id(self.context_counter)
        self.context_counter += clause.get_context_size()
        self.conditionals.append(clause)

    def _get_conditionals(self):
        # renders the IF block, ANDing all conditional clauses together
        return 'IF {0}'.format(' AND '.join([six.text_type(c) for c in self.conditionals]))

    def get_context_size(self):
        return len(self.get_context())

    def update_context_id(self, i):
        """Renumber all placeholders starting from ``i``.

        NOTE(review): conditionals are *not* renumbered here; subclasses
        that support them (Update/Delete) extend this method -- confirm
        before calling this directly on a statement with conditionals.
        """
        self.context_id = i
        self.context_counter = self.context_id
        for clause in self.where_clauses:
            clause.set_context_id(self.context_counter)
            self.context_counter += clause.get_context_size()

    @property
    def timestamp_normalized(self):
        """
        we're expecting self.timestamp to be either a long, int, a datetime, or a timedelta
        :return: the timestamp as integer microseconds since the epoch,
            or None when no (truthy) timestamp is set
        """
        if not self.timestamp:
            return None

        if isinstance(self.timestamp, six.integer_types):
            return self.timestamp

        if isinstance(self.timestamp, timedelta):
            # relative timestamps are resolved against "now"
            tmp = datetime.now() + self.timestamp
        else:
            tmp = self.timestamp

        # seconds -> microseconds, preserving the sub-second component
        return int(time.mktime(tmp.timetuple()) * 1e+6 + tmp.microsecond)

    def __unicode__(self):
        raise NotImplementedError

    def __repr__(self):
        return self.__unicode__()

    @property
    def _where(self):
        return 'WHERE {0}'.format(' AND '.join([six.text_type(c) for c in self.where_clauses]))
class SelectStatement(BaseCQLStatement):
    """ a cql select statement """

    def __init__(self,
                 table,
                 fields=None,
                 count=False,
                 where=None,
                 order_by=None,
                 limit=None,
                 allow_filtering=False,
                 distinct_fields=None,
                 fetch_size=None):
        """
        :param where
        :type where list of cqlengine.statements.WhereClause
        """
        super(SelectStatement, self).__init__(
            table,
            where=where,
            fetch_size=fetch_size
        )

        # a bare string means a single selected column
        if isinstance(fields, six.string_types):
            self.fields = [fields]
        else:
            self.fields = fields or []

        self.distinct_fields = distinct_fields
        self.count = count

        # likewise, a bare string means a single ordering column
        if isinstance(order_by, six.string_types):
            self.order_by = [order_by]
        else:
            self.order_by = order_by

        self.limit = limit
        self.allow_filtering = allow_filtering

    def __unicode__(self):
        qs = ['SELECT']

        if self.distinct_fields:
            quoted = ', '.join('"{0}"'.format(f) for f in self.distinct_fields)
            if self.count:
                qs.append('DISTINCT COUNT({0})'.format(quoted))
            else:
                qs.append('DISTINCT {0}'.format(quoted))
        elif self.count:
            qs.append('COUNT(*)')
        elif self.fields:
            qs.append(', '.join('"{0}"'.format(f) for f in self.fields))
        else:
            qs.append('*')

        qs.append('FROM')
        qs.append(self.table)

        if self.where_clauses:
            qs.append(self._where)

        # COUNT queries ignore any ordering
        if self.order_by and not self.count:
            qs.append('ORDER BY {0}'.format(', '.join(six.text_type(o) for o in self.order_by)))

        if self.limit:
            qs.append('LIMIT {0}'.format(self.limit))

        if self.allow_filtering:
            qs.append('ALLOW FILTERING')

        return ' '.join(qs)
class AssignmentStatement(BaseCQLStatement):
    """ value assignment statements """

    def __init__(self,
                 table,
                 assignments=None,
                 where=None,
                 ttl=None,
                 timestamp=None,
                 conditionals=None):
        """
        :param assignments: iterable of AssignmentClause instances
        :param ttl: optional time-to-live, rendered by subclasses
        """
        super(AssignmentStatement, self).__init__(
            table,
            where=where,
            conditionals=conditionals
        )
        self.ttl = ttl
        self.timestamp = timestamp

        # add assignments
        self.assignments = []
        for assignment in assignments or []:
            self._add_assignment_clause(assignment)

    def update_context_id(self, i):
        # renumber WHERE clauses first (super), then the assignments
        super(AssignmentStatement, self).update_context_id(i)
        for assignment in self.assignments:
            assignment.set_context_id(self.context_counter)
            self.context_counter += assignment.get_context_size()

    def partition_key_values(self, field_index_map):
        # unlike the base class, assignments can also carry partition key
        # values (e.g. INSERTs have no WHERE clause at all)
        parts = super(AssignmentStatement, self).partition_key_values(field_index_map)
        self._update_part_key_values(field_index_map, self.assignments, parts)
        return parts

    def add_assignment(self, column, value):
        """Append an AssignmentClause for ``column = value``."""
        value = column.to_database(value)
        clause = AssignmentClause(column.db_field_name, value)
        self._add_assignment_clause(clause)

    def _add_assignment_clause(self, clause):
        # assign the clause its placeholder range before storing it
        clause.set_context_id(self.context_counter)
        self.context_counter += clause.get_context_size()
        self.assignments.append(clause)

    @property
    def is_empty(self):
        # True when there is nothing to set/insert
        return len(self.assignments) == 0

    def get_context(self):
        # WHERE placeholders (super) plus assignment placeholders
        ctx = super(AssignmentStatement, self).get_context()
        for clause in self.assignments:
            clause.update_context(ctx)
        return ctx
class InsertStatement(AssignmentStatement):
    """A CQL INSERT statement."""

    def __init__(self,
                 table,
                 assignments=None,
                 where=None,
                 ttl=None,
                 timestamp=None,
                 if_not_exists=False):
        """
        :param if_not_exists: when True, append ``IF NOT EXISTS`` so the
            insert only applies if no row with this primary key exists
        """
        super(InsertStatement, self).__init__(table,
                                              assignments=assignments,
                                              where=where,
                                              ttl=ttl,
                                              timestamp=timestamp)

        self.if_not_exists = if_not_exists

    def __unicode__(self):
        qs = ['INSERT INTO {0}'.format(self.table)]

        # get column names and context placeholders
        fields = [a.insert_tuple() for a in self.assignments]
        # renamed from ``columns`` so we don't shadow the module-level
        # ``columns`` import used elsewhere in this module
        column_names, values = zip(*fields)

        qs += ["({0})".format(', '.join(['"{0}"'.format(c) for c in column_names]))]
        qs += ['VALUES']
        qs += ["({0})".format(', '.join(['%({0})s'.format(v) for v in values]))]

        if self.if_not_exists:
            qs += ["IF NOT EXISTS"]

        # BUGFIX: CQL allows only a single USING clause; multiple options
        # must be joined with AND ("USING TTL 60 AND TIMESTAMP 123"), as
        # UpdateStatement already does.  Previously setting both ttl and
        # timestamp emitted two USING clauses, which is invalid CQL.
        using_options = []
        if self.ttl:
            using_options += ["TTL {0}".format(self.ttl)]
        if self.timestamp:
            using_options += ["TIMESTAMP {0}".format(self.timestamp_normalized)]
        if using_options:
            qs += ["USING {0}".format(" AND ".join(using_options))]

        return ' '.join(qs)
class UpdateStatement(AssignmentStatement):
    """ an cql update select statement """

    def __init__(self,
                 table,
                 assignments=None,
                 where=None,
                 ttl=None,
                 timestamp=None,
                 conditionals=None,
                 if_exists=False):
        """
        :param if_exists: when True, append ``IF EXISTS`` so the update only
            applies to an existing row
        """
        super(UpdateStatement, self).__init__(table,
                                              assignments=assignments,
                                              where=where,
                                              ttl=ttl,
                                              timestamp=timestamp,
                                              conditionals=conditionals)
        self.if_exists = if_exists

    def __unicode__(self):
        qs = ['UPDATE', self.table]

        using_options = []

        if self.ttl:
            using_options += ["TTL {0}".format(self.ttl)]

        if self.timestamp:
            using_options += ["TIMESTAMP {0}".format(self.timestamp_normalized)]

        if using_options:
            # multiple USING options are joined with AND
            qs += ["USING {0}".format(" AND ".join(using_options))]

        qs += ['SET']
        qs += [', '.join([six.text_type(c) for c in self.assignments])]

        if self.where_clauses:
            qs += [self._where]

        if len(self.conditionals) > 0:
            qs += [self._get_conditionals()]

        if self.if_exists:
            qs += ["IF EXISTS"]

        return ' '.join(qs)

    def get_context(self):
        # include placeholders bound by the IF conditionals
        ctx = super(UpdateStatement, self).get_context()
        for clause in self.conditionals:
            clause.update_context(ctx)
        return ctx

    def update_context_id(self, i):
        # renumber WHERE clauses and assignments (super), then conditionals
        super(UpdateStatement, self).update_context_id(i)
        for conditional in self.conditionals:
            conditional.set_context_id(self.context_counter)
            self.context_counter += conditional.get_context_size()

    def add_update(self, column, value, operation=None, previous=None):
        """Append the appropriate update clause for ``column``.

        Container columns get a type-specific ContainerUpdateClause (looked
        up in the metaclass registry), counters a CounterUpdateClause, and
        everything else a plain AssignmentClause.
        """
        value = column.to_database(value)
        col_type = type(column)
        container_update_type = ContainerUpdateClause.type_map.get(col_type)
        if container_update_type:
            previous = column.to_database(previous)
            clause = container_update_type(column.db_field_name, value, operation, previous)
        elif col_type == columns.Counter:
            clause = CounterUpdateClause(column.db_field_name, value, previous)
        else:
            clause = AssignmentClause(column.db_field_name, value)
        if clause.get_context_size():  # this is to exclude map removals from updates. Can go away if we drop support for C* < 1.2.4 and remove two-phase updates
            self._add_assignment_clause(clause)
class DeleteStatement(BaseCQLStatement):
    """A CQL DELETE statement (whole rows or individual fields)."""

    def __init__(self, table, fields=None, where=None, timestamp=None, conditionals=None, if_exists=False):
        """
        :param fields: field names (or delete clauses) to remove; None/empty
            deletes the whole row
        :param if_exists: when True, append ``IF EXISTS``
        """
        super(DeleteStatement, self).__init__(
            table,
            where=where,
            timestamp=timestamp,
            conditionals=conditionals
        )
        self.fields = []
        if isinstance(fields, six.string_types):
            fields = [fields]
        for field in fields or []:
            self.add_field(field)

        self.if_exists = if_exists

    def update_context_id(self, i):
        # renumber WHERE clauses (super), then deleted fields, then the
        # IF conditionals
        super(DeleteStatement, self).update_context_id(i)
        for field in self.fields:
            field.set_context_id(self.context_counter)
            self.context_counter += field.get_context_size()
        for t in self.conditionals:
            t.set_context_id(self.context_counter)
            self.context_counter += t.get_context_size()

    def get_context(self):
        ctx = super(DeleteStatement, self).get_context()
        for field in self.fields:
            field.update_context(ctx)
        for clause in self.conditionals:
            clause.update_context(ctx)
        return ctx

    def add_field(self, field):
        """Append a field-deletion clause; bare strings are wrapped in a
        FieldDeleteClause."""
        if isinstance(field, six.string_types):
            field = FieldDeleteClause(field)
        if not isinstance(field, BaseClause):
            # BUGFIX: the message used to claim AssignmentClause, but the
            # check above (and the actual requirement) is BaseClause
            raise StatementException("only instances of BaseClause can be added to statements")
        field.set_context_id(self.context_counter)
        self.context_counter += field.get_context_size()
        self.fields.append(field)

    def __unicode__(self):
        qs = ['DELETE']
        if self.fields:
            qs += [', '.join(['{0}'.format(f) for f in self.fields])]
        qs += ['FROM', self.table]

        delete_option = []

        if self.timestamp:
            delete_option += ["TIMESTAMP {0}".format(self.timestamp_normalized)]

        if delete_option:
            # BUGFIX: no padding spaces -- the fragments are joined with a
            # single space already; the old " USING {0} " produced doubled
            # spaces, inconsistent with UpdateStatement's rendering
            qs += ["USING {0}".format(" AND ".join(delete_option))]

        if self.where_clauses:
            qs += [self._where]

        if self.conditionals:
            qs += [self._get_conditionals()]

        if self.if_exists:
            qs += ["IF EXISTS"]

        return ' '.join(qs)
| apache-2.0 |
osigaud/ArmModelPython | NeuralControl/ArmModel/ArmParameters.py | 3 | 4483 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Author: Thomas Beucher
Module: ArmParameters
Description: -We find here all arm parameters
-we use a model of arm with two joints and six muscles
'''
import numpy as np
from GlobalVariables import pathWorkingDirectory
class ArmParameters:
    '''
    Physical parameters of a two-joint, six-muscle arm model, read from
    the ``Arm.params`` setup file (one ``name:value`` pair per line).
    '''

    def __init__(self):
        '''
        Reads the setup file and initializes all derived matrices.
        '''
        self.pathSetupFile = pathWorkingDirectory + "/ArmModel/Setup/Arm.params"
        self.readSetupFile()
        self.massMatrix()
        self.AMatrix()
        self.BMatrix()
        self.readStops()

    def _readValues(self):
        '''
        Returns the list of float values, one per ``name:value`` line of
        the setup file, in file order.

        Extracted so each section reader no longer re-implements the
        open/split/parse boilerplate.
        '''
        with open(self.pathSetupFile, "r") as setupFile:
            lines = setupFile.read().split("\n")
        return [float(line.split(":")[1]) for line in lines if ":" in line]

    def readSetupFile(self):
        '''
        Reads the geometry and mass parameters (lines 1-8).
        '''
        vals = self._readValues()
        # line 1, Arm length
        self.l1 = vals[0]
        # line 2, ForeArm length
        self.l2 = vals[1]
        # line 3, Arm mass
        self.m1 = vals[2]
        # line 4, ForeArm mass
        self.m2 = vals[3]
        # line 5, Arm inertia
        self.I1 = vals[4]
        # line 6, ForeArm inertia
        self.I2 = vals[5]
        # line 7, Distance from the center of segment 1 to its center of mass
        self.s1 = vals[6]
        # line 8, Distance from the center of segment 2 to its center of mass
        self.s2 = vals[7]

    def massMatrix(self):
        '''
        Initialization of parameters used for the inertia matrix
        '''
        self.k1 = self.I1 + self.I2 + self.m2 * (self.l1 ** 2)
        self.k2 = self.m2 * self.l1 * self.s2
        self.k3 = self.I2

    def BMatrix(self):
        '''
        Defines the 2x2 damping matrix B from lines 9-12.
        '''
        vals = self._readValues()
        # damping terms k6..k9, row-major
        self.B = np.array([[vals[8], vals[9]], [vals[10], vals[11]]])

    def AMatrix(self):
        '''
        Defines the 2x6 moment arm matrix At from lines 13-24
        (a1..a6 on the first row, a7..a12 on the second).
        '''
        vals = self._readValues()
        self.At = np.array([vals[12:18], vals[18:24]])

    def readStops(self):
        '''
        Reads the joint limits (lines 25-28).
        '''
        vals = self._readValues()
        # line 25, Shoulder upper bound
        self.sub = vals[24]
        # line 26, Shoulder lower bound
        self.slb = vals[25]
        # line 27, Elbow upper bound
        self.eub = vals[26]
        # line 28, Elbow lower bound
        self.elb = vals[27]
| gpl-2.0 |
bcarroll/authmgr | python-3.6.2-Win64/Lib/site-packages/sqlalchemy/dialects/postgresql/array.py | 32 | 10320 | # postgresql/array.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .base import ischema_names
from ...sql import expression, operators
from ...sql.base import SchemaEventTarget
from ... import types as sqltypes
try:
from uuid import UUID as _python_UUID
except ImportError:
_python_UUID = None
def Any(other, arrexpr, operator=operators.eq):
    """Legacy synonym for :meth:`.ARRAY.Comparator.any`, kept for
    backwards-compatibility.

    .. seealso::

        :func:`.expression.any_`

    """
    return arrexpr.any(other, operator)
def All(other, arrexpr, operator=operators.eq):
    """Legacy synonym for :meth:`.ARRAY.Comparator.all`, kept for
    backwards-compatibility.

    .. seealso::

        :func:`.expression.all_`

    """
    return arrexpr.all(other, operator)
class array(expression.Tuple):
    """A PostgreSQL ARRAY literal.

    This is used to produce ARRAY literals in SQL expressions, e.g.::

        from sqlalchemy.dialects.postgresql import array
        from sqlalchemy.dialects import postgresql
        from sqlalchemy import select, func

        stmt = select([
                        array([1,2]) + array([3,4,5])
                    ])

        print stmt.compile(dialect=postgresql.dialect())

    Produces the SQL::

        SELECT ARRAY[%(param_1)s, %(param_2)s] ||
            ARRAY[%(param_3)s, %(param_4)s, %(param_5)s]) AS anon_1

    An instance of :class:`.array` will always have the datatype
    :class:`.ARRAY`.  The "inner" type of the array is inferred from
    the values present, unless the ``type_`` keyword argument is passed::

        array(['foo', 'bar'], type_=CHAR)

    .. versionadded:: 0.8 Added the :class:`~.postgresql.array` literal type.

    See also:

    :class:`.postgresql.ARRAY`

    """
    __visit_name__ = 'array'

    def __init__(self, clauses, **kw):
        super(array, self).__init__(*clauses, **kw)
        # the element type inferred by Tuple becomes the ARRAY's item type
        self.type = ARRAY(self.type)

    def _bind_param(self, operator, obj, _assume_scalar=False, type_=None):
        if _assume_scalar or operator is operators.getitem:
            # if getitem->slice were called, Indexable produces
            # a Slice object from that
            assert isinstance(obj, int)
            return expression.BindParameter(
                None, obj, _compared_to_operator=operator,
                type_=type_,
                _compared_to_type=self.type, unique=True)

        else:
            # recurse element-wise so each member is bound against the
            # array's item type
            return array([
                self._bind_param(operator, o, _assume_scalar=True, type_=type_)
                for o in obj])

    def self_group(self, against=None):
        # parenthesize when combined with ANY/ALL or indexing
        if (against in (
                operators.any_op, operators.all_op, operators.getitem)):
            return expression.Grouping(self)
        else:
            return self
# PostgreSQL-specific array operators, used by ARRAY.Comparator below.
CONTAINS = operators.custom_op("@>", precedence=5)

CONTAINED_BY = operators.custom_op("<@", precedence=5)

OVERLAP = operators.custom_op("&&", precedence=5)
class ARRAY(SchemaEventTarget, sqltypes.ARRAY):
    """PostgreSQL ARRAY type.

    .. versionchanged:: 1.1 The :class:`.postgresql.ARRAY` type is now
       a subclass of the core :class:`.types.ARRAY` type.

    The :class:`.postgresql.ARRAY` type is constructed in the same way
    as the core :class:`.types.ARRAY` type; a member type is required, and a
    number of dimensions is recommended if the type is to be used for more
    than one dimension::

        from sqlalchemy.dialects import postgresql

        mytable = Table("mytable", metadata,
                Column("data", postgresql.ARRAY(Integer, dimensions=2))
            )

    The :class:`.postgresql.ARRAY` type provides all operations defined on the
    core :class:`.types.ARRAY` type, including support for "dimensions", indexed
    access, and simple matching such as :meth:`.types.ARRAY.Comparator.any`
    and :meth:`.types.ARRAY.Comparator.all`.  :class:`.postgresql.ARRAY` class also
    provides PostgreSQL-specific methods for containment operations, including
    :meth:`.postgresql.ARRAY.Comparator.contains`
    :meth:`.postgresql.ARRAY.Comparator.contained_by`,
    and :meth:`.postgresql.ARRAY.Comparator.overlap`, e.g.::

        mytable.c.data.contains([1, 2])

    The :class:`.postgresql.ARRAY` type may not be supported on all
    PostgreSQL DBAPIs; it is currently known to work on psycopg2 only.

    Additionally, the :class:`.postgresql.ARRAY` type does not work directly in
    conjunction with the :class:`.ENUM` type.  For a workaround, see the
    special type at :ref:`postgresql_array_of_enum`.

    .. seealso::

        :class:`.types.ARRAY` - base array type

        :class:`.postgresql.array` - produces a literal array value.

    """

    class Comparator(sqltypes.ARRAY.Comparator):
        """Define comparison operations for :class:`.ARRAY`.

        Note that these operations are in addition to those provided
        by the base :class:`.types.ARRAY.Comparator` class, including
        :meth:`.types.ARRAY.Comparator.any` and
        :meth:`.types.ARRAY.Comparator.all`.

        """

        def contains(self, other, **kwargs):
            """Boolean expression.  Test if elements are a superset of the
            elements of the argument array expression.
            """
            # renders the PostgreSQL @> operator
            return self.operate(CONTAINS, other, result_type=sqltypes.Boolean)

        def contained_by(self, other):
            """Boolean expression.  Test if elements are a proper subset of the
            elements of the argument array expression.
            """
            # renders the PostgreSQL <@ operator
            return self.operate(
                CONTAINED_BY, other, result_type=sqltypes.Boolean)

        def overlap(self, other):
            """Boolean expression.  Test if array has elements in common with
            an argument array expression.
            """
            # renders the PostgreSQL && operator
            return self.operate(OVERLAP, other, result_type=sqltypes.Boolean)

    comparator_factory = Comparator

    def __init__(self, item_type, as_tuple=False, dimensions=None,
                 zero_indexes=False):
        """Construct an ARRAY.

        E.g.::

          Column('myarray', ARRAY(Integer))

        Arguments are:

        :param item_type: The data type of items of this array. Note that
          dimensionality is irrelevant here, so multi-dimensional arrays like
          ``INTEGER[][]``, are constructed as ``ARRAY(Integer)``, not as
          ``ARRAY(ARRAY(Integer))`` or such.

        :param as_tuple=False: Specify whether return results
          should be converted to tuples from lists. DBAPIs such
          as psycopg2 return lists by default. When tuples are
          returned, the results are hashable.

        :param dimensions: if non-None, the ARRAY will assume a fixed
         number of dimensions. This will cause the DDL emitted for this
         ARRAY to include the exact number of bracket clauses ``[]``,
         and will also optimize the performance of the type overall.
         Note that PG arrays are always implicitly "non-dimensioned",
         meaning they can store any number of dimensions no matter how
         they were declared.

        :param zero_indexes=False: when True, index values will be converted
         between Python zero-based and PostgreSQL one-based indexes, e.g.
         a value of one will be added to all index values before passing
         to the database.

         .. versionadded:: 0.9.5


        """
        if isinstance(item_type, ARRAY):
            raise ValueError("Do not nest ARRAY types; ARRAY(basetype) "
                             "handles multi-dimensional arrays of basetype")
        if isinstance(item_type, type):
            # allow passing a type class instead of an instance
            item_type = item_type()
        self.item_type = item_type
        self.as_tuple = as_tuple
        self.dimensions = dimensions
        self.zero_indexes = zero_indexes

    @property
    def hashable(self):
        # only tuple results are hashable; lists are not
        return self.as_tuple

    @property
    def python_type(self):
        return list

    def compare_values(self, x, y):
        return x == y

    def _set_parent(self, column):
        """Support SchemaEventTarget"""

        # forward schema events (e.g. for ENUM item types) to the item type
        if isinstance(self.item_type, SchemaEventTarget):
            self.item_type._set_parent(column)

    def _set_parent_with_dispatch(self, parent):
        """Support SchemaEventTarget"""

        if isinstance(self.item_type, SchemaEventTarget):
            self.item_type._set_parent_with_dispatch(parent)

    def _proc_array(self, arr, itemproc, dim, collection):
        # Recursively apply ``itemproc`` to the innermost elements of a
        # (possibly nested) array, rebuilding each level with ``collection``.
        # ``dim`` counts remaining dimensions; None means "detect by
        # inspecting the first element".
        if dim is None:
            arr = list(arr)
        if dim == 1 or dim is None and (
                # this has to be (list, tuple), or at least
                # not hasattr('__iter__'), since Py3K strings
                # etc. have __iter__
                not arr or not isinstance(arr[0], (list, tuple))):
            if itemproc:
                return collection(itemproc(x) for x in arr)
            else:
                return collection(arr)
        else:
            return collection(
                self._proc_array(
                    x, itemproc,
                    dim - 1 if dim is not None else None,
                    collection)
                for x in arr
            )

    def bind_processor(self, dialect):
        # outbound values: apply the item type's bind processor to every
        # element, producing nested lists
        item_proc = self.item_type.dialect_impl(dialect).\
            bind_processor(dialect)

        def process(value):
            if value is None:
                return value
            else:
                return self._proc_array(
                    value,
                    item_proc,
                    self.dimensions,
                    list)
        return process

    def result_processor(self, dialect, coltype):
        # inbound values: apply the item type's result processor, building
        # tuples when as_tuple was requested
        item_proc = self.item_type.dialect_impl(dialect).\
            result_processor(dialect, coltype)

        def process(value):
            if value is None:
                return value
            else:
                return self._proc_array(
                    value,
                    item_proc,
                    self.dimensions,
                    tuple if self.as_tuple else list)
        return process
# register the PG-specific ARRAY under the reflection type name "_array"
ischema_names['_array'] = ARRAY
| bsd-3-clause |
muhkuh-sys/org.muhkuh.tests-iomatrix | jonchki/install.py | 1 | 1299 | import os
import subprocess
def install_host_debs(astrDeb):
    """Install the given Debian packages on the host, if not yet installed.

    Queries dpkg for each package's status and runs a single
    ``apt-get install`` (via sudo) for all packages that are missing.

    :param astrDeb: iterable of package names
    """
    astrInstall = []
    for strDeb in astrDeb:
        strDpkgStatus = subprocess.check_output(
            "dpkg-query -W -f='${Status}' %s || echo 'unknown'" % strDeb,
            shell=True
        )
        # BUGFIX: check_output returns bytes on Python 3, so the comparison
        # against a str was always unequal and every package got
        # reinstalled; decode before comparing.  Also strip the trailing
        # newline appended by the "echo 'unknown'" fallback.
        if not isinstance(strDpkgStatus, str):
            strDpkgStatus = strDpkgStatus.decode('utf-8')
        strDpkgStatus = strDpkgStatus.strip()
        print('Check for %s = %s' % (strDeb, strDpkgStatus))
        if strDpkgStatus != 'install ok installed':
            astrInstall.append(strDeb)
    if len(astrInstall) != 0:
        subprocess.check_call('sudo apt-get update --assume-yes', shell=True)
        subprocess.check_call('sudo apt-get install --assume-yes %s' % ' '.join(astrInstall), shell=True)
def install_foreign_debs(astrDeb, strCfg_workingFolder, strCfg_projectFolder):
    """Download the given .deb packages into the working folder.

    Delegates the actual download to ``cmake/tools/get_dependencies.sh``
    inside the project folder.

    NOTE(review): this changes the process-wide current directory and
    leaves it at ``strCfg_workingFolder`` on return -- callers must not
    rely on the previous cwd.

    :param astrDeb: package names, passed through to get_dependencies.sh
    :param strCfg_workingFolder: folder receiving a ``packages`` subfolder
    :param strCfg_projectFolder: project root containing ``cmake/tools``
    """
    # Create the folders if they do not exist yet.
    astrFolders = [
        strCfg_workingFolder,
        os.path.join(strCfg_workingFolder, 'packages'),
    ]
    for strPath in astrFolders:
        if os.path.exists(strPath) is not True:
            os.makedirs(strPath)

    packagesPath = os.path.join(strCfg_workingFolder, 'packages')
    os.chdir(packagesPath)
    subProcessPath = os.path.join(strCfg_projectFolder, 'cmake', 'tools')
    subProcessCall = '%s/get_dependencies.sh %s' % (subProcessPath, ' '.join(astrDeb))
    subprocess.check_call(subProcessCall, shell=True)
    os.chdir(strCfg_workingFolder)
varunarya10/python-novaclient | novaclient/tests/test_auth_plugins.py | 4 | 12185 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
from keystoneclient import fixture
import mock
import pkg_resources
import requests
try:
import json
except ImportError:
import simplejson as json
from novaclient import auth_plugin
from novaclient import exceptions
from novaclient.tests import utils
from novaclient.v1_1 import client
def mock_http_request(resp=None):
    """Mock an HTTP request.

    Builds a default scoped V2 token fixture (with a compute endpoint)
    when no response body is supplied, and returns a Mock whose call
    yields a 200 TestResponse carrying that body as JSON.
    """
    if not resp:
        resp = fixture.V2Token()
        resp.set_scope()
        compute = resp.add_service('compute')
        compute.add_endpoint("http://localhost:8774/v1.1", region='RegionOne')

    auth_response = utils.TestResponse({
        "status_code": 200,
        "text": json.dumps(resp),
    })
    return mock.Mock(return_value=auth_response)
def requested_headers(cs):
    """Return the headers a token request made through *cs* should carry."""
    headers = dict.fromkeys(('Content-Type', 'Accept'), 'application/json')
    headers['User-Agent'] = cs.client.USER_AGENT
    return headers
class DeprecatedAuthPluginTest(utils.TestCase):
    """Tests for the deprecated entry-point based auth plugin mechanism."""

    def test_auth_system_success(self):
        # Entry point whose load() yields an authenticate callable that
        # POSTs a fixed body through the client's _authenticate helper.
        class MockEntrypoint(pkg_resources.EntryPoint):
            def load(self):
                return self.authenticate

            def authenticate(self, cls, auth_url):
                cls._authenticate(auth_url, {"fake": "me"})

        def mock_iter_entry_points(_type, name):
            # Only the 'authenticate' hook is provided; other hooks are empty.
            if _type == 'openstack.client.authenticate':
                return [MockEntrypoint("fake", "fake", ["fake"])]
            else:
                return []

        mock_request = mock_http_request()

        @mock.patch.object(pkg_resources, "iter_entry_points",
                           mock_iter_entry_points)
        @mock.patch.object(requests, "request", mock_request)
        def test_auth_call():
            plugin = auth_plugin.DeprecatedAuthPlugin("fake")
            cs = client.Client("username", "password", "project_id",
                               utils.AUTH_URL_V2, auth_system="fake",
                               auth_plugin=plugin)
            cs.client.authenticate()
            headers = requested_headers(cs)
            token_url = cs.client.auth_url + "/tokens"
            # The plugin-supplied payload must have been POSTed to /tokens.
            mock_request.assert_called_with(
                "POST",
                token_url,
                headers=headers,
                data='{"fake": "me"}',
                allow_redirects=True,
                **self.TEST_REQUEST_BASE)

        test_auth_call()

    def test_auth_system_not_exists(self):
        def mock_iter_entry_points(_t, name=None):
            return [pkg_resources.EntryPoint("fake", "fake", ["fake"])]

        mock_request = mock_http_request()

        @mock.patch.object(pkg_resources, "iter_entry_points",
                           mock_iter_entry_points)
        @mock.patch.object(requests, "request", mock_request)
        def test_auth_call():
            auth_plugin.discover_auth_systems()
            plugin = auth_plugin.DeprecatedAuthPlugin("notexists")
            cs = client.Client("username", "password", "project_id",
                               utils.AUTH_URL_V2, auth_system="notexists",
                               auth_plugin=plugin)
            # An unknown auth system name must raise AuthSystemNotFound.
            self.assertRaises(exceptions.AuthSystemNotFound,
                              cs.client.authenticate)

        test_auth_call()

    def test_auth_system_defining_auth_url(self):
        class MockAuthUrlEntrypoint(pkg_resources.EntryPoint):
            def load(self):
                return self.auth_url

            def auth_url(self):
                return "http://faked/v2.0"

        class MockAuthenticateEntrypoint(pkg_resources.EntryPoint):
            def load(self):
                return self.authenticate

            def authenticate(self, cls, auth_url):
                cls._authenticate(auth_url, {"fake": "me"})

        def mock_iter_entry_points(_type, name):
            # Provide both the auth_url and the authenticate hooks.
            if _type == 'openstack.client.auth_url':
                return [MockAuthUrlEntrypoint("fakewithauthurl",
                                              "fakewithauthurl",
                                              ["auth_url"])]
            elif _type == 'openstack.client.authenticate':
                return [MockAuthenticateEntrypoint("fakewithauthurl",
                                                   "fakewithauthurl",
                                                   ["authenticate"])]
            else:
                return []

        mock_request = mock_http_request()

        @mock.patch.object(pkg_resources, "iter_entry_points",
                           mock_iter_entry_points)
        @mock.patch.object(requests, "request", mock_request)
        def test_auth_call():
            plugin = auth_plugin.DeprecatedAuthPlugin("fakewithauthurl")
            # Note: no auth_url passed to the Client on purpose.
            cs = client.Client("username", "password", "project_id",
                               auth_system="fakewithauthurl",
                               auth_plugin=plugin)
            cs.client.authenticate()
            # The URL supplied by the plugin's auth_url hook must be used.
            self.assertEqual("http://faked/v2.0", cs.client.auth_url)

        test_auth_call()

    @mock.patch.object(pkg_resources, "iter_entry_points")
    def test_client_raises_exc_without_auth_url(self, mock_iter_entry_points):
        class MockAuthUrlEntrypoint(pkg_resources.EntryPoint):
            def load(self):
                return self.auth_url

            def auth_url(self):
                return None

        mock_iter_entry_points.side_effect = lambda _t, name: [
            MockAuthUrlEntrypoint("fakewithauthurl",
                                  "fakewithauthurl",
                                  ["auth_url"])]

        plugin = auth_plugin.DeprecatedAuthPlugin("fakewithauthurl")
        # Plugin yields no auth URL and none is passed: EndpointNotFound.
        self.assertRaises(
            exceptions.EndpointNotFound,
            client.Client, "username", "password", "project_id",
            auth_system="fakewithauthurl", auth_plugin=plugin)
class AuthPluginTest(utils.TestCase):
    """Tests for the BaseAuthPlugin-based auth system machinery."""

    @mock.patch.object(requests, "request")
    @mock.patch.object(pkg_resources, "iter_entry_points")
    def test_auth_system_success(self, mock_iter_entry_points, mock_request):
        """Test that we can authenticate using the auth system."""
        class MockEntrypoint(pkg_resources.EntryPoint):
            def load(self):
                return FakePlugin

        class FakePlugin(auth_plugin.BaseAuthPlugin):
            def authenticate(self, cls, auth_url):
                cls._authenticate(auth_url, {"fake": "me"})

        mock_iter_entry_points.side_effect = lambda _t: [
            MockEntrypoint("fake", "fake", ["FakePlugin"])]
        mock_request.side_effect = mock_http_request()

        auth_plugin.discover_auth_systems()
        plugin = auth_plugin.load_plugin("fake")
        cs = client.Client("username", "password", "project_id",
                           utils.AUTH_URL_V2, auth_system="fake",
                           auth_plugin=plugin)
        cs.client.authenticate()
        headers = requested_headers(cs)
        token_url = cs.client.auth_url + "/tokens"
        # The plugin payload must have been POSTed to the token endpoint.
        mock_request.assert_called_with(
            "POST",
            token_url,
            headers=headers,
            data='{"fake": "me"}',
            allow_redirects=True,
            **self.TEST_REQUEST_BASE)

    @mock.patch.object(pkg_resources, "iter_entry_points")
    def test_discover_auth_system_options(self, mock_iter_entry_points):
        """Test that we can load the auth system options."""
        class FakePlugin(auth_plugin.BaseAuthPlugin):
            @staticmethod
            def add_opts(parser):
                # The plugin contributes one boolean CLI flag.
                parser.add_argument('--auth_system_opt',
                                    default=False,
                                    action='store_true',
                                    help="Fake option")
                return parser

        class MockEntrypoint(pkg_resources.EntryPoint):
            def load(self):
                return FakePlugin

        mock_iter_entry_points.side_effect = lambda _t: [
            MockEntrypoint("fake", "fake", ["FakePlugin"])]

        parser = argparse.ArgumentParser()
        auth_plugin.discover_auth_systems()
        auth_plugin.load_auth_system_opts(parser)
        opts, args = parser.parse_known_args(['--auth_system_opt'])
        self.assertTrue(opts.auth_system_opt)

    @mock.patch.object(pkg_resources, "iter_entry_points")
    def test_parse_auth_system_options(self, mock_iter_entry_points):
        """Test that we can parse the auth system options."""
        class MockEntrypoint(pkg_resources.EntryPoint):
            def load(self):
                return FakePlugin

        class FakePlugin(auth_plugin.BaseAuthPlugin):
            def __init__(self):
                self.opts = {"fake_argument": True}

            def parse_opts(self, args):
                return self.opts

        mock_iter_entry_points.side_effect = lambda _t: [
            MockEntrypoint("fake", "fake", ["FakePlugin"])]

        auth_plugin.discover_auth_systems()
        plugin = auth_plugin.load_plugin("fake")
        plugin.parse_opts([])
        self.assertIn("fake_argument", plugin.opts)

    @mock.patch.object(pkg_resources, "iter_entry_points")
    def test_auth_system_defining_url(self, mock_iter_entry_points):
        """Test the auth_system defining an url."""
        class MockEntrypoint(pkg_resources.EntryPoint):
            def load(self):
                return FakePlugin

        class FakePlugin(auth_plugin.BaseAuthPlugin):
            def get_auth_url(self):
                return "http://faked/v2.0"

        mock_iter_entry_points.side_effect = lambda _t: [
            MockEntrypoint("fake", "fake", ["FakePlugin"])]

        auth_plugin.discover_auth_systems()
        plugin = auth_plugin.load_plugin("fake")
        # No auth_url passed: the plugin's get_auth_url() must supply it.
        cs = client.Client("username", "password", "project_id",
                           auth_system="fakewithauthurl",
                           auth_plugin=plugin)
        self.assertEqual("http://faked/v2.0", cs.client.auth_url)

    @mock.patch.object(pkg_resources, "iter_entry_points")
    def test_exception_if_no_authenticate(self, mock_iter_entry_points):
        """Test that no authenticate raises a proper exception."""
        class MockEntrypoint(pkg_resources.EntryPoint):
            def load(self):
                return FakePlugin

        class FakePlugin(auth_plugin.BaseAuthPlugin):
            # Deliberately implements neither authenticate nor get_auth_url.
            pass

        mock_iter_entry_points.side_effect = lambda _t: [
            MockEntrypoint("fake", "fake", ["FakePlugin"])]

        auth_plugin.discover_auth_systems()
        plugin = auth_plugin.load_plugin("fake")
        self.assertRaises(
            exceptions.EndpointNotFound,
            client.Client, "username", "password", "project_id",
            auth_system="fake", auth_plugin=plugin)

    @mock.patch.object(pkg_resources, "iter_entry_points")
    def test_exception_if_no_url(self, mock_iter_entry_points):
        """Test that no auth_url at all raises exception."""
        class MockEntrypoint(pkg_resources.EntryPoint):
            def load(self):
                return FakePlugin

        class FakePlugin(auth_plugin.BaseAuthPlugin):
            pass

        mock_iter_entry_points.side_effect = lambda _t: [
            MockEntrypoint("fake", "fake", ["FakePlugin"])]

        auth_plugin.discover_auth_systems()
        plugin = auth_plugin.load_plugin("fake")
        self.assertRaises(
            exceptions.EndpointNotFound,
            client.Client, "username", "password", "project_id",
            auth_system="fake", auth_plugin=plugin)
| apache-2.0 |
nirmeshk/oh-mainline | vendor/packages/Django/django/contrib/webdesign/lorem_ipsum.py | 230 | 4908 | """
Utility functions for generating "lorem ipsum" Latin text.
"""
from __future__ import unicode_literals
import random
COMMON_P = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.'
WORDS = ('exercitationem', 'perferendis', 'perspiciatis', 'laborum', 'eveniet',
'sunt', 'iure', 'nam', 'nobis', 'eum', 'cum', 'officiis', 'excepturi',
'odio', 'consectetur', 'quasi', 'aut', 'quisquam', 'vel', 'eligendi',
'itaque', 'non', 'odit', 'tempore', 'quaerat', 'dignissimos',
'facilis', 'neque', 'nihil', 'expedita', 'vitae', 'vero', 'ipsum',
'nisi', 'animi', 'cumque', 'pariatur', 'velit', 'modi', 'natus',
'iusto', 'eaque', 'sequi', 'illo', 'sed', 'ex', 'et', 'voluptatibus',
'tempora', 'veritatis', 'ratione', 'assumenda', 'incidunt', 'nostrum',
'placeat', 'aliquid', 'fuga', 'provident', 'praesentium', 'rem',
'necessitatibus', 'suscipit', 'adipisci', 'quidem', 'possimus',
'voluptas', 'debitis', 'sint', 'accusantium', 'unde', 'sapiente',
'voluptate', 'qui', 'aspernatur', 'laudantium', 'soluta', 'amet',
'quo', 'aliquam', 'saepe', 'culpa', 'libero', 'ipsa', 'dicta',
'reiciendis', 'nesciunt', 'doloribus', 'autem', 'impedit', 'minima',
'maiores', 'repudiandae', 'ipsam', 'obcaecati', 'ullam', 'enim',
'totam', 'delectus', 'ducimus', 'quis', 'voluptates', 'dolores',
'molestiae', 'harum', 'dolorem', 'quia', 'voluptatem', 'molestias',
'magni', 'distinctio', 'omnis', 'illum', 'dolorum', 'voluptatum', 'ea',
'quas', 'quam', 'corporis', 'quae', 'blanditiis', 'atque', 'deserunt',
'laboriosam', 'earum', 'consequuntur', 'hic', 'cupiditate',
'quibusdam', 'accusamus', 'ut', 'rerum', 'error', 'minus', 'eius',
'ab', 'ad', 'nemo', 'fugit', 'officia', 'at', 'in', 'id', 'quos',
'reprehenderit', 'numquam', 'iste', 'fugiat', 'sit', 'inventore',
'beatae', 'repellendus', 'magnam', 'recusandae', 'quod', 'explicabo',
'doloremque', 'aperiam', 'consequatur', 'asperiores', 'commodi',
'optio', 'dolor', 'labore', 'temporibus', 'repellat', 'veniam',
'architecto', 'est', 'esse', 'mollitia', 'nulla', 'a', 'similique',
'eos', 'alias', 'dolore', 'tenetur', 'deleniti', 'porro', 'facere',
'maxime', 'corrupti')
COMMON_WORDS = ('lorem', 'ipsum', 'dolor', 'sit', 'amet', 'consectetur',
'adipisicing', 'elit', 'sed', 'do', 'eiusmod', 'tempor', 'incididunt',
'ut', 'labore', 'et', 'dolore', 'magna', 'aliqua')
def sentence():
    """
    Returns a randomly generated sentence of lorem ipsum text.

    The first word is capitalized, and the sentence ends in either a period or
    question mark. Commas are added at random.
    """
    # Pick how many comma-separated sections this sentence has, then fill
    # each section with a random sample of 3-12 words.
    sections = []
    for _ in range(random.randint(1, 5)):
        sections.append(' '.join(random.sample(WORDS, random.randint(3, 12))))
    body = ', '.join(sections)
    # Capitalize the first letter and close with random end punctuation.
    return '%s%s%s' % (body[0].upper(), body[1:], random.choice('?.'))
def paragraph():
    """
    Returns a randomly generated paragraph of lorem ipsum text.

    The paragraph consists of between 1 and 4 sentences, inclusive.
    """
    count = random.randint(1, 4)
    return ' '.join(sentence() for _ in range(count))
def paragraphs(count, common=True):
    """
    Returns a list of `count` paragraphs as returned by paragraph().

    If `common` is True, then the first paragraph will be the standard
    'lorem ipsum' paragraph. Otherwise, the first paragraph will be random
    Latin text. Either way, subsequent paragraphs will be random Latin text.
    """
    return [COMMON_P if common and i == 0 else paragraph()
            for i in range(count)]
def words(count, common=True):
    """
    Returns a string of `count` lorem ipsum words separated by a single space.

    If `common` is True, then the first 19 words will be the standard
    'lorem ipsum' words. Otherwise, all words will be selected randomly.
    """
    word_list = list(COMMON_WORDS) if common else []
    available = len(word_list)
    if count <= available:
        # The fixed words already suffice; just truncate.
        return ' '.join(word_list[:count])
    remaining = count - available
    while remaining > 0:
        # Sample without replacement in batches of at most len(WORDS).
        batch = min(remaining, len(WORDS))
        word_list += random.sample(WORDS, batch)
        remaining -= batch
    return ' '.join(word_list)
| agpl-3.0 |
chebee7i/dit | dit/algorithms/tests/test_minimal_sufficient_statistic.py | 1 | 1138 | """
Tests for dit.algorithms.minimal_sufficient_statistic.
"""
from __future__ import division
from nose.tools import assert_almost_equal, assert_true
from dit import Distribution, ScalarDistribution
from dit.algorithms import insert_mss, mss, mss_sigalg
def get_gm():
    """
    Return the joint distribution used as a fixture by the tests below.
    """
    outcomes = ['0101', '0110', '0111', '1010', '1011', '1101', '1110', '1111']
    pmf = [1/6, 1/12, 1/12, 1/6, 1/6, 1/6, 1/12, 1/12]
    return Distribution(outcomes, pmf)
def test_mss():
    """
    Test the construction of minimal sufficient statistics.
    """
    d = get_gm()
    # Build the MSS of each half of the variables about the other half.
    d1 = mss(d, [0, 1], [2, 3])
    d2 = mss(d, [2, 3], [0, 1])
    # Both must approximately equal the same two-outcome distribution.
    dist = ScalarDistribution([0, 1], [1/3, 2/3])
    assert_true(dist.is_approx_equal(d1))
    assert_true(dist.is_approx_equal(d2))
    assert_true(d1.is_approx_equal(d2))
def test_insert_mss():
    """
    Test the insertion of minimal sufficient statistics.
    """
    d = get_gm()
    # Insert the MSS of each pair of variables about the other pair; the
    # two inserted statistics become indexes 4 and 5.
    d = insert_mss(d, -1, [0, 1], [2, 3])
    d = insert_mss(d, -1, [2, 3], [0, 1])
    # Keep only the two inserted statistics.
    d = d.marginal([4, 5])
    dist = Distribution(['01', '10', '11'], [1/3, 1/3, 1/3])
    assert_true(d.is_approx_equal(dist))
| bsd-3-clause |
FedericoCeratto/firelet | tests/test.py | 1 | 59964 | # Firelet - Distributed firewall management.
# Copyright (C) 2010 Federico Ceratto
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from logging import getLogger
from mock import Mock
from netaddr import IPNetwork
from paramiko import SSHClient
from pytest import raises
import mock
import os
import os.path
import pytest
import testingutils
from firelet.confreader import ConfReader
from firelet.flcore import Host, HostGroup, Network, Service, Users
from firelet.flcore import Alert, validc
from firelet.flcore import clean, GitFireSet, DemoGitFireSet, savejson, loadjson
from firelet.flcore import readcsv, savecsv, Hosts
from firelet.flmap import draw_svg_map
from firelet.flssh import SSHConnector, MockSSHConnector
from firelet.flutils import Bunch
from firelet.mailer import Mailer
log = getLogger(__name__)
deb = log.debug
SkipTest = pytest.mark.skipif(True, reason='skipped')
# mark tests that require Git to be installed
require_git = pytest.mark.require_git
#TODO: migration to network objects
#TODO: parallel SSH
#TODO: SSH check and deployment
# Summary of tests:
#
# User Management
# File load/save
# GitFireSet
#
# CLI
# IP address manipulation
#
def debug(s, o=None):
    """Log a label and a pretty-printed representation of an object.

    s -- label for the log entry
    o -- any object; JSON-serialized when possible, repr() otherwise
    """
    from json import dumps
    try:
        d = dumps(o, indent=2)
    except (TypeError, ValueError):
        # Not JSON-serializable (or circular): fall back to repr().
        # Narrowed from a bare `except:` which also swallowed e.g.
        # KeyboardInterrupt.
        d = repr(o)
    li = d.split('\n')
    if len(li) < 3:
        # Short representations go on a single line.
        log.debug("%s: %s" % (s, repr(o)))
    else:
        indented = "\n    ".join(li)
        log.debug("-------- [%s] ---------\n    %s", s, indented)
        log.debug("----- [end of %s] -----", s)
# # Testing Confreader # #
def test_confreader(repodir):
    """firelet.ini must parse and expose the expected default values."""
    conf = ConfReader('firelet.ini')
    assert conf.title == 'Firelet'
    assert conf.ssh_username == 'firelet'
# # Testing misc # #
def test_validc(repodir):
    """Test invalid characters.

    validc() must reject control, quoting and non-ASCII characters:
    30=RS (control), 34=\", 39=', 60=<, 62=>, 96=`, 128=non-ASCII.
    """
    for c in (30, 34, 39, 60, 62, 96, 128):
        # `assert not ...` instead of the unidiomatic `== False`.
        assert not validc(chr(c))
def test_clean():
    """Test user input cleanup"""
    # clean() must strip the quote and angle-bracket characters (" ' < >)
    # from the printable-ASCII input while keeping every other character.
    s = clean(' !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_')
    assert s == ' !#$%&()*+,-./0123456789:;=?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_'
# # Testing Mailer # #
def test_send_msg():
    """send_msg() must deliver exactly one HTML-formatted message."""
    m = Mailer()
    m._send = Mock()  # intercept the actual delivery
    m.send_msg()
    m.join()  # wait so the mocked _send has been invoked
    assert m._send.call_count == 1
    msg = m._send.call_args[0][3]  # 'msg' passed to _send()
    assert 'Subject: [Firelet] Message' in msg
    assert 'DOCTYPE html' in msg
def test_send_diff():
    """send_diff() must deliver exactly one HTML diff message."""
    m = Mailer()
    m._send = Mock()  # intercept the actual delivery
    m.send_diff({'items':[]})
    m.join()  # wait so the mocked _send has been invoked
    assert m._send.call_count == 1
    msg = m._send.call_args[0][3]  # 'msg' passed to _send()
    assert 'Subject: [Firelet] Diff' in msg
    assert 'DOCTYPE html' in msg
@SkipTest #FIXME
def test_send_html(repodir):
    """send_html() must deliver one HTML message (currently skipped)."""
    m = Mailer()
    m._send = Mock()  # intercept the actual delivery
    m.send_html(sbj='sbj', body='body')
    assert m._send.call_count == 1
    msg = m._send.call_args[0][3]  # 'msg' passed to _send()
    assert 'Subject: [Firelet] Message' in msg
    assert 'DOCTYPE html' in msg
# # Testing flssh module without network interaction # #
#TODO: test SSHConnector instead of MockSSHConnector where possible
def test_parse_iptables_save_1():
    """parse_iptables_save() must reject an empty ruleset."""
    sx = MockSSHConnector(targets={'localhost':['127.0.0.1']})
    with raises(Exception):
        sx.parse_iptables_save('')
def test_parse_iptables_save_2():
    """parse_iptables_save() must extract the filter rules and return an
    empty nat list when the dump contains only a *filter table."""
    sx = MockSSHConnector(targets={'localhost':['127.0.0.1']})
    ret = sx.parse_iptables_save("""
# Generated by iptables-save v1.4.9 on Sun Feb 20 15:04:36 2011
*filter
:INPUT ACCEPT [36:4504]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [36:4504]
-A INPUT -s 3.3.3.3/32 -j ACCEPT
-A INPUT -d 3.3.3.3/32 -p tcp -m tcp --dport 44 -j ACCEPT
COMMIT
# Completed on Sun Feb 20 15:04:36 2011
""")
    # Only the two -A rules survive; counters and comments are dropped.
    assert ret['filter'] == ['-A INPUT -s 3.3.3.3/32 -j ACCEPT', '-A INPUT -d 3.3.3.3/32 -p tcp -m tcp --dport 44 -j ACCEPT']
    assert ret['nat'] == []
    assert len(ret) == 2
def test_parse_iptables_save_3():
sx = MockSSHConnector(targets={'localhost':['127.0.0.1']})
ret = sx.parse_iptables_save("""*nat
:PREROUTING ACCEPT [7:600]
:POSTROUTING ACCEPT [987:59895]
:OUTPUT ACCEPT [987:59895]
-A PREROUTING -d 1.2.3.4/32 -p tcp -m tcp --dport 44 -j ACCEPT
COMMIT
*filter
:INPUT ACCEPT [36:4504]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [36:4504]
-A INPUT -d 3.3.3.3/32 -p tcp -m tcp --dport 44 -j ACCEPT
COMMIT""")
assert ret['filter'] == ['-A INPUT -d 3.3.3.3/32 -p tcp -m tcp --dport 44 -j ACCEPT']
assert ret['nat'] == ['-A PREROUTING -d 1.2.3.4/32 -p tcp -m tcp --dport 44 -j ACCEPT']
assert len(ret) == 2
#def test_gen_iptables_restore_1(repodir):
# sx = SSHConnector(targets={'localhost':['127.0.0.1']})
# block = sx._gen_iptables_restore('localhost', [])
# assert block == [
# '# Created by Firelet for host localhost',
# '*filter',
# ':INPUT ACCEPT',
# ':FORWARD ACCEPT',
# ':OUTPUT ACCEPT',
# 'COMMIT'
# ], "Incorrect empty iptables-restore block created: %s" % repr(block)
@SkipTest
def test_MockSSHConnector_get_confs(repodir):
sshconn = MockSSHConnector(targets={'localhost':['127.0.0.1']})
sshconn.repodir = repodir
d = sshconn.get_confs()
assert 'localhost' in d, repr(d)
assert 'iptables' in d['localhost'], repr(d)
assert 'ip_a_s' in d['localhost'], repr(d)
assert d['localhost'].iptables != None
assert d['localhost'].ip_a_s != None
ok = {'localhost': {'iptables': ['# Generated by iptables-save v1.4.8 on Sun Jul 4 09:28:19 2010', '*nat', ':PREROUTING ACCEPT [8:3712]', ':POSTROUTING ACCEPT [32:3081]', ':OUTPUT ACCEPT [32:3081]', '-A POSTROUTING -o eth3 -j MASQUERADE', 'COMMIT', '# Completed on Sun Jul 4 09:28:19 2010', '# Generated by iptables-save v1.4.8 on Sun Jul 4 09:28:19 2010', '*filter', ':INPUT ACCEPT [4304:2534591]', ':FORWARD ACCEPT [0:0]', ':OUTPUT ACCEPT [4589:2195434]', '-A INPUT -s 10.0.0.0/8 -p tcp -m tcp --dport 80 -j ACCEPT', '-A FORWARD -s 1.2.3.4/32 -d 5.6.7.8/32 -p tcp -m multiport --dports 22,80,443 -j ACCEPT', '-A OUTPUT -d 10.10.10.10/32 -p udp -m udp --dport 123 -j ACCEPT', 'COMMIT', '# Completed on Sun Jul 4 09:28:19 2010'], 'ip_a_s': {'lo': ('127.0.0.1/8', '::1/128'), 'wlan0': ('192.168.1.1/24', 'fe80::219:d2ff:fe26:fb8e/64'), 'eth0': (None, None)}}}
for x in d:
for y in d[x]:
assert d[x][y] == ok[x][y], """Incorrect conf retrieved:
\n%s\n\nversus\n\n%s""" % (d[x][y], ok[x][y])
#def setup_dummy_flssh():
# """Patch the pxssh module to use files instead of performing network interaction"""
# import pxssh
# setup_dir()
# def dummy_sl(self, a):
# n = self.my_hostname
# log.debug( "Sending '%s' to bogus '%s'" % (a, n))
# if 'save' in a:
# self.before = open('test/iptables-save-%s' % n).read()
# else:
# self.before = open('test/ip-addr-show-%s' % n).read()
#
# pxssh.login = pxssh.isalive = pxssh.prompt = pxssh.logout = lambda *x: True
# pxssh.sendline = dummy_sl
# globals()['pxssh'] = pxssh
#
#def teardown_flssh():
# teardown_dir()
#
#
#@with_setup(setup_dummy_flssh)
#def test_get_confs_local_dummy(repodir):
# from firelet.flssh import SSHConnector, MockSSHConnector
#
# sshconn = SSHConnector(targets={'localhost':['127.0.0.1']} )
# d = sshconn.get_confs( )
# assert 'localhost' in d
# assert d['localhost']
# assert d == {'localhost': [None, '127.0.0.1', {'filter': '-A INPUT -s 10.0.0.0/8 -p tcp -m tcp --dport 80 -j ACCEPT\n-A FORWARD -s 1.2.3.4/32 -d 5.6.7.8/32 -p tcp -m multiport --dports 22,80,443 -j ACCEPT\n-A OUTPUT -d 10.10.10.10/32 -p udp -m udp --dport 123 -j ACCEPT', 'nat': '-A POSTROUTING -o eth3 -j MASQUERADE'}, {'lo': ('127.0.0.1/8', '::1/128'), 'teredo': (None, 'fe80::ffff:ffff:ffff/64'), 'wlan0': ('192.168.1.1/24', 'fe80::219:d2ff:fe26:fb8e/64'), 'eth0': (None, None)}]}
#@with_setup(setup_dummy_flssh, teardown_dir)
#def test_get_confs3(repodir):
# fs = DumbFireSet(repodir=repodir)
# fs._get_confs()
# assert fs._remote_confs == {'InternalFW': [None, '10.66.2.1', {'filter': '-A INPUT -s 10.0.0.0/8 -p tcp -m tcp --dport 80 -j ACCEPT\n-A FORWARD -s 1.2.3.4/32 -d 5.6.7.8/32 -p tcp -m multiport --dports 22,80,443 -j ACCEPT\n-A OUTPUT -d 10.10.10.10/32 -p udp -m udp --dport 123 -j ACCEPT', 'nat': '-A POSTROUTING -o eth3 -j MASQUERADE'}, {'lo': ('127.0.0.1/8', '::1/128'), 'eth1': ('10.66.2.1/24', 'fe80::3939:3939:3939:3939/64'), 'eth0': ('10.66.1.2/24', 'fe80::3939:3939:3939:3939/64')}], 'Server001': [None, '10.66.2.2', {'filter': '-A INPUT -s 10.0.0.0/8 -p tcp -m tcp --dport 80 -j ACCEPT\n-A FORWARD -s 1.2.3.4/32 -d 5.6.7.8/32 -p tcp -m multiport --dports 22,80,443 -j ACCEPT\n-A OUTPUT -d 10.10.10.10/32 -p udp -m udp --dport 123 -j ACCEPT', 'nat': '-A POSTROUTING -o eth3 -j MASQUERADE'}, {'lo': ('127.0.0.1/8', '::1/128'), 'eth0': ('10.66.2.2/24', 'fe80::3939:3939:3939:3939/64')}], 'BorderFW': [None, '10.66.1.1', {'filter': '-A INPUT -s 10.0.0.0/8 -p tcp -m tcp --dport 80 -j ACCEPT\n-A FORWARD -s 1.2.3.4/32 -d 5.6.7.8/32 -p tcp -m multiport --dports 22,80,443 -j ACCEPT\n-A OUTPUT -d 10.10.10.10/32 -p udp -m udp --dport 123 -j ACCEPT', 'nat': '-A POSTROUTING -o eth3 -j MASQUERADE'}, {'lo': ('127.0.0.1/8', '::1/128'), 'eth1': ('10.66.1.1/24', 'fe80::3939:3939:3939:3939/64'), 'eth0': ('172.16.2.223/24', 'fe80::3939:3939:3939:3939/64')}], 'Smeagol': [None, '10.66.1.3', {'filter': '-A INPUT -s 10.0.0.0/8 -p tcp -m tcp --dport 80 -j ACCEPT\n-A FORWARD -s 1.2.3.4/32 -d 5.6.7.8/32 -p tcp -m multiport --dports 22,80,443 -j ACCEPT\n-A OUTPUT -d 10.10.10.10/32 -p udp -m udp --dport 123 -j ACCEPT', 'nat': '-A POSTROUTING -o eth3 -j MASQUERADE'}, {'lo': ('127.0.0.1/8', '::1/128'), 'eth0': ('10.66.1.3/24', 'fe80::3939:3939:3939:3939/64')}]}
#@with_setup(setup_dummy_flssh, teardown_dir)
#def test_get_confs4(repodir):
# fs = DumbFireSet(repodir=repodir)
# fs._get_confs()
# fs._check_ifaces()
# rd = fs.compile_dict(hosts=fs.hosts)
# mock the paramiko.SSHClient.connect method to test _connect_one
# _connect_one is normally running in a dedicated thread
@mock.patch.object(SSHClient, 'connect')
def test_flssh_connect_one(mocked_connect):
    """_connect_one() must store a pool entry keyed on the hostname, using
    the first of the supplied IP addresses."""
    sx = SSHConnector(targets={})
    sx._connect_one('bogusfirewall', (
        '0.0.0.1',
        '0.0.0.2',
    ))
    assert sx._pool['bogusfirewall'].ip_addr == '0.0.0.1'
# # User management testing # #
def test_user_management(repodir):
    """CRUD and password-validation round-trip on the Users store."""
    u = Users(d=repodir)
    u.create('Totoro', 'admin', 'rawr', 'totoro@nowhere.forest')
    # Creating a duplicate username must fail.
    with raises(Exception):
        u.create('Totoro', '', '', '')
    u.validate('Totoro', 'rawr')
    # A wrong password must be rejected.
    with raises(Exception):
        u.validate('Totoro', 'booo')
    u.update('Totoro', role='user')
    assert u._users['Totoro'][0] == 'user'
    u.update('Totoro', pwd='')
    u.update('Totoro', email='')
    assert u._users['Totoro'][2] == ''
    # Updating a nonexistent user must fail.
    with raises(Exception):
        u.update('Totoro2', 'email=""')
    u.delete('Totoro')
    # Deleting twice must fail.
    with raises(Exception):
        u.delete('Totoro')
# # File save/load # #
def test_load_save_hosts(repodir):
    """Saving Hosts must leave hosts.csv unchanged (load/save round-trip)."""
    def read_lines(path):
        # Non-empty, stripped lines; `with` closes the handle that the
        # original bare open() calls leaked.
        with open(path) as f:
            return [line.strip() for line in f if line.strip()]

    fn = os.path.join(repodir, 'hosts.csv')
    content = read_lines(fn)
    h = Hosts(d=repodir)
    h.save()
    content2 = read_lines(fn)
    h2 = Hosts(d=repodir)
    assert content == content2, "load/save hosts loop failed:\n\n%s\n\n%s\n\n" \
        % (repr(content), repr(content2))
    assert repr(h) == repr(h2), "load/save hosts loop failed"
def test_load_save_csv(repodir):
    """readcsv/savecsv round-trip must preserve the rules table."""
    h = readcsv('rules', d=repodir)
    h = tuple(h)  # materialize (readcsv may yield rows lazily)
    savecsv('rules', h, d=repodir)
    h2 = readcsv('rules', d=repodir)
    h2 = tuple(h2)
    assert h == h2, "load/save hosts loop failed:\n%s\n!=\n%s" % \
        (h, h2)
# # FireSet testing # #
@pytest.fixture
def gfs(repodir):
    """Provide a GitFireSet backed by the test repository."""
    assert os.path.isdir(repodir)
    filenames = os.listdir(repodir)
    # Guard against an incomplete or polluted fixture repository.
    assert len(filenames) == 18, "Unexpected repository contents %r" % filenames
    return GitFireSet(repodir=repodir)
@pytest.fixture
def fs(repodir):
    """Provide a DemoGitFireSet backed by the test repository."""
    return DemoGitFireSet(repodir=repodir)
def test_gitfireset_otp(gfs):
    """generate_otp() must return a 10-character string."""
    otp = gfs.generate_otp()
    assert isinstance(otp, str)
    assert len(otp) == 10
@require_git
def test_gitfireset_git_basics(gfs):
assert gfs._git_executable.endswith('git')
assert os.path.isfile(gfs._git_executable)
assert os.access(gfs._git_executable, os.X_OK)
out = gfs._git('config -l')
testingutils.printout(out, 'git config -l')
assert 'core.bare=false' in out[0]
out = gfs._git('rev-parse --show-toplevel')
testingutils.printout(out, 'git top level')
out, err = gfs._git('status -uno')
testingutils.printout(out, 'git status uno - output')
testingutils.printout(err, 'git status uno - errors')
assert 'On branch master' in out
assert 'nothing to commit' in out
assert not err, repr(err)
@require_git
def test_gitfireset_simple(gfs):
assert gfs.save_needed() == False
gfs.save('test')
assert gfs.save_needed() == False
gfs.reset()
assert gfs.save_needed() == False
@require_git
def test_gitfireset_long(gfs):
# Delete first item in every table
for t in ('rules', 'hosts', 'hostgroups', 'services', 'networks'):
gfs.delete(t, 1)
assert gfs.save_needed() == True, "save_needed non set when deleting item 1 from %s" % t
gfs.save("%s: n.1 deleted" % t)
assert gfs.save_needed() == False
# Perform changes
gfs.rules.disable(2)
assert not gfs.rules.enabled(2), "Rule 2 should be flagged as disabled"
gfs.rules.enable(2)
assert gfs.rules.enabled(2), "Rule 2 should be flagged as enabled"
gfs.rules.moveup(2)
assert gfs.save_needed() == True
gfs.rules.movedown(1)
gfs.save('movedown1')
gfs.rules.movedown(2)
gfs.save('movedown2')
gfs.rules.movedown(3)
gfs.save('movedown3')
# Check version list
vl = gfs.version_list()
assert zip(*vl)[2] == (['movedown3'],
['movedown2'],
['networks: n.1 deleted'],
['services: n.1 deleted'],
['hostgroups: n.1 deleted'],
['hosts: n.1 deleted'],
['rules: n.1 deleted'])
dup = testingutils.duplicates(zip(*vl)[3])
assert not dup, "Some commit IDs are duplicate: \
%s" % repr(dup)
# Check version_diff
last_commit_id = vl[-1][-1]
diff = gfs.version_diff(last_commit_id)
assert ('1 http_ok InternalFW:eth1 * Server001:eth0 HTTP ACCEPT 0 "Web server"',
'add') in diff
assert ('1 http_ok InternalFW:eth1 * Server001:eth0 HTTP ACCEPT 0 "Web server"',
'del') in diff
assert len(diff) == 52
# Rollback and check again
gfs.rollback(2)
assert gfs.save_needed() == False
vl = gfs.version_list()
log.debug('version_list: %s' % repr(vl))
assert zip(*vl)[2] == (['networks: n.1 deleted'],
['services: n.1 deleted'],
['hostgroups: n.1 deleted'],
['hosts: n.1 deleted'],
['rules: n.1 deleted'])
def test_gitfireset_smarttable_methods(gfs):
gfs.fetch('hosts', 0)
gfs.delete('hosts', 0)
#TODO: add other methods
# d = {'name': pg('name'),
# 'childs': childs}
# if rid == None: # new item
# gfs.hostgroups.add(d)
def test_gitfireset_check_ifaces_1(gfs):
gfs._remote_confs = None
with raises(AssertionError):
gfs._check_ifaces()
def test_gitfireset_check_ifaces_20(gfs):
d = {'InternalFW': {'filter': [], 'ip_a_s': {'eth1': ('10.66.2.1/24',None),
'eth0': ('10.66.1.2/24', None)}},
'Server001': {'filter': [], 'ip_a_s': {'eth0': ('10.66.2.2/24', None)}},
'BorderFW': {'filter': [], 'ip_a_s': {
'eth1': ('10.66.1.1/24', None),
'eth2': ('88.88.88.88/24', None),
'eth0': ('172.16.2.223/24', None)}},
'Smeagol': {'filter': [], 'ip_a_s': {'eth0': ('10.66.1.3/24', None)}} }
gfs._remote_confs = {}
for n, v in d.iteritems():
gfs._remote_confs[n] = Bunch(filter=v['filter'], ip_a_s=v['ip_a_s'])
gfs._check_ifaces()
def test_gitfireset_check_ifaces_wrong_value(gfs):
gfs._remote_confs = {'bogus': 'not a bunch'} # value should be a Bunch
with raises(AssertionError):
gfs._check_ifaces()
def test_gitfireset_check_ifaces_wrong_bunch_size(gfs):
gfs._remote_confs = {'bogus': Bunch()} # len(Bunch(...)) should be 2 (ip_addr_v4, ip_addr_v6)
with raises(AssertionError):
gfs._check_ifaces()
def test_gitfireset_check_ifaces_missing_iface(gfs):
gfs.hosts = [
Bunch(hostname='host1', iface='lo'),
Bunch(hostname='host2', iface='lo')
]
gfs._remote_confs = {
'host1': Bunch(ip_a_s = {'lo': ()}),
'host2': Bunch(ip_a_s = {}) # missing iface
}
with raises(AssertionError):
gfs._check_ifaces()
def test_gitfireset_check_ifaces_wrong_ipaddr_string(gfs):
"""_check_ifaces should raise AssertionError on incorrect IPaddr strings"""
gfs.hosts = [
Bunch(hostname='host1', iface='lo'),
Bunch(hostname='host2', iface='lo')
]
gfs._remote_confs = {
'host1': Bunch(ip_a_s = {'lo': ('bogus', 'bogus')}),
'host2': Bunch(ip_a_s = {'lo': ('bogus', 'bogus') })
}
with raises(AssertionError):
gfs._check_ifaces()
def test_gitfireset_check_ifaces_wrong_ipaddr(gfs):
"""_check_ifaces should raise AssertionError on incorrect IPaddr"""
gfs.hosts = [
Bunch(hostname='host1', iface='lo', ip_addr='1.2.3.4'),
]
gfs._remote_confs = {
'host1': Bunch(ip_a_s = {'lo': ('1.2.3.5/32', None)}),
}
with raises(AssertionError):
gfs._check_ifaces()
def test_gitfireset_check_ifaces_correct(gfs):
gfs.hosts = [
Bunch(hostname='host1', iface='lo', ip_addr='1.2.3.4', mng='1'),
]
gfs._remote_confs = {
'host1': Bunch(
ip_a_s = {'lo': ('1.2.3.4/32', None)},
iptables_p = Bunch()
),
}
gfs._check_ifaces(stop_on_extra_interfaces=False)
def test_gitfireset_check_ifaces_correct2(gfs):
"""gitfireset_check_ifaces_correct2 has stop_on_extra_interfaces = True"""
gfs.hosts = [
Bunch(hostname='host1', iface='lo', ip_addr='1.2.3.4', mng=1),
]
gfs._remote_confs = {
'host1': Bunch(
ip_a_s = {'lo': ('1.2.3.4/32', None)},
iptables_p = Bunch()
),
}
gfs._check_ifaces(stop_on_extra_interfaces=True)
def test_gitfireset_check_ifaces_alert(gfs):
"""gitfireset_check_ifaces_correct3 should raise Alert
'Alert: One or more firewalls have extra interfaces: host1: eth0'
"""
gfs.hosts = [
Bunch(hostname='host1', iface='lo', ip_addr='1.2.3.4', mng=1),
]
gfs._remote_confs = {
'host1': Bunch(
ip_a_s = {
'lo': ('1.2.3.4/32', None),
'eth0': ('7.7.7.7/32', None), #extra iface
},
iptables_p = Bunch()
),
}
with raises(Alert):
gfs._check_ifaces(stop_on_extra_interfaces=True)
def test_gitfireset_sibling_names(gfs):
    """list_sibling_names() must return exactly the expected set of
    group names and host:interface names (order ignored)."""
    names = ['AllSystems', 'BorderFW:eth0', 'BorderFW:eth1', 'BorderFW:eth2', 'Clients', 'InternalFW:eth0', \
        'InternalFW:eth1', 'SSHnodes', 'Server001:eth0', 'Servers', 'Smeagol:eth0', 'Tester:eth1', 'WebServers']
    sn = gfs.list_sibling_names()
    assert sorted(sn) == sorted(names), "list_sibling_names generating incorrect output: %s" % repr(sorted(sn))
def test_gitfireset_get_firewalls(gfs):
    """_get_firewalls must select exactly the firewalling host/iface pairs."""
    expected = [
        ('BorderFW', 'eth0'), ('BorderFW', 'eth1'), ('BorderFW', 'eth2'),
        ('InternalFW', 'eth0'), ('InternalFW', 'eth1'),
        ('Server001', 'eth0'), ('Smeagol', 'eth0'),
    ]
    pairs = sorted((h.hostname, h.iface) for h in gfs._get_firewalls())
    assert pairs == expected, "Wrong hosts selected as firewalls: %s" % repr(pairs)
#def test_dumbfireset(repodir):
# fs = DumbFireSet(repodir=repodir)
# assert gfs.save_needed() == False
# gfs.save('save')
# assert gfs.save_needed() == False
# gfs.reset()
# assert gfs.save_needed() == False
# gfs.rollback(2)
# assert gfs.save_needed() == False
# vl = gfs.version_list()
# # assert
# for t in ('rules', 'hosts', 'hostgroups', 'services', 'networks'):
# tmp = len(gfs.__dict__[t])
# gfs.delete(t, 0)
# assert gfs.save_needed() == True, t
# assert tmp == len(gfs.__dict__[t]) + 1, t
# gfs.save('test')
# assert gfs.save_needed() == False
# orig_rules = gfs.rules[:] # copy
# gfs.rules.moveup(2)
# assert gfs.save_needed() == True
# assert orig_rules != gfs.rules
# gfs.rules.movedown(1)
# assert orig_rules == gfs.rules
#
# gfs.rules.movedown(1)
# assert orig_rules != gfs.rules
# assert gfs.save_needed() == True
# gfs.reset()
# assert gfs.save_needed() == False
# assert orig_rules == gfs.rules
def test_DemoGitFireSet_get_confs(fs):
    """_get_confs must populate _remote_confs with a Bunch per firewall."""
    fs._get_confs(keep_sessions=False)
    # The hostname keys are not needed here, only the stored values.
    for conf in fs._remote_confs.itervalues():
        assert isinstance(conf, Bunch)
    for fw in fs._get_firewalls():
        assert fw.hostname in fs._remote_confs, "Missing host %s" % fw.hostname
@require_git
def test_DemoGitFireSet_deployment(fs):
    """Deploy confs, then check"""
    fs.deploy()
    # After a deployment the compiled and remote rules must match exactly.
    diff = fs.check()
    assert diff == {}, repr(diff)[:400]
# # Rule compilation and deployment testing # #
@require_git
def test_DemoGitFireSet_compile_rules_basic(fs):
    """Compile rules and perform basic testing"""
    compiled = fs.compile_rules()
    # Every chain on every host must contain a DROP rule somewhere.
    for hostname, chains in compiled.iteritems():
        for chain_name, chain_rules in chains.iteritems():
            assert testingutils.string_in_list('-j DROP', chain_rules), \
                "-j DROP not in %s" % repr(chain_rules)
def test_DemoGitFireSet_compile_rules_full(gfs):
    """Compare the fully compiled ruleset against a golden copy.

    NOTE(review): the check loop enumerates the generated rules and
    indexes into the expected list — a generated chain longer than the
    golden one raises IndexError, and a shorter one passes silently;
    consider asserting equal lengths first. Confirm before changing.
    """
    rd = gfs.compile_rules()
    # Golden ruleset: hostname -> chain name -> ordered iptables rules.
    ok = {
        "InternalFW": {
            "FORWARD": [
                "-j ACCEPT",
                "-m state --state RELATED,ESTABLISHED -j ACCEPT",
                "-s 10.66.1.1/32 -d 10.66.2.0/24 -p tcp -m tcp --dport 22 -j LOG --log-prefix \"f_ssh_mgmt\" --log-level 2",
                "-s 10.66.1.1/32 -d 10.66.2.0/24 -p tcp -m tcp --dport 22 -j ACCEPT",
                "-s 10.66.2.2/32 -d 10.66.1.3/32 -p tcp -m tcp --dport 6660:6669 -j LOG --log-prefix \"f_irc\" --log-level 0",
                "-s 10.66.2.2/32 -d 10.66.1.3/32 -p tcp -m tcp --dport 6660:6669 -j ACCEPT",
                "-s 10.66.1.3/32 -d 172.16.2.223/32 -p udp -m udp --dport 123 -j LOG --log-prefix \"f_ntp\" --log-level 0",
                "-s 10.66.1.3/32 -d 172.16.2.223/32 -p udp -m udp --dport 123 -j ACCEPT",
                "-s 10.66.2.2/32 -d 172.16.2.223/32 -p udp -m udp --dport 123 -j LOG --log-prefix \"f_ntp\" --log-level 0",
                "-s 10.66.2.2/32 -d 172.16.2.223/32 -p udp -m udp --dport 123 -j ACCEPT",
                "-s 10.66.2.2/32 -d 10.66.1.3/32 -p udp -m udp --dport 123 -j LOG --log-prefix \"f_ntp\" --log-level 0",
                "-s 10.66.2.2/32 -d 10.66.1.3/32 -p udp -m udp --dport 123 -j ACCEPT",
                " -j LOG --log-prefix \"f_default\" --log-level 1",
                " -j DROP",
                " -j LOG --log-prefix \"f_default\" --log-level 1",
                " -j DROP"
            ],
            "INPUT": [
                "-j ACCEPT",
                "-m state --state RELATED,ESTABLISHED -j ACCEPT",
                "-i lo -j ACCEPT",
                "-s 10.66.1.1/32 -d 10.66.2.0/24 -i eth1 -p tcp -m tcp --dport 22 -j LOG --log-prefix \"i_ssh_mgmt\" --log-level 2",
                "-s 10.66.1.1/32 -d 10.66.2.0/24 -i eth1 -p tcp -m tcp --dport 22 -j ACCEPT",
                "-s 10.66.1.3/32 -d 10.66.1.2/32 -i eth0 -p tcp -m multiport --dports 143,585,993 -j LOG --log-prefix \"i_imap\" --log-level 2",
                "-s 10.66.1.3/32 -d 10.66.1.2/32 -i eth0 -p tcp -m multiport --dports 143,585,993 -j ACCEPT",
                " -i eth0 -j LOG --log-prefix \"i_default\" --log-level 1",
                " -i eth0 -j DROP",
                " -i eth1 -j LOG --log-prefix \"i_default\" --log-level 1",
                " -i eth1 -j DROP"
            ],
            "OUTPUT": [
                "-m state --state RELATED,ESTABLISHED -j ACCEPT",
                "-o lo -j ACCEPT",
                "-s 10.66.1.2/32 -d 10.66.1.1/32 -o eth0 -p tcp -m tcp --dport 443 -j ACCEPT",
                "-s 10.66.2.1/32 -d 10.66.2.2/32 -o eth1 -p tcp -m tcp --dport 80 -j ACCEPT",
                " -o eth0 -j LOG --log-prefix \"o_default\" --log-level 1",
                " -o eth0 -j DROP",
                " -o eth1 -j LOG --log-prefix \"o_default\" --log-level 1",
                " -o eth1 -j DROP"
            ]
        },
        "BorderFW": {
            "FORWARD": [
                '-j ACCEPT', #FIXME
                "-m state --state RELATED,ESTABLISHED -j ACCEPT",
                "-s 88.88.88.1/32 -d 172.16.2.223/32 -p tcp -m tcp --dport 22 -j LOG --log-prefix \"f_ssh_all\" --log-level 0",
                "-s 88.88.88.1/32 -d 172.16.2.223/32 -p tcp -m tcp --dport 22 -j ACCEPT",
                "-s 88.88.88.1/32 -d 10.66.1.3/32 -p tcp -m tcp --dport 22 -j LOG --log-prefix \"f_ssh_all\" --log-level 0",
                "-s 88.88.88.1/32 -d 10.66.1.3/32 -p tcp -m tcp --dport 22 -j ACCEPT",
                "-s 88.88.88.1/32 -d 10.66.2.2/32 -p tcp -m tcp --dport 22 -j LOG --log-prefix \"f_ssh_all\" --log-level 0",
                "-s 88.88.88.1/32 -d 10.66.2.2/32 -p tcp -m tcp --dport 22 -j ACCEPT",
                "-s 10.66.1.3/32 -d 172.16.2.223/32 -p udp -m udp --dport 123 -j LOG --log-prefix \"f_ntp\" --log-level 0",
                "-s 10.66.1.3/32 -d 172.16.2.223/32 -p udp -m udp --dport 123 -j ACCEPT",
                " -j LOG --log-prefix \"f_default\" --log-level 1",
                " -j DROP",
                " -j LOG --log-prefix \"f_default\" --log-level 1",
                " -j DROP",
                " -j LOG --log-prefix \"f_default\" --log-level 1",
                " -j DROP"
            ],
            "INPUT": [
                '-j ACCEPT', #FIXME
                "-m state --state RELATED,ESTABLISHED -j ACCEPT",
                "-i lo -j ACCEPT",
                "-s 88.88.88.1/32 -d 172.16.2.223/32 -i eth0 -p tcp -m tcp --dport 22 -j ACCEPT",
                "-s 10.66.1.2/32 -d 10.66.1.1/32 -i eth1 -p tcp -m tcp --dport 443 -j ACCEPT",
                "-s 10.66.1.3/32 -d 10.66.1.1/32 -i eth1 -j LOG --log-prefix \"i_NoSmeagol\" --log-level 3",
                "-s 10.66.1.3/32 -d 10.66.1.1/32 -i eth1 -j DROP",
                "-s 10.66.1.3/32 -d 172.16.2.223/32 -i eth0 -p udp -m udp --dport 123 -j ACCEPT",
                "-s 10.66.2.2/32 -d 172.16.2.223/32 -i eth0 -p udp -m udp --dport 123 -j ACCEPT",
                " -i eth0 -j LOG --log-prefix \"i_default\" --log-level 1",
                " -i eth0 -j DROP",
                " -i eth1 -j LOG --log-prefix \"i_default\" --log-level 1",
                " -i eth1 -j DROP",
                " -i eth2 -j LOG --log-prefix \"i_default\" --log-level 1",
                " -i eth2 -j DROP"
            ],
            "OUTPUT": [
                "-m state --state RELATED,ESTABLISHED -j ACCEPT",
                "-o lo -j ACCEPT",
                "-s 10.66.1.1/32 -d 10.66.2.0/24 -o eth1 -p tcp -m tcp --dport 22 -j LOG --log-prefix \"o_ssh_mgmt\" --log-level 2",
                "-s 10.66.1.1/32 -d 10.66.2.0/24 -o eth1 -p tcp -m tcp --dport 22 -j ACCEPT",
                "-s 172.16.2.223/32 -d 10.66.1.3/32 -o eth0 -p udp -m udp --dport 123 -j ACCEPT",
                " -o eth0 -j LOG --log-prefix \"o_default\" --log-level 1",
                " -o eth0 -j DROP",
                " -o eth1 -j LOG --log-prefix \"o_default\" --log-level 1",
                " -o eth1 -j DROP",
                " -o eth2 -j LOG --log-prefix \"o_default\" --log-level 1",
                " -o eth2 -j DROP"
            ]
        },
        "Server001": {
            "FORWARD": [
                "-j DROP"
            ],
            "INPUT": [
                "-m state --state RELATED,ESTABLISHED -j ACCEPT",
                "-i lo -j ACCEPT",
                "-s 88.88.88.1/32 -d 10.66.2.2/32 -i eth0 -p tcp -m tcp --dport 22 -j ACCEPT",
                "-s 10.66.2.1/32 -d 10.66.2.2/32 -i eth0 -p tcp -m tcp --dport 80 -j ACCEPT",
                "-d 10.66.2.2/32 -i eth0 -p tcp -m tcp --dport 80 -j ACCEPT",
                "-s 10.66.1.1/32 -d 10.66.2.0/24 -i eth0 -p tcp -m tcp --dport 22 -j LOG --log-prefix \"i_ssh_mgmt\" --log-level 2",
                "-s 10.66.1.1/32 -d 10.66.2.0/24 -i eth0 -p tcp -m tcp --dport 22 -j ACCEPT",
                " -i eth0 -j LOG --log-prefix \"i_default\" --log-level 1",
                " -i eth0 -j DROP"
            ],
            "OUTPUT": [
                "-m state --state RELATED,ESTABLISHED -j ACCEPT",
                "-o lo -j ACCEPT",
                "-s 10.66.2.2/32 -d 10.66.1.3/32 -o eth0 -p tcp -m tcp --dport 6660:6669 -j ACCEPT",
                "-s 10.66.2.2/32 -d 172.16.2.223/32 -o eth0 -p udp -m udp --dport 123 -j ACCEPT",
                "-s 10.66.2.2/32 -d 10.66.1.3/32 -o eth0 -p udp -m udp --dport 123 -j ACCEPT",
                " -o eth0 -j LOG --log-prefix \"o_default\" --log-level 1",
                " -o eth0 -j DROP"
            ]
        },
        "Tester": {
            "FORWARD": [
                "-j DROP"
            ],
            "INPUT": [
                "-m state --state RELATED,ESTABLISHED -j ACCEPT",
                "-i lo -j ACCEPT",
                " -i eth1 -j LOG --log-prefix \"i_default\" --log-level 1",
                " -i eth1 -j DROP"
            ],
            "OUTPUT": [
                "-m state --state RELATED,ESTABLISHED -j ACCEPT",
                "-o lo -j ACCEPT",
                "-s 88.88.88.1/32 -d 172.16.2.223/32 -o eth1 -p tcp -m tcp --dport 22 -j ACCEPT",
                "-s 88.88.88.1/32 -d 10.66.1.3/32 -o eth1 -p tcp -m tcp --dport 22 -j ACCEPT",
                "-s 88.88.88.1/32 -d 10.66.2.2/32 -o eth1 -p tcp -m tcp --dport 22 -j ACCEPT",
                " -o eth1 -j LOG --log-prefix \"o_default\" --log-level 1",
                " -o eth1 -j DROP"
            ]
        },
        "Smeagol": {
            "FORWARD": [
                "-j DROP"
            ],
            "INPUT": [
                "-m state --state RELATED,ESTABLISHED -j ACCEPT",
                "-i lo -j ACCEPT",
                "-s 88.88.88.1/32 -d 10.66.1.3/32 -i eth0 -p tcp -m tcp --dport 22 -j ACCEPT",
                "-s 10.66.2.2/32 -d 10.66.1.3/32 -i eth0 -p tcp -m tcp --dport 6660:6669 -j ACCEPT",
                "-s 172.16.2.223/32 -d 10.66.1.3/32 -i eth0 -p udp -m udp --dport 123 -j ACCEPT",
                "-s 10.66.2.2/32 -d 10.66.1.3/32 -i eth0 -p udp -m udp --dport 123 -j ACCEPT",
                " -i eth0 -j LOG --log-prefix \"i_default\" --log-level 1",
                " -i eth0 -j DROP"
            ],
            "OUTPUT": [
                "-m state --state RELATED,ESTABLISHED -j ACCEPT",
                "-o lo -j ACCEPT",
                "-s 10.66.1.3/32 -d 10.66.1.1/32 -o eth0 -j LOG --log-prefix \"o_NoSmeagol\" --log-level 3",
                "-s 10.66.1.3/32 -d 10.66.1.1/32 -o eth0 -j DROP",
                "-s 10.66.1.3/32 -d 10.66.1.2/32 -o eth0 -p tcp -m multiport --dports 143,585,993 -j LOG --log-prefix \"o_imap\" --log-level 2",
                "-s 10.66.1.3/32 -d 10.66.1.2/32 -o eth0 -p tcp -m multiport --dports 143,585,993 -j ACCEPT",
                "-s 10.66.1.3/32 -d 172.16.2.223/32 -o eth0 -p udp -m udp --dport 123 -j ACCEPT",
                " -o eth0 -j LOG --log-prefix \"o_default\" --log-level 1",
                " -o eth0 -j DROP"
            ]
        }
    }
    # Compare line by line, ignoring leading/trailing whitespace.
    for hostname in ok:
        for chain in ok[hostname]:
            for n, my_line in enumerate(rd[hostname][chain]):
                ok_line = ok[hostname][chain][n]
                assert my_line.strip() == ok_line.strip(), "Incorrect rules in %s chain %s:\ngot: [%s]\nexpected: [%s]\nline: %d" % (
                    hostname, chain, my_line, ok_line , n )
#FIXME: review rule, ensure they are really correct
@SkipTest
def test_DemoGitFireSet_build_ipt_restore(fs):
    """Build full iptables-restore output and compare against a golden copy.

    NOTE(review): the comparison uses zip(), which silently truncates to
    the shorter of the two lists, so extra generated or expected lines
    are never checked — consider comparing lengths as well.
    """
    rset = fs.compile_rules()
    # _build_ipt_restore takes (hostname, ruleset) pairs and returns
    # (hostname, lines) pairs; collect them back into a dict.
    m = map(fs._build_ipt_restore, rset.iteritems())
    m = dict(m)
    # Golden iptables-restore output: hostname -> ordered lines.
    ok = {
        "InternalFW": [
            "# Created by Firelet for host InternalFW",
            "*filter",
            "-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT",
            "-A INPUT -i lo -j ACCEPT",
            "-A INPUT -s 10.66.1.1/32 -d 10.66.2.0/24 -i eth1 -p tcp -m tcp --dport 22 -j LOG --log-prefix \"ssh_mgmt\" --log-level 2",
            "-A INPUT -s 10.66.1.1/32 -d 10.66.2.0/24 -i eth1 -p tcp -m tcp --dport 22 -j ACCEPT",
            "-A INPUT -s 10.66.1.3/32 -d 10.66.1.2/32 -i eth0 -p tcp -m multiport --dports 143,585,993 -j LOG --log-prefix \"imap\" --log-level 2",
            "-A INPUT -s 10.66.1.3/32 -d 10.66.1.2/32 -i eth0 -p tcp -m multiport --dports 143,585,993 -j ACCEPT",
            "-A INPUT -i eth0 -j LOG --log-prefix \"default\" --log-level 1",
            "-A INPUT -i eth0 -j DROP",
            "-A INPUT -i eth1 -j LOG --log-prefix \"default\" --log-level 1",
            "-A INPUT -i eth1 -j DROP",
            "-A FORWARD -m state --state RELATED,ESTABLISHED -j ACCEPT",
            "-A FORWARD -s 10.66.1.1/32 -d 10.66.2.0/24 -p tcp -m tcp --dport 22 -j LOG --log-prefix \"ssh_mgmt\" --log-level 2",
            "-A FORWARD -s 10.66.1.1/32 -d 10.66.2.0/24 -p tcp -m tcp --dport 22 -j ACCEPT",
            "-A FORWARD -s 10.66.2.2/32 -d 10.66.1.3/32 -p tcp -m tcp --dport 6660:6669 -j LOG --log-prefix \"irc\" --log-level 0",
            "-A FORWARD -s 10.66.2.2/32 -d 10.66.1.3/32 -p tcp -m tcp --dport 6660:6669 -j ACCEPT",
            "-A FORWARD -s 10.66.1.3/32 -d 172.16.2.223/32 -p udp -m udp --dport 123 -j LOG --log-prefix \"ntp\" --log-level 0",
            "-A FORWARD -s 10.66.1.3/32 -d 172.16.2.223/32 -p udp -m udp --dport 123 -j ACCEPT",
            "-A FORWARD -s 10.66.2.2/32 -d 172.16.2.223/32 -p udp -m udp --dport 123 -j LOG --log-prefix \"ntp\" --log-level 0",
            "-A FORWARD -s 10.66.2.2/32 -d 172.16.2.223/32 -p udp -m udp --dport 123 -j ACCEPT",
            "-A FORWARD -s 10.66.2.2/32 -d 10.66.1.3/32 -p udp -m udp --dport 123 -j LOG --log-prefix \"ntp\" --log-level 0",
            "-A FORWARD -s 10.66.2.2/32 -d 10.66.1.3/32 -p udp -m udp --dport 123 -j ACCEPT",
            "-A FORWARD -j LOG --log-prefix \"default\" --log-level 1",
            "-A FORWARD -j DROP",
            "-A FORWARD -j LOG --log-prefix \"default\" --log-level 1",
            "-A FORWARD -j DROP",
            "-A OUTPUT -m state --state RELATED,ESTABLISHED -j ACCEPT",
            "-A OUTPUT -o lo -j ACCEPT",
            "-A OUTPUT -s 10.66.1.2/32 -d 10.66.1.1/32 -o eth0 -p tcp -m tcp --dport 443 -j ACCEPT",
            "-A OUTPUT -s 10.66.2.1/32 -d 10.66.2.2/32 -o eth1 -p tcp -m tcp --dport 80 -j ACCEPT",
            "-A OUTPUT -o eth0 -j LOG --log-prefix \"default\" --log-level 1",
            "-A OUTPUT -o eth0 -j DROP",
            "-A OUTPUT -o eth1 -j LOG --log-prefix \"default\" --log-level 1",
            "-A OUTPUT -o eth1 -j DROP",
            "COMMIT"
        ],
        "Server001": [
            "# Created by Firelet for host Server001",
            "*filter",
            "-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT",
            "-A INPUT -i lo -j ACCEPT",
            "-A INPUT -s 88.88.88.1/32 -d 10.66.2.2/32 -i eth0 -p tcp -m tcp --dport 22 -j ACCEPT",
            "-A INPUT -s 10.66.2.1/32 -d 10.66.2.2/32 -i eth0 -p tcp -m tcp --dport 80 -j ACCEPT",
            "-A INPUT -s 0.0.0.0/0 -d 10.66.2.2/32 -i eth0 -p tcp -m tcp --dport 80 -j ACCEPT",
            "-A INPUT -s 10.66.1.1/32 -d 10.66.2.0/24 -i eth0 -p tcp -m tcp --dport 22 -j LOG --log-prefix \"ssh_mgmt\" --log-level 2",
            "-A INPUT -s 10.66.1.1/32 -d 10.66.2.0/24 -i eth0 -p tcp -m tcp --dport 22 -j ACCEPT",
            "-A INPUT -i eth0 -j LOG --log-prefix \"default\" --log-level 1",
            "-A INPUT -i eth0 -j DROP",
            "-A FORWARD -j DROP",
            "-A OUTPUT -m state --state RELATED,ESTABLISHED -j ACCEPT",
            "-A OUTPUT -o lo -j ACCEPT",
            "-A OUTPUT -s 10.66.2.2/32 -d 10.66.1.3/32 -o eth0 -p tcp -m tcp --dport 6660:6669 -j ACCEPT",
            "-A OUTPUT -s 10.66.2.2/32 -d 172.16.2.223/32 -o eth0 -p udp -m udp --dport 123 -j ACCEPT",
            "-A OUTPUT -s 10.66.2.2/32 -d 10.66.1.3/32 -o eth0 -p udp -m udp --dport 123 -j ACCEPT",
            "-A OUTPUT -o eth0 -j LOG --log-prefix \"default\" --log-level 1",
            "-A OUTPUT -o eth0 -j DROP",
            "COMMIT"
        ],
        "Smeagol": [
            "# Created by Firelet for host Smeagol",
            "*filter",
            "-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT",
            "-A INPUT -i lo -j ACCEPT",
            "-A INPUT -s 88.88.88.1/32 -d 10.66.1.3/32 -i eth0 -p tcp -m tcp --dport 22 -j ACCEPT",
            "-A INPUT -s 10.66.2.2/32 -d 10.66.1.3/32 -i eth0 -p tcp -m tcp --dport 6660:6669 -j ACCEPT",
            "-A INPUT -s 172.16.2.223/32 -d 10.66.1.3/32 -i eth0 -p udp -m udp --dport 123 -j ACCEPT",
            "-A INPUT -s 10.66.2.2/32 -d 10.66.1.3/32 -i eth0 -p udp -m udp --dport 123 -j ACCEPT",
            "-A INPUT -i eth0 -j LOG --log-prefix \"default\" --log-level 1",
            "-A INPUT -i eth0 -j DROP",
            "-A FORWARD -j DROP",
            "-A OUTPUT -m state --state RELATED,ESTABLISHED -j ACCEPT",
            "-A OUTPUT -o lo -j ACCEPT",
            "-A OUTPUT -s 10.66.1.3/32 -d 10.66.1.1/32 -o eth0 -j LOG --log-prefix \"NoSmeagol\" --log-level 3",
            "-A OUTPUT -s 10.66.1.3/32 -d 10.66.1.1/32 -o eth0 -j DROP",
            "-A OUTPUT -s 10.66.1.3/32 -d 10.66.1.2/32 -o eth0 -p tcp -m multiport --dports 143,585,993 -j LOG --log-prefix \"imap\" --log-level 2",
            "-A OUTPUT -s 10.66.1.3/32 -d 10.66.1.2/32 -o eth0 -p tcp -m multiport --dports 143,585,993 -j ACCEPT",
            "-A OUTPUT -s 10.66.1.3/32 -d 172.16.2.223/32 -o eth0 -p udp -m udp --dport 123 -j ACCEPT",
            "-A OUTPUT -o eth0 -j LOG --log-prefix \"default\" --log-level 1",
            "-A OUTPUT -o eth0 -j DROP",
            "COMMIT"
        ],
        "Tester": [
            "# Created by Firelet for host Tester",
            "*filter",
            "-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT",
            "-A INPUT -i lo -j ACCEPT",
            "-A INPUT -i eth1 -j LOG --log-prefix \"default\" --log-level 1",
            "-A INPUT -i eth1 -j DROP",
            "-A FORWARD -j DROP",
            "-A OUTPUT -m state --state RELATED,ESTABLISHED -j ACCEPT",
            "-A OUTPUT -o lo -j ACCEPT",
            "-A OUTPUT -s 88.88.88.1/32 -d 172.16.2.223/32 -o eth1 -p tcp -m tcp --dport 22 -j ACCEPT",
            "-A OUTPUT -s 88.88.88.1/32 -d 10.66.1.3/32 -o eth1 -p tcp -m tcp --dport 22 -j ACCEPT",
            "-A OUTPUT -s 88.88.88.1/32 -d 10.66.2.2/32 -o eth1 -p tcp -m tcp --dport 22 -j ACCEPT",
            "-A OUTPUT -o eth1 -j LOG --log-prefix \"default\" --log-level 1",
            "-A OUTPUT -o eth1 -j DROP",
            "COMMIT"
        ],
        "BorderFW": [
            "# Created by Firelet for host BorderFW",
            "*filter",
            "-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT",
            "-A INPUT -i lo -j ACCEPT",
            "-A INPUT -s 88.88.88.1/32 -d 172.16.2.223/32 -i eth0 -p tcp -m tcp --dport 22 -j ACCEPT",
            "-A INPUT -s 10.66.1.2/32 -d 10.66.1.1/32 -i eth1 -p tcp -m tcp --dport 443 -j ACCEPT",
            "-A INPUT -s 10.66.1.3/32 -d 10.66.1.1/32 -i eth1 -j LOG --log-prefix \"NoSmeagol\" --log-level 3",
            "-A INPUT -s 10.66.1.3/32 -d 10.66.1.1/32 -i eth1 -j DROP",
            "-A INPUT -s 10.66.1.3/32 -d 172.16.2.223/32 -i eth0 -p udp -m udp --dport 123 -j ACCEPT",
            "-A INPUT -s 10.66.2.2/32 -d 172.16.2.223/32 -i eth0 -p udp -m udp --dport 123 -j ACCEPT",
            "-A INPUT -i eth0 -j LOG --log-prefix \"default\" --log-level 1",
            "-A INPUT -i eth0 -j DROP",
            "-A INPUT -i eth1 -j LOG --log-prefix \"default\" --log-level 1",
            "-A INPUT -i eth1 -j DROP",
            "-A INPUT -i eth2 -j LOG --log-prefix \"default\" --log-level 1",
            "-A INPUT -i eth2 -j DROP",
            "-A FORWARD -m state --state RELATED,ESTABLISHED -j ACCEPT",
            "-A FORWARD -s 88.88.88.1/32 -d 172.16.2.223/32 -p tcp -m tcp --dport 22 -j LOG --log-prefix \"ssh_all\" --log-level 0",
            "-A FORWARD -s 88.88.88.1/32 -d 172.16.2.223/32 -p tcp -m tcp --dport 22 -j ACCEPT",
            "-A FORWARD -s 88.88.88.1/32 -d 10.66.1.3/32 -p tcp -m tcp --dport 22 -j LOG --log-prefix \"ssh_all\" --log-level 0",
            "-A FORWARD -s 88.88.88.1/32 -d 10.66.1.3/32 -p tcp -m tcp --dport 22 -j ACCEPT",
            "-A FORWARD -s 88.88.88.1/32 -d 10.66.2.2/32 -p tcp -m tcp --dport 22 -j LOG --log-prefix \"ssh_all\" --log-level 0",
            "-A FORWARD -s 88.88.88.1/32 -d 10.66.2.2/32 -p tcp -m tcp --dport 22 -j ACCEPT",
            "-A FORWARD -s 10.66.1.3/32 -d 172.16.2.223/32 -p udp -m udp --dport 123 -j LOG --log-prefix \"ntp\" --log-level 0",
            "-A FORWARD -s 10.66.1.3/32 -d 172.16.2.223/32 -p udp -m udp --dport 123 -j ACCEPT",
            "-A FORWARD -j LOG --log-prefix \"default\" --log-level 1",
            "-A FORWARD -j DROP",
            "-A FORWARD -j LOG --log-prefix \"default\" --log-level 1",
            "-A FORWARD -j DROP",
            "-A FORWARD -j LOG --log-prefix \"default\" --log-level 1",
            "-A FORWARD -j DROP",
            "-A OUTPUT -m state --state RELATED,ESTABLISHED -j ACCEPT",
            "-A OUTPUT -o lo -j ACCEPT",
            "-A OUTPUT -s 10.66.1.1/32 -d 10.66.2.0/24 -o eth1 -p tcp -m tcp --dport 22 -j LOG --log-prefix \"ssh_mgmt\" --log-level 2",
            "-A OUTPUT -s 10.66.1.1/32 -d 10.66.2.0/24 -o eth1 -p tcp -m tcp --dport 22 -j ACCEPT",
            "-A OUTPUT -s 172.16.2.223/32 -d 10.66.1.3/32 -o eth0 -p udp -m udp --dport 123 -j ACCEPT",
            "-A OUTPUT -o eth0 -j LOG --log-prefix \"default\" --log-level 1",
            "-A OUTPUT -o eth0 -j DROP",
            "-A OUTPUT -o eth1 -j LOG --log-prefix \"default\" --log-level 1",
            "-A OUTPUT -o eth1 -j DROP",
            "-A OUTPUT -o eth2 -j LOG --log-prefix \"default\" --log-level 1",
            "-A OUTPUT -o eth2 -j DROP",
            "COMMIT"
        ]
    }
    debug('m', m)
    # Exact line-by-line comparison (zip truncates — see NOTE above).
    for hostname in m:
        for ok_line, my_line in zip(ok[hostname], m[hostname]):
            assert my_line == ok_line, "Incorrect rule built for %s:\ngot [%s]\nexpected [%s]" % (hostname, my_line, ok_line )
#@with_setup(setup_dir, teardown_dir)
#def test_DemoGitFireSet_diff_table_simple(fs):
# """Run diff between compiled rules and empty remote confs"""
# fs = DemoGitFireSet(repodir=repodir)
# new_confs = fs.compile_rules()
# remote_confs = {}
# dt = fs._diff(remote_confs, new_confs)
# assert dt == '<p>The firewalls are up to date. No deployment needed.</p>'
#FIXME: deployment IS needed
def test_DemoGitFireSet_extract_iptables_rules(fs):
    """Extract filter rules from freshly fetched remote confs."""
    #FIXME: the _get_confs implementation in DemoGitFireSet is broken
    fs._get_confs(keep_sessions=False)
    rules_d = fs._extract_ipt_filter_rules(fs._remote_confs)
    # Loose sanity bounds on rule counts; table markers must be stripped.
    for hn, rules in rules_d.iteritems():
        assert len(rules) > 12, rules
        assert len(rules) < 34, rules
        for rule in rules:
            assert rule not in ('COMMIT', '*filter', '*nat')
#TODO: review this test, ensure it's using real data
def test_DemoGitFireSet_extract_iptables_rules_2(fs):
    """_extract_ipt_filter_rules must return each host's filter table as-is."""
    fake_confs = {
        'InternalFW': {
            'iptables': {'filter': [], 'nat': []},
            'ip_a_s': {},
        },
        'BorderFW': {
            'iptables': {'filter': ['a', 'b', 'c'], 'nat': []},
            'ip_a_s': {},
        },
    }
    extracted = fs._extract_ipt_filter_rules(fake_confs)
    assert extracted == {'InternalFW': [], 'BorderFW': ['a', 'b', 'c']}, repr(extracted)
def test_DemoGitFireSet_diff_table_generation_1(fs):
    """Test diff with no changes"""
    diff_dict = fs._diff({}, {})
    assert diff_dict == {}
def test_DemoGitFireSet_diff_table_generation_2(fs):
    """Test diff with no changes"""
    diff_dict = fs._diff({'InternalFW':['']}, {'InternalFW':['']})
    assert diff_dict == {}
def test_DemoGitFireSet_diff_table_generation_3(fs):
    """Diff must report added and removed lines per firewall."""
    diff_dict = fs._diff({'InternalFW':['old item', 'static item', 'old item2']},
                    {'InternalFW':['static item', 'new item', 'new item2']})
    # _diff returns {hostname: (added_lines, removed_lines)}
    assert diff_dict == {'InternalFW': (['new item', 'new item2'], ['old item', 'old item2'])}
def test_DemoGitFireSet_diff_table_generation_all_fw_removed(fs):
    """Test diff where all the firewalls has been removed.
    An empty diff should be generated."""
    fs._get_confs(keep_sessions=False)
    existing_rules = fs._extract_ipt_filter_rules(fs._remote_confs)
    # Firewalls missing from the new side are dropped, not reported.
    diff_dict = fs._diff(existing_rules, {})
    assert diff_dict == {}, "An empty diff should be generated."
@require_git
def test_DemoGitFireSet_diff_table_generation_all_fw_added(fs):
    """Test diff right after all the firewalls has been added.
    An empty diff should be generated."""
    new_rules = {}
    for hostname, block in fs.compile_rules().iteritems():
        new_rules[hostname] = fs._build_ipt_restore_blocks((hostname, block))
    # Firewalls missing from the old side are not reported either.
    diff_dict = fs._diff({}, new_rules)
    assert diff_dict == {}, "An empty diff should be generated."
# Used during development with test/rebuild.sh #
# to generate new sets of test files #
#
#def test_DemoGitFireSet_rebuild(fs):
# comp_rules = fs.compile_rules()
# for hn, b in comp_rules.iteritems():
# li = fs._build_ipt_restore((hn, b))[1]
# open("test/new-iptables-save-%s" % hn, 'w').write('\n'.join(li)+'\n')
@require_git
@SkipTest
def test_DemoGitFireSet_check(fs):
    """Run diff between compiled rules and remote confs using DemoGitFireSet
    Given the test files, the check should be ok and require no deployment"""
    diff_dict = fs.check()
    assert diff_dict == {}, repr(diff_dict)[:300]
#FIXME: enable the test again
@require_git
def test_DemoGitFireSet_deploy(repodir, fs):
    """Run diff between compiled rules and remote confs.
    Given the test files, the check should be ok and require no deployment"""
    log.debug("Test deployment in %s" % repodir)
    fs.deploy()
    # Compare each deployed conf (the "-x" file) against its reference copy.
    # Use context managers so the file handles are always closed (the
    # original leaked them).
    for h in fs._get_firewalls():
        with open(repodir + '/iptables-save-%s' % h.hostname) as f:
            ok = f.readlines()
        with open(repodir + '/iptables-save-%s-x' % h.hostname) as f:
            r = f.readlines()
        assert ok == r
    assert not fs.save_needed()
    diff_dict = fs.check()
    assert diff_dict == {}, "Check should be giving empty result instead of: %s" \
        % repr(diff_dict)[:300]
@require_git
def test_DemoGitFireSet_deploy_then_check(repodir, fs):
    """Deploy conf then run check again"""
    # Neither deploy() nor check() may leave unsaved changes behind.
    assert not fs.save_needed()
    log.debug("Running deployment using repository in %s" % repodir)
    fs.deploy()
    log.debug("Deployed.")
    assert not fs.save_needed()
    log.debug("Running check...")
    diff_dict = fs.check()
    assert diff_dict == {}, "Check should be giving empty result instead of: %s" \
        % repr(diff_dict)[:300]
    assert not fs.save_needed()
#def test_GitFireSet_deployment(fs):
# fs = GitFireSet(repodir=repodir)
# fs.deploy()
#def test_DemoGitFireSet_deploy(fs):
# dt = fs.deploy()
# for h in fs.hosts:
# r = map(str.rstrip, open(repodir + '/iptables-save-%s' % h.hostname))
# ok = map(str.rstrip, open(repodir + '/iptables-save-%s-correct' % h.hostname))
# for a, b in zip(r, ok):
# assert a == b, "%s differs from %s in iptables-save-%s" % (a, b, h.hostname)
#
#
#def test_get_confs_local_dummy(fs):
# from firelet.flssh import SSHConnector, MockSSHConnector
#
# sshconn = SSHConnector(targets={'localhost':['127.0.0.1']} )
# d = sshconn.get_confs( )
# assert 'localhost' in d
# assert d['localhost']
# assert d == {'localhost': [None, '127.0.0.1', {'filter': '-A INPUT -s 10.0.0.0/8 -p tcp -m tcp --dport 80 -j ACCEPT\n-A FORWARD -s 1.2.3.4/32 -d 5.6.7.8/32 -p tcp -m multiport --dports 22,80,443 -j ACCEPT\n-A OUTPUT -d 10.10.10.10/32 -p udp -m udp --dport 123 -j ACCEPT', 'nat': '-A POSTROUTING -o eth3 -j MASQUERADE'}, {'lo': ('127.0.0.1/8', '::1/128'), 'teredo': (None, 'fe80::ffff:ffff:ffff/64'), 'wlan0': ('192.168.1.1/24', 'fe80::219:d2ff:fe26:fb8e/64'), 'eth0': (None, None)}]}
# fs.services.update() testing
def test_DemoGitFireSet_service_update_error1(fs):
    """update() without a rid must fail outright."""
    with raises(AssertionError):
        fs.services.update({})
def test_DemoGitFireSet_service_update_incorrect_tcp_ports(fs):
    """Non-numeric TCP ports must raise Alert."""
    with raises(Alert):
        fs.services.update({'protocol': 'TCP', 'ports': 'foo,foo'}, rid=0)
def test_DemoGitFireSet_service_update_missing_tcp_ports(fs):
    """An empty port list must raise Alert."""
    with raises(Alert):
        fs.services.update({'protocol': 'TCP', 'ports': ','}, rid=0)
def test_DemoGitFireSet_service_update_reversed_tcp_ports(fs):
    """A reversed port range (high:low) must be rejected."""
    with raises(AssertionError):
        fs.services.update({'protocol': 'TCP', 'ports': '10:1'}, rid=0)
def test_DemoGitFireSet_service_update_tcp(fs):
    """A valid TCP update must be stored."""
    fs.services.update({'protocol': 'TCP', 'ports': '8888', 'name': 'HTTP'}, rid=0)
    assert fs.services[0].ports == '8888'
def test_DemoGitFireSet_service_update_ip(fs):
    """The IP protocol carries no ports."""
    fs.services.update({'protocol': 'IP', 'ports': '', 'name': 'IP'}, rid=0)
    assert fs.services[0].ports == ''
def test_DemoGitFireSet_service_update_incorrect_icmp_type(fs):
    """A non-numeric ICMP type must raise Alert."""
    with raises(Alert):
        fs.services.update({'protocol': 'ICMP', 'ports': 'foo'}, rid=0)
def test_DemoGitFireSet_service_update_incorrect_protocol(fs):
    """An unknown protocol must raise Alert."""
    with raises(Alert):
        fs.services.update({'protocol': 'foo', 'ports': ''}, rid=0)
def test_DemoGitFireSet_service_update_icmp(fs):
    """A valid ICMP update must store both ports (type) and name."""
    fs.services.update({'protocol': 'ICMP', 'ports': '8', 'name': 'NewName'}, rid=0)
    assert fs.services[0].ports == '8'
    assert fs.services[0].name == 'NewName'
# fs.services.add() testing
def test_DemoGitFireSet_service_add(fs):
    """Adding a new, uniquely named service must succeed."""
    fs.services.add({'protocol': 'ICMP', 'ports': '8', 'name': 'NewName'})
def test_DemoGitFireSet_service_add_duplicate(fs):
    """Adding a service whose name already exists must fail."""
    with raises(AssertionError):
        fs.services.add({'protocol': 'ICMP', 'ports': '8', 'name': 'HTTP'})
# fs.rules.update() testing
def test_DemoGitFireSet_rules_update_missing_rid(fs):
    """update() without a rid must fail outright."""
    with raises(AssertionError):
        fs.rules.update({})
def test_DemoGitFireSet_rules_update_missing_rule(fs):
    """Updating a nonexistent rule id must raise Alert."""
    with raises(Alert):
        fs.rules.update({}, rid=1000)
def test_DemoGitFireSet_rules_update_missing_param(fs):
    """Updating with an incomplete field set must raise KeyError."""
    with raises(KeyError):
        fs.rules.update({}, rid=0)
def test_DemoGitFireSet_rules_update(fs):
    """A complete field set must update rule 0 without error."""
    fields = {
        'action': '',
        'desc': 'desc_foo',
        'dst': '',
        'dst_serv': '',
        'enabled': '',
        'log_level': '',
        'name': 'Rule0',
        'src': '',
        'src_serv': '',
    }
    fs.rules.update(fields, rid=0)
# fs.rules.add() testing
def test_DemoGitFireSet_rules_add(fs):
    """A complete field set with a fresh name must be accepted."""
    fields = {
        'action': '',
        'desc': 'desc_foo',
        'dst': '',
        'dst_serv': '',
        'enabled': '',
        'log_level': '',
        'name': 'Rule0',
        'src': '',
        'src_serv': '',
    }
    fs.rules.add(fields, rid=0)
def test_DemoGitFireSet_rules_add_empty(fs):
    """Adding an empty rule dict is tolerated."""
    fs.rules.add({}, rid=0)
def test_DemoGitFireSet_rules_add_duplicate(fs):
    """Reusing an existing rule name must raise Alert."""
    fields = {
        'action': '',
        'desc': 'desc_foo',
        'dst': '',
        'dst_serv': '',
        'enabled': '',
        'log_level': '',
        'name': 'ssh_all',  # duplicate of an existing rule name
        'src': '',
        'src_serv': '',
    }
    with raises(Alert):
        fs.rules.add(fields, rid=0)
# fs.rules.moveup()/movedown() testing
def test_DemoGitFireSet_rules_moveup(fs):
    """moveup(1) must swap rules 0 and 1."""
    first = fs.rules[0]
    fs.rules.moveup(1)
    assert fs.rules[1] == first
def test_DemoGitFireSet_rules_moveup_alert(fs):
    """Moving the first rule up must raise Alert."""
    with raises(Alert):
        fs.rules.moveup(0)
def test_DemoGitFireSet_rules_movedown(fs):
    """Moving the second-to-last rule down must succeed."""
    fs.rules.movedown(len(fs.rules) - 2)
def test_DemoGitFireSet_rules_movedown_alert(fs):
    """Moving the last rule down must raise Alert."""
    with raises(Alert):
        fs.rules.movedown(len(fs.rules) - 1)
# fs.rules.update() testing
@SkipTest
def test_DemoGitFireSet_rules_update_using_token(fs):
    """A valid per-rule token must allow the update (currently skipped)."""
    token = fs.rules[1]._token()
    fs.rules.update({'Name': 'foo'}, rid=1, token=token)
def test_DemoGitFireSet_rules_update_using_token_failing(fs):
    """An invalid token must make the update fail."""
    token = 'bogustoken'
    with raises(Exception):
        fs.rules.update({'Name': 'foo'}, rid=1, token=token)
# fs.hosts.add() testing
def test_DemoGitFireSet_hosts_add(fs):
    """A blank host entry must be accepted."""
    fs.hosts.add({
        'hostname': '',
        'iface': '',
        'ip_addr': '',
        'masklen': '',
        'local_fw': '',
        'network_fw': '',
        'mng': [],
        'routed': [],
    })
def test_DemoGitFireSet_hosts_add_duplicate(fs):
    """An existing hostname/iface pair must be rejected."""
    duplicate = {
        'hostname': 'InternalFW',
        'iface': 'eth0',
        'ip_addr': '',
        'masklen': '',
        'local_fw': '',
        'network_fw': '',
        'mng': [],
        'routed': [],
    }
    with raises(AssertionError):
        fs.hosts.add(duplicate)
# fs.hostgroups.add() testing
def test_DemoGitFireSet_hostgroups_add(fs):
    """A blank hostgroup must be accepted."""
    fs.hostgroups.add({'name': '', 'childs': []})
def test_DemoGitFireSet_hostgroups_add_duplicate(fs):
    """An existing hostgroup name must be rejected."""
    with raises(AssertionError):
        fs.hostgroups.add({'name': 'AllSystems', 'childs': []})
# fs.hostgroups.update() testing
def test_DemoGitFireSet_hostgroups_update(fs):
    """Updating an existing hostgroup must succeed."""
    fs.hostgroups.update({'name': 'foo', 'childs': []}, rid=0)
def test_DemoGitFireSet_hostgroups_update_missing(fs):
    """Updating a nonexistent hostgroup id must raise Alert."""
    with raises(Alert):
        fs.hostgroups.update({'name': 'foo', 'childs': []}, rid=1000)
# fs.networks.add() testing
def test_DemoGitFireSet_networks_add(fs):
    """A well-formed network entry must be accepted."""
    fs.networks.add({'name': '', 'ip_addr': '1.2.0.0', 'masklen': 16})
def test_DemoGitFireSet_networks_add_duplicate(fs):
    """An existing network name must be rejected."""
    with raises(AssertionError):
        fs.networks.add({'name': 'Internet', 'ip_addr': '1.2.0.0', 'masklen': 16})
def test_DemoGitFireSet_networks_add_incorrect_ipaddr(fs):
    """A non-IP address string must be rejected."""
    with raises(Exception):
        fs.networks.add({'name': '', 'ip_addr': 'foo', 'masklen': 16})
def test_DemoGitFireSet_networks_add_incorrect_netmask(fs):
    """A non-numeric mask length must be rejected."""
    with raises(Exception):
        fs.networks.add({'name': '', 'ip_addr': '1.2.0.0', 'masklen': 'foo'})
# # IP address handling # #
def test_network_update():
    """Network() must mask the given address down to its network address."""
    assert Network(['','255.255.255.255',8]).ip_addr == '255.0.0.0'
    assert Network(['','255.255.255.255',16]).ip_addr == '255.255.0.0'
    assert Network(['','255.255.255.255',24]).ip_addr == '255.255.255.0'
    assert Network(['','255.255.255.255',27]).ip_addr == '255.255.255.224'
    assert Network(['','255.255.255.255',28]).ip_addr == '255.255.255.240'
    assert Network(['','255.255.255.255',29]).ip_addr == '255.255.255.248'
    assert Network(['','255.255.255.255',30]).ip_addr == '255.255.255.252'
def test_network_contains_networks():
    """'in' between Networks means subnet containment (longer or equal mask)."""
    assert Network(['', '255.255.255.255', 16]) in Network(['', '255.255.255.255', 8])
    assert Network(['', '255.255.255.255', 16]) in Network(['', '255.255.255.255', 16])
    assert Network(['', '255.255.255.255', 8]) not in Network(['', '255.255.255.255', 16])
    assert Network(['', '1.0.0.0', 17]) in Network(['', '1.0.0.0', 16])
    assert Network(['', '1.0.0.0', 16]) in Network(['', '1.0.0.0', 16])
    assert Network(['', '1.0.0.0', 15]) not in Network(['', '1.0.0.0', 16])
    assert Network(['', '42.42.42.42', 15]) not in Network(['','42.42.42.42', 16])
    assert Network(['', '42.42.42.42', 16]) in Network(['','42.42.42.42', 16])
    assert Network(['', '42.42.42.42', 17]) in Network(['','42.42.42.42', 16])
def test_network_contains_hosts():
    """A Host is 'in' a Network iff its address falls in the network range."""
    # 1.1.1.0/28 spans 1.1.1.0 - 1.1.1.15, so .16 is just outside.
    assert Host(['h', 'eth0', '1.1.1.1', 24, '1', '1', '1', [] ]) \
        in Network(['h', '1.1.1.0', 28])
    assert Host(['h', 'eth0', '1.1.1.15',24, '1', '1', '1', [] ]) \
        in Network(['h', '1.1.1.0', 28])
    assert Host(['h', 'eth0', '1.1.1.16',24, '1', '1', '1', [] ]) \
        not in Network(['h', '1.1.1.0', 28])
    assert Host(['h', 'eth0', '1.1.1.1',24, '1', '1', '1', [] ]) \
        in Network(['h', '1.1.1.0', 24])
    assert Host(['h', 'eth0', '1.1.1.1',24, '1', '1', '1', [] ]) \
        in Network(['h', '1.1.1.0', 8])
    assert Host(['h', 'eth0', '1.1.1.1',24, '1', '1', '1', [] ]) \
        not in Network(['h', '1.1.2.0', 24])
    assert Host(['h', 'eth0', '1.1.1.1',24, '1', '1', '1', [] ]) \
        not in Network(['h', '10.1.1.0', 8])
def test_host_contains_host():
    """A Host contains an identical Host."""
    assert Host(['h', 'eth0', '1.1.1.1', 24, '1', '1', '1', [] ]) \
        in Host(['h', 'eth0', '1.1.1.1', 24, '1', '1', '1', [] ])
def test_host_contains_network():
    """A Network inside a Host is meaningless and must raise."""
    # only an Host can be in a Host, otherwise raise an Exception
    with raises(Exception):
        Network(['h', '1.1.1.0', 8]) in \
            Host(['h', 'eth0', '1.1.1.1', 24, '1', '1', '1', [] ])
def test_compare():
    """Cross-check Network's masking against IPNetwork for each prefix length.

    NOTE(review): xrange(0, 32) never exercises a /32 prefix — possibly an
    off-by-one; confirm whether Network supports masklen 32 before widening.
    """
    for x in xrange(0, 32):
        n=IPNetwork('255.1.1.1/%d' % x)
        ok = n.network
        mine = Network(['','255.1.1.1', x]).ip_addr
        log.debug( 'ok: %s mine: %s len: %d' % (ok, mine, x))
        assert str(mine) == str(ok)
#def test_flattening(repodir):
# hg2 = HostGroup(['name', [Host(['h', 'eth0', '1.1.1.1',24, '1', '1', '1', [] ])], ])
# hg3 = HostGroup(['name2', [Network(['n', '2.2.2.0', 24]), hg2]])
# hg = HostGroup(childs=[hg2, hg3])
# assert ['h', 'h'] == [h.hostname for h in hg.hosts()]
# assert ['n'] == [h.name for h in hg.networks()], repr(hg.networks())
@SkipTest # FIXME
def test_svg_map(repodir):
    """Render the firewall topology as an SVG map (currently skipped)."""
    fireset = GitFireSet(repodir=repodir)
    rendered = draw_svg_map(fireset)
    assert 'DOCTYPE svg PUBLIC' in rendered, "No SVG output?"
    assert 'rivendell' in rendered, "No rivendell in the map"
# # Test JSON lib # #
def test_json_files(repodir):
    """A nested dict must survive a savejson/loadjson round trip."""
    original = {'d1': {'d2': {'d3': {'d4': {'d5': {'this is getting': 'boring'}}}}}}
    savejson('jfile', original, d=repodir)
    reloaded = loadjson('jfile', d=repodir)
    assert reloaded == original
# # Bunch objects testing # #
# Service bunch
def test_bunch_service_wrong_proto():
    """An unknown protocol must be rejected at construction time."""
    with raises(Exception):
        Service(name='s1', protocol='NotAProtocol', ports='53')
def test_bunch_service_correct():
    """A valid TCP service keeps its protocol."""
    svc = Service(name='s1', protocol='TCP', ports='53')
    assert svc.protocol == 'TCP'
def test_bunch_service_port_too_high():
    """Ports above 65535 must be rejected."""
    with raises(Exception):
        Service(name='s1', protocol='TCP', ports='999999')
def test_bunch_service_negative_port():
    """Negative port numbers must be rejected."""
    with raises(Exception):
        Service(name='s1', protocol='TCP', ports='-1')
def test_bunch_service_bogus_port_range():
    """A range with more than two endpoints must be rejected."""
    with raises(Exception):
        Service(name='s1', protocol='TCP', ports='10:20:30')
def test_bunch_service_inverse_port_range():
    """A range whose start exceeds its end must be rejected."""
    with raises(Exception):
        Service(name='s1', protocol='TCP', ports='30:20')
def test_bunch_service_bogus_port():
    """A non-numeric port must raise an Alert."""
    with raises(Alert):
        Service(name='s1', protocol='TCP', ports='blah')
def test_bunch_service_update():
    """Updating a service with a non-numeric port must fail."""
    svc = Service(name='s1', protocol='TCP', ports='80')
    with raises(Exception):
        svc.update(dict(name='s1', protocol='TCP', ports='blah'))
def test_bunch_service():
    """Exercise Service construction, update, and update validation."""
    svc = Service(name='s1', protocol='UDP', ports='53')
    assert svc.name == 's1', 'Incorrect bunch name'
    assert svc.protocol == 'UDP', 'Incorrect bunch proto'
    assert svc.ports == '53', 'Incorrect bunch ports'
    # A full update replaces every attribute.
    svc.update({'name': 's2', 'protocol': 'TCP', 'ports': '80'})
    assert svc.name == 's2', 'Incorrect bunch name'
    assert svc.protocol == 'TCP', 'Incorrect bunch proto'
    assert svc.ports == '80', 'Incorrect bunch ports'
    # Invalid ports in an update must raise an Alert.
    with raises(Alert):
        svc.update({'name': 's2', 'protocol': 'TCP', 'ports': 'eighty'})
# HostGroup Bunch
def test_bunch_hostgroup1():
    """A HostGroup exposes its name."""
    group = HostGroup(['Servers'])
    assert group.name == 'Servers'
def test_bunch_hostgroup2():
    """A HostGroup exposes its child elements."""
    group = HostGroup(['Servers', 'a', 'b'])
    assert group.childs == ['a', 'b']
#def test_bunch_hostgroup_flatten1(repodir):
# hg = HostGroup(['Servers'])
# dicts = [{}, {}, {}]
# assert_raises(Exception, hg.flat, *dicts)
#def test_bunch_hostgroup_flatten2(repodir):
# hg = HostGroup(['Servers'])
# flat = hg.flat({'Servers':'a'}, {}, {})
# print flat
# dicts = [{}, {}, {}]
# assert_raises(Exception, hg.flat, *dicts)
| gpl-3.0 |
shiblon/pytour | static/js/pypyjs/pypy-nojit.js-0.3.1/lib/modules/test/test_time.py | 86 | 10401 | from test import test_support
import time
import unittest
import sys
class TimeTestCase(unittest.TestCase):
def setUp(self):
self.t = time.time()
def test_data_attributes(self):
time.altzone
time.daylight
time.timezone
time.tzname
def test_clock(self):
time.clock()
def test_conversions(self):
self.assertTrue(time.ctime(self.t)
== time.asctime(time.localtime(self.t)))
self.assertTrue(long(time.mktime(time.localtime(self.t)))
== long(self.t))
def test_sleep(self):
time.sleep(1.2)
def test_strftime(self):
tt = time.gmtime(self.t)
for directive in ('a', 'A', 'b', 'B', 'c', 'd', 'H', 'I',
'j', 'm', 'M', 'p', 'S',
'U', 'w', 'W', 'x', 'X', 'y', 'Y', 'Z', '%'):
format = ' %' + directive
try:
time.strftime(format, tt)
except ValueError:
self.fail('conversion specifier: %r failed.' % format)
# Issue #10762: Guard against invalid/non-supported format string
# so that Python don't crash (Windows crashes when the format string
# input to [w]strftime is not kosher.
if sys.platform.startswith('win'):
with self.assertRaises(ValueError):
time.strftime('%f')
def test_strftime_bounds_checking(self):
# Make sure that strftime() checks the bounds of the various parts
#of the time tuple (0 is valid for *all* values).
# Check year [1900, max(int)]
self.assertRaises(ValueError, time.strftime, '',
(1899, 1, 1, 0, 0, 0, 0, 1, -1))
if time.accept2dyear:
self.assertRaises(ValueError, time.strftime, '',
(-1, 1, 1, 0, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, time.strftime, '',
(100, 1, 1, 0, 0, 0, 0, 1, -1))
# Check month [1, 12] + zero support
self.assertRaises(ValueError, time.strftime, '',
(1900, -1, 1, 0, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, time.strftime, '',
(1900, 13, 1, 0, 0, 0, 0, 1, -1))
# Check day of month [1, 31] + zero support
self.assertRaises(ValueError, time.strftime, '',
(1900, 1, -1, 0, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, time.strftime, '',
(1900, 1, 32, 0, 0, 0, 0, 1, -1))
# Check hour [0, 23]
self.assertRaises(ValueError, time.strftime, '',
(1900, 1, 1, -1, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, time.strftime, '',
(1900, 1, 1, 24, 0, 0, 0, 1, -1))
# Check minute [0, 59]
self.assertRaises(ValueError, time.strftime, '',
(1900, 1, 1, 0, -1, 0, 0, 1, -1))
self.assertRaises(ValueError, time.strftime, '',
(1900, 1, 1, 0, 60, 0, 0, 1, -1))
# Check second [0, 61]
self.assertRaises(ValueError, time.strftime, '',
(1900, 1, 1, 0, 0, -1, 0, 1, -1))
# C99 only requires allowing for one leap second, but Python's docs say
# allow two leap seconds (0..61)
self.assertRaises(ValueError, time.strftime, '',
(1900, 1, 1, 0, 0, 62, 0, 1, -1))
# No check for upper-bound day of week;
# value forced into range by a ``% 7`` calculation.
# Start check at -2 since gettmarg() increments value before taking
# modulo.
self.assertRaises(ValueError, time.strftime, '',
(1900, 1, 1, 0, 0, 0, -2, 1, -1))
# Check day of the year [1, 366] + zero support
self.assertRaises(ValueError, time.strftime, '',
(1900, 1, 1, 0, 0, 0, 0, -1, -1))
self.assertRaises(ValueError, time.strftime, '',
(1900, 1, 1, 0, 0, 0, 0, 367, -1))
def test_default_values_for_zero(self):
# Make sure that using all zeros uses the proper default values.
# No test for daylight savings since strftime() does not change output
# based on its value.
expected = "2000 01 01 00 00 00 1 001"
result = time.strftime("%Y %m %d %H %M %S %w %j", (0,)*9)
self.assertEqual(expected, result)
def test_strptime(self):
# Should be able to go round-trip from strftime to strptime without
# raising an exception.
tt = time.gmtime(self.t)
for directive in ('a', 'A', 'b', 'B', 'c', 'd', 'H', 'I',
'j', 'm', 'M', 'p', 'S',
'U', 'w', 'W', 'x', 'X', 'y', 'Y', 'Z', '%'):
format = '%' + directive
strf_output = time.strftime(format, tt)
try:
time.strptime(strf_output, format)
except ValueError:
self.fail("conversion specifier %r failed with '%s' input." %
(format, strf_output))
def test_asctime(self):
time.asctime(time.gmtime(self.t))
self.assertRaises(TypeError, time.asctime, 0)
self.assertRaises(TypeError, time.asctime, ())
# XXX: Posix compiant asctime should refuse to convert
# year > 9999, but Linux implementation does not.
# self.assertRaises(ValueError, time.asctime,
# (12345, 1, 0, 0, 0, 0, 0, 0, 0))
# XXX: For now, just make sure we don't have a crash:
try:
time.asctime((12345, 1, 1, 0, 0, 0, 0, 1, 0))
except ValueError:
pass
@unittest.skipIf(not hasattr(time, "tzset"),
"time module has no attribute tzset")
def test_tzset(self):
from os import environ
# Epoch time of midnight Dec 25th 2002. Never DST in northern
# hemisphere.
xmas2002 = 1040774400.0
# These formats are correct for 2002, and possibly future years
# This format is the 'standard' as documented at:
# http://www.opengroup.org/onlinepubs/007904975/basedefs/xbd_chap08.html
# They are also documented in the tzset(3) man page on most Unix
# systems.
eastern = 'EST+05EDT,M4.1.0,M10.5.0'
victoria = 'AEST-10AEDT-11,M10.5.0,M3.5.0'
utc='UTC+0'
org_TZ = environ.get('TZ',None)
try:
# Make sure we can switch to UTC time and results are correct
# Note that unknown timezones default to UTC.
# Note that altzone is undefined in UTC, as there is no DST
environ['TZ'] = eastern
time.tzset()
environ['TZ'] = utc
time.tzset()
self.assertEqual(
time.gmtime(xmas2002), time.localtime(xmas2002)
)
self.assertEqual(time.daylight, 0)
self.assertEqual(time.timezone, 0)
self.assertEqual(time.localtime(xmas2002).tm_isdst, 0)
# Make sure we can switch to US/Eastern
environ['TZ'] = eastern
time.tzset()
self.assertNotEqual(time.gmtime(xmas2002), time.localtime(xmas2002))
self.assertEqual(time.tzname, ('EST', 'EDT'))
self.assertEqual(len(time.tzname), 2)
self.assertEqual(time.daylight, 1)
self.assertEqual(time.timezone, 18000)
self.assertEqual(time.altzone, 14400)
self.assertEqual(time.localtime(xmas2002).tm_isdst, 0)
self.assertEqual(len(time.tzname), 2)
# Now go to the southern hemisphere.
environ['TZ'] = victoria
time.tzset()
self.assertNotEqual(time.gmtime(xmas2002), time.localtime(xmas2002))
# Issue #11886: Australian Eastern Standard Time (UTC+10) is called
# "EST" (as Eastern Standard Time, UTC-5) instead of "AEST" on some
# operating systems (e.g. FreeBSD), which is wrong. See for example
# this bug: http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=93810
self.assertIn(time.tzname[0], ('AEST' 'EST'), time.tzname[0])
self.assertTrue(time.tzname[1] == 'AEDT', str(time.tzname[1]))
self.assertEqual(len(time.tzname), 2)
self.assertEqual(time.daylight, 1)
self.assertEqual(time.timezone, -36000)
self.assertEqual(time.altzone, -39600)
self.assertEqual(time.localtime(xmas2002).tm_isdst, 1)
finally:
# Repair TZ environment variable in case any other tests
# rely on it.
if org_TZ is not None:
environ['TZ'] = org_TZ
elif environ.has_key('TZ'):
del environ['TZ']
time.tzset()
def test_insane_timestamps(self):
# It's possible that some platform maps time_t to double,
# and that this test will fail there. This test should
# exempt such platforms (provided they return reasonable
# results!).
for func in time.ctime, time.gmtime, time.localtime:
for unreasonable in -1e200, 1e200:
self.assertRaises(ValueError, func, unreasonable)
def test_ctime_without_arg(self):
# Not sure how to check the values, since the clock could tick
# at any time. Make sure these are at least accepted and
# don't raise errors.
time.ctime()
time.ctime(None)
def test_gmtime_without_arg(self):
gt0 = time.gmtime()
gt1 = time.gmtime(None)
t0 = time.mktime(gt0)
t1 = time.mktime(gt1)
self.assertTrue(0 <= (t1-t0) < 0.2)
def test_localtime_without_arg(self):
lt0 = time.localtime()
lt1 = time.localtime(None)
t0 = time.mktime(lt0)
t1 = time.mktime(lt1)
self.assertTrue(0 <= (t1-t0) < 0.2)
def test_mktime(self):
# Issue #1726687
for t in (-2, -1, 0, 1):
try:
tt = time.localtime(t)
except (OverflowError, ValueError):
pass
else:
self.assertEqual(time.mktime(tt), t)
def test_main():
    # regrtest entry point: run the whole TimeTestCase suite.
    test_support.run_unittest(TimeTestCase)
# Allow running this test file directly.
if __name__ == "__main__":
    test_main()
| apache-2.0 |
Ramshackle-Jamathon/Interactive-Experiments | Three-Jumpstart/node_modules/node-gyp/gyp/pylib/gyp/generator/msvs.py | 886 | 131038 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import ntpath
import os
import posixpath
import re
import subprocess
import sys
import gyp.common
import gyp.easy_xml as easy_xml
import gyp.generator.ninja as ninja_generator
import gyp.MSVSNew as MSVSNew
import gyp.MSVSProject as MSVSProject
import gyp.MSVSSettings as MSVSSettings
import gyp.MSVSToolFile as MSVSToolFile
import gyp.MSVSUserFile as MSVSUserFile
import gyp.MSVSUtil as MSVSUtil
import gyp.MSVSVersion as MSVSVersion
from gyp.common import GypError
from gyp.common import OrderedSet
# TODO: Remove once bots are on 2.7, http://crbug.com/241769
def _import_OrderedDict():
import collections
try:
return collections.OrderedDict
except AttributeError:
import gyp.ordered_dict
return gyp.ordered_dict.OrderedDict
OrderedDict = _import_OrderedDict()
# Regular expression for validating Visual Studio GUIDs. If the GUID
# contains lowercase hex letters, MSVS will be fine. However,
# IncrediBuild BuildConsole will parse the solution file, but then
# silently skip building the target causing hard to track down errors.
# Note that this only happens with the BuildConsole, and does not occur
# if IncrediBuild is executed from inside Visual Studio. This regex
# validates that the string looks like a GUID with all uppercase hex
# letters.
VALID_MSVS_GUID_CHARS = re.compile(r'^[A-F0-9\-]+$')

# Default values substituted for gyp generator variables; the $(...) forms
# are MSVS build macros expanded by Visual Studio at build time.
generator_default_variables = {
    'EXECUTABLE_PREFIX': '',
    'EXECUTABLE_SUFFIX': '.exe',
    'STATIC_LIB_PREFIX': '',
    'SHARED_LIB_PREFIX': '',
    'STATIC_LIB_SUFFIX': '.lib',
    'SHARED_LIB_SUFFIX': '.dll',
    'INTERMEDIATE_DIR': '$(IntDir)',
    'SHARED_INTERMEDIATE_DIR': '$(OutDir)obj/global_intermediate',
    'OS': 'win',
    'PRODUCT_DIR': '$(OutDir)',
    'LIB_DIR': '$(OutDir)lib',
    'RULE_INPUT_ROOT': '$(InputName)',
    'RULE_INPUT_DIRNAME': '$(InputDir)',
    'RULE_INPUT_EXT': '$(InputExt)',
    'RULE_INPUT_NAME': '$(InputFileName)',
    'RULE_INPUT_PATH': '$(InputPath)',
    'CONFIGURATION_NAME': '$(ConfigurationName)',
}

# The msvs specific sections that hold paths
generator_additional_path_sections = [
    'msvs_cygwin_dirs',
    'msvs_props',
]

# msvs_* keys that apply to the target as a whole rather than to a single
# configuration.
generator_additional_non_configuration_keys = [
    'msvs_cygwin_dirs',
    'msvs_cygwin_shell',
    'msvs_large_pdb',
    'msvs_shard',
    'msvs_external_builder',
    'msvs_external_builder_out_dir',
    'msvs_external_builder_build_cmd',
    'msvs_external_builder_clean_cmd',
    'msvs_external_builder_clcompile_cmd',
    'msvs_enable_winrt',
    'msvs_requires_importlibrary',
    'msvs_enable_winphone',
    'msvs_application_type_revision',
    'msvs_target_platform_version',
    'msvs_target_platform_minversion',
]

# List of precompiled header related keys.
precomp_keys = [
    'msvs_precompiled_header',
    'msvs_precompiled_source',
]

# Cache for _GetDomainAndUserName(); filled in on first call.
cached_username = None
cached_domain = None

# TODO(gspencer): Switch the os.environ calls to be
# win32api.GetDomainName() and win32api.GetUserName() once the
# python version in depot_tools has been updated to work on Vista
# 64-bit.
def _GetDomainAndUserName():
  """Return a (domain, username) pair for the current user.

  On non-Windows platforms a placeholder pair is returned. On Windows the
  values come from the environment, falling back to parsing the output of
  `net config Workstation`. Results are cached in module globals.
  """
  if sys.platform not in ('win32', 'cygwin'):
    return ('DOMAIN', 'USERNAME')
  global cached_username
  global cached_domain
  if not (cached_domain and cached_username):
    domain = os.environ.get('USERDOMAIN')
    username = os.environ.get('USERNAME')
    if not (domain and username):
      # Ask the `net` tool; its output lists both values.
      output = subprocess.Popen(['net', 'config', 'Workstation'],
                                stdout=subprocess.PIPE).communicate()[0]
      found = re.search(r'^User name\s+(\S+)', output, re.MULTILINE)
      if found:
        username = found.group(1)
      found = re.search(r'^Logon domain\s+(\S+)', output, re.MULTILINE)
      if found:
        domain = found.group(1)
    cached_domain = domain
    cached_username = username
  return (cached_domain, cached_username)
fixpath_prefix = None
def _NormalizedSource(source):
"""Normalize the path.
But not if that gets rid of a variable, as this may expand to something
larger than one directory.
Arguments:
source: The path to be normalize.d
Returns:
The normalized path.
"""
normalized = os.path.normpath(source)
if source.count('$') == normalized.count('$'):
source = normalized
return source
def _FixPath(path):
"""Convert paths to a form that will make sense in a vcproj file.
Arguments:
path: The path to convert, may contain / etc.
Returns:
The path with all slashes made into backslashes.
"""
if fixpath_prefix and path and not os.path.isabs(path) and not path[0] == '$':
path = os.path.join(fixpath_prefix, path)
path = path.replace('/', '\\')
path = _NormalizedSource(path)
if path and path[-1] == '\\':
path = path[:-1]
return path
def _FixPaths(paths):
"""Fix each of the paths of the list."""
return [_FixPath(i) for i in paths]
def _ConvertSourcesToFilterHierarchy(sources, prefix=None, excluded=None,
                                     list_excluded=True, msvs_version=None):
  """Converts a list split source file paths into a vcproj folder hierarchy.

  Arguments:
    sources: A list of source file paths split.
    prefix: A list of source file path layers meant to apply to each of sources.
    excluded: A set of excluded files; None is treated as the empty set.
    list_excluded: Whether excluded files get a '_excluded_files' folder.
    msvs_version: A MSVSVersion object.
  Returns:
    A hierarchy of filenames and MSVSProject.Filter objects that matches the
    layout of the source tree.
    For example:
    _ConvertSourcesToFilterHierarchy([['a', 'bob1.c'], ['b', 'bob2.c']],
                                     prefix=['joe'])
    -->
    [MSVSProject.Filter('a', contents=['joe\\a\\bob1.c']),
     MSVSProject.Filter('b', contents=['joe\\b\\bob2.c'])]
  """
  if not prefix: prefix = []
  if excluded is None:
    # Bug fix: the default used to crash on `filename in None` below.
    excluded = frozenset()
  result = []
  excluded_result = []
  folders = OrderedDict()
  # Gather files into the final result, excluded, or folders.
  for s in sources:
    if len(s) == 1:
      filename = _NormalizedSource('\\'.join(prefix + s))
      if filename in excluded:
        excluded_result.append(filename)
      else:
        result.append(filename)
    elif msvs_version and not msvs_version.UsesVcxproj():
      # For MSVS 2008 and earlier, we need to process all files before walking
      # the sub folders.
      if not folders.get(s[0]):
        folders[s[0]] = []
      folders[s[0]].append(s[1:])
    else:
      contents = _ConvertSourcesToFilterHierarchy([s[1:]], prefix + [s[0]],
                                                  excluded=excluded,
                                                  list_excluded=list_excluded,
                                                  msvs_version=msvs_version)
      contents = MSVSProject.Filter(s[0], contents=contents)
      result.append(contents)
  # Add a folder for excluded files.
  if excluded_result and list_excluded:
    excluded_folder = MSVSProject.Filter('_excluded_files',
                                         contents=excluded_result)
    result.append(excluded_folder)
  if msvs_version and msvs_version.UsesVcxproj():
    return result
  # Populate all the folders (MSVS 2008 and earlier path).
  for f in folders:
    contents = _ConvertSourcesToFilterHierarchy(folders[f], prefix=prefix + [f],
                                                excluded=excluded,
                                                list_excluded=list_excluded,
                                                msvs_version=msvs_version)
    contents = MSVSProject.Filter(f, contents=contents)
    result.append(contents)
  return result
def _ToolAppend(tools, tool_name, setting, value, only_if_unset=False):
if not value: return
_ToolSetOrAppend(tools, tool_name, setting, value, only_if_unset)
def _ToolSetOrAppend(tools, tool_name, setting, value, only_if_unset=False):
# TODO(bradnelson): ugly hack, fix this more generally!!!
if 'Directories' in setting or 'Dependencies' in setting:
if type(value) == str:
value = value.replace('/', '\\')
else:
value = [i.replace('/', '\\') for i in value]
if not tools.get(tool_name):
tools[tool_name] = dict()
tool = tools[tool_name]
if tool.get(setting):
if only_if_unset: return
if type(tool[setting]) == list and type(value) == list:
tool[setting] += value
else:
raise TypeError(
'Appending "%s" to a non-list setting "%s" for tool "%s" is '
'not allowed, previous value: %s' % (
value, setting, tool_name, str(tool[setting])))
else:
tool[setting] = value
def _ConfigPlatform(config_data):
return config_data.get('msvs_configuration_platform', 'Win32')
def _ConfigBaseName(config_name, platform_name):
if config_name.endswith('_' + platform_name):
return config_name[0:-len(platform_name) - 1]
else:
return config_name
def _ConfigFullName(config_name, config_data):
platform_name = _ConfigPlatform(config_data)
return '%s|%s' % (_ConfigBaseName(config_name, platform_name), platform_name)
def _BuildCommandLineForRuleRaw(spec, cmd, cygwin_shell, has_input_path,
                                quote_cmd, do_setup_env):
  """Build the raw command string for a custom build rule.

  Arguments:
    spec: the project dict (read for 'msvs_cygwin_dirs').
    cmd: the command as a list of arguments.
    cygwin_shell: truthy when the command must run under cygwin's bash.
    has_input_path: whether $(InputPath) may appear in the command.
    quote_cmd: whether arguments should be wrapped in double quotes.
    do_setup_env: whether to call cygwin's setup_env.bat first.
  Returns:
    A single command string, prefixed with INPUTDIR setup when needed.
  """
  # $(InputDir) ends with a trailing backslash; the preamble strips it into
  # the INPUTDIR environment variable so later substitutions are clean.
  if [x for x in cmd if '$(InputDir)' in x]:
    input_dir_preamble = (
      'set INPUTDIR=$(InputDir)\n'
      'if NOT DEFINED INPUTDIR set INPUTDIR=.\\\n'
      'set INPUTDIR=%INPUTDIR:~0,-1%\n'
      )
  else:
    input_dir_preamble = ''
  if cygwin_shell:
    # Find path to cygwin.
    cygwin_dir = _FixPath(spec.get('msvs_cygwin_dirs', ['.'])[0])
    # Prepare command: MSVS macros are rewritten to cygpath invocations on
    # the corresponding environment variables set up below.
    direct_cmd = cmd
    direct_cmd = [i.replace('$(IntDir)',
                            '`cygpath -m "${INTDIR}"`') for i in direct_cmd]
    direct_cmd = [i.replace('$(OutDir)',
                            '`cygpath -m "${OUTDIR}"`') for i in direct_cmd]
    direct_cmd = [i.replace('$(InputDir)',
                            '`cygpath -m "${INPUTDIR}"`') for i in direct_cmd]
    if has_input_path:
      direct_cmd = [i.replace('$(InputPath)',
                              '`cygpath -m "${INPUTPATH}"`')
                    for i in direct_cmd]
    direct_cmd = ['\\"%s\\"' % i.replace('"', '\\\\\\"') for i in direct_cmd]
    # direct_cmd = gyp.common.EncodePOSIXShellList(direct_cmd)
    direct_cmd = ' '.join(direct_cmd)
    # TODO(quote): regularize quoting path names throughout the module
    cmd = ''
    if do_setup_env:
      cmd += 'call "$(ProjectDir)%(cygwin_dir)s\\setup_env.bat" && '
    cmd += 'set CYGWIN=nontsec&& '
    # Only export the variables the command actually references.
    if direct_cmd.find('NUMBER_OF_PROCESSORS') >= 0:
      cmd += 'set /a NUMBER_OF_PROCESSORS_PLUS_1=%%NUMBER_OF_PROCESSORS%%+1&& '
    if direct_cmd.find('INTDIR') >= 0:
      cmd += 'set INTDIR=$(IntDir)&& '
    if direct_cmd.find('OUTDIR') >= 0:
      cmd += 'set OUTDIR=$(OutDir)&& '
    if has_input_path and direct_cmd.find('INPUTPATH') >= 0:
      cmd += 'set INPUTPATH=$(InputPath) && '
    cmd += 'bash -c "%(cmd)s"'
    cmd = cmd % {'cygwin_dir': cygwin_dir,
                 'cmd': direct_cmd}
    return input_dir_preamble + cmd
  else:
    # Convert cat --> type to mimic unix.
    if cmd[0] == 'cat':
      command = ['type']
    else:
      command = [cmd[0].replace('/', '\\')]
    # Add call before command to ensure that commands can be tied together one
    # after the other without aborting in Incredibuild, since IB makes a bat
    # file out of the raw command string, and some commands (like python) are
    # actually batch files themselves.
    command.insert(0, 'call')
    # Fix the paths
    # TODO(quote): This is a really ugly heuristic, and will miss path fixing
    # for arguments like "--arg=path" or "/opt:path".
    # If the argument starts with a slash or dash, it's probably a command line
    # switch
    arguments = [i if (i[:1] in "/-") else _FixPath(i) for i in cmd[1:]]
    arguments = [i.replace('$(InputDir)', '%INPUTDIR%') for i in arguments]
    arguments = [MSVSSettings.FixVCMacroSlashes(i) for i in arguments]
    if quote_cmd:
      # Support a mode for using cmd directly.
      # Convert any paths to native form (first element is used directly).
      # TODO(quote): regularize quoting path names throughout the module
      arguments = ['"%s"' % i for i in arguments]
    # Collapse into a single command.
    return input_dir_preamble + ' '.join(command + arguments)
def _BuildCommandLineForRule(spec, rule, has_input_path, do_setup_env):
  """Resolve cygwin/quoting options for *rule* and build its command line."""
  # Currently this weird argument munging is used to duplicate the way a
  # python script would need to be run as part of the chrome tree.
  # Eventually we should add some sort of rule_default option to set this
  # per project. For now the behavior chrome needs is the default.
  cygwin_shell = rule.get('msvs_cygwin_shell')
  if cygwin_shell is None:
    # Fall back to the project-wide setting (enabled by default).
    cygwin_shell = int(spec.get('msvs_cygwin_shell', 1))
  elif isinstance(cygwin_shell, str):
    cygwin_shell = int(cygwin_shell)
  quote_cmd = int(rule.get('msvs_quote_cmd', 1))
  return _BuildCommandLineForRuleRaw(spec, rule['action'], cygwin_shell,
                                     has_input_path, quote_cmd,
                                     do_setup_env=do_setup_env)
def _AddActionStep(actions_dict, inputs, outputs, description, command):
"""Merge action into an existing list of actions.
Care must be taken so that actions which have overlapping inputs either don't
get assigned to the same input, or get collapsed into one.
Arguments:
actions_dict: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
inputs: list of inputs
outputs: list of outputs
description: description of the action
command: command line to execute
"""
# Require there to be at least one input (call sites will ensure this).
assert inputs
action = {
'inputs': inputs,
'outputs': outputs,
'description': description,
'command': command,
}
# Pick where to stick this action.
# While less than optimal in terms of build time, attach them to the first
# input for now.
chosen_input = inputs[0]
# Add it there.
if chosen_input not in actions_dict:
actions_dict[chosen_input] = []
actions_dict[chosen_input].append(action)
def _AddCustomBuildToolForMSVS(p, spec, primary_input,
                               inputs, outputs, description, cmd):
  """Attach a VCCustomBuildTool that runs *cmd* to *primary_input*.

  Arguments:
    p: the target project
    spec: the target project dict
    primary_input: input file to attach the build tool to
    inputs: list of inputs
    outputs: list of outputs
    description: description of the action
    cmd: command line to execute
  """
  tool = MSVSProject.Tool(
      'VCCustomBuildTool',
      {'Description': description,
       'AdditionalDependencies': ';'.join(_FixPaths(inputs)),
       'Outputs': ';'.join(_FixPaths(outputs)),
       'CommandLine': cmd,
      })
  # Attach the tool to the primary input for every configuration.
  for config_name, c_data in spec['configurations'].iteritems():
    p.AddFileConfig(_FixPath(primary_input),
                    _ConfigFullName(config_name, c_data), tools=[tool])
def _AddAccumulatedActionsToMSVS(p, spec, actions_dict):
  """Flush accumulated actions into one custom build step per input file.

  Arguments:
    p: the target project
    spec: the target project dict
    actions_dict: maps an input name to the list of action dicts attached
        to that input file.
  """
  for primary_input in actions_dict:
    inputs = OrderedSet()
    outputs = OrderedSet()
    descriptions = []
    commands = []
    # Merge every action attached to this input.
    for action in actions_dict[primary_input]:
      inputs.update(OrderedSet(action['inputs']))
      outputs.update(OrderedSet(action['outputs']))
      descriptions.append(action['description'])
      commands.append(action['command'])
    # Emit a single custom build step covering all merged actions.
    _AddCustomBuildToolForMSVS(
        p, spec,
        primary_input=primary_input,
        inputs=inputs,
        outputs=outputs,
        description=', and also '.join(descriptions),
        cmd='\r\n'.join(commands))
def _RuleExpandPath(path, input_file):
"""Given the input file to which a rule applied, string substitute a path.
Arguments:
path: a path to string expand
input_file: the file to which the rule applied.
Returns:
The string substituted path.
"""
path = path.replace('$(InputName)',
os.path.splitext(os.path.split(input_file)[1])[0])
path = path.replace('$(InputDir)', os.path.dirname(input_file))
path = path.replace('$(InputExt)',
os.path.splitext(os.path.split(input_file)[1])[1])
path = path.replace('$(InputFileName)', os.path.split(input_file)[1])
path = path.replace('$(InputPath)', input_file)
return path
def _FindRuleTriggerFiles(rule, sources):
"""Find the list of files which a particular rule applies to.
Arguments:
rule: the rule in question
sources: the set of all known source files for this project
Returns:
The list of sources that trigger a particular rule.
"""
return rule.get('rule_sources', [])
def _RuleInputsAndOutputs(rule, trigger_file):
  """Find the inputs and outputs generated by a rule.

  Arguments:
    rule: the rule in question.
    trigger_file: the main trigger for this rule.
  Returns:
    The pair of (inputs, outputs) involved in this rule.
  """
  # The trigger file itself is always an input.
  inputs = OrderedSet([trigger_file])
  outputs = OrderedSet()
  for template in _FixPaths(rule.get('inputs', [])):
    inputs.add(_RuleExpandPath(template, trigger_file))
  for template in _FixPaths(rule.get('outputs', [])):
    outputs.add(_RuleExpandPath(template, trigger_file))
  return (inputs, outputs)
def _GenerateNativeRulesForMSVS(p, rules, output_dir, spec, options):
  """Generate a native .rules file and register it with the project.

  Arguments:
    p: the target project
    rules: the set of rules to include
    output_dir: the directory in which the project/gyp resides
    spec: the project dict
    options: global generator options
  """
  rules_filename = '%s%s.rules' % (spec['target_name'], options.suffix)
  rules_file = MSVSToolFile.Writer(os.path.join(output_dir, rules_filename),
                                   spec['target_name'])
  for rule in rules:
    # Skip a rule with no action and no inputs.
    if 'action' not in rule and not rule.get('rule_sources', []):
      continue
    cmd = _BuildCommandLineForRule(spec, rule, has_input_path=True,
                                   do_setup_env=True)
    rules_file.AddCustomBuildRule(
        name=rule['rule_name'],
        description=rule.get('message', rule['rule_name']),
        extensions=[rule['extension']],
        additional_dependencies=_FixPaths(rule.get('inputs', [])),
        outputs=_FixPaths(rule.get('outputs', [])),
        cmd=cmd)
  # Write out rules file.
  rules_file.WriteIfChanged()
  # Add rules file to project.
  p.AddToolFile(rules_filename)
def _Cygwinify(path):
path = path.replace('$(OutDir)', '$(OutDirCygwin)')
path = path.replace('$(IntDir)', '$(IntDirCygwin)')
return path
def _GenerateExternalRules(rules, output_dir, spec,
                           sources, options, actions_to_add):
  """Generate an external makefile to do a set of rules.

  The makefile is driven by cygwin's make; a single action invoking it is
  appended to actions_to_add, and the makefile itself is added to sources.

  Arguments:
    rules: the list of rules to include
    output_dir: path containing project and gyp files
    spec: project specification data
    sources: set of sources known
    options: global generator options
    actions_to_add: The list of actions we will add to.
  """
  filename = '%s_rules%s.mk' % (spec['target_name'], options.suffix)
  mk_file = gyp.common.WriteOnDiff(os.path.join(output_dir, filename))
  # Find cygwin style versions of some paths.
  mk_file.write('OutDirCygwin:=$(shell cygpath -u "$(OutDir)")\n')
  mk_file.write('IntDirCygwin:=$(shell cygpath -u "$(IntDir)")\n')
  # Gather stuff needed to emit all: target.
  all_inputs = OrderedSet()
  all_outputs = OrderedSet()
  all_output_dirs = OrderedSet()
  first_outputs = []
  for rule in rules:
    trigger_files = _FindRuleTriggerFiles(rule, sources)
    for tf in trigger_files:
      inputs, outputs = _RuleInputsAndOutputs(rule, tf)
      all_inputs.update(OrderedSet(inputs))
      all_outputs.update(OrderedSet(outputs))
      # Only use one target from each rule as the dependency for
      # 'all' so we don't try to build each rule multiple times.
      first_outputs.append(list(outputs)[0])
      # Get the unique output directories for this rule.
      output_dirs = [os.path.split(i)[0] for i in outputs]
      for od in output_dirs:
        all_output_dirs.add(od)
  first_outputs_cyg = [_Cygwinify(i) for i in first_outputs]
  # Write out all: target, including mkdir for each output directory.
  mk_file.write('all: %s\n' % ' '.join(first_outputs_cyg))
  for od in all_output_dirs:
    if od:
      mk_file.write('\tmkdir -p `cygpath -u "%s"`\n' % od)
  mk_file.write('\n')
  # Define how each output is generated.
  for rule in rules:
    trigger_files = _FindRuleTriggerFiles(rule, sources)
    for tf in trigger_files:
      # Get all the inputs and outputs for this rule for this trigger file.
      inputs, outputs = _RuleInputsAndOutputs(rule, tf)
      inputs = [_Cygwinify(i) for i in inputs]
      outputs = [_Cygwinify(i) for i in outputs]
      # Prepare the command line for this rule.
      cmd = [_RuleExpandPath(c, tf) for c in rule['action']]
      cmd = ['"%s"' % i for i in cmd]
      cmd = ' '.join(cmd)
      # Add it to the makefile.
      mk_file.write('%s: %s\n' % (' '.join(outputs), ' '.join(inputs)))
      mk_file.write('\t%s\n\n' % cmd)
  # Close up the file.
  mk_file.close()
  # Add makefile to list of sources.
  sources.add(filename)
  # Add a build action to call makefile.
  cmd = ['make',
         'OutDir=$(OutDir)',
         'IntDir=$(IntDir)',
         '-j', '${NUMBER_OF_PROCESSORS_PLUS_1}',
         '-f', filename]
  cmd = _BuildCommandLineForRuleRaw(spec, cmd, True, False, True, True)
  # Insert makefile as 0'th input, so it gets the action attached there,
  # as this is easier to understand from in the IDE.
  all_inputs = list(all_inputs)
  all_inputs.insert(0, filename)
  _AddActionStep(actions_to_add,
                 inputs=_FixPaths(all_inputs),
                 outputs=_FixPaths(all_outputs),
                 description='Running external rules for %s' %
                     spec['target_name'],
                 command=cmd)
def _EscapeEnvironmentVariableExpansion(s):
"""Escapes % characters.
Escapes any % characters so that Windows-style environment variable
expansions will leave them alone.
See http://connect.microsoft.com/VisualStudio/feedback/details/106127/cl-d-name-text-containing-percentage-characters-doesnt-compile
to understand why we have to do this.
Args:
s: The string to be escaped.
Returns:
The escaped string.
"""
s = s.replace('%', '%%')
return s
quote_replacer_regex = re.compile(r'(\\*)"')
def _EscapeCommandLineArgumentForMSVS(s):
"""Escapes a Windows command-line argument.
So that the Win32 CommandLineToArgv function will turn the escaped result back
into the original string.
See http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
("Parsing C++ Command-Line Arguments") to understand why we have to do
this.
Args:
s: the string to be escaped.
Returns:
the escaped string.
"""
def _Replace(match):
# For a literal quote, CommandLineToArgv requires an odd number of
# backslashes preceding it, and it produces half as many literal backslashes
# (rounded down). So we need to produce 2n+1 backslashes.
return 2 * match.group(1) + '\\"'
# Escape all quotes so that they are interpreted literally.
s = quote_replacer_regex.sub(_Replace, s)
# Now add unescaped quotes so that any whitespace is interpreted literally.
s = '"' + s + '"'
return s
delimiters_replacer_regex = re.compile(r'(\\*)([,;]+)')
def _EscapeVCProjCommandLineArgListItem(s):
    """Escapes command line arguments for MSVS.

    The VCProj format stores string lists in a single string using commas and
    semi-colons as separators, which must be quoted if they are to be
    interpreted literally.  However, command-line arguments may already have
    quotes, and the VCProj parser is ignorant of the backslash escaping
    convention used by CommandLineToArgv, so the command-line quotes and the
    VCProj quotes may not be the same quotes.  So to store a general
    command-line argument in a VCProj list, we need to parse the existing
    quoting according to VCProj's convention and quote any delimiters that are
    not already quoted by that convention.  The quotes that we add will also be
    seen by CommandLineToArgv, so if backslashes precede them then we also have
    to escape those backslashes according to the CommandLineToArgv
    convention.

    Args:
      s: the string to be escaped.
    Returns:
      the escaped string.
    """
    def _Replace(match):
        # For a non-literal quote, CommandLineToArgv requires an even number of
        # backslashes preceding it, and it produces half as many literal
        # backslashes.  So we need to produce 2n backslashes.
        return 2 * match.group(1) + '"' + match.group(2) + '"'

    # Split on VCProj-style quotes; the unquoted segments end up at the
    # even-numbered indices.
    segments = s.split('"')
    for i in range(0, len(segments), 2):
        segments[i] = delimiters_replacer_regex.sub(_Replace, segments[i])
    # Concatenate back into a single string
    s = '"'.join(segments)
    if len(segments) % 2 == 0:
        # String ends while still quoted according to VCProj's convention.  This
        # means the delimiter and the next list item that follow this one in the
        # .vcproj file will be misinterpreted as part of this item.  There is
        # nothing we can do about this.  Adding an extra quote would correct the
        # problem in the VCProj but cause the same problem on the final
        # command-line.  Moving the item to the end of the list does works, but
        # that's only possible if there's only one such item.  Let's just warn
        # the user.
        # NOTE: Python 2 print-to-stderr syntax; kept as-is for this codebase.
        print >> sys.stderr, ('Warning: MSVS may misinterpret the odd number of ' +
                              'quotes in ' + s)
    return s
def _EscapeCppDefineForMSVS(s):
    """Escapes a CPP define so that it will reach the compiler unaltered."""
    escaped = _EscapeEnvironmentVariableExpansion(s)
    escaped = _EscapeCommandLineArgumentForMSVS(escaped)
    escaped = _EscapeVCProjCommandLineArgListItem(escaped)
    # cl.exe replaces literal # characters with = in preprocessor definitions
    # for some reason.  Octal-encode to work around that.
    return escaped.replace('#', '\\%03o' % ord('#'))
# Like quote_replacer_regex, but only matches a quote preceded by at least one
# backslash ('\\+' rather than '\\*').  Used by
# _EscapeCommandLineArgumentForMSBuild below.
quote_replacer_regex2 = re.compile(r'(\\+)"')
def _EscapeCommandLineArgumentForMSBuild(s):
"""Escapes a Windows command-line argument for use by MSBuild."""
def _Replace(match):
return (len(match.group(1)) / 2 * 4) * '\\' + '\\"'
# Escape all quotes so that they are interpreted literally.
s = quote_replacer_regex2.sub(_Replace, s)
return s
def _EscapeMSBuildSpecialCharacters(s):
escape_dictionary = {
'%': '%25',
'$': '%24',
'@': '%40',
"'": '%27',
';': '%3B',
'?': '%3F',
'*': '%2A'
}
result = ''.join([escape_dictionary.get(c, c) for c in s])
return result
def _EscapeCppDefineForMSBuild(s):
    """Escapes a CPP define so that it will reach the compiler unaltered."""
    escaped = _EscapeEnvironmentVariableExpansion(s)
    escaped = _EscapeCommandLineArgumentForMSBuild(escaped)
    escaped = _EscapeMSBuildSpecialCharacters(escaped)
    # cl.exe replaces literal # characters with = in preprocessor definitions
    # for some reason.  Octal-encode to work around that.
    return escaped.replace('#', '\\%03o' % ord('#'))
def _GenerateRulesForMSVS(p, output_dir, options, spec,
                          sources, excluded_sources,
                          actions_to_add):
    """Generate all the rules for a particular project.

    Arguments:
      p: the project
      output_dir: directory to emit rules to
      options: global options passed to the generator
      spec: the specification for this project
      sources: the set of all known source files in this project
      excluded_sources: the set of sources excluded from normal processing
      actions_to_add: deferred list of actions to add in
    """
    all_rules = spec.get('rules', [])
    # Partition rules by whether MSVS handles them natively via a .rules file.
    native_rules = []
    external_rules = []
    for rule in all_rules:
        if int(rule.get('msvs_external_rule', 0)):
            external_rules.append(rule)
        else:
            native_rules.append(rule)
    # Handle rules that use a native rules file.
    if native_rules:
        _GenerateNativeRulesForMSVS(p, native_rules, output_dir, spec, options)
    # Handle external rules (non-native rules).
    if external_rules:
        _GenerateExternalRules(external_rules, output_dir, spec,
                               sources, options, actions_to_add)
    _AdjustSourcesForRules(all_rules, sources, excluded_sources, False)
def _AdjustSourcesForRules(rules, sources, excluded_sources, is_msbuild):
    """Adds rule outputs to sources and un-excludes rule trigger files.

    Arguments:
      rules: list of rule dicts from the spec.
      sources: set of source files; updated in place.
      excluded_sources: set of excluded source files; updated in place.
      is_msbuild: True when generating for MSBuild; rule inputs are then not
          added to excluded_sources.
    """
    # Add outputs generated by each rule (if applicable).
    for rule in rules:
        # Add in the outputs from this rule.
        trigger_files = _FindRuleTriggerFiles(rule, sources)
        for trigger_file in trigger_files:
            # Remove trigger_file from excluded_sources to let the rule be
            # triggered (e.g. rule trigger ax_enums.idl is added to
            # excluded_sources because it's also in an action's inputs in the
            # same project).
            excluded_sources.discard(_FixPath(trigger_file))
            # Only pull in this rule's inputs/outputs when its outputs feed
            # back in as sources.
            if int(rule.get('process_outputs_as_sources', False)):
                inputs, outputs = _RuleInputsAndOutputs(rule, trigger_file)
                inputs = OrderedSet(_FixPaths(inputs))
                outputs = OrderedSet(_FixPaths(outputs))
                # The trigger file itself is already in sources.
                inputs.remove(_FixPath(trigger_file))
                sources.update(inputs)
                if not is_msbuild:
                    excluded_sources.update(inputs)
                sources.update(outputs)
def _FilterActionsFromExcluded(excluded_sources, actions_to_add):
    """Take inputs with actions attached out of the list of exclusions.

    Arguments:
      excluded_sources: list of source files not to be built.
      actions_to_add: dict of actions keyed on source file they're attached to.
    Returns:
      excluded_sources with files that have actions attached removed.
    """
    # Files that carry an action must stay in the build so the action runs.
    with_actions = OrderedSet(_FixPaths(actions_to_add.keys()))
    return [source for source in excluded_sources
            if source not in with_actions]
def _GetDefaultConfiguration(spec):
return spec['configurations'][spec['default_configuration']]
def _GetGuidOfProject(proj_path, spec):
    """Get the guid for the project.

    Arguments:
      proj_path: Path of the vcproj or vcxproj file to generate.
      spec: The target dictionary containing the properties of the target.
    Returns:
      the guid.
    Raises:
      ValueError: if the specified GUID is invalid.
    """
    # An explicit guid in the default configuration wins; otherwise derive a
    # deterministic one from the project path.
    default_config = _GetDefaultConfiguration(spec)
    explicit_guid = default_config.get('msvs_guid')
    if explicit_guid:
        if VALID_MSVS_GUID_CHARS.match(explicit_guid) is None:
            raise ValueError('Invalid MSVS guid: "%s". Must match regex: "%s".' %
                             (explicit_guid, VALID_MSVS_GUID_CHARS.pattern))
        return '{%s}' % explicit_guid
    return MSVSNew.MakeGuid(proj_path)
def _GetMsbuildToolsetOfProject(proj_path, spec, version):
"""Get the platform toolset for the project.
Arguments:
proj_path: Path of the vcproj or vcxproj file to generate.
spec: The target dictionary containing the properties of the target.
version: The MSVSVersion object.
Returns:
the platform toolset string or None.
"""
# Pluck out the default configuration.
default_config = _GetDefaultConfiguration(spec)
toolset = default_config.get('msbuild_toolset')
if not toolset and version.DefaultToolset():
toolset = version.DefaultToolset()
return toolset
def _GenerateProject(project, options, version, generator_flags):
    """Generates a vcproj file.

    Arguments:
      project: the MSVSProject object.
      options: global generator options.
      version: the MSVSVersion object.
      generator_flags: dict of generator-specific flags.
    Returns:
      A list of source files that cannot be found on disk.
    """
    default_config = _GetDefaultConfiguration(project.spec)

    # Skip emitting anything if told to with msvs_existing_vcproj option.
    if default_config.get('msvs_existing_vcproj'):
        return []

    # Dispatch on the project format the VS version expects.
    if not version.UsesVcxproj():
        return _GenerateMSVSProject(project, options, version, generator_flags)
    return _GenerateMSBuildProject(project, options, version, generator_flags)
# TODO: Avoid code duplication with _ValidateSourcesForOSX in make.py.
def _ValidateSourcesForMSVSProject(spec, version):
"""Makes sure if duplicate basenames are not specified in the source list.
Arguments:
spec: The target dictionary containing the properties of the target.
version: The VisualStudioVersion object.
"""
# This validation should not be applied to MSVC2010 and later.
assert not version.UsesVcxproj()
# TODO: Check if MSVC allows this for loadable_module targets.
if spec.get('type', None) not in ('static_library', 'shared_library'):
return
sources = spec.get('sources', [])
basenames = {}
for source in sources:
name, ext = os.path.splitext(source)
is_compiled_file = ext in [
'.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
if not is_compiled_file:
continue
basename = os.path.basename(name) # Don't include extension.
basenames.setdefault(basename, []).append(source)
error = ''
for basename, files in basenames.iteritems():
if len(files) > 1:
error += ' %s: %s\n' % (basename, ' '.join(files))
if error:
print('static library %s has several files with the same basename:\n' %
spec['target_name'] + error + 'MSVC08 cannot handle that.')
raise GypError('Duplicate basenames in sources section, see list above')
def _GenerateMSVSProject(project, options, version, generator_flags):
    """Generates a .vcproj file.  It may create .rules and .user files too.

    Arguments:
      project: The project object we will generate the file for.
      options: Global options passed to the generator.
      version: The VisualStudioVersion object.
      generator_flags: dict of generator-specific flags.
    Returns:
      A list of source files that could not be found on disk
      (from _VerifySourcesExist).
    """
    spec = project.spec
    gyp.common.EnsureDirExists(project.path)

    platforms = _GetUniquePlatforms(spec)
    p = MSVSProject.Writer(project.path, version, spec['target_name'],
                           project.guid, platforms)

    # Get directory project file is in.
    project_dir = os.path.split(project.path)[0]
    gyp_path = _NormalizedSource(project.build_file)
    relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, project_dir)

    config_type = _GetMSVSConfigurationType(spec, project.build_file)
    for config_name, config in spec['configurations'].iteritems():
        _AddConfigurationToMSVSProject(p, spec, config_type, config_name, config)

    # MSVC08 and prior version cannot handle duplicate basenames in the same
    # target.
    # TODO: Take excluded sources into consideration if possible.
    _ValidateSourcesForMSVSProject(spec, version)

    # Prepare list of sources and excluded sources.
    gyp_file = os.path.split(project.build_file)[1]
    sources, excluded_sources = _PrepareListOfSources(spec, generator_flags,
                                                      gyp_file)

    # Add rules.
    actions_to_add = {}
    _GenerateRulesForMSVS(p, project_dir, options, spec,
                          sources, excluded_sources,
                          actions_to_add)
    list_excluded = generator_flags.get('msvs_list_excluded_files', True)
    sources, excluded_sources, excluded_idl = (
        _AdjustSourcesAndConvertToFilterHierarchy(spec, options, project_dir,
                                                  sources, excluded_sources,
                                                  list_excluded, version))

    # Add in files.
    missing_sources = _VerifySourcesExist(sources, project_dir)
    p.AddFiles(sources)

    _AddToolFilesToMSVS(p, spec)
    _HandlePreCompiledHeaders(p, sources, spec)
    _AddActions(actions_to_add, spec, relative_path_of_gyp_file)
    _AddCopies(actions_to_add, spec)
    _WriteMSVSUserFile(project.path, version, spec)

    # NOTE: this stanza must appear after all actions have been decided.
    # Don't exclude sources with actions attached, or they won't run.
    excluded_sources = _FilterActionsFromExcluded(
        excluded_sources, actions_to_add)
    _ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl,
                                list_excluded)
    _AddAccumulatedActionsToMSVS(p, spec, actions_to_add)

    # Write it out.
    p.WriteIfChanged()

    return missing_sources
def _GetUniquePlatforms(spec):
    """Returns the list of unique platforms for this spec, e.g ['win32', ...].

    Arguments:
      spec: The target dictionary containing the properties of the target.
    Returns:
      The list of unique platform names across all configurations.
      (The original docstring claimed an MSVSUserFile object; that was a
      copy-paste error from _CreateMSVSUserFile.)
    """
    # Gather list of unique platforms.
    platforms = OrderedSet()
    for configuration in spec['configurations']:
        platforms.add(_ConfigPlatform(spec['configurations'][configuration]))
    platforms = list(platforms)
    return platforms
def _CreateMSVSUserFile(proj_path, version, spec):
    """Generates a .user file for the user running this Gyp program.

    Arguments:
      proj_path: The path of the project file being created.  The .user file
                 shares the same path (with an appropriate suffix).
      version: The VisualStudioVersion object.
      spec: The target dictionary containing the properties of the target.
    Returns:
      The MSVSUserFile object created.
    """
    domain, username = _GetDomainAndUserName()
    # Embed domain and user in the name (proj_path.DOMAIN.username.user) so
    # per-user settings don't collide.
    user_filename = '.'.join([proj_path, domain, username, 'user'])
    return MSVSUserFile.Writer(user_filename, version, spec['target_name'])
def _GetMSVSConfigurationType(spec, build_file):
"""Returns the configuration type for this project.
It's a number defined by Microsoft. May raise an exception.
Args:
spec: The target dictionary containing the properties of the target.
build_file: The path of the gyp file.
Returns:
An integer, the configuration type.
"""
try:
config_type = {
'executable': '1', # .exe
'shared_library': '2', # .dll
'loadable_module': '2', # .dll
'static_library': '4', # .lib
'none': '10', # Utility type
}[spec['type']]
except KeyError:
if spec.get('type'):
raise GypError('Target type %s is not a valid target type for '
'target %s in %s.' %
(spec['type'], spec['target_name'], build_file))
else:
raise GypError('Missing type field for target %s in %s.' %
(spec['target_name'], build_file))
return config_type
def _AddConfigurationToMSVSProject(p, spec, config_type, config_name, config):
    """Adds a configuration to the MSVS project.

    Many settings in a vcproj file are specific to a configuration.  This
    function writes the main part of the vcproj file that's configuration
    specific.

    Arguments:
      p: The target project being generated.
      spec: The target dictionary containing the properties of the target.
      config_type: The configuration type, a number as defined by Microsoft.
      config_name: The name of the configuration.
      config: The dictionary that defines the special processing to be done
              for this configuration.
    """
    # Get the information for this configuration
    include_dirs, midl_include_dirs, resource_include_dirs = \
        _GetIncludeDirs(config)
    libraries = _GetLibraries(spec)
    library_dirs = _GetLibraryDirs(config)
    out_file, vc_tool, _ = _GetOutputFilePathAndTool(spec, msbuild=False)
    defines = _GetDefines(config)
    defines = [_EscapeCppDefineForMSVS(d) for d in defines]
    disabled_warnings = _GetDisabledWarnings(config)
    prebuild = config.get('msvs_prebuild')
    postbuild = config.get('msvs_postbuild')
    def_file = _GetModuleDefinition(spec)
    precompiled_header = config.get('msvs_precompiled_header')

    # Prepare the list of tools as a dictionary.
    tools = dict()
    # Add in user specified msvs_settings.
    msvs_settings = config.get('msvs_settings', {})
    MSVSSettings.ValidateMSVSSettings(msvs_settings)

    # Prevent default library inheritance from the environment.
    _ToolAppend(tools, 'VCLinkerTool', 'AdditionalDependencies', ['$(NOINHERIT)'])

    for tool in msvs_settings:
        settings = config['msvs_settings'][tool]
        for setting in settings:
            _ToolAppend(tools, tool, setting, settings[setting])
    # Add the information to the appropriate tool
    _ToolAppend(tools, 'VCCLCompilerTool',
                'AdditionalIncludeDirectories', include_dirs)
    _ToolAppend(tools, 'VCMIDLTool',
                'AdditionalIncludeDirectories', midl_include_dirs)
    _ToolAppend(tools, 'VCResourceCompilerTool',
                'AdditionalIncludeDirectories', resource_include_dirs)
    # Add in libraries.
    _ToolAppend(tools, 'VCLinkerTool', 'AdditionalDependencies', libraries)
    _ToolAppend(tools, 'VCLinkerTool', 'AdditionalLibraryDirectories',
                library_dirs)
    if out_file:
        # Only set OutputFile if the user has not already chosen one.
        _ToolAppend(tools, vc_tool, 'OutputFile', out_file, only_if_unset=True)
    # Add defines.
    _ToolAppend(tools, 'VCCLCompilerTool', 'PreprocessorDefinitions', defines)
    _ToolAppend(tools, 'VCResourceCompilerTool', 'PreprocessorDefinitions',
                defines)
    # Change program database directory to prevent collisions.
    _ToolAppend(tools, 'VCCLCompilerTool', 'ProgramDataBaseFileName',
                '$(IntDir)$(ProjectName)\\vc80.pdb', only_if_unset=True)
    # Add disabled warnings.
    _ToolAppend(tools, 'VCCLCompilerTool',
                'DisableSpecificWarnings', disabled_warnings)
    # Add Pre-build.
    _ToolAppend(tools, 'VCPreBuildEventTool', 'CommandLine', prebuild)
    # Add Post-build.
    _ToolAppend(tools, 'VCPostBuildEventTool', 'CommandLine', postbuild)
    # Turn on precompiled headers if appropriate.
    if precompiled_header:
        # Keep only the basename of the configured precompiled header.
        precompiled_header = os.path.split(precompiled_header)[1]
        _ToolAppend(tools, 'VCCLCompilerTool', 'UsePrecompiledHeader', '2')
        _ToolAppend(tools, 'VCCLCompilerTool',
                    'PrecompiledHeaderThrough', precompiled_header)
        _ToolAppend(tools, 'VCCLCompilerTool',
                    'ForcedIncludeFiles', precompiled_header)
    # Loadable modules don't generate import libraries;
    # tell dependent projects to not expect one.
    if spec['type'] == 'loadable_module':
        _ToolAppend(tools, 'VCLinkerTool', 'IgnoreImportLibrary', 'true')
    # Set the module definition file if any.
    if def_file:
        _ToolAppend(tools, 'VCLinkerTool', 'ModuleDefinitionFile', def_file)

    _AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name)
def _GetIncludeDirs(config):
    """Returns the list of directories to be used for #include directives.

    Arguments:
      config: The dictionary that defines the special processing to be done
              for this configuration.
    Returns:
      A triple of (include_dirs, midl_include_dirs, resource_include_dirs),
      each a list of directory paths.
    """
    # TODO(bradnelson): include_dirs should really be flexible enough not to
    # require this sort of thing.
    system_dirs = config.get('msvs_system_include_dirs', [])
    include_dirs = config.get('include_dirs', []) + system_dirs
    midl_include_dirs = config.get('midl_include_dirs', []) + system_dirs
    # Resource includes default to the (unfixed) regular include dirs.
    resource_include_dirs = config.get('resource_include_dirs', include_dirs)
    return (_FixPaths(include_dirs),
            _FixPaths(midl_include_dirs),
            _FixPaths(resource_include_dirs))
def _GetLibraryDirs(config):
    """Returns the list of directories to be used for library search paths.

    Arguments:
      config: The dictionary that defines the special processing to be done
              for this configuration.
    Returns:
      The list of directory paths.
    """
    return _FixPaths(config.get('library_dirs', []))
def _GetLibraries(spec):
"""Returns the list of libraries for this configuration.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
The list of directory paths.
"""
libraries = spec.get('libraries', [])
# Strip out -l, as it is not used on windows (but is needed so we can pass
# in libraries that are assumed to be in the default library path).
# Also remove duplicate entries, leaving only the last duplicate, while
# preserving order.
found = OrderedSet()
unique_libraries_list = []
for entry in reversed(libraries):
library = re.sub(r'^\-l', '', entry)
if not os.path.splitext(library)[1]:
library += '.lib'
if library not in found:
found.add(library)
unique_libraries_list.append(library)
unique_libraries_list.reverse()
return unique_libraries_list
def _GetOutputFilePathAndTool(spec, msbuild):
"""Returns the path and tool to use for this target.
Figures out the path of the file this spec will create and the name of
the VC tool that will create it.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
A triple of (file path, name of the vc tool, name of the msbuild tool)
"""
# Select a name for the output file.
out_file = ''
vc_tool = ''
msbuild_tool = ''
output_file_map = {
'executable': ('VCLinkerTool', 'Link', '$(OutDir)', '.exe'),
'shared_library': ('VCLinkerTool', 'Link', '$(OutDir)', '.dll'),
'loadable_module': ('VCLinkerTool', 'Link', '$(OutDir)', '.dll'),
'static_library': ('VCLibrarianTool', 'Lib', '$(OutDir)lib\\', '.lib'),
}
output_file_props = output_file_map.get(spec['type'])
if output_file_props and int(spec.get('msvs_auto_output_file', 1)):
vc_tool, msbuild_tool, out_dir, suffix = output_file_props
if spec.get('standalone_static_library', 0):
out_dir = '$(OutDir)'
out_dir = spec.get('product_dir', out_dir)
product_extension = spec.get('product_extension')
if product_extension:
suffix = '.' + product_extension
elif msbuild:
suffix = '$(TargetExt)'
prefix = spec.get('product_prefix', '')
product_name = spec.get('product_name', '$(ProjectName)')
out_file = ntpath.join(out_dir, prefix + product_name + suffix)
return out_file, vc_tool, msbuild_tool
def _GetOutputTargetExt(spec):
"""Returns the extension for this target, including the dot
If product_extension is specified, set target_extension to this to avoid
MSB8012, returns None otherwise. Ignores any target_extension settings in
the input files.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
A string with the extension, or None
"""
target_extension = spec.get('product_extension')
if target_extension:
return '.' + target_extension
return None
def _GetDefines(config):
"""Returns the list of preprocessor definitions for this configuation.
Arguments:
config: The dictionary that defines the special processing to be done
for this configuration.
Returns:
The list of preprocessor definitions.
"""
defines = []
for d in config.get('defines', []):
if type(d) == list:
fd = '='.join([str(dpart) for dpart in d])
else:
fd = str(d)
defines.append(fd)
return defines
def _GetDisabledWarnings(config):
return [str(i) for i in config.get('msvs_disabled_warnings', [])]
def _GetModuleDefinition(spec):
def_file = ''
if spec['type'] in ['shared_library', 'loadable_module', 'executable']:
def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
if len(def_files) == 1:
def_file = _FixPath(def_files[0])
elif def_files:
raise ValueError(
'Multiple module definition files in one target, target %s lists '
'multiple .def files: %s' % (
spec['target_name'], ' '.join(def_files)))
return def_file
def _ConvertToolsToExpectedForm(tools):
    """Convert tools to a form expected by Visual Studio.

    Arguments:
      tools: A dictionary of settings; the tool name is the key.
    Returns:
      A list of Tool objects.
    """
    tool_list = []
    # .items() is equivalent to .iteritems() here and also works on Python 3.
    for tool, settings in tools.items():
        # Collapse settings with lists.
        settings_fixed = {}
        for setting, value in settings.items():
            # isinstance (rather than type(value) == list) also accepts list
            # subclasses, matching normal Python type-check idiom.
            if isinstance(value, list):
                # Linker dependencies and AdditionalOptions are space-joined
                # command-line fragments; everything else is a semicolon-
                # delimited Visual Studio list.
                if ((tool == 'VCLinkerTool' and
                     setting == 'AdditionalDependencies') or
                    setting == 'AdditionalOptions'):
                    settings_fixed[setting] = ' '.join(value)
                else:
                    settings_fixed[setting] = ';'.join(value)
            else:
                settings_fixed[setting] = value
        # Add in this tool.
        tool_list.append(MSVSProject.Tool(tool, settings_fixed))
    return tool_list
def _AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name):
    """Add to the project file the configuration specified by config.

    Arguments:
      p: The target project being generated.
      spec: the target project dict.
      tools: A dictionary of settings; the tool name is the key.
      config: The dictionary that defines the special processing to be done
              for this configuration.
      config_type: The configuration type, a number as defined by Microsoft.
      config_name: The name of the configuration.
    """
    # Build the attribute dict and the Tool list, then register the config.
    attributes = _GetMSVSAttributes(spec, config, config_type)
    p.AddConfig(_ConfigFullName(config_name, config),
                attrs=attributes,
                tools=_ConvertToolsToExpectedForm(tools))
def _GetMSVSAttributes(spec, config, config_type):
    """Builds the configuration-attribute dict for a vcproj configuration."""
    # Start from a copy of the user-specified attributes.
    prepared_attrs = dict(config.get('msvs_configuration_attributes', {}))

    # Add props files.
    vsprops_dirs = _FixPaths(config.get('msvs_props', []))
    if vsprops_dirs:
        prepared_attrs['InheritedPropertySheets'] = ';'.join(vsprops_dirs)

    # Set configuration type.
    prepared_attrs['ConfigurationType'] = config_type

    # Output directory defaults to the solution-relative configuration dir.
    output_dir = prepared_attrs.get('OutputDirectory',
                                    '$(SolutionDir)$(ConfigurationName)')
    prepared_attrs['OutputDirectory'] = _FixPath(output_dir) + '\\'

    if 'IntermediateDirectory' not in prepared_attrs:
        intermediate = '$(ConfigurationName)\\obj\\$(ProjectName)'
        prepared_attrs['IntermediateDirectory'] = _FixPath(intermediate) + '\\'
    else:
        # Normalize an explicitly supplied intermediate directory.
        intermediate = _FixPath(prepared_attrs['IntermediateDirectory']) + '\\'
        prepared_attrs['IntermediateDirectory'] = \
            MSVSSettings.FixVCMacroSlashes(intermediate)
    return prepared_attrs
def _AddNormalizedSources(sources_set, sources_array):
    """Normalizes each source path and adds it to sources_set."""
    for source in sources_array:
        sources_set.add(_NormalizedSource(source))
def _PrepareListOfSources(spec, generator_flags, gyp_file):
    """Prepare list of sources and excluded sources.

    Besides the sources specified directly in the spec, adds the gyp file so
    that a change to it will cause a re-compile.  Also adds appropriate sources
    for actions and copies.  Assumes later stage will un-exclude files which
    have custom build steps attached.

    Arguments:
      spec: The target dictionary containing the properties of the target.
      generator_flags: dict of generator-specific flags; 'standalone' controls
          whether the gyp file itself is added as a source.
      gyp_file: The name of the gyp file.
    Returns:
      A pair of (OrderedSet of sources, OrderedSet of excluded sources).
      The sources will be relative to the gyp file.
    """
    sources = OrderedSet()
    _AddNormalizedSources(sources, spec.get('sources', []))
    excluded_sources = OrderedSet()
    # Add in the gyp file.
    if not generator_flags.get('standalone'):
        sources.add(gyp_file)

    # Add in 'action' inputs and outputs.
    for a in spec.get('actions', []):
        inputs = a['inputs']
        inputs = [_NormalizedSource(i) for i in inputs]
        # Add all inputs to sources and excluded sources.
        inputs = OrderedSet(inputs)
        sources.update(inputs)
        if not spec.get('msvs_external_builder'):
            excluded_sources.update(inputs)
        if int(a.get('process_outputs_as_sources', False)):
            _AddNormalizedSources(sources, a.get('outputs', []))
    # Add in 'copies' inputs and outputs.
    for cpy in spec.get('copies', []):
        _AddNormalizedSources(sources, cpy.get('files', []))
    return (sources, excluded_sources)
def _AdjustSourcesAndConvertToFilterHierarchy(
        spec, options, gyp_dir, sources, excluded_sources, list_excluded,
        version):
    """Adjusts the list of sources and excluded sources.

    Also converts the sets to lists.

    Arguments:
      spec: The target dictionary containing the properties of the target.
      options: Global generator options.
      gyp_dir: The path to the gyp file being processed (unused here).
      sources: A set of sources to be included for this project.
      excluded_sources: A set of sources to be excluded for this project.
      list_excluded: passed through to _ConvertSourcesToFilterHierarchy;
          controls whether excluded files are listed.
      version: A MSVSVersion object.
    Returns:
      A trio of (list of sources, list of excluded sources,
                 path of excluded IDL file)
    """
    # Exclude excluded sources coming into the generator.
    excluded_sources.update(OrderedSet(spec.get('sources_excluded', [])))
    # Add excluded sources into sources for good measure.
    sources.update(excluded_sources)
    # Convert to proper windows form.
    # NOTE: sources goes from being a set to a list here.
    # NOTE: excluded_sources goes from being a set to a list here.
    sources = _FixPaths(sources)
    # Convert to proper windows form.
    excluded_sources = _FixPaths(excluded_sources)

    excluded_idl = _IdlFilesHandledNonNatively(spec, sources)

    precompiled_related = _GetPrecompileRelatedFiles(spec)
    # Find the excluded ones, minus the precompiled header related ones.
    fully_excluded = [i for i in excluded_sources
                      if i not in precompiled_related]

    # Convert to folders and the right slashes.
    sources = [i.split('\\') for i in sources]
    sources = _ConvertSourcesToFilterHierarchy(sources, excluded=fully_excluded,
                                               list_excluded=list_excluded,
                                               msvs_version=version)

    # Prune filters with a single child to flatten ugly directory structures
    # such as ../../src/modules/module1 etc.
    if version.UsesVcxproj():
        # MSBuild: collapse while every top-level entry is a single-child
        # filter and they all share one name.
        while all([isinstance(s, MSVSProject.Filter) for s in sources]) \
            and len(set([s.name for s in sources])) == 1:
            assert all([len(s.contents) == 1 for s in sources])
            sources = [s.contents[0] for s in sources]
    else:
        while len(sources) == 1 and isinstance(sources[0], MSVSProject.Filter):
            sources = sources[0].contents

    return sources, excluded_sources, excluded_idl
def _IdlFilesHandledNonNatively(spec, sources):
# If any non-native rules use 'idl' as an extension exclude idl files.
# Gather a list here to use later.
using_idl = False
for rule in spec.get('rules', []):
if rule['extension'] == 'idl' and int(rule.get('msvs_external_rule', 0)):
using_idl = True
break
if using_idl:
excluded_idl = [i for i in sources if i.endswith('.idl')]
else:
excluded_idl = []
return excluded_idl
def _GetPrecompileRelatedFiles(spec):
    """Returns precompiled-header related sources across all configurations."""
    related = []
    for config in spec['configurations'].values():
        for key in precomp_keys:
            source = config.get(key)
            if source:
                related.append(_FixPath(source))
    return related
def _ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl,
                                list_excluded):
    """Marks excluded files as ExcludedFromBuild in the project."""
    exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources,
                                            excluded_idl)
    num_configs = len(spec['configurations'])
    for file_name, excluded_configs in exclusions.items():
        if not list_excluded and len(excluded_configs) == num_configs:
            # If we're not listing excluded files, then they won't appear in
            # the project, so don't try to configure them to be excluded.
            continue
        for config_name, config in excluded_configs:
            p.AddFileConfig(file_name, _ConfigFullName(config_name, config),
                            {'ExcludedFromBuild': 'true'})
def _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl):
    """Maps each excluded file to the configurations it is excluded from."""
    exclusions = {}
    configurations = spec['configurations']
    # Exclude excluded sources from being built.
    for f in excluded_sources:
        excluded_configs = []
        for config_name, config in configurations.items():
            # Precompiled-header related files stay buildable in the
            # configurations that reference them.
            precomped = [_FixPath(config.get(i, '')) for i in precomp_keys]
            if f not in precomped:
                excluded_configs.append((config_name, config))
        exclusions[f] = excluded_configs
    # If any non-native rules use 'idl' as an extension, those idl files are
    # excluded from every configuration.
    for f in excluded_idl:
        exclusions[f] = list(configurations.items())
    return exclusions
def _AddToolFilesToMSVS(p, spec):
    """Registers msvs_tool_files (rule files) from every configuration."""
    tool_files = OrderedSet()
    for config in spec['configurations'].values():
        for tool_file in config.get('msvs_tool_files', []):
            tool_files.add(tool_file)
    for tool_file in tool_files:
        p.AddToolFile(tool_file)
def _HandlePreCompiledHeaders(p, sources, spec):
    """Configures per-file precompiled-header flags.

    Pre-compiled header source stubs need a different compiler flag
    (generate precompiled header) and any source file not of the same
    kind (i.e. C vs. C++) as the precompiled header source stub needs
    to have use of precompiled headers disabled.

    Arguments:
      p: The target project being generated.
      sources: The source tree (a list of files and MSVSProject.Filter nodes).
      spec: The target dictionary containing the properties of the target.
    """
    extensions_excluded_from_precompile = []
    for config_name, config in spec['configurations'].iteritems():
        source = config.get('msvs_precompiled_source')
        if source:
            source = _FixPath(source)
            # UsePrecompiledHeader=1 for if using precompiled headers.
            tool = MSVSProject.Tool('VCCLCompilerTool',
                                    {'UsePrecompiledHeader': '1'})
            p.AddFileConfig(source, _ConfigFullName(config_name, config),
                            {}, tools=[tool])
            basename, extension = os.path.splitext(source)
            # NOTE: this is rebound on every configuration that has a
            # precompiled source, so the last such configuration determines
            # which extensions the disabling pass below applies to.
            if extension == '.c':
                extensions_excluded_from_precompile = ['.cc', '.cpp', '.cxx']
            else:
                extensions_excluded_from_precompile = ['.c']

    def DisableForSourceTree(source_tree):
        # Recursively walk the filter tree and disable precompiled headers
        # for files whose extension is in the excluded list (i.e. the other
        # language than the precompiled source stub).
        for source in source_tree:
            if isinstance(source, MSVSProject.Filter):
                DisableForSourceTree(source.contents)
            else:
                basename, extension = os.path.splitext(source)
                if extension in extensions_excluded_from_precompile:
                    for config_name, config in spec['configurations'].iteritems():
                        tool = MSVSProject.Tool('VCCLCompilerTool',
                                                {'UsePrecompiledHeader': '0',
                                                 'ForcedIncludeFiles': '$(NOINHERIT)'})
                        p.AddFileConfig(_FixPath(source),
                                        _ConfigFullName(config_name, config),
                                        {}, tools=[tool])

    # Do nothing if there was no precompiled source.
    if extensions_excluded_from_precompile:
        DisableForSourceTree(sources)
def _AddActions(actions_to_add, spec, relative_path_of_gyp_file):
  """Collects the spec's 'actions' into |actions_to_add|.

  Each action is attached to its first input, falling back to the gyp file
  itself when it declares no inputs.  The cygwin setup_env step is emitted
  only for the first action attached to a given file: when all actions run
  together in one VS batch file, repeating setup_env would make PATH grow
  too long.
  """
  envs_done = set()
  for action in spec.get('actions', []):
    # Anchor the action to the gyp file if nothing else is available.
    action_inputs = action.get('inputs') or [relative_path_of_gyp_file]
    anchor = action_inputs[0]
    command = _BuildCommandLineForRule(spec, action, has_input_path=False,
                                       do_setup_env=anchor not in envs_done)
    envs_done.add(anchor)
    _AddActionStep(actions_to_add,
                   inputs=action_inputs,
                   outputs=action.get('outputs', []),
                   description=action.get('message', action['action_name']),
                   command=command)
def _WriteMSVSUserFile(project_path, version, spec):
  """Writes a Visual Studio .user file carrying debugger settings.

  Settings come from the target's 'run_as' section when present; otherwise a
  target marked 'test' gets a default gtest invocation.  Targets with
  neither produce no .user file at all.
  """
  if 'run_as' in spec:
    run_as = spec['run_as']
    action = run_as.get('action', [])
    environment = run_as.get('environment', [])
    working_directory = run_as.get('working_directory', '.')
  elif int(spec.get('test', 0)):
    action = ['$(TargetPath)', '--gtest_print_time']
    environment = []
    working_directory = '.'
  else:
    return  # Neither run_as nor test: nothing to write.
  user_file = _CreateMSVSUserFile(project_path, version, spec)
  for config_name, c_data in spec['configurations'].iteritems():
    full_name = _ConfigFullName(config_name, c_data)
    user_file.AddDebugSettings(full_name, action, environment,
                               working_directory)
  user_file.WriteIfChanged()
def _AddCopies(actions_to_add, spec):
  """Translates the spec's 'copies' section into action steps."""
  for inputs, outputs, cmd, description in _GetCopies(spec):
    _AddActionStep(actions_to_add, inputs=inputs, outputs=outputs,
                   description=description, command=cmd)
def _GetCopies(spec):
  """Expands the 'copies' section into (inputs, outputs, cmd, description).

  Directory sources (trailing '/') become recursive xcopy invocations;
  plain files become mkdir-then-copy commands.
  _AddCustomBuildToolForMSVS() will call _FixPath() on the inputs and
  outputs, so the command lines built here apply _FixPath() themselves to
  stay consistent.
  """
  results = []
  for cpy in spec.get('copies', []):
    destination = cpy['destination']
    for src in cpy.get('files', []):
      dst = os.path.join(destination, os.path.basename(src))
      if src.endswith('/'):
        # Directory copy: replicate the whole tree under the destination.
        base_dir, outer_dir = posixpath.split(src[:-1])
        cmd = 'cd "%s" && xcopy /e /f /y "%s" "%s\\%s\\"' % (
            _FixPath(base_dir), outer_dir, _FixPath(dst), outer_dir)
        results.append(([src], ['dummy_copies', dst], cmd,
                        'Copying %s to %s' % (src, dst)))
      else:
        cmd = 'mkdir "%s" 2>nul & set ERRORLEVEL=0 & copy /Y "%s" "%s"' % (
            _FixPath(destination), _FixPath(src), _FixPath(dst))
        results.append(([src], [dst], cmd, 'Copying %s to %s' % (src, dst)))
  return results
def _GetPathDict(root, path):
# |path| will eventually be empty (in the recursive calls) if it was initially
# relative; otherwise it will eventually end up as '\', 'D:\', etc.
if not path or path.endswith(os.sep):
return root
parent, folder = os.path.split(path)
parent_dict = _GetPathDict(root, parent)
if folder not in parent_dict:
parent_dict[folder] = dict()
return parent_dict[folder]
def _DictsToFolders(base_path, bucket, flat):
# Convert to folders recursively.
children = []
for folder, contents in bucket.iteritems():
if type(contents) == dict:
folder_children = _DictsToFolders(os.path.join(base_path, folder),
contents, flat)
if flat:
children += folder_children
else:
folder_children = MSVSNew.MSVSFolder(os.path.join(base_path, folder),
name='(' + folder + ')',
entries=folder_children)
children.append(folder_children)
else:
children.append(contents)
return children
def _CollapseSingles(parent, node):
# Recursively explorer the tree of dicts looking for projects which are
# the sole item in a folder which has the same name as the project. Bring
# such projects up one level.
if (type(node) == dict and
len(node) == 1 and
node.keys()[0] == parent + '.vcproj'):
return node[node.keys()[0]]
if type(node) != dict:
return node
for child in node:
node[child] = _CollapseSingles(child, node[child])
return node
def _GatherSolutionFolders(sln_projects, project_objects, flat):
  """Builds the solution folder hierarchy for |sln_projects|.

  Projects are bucketed into a tree of dicts keyed by directory, the chain
  of single-entry levels at the top is stripped (in practice this removes
  the leading "src/" directory), same-named single-project folders are
  collapsed, and the result is converted into solution entries.
  """
  root = {}
  # Bucket each project into the dict tree by its gyp file's directory.
  for p in sln_projects:
    gyp_file, target = gyp.common.ParseQualifiedTarget(p)[0:2]
    path_dict = _GetPathDict(root, os.path.dirname(gyp_file))
    path_dict[target + '.vcproj'] = project_objects[p]
  # Walk down from the top while there is only a single dict entry.
  while len(root) == 1:
    only_child = next(iter(root))
    if type(root[only_child]) != dict:
      break
    root = root[only_child]
  root = _CollapseSingles('', root)
  # Merge buckets until everything is a root entry.
  return _DictsToFolders('', root, flat)
def _GetPathOfProject(qualified_target, spec, options, msvs_version):
  """Computes the project file path and an optional fixpath prefix.

  Returns (proj_path, fix_prefix): fix_prefix is the relative path from the
  generator-output copy of the project back to the original project
  directory, or None when generator_output is not in effect.
  """
  config = _GetDefaultConfiguration(spec)
  # A hand-maintained project may be referenced instead of generating one.
  filename = config.get('msvs_existing_vcproj')
  if not filename:
    filename = (spec['target_name'] + options.suffix +
                msvs_version.ProjectExtension())
  gyp_path = gyp.common.BuildFile(qualified_target)
  path = os.path.join(os.path.dirname(gyp_path), filename)
  prefix = None
  if options.generator_output:
    original_dir = os.path.dirname(os.path.abspath(path))
    path = os.path.join(options.generator_output, path)
    prefix = gyp.common.RelativePath(original_dir, os.path.dirname(path))
  return path, prefix
def _GetPlatformOverridesOfProject(spec):
  """Maps each solution configuration name to the project configuration
  that should be built for it, honoring per-target msvs_target_platform."""
  overrides = {}
  for config_name, c in spec['configurations'].iteritems():
    base_platform = _ConfigPlatform(c)
    platform = c.get('msvs_target_platform', base_platform)
    fixed = '%s|%s' % (_ConfigBaseName(config_name, base_platform), platform)
    overrides[_ConfigFullName(config_name, c)] = fixed
  return overrides
def _CreateProjectObjects(target_list, target_dicts, options, msvs_version):
  """Create a MSVSProject object for the targets found in target list.
  Arguments:
    target_list: the list of targets to generate project objects for.
    target_dicts: the dictionary of specifications.
    options: global generator options.
    msvs_version: the MSVSVersion object.
  Returns:
    A set of created projects, keyed by target.
  """
  global fixpath_prefix
  # Generate each project.
  projects = {}
  for qualified_target in target_list:
    spec = target_dicts[qualified_target]
    # Only the 'target' toolset is supported by this generator.
    if spec['toolset'] != 'target':
      raise GypError(
          'Multiple toolsets not supported in msvs build (target %s)' %
          qualified_target)
    # NOTE(review): fixpath_prefix is module-level state; presumably it is
    # read by the path-fixing helpers while each project is processed —
    # confirm before refactoring this away.
    proj_path, fixpath_prefix = _GetPathOfProject(qualified_target, spec,
                                                 options, msvs_version)
    guid = _GetGuidOfProject(proj_path, spec)
    overrides = _GetPlatformOverridesOfProject(spec)
    build_file = gyp.common.BuildFile(qualified_target)
    # Create object for this project.
    obj = MSVSNew.MSVSProject(
        proj_path,
        name=spec['target_name'],
        guid=guid,
        spec=spec,
        build_file=build_file,
        config_platform_overrides=overrides,
        fixpath_prefix=fixpath_prefix)
    # Set project toolset if any (MS build only)
    if msvs_version.UsesVcxproj():
      obj.set_msbuild_toolset(
          _GetMsbuildToolsetOfProject(proj_path, spec, msvs_version))
    projects[qualified_target] = obj
  # Set all the dependencies, but not if we are using an external builder like
  # ninja
  # (second pass so every dependency already has a project object to link to).
  for project in projects.values():
    if not project.spec.get('msvs_external_builder'):
      deps = project.spec.get('dependencies', [])
      deps = [projects[d] for d in deps]
      project.set_dependencies(deps)
  return projects
def _InitNinjaFlavor(params, target_list, target_dicts):
  """Initialize targets for the ninja flavor.

  Sets up the variables each target needs to produce an msvs project that
  delegates the actual build to ninja as an external builder.  Values are
  only filled in when a spec has not already set them, so individual specs
  can override the defaults chosen here.

  Arguments:
    params: Params provided to the generator.
    target_list: List of target pairs: 'base/base.gyp:base'.
    target_dicts: Dict of target properties keyed on target pair.
  """
  for qualified_target in target_list:
    spec = target_dicts[qualified_target]
    if spec.get('msvs_external_builder'):
      # Respect an explicitly configured external builder.
      continue
    ninja_exe = spec.get('msvs_path_to_ninja', 'ninja.exe')
    spec['msvs_external_builder'] = 'ninja'
    if not spec.get('msvs_external_builder_out_dir'):
      gyp_file = gyp.common.ParseQualifiedTarget(qualified_target)[0]
      gyp_dir = os.path.dirname(gyp_file)
      configuration = '$(Configuration)'
      if params.get('target_arch') == 'x64':
        configuration += '_x64'
      spec['msvs_external_builder_out_dir'] = os.path.join(
          gyp.common.RelativePath(params['options'].toplevel_dir, gyp_dir),
          ninja_generator.ComputeOutputDir(params),
          configuration)
    if not spec.get('msvs_external_builder_build_cmd'):
      spec['msvs_external_builder_build_cmd'] = [
          ninja_exe,
          '-C',
          '$(OutDir)',
          '$(ProjectName)',
      ]
    if not spec.get('msvs_external_builder_clean_cmd'):
      spec['msvs_external_builder_clean_cmd'] = [
          ninja_exe,
          '-C',
          '$(OutDir)',
          '-tclean',
          '$(ProjectName)',
      ]
def CalculateVariables(default_variables, params):
  """Generated variables that require params to be known."""
  generator_flags = params.get('generator_flags', {})
  # Select the project file format version (defaulting to auto-detection)
  # and stash it so the system is only probed once.
  msvs_version = MSVSVersion.SelectVisualStudioVersion(
      generator_flags.get('msvs_version', 'auto'))
  params['msvs_version'] = msvs_version
  # Expose a variable so conditions can be based on msvs_version.
  default_variables['MSVS_VERSION'] = msvs_version.ShortName()
  # PROCESSOR_ARCHITECTURE reflects only the current process's word size,
  # which under WOW64 is 32-bit even on a 64-bit OS; PROCESSOR_ARCHITEW6432
  # then carries the real system word size, so check both.
  arch = os.environ.get('PROCESSOR_ARCHITECTURE', '')
  wow64_arch = os.environ.get('PROCESSOR_ARCHITEW6432', '')
  if '64' in arch or '64' in wow64_arch:
    default_variables['MSVS_OS_BITS'] = 64
  else:
    default_variables['MSVS_OS_BITS'] = 32
  if gyp.common.GetFlavor(params) == 'ninja':
    default_variables['SHARED_INTERMEDIATE_DIR'] = '$(OutDir)gen'
def PerformBuild(data, configurations, params):
  """Invokes devenv.com to build every requested configuration of each
  generated solution."""
  options = params['options']
  msvs_version = params['msvs_version']
  devenv = os.path.join(msvs_version.path, 'Common7', 'IDE', 'devenv.com')
  for build_file, build_file_dict in data.iteritems():
    build_file_root, build_file_ext = os.path.splitext(build_file)
    if build_file_ext != '.gyp':
      continue
    sln_path = build_file_root + options.suffix + '.sln'
    if options.generator_output:
      sln_path = os.path.join(options.generator_output, sln_path)
    for config in configurations:
      arguments = [devenv, sln_path, '/Build', config]
      print('Building [%s]: %s' % (config, arguments))
      # check_call raises CalledProcessError when the build fails.
      subprocess.check_call(arguments)
def GenerateOutput(target_list, target_dicts, data, params):
  """Generate .sln and .vcproj files.
  This is the entry point for this generator.
  Arguments:
    target_list: List of target pairs: 'base/base.gyp:base'.
    target_dicts: Dict of target properties keyed on target pair.
    data: Dictionary containing per .gyp data.
  """
  global fixpath_prefix
  options = params['options']
  # Get the project file format version back out of where we stashed it in
  # GeneratorCalculatedVariables.
  msvs_version = params['msvs_version']
  generator_flags = params.get('generator_flags', {})
  # Optionally shard targets marked with 'msvs_shard': SHARD_COUNT.
  (target_list, target_dicts) = MSVSUtil.ShardTargets(target_list, target_dicts)
  # Optionally use the large PDB workaround for targets marked with
  # 'msvs_large_pdb': 1.
  (target_list, target_dicts) = MSVSUtil.InsertLargePdbShims(
      target_list, target_dicts, generator_default_variables)
  # Optionally configure each spec to use ninja as the external builder.
  if params.get('flavor') == 'ninja':
    _InitNinjaFlavor(params, target_list, target_dicts)
  # Prepare the set of configurations.
  configs = set()
  for qualified_target in target_list:
    spec = target_dicts[qualified_target]
    for config_name, config in spec['configurations'].iteritems():
      configs.add(_ConfigFullName(config_name, config))
  configs = list(configs)
  # Figure out all the projects that will be generated and their guids
  project_objects = _CreateProjectObjects(target_list, target_dicts, options,
                                          msvs_version)
  # Generate each project.
  # NOTE(review): fixpath_prefix is module-level state; presumably it is
  # consumed by the path-fixing helpers while _GenerateProject runs —
  # confirm before refactoring.
  missing_sources = []
  for project in project_objects.values():
    fixpath_prefix = project.fixpath_prefix
    missing_sources.extend(_GenerateProject(project, options, msvs_version,
                                            generator_flags))
  # Clear the module-level prefix once all projects are generated.
  fixpath_prefix = None
  for build_file in data:
    # Validate build_file extension
    if not build_file.endswith('.gyp'):
      continue
    sln_path = os.path.splitext(build_file)[0] + options.suffix + '.sln'
    if options.generator_output:
      sln_path = os.path.join(options.generator_output, sln_path)
    # Get projects in the solution, and their dependents.
    sln_projects = gyp.common.BuildFileTargets(target_list, build_file)
    sln_projects += gyp.common.DeepDependencyTargets(target_dicts, sln_projects)
    # Create folder hierarchy.
    root_entries = _GatherSolutionFolders(
        sln_projects, project_objects, flat=msvs_version.FlatSolution())
    # Create solution.
    sln = MSVSNew.MSVSSolution(sln_path,
                               entries=root_entries,
                               variants=configs,
                               websiteProperties=False,
                               version=msvs_version)
    sln.Write()
  # Missing sources are either a hard error or a warning depending on the
  # msvs_error_on_missing_sources generator flag.
  if missing_sources:
    error_message = "Missing input files:\n" + \
                    '\n'.join(set(missing_sources))
    if generator_flags.get('msvs_error_on_missing_sources', False):
      raise GypError(error_message)
    else:
      print >> sys.stdout, "Warning: " + error_message
def _GenerateMSBuildFiltersFile(filters_path, source_files,
                                rule_dependencies, extension_to_rule_name):
  """Writes (or deletes) the .filters file.

  Visual Studio uses this file to organize the presentation of source files
  into folders.

  Arguments:
    filters_path: The path of the file to be created.
    source_files: The hierarchical structure of all the sources.
    rule_dependencies: passed through to _AppendFiltersForMSBuild
      (presumably extra dependency files of custom rules — see that helper).
    extension_to_rule_name: A dictionary mapping file extensions to rules.
  """
  filter_group = []
  source_group = []
  _AppendFiltersForMSBuild('', source_files, rule_dependencies,
                           extension_to_rule_name, filter_group, source_group)
  if not filter_group:
    # No filters are needed anymore; drop any stale filters file.
    if os.path.exists(filters_path):
      os.unlink(filters_path)
    return
  content = ['Project',
             {'ToolsVersion': '4.0',
              'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'
             },
             ['ItemGroup'] + filter_group,
             ['ItemGroup'] + source_group
            ]
  easy_xml.WriteXmlIfChanged(content, filters_path, pretty=True, win32=True)
def _AppendFiltersForMSBuild(parent_filter_name, sources, rule_dependencies,
                             extension_to_rule_name,
                             filter_group, source_group):
  """Accumulates the filter and source entries for the filters file.

  Args:
    parent_filter_name: The name of the filter under which the sources are
        found.
    sources: The hierarchy of filters and sources to process.
    rule_dependencies: used to classify sources (see
        _MapFileToMsBuildSourceType).
    extension_to_rule_name: A dictionary mapping file extensions to rules.
    filter_group: The list to which filter entries will be appended.
    source_group: The list to which source entries will be appended.
  """
  for item in sources:
    if isinstance(item, MSVSProject.Filter):
      # A sub-filter: qualify its name with the parent's, record it, and
      # recurse into its contents.
      if parent_filter_name:
        filter_name = '%s\\%s' % (parent_filter_name, item.name)
      else:
        filter_name = item.name
      filter_group.append(
          ['Filter', {'Include': filter_name},
           ['UniqueIdentifier', MSVSNew.MakeGuid(item.name)]])
      _AppendFiltersForMSBuild(filter_name, item.contents,
                               rule_dependencies, extension_to_rule_name,
                               filter_group, source_group)
    else:
      # A plain source: emit an entry tagged with its containing filter.
      _, element = _MapFileToMsBuildSourceType(item, rule_dependencies,
                                               extension_to_rule_name)
      entry = [element, {'Include': item}]
      if parent_filter_name:
        entry.append(['Filter', parent_filter_name])
      source_group.append(entry)
def _MapFileToMsBuildSourceType(source, rule_dependencies,
extension_to_rule_name):
"""Returns the group and element type of the source file.
Arguments:
source: The source file name.
extension_to_rule_name: A dictionary mapping file extensions to rules.
Returns:
A pair of (group this file should be part of, the label of element)
"""
_, ext = os.path.splitext(source)
if ext in extension_to_rule_name:
group = 'rule'
element = extension_to_rule_name[ext]
elif ext in ['.cc', '.cpp', '.c', '.cxx']:
group = 'compile'
element = 'ClCompile'
elif ext in ['.h', '.hxx']:
group = 'include'
element = 'ClInclude'
elif ext == '.rc':
group = 'resource'
element = 'ResourceCompile'
elif ext == '.asm':
group = 'masm'
element = 'MASM'
elif ext == '.idl':
group = 'midl'
element = 'Midl'
elif source in rule_dependencies:
group = 'rule_dependency'
element = 'CustomBuild'
else:
group = 'none'
element = 'None'
return (group, element)
def _GenerateRulesForMSBuild(output_dir, options, spec,
                             sources, excluded_sources,
                             props_files_of_rules, targets_files_of_rules,
                             actions_to_add, rule_dependencies,
                             extension_to_rule_name):
  """Emits the support files for this target's custom build rules.

  MSBuild rules are implemented using three files: an XML file, a .targets
  file and a .props file.  See
  http://blogs.msdn.com/b/vcblog/archive/2010/04/21/quick-help-on-vs2010-custom-build-rule.aspx
  for more details.
  """
  rules = spec.get('rules', [])
  native_rules = []
  external_rules = []
  for rule in rules:
    if int(rule.get('msvs_external_rule', 0)):
      external_rules.append(rule)
    else:
      native_rules.append(rule)
  msbuild_rules = []
  for rule in native_rules:
    if 'action' not in rule and not rule.get('rule_sources', []):
      # A rule with neither an action nor inputs contributes nothing.
      continue
    msbuild_rule = MSBuildRule(rule, spec)
    msbuild_rules.append(msbuild_rule)
    rule_dependencies.update(msbuild_rule.additional_dependencies.split(';'))
    extension_to_rule_name[msbuild_rule.extension] = msbuild_rule.rule_name
  if msbuild_rules:
    base = spec['target_name'] + options.suffix
    props_name = base + '.props'
    targets_name = base + '.targets'
    props_files_of_rules.add(props_name)
    targets_files_of_rules.add(targets_name)
    _GenerateMSBuildRulePropsFile(os.path.join(output_dir, props_name),
                                  msbuild_rules)
    _GenerateMSBuildRuleTargetsFile(os.path.join(output_dir, targets_name),
                                    msbuild_rules)
    _GenerateMSBuildRuleXmlFile(os.path.join(output_dir, base + '.xml'),
                                msbuild_rules)
  if external_rules:
    _GenerateExternalRules(external_rules, output_dir, spec,
                           sources, options, actions_to_add)
  _AdjustSourcesForRules(rules, sources, excluded_sources, True)
class MSBuildRule(object):
  """Holds the names and values needed to emit one MSBuild build rule.

  Attributes:
    rule_name: The rule name, sanitized to use in XML.
    target_name: The name of the target.
    after_targets: The name of the AfterTargets element.
    before_targets: The name of the BeforeTargets element.
    depends_on: The name of the DependsOn element.
    compute_output: The name of the ComputeOutput element.
    dirs_to_make: The name of the DirsToMake element.
    inputs: The name of the _inputs element.
    tlog: The name of the _tlog element.
    extension: The extension this rule applies to.
    description: The message displayed when this rule is invoked.
    additional_dependencies: A string listing additional dependencies.
    outputs: The outputs of this rule.
    command: The command used to run the rule.
  """

  def __init__(self, rule, spec):
    self.display_name = rule['rule_name']
    # XML element names may only contain word characters.
    self.rule_name = re.sub(r'\W', '_', self.display_name)
    # The derived element names follow the pattern produced by the Visual
    # Studio 2008-to-2010 project conversion; whether VS2010 depends on the
    # exact names is unknown.
    self.target_name = '_' + self.rule_name
    self.after_targets = self.rule_name + 'AfterTargets'
    self.before_targets = self.rule_name + 'BeforeTargets'
    self.depends_on = self.rule_name + 'DependsOn'
    self.compute_output = 'Compute%sOutput' % self.rule_name
    self.dirs_to_make = self.rule_name + 'DirsToMake'
    self.inputs = self.rule_name + '_inputs'
    self.tlog = self.rule_name + '_tlog'
    extension = rule['extension']
    if not extension.startswith('.'):
      extension = '.' + extension
    self.extension = extension
    self.description = MSVSSettings.ConvertVCMacrosToMSBuild(
        rule.get('message', self.rule_name))
    self.additional_dependencies = ';'.join(
        MSVSSettings.ConvertVCMacrosToMSBuild(dep)
        for dep in _FixPaths(rule.get('inputs', [])))
    self.outputs = ';'.join(
        MSVSSettings.ConvertVCMacrosToMSBuild(out)
        for out in _FixPaths(rule.get('outputs', [])))
    self.command = MSVSSettings.ConvertVCMacrosToMSBuild(
        _BuildCommandLineForRule(spec, rule, has_input_path=True,
                                 do_setup_env=True))
def _GenerateMSBuildRulePropsFile(props_path, msbuild_rules):
  """Generate the .props file."""
  # The .props file supplies per-rule defaults: unless the project overrides
  # the <Rule>BeforeTargets/<Rule>AfterTargets properties (and the
  # configuration is not Makefile-type), the rule runs before Midl and after
  # CustomBuild.  It also defines the rule's command line template, outputs,
  # description and additional dependencies.
  content = ['Project',
             {'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'}]
  for rule in msbuild_rules:
    content.extend([
        ['PropertyGroup',
         {'Condition': "'$(%s)' == '' and '$(%s)' == '' and "
                       "'$(ConfigurationType)' != 'Makefile'" % (rule.before_targets,
                       rule.after_targets)
         },
         [rule.before_targets, 'Midl'],
         [rule.after_targets, 'CustomBuild'],
        ],
        ['PropertyGroup',
         [rule.depends_on,
          {'Condition': "'$(ConfigurationType)' != 'Makefile'"},
          '_SelectedFiles;$(%s)' % rule.depends_on
         ],
        ],
        ['ItemDefinitionGroup',
         [rule.rule_name,
          ['CommandLineTemplate', rule.command],
          ['Outputs', rule.outputs],
          ['ExecutionDescription', rule.description],
          ['AdditionalDependencies', rule.additional_dependencies],
         ],
        ]
    ])
  easy_xml.WriteXmlIfChanged(content, props_path, pretty=True, win32=True)
def _GenerateMSBuildRuleTargetsFile(targets_path, msbuild_rules):
  """Generate the .targets file."""
  # The .targets file wires each custom rule into the MSBuild build graph:
  # it advertises the rule's item type, registers a XamlTaskFactory-backed
  # task for it (defined by the companion .xml file), and emits the Target
  # plus a Compute<Rule>Output target that run the rule's command line and
  # record tlog dependency information for incremental builds.
  content = ['Project',
             {'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'
             }
            ]
  item_group = [
      'ItemGroup',
      ['PropertyPageSchema',
       {'Include': '$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'}
      ]
    ]
  # Make each rule's item type selectable in the project system.
  for rule in msbuild_rules:
    item_group.append(
        ['AvailableItemName',
         {'Include': rule.rule_name},
         ['Targets', rule.target_name],
        ])
  content.append(item_group)
  # Register one XamlTaskFactory task per rule.
  for rule in msbuild_rules:
    content.append(
        ['UsingTask',
         {'TaskName': rule.rule_name,
          'TaskFactory': 'XamlTaskFactory',
          'AssemblyName': 'Microsoft.Build.Tasks.v4.0'
         },
         ['Task', '$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'],
        ])
  # Emit the Target / Compute<Rule>Output pair for each rule.
  for rule in msbuild_rules:
    rule_name = rule.rule_name
    target_outputs = '%%(%s.Outputs)' % rule_name
    target_inputs = ('%%(%s.Identity);%%(%s.AdditionalDependencies);'
                     '$(MSBuildProjectFile)') % (rule_name, rule_name)
    rule_inputs = '%%(%s.Identity)' % rule_name
    extension_condition = ("'%(Extension)'=='.obj' or "
                           "'%(Extension)'=='.res' or "
                           "'%(Extension)'=='.rsc' or "
                           "'%(Extension)'=='.lib'")
    # When specific files are selected in the IDE, drop the rule items that
    # are not part of the selection.
    remove_section = [
        'ItemGroup',
        {'Condition': "'@(SelectedFiles)' != ''"},
        [rule_name,
         {'Remove': '@(%s)' % rule_name,
          'Condition': "'%(Identity)' != '@(SelectedFiles)'"
         }
        ]
      ]
    # Collect each item's extra dependencies as <rule>_inputs items.
    inputs_section = [
        'ItemGroup',
        [rule.inputs, {'Include': '%%(%s.AdditionalDependencies)' % rule_name}]
      ]
    # Build <rule>_tlog items pairing outputs with their source and inputs;
    # consumed below when writing the .read/.write tlog files.
    logging_section = [
        'ItemGroup',
        [rule.tlog,
         {'Include': '%%(%s.Outputs)' % rule_name,
          'Condition': ("'%%(%s.Outputs)' != '' and "
                        "'%%(%s.ExcludedFromBuild)' != 'true'" %
                        (rule_name, rule_name))
         },
         ['Source', "@(%s, '|')" % rule_name],
         ['Inputs', "@(%s -> '%%(Fullpath)', ';')" % rule.inputs],
        ],
      ]
    # Announce the rule's ExecutionDescription before running it.
    message_section = [
        'Message',
        {'Importance': 'High',
         'Text': '%%(%s.ExecutionDescription)' % rule_name
        }
      ]
    write_tlog_section = [
        'WriteLinesToFile',
        {'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
                      "'true'" % (rule.tlog, rule.tlog),
         'File': '$(IntDir)$(ProjectName).write.1.tlog',
         'Lines': "^%%(%s.Source);@(%s->'%%(Fullpath)')" % (rule.tlog,
                                                            rule.tlog)
        }
      ]
    read_tlog_section = [
        'WriteLinesToFile',
        {'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
                      "'true'" % (rule.tlog, rule.tlog),
         'File': '$(IntDir)$(ProjectName).read.1.tlog',
         'Lines': "^%%(%s.Source);%%(%s.Inputs)" % (rule.tlog, rule.tlog)
        }
      ]
    # The actual task invocation for items not excluded from the build.
    command_and_input_section = [
        rule_name,
        {'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
                      "'true'" % (rule_name, rule_name),
         'EchoOff': 'true',
         'StandardOutputImportance': 'High',
         'StandardErrorImportance': 'High',
         'CommandLineTemplate': '%%(%s.CommandLineTemplate)' % rule_name,
         'AdditionalOptions': '%%(%s.AdditionalOptions)' % rule_name,
         'Inputs': rule_inputs
        }
      ]
    content.extend([
        ['Target',
         {'Name': rule.target_name,
          'BeforeTargets': '$(%s)' % rule.before_targets,
          'AfterTargets': '$(%s)' % rule.after_targets,
          'Condition': "'@(%s)' != ''" % rule_name,
          'DependsOnTargets': '$(%s);%s' % (rule.depends_on,
                                            rule.compute_output),
          'Outputs': target_outputs,
          'Inputs': target_inputs
         },
         remove_section,
         inputs_section,
         logging_section,
         message_section,
         write_tlog_section,
         read_tlog_section,
         command_and_input_section,
        ],
        ['PropertyGroup',
         ['ComputeLinkInputsTargets',
          '$(ComputeLinkInputsTargets);',
          '%s;' % rule.compute_output
         ],
         ['ComputeLibInputsTargets',
          '$(ComputeLibInputsTargets);',
          '%s;' % rule.compute_output
         ],
        ],
        # Compute<Rule>Output creates the output directories and feeds
        # linkable outputs (.obj/.res/.rsc/.lib) to Link/Lib/ImpLib.
        ['Target',
         {'Name': rule.compute_output,
          'Condition': "'@(%s)' != ''" % rule_name
         },
         ['ItemGroup',
          [rule.dirs_to_make,
           {'Condition': "'@(%s)' != '' and "
            "'%%(%s.ExcludedFromBuild)' != 'true'" % (rule_name, rule_name),
            'Include': '%%(%s.Outputs)' % rule_name
           }
          ],
          ['Link',
           {'Include': '%%(%s.Identity)' % rule.dirs_to_make,
            'Condition': extension_condition
           }
          ],
          ['Lib',
           {'Include': '%%(%s.Identity)' % rule.dirs_to_make,
            'Condition': extension_condition
           }
          ],
          ['ImpLib',
           {'Include': '%%(%s.Identity)' % rule.dirs_to_make,
            'Condition': extension_condition
           }
          ],
         ],
         ['MakeDir',
          {'Directories': ("@(%s->'%%(RootDir)%%(Directory)')" %
                           rule.dirs_to_make)
          }
         ]
        ],
      ])
  easy_xml.WriteXmlIfChanged(content, targets_path, pretty=True, win32=True)
def _GenerateMSBuildRuleXmlFile(xml_path, msbuild_rules):
  """Generates the .xml property-page schema file for the rules.

  The Rule/ItemType/FileExtension/ContentType definitions here drive the
  property pages Visual Studio shows for each rule's item type, and they
  describe the properties consumed by the XamlTaskFactory task declared in
  the companion .targets file.
  """
  # Generate the .xml file
  content = [
      'ProjectSchemaDefinitions',
      {'xmlns': ('clr-namespace:Microsoft.Build.Framework.XamlTypes;'
                 'assembly=Microsoft.Build.Framework'),
       'xmlns:x': 'http://schemas.microsoft.com/winfx/2006/xaml',
       'xmlns:sys': 'clr-namespace:System;assembly=mscorlib',
       'xmlns:transformCallback':
       'Microsoft.Cpp.Dev10.ConvertPropertyCallback'
      }
    ]
  # One Rule element per msbuild rule: General and Command Line categories,
  # plus properties (Inputs, Outputs, before/after targets, additional
  # options) surfaced in the IDE; followed by the ItemType, FileExtension
  # and ContentType glue that associates the rule with its extension.
  for rule in msbuild_rules:
    content.extend([
        ['Rule',
         {'Name': rule.rule_name,
          'PageTemplate': 'tool',
          'DisplayName': rule.display_name,
          'Order': '200'
         },
         ['Rule.DataSource',
          ['DataSource',
           {'Persistence': 'ProjectFile',
            'ItemType': rule.rule_name
           }
          ]
         ],
         ['Rule.Categories',
          ['Category',
           {'Name': 'General'},
           ['Category.DisplayName',
            ['sys:String', 'General'],
           ],
          ],
          ['Category',
           {'Name': 'Command Line',
            'Subtype': 'CommandLine'
           },
           ['Category.DisplayName',
            ['sys:String', 'Command Line'],
           ],
          ],
         ],
         ['StringListProperty',
          {'Name': 'Inputs',
           'Category': 'Command Line',
           'IsRequired': 'true',
           'Switch': ' '
          },
          ['StringListProperty.DataSource',
           ['DataSource',
            {'Persistence': 'ProjectFile',
             'ItemType': rule.rule_name,
             'SourceType': 'Item'
            }
           ]
          ],
         ],
         ['StringProperty',
          {'Name': 'CommandLineTemplate',
           'DisplayName': 'Command Line',
           'Visible': 'False',
           'IncludeInCommandLine': 'False'
          }
         ],
         ['DynamicEnumProperty',
          {'Name': rule.before_targets,
           'Category': 'General',
           'EnumProvider': 'Targets',
           'IncludeInCommandLine': 'False'
          },
          ['DynamicEnumProperty.DisplayName',
           ['sys:String', 'Execute Before'],
          ],
          ['DynamicEnumProperty.Description',
           ['sys:String', 'Specifies the targets for the build customization'
            ' to run before.'
           ],
          ],
          ['DynamicEnumProperty.ProviderSettings',
           ['NameValuePair',
            {'Name': 'Exclude',
             'Value': '^%s|^Compute' % rule.before_targets
            }
           ]
          ],
          ['DynamicEnumProperty.DataSource',
           ['DataSource',
            {'Persistence': 'ProjectFile',
             'HasConfigurationCondition': 'true'
            }
           ]
          ],
         ],
         ['DynamicEnumProperty',
          {'Name': rule.after_targets,
           'Category': 'General',
           'EnumProvider': 'Targets',
           'IncludeInCommandLine': 'False'
          },
          ['DynamicEnumProperty.DisplayName',
           ['sys:String', 'Execute After'],
          ],
          ['DynamicEnumProperty.Description',
           ['sys:String', ('Specifies the targets for the build customization'
                           ' to run after.')
           ],
          ],
          ['DynamicEnumProperty.ProviderSettings',
           ['NameValuePair',
            {'Name': 'Exclude',
             'Value': '^%s|^Compute' % rule.after_targets
            }
           ]
          ],
          ['DynamicEnumProperty.DataSource',
           ['DataSource',
            {'Persistence': 'ProjectFile',
             'ItemType': '',
             'HasConfigurationCondition': 'true'
            }
           ]
          ],
         ],
         ['StringListProperty',
          {'Name': 'Outputs',
           'DisplayName': 'Outputs',
           'Visible': 'False',
           'IncludeInCommandLine': 'False'
          }
         ],
         ['StringProperty',
          {'Name': 'ExecutionDescription',
           'DisplayName': 'Execution Description',
           'Visible': 'False',
           'IncludeInCommandLine': 'False'
          }
         ],
         ['StringListProperty',
          {'Name': 'AdditionalDependencies',
           'DisplayName': 'Additional Dependencies',
           'IncludeInCommandLine': 'False',
           'Visible': 'false'
          }
         ],
         ['StringProperty',
          {'Subtype': 'AdditionalOptions',
           'Name': 'AdditionalOptions',
           'Category': 'Command Line'
          },
          ['StringProperty.DisplayName',
           ['sys:String', 'Additional Options'],
          ],
          ['StringProperty.Description',
           ['sys:String', 'Additional Options'],
          ],
         ],
        ],
        ['ItemType',
         {'Name': rule.rule_name,
          'DisplayName': rule.display_name
         }
        ],
        ['FileExtension',
         {'Name': '*' + rule.extension,
          'ContentType': rule.rule_name
         }
        ],
        ['ContentType',
         {'Name': rule.rule_name,
          'DisplayName': '',
          'ItemType': rule.rule_name
         }
        ]
    ])
  easy_xml.WriteXmlIfChanged(content, xml_path, pretty=True, win32=True)
def _GetConfigurationAndPlatform(name, settings):
configuration = name.rsplit('_', 1)[0]
platform = settings.get('msvs_configuration_platform', 'Win32')
return (configuration, platform)
def _GetConfigurationCondition(name, settings):
  """Returns the MSBuild Condition string selecting this configuration."""
  configuration, platform = _GetConfigurationAndPlatform(name, settings)
  return (r"'$(Configuration)|$(Platform)'=='%s|%s'" %
          (configuration, platform))
def _GetMSBuildProjectConfigurations(configurations):
  """Builds the ProjectConfigurations ItemGroup for a .vcxproj."""
  group = ['ItemGroup', {'Label': 'ProjectConfigurations'}]
  for name, settings in sorted(configurations.iteritems()):
    configuration, platform = _GetConfigurationAndPlatform(name, settings)
    group.append(
        ['ProjectConfiguration',
         {'Include': '%s|%s' % (configuration, platform)},
         ['Configuration', configuration],
         ['Platform', platform]])
  return [group]
def _GetMSBuildGlobalProperties(spec, guid, gyp_file_name):
namespace = os.path.splitext(gyp_file_name)[0]
properties = [
['PropertyGroup', {'Label': 'Globals'},
['ProjectGuid', guid],
['Keyword', 'Win32Proj'],
['RootNamespace', namespace],
['IgnoreWarnCompileDuplicatedFilename', 'true'],
]
]
if os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or \
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64':
properties[0].append(['PreferredToolArchitecture', 'x64'])
if spec.get('msvs_enable_winrt'):
properties[0].append(['DefaultLanguage', 'en-US'])
properties[0].append(['AppContainerApplication', 'true'])
if spec.get('msvs_application_type_revision'):
app_type_revision = spec.get('msvs_application_type_revision')
properties[0].append(['ApplicationTypeRevision', app_type_revision])
else:
properties[0].append(['ApplicationTypeRevision', '8.1'])
if spec.get('msvs_target_platform_version'):
target_platform_version = spec.get('msvs_target_platform_version')
properties[0].append(['WindowsTargetPlatformVersion',
target_platform_version])
if spec.get('msvs_target_platform_minversion'):
target_platform_minversion = spec.get('msvs_target_platform_minversion')
properties[0].append(['WindowsTargetPlatformMinVersion',
target_platform_minversion])
else:
properties[0].append(['WindowsTargetPlatformMinVersion',
target_platform_version])
if spec.get('msvs_enable_winphone'):
properties[0].append(['ApplicationType', 'Windows Phone'])
else:
properties[0].append(['ApplicationType', 'Windows Store'])
return properties
def _GetMSBuildConfigurationDetails(spec, build_file):
  """Builds the per-configuration ConfigurationType/CharacterSet
  PropertyGroup entries."""
  properties = {}
  for name, settings in spec['configurations'].iteritems():
    msbuild_attributes = _GetMSBuildAttributes(spec, settings, build_file)
    condition = _GetConfigurationCondition(name, settings)
    _AddConditionalProperty(properties, condition, 'ConfigurationType',
                            msbuild_attributes['ConfigurationType'])
    character_set = msbuild_attributes.get('CharacterSet')
    # CharacterSet is skipped entirely for msvs_enable_winrt targets.
    if character_set and 'msvs_enable_winrt' not in spec:
      _AddConditionalProperty(properties, condition, 'CharacterSet',
                              character_set)
  return _GetMSBuildPropertyGroup(spec, 'Configuration', properties)
def _GetMSBuildLocalProperties(msbuild_toolset):
# Currently the only local property we support is PlatformToolset
properties = {}
if msbuild_toolset:
properties = [
['PropertyGroup', {'Label': 'Locals'},
['PlatformToolset', msbuild_toolset],
]
]
return properties
def _GetMSBuildPropertySheets(configurations):
  """Returns ImportGroup specs importing property sheets.

  The default per-user property sheet is always imported.  Configurations
  that set 'msbuild_props' additionally import the listed .props files,
  which forces one conditional ImportGroup per configuration.

  Arguments:
    configurations: The 'configurations' dict of the target spec.
  Returns:
    A list of 'ImportGroup' element specs.
  """
  user_props = r'$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props'
  additional_props = {}
  props_specified = False
  for name, settings in sorted(configurations.iteritems()):
    configuration = _GetConfigurationCondition(name, settings)
    # 'in' replaces the deprecated dict.has_key().
    if 'msbuild_props' in settings:
      additional_props[configuration] = _FixPaths(settings['msbuild_props'])
      props_specified = True
    else:
      additional_props[configuration] = ''
  if not props_specified:
    # No configuration adds extra sheets: a single unconditional
    # ImportGroup suffices.
    return [
        ['ImportGroup',
         {'Label': 'PropertySheets'},
         ['Import',
          {'Project': user_props,
           'Condition': "exists('%s')" % user_props,
           'Label': 'LocalAppDataPlatform'
          }
         ]
        ]
    ]
  else:
    sheets = []
    for condition, props in additional_props.iteritems():
      import_group = [
        'ImportGroup',
        {'Label': 'PropertySheets',
         'Condition': condition
        },
        ['Import',
         {'Project': user_props,
          'Condition': "exists('%s')" % user_props,
          'Label': 'LocalAppDataPlatform'
         }
        ]
      ]
      for props_file in props:
        import_group.append(['Import', {'Project':props_file}])
      sheets.append(import_group)
    return sheets
def _ConvertMSVSBuildAttributes(spec, config, build_file):
  """Converts MSVS configuration attributes to their MSBuild equivalents.

  Directory attributes get macro conversion and a guaranteed trailing
  backslash; CharacterSet and ConfigurationType are mapped from their
  numeric MSVS encodings.  Unknown attributes produce a warning and are
  dropped from the result.

  Arguments:
    spec: The target project dict.
    config: One entry of spec['configurations'].
    build_file: The path of the gyp file the target comes from.
  Returns:
    A dict of MSBuild attribute names to values.
  """
  config_type = _GetMSVSConfigurationType(spec, build_file)
  msvs_attributes = _GetMSVSAttributes(spec, config, config_type)
  msbuild_attributes = {}
  for a in msvs_attributes:
    if a in ['IntermediateDirectory', 'OutputDirectory']:
      directory = MSVSSettings.ConvertVCMacrosToMSBuild(msvs_attributes[a])
      if not directory.endswith('\\'):
        # MSBuild expects directory values to end in a backslash.
        directory += '\\'
      msbuild_attributes[a] = directory
    elif a == 'CharacterSet':
      msbuild_attributes[a] = _ConvertMSVSCharacterSet(msvs_attributes[a])
    elif a == 'ConfigurationType':
      msbuild_attributes[a] = _ConvertMSVSConfigurationType(msvs_attributes[a])
    else:
      print 'Warning: Do not know how to convert MSVS attribute ' + a
  return msbuild_attributes
def _ConvertMSVSCharacterSet(char_set):
if char_set.isdigit():
char_set = {
'0': 'MultiByte',
'1': 'Unicode',
'2': 'MultiByte',
}[char_set]
return char_set
def _ConvertMSVSConfigurationType(config_type):
if config_type.isdigit():
config_type = {
'1': 'Application',
'2': 'DynamicLibrary',
'4': 'StaticLibrary',
'10': 'Utility'
}[config_type]
return config_type
def _GetMSBuildAttributes(spec, config, build_file):
  """Returns the MSBuild configuration attributes for one configuration.

  Converts MSVS-style attributes when the configuration does not provide
  'msbuild_configuration_attributes', then fills in defaults for output
  directories, target name/extension, and external-builder overrides.

  Arguments:
    spec: The target project dict.
    config: One entry of spec['configurations'].
    build_file: The path of the gyp file the target comes from.
  Returns:
    A dict of MSBuild attribute names to values.
  """
  if 'msbuild_configuration_attributes' not in config:
    msbuild_attributes = _ConvertMSVSBuildAttributes(spec, config, build_file)
  else:
    config_type = _GetMSVSConfigurationType(spec, build_file)
    config_type = _ConvertMSVSConfigurationType(config_type)
    msbuild_attributes = config.get('msbuild_configuration_attributes', {})
    msbuild_attributes.setdefault('ConfigurationType', config_type)
    output_dir = msbuild_attributes.get('OutputDirectory',
                                        '$(SolutionDir)$(Configuration)')
    msbuild_attributes['OutputDirectory'] = _FixPath(output_dir) + '\\'
    if 'IntermediateDirectory' not in msbuild_attributes:
      intermediate = _FixPath('$(Configuration)') + '\\'
      msbuild_attributes['IntermediateDirectory'] = intermediate
    if 'CharacterSet' in msbuild_attributes:
      msbuild_attributes['CharacterSet'] = _ConvertMSVSCharacterSet(
          msbuild_attributes['CharacterSet'])
  if 'TargetName' not in msbuild_attributes:
    prefix = spec.get('product_prefix', '')
    product_name = spec.get('product_name', '$(ProjectName)')
    target_name = prefix + product_name
    msbuild_attributes['TargetName'] = target_name
  if 'TargetExt' not in msbuild_attributes and 'product_extension' in spec:
    ext = spec.get('product_extension')
    msbuild_attributes['TargetExt'] = '.' + ext
  if spec.get('msvs_external_builder'):
    # External builders write into their own output directory.
    external_out_dir = spec.get('msvs_external_builder_out_dir', '.')
    msbuild_attributes['OutputDirectory'] = _FixPath(external_out_dir) + '\\'
  # Make sure that 'TargetPath' matches 'Lib.OutputFile' or 'Link.OutputFile'
  # (depending on the tool used) to avoid MSB8012 warning.
  msbuild_tool_map = {
      'executable': 'Link',
      'shared_library': 'Link',
      'loadable_module': 'Link',
      'static_library': 'Lib',
  }
  msbuild_tool = msbuild_tool_map.get(spec['type'])
  if msbuild_tool:
    msbuild_settings = config['finalized_msbuild_settings']
    out_file = msbuild_settings[msbuild_tool].get('OutputFile')
    if out_file:
      msbuild_attributes['TargetPath'] = _FixPath(out_file)
    target_ext = msbuild_settings[msbuild_tool].get('TargetExt')
    if target_ext:
      msbuild_attributes['TargetExt'] = target_ext
  return msbuild_attributes
def _GetMSBuildConfigurationGlobalProperties(spec, configurations, build_file):
  """Returns an unlabeled PropertyGroup with per-configuration globals.

  Emits IntDir/OutDir/TargetName (plus TargetExt, TargetPath and
  ExecutablePath when present) values, conditional on each configuration,
  together with the formatted settings of the global (tool-less) group.

  Arguments:
    spec: The target project dict.
    configurations: The 'configurations' dict of the target spec.
    build_file: The path of the gyp file the target comes from.
  Returns:
    A list with a single 'PropertyGroup' element spec.
  """
  # TODO(jeanluc) We could optimize out the following and do it only if
  # there are actions.
  # TODO(jeanluc) Handle the equivalent of setting 'CYGWIN=nontsec'.
  new_paths = []
  cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])[0]
  if cygwin_dirs:
    cyg_path = '$(MSBuildProjectDirectory)\\%s\\bin\\' % _FixPath(cygwin_dirs)
    new_paths.append(cyg_path)
    # TODO(jeanluc) Change the convention to have both a cygwin_dir and a
    # python_dir.
    python_path = cyg_path.replace('cygwin\\bin', 'python_26')
    new_paths.append(python_path)
  if new_paths:
    new_paths = '$(ExecutablePath);' + ';'.join(new_paths)
  properties = {}
  for (name, configuration) in sorted(configurations.iteritems()):
    condition = _GetConfigurationCondition(name, configuration)
    attributes = _GetMSBuildAttributes(spec, configuration, build_file)
    msbuild_settings = configuration['finalized_msbuild_settings']
    _AddConditionalProperty(properties, condition, 'IntDir',
                            attributes['IntermediateDirectory'])
    _AddConditionalProperty(properties, condition, 'OutDir',
                            attributes['OutputDirectory'])
    _AddConditionalProperty(properties, condition, 'TargetName',
                            attributes['TargetName'])
    # Add 'TargetExt' only once per configuration.  Registering the same
    # property/value pair a second time would record each condition twice
    # and defeat the "same for all configurations" collapsing in
    # _GetMSBuildPropertyGroup, duplicating the emitted elements.
    if 'TargetExt' in attributes:
      _AddConditionalProperty(properties, condition, 'TargetExt',
                              attributes['TargetExt'])
    if attributes.get('TargetPath'):
      _AddConditionalProperty(properties, condition, 'TargetPath',
                              attributes['TargetPath'])
    if new_paths:
      _AddConditionalProperty(properties, condition, 'ExecutablePath',
                              new_paths)
    tool_settings = msbuild_settings.get('', {})
    for name, value in sorted(tool_settings.iteritems()):
      formatted_value = _GetValueFormattedForMSBuild('', name, value)
      _AddConditionalProperty(properties, condition, name, formatted_value)
  return _GetMSBuildPropertyGroup(spec, None, properties)
def _AddConditionalProperty(properties, condition, name, value):
"""Adds a property / conditional value pair to a dictionary.
Arguments:
properties: The dictionary to be modified. The key is the name of the
property. The value is itself a dictionary; its key is the value and
the value a list of condition for which this value is true.
condition: The condition under which the named property has the value.
name: The name of the property.
value: The value of the property.
"""
if name not in properties:
properties[name] = {}
values = properties[name]
if value not in values:
values[value] = []
conditions = values[value]
conditions.append(condition)
# Regex for msvs variable references ( i.e. $(FOO) ).
# Group 1 captures the identifier between the parentheses.
MSVS_VARIABLE_REFERENCE = re.compile(r'\$\(([a-zA-Z_][a-zA-Z0-9_]*)\)')
def _GetMSBuildPropertyGroup(spec, label, properties):
  """Returns a PropertyGroup definition for the specified properties.

  Values shared by every configuration are emitted once, unconditionally;
  all other values are emitted once per condition.  Properties are ordered
  so a variable is defined before any property that references it.

  Arguments:
    spec: The target project dict.
    label: An optional label for the PropertyGroup.
    properties: The dictionary to be converted.  The key is the name of the
        property.  The value is itself a dictionary; its key is the value and
        the value a list of condition for which this value is true.
  """
  group = ['PropertyGroup']
  if label:
    group.append({'Label': label})
  num_configurations = len(spec['configurations'])
  def GetEdges(node):
    # Use a definition of edges such that user_of_variable -> used_varible.
    # This happens to be easier in this case, since a variable's
    # definition contains all variables it references in a single string.
    edges = set()
    for value in sorted(properties[node].keys()):
      # Add to edges all $(...) references to variables.
      #
      # Variable references that refer to names not in properties are excluded
      # These can exist for instance to refer built in definitions like
      # $(SolutionDir).
      #
      # Self references are ignored. Self reference is used in a few places to
      # append to the default value. I.e. PATH=$(PATH);other_path
      edges.update(set([v for v in MSVS_VARIABLE_REFERENCE.findall(value)
                        if v in properties and v != node]))
    return edges
  properties_ordered = gyp.common.TopologicallySorted(
      properties.keys(), GetEdges)
  # Walk properties in the reverse of a topological sort on
  # user_of_variable -> used_variable as this ensures variables are
  # defined before they are used.
  # NOTE: reverse(topsort(DAG)) = topsort(reverse_edges(DAG))
  for name in reversed(properties_ordered):
    values = properties[name]
    for value, conditions in sorted(values.iteritems()):
      if len(conditions) == num_configurations:
        # If the value is the same all configurations,
        # just add one unconditional entry.
        group.append([name, value])
      else:
        for condition in conditions:
          group.append([name, {'Condition': condition}, value])
  return [group]
def _GetMSBuildToolSettingsSections(spec, configurations):
  """Returns ItemDefinitionGroup specs with per-tool settings.

  Each configuration contributes one conditional ItemDefinitionGroup
  holding the formatted settings of every named tool.

  Arguments:
    spec: The target project dict (not referenced here; kept for symmetry
        with the sibling _GetMSBuild* helpers).
    configurations: The 'configurations' dict of the target spec.
  Returns:
    A list of 'ItemDefinitionGroup' element specs.
  """
  groups = []
  for (name, configuration) in sorted(configurations.iteritems()):
    msbuild_settings = configuration['finalized_msbuild_settings']
    group = ['ItemDefinitionGroup',
             {'Condition': _GetConfigurationCondition(name, configuration)}
            ]
    for tool_name, tool_settings in sorted(msbuild_settings.iteritems()):
      # Skip the tool named '' which is a holder of global settings handled
      # by _GetMSBuildConfigurationGlobalProperties.
      if tool_name:
        if tool_settings:
          tool = [tool_name]
          for name, value in sorted(tool_settings.iteritems()):
            formatted_value = _GetValueFormattedForMSBuild(tool_name, name,
                                                           value)
            tool.append([name, formatted_value])
          group.append(tool)
    groups.append(group)
  return groups
def _FinalizeMSBuildSettings(spec, configuration):
  """Converts and augments per-configuration settings into MSBuild form.

  Uses the configuration's native 'msbuild_settings' when present,
  otherwise converts its 'msvs_settings'; then folds in include dirs,
  libraries, defines, precompiled-header state, etc., and stores the
  result on the configuration as 'finalized_msbuild_settings'.

  Arguments:
    spec: The target project dict.
    configuration: One entry of spec['configurations']; modified in place.
  """
  if 'msbuild_settings' in configuration:
    converted = False
    msbuild_settings = configuration['msbuild_settings']
    MSVSSettings.ValidateMSBuildSettings(msbuild_settings)
  else:
    converted = True
    msvs_settings = configuration.get('msvs_settings', {})
    msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(msvs_settings)
  include_dirs, midl_include_dirs, resource_include_dirs = \
      _GetIncludeDirs(configuration)
  libraries = _GetLibraries(spec)
  library_dirs = _GetLibraryDirs(configuration)
  out_file, _, msbuild_tool = _GetOutputFilePathAndTool(spec, msbuild=True)
  target_ext = _GetOutputTargetExt(spec)
  defines = _GetDefines(configuration)
  if converted:
    # Visual Studio 2010 has TR1
    defines = [d for d in defines if d != '_HAS_TR1=0']
    # Warn of ignored settings
    ignored_settings = ['msvs_tool_files']
    for ignored_setting in ignored_settings:
      value = configuration.get(ignored_setting)
      if value:
        print ('Warning: The automatic conversion to MSBuild does not handle '
               '%s. Ignoring setting of %s' % (ignored_setting, str(value)))
  defines = [_EscapeCppDefineForMSBuild(d) for d in defines]
  disabled_warnings = _GetDisabledWarnings(configuration)
  prebuild = configuration.get('msvs_prebuild')
  postbuild = configuration.get('msvs_postbuild')
  def_file = _GetModuleDefinition(spec)
  precompiled_header = configuration.get('msvs_precompiled_header')
  # Add the information to the appropriate tool
  # TODO(jeanluc) We could optimize and generate these settings only if
  # the corresponding files are found, e.g. don't generate ResourceCompile
  # if you don't have any resources.
  _ToolAppend(msbuild_settings, 'ClCompile',
              'AdditionalIncludeDirectories', include_dirs)
  _ToolAppend(msbuild_settings, 'Midl',
              'AdditionalIncludeDirectories', midl_include_dirs)
  _ToolAppend(msbuild_settings, 'ResourceCompile',
              'AdditionalIncludeDirectories', resource_include_dirs)
  # Add in libraries, note that even for empty libraries, we want this
  # set, to prevent inheriting default libraries from the enviroment.
  _ToolSetOrAppend(msbuild_settings, 'Link', 'AdditionalDependencies',
                   libraries)
  _ToolAppend(msbuild_settings, 'Link', 'AdditionalLibraryDirectories',
              library_dirs)
  if out_file:
    _ToolAppend(msbuild_settings, msbuild_tool, 'OutputFile', out_file,
                only_if_unset=True)
  if target_ext:
    _ToolAppend(msbuild_settings, msbuild_tool, 'TargetExt', target_ext,
                only_if_unset=True)
  # Add defines.
  _ToolAppend(msbuild_settings, 'ClCompile',
              'PreprocessorDefinitions', defines)
  _ToolAppend(msbuild_settings, 'ResourceCompile',
              'PreprocessorDefinitions', defines)
  # Add disabled warnings.
  _ToolAppend(msbuild_settings, 'ClCompile',
              'DisableSpecificWarnings', disabled_warnings)
  # Turn on precompiled headers if appropriate.
  if precompiled_header:
    precompiled_header = os.path.split(precompiled_header)[1]
    _ToolAppend(msbuild_settings, 'ClCompile', 'PrecompiledHeader', 'Use')
    _ToolAppend(msbuild_settings, 'ClCompile',
                'PrecompiledHeaderFile', precompiled_header)
    _ToolAppend(msbuild_settings, 'ClCompile',
                'ForcedIncludeFiles', [precompiled_header])
  else:
    _ToolAppend(msbuild_settings, 'ClCompile', 'PrecompiledHeader', 'NotUsing')
  # Turn off WinRT compilation
  _ToolAppend(msbuild_settings, 'ClCompile', 'CompileAsWinRT', 'false')
  # Turn on import libraries if appropriate
  if spec.get('msvs_requires_importlibrary'):
    _ToolAppend(msbuild_settings, '', 'IgnoreImportLibrary', 'false')
  # Loadable modules don't generate import libraries;
  # tell dependent projects to not expect one.
  if spec['type'] == 'loadable_module':
    _ToolAppend(msbuild_settings, '', 'IgnoreImportLibrary', 'true')
  # Set the module definition file if any.
  if def_file:
    _ToolAppend(msbuild_settings, 'Link', 'ModuleDefinitionFile', def_file)
  configuration['finalized_msbuild_settings'] = msbuild_settings
  # The appends below still land in the finalized settings because
  # 'finalized_msbuild_settings' references the same dict object.
  if prebuild:
    _ToolAppend(msbuild_settings, 'PreBuildEvent', 'Command', prebuild)
  if postbuild:
    _ToolAppend(msbuild_settings, 'PostBuildEvent', 'Command', postbuild)
def _GetValueFormattedForMSBuild(tool_name, name, value):
  """Formats a setting value for emission into MSBuild project XML.

  Lists are joined into a single delimited string (';' normally, ' ' for
  the AdditionalOptions settings); scalar values are passed through macro
  conversion unchanged.

  Arguments:
    tool_name: The name of the tool the setting belongs to ('' for the
        tool-less global settings group).
    name: The name of the setting.
    value: The value of the setting, a list or a scalar.
  Returns:
    The formatted string value.
  """
  if isinstance(value, list):
    # For some settings, VS2010 does not automatically extend the settings
    # with the inherited defaults, so append the '%(Name)' reference
    # explicitly.  Work on a copy so the caller's list is not mutated:
    # in-place append would accumulate duplicates if the same list were
    # ever formatted more than once.
    # TODO(jeanluc) Is this what we want?
    if name in ['AdditionalIncludeDirectories',
                'AdditionalLibraryDirectories',
                'AdditionalOptions',
                'DelayLoadDLLs',
                'DisableSpecificWarnings',
                'PreprocessorDefinitions']:
      value = value + ['%%(%s)' % name]
    # For most tools, entries in a list should be separated with ';' but some
    # settings use a space.  Check for those first.
    exceptions = {
        'ClCompile': ['AdditionalOptions'],
        'Link': ['AdditionalOptions'],
        'Lib': ['AdditionalOptions']}
    if name in exceptions.get(tool_name, []):
      char = ' '
    else:
      char = ';'
    formatted_value = char.join(
        [MSVSSettings.ConvertVCMacrosToMSBuild(i) for i in value])
  else:
    formatted_value = MSVSSettings.ConvertVCMacrosToMSBuild(value)
  return formatted_value
def _VerifySourcesExist(sources, root_dir):
  """Verifies that all source files exist on disk.

  Checks that all regular source files, i.e. not created at run time,
  exist on disk.  Missing files cause needless recompilation but no
  otherwise visible errors.

  Arguments:
    sources: A recursive list of Filter/file names.
    root_dir: The root directory for the relative path names.
  Returns:
    A list of source files that cannot be found on disk.
  """
  missing_sources = []
  for entry in sources:
    if isinstance(entry, MSVSProject.Filter):
      # Filters nest; collect misses from the subtree.
      missing_sources.extend(_VerifySourcesExist(entry.contents, root_dir))
      continue
    if '$' in entry:
      # Paths containing macros cannot be checked statically.
      continue
    candidate = os.path.join(root_dir, entry)
    if not os.path.exists(candidate):
      missing_sources.append(candidate)
  return missing_sources
def _GetMSBuildSources(spec, sources, exclusions, rule_dependencies,
                       extension_to_rule_name, actions_spec,
                       sources_handled_by_action, list_excluded):
  """Returns ItemGroup specs for the project's sources and actions.

  Sources are bucketed by kind (none/masm/midl/include/compile/resource/
  rule/rule_dependency); each non-empty bucket becomes one ItemGroup, and
  custom-build actions get a final ItemGroup of their own.
  """
  groups = ['none', 'masm', 'midl', 'include', 'compile', 'resource', 'rule',
            'rule_dependency']
  grouped_sources = dict((group, []) for group in groups)
  _AddSources2(spec, sources, exclusions, grouped_sources,
               rule_dependencies, extension_to_rule_name,
               sources_handled_by_action, list_excluded)
  item_groups = []
  for group in groups:
    if grouped_sources[group]:
      item_groups.append(['ItemGroup'] + grouped_sources[group])
  if actions_spec:
    item_groups.append(['ItemGroup'] + actions_spec)
  return item_groups
def _AddSources2(spec, sources, exclusions, grouped_sources,
                 rule_dependencies, extension_to_rule_name,
                 sources_handled_by_action,
                 list_excluded):
  """Recursively buckets sources into grouped_sources with per-file detail.

  Filters recurse; each plain source file that is not already covered by a
  custom-build action is appended to its bucket in grouped_sources with
  ExcludedFromBuild and PrecompiledHeader child elements attached as needed.

  Arguments:
    spec: The target project dict.
    sources: A recursive list of Filter/file names.
    exclusions: Dict mapping a source name to the list of
        (config_name, configuration) pairs it is excluded from.
    grouped_sources: Dict mapping group name to a list of element specs;
        modified in place.
    rule_dependencies: Passed through to _MapFileToMsBuildSourceType.
    extension_to_rule_name: Dict mapping source extensions to rule names.
    sources_handled_by_action: Set of sources custom-build actions cover.
    list_excluded: Only forwarded to recursive calls at this level.
  """
  extensions_excluded_from_precompile = []
  for source in sources:
    if isinstance(source, MSVSProject.Filter):
      _AddSources2(spec, source.contents, exclusions, grouped_sources,
                   rule_dependencies, extension_to_rule_name,
                   sources_handled_by_action,
                   list_excluded)
    else:
      if not source in sources_handled_by_action:
        detail = []
        excluded_configurations = exclusions.get(source, [])
        if len(excluded_configurations) == len(spec['configurations']):
          # Excluded everywhere: one unconditional element suffices.
          detail.append(['ExcludedFromBuild', 'true'])
        else:
          for config_name, configuration in sorted(excluded_configurations):
            condition = _GetConfigurationCondition(config_name, configuration)
            detail.append(['ExcludedFromBuild',
                           {'Condition': condition},
                           'true'])
        # Add precompile if needed
        for config_name, configuration in spec['configurations'].iteritems():
          precompiled_source = configuration.get('msvs_precompiled_source', '')
          if precompiled_source != '':
            precompiled_source = _FixPath(precompiled_source)
            if not extensions_excluded_from_precompile:
              # If the precompiled header is generated by a C source, we must
              # not try to use it for C++ sources, and vice versa.
              basename, extension = os.path.splitext(precompiled_source)
              if extension == '.c':
                extensions_excluded_from_precompile = ['.cc', '.cpp', '.cxx']
              else:
                extensions_excluded_from_precompile = ['.c']
            if precompiled_source == source:
              condition = _GetConfigurationCondition(config_name, configuration)
              detail.append(['PrecompiledHeader',
                             {'Condition': condition},
                             'Create'
                            ])
            else:
              # Turn off precompiled header usage for source files of a
              # different type than the file that generated the
              # precompiled header.
              for extension in extensions_excluded_from_precompile:
                if source.endswith(extension):
                  detail.append(['PrecompiledHeader', ''])
                  detail.append(['ForcedIncludeFiles', ''])
        group, element = _MapFileToMsBuildSourceType(source, rule_dependencies,
                                                     extension_to_rule_name)
        grouped_sources[group].append([element, {'Include': source}] + detail)
def _GetMSBuildProjectReferences(project):
  """Returns ItemGroup specs referencing the project's dependencies.

  Arguments:
    project: The project object; its 'dependencies' are sibling project
        objects exposing guid/path/spec attributes.
  Returns:
    A list containing at most one 'ItemGroup' element spec.
  """
  references = []
  if project.dependencies:
    group = ['ItemGroup']
    for dependency in project.dependencies:
      guid = dependency.guid
      project_dir = os.path.split(project.path)[0]
      relative_path = gyp.common.RelativePath(dependency.path, project_dir)
      project_ref = ['ProjectReference',
          {'Include': relative_path},
          ['Project', guid],
          ['ReferenceOutputAssembly', 'false']
          ]
      for config in dependency.spec.get('configurations', {}).itervalues():
        # If it's disabled in any config, turn it off in the reference.
        if config.get('msvs_2010_disable_uldi_when_referenced', 0):
          project_ref.append(['UseLibraryDependencyInputs', 'false'])
          break
      group.append(project_ref)
    references.append(group)
  return references
def _GenerateMSBuildProject(project, options, version, generator_flags):
  """Generates an MSBuild (.vcxproj) project file for one target.

  Prepares sources/rules/actions, finalizes per-configuration settings,
  assembles the full project XML content, and writes it (plus a .filters
  file) to disk.

  Arguments:
    project: The project object describing the target (spec/path/guid...).
    options: Global generator options.
    version: The Visual Studio version object targeted.
    generator_flags: Dict of generator-specific flags.
  Returns:
    A list of source files that could not be found on disk.
  """
  spec = project.spec
  configurations = spec['configurations']
  project_dir, project_file_name = os.path.split(project.path)
  gyp.common.EnsureDirExists(project.path)
  # Prepare list of sources and excluded sources.
  gyp_path = _NormalizedSource(project.build_file)
  # NOTE(review): relative_path_of_gyp_file is computed but not used below.
  relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, project_dir)
  gyp_file = os.path.split(project.build_file)[1]
  sources, excluded_sources = _PrepareListOfSources(spec, generator_flags,
                                                    gyp_file)
  # Add rules.
  actions_to_add = {}
  props_files_of_rules = set()
  targets_files_of_rules = set()
  rule_dependencies = set()
  extension_to_rule_name = {}
  list_excluded = generator_flags.get('msvs_list_excluded_files', True)
  # Don't generate rules if we are using an external builder like ninja.
  if not spec.get('msvs_external_builder'):
    _GenerateRulesForMSBuild(project_dir, options, spec,
                             sources, excluded_sources,
                             props_files_of_rules, targets_files_of_rules,
                             actions_to_add, rule_dependencies,
                             extension_to_rule_name)
  else:
    rules = spec.get('rules', [])
    _AdjustSourcesForRules(rules, sources, excluded_sources, True)
  sources, excluded_sources, excluded_idl = (
      _AdjustSourcesAndConvertToFilterHierarchy(spec, options,
                                                project_dir, sources,
                                                excluded_sources,
                                                list_excluded, version))
  # Don't add actions if we are using an external builder like ninja.
  if not spec.get('msvs_external_builder'):
    _AddActions(actions_to_add, spec, project.build_file)
    _AddCopies(actions_to_add, spec)
    # NOTE: this stanza must appear after all actions have been decided.
    # Don't excluded sources with actions attached, or they won't run.
    excluded_sources = _FilterActionsFromExcluded(
        excluded_sources, actions_to_add)
  exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl)
  actions_spec, sources_handled_by_action = _GenerateActionsForMSBuild(
      spec, actions_to_add)
  _GenerateMSBuildFiltersFile(project.path + '.filters', sources,
                              rule_dependencies,
                              extension_to_rule_name)
  missing_sources = _VerifySourcesExist(sources, project_dir)
  for configuration in configurations.itervalues():
    _FinalizeMSBuildSettings(spec, configuration)
  # Add attributes to root element
  import_default_section = [
      ['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.Default.props'}]]
  import_cpp_props_section = [
      ['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.props'}]]
  import_cpp_targets_section = [
      ['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.targets'}]]
  import_masm_props_section = [
      ['Import',
        {'Project': r'$(VCTargetsPath)\BuildCustomizations\masm.props'}]]
  import_masm_targets_section = [
      ['Import',
        {'Project': r'$(VCTargetsPath)\BuildCustomizations\masm.targets'}]]
  macro_section = [['PropertyGroup', {'Label': 'UserMacros'}]]
  content = [
      'Project',
      {'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003',
       'ToolsVersion': version.ProjectVersion(),
       'DefaultTargets': 'Build'
      }]
  # The ordering of the sections below mirrors the layout MSBuild expects
  # in a .vcxproj file.
  content += _GetMSBuildProjectConfigurations(configurations)
  content += _GetMSBuildGlobalProperties(spec, project.guid, project_file_name)
  content += import_default_section
  content += _GetMSBuildConfigurationDetails(spec, project.build_file)
  if spec.get('msvs_enable_winphone'):
    content += _GetMSBuildLocalProperties('v120_wp81')
  else:
    content += _GetMSBuildLocalProperties(project.msbuild_toolset)
  content += import_cpp_props_section
  content += import_masm_props_section
  content += _GetMSBuildExtensions(props_files_of_rules)
  content += _GetMSBuildPropertySheets(configurations)
  content += macro_section
  content += _GetMSBuildConfigurationGlobalProperties(spec, configurations,
                                                      project.build_file)
  content += _GetMSBuildToolSettingsSections(spec, configurations)
  content += _GetMSBuildSources(
      spec, sources, exclusions, rule_dependencies, extension_to_rule_name,
      actions_spec, sources_handled_by_action, list_excluded)
  content += _GetMSBuildProjectReferences(project)
  content += import_cpp_targets_section
  content += import_masm_targets_section
  content += _GetMSBuildExtensionTargets(targets_files_of_rules)
  if spec.get('msvs_external_builder'):
    content += _GetMSBuildExternalBuilderTargets(spec)
  # TODO(jeanluc) File a bug to get rid of runas. We had in MSVS:
  # has_run_as = _WriteMSVSUserFile(project.path, version, spec)
  easy_xml.WriteXmlIfChanged(content, project.path, pretty=True, win32=True)
  return missing_sources
def _GetMSBuildExternalBuilderTargets(spec):
  """Return a list of MSBuild targets for external builders.

  The "Build" and "Clean" targets are always generated. If the spec contains
  'msvs_external_builder_clcompile_cmd', then the "ClCompile" target will also
  be generated, to support building selected C/C++ files.

  Arguments:
    spec: The gyp target spec.
  Returns:
    List of MSBuild 'Target' specs.
  """
  # Each target simply shells out to the configured command via <Exec>.
  build_cmd = _BuildCommandLineForRuleRaw(
      spec, spec['msvs_external_builder_build_cmd'],
      False, False, False, False)
  build_target = ['Target', {'Name': 'Build'}]
  build_target.append(['Exec', {'Command': build_cmd}])
  clean_cmd = _BuildCommandLineForRuleRaw(
      spec, spec['msvs_external_builder_clean_cmd'],
      False, False, False, False)
  clean_target = ['Target', {'Name': 'Clean'}]
  clean_target.append(['Exec', {'Command': clean_cmd}])
  targets = [build_target, clean_target]
  if spec.get('msvs_external_builder_clcompile_cmd'):
    clcompile_cmd = _BuildCommandLineForRuleRaw(
        spec, spec['msvs_external_builder_clcompile_cmd'],
        False, False, False, False)
    clcompile_target = ['Target', {'Name': 'ClCompile'}]
    clcompile_target.append(['Exec', {'Command': clcompile_cmd}])
    targets.append(clcompile_target)
  return targets
def _GetMSBuildExtensions(props_files_of_rules):
extensions = ['ImportGroup', {'Label': 'ExtensionSettings'}]
for props_file in props_files_of_rules:
extensions.append(['Import', {'Project': props_file}])
return [extensions]
def _GetMSBuildExtensionTargets(targets_files_of_rules):
targets_node = ['ImportGroup', {'Label': 'ExtensionTargets'}]
for targets_file in sorted(targets_files_of_rules):
targets_node.append(['Import', {'Project': targets_file}])
return [targets_node]
def _GenerateActionsForMSBuild(spec, actions_to_add):
  """Add actions accumulated into an actions_to_add, merging as needed.

  Arguments:
    spec: the target project dict
    actions_to_add: dictionary keyed on input name, which maps to a list of
        dicts describing the actions attached to that input file.
  Returns:
    A pair of (action specification, the sources handled by this action).
  """
  sources_handled_by_action = OrderedSet()
  actions_spec = []
  for primary_input, actions in actions_to_add.iteritems():
    # All actions attached to one input are merged into a single
    # CustomBuild element below.
    inputs = OrderedSet()
    outputs = OrderedSet()
    descriptions = []
    commands = []
    for action in actions:
      inputs.update(OrderedSet(action['inputs']))
      outputs.update(OrderedSet(action['outputs']))
      descriptions.append(action['description'])
      cmd = action['command']
      # For most actions, add 'call' so that actions that invoke batch files
      # return and continue executing.  msbuild_use_call provides a way to
      # disable this but I have not seen any adverse effect from doing that
      # for everything.
      if action.get('msbuild_use_call', True):
        cmd = 'call ' + cmd
      commands.append(cmd)
    # Add the custom build action for one input file.
    description = ', and also '.join(descriptions)
    # We can't join the commands simply with && because the command line will
    # get too long.  See also _AddActions: cygwin's setup_env mustn't be called
    # for every invocation or the command that sets the PATH will grow too
    # long.
    command = '\r\n'.join([c + '\r\nif %errorlevel% neq 0 exit /b %errorlevel%'
                           for c in commands])
    _AddMSBuildAction(spec,
                      primary_input,
                      inputs,
                      outputs,
                      command,
                      description,
                      sources_handled_by_action,
                      actions_spec)
  return actions_spec, sources_handled_by_action
def _AddMSBuildAction(spec, primary_input, inputs, outputs, cmd, description,
                      sources_handled_by_action, actions_spec):
  """Appends one CustomBuild element spec for a merged action.

  Arguments:
    spec: The target project dict (not referenced here).
    primary_input: The input file the CustomBuild item is attached to.
    inputs: All input files; non-primary ones become AdditionalInputs.
    outputs: The output files of the action.
    cmd: The command line; MSVS macros are converted to MSBuild form.
    description: The message shown while the action runs.
    sources_handled_by_action: Set updated with primary_input; modified
        in place.
    actions_spec: List of CustomBuild element specs; modified in place.
  """
  command = MSVSSettings.ConvertVCMacrosToMSBuild(cmd)
  primary_input = _FixPath(primary_input)
  inputs_array = _FixPaths(inputs)
  outputs_array = _FixPaths(outputs)
  additional_inputs = ';'.join([i for i in inputs_array
                                if i != primary_input])
  outputs = ';'.join(outputs_array)
  sources_handled_by_action.add(primary_input)
  action_spec = ['CustomBuild', {'Include': primary_input}]
  action_spec.extend(
      # TODO(jeanluc) 'Document' for all or just if as_sources?
      [['FileType', 'Document'],
       ['Command', command],
       ['Message', description],
       ['Outputs', outputs]
      ])
  if additional_inputs:
    action_spec.append(['AdditionalInputs', additional_inputs])
  actions_spec.append(action_spec)
| mit |
sri85/pyfakefs | fake_filesystem_shutil_test.py | 23 | 12406 | #! /usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittest for fake_filesystem_shutil."""
import stat
import time
import sys
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
import fake_filesystem
import fake_filesystem_shutil
class FakeShutilModuleTest(unittest.TestCase):
  def setUp(self):
    """Creates a fresh fake filesystem and a shutil module bound to it."""
    self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
    self.shutil = fake_filesystem_shutil.FakeShutilModule(self.filesystem)
def testRmtree(self):
directory = 'xyzzy'
self.filesystem.CreateDirectory(directory)
self.filesystem.CreateDirectory('%s/subdir' % directory)
self.filesystem.CreateFile('%s/subfile' % directory)
self.assertTrue(self.filesystem.Exists(directory))
self.shutil.rmtree(directory)
self.assertFalse(self.filesystem.Exists(directory))
  def testCopy(self):
    """copy() creates the destination file and preserves permission bits."""
    src_file = 'xyzzy'
    dst_file = 'xyzzy_copy'
    src_obj = self.filesystem.CreateFile(src_file)
    # Give the source a distinctive mode so the copy is observable.
    src_obj.st_mode = ((src_obj.st_mode & ~0o7777) | 0o750)
    self.assertTrue(self.filesystem.Exists(src_file))
    self.assertFalse(self.filesystem.Exists(dst_file))
    self.shutil.copy(src_file, dst_file)
    self.assertTrue(self.filesystem.Exists(dst_file))
    dst_obj = self.filesystem.GetObject(dst_file)
    self.assertEqual(src_obj.st_mode, dst_obj.st_mode)
  def testCopyDirectory(self):
    """copy() into an existing directory places the file inside it."""
    src_file = 'xyzzy'
    parent_directory = 'parent'
    dst_file = '%s/%s' % (parent_directory, src_file)
    src_obj = self.filesystem.CreateFile(src_file)
    self.filesystem.CreateDirectory(parent_directory)
    # Give the source a distinctive mode so the copy is observable.
    src_obj.st_mode = ((src_obj.st_mode & ~0o7777) | 0o750)
    self.assertTrue(self.filesystem.Exists(src_file))
    self.assertTrue(self.filesystem.Exists(parent_directory))
    self.assertFalse(self.filesystem.Exists(dst_file))
    self.shutil.copy(src_file, parent_directory)
    self.assertTrue(self.filesystem.Exists(dst_file))
    dst_obj = self.filesystem.GetObject(dst_file)
    self.assertEqual(src_obj.st_mode, dst_obj.st_mode)
  def testCopystat(self):
    """copystat() copies mode, uid, gid and timestamps to an existing file."""
    src_file = 'xyzzy'
    dst_file = 'xyzzy_copy'
    src_obj = self.filesystem.CreateFile(src_file)
    dst_obj = self.filesystem.CreateFile(dst_file)
    # Give the source distinctive stat values so the copy is observable.
    src_obj.st_mode = ((src_obj.st_mode & ~0o7777) | 0o750)
    src_obj.st_uid = 123
    src_obj.st_gid = 123
    src_obj.st_atime = time.time()
    src_obj.st_mtime = time.time()
    self.assertTrue(self.filesystem.Exists(src_file))
    self.assertTrue(self.filesystem.Exists(dst_file))
    self.shutil.copystat(src_file, dst_file)
    self.assertEqual(src_obj.st_mode, dst_obj.st_mode)
    self.assertEqual(src_obj.st_uid, dst_obj.st_uid)
    self.assertEqual(src_obj.st_gid, dst_obj.st_gid)
    self.assertEqual(src_obj.st_atime, dst_obj.st_atime)
    self.assertEqual(src_obj.st_mtime, dst_obj.st_mtime)
  def testCopy2(self):
    """copy2() copies the file together with its stat info."""
    src_file = 'xyzzy'
    dst_file = 'xyzzy_copy'
    src_obj = self.filesystem.CreateFile(src_file)
    # Give the source distinctive stat values so the copy is observable.
    src_obj.st_mode = ((src_obj.st_mode & ~0o7777) | 0o750)
    src_obj.st_uid = 123
    src_obj.st_gid = 123
    src_obj.st_atime = time.time()
    src_obj.st_mtime = time.time()
    self.assertTrue(self.filesystem.Exists(src_file))
    self.assertFalse(self.filesystem.Exists(dst_file))
    self.shutil.copy2(src_file, dst_file)
    self.assertTrue(self.filesystem.Exists(dst_file))
    dst_obj = self.filesystem.GetObject(dst_file)
    self.assertEqual(src_obj.st_mode, dst_obj.st_mode)
    self.assertEqual(src_obj.st_uid, dst_obj.st_uid)
    self.assertEqual(src_obj.st_gid, dst_obj.st_gid)
    self.assertEqual(src_obj.st_atime, dst_obj.st_atime)
    self.assertEqual(src_obj.st_mtime, dst_obj.st_mtime)
  def testCopy2Directory(self):
    """copy2() into an existing directory preserves stat info on the copy."""
    src_file = 'xyzzy'
    parent_directory = 'parent'
    dst_file = '%s/%s' % (parent_directory, src_file)
    src_obj = self.filesystem.CreateFile(src_file)
    self.filesystem.CreateDirectory(parent_directory)
    # Give the source distinctive stat values so the copy is observable.
    src_obj.st_mode = ((src_obj.st_mode & ~0o7777) | 0o750)
    src_obj.st_uid = 123
    src_obj.st_gid = 123
    src_obj.st_atime = time.time()
    src_obj.st_mtime = time.time()
    self.assertTrue(self.filesystem.Exists(src_file))
    self.assertTrue(self.filesystem.Exists(parent_directory))
    self.assertFalse(self.filesystem.Exists(dst_file))
    self.shutil.copy2(src_file, parent_directory)
    self.assertTrue(self.filesystem.Exists(dst_file))
    dst_obj = self.filesystem.GetObject(dst_file)
    self.assertEqual(src_obj.st_mode, dst_obj.st_mode)
    self.assertEqual(src_obj.st_uid, dst_obj.st_uid)
    self.assertEqual(src_obj.st_gid, dst_obj.st_gid)
    self.assertEqual(src_obj.st_atime, dst_obj.st_atime)
    self.assertEqual(src_obj.st_mtime, dst_obj.st_mtime)
def testCopytree(self):
    """copytree() recursively copies a directory tree."""
    source_dir = 'xyzzy'
    target_dir = 'xyzzy_copy'
    self.filesystem.CreateDirectory(source_dir)
    self.filesystem.CreateDirectory('%s/subdir' % source_dir)
    self.filesystem.CreateFile('%s/subfile' % source_dir)
    self.assertTrue(self.filesystem.Exists(source_dir))
    self.assertFalse(self.filesystem.Exists(target_dir))
    self.shutil.copytree(source_dir, target_dir)
    # Every entry of the source tree must show up in the copy.
    for copied in (target_dir,
                   '%s/subdir' % target_dir,
                   '%s/subfile' % target_dir):
        self.assertTrue(self.filesystem.Exists(copied))
def testCopytreeSrcIsFile(self):
    """copytree() rejects a plain file as its source argument."""
    file_path = 'xyzzy'
    target_dir = 'xyzzy_copy'
    self.filesystem.CreateFile(file_path)
    self.assertTrue(self.filesystem.Exists(file_path))
    self.assertFalse(self.filesystem.Exists(target_dir))
    # A non-directory source must raise OSError.
    self.assertRaises(OSError,
                      self.shutil.copytree, file_path, target_dir)
def testMoveFile(self):
    """move() renames a file, removing the original path."""
    old_path = 'original_xyzzy'
    new_path = 'moved_xyzzy'
    self.filesystem.CreateFile(old_path)
    self.assertTrue(self.filesystem.Exists(old_path))
    self.assertFalse(self.filesystem.Exists(new_path))
    self.shutil.move(old_path, new_path)
    # The file exists only at its new location afterwards.
    self.assertTrue(self.filesystem.Exists(new_path))
    self.assertFalse(self.filesystem.Exists(old_path))
def testMoveFileIntoDirectory(self):
    """move() into a directory keeps the file's base name."""
    file_path = 'xyzzy'
    target_dir = 'directory'
    moved_path = '%s/%s' % (target_dir, file_path)
    self.filesystem.CreateFile(file_path)
    self.filesystem.CreateDirectory(target_dir)
    self.assertTrue(self.filesystem.Exists(file_path))
    self.assertFalse(self.filesystem.Exists(moved_path))
    self.shutil.move(file_path, target_dir)
    self.assertTrue(self.filesystem.Exists(moved_path))
    self.assertFalse(self.filesystem.Exists(file_path))
def testMoveDirectory(self):
    """move() relocates a whole directory tree."""
    old_dir = 'original_xyzzy'
    new_dir = 'moved_xyzzy'
    self.filesystem.CreateDirectory(old_dir)
    self.filesystem.CreateFile('%s/subfile' % old_dir)
    self.filesystem.CreateDirectory('%s/subdir' % old_dir)
    self.assertTrue(self.filesystem.Exists(old_dir))
    self.assertFalse(self.filesystem.Exists(new_dir))
    self.shutil.move(old_dir, new_dir)
    # Tree contents travel with the directory; the old path is gone.
    self.assertTrue(self.filesystem.Exists(new_dir))
    self.assertTrue(self.filesystem.Exists('%s/subfile' % new_dir))
    self.assertTrue(self.filesystem.Exists('%s/subdir' % new_dir))
    self.assertFalse(self.filesystem.Exists(old_dir))
class CopyFileTest(unittest.TestCase):
    """Tests for the fake shutil.copyfile() implementation.

    Each test builds an in-memory fake filesystem, so no real files
    are touched; permission bits on the fake file objects drive the
    IOError cases.
    """

    def setUp(self):
        # Fresh fake filesystem plus a fake shutil module bound to it.
        self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
        self.shutil = fake_filesystem_shutil.FakeShutilModule(self.filesystem)

    def testCommonCase(self):
        # Plain copy: destination is created with the source's contents.
        src_file = 'xyzzy'
        dst_file = 'xyzzy_copy'
        contents = 'contents of file'
        self.filesystem.CreateFile(src_file, contents=contents)
        self.assertTrue(self.filesystem.Exists(src_file))
        self.assertFalse(self.filesystem.Exists(dst_file))
        self.shutil.copyfile(src_file, dst_file)
        self.assertTrue(self.filesystem.Exists(dst_file))
        self.assertEqual(contents, self.filesystem.GetObject(dst_file).contents)

    def testRaisesIfSourceAndDestAreTheSameFile(self):
        # Copying a file onto itself is an Error, not a silent no-op.
        src_file = 'xyzzy'
        dst_file = src_file
        contents = 'contents of file'
        self.filesystem.CreateFile(src_file, contents=contents)
        self.assertTrue(self.filesystem.Exists(src_file))
        self.assertRaises(self.shutil.Error,
                          self.shutil.copyfile, src_file, dst_file)

    def testRaisesIfDestIsASymlinkToSrc(self):
        # A destination symlink pointing back at the source is the
        # same-file case in disguise.
        src_file = '/tmp/foo'
        dst_file = '/tmp/bar'
        contents = 'contents of file'
        self.filesystem.CreateFile(src_file, contents=contents)
        self.filesystem.CreateLink(dst_file, src_file)
        self.assertTrue(self.filesystem.Exists(src_file))
        self.assertRaises(self.shutil.Error,
                          self.shutil.copyfile, src_file, dst_file)

    def testSucceedsIfDestExistsAndIsWritable(self):
        # An existing writable destination is overwritten in place.
        src_file = 'xyzzy'
        dst_file = 'xyzzy_copy'
        src_contents = 'contents of source file'
        dst_contents = 'contents of dest file'
        self.filesystem.CreateFile(src_file, contents=src_contents)
        self.filesystem.CreateFile(dst_file, contents=dst_contents)
        self.assertTrue(self.filesystem.Exists(src_file))
        self.assertTrue(self.filesystem.Exists(dst_file))
        self.shutil.copyfile(src_file, dst_file)
        self.assertTrue(self.filesystem.Exists(dst_file))
        self.assertEqual(src_contents,
                         self.filesystem.GetObject(dst_file).contents)

    def testRaisesIfDestExistsAndIsNotWritable(self):
        # Destination exists but is read-only (0o400) -> IOError.
        src_file = 'xyzzy'
        dst_file = 'xyzzy_copy'
        src_contents = 'contents of source file'
        dst_contents = 'contents of dest file'
        self.filesystem.CreateFile(src_file, contents=src_contents)
        self.filesystem.CreateFile(dst_file,
                                   st_mode=stat.S_IFREG | 0o400,
                                   contents=dst_contents)
        self.assertTrue(self.filesystem.Exists(src_file))
        self.assertTrue(self.filesystem.Exists(dst_file))
        self.assertRaises(IOError, self.shutil.copyfile, src_file, dst_file)

    def testRaisesIfDestDirIsNotWritable(self):
        # Destination's parent directory is read-only (0o555) -> IOError.
        src_file = 'xyzzy'
        dst_dir = '/tmp/foo'
        dst_file = '%s/%s' % (dst_dir, src_file)
        src_contents = 'contents of source file'
        self.filesystem.CreateFile(src_file, contents=src_contents)
        self.filesystem.CreateDirectory(dst_dir, perm_bits=0o555)
        self.assertTrue(self.filesystem.Exists(src_file))
        self.assertTrue(self.filesystem.Exists(dst_dir))
        self.assertRaises(IOError, self.shutil.copyfile, src_file, dst_file)

    def testRaisesIfSrcDoesntExist(self):
        # Missing source -> IOError.
        src_file = 'xyzzy'
        dst_file = 'xyzzy_copy'
        self.assertFalse(self.filesystem.Exists(src_file))
        self.assertRaises(IOError, self.shutil.copyfile, src_file, dst_file)

    def testRaisesIfSrcNotReadable(self):
        # Source exists but has no read permission (0o000) -> IOError.
        src_file = 'xyzzy'
        dst_file = 'xyzzy_copy'
        src_contents = 'contents of source file'
        self.filesystem.CreateFile(src_file,
                                   st_mode=stat.S_IFREG | 0o000,
                                   contents=src_contents)
        self.assertTrue(self.filesystem.Exists(src_file))
        self.assertRaises(IOError, self.shutil.copyfile, src_file, dst_file)

    def testRaisesIfSrcIsADirectory(self):
        # copyfile() only copies regular files; a directory source fails.
        src_file = 'xyzzy'
        dst_file = 'xyzzy_copy'
        self.filesystem.CreateDirectory(src_file)
        self.assertTrue(self.filesystem.Exists(src_file))
        self.assertRaises(IOError, self.shutil.copyfile, src_file, dst_file)

    def testRaisesIfDestIsADirectory(self):
        # Unlike copy()/copy2(), copyfile() rejects a directory target.
        src_file = 'xyzzy'
        dst_dir = '/tmp/foo'
        src_contents = 'contents of source file'
        self.filesystem.CreateFile(src_file, contents=src_contents)
        self.filesystem.CreateDirectory(dst_dir)
        self.assertTrue(self.filesystem.Exists(src_file))
        self.assertTrue(self.filesystem.Exists(dst_dir))
        self.assertRaises(IOError, self.shutil.copyfile, src_file, dst_dir)
# Allow running this test module directly (e.g. ``python <file>``).
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
jhonatanoliveira/pgmpy | pgmpy/models/ClusterGraph.py | 4 | 11908 | #!/usr/bin/env python3
from collections import defaultdict
import numpy as np
from pgmpy.base import UndirectedGraph
from pgmpy.factors.discrete import factor_product
from pgmpy.extern.six.moves import filter, range, zip
class ClusterGraph(UndirectedGraph):
    r"""
    Base class for representing Cluster Graph.

    Cluster graph is an undirected graph which is associated with a subset of
    variables. The graph contains undirected edges that connect clusters whose
    scopes have a non-empty intersection.

    Formally, a cluster graph is :math:`\mathcal{U}` for a set of factors
    :math:`\Phi` over :math:`\mathcal{X}` is an undirected graph, each of whose
    nodes :math:`i` is associated with a subset :math:`C_i \subseteq X`. A
    cluster graph must be family-preserving - each factor :math:`\phi \in \Phi`
    must be associated with a cluster C, denoted :math:`\alpha(\phi)`, such
    that :math:`Scope[\phi] \subseteq C_i`. Each edge between a pair of
    clusters :math:`C_i` and :math:`C_j` is associated with a sepset
    :math:`S_{i,j} \subseteq C_i \cap C_j`.

    Parameters
    ----------
    ebunch: input graph
        Data to initialize graph. If ebunch=None (default) an empty graph is
        created. The data is an edge list.

    Examples
    --------
    Create an empty ClusterGraph with no nodes and no edges

    >>> from pgmpy.models import ClusterGraph
    >>> G = ClusterGraph()

    G can be grown by adding clique nodes.

    **Nodes:**

    Add a tuple (or list or set) of nodes as single clique node.

    >>> G.add_node(('a', 'b', 'c'))
    >>> G.add_nodes_from([('a', 'b'), ('a', 'b', 'c')])

    **Edges:**

    G can also be grown by adding edges.

    >>> G.add_edge(('a', 'b', 'c'), ('a', 'b'))

    or a list of edges

    >>> G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')),
    ...                   (('a', 'b', 'c'), ('a', 'c'))])
    """

    def __init__(self, ebunch=None):
        super(ClusterGraph, self).__init__()
        if ebunch:
            self.add_edges_from(ebunch)
        # Factors associated with the graph's clusters, in insertion order.
        self.factors = []

    def add_node(self, node, **kwargs):
        """
        Add a single node to the cluster graph.

        Parameters
        ----------
        node: node
            A node should be a collection of nodes forming a clique. It can be
            a list, set or tuple of nodes.

        Examples
        --------
        >>> from pgmpy.models import ClusterGraph
        >>> G = ClusterGraph()
        >>> G.add_node(('a', 'b', 'c'))
        """
        if not isinstance(node, (list, set, tuple)):
            raise TypeError('Node can only be a list, set or tuple of nodes forming a clique')
        # Normalize to tuple so the node is hashable.
        node = tuple(node)
        super(ClusterGraph, self).add_node(node, **kwargs)

    def add_nodes_from(self, nodes, **kwargs):
        """
        Add multiple nodes to the cluster graph.

        Parameters
        ----------
        nodes: iterable container
            A container of nodes (list, dict, set, etc.), each itself a
            collection of variables forming a clique.

        Examples
        --------
        >>> from pgmpy.models import ClusterGraph
        >>> G = ClusterGraph()
        >>> G.add_nodes_from([('a', 'b'), ('a', 'b', 'c')])
        """
        for node in nodes:
            self.add_node(node, **kwargs)

    def add_edge(self, u, v, **kwargs):
        """
        Add an edge between two clique nodes.

        Parameters
        ----------
        u, v: nodes
            Nodes can be any list or set or tuple of nodes forming a clique.

        Raises
        ------
        ValueError
            If the scopes of `u` and `v` are disjoint (no sepset exists).

        Examples
        --------
        >>> from pgmpy.models import ClusterGraph
        >>> G = ClusterGraph()
        >>> G.add_nodes_from([('a', 'b', 'c'), ('a', 'b'), ('a', 'c')])
        >>> G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')),
        ...                   (('a', 'b', 'c'), ('a', 'c'))])
        """
        set_u = set(u)
        set_v = set(v)
        # An edge requires a non-empty sepset S(u, v) = u ∩ v.
        if set_u.isdisjoint(set_v):
            raise ValueError('No sepset found between these two edges.')
        super(ClusterGraph, self).add_edge(u, v)

    def add_factors(self, *factors):
        """
        Associate a factor to the graph.
        See factors class for the order of potential values.

        Parameters
        ----------
        *factor: pgmpy.factors.factors object
            A factor object on any subset of the variables of the model which
            is to be associated with the model.

        Returns
        -------
        None

        Raises
        ------
        ValueError
            If a factor's scope is not exactly one of the graph's clusters.

        Examples
        --------
        >>> from pgmpy.models import ClusterGraph
        >>> from pgmpy.factors.discrete import DiscreteFactor
        >>> student = ClusterGraph()
        >>> student.add_node(('Alice', 'Bob'))
        >>> factor = DiscreteFactor(['Alice', 'Bob'], cardinality=[3, 2],
        ...                         values=np.random.rand(6))
        >>> student.add_factors(factor)
        """
        for factor in factors:
            factor_scope = set(factor.scope())
            nodes = [set(node) for node in self.nodes()]
            if factor_scope not in nodes:
                # Fixed: the original message concatenated to
                # "...variable notpresent in model" (missing space).
                raise ValueError('Factors defined on clusters of variable not '
                                 'present in model')
            self.factors.append(factor)

    def get_factors(self, node=None):
        """
        Return the factors that have been added till now to the graph.

        If node is not None, it would return the factor corresponding to the
        given node.

        Examples
        --------
        >>> from pgmpy.models import ClusterGraph
        >>> from pgmpy.factors.discrete import DiscreteFactor
        >>> G = ClusterGraph()
        >>> G.add_nodes_from([('a', 'b', 'c'), ('a', 'b'), ('a', 'c')])
        >>> G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')),
        ...                   (('a', 'b', 'c'), ('a', 'c'))])
        >>> phi1 = DiscreteFactor(['a', 'b', 'c'], [2, 2, 2], np.random.rand(8))
        >>> phi2 = DiscreteFactor(['a', 'b'], [2, 2], np.random.rand(4))
        >>> phi3 = DiscreteFactor(['a', 'c'], [2, 2], np.random.rand(4))
        >>> G.add_factors(phi1, phi2, phi3)
        >>> G.get_factors()
        >>> G.get_factors(node=('a', 'b', 'c'))
        """
        if node is None:
            return self.factors
        else:
            nodes = [set(n) for n in self.nodes()]
            if set(node) not in nodes:
                raise ValueError('Node not present in Cluster Graph')
            # Return the first factor whose scope matches the node exactly.
            factors = filter(lambda x: set(x.scope()) == set(node), self.factors)
            return next(factors)

    def remove_factors(self, *factors):
        """
        Removes the given factors from the added factors.

        Examples
        --------
        >>> from pgmpy.models import ClusterGraph
        >>> from pgmpy.factors.discrete import DiscreteFactor
        >>> student = ClusterGraph()
        >>> factor = DiscreteFactor(['Alice', 'Bob'], cardinality=[2, 2],
        ...                         values=np.random.rand(4))
        >>> student.add_factors(factor)
        >>> student.remove_factors(factor)
        """
        for factor in factors:
            self.factors.remove(factor)

    def get_cardinality(self, check_cardinality=False):
        """
        Returns a dictionary with the given factors as keys and their respective
        cardinality as values.

        Parameters
        ----------
        check_cardinality: boolean, optional
            If check_cardinality=True it checks if cardinality information
            for all the variables is available or not. If not it raises an error.

        Examples
        --------
        >>> from pgmpy.models import ClusterGraph
        >>> from pgmpy.factors.discrete import DiscreteFactor
        >>> student = ClusterGraph()
        >>> factor = DiscreteFactor(['Alice', 'Bob'], cardinality=[2, 2],
        ...                         values=np.random.rand(4))
        >>> student.add_node(('Alice', 'Bob'))
        >>> student.add_factors(factor)
        >>> student.get_cardinality()
        defaultdict(<class 'int'>, {'Bob': 2, 'Alice': 2})
        """
        cardinalities = defaultdict(int)
        for factor in self.factors:
            for variable, cardinality in zip(factor.scope(), factor.cardinality):
                cardinalities[variable] = cardinality
        if check_cardinality and len(set((x for clique in self.nodes() for x in clique))) != len(cardinalities):
            raise ValueError('Factors for all the variables not defined.')
        return cardinalities

    def get_partition_function(self):
        r"""
        Returns the partition function for a given undirected graph.

        A partition function is defined as

        .. math:: \sum_{X}(\prod_{i=1}^{m} \phi_i)

        where m is the number of factors present in the graph
        and X are all the random variables present.

        Examples
        --------
        >>> from pgmpy.models import ClusterGraph
        >>> from pgmpy.factors.discrete import DiscreteFactor
        >>> G = ClusterGraph()
        >>> G.add_nodes_from([('a', 'b', 'c'), ('a', 'b'), ('a', 'c')])
        >>> G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')),
        ...                   (('a', 'b', 'c'), ('a', 'c'))])
        >>> phi1 = DiscreteFactor(['a', 'b', 'c'], [2, 2, 2], np.random.rand(8))
        >>> phi2 = DiscreteFactor(['a', 'b'], [2, 2], np.random.rand(4))
        >>> phi3 = DiscreteFactor(['a', 'c'], [2, 2], np.random.rand(4))
        >>> G.add_factors(phi1, phi2, phi3)
        >>> G.get_partition_function()
        """
        if self.check_model():
            # Multiply all factors together, then sum over every assignment.
            factor = self.factors[0]
            factor = factor_product(factor, *self.factors[1:])
            return np.sum(factor.values)

    def check_model(self):
        """
        Check the model for various errors. This method checks for the following
        errors.

        * Checks if factors are defined for all the cliques or not.
        * Check for running intersection property is not done explicitly over
          here as it done in the add_edges method.
        * Check if cardinality of random variable remains same across all the
          factors.

        Returns
        -------
        check: boolean
            True if all the checks are passed

        Raises
        ------
        ValueError
            If some clique has no factor, or a variable's cardinality differs
            between factors.
        """
        for clique in self.nodes():
            factors = filter(lambda x: set(x.scope()) == set(clique), self.factors)
            if not any(factors):
                raise ValueError('Factors for all the cliques or clusters not defined.')
        cardinalities = self.get_cardinality()
        for factor in self.factors:
            for variable, cardinality in zip(factor.scope(), factor.cardinality):
                if (cardinalities[variable] != cardinality):
                    raise ValueError(
                        'Cardinality of variable {var} not matching among factors'.format(var=variable))
        return True

    def copy(self):
        """
        Returns a copy of ClusterGraph.

        Returns
        -------
        ClusterGraph: copy of ClusterGraph

        Examples
        --------
        >>> from pgmpy.factors.discrete import DiscreteFactor
        >>> G = ClusterGraph()
        >>> G.add_nodes_from([('a', 'b'), ('b', 'c')])
        >>> G.add_edge(('a', 'b'), ('b', 'c'))
        >>> phi1 = DiscreteFactor(['a', 'b'], [2, 2], np.random.rand(4))
        >>> phi2 = DiscreteFactor(['b', 'c'], [2, 2], np.random.rand(4))
        >>> G.add_factors(phi1, phi2)
        >>> graph_copy = G.copy()
        >>> graph_copy.factors
        [<DiscreteFactor representing phi(a:2, b:2) at 0xb71b19cc>, <DiscreteFactor representing phi(b:2, c:2) at 0xb4eaf3ac>]
        >>> graph_copy.edges()
        [(('a', 'b'), ('b', 'c'))]
        >>> graph_copy.nodes()
        [('a', 'b'), ('b', 'c')]
        """
        copy = ClusterGraph(self.edges())
        if self.factors:
            # Deep-copy factors so mutating the copy does not affect self.
            factors_copy = [factor.copy() for factor in self.factors]
            copy.add_factors(*factors_copy)
        return copy
| mit |
PatrickKennedy/Sybil | docutils/parsers/rst/directives/admonitions.py | 2 | 2189 | # $Id: admonitions.py 4667 2006-07-12 21:40:56Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Admonition directives.
"""
__docformat__ = 'reStructuredText'
from docutils.parsers.rst import Directive
from docutils.parsers.rst import states, directives
from docutils import nodes
class BaseAdmonition(Directive):

    """Common implementation for all admonition directives.

    Subclasses configure the produced node via ``node_class`` and, for the
    generic ``admonition`` directive, the argument/option spec.
    """

    required_arguments = 0
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {}
    has_content = True

    node_class = None
    """Subclasses must set this to the appropriate admonition node class."""

    def run(self):
        """Parse the directive content into an admonition node.

        Returns a one-element list containing the admonition node; raises
        an error (via ``assert_has_content``) when the directive is empty.
        """
        self.assert_has_content()
        text = '\n'.join(self.content)
        admonition_node = self.node_class(text)
        if self.arguments:
            # A title argument is only used by the generic "admonition"
            # directive; specific admonitions (note, warning, ...) have none.
            title_text = self.arguments[0]
            textnodes, messages = self.state.inline_text(title_text,
                                                         self.lineno)
            admonition_node += nodes.title(title_text, '', *textnodes)
            admonition_node += messages
            # 'in' replaces dict.has_key(), which was removed in Python 3;
            # both spellings behave identically on Python 2.
            if 'class' in self.options:
                classes = self.options['class']
            else:
                # Default CSS class is derived from the title text.
                classes = ['admonition-' + nodes.make_id(title_text)]
            admonition_node['classes'] += classes
        # Parse the body as nested reStructuredText inside the node.
        self.state.nested_parse(self.content, self.content_offset,
                                admonition_node)
        return [admonition_node]
class Admonition(BaseAdmonition):

    # Generic "admonition" directive: requires a title argument and
    # accepts a :class: option overriding the derived CSS class.
    required_arguments = 1
    option_spec = {'class': directives.class_option}
    node_class = nodes.admonition
class Attention(BaseAdmonition):

    # "attention" directive.
    node_class = nodes.attention
class Caution(BaseAdmonition):

    # "caution" directive.
    node_class = nodes.caution
class Danger(BaseAdmonition):

    # "danger" directive.
    node_class = nodes.danger
class Error(BaseAdmonition):

    # "error" directive.
    node_class = nodes.error
class Hint(BaseAdmonition):

    # "hint" directive.
    node_class = nodes.hint
class Important(BaseAdmonition):

    # "important" directive.
    node_class = nodes.important
class Note(BaseAdmonition):

    # "note" directive.
    node_class = nodes.note
class Tip(BaseAdmonition):

    # "tip" directive.
    node_class = nodes.tip
class Warning(BaseAdmonition):

    # "warning" directive.
    node_class = nodes.warning
| bsd-2-clause |
siddartha1992/cloud-custodian | tests/test_iam.py | 1 | 26717 | # Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import datetime
import os
import tempfile
from unittest import TestCase
from .common import load_data, BaseTest
from .test_offhours import mock_datetime_now
from dateutil import parser
from c7n.filters.iamaccess import check_cross_account, CrossAccountAccessFilter
from c7n.mu import LambdaManager, LambdaFunction, PythonPackageArchive
from c7n.resources.sns import SNS
from c7n.resources.iam import (
UserMfaDevice,
UsedIamPolicies, UnusedIamPolicies,
UsedInstanceProfiles,
UnusedInstanceProfiles,
UsedIamRole, UnusedIamRole,
IamGroupUsers, UserPolicy, GroupMembership,
UserCredentialReport, UserAccessKey,
IamRoleInlinePolicy, IamGroupInlinePolicy,
SpecificIamRoleManagedPolicy, NoSpecificIamRoleManagedPolicy)
from c7n.executor import MainThreadExecutor
class UserCredentialReportTest(BaseTest):
    """Tests for the iam-user ``credential`` (credential report) filter."""

    def test_credential_report_generate(self):
        # Fixed: method name had a typo ('generatpoe'); test discovery is
        # prefix-based so the rename is safe.
        session_factory = self.replay_flight_data('test_iam_user_unused_keys')
        p = self.load_policy({
            'name': 'user-access-unused-keys',
            'resource': 'iam-user',
            'filters': [
                {'type': 'credential',
                 'key': 'access_keys.last_used_date',
                 'report_delay': 0.01,
                 'value': 'empty'}
            ],
        }, session_factory=session_factory, cache=True)
        resources = p.run()
        self.assertEqual(len(resources), 4)
        self.assertEqual(
            sorted([r['UserName'] for r in resources]),
            ['Hazmat', 'charmworld', 'kaleb', 'kapilt'])

    def test_access_key_last_service(self):
        # Note we're reusing the old console users flight records
        session_factory = self.replay_flight_data('test_iam_user_console_old')
        p = self.load_policy({
            'name': 'user-access-iam',
            'resource': 'iam-user',
            'filters': [
                {'type': 'credential',
                 'report_max_age': 86400 * 7,
                 'key': 'access_keys.last_used_service',
                 'value': 'iam'}
            ],
        }, session_factory=session_factory, cache=True)
        # Freeze "now" so age comparisons match the recorded data.
        with mock_datetime_now(
                parser.parse('2016-11-25T20:27:00+00:00'), datetime):
            resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(
            sorted([r['UserName'] for r in resources]),
            ['kapil'])

    def test_old_console_users(self):
        session_factory = self.replay_flight_data('test_iam_user_console_old')
        p = self.load_policy({
            'name': 'old-console-only-users',
            'resource': 'iam-user',
            'filters': [
                # Console-only users: no access keys at all...
                {'type': 'credential',
                 'report_delay': 0.01,
                 'key': 'access_keys',
                 'value': 'absent'},
                # ...whose last console login is older than 30 days.
                {'type': 'credential',
                 'key': 'password_last_used',
                 'value_type': 'age',
                 'value': 30,
                 'op': 'greater-than'}
            ],
        }, session_factory=session_factory, cache=True)
        with mock_datetime_now(
                parser.parse('2016-11-25T20:27:00+00:00'), datetime):
            resources = p.run()
        self.assertEqual(len(resources), 3)
        self.assertEqual(
            sorted([r['UserName'] for r in resources]),
            ['anthony', 'chrissy', 'matt'])

    def test_record_transform(self):
        # process_user_record() should coerce booleans, drop 'N/A'/'false'
        # fields, and keep the remaining values untouched.
        info = {'access_key_2_active': 'false',
                'password_next_rotation': '2017-01-24T13:15:33+00:00',
                'access_key_2_last_rotated': 'N/A',
                'mfa_active': 'true',
                'cert_1_active': 'false',
                'cert_1_last_rotated': 'N/A',
                'access_key_1_last_used_date': 'N/A',
                'arn': 'arn:aws:iam::644160558196:user/anthony',
                'cert_2_active': 'false',
                'password_enabled': 'true',
                'access_key_2_last_used_region': 'N/A',
                'password_last_changed': '2016-10-26T13:15:33+00:00',
                'access_key_1_last_rotated': 'N/A',
                'user_creation_time': '2016-10-06T16:11:27+00:00',
                'access_key_1_last_used_service': 'N/A',
                'user': 'anthony',
                'password_last_used': '2016-10-26T13:14:37+00:00',
                'cert_2_last_rotated': 'N/A',
                'access_key_2_last_used_date': 'N/A',
                'access_key_2_last_used_service': 'N/A',
                'access_key_1_last_used_region': 'N/A',
                'access_key_1_active': 'false'}
        credential = UserCredentialReport({}, None)
        credential.process_user_record(info)
        self.assertEqual(
            info,
            {
                'arn': 'arn:aws:iam::644160558196:user/anthony',
                'mfa_active': True,
                'password_enabled': True,
                'password_last_changed': '2016-10-26T13:15:33+00:00',
                'password_last_used': '2016-10-26T13:14:37+00:00',
                'password_next_rotation': '2017-01-24T13:15:33+00:00',
                'user': 'anthony',
                'user_creation_time': '2016-10-06T16:11:27+00:00'})
class IAMMFAFilter(BaseTest):
    """Tests for the iam-user ``mfa-device`` filter."""

    def test_iam_mfa_filter(self):
        # Force single-threaded execution for deterministic replay.
        self.patch(
            UserMfaDevice, 'executor_factory', MainThreadExecutor)
        factory = self.replay_flight_data('test_iam_mfa_filter')
        policy = self.load_policy(
            {'name': 'iam-mfa',
             'resource': 'iam-user',
             'filters': [{'type': 'mfa-device',
                          'value': []}]},
            session_factory=factory)
        matched = policy.run()
        self.assertEqual(len(matched), 2)
class IamRoleFilterUsage(BaseTest):
    """Tests for the iam-role ``used``/``unused`` filters."""

    def _matched_roles(self, flight_data, filter_class, usage_filter):
        # Shared driver: replay recorded data, run single-threaded.
        factory = self.replay_flight_data(flight_data)
        self.patch(filter_class, 'executor_factory', MainThreadExecutor)
        policy = self.load_policy(
            {'name': 'iam-inuse-role',
             'resource': 'iam-role',
             'filters': [usage_filter]},
            session_factory=factory)
        return policy.run()

    def test_iam_role_inuse(self):
        matched = self._matched_roles(
            'test_iam_role_inuse', UsedIamRole, 'used')
        self.assertEqual(len(matched), 3)

    def test_iam_role_unused(self):
        matched = self._matched_roles(
            'test_iam_role_unused', UnusedIamRole, 'unused')
        self.assertEqual(len(matched), 6)
class IamUserFilterUsage(BaseTest):
    """Tests for iam-user ``policy`` and ``access-key`` filters."""

    def test_iam_user_policy(self):
        factory = self.replay_flight_data('test_iam_user_admin_policy')
        self.patch(UserPolicy, 'executor_factory', MainThreadExecutor)
        policy = self.load_policy(
            {'name': 'iam-user-policy',
             'resource': 'iam-user',
             'filters': [{'type': 'policy',
                          'key': 'PolicyName',
                          'value': 'AdministratorAccess'}]},
            session_factory=factory)
        matched = policy.run()
        self.assertEqual(matched[0]['UserName'], 'alphabet_soup')

    def test_iam_user_access_key_filter(self):
        factory = self.replay_flight_data('test_iam_user_access_key_active')
        self.patch(UserAccessKey, 'executor_factory', MainThreadExecutor)
        policy = self.load_policy(
            {'name': 'iam-user-with-key',
             'resource': 'iam-user',
             'filters': [{'type': 'access-key',
                          'key': 'Status',
                          'value': 'Active'}]},
            session_factory=factory)
        matched = policy.run()
        self.assertEqual(len(matched), 1)
        self.assertEqual(matched[0]['UserName'], 'alphabet_soup')
class IamUserGroupMembership(BaseTest):
    """Tests for the iam-user ``group`` membership filter."""

    def test_iam_user_group_membership(self):
        factory = self.replay_flight_data('test_iam_user_group_membership')
        self.patch(GroupMembership, 'executor_factory', MainThreadExecutor)
        policy = self.load_policy(
            {'name': 'iam-admin-users',
             'resource': 'iam-user',
             'filters': [{'type': 'group',
                          'key': 'GroupName',
                          'value': 'QATester'}]},
            session_factory=factory)
        matched = policy.run()
        self.assertEqual(len(matched), 1)
        self.assertEqual(matched[0]['UserName'], 'kapil')
        # Matched users carry their group records as an annotation.
        self.assertTrue(matched[0]['c7n:Groups'])
class IamInstanceProfileFilterUsage(BaseTest):
    """Tests for the iam-profile ``used``/``unused`` filters."""

    def test_iam_instance_profile_inuse(self):
        factory = self.replay_flight_data('test_iam_instance_profile_inuse')
        self.patch(
            UsedInstanceProfiles, 'executor_factory', MainThreadExecutor)
        policy = self.load_policy(
            {'name': 'iam-inuse-profiles',
             'resource': 'iam-profile',
             'filters': ['used']},
            session_factory=factory)
        self.assertEqual(len(policy.run()), 1)

    def test_iam_instance_profile_unused(self):
        factory = self.replay_flight_data('test_iam_instance_profile_unused')
        self.patch(
            UnusedInstanceProfiles, 'executor_factory', MainThreadExecutor)
        policy = self.load_policy(
            {'name': 'iam-unused-profiles',
             'resource': 'iam-profile',
             'filters': ['unused']},
            session_factory=factory)
        self.assertEqual(len(policy.run()), 2)
class IamPolicyFilterUsage(BaseTest):
    """Tests for iam-policy resource lookup and usage filters."""

    def test_iam_policy_get_resources(self):
        factory = self.replay_flight_data('test_iam_policy_get_resource')
        policy = self.load_policy(
            {'name': 'iam-attached-profiles',
             'resource': 'iam-policy'},
            session_factory=factory)
        # Fetch a single managed policy directly by ARN.
        found = policy.resource_manager.get_resources(
            ['arn:aws:iam::aws:policy/AWSHealthFullAccess'])
        self.assertEqual(len(found), 1)
        self.assertEqual(found[0]['PolicyName'], 'AWSHealthFullAccess')

    def test_iam_attached_policies(self):
        factory = self.replay_flight_data('test_iam_policy_attached')
        self.patch(UsedIamPolicies, 'executor_factory', MainThreadExecutor)
        policy = self.load_policy(
            {'name': 'iam-attached-profiles',
             'resource': 'iam-policy',
             'filters': ['used']},
            session_factory=factory)
        self.assertEqual(len(policy.run()), 6)

    def test_iam_unattached_policies(self):
        factory = self.replay_flight_data('test_iam_policy_unattached')
        self.patch(UnusedIamPolicies, 'executor_factory', MainThreadExecutor)
        policy = self.load_policy(
            {'name': 'iam-attached-profiles',
             'resource': 'iam-policy',
             'filters': ['unused']},
            session_factory=factory)
        self.assertEqual(len(policy.run()), 203)
class IamPolicyHasAllowAll(BaseTest):
    """Tests for the iam-policy ``has-allow-all`` filter."""

    def test_iam_has_allow_all_policies(self):
        factory = self.replay_flight_data('test_iam_policy_allow_all')
        self.patch(UnusedIamPolicies, 'executor_factory', MainThreadExecutor)
        policy = self.load_policy(
            {'name': 'iam-has-allow-all',
             'resource': 'iam-policy',
             'filters': [
                 # Narrow to the admin policy, then check its statements.
                 {'type': 'value',
                  'key': 'PolicyName',
                  'value': 'AdministratorAccess'},
                 'has-allow-all']},
            session_factory=factory)
        self.assertEqual(len(policy.run()), 1)
class IamGroupFilterUsage(BaseTest):
    """Tests for the iam-group ``has-users`` filter."""

    def _groups_by_membership(self, flight_data, name, has_users):
        # Shared driver: replay recorded data, run single-threaded.
        factory = self.replay_flight_data(flight_data)
        self.patch(IamGroupUsers, 'executor_factory', MainThreadExecutor)
        policy = self.load_policy(
            {'name': name,
             'resource': 'iam-group',
             'filters': [{'type': 'has-users',
                          'value': has_users}]},
            session_factory=factory)
        return policy.run()

    def test_iam_group_used_users(self):
        matched = self._groups_by_membership(
            'test_iam_group_used_users', 'iam-group-used', True)
        self.assertEqual(len(matched), 2)

    def test_iam_group_unused_users(self):
        matched = self._groups_by_membership(
            'test_iam_group_unused_users', 'iam-group-unused', False)
        self.assertEqual(len(matched), 1)
class IamManagedPolicyUsage(BaseTest):
    """Tests for (no-)specific-managed-policy filters on iam-role."""

    def test_iam_role_has_specific_managed_policy(self):
        factory = self.replay_flight_data(
            'test_iam_role_has_specific_managed_policy')
        self.patch(
            SpecificIamRoleManagedPolicy, 'executor_factory',
            MainThreadExecutor)
        policy = self.load_policy(
            {'name': 'iam-role-with-specific-managed-policy',
             'resource': 'iam-role',
             'filters': [{'type': 'has-specific-managed-policy',
                          'value': 'TestForSpecificMP'}]},
            session_factory=factory)
        self.assertEqual(len(policy.run()), 1)

    def test_iam_role_no_specific_managed_policy(self):
        factory = self.replay_flight_data(
            'test_iam_role_no_specific_managed_policy')
        self.patch(
            NoSpecificIamRoleManagedPolicy, 'executor_factory',
            MainThreadExecutor)
        policy = self.load_policy(
            {'name': 'iam-role-no-specific-managed-policy',
             'resource': 'iam-role',
             'filters': [{'type': 'no-specific-managed-policy',
                          'value': 'DoesNotExistPolicy'}]},
            session_factory=factory)
        self.assertEqual(len(policy.run()), 7)
class IamInlinePolicyUsage(BaseTest):
    """Tests for the ``has-inline-policy`` filter on iam-role and iam-group."""

    def _run_inline_filter(self, flight_data, patched, resource, name, filters):
        # Shared driver: replay recorded data, run single-threaded.
        factory = self.replay_flight_data(flight_data)
        self.patch(patched, 'executor_factory', MainThreadExecutor)
        policy = self.load_policy(
            {'name': name,
             'resource': resource,
             'filters': filters},
            session_factory=factory)
        return policy.run()

    def test_iam_role_has_inline_policy(self):
        matched = self._run_inline_filter(
            'test_iam_role_has_inline_policy', IamRoleInlinePolicy,
            'iam-role', 'iam-role-with-inline-policy',
            [{'type': 'has-inline-policy', 'value': True}])
        self.assertEqual(len(matched), 1)

    def test_iam_role_no_inline_policy(self):
        matched = self._run_inline_filter(
            'test_iam_role_no_inline_policy', IamRoleInlinePolicy,
            'iam-role', 'iam-role-without-inline-policy',
            [{'type': 'has-inline-policy', 'value': False}])
        self.assertEqual(len(matched), 6)

    def test_iam_group_has_inline_policy(self):
        matched = self._run_inline_filter(
            'test_iam_group_has_inline_policy', IamGroupInlinePolicy,
            'iam-group', 'iam-group-with-inline-policy',
            [{'type': 'has-inline-policy', 'value': True}])
        self.assertEqual(len(matched), 1)

    def test_iam_group_has_inline_policy2(self):
        # Same check without an explicit value; matches the same group,
        # so the filter evidently defaults to value=True here.
        matched = self._run_inline_filter(
            'test_iam_group_has_inline_policy', IamGroupInlinePolicy,
            'iam-group', 'iam-group-with-inline-policy',
            [{'type': 'has-inline-policy'}])
        self.assertEqual(len(matched), 1)

    def test_iam_group_no_inline_policy(self):
        matched = self._run_inline_filter(
            'test_iam_group_no_inline_policy', IamGroupInlinePolicy,
            'iam-group', 'iam-group-without-inline-policy',
            [{'type': 'has-inline-policy', 'value': False}])
        self.assertEqual(len(matched), 2)
class KMSCrossAccount(BaseTest):
    """Test the cross-account filter against a KMS key whose key policy
    grants a statement to any principal ('Principal': '*')."""

    def test_kms_cross_account(self):
        self.patch(
            CrossAccountAccessFilter, 'executor_factory', MainThreadExecutor)
        session_factory = self.replay_flight_data('test_cross_account_kms')
        client = session_factory().client('kms')
        # Key policy with one account-scoped statement and one wide-open
        # statement; the latter should trip the cross-account filter.
        policy = {
            'Id': 'Lulu',
            'Version': '2012-10-17',
            'Statement': [
                {"Sid": "Enable IAM User Permissions",
                 "Effect": "Allow",
                 "Principal": {"AWS": "arn:aws:iam::644160558196:root"},
                 "Action": "kms:*",
                 "Resource": "*"},
                {"Sid": "Enable Cross Account",
                 "Effect": "Allow",
                 "Principal": "*",
                 "Action": "kms:Encrypt",
                 "Resource": "*"}]
        }
        key_info = client.create_key(
            Policy=json.dumps(policy),
            Description='test-cross-account-3')['KeyMetadata']
        # disable and schedule deletion
        # (cleanups run LIFO: the key is disabled first, then scheduled
        # for deletion with the minimum 7-day window)
        self.addCleanup(
            client.schedule_key_deletion,
            KeyId=key_info['KeyId'], PendingWindowInDays=7)
        self.addCleanup(client.disable_key, KeyId=key_info['KeyId'])
        p = self.load_policy(
            {'name': 'kms-cross',
             'resource': 'kms-key',
             'filters': [
                 {'KeyState': 'Enabled'},
                 'cross-account']},
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]['KeyId'], key_info['KeyId'])
class GlacierCrossAccount(BaseTest):
    """Cross-account filter should flag a Glacier vault whose access
    policy allows uploads from any principal."""

    def test_glacier_cross_account(self):
        self.patch(
            CrossAccountAccessFilter, 'executor_factory', MainThreadExecutor)
        session_factory = self.replay_flight_data('test_cross_account_glacier')
        client = session_factory().client('glacier')
        name = 'c7n-cross-check'
        url = client.create_vault(vaultName=name)['location']
        self.addCleanup(client.delete_vault, vaultName=name)
        # location looks like /<account-id>/vaults/<name>, so segment 1
        # is the account id — verify against the Glacier API if changed.
        account_id = url.split('/')[1]
        arn = "arn:aws:glacier:%s:%s:vaults/%s" % (
            os.environ.get('AWS_DEFAULT_REGION', 'us-east-1'),
            account_id, name)
        # World-writable vault policy — the condition the filter detects.
        policy = {
            'Id': 'Foo',
            "Version": "2012-10-17",
            'Statement': [
                {'Action': 'glacier:UploadArchive',
                 'Resource': arn,
                 'Effect': 'Allow',
                 'Principal': '*'}]}
        client.set_vault_access_policy(
            vaultName=name, policy={'Policy': json.dumps(policy)})
        p = self.load_policy(
            {'name': 'glacier-cross',
             'resource': 'glacier',
             'filters': ['cross-account']},
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]['VaultName'], name)
LAMBDA_SRC = """\
def handler(event, context):
return {'Success': True}
"""
class LambdaCrossAccount(BaseTest):
    """Cross-account filter should flag a lambda whose resource policy
    grants lambda:InvokeFunction to any principal."""

    # Execution role for the published test function.
    role = "arn:aws:iam::644160558196:role/lambda_basic_execution"

    def test_lambda_cross_account(self):
        self.patch(
            CrossAccountAccessFilter, 'executor_factory', MainThreadExecutor)
        session_factory = self.replay_flight_data('test_cross_account_lambda')
        client = session_factory().client('lambda')
        name = 'c7n-cross-check'
        tmp_dir = tempfile.mkdtemp()
        self.addCleanup(os.rmdir, tmp_dir)
        # Package the minimal handler into a deployable archive.
        archive = PythonPackageArchive()
        archive.add_contents('handler.py', LAMBDA_SRC)
        archive.close()
        func = LambdaFunction({
            'runtime': 'python2.7',
            'name': name, 'description': '',
            'handler': 'handler.handler',
            'memory_size': 128,
            'timeout': 5,
            'role': self.role}, archive)
        manager = LambdaManager(session_factory)
        # The publish() result is not needed here; the function just has
        # to exist so the permission below can be attached.
        manager.publish(func)
        self.addCleanup(manager.remove, func)
        # Grant invoke to the world — the condition the filter detects.
        client.add_permission(
            FunctionName=name,
            StatementId='oops',
            Principal='*',
            Action='lambda:InvokeFunction')
        p = self.load_policy(
            {'name': 'lambda-cross',
             'resource': 'lambda',
             'filters': ['cross-account']},
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]['FunctionName'], name)
class ECRCrossAccount(BaseTest):
    """Cross-account filter should flag an ECR repository whose policy
    allows image pulls from any principal."""

    def test_ecr_cross_account(self):
        session_factory = self.replay_flight_data('test_cross_account_ecr')
        client = session_factory().client('ecr')
        repo_name = 'c7n/cross-check'
        repo = client.create_repository(repositoryName=repo_name)['repository']
        self.addCleanup(client.delete_repository, repositoryName=repo_name)
        # Repository policies carry no 'Resource' key; the policy is
        # scoped to the repository it is set on.
        policy = {
            'Id': 'Foo',
            "Version": "2012-10-17",
            'Statement': [
                {'Action': 'ecr:BatchGetImage',
                 'Effect': 'Allow',
                 'Principal': '*'}]}
        client.set_repository_policy(
            repositoryName=repo_name, policyText=json.dumps(policy))
        p = self.load_policy(
            {'name': 'ecr-cross',
             'resource': 'ecr',
             'filters': ['cross-account']},
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]['repositoryName'], repo_name)
class SQSCrossAccount(BaseTest):
    """Cross-account filter should flag an SQS queue whose policy allows
    SendMessage from any principal."""

    def test_sqs_cross_account(self):
        session_factory = self.replay_flight_data('test_cross_account_sqs')
        client = session_factory().client('sqs')
        queue_name = 'c7n-cross-check'
        url = client.create_queue(QueueName=queue_name)['QueueUrl']
        self.addCleanup(client.delete_queue, QueueUrl=url)
        # Queue URL looks like https://<host>/<account-id>/<name>, so
        # segment 3 is the account id.
        account_id = url.split('/')[3]
        arn = "arn:aws:sqs:%s:%s:%s" % (
            os.environ.get('AWS_DEFAULT_REGION', 'us-east-1'),
            account_id, queue_name)
        # World-writable queue policy — the condition the filter detects.
        policy = {
            'Id': 'Foo',
            "Version": "2012-10-17",
            'Statement': [
                {'Action': 'SQS:SendMessage',
                 'Effect': 'Allow',
                 'Resource': arn,
                 'Principal': '*'}]}
        client.set_queue_attributes(
            QueueUrl=url, Attributes={'Policy': json.dumps(policy)})
        p = self.load_policy(
            {'name': 'sqs-cross',
             'resource': 'sqs',
             'filters': ['cross-account']},
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]['QueueUrl'], url)
class SNSCrossAccount(BaseTest):
    """Cross-account filter should flag an SNS topic whose policy allows
    Publish from any principal."""

    def test_sns_cross_account(self):
        self.patch(SNS, 'executor_factory', MainThreadExecutor)
        session_factory = self.replay_flight_data('test_cross_account_sns')
        client = session_factory().client('sns')
        topic_name = 'c7n-cross-check'
        arn = client.create_topic(Name=topic_name)['TopicArn']
        self.addCleanup(client.delete_topic, TopicArn=arn)
        # World-publishable topic policy — the condition the filter detects.
        policy = {
            'Id': 'Foo',
            "Version": "2012-10-17",
            'Statement': [
                {'Action': 'SNS:Publish',
                 'Effect': 'Allow',
                 'Resource': arn,
                 'Principal': '*'}]}
        client.set_topic_attributes(
            TopicArn=arn, AttributeName='Policy',
            AttributeValue=json.dumps(policy))
        p = self.load_policy(
            {'name': 'sns-cross',
             'resource': 'sns',
             'filters': ['cross-account']},
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]['TopicArn'], arn)
class CrossAccountChecker(TestCase):
    """Unit tests for the check_cross_account() policy analyzer."""

    def test_not_principal_allowed(self):
        # A Deny on '*' plus an Allow with NotPrincipal: the NotPrincipal
        # grant should still be reported as a cross-account violation.
        policy = {
            'Id': 'Foo',
            "Version": "2012-10-17",
            'Statement': [
                {'Action': 'SQS:ReceiveMessage',
                 'Effect': 'Deny',
                 'Principal': '*'},
                {'Action': 'SQS:SendMessage',
                 'Effect': 'Allow',
                 'NotPrincipal': '90120'}]}
        self.assertTrue(
            bool(check_cross_account(policy, set(['221800032964']))))

    def test_sqs_policies(self):
        # Table-driven check: expected violation flags are positionally
        # matched to the policies in iam/sqs-policies.json.
        policies = load_data('iam/sqs-policies.json')
        for p, expected in zip(
                policies, [False, True, True, False,
                           False, False, False, False]):
            violations = check_cross_account(p, set(['221800032964']))
            self.assertEqual(bool(violations), expected)
| apache-2.0 |
p4datasystems/CarnotKEdist | dist/Lib/encodings/gb2312.py | 816 | 1027 | #
# gb2312.py: Python Unicode Codec for GB2312
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_cn, codecs
import _multibytecodec as mbc
codec = _codecs_cn.getcodec('gb2312')
class Codec(codecs.Codec):
    # Stateless encode/decode, delegated to the C-level gb2312 codec.
    encode = codec.encode
    decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    # Incremental (stateful) encoder; the multibyte base class does the work.
    codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    # Incremental (stateful) decoder; the multibyte base class does the work.
    codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    # Stream-oriented reader wrapper around the gb2312 codec.
    codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    # Stream-oriented writer wrapper around the gb2312 codec.
    codec = codec
def getregentry():
    # Entry point used by the codecs registry (encodings package) to
    # look up the 'gb2312' codec.
    return codecs.CodecInfo(
        name='gb2312',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
| apache-2.0 |
FabriceSalvaire/sphinx-microdata | setup.py | 1 | 1052 | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages

# Runtime requirement: any Sphinx from 0.6 upward.
requires = ['Sphinx>=0.6']

# Packaging metadata for the sphinxcontrib-microdata Sphinx extension.
# NOTE(review): license says 'LGPL v3' but the trove classifier says
# 'BSD License' — confirm which is intended.
setup(
    name='sphinxcontrib-microdata',
    version='0.1',
    url='http://bitbucket.org/birkenfeld/sphinx-contrib',
    download_url='http://pypi.python.org/pypi/sphinxcontrib-microdata',
    license='LGPL v3',
    author='Fabrice Salvaire',
    author_email='fabrice.salvaire@orange.fr',
    description='Sphinx "microdata" extension',
    long_description=open('README.rst').read(),
    zip_safe=False,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Documentation',
        'Topic :: Utilities',
    ],
    platforms='any',
    packages=find_packages(),
    include_package_data=True,
    install_requires=requires,
    namespace_packages=['sphinxcontrib'],
)
| lgpl-3.0 |
srluge/SickRage | lib/rarfile/__init__.py | 22 | 58060 | # rarfile.py
#
# Copyright (c) 2005-2014 Marko Kreen <markokr@gmail.com>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
r"""RAR archive reader.
This is a Python module for reading RAR archives. The interface
is made as :mod:`zipfile`-like as possible.
Basic logic:
- Parse archive structure with Python.
- Extract non-compressed files with Python
- Extract compressed files with unrar.
- Optionally write compressed data to temp file to speed up unrar,
otherwise it needs to scan whole archive on each execution.
Example::
import rarfile
rf = rarfile.RarFile('myarchive.rar')
for f in rf.infolist():
print f.filename, f.file_size
if f.filename == 'README':
print(rf.read(f))
Archive files can also be accessed via file-like object returned
by :meth:`RarFile.open`::
import rarfile
with rarfile.RarFile('archive.rar') as rf:
with rf.open('README') as f:
for ln in f:
print(ln.strip())
There are a few module-level parameters to tune behaviour,
here they are with defaults, and reason to change it::
import rarfile
# Set to full path of unrar.exe if it is not in PATH
rarfile.UNRAR_TOOL = "unrar"
# Set to 0 if you don't look at comments and want to
# avoid wasting time for parsing them
rarfile.NEED_COMMENTS = 1
# Set up to 1 if you don't want to deal with decoding comments
# from unknown encoding. rarfile will try couple of common
# encodings in sequence.
rarfile.UNICODE_COMMENTS = 0
# Set to 1 if you prefer timestamps to be datetime objects
# instead tuples
rarfile.USE_DATETIME = 0
# Set to '/' to be more compatible with zipfile
rarfile.PATH_SEP = '\\'
For more details, refer to source.
"""
__version__ = '2.7'
# export only interesting items
__all__ = ['is_rarfile', 'RarInfo', 'RarFile', 'RarExtFile']
##
## Imports and compat - support both Python 2.x and 3.x
##
import sys, os, struct, errno
from struct import pack, unpack
from binascii import crc32
from tempfile import mkstemp
from subprocess import Popen, PIPE, STDOUT
from datetime import datetime
# only needed for encrypted headers
try:
from Crypto.Cipher import AES
try:
from hashlib import sha1
except ImportError:
from sha import new as sha1
_have_crypto = 1
except ImportError:
_have_crypto = 0
# compat with 2.x: alias names so the rest of the module can be written
# against Python 3 semantics.
if sys.hexversion < 0x3000000:
    # prefer 3.x behaviour
    range = xrange
    # py2.6 has broken bytes(); this shim ignores the encoding argument
    # and returns the str unchanged (str is already bytes on 2.x).
    def bytes(s, enc):
        return str(s)
else:
    unicode = str
# see if compat bytearray() is needed (pre-2.6 interpreters); the shim
# supports only the operations this module uses: append, indexing,
# len() and decode().
try:
    bytearray
except NameError:
    import array
    class bytearray:
        def __init__(self, val = ''):
            self.arr = array.array('B', val)
            self.append = self.arr.append
            self.__getitem__ = self.arr.__getitem__
            self.__len__ = self.arr.__len__
        def decode(self, *args):
            return self.arr.tostring().decode(*args)
# Optimized .readinto() requires memoryview; feature-detect it once so
# the reader classes can branch on have_memoryview.
try:
    memoryview
    have_memoryview = 1
except NameError:
    have_memoryview = 0
# Struct() for older python: minimal stand-in providing the same
# unpack/unpack_from/pack surface as struct.Struct.
try:
    from struct import Struct
except ImportError:
    class Struct:
        def __init__(self, fmt):
            self.format = fmt
            self.size = struct.calcsize(fmt)
        def unpack(self, buf):
            return unpack(self.format, buf)
        def unpack_from(self, buf, ofs = 0):
            return unpack(self.format, buf[ofs : ofs + self.size])
        def pack(self, *args):
            return pack(self.format, *args)
# file object superclass: fall back to a no-op base when the io module
# is unavailable (very old interpreters).
try:
    from io import RawIOBase
except ImportError:
    class RawIOBase(object):
        def close(self):
            pass
##
## Module configuration. Can be tuned after importing.
##
#: default fallback charset
DEFAULT_CHARSET = "windows-1252"
#: list of encodings to try, with fallback to DEFAULT_CHARSET if none succeed
TRY_ENCODINGS = ('utf8', 'utf-16le')
#: 'unrar', 'rar' or full path to either one
UNRAR_TOOL = "unrar"
#: Command line args to use for opening file for reading.
OPEN_ARGS = ('p', '-inul')
#: Command line args to use for extracting file to disk.
EXTRACT_ARGS = ('x', '-y', '-idq')
#: args for testrar()
TEST_ARGS = ('t', '-idq')
#
# Allow use of tool that is not compatible with unrar.
#
# By default use 'bsdtar' which is 'tar' program that
# sits on top of libarchive.
#
# Problems with libarchive RAR backend:
# - Does not support solid archives.
# - Does not support password-protected archives.
#
ALT_TOOL = 'bsdtar'
ALT_OPEN_ARGS = ('-x', '--to-stdout', '-f')
ALT_EXTRACT_ARGS = ('-x', '-f')
ALT_TEST_ARGS = ('-t', '-f')
ALT_CHECK_ARGS = ('--help',)
#: whether to speed up decompression by using tmp archive
USE_EXTRACT_HACK = 1
#: limit the filesize for tmp archive usage
HACK_SIZE_LIMIT = 20*1024*1024
#: whether to parse file/archive comments.
NEED_COMMENTS = 1
#: whether to convert comments to unicode strings
UNICODE_COMMENTS = 0
#: Convert RAR time tuple into datetime() object
USE_DATETIME = 0
#: Separator for path name components. RAR internally uses '\\'.
#: Use '/' to be similar with zipfile.
PATH_SEP = '\\'
##
## rar constants
##
# block types
RAR_BLOCK_MARK = 0x72 # r
RAR_BLOCK_MAIN = 0x73 # s
RAR_BLOCK_FILE = 0x74 # t
RAR_BLOCK_OLD_COMMENT = 0x75 # u
RAR_BLOCK_OLD_EXTRA = 0x76 # v
RAR_BLOCK_OLD_SUB = 0x77 # w
RAR_BLOCK_OLD_RECOVERY = 0x78 # x
RAR_BLOCK_OLD_AUTH = 0x79 # y
RAR_BLOCK_SUB = 0x7a # z
RAR_BLOCK_ENDARC = 0x7b # {
# flags for RAR_BLOCK_MAIN
RAR_MAIN_VOLUME = 0x0001
RAR_MAIN_COMMENT = 0x0002
RAR_MAIN_LOCK = 0x0004
RAR_MAIN_SOLID = 0x0008
RAR_MAIN_NEWNUMBERING = 0x0010
RAR_MAIN_AUTH = 0x0020
RAR_MAIN_RECOVERY = 0x0040
RAR_MAIN_PASSWORD = 0x0080
RAR_MAIN_FIRSTVOLUME = 0x0100
RAR_MAIN_ENCRYPTVER = 0x0200
# flags for RAR_BLOCK_FILE
RAR_FILE_SPLIT_BEFORE = 0x0001
RAR_FILE_SPLIT_AFTER = 0x0002
RAR_FILE_PASSWORD = 0x0004
RAR_FILE_COMMENT = 0x0008
RAR_FILE_SOLID = 0x0010
RAR_FILE_DICTMASK = 0x00e0
RAR_FILE_DICT64 = 0x0000
RAR_FILE_DICT128 = 0x0020
RAR_FILE_DICT256 = 0x0040
RAR_FILE_DICT512 = 0x0060
RAR_FILE_DICT1024 = 0x0080
RAR_FILE_DICT2048 = 0x00a0
RAR_FILE_DICT4096 = 0x00c0
RAR_FILE_DIRECTORY = 0x00e0
RAR_FILE_LARGE = 0x0100
RAR_FILE_UNICODE = 0x0200
RAR_FILE_SALT = 0x0400
RAR_FILE_VERSION = 0x0800
RAR_FILE_EXTTIME = 0x1000
RAR_FILE_EXTFLAGS = 0x2000
# flags for RAR_BLOCK_ENDARC
RAR_ENDARC_NEXT_VOLUME = 0x0001
RAR_ENDARC_DATACRC = 0x0002
RAR_ENDARC_REVSPACE = 0x0004
RAR_ENDARC_VOLNR = 0x0008
# flags common to all blocks
RAR_SKIP_IF_UNKNOWN = 0x4000
RAR_LONG_BLOCK = 0x8000
# Host OS types
RAR_OS_MSDOS = 0
RAR_OS_OS2 = 1
RAR_OS_WIN32 = 2
RAR_OS_UNIX = 3
RAR_OS_MACOS = 4
RAR_OS_BEOS = 5
# Compression methods - '0'..'5'
RAR_M0 = 0x30
RAR_M1 = 0x31
RAR_M2 = 0x32
RAR_M3 = 0x33
RAR_M4 = 0x34
RAR_M5 = 0x35
##
## internal constants
##
RAR_ID = bytes("Rar!\x1a\x07\x00", 'ascii')
ZERO = bytes("\0", 'ascii')
EMPTY = bytes("", 'ascii')
S_BLK_HDR = Struct('<HBHH')
S_FILE_HDR = Struct('<LLBLLBBHL')
S_LONG = Struct('<L')
S_SHORT = Struct('<H')
S_BYTE = Struct('<B')
S_COMMENT_HDR = Struct('<HBBH')
##
## Public interface
##
# Exception hierarchy: Error is the package base; RarExecError and its
# subclasses report failures from the external unrar/rar tool.
class Error(Exception):
    """Base class for rarfile errors."""
class BadRarFile(Error):
    """Incorrect data in archive."""
class NotRarFile(Error):
    """The file is not RAR archive."""
class BadRarName(Error):
    """Cannot guess multipart name components."""
class NoRarEntry(Error):
    """File not found in RAR"""
class PasswordRequired(Error):
    """File requires password"""
class NeedFirstVolume(Error):
    """Need to start from first volume."""
class NoCrypto(Error):
    """Cannot parse encrypted headers - no crypto available."""
class RarExecError(Error):
    """Problem reported by unrar/rar."""
class RarWarning(RarExecError):
    """Non-fatal error"""
class RarFatalError(RarExecError):
    """Fatal error"""
class RarCRCError(RarExecError):
    """CRC error during unpacking"""
class RarLockedArchiveError(RarExecError):
    """Must not modify locked archive"""
class RarWriteError(RarExecError):
    """Write error"""
class RarOpenError(RarExecError):
    """Open error"""
class RarUserError(RarExecError):
    """User error"""
class RarMemoryError(RarExecError):
    """Memory error"""
class RarCreateError(RarExecError):
    """Create error"""
class RarNoFilesError(RarExecError):
    """No files that match pattern were found"""
class RarUserBreak(RarExecError):
    """User stop"""
class RarUnknownError(RarExecError):
    """Unknown exit code"""
class RarSignalExit(RarExecError):
    """Unrar exited with signal"""
class RarCannotExec(RarExecError):
    """Executable not found."""
def is_rarfile(xfile):
    '''Check quickly whether file is rar archive.

    Accepts a file name or file-like object (anything XFile accepts)
    and compares the leading bytes against the RAR marker.
    '''
    fd = XFile(xfile)
    try:
        # read exactly the marker length; compare against RAR_ID
        buf = fd.read(len(RAR_ID))
    finally:
        # previously the descriptor leaked when read() raised
        fd.close()
    return buf == RAR_ID
class RarInfo(object):
    r'''An entry in rar archive.

    :mod:`zipfile`-compatible fields:

        filename
            File name with relative path.
            Default path separator is '\\', to change set rarfile.PATH_SEP.
            Always unicode string.
        date_time
            Modification time, tuple of (year, month, day, hour, minute, second).
            Or datetime() object if USE_DATETIME is set.
        file_size
            Uncompressed size.
        compress_size
            Compressed size.
        CRC
            CRC-32 of uncompressed file, unsigned int.
        comment
            File comment.  Byte string or None.  Use UNICODE_COMMENTS
            to get automatic decoding to unicode.
        volume
            Volume nr, starting from 0.

    RAR-specific fields:

        compress_type
            Compression method: 0x30 - 0x35.
        extract_version
            Minimal Rar version needed for decompressing.
        host_os
            Host OS type, one of RAR_OS_* constants.
        mode
            File attributes. May be either dos-style or unix-style, depending on host_os.
        volume_file
            Volume file name, where file starts.
        mtime
            Optional time field: Modification time, with float seconds.
            Same as .date_time but with more precision.
        ctime
            Optional time field: creation time, with float seconds.
        atime
            Optional time field: last access time, with float seconds.
        arctime
            Optional time field: archival time, with float seconds.

    Internal fields:

        type
            One of RAR_BLOCK_* types.  Only entries with type==RAR_BLOCK_FILE are shown in .infolist().
        flags
            For files, RAR_FILE_* bits.
    '''

    # __slots__ keeps per-entry memory low; archives can have many entries.
    __slots__ = (
        # zipfile-compatible fields
        'filename',
        'file_size',
        'compress_size',
        'date_time',
        'comment',
        'CRC',
        'volume',
        'orig_filename', # bytes in unknown encoding
        # rar-specific fields
        'extract_version',
        'compress_type',
        'host_os',
        'mode',
        'type',
        'flags',
        # optional extended time fields
        # tuple where the sec is float, or datetime().
        'mtime', # same as .date_time
        'ctime',
        'atime',
        'arctime',
        # RAR internals
        'name_size',
        'header_size',
        'header_crc',
        'file_offset',
        'add_size',
        'header_data',
        'header_base',
        'header_offset',
        'salt',
        'volume_file',
    )

    def isdir(self):
        '''Returns True if the entry is a directory.'''
        # Directory entries are file blocks with all dictionary bits set.
        if self.type == RAR_BLOCK_FILE:
            return (self.flags & RAR_FILE_DIRECTORY) == RAR_FILE_DIRECTORY
        return False

    def needs_password(self):
        # Truthy when the entry's data is encrypted.
        return self.flags & RAR_FILE_PASSWORD
class RarFile(object):
'''Parse RAR structure, provide access to files in archive.
'''
#: Archive comment. Byte string or None. Use :data:`UNICODE_COMMENTS`
#: to get automatic decoding to unicode.
comment = None
def __init__(self, rarfile, mode="r", charset=None, info_callback=None,
crc_check = True, errors = "stop"):
"""Open and parse a RAR archive.
Parameters:
rarfile
archive file name
mode
only 'r' is supported.
charset
fallback charset to use, if filenames are not already Unicode-enabled.
info_callback
debug callback, gets to see all archive entries.
crc_check
set to False to disable CRC checks
errors
Either "stop" to quietly stop parsing on errors,
or "strict" to raise errors. Default is "stop".
"""
self.rarfile = rarfile
self.comment = None
self._charset = charset or DEFAULT_CHARSET
self._info_callback = info_callback
self._info_list = []
self._info_map = {}
self._needs_password = False
self._password = None
self._crc_check = crc_check
self._vol_list = []
if errors == "stop":
self._strict = False
elif errors == "strict":
self._strict = True
else:
raise ValueError("Invalid value for 'errors' parameter.")
self._main = None
if mode != "r":
raise NotImplementedError("RarFile supports only mode=r")
self._parse()
    def __enter__(self):
        # context-manager support: `with RarFile(...) as rf:`
        return self

    def __exit__(self, type, value, traceback):
        # always release resources; exceptions are not suppressed
        self.close()
    def setpassword(self, password):
        '''Sets the password to use when extracting.'''
        self._password = password
        # if header parsing was skipped earlier for lack of a password,
        # retry now that one is available
        if not self._main:
            self._parse()
    def needs_password(self):
        '''Returns True if any archive entries require password for extraction.'''
        return self._needs_password

    def namelist(self):
        '''Return list of filenames in archive.'''
        return [f.filename for f in self._info_list]

    def infolist(self):
        '''Return RarInfo objects for all files/directories in archive.'''
        # note: returns the internal list itself, not a copy
        return self._info_list

    def volumelist(self):
        '''Returns filenames of archive volumes.

        In case of single-volume archive, the list contains
        just the name of main archive file.
        '''
        return self._vol_list
def getinfo(self, fname):
'''Return RarInfo for file.'''
if isinstance(fname, RarInfo):
return fname
# accept both ways here
if PATH_SEP == '/':
fname2 = fname.replace("\\", "/")
else:
fname2 = fname.replace("/", "\\")
try:
return self._info_map[fname]
except KeyError:
try:
return self._info_map[fname2]
except KeyError:
raise NoRarEntry("No such file: "+fname)
    def open(self, fname, mode = 'r', psw = None):
        '''Returns file-like object (:class:`RarExtFile`),
        from where the data can be read.

        The object implements :class:`io.RawIOBase` interface, so it can
        be further wrapped with :class:`io.BufferedReader`
        and :class:`io.TextIOWrapper`.

        On older Python where io module is not available, it implements
        only .read(), .seek(), .tell() and .close() methods.

        The object is seekable, although the seeking is fast only on
        uncompressed files, on compressed files the seeking is implemented
        by reading ahead and/or restarting the decompression.

        Parameters:

            fname
                file name or RarInfo instance.
            mode
                must be 'r'
            psw
                password to use for extracting.
        '''
        if mode != 'r':
            raise NotImplementedError("RarFile.open() supports only mode=r")

        # entry lookup
        inf = self.getinfo(fname)
        if inf.isdir():
            raise TypeError("Directory does not have any data: " + inf.filename)
        if inf.flags & RAR_FILE_SPLIT_BEFORE:
            raise NeedFirstVolume("Partial file, please start from first volume: " + inf.filename)

        # check password
        if inf.needs_password():
            psw = psw or self._password
            if psw is None:
                raise PasswordRequired("File %s requires password" % inf.filename)
        else:
            psw = None

        # is temp write usable?  (the "hack" extracts via a single-file
        # temp archive, see USE_EXTRACT_HACK; it is unsafe for solid or
        # encrypted archives and for entries split across volumes)
        use_hack = 1
        if not self._main:
            use_hack = 0
        elif self._main.flags & (RAR_MAIN_SOLID | RAR_MAIN_PASSWORD):
            use_hack = 0
        elif inf.flags & (RAR_FILE_SPLIT_BEFORE | RAR_FILE_SPLIT_AFTER):
            use_hack = 0
        elif is_filelike(self.rarfile):
            pass
        elif inf.file_size > HACK_SIZE_LIMIT:
            use_hack = 0
        elif not USE_EXTRACT_HACK:
            use_hack = 0

        # now extract: stored (M0) unencrypted data is read directly,
        # otherwise go through unrar (with or without the temp archive)
        if inf.compress_type == RAR_M0 and (inf.flags & RAR_FILE_PASSWORD) == 0:
            return self._open_clear(inf)
        elif use_hack:
            return self._open_hack(inf, psw)
        else:
            return self._open_unrar(self.rarfile, inf, psw)
def read(self, fname, psw = None):
"""Return uncompressed data for archive entry.
For longer files using :meth:`RarFile.open` may be better idea.
Parameters:
fname
filename or RarInfo instance
psw
password to use for extracting.
"""
f = self.open(fname, 'r', psw)
try:
return f.read()
finally:
f.close()
    def close(self):
        """Release open resources."""
        # nothing held open between calls; parsing closes its own fd
        pass

    def printdir(self):
        """Print archive file list to stdout."""
        for f in self._info_list:
            print(f.filename)
def extract(self, member, path=None, pwd=None):
"""Extract single file into current directory.
Parameters:
member
filename or :class:`RarInfo` instance
path
optional destination path
pwd
optional password to use
"""
if isinstance(member, RarInfo):
fname = member.filename
else:
fname = member
self._extract([fname], path, pwd)
def extractall(self, path=None, members=None, pwd=None):
"""Extract all files into current directory.
Parameters:
path
optional destination path
members
optional filename or :class:`RarInfo` instance list to extract
pwd
optional password to use
"""
fnlist = []
if members is not None:
for m in members:
if isinstance(m, RarInfo):
fnlist.append(m.filename)
else:
fnlist.append(m)
self._extract(fnlist, path, pwd)
    def testrar(self):
        """Let 'unrar' test the archive.

        Raises a RarExecError subclass if the tool reports a problem.
        """
        cmd = [UNRAR_TOOL] + list(TEST_ARGS)
        add_password_arg(cmd, self._password)
        cmd.append(self.rarfile)
        p = custom_popen(cmd)
        output = p.communicate()[0]
        # maps nonzero exit codes to RarExecError subclasses
        check_returncode(p, output)
    def strerror(self):
        """Return error string if parsing failed,
        or None if no problems.
        """
        # NOTE(review): _parse_error is only assigned in _set_error();
        # if it is never initialized elsewhere, this raises
        # AttributeError after a clean parse - verify.
        return self._parse_error

    ##
    ## private methods
    ##

    def _set_error(self, msg, *args):
        # record (and optionally raise, in strict mode) a parse error
        if args:
            msg = msg % args
        self._parse_error = msg
        if self._strict:
            raise BadRarFile(msg)
    # store entry parsed from a header block into the archive listing
    def _process_entry(self, item):
        if item.type == RAR_BLOCK_FILE:
            # use only first part
            if (item.flags & RAR_FILE_SPLIT_BEFORE) == 0:
                self._info_map[item.filename] = item
                self._info_list.append(item)
                # remember if any items require password
                if item.needs_password():
                    self._needs_password = True
            elif len(self._info_list) > 0:
                # continuation part of a split file: final crc is in last
                # block, and compressed size accumulates across volumes
                old = self._info_list[-1]
                old.CRC = item.CRC
                old.compress_size += item.compress_size

        # parse new-style comment (stored as a 'CMT' subblock)
        if item.type == RAR_BLOCK_SUB and item.filename == 'CMT':
            if not NEED_COMMENTS:
                pass
            elif item.flags & (RAR_FILE_SPLIT_BEFORE | RAR_FILE_SPLIT_AFTER):
                # split comment blocks are not supported
                pass
            elif item.flags & RAR_FILE_SOLID:
                # file comment - attach to the preceding file entry
                cmt = self._read_comment_v3(item, self._password)
                if len(self._info_list) > 0:
                    old = self._info_list[-1]
                    old.comment = cmt
            else:
                # archive comment
                cmt = self._read_comment_v3(item, self._password)
                self.comment = cmt

        if self._info_callback:
            self._info_callback(item)
    # read rar: wrapper that guarantees the scan fd is closed
    def _parse(self):
        self._fd = None
        try:
            self._parse_real()
        finally:
            if self._fd:
                self._fd.close()
                self._fd = None
    def _parse_real(self):
        """Scan block headers across all volumes, collecting entries."""
        fd = XFile(self.rarfile)
        self._fd = fd
        id = fd.read(len(RAR_ID))
        if id != RAR_ID:
            raise NotRarFile("Not a Rar archive: "+self.rarfile)

        volume = 0  # first vol (.rar) is 0
        more_vols = 0
        endarc = 0
        volfile = self.rarfile
        self._vol_list = [self.rarfile]
        while 1:
            if endarc:
                h = None    # don't read past ENDARC
            else:
                h = self._parse_header(fd)
            if not h:
                # end of current volume - advance to the next one if the
                # headers promised more
                if more_vols:
                    volume += 1
                    fd.close()
                    try:
                        volfile = self._next_volname(volfile)
                        fd = XFile(volfile)
                    except IOError:
                        self._set_error("Cannot open next volume: %s", volfile)
                        break
                    self._fd = fd
                    more_vols = 0
                    endarc = 0
                    self._vol_list.append(volfile)
                    continue
                break
            h.volume = volume
            h.volume_file = volfile

            if h.type == RAR_BLOCK_MAIN and not self._main:
                self._main = h
                if h.flags & RAR_MAIN_NEWNUMBERING:
                    # RAR 2.x does not set FIRSTVOLUME,
                    # so check it only if NEWNUMBERING is used
                    if (h.flags & RAR_MAIN_FIRSTVOLUME) == 0:
                        raise NeedFirstVolume("Need to start from first volume")
                if h.flags & RAR_MAIN_PASSWORD:
                    self._needs_password = True
                    if not self._password:
                        # cannot decrypt headers yet; setpassword() retries
                        self._main = None
                        break
            elif h.type == RAR_BLOCK_ENDARC:
                more_vols = h.flags & RAR_ENDARC_NEXT_VOLUME
                endarc = 1
            elif h.type == RAR_BLOCK_FILE:
                # RAR 2.x does not write RAR_BLOCK_ENDARC
                if h.flags & RAR_FILE_SPLIT_AFTER:
                    more_vols = 1
                # RAR 2.x does not set RAR_MAIN_FIRSTVOLUME
                if volume == 0 and h.flags & RAR_FILE_SPLIT_BEFORE:
                    raise NeedFirstVolume("Need to start from first volume")

            # store it
            self._process_entry(h)

            # go to next header (skip over the block's data area)
            if h.add_size > 0:
                fd.seek(h.file_offset + h.add_size, 0)
    # AES encrypted headers
    # cache of last derived key, since key derivation is expensive
    _last_aes_key = (None, None, None) # (salt, key, iv)
    def _decrypt_header(self, fd):
        if not _have_crypto:
            raise NoCrypto('Cannot parse encrypted headers - no crypto')
        salt = fd.read(8)
        if self._last_aes_key[0] == salt:
            key, iv = self._last_aes_key[1:]
        else:
            key, iv = rar3_s2k(self._password, salt)
            self._last_aes_key = (salt, key, iv)
        # wrap the fd so subsequent reads are transparently decrypted
        return HeaderDecrypt(fd, key, iv)
    # read single header; returns None on EOF or broken data
    def _parse_header(self, fd):
        try:
            # handle encrypted headers
            if self._main and self._main.flags & RAR_MAIN_PASSWORD:
                if not self._password:
                    return
                fd = self._decrypt_header(fd)

            # now read actual header
            return self._parse_block_header(fd)
        except struct.error:
            # truncated data makes the Struct unpackers raise
            self._set_error('Broken header in RAR file')
            return None
    # common header: parse one block header, dispatching on block type
    def _parse_block_header(self, fd):
        h = RarInfo()
        h.header_offset = fd.tell()
        h.comment = None

        # read and parse base header
        buf = fd.read(S_BLK_HDR.size)
        if not buf:
            return None
        t = S_BLK_HDR.unpack_from(buf)
        h.header_crc, h.type, h.flags, h.header_size = t
        h.header_base = S_BLK_HDR.size
        pos = S_BLK_HDR.size

        # read full header
        if h.header_size > S_BLK_HDR.size:
            h.header_data = buf + fd.read(h.header_size - S_BLK_HDR.size)
        else:
            h.header_data = buf
        h.file_offset = fd.tell()

        # unexpected EOF?
        if len(h.header_data) != h.header_size:
            self._set_error('Unexpected EOF when reading header')
            return None

        # block has data associated with it?
        if h.flags & RAR_LONG_BLOCK:
            h.add_size = S_LONG.unpack_from(h.header_data, pos)[0]
        else:
            h.add_size = 0

        # parse interesting ones, decide header boundaries for crc
        if h.type == RAR_BLOCK_MARK:
            return h
        elif h.type == RAR_BLOCK_MAIN:
            h.header_base += 6
            if h.flags & RAR_MAIN_ENCRYPTVER:
                h.header_base += 1
            if h.flags & RAR_MAIN_COMMENT:
                self._parse_subblocks(h, h.header_base)
                self.comment = h.comment
        elif h.type == RAR_BLOCK_FILE:
            self._parse_file_header(h, pos)
        elif h.type == RAR_BLOCK_SUB:
            self._parse_file_header(h, pos)
            h.header_base = h.header_size
        elif h.type == RAR_BLOCK_OLD_AUTH:
            h.header_base += 8
        elif h.type == RAR_BLOCK_OLD_EXTRA:
            h.header_base += 7
        else:
            h.header_base = h.header_size

        # check crc (old-style subblocks include the data area)
        if h.type == RAR_BLOCK_OLD_SUB:
            crcdat = h.header_data[2:] + fd.read(h.add_size)
        else:
            crcdat = h.header_data[2:h.header_base]
        # header CRC is the low 16 bits of CRC-32 over the header bytes
        calc_crc = crc32(crcdat) & 0xFFFF

        # return good header
        if h.header_crc == calc_crc:
            return h

        # header parsing failed.
        self._set_error('Header CRC error (%02x): exp=%x got=%x (xlen = %d)',
                h.type, h.header_crc, calc_crc, len(crcdat))

        # instead of panicking, signal EOF
        return None
    # read file-specific header fields that follow the common header
    def _parse_file_header(self, h, pos):
        fld = S_FILE_HDR.unpack_from(h.header_data, pos)
        h.compress_size = fld[0]
        h.file_size = fld[1]
        h.host_os = fld[2]
        h.CRC = fld[3]
        h.date_time = parse_dos_time(fld[4])
        h.extract_version = fld[5]
        h.compress_type = fld[6]
        h.name_size = fld[7]
        h.mode = fld[8]
        pos += S_FILE_HDR.size
        if h.flags & RAR_FILE_LARGE:
            # 64-bit sizes: high 32 bits follow the fixed header
            h1 = S_LONG.unpack_from(h.header_data, pos)[0]
            h2 = S_LONG.unpack_from(h.header_data, pos + 4)[0]
            h.compress_size |= h1 << 32
            h.file_size |= h2 << 32
            pos += 8
            # add_size was parsed as 32-bit; refresh it from the full value
            h.add_size = h.compress_size

        name = h.header_data[pos : pos + h.name_size ]
        pos += h.name_size
        if h.flags & RAR_FILE_UNICODE:
            # name field holds "<8bit name> NUL <encoded unicode name>"
            nul = name.find(ZERO)
            h.orig_filename = name[:nul]
            u = UnicodeFilename(h.orig_filename, name[nul + 1 : ])
            h.filename = u.decode()

            # if parsing failed fall back to simple name
            if u.failed:
                h.filename = self._decode(h.orig_filename)
        else:
            h.orig_filename = name
            h.filename = self._decode(name)

        # change separator, if requested
        if PATH_SEP != '\\':
            h.filename = h.filename.replace('\\', PATH_SEP)

        if h.flags & RAR_FILE_SALT:
            h.salt = h.header_data[pos : pos + 8]
            pos += 8
        else:
            h.salt = None

        # optional extended time stamps
        if h.flags & RAR_FILE_EXTTIME:
            pos = self._parse_ext_time(h, pos)
        else:
            h.mtime = h.atime = h.ctime = h.arctime = None

        # base header end
        h.header_base = pos

        if h.flags & RAR_FILE_COMMENT:
            self._parse_subblocks(h, pos)

        # convert timestamps to datetime objects when configured
        if USE_DATETIME:
            h.date_time = to_datetime(h.date_time)
            h.mtime = to_datetime(h.mtime)
            h.atime = to_datetime(h.atime)
            h.ctime = to_datetime(h.ctime)
            h.arctime = to_datetime(h.arctime)

        # .mtime is .date_time with more precision
        if h.mtime:
            if USE_DATETIME:
                h.date_time = h.mtime
            else:
                # keep seconds int
                h.date_time = h.mtime[:5] + (int(h.mtime[5]),)

        return pos
    # find old-style comment subblock inside the remaining header bytes
    def _parse_subblocks(self, h, pos):
        hdata = h.header_data
        while pos < len(hdata):
            # ordinary block header
            t = S_BLK_HDR.unpack_from(hdata, pos)
            scrc, stype, sflags, slen = t
            pos_next = pos + slen
            pos += S_BLK_HDR.size

            # corrupt header (length would move backwards)
            if pos_next < pos:
                break

            # followed by block-specific header
            if stype == RAR_BLOCK_OLD_COMMENT and pos + S_COMMENT_HDR.size <= pos_next:
                declen, ver, meth, crc = S_COMMENT_HDR.unpack_from(hdata, pos)
                pos += S_COMMENT_HDR.size
                data = hdata[pos : pos_next]
                cmt = rar_decompress(ver, meth, data, declen, sflags,
                                     crc, self._password)
                # accept the comment only if CRC matches (or checks are off)
                if not self._crc_check:
                    h.comment = self._decode_comment(cmt)
                elif crc32(cmt) & 0xFFFF == crc:
                    h.comment = self._decode_comment(cmt)

            pos = pos_next
def _parse_ext_time(self, h, pos):
    """Parse optional extended (high-precision) time stamps.

    Returns the offset past the timestamp data.  Sets h.mtime,
    h.ctime, h.atime and h.arctime (each may stay None).
    """
    data = h.header_data
    # flags and rest of data can be missing
    flags = 0
    if pos + 2 <= len(data):
        flags = S_SHORT.unpack_from(data, pos)[0]
        pos += 2
    # one 4-bit flag nibble per time stamp, highest nibble first
    h.mtime, pos = self._parse_xtime(flags >> 3*4, data, pos, h.date_time)
    h.ctime, pos = self._parse_xtime(flags >> 2*4, data, pos)
    h.atime, pos = self._parse_xtime(flags >> 1*4, data, pos)
    h.arctime, pos = self._parse_xtime(flags >> 0*4, data, pos)
    return pos
def _parse_xtime(self, flag, data, pos, dostime = None):
    """Parse one extended time stamp.

    Flag nibble: bit3 = stamp present, bit2 = add one second,
    bits 0-1 = number of extra sub-second precision bytes.
    Returns (time-tuple or None, new offset).
    """
    unit = 10000000.0 # 100 ns units
    if flag & 8:
        # base DOS time is stored here only when not supplied by caller
        if not dostime:
            t = S_LONG.unpack_from(data, pos)[0]
            dostime = parse_dos_time(t)
            pos += 4
        rem = 0
        cnt = flag & 3
        # accumulate precision bytes, low byte first, into 100ns remainder
        for i in range(cnt):
            b = S_BYTE.unpack_from(data, pos)[0]
            rem = (b << 16) | (rem >> 8)
            pos += 1
        sec = dostime[5] + rem / unit
        if flag & 4:
            sec += 1
        # replace integer seconds with the high-precision float value
        dostime = dostime[:5] + (sec,)
    return dostime, pos
# given current vol name, construct next one
def _next_volname(self, volfile):
    """Return the file name of the volume following *volfile*."""
    # a bare file object cannot span multiple volumes
    if is_filelike(volfile):
        raise IOError("Working on single FD")
    use_new_scheme = bool(self._main.flags & RAR_MAIN_NEWNUMBERING)
    if use_new_scheme:
        return self._next_newvol(volfile)
    else:
        return self._next_oldvol(volfile)
# new-style next volume
def _next_newvol(self, volfile):
    """New-style naming: increment the right-most digit group."""
    for idx in range(len(volfile) - 1, -1, -1):
        if '0' <= volfile[idx] <= '9':
            return self._inc_volname(volfile, idx)
    raise BadRarName("Cannot construct volume name: "+volfile)
# old-style next volume
def _next_oldvol(self, volfile):
    """Old-style naming: .rar -> .r00, then .r00 -> .r01 etc."""
    suffix = volfile[-4:].lower()
    if suffix == '.rar':
        # first continuation volume: replace trailing 'ar' with '00'
        return volfile[:-2] + '00'
    return self._inc_volname(volfile, len(volfile) - 1)
# increase digits with carry, otherwise just increment char
def _inc_volname(self, volfile, i):
    """Increment the character at index *i*, carrying '9' -> '0'."""
    chars = list(volfile)
    while i >= 0:
        if chars[i] == '9':
            # carry into the character to the left
            chars[i] = '0'
            i -= 1
        else:
            chars[i] = chr(ord(chars[i]) + 1)
            break
    return ''.join(chars)
def _open_clear(self, inf):
    """Open an uncompressed entry for direct reading from the archive."""
    return DirectReader(self, inf)
# put file compressed data into temporary .rar archive, and run
# unrar on that, thus avoiding unrar going over whole archive
def _open_hack(self, inf, psw = None):
    """Wrap one entry's raw data in a minimal temp .rar and open it via unrar."""
    BSIZE = 32*1024
    size = inf.compress_size + inf.header_size
    rf = XFile(inf.volume_file, 0)
    rf.seek(inf.header_offset)

    tmpfd, tmpname = mkstemp(suffix='.rar')
    tmpf = os.fdopen(tmpfd, "wb")

    try:
        # create main header: crc, type, flags, size, res1, res2
        mh = S_BLK_HDR.pack(0x90CF, 0x73, 0, 13) + ZERO * (2+4)
        tmpf.write(RAR_ID + mh)
        # copy entry header + compressed data into the temp archive
        while size > 0:
            if size > BSIZE:
                buf = rf.read(BSIZE)
            else:
                buf = rf.read(size)
            if not buf:
                raise BadRarFile('read failed: ' + inf.filename)
            tmpf.write(buf)
            size -= len(buf)
        tmpf.close()
        rf.close()
    except:
        # remove the temp file on any failure, then re-raise
        rf.close()
        tmpf.close()
        os.unlink(tmpname)
        raise

    return self._open_unrar(tmpname, inf, psw, tmpname)
def _read_comment_v3(self, inf, psw=None):
    """Read and decompress a RAR3 comment stored as a file entry.

    Returns decoded comment, or None if the CRC check fails.
    """
    # read data
    rf = XFile(inf.volume_file)
    rf.seek(inf.file_offset)
    data = rf.read(inf.compress_size)
    rf.close()

    # decompress
    cmt = rar_decompress(inf.extract_version, inf.compress_type, data,
                         inf.file_size, inf.flags, inf.CRC, psw, inf.salt)

    # check crc
    if self._crc_check:
        crc = crc32(cmt)
        if crc < 0:
            # Python 2 zlib.crc32 can return a signed value; normalize
            crc += (long(1) << 32)
        if crc != inf.CRC:
            return None

    return self._decode_comment(cmt)
# extract using unrar
def _open_unrar(self, rarfile, inf, psw = None, tmpfile = None):
    """Spawn unrar to pipe out a single entry; returns a PipeReader."""
    if is_filelike(rarfile):
        raise ValueError("Cannot use unrar directly on memory buffer")
    cmd = [UNRAR_TOOL] + list(OPEN_ARGS)
    add_password_arg(cmd, psw)
    cmd.append(rarfile)
    # not giving filename avoids encoding related problems
    if not tmpfile:
        fn = inf.filename
        if PATH_SEP != os.sep:
            fn = fn.replace(PATH_SEP, os.sep)
        cmd.append(fn)

    # read from unrar pipe
    return PipeReader(self, inf, cmd, tmpfile)
def _decode(self, val):
    """Decode a byte filename, trying known encodings before fallback."""
    for enc in TRY_ENCODINGS:
        try:
            return val.decode(enc)
        except UnicodeError:
            continue
    # nothing decoded cleanly - use archive charset, replacing bad bytes
    return val.decode(self._charset, 'replace')
def _decode_comment(self, val):
    """Decode comment bytes to unicode if UNICODE_COMMENTS is enabled."""
    if UNICODE_COMMENTS:
        return self._decode(val)
    return val
# call unrar to extract a file
def _extract(self, fnlist, path=None, psw=None):
    """Run the unrar tool to extract the entries in *fnlist* to *path*."""
    cmd = [UNRAR_TOOL] + list(EXTRACT_ARGS)

    # password
    psw = psw or self._password
    add_password_arg(cmd, psw)

    # rar file
    cmd.append(self.rarfile)

    # file list
    for fn in fnlist:
        if os.sep != PATH_SEP:
            fn = fn.replace(PATH_SEP, os.sep)
        cmd.append(fn)

    # destination path
    if path is not None:
        cmd.append(path + os.sep)

    # call and raise on non-zero exit
    p = custom_popen(cmd)
    output = p.communicate()[0]
    check_returncode(p, output)
##
## Utility classes
##

class UnicodeFilename:
    """Handle unicode filename decompression.

    The encoded stream describes a UTF-16LE filename relative to the
    plain 8-bit name that precedes it in the header.
    """
    def __init__(self, name, encdata):
        self.std_name = bytearray(name)     # plain 8-bit name
        self.encdata = bytearray(encdata)   # encoded unicode stream
        self.pos = self.encpos = 0          # read positions in both streams
        self.buf = bytearray()              # output UTF-16LE bytes
        self.failed = 0                     # set when either stream runs out

    def enc_byte(self):
        """Next byte from encoded stream; 0 + failure flag on overrun."""
        try:
            c = self.encdata[self.encpos]
            self.encpos += 1
            return c
        except IndexError:
            self.failed = 1
            return 0

    def std_byte(self):
        """Next byte of the plain name; '?' + failure flag on overrun."""
        try:
            return self.std_name[self.pos]
        except IndexError:
            self.failed = 1
            return ord('?')

    def put(self, lo, hi):
        """Emit one UTF-16LE code unit."""
        self.buf.append(lo)
        self.buf.append(hi)
        self.pos += 1

    def decode(self):
        """Run the decoder; returns the unicode filename."""
        hi = self.enc_byte()        # shared high byte used by opcode 1
        flagbits = 0
        while self.encpos < len(self.encdata):
            # refill the 2-bit opcode buffer every 4 opcodes
            if flagbits == 0:
                flags = self.enc_byte()
                flagbits = 8
            flagbits -= 2
            t = (flags >> flagbits) & 3
            if t == 0:
                # literal low byte, high byte 0
                self.put(self.enc_byte(), 0)
            elif t == 1:
                # literal low byte with shared high byte
                self.put(self.enc_byte(), hi)
            elif t == 2:
                # explicit low+high byte pair
                self.put(self.enc_byte(), self.enc_byte())
            else:
                # run copied from the plain 8-bit name
                n = self.enc_byte()
                if n & 0x80:
                    # run with per-byte correction value and shared high byte
                    c = self.enc_byte()
                    for i in range((n & 0x7f) + 2):
                        lo = (self.std_byte() + c) & 0xFF
                        self.put(lo, hi)
                else:
                    # plain ASCII run
                    for i in range(n + 2):
                        self.put(self.std_byte(), 0)
        return self.buf.decode("utf-16le", "replace")
class RarExtFile(RawIOBase):
    """Base class for file-like object that :meth:`RarFile.open` returns.

    Provides public methods and common crc checking.

    Behaviour:
     - no short reads - .read() and .readinfo() read as much as requested.
     - no internal buffer, use io.BufferedReader for that.

    If :mod:`io` module is available (Python 2.6+, 3.x), then this calls
    will inherit from :class:`io.RawIOBase` class.  This makes line-based
    access available: :meth:`RarExtFile.readline` and ``for ln in f``.
    """

    #: Filename of the archive entry
    name = None

    def __init__(self, rf, inf):
        RawIOBase.__init__(self)

        # standard io.* properties
        self.name = inf.filename
        self.mode = 'rb'

        self.rf = rf                    # owning RarFile
        self.inf = inf                  # info record of this entry
        self.crc_check = rf._crc_check  # whether to verify CRC at EOF
        self.fd = None                  # underlying data source
        self.CRC = 0                    # running crc of data read so far
        self.remain = 0                 # uncompressed bytes still unread
        self.returncode = 0             # helper process exit code, if any

        self._open()

    def _open(self):
        """(Re)initialize for reading from the start; subclasses extend this."""
        if self.fd:
            self.fd.close()
        self.fd = None
        self.CRC = 0
        self.remain = self.inf.file_size

    def read(self, cnt = None):
        """Read all or specified amount of data from archive entry."""

        # sanitize cnt: None/negative means "rest of file"
        if cnt is None or cnt < 0:
            cnt = self.remain
        elif cnt > self.remain:
            cnt = self.remain
        if cnt == 0:
            return EMPTY

        # actual read
        data = self._read(cnt)
        if data:
            self.CRC = crc32(data, self.CRC)
            self.remain -= len(data)
        if len(data) != cnt:
            raise BadRarFile("Failed the read enough data")

        # done?
        if not data or self.remain == 0:
            #self.close()
            self._check()
        return data

    def _check(self):
        """Check final CRC."""
        if not self.crc_check:
            return
        if self.returncode:
            check_returncode(self, '')
        if self.remain != 0:
            raise BadRarFile("Failed the read enough data")
        crc = self.CRC
        if crc < 0:
            # Python 2 zlib.crc32 can return a signed value; normalize
            crc += (long(1) << 32)
        if crc != self.inf.CRC:
            raise BadRarFile("Corrupt file - CRC check failed: " + self.inf.filename)

    def _read(self, cnt):
        """Actual read that gets sanitized cnt."""

    def close(self):
        """Close open resources."""
        RawIOBase.close(self)
        if self.fd:
            self.fd.close()
            self.fd = None

    def __del__(self):
        """Hook delete to make sure tempfile is removed."""
        self.close()

    def readinto(self, buf):
        """Zero-copy read directly into buffer.

        Returns bytes read.
        """
        data = self.read(len(buf))
        n = len(data)
        try:
            buf[:n] = data
        except TypeError:
            # array.array slice assignment needs matching typecode
            import array
            if not isinstance(buf, array.array):
                raise
            buf[:n] = array.array(buf.typecode, data)
        return n

    def tell(self):
        """Return current reading position in uncompressed data."""
        return self.inf.file_size - self.remain

    def seek(self, ofs, whence = 0):
        """Seek in data.

        On uncompressed files, the seeking works by actual
        seeks so it's fast.  On compresses files its slow
        - forward seeking happends by reading ahead,
        backwards by re-opening and decompressing from the start.
        """

        # disable crc check when seeking - data is no longer read linearly
        self.crc_check = 0

        fsize = self.inf.file_size
        cur_ofs = self.tell()

        if whence == 0:     # seek from beginning of file
            new_ofs = ofs
        elif whence == 1:   # seek from current position
            new_ofs = cur_ofs + ofs
        elif whence == 2:   # seek from end of file
            new_ofs = fsize + ofs
        else:
            raise ValueError('Invalid value for whence')

        # sanity check - clamp into [0, fsize]
        if new_ofs < 0:
            new_ofs = 0
        elif new_ofs > fsize:
            new_ofs = fsize

        # do the actual seek
        if new_ofs >= cur_ofs:
            self._skip(new_ofs - cur_ofs)
        else:
            # process old data ?
            #self._skip(fsize - cur_ofs)
            # reopen and seek
            self._open()
            self._skip(new_ofs)

        return self.tell()

    def _skip(self, cnt):
        """Read and discard data"""
        while cnt > 0:
            if cnt > 8192:
                buf = self.read(8192)
            else:
                buf = self.read(cnt)
            if not buf:
                break
            cnt -= len(buf)

    def readable(self):
        """Returns True"""
        return True

    def writable(self):
        """Returns False.

        Writing is not supported."""
        return False

    def seekable(self):
        """Returns True.

        Seeking is supported, although it's slow on compressed files.
        """
        return True

    def readall(self):
        """Read all remaining data"""
        # avoid RawIOBase default impl
        return self.read()
class PipeReader(RarExtFile):
    """Read data from pipe, handle tempfile cleanup."""

    def __init__(self, rf, inf, cmd, tempfile=None):
        self.cmd = cmd              # unrar command line to launch
        self.proc = None            # active subprocess, if any
        self.tempfile = tempfile    # temp .rar to delete on close, if any
        RarExtFile.__init__(self, rf, inf)

    def _close_proc(self):
        """Shut down the helper process and record its exit code."""
        if not self.proc:
            return
        if self.proc.stdout:
            self.proc.stdout.close()
        if self.proc.stdin:
            self.proc.stdin.close()
        if self.proc.stderr:
            self.proc.stderr.close()
        self.proc.wait()
        self.returncode = self.proc.returncode
        self.proc = None

    def _open(self):
        RarExtFile._open(self)

        # stop old process
        self._close_proc()

        # launch new process; its stdout becomes our data source
        self.returncode = 0
        self.proc = custom_popen(self.cmd)
        self.fd = self.proc.stdout

        # avoid situation where unrar waits on stdin
        if self.proc.stdin:
            self.proc.stdin.close()

    def _read(self, cnt):
        """Read from pipe."""

        # normal read is usually enough
        data = self.fd.read(cnt)
        if len(data) == cnt or not data:
            return data

        # short read, try looping
        buf = [data]
        cnt -= len(data)
        while cnt > 0:
            data = self.fd.read(cnt)
            if not data:
                break
            cnt -= len(data)
            buf.append(data)
        return EMPTY.join(buf)

    def close(self):
        """Close open resources."""
        self._close_proc()
        RarExtFile.close(self)

        # remove the temporary archive, if one was created for us
        if self.tempfile:
            try:
                os.unlink(self.tempfile)
            except OSError:
                pass
            self.tempfile = None

    if have_memoryview:
        def readinto(self, buf):
            """Zero-copy read directly into buffer."""
            cnt = len(buf)
            if cnt > self.remain:
                cnt = self.remain
            vbuf = memoryview(buf)
            res = got = 0
            while got < cnt:
                res = self.fd.readinto(vbuf[got : cnt])
                if not res:
                    break
                # base read() is bypassed, so track crc/remain here
                if self.crc_check:
                    self.CRC = crc32(vbuf[got : got + res], self.CRC)
                self.remain -= res
                got += res
            return got
class DirectReader(RarExtFile):
    """Read uncompressed data directly from archive."""

    def _open(self):
        RarExtFile._open(self)

        self.volfile = self.inf.volume_file
        self.fd = XFile(self.volfile, 0)
        self.fd.seek(self.inf.header_offset, 0)
        self.cur = self.rf._parse_header(self.fd)   # header of current part
        self.cur_avail = self.cur.add_size          # data left in this volume

    def _skip(self, cnt):
        """RAR Seek, skipping through rar files to get to correct position
        """

        while cnt > 0:
            # next vol needed?
            if self.cur_avail == 0:
                if not self._open_next():
                    break

            # fd is in read pos, do the read
            if cnt > self.cur_avail:
                # skip rest of this volume, continue with the next one
                cnt -= self.cur_avail
                self.remain -= self.cur_avail
                self.cur_avail = 0
            else:
                self.fd.seek(cnt, 1)
                self.cur_avail -= cnt
                self.remain -= cnt
                cnt = 0

    def _read(self, cnt):
        """Read from potentially multi-volume archive."""

        buf = []
        while cnt > 0:
            # next vol needed?
            if self.cur_avail == 0:
                if not self._open_next():
                    break

            # fd is in read pos, do the read
            if cnt > self.cur_avail:
                data = self.fd.read(self.cur_avail)
            else:
                data = self.fd.read(cnt)
            if not data:
                break

            # got some data
            cnt -= len(data)
            self.cur_avail -= len(data)
            buf.append(data)

        # avoid a needless join for the common single-chunk case
        if len(buf) == 1:
            return buf[0]
        return EMPTY.join(buf)

    def _open_next(self):
        """Proceed to next volume."""

        # is the file split over archives?
        if (self.cur.flags & RAR_FILE_SPLIT_AFTER) == 0:
            return False

        if self.fd:
            self.fd.close()
            self.fd = None

        # open next part
        self.volfile = self.rf._next_volname(self.volfile)
        fd = open(self.volfile, "rb", 0)
        self.fd = fd

        # loop until first file header
        while 1:
            cur = self.rf._parse_header(fd)
            if not cur:
                raise BadRarFile("Unexpected EOF")
            if cur.type in (RAR_BLOCK_MARK, RAR_BLOCK_MAIN):
                # skip over non-file blocks and their payload
                if cur.add_size:
                    fd.seek(cur.add_size, 1)
                continue
            if cur.orig_filename != self.inf.orig_filename:
                raise BadRarFile("Did not found file entry")
            self.cur = cur
            self.cur_avail = cur.add_size
            return True

    if have_memoryview:
        def readinto(self, buf):
            """Zero-copy read directly into buffer."""
            got = 0
            vbuf = memoryview(buf)
            while got < len(buf):
                # next vol needed?
                if self.cur_avail == 0:
                    if not self._open_next():
                        break

                # lenght for next read
                cnt = len(buf) - got
                if cnt > self.cur_avail:
                    cnt = self.cur_avail

                # read into temp view
                res = self.fd.readinto(vbuf[got : got + cnt])
                if not res:
                    break
                # base read() is bypassed, so track crc/remain here
                if self.crc_check:
                    self.CRC = crc32(vbuf[got : got + res], self.CRC)
                self.cur_avail -= res
                self.remain -= res
                got += res
            return got
class HeaderDecrypt:
    """File-like object that decrypts from another file"""
    def __init__(self, f, key, iv):
        self.f = f
        self.ciph = AES.new(key, AES.MODE_CBC, iv)
        self.buf = EMPTY    # decrypted-but-unconsumed bytes

    def tell(self):
        return self.f.tell()

    def read(self, cnt=None):
        """Return *cnt* decrypted bytes (fewer only at end of input)."""
        # header reads should be small; a huge request suggests garbage
        # lengths from decrypting with a wrong password
        if cnt > 8*1024:
            raise BadRarFile('Bad count to header decrypt - wrong password?')

        # consume old data
        if cnt <= len(self.buf):
            res = self.buf[:cnt]
            self.buf = self.buf[cnt:]
            return res
        res = self.buf
        self.buf = EMPTY
        cnt -= len(res)

        # decrypt new data, one cipher block at a time
        BLK = self.ciph.block_size
        while cnt > 0:
            enc = self.f.read(BLK)
            if len(enc) < BLK:
                break
            dec = self.ciph.decrypt(enc)
            if cnt >= len(dec):
                res += dec
                cnt -= len(dec)
            else:
                # keep surplus decrypted bytes for the next call
                res += dec[:cnt]
                self.buf = dec[cnt:]
                cnt = 0

        return res
# handle (filename|filelike) object
class XFile(object):
    """Uniform wrapper over a file path or an already-open file object.

    A path is opened (and later closed) by this wrapper; a borrowed
    file object is rewound to the start but never closed here.
    """
    __slots__ = ('_fd', '_need_close')

    def __init__(self, xfile, bufsize = 1024):
        borrowed = is_filelike(xfile)
        self._need_close = not borrowed
        if borrowed:
            self._fd = xfile
            self._fd.seek(0)
        else:
            self._fd = open(xfile, 'rb', bufsize)

    def read(self, n=None):
        return self._fd.read(n)

    def tell(self):
        return self._fd.tell()

    def seek(self, ofs, whence=0):
        return self._fd.seek(ofs, whence)

    def readinto(self, dst):
        return self._fd.readinto(dst)

    def close(self):
        # close only files this wrapper itself opened
        if self._need_close:
            self._fd.close()

    def __enter__(self):
        return self

    def __exit__(self, typ, val, tb):
        self.close()
##
## Utility functions
##

def is_filelike(obj):
    """Return False for path strings, True for file-like objects.

    Objects that are neither (missing read/tell/seek) raise ValueError.
    """
    # path names are handled by the caller directly
    if isinstance(obj, str):
        return False
    if isinstance(obj, unicode):
        return False
    # must support the minimal seekable-stream protocol
    for attr in ('read', 'tell', 'seek'):
        if not hasattr(obj, attr):
            raise ValueError("Invalid object passed as file")
    return True
def rar3_s2k(psw, salt):
    """String-to-key hash for RAR3.

    Derives the 16-byte little-endian AES key and the IV from the
    password and salt via 0x40000 chained SHA-1 updates.
    """
    seed = psw.encode('utf-16le') + salt
    iv = EMPTY
    h = sha1()
    for i in range(16):
        for j in range(0x4000):
            # only low 3 bytes of the counter are hashed
            cnt = S_LONG.pack(i*0x4000 + j)
            h.update(seed + cnt[:3])
            # one IV byte is sampled at the start of each of the 16 rounds
            if j == 0:
                iv += h.digest()[19:20]
    key_be = h.digest()[:16]
    # swap each 32-bit word from big- to little-endian
    key_le = pack("<LLLL", *unpack(">LLLL", key_be))
    return key_le, iv
def rar_decompress(vers, meth, data, declen=0, flags=0, crc=0, psw=None, salt=None):
    """Decompress blob of compressed data.

    Used for data with non-standard header - eg. comments.
    Builds a minimal temporary .rar around the blob and pipes it
    through the unrar tool.
    """
    # already uncompressed?
    if meth == RAR_M0 and (flags & RAR_FILE_PASSWORD) == 0:
        return data

    # take only necessary flags
    flags = flags & (RAR_FILE_PASSWORD | RAR_FILE_SALT | RAR_FILE_DICTMASK)
    flags |= RAR_LONG_BLOCK

    # file header
    fname = bytes('data', 'ascii')
    date = 0
    mode = 0x20
    fhdr = S_FILE_HDR.pack(len(data), declen, RAR_OS_MSDOS, crc,
                           date, vers, meth, len(fname), mode)
    fhdr += fname
    if flags & RAR_FILE_SALT:
        # salted data without the salt cannot be decrypted
        if not salt:
            return EMPTY
        fhdr += salt

    # full header
    hlen = S_BLK_HDR.size + len(fhdr)
    hdr = S_BLK_HDR.pack(0, RAR_BLOCK_FILE, flags, hlen) + fhdr
    # header crc covers everything after the crc field itself
    hcrc = crc32(hdr[2:]) & 0xFFFF
    hdr = S_BLK_HDR.pack(hcrc, RAR_BLOCK_FILE, flags, hlen) + fhdr

    # archive main header
    mh = S_BLK_HDR.pack(0x90CF, RAR_BLOCK_MAIN, 0, 13) + ZERO * (2+4)

    # decompress via temp rar
    tmpfd, tmpname = mkstemp(suffix='.rar')
    tmpf = os.fdopen(tmpfd, "wb")
    try:
        tmpf.write(RAR_ID + mh + hdr + data)
        tmpf.close()
        cmd = [UNRAR_TOOL] + list(OPEN_ARGS)
        add_password_arg(cmd, psw, (flags & RAR_FILE_PASSWORD))
        cmd.append(tmpname)
        p = custom_popen(cmd)
        return p.communicate()[0]
    finally:
        tmpf.close()
        os.unlink(tmpname)
def to_datetime(t):
    """Convert 6-part time tuple into datetime object.

    Returns None for None input.  Out-of-range fields are clamped
    to the nearest valid value instead of raising.
    """
    if t is None:
        return None

    # extract values; seconds may carry sub-second precision as float
    year, mon, day, h, m, xs = t
    s = int(xs)
    us = int(1000000 * (xs - s))

    # common case: the tuple is already valid
    try:
        return datetime(year, mon, day, h, m, s, us)
    except ValueError:
        pass

    # sanitize invalid values (upper bounds; Feb gets 29 provisionally)
    MDAY = (0, 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
    mon = min(max(mon, 1), 12)
    day = min(max(day, 1), MDAY[mon])
    h = min(h, 23)
    m = min(m, 59)
    s = min(s, 59)
    if mon == 2 and day == 29:
        try:
            return datetime(year, mon, day, h, m, s, us)
        except ValueError:
            # Feb 29 on a non-leap year
            day = 28
    return datetime(year, mon, day, h, m, s, us)
def parse_dos_time(stamp):
    """Parse standard 32-bit DOS timestamp.

    Bit layout (LSB first): 5 bits seconds/2, 6 bits minutes,
    5 bits hours, 5 bits day, 4 bits month, 7 bits years since 1980.
    Returns a (year, month, day, hour, minute, second) tuple.
    """
    sec = stamp & 0x1F
    stamp >>= 5
    mn = stamp & 0x3F       # renamed from `min` to avoid shadowing the builtin
    stamp >>= 6
    hr = stamp & 0x1F
    stamp >>= 5
    day = stamp & 0x1F
    stamp >>= 5
    mon = stamp & 0x0F
    stamp >>= 4
    yr = (stamp & 0x7F) + 1980
    # DOS stores seconds with 2-second resolution
    return (yr, mon, day, hr, mn, sec * 2)
def custom_popen(cmd):
    """Disconnect cmd from parent fds, read only from stdout."""

    # needed for py2exe
    creationflags = 0
    if sys.platform == 'win32':
        creationflags = 0x08000000 # CREATE_NO_WINDOW

    # run command; map a missing binary to a specific exception
    try:
        p = Popen(cmd, bufsize = 0,
                  stdout = PIPE, stdin = PIPE, stderr = STDOUT,
                  creationflags = creationflags)
    except OSError:
        ex = sys.exc_info()[1]
        if ex.errno == errno.ENOENT:
            raise RarCannotExec("Unrar not installed? (rarfile.UNRAR_TOOL=%r)" % UNRAR_TOOL)
        raise
    return p
def custom_check(cmd, ignore_retcode=False):
    """Run command, collect output, raise error if needed."""
    proc = custom_popen(cmd)
    out, err = proc.communicate()
    if ignore_retcode or proc.returncode == 0:
        return out
    raise RarExecError("Check-run failed")
def add_password_arg(cmd, psw, required=False):
    """Append password switch to commandline."""
    if UNRAR_TOOL == ALT_TOOL:
        # alternative tool takes no password switch
        return
    if psw is None:
        # explicitly disable the interactive password prompt
        cmd.append('-p-')
    else:
        cmd.append('-p' + psw)
def check_returncode(p, out):
    """Raise exception according to unrar exit code"""

    code = p.returncode
    if code == 0:
        return

    # map return code to exception class
    errmap = [None,
              RarWarning, RarFatalError, RarCRCError, RarLockedArchiveError,
              RarWriteError, RarOpenError, RarUserError, RarMemoryError,
              RarCreateError, RarNoFilesError] # codes from rar.txt
    if UNRAR_TOOL == ALT_TOOL:
        # alternative tool's exit codes are not mapped
        errmap = [None]
    if code > 0 and code < len(errmap):
        exc = errmap[code]
    elif code == 255:
        exc = RarUserBreak
    elif code < 0:
        # negative return code means process died from a signal
        exc = RarSignalExit
    else:
        exc = RarUnknownError

    # format message, including captured output if any
    if out:
        msg = "%s [%d]: %s" % (exc.__doc__, p.returncode, out)
    else:
        msg = "%s [%d]" % (exc.__doc__, p.returncode)

    raise exc(msg)
#
# Check if unrar works
#
# Import-time probe: try UNRAR_TOOL first; if it cannot be executed,
# fall back to ALT_TOOL and swap the command-line argument sets.
try:
    # does UNRAR_TOOL work?
    custom_check([UNRAR_TOOL], True)
except RarCannotExec:
    try:
        # does ALT_TOOL work?
        custom_check([ALT_TOOL] + list(ALT_CHECK_ARGS), True)
        # replace config
        UNRAR_TOOL = ALT_TOOL
        OPEN_ARGS = ALT_OPEN_ARGS
        EXTRACT_ARGS = ALT_EXTRACT_ARGS
        TEST_ARGS = ALT_TEST_ARGS
    except RarCannotExec:
        # no usable tool, only uncompressed archives work
        pass
| gpl-3.0 |
michaelaye/scikit-image | skimage/measure/tests/test_regionprops.py | 14 | 12345 | from numpy.testing import assert_array_equal, assert_almost_equal, \
assert_array_almost_equal, assert_raises, assert_equal
import numpy as np
import math
from skimage.measure._regionprops import regionprops, PROPS, perimeter
from skimage._shared._warnings import expected_warnings
# 10x18 binary test image shared by all tests below.
SAMPLE = np.array(
    [[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0],
     [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
     [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
     [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
     [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
     [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
     [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
     [1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0],
     [0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1],
     [0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1]]
)
# same image with two pixels raised to 2, for intensity-weighted properties
INTENSITY_SAMPLE = SAMPLE.copy()
INTENSITY_SAMPLE[1, 9:11] = 2
def test_all_props():
    # every key in PROPS must be reachable both by item and attribute access
    region = regionprops(SAMPLE, INTENSITY_SAMPLE)[0]
    for prop in PROPS:
        assert_almost_equal(region[prop], getattr(region, PROPS[prop]))


def test_dtype():
    # integer label images are accepted, float images must be rejected
    regionprops(np.zeros((10, 10), dtype=np.int))
    regionprops(np.zeros((10, 10), dtype=np.uint))
    assert_raises((TypeError, RuntimeError), regionprops,
                  np.zeros((10, 10), dtype=np.double))


def test_ndim():
    # 2D images (with optional singleton trailing dims) are supported
    regionprops(np.zeros((10, 10), dtype=np.int))
    regionprops(np.zeros((10, 10, 1), dtype=np.int))
    regionprops(np.zeros((10, 10, 1, 1), dtype=np.int))
    assert_raises(TypeError, regionprops, np.zeros((10, 10, 2), dtype=np.int))


def test_area():
    area = regionprops(SAMPLE)[0].area
    assert area == np.sum(SAMPLE)


def test_bbox():
    bbox = regionprops(SAMPLE)[0].bbox
    assert_array_almost_equal(bbox, (0, 0, SAMPLE.shape[0], SAMPLE.shape[1]))

    # clearing the last column must shrink the bounding box by one
    SAMPLE_mod = SAMPLE.copy()
    SAMPLE_mod[:, -1] = 0
    bbox = regionprops(SAMPLE_mod)[0].bbox
    assert_array_almost_equal(bbox, (0, 0, SAMPLE.shape[0], SAMPLE.shape[1]-1))


def test_moments_central():
    mu = regionprops(SAMPLE)[0].moments_central
    # determined with OpenCV
    assert_almost_equal(mu[0,2], 436.00000000000045)
    # different from OpenCV results, bug in OpenCV
    assert_almost_equal(mu[0,3], -737.333333333333)
    assert_almost_equal(mu[1,1], -87.33333333333303)
    assert_almost_equal(mu[1,2], -127.5555555555593)
    assert_almost_equal(mu[2,0], 1259.7777777777774)
    assert_almost_equal(mu[2,1], 2000.296296296291)
    assert_almost_equal(mu[3,0], -760.0246913580195)


def test_centroid():
    centroid = regionprops(SAMPLE)[0].centroid
    # determined with MATLAB
    assert_array_almost_equal(centroid, (5.66666666666666, 9.444444444444444))


def test_convex_area():
    area = regionprops(SAMPLE)[0].convex_area
    # determined with MATLAB
    assert area == 124


def test_convex_image():
    img = regionprops(SAMPLE)[0].convex_image
    # determined with MATLAB
    ref = np.array(
        [[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
         [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
         [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
         [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
         [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
         [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
         [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
         [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
    )
    assert_array_equal(img, ref)


def test_coordinates():
    sample = np.zeros((10, 10), dtype=np.int8)
    coords = np.array([[3, 2], [3, 3], [3, 4]])
    sample[coords[:, 0], coords[:, 1]] = 1
    prop_coords = regionprops(sample)[0].coords
    assert_array_equal(prop_coords, coords)


def test_eccentricity():
    eps = regionprops(SAMPLE)[0].eccentricity
    assert_almost_equal(eps, 0.814629313427)

    # a single pixel has zero eccentricity
    img = np.zeros((5, 5), dtype=np.int)
    img[2, 2] = 1
    eps = regionprops(img)[0].eccentricity
    assert_almost_equal(eps, 0)


def test_equiv_diameter():
    diameter = regionprops(SAMPLE)[0].equivalent_diameter
    # determined with MATLAB
    assert_almost_equal(diameter, 9.57461472963)


def test_euler_number():
    with expected_warnings(['`background`|CObject type']):
        en = regionprops(SAMPLE)[0].euler_number
    assert en == 0

    # punching a hole decreases the Euler number by one
    SAMPLE_mod = SAMPLE.copy()
    SAMPLE_mod[7, -3] = 0
    with expected_warnings(['`background`|CObject type']):
        en = regionprops(SAMPLE_mod)[0].euler_number
    assert en == -1


def test_extent():
    extent = regionprops(SAMPLE)[0].extent
    assert_almost_equal(extent, 0.4)
def test_moments_hu():
    hu = regionprops(SAMPLE)[0].moments_hu
    ref = np.array([
        3.27117627e-01,
        2.63869194e-02,
        2.35390060e-02,
        1.23151193e-03,
        1.38882330e-06,
        -2.72586158e-05,
        6.48350653e-06
    ])
    # bug in OpenCV caused in Central Moments calculation?
    assert_array_almost_equal(hu, ref)


def test_image():
    img = regionprops(SAMPLE)[0].image
    assert_array_equal(img, SAMPLE)


def test_label():
    label = regionprops(SAMPLE)[0].label
    assert_array_equal(label, 1)


def test_filled_area():
    area = regionprops(SAMPLE)[0].filled_area
    assert area == np.sum(SAMPLE)

    # filled_area counts the hole punched below as part of the region
    SAMPLE_mod = SAMPLE.copy()
    SAMPLE_mod[7, -3] = 0
    area = regionprops(SAMPLE_mod)[0].filled_area
    assert area == np.sum(SAMPLE)


def test_filled_image():
    img = regionprops(SAMPLE)[0].filled_image
    assert_array_equal(img, SAMPLE)


def test_major_axis_length():
    length = regionprops(SAMPLE)[0].major_axis_length
    # MATLAB has different interpretation of ellipse than found in literature,
    # here implemented as found in literature
    assert_almost_equal(length, 16.7924234999)


def test_max_intensity():
    intensity = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE
                            )[0].max_intensity
    assert_almost_equal(intensity, 2)


def test_mean_intensity():
    intensity = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE
                            )[0].mean_intensity
    assert_almost_equal(intensity, 1.02777777777777)


def test_min_intensity():
    intensity = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE
                            )[0].min_intensity
    assert_almost_equal(intensity, 1)


def test_minor_axis_length():
    length = regionprops(SAMPLE)[0].minor_axis_length
    # MATLAB has different interpretation of ellipse than found in literature,
    # here implemented as found in literature
    assert_almost_equal(length, 9.739302807263)


def test_moments():
    m = regionprops(SAMPLE)[0].moments
    # determined with OpenCV
    assert_almost_equal(m[0,0], 72.0)
    assert_almost_equal(m[0,1], 408.0)
    assert_almost_equal(m[0,2], 2748.0)
    assert_almost_equal(m[0,3], 19776.0)
    assert_almost_equal(m[1,0], 680.0)
    assert_almost_equal(m[1,1], 3766.0)
    assert_almost_equal(m[1,2], 24836.0)
    assert_almost_equal(m[2,0], 7682.0)
    assert_almost_equal(m[2,1], 43882.0)
    assert_almost_equal(m[3,0], 95588.0)


def test_moments_normalized():
    nu = regionprops(SAMPLE)[0].moments_normalized
    # determined with OpenCV
    assert_almost_equal(nu[0,2], 0.08410493827160502)
    assert_almost_equal(nu[1,1], -0.016846707818929982)
    assert_almost_equal(nu[1,2], -0.002899800614433943)
    assert_almost_equal(nu[2,0], 0.24301268861454037)
    assert_almost_equal(nu[2,1], 0.045473992910668816)
    assert_almost_equal(nu[3,0], -0.017278118992041805)


def test_orientation():
    orientation = regionprops(SAMPLE)[0].orientation
    # determined with MATLAB
    assert_almost_equal(orientation, 0.10446844651921)
    # test correct quadrant determination
    orientation2 = regionprops(SAMPLE.T)[0].orientation
    assert_almost_equal(orientation2, math.pi / 2 - orientation)
    # test diagonal regions
    diag = np.eye(10, dtype=int)
    orientation_diag = regionprops(diag)[0].orientation
    assert_almost_equal(orientation_diag, -math.pi / 4)
    orientation_diag = regionprops(np.flipud(diag))[0].orientation
    assert_almost_equal(orientation_diag, math.pi / 4)
    orientation_diag = regionprops(np.fliplr(diag))[0].orientation
    assert_almost_equal(orientation_diag, math.pi / 4)
    orientation_diag = regionprops(np.fliplr(np.flipud(diag)))[0].orientation
    assert_almost_equal(orientation_diag, -math.pi / 4)
def test_perimeter():
    per = regionprops(SAMPLE)[0].perimeter
    assert_almost_equal(per, 55.2487373415)

    # 8-connected neighbourhood gives a shorter perimeter estimate
    per = perimeter(SAMPLE.astype('double'), neighbourhood=8)
    assert_almost_equal(per, 46.8284271247)


def test_solidity():
    solidity = regionprops(SAMPLE)[0].solidity
    # determined with MATLAB
    assert_almost_equal(solidity, 0.580645161290323)


def test_weighted_moments_central():
    wmu = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE
                      )[0].weighted_moments_central
    ref = np.array(
        [[ 7.4000000000e+01, -2.1316282073e-13, 4.7837837838e+02,
           -7.5943608473e+02],
         [ 3.7303493627e-14, -8.7837837838e+01, -1.4801314828e+02,
           -1.2714707125e+03],
         [ 1.2602837838e+03, 2.1571526662e+03, 6.6989799420e+03,
           1.5304076361e+04],
         [ -7.6561796932e+02, -4.2385971907e+03, -9.9501164076e+03,
           -3.3156729271e+04]]
    )
    np.set_printoptions(precision=10)
    assert_array_almost_equal(wmu, ref)


def test_weighted_centroid():
    centroid = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE
                           )[0].weighted_centroid
    assert_array_almost_equal(centroid, (5.540540540540, 9.445945945945))


def test_weighted_moments_hu():
    whu = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE
                      )[0].weighted_moments_hu
    ref = np.array([
        3.1750587329e-01,
        2.1417517159e-02,
        2.3609322038e-02,
        1.2565683360e-03,
        8.3014209421e-07,
        -3.5073773473e-05,
        6.7936409056e-06
    ])
    assert_array_almost_equal(whu, ref)


def test_weighted_moments():
    wm = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE
                     )[0].weighted_moments
    ref = np.array(
        [[ 7.4000000000e+01, 4.1000000000e+02, 2.7500000000e+03,
           1.9778000000e+04],
         [ 6.9900000000e+02, 3.7850000000e+03, 2.4855000000e+04,
           1.7500100000e+05],
         [ 7.8630000000e+03, 4.4063000000e+04, 2.9347700000e+05,
           2.0810510000e+06],
         [ 9.7317000000e+04, 5.7256700000e+05, 3.9007170000e+06,
           2.8078871000e+07]]
    )
    assert_array_almost_equal(wm, ref)


def test_weighted_moments_normalized():
    wnu = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE
                      )[0].weighted_moments_normalized
    ref = np.array(
        [[ np.nan, np.nan, 0.0873590903, -0.0161217406],
         [ np.nan, -0.0160405109, -0.0031421072, -0.0031376984],
         [ 0.230146783, 0.0457932622, 0.0165315478, 0.0043903193],
         [-0.0162529732, -0.0104598869, -0.0028544152, -0.0011057191]]
    )
    assert_array_almost_equal(wnu, ref)


def test_label_sequence():
    # labels need not start at 1
    a = np.empty((2, 2), dtype=np.int)
    a[:, :] = 2
    ps = regionprops(a)
    assert len(ps) == 1
    assert ps[0].label == 2


def test_pure_background():
    # an all-zero image yields no regions
    a = np.zeros((2, 2), dtype=np.int)
    ps = regionprops(a)
    assert len(ps) == 0


def test_invalid():
    # intensity properties require an intensity image to have been passed
    ps = regionprops(SAMPLE)

    def get_intensity_image():
        ps[0].intensity_image

    assert_raises(AttributeError, get_intensity_image)


def test_equals():
    arr = np.zeros((100, 100), dtype=np.int)
    arr[0:25, 0:25] = 1
    arr[50:99, 50:99] = 2

    regions = regionprops(arr)
    r1 = regions[0]

    regions = regionprops(arr)
    r2 = regions[0]
    r3 = regions[1]

    with expected_warnings(['`background`|CObject type']):
        assert_equal(r1 == r2, True, "Same regionprops are not equal")
        assert_equal(r1 != r3, True, "Different regionprops are equal")


if __name__ == "__main__":
    from numpy.testing import run_module_suite
    run_module_suite()
| bsd-3-clause |
atandon/Ghost | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/formatters/svg.py | 362 | 5867 | # -*- coding: utf-8 -*-
"""
pygments.formatters.svg
~~~~~~~~~~~~~~~~~~~~~~~
Formatter for SVG output.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.util import get_bool_opt, get_int_opt
__all__ = ['SvgFormatter']
def escape_html(text):
    """Escape &, <, > as well as single and double quotes for HTML.

    The entity replacement strings were lost (each replace mapped a
    character to itself, making the function a no-op); restored here.
    '&' must be escaped first so the other substitutions are not
    themselves re-escaped.
    """
    return text.replace('&', '&amp;'). \
        replace('<', '&lt;'). \
        replace('>', '&gt;'). \
        replace('"', '&quot;'). \
        replace("'", '&#39;')
class2style = {}
class SvgFormatter(Formatter):
    """
    Format tokens as an SVG graphics file.  This formatter is still
    experimental.  Each line of code is a ``<text>`` element with explicit
    ``x`` and ``y`` coordinates containing ``<tspan>`` elements with the
    individual token styles.

    By default, this formatter outputs a full SVG document including doctype
    declaration and the ``<svg>`` root element.

    *New in Pygments 0.9.*

    Additional options accepted:

    `nowrap`
        Don't wrap the SVG ``<text>`` elements in ``<svg><g>`` elements and
        don't add a XML declaration and a doctype.  If true, the `fontfamily`
        and `fontsize` options are ignored.  Defaults to ``False``.

    `fontfamily`
        The value to give the wrapping ``<g>`` element's ``font-family``
        attribute, defaults to ``"monospace"``.

    `fontsize`
        The value to give the wrapping ``<g>`` element's ``font-size``
        attribute, defaults to ``"14px"``.

    `xoffset`
        Starting offset in X direction, defaults to ``0``.

    `yoffset`
        Starting offset in Y direction, defaults to the font size if it is
        given in pixels, or ``20`` else.  (This is necessary since text
        coordinates refer to the text baseline, not the top edge.)

    `ystep`
        Offset to add to the Y coordinate for each subsequent line.  This
        should roughly be the text size plus 5.  It defaults to that value
        if the text size is given in pixels, or ``25`` else.

    `spacehack`
        Convert spaces in the source to ``&#160;``, which are non-breaking
        spaces.  SVG provides the ``xml:space`` attribute to control how
        whitespace inside tags is handled, in theory, the ``preserve`` value
        could be used to keep all whitespace as-is.  However, many current
        SVG viewers don't obey that rule, so this option is provided as a
        workaround and defaults to ``True``.
    """
    name = 'SVG'
    aliases = ['svg']
    filenames = ['*.svg']

    def __init__(self, **options):
        # XXX outencoding
        Formatter.__init__(self, **options)
        self.nowrap = get_bool_opt(options, 'nowrap', False)
        self.fontfamily = options.get('fontfamily', 'monospace')
        self.fontsize = options.get('fontsize', '14px')
        self.xoffset = get_int_opt(options, 'xoffset', 0)
        fs = self.fontsize.strip()
        if fs.endswith('px'):
            fs = fs[:-2].strip()
        try:
            int_fs = int(fs)
        except ValueError:
            # FIX: was a bare ``except:``.  Non-pixel font sizes
            # (e.g. "1.2em") simply fall back to the documented default.
            int_fs = 20
        self.yoffset = get_int_opt(options, 'yoffset', int_fs)
        self.ystep = get_int_opt(options, 'ystep', int_fs + 5)
        self.spacehack = get_bool_opt(options, 'spacehack', True)
        self._stylecache = {}

    def format_unencoded(self, tokensource, outfile):
        """
        Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
        tuples and write it into ``outfile``.

        For our implementation we put all lines in their own 'line group'.
        """
        x = self.xoffset
        y = self.yoffset
        if not self.nowrap:
            if self.encoding:
                outfile.write('<?xml version="1.0" encoding="%s"?>\n' %
                              self.encoding)
            else:
                outfile.write('<?xml version="1.0"?>\n')
            outfile.write('<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" '
                          '"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/'
                          'svg10.dtd">\n')
            outfile.write('<svg xmlns="http://www.w3.org/2000/svg">\n')
            outfile.write('<g font-family="%s" font-size="%s">\n' %
                          (self.fontfamily, self.fontsize))
        outfile.write('<text x="%s" y="%s" xml:space="preserve">' % (x, y))
        for ttype, value in tokensource:
            style = self._get_style(ttype)
            tspan = style and '<tspan' + style + '>' or ''
            tspanend = tspan and '</tspan>' or ''
            value = escape_html(value)
            if self.spacehack:
                # BUG FIX: spaces are converted to non-breaking spaces
                # (&#160;) as the class docstring states; previously this
                # replaced a space with itself, a no-op.
                value = value.expandtabs().replace(' ', '&#160;')
            parts = value.split('\n')
            # Each newline in the token closes the current <text> line and
            # opens the next one, advancing the baseline by ystep.
            for part in parts[:-1]:
                outfile.write(tspan + part + tspanend)
                y += self.ystep
                outfile.write('</text>\n<text x="%s" y="%s" '
                              'xml:space="preserve">' % (x, y))
            outfile.write(tspan + parts[-1] + tspanend)
        outfile.write('</text>')
        if not self.nowrap:
            outfile.write('</g></svg>\n')

    def _get_style(self, tokentype):
        # Cached mapping: token type -> tspan attribute string
        # (fill / font-weight / font-style).
        if tokentype in self._stylecache:
            return self._stylecache[tokentype]
        otokentype = tokentype
        # Walk up the token hierarchy until a type with its own style exists.
        while not self.style.styles_token(tokentype):
            tokentype = tokentype.parent
        value = self.style.style_for_token(tokentype)
        result = ''
        if value['color']:
            result = ' fill="#' + value['color'] + '"'
        if value['bold']:
            result += ' font-weight="bold"'
        if value['italic']:
            result += ' font-style="italic"'
        self._stylecache[otokentype] = result
        return result
| mit |
pdellaert/ansible | lib/ansible/modules/windows/win_psmodule.py | 38 | 4683 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Wojciech Sciesinski <wojciech[at]sciesinski[dot]net>
# Copyright: (c) 2017, Daniele Lazzari <lazzari@mailup.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_psmodule
version_added: "2.4"
short_description: Adds or removes a Windows PowerShell module
description:
- This module helps to install Windows PowerShell modules and register custom modules repository on Windows-based systems.
options:
name:
description:
- Name of the Windows PowerShell module that has to be installed.
type: str
required: yes
state:
description:
- If C(present) a new module is installed.
- If C(absent) a module is removed.
- If C(latest) a module is updated to the newest version. This option was added in version 2.8.
type: str
choices: [ absent, latest, present ]
default: present
required_version:
description:
- The exact version of the PowerShell module that has to be installed.
type: str
version_added: "2.8"
minimum_version:
description:
- The minimum version of the PowerShell module that has to be installed.
type: str
version_added: "2.8"
maximum_version:
description:
- The maximum version of the PowerShell module that has to be installed.
type: str
version_added: "2.8"
allow_clobber:
description:
- If C(yes) allows install modules that contains commands those have the same names as commands that already exists.
type: bool
default: no
skip_publisher_check:
description:
- If C(yes), allows you to install a different version of a module that already exists on your computer in the case when a different one
is not digitally signed by a trusted publisher and the newest existing module is digitally signed by a trusted publisher.
type: bool
default: no
version_added: "2.8"
allow_prerelease:
description:
- If C(yes) installs modules marked as prereleases.
- It doesn't work with the parameters C(minimum_version) and/or C(maximum_version).
- It doesn't work with the C(state) set to absent.
type: bool
default: no
version_added: "2.8"
repository:
description:
- Name of the custom repository to use.
type: str
url:
description:
- URL of the custom repository to register.
- This option is deprecated and will be removed in Ansible 2.12. Use the
M(win_psrepository) module instead.
type: str
notes:
- PowerShell modules needed
- PowerShellGet >= 1.6.0
- PackageManagement >= 1.1.7
- PowerShell package provider needed
- NuGet >= 2.8.5.201
- On PowerShell 5.x required modules and a package provider will be updated under the first run of the win_psmodule module.
- On PowerShell 3.x and 4.x you have to install them before using the win_psmodule.
seealso:
- module: win_psrepository
author:
- Wojciech Sciesinski (@it-praktyk)
- Daniele Lazzari (@dlazz)
'''
EXAMPLES = r'''
---
- name: Add a PowerShell module
win_psmodule:
name: PowerShellModule
state: present
- name: Add an exact version of PowerShell module
win_psmodule:
name: PowerShellModule
required_version: "4.0.2"
state: present
- name: Install or update an existing PowerShell module to the newest version
win_psmodule:
name: PowerShellModule
state: latest
- name: Install newer version of built-in Windows module
win_psmodule:
name: Pester
skip_publisher_check: yes
state: present
- name: Add a PowerShell module and register a repository
win_psmodule:
name: MyCustomModule
repository: MyRepository
state: present
- name: Add a PowerShell module from a specific repository
win_psmodule:
name: PowerShellModule
repository: MyRepository
state: present
- name: Remove a PowerShell module
win_psmodule:
name: PowerShellModule
state: absent
'''
RETURN = r'''
---
output:
description: A message describing the task result.
returned: always
sample: "Module PowerShellCookbook installed"
type: str
nuget_changed:
description: True when Nuget package provider is installed.
returned: always
type: bool
sample: true
repository_changed:
description: True when a custom repository is installed or removed.
returned: always
type: bool
sample: true
'''
| gpl-3.0 |
YeelerG/scrapy | tests/test_utils_conf.py | 22 | 3065 | import unittest
from scrapy.settings import BaseSettings
from scrapy.utils.conf import build_component_list, arglist_to_dict
class BuildComponentListTest(unittest.TestCase):
    """Behavioural tests for scrapy.utils.conf.build_component_list."""

    def test_build_dict(self):
        components = {'one': 1, 'two': None, 'three': 8, 'four': 4}
        self.assertEqual(
            build_component_list(components, convert=lambda x: x),
            ['one', 'four', 'three'])

    def test_backwards_compatible_build_dict(self):
        base = {'one': 1, 'two': 2, 'three': 3, 'five': 5, 'six': None}
        custom = {'two': None, 'three': 8, 'four': 4}
        self.assertEqual(
            build_component_list(base, custom, convert=lambda x: x),
            ['one', 'four', 'five', 'three'])

    def test_return_list(self):
        custom = ['a', 'b', 'c']
        self.assertEqual(
            build_component_list(None, custom, convert=lambda x: x),
            custom)

    def test_map_dict(self):
        custom = {'one': 1, 'two': 2, 'three': 3}
        self.assertEqual(
            build_component_list({}, custom, convert=lambda x: x.upper()),
            ['ONE', 'TWO', 'THREE'])

    def test_map_list(self):
        custom = ['a', 'b', 'c']
        self.assertEqual(
            build_component_list(None, custom, lambda x: x.upper()),
            ['A', 'B', 'C'])

    def test_duplicate_components_in_dict(self):
        duplicates = {'one': 1, 'two': 2, 'ONE': 4}
        self.assertRaises(
            ValueError, build_component_list, {}, duplicates,
            convert=lambda x: x.lower())

    def test_duplicate_components_in_list(self):
        duplicates = ['a', 'b', 'a']
        self.assertRaises(
            ValueError, build_component_list, None, duplicates,
            convert=lambda x: x)

    def test_duplicate_components_in_basesettings(self):
        # Higher priority takes precedence
        settings = BaseSettings({'one': 1, 'two': 2}, priority=0)
        settings.set('ONE', 4, priority=10)
        self.assertEqual(
            build_component_list(settings, convert=lambda x: x.lower()),
            ['two', 'one'])
        settings.set('one', settings['one'], priority=20)
        self.assertEqual(
            build_component_list(settings, convert=lambda x: x.lower()),
            ['one', 'two'])
        # Same priority raises ValueError
        settings.set('ONE', settings['ONE'], priority=20)
        self.assertRaises(
            ValueError, build_component_list, settings,
            convert=lambda x: x.lower())
class UtilsConfTestCase(unittest.TestCase):
    """Tests for scrapy.utils.conf.arglist_to_dict."""

    def test_arglist_to_dict(self):
        parsed = arglist_to_dict(['arg1=val1', 'arg2=val2'])
        self.assertEqual(parsed, {'arg1': 'val1', 'arg2': 'val2'})
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| bsd-3-clause |
Jumpscale/jumpscale6_core | lib/JumpScale/lib/cisco_ios/CiscoSwitchManager.py | 1 | 3904 | from JumpScale import j
# import JumpScale.baselib.remote
class CiscoSwitchManager(object):
    """Factory entry point used by JumpScale to obtain switch sessions."""

    def get(self, host, login, password):
        """Open and return a logged-in CiscoSwitch session for ``host``."""
        return CiscoSwitch(host, login, password)
#!/usr/bin/python
from Router import Router
class CiscoSwitch(object):
    """Telnet session to a Cisco IOS switch, driven through ``Router``.

    The constructor logs in, disables output paging (``terminal length 0``)
    and re-applies the hostname so the expected prompt is predictable.

    Modernized from Python-2-only syntax: ``<>`` -> ``!=`` and
    ``dict.has_key`` -> ``in`` (behaviour unchanged, now Py2/Py3 compatible).
    """

    def __init__(self, host, login, password):
        R1 = Router(hostname=host, logfile='cisco.log')
        login_cmd = 'telnet ' + host
        login_expect = '#'  # .format(hostname)  # @TODO NEEDS TO BE ADJUSTED
        out = R1.login(login_cmd, login, password, login_expect)
        # if out != R1._LOGIN_USERNAME_PROMPTS:
        #     R1.logout()
        #     time.sleep(60)
        #     R1 = Router(hostname, logfile='cisco.log')
        #     password = Localhost1.get_rsa_token()
        #     out = R1.login(login_cmd, login, password, login_expect)
        self.cisco = R1
        self.host = host
        self.login = login
        self.password = password
        # if res != True:  # adjust to check @TODO
        #     raise RuntimeError("Could not login into cisco switch: %s" % host)
        # inputsentence = []
        cmd = "terminal length 0"
        self.do(cmd)
        self.do("configure terminal", "#")
        self.do("hostname %s" % host, "#")
        self.do("exit")

    def logout(self):
        # BUG FIX: ``self._client`` was never assigned anywhere; the session
        # object is stored as ``self.cisco`` in __init__.
        self.cisco.logout()

    def do(self, cmd, prompt=None):
        """Run ``cmd`` on the switch and return its output.

        NOTE(review): the default prompt is only substituted when ``prompt``
        is an empty string, while all internal callers pass ``None`` --
        confirm whether ``Router.exec_cmd`` handles a ``None`` prompt itself.
        """
        if prompt == "":
            prompt = "%s#" % self.cisco.hostname
        return self.cisco.exec_cmd(cmd, prompt=prompt)

    def interface_getvlanconfig(self, interfaceName):
        """
        return vlan config of interface
        """

    def interface_setvlan(self, interfaceName, fromVlanId, toVlanId, reset=False):
        """
        configure set of vlan's on interface
        @param reset when True older info is deleted and only this vlanrange is added
        """

    def _normalizespaces(self, line):
        # Collapse runs of spaces into single spaces so columns can be split
        # reliably.  (The double-space literals were lost to whitespace
        # mangling upstream; a single-space replace would never terminate.)
        while line.find("  ") != -1:
            line = line.replace("  ", " ")
        return line

    def interface_getArpMAC(self):
        """
        returns mac addresses an interface knows about (can be used to detect
        connected ports from servers)

        return dict as follows::

            {$interfacename: [$macaddr1, $macaddr2, ...]}
        """
        result = {}
        out = self.do("sh mac-address-table")
        for line in out.split("\n"):
            line = line.strip()
            # Skip blank/header lines; only rows starting with '*' carry
            # mac-address-table entries in this output format.
            if line == "" or line[0] != "*":
                continue
            line = self._normalizespaces(line)
            splitted = line.split(" ")
            if len(splitted) > 5:
                vlan = splitted[1]
                # Cisco prints MACs dotted (aabb.ccdd.eeff); normalize to a
                # plain lowercase hex string.
                mac = splitted[2].replace(".", "").lower()
                ttype = splitted[3]
                interface = splitted[5]
                if interface not in result:
                    result[interface] = []
                result[interface].append(mac)
            else:
                pass
        return result

    def interface_getall(self):
        """
        return info about interfaces on switch (name, macaddresses, types, ...)
        """
        raise RuntimeError("implement")

    def interface_getnames(self):
        raise RuntimeError("implement")

    def backup(self, name, destinationdir):
        """Not implemented yet; only fetches the running config before failing."""
        config = self.do("show running-config")
        raise RuntimeError("implement")
        # Unreachable implementation sketch kept from the original for reference:
        # self.do("/system/backup/save", args={"name": name})
        # path = "%s.backup" % name
        # self.download(path, j.system.fs.joinPaths(destinationdir, path))
        # self.do("/export", args={"file": name})
        # path = "%s.rsc" % name
        # self.download(path, j.system.fs.joinPaths(destinationdir, path))

    def download(self, path, dest):
        """Fetch ``path`` from the switch over FTP into local file ``dest``."""
        # @todo not sure how that works on cisco sw
        from ftplib import FTP
        ftp = FTP(host=self.host, user=self.login, passwd=self.password)
        try:
            # FIX: close the destination file deterministically instead of
            # leaking the handle returned by open().
            with open(dest, 'wb') as fh:
                ftp.retrbinary('RETR %s' % path, fh.write)
        finally:
            ftp.close()
| bsd-2-clause |
Dhivyap/ansible | lib/ansible/modules/packaging/os/homebrew.py | 10 | 27824 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Andrew Dunham <andrew@du.nham.ca>
# (c) 2013, Daniel Jaouen <dcj24@cornell.edu>
# (c) 2015, Indrajit Raychaudhuri <irc+code@indrajit.com>
#
# Based on macports (Jimmy Tang <jcftang@gmail.com>)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: homebrew
author:
- "Indrajit Raychaudhuri (@indrajitr)"
- "Daniel Jaouen (@danieljaouen)"
- "Andrew Dunham (@andrew-d)"
requirements:
- "python >= 2.6"
- homebrew must already be installed on the target system
short_description: Package manager for Homebrew
description:
- Manages Homebrew packages
version_added: "1.1"
options:
name:
description:
- list of names of packages to install/remove
aliases: ['pkg', 'package', 'formula']
path:
description:
- "A ':' separated list of paths to search for 'brew' executable.
Since a package (I(formula) in homebrew parlance) location is prefixed relative to the actual path of I(brew) command,
providing an alternative I(brew) path enables managing different set of packages in an alternative location in the system."
default: '/usr/local/bin'
state:
description:
- state of the package
choices: [ 'head', 'latest', 'present', 'absent', 'linked', 'unlinked' ]
default: present
update_homebrew:
description:
- update homebrew itself first
type: bool
default: 'no'
aliases: ['update-brew']
upgrade_all:
description:
- upgrade all homebrew packages
type: bool
default: 'no'
aliases: ['upgrade']
install_options:
description:
- options flags to install a package
aliases: ['options']
version_added: "1.4"
notes:
- When used with a `loop:` each package will be processed individually,
it is much more efficient to pass the list directly to the `name` option.
'''
EXAMPLES = '''
# Install formula foo with 'brew' in default path (C(/usr/local/bin))
- homebrew:
name: foo
state: present
# Install formula foo with 'brew' in alternate path C(/my/other/location/bin)
- homebrew:
name: foo
path: /my/other/location/bin
state: present
# Update homebrew first and install formula foo with 'brew' in default path
- homebrew:
name: foo
state: present
update_homebrew: yes
# Update homebrew first and upgrade formula foo to latest available with 'brew' in default path
- homebrew:
name: foo
state: latest
update_homebrew: yes
# Update homebrew and upgrade all packages
- homebrew:
update_homebrew: yes
upgrade_all: yes
# Miscellaneous other examples
- homebrew:
name: foo
state: head
- homebrew:
name: foo
state: linked
- homebrew:
name: foo
state: absent
- homebrew:
name: foo,bar
state: absent
- homebrew:
name: foo
state: present
install_options: with-baz,enable-debug
'''
import os.path
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems, string_types
# exceptions -------------------------------------------------------------- {{{
class HomebrewException(Exception):
    '''Module-specific failure; raised after ``self.failed``/``self.message``
    have been recorded on the Homebrew instance.'''
    pass
# /exceptions ------------------------------------------------------------- }}}
# utils ------------------------------------------------------------------- {{{
def _create_regex_group(s):
lines = (line.strip() for line in s.split('\n') if line.strip())
chars = filter(None, (line.split('#')[0].strip() for line in lines))
group = r'[^' + r''.join(chars) + r']'
return re.compile(group)
# /utils ------------------------------------------------------------------ }}}
class Homebrew(object):
    '''A class to manage Homebrew packages.'''

    # class regexes ------------------------------------------------ {{{
    # Character whitelists; each is turned into a *negated* character class
    # by _create_regex_group below, so a regex match means "contains an
    # invalid character".
    VALID_PATH_CHARS = r'''
        \w                  # alphanumeric characters (i.e., [a-zA-Z0-9_])
        \s                  # spaces
        :                   # colons
        {sep}               # the OS-specific path separator
        .                   # dots
        -                   # dashes
    '''.format(sep=os.path.sep)

    VALID_BREW_PATH_CHARS = r'''
        \w                  # alphanumeric characters (i.e., [a-zA-Z0-9_])
        \s                  # spaces
        {sep}               # the OS-specific path separator
        .                   # dots
        -                   # dashes
    '''.format(sep=os.path.sep)

    VALID_PACKAGE_CHARS = r'''
        \w                  # alphanumeric characters (i.e., [a-zA-Z0-9_])
        .                   # dots
        /                   # slash (for taps)
        \+                  # plusses
        -                   # dashes
        :                   # colons (for URLs)
        @                   # at-sign
    '''

    INVALID_PATH_REGEX = _create_regex_group(VALID_PATH_CHARS)
    INVALID_BREW_PATH_REGEX = _create_regex_group(VALID_BREW_PATH_CHARS)
    INVALID_PACKAGE_REGEX = _create_regex_group(VALID_PACKAGE_CHARS)
    # /class regexes ----------------------------------------------- }}}
    # class validations -------------------------------------------- {{{
    @classmethod
    def valid_path(cls, path):
        '''
        `path` must be one of:
         - list of paths
         - a string containing only:
             - alphanumeric characters
             - dashes
             - dots
             - spaces
             - colons
             - os.path.sep
        '''
        if isinstance(path, string_types):
            return not cls.INVALID_PATH_REGEX.search(path)

        try:
            iter(path)
        except TypeError:
            return False
        else:
            paths = path
            # Each element of an iterable is a single path, so it is checked
            # with the stricter (colon-free) brew-path rule.
            return all(cls.valid_brew_path(path_) for path_ in paths)

    @classmethod
    def valid_brew_path(cls, brew_path):
        '''
        `brew_path` must be one of:
         - None
         - a string containing only:
             - alphanumeric characters
             - dashes
             - dots
             - spaces
             - os.path.sep
        '''
        if brew_path is None:
            return True

        return (
            isinstance(brew_path, string_types)
            and not cls.INVALID_BREW_PATH_REGEX.search(brew_path)
        )

    @classmethod
    def valid_package(cls, package):
        '''A valid package is either None or alphanumeric.'''
        if package is None:
            return True

        return (
            isinstance(package, string_types)
            and not cls.INVALID_PACKAGE_REGEX.search(package)
        )

    @classmethod
    def valid_state(cls, state):
        '''
        A valid state is one of:
            - None
            - installed
            - upgraded
            - head
            - linked
            - unlinked
            - absent
        '''
        if state is None:
            return True
        else:
            return (
                isinstance(state, string_types)
                and state.lower() in (
                    'installed',
                    'upgraded',
                    'head',
                    'linked',
                    'unlinked',
                    'absent',
                )
            )

    @classmethod
    def valid_module(cls, module):
        '''A valid module is an instance of AnsibleModule.'''
        return isinstance(module, AnsibleModule)
    # /class validations ------------------------------------------- }}}
    # class properties --------------------------------------------- {{{
    # Every setter validates its value; on failure it records the error on
    # the instance (failed/message) and raises HomebrewException.
    @property
    def module(self):
        return self._module

    @module.setter
    def module(self, module):
        if not self.valid_module(module):
            self._module = None
            self.failed = True
            self.message = 'Invalid module: {0}.'.format(module)
            raise HomebrewException(self.message)
        else:
            self._module = module
            return module

    @property
    def path(self):
        return self._path

    @path.setter
    def path(self, path):
        if not self.valid_path(path):
            self._path = []
            self.failed = True
            self.message = 'Invalid path: {0}.'.format(path)
            raise HomebrewException(self.message)
        else:
            if isinstance(path, string_types):
                # Accept a colon-separated string and normalize to a list.
                self._path = path.split(':')
            else:
                self._path = path
            return path

    @property
    def brew_path(self):
        return self._brew_path

    @brew_path.setter
    def brew_path(self, brew_path):
        if not self.valid_brew_path(brew_path):
            self._brew_path = None
            self.failed = True
            self.message = 'Invalid brew_path: {0}.'.format(brew_path)
            raise HomebrewException(self.message)
        else:
            self._brew_path = brew_path
            return brew_path

    @property
    def params(self):
        return self._params

    @params.setter
    def params(self, params):
        # NOTE(review): the argument is intentionally ignored; params are
        # always taken from the AnsibleModule instance.
        self._params = self.module.params
        return self._params

    @property
    def current_package(self):
        return self._current_package

    @current_package.setter
    def current_package(self, package):
        if not self.valid_package(package):
            self._current_package = None
            self.failed = True
            self.message = 'Invalid package: {0}.'.format(package)
            raise HomebrewException(self.message)
        else:
            self._current_package = package
            return package
    # /class properties -------------------------------------------- }}}
    def __init__(self, module, path, packages=None, state=None,
                 update_homebrew=False, upgrade_all=False,
                 install_options=None):
        if not install_options:
            install_options = list()
        self._setup_status_vars()
        # Assignments below go through the validating property setters.
        self._setup_instance_vars(module=module, path=path, packages=packages,
                                  state=state, update_homebrew=update_homebrew,
                                  upgrade_all=upgrade_all,
                                  install_options=install_options, )

        self._prep()

    # prep --------------------------------------------------------- {{{
    def _setup_status_vars(self):
        # Aggregate result state that is reported back to Ansible.
        self.failed = False
        self.changed = False
        self.changed_count = 0
        self.unchanged_count = 0
        self.message = ''

    def _setup_instance_vars(self, **kwargs):
        # setattr triggers the validating properties defined above.
        for key, val in iteritems(kwargs):
            setattr(self, key, val)

    def _prep(self):
        self._prep_brew_path()

    def _prep_brew_path(self):
        '''Locate the brew executable (searching self.path) or fail.'''
        if not self.module:
            self.brew_path = None
            self.failed = True
            self.message = 'AnsibleModule not set.'
            raise HomebrewException(self.message)

        self.brew_path = self.module.get_bin_path(
            'brew',
            required=True,
            opt_dirs=self.path,
        )
        if not self.brew_path:
            self.brew_path = None
            self.failed = True
            self.message = 'Unable to locate homebrew executable.'
            raise HomebrewException('Unable to locate homebrew executable.')

        return self.brew_path

    def _status(self):
        return (self.failed, self.changed, self.message)
    # /prep -------------------------------------------------------- }}}

    def run(self):
        '''Execute the requested operation; return (failed, changed, message).'''
        try:
            self._run()
        except HomebrewException:
            # Failure details were already recorded on self by the raiser.
            pass

        if not self.failed and (self.changed_count + self.unchanged_count > 1):
            self.message = "Changed: %d, Unchanged: %d" % (
                self.changed_count,
                self.unchanged_count,
            )
        (failed, changed, message) = self._status()

        return (failed, changed, message)
    # checks ------------------------------------------------------- {{{
    def _current_package_is_installed(self):
        if not self.valid_package(self.current_package):
            self.failed = True
            self.message = 'Invalid package: {0}.'.format(self.current_package)
            raise HomebrewException(self.message)

        cmd = [
            "{brew_path}".format(brew_path=self.brew_path),
            "info",
            self.current_package,
        ]
        rc, out, err = self.module.run_command(cmd)
        # An installed formula's `brew info` output mentions how it was
        # installed; absence of both phrases is treated as "not installed".
        for line in out.split('\n'):
            if (
                re.search(r'Built from source', line)
                or re.search(r'Poured from bottle', line)
            ):
                return True

        return False

    def _current_package_is_outdated(self):
        if not self.valid_package(self.current_package):
            return False

        rc, out, err = self.module.run_command([
            self.brew_path,
            'outdated',
            self.current_package,
        ])
        # A non-zero exit from `brew outdated <formula>` is treated as
        # "the formula is outdated".
        return rc != 0

    def _current_package_is_installed_from_head(self):
        if not Homebrew.valid_package(self.current_package):
            return False
        elif not self._current_package_is_installed():
            return False

        rc, out, err = self.module.run_command([
            self.brew_path,
            'info',
            self.current_package,
        ])

        try:
            # First non-empty line of `brew info` carries the version info.
            version_info = [line for line in out.split('\n') if line][0]
        except IndexError:
            return False

        return version_info.split(' ')[-1] == 'HEAD'
    # /checks ------------------------------------------------------ }}}
    # commands ----------------------------------------------------- {{{
    def _run(self):
        # Global maintenance first, then the per-package state handler.
        if self.update_homebrew:
            self._update_homebrew()
        if self.upgrade_all:
            self._upgrade_all()

        if self.packages:
            if self.state == 'installed':
                return self._install_packages()
            elif self.state == 'upgraded':
                return self._upgrade_packages()
            elif self.state == 'head':
                return self._install_packages()
            elif self.state == 'linked':
                return self._link_packages()
            elif self.state == 'unlinked':
                return self._unlink_packages()
            elif self.state == 'absent':
                return self._uninstall_packages()

    # updated -------------------------------- {{{
    def _update_homebrew(self):
        '''Run `brew update`; report changed only if something was fetched.'''
        rc, out, err = self.module.run_command([
            self.brew_path,
            'update',
        ])
        if rc == 0:
            if out and isinstance(out, string_types):
                already_updated = any(
                    re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE)
                    for s in out.split('\n')
                    if s
                )
                if not already_updated:
                    self.changed = True
                    self.message = 'Homebrew updated successfully.'
                else:
                    self.message = 'Homebrew already up-to-date.'

            return True
        else:
            self.failed = True
            self.message = err.strip()
            raise HomebrewException(self.message)
    # /updated ------------------------------- }}}

    # _upgrade_all --------------------------- {{{
    def _upgrade_all(self):
        '''Run `brew upgrade` with no arguments (upgrade every formula).'''
        rc, out, err = self.module.run_command([
            self.brew_path,
            'upgrade',
        ])
        if rc == 0:
            if not out:
                # Empty output means there was nothing to upgrade.
                self.message = 'Homebrew packages already upgraded.'
            else:
                self.changed = True
                self.message = 'Homebrew upgraded.'

            return True
        else:
            self.failed = True
            self.message = err.strip()
            raise HomebrewException(self.message)
    # /_upgrade_all -------------------------- }}}
    # installed ------------------------------ {{{
    def _install_current_package(self):
        '''Install self.current_package (optionally from HEAD).'''
        if not self.valid_package(self.current_package):
            self.failed = True
            self.message = 'Invalid package: {0}.'.format(self.current_package)
            raise HomebrewException(self.message)

        if self._current_package_is_installed():
            self.unchanged_count += 1
            self.message = 'Package already installed: {0}'.format(
                self.current_package,
            )
            return True

        if self.module.check_mode:
            self.changed = True
            self.message = 'Package would be installed: {0}'.format(
                self.current_package
            )
            raise HomebrewException(self.message)

        if self.state == 'head':
            head = '--HEAD'
        else:
            head = None

        opts = (
            [self.brew_path, 'install']
            + self.install_options
            + [self.current_package, head]
        )
        # Drop the None placeholder when not installing from HEAD.
        cmd = [opt for opt in opts if opt]
        rc, out, err = self.module.run_command(cmd)

        # Success is judged by re-checking installation, not by rc.
        if self._current_package_is_installed():
            self.changed_count += 1
            self.changed = True
            self.message = 'Package installed: {0}'.format(self.current_package)
            return True
        else:
            self.failed = True
            self.message = err.strip()
            raise HomebrewException(self.message)

    def _install_packages(self):
        for package in self.packages:
            self.current_package = package
            self._install_current_package()

        return True
    # /installed ----------------------------- }}}
    # upgraded ------------------------------- {{{
    def _upgrade_current_package(self):
        command = 'upgrade'

        if not self.valid_package(self.current_package):
            self.failed = True
            self.message = 'Invalid package: {0}.'.format(self.current_package)
            raise HomebrewException(self.message)

        if not self._current_package_is_installed():
            # Not installed yet: "upgrade" degrades to a plain install.
            command = 'install'

        if self._current_package_is_installed() and not self._current_package_is_outdated():
            self.message = 'Package is already upgraded: {0}'.format(
                self.current_package,
            )
            self.unchanged_count += 1
            return True

        if self.module.check_mode:
            self.changed = True
            self.message = 'Package would be upgraded: {0}'.format(
                self.current_package
            )
            raise HomebrewException(self.message)

        opts = (
            [self.brew_path, command]
            + self.install_options
            + [self.current_package]
        )
        cmd = [opt for opt in opts if opt]
        rc, out, err = self.module.run_command(cmd)

        # Success is judged by re-checking installed+up-to-date, not by rc.
        if self._current_package_is_installed() and not self._current_package_is_outdated():
            self.changed_count += 1
            self.changed = True
            self.message = 'Package upgraded: {0}'.format(self.current_package)
            return True
        else:
            self.failed = True
            self.message = err.strip()
            raise HomebrewException(self.message)

    def _upgrade_all_packages(self):
        opts = (
            [self.brew_path, 'upgrade']
            + self.install_options
        )
        cmd = [opt for opt in opts if opt]
        rc, out, err = self.module.run_command(cmd)

        if rc == 0:
            self.changed = True
            self.message = 'All packages upgraded.'
            return True
        else:
            self.failed = True
            self.message = err.strip()
            raise HomebrewException(self.message)

    def _upgrade_packages(self):
        if not self.packages:
            # No explicit package list: upgrade everything.
            self._upgrade_all_packages()
        else:
            for package in self.packages:
                self.current_package = package
                self._upgrade_current_package()
            return True
    # /upgraded ------------------------------ }}}
    # uninstalled ---------------------------- {{{
    def _uninstall_current_package(self):
        '''Force-uninstall self.current_package if it is installed.'''
        if not self.valid_package(self.current_package):
            self.failed = True
            self.message = 'Invalid package: {0}.'.format(self.current_package)
            raise HomebrewException(self.message)

        if not self._current_package_is_installed():
            self.unchanged_count += 1
            self.message = 'Package already uninstalled: {0}'.format(
                self.current_package,
            )
            return True

        if self.module.check_mode:
            self.changed = True
            self.message = 'Package would be uninstalled: {0}'.format(
                self.current_package
            )
            raise HomebrewException(self.message)

        opts = (
            [self.brew_path, 'uninstall', '--force']
            + self.install_options
            + [self.current_package]
        )
        cmd = [opt for opt in opts if opt]
        rc, out, err = self.module.run_command(cmd)

        # Success is judged by re-checking installation, not by rc.
        if not self._current_package_is_installed():
            self.changed_count += 1
            self.changed = True
            self.message = 'Package uninstalled: {0}'.format(self.current_package)
            return True
        else:
            self.failed = True
            self.message = err.strip()
            raise HomebrewException(self.message)

    def _uninstall_packages(self):
        for package in self.packages:
            self.current_package = package
            self._uninstall_current_package()

        return True
    # /uninstalled ----------------------------- }}}
# linked --------------------------------- {{{
def _link_current_package(self):
if not self.valid_package(self.current_package):
self.failed = True
self.message = 'Invalid package: {0}.'.format(self.current_package)
raise HomebrewException(self.message)
if not self._current_package_is_installed():
self.failed = True
self.message = 'Package not installed: {0}.'.format(self.current_package)
raise HomebrewException(self.message)
if self.module.check_mode:
self.changed = True
self.message = 'Package would be linked: {0}'.format(
self.current_package
)
raise HomebrewException(self.message)
opts = (
[self.brew_path, 'link']
+ self.install_options
+ [self.current_package]
)
cmd = [opt for opt in opts if opt]
rc, out, err = self.module.run_command(cmd)
if rc == 0:
self.changed_count += 1
self.changed = True
self.message = 'Package linked: {0}'.format(self.current_package)
return True
else:
self.failed = True
self.message = 'Package could not be linked: {0}.'.format(self.current_package)
raise HomebrewException(self.message)
def _link_packages(self):
for package in self.packages:
self.current_package = package
self._link_current_package()
return True
# /linked -------------------------------- }}}
# unlinked ------------------------------- {{{
def _unlink_current_package(self):
    # Run ``brew unlink`` for self.current_package.
    # Raises HomebrewException on any failure (and in check mode, with a
    # "would be unlinked" message); returns True on success.
    if not self.valid_package(self.current_package):
        self.failed = True
        self.message = 'Invalid package: {0}.'.format(self.current_package)
        raise HomebrewException(self.message)

    if not self._current_package_is_installed():
        self.failed = True
        self.message = 'Package not installed: {0}.'.format(self.current_package)
        raise HomebrewException(self.message)

    if self.module.check_mode:
        # Check mode: report what would happen without running brew.
        self.changed = True
        self.message = 'Package would be unlinked: {0}'.format(
            self.current_package
        )
        raise HomebrewException(self.message)

    opts = (
        [self.brew_path, 'unlink']
        + self.install_options
        + [self.current_package]
    )
    cmd = [opt for opt in opts if opt]  # drop empty/None argv elements
    rc, out, err = self.module.run_command(cmd)

    if rc == 0:
        self.changed_count += 1
        self.changed = True
        self.message = 'Package unlinked: {0}'.format(self.current_package)
        return True
    else:
        self.failed = True
        self.message = 'Package could not be unlinked: {0}.'.format(self.current_package)
        raise HomebrewException(self.message)
def _unlink_packages(self):
    """Unlink every requested package in turn; returns True when done."""
    for name in self.packages:
        self.current_package = name
        self._unlink_current_package()
    return True
# /unlinked ------------------------------ }}}
# /commands ---------------------------------------------------- }}}
def main():
    """Ansible module entry point: parse arguments, normalize them, run brew.

    Exits via module.exit_json / module.fail_json depending on the outcome
    of Homebrew.run().
    """
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(
                aliases=["pkg", "package", "formula"],
                required=False,
                type='list',
            ),
            path=dict(
                default="/usr/local/bin",
                required=False,
                type='path',
            ),
            state=dict(
                default="present",
                choices=[
                    "present", "installed",
                    "latest", "upgraded", "head",
                    "linked", "unlinked",
                    "absent", "removed", "uninstalled",
                ],
            ),
            update_homebrew=dict(
                default=False,
                aliases=["update-brew"],
                type='bool',
            ),
            upgrade_all=dict(
                default=False,
                aliases=["upgrade"],
                type='bool',
            ),
            install_options=dict(
                default=None,
                aliases=['options'],
                type='list',
            )
        ),
        supports_check_mode=True,
    )

    # Force a predictable locale so brew output can be parsed reliably.
    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')

    p = module.params

    # An empty/missing package list is passed through as None.
    packages = p['name'] or None

    path = p['path']
    if path:
        path = path.split(':')

    # Collapse the documented state aliases into the canonical state names
    # (replaces the previous chain of ifs, several of which were no-ops).
    state_aliases = {
        'present': 'installed', 'installed': 'installed',
        'head': 'head',
        'latest': 'upgraded', 'upgraded': 'upgraded',
        'linked': 'linked',
        'unlinked': 'unlinked',
        'absent': 'absent', 'removed': 'absent', 'uninstalled': 'absent',
    }
    state = state_aliases[p['state']]

    update_homebrew = p['update_homebrew']
    upgrade_all = p['upgrade_all']

    # Render install options as brew-style '--option' flags.
    p['install_options'] = p['install_options'] or []
    install_options = ['--{0}'.format(install_option)
                       for install_option in p['install_options']]

    brew = Homebrew(module=module, path=path, packages=packages,
                    state=state, update_homebrew=update_homebrew,
                    upgrade_all=upgrade_all, install_options=install_options)
    (failed, changed, message) = brew.run()
    if failed:
        module.fail_json(msg=message)
    else:
        module.exit_json(changed=changed, msg=message)


if __name__ == '__main__':
    main()
| gpl-3.0 |
jotes/moto | tests/test_cloudformation/fixtures/route53_roundrobin.py | 21 | 1935 | from __future__ import unicode_literals
# CloudFormation test fixture: a Route 53 hosted zone plus a weighted
# round robin (WRR) CNAME RecordSetGroup — weight 3 vs. 1, i.e. roughly
# 75% aws.amazon.com / 25% www.amazon.com.
template = {
    "AWSTemplateFormatVersion" : "2010-09-09",

    "Description" : "AWS CloudFormation Sample Template Route53_RoundRobin: Sample template showing how to use weighted round robin (WRR) DNS entried via Amazon Route 53. This contrived sample uses weighted CNAME records to illustrate that the weighting influences the return records. It assumes that you already have a Hosted Zone registered with Amazon Route 53. **WARNING** This template creates one or more AWS resources. You will be billed for the AWS resources used if you create a stack from this template.",

    "Resources" : {
        # The zone the weighted records are created in.
        "MyZone": {
            "Type" : "AWS::Route53::HostedZone",
            "Properties" : {
                "Name" : "my_zone"
            }
        },

        "MyDNSRecord" : {
            "Type" : "AWS::Route53::RecordSetGroup",
            "Properties" : {
                "HostedZoneName" : {"Ref": "MyZone"},
                "Comment" : "Contrived example to redirect to aws.amazon.com 75% of the time and www.amazon.com 25% of the time.",
                "RecordSets" : [{
                    "SetIdentifier" : { "Fn::Join" : [ " ", [{"Ref" : "AWS::StackName"}, "AWS" ]]},
                    "Name" : { "Fn::Join" : [ "", [{"Ref" : "AWS::StackName"}, ".", {"Ref" : "AWS::Region"}, ".", {"Ref" : "MyZone"}, "."]]},
                    "Type" : "CNAME",
                    "TTL" : "900",
                    "ResourceRecords" : ["aws.amazon.com"],
                    "Weight" : "3"
                },{
                    "SetIdentifier" : { "Fn::Join" : [ " ", [{"Ref" : "AWS::StackName"}, "Amazon" ]]},
                    "Name" : { "Fn::Join" : [ "", [{"Ref" : "AWS::StackName"}, ".", {"Ref" : "AWS::Region"}, ".", {"Ref" : "MyZone"}, "."]]},
                    "Type" : "CNAME",
                    "TTL" : "900",
                    "ResourceRecords" : ["www.amazon.com"],
                    "Weight" : "1"
                }]
            }
        }
    },

    "Outputs" : {
        "DomainName" : {
            "Description" : "Fully qualified domain name",
            "Value" : { "Ref" : "MyDNSRecord" }
        }
    }
} | apache-2.0 |
laborautonomo/poedit | deps/boost/libs/numeric/odeint/performance/performance.py | 12 | 1882 | """
Copyright 2009-2012 Karsten Ahnert
Copyright 2009-2012 Mario Mulansky
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or
copy at http://www.boost.org/LICENSE_1_0.txt)
"""
from os import popen
from os import system
from os.path import isfile

from numpy import *
#from pylab import *

# Python 2 benchmark driver: runs each pre-built RK4 Lorenz benchmark
# binary, collects its timing output, and stores/normalizes the results.
# Toolset/binary locations are selected by (un)commenting the lines below.
#toolset = "gcc-4.5"
#toolset = "intel-11.1"
toolset = "msvc"
#toolset = "msvc-10.0"

#bin_path = "bin/gcc-4.5/release/"
#bin_path = "bin/intel-linux-11.1/release/"
bin_path = "bin\\msvc-10.0\\release\\threading-multi\\"
extension = ".exe"
#extension = ""

bins = [ "odeint_rk4_lorenz_array" , "odeint_rk4_lorenz_range" , "generic_odeint_rk4_lorenz" , "nr_rk4_lorenz" , "rt_generic_rk4_lorenz" , "gsl_rk4_lorenz" ]

results = []

print "Performance tests for " , bin_path
print

for bin in bins:
    #system( "bjam toolset=" + toolset + " -a " + bin );
    if isfile( bin_path + bin + extension):
        print "Running" , bin
        # Each benchmark prints its timing result on stdout.
        res = popen( bin_path+bin+extension ).read()
        print bin , res
        results.append( res )
    else:
        # Missing binary: record 0 so the result vector keeps its shape.
        print "no executable found:" , bin_path + bin + extension
        results.append( 0 )

print "Results from" , bin_path
print

for i in range(len(bins)):
    print bins[i] , results[i]

# Persist raw timings, then normalize to percent of the first benchmark.
res = array( results , dtype='float' )
savetxt( bin_path + "rk4_lorenz.dat" , res )
res = 100*res[0]/res

bar_width = 0.6

# Plotting code kept disabled (requires pylab; see commented import above).
"""
figure(1)
title("Runge-Kutta 4 with " + toolset , fontsize=20)
bar( arange(6) , res , bar_width , color='blue' , linewidth=4 , edgecolor='blue' , ecolor='red') #, elinewidth=2, ecolor='red' )
xlim( -0.5 , 5.5+bar_width )
xticks( arange(6)+bar_width/2 , ('array' , 'range' , 'generic' , 'NR' , 'rt gen' , 'gsl' ) )
ylabel('Performance in %' , fontsize=20)

savefig( bin_path + "rk4_lorenz.png" )
show()
"""
| mit |
mikewiebe-ansible/ansible | test/units/module_utils/common/validation/test_check_mutually_exclusive.py | 39 | 1605 | # -*- coding: utf-8 -*-
# Copyright (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from ansible.module_utils._text import to_native
from ansible.module_utils.common.validation import check_mutually_exclusive
@pytest.fixture
def mutually_exclusive_terms():
    """Groups of parameter names that must not be supplied together."""
    terms = [
        ('string1', 'string2',),
        ('box', 'fox', 'socks'),
    ]
    return terms
def test_check_mutually_exclusive(mutually_exclusive_terms):
    """No conflict is reported when at most one member of each group is set."""
    params = dict(string1='cat', fox='hat')
    assert check_mutually_exclusive(mutually_exclusive_terms, params) == []
def test_check_mutually_exclusive_found(mutually_exclusive_terms):
    """check_mutually_exclusive() raises TypeError naming all violated groups."""
    params = {
        'string1': 'cat',
        'string2': 'hat',
        'fox': 'red',
        'socks': 'blue',
    }
    expected = "parameters are mutually exclusive: string1|string2, box|fox|socks"

    with pytest.raises(TypeError) as e:
        check_mutually_exclusive(mutually_exclusive_terms, params)

    # Compare the exception *message*. The previous
    # `assert e.value == expected` compared a TypeError instance with a
    # string (a Python 2 repr, at that), which can never be equal.
    assert to_native(e.value) == expected
def test_check_mutually_exclusive_none():
    """A None terms argument is accepted and reports no conflicts."""
    params = dict(string1='cat', fox='hat')
    assert check_mutually_exclusive(None, params) == []
def test_check_mutually_exclusive_no_params(mutually_exclusive_terms):
    """Passing params=None propagates the TypeError from iterating None."""
    with pytest.raises(TypeError) as te:
        check_mutually_exclusive(mutually_exclusive_terms, None)
    # pytest's ExceptionInfo exposes the raised exception as `.value`
    # (there is no `.error` attribute, which the old assertion used), and
    # to_native() yields only the message text without the type prefix.
    assert "'NoneType' object is not iterable" in to_native(te.value)
| gpl-3.0 |
wyc/django | tests/postgres_tests/fields.py | 302 | 1087 | """
Indirection layer for PostgreSQL-specific fields, so the tests don't fail when
run with a backend other than PostgreSQL.
"""
from django.db import models
try:
    # Use the real PostgreSQL-specific fields when contrib.postgres is
    # importable (i.e. when psycopg2 and friends are available).
    from django.contrib.postgres.fields import (
        ArrayField, BigIntegerRangeField, DateRangeField, DateTimeRangeField,
        FloatRangeField, HStoreField, IntegerRangeField, JSONField,
    )
except ImportError:
    class DummyArrayField(models.Field):
        # Accepts the same constructor signature as the real ArrayField so
        # model definitions keep importing; base_field and size are dropped.
        def __init__(self, base_field, size=None, **kwargs):
            super(DummyArrayField, self).__init__(**kwargs)

        def deconstruct(self):
            # Supply placeholder base_field/size so migrations serialize
            # the field the same way the real ArrayField would.
            name, path, args, kwargs = super(DummyArrayField, self).deconstruct()
            kwargs.update({
                'base_field': '',
                'size': 1,
            })
            return name, path, args, kwargs

    # All remaining PostgreSQL fields degrade to plain models.Field stubs.
    ArrayField = DummyArrayField
    BigIntegerRangeField = models.Field
    DateRangeField = models.Field
    DateTimeRangeField = models.Field
    FloatRangeField = models.Field
    HStoreField = models.Field
    IntegerRangeField = models.Field
    JSONField = models.Field
| bsd-3-clause |
falcong/or-tools | examples/python/crypta.py | 34 | 3413 | # Copyright 2010 Hakan Kjellerstrand hakank@bonetmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Cryptarithmetic puzzle in Google CP Solver.
Prolog benchmark problem GNU Prolog (crypta.pl)
'''
Name : crypta.pl
Title : crypt-arithmetic
Original Source: P. Van Hentenryck's book
Adapted by : Daniel Diaz - INRIA France
Date : September 1992
Solve the operation:
B A I J J A J I I A H F C F E B B J E A
+ D H F G A B C D I D B I F F A G F E J E
-----------------------------------------
= G J E G A C D D H F A F J B F I H E E F
'''
Compare with the following models:
* Comet: http://hakank.org/comet/crypta.co
* MiniZinc: http://hakank.org/minizinc/crypta.mzn
* ECLiPSe: http://hakank.org/eclipse/crypta.ecl
* Gecode: http://hakank.org/gecode/crypta.cpp
* SICStus: http://hakank.org/sicstus/crypta.pl
This model was created by Hakan Kjellerstrand (hakank@bonetmail.com)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from ortools.constraint_solver import pywrapcp
def main():
    """Build and solve the crypt-arithmetic puzzle, printing every solution."""
    # Create the solver.
    solver = pywrapcp.Solver("Crypta")

    #
    # data
    #

    #
    # variables
    #
    # One digit 0..9 per letter A..J; Sr1/Sr2 are the carries between the
    # three column groups of the long addition below.
    LD = [solver.IntVar(0, 9, "LD[%i]" % i) for i in range(0, 10)]
    A, B, C, D, E, F, G, H, I, J = LD
    Sr1 = solver.IntVar(0, 1, "Sr1")
    Sr2 = solver.IntVar(0, 1, "Sr2")

    #
    # constraints
    #
    solver.Add(solver.AllDifferent(LD))
    # Leading digits of the operands/result must be non-zero.
    solver.Add(B >= 1)
    solver.Add(D >= 1)
    solver.Add(G >= 1)

    # The 20-digit addition, split into chunks linked by the carry
    # variables Sr1 and Sr2 (least significant chunk first).
    solver.Add(A + 10 * E + 100 * J + 1000 * B + 10000 * B + 100000 * E + 1000000 * F +
               E + 10 * J + 100 * E + 1000 * F + 10000 * G + 100000 * A + 1000000 * F
               == F + 10 * E + 100 * E + 1000 * H + 10000 * I + 100000 * F + 1000000 * B + 10000000 * Sr1)

    solver.Add(C + 10 * F + 100 * H + 1000 * A + 10000 * I + 100000 * I + 1000000 * J +
               F + 10 * I + 100 * B + 1000 * D + 10000 * I + 100000 * D + 1000000 * C + Sr1
               == J + 10 * F + 100 * A + 1000 * F + 10000 * H + 100000 * D + 1000000 * D + 10000000 * Sr2)

    solver.Add(A + 10 * J + 100 * J + 1000 * I + 10000 * A + 100000 * B +
               B + 10 * A + 100 * G + 1000 * F + 10000 * H + 100000 * D + Sr2
               == C + 10 * A + 100 * G + 1000 * E + 10000 * J + 100000 * G)

    #
    # search and result
    #
    db = solver.Phase(LD,
                      solver.INT_VAR_SIMPLE,
                      solver.INT_VALUE_SIMPLE)

    solver.NewSearch(db)
    num_solutions = 0
    str = "ABCDEFGHIJ"  # letter labels for printing (NOTE: shadows builtin str)
    while solver.NextSolution():
        num_solutions += 1
        for (letter, val) in [(str[i], LD[i].Value()) for i in range(len(LD))]:
            print "%s: %i" % (letter, val)
        print

    solver.EndSearch()

    # Search statistics.
    print
    print "num_solutions:", num_solutions
    print "failures:", solver.Failures()
    print "branches:", solver.Branches()
    print "WallTime:", solver.WallTime()

if __name__ == "__main__":
    main()
| apache-2.0 |
agconti/Shopify-Django | venv/lib/python2.7/site-packages/django/contrib/localflavor/it/forms.py | 100 | 3180 | """
IT-specific Form helpers
"""
from __future__ import absolute_import, unicode_literals
import re
from django.contrib.localflavor.it.it_province import PROVINCE_CHOICES
from django.contrib.localflavor.it.it_region import REGION_CHOICES
from django.contrib.localflavor.it.util import ssn_check_digit, vat_number_check_digit
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField, Select
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_text
class ITZipCodeField(RegexField):
    """A form field that validates Italian zip codes (exactly five digits)."""
    default_error_messages = {
        'invalid': _('Enter a valid zip code.'),
    }

    def __init__(self, max_length=None, min_length=None, *args, **kwargs):
        super(ITZipCodeField, self).__init__(r'^\d{5}$',
            max_length, min_length, *args, **kwargs)
class ITRegionSelect(Select):
    """
    A Select widget that uses a list of IT regions as its choices.
    """
    def __init__(self, attrs=None):
        # Choices come from django.contrib.localflavor.it.it_region.
        super(ITRegionSelect, self).__init__(attrs, choices=REGION_CHOICES)
class ITProvinceSelect(Select):
    """
    A Select widget that uses a list of IT provinces as its choices.
    """
    def __init__(self, attrs=None):
        # Choices come from django.contrib.localflavor.it.it_province.
        super(ITProvinceSelect, self).__init__(attrs, choices=PROVINCE_CHOICES)
class ITSocialSecurityNumberField(RegexField):
    """
    A form field that validates Italian Social Security numbers (codice fiscale).

    For reference see http://www.agenziaentrate.it/ and search for
    'Informazioni sulla codificazione delle persone fisiche'.
    """
    default_error_messages = {
        'invalid': _('Enter a valid Social Security number.'),
    }

    def __init__(self, max_length=None, min_length=None, *args, **kwargs):
        # Four groups of 3/3/5/5 word characters, optionally space-separated.
        super(ITSocialSecurityNumberField, self).__init__(r'^\w{3}\s*\w{3}\s*\w{5}\s*\w{5}$',
            max_length, min_length, *args, **kwargs)

    def clean(self, value):
        value = super(ITSocialSecurityNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return ''
        # Normalize: strip all whitespace and upper-case before validating.
        # Raw string r'\s' fixes the invalid '\s' escape sequence of the
        # original (a DeprecationWarning on modern Pythons).
        value = re.sub(r'\s', '', value).upper()
        try:
            check_digit = ssn_check_digit(value)
        except ValueError:
            raise ValidationError(self.error_messages['invalid'])
        # The 16th character must be the computed check digit.
        if not value[15] == check_digit:
            raise ValidationError(self.error_messages['invalid'])
        return value
class ITVatNumberField(Field):
    """
    A form field that validates Italian VAT numbers (partita IVA).
    """
    default_error_messages = {
        'invalid': _('Enter a valid VAT number.'),
    }

    def clean(self, value):
        value = super(ITVatNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return ''
        # Must be numeric.
        try:
            vat_number = int(value)
        except ValueError:
            raise ValidationError(self.error_messages['invalid'])
        # Normalize to exactly 11 digits, then validate the trailing check
        # digit against the first ten.
        vat_number = str(vat_number).zfill(11)
        check_digit = vat_number_check_digit(vat_number[0:10])
        if not vat_number[10] == check_digit:
            raise ValidationError(self.error_messages['invalid'])
        return smart_text(vat_number)
| mit |
LarsDu/DeepPixelMonster | dpixelmonster/du_utils.py | 1 | 1200 | import tensorflow as tf
def tanh_to_sig_scale(tanh_input, scale_multiplier=1.):
    """Map values from the tanh range [-1, 1] onto [0, scale_multiplier].

    Thanks to operator overloading this works on plain numbers as well as
    tensors. E.g. scale_multiplier=255 yields RGB pixel values.
    """
    zero_one = (tanh_input + 1.) / 2.
    return zero_one * scale_multiplier
def random_image_transforms(image,
                            rand_flip=True,
                            rand_bright=True,
                            rand_contrast=True,
                            rand_hue=True,
                            rand_sat=False,
                            do_rescale_tanh=True):
    """Apply random data-augmentation transforms to an image tensor.

    The enable flags were previously hard-coded locals; they are now
    keyword arguments whose defaults reproduce the old behaviour exactly
    (saturation jitter stays disabled by default).

    Args:
        image: image tensor, values expected in [0, 1].
        rand_flip: random horizontal flip.
        rand_bright: random brightness jitter (max_delta=.15).
        rand_contrast: random contrast jitter in [0.80, 1.2].
        rand_hue: random hue jitter (max_delta=0.07).
        rand_sat: random saturation jitter in [.95, 1.05].
        do_rescale_tanh: if True, rescale the result from [0, 1] to [-1, 1].

    Returns:
        The transformed image tensor, clipped to [0, 1] and optionally
        rescaled to [-1, 1].
    """
    if rand_flip:
        image = tf.image.random_flip_left_right(image)
    if rand_bright:
        image = tf.image.random_brightness(image, max_delta=.15)
    if rand_contrast:
        image = tf.image.random_contrast(image, lower=0.80, upper=1.2)
    if rand_hue:
        image = tf.image.random_hue(image, max_delta=0.07)
    if rand_sat:
        image = tf.image.random_saturation(image, lower=.95, upper=1.05)

    # Limit pixel values to [0, 1]; the random ops can overshoot the range.
    # https://github.com/tensorflow/tensorflow/issues/3816
    image = tf.minimum(image, 1.0)
    image = tf.maximum(image, 0.)

    if do_rescale_tanh:
        # Scale from [0, 1] to [-1, 1].
        image = (2 * image) - 1
    return image
| apache-2.0 |
alkyl1978/gnuradio | gr-blocks/python/blocks/qa_threshold.py | 57 | 1537 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, blocks
class test_threshold(gr_unittest.TestCase):
    """QA for blocks.threshold_ff."""

    def setUp(self):
        # Fresh flowgraph for every test.
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def test_01(self):
        # With threshold_ff(1, 1), the fixture maps inputs of 2 (>= 1) to 1
        # and inputs of 0 (< 1) to 0.
        tb = self.tb
        data = [0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 2, 2, 2]
        expected_result = (0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1)
        src = blocks.vector_source_f(data, False)
        op = blocks.threshold_ff(1, 1)
        dst = blocks.vector_sink_f()
        tb.connect(src, op)
        tb.connect(op, dst)
        tb.run()
        dst_data = dst.data()
        self.assertEqual(expected_result, dst_data)
if __name__ == '__main__':
gr_unittest.run(test_threshold, "test_threshold.xml")
| gpl-3.0 |
mhahn/stacker | stacker/commands/stacker/destroy.py | 1 | 1084 | """Destroys CloudFormation stacks based on the given config.
Stacker will determine the order in which stacks should be destroyed based on
any manual requirements they specify or output values they rely on from other
stacks.
"""
from .base import BaseCommand
from ...actions import destroy
class Destroy(BaseCommand):
    """The ``stacker destroy`` sub-command (see the module docstring)."""

    name = "destroy"
    description = __doc__

    def add_arguments(self, parser):
        """Register destroy-specific CLI flags on top of the base ones."""
        super(Destroy, self).add_arguments(parser)
        # Fixed: the original adjacent string literals concatenated to
        # "go through  with" (double space) and "workingwith stacks".
        parser.add_argument("-f", "--force", action="store_true",
                            help="Whether or not you want to go through "
                                 "with destroying the stacks")
        parser.add_argument("-t", "--tail", action="store_true",
                            help="Tail the CloudFormation logs while working "
                                 "with stacks")

    def run(self, options, **kwargs):
        """Execute the destroy action against the stacks in the config."""
        super(Destroy, self).run(options, **kwargs)
        action = destroy.Action(options.context, provider=options.provider)
        action.execute(force=options.force, tail=options.tail)
| bsd-2-clause |
tscholze/py-hasi-home-analytical-system-interface | hasi/gadgets/views.py | 1 | 1263 | # -*- coding: utf-8 -*-
#
# Erstellt von Martin Gutmair
# modified 04.05.2011
#
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.contrib.auth.models import User
from hasi.gadgets.models import Gadget, GadgetSubdevice, UserGadget
def gadgets(request):
    """Render the user's gadgets grouped into left/center/right columns.

    Anonymous visitors are redirected to the login page with ``next`` set
    so they come back here after logging in.
    """
    # Guard clause replaces the old redundant else-branch, which re-tested
    # `not request.user.is_authenticated()` inside the else of the same
    # check and could implicitly return None.
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login.html?next=%s' % request.path)

    # Gadgets assigned to this user, split by their configured column
    # position (0 = left, 10 = center, 20 = right).
    gadgets = Gadget.objects.filter(usergadget__users=request.user)
    gadgets_left = gadgets.filter(usergadget__position=0)
    gadgets_center = gadgets.filter(usergadget__position=10)
    gadgets_right = gadgets.filter(usergadget__position=20)
    return render_to_response(
        'gadgets/index.html',
        {'gadgets_left': gadgets_left,
         'gadgets_center': gadgets_center,
         'gadgets_right': gadgets_right},
        context_instance=RequestContext(request))
| mit |
ajnirp/servo | tests/wpt/web-platform-tests/old-tests/webdriver/windows/window_manipulation.py | 142 | 1556 | # -*- mode: python; fill-column: 100; comment-column: 100; -*-
import os
import sys
import unittest
sys.path.insert(1, os.path.abspath(os.path.join(__file__, "../..")))
import base_test
from selenium.common import exceptions
class WindowingTest(base_test.WebDriverBaseTest):
    """WebDriver window sizing/positioning conformance checks."""

    def test_maximize(self):
        # Only checks that the call does not raise.
        #self.driver.get(self.webserver.where_is("windows/res/win1.html"))
        self.driver.maximize_window()

    def test_window_size_manipulation(self):
        #self.driver.get(self.webserver.where_is("windows/res/win1.html"))
        self.driver.set_window_size(400, 400)
        window_size = self.driver.get_window_size()
        self.assertTrue("width" in window_size)
        self.assertTrue("height" in window_size)
        self.assertEquals({"width": 400, "height":400}, window_size)

        """
        todo: make that work
        see: https://w3c.github.io/webdriver/webdriver-spec.html#setwindowsize
        result = self.driver.set_window_size(100, 100)
        self.assertTrue("status" in result)
        self.assertEquals(result["status"], 500)
        """

    def test_window_position_manipulation(self):
        #self.driver.get(self.webserver.where_is("windows/res/win1.html"))
        self.driver.set_window_position(400, 400)
        window_position = self.driver.get_window_position()
        self.assertTrue("x" in window_position)
        self.assertTrue("y" in window_position)
        self.assertEquals({"x": 400, "y": 400}, window_position)
if __name__ == "__main__":
unittest.main()
| mpl-2.0 |
robotwholearned/dotfilesCustom | Sublime Text 2/Packages/Package Control/package_control/downloaders/cli_downloader.py | 11 | 2385 | import os
import subprocess
from ..console_write import console_write
from ..cmd import create_cmd
from .non_clean_exit_error import NonCleanExitError
from .binary_not_found_error import BinaryNotFoundError
class CliDownloader(object):

    """
    Base for downloaders that use a command line program

    :param settings:
        A dict of the various Package Control settings. The Sublime Text
        Settings API is not used because this code is run in a thread.
    """

    def __init__(self, settings):
        self.settings = settings

    def clean_tmp_file(self):
        # Remove the temp file if it exists. NOTE(review): self.tmp_file is
        # expected to be set by the subclass before this is called — confirm
        # against the concrete downloader implementations.
        if os.path.exists(self.tmp_file):
            os.remove(self.tmp_file)

    def find_binary(self, name):
        """
        Finds the given executable name in the system PATH

        :param name:
            The exact name of the executable to find

        :return:
            The absolute path to the executable

        :raises:
            BinaryNotFoundError when the executable can not be found
        """

        dirs = os.environ['PATH'].split(os.pathsep)
        if os.name != 'nt':
            # This is mostly for OS X, which seems to launch ST with a
            # minimal set of environmental variables
            dirs.append('/usr/local/bin')

        for dir_ in dirs:
            path = os.path.join(dir_, name)
            if os.path.exists(path):
                return path

        raise BinaryNotFoundError('The binary %s could not be located' % name)

    def execute(self, args):
        """
        Runs the executable and args and returns the result

        :param args:
            A list of the executable path and all arguments to be passed to it

        :return:
            The text output of the executable

        :raises:
            NonCleanExitError when the executable exits with an error
        """

        if self.settings.get('debug'):
            console_write(u"Trying to execute command %s" % create_cmd(args), True)

        proc = subprocess.Popen(args, stdin=subprocess.PIPE,
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        # NOTE(review): stdout is read to completion before stderr; a process
        # that fills the stderr pipe first could block here — presumably the
        # wrapped tools only emit small amounts of stderr output.
        output = proc.stdout.read()
        self.stderr = proc.stderr.read()
        returncode = proc.wait()
        if returncode != 0:
            error = NonCleanExitError(returncode)
            error.stderr = self.stderr
            error.stdout = output
            raise error
        return output
| mit |
huiyiqun/check_mk | web/htdocs/htmllib.py | 1 | 105737 | #!/usr/bin/env python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
# TODO:
#
# Notes for future rewrite:
#
# - Find all call sites which do something like "int(html.var(...))"
# and replace it with html.get_integer_input(...)
#
# - Make clear which functions return values and which write out values
# render_*, add_*, write_* (e.g. icon() -> outputs directly,
# render_icon() -> returns icon
#
# - Order of arguments:
# e.g. icon(help, icon) -> change and make help otional?
#
# - Fix names of message() show_error() show_warning()
#
# - change naming of html.attrencode() to html.render()
#
# - General rules:
# 1. values of type str that are passed as arguments or
# return values or are stored in datastructures must not contain
# non-Ascii characters! UTF-8 encoding must just be used in
# the last few CPU cycles before outputting. Conversion from
# input to str or unicode must happen as early as possible,
# directly when reading from file or URL.
#
# - indentify internal helper methods and prefix them with "_"
#
# - Split HTML handling (page generating) code and generic request
# handling (vars, cookies, ...) up into separate classes to make
# the different tasks clearer. For example a RequestHandler()
# and a HTMLGenerator() or similar.
import time
import os
import urllib
import ast
import random
import re
import __builtin__
import signal
import json
from collections import deque
from contextlib import contextmanager
# Monkey patch in order to make the HTML class below json-serializable without changing the default json calls.
def _default(self, obj):
    # Delegate to the object's class-level to_json() when it defines one,
    # otherwise fall back to the stock encoder saved below.
    return getattr(obj.__class__, "to_json", _default.default)(obj)

_default.default = json.JSONEncoder().default # Save unmodified default.
json.JSONEncoder.default = _default # replacement
from cmk.exceptions import MKGeneralException, MKException
from gui_exceptions import MKUserError
# Information about uri
class InvalidUserInput(Exception):
    """Raised when a request variable fails validation.

    Carries the offending variable name and a human readable message.
    """
    def __init__(self, varname, text):
        super(InvalidUserInput, self).__init__(varname, text)
        self.varname = varname
        self.text = text
class RequestTimeout(MKException):
    """Raised when processing of the current HTTP request takes too long."""
    pass
#.
# .--Escaper-------------------------------------------------------------.
# | _____ |
# | | ____|___ ___ __ _ _ __ ___ _ __ |
# | | _| / __|/ __/ _` | '_ \ / _ \ '__| |
# | | |___\__ \ (_| (_| | |_) | __/ | |
# | |_____|___/\___\__,_| .__/ \___|_| |
# | |_| |
# +----------------------------------------------------------------------+
# | |
# '----------------------------------------------------------------------
class Escaper(object):
    """Escapes/unescapes HTML in attribute values and (restricted) text.

    NOTE: the HTML entities in this class were destroyed by an
    entity-decoding pass upstream (e.g. ``replace("&", "&")`` was a no-op);
    they are reconstructed here to the canonical HTML-escape form.
    """

    def __init__(self):
        super(Escaper, self).__init__()
        # Patterns matching the *escaped* form of the few tags that
        # escape_text() allows; they are converted back into real tags.
        self._unescaper_text = re.compile(r'&lt;(/?)(h1|h2|b|tt|i|u|br(?: /)?|nobr(?: /)?|pre|a|sup|p|li|ul|ol)&gt;')
        self._unescaper_href = re.compile(r'&lt;a href=(?:&quot;|\')(.*?)(?:&quot;|\')&gt;')
        self._unescaper_href_target = re.compile(r'&lt;a href=(?:&quot;|\')(.*?)(?:&quot;|\') target=(?:&quot;|\')(.*?)(?:&quot;|\')&gt;')

    # Encode HTML attributes. Replace HTML syntax with HTML text.
    # For example: replace '"' with '&quot;', '<' with '&lt;'.
    # This code is slow. Works on str and unicode without changing
    # the type. Also works on things that can be converted with '%s'.
    def escape_attribute(self, value):
        attr_type = type(value)
        if value is None:
            return ''
        elif attr_type == int:
            return str(value)
        elif isinstance(value, HTML):
            return "%s" % value # This is HTML code which must not be escaped
        elif attr_type not in [str, unicode]: # also possible: type Exception!
            value = "%s" % value # Note: this allows Unicode. value might not have type str now
        # '&' must be replaced first so already-produced entities survive.
        return value.replace("&", "&amp;")\
                    .replace('"', "&quot;")\
                    .replace("<", "&lt;")\
                    .replace(">", "&gt;")

    def unescape_attributes(self, value):
        # Inverse of escape_attribute() for the four entities it produces.
        return value.replace("&amp;", "&")\
                    .replace("&quot;", "\"")\
                    .replace("&lt;", "<")\
                    .replace("&gt;", ">")

    # render HTML text.
    # We only strip od some tags and allow some simple tags
    # such as <h1>, <b> or <i> to be part of the string.
    # This is useful for messages where we want to keep formatting
    # options. (Formerly known as 'permissive_attrencode') """
    # for the escaping functions
    def escape_text(self, text):
        if isinstance(text, HTML):
            return "%s" % text # This is HTML code which must not be escaped

        text = self.escape_attribute(text)
        text = self._unescaper_text.sub(r'<\1\2>', text)
        # Also repair link definitions
        text = self._unescaper_href_target.sub(r'<a href="\1" target="\2">', text)
        text = self._unescaper_href.sub(r'<a href="\1">', text)
        text = re.sub(r'&amp;nbsp;', '&nbsp;', text)
        return text
#.
# .--Encoding------------------------------------------------------------.
# | _____ _ _ |
# | | ____|_ __ ___ ___ __| (_)_ __ __ _ |
# | | _| | '_ \ / __/ _ \ / _` | | '_ \ / _` | |
# | | |___| | | | (_| (_) | (_| | | | | | (_| | |
# | |_____|_| |_|\___\___/ \__,_|_|_| |_|\__, | |
# | |___/ |
# +----------------------------------------------------------------------+
# | |
# '----------------------------------------------------------------------'
class Encoder(object):
    """URL-encoding helpers (Python 2: operates on str/unicode)."""

    # This function returns a str object, never unicode!
    # Beware: this code is crucial for the performance of Multisite!
    # Changing from the self coded urlencode to urllib.quote
    # is saving more then 90% of the total HTML generating time
    # on more complex pages!
    #
    # TODO: Cleanup self.urlencode_vars, self.urlencode and self.urlencode_plus.
    #       urlencode_vars() should directly use urlencode or urlencode_vars and
    #       not fallback to self.urlencode on it's own. self.urlencode() should
    #       work for a single value exacly as urlencode_vars() does for multiple
    def urlencode_vars(self, vars):
        # Encode a list of (name, value) pairs into a query string; pairs
        # are sorted by name for stable output.
        output = []
        for varname, value in sorted(vars):
            if type(value) == int:
                value = str(value)
            elif type(value) == unicode:
                value = value.encode("utf-8")

            try:
                # urllib is not able to encode non-Ascii characters. Yurks
                output.append(varname + '=' + urllib.quote(value))
            except:
                output.append(varname + '=' + self.urlencode(value)) # slow but working

        return '&'.join(output)

    def urlencode(self, value):
        # Hand-rolled percent-encoding; also encodes characters such as
        # '#', '+', quotes, '=', '&', ':' and '%'. Spaces become '+'.
        if type(value) == unicode:
            value = value.encode("utf-8")
        elif value == None:
            return ""

        ret = ""
        for c in value:
            if c == " ":
                c = "+"
            elif ord(c) <= 32 or ord(c) > 127 or c in [ '#', '+', '"', "'", "=", "&", ":", "%" ]:
                c = "%%%02x" % ord(c)
            ret += c
        return ret

    # Like urllib.quote() but also replaces spaces and /
    def urlencode_plus(self, value):
        if type(value) == unicode:
            value = value.encode("utf-8")
        elif value == None:
            return ""

        return urllib.quote_plus(value)

    # Escape a variable name so that it only uses allowed charachters for URL variables
    def varencode(self, varname):
        if varname == None:
            return "None"
        if type(varname) == int:
            return varname

        ret = ""
        for c in varname:
            if not c.isdigit() and not c.isalnum() and c != "_":
                ret += "%%%02x" % ord(c)
            else:
                ret += c
        return ret

    def u8(self, c):
        # Single character -> HTML numeric entity for non-ASCII codepoints.
        if ord(c) > 127:
            return "&#%d;" % ord(c)
        else:
            return c

    def utf8_to_entities(self, text):
        # Despite the name this only utf-8 encodes unicode input;
        # str input is passed through unchanged.
        if type(text) != unicode:
            return text
        else:
            return text.encode("utf-8")
#.
# .--HTML----------------------------------------------------------------.
# | _ _ _____ __ __ _ |
# | | | | |_ _| \/ | | |
# | | |_| | | | | |\/| | | |
# | | _ | | | | | | | |___ |
# | |_| |_| |_| |_| |_|_____| |
# | |
# +----------------------------------------------------------------------+
# | This is a simple class which wraps a unicode string provided by |
# | the caller to make html.attrencode() know that this string should |
# | not be escaped. |
# | |
# | This way we can implement encodings while still allowing HTML code. |
# | This is useful when one needs to print out HTML tables in messages |
# | or help texts. |
# | |
# | The HTML class is implemented as an immutable type. |
# | Every instance of the class is a unicode string. |
# | Only utf-8 compatible encodings are supported. |
# '----------------------------------------------------------------------'
class HTML(object):
    """Immutable unicode wrapper marking a string as already-safe HTML.

    html.attrencode() leaves HTML instances unescaped, so wrapping text
    in HTML() is the way to pass real markup through the escaping
    layer. All operations return new HTML objects."""
    def __init__(self, value = u''):
        super(HTML, self).__init__()
        self.value = self._ensure_unicode(value)
    def __unicode__(self):
        return self.value
    def _ensure_unicode(self, thing, encoding_index=0):
        # Coerce *thing* to unicode; str objects that are not
        # ASCII-decodable are decoded as UTF-8.
        # NOTE(review): the encoding_index parameter is never used -
        # confirm whether it can be removed from the signature.
        try:
            return unicode(thing)
        except UnicodeDecodeError:
            return thing.decode("utf-8")
    def __bytebatzen__(self):
        # NOTE(review): not a real Python protocol method - nothing in the
        # language ever calls __bytebatzen__. Probably meant as __bytes__
        # or a plain encode() helper; verify callers before renaming.
        return self.value.encode("utf-8")
    def __str__(self):
        # Against the sense of the __str__() method, we need to return the value
        # as unicode here. Why? There are many cases where something like
        # "%s" % HTML(...) is done in the GUI code. This calls the __str__ function
        # because the origin is a str() object. The call will then return a UTF-8
        # encoded str() object. This brings a lot of compatibility issues to the code
        # which can not be solved easily.
        # To deal with this situation we need the implicit conversion from str to
        # unicode that happens when we return a unicode value here. In all relevant
        # cases this does exactly what we need. It would only fail if the origin
        # string contained characters that can not be decoded with the ascii codec
        # which is not relevant for us here.
        #
        # This is problematic:
        # html.write("%s" % HTML("ä"))
        #
        # Bottom line: We should really clean up the internal unicode/str handling.
        return self.value
    def __repr__(self):
        return ("HTML(\"%s\")" % self.value).encode("utf-8")
    def to_json(self):
        return self.value
    # Concatenation in either direction keeps the HTML marker.
    def __add__(self, other):
        return HTML(self.value + self._ensure_unicode(other))
    def __iadd__(self, other):
        return self.__add__(other)
    def __radd__(self, other):
        return HTML(self._ensure_unicode(other) + self.value)
    def join(self, iterable):
        return HTML(self.value.join(map(self._ensure_unicode, iterable)))
    def __eq__(self, other):
        return self.value == self._ensure_unicode(other)
    def __ne__(self, other):
        return self.value != self._ensure_unicode(other)
    def __len__(self):
        return len(self.value)
    def __getitem__(self, index):
        return HTML(self.value[index])
    def __contains__(self, item):
        return self._ensure_unicode(item) in self.value
    def count(self, sub, *args):
        return self.value.count(self._ensure_unicode(sub), *args)
    def index(self, sub, *args):
        return self.value.index(self._ensure_unicode(sub), *args)
    # The strip variants only coerce the optional first (chars) argument.
    def lstrip(self, *args):
        args = tuple(map(self._ensure_unicode, args[:1])) + args[1:]
        return HTML(self.value.lstrip(*args))
    def rstrip(self, *args):
        args = tuple(map(self._ensure_unicode, args[:1])) + args[1:]
        return HTML(self.value.rstrip(*args))
    def strip(self, *args):
        args = tuple(map(self._ensure_unicode, args[:1])) + args[1:]
        return HTML(self.value.strip(*args))
    def lower(self):
        return HTML(self.value.lower())
    def upper(self):
        return HTML(self.value.upper())
    def startswith(self, prefix, *args):
        return self.value.startswith(self._ensure_unicode(prefix), *args)
__builtin__.HTML = HTML
#.
# .--OutputFunnel--------------------------------------------------------.
# | ___ _ _ _____ _ |
# | / _ \ _ _| |_ _ __ _ _| |_| ___| _ _ __ _ __ ___| | |
# | | | | | | | | __| '_ \| | | | __| |_ | | | | '_ \| '_ \ / _ \ | |
# | | |_| | |_| | |_| |_) | |_| | |_| _|| |_| | | | | | | | __/ | |
# | \___/ \__,_|\__| .__/ \__,_|\__|_| \__,_|_| |_|_| |_|\___|_| |
# | |_| |
# +----------------------------------------------------------------------+
# | Provides the write functionality. The method lowlevel_write needs   |
# | to be overwritten in the specific subclass!                          |
# | |
# | Usage of plugged context: |
# | with html.plugged(): |
# | html.write("something") |
# | html_code = html.drain() |
# | print html_code |
# '----------------------------------------------------------------------'
class OutputFunnel(object):
    """Provides the write functionality; lowlevel_write() has to be
    overwritten in the specific subclass.

    While "plugged", written text is buffered per plug level instead of
    being handed to lowlevel_write(), and can later be fetched with
    drain() or passed downwards with flush()/unplug()."""
    def __init__(self):
        super(OutputFunnel, self).__init__()
        self.plug_level = -1  # -1 means: not plugged
        self.plug_text = []   # one list of text chunks per plug level
    # Accepts str and unicode objects only!
    # The plugged functionality can be used for debugging.
    def write(self, text):
        """Write *text* out (or into the current plug buffer).

        Raises MKGeneralException for anything but str/unicode/HTML."""
        if not text:
            return
        if isinstance(text, HTML):
            text = "%s" % text
        if type(text) not in [str, unicode]: # also possible: type Exception!
            raise MKGeneralException(_('Type Error: html.write accepts str and unicode input objects only!'))
        if self.is_plugged():
            self.plug_text[self.plug_level].append(text)
        else:
            # encode when really writing out the data. Not when writing plugged,
            # because the plugged code will be handled somehow by our code. We
            # only encode when leaving the pythonic world.
            if type(text) == unicode:
                text = text.encode("utf-8")
            self.lowlevel_write(text)
    def lowlevel_write(self, text):
        # Transport specific - implemented by the concrete subclass.
        raise NotImplementedError()
    @contextmanager
    def plugged(self):
        # Context manager around plug()/unplug(). On an exception the
        # buffered text is dropped via drain() so it does not leak into
        # the regular output; the exception is re-raised.
        self.plug()
        try:
            yield
        except Exception, e:
            self.drain()
            raise
        finally:
            self.unplug()
    # Put in a plug which stops the text stream and redirects it to a sink.
    def plug(self):
        self.plug_text.append([])
        self.plug_level += 1
    def is_plugged(self):
        return self.plug_level > -1
    # Pull the plug for a moment to allow the sink content to pass through.
    def flush(self):
        # Empties the current level's buffer and writes its content one
        # level further down (or out, if this was the only plug). The
        # level is lowered only temporarily for the nested write().
        if not self.is_plugged():
            return None
        text = "".join(self.plug_text[self.plug_level])
        self.plug_text[self.plug_level] = []
        self.plug_level -= 1
        self.write(text)
        self.plug_level += 1
    # Get the sink content in order to do something with it.
    def drain(self):
        if not self.is_plugged():
            return ''
        text = "".join(self.plug_text[self.plug_level])
        self.plug_text[self.plug_level] = []
        return text
    def unplug(self):
        # Flush pending content downwards, then remove the plug.
        if not self.is_plugged():
            return
        self.flush()
        self.plug_text.pop()
        self.plug_level -= 1
    def unplug_all(self):
        while(self.is_plugged()):
            self.unplug()
#.
# .--HTML Generator------------------------------------------------------.
# | _ _ _____ __ __ _ |
# | | | | |_ _| \/ | | |
# | | |_| | | | | |\/| | | |
# | | _ | | | | | | | |___ |
# | |_| |_| |_| |_| |_|_____| |
# | |
# | ____ _ |
# | / ___| ___ _ __ ___ _ __ __ _| |_ ___ _ __ |
# | | | _ / _ \ '_ \ / _ \ '__/ _` | __/ _ \| '__| |
# | | |_| | __/ | | | __/ | | (_| | || (_) | | |
# | \____|\___|_| |_|\___|_| \__,_|\__\___/|_| |
# | |
# +----------------------------------------------------------------------+
# | Generator which provides top level HTML writing functionality. |
# '----------------------------------------------------------------------'
class HTMLGenerator(OutputFunnel):
    """Generator which provides top level HTML writing functionality.

    Usage Notes:
      - Tags can be opened using the open_[tag]() call where [tag] is one of the possible tag names.
        All attributes can be passed as function arguments, such as open_div(class_="example").
        However, python specific key words need to be escaped using a trailing underscore.
        One can also provide a dictionary as attributes: open_div(**{"class": "example"}).
      - All tags can be closed again using the close_[tag]() syntax.
      - For tags which shall only contain plain text (i.e. no tags other than highlighting tags)
        you can use the direct call with the tag name only as function name, e.g.
        self.div("Text content", **attrs). Tags featuring this functionality are listed in
        the _shortcut_tags set.
      - Some tags require mandatory arguments. Those are defined explicitly below.
        For example an a tag needs the href attribute every time.
      - If you want to provide plain HTML to a tag, use the render_[tag]() functions or
        facilitate the HTML class.

    HOWTO HTML Attributes:
      - Python specific attributes have to be escaped using a trailing underscore.
      - All attributes can be python objects. However, some attributes can also be lists of attrs:
            'class' attributes will be concatenated using one whitespace,
            'style' attributes will be concatenated using a semicolon and one whitespace,
            behavioral attributes such as 'onclick', 'onmouseover' will be concatenated
            using a semicolon and one whitespace.
      - All attribute values are escaped, i.e. the characters '&', '<', '>', '"' are
        replaced by their corresponding HTML entities."""

    # TODO: Replace u, i, b with underline, italic, bold, etc.

    # These tags can be called by their tag names, e.g. 'self.title(content)'
    _shortcut_tags = set(["title", "h1", "h2", "h3", "h4", "th", "tr", "td", "center", "pre", "style", "iframe",
                          "div", "p", "span", "canvas", "strong", "sub", "tt", "u", "i", "b", "x", "option"])

    # These tags can be called by open_name(), close_name() and render_name(), e.g. 'self.open_html()'
    _tag_names = set(['html', 'head', 'body', 'header', 'footer', 'a', 'b', 'sup',
                      'script', 'form', 'button', 'p', 'select', 'fieldset',
                      'table', 'tbody', 'row', 'ul', 'li', 'br', 'nobr', 'input', 'span'])

    # Of course all shortcut tags can be used as well.
    _tag_names.update(_shortcut_tags)

    def __init__(self):
        super(HTMLGenerator, self).__init__()
        self.indent_level = 0
        self.indent = 2
        self.testing_mode = False
        self.escaper = Escaper()

    #
    # Rendering
    #

    def _render_attributes(self, **attrs):
        """Yield one ' key="value"' text chunk per attribute.

        Keys get a trailing underscore stripped ('class_' -> 'class');
        keys and values are attribute escaped. List values of 'class'
        resp. 'style'/'on*' attributes are joined with ' ' resp. '; '.
        Attributes whose value is the empty string are rendered as
        value-less options (e.g. 'checked')."""
        # TODO: REMOVE AFTER REFACTORING IS DONE!!
        if self.testing_mode:
            for key in attrs:
                assert key.rstrip('_') in ['class', 'id', 'src', 'type', 'name',
                    'onclick', 'ondblclick', 'onsubmit', 'onmouseover', 'onmouseout', 'onfocus', 'onkeydown', 'onchange',
                    'size', 'autocomplete', 'readonly', 'value', 'checked','rows', 'cols',
                    'content', 'href', 'http-equiv', 'rel', 'for', 'title', 'target','multiple',
                    'align', 'valign', 'style', 'width', 'height', 'colspan', 'data-type', 'data-role','selected',
                    'cellspacing', 'cellpadding', 'border', 'allowTransparency', 'frameborder'], key

        # make class attribute foolproof
        css = []
        for k in ["class_", "css", "cssclass", "class"]:
            if k in attrs:
                if isinstance(attrs[k], list):
                    css.extend(attrs.pop(k))
                elif attrs[k] is not None:
                    css.append(attrs.pop(k))
        if css:
            attrs["class"] = css

        # options such as 'selected' and 'checked' dont have a value in html tags
        options = []

        # render all attributes
        for k, v in attrs.iteritems():
            if v is None:
                continue
            k = self.escaper.escape_attribute(k.rstrip('_'))
            if v == '':
                options.append(k)
                continue
            if not isinstance(v, list):
                v = self.escaper.escape_attribute(v)
            else:
                if k == "class":
                    sep = ' '
                elif k == "style" or k.startswith('on'):
                    sep = '; '
                else:
                    # NOTE(review): joining arbitrary list values with '_'
                    # looks surprising - confirm this is really intended.
                    sep = '_'
                v = sep.join([a for a in (self.escaper.escape_attribute(vi) for vi in v) if a])
                if sep.startswith(';'):
                    v = re.sub(';+', ';', v)
            yield ' %s=\"%s\"' % (k, v)

        for k in options:
            yield " %s=\'\'" % k

    # applies attribute encoding to prevent code injections.
    def _render_opening_tag(self, tag_name, close_tag=False, **attrs):
        """Render '<tag ...>' (or the self-closing '<tag ... />' when
        close_tag is set). Attributes which are also python key words
        such as 'class', 'id', 'for' or 'type' must be passed with a
        trailing underscore (e.g. 'class_' or 'id_')."""
        #self.indent_level += self.indent
        indent = ' ' * (self.indent_level - self.indent)
        return HTML("%s<%s%s%s>" % (indent, tag_name,
                                    '' if not attrs else ''.join(self._render_attributes(**attrs)),
                                    '' if not close_tag else ' /'))

    def _render_closing_tag(self, tag_name):
        #self.indent_level -= self.indent if self.indent_level < 0 else 0
        return HTML("%s</%s>" % (' ' * self.indent_level, tag_name))

    def _render_content_tag(self, tag_name, tag_content, **attrs):
        """Render '<tag ...>content</tag>'. HTML content is taken over
        verbatim; everything else is text escaped."""
        tag = self._render_opening_tag(tag_name, **attrs)
        if tag_content in ['', None]:
            pass
        else:
            tag = tag.rstrip("\n")
            if isinstance(tag_content, HTML):
                tag += tag_content.lstrip(' ').rstrip('\n')
            else:
                tag += self.escaper.escape_text(tag_content)
        tag += "</%s>" % (tag_name)
        #self.indent_level -= 1
        return HTML(tag)

    # This is used to create all the render_tag() and close_tag() functions
    def __getattr__(self, name):
        """Dynamic dispatch for the tag helpers: resolves self.[tag]()
        for shortcut tags as well as open_[tag](), close_[tag]() and
        render_[tag]() for all known tag names, e.g. self.close_html(),
        self.close_tr(), etc."""
        parts = name.split('_')
        # generating the shortcut tag calls
        if len(parts) == 1 and name in self._shortcut_tags:
            return lambda content, **attrs: self.write_html(self._render_content_tag(name, content, **attrs))
        # generating the open, close and render calls
        elif len(parts) == 2:
            what, tag_name = parts[0], parts[1]
            if what == "open" and tag_name in self._tag_names:
                return lambda **attrs: self.write_html(self._render_opening_tag(tag_name, **attrs))
            elif what == "close" and tag_name in self._tag_names:
                return lambda : self.write_html(self._render_closing_tag(tag_name))
            elif what == "render" and tag_name in self._tag_names:
                return lambda content, **attrs: HTML(self._render_content_tag(tag_name, content, **attrs))
        else:
            # FIXME: This returns None, which is not a very informative error message
            return object.__getattribute__(self, name)

    #
    # HTML element methods
    # If an argument is mandatory, it is used as default and it will overwrite an
    # implicit argument (e.g. id_ will overwrite attrs["id"]).
    #

    #
    # basic elements
    #

    def render_text(self, text):
        """Render *text* escaped via escape_text() (highlighting tags survive)."""
        return HTML(self.escaper.escape_text(text))

    def write_text(self, text):
        """ Write text. Highlighting tags such as h2|b|tt|i|br|pre|a|sup|p|li|ul|ol are not escaped. """
        self.write(self.render_text(text))

    def write_html(self, content):
        """ Write HTML code directly, without escaping. """
        self.write(content)

    def comment(self, comment_text):
        # Escape the payload so user supplied text cannot terminate the
        # comment early (e.g. via '-->'); escape_attribute() replaces '>'
        # among others. FIX: the previous code called the non-existent
        # method self.encode_attribute(), so every comment() call died
        # with an AttributeError (via the __getattr__ fallback above).
        self.write("<!--%s-->" % self.escaper.escape_attribute(comment_text))

    def meta(self, httpequiv=None, **attrs):
        if httpequiv:
            attrs['http-equiv'] = httpequiv
        self.write_html(self._render_opening_tag('meta', close_tag=True, **attrs))

    def base(self, target):
        self.write_html(self._render_opening_tag('base', close_tag=True, target=target))

    def open_a(self, href, **attrs):
        attrs['href'] = href
        self.write_html(self._render_opening_tag('a', **attrs))

    def render_a(self, content, href, **attrs):
        attrs['href'] = href
        return self._render_content_tag('a', content, **attrs)

    def a(self, content, href, **attrs):
        self.write_html(self.render_a(content, href, **attrs))

    def stylesheet(self, href):
        self.write_html(self._render_opening_tag('link', rel="stylesheet", type_="text/css", href=href, close_tag=True))

    #
    # Scripting
    #

    def render_javascript(self, code):
        return HTML("<script type=\"text/javascript\">\n%s\n</script>\n" % code)

    def javascript(self, code):
        self.write_html(self.render_javascript(code))

    def javascript_file(self, src):
        """ <script type="text/javascript" src="%(src)"/>\n """
        self.write_html(self._render_content_tag('script', '', type_="text/javascript", src=src))

    def render_img(self, src, **attrs):
        attrs['src'] = src
        return self._render_opening_tag('img', close_tag=True, **attrs)

    def img(self, src, **attrs):
        self.write_html(self.render_img(src, **attrs))

    def open_button(self, type_, **attrs):
        # NOTE(review): close_tag=True renders a self-closing
        # '<button ... />' although this method is called open_button() -
        # confirm whether a plain opening tag was intended.
        attrs['type'] = type_
        self.write_html(self._render_opening_tag('button', close_tag=True, **attrs))

    def play_sound(self, url):
        # NOTE(review): 'audio autoplay' abuses the tag name to sneak in
        # the value-less autoplay option; 'src_' relies on the trailing
        # underscore stripping done by _render_attributes().
        self.write_html(self._render_opening_tag('audio autoplay', src_=url))

    #
    # form elements
    #

    def render_label(self, content, for_, **attrs):
        attrs['for'] = for_
        return self._render_content_tag('label', content, **attrs)

    def label(self, content, for_, **attrs):
        self.write_html(self.render_label(content, for_, **attrs))

    def render_input(self, name, type_, **attrs):
        # 'type_' is stored with the underscore on purpose -
        # _render_attributes() strips it when rendering.
        attrs['type_'] = type_
        attrs['name'] = name
        return self._render_opening_tag('input', close_tag=True, **attrs)

    def input(self, name, type_, **attrs):
        self.write_html(self.render_input(name, type_, **attrs))

    #
    # table and list elements
    #

    def td(self, content, **attrs):
        """ Only for text content. You can't put HTML structure here. """
        self.write_html(self._render_content_tag('td', content, **attrs))

    def li(self, content, **attrs):
        """ Only for text content. You can't put HTML structure here. """
        self.write_html(self._render_content_tag('li', content, **attrs))

    #
    # structural text elements
    #

    def render_heading(self, content):
        """ <h2>%(content)</h2> """
        return self._render_content_tag('h2', content)

    def heading(self, content):
        self.write_html(self.render_heading(content))

    def render_br(self):
        return HTML("<br/>")

    def br(self):
        self.write_html(self.render_br())

    def render_hr(self, **attrs):
        return self._render_opening_tag('hr', close_tag=True, **attrs)

    def hr(self, **attrs):
        self.write_html(self.render_hr(**attrs))

    def rule(self):
        return self.hr()

    def render_nbsp(self):
        # The entity must not be escaped again, hence the HTML wrapper.
        # NOTE(review): restored the "&nbsp;" literal here - the previous
        # source contained a bare space character (apparently an
        # entity-decoding artifact), which defeated the method's purpose.
        return HTML("&nbsp;")

    def nbsp(self):
        self.write_html(self.render_nbsp())
#.
# .--RequestHandler----------------------------------------------------------------.
# | ____ _ _ _ _ _ |
# | | _ \ ___ __ _ _ _ ___ ___| |_| | | | __ _ _ __ __| | | ___ _ __ |
# | | |_) / _ \/ _` | | | |/ _ \/ __| __| |_| |/ _` | '_ \ / _` | |/ _ \ '__| |
# | | _ < __/ (_| | |_| | __/\__ \ |_| _ | (_| | | | | (_| | | __/ | |
# | |_| \_\___|\__, |\__,_|\___||___/\__|_| |_|\__,_|_| |_|\__,_|_|\___|_| |
# | |_| |
# +--------------------------------------------------------------------------------+
# | |
# '--------------------------------------------------------------------------------'
class RequestHandler(object):
    """Handling of HTTP request data: GET/POST variables, uploads,
    cookies, transaction ids and the request timeout.

    Web server specific functionality (content type, redirects, transid
    persistence, ...) is implemented by subclasses; the corresponding
    methods here raise NotImplementedError."""
    def __init__(self):
        super(RequestHandler, self).__init__()
        # Variable management
        self.vars = {}
        self.listvars = {} # for variables with more than one occurrence
        self.uploads = {}
        self.var_stash = []
        self.cookies = {}
        # Transaction IDs
        self.new_transids = []
        self.ignore_transids = False
        self.current_transid = None
        # Timing
        self._request_timeout = 110 # seconds
    #
    # Request settings
    #
    # The system web servers configured request timeout. This is the time
    # before the request is terminated from the view of the client.
    def client_request_timeout(self):
        raise NotImplementedError()
    def is_ssl_request(self):
        raise NotImplementedError()
    def request_method(self):
        raise NotImplementedError()
    def get_user_agent(self):
        raise NotImplementedError()
    def get_referer(self):
        raise NotImplementedError()
    def http_redirect(self, url):
        raise MKGeneralException("http_redirect not implemented")
    #
    # Request Processing
    #
    def var(self, varname, deflt = None):
        """Return the (last) value of request variable *varname*."""
        return self.vars.get(varname, deflt)
    def has_var(self, varname):
        return varname in self.vars
    # Checks if a variable with a given prefix is present
    def has_var_prefix(self, prefix):
        for varname in self.vars:
            if varname.startswith(prefix):
                return True
        return False
    def var_utf8(self, varname, deflt = None):
        # str values are decoded as UTF-8 and may raise
        # UnicodeDecodeError; see get_unicode_input() for the variant
        # that turns that into a user error.
        val = self.vars.get(varname, deflt)
        if type(val) == str:
            return val.decode("utf-8")
        else:
            return val
    def all_vars(self):
        return self.vars
    def all_varnames_with_prefix(self, prefix):
        for varname in self.vars.keys():
            if varname.startswith(prefix):
                yield varname
    # Return all values of a variable that possibly occurs more
    # than once in the URL. Note: self.listvars contains a variable
    # only if it really occurs more than once.
    def list_var(self, varname):
        if varname in self.listvars:
            return self.listvars[varname]
        elif varname in self.vars:
            return [self.vars[varname]]
        else:
            return []
    # Adds a variable to listvars and also set it
    def add_var(self, varname, value):
        self.listvars.setdefault(varname, [])
        self.listvars[varname].append(value)
        self.vars[varname] = value
    def set_var(self, varname, value):
        """Set *varname* to *value*; None deletes the variable."""
        if value is None:
            self.del_var(varname)
        elif type(value) in [ str, unicode ]:
            self.vars[varname] = value
        else:
            # crash report please
            raise TypeError(_("Only str and unicode values are allowed, got %s") % type(value))
    def del_var(self, varname):
        if varname in self.vars:
            del self.vars[varname]
        if varname in self.listvars:
            del self.listvars[varname]
    def del_all_vars(self, prefix = None):
        """Drop all request variables, or only those starting with *prefix*."""
        if not prefix:
            self.vars = {}
            self.listvars = {}
        else:
            self.vars = dict([(k,v) for (k,v) in self.vars.iteritems()
                                             if not k.startswith(prefix)])
            self.listvars = dict([(k,v) for (k,v) in self.listvars.iteritems()
                                             if not k.startswith(prefix)])
    def stash_vars(self):
        # Save a copy of the current vars; restored with unstash_vars().
        self.var_stash.append(self.vars.copy())
    def unstash_vars(self):
        self.vars = self.var_stash.pop()
    def uploaded_file(self, varname, default = None):
        return self.uploads.get(varname, default)
    #
    # Cookie handling
    #
    def has_cookie(self, varname):
        return varname in self.cookies
    def get_cookie_names(self):
        return self.cookies.keys()
    def cookie(self, varname, deflt):
        """Return the value of cookie *varname*, or *deflt* if unset."""
        # NOTE(review): the bare except also swallows programming errors;
        # it should probably catch only KeyError (missing cookie).
        try:
            return self.cookies[varname].value
        except:
            return deflt
    #
    # Request timeout handling
    #
    # The system apache process will end the communication with the client after
    # the timeout configured for the proxy connection from system apache to site
    # apache. This is done in /omd/sites/[site]/etc/apache/proxy-port.conf file
    # in the "timeout=x" parameter of the ProxyPass statement.
    #
    # The regular request timeout configured here should always be lower to make
    # it possible to abort the page processing and send a helpful answer to the
    # client.
    #
    # It is possible to disable the applications request timeout (temporarily)
    # or totally for specific calls, but the timeout to the client will always
    # be applied by the system webserver. So the client will always get an error
    # page while the site apache continues processing the request (until the
    # first try to write anything to the client) which will result in an
    # exception.
    #
    # The timeout of the Check_MK GUI request processing. When the timeout handling
    # has been enabled with enable_request_timeout(), after this time an alarm signal
    # will be raised to give the application the option to end the processing in a
    # gentle way.
    def request_timeout(self):
        return self._request_timeout
    def enable_request_timeout(self):
        # Installs a SIGALRM handler that raises RequestTimeout after
        # request_timeout() seconds.
        signal.signal(signal.SIGALRM, self.handle_request_timeout)
        signal.alarm(self.request_timeout())
    def disable_request_timeout(self):
        signal.alarm(0)
    def handle_request_timeout(self, signum, frame):
        # SIGALRM handler: abort the request processing.
        raise RequestTimeout(_("Your request timed out after %d seconds. This issue may be "
                               "related to a local configuration problem or a request which works "
                               "with a too large number of objects. But if you think this "
                               "issue is a bug, please send a crash report.") %
                                                    self.request_timeout())
    #
    # Request processing
    #
    def get_unicode_input(self, varname, deflt = None):
        """Like var_utf8(), but turns a decode failure into MKUserError."""
        try:
            return self.var_utf8(varname, deflt)
        except UnicodeDecodeError:
            raise MKUserError(varname, _("The given text is wrong encoded. "
                                         "You need to provide a UTF-8 encoded text."))
    def get_integer_input(self, varname, deflt=None):
        """Return *varname* as int; raise MKUserError when the variable
        is missing (unless *deflt* is given) or not an integer."""
        if deflt is not None and not self.has_var(varname):
            return deflt
        try:
            return int(self.var(varname))
        except TypeError:
            raise MKUserError(varname, _("The parameter \"%s\" is missing.") % varname)
        except ValueError:
            raise MKUserError(varname, _("The parameter \"%s\" is not an integer.") % varname)
    # Returns a dictionary containing all parameters the user handed over to this request.
    # The concept is that the user can either provide the data in a single "request" variable,
    # which contains the request data encoded as JSON, or provide multiple GET/POST vars which
    # are then used as top level entries in the request object.
    def get_request(self, exclude_vars=None):
        if exclude_vars == None:
            exclude_vars = []
        if self.var("request_format") == "python":
            try:
                python_request = self.var("request", "{}")
                request = ast.literal_eval(python_request)
            except (SyntaxError, ValueError) as e:
                raise MKUserError("request", _("Failed to parse Python request: '%s': %s") %
                                                            (python_request, e))
        else:
            try:
                json_request = self.var("request", "{}")
                request = json.loads(json_request)
                request["request_format"] = "json"
            except ValueError, e: # Python3: json.JSONDecodeError
                raise MKUserError("request", _("Failed to parse JSON request: '%s': %s") %
                                                            (json_request, e))
        # Plain GET/POST vars are merged in on top of the parsed request.
        for key, val in self.all_vars().items():
            if key not in [ "request", "output_format" ] + exclude_vars:
                request[key] = val.decode("utf-8")
        return request
    def parse_field_storage(self, fields, handle_uploads_as_file_obj = False):
        """(Re)initialize vars/listvars/uploads from a cgi.FieldStorage
        style *fields* object."""
        self.vars = {}
        self.listvars = {} # for variables with more than one occurrence
        self.uploads = {}
        # TODO: Fix this regex. +-\ selects all from + to \, not +, - and \!
        varname_regex = re.compile('^[\w\d_.%+-\\\*]+$')
        for field in fields.list:
            varname = field.name
            # To prevent various injections, we only allow a defined set
            # of characters to be used in variables
            if not varname_regex.match(varname):
                continue
            # put uploaded file infos into separate storage
            if field.filename is not None:
                if handle_uploads_as_file_obj:
                    value = field.file
                else:
                    value = field.value
                self.uploads[varname] = (field.filename, field.type, value)
            else: # normal variable
                # Multiple occurrence of a variable? Store in extra list dict
                if varname in self.vars:
                    if varname in self.listvars:
                        self.listvars[varname].append(field.value)
                    else:
                        self.listvars[varname] = [ self.vars[varname], field.value ]
                # In the single-value-store the last occurrence of a variable
                # has precedence. That makes appending variables to the current
                # URL simpler.
                self.vars[varname] = field.value
    #
    # Content Type
    #
    def set_output_format(self, f):
        """Remember the output format and set the matching content type."""
        # NOTE(review): the error message below says "context type" -
        # most likely "content type" was meant.
        self.output_format = f
        if f == "json":
            content_type = "application/json; charset=UTF-8"
        elif f == "jsonp":
            content_type = "application/javascript; charset=UTF-8"
        elif f in ("csv", "csv_export"): # Cleanup: drop one of these
            content_type = "text/csv; charset=UTF-8"
        elif f == "python":
            content_type = "text/plain; charset=UTF-8"
        elif f == "text":
            content_type = "text/plain; charset=UTF-8"
        elif f == "html":
            content_type = "text/html; charset=UTF-8"
        elif f == "xml":
            content_type = "text/xml; charset=UTF-8"
        elif f == "pdf":
            content_type = "application/pdf"
        else:
            raise MKGeneralException(_("Unsupported context type '%s'") % f)
        self.set_content_type(content_type)
    def set_content_type(self, ty):
        raise NotImplementedError()
    def is_api_call(self):
        return self.output_format != "html"
    #
    # Transaction IDs
    #
    def set_ignore_transids(self):
        self.ignore_transids = True
    # Compute a (hopefully) unique transaction id. This is generated during rendering
    # of a form or an action link, stored in a user specific file for later validation,
    # sent to the users browser via HTML code, then submitted by the user together
    # with the action (link / form) and then validated if it is a known transid. When
    # it is a known transid, it will be used and invalidated. If the id is not known,
    # the action will not be processed.
    def fresh_transid(self):
        transid = "%d/%d" % (int(time.time()), random.getrandbits(32))
        self.new_transids.append(transid)
        return transid
    def get_transid(self):
        # Lazily create one transid per request and reuse it.
        if not self.current_transid:
            self.current_transid = self.fresh_transid()
        return self.current_transid
    # All generated transids are saved per user. They are stored in the transids.mk.
    # Per user only up to 20 transids of the already existing ones are kept. The transids
    # generated on the current page are all kept. IDs older than one day are deleted.
    def store_new_transids(self):
        if self.new_transids:
            valid_ids = self.load_transids(lock = True)
            cleared_ids = []
            now = time.time()
            for valid_id in valid_ids:
                timestamp = valid_id.split("/")[0]
                if now - int(timestamp) < 86400: # one day
                    cleared_ids.append(valid_id)
            self.save_transids((cleared_ids[-20:] + self.new_transids))
    # Remove the used transid from the list of valid ones
    def invalidate_transid(self, used_id):
        valid_ids = self.load_transids(lock = True)
        try:
            valid_ids.remove(used_id)
        except ValueError:
            return
        self.save_transids(valid_ids)
    # Checks, if the current transaction is valid, i.e. in case of
    # a browser reload, the form submit should not be handled a second
    # time. The HTML variable _transid must be present.
    #
    # In case of automation users (authed by _secret in URL): If it is empty
    # or -1, then it's always valid (this is used for webservice calls).
    # This was also possible for normal users, but has been removed to prevent
    # security related issues.
    def transaction_valid(self):
        if not self.has_var("_transid"):
            return False
        # NOTE(review): 'id' shadows the builtin of the same name here.
        id = self.var("_transid")
        if self.ignore_transids and (not id or id == '-1'):
            return True # automation
        if '/' not in id:
            return False
        # Normal user/password auth user handling
        timestamp = id.split("/", 1)[0]
        # If age is too old (one week), it is always
        # invalid:
        now = time.time()
        if now - int(timestamp) >= 604800: # 7 * 24 hours
            return False
        # Now check, if this id is a valid one
        if id in self.load_transids():
            #self.guitest_set_transid_valid()
            return True
        else:
            return False
    # Checks, if the current page is a transaction, i.e. something
    # that is secured by a transid (such as a submitted form)
    def is_transaction(self):
        return self.has_var("_transid")
    # called by page functions in order to check, if this was
    # a reload or the original form submission. Increases the
    # transid of the user, if the latter was the case.
    # There are three return codes:
    # True:  -> positive confirmation by the user
    # False: -> not yet confirmed, question is being shown
    # None:  -> a browser reload or a negative confirmation
    def check_transaction(self):
        if self.transaction_valid():
            id = self.var("_transid")
            if id and id != "-1":
                self.invalidate_transid(id)
            return True
        else:
            return False
    def load_transids(self, lock=False):
        # Storage dependent - implemented by the concrete subclass.
        raise NotImplementedError()
    def save_transids(self, used_ids):
        raise NotImplementedError()
#.
# .--html----------------------------------------------------------------.
# | _ _ _ |
# | | |__ | |_ _ __ ___ | | |
# | | '_ \| __| '_ ` _ \| | |
# | | | | | |_| | | | | | | |
# | |_| |_|\__|_| |_| |_|_| |
# | |
# +----------------------------------------------------------------------+
# | Caution! The class needs to be derived from OutputFunnel first!      |
# '----------------------------------------------------------------------'
class html(HTMLGenerator, RequestHandler):
def __init__(self):
super(html, self).__init__()
# rendering state
self.html_is_open = False
self.header_sent = False
self.context_buttons_open = False
self.context_buttons_hidden = False
# style options
self.body_classes = ['main']
self._default_stylesheets = [ "check_mk", "graphs" ]
self._default_javascripts = [ "checkmk", "graphs" ]
# behaviour options
self.render_headfoot = True
self.enable_debug = False
self.screenshotmode = False
self.have_help = False
self.help_visible = False
# browser options
self.output_format = "html"
self.browser_reload = 0
self.browser_redirect = ''
self.link_target = None
self.keybindings_enabled = True
self._requested_url = None
self.myfile = None
# Browser options
self._user_id = None
self.user_errors = {}
self.focus_object = None
self.events = set([]) # currently used only for sounds
self.status_icons = {}
self.final_javascript_code = ""
self.caches = {}
self.treestates = None
self.page_context = {}
# Settings
self.io_error = False
self.mobile = False
self.buffering = True
self.keybindings_enabled = True
self.keybindings = []
# Forms
self.form_name = None
self.form_vars = []
# Time measurement
self.times = {}
self.start_time = time.time()
self.last_measurement = self.start_time
# FIXME: Drop this
self.auto_id = 0
# encoding
self.encoder = Encoder()
    # Key codes (JavaScript keyCode values) used by the keybinding handling
    RETURN = 13
    SHIFT = 16
    CTRL = 17
    ALT = 18
    BACKSPACE = 8
    F1 = 112
    #
    # Encoding
    #
    # Thin delegation wrappers around the Encoder instance from __init__().
    def urlencode_vars(self, vars):
        # Encode a list of (varname, value) pairs into a query string.
        return self.encoder.urlencode_vars(vars)
    def urlencode(self, value):
        # Percent-encode a single value (spaces become '+').
        return self.encoder.urlencode(value)
    def urlencode_plus(self, value):
        # Like urlencode(), but implemented via urllib.quote_plus().
        return self.encoder.urlencode_plus(value)
    # Escape a variable name so that it only uses allowed characters for URL variables
    def varencode(self, varname):
        return self.encoder.varencode(varname)
    def u8(self, c):
        # Represent a non-ASCII character as an HTML numeric entity.
        return self.encoder.u8(c)
    def utf8_to_entities(self, text):
        # NOTE(review): Encoder.utf8_to_entities() actually UTF-8 encodes
        # unicode input instead of producing entities - see there.
        return self.encoder.utf8_to_entities(text)
    #
    # escaping - deprecated functions
    #
    # Encode HTML attributes: e.g. replace '"' with '&quot;', '<' and '>' with '&lt;' and '&gt;'
    def attrencode(self, value):
        return self.escaper.escape_attribute(value)
    # Only strip off some tags. We allow some simple tags like <b> or <tt>.
    def permissive_attrencode(self, obj):
        return self.escaper.escape_text(obj)
#
# Stripping
#
# remove all HTML-tags
def strip_tags(self, ht):
if isinstance(ht, HTML):
ht = "%s" % ht
if type(ht) not in [str, unicode]:
return ht
while True:
x = ht.find('<')
if x == -1:
break
y = ht.find('>', x)
if y == -1:
break
ht = ht[0:x] + ht[y+1:]
return ht.replace(" ", " ")
def strip_scripts(self, ht):
while True:
x = ht.find('<script')
if x == -1:
break
y = ht.find('</script>')
if y == -1:
break
ht = ht[0:x] + ht[y+9:]
return ht
    # TODO: Can this please be dropped?
    def some_id(self):
        # Return a new, process-locally unique DOM id.
        self.auto_id += 1
        return "id_%d" % self.auto_id
    def measure_time(self, name):
        # Accumulate the wall clock time elapsed since the previous
        # measurement under *name* (simple page profiling helper).
        self.times.setdefault(name, 0.0)
        now = time.time()
        elapsed = now - self.last_measurement
        self.times[name] += elapsed
        self.last_measurement = now
    #
    # Simple state setters / getters
    #
    def set_user_id(self, user_id):
        self._user_id = user_id
    def is_mobile(self):
        return self.mobile
    def set_page_context(self, c):
        self.page_context = c
    def set_buffering(self, b):
        self.buffering = b
    def set_link_target(self, framename):
        self.link_target = framename
    def set_focus(self, varname):
        # Focus a form field: remembered as a (form_name, varname) pair.
        self.focus_object = (self.form_name, varname)
    def set_focus_by_id(self, dom_id):
        self.focus_object = dom_id
    def set_render_headfoot(self, render):
        self.render_headfoot = render
    def set_browser_reload(self, secs):
        self.browser_reload = secs
    def set_browser_redirect(self, secs, url):
        self.browser_reload = secs
        self.browser_redirect = url
    def add_default_stylesheet(self, name):
        if name not in self._default_stylesheets:
            self._default_stylesheets.append(name)
    def add_default_javascript(self, name):
        if name not in self._default_javascripts:
            self._default_javascripts.append(name)
    def immediate_browser_redirect(self, secs, url):
        self.javascript("set_reload(%s, '%s');" % (secs, url))
    def add_body_css_class(self, cls):
        self.body_classes.append(cls)
    def add_status_icon(self, img, tooltip, url = None):
        # With a url the icon becomes a link: store a (tooltip, url) pair.
        if url:
            self.status_icons[img] = tooltip, url
        else:
            self.status_icons[img] = tooltip
    def final_javascript(self, code):
        # Collect javascript emitted at the very end of the page.
        self.final_javascript_code += code + "\n"
    def reload_sidebar(self):
        if not self.has_var("_ajaxid"):
            self.write_html(self.render_reload_sidebar())
    def render_reload_sidebar(self):
        return self.render_javascript("reload_sidebar()")
#
# Tree states (persisted open/closed state of foldable trees)
#

def get_tree_states(self, tree):
    self.load_tree_states()
    return self.treestates.get(tree, {})

def set_tree_state(self, tree, key, val):
    self.load_tree_states()
    if tree not in self.treestates:
        self.treestates[tree] = {}
    self.treestates[tree][key] = val

def set_tree_states(self, tree, val):
    self.load_tree_states()
    self.treestates[tree] = val

def load_tree_states(self):
    # To be implemented by subclasses
    raise NotImplementedError()

def save_tree_states(self):
    # To be implemented by subclasses
    raise NotImplementedError()

def get_request_timeout(self):
    return self._request_timeout
#
# Messages
#

def show_info(self, msg):
    self.message(msg, 'message')

def show_error(self, msg):
    self.message(msg, 'error')

def show_warning(self, msg):
    self.message(msg, 'warning')

def render_info(self, msg):
    return self.render_message(msg, 'message')

def render_error(self, msg):
    return self.render_message(msg, 'error')

def render_warning(self, msg):
    return self.render_message(msg, 'warning')

def message(self, msg, what='message'):
    self.write(self.render_message(msg, what))

# msg might be either a string (str or unicode) or an exception object
def render_message(self, msg, what='message'):
    # "what" is one of 'message', 'warning'; anything else counts as error
    if what == 'message':
        cls = 'success'
        prefix = _('MESSAGE')
    elif what == 'warning':
        cls = 'warning'
        prefix = _('WARNING')
    else:
        cls = 'error'
        prefix = _('ERROR')
    code = ""
    if self.output_format == "html":
        code += self.render_div(self.render_text(msg), class_=cls)
        if self.mobile:
            code += self.render_center(code)
    else:
        # non-HTML output: prefixed plain text, tags stripped
        code += self.render_text('%s: %s\n' % (prefix, self.strip_tags(msg)))
    return code

def show_localization_hint(self):
    url = "wato.py?mode=edit_configvar&varname=user_localizations"
    self.message(self.render_sup("*") +
                 _("These texts may be localized depending on the users' "
                   "language. You can configure the localizations %s.")
                 % self.render_a("in the global settings", href=url))

def help(self, text):
    self.write_html(self.render_help(text))

# Embed help box, whose visibility is controlled by a global button in the page.
def render_help(self, text):
    if text and text.strip():
        self.have_help = True
        style = "display: %s;" % ("block" if self.help_visible else "none")
        c = self.render_div(text.strip(), class_="help", style=style)
        return c
    else:
        return ""
#
# Debugging, diagnose and logging
#

def debug(self, *x):
    # Pretty-print arbitrary objects into the page. Uses lowlevel_write
    # so the output appears even with active buffering.
    import pprint
    for element in x:
        try:
            formatted = pprint.pformat(element)
        except UnicodeDecodeError:
            formatted = repr(element)
        self.lowlevel_write("%s" % self.render_pre(formatted))

#
# URL building
#

def requested_url(self):
    """Returns the URL requested by the user.

    This is not the bare original URL used by the user. Some HTTP variables may
    have been filtered by Check_MK while parsing the incoming request."""
    return self._requested_url
# addvars is a list of pairs: [('varname1', value1), ('varname2', value2)]
def makeuri(self, addvars, remove_prefix=None, filename=None, delvars=None):
    # Build a URL based on the current page and its variables: keep all
    # non-underscore variables that are neither overridden by addvars
    # nor listed in delvars, optionally drop those starting with
    # remove_prefix, then append addvars.
    new_vars = [ nv[0] for nv in addvars ]
    vars = [ (v, self.var(v))
             for v in self.vars
             if v[0] != "_" and v not in new_vars and (not delvars or v not in delvars) ]
    if remove_prefix != None:
        vars = [ i for i in vars if not i[0].startswith(remove_prefix) ]
    vars = vars + addvars
    if filename == None:
        filename = self.urlencode(self.myfile) + ".py"
    if vars:
        return filename + "?" + self.urlencode_vars(vars)
    else:
        return filename

def makeuri_contextless(self, vars, filename=None):
    # Build a URL from scratch: only the given variables, no page context
    if not filename:
        filename = self.myfile + ".py"
    if vars:
        return filename + "?" + self.urlencode_vars(vars)
    else:
        return filename

def makeactionuri(self, addvars, filename=None, delvars=None):
    # Like makeuri(), but adds a fresh transaction id (for action URLs)
    return self.makeuri(addvars + [("_transid", self.get_transid())],
                        filename=filename, delvars=delvars)

def makeactionuri_contextless(self, addvars, filename=None):
    return self.makeuri_contextless(addvars + [("_transid", self.get_transid())],
                                    filename=filename)
#
# HTML heading and footer rendering
#

def default_html_headers(self):
    # Meta tags and favicon link emitted on every page
    self.meta(httpequiv="Content-Type", content="text/html; charset=utf-8")
    self.meta(httpequiv="X-UA-Compatible", content="IE=edge")
    self.write_html(self._render_opening_tag('link', rel="shortcut icon", href="images/favicon.ico", type_="image/ico", close_tag=True))

def _head(self, title, javascripts=None, stylesheets=None):
    # Render the complete <head> section: title, stylesheets, scripts
    # and the optional reload/redirect javascript
    javascripts = javascripts if javascripts else []
    stylesheets = stylesheets if stylesheets else ["pages"]
    self.open_head()
    self.default_html_headers()
    self.title(title)
    # If the variable _link_target is set, then all links in this page
    # should be targetted to the HTML frame named by _link_target. This
    # is e.g. useful in the dash-board
    if self.link_target:
        self.base(target=self.link_target)
    # Load all specified style sheets and all user style sheets in htdocs/css
    for css in self._default_stylesheets + stylesheets:
        fname = self.css_filename_for_browser(css)
        if fname is not None:
            self.stylesheet(fname)
    # write css for internet explorer
    fname = self.css_filename_for_browser("ie")
    if fname is not None:
        self.write_html("<!--[if IE]>\n")
        self.stylesheet(fname)
        self.write_html("<![endif]-->\n")
    self.add_custom_style_sheet()
    # Load all scripts
    for js in self._default_javascripts + javascripts:
        filename_for_browser = self.javascript_filename_for_browser(js)
        if filename_for_browser:
            self.javascript_file(filename_for_browser)
    if self.browser_reload != 0:
        if self.browser_redirect != '':
            self.javascript('set_reload(%s, \'%s\')' % (self.browser_reload, self.browser_redirect))
        else:
            self.javascript('set_reload(%s)' % (self.browser_reload))
    self.close_head()

def html_head(self, title, javascripts=None, stylesheets=None, force=False):
    # Emit doctype, <html> and <head> once per document (unless forced)
    force_new_document = force # for backward stability and better readability
    #TODO: html_is_open?
    if force_new_document:
        self.header_sent = False
    if not self.header_sent:
        self.write_html('<!DOCTYPE HTML>\n')
        self.open_html()
        self._head(title, javascripts, stylesheets)
        self.header_sent = True
def header(self, title='', javascripts=None, stylesheets=None, force=False,
           show_body_start=True, show_top_heading=True):
    # Render the page header (HTML output only); sent at most once
    if self.output_format == "html":
        if not self.header_sent:
            if show_body_start:
                self.body_start(title, javascripts=javascripts, stylesheets=stylesheets, force=force)
            self.header_sent = True
            if self.render_headfoot and show_top_heading:
                self.top_heading(title)

def body_start(self, title='', javascripts=None, stylesheets=None, force=False):
    self.html_head(title, javascripts, stylesheets, force)
    self.open_body(class_=self._get_body_css_classes())

def _get_body_css_classes(self):
    # In screenshot mode an extra CSS class is added to the body
    if self.screenshotmode:
        return self.body_classes + ["screenshotmode"]
    else:
        return self.body_classes

def add_custom_style_sheet(self):
    # To be implemented by subclasses
    raise NotImplementedError()

def css_filename_for_browser(self, css):
    # To be implemented by subclasses
    raise NotImplementedError()

def javascript_filename_for_browser(self, jsname):
    # To be implemented by subclasses
    raise NotImplementedError()

def html_foot(self):
    self.close_html()

def top_heading(self, title):
    # To be implemented by subclasses
    raise NotImplementedError()
def top_heading_left(self, title):
    # Left part of the top heading table: the title as a reload link
    self.open_table(class_="header")
    self.open_tr()
    self.open_td(width="*", class_="heading")
    self.a(title, href="#", onfocus="if (this.blur) this.blur();",
           onclick="this.innerHTML=\'%s\'; document.location.reload();" % _("Reloading..."))
    self.close_td()

def top_heading_right(self):
    # Right part: help toggle button and logo; closes the heading table
    cssclass = "active" if self.help_visible else "passive"
    self.icon_button(None, _("Toggle context help texts"), "help", id="helpbutton",
                     onclick="toggle_help()", style="display:none", ty="icon", cssclass=cssclass)
    self.open_a(href="https://mathias-kettner.com", class_="head_logo")
    self.img(src="images/logo_cmk_small.png")
    self.close_a()
    self.close_td()
    self.close_tr()
    self.close_table()
    self.hr(class_="header")
    if self.enable_debug:
        self._dump_get_vars()
def footer(self, show_footer=True, show_body_end=True):
    if self.output_format == "html":
        if show_footer:
            self.bottom_footer()
        if show_body_end:
            self.body_end()

def bottom_footer(self):
    # Footer table with status icons (left) and refresh info (right)
    if self.header_sent:
        self.bottom_focuscode()
        if self.render_headfoot:
            self.open_table(class_="footer")
            self.open_tr()
            self.open_td(class_="left")
            self._write_status_icons()
            self.close_td()
            self.td('', class_="middle")
            self.open_td(class_="right")
            content = _("refresh: %s secs") % self.render_div(self.browser_reload, id_="foot_refresh_time")
            style = "display:inline-block;" if self.browser_reload else "display:none;"
            self.div(HTML(content), id_="foot_refresh", style=style)
            self.close_td()
            self.close_tr()
            self.close_table()

def bottom_focuscode(self):
    # Emit javascript that sets the browser focus to the object that was
    # registered via set_focus() (form field) or set_focus_by_id() (DOM id)
    if self.focus_object:
        if type(self.focus_object) == tuple:
            formname, varname = self.focus_object
            obj_ident = formname + "." + varname
        else:
            obj_ident = "getElementById(\"%s\")" % self.focus_object
        js_code = "<!--\n" \
                  "var focus_obj = document.%s;\n" \
                  "if (focus_obj) {\n" \
                  "    focus_obj.focus();\n" \
                  "    if (focus_obj.select)\n" \
                  "        focus_obj.select();\n" \
                  "}\n" \
                  "// -->\n" % obj_ident
        self.javascript(js_code)

def focus_here(self):
    # Create an invisible anchor and focus it (scrolls to this position)
    self.a("", href="#focus_me", id_="focus_me")
    self.set_focus_by_id("focus_me")

def body_end(self):
    # Finish the HTML body: help/keybinding/final javascript, close tags
    if self.have_help:
        self.javascript("enable_help();")
    if self.keybindings_enabled and self.keybindings:
        self.javascript("var keybindings = %s;\n"
                        "document.body.onkeydown = keybindings_keydown;\n"
                        "document.body.onkeyup = keybindings_keyup;\n"
                        "document.body.onfocus = keybindings_focus;\n" %
                        json.dumps(self.keybindings))
    if self.final_javascript_code:
        self.javascript(self.final_javascript_code)
    self.javascript("initialize_visibility_detection();")
    self.close_body()
    self.close_html()
    # Hopefully this is the correct place to perform some "finalization" tasks.
    self.store_new_transids()
#
# HTML form rendering
#

def begin_form(self, name, action = None, method = "GET",
               onsubmit = None, add_transid = True):
    # Open a <form>; POST forms get multipart encoding (file uploads).
    # The hidden "filled_in" field is used to detect submitted forms.
    self.form_vars = []
    if action == None:
        action = self.myfile + ".py"
    self.current_form = name
    self.open_form(id_="form_%s" % name, name=name, class_=name,
                   action=action, method=method, onsubmit=onsubmit,
                   enctype="multipart/form-data" if method.lower() == "post" else None)
    self.hidden_field("filled_in", name, add_var=True)
    if add_transid:
        self.hidden_field("_transid", str(self.get_transid()))
    self.form_name = name

def end_form(self):
    self.close_form()
    self.form_name = None

def in_form(self):
    return self.form_name != None

def prevent_password_auto_completion(self):
    # These fields are not really used by the form. They are used to prevent the browsers
    # from filling the default password and previous input fields in the form
    # with password which are eventually saved in the browsers password store.
    self.input(name=None, type_="text", style="display:none;")
    self.input(name=None, type_="password", style="display:none;")

# Needed if input elements are put into forms without the helper
# functions of us. TODO: Should really be removed and cleaned up!
def add_form_var(self, varname):
    self.form_vars.append(varname)

# Beware: call this method just before end_form(). It will
# add all current non-underscored HTML variables as hidden
# fields to the form - *if* they are not used in any input
# field. (this is the reason why you must not add any further
# input fields after this method has been called).
def hidden_fields(self, varlist = None, **args):
    add_action_vars = args.get("add_action_vars", False)
    if varlist != None:
        for var in varlist:
            value = self.vars.get(var, "")
            self.hidden_field(var, value)
    else: # add *all* get variables, that are not set by any input!
        for var in self.vars:
            if var not in self.form_vars and \
                (var[0] != "_" or add_action_vars): # and var != "filled_in":
                self.hidden_field(var, self.get_unicode_input(var))

def hidden_field(self, var, value, id=None, add_var=False, class_=None):
    self.write_html(self.render_hidden_field(var=var, value=value, id=id, add_var=add_var, class_=class_))

def render_hidden_field(self, var, value, id=None, add_var=False, class_=None):
    # TODO: Refactor
    id_ = id
    if value == None:
        # Hidden fields with a None value are simply skipped
        return ""
    if add_var:
        self.add_form_var(var)
    return self.render_input(name=var, type_="hidden", id_=id_, value=value, class_=class_)
#
# Form submission and variable handling
#

def form_filled_in(self, form_name = None):
    """Check whether the form (default: the current form) has already
    been filled in, i.e. the page is rendered a second time showing the
    previously entered values (e.g. while complaining about bad input)."""
    if form_name is None:
        form_name = self.form_name
    if not self.has_var("filled_in"):
        return False
    if form_name is None:
        return True
    return form_name in self.list_var("filled_in")
def do_actions(self):
    # True unless the user answered the action dialog with "No"
    return self.var("_do_actions") not in [ "", None, _("No") ]

def form_submitted(self, form_name=None):
    # Whether any form (or the given one) has been submitted
    if form_name:
        return self.var("filled_in") == form_name
    else:
        return self.has_var("filled_in")
# Get the value of a checkbox variable: True, False or None. None means
# that no form has been submitted at all. The distinction between False
# and None is needed because browsers do not transmit variables for
# checkboxes that are not checked.
def get_checkbox(self, varname, form_name = None):
    if self.has_var(varname):
        return bool(self.var(varname))
    if self.form_filled_in(form_name):
        # Form was submitted but the variable is absent -> not checked
        return False
    return None
#
# Button elements
#

def button(self, varname, title, cssclass = None, style=None, help=None):
    self.write_html(self.render_button(varname, title, cssclass, style))

def render_button(self, varname, title, cssclass = None, style=None, help=None):
    # Render a submit button; "help" becomes the title (tooltip) attribute
    self.add_form_var(varname)
    return self.render_input(name=varname, type_="submit",
                             id_=varname, class_=["button", cssclass if cssclass else None],
                             value=title, title=help, style=style)

def buttonlink(self, href, text, add_transid=False, obj_id=None, style=None, title=None, disabled=None):
    # A button that navigates to href (optionally with a transaction id)
    if add_transid:
        href += "&_transid=%s" % self.get_transid()
    if not obj_id:
        obj_id = self.some_id()
    self.input(name=obj_id, type_="button",
               id_=obj_id, class_=["button", "buttonlink"],
               value=text, style=style,
               title=title, disabled=disabled,
               onclick="location.href=\'%s\'" % href)

# TODO: Refactor the arguments. It is only used in views/wato
def toggle_button(self, id, isopen, icon, help, hidden=False, disabled=False, onclick=None, is_context_button=True):
    if is_context_button:
        self.begin_context_buttons() # TODO: Check all calls. If done before, remove this!
    if not onclick and not disabled:
        onclick = "view_toggle_form(this.parentNode, '%s');" % id
    if disabled:
        state = "off" if disabled else "on"
        cssclass = ""
        help = ""
    else:
        state = "on"
        if isopen:
            cssclass = "down"
        else:
            cssclass = "up"
    self.open_div(
        id_="%s_%s" % (id, state),
        class_=["togglebutton", state, icon, cssclass],
        title=help,
        style='display:none' if hidden else None,
    )
    self.open_a("javascript:void(0)", onclick=onclick)
    self.img(src="images/icon_%s.png" % icon)
    self.close_a()
    self.close_div()

def get_button_counts(self):
    # To be implemented by subclasses
    raise NotImplementedError()

def empty_icon_button(self):
    self.write(self.render_icon("images/trans.png", cssclass="iconbutton trans"))

def disabled_icon_button(self, icon):
    self.write(self.render_icon(icon, cssclass="iconbutton"))

# TODO: Cleanup to use standard attributes etc.
def jsbutton(self, varname, text, onclick, style='', cssclass=""):
    # autocomplete="off": Is needed for firefox not to set "disabled="disabled" during page reload
    # when it has been set on a page via javascript before. Needed for WATO activate changes page.
    self.input(name=varname, type_="button", id_=varname, class_=["button", cssclass],
               autocomplete="off", onclick=onclick, style=style, value=text)
#
# Other input elements
#

def user_error(self, e):
    # Display a MKUserError and remember it for the offending variable
    assert isinstance(e, MKUserError), "ERROR: This exception is not a user error!"
    self.open_div(class_="error")
    self.write("%s" % e.message)
    self.close_div()
    self.add_user_error(e.varname, e)
# User errors are used by the input elements to render invalid input
# with a red outline. Register an error message for one variable name
# or a list of names; exceptions are converted to their string form.
def add_user_error(self, varname, msg_or_exc):
    message = "%s" % msg_or_exc if isinstance(msg_or_exc, Exception) else msg_or_exc
    if type(varname) == list:
        for name in varname:
            self.add_user_error(name, message)
    else:
        self.user_errors[varname] = message
def has_user_errors(self):
    return len(self.user_errors) > 0

def show_user_errors(self):
    # Render all registered user errors in one error box
    if self.has_user_errors():
        self.open_div(class_="error")
        self.write('<br>'.join(self.user_errors.values()))
        self.close_div()
def text_input(self, varname, default_value = "", cssclass = "text", label = None, id_ = None,
               submit = None, attrs = None, **args):
    # Render a text <input>. A previously submitted value for varname
    # overrides default_value; a registered user error focuses the field
    # and wraps it in an error marker. Recognized **args include: size,
    # try_max_width, omit_css_width, style, autocomplete, read_only, type.
    if attrs is None:
        attrs = {}
    # Model
    error = self.user_errors.get(varname)
    value = self.vars.get(varname, default_value)
    if not value:
        value = ""
    if error:
        self.set_focus(varname)
    self.form_vars.append(varname)
    # View
    style, size = None, None
    if args.get("try_max_width"):
        style = "width: calc(100% - 10px); "
        if "size" in args:
            cols = int(args["size"])
        else:
            cols = 16
        style += "min-width: %d.8ex; " % cols
    elif "size" in args and args["size"]:
        if args["size"] == "max":
            style = "width: 100%;"
        else:
            size = "%d" % (args["size"] + 1)
            if not args.get('omit_css_width', False) and "width:" not in args.get("style", "") and not self.mobile:
                style = "width: %d.8ex;" % args["size"]
    if args.get("style"):
        style = [style, args["style"]]
    if (submit or label) and not id_:
        id_ = "ti_%s" % varname
    onkeydown = None if not submit else HTML('textinput_enter_submit(\'%s\');' % submit)
    attributes = {"class" : cssclass,
                  "id" : id_,
                  "style" : style,
                  "size" : size,
                  "autocomplete" : args.get("autocomplete"),
                  "readonly" : "true" if args.get("read_only") else None,
                  "value" : value,
                  "onkeydown" : onkeydown, }
    # extra attrs may fill unset attributes but never override set ones
    # (and never name/type, which are fixed below)
    for key, val in attrs.iteritems():
        if key not in attributes and key not in ["name", "type", "type_"]:
            attributes[key] = val
        elif key in attributes and attributes[key] is None:
            attributes[key] = val
    if error:
        self.open_x(class_="inputerror")
    if label:
        self.label(label, for_=id_)
    self.write_html(self.render_input(varname, type_=args.get("type", "text"), **attributes))
    if error:
        self.close_x()
# Shows a colored badge with text (used on WATO activation page for the site status)
def status_label(self, content, status, help, **attrs):
    self.status_label_button(content, status, help, onclick=None, **attrs)

# Shows a colored button with text (used in site and customer status snapins)
def status_label_button(self, content, status, help, onclick, **attrs):
    button_cls = "button" if onclick else None
    self.div(content, title=help, class_=[ "status_label", button_cls, status ],
             onclick=onclick, **attrs)

def toggle_switch(self, enabled, help, **attrs):
    # Render an on/off switch as a div containing an "on"/"off" link
    # Same API as other elements: class_ can be a list or string/None
    if "class_" in attrs:
        if type(attrs["class_"]) != list:
            attrs["class_"] = [ attrs["class_"] ]
    else:
        attrs["class_"] = []
    attrs["class_"] += [ "toggle_switch", "on" if enabled else "off", ]
    link_attrs = {
        "href" : attrs.pop("href", "javascript:void(0)"),
        "onclick" : attrs.pop("onclick", None),
    }
    self.open_div(**attrs)
    self.a(_("on") if enabled else _("off"), title=help, **link_attrs)
    self.close_div()

def number_input(self, varname, deflt = "", size=8, style="", submit=None):
    # Text input with cssclass "number"; non-None defaults are stringified
    if deflt != None:
        deflt = str(deflt)
    self.text_input(varname, deflt, "number", size=size, style=style, submit=submit)

def password_input(self, varname, default_value = "", size=12, **args):
    self.text_input(varname, default_value, type="password", size = size, **args)
def text_area(self, varname, deflt="", rows=4, cols=30, attrs=None, try_max_width=False):
    # Render a <textarea>; a submitted value for varname overrides deflt
    if attrs is None:
        attrs = {}
    # Model
    value = self.var(varname, deflt)
    error = self.user_errors.get(varname)
    self.form_vars.append(varname)
    if error:
        self.set_focus(varname)
    # View
    style = "width: %d.8ex;" % cols
    if try_max_width:
        style += "width: calc(100%% - 10px); min-width: %d.8ex;" % cols
    attrs["style"] = style
    attrs["rows"] = rows
    attrs["cols"] = cols
    attrs["name"] = varname
    if error:
        self.open_x(class_="inputerror")
    self.write_html(self._render_content_tag("textarea", value, **attrs))
    if error:
        self.close_x()
# TODO: DEPRECATED!!
def sorted_select(self, varname, choices, deflt='', onchange=None, attrs=None):
    if attrs is None:
        attrs = {}
    self.dropdown(varname, choices, deflt=deflt, onchange=onchange, sorted = True, **attrs)

# TODO: DEPRECATED!!
def select(self, varname, choices, deflt='', onchange=None, attrs=None):
    if attrs is None:
        attrs = {}
    self.dropdown(varname, choices, deflt=deflt, onchange=onchange, **attrs)

# TODO: DEPRECATED!!
def icon_select(self, varname, choices, deflt=''):
    self.icon_dropdown(varname, choices, deflt=deflt)

# choices is a list of pairs of (key, title). The keys of the choices
# and the default value must be of type None, str or unicode.
def dropdown(self, varname, choices, deflt='', sorted='', **attrs):
    # Render a <select> box. A submitted value for varname overrides
    # deflt. With read_only=True the select is disabled and the current
    # value is transported via a hidden field instead.
    current = self.get_unicode_input(varname, deflt)
    error = self.user_errors.get(varname)
    if varname:
        self.form_vars.append(varname)
    attrs.setdefault('size', 1)
    chs = choices[:]
    if sorted:
        # Sort according to display texts, not keys
        chs.sort(key=lambda a: a[1].lower())
    if error:
        self.open_x(class_="inputerror")
    if "read_only" in attrs and attrs.pop("read_only"):
        attrs["disabled"] = "disabled"
        self.hidden_field(varname, current, add_var=False)
    self.open_select(name=varname, id_=varname, **attrs)
    for value, text in chs:
        # if both the default in choices and current was '' then selected depended on the order in choices
        selected = (value == current) or (not value and not current)
        self.option(text, value=value if value else "",
                    selected="" if selected else None)
    self.close_select()
    if error:
        self.close_x()
def icon_dropdown(self, varname, choices, deflt=""):
    # Like dropdown(), but each choice is a triple (value, text, icon)
    current = self.var(varname, deflt)
    if varname:
        self.form_vars.append(varname)
    self.open_select(class_="icon", name=varname, id_=varname, size="1")
    for value, text, icon in choices:
        # if both the default in choices and current was '' then selected depended on the order in choices
        selected = (value == current) or (not value and not current)
        self.option(text, value=value if value else "",
                    selected='' if selected else None,
                    style="background-image:url(images/icon_%s.png);" % icon)
    self.close_select()

# Wrapper for DualListChoice
def multi_select(self, varname, choices, deflt='', sorted='', **attrs):
    attrs["multiple"] = "multiple"
    self.dropdown(varname, choices, deflt=deflt, sorted=sorted, **attrs)

def upload_file(self, varname):
    # File upload input, wrapped in an error marker on user error
    error = self.user_errors.get(varname)
    if error:
        self.open_x(class_="inputerror")
    self.input(name=varname, type_="file")
    if error:
        self.close_x()
    self.form_vars.append(varname)
# The confirm dialog is normally not a dialog which needs to be protected
# by a transid itself. It is only an intermediate step to the real action.
# But there are use cases where the confirm dialog is used during rendering
# a normal page, for example when deleting a dashlet from a dashboard. In
# such cases, the transid must be added by the confirm dialog.
# add_header: A title can be given to make the confirm method render the HTML
# header when showing the confirm message.
#
# Return values: None  -> user answered "No" (or reload of "yes" page)
#                False -> dialog was shown, no answer yet
#                True  -> user answered "Yes" and the transaction is valid
def confirm(self, msg, method="POST", action=None, add_transid=False, add_header=False):
    if self.var("_do_actions") == _("No"):
        # User has pressed "No", now invalidate the unused transid
        self.check_transaction()
        return # None --> "No"
    if not self.has_var("_do_confirm"):
        if add_header != False:
            self.header(add_header)
        if self.mobile:
            self.open_center()
        self.open_div(class_="really")
        self.write_text(msg)
        # FIXME: When this confirms another form, use the form name from self.vars()
        self.begin_form("confirm", method=method, action=action, add_transid=add_transid)
        self.hidden_fields(add_action_vars = True)
        self.button("_do_confirm", _("Yes!"), "really")
        self.button("_do_actions", _("No"), "")
        self.end_form()
        self.close_div()
        if self.mobile:
            self.close_center()
        return False # False --> "Dialog shown, no answer yet"
    else:
        # Now check the transaction
        return self.check_transaction() and True or None # True: "Yes", None --> Browser reload of "yes" page
#
# Radio groups
#

def begin_radio_group(self, horizontal=False):
    # On mobile devices the buttons are grouped in a controlgroup fieldset
    if self.mobile:
        attrs = {'data-type' : "horizontal" if horizontal else None,
                 'data-role' : "controlgroup"}
        self.write(self._render_opening_tag("fieldset", **attrs))

def end_radio_group(self):
    if self.mobile:
        self.write(self._render_closing_tag("fieldset"))

def radiobutton(self, varname, value, checked, label):
    # Model
    self.form_vars.append(varname)
    # Controller: a submitted value overrides the "checked" default
    if self.has_var(varname):
        checked = self.var(varname) == value
    # View
    id_="rb_%s_%s" % (varname, value) if label else None
    self.input(name=varname, type_="radio", value = value,
               checked='' if checked else None, id_=id_)
    if label:
        self.label(label, for_=id_)

#
# Checkbox groups
#

def begin_checkbox_group(self, horizonal=False):
    self.begin_radio_group(horizonal)

def end_checkbox_group(self):
    self.end_radio_group()

def checkbox(self, *args, **kwargs):
    self.write(self.render_checkbox(*args, **kwargs))

def render_checkbox(self, varname, deflt = False, label = '', id = None, **add_attr):
    # Problem with checkboxes: The browser will add the variable
    # only to the URL if the box is checked. So in order to detect
    # whether we should add the default value, we need to detect
    # if the form is printed for the first time. This is the
    # case if "filled_in" is not set.
    value = self.get_checkbox(varname)
    if value is None: # form not yet filled in
        value = deflt
    error = self.user_errors.get(varname)
    if id is None:
        id = "cb_%s" % varname
    add_attr["id"] = id
    add_attr["CHECKED"] = '' if value else None
    code = self.render_input(name=varname, type_="checkbox", **add_attr)\
           + self.render_label(label, for_=id)
    code = self.render_span(code, class_="checkbox")
    if error:
        code = self.render_x(code, class_="inputerror")
    self.form_vars.append(varname)
    return code
#
# Foldable context
#

def begin_foldable_container(self, treename, id, isopen, title, indent=True,
                             first=False, icon=None, fetch_url=None, title_url=None,
                             tree_img="tree"):
    # Open a foldable section. A persisted per-user state (if any)
    # overrides the passed isopen default. Returns the effective
    # open/closed state so callers (e.g. nform) can react to it.
    self.folding_indent = indent
    if self._user_id:
        isopen = self.foldable_container_is_open(treename, id, isopen)
    onclick = "toggle_foldable_container(\'%s\', \'%s\', \'%s\')"\
              % (treename, id, fetch_url if fetch_url else '')
    img_id = "treeimg.%s.%s" % (treename, id)
    if indent == "nform":
        # nform variant: heading row with a toggle cell instead of a div
        self.open_tr(class_="heading")
        self.open_td(id_="nform.%s.%s" % (treename, id), onclick=onclick, colspan="2")
        if icon:
            self.img(class_=["treeangle", "title"], src="images/icon_%s.png" % icon)
        else:
            self.img(id_=img_id, class_=["treeangle", "nform", "open" if isopen else "closed"],
                     src="images/%s_closed.png" % tree_img, align="absbottom")
        self.write_text(title)
        self.close_td()
        self.close_tr()
    else:
        self.open_div(class_="foldable")
        if not icon:
            self.img(id_="treeimg.%s.%s" % (treename, id),
                     class_=["treeangle", "open" if isopen else "closed"],
                     src="images/%s_closed.png" % tree_img, align="absbottom", onclick=onclick)
        if isinstance(title, HTML): # custom HTML code
            if icon:
                self.img(class_=["treeangle", "title"], src="images/icon_%s.png" % icon, onclick=onclick)
            self.write_text(title)
            if indent != "form":
                self.br()
        else:
            self.open_b(class_=["treeangle", "title"], onclick=None if title_url else onclick)
            if icon:
                self.img(class_=["treeangle", "title"], src="images/icon_%s.png" % icon)
            if title_url:
                self.a(title, href=title_url)
            else:
                self.write_text(title)
            self.close_b()
            self.br()
        indent_style = "padding-left: %dpx; " % (indent == True and 15 or 0)
        if indent == "form":
            self.close_td()
            self.close_tr()
            self.close_table()
            indent_style += "margin: 0; "
        self.open_ul(id_="tree.%s.%s" % (treename, id),
                     class_=["treeangle", "open" if isopen else "closed"], style=indent_style)
    # give caller information about current toggling state (needed for nform)
    return isopen

def end_foldable_container(self):
    # The nform variant opens no ul/div, so nothing has to be closed
    if self.folding_indent != "nform":
        self.close_ul()
        self.close_div()

def foldable_container_is_open(self, treename, id, isopen):
    # try to get persisted state of tree
    tree_state = self.get_tree_states(treename)
    if id in tree_state:
        isopen = tree_state[id] == "on"
    return isopen
#
# Context Buttons
#

def begin_context_buttons(self):
    if not self.context_buttons_open:
        self.context_button_hidden = False
        self.open_div(class_="contextlinks")
        self.context_buttons_open = True

def end_context_buttons(self):
    if self.context_buttons_open:
        if self.context_button_hidden:
            # a "..." toggle unhides the buttons hidden by the bestof logic
            self.open_div(title=_("Show all buttons"), id="toggle", class_=["contextlink", "short"])
            self.a("...", onclick='unhide_context_buttons(this);', href='#')
            self.close_div()
        self.div("", class_="end")
        self.close_div()
    self.context_buttons_open = False

def context_button(self, title, url, icon=None, hot=False, id=None, bestof=None, hover_title=None, fkey=None, class_=None):
    # TODO: REFACTOR
    id_ = id
    self._context_button(title, url, icon=icon, hot=hot, id_=id_, bestof=bestof, hover_title=hover_title, fkey=fkey, class_=class_)
    if fkey and self.keybindings_enabled:
        # bind function key F<fkey> to navigate to the button's URL
        self.add_keybinding([self.F1 + (fkey - 1)], "document.location='%s';" % self.escaper.escape_attribute(url))

def _context_button(self, title, url, icon=None, hot=False, id_=None, bestof=None, hover_title=None, fkey=None, class_=None):
    title = self.attrencode(title)
    display = "block"
    if bestof:
        # Show only the <bestof> most frequently used buttons; the rest
        # are hidden and can be unhidden via end_context_buttons()
        counts = self.get_button_counts()
        weights = counts.items()
        weights.sort(cmp = lambda a,b: cmp(a[1], b[1]))
        best = dict(weights[-bestof:]) # pylint: disable=invalid-unary-operand-type
        if id_ not in best:
            display="none"
            self.context_button_hidden = True
    if not self.context_buttons_open:
        self.begin_context_buttons()
    css_classes = [ "contextlink" ]
    if hot:
        css_classes.append("hot")
    if fkey and self.keybindings_enabled:
        css_classes.append("button")
    if class_:
        if type(class_) == list:
            css_classes += class_
        else:
            css_classes += class_.split(" ")
    self.open_div(class_=css_classes, id_=id_, style="display:%s;" % display)
    self.open_a(href=url, title=hover_title, onclick="count_context_button(this);" if bestof else None)
    if icon:
        self.icon('', icon, cssclass="inline", middle=False)
    self.span(title)
    if fkey and self.keybindings_enabled:
        self.div("F%d" % fkey, class_="keysym")
    self.close_a()
    self.close_div()
#
# Floating Options
#

def begin_floating_options(self, div_id, is_open):
    self.open_div(id_=div_id, class_=["view_form"], style="display: none" if not is_open else None)
    self.open_table(class_=["filterform"], cellpadding="0", cellspacing="0", border="0")
    self.open_tr()
    self.open_td()

def end_floating_options(self, reset_url=None):
    # Close the options cell and append the Apply / Reset button row
    self.close_td()
    self.close_tr()
    self.open_tr()
    self.open_td()
    self.button("apply", _("Apply"), "submit")
    if reset_url:
        self.buttonlink(reset_url, _("Reset to defaults"))
    self.close_td()
    self.close_tr()
    self.close_table()
    self.close_div()

def render_floating_option(self, name, height, varprefix, valuespec, value):
    # One floating filter box rendered via the given valuespec
    self.open_div(class_=["floatfilter", height, name])
    self.div(valuespec.title(), class_=["legend"])
    self.open_div(class_=["content"])
    valuespec.render_input(varprefix + name, value)
    self.close_div()
    self.close_div()
#
# HTML icon rendering
#
def detect_icon_path(self, icon_name):
raise NotImplementedError()
# FIXME: Change order of input arguments in one: icon and render_icon!!
def icon(self, help, icon, **kwargs):
#TODO: Refactor
title = help
icon_name = icon
self.write_html(self.render_icon(icon_name=icon_name, help=title, **kwargs))
def empty_icon(self):
self.write_html(self.render_icon("images/trans.png"))
def render_icon(self, icon_name, help=None, middle=True, id=None, cssclass=None, class_=None):
# TODO: Refactor
title = help
id_ = id
attributes = {'title' : title,
'id' : id_,
'class' : ["icon", cssclass],
'align' : 'absmiddle' if middle else None,
'src' : icon_name if "/" in icon_name else self.detect_icon_path(icon_name)}
if class_:
attributes['class'].extend(class_)
return self._render_opening_tag('img', close_tag=True, **attributes)
def render_icon_button(self, url, help, icon, id=None, onclick=None,
style=None, target=None, cssclass=None, ty="button"):
# TODO: Refactor
title = help
id_ = id
# TODO: Can we clean this up and move all button_*.png to internal_icons/*.png?
if ty == "button":
icon = "images/button_" + icon + ".png"
icon = HTML(self.render_icon(icon, cssclass="iconbutton"))
return self.render_a(icon, **{'title' : title,
'id' : id_,
'class' : cssclass,
'style' : style,
'target' : target if target else '',
'href' : url if not onclick else "javascript:void(0)",
'onfocus' : "if (this.blur) this.blur();",
'onclick' : onclick })
    def icon_button(self, *args, **kwargs):
        """Write an icon button; see render_icon_button() for the arguments."""
        self.write_html(self.render_icon_button(*args, **kwargs))
    def popup_trigger(self, *args, **kwargs):
        """Write a popup trigger; see render_popup_trigger() for the arguments."""
        self.write_html(self.render_popup_trigger(*args, **kwargs))
def render_popup_trigger(self, content, ident, what=None, data=None, url_vars=None,
style=None, menu_content=None, cssclass=None, onclose=None,
resizable=False):
onclick = 'toggle_popup(event, this, %s, %s, %s, %s, %s, %s, %s);' % \
("'%s'" % ident,
"'%s'" % what if what else 'null',
json.dumps(data) if data else 'null',
"'%s'" % self.urlencode_vars(url_vars) if url_vars else 'null',
"'%s'" % menu_content if menu_content else 'null',
"'%s'" % onclose.replace("'", "\\'") if onclose else 'null',
json.dumps(resizable))
#TODO: Check if HTML'ing content is correct and necessary!
atag = self.render_a(HTML(content), class_="popup_trigger",
href="javascript:void(0);",
onclick=onclick)
return self.render_div(atag, class_=["popup_trigger", cssclass],
id_="popup_trigger_%s" % ident,
style = style)
def element_dragger_url(self, dragging_tag, base_url):
self.write_html(self.render_element_dragger(dragging_tag,
drop_handler="function(index){return element_drag_url_drop_handler(%s, index);})" %
json.dumps(base_url)))
def element_dragger_js(self, dragging_tag, drop_handler, handler_args):
self.write_html(self.render_element_dragger(dragging_tag,
drop_handler="function(new_index){return %s(%s, new_index);})" %
(drop_handler, json.dumps(handler_args))))
# Currently only tested with tables. But with some small changes it may work with other
# structures too.
def render_element_dragger(self, dragging_tag, drop_handler):
return self.render_a(self.render_icon("drag", _("Move this entry")),
href="javascript:void(0)",
class_=["element_dragger"],
onmousedown="element_drag_start(event, this, %s, %s" %
(json.dumps(dragging_tag.upper()), drop_handler)
)
#
# HTML - All the common and more complex HTML rendering methods
#
    def _dump_get_vars(self):
        """Render a foldable debug container listing all GET/POST variables."""
        self.begin_foldable_container("html", "debug_vars", True, _("GET/POST variables of this page"))
        self.debug_vars(hide_with_mouse = False)
        self.end_foldable_container()
def debug_vars(self, prefix=None, hide_with_mouse=True, vars=None):
if not vars:
vars = self.vars
hover = "this.style.display=\'none\';"
self.open_table(class_=["debug_vars"], onmouseover=hover if hide_with_mouse else None)
self.tr(self.render_th(_("POST / GET Variables"), colspan="2"))
for name, value in sorted(vars.items()):
if name in [ "_password", "password" ]:
value = "***"
if not prefix or name.startswith(prefix):
self.tr(self.render_td(name, class_="left") + self.render_td(value, class_="right"))
self.close_table()
# TODO: Rename the status_icons because they are not only showing states. There are also actions.
# Something like footer icons or similar seems to be better
    def _write_status_icons(self):
        """Render the footer icon row: frame/page URL buttons, page-specific
        export and "add to ..." actions, the registered status icons and —
        in timing mode — the measured execution times."""
        # Links to the current frame and the full page including sidebar
        self.icon_button(self.makeuri([]), _("URL to this frame"),
                         "frameurl", target="_top", cssclass="inline")
        self.icon_button("index.py?" + self.urlencode_vars([("start_url", self.makeuri([]))]),
                         _("URL to this page including sidebar"),
                         "pageurl", target="_top", cssclass="inline")

        # TODO: Move this away from here. Make a context button. The view should handle this
        if self.myfile == "view" and self.var('mode') != 'availability':
            self.icon_button(self.makeuri([("output_format", "csv_export")]),
                             _("Export as CSV"),
                             "download_csv", target="_top", cssclass="inline")

        # TODO: This needs to be realized as plugin mechanism
        if self.myfile == "view":
            mode_name = self.var('mode') == "availability" and "availability" or "view"

            # The page context is handed to JS via JSON; encode unicode values
            # to UTF-8 and map None to "" first (Python 2 code path).
            encoded_vars = {}
            for k, v in self.page_context.items():
                if v == None:
                    v = ''
                elif type(v) == unicode:
                    v = v.encode('utf-8')
                encoded_vars[k] = v

            self.popup_trigger(
                self.render_icon("menu", _("Add this view to..."), cssclass="iconbutton inline"),
                'add_visual', 'add_visual', data=[mode_name, encoded_vars, {'name': self.var('view_name')}],
                url_vars=[("add_type", mode_name)])

        # TODO: This should be handled by pagetypes.py
        elif self.myfile == "graph_collection":
            self.popup_trigger(
                self.render_icon("menu", _("Add this graph collection to..."),
                                 cssclass="iconbutton inline"),
                'add_visual', 'add_visual', data=["graph_collection", {}, {'name': self.var('name')}],
                url_vars=[("add_type", "graph_collection")])

        # Registered status icons: a tuple value means (tooltip, url) -> button,
        # a plain value is just a static icon with tooltip.
        for img, tooltip in self.status_icons.items():
            if type(tooltip) == tuple:
                tooltip, url = tooltip
                self.icon_button(url, tooltip, img, cssclass="inline")
            else:
                self.icon(tooltip, img, cssclass="inline")

        # Performance measurement output (only when timing is enabled)
        if self.times:
            self.measure_time('body')
            self.open_div(class_=["execution_times"])
            entries = self.times.items()
            entries.sort()
            for name, duration in entries:
                self.div("%s: %.1fms" % (name, duration * 1000))
            self.close_div()
#
# Per request caching
#
    def set_cache(self, name, value):
        """Store *value* under *name* in the per-request cache and return it."""
        self.caches[name] = value
        return value
def set_cache_default(self, name, value):
if self.is_cached(name):
return self.get_cached(name)
else:
return self.set_cache(name, value)
    def is_cached(self, name):
        """Return True when *name* has a value in the per-request cache."""
        return name in self.caches
def get_cached(self, name):
return self.caches.get(name)
def del_cache(self, name):
if name in self.caches:
del self.caches[name]
#
# Keyboard control
# TODO: Can we move this specific feature to AQ?
#
    def add_keybinding(self, keylist, jscode):
        """Register *jscode* to run when the key combination *keylist* is pressed."""
        self.keybindings.append([keylist, jscode])
def add_keybindings(self, bindings):
self.keybindings += bindings
    def disable_keybindings(self):
        """Globally disable rendering/handling of keyboard shortcuts for this page."""
        self.keybindings_enabled = False
#
# FIXME: Legacy functions
#
# TODO: Remove this specific legacy function. Change code using this to valuespecs
def datetime_input(self, varname, default_value, submit=None):
try:
t = self.get_datetime_input(varname)
except:
t = default_value
if varname in self.user_errors:
self.add_user_error(varname + "_date", self.user_errors[varname])
self.add_user_error(varname + "_time", self.user_errors[varname])
self.set_focus(varname + "_date")
br = time.localtime(t)
self.date_input(varname + "_date", br.tm_year, br.tm_mon, br.tm_mday, submit=submit)
self.write_text(" ")
self.time_input(varname + "_time", br.tm_hour, br.tm_min, submit=submit)
self.form_vars.append(varname + "_date")
self.form_vars.append(varname + "_time")
# TODO: Remove this specific legacy function. Change code using this to valuespecs
def time_input(self, varname, hours, mins, submit=None):
self.text_input(varname, "%02d:%02d" % (hours, mins), cssclass="time", size=5,
submit=submit, omit_css_width = True)
# TODO: Remove this specific legacy function. Change code using this to valuespecs
def date_input(self, varname, year, month, day, submit=None):
self.text_input(varname, "%04d-%02d-%02d" % (year, month, day),
cssclass="date", size=10, submit=submit, omit_css_width = True)
# TODO: Remove this specific legacy function. Change code using this to valuespecs
def get_datetime_input(self, varname):
t = self.var(varname + "_time")
d = self.var(varname + "_date")
if not t or not d:
raise MKUserError([varname + "_date", varname + "_time"],
_("Please specify a date and time."))
try:
br = time.strptime(d + " " + t, "%Y-%m-%d %H:%M")
except:
raise MKUserError([varname + "_date", varname + "_time"],
_("Please enter the date/time in the format YYYY-MM-DD HH:MM."))
return int(time.mktime(br))
# TODO: Remove this specific legacy function. Change code using this to valuespecs
def get_time_input(self, varname, what):
t = self.var(varname)
if not t:
raise MKUserError(varname, _("Please specify %s.") % what)
try:
h, m = t.split(":")
m = int(m)
h = int(h)
if m < 0 or m > 59 or h < 0:
raise Exception()
except:
raise MKUserError(varname, _("Please enter the time in the format HH:MM."))
return m * 60 + h * 3600
| gpl-2.0 |
karlch/vimiv | vimiv/eventhandler.py | 1 | 6686 | # vim: ft=python fileencoding=utf-8 sw=4 et sts=4
"""Handles the keyboard for vimiv."""
from gi.repository import Gdk, GLib, GObject
from vimiv.config_parser import parse_keys
from vimiv.helpers import get_float, get_int
class EventHandler(GObject.Object):
    """Handle keyboard/mouse/touch events for vimiv.

    Attributes:
        num_str: String containing repetition number for commands.
        _app: Main vimiv application to interact with.
        _keys: Keybindings from configfiles.
        _timer_id: ID of the current timer running to clear num_str.
        _timer_id_touch: ID of the current timer running to reconnect clicks
            after a touch by touchscreen.
    """

    def __init__(self, app):
        """Initialize defaults."""
        super(EventHandler, self).__init__()
        self._num_str = ""
        self._app = app
        self._timer_id = 0
        self._timer_id_touch = 0
        self._keys = parse_keys(running_tests=self._app.running_tests)

    def on_key_press(self, widget, event, widget_name):
        """Handle key press event.

        Args:
            widget: Widget the event occurred in.
            event: The Gdk key event.
            widget_name: Name of widget to operate on: image, library...
        """
        if event.type == Gdk.EventType.KEY_PRESS:
            keyval = event.keyval
            keyname = Gdk.keyval_name(keyval)
            keyname = self._check_modifiers(event, keyname)
            return self._run(keyname, widget_name)

    def on_click(self, widget, event, widget_name):
        """Handle click event.

        Args:
            widget: Widget the event occurred in.
            event: The Gdk button event.
            widget_name: Name of widget to operate on: image, library...
        """
        # Do not handle double clicks
        if not event.type == Gdk.EventType.BUTTON_PRESS:
            return
        button_name = "Button" + str(event.button)
        button_name = self._check_modifiers(event, button_name)
        return self._run(button_name, widget_name)

    def _run(self, keyname, widget_name):
        """Run the correct function per keypress.

        Args:
            keyname: Name of the key that was pressed/clicked/...
            widget_name: Name of widget to operate on: image, library...
        """
        # Numbers for the num_str
        if widget_name != "COMMAND" and keyname.isdigit():
            self.num_append(keyname)
            return True
        # Get the relevant keybindings for the window from the various
        # sections in the keys.conf file
        keys = self._keys[widget_name]
        # Get the command to which the pressed key is bound and run it
        if keyname in keys:
            keybinding = keys[keyname]
            # Write keybinding and key to log in debug mode
            if self._app.debug:
                self._app["log"].write_message("key",
                                               keyname + ": " + keybinding)
            self._app["commandline"].run_command(keybinding, keyname)
            return True  # Deactivates default bindings
        # Activate default keybindings
        else:
            return False

    def on_touch(self, widget, event):
        """Clear mouse connection when touching screen.

        This stops calling the ButtonX bindings when using the touch screen.
        Reasoning: We do not want to e.g. move to the next image when trying to
        zoom in.
        """
        try:
            self._app["window"].disconnect_by_func(self.on_click)
        # Was already disconnected
        except TypeError:
            pass
        if self._timer_id_touch:
            GLib.source_remove(self._timer_id_touch)
        self._timer_id_touch = GLib.timeout_add(5, self._reconnect_click)
        return True

    def _reconnect_click(self):
        """Reconnect the click signal after a touch event."""
        self._app["window"].connect("button_press_event",
                                    self.on_click, "IMAGE")
        self._timer_id_touch = 0

    def _check_modifiers(self, event, keyname):
        """Update keyname according to modifiers in event."""
        shiftkeys = ["space", "Return", "Tab", "Escape", "BackSpace",
                     "Up", "Down", "Left", "Right"]
        # Check for Control (^), Mod1 (Alt) or Shift
        if event.get_state() & Gdk.ModifierType.CONTROL_MASK:
            keyname = "^" + keyname
        if event.get_state() & Gdk.ModifierType.MOD1_MASK:
            keyname = "Alt+" + keyname
        # Shift+ for all letters and for keys that don't support it
        if (event.get_state() & Gdk.ModifierType.SHIFT_MASK and
                (len(keyname) < 2 or keyname in shiftkeys
                 or keyname.startswith("Button"))):
            keyname = "Shift+" + keyname.lower()
        if keyname == "ISO_Left_Tab":  # Tab is named really weird under shift
            keyname = "Shift+Tab"
        return keyname

    def num_append(self, num, remove_by_timeout=True):
        """Add a new char to num_str.

        Args:
            num: The number to append to the string.
            remove_by_timeout: If True, add a timeout to clear the num_str.
        """
        # Remove old timers if we have new numbers
        if self._timer_id:
            GLib.source_remove(self._timer_id)
        self._timer_id = GLib.timeout_add_seconds(1, self.num_clear)
        self._num_str += num
        self._convert_trailing_zeros()
        # Write number to log file in debug mode
        if self._app.debug:
            self._app["log"].write_message("number", num + "->" + self._num_str)
        self._app["statusbar"].update_info()

    def num_clear(self):
        """Clear num_str."""
        # Remove any timers as we are clearing now anyway
        if self._timer_id:
            GLib.source_remove(self._timer_id)
        # Write number cleared to log file in debug mode
        if self._app.debug and self._num_str:
            self._app["log"].write_message("number", "cleared")
        self._timer_id = 0
        # Reset
        self._num_str = ""
        self._app["statusbar"].update_info()

    def num_receive(self, number=1, to_float=False):
        """Receive self._num_str and clear it.

        Args:
            number: Number to return if self._num_str is empty.
            to_float: If True, convert num_str to float. Else to int.
        Return:
            The received number or default.
        """
        if self._num_str:
            number = get_float(self._num_str) \
                if to_float else get_int(self._num_str)
            self.num_clear()
        return number

    def get_num_str(self):
        return self._num_str

    def set_num_str(self, number):
        self.num_clear()
        self.num_append(str(number))

    def _convert_trailing_zeros(self):
        """If prefixed with zero add a decimal point to self._num_str."""
        if self._num_str.startswith("0") and not self._num_str.startswith("0."):
            # Only the leading zero may be rewritten: replacing every "0"
            # would corrupt numbers containing further zeros, e.g.
            # "050" -> "0.50." instead of the intended "0.50".
            self._num_str = self._num_str.replace("0", "0.", 1)
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.