| repo_name (string, len 5–100) | path (string, len 4–375) | copies (991 classes) | size (string, len 4–7) | content (string, len 666–1M) | license (15 classes) |
|---|---|---|---|---|---|
felixma/nova | nova/virt/driver.py | 29 | 58892 | # Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Driver base-classes:
(Beginning of) the contract that compute drivers must follow, and shared
types that support that contract
"""
import sys
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from nova.i18n import _, _LE, _LI
from nova import utils
from nova.virt import event as virtevent
# Configuration options common to every compute driver.  They are registered
# on the global CONF object at import time (see CONF.register_opts() below),
# so importing this module is enough to make them available.
driver_opts = [
    cfg.StrOpt('compute_driver',
               help='Driver to use for controlling virtualization. Options '
                    'include: libvirt.LibvirtDriver, xenapi.XenAPIDriver, '
                    'fake.FakeDriver, ironic.IronicDriver, '
                    'vmwareapi.VMwareVCDriver, hyperv.HyperVDriver'),
    cfg.StrOpt('default_ephemeral_format',
               help='The default format an ephemeral_volume will be '
                    'formatted with on creation.'),
    cfg.StrOpt('preallocate_images',
               default='none',
               choices=('none', 'space'),
               help='VM image preallocation mode: '
                    '"none" => no storage provisioning is done up front, '
                    '"space" => storage is fully allocated at instance start'),
    cfg.BoolOpt('use_cow_images',
                default=True,
                help='Whether to use cow images'),
    cfg.BoolOpt('vif_plugging_is_fatal',
                default=True,
                help="Fail instance boot if vif plugging fails"),
    cfg.IntOpt('vif_plugging_timeout',
               default=300,
               help='Number of seconds to wait for neutron vif plugging '
                    'events to arrive before continuing or failing (see '
                    'vif_plugging_is_fatal). If this is set to zero and '
                    'vif_plugging_is_fatal is False, events should not '
                    'be expected to arrive at all.'),
]
CONF = cfg.CONF
CONF.register_opts(driver_opts)
# Module-level logger, per the standard oslo.log pattern.
LOG = logging.getLogger(__name__)
def driver_dict_from_config(named_driver_config, *args, **kwargs):
    """Instantiate drivers described by 'type=import.path' config entries.

    :param named_driver_config: iterable of strings of the form
                                ``<driver_type>=<importable.DriverClass>``
    :param args: positional arguments passed to every driver constructor
    :param kwargs: keyword arguments passed to every driver constructor
    :returns: dict mapping each driver_type to an instantiated driver
    """
    registry = {}
    for entry in named_driver_config:
        # Split only on the first '='; everything after it is the class path.
        name, _unused, class_path = entry.partition('=')
        driver_cls = importutils.import_class(class_path)
        registry[name] = driver_cls(*args, **kwargs)
    return registry
def get_block_device_info(instance, block_device_mapping):
    """Convert block device mappings for an instance to driver format.

    Virt drivers expect block device mapping to be presented as a dict
    containing the following keys:

    - root_device_name: device name of the root disk
    - ephemerals: a (potentially empty) list of DriverEphemeralBlockDevice
                  instances
    - swap: an instance of DriverSwapBlockDevice or None
    - block_device_mapping: a (potentially empty) list of
                            DriverVolumeBlockDevice or any of its more
                            specialized subclasses
    """
    # Imported here (not at module scope) to avoid a circular import.
    from nova.virt import block_device as virt_block_device

    ephemerals = virt_block_device.convert_ephemerals(block_device_mapping)
    volumes = virt_block_device.convert_all_volumes(*block_device_mapping)
    swap = virt_block_device.get_swap(
        virt_block_device.convert_swap(block_device_mapping))
    return {
        'root_device_name': instance.root_device_name,
        'ephemerals': ephemerals,
        'block_device_mapping': volumes,
        'swap': swap,
    }
def block_device_info_get_root(block_device_info):
    """Return the root device name from *block_device_info*, or None.

    Accepts None in place of the dict for convenience.
    """
    return (block_device_info or {}).get('root_device_name')
def block_device_info_get_swap(block_device_info):
    """Return the swap entry from *block_device_info*.

    When no swap device is recorded (or *block_device_info* is None), a
    stub dict with a null device name and zero size is returned so callers
    can always index the result.
    """
    info = block_device_info or {}
    swap = info.get('swap')
    if swap:
        return swap
    return {'device_name': None, 'swap_size': 0}
def swap_is_usable(swap):
    """Report whether *swap* describes a usable swap device.

    The result is truthy only when *swap* is a non-empty mapping whose
    'device_name' is set and whose 'swap_size' is positive.  As in the
    original short-circuit expression, the first falsy link in that chain
    is returned unchanged rather than coerced to bool.
    """
    if not swap:
        return swap
    device = swap['device_name']
    if not device:
        return device
    return swap['swap_size'] > 0
def block_device_info_get_ephemerals(block_device_info):
    """Return the ephemeral disk list from *block_device_info* (or [])."""
    return (block_device_info or {}).get('ephemerals') or []
def block_device_info_get_mapping(block_device_info):
    """Return the volume mapping list from *block_device_info* (or [])."""
    info = block_device_info or {}
    return info.get('block_device_mapping') or []
class ComputeDriver(object):
"""Base class for compute drivers.
The interface to this class talks in terms of 'instances' (Amazon EC2 and
internal Nova terminology), by which we mean 'running virtual machine'
(XenAPI terminology) or domain (Xen or libvirt terminology).
An instance has an ID, which is the identifier chosen by Nova to represent
the instance further up the stack. This is unfortunately also called a
'name' elsewhere. As far as this layer is concerned, 'instance ID' and
'instance name' are synonyms.
Note that the instance ID or name is not human-readable or
customer-controlled -- it's an internal ID chosen by Nova. At the
nova.virt layer, instances do not have human-readable names at all -- such
things are only known higher up the stack.
Most virtualization platforms will also have their own identity schemes,
to uniquely identify a VM or domain. These IDs must stay internal to the
platform-specific layer, and never escape the connection interface. The
platform-specific layer is responsible for keeping track of which instance
ID maps to which platform-specific ID, and vice versa.
Some methods here take an instance of nova.compute.service.Instance. This
is the data structure used by nova.compute to store details regarding an
instance, and pass them into this layer. This layer is responsible for
translating that generic data structure into terms that are specific to the
virtualization platform.
"""
capabilities = {
"has_imagecache": False,
"supports_recreate": False,
"supports_migrate_to_same_host": False
}
    def __init__(self, virtapi):
        """Create the driver.

        :param virtapi: object giving the driver limited access back into
                        the compute layer; stored as ``self.virtapi``
        """
        self.virtapi = virtapi
        # Lifecycle-event callback; None until a listener is registered
        # (presumably by the compute manager -- registration code is not
        # visible in this chunk).
        self._compute_event_callback = None
    def init_host(self, host):
        """Initialize anything that is necessary for the driver to function,
        including catching up with currently running VM's on the given host.

        :param host: the host this driver is running on
        """
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def cleanup_host(self, host):
        """Clean up anything that is necessary for the driver to gracefully
        stop, including ending remote sessions. This is optional; the base
        implementation is a no-op.

        :param host: the host this driver is running on
        """
        pass
    def get_info(self, instance):
        """Get the current status of an instance, by name (not ID!)

        :param instance: nova.objects.instance.Instance object
        :returns: an InstanceInfo object
        """
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def get_num_instances(self):
        """Return the total number of virtual machines.

        Return the number of virtual machines that the hypervisor knows
        about.

        .. note::

            This implementation works for all drivers, but it is
            not particularly efficient. Maintainers of the virt drivers are
            encouraged to override this method with something more
            efficient.

        :returns: int, the number of instances per list_instances()
        """
        return len(self.list_instances())
    def instance_exists(self, instance):
        """Checks existence of an instance on the host.

        :param instance: The instance to lookup

        Returns True if an instance with the supplied ID exists on
        the host, False otherwise.

        .. note::

            This implementation works for all drivers, but it is
            not particularly efficient. Maintainers of the virt drivers are
            encouraged to override this method with something more
            efficient.
        """
        try:
            # Prefer the UUID listing; fall back to name matching for
            # drivers that do not implement list_instance_uuids().
            return instance.uuid in self.list_instance_uuids()
        except NotImplementedError:
            return instance.name in self.list_instances()
    def estimate_instance_overhead(self, instance_info):
        """Estimate the virtualization overhead required to build an instance
        of the given flavor.

        Defaults to zero, drivers should override if per-instance overhead
        calculations are desired.

        :param instance_info: Instance/flavor to calculate overhead for.
        :returns: Dict of estimated overhead values; the base implementation
                  reports only a zero 'memory_mb' overhead.
        """
        return {'memory_mb': 0}
    def list_instances(self):
        """Return the names of all the instances known to the virtualization
        layer, as a list.

        :returns: list of instance names
        """
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def list_instance_uuids(self):
        """Return the UUIDS of all the instances known to the virtualization
        layer, as a list.

        :returns: list of instance UUIDs
        """
        raise NotImplementedError()
    def rebuild(self, context, instance, image_meta, injected_files,
                admin_password, bdms, detach_block_devices,
                attach_block_devices, network_info=None,
                recreate=False, block_device_info=None,
                preserve_ephemeral=False):
        """Destroy and re-make this instance.

        A 'rebuild' effectively purges all existing data from the system and
        remakes the VM with given 'metadata' and 'personalities'.

        This base class method shuts down the VM, detaches all block devices,
        then spins up the new VM afterwards. It may be overridden by
        hypervisors that need to - e.g. for optimisations, or when the 'VM'
        is actually proxied and needs to be held across the shutdown + spin
        up steps.

        :param context: security context
        :param instance: nova.objects.instance.Instance
                         This function should use the data there to guide
                         the creation of the new instance.
        :param image_meta: image object returned by nova.image.glance that
                           defines the image from which to boot this instance
        :param injected_files: User files to inject into instance.
        :param admin_password: Administrator password to set in instance.
        :param bdms: block-device-mappings to use for rebuild
        :param detach_block_devices: function to detach block devices. See
            nova.compute.manager.ComputeManager:_rebuild_default_impl for
            usage.
        :param attach_block_devices: function to attach block devices. See
            nova.compute.manager.ComputeManager:_rebuild_default_impl for
            usage.
        :param network_info:
           :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
        :param recreate: True if the instance is being recreated on a new
            hypervisor - all the cleanup of old state is skipped.
        :param block_device_info: Information about block devices to be
                                  attached to the instance.
        :param preserve_ephemeral: True if the default ephemeral storage
                                   partition must be preserved on rebuild
        """
        # NOTE(review): despite the "shuts down the VM ... spins up" paragraph
        # above, this base method is abstract; the default rebuild flow
        # presumably lives in the compute manager -- TODO confirm.
        raise NotImplementedError()
    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info=None, block_device_info=None):
        """Create a new instance/VM/domain on the virtualization platform.

        Once this successfully completes, the instance should be
        running (power_state.RUNNING).

        If this fails, any partial instance should be completely
        cleaned up, and the virtualization platform should be in the state
        that it was before this call began.

        :param context: security context
        :param instance: nova.objects.instance.Instance
                         This function should use the data there to guide
                         the creation of the new instance.
        :param image_meta: image object returned by nova.image.glance that
                           defines the image from which to boot this instance
        :param injected_files: User files to inject into instance.
        :param admin_password: Administrator password to set in instance.
        :param network_info:
           :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
        :param block_device_info: Information about block devices to be
                                  attached to the instance.
        """
        raise NotImplementedError()
    def destroy(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True, migrate_data=None):
        """Destroy the specified instance from the Hypervisor.

        If the instance is not found (for example if networking failed), this
        function should still succeed. It's probably a good idea to log a
        warning in that case.

        :param context: security context
        :param instance: Instance object as returned by DB layer.
        :param network_info:
           :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
        :param block_device_info: Information about block devices that should
                                  be detached from the instance.
        :param destroy_disks: Indicates if disks should be destroyed
        :param migrate_data: implementation specific params
        """
        raise NotImplementedError()
    def cleanup(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True, migrate_data=None, destroy_vifs=True):
        """Cleanup the instance resources.

        Instance should have been destroyed from the Hypervisor before calling
        this method.

        :param context: security context
        :param instance: Instance object as returned by DB layer.
        :param network_info:
           :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
        :param block_device_info: Information about block devices that should
                                  be detached from the instance.
        :param destroy_disks: Indicates if disks should be destroyed
        :param migrate_data: implementation specific params
        :param destroy_vifs: Indicates if vifs should be unplugged
        """
        raise NotImplementedError()
    def reboot(self, context, instance, network_info, reboot_type,
               block_device_info=None, bad_volumes_callback=None):
        """Reboot the specified instance.

        After this is called successfully, the instance's state
        goes back to power_state.RUNNING. The virtualization
        platform should ensure that the reboot action has completed
        successfully even in cases in which the underlying domain/vm
        is paused or halted/stopped.

        :param context: security context
        :param instance: nova.objects.instance.Instance
        :param network_info:
           :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
        :param reboot_type: Either a HARD or SOFT reboot
        :param block_device_info: Info pertaining to attached volumes
        :param bad_volumes_callback: Function to handle any bad volumes
            encountered
        """
        raise NotImplementedError()
    def get_console_pool_info(self, console_type):
        """Return information about the console pool for *console_type*.

        (No base behavior is defined here; semantics are driver-specific --
        TODO confirm against a concrete driver.)
        """
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def get_console_output(self, context, instance):
        """Get console output for an instance

        :param context: security context
        :param instance: nova.objects.instance.Instance
        """
        raise NotImplementedError()
    def get_vnc_console(self, context, instance):
        """Get connection info for a vnc console.

        :param context: security context
        :param instance: nova.objects.instance.Instance
        :returns: an instance of console.type.ConsoleVNC
        """
        raise NotImplementedError()
    def get_spice_console(self, context, instance):
        """Get connection info for a spice console.

        :param context: security context
        :param instance: nova.objects.instance.Instance
        :returns: an instance of console.type.ConsoleSpice
        """
        raise NotImplementedError()
    def get_rdp_console(self, context, instance):
        """Get connection info for a rdp console.

        :param context: security context
        :param instance: nova.objects.instance.Instance
        :returns: an instance of console.type.ConsoleRDP
        """
        raise NotImplementedError()
    def get_serial_console(self, context, instance):
        """Get connection info for a serial console.

        :param context: security context
        :param instance: nova.objects.instance.Instance
        :returns: an instance of console.type.ConsoleSerial
        """
        raise NotImplementedError()
    def get_mks_console(self, context, instance):
        """Get connection info for a MKS console.

        :param context: security context
        :param instance: nova.objects.instance.Instance
        :returns: an instance of console.type.ConsoleMKS
        """
        raise NotImplementedError()
    def get_diagnostics(self, instance):
        """Return data about VM diagnostics.

        :param instance: nova.objects.instance.Instance
        """
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def get_instance_diagnostics(self, instance):
        """Return data about VM diagnostics.

        (Distinct from get_diagnostics(); the difference in return format is
        not visible in this chunk -- TODO confirm.)

        :param instance: nova.objects.instance.Instance
        """
        raise NotImplementedError()
    def get_all_bw_counters(self, instances):
        """Return bandwidth usage counters for each interface on each
        running VM.

        :param instances: nova.objects.instance.InstanceList
        """
        raise NotImplementedError()
    def get_all_volume_usage(self, context, compute_host_bdms):
        """Return usage info for volumes attached to vms on
        a given host.

        :param context: security context
        :param compute_host_bdms: block device mappings for this host's VMs
        """
        raise NotImplementedError()
    def get_host_ip_addr(self):
        """Retrieves the IP address of the dom0 (the hypervisor host).
        """
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def attach_volume(self, context, connection_info, instance, mountpoint,
                      disk_bus=None, device_type=None, encryption=None):
        """Attach the disk to the instance at mountpoint using info.

        :param context: security context
        :param connection_info: volume connection information
        :param instance: nova.objects.instance.Instance
        :param mountpoint: device path at which to attach the volume
        :param disk_bus: optional bus to attach the disk on
        :param device_type: optional device type hint
        :param encryption: optional volume-encryption information
        """
        raise NotImplementedError()
    def detach_volume(self, connection_info, instance, mountpoint,
                      encryption=None):
        """Detach the disk attached to the instance.

        :param connection_info: volume connection information
        :param instance: nova.objects.instance.Instance
        :param mountpoint: device path the volume was attached at
        :param encryption: optional volume-encryption information
        """
        raise NotImplementedError()
    def swap_volume(self, old_connection_info, new_connection_info,
                    instance, mountpoint, resize_to):
        """Replace the disk attached to the instance.

        :param instance: nova.objects.instance.Instance
        :param resize_to: This parameter is used to indicate the new volume
                          size when the new volume is larger than the old
                          volume. The unit is Gigabytes.
        """
        raise NotImplementedError()
    def attach_interface(self, instance, image_meta, vif):
        """Attach an interface to the instance.

        :param instance: nova.objects.instance.Instance
        :param image_meta: image metadata for the instance
        :param vif: the virtual interface to attach
        """
        raise NotImplementedError()
    def detach_interface(self, instance, vif):
        """Detach an interface from the instance.

        :param instance: nova.objects.instance.Instance
        :param vif: the virtual interface to detach
        """
        raise NotImplementedError()
    def migrate_disk_and_power_off(self, context, instance, dest,
                                   flavor, network_info,
                                   block_device_info=None,
                                   timeout=0, retry_interval=0):
        """Transfers the disk of a running instance in multiple phases, turning
        off the instance before the end.

        :param instance: nova.objects.instance.Instance
        :param timeout: time to wait for GuestOS to shutdown
        :param retry_interval: How often to signal guest while
                               waiting for it to shutdown
        """
        raise NotImplementedError()
    def snapshot(self, context, instance, image_id, update_task_state):
        """Snapshots the specified instance.

        :param context: security context
        :param instance: nova.objects.instance.Instance
        :param image_id: Reference to a pre-created image that will
                         hold the snapshot.
        :param update_task_state: callback to report snapshot progress
        """
        raise NotImplementedError()
    def post_interrupted_snapshot_cleanup(self, context, instance):
        """Cleans up any resources left after an interrupted snapshot.

        The base implementation is a no-op.

        :param context: security context
        :param instance: nova.objects.instance.Instance
        """
        pass
    def finish_migration(self, context, migration, instance, disk_info,
                         network_info, image_meta, resize_instance,
                         block_device_info=None, power_on=True):
        """Completes a resize.

        :param context: the context for the migration/resize
        :param migration: the migrate/resize information
        :param instance: nova.objects.instance.Instance being migrated/resized
        :param disk_info: the newly transferred disk information
        :param network_info:
           :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
        :param image_meta: image object returned by nova.image.glance that
                           defines the image from which this instance
                           was created
        :param resize_instance: True if the instance is being resized,
                                False otherwise
        :param block_device_info: instance volume block device info
        :param power_on: True if the instance should be powered on, False
                         otherwise
        """
        raise NotImplementedError()
    def confirm_migration(self, migration, instance, network_info):
        """Confirms a resize, destroying the source VM.

        :param migration: the migrate/resize information
        :param instance: nova.objects.instance.Instance
        :param network_info: instance network information
        """
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def finish_revert_migration(self, context, instance, network_info,
                                block_device_info=None, power_on=True):
        """Finish reverting a resize.

        :param context: the context for the finish_revert_migration
        :param instance: nova.objects.instance.Instance being migrated/resized
        :param network_info:
           :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
        :param block_device_info: instance volume block device info
        :param power_on: True if the instance should be powered on, False
                         otherwise
        """
        raise NotImplementedError()
    def pause(self, instance):
        """Pause the specified instance.

        :param instance: nova.objects.instance.Instance
        """
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def unpause(self, instance):
        """Unpause paused VM instance.

        :param instance: nova.objects.instance.Instance
        """
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def suspend(self, context, instance):
        """Suspend the specified instance.

        :param context: the context for the suspend
        :param instance: nova.objects.instance.Instance
        """
        raise NotImplementedError()
    def resume(self, context, instance, network_info, block_device_info=None):
        """Resume the specified instance.

        :param context: the context for the resume
        :param instance: nova.objects.instance.Instance being resumed
        :param network_info:
           :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
        :param block_device_info: instance volume block device info
        """
        raise NotImplementedError()
    def resume_state_on_host_boot(self, context, instance, network_info,
                                  block_device_info=None):
        """Resume guest state when a host is booted.

        :param context: security context
        :param instance: nova.objects.instance.Instance
        :param network_info: instance network information
        :param block_device_info: instance volume block device info
        """
        raise NotImplementedError()
    def rescue(self, context, instance, network_info, image_meta,
               rescue_password):
        """Rescue the specified instance.

        :param context: security context
        :param instance: nova.objects.instance.Instance
        :param network_info: instance network information
        :param image_meta: metadata of the image to boot the rescue VM from
        :param rescue_password: admin password to set in the rescue VM
        """
        raise NotImplementedError()
    def set_bootable(self, instance, is_bootable):
        """Set the ability to power on/off an instance.

        :param instance: nova.objects.instance.Instance
        :param is_bootable: True if the instance may be powered on
        """
        raise NotImplementedError()
    def unrescue(self, instance, network_info):
        """Unrescue the specified instance.

        :param instance: nova.objects.instance.Instance
        :param network_info: instance network information
        """
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def power_off(self, instance, timeout=0, retry_interval=0):
        """Power off the specified instance.

        :param instance: nova.objects.instance.Instance
        :param timeout: time to wait for GuestOS to shutdown
        :param retry_interval: How often to signal guest while
                               waiting for it to shutdown
        """
        raise NotImplementedError()
    def power_on(self, context, instance, network_info,
                 block_device_info=None):
        """Power on the specified instance.

        :param context: security context
        :param instance: nova.objects.instance.Instance
        :param network_info: instance network information
        :param block_device_info: instance volume block device info
        """
        raise NotImplementedError()
    def inject_nmi(self, instance):
        """Inject an NMI to the specified instance.

        :param instance: nova.objects.instance.Instance
        """
        raise NotImplementedError()
    def soft_delete(self, instance):
        """Soft delete the specified instance.

        :param instance: nova.objects.instance.Instance
        """
        raise NotImplementedError()
    def restore(self, instance):
        """Restore the specified soft-deleted instance.

        :param instance: nova.objects.instance.Instance
        """
        raise NotImplementedError()
    def get_available_resource(self, nodename):
        """Retrieve resource information.

        This method is called when nova-compute launches, and
        as part of a periodic task that records the results in the DB.

        :param nodename: node which the caller wants to get resources from;
                         a driver that manages only one node can safely
                         ignore this
        :returns: Dictionary describing resources
        """
        raise NotImplementedError()
    def pre_live_migration(self, context, instance, block_device_info,
                           network_info, disk_info, migrate_data=None):
        """Prepare an instance for live migration.

        :param context: security context
        :param instance: nova.objects.instance.Instance object
        :param block_device_info: instance block device information
        :param network_info: instance network information
        :param disk_info: instance disk information
        :param migrate_data: implementation specific data dict.
        """
        raise NotImplementedError()
    def live_migration(self, context, instance, dest,
                       post_method, recover_method, block_migration=False,
                       migrate_data=None):
        """Live migration of an instance to another host.

        :param context: security context
        :param instance:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :param dest: destination host
        :param post_method:
            post operation method.
            expected nova.compute.manager._post_live_migration.
        :param recover_method:
            recovery method when any exception occurs.
            expected nova.compute.manager._rollback_live_migration.
        :param block_migration: if true, migrate VM disk.
        :param migrate_data: implementation specific params.
        """
        raise NotImplementedError()
    def rollback_live_migration_at_destination(self, context, instance,
                                               network_info,
                                               block_device_info,
                                               destroy_disks=True,
                                               migrate_data=None):
        """Clean up destination node after a failed live migration.

        :param context: security context
        :param instance: instance object that was being migrated
        :param network_info: instance network information
        :param block_device_info: instance block device information
        :param destroy_disks:
            if true, destroy disks at destination during cleanup
        :param migrate_data: implementation specific params
        """
        raise NotImplementedError()
    def post_live_migration(self, context, instance, block_device_info,
                            migrate_data=None):
        """Post operation of live migration at source host.

        The base implementation is a no-op.

        :param context: security context
        :param instance: instance object that was migrated
        :param block_device_info: instance block device information
        :param migrate_data: if not None, it is a dict which has data
        """
        pass
    def post_live_migration_at_source(self, context, instance, network_info):
        """Unplug VIFs from networks at source.

        :param context: security context
        :param instance: instance object reference
        :param network_info: instance network information
        """
        raise NotImplementedError(_("Hypervisor driver does not support "
                                    "post_live_migration_at_source method"))
    def post_live_migration_at_destination(self, context, instance,
                                           network_info,
                                           block_migration=False,
                                           block_device_info=None):
        """Post operation of live migration at destination host.

        :param context: security context
        :param instance: instance object that is migrated
        :param network_info: instance network information
        :param block_migration: if true, post operation of block_migration.
        :param block_device_info: instance block device information
        """
        raise NotImplementedError()
    def check_instance_shared_storage_local(self, context, instance):
        """Check if instance files are located on shared storage.

        This runs a check on the destination host, and then calls
        back to the source host to check the results.

        :param context: security context
        :param instance: nova.objects.instance.Instance object
        """
        raise NotImplementedError()
    def check_instance_shared_storage_remote(self, context, data):
        """Check if instance files are located on shared storage.

        :param context: security context
        :param data: result of check_instance_shared_storage_local
        """
        raise NotImplementedError()
    def check_instance_shared_storage_cleanup(self, context, data):
        """Do cleanup on host after check_instance_shared_storage calls.

        The base implementation is a no-op.

        :param context: security context
        :param data: result of check_instance_shared_storage_local
        """
        pass
    def check_can_live_migrate_destination(self, context, instance,
                                           src_compute_info, dst_compute_info,
                                           block_migration=False,
                                           disk_over_commit=False):
        """Check if it is possible to execute live migration.

        This runs checks on the destination host, and then calls
        back to the source host to check the results.

        :param context: security context
        :param instance: nova.db.sqlalchemy.models.Instance
        :param src_compute_info: Info about the sending machine
        :param dst_compute_info: Info about the receiving machine
        :param block_migration: if true, prepare for block migration
        :param disk_over_commit: if true, allow disk over commit
        :returns: a dict containing migration info (hypervisor-dependent)
        """
        raise NotImplementedError()
    def check_can_live_migrate_destination_cleanup(self, context,
                                                   dest_check_data):
        """Do required cleanup on dest host after check_can_live_migrate calls.

        :param context: security context
        :param dest_check_data: result of check_can_live_migrate_destination
        """
        raise NotImplementedError()
    def check_can_live_migrate_source(self, context, instance,
                                      dest_check_data, block_device_info=None):
        """Check if it is possible to execute live migration.

        This checks if the live migration can succeed, based on the
        results from check_can_live_migrate_destination.

        :param context: security context
        :param instance: nova.db.sqlalchemy.models.Instance
        :param dest_check_data: result of check_can_live_migrate_destination
        :param block_device_info: result of _get_instance_block_device_info
        :returns: a dict containing migration info (hypervisor-dependent)
        """
        raise NotImplementedError()
    def get_instance_disk_info(self, instance,
                               block_device_info=None):
        """Retrieve information about actual disk sizes of an instance.

        :param instance: nova.objects.Instance
        :param block_device_info:
            Optional; Can be used to filter out devices which are
            actually volumes.
        :return:
            json strings with below format::

                "[{'path':'disk',
                   'type':'raw',
                   'virt_disk_size':'10737418240',
                   'backing_file':'backing_file',
                   'disk_size':'83886080'
                   'over_committed_disk_size':'10737418240'},
                  ...]"
        """
        raise NotImplementedError()
    def refresh_security_group_rules(self, security_group_id):
        """This method is called after a change to security groups.

        All security groups and their associated rules live in the datastore,
        and calling this method should apply the updated rules to instances
        running the specified security group.

        An error should be raised if the operation cannot complete.

        :param security_group_id: id of the security group that changed
        """
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def refresh_security_group_members(self, security_group_id):
        """This method is called when a security group is added to an instance.

        This message is sent to the virtualization drivers on hosts that are
        running an instance that belongs to a security group that has a rule
        that references the security group identified by `security_group_id`.
        It is the responsibility of this method to make sure any rules
        that authorize traffic flow with members of the security group are
        updated and any new members can communicate, and any removed members
        cannot.

        Scenario:

        * we are running on host 'H0' and we have an instance 'i-0'.
        * instance 'i-0' is a member of security group 'speaks-b'
        * group 'speaks-b' has an ingress rule that authorizes group 'b'
        * another host 'H1' runs an instance 'i-1'
        * instance 'i-1' is a member of security group 'b'

        When 'i-1' launches or terminates we will receive the message
        to update members of group 'b', at which time we will make
        any changes needed to the rules for instance 'i-0' to allow
        or deny traffic coming from 'i-1', depending on if it is being
        added or removed from the group.

        In this scenario, 'i-1' could just as easily have been running on our
        host 'H0' and this method would still have been called. The point was
        that this method isn't called on the host where instances of that
        group are running (as is the case with
        :py:meth:`refresh_security_group_rules`) but is called where references
        are made to authorizing those instances.

        An error should be raised if the operation cannot complete.
        """
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def refresh_provider_fw_rules(self):
        """This triggers a firewall update based on database changes.

        When this is called, rules have either been added or removed from the
        datastore. You can retrieve rules with
        :py:meth:`nova.db.provider_fw_rule_get_all`.

        Provider rules take precedence over security group rules. If an IP
        would be allowed by a security group ingress rule, but blocked by
        a provider rule, then packets from the IP are dropped. This includes
        intra-project traffic in the case of the allow_project_net_traffic
        flag for the libvirt-derived classes.
        """
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def refresh_instance_security_rules(self, instance):
        """Refresh security group rules.

        Gets called when an instance gets added to or removed from
        the security group the instance is a member of or if the
        group gains or loses a rule.

        :param instance: the instance whose rules should be refreshed
        """
        raise NotImplementedError()
    def reset_network(self, instance):
        """Reset networking for the specified instance (no-op by default)."""
        # TODO(Vek): Need to pass context in for access to auth_token
        pass
def ensure_filtering_rules_for_instance(self, instance, network_info):
"""Setting up filtering rules and waiting for its completion.
To migrate an instance, filtering rules to hypervisors
and firewalls are inevitable on destination host.
( Waiting only for filtering rules to hypervisor,
since filtering rules to firewall rules can be set faster).
Concretely, the below method must be called.
- setup_basic_filtering (for nova-basic, etc.)
- prepare_instance_filter(for nova-instance-instance-xxx, etc.)
to_xml may have to be called since it defines PROJNET, PROJMASK.
but libvirt migrates those value through migrateToURI(),
so , no need to be called.
Don't use thread for this method since migration should
not be started when setting-up filtering rules operations
are not completed.
:param instance: nova.objects.instance.Instance object
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def filter_defer_apply_on(self):
    """Begin deferring application of IPTables rules."""
    # No-op by default; firewall-capable drivers override this.
    pass
def filter_defer_apply_off(self):
    """Stop deferring IPTables rules and apply the deferred rules now."""
    # No-op by default; firewall-capable drivers override this.
    pass
def unfilter_instance(self, instance, network_info):
"""Stop filtering instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def set_admin_password(self, instance, new_pass):
"""Set the root password on the specified instance.
:param instance: nova.objects.instance.Instance
:param new_pass: the new password
"""
raise NotImplementedError()
def inject_file(self, instance, b64_path, b64_contents):
"""Writes a file on the specified instance.
The first parameter is an instance of nova.compute.service.Instance,
and so the instance is being specified as instance.name. The second
parameter is the base64-encoded path to which the file is to be
written on the instance; the third is the contents of the file, also
base64-encoded.
NOTE(russellb) This method is deprecated and will be removed once it
can be removed from nova.compute.manager.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def change_instance_metadata(self, context, instance, diff):
"""Applies a diff to the instance metadata.
This is an optional driver method which is used to publish
changes to the instance's metadata to the hypervisor. If the
hypervisor has no means of publishing the instance metadata to
the instance, then this method should not be implemented.
:param context: security context
:param instance: nova.objects.instance.Instance
"""
pass
def inject_network_info(self, instance, nw_info):
"""inject network info for specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
pass
def poll_rebooting_instances(self, timeout, instances):
"""Poll for rebooting instances
:param timeout: the currently configured timeout for considering
rebooting instances to be stuck
:param instances: instances that have been in rebooting state
longer than the configured timeout
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def host_power_action(self, action):
"""Reboots, shuts down or powers up the host."""
raise NotImplementedError()
def host_maintenance_mode(self, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation.
"""
raise NotImplementedError()
def set_host_enabled(self, enabled):
"""Sets the specified host's ability to accept new instances."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_host_uptime(self):
"""Returns the result of calling "uptime" on the target host."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks.
:param instance: nova.objects.instance.Instance
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks.
:param instance: nova.objects.instance.Instance
"""
raise NotImplementedError()
def get_host_cpu_stats(self):
"""Get the currently known host CPU stats.
:returns: a dict containing the CPU stat info, eg:
| {'kernel': kern,
| 'idle': idle,
| 'user': user,
| 'iowait': wait,
| 'frequency': freq},
where kern and user indicate the cumulative CPU time
(nanoseconds) spent by kernel and user processes
respectively, idle indicates the cumulative idle CPU time
(nanoseconds), wait indicates the cumulative I/O wait CPU
time (nanoseconds), since the host is booting up; freq
indicates the current CPU frequency (MHz). All values are
long integers.
"""
raise NotImplementedError()
def block_stats(self, instance, disk_id):
"""Return performance counters associated with the given disk_id on the
given instance. These are returned as [rd_req, rd_bytes, wr_req,
wr_bytes, errs], where rd indicates read, wr indicates write, req is
the total number of I/O requests made, bytes is the total number of
bytes transferred, and errs is the number of requests held up due to a
full pipeline.
All counters are long integers.
This method is optional. On some platforms (e.g. XenAPI) performance
statistics can be retrieved directly in aggregate form, without Nova
having to do the aggregation. On those platforms, this method is
unused.
Note that this function takes an instance ID.
"""
raise NotImplementedError()
def deallocate_networks_on_reschedule(self, instance):
    """Report whether networks should be deallocated on a reschedule.

    Drivers that cannot keep network allocations across a reschedule
    override this; the default keeps them.
    """
    return False
def macs_for_instance(self, instance):
    """Return the MAC addresses this instance must use, if constrained.

    Some hypervisors (such as bare metal) cannot do freeform
    virtualisation of MAC addresses.  Drivers for such platforms return
    the set of MAC addresses the instance is to have, and
    allocate_for_instance takes that set into consideration when
    provisioning networking for the instance.

    Mapping MAC addresses to actual networks (or permitting them to be
    freeform) is up to the network implementation layer: with openflow
    switches, fixed MAC addresses can still be virtualised onto any L2
    domain with arbitrary VLANs etc., while regular switches require
    pre-configured MAC->network mappings matching the actual
    configuration.

    This is called during spawn_instance by the compute manager, and the
    result is supplied to the allocate_for_instance call, which must
    ensure that all assigned network details are compatible with the set
    of MAC addresses.

    :return: None, or a set of MAC ids (e.g. set(['12:34:56:78:90:ab'])).
             None means 'no constraints'; a set means 'these and only
             these MAC addresses'.
    """
    # Most hypervisors have no MAC restrictions, so no constraints.
    return None
def dhcp_options_for_instance(self, instance):
    """Return DHCP options for this instance, if any are required.

    Some hypervisors (such as bare metal) require that instances boot
    from the network and manage their own TFTP service, which requires
    passing the appropriate options out to the DHCP service.  Most
    hypervisors can use this default, which returns None.

    This is called during spawn_instance by the compute manager.

    Note that the format of the return value is specific to the Quantum
    client API.

    :return: None, or a set of DHCP options, e.g.:

             | [{'opt_name': 'bootfile-name',
             |   'opt_value': '/tftpboot/path/to/config'},
             |  {'opt_name': 'server-ip-address',
             |   'opt_value': '1.2.3.4'},
             |  {'opt_name': 'tftp-server',
             |   'opt_value': '1.2.3.4'}
             | ]
    """
    # No network-boot options needed by default.
    return None
def manage_image_cache(self, context, all_instances):
"""Manage the driver's local image cache.
Some drivers chose to cache images for instances on disk. This method
is an opportunity to do management of that cache which isn't directly
related to other calls into the driver. The prime example is to clean
the cache and remove images which are no longer of interest.
:param all_instances: nova.objects.instance.InstanceList
"""
pass
def add_to_aggregate(self, context, aggregate, host, **kwargs):
"""Add a compute host to an aggregate."""
# NOTE(jogo) Currently only used for XenAPI-Pool
raise NotImplementedError()
def remove_from_aggregate(self, context, aggregate, host, **kwargs):
"""Remove a compute host from an aggregate."""
raise NotImplementedError()
def undo_aggregate_operation(self, context, op, aggregate,
host, set_error=True):
"""Undo for Resource Pools."""
raise NotImplementedError()
def get_volume_connector(self, instance):
"""Get connector information for the instance for attaching to volumes.
Connector information is a dictionary representing the ip of the
machine that will be making the connection, the name of the iscsi
initiator and the hostname of the machine as follows::
{
'ip': ip,
'initiator': initiator,
'host': hostname
}
"""
raise NotImplementedError()
def get_available_nodes(self, refresh=False):
"""Returns nodenames of all nodes managed by the compute service.
This method is for multi compute-nodes support. If a driver supports
multi compute-nodes, this method returns a list of nodenames managed
by the service. Otherwise, this method should return
[hypervisor_hostname].
"""
raise NotImplementedError()
def node_is_available(self, nodename):
    """Return whether this compute service manages a particular node."""
    # Consult the cached node list first; only force a refresh when the
    # node is missing, so a recently-added node is still recognised.
    known = self.get_available_nodes()
    if nodename not in known:
        known = self.get_available_nodes(refresh=True)
    return nodename in known
def get_per_instance_usage(self):
    """Retrieve per-instance resource usage information.

    :returns: dict of nova uuid => dict of usage info
    """
    # No usage data is tracked by default.
    return {}
def instance_on_disk(self, instance):
    """Check whether this host can access the instance's files.

    .. note::

        Used in rebuild for HA implementation and required for
        validation of access to instance shared disk files.

    :param instance: nova.objects.instance.Instance to look up
    :returns: True if files of an instance with the supplied ID are
              accessible on the host, False otherwise.
    """
    # Assume no shared instance storage unless the driver says otherwise.
    return False
def register_event_listener(self, callback):
    """Register a callback to receive asynchronous hypervisor events.

    The callback is invoked with a single parameter, an instance of the
    nova.virt.event.Event class, whenever the hypervisor reports an
    event.
    """
    self._compute_event_callback = callback
def emit_event(self, event):
    """Dispatch a hypervisor event to the compute manager.

    Invokes the event callback registered by the compute manager via
    register_event_listener().  This must only be invoked from a green
    thread.
    """
    callback = self._compute_event_callback
    # Events arriving before a listener is registered are dropped.
    if not callback:
        LOG.debug("Discarding event %s", str(event))
        return
    if not isinstance(event, virtevent.Event):
        raise ValueError(
            _("Event must be an instance of nova.virt.event.Event"))
    try:
        LOG.debug("Emitting event %s", str(event))
        callback(event)
    except Exception as ex:
        # A misbehaving callback must not propagate into the driver.
        LOG.error(_LE("Exception dispatching event %(event)s: %(ex)s"),
                  {'event': event, 'ex': ex})
def delete_instance_files(self, instance):
    """Delete any lingering instance files for an instance.

    :param instance: nova.objects.instance.Instance
    :returns: True if the instance was deleted from disk, False
              otherwise.
    """
    # Nothing to clean up by default; report success.
    return True
@property
def need_legacy_block_device_info(self):
    """Whether the driver expects the legacy block device info format.

    Callers use this to decide which format of block device info to
    pass to methods that consume it.
    """
    return True
def volume_snapshot_create(self, context, instance, volume_id,
create_info):
"""Snapshots volumes attached to a specified instance.
:param context: request context
:param instance: nova.objects.instance.Instance that has the volume
attached
:param volume_id: Volume to be snapshotted
:param create_info: The data needed for nova to be able to attach
to the volume. This is the same data format returned by
Cinder's initialize_connection() API call. In the case of
doing a snapshot, it is the image file Cinder expects to be
used as the active disk after the snapshot operation has
completed. There may be other data included as well that is
needed for creating the snapshot.
"""
raise NotImplementedError()
def volume_snapshot_delete(self, context, instance, volume_id,
snapshot_id, delete_info):
"""Snapshots volumes attached to a specified instance.
:param context: request context
:param instance: nova.objects.instance.Instance that has the volume
attached
:param volume_id: Attached volume associated with the snapshot
:param snapshot_id: The snapshot to delete.
:param delete_info: Volume backend technology specific data needed to
be able to complete the snapshot. For example, in the case of
qcow2 backed snapshots, this would include the file being
merged, and the file being merged into (if appropriate).
"""
raise NotImplementedError()
def default_root_device_name(self, instance, image_meta, root_bdm):
"""Provide a default root device name for the driver."""
raise NotImplementedError()
def default_device_names_for_instance(self, instance, root_device_name,
*block_device_lists):
"""Default the missing device names in the block device mapping."""
raise NotImplementedError()
def get_device_name_for_instance(self, instance,
bdms, block_device_obj):
"""Get the next device name based on the block device mapping.
:param instance: nova.objects.instance.Instance that volume is
requesting a device name
:param bdms: a nova.objects.BlockDeviceMappingList for the instance
:param block_device_obj: A nova.objects.BlockDeviceMapping instance
with all info about the requested block
device. device_name does not need to be set,
and should be decided by the driver
implementation if not set.
:returns: The chosen device name.
"""
raise NotImplementedError()
def is_supported_fs_format(self, fs_type):
    """Check whether the file system format is supported by this driver.

    :param fs_type: the file system type to be checked; the valid
                    values are defined at the disk API module.
    """
    # NOTE(jichenjc): Return False here so that every hypervisor has to
    #                 declare its supported file system types by
    #                 overriding this method at its virt layer.
    return False
def quiesce(self, context, instance, image_meta):
"""Quiesce the specified instance to prepare for snapshots.
If the specified instance doesn't support quiescing,
InstanceQuiesceNotSupported is raised. When it fails to quiesce by
other errors (e.g. agent timeout), NovaException is raised.
:param context: request context
:param instance: nova.objects.instance.Instance to be quiesced
:param image_meta: image object returned by nova.image.glance that
defines the image from which this instance
was created
"""
raise NotImplementedError()
def unquiesce(self, context, instance, image_meta):
"""Unquiesce the specified instance after snapshots.
If the specified instance doesn't support quiescing,
InstanceQuiesceNotSupported is raised. When it fails to quiesce by
other errors (e.g. agent timeout), NovaException is raised.
:param context: request context
:param instance: nova.objects.instance.Instance to be unquiesced
:param image_meta: image object returned by nova.image.glance that
defines the image from which this instance
was created
"""
raise NotImplementedError()
def load_compute_driver(virtapi, compute_driver=None):
    """Load a compute driver module.

    Load the compute driver module named by the compute_driver
    configuration option or, if supplied, by the driver name given as an
    argument.

    Compute driver constructors take a VirtAPI object as their first
    argument and this must be supplied.

    :param virtapi: a VirtAPI instance
    :param compute_driver: a compute driver name to override the config opt
    :returns: a ComputeDriver instance
    """
    driver_name = compute_driver or CONF.compute_driver
    if not driver_name:
        # Without a driver name there is nothing meaningful to run.
        LOG.error(_LE("Compute driver option required, but not specified"))
        sys.exit(1)

    LOG.info(_LI("Loading compute driver '%s'"), driver_name)
    try:
        driver = importutils.import_object_ns('nova.virt',
                                              driver_name,
                                              virtapi)
        # Guard against configuration pointing at a non-driver class.
        return utils.check_isinstance(driver, ComputeDriver)
    except ImportError:
        LOG.exception(_LE("Unable to load the virtualization driver"))
        sys.exit(1)
def compute_driver_matches(match):
    """Return whether the configured compute driver name ends with match."""
    driver_name = CONF.compute_driver
    return driver_name and driver_name.endswith(match)
| apache-2.0 |
clef/python-social-auth | social/backends/readability.py | 83 | 1351 | """
Readability OAuth1 backend, docs at:
http://psa.matiasaguirre.net/docs/backends/readability.html
"""
from social.backends.oauth import BaseOAuth1
READABILITY_API = 'https://www.readability.com/api/rest/v1'
class ReadabilityOAuth(BaseOAuth1):
    """Readability OAuth authentication backend"""
    name = 'readability'
    # Key in the user-data response that uniquely identifies the account.
    ID_KEY = 'username'
    AUTHORIZATION_URL = '{0}/oauth/authorize/'.format(READABILITY_API)
    REQUEST_TOKEN_URL = '{0}/oauth/request_token/'.format(READABILITY_API)
    ACCESS_TOKEN_URL = '{0}/oauth/access_token/'.format(READABILITY_API)
    # (response key, storage key) pairs copied into the stored extra data.
    EXTRA_DATA = [('date_joined', 'date_joined'),
                  ('kindle_email_address', 'kindle_email_address'),
                  ('avatar_url', 'avatar_url'),
                  ('email_into_address', 'email_into_address')]
    def get_user_details(self, response):
        """Map the Readability user-data response to the common
        username/fullname/first/last detail dict."""
        fullname, first_name, last_name = self.get_user_names(
            first_name=response['first_name'],
            last_name=response['last_name']
        )
        return {'username': response['username'],
                'fullname': fullname,
                'first_name': first_name,
                'last_name': last_name}
    def user_data(self, access_token):
        """Fetch the authenticated user's profile from the REST API."""
        return self.get_json(READABILITY_API + '/users/_current',
                             auth=self.oauth_auth(access_token))
| bsd-3-clause |
Peddle/hue | desktop/core/ext-py/pycparser-2.14/pycparser/lextab.py | 35 | 7457 | # pycparser.lextab.py. This file automatically created by PLY (version 3.4). Don't edit!
_tabversion = '3.4'
_lextokens = {'VOID': 1, 'LBRACKET': 1, 'WCHAR_CONST': 1, 'FLOAT_CONST': 1, 'MINUS': 1, 'RPAREN': 1, 'LONG': 1, 'PLUS': 1, 'ELLIPSIS': 1, 'GT': 1, 'GOTO': 1, 'ENUM': 1, 'PERIOD': 1, 'GE': 1, 'INT_CONST_DEC': 1, 'ARROW': 1, 'HEX_FLOAT_CONST': 1, 'DOUBLE': 1, 'MINUSEQUAL': 1, 'INT_CONST_OCT': 1, 'TIMESEQUAL': 1, 'OR': 1, 'SHORT': 1, 'RETURN': 1, 'RSHIFTEQUAL': 1, 'RESTRICT': 1, 'STATIC': 1, 'SIZEOF': 1, 'UNSIGNED': 1, 'UNION': 1, 'COLON': 1, 'WSTRING_LITERAL': 1, 'DIVIDE': 1, 'FOR': 1, 'PLUSPLUS': 1, 'EQUALS': 1, 'ELSE': 1, 'INLINE': 1, 'EQ': 1, 'AND': 1, 'TYPEID': 1, 'LBRACE': 1, 'PPHASH': 1, 'INT': 1, 'SIGNED': 1, 'CONTINUE': 1, 'NOT': 1, 'OREQUAL': 1, 'MOD': 1, 'RSHIFT': 1, 'DEFAULT': 1, 'CHAR': 1, 'WHILE': 1, 'DIVEQUAL': 1, 'EXTERN': 1, 'CASE': 1, 'LAND': 1, 'REGISTER': 1, 'MODEQUAL': 1, 'NE': 1, 'SWITCH': 1, 'INT_CONST_HEX': 1, '_COMPLEX': 1, 'PLUSEQUAL': 1, 'STRUCT': 1, 'CONDOP': 1, 'BREAK': 1, 'VOLATILE': 1, 'ANDEQUAL': 1, 'INT_CONST_BIN': 1, 'DO': 1, 'LNOT': 1, 'CONST': 1, 'LOR': 1, 'CHAR_CONST': 1, 'LSHIFT': 1, 'RBRACE': 1, '_BOOL': 1, 'LE': 1, 'SEMI': 1, 'LT': 1, 'COMMA': 1, 'OFFSETOF': 1, 'TYPEDEF': 1, 'XOR': 1, 'AUTO': 1, 'TIMES': 1, 'LPAREN': 1, 'MINUSMINUS': 1, 'ID': 1, 'IF': 1, 'STRING_LITERAL': 1, 'FLOAT': 1, 'XOREQUAL': 1, 'LSHIFTEQUAL': 1, 'RBRACKET': 1}
_lexreflags = 0
_lexliterals = ''
_lexstateinfo = {'ppline': 'exclusive', 'pppragma': 'exclusive', 'INITIAL': 'inclusive'}
_lexstatere = {'ppline': [('(?P<t_ppline_FILENAME>"([^"\\\\\\n]|(\\\\(([a-zA-Z._~!=&\\^\\-\\\\?\'"])|(\\d+)|(x[0-9a-fA-F]+))))*")|(?P<t_ppline_LINE_NUMBER>(0(([uU]ll)|([uU]LL)|(ll[uU]?)|(LL[uU]?)|([uU][lL])|([lL][uU]?)|[uU])?)|([1-9][0-9]*(([uU]ll)|([uU]LL)|(ll[uU]?)|(LL[uU]?)|([uU][lL])|([lL][uU]?)|[uU])?))|(?P<t_ppline_NEWLINE>\\n)|(?P<t_ppline_PPLINE>line)', [None, ('t_ppline_FILENAME', 'FILENAME'), None, None, None, None, None, None, ('t_ppline_LINE_NUMBER', 'LINE_NUMBER'), None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, ('t_ppline_NEWLINE', 'NEWLINE'), ('t_ppline_PPLINE', 'PPLINE')])], 'pppragma': [('(?P<t_pppragma_NEWLINE>\\n)|(?P<t_pppragma_PPPRAGMA>pragma)|(?P<t_pppragma_STR>"([^"\\\\\\n]|(\\\\(([a-zA-Z._~!=&\\^\\-\\\\?\'"])|(\\d+)|(x[0-9a-fA-F]+))))*")|(?P<t_pppragma_ID>[a-zA-Z_$][0-9a-zA-Z_$]*)', [None, ('t_pppragma_NEWLINE', 'NEWLINE'), ('t_pppragma_PPPRAGMA', 'PPPRAGMA'), ('t_pppragma_STR', 'STR'), None, None, None, None, None, None, ('t_pppragma_ID', 'ID')])], 'INITIAL': [('(?P<t_PPHASH>[ \\t]*\\#)|(?P<t_NEWLINE>\\n+)|(?P<t_LBRACE>\\{)|(?P<t_RBRACE>\\})|(?P<t_FLOAT_CONST>((((([0-9]*\\.[0-9]+)|([0-9]+\\.))([eE][-+]?[0-9]+)?)|([0-9]+([eE][-+]?[0-9]+)))[FfLl]?))|(?P<t_HEX_FLOAT_CONST>(0[xX]([0-9a-fA-F]+|((([0-9a-fA-F]+)?\\.[0-9a-fA-F]+)|([0-9a-fA-F]+\\.)))([pP][+-]?[0-9]+)[FfLl]?))|(?P<t_INT_CONST_HEX>0[xX][0-9a-fA-F]+(([uU]ll)|([uU]LL)|(ll[uU]?)|(LL[uU]?)|([uU][lL])|([lL][uU]?)|[uU])?)', [None, ('t_PPHASH', 'PPHASH'), ('t_NEWLINE', 'NEWLINE'), ('t_LBRACE', 'LBRACE'), ('t_RBRACE', 'RBRACE'), ('t_FLOAT_CONST', 'FLOAT_CONST'), None, None, None, None, None, None, None, None, None, ('t_HEX_FLOAT_CONST', 'HEX_FLOAT_CONST'), None, None, None, None, None, None, None, ('t_INT_CONST_HEX', 'INT_CONST_HEX')]), 
('(?P<t_INT_CONST_BIN>0[bB][01]+(([uU]ll)|([uU]LL)|(ll[uU]?)|(LL[uU]?)|([uU][lL])|([lL][uU]?)|[uU])?)|(?P<t_BAD_CONST_OCT>0[0-7]*[89])|(?P<t_INT_CONST_OCT>0[0-7]*(([uU]ll)|([uU]LL)|(ll[uU]?)|(LL[uU]?)|([uU][lL])|([lL][uU]?)|[uU])?)|(?P<t_INT_CONST_DEC>(0(([uU]ll)|([uU]LL)|(ll[uU]?)|(LL[uU]?)|([uU][lL])|([lL][uU]?)|[uU])?)|([1-9][0-9]*(([uU]ll)|([uU]LL)|(ll[uU]?)|(LL[uU]?)|([uU][lL])|([lL][uU]?)|[uU])?))|(?P<t_CHAR_CONST>\'([^\'\\\\\\n]|(\\\\(([a-zA-Z._~!=&\\^\\-\\\\?\'"])|(\\d+)|(x[0-9a-fA-F]+))))\')|(?P<t_WCHAR_CONST>L\'([^\'\\\\\\n]|(\\\\(([a-zA-Z._~!=&\\^\\-\\\\?\'"])|(\\d+)|(x[0-9a-fA-F]+))))\')|(?P<t_UNMATCHED_QUOTE>(\'([^\'\\\\\\n]|(\\\\(([a-zA-Z._~!=&\\^\\-\\\\?\'"])|(\\d+)|(x[0-9a-fA-F]+))))*\\n)|(\'([^\'\\\\\\n]|(\\\\(([a-zA-Z._~!=&\\^\\-\\\\?\'"])|(\\d+)|(x[0-9a-fA-F]+))))*$))|(?P<t_BAD_CHAR_CONST>(\'([^\'\\\\\\n]|(\\\\(([a-zA-Z._~!=&\\^\\-\\\\?\'"])|(\\d+)|(x[0-9a-fA-F]+))))[^\'\n]+\')|(\'\')|(\'([\\\\][^a-zA-Z._~^!=&\\^\\-\\\\?\'"x0-7])[^\'\\n]*\'))', [None, ('t_INT_CONST_BIN', 'INT_CONST_BIN'), None, None, None, None, None, None, None, ('t_BAD_CONST_OCT', 'BAD_CONST_OCT'), ('t_INT_CONST_OCT', 'INT_CONST_OCT'), None, None, None, None, None, None, None, ('t_INT_CONST_DEC', 'INT_CONST_DEC'), None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, ('t_CHAR_CONST', 'CHAR_CONST'), None, None, None, None, None, None, ('t_WCHAR_CONST', 'WCHAR_CONST'), None, None, None, None, None, None, ('t_UNMATCHED_QUOTE', 'UNMATCHED_QUOTE'), None, None, None, None, None, None, None, None, None, None, None, None, None, None, ('t_BAD_CHAR_CONST', 'BAD_CHAR_CONST')]), 
('(?P<t_WSTRING_LITERAL>L"([^"\\\\\\n]|(\\\\(([a-zA-Z._~!=&\\^\\-\\\\?\'"])|(\\d+)|(x[0-9a-fA-F]+))))*")|(?P<t_BAD_STRING_LITERAL>"([^"\\\\\\n]|(\\\\(([a-zA-Z._~!=&\\^\\-\\\\?\'"])|(\\d+)|(x[0-9a-fA-F]+))))*([\\\\][^a-zA-Z._~^!=&\\^\\-\\\\?\'"x0-7])([^"\\\\\\n]|(\\\\(([a-zA-Z._~!=&\\^\\-\\\\?\'"])|(\\d+)|(x[0-9a-fA-F]+))))*")|(?P<t_ID>[a-zA-Z_$][0-9a-zA-Z_$]*)|(?P<t_STRING_LITERAL>"([^"\\\\\\n]|(\\\\(([a-zA-Z._~!=&\\^\\-\\\\?\'"])|(\\d+)|(x[0-9a-fA-F]+))))*")|(?P<t_ELLIPSIS>\\.\\.\\.)|(?P<t_PLUSPLUS>\\+\\+)|(?P<t_LOR>\\|\\|)|(?P<t_XOREQUAL>\\^=)|(?P<t_OREQUAL>\\|=)|(?P<t_LSHIFTEQUAL><<=)|(?P<t_RSHIFTEQUAL>>>=)|(?P<t_PLUSEQUAL>\\+=)|(?P<t_TIMESEQUAL>\\*=)|(?P<t_PLUS>\\+)|(?P<t_MODEQUAL>%=)|(?P<t_DIVEQUAL>/=)', [None, ('t_WSTRING_LITERAL', 'WSTRING_LITERAL'), None, None, None, None, None, None, ('t_BAD_STRING_LITERAL', 'BAD_STRING_LITERAL'), None, None, None, None, None, None, None, None, None, None, None, None, None, ('t_ID', 'ID'), (None, 'STRING_LITERAL'), None, None, None, None, None, None, (None, 'ELLIPSIS'), (None, 'PLUSPLUS'), (None, 'LOR'), (None, 'XOREQUAL'), (None, 'OREQUAL'), (None, 'LSHIFTEQUAL'), (None, 'RSHIFTEQUAL'), (None, 'PLUSEQUAL'), (None, 'TIMESEQUAL'), (None, 'PLUS'), (None, 'MODEQUAL'), (None, 'DIVEQUAL')]), ('(?P<t_RBRACKET>\\])|(?P<t_CONDOP>\\?)|(?P<t_XOR>\\^)|(?P<t_LSHIFT><<)|(?P<t_LE><=)|(?P<t_LPAREN>\\()|(?P<t_ARROW>->)|(?P<t_EQ>==)|(?P<t_NE>!=)|(?P<t_MINUSMINUS>--)|(?P<t_OR>\\|)|(?P<t_TIMES>\\*)|(?P<t_LBRACKET>\\[)|(?P<t_GE>>=)|(?P<t_RPAREN>\\))|(?P<t_LAND>&&)|(?P<t_RSHIFT>>>)|(?P<t_MINUSEQUAL>-=)|(?P<t_PERIOD>\\.)|(?P<t_ANDEQUAL>&=)|(?P<t_EQUALS>=)|(?P<t_LT><)|(?P<t_COMMA>,)|(?P<t_DIVIDE>/)|(?P<t_AND>&)|(?P<t_MOD>%)|(?P<t_SEMI>;)|(?P<t_MINUS>-)|(?P<t_GT>>)|(?P<t_COLON>:)|(?P<t_NOT>~)|(?P<t_LNOT>!)', [None, (None, 'RBRACKET'), (None, 'CONDOP'), (None, 'XOR'), (None, 'LSHIFT'), (None, 'LE'), (None, 'LPAREN'), (None, 'ARROW'), (None, 'EQ'), (None, 'NE'), (None, 'MINUSMINUS'), (None, 'OR'), (None, 'TIMES'), (None, 'LBRACKET'), (None, 'GE'), 
(None, 'RPAREN'), (None, 'LAND'), (None, 'RSHIFT'), (None, 'MINUSEQUAL'), (None, 'PERIOD'), (None, 'ANDEQUAL'), (None, 'EQUALS'), (None, 'LT'), (None, 'COMMA'), (None, 'DIVIDE'), (None, 'AND'), (None, 'MOD'), (None, 'SEMI'), (None, 'MINUS'), (None, 'GT'), (None, 'COLON'), (None, 'NOT'), (None, 'LNOT')])]}
_lexstateignore = {'ppline': ' \t', 'pppragma': ' \t<>.-{}();=+-*/$%@&^~!?:,0123456789', 'INITIAL': ' \t'}
_lexstateerrorf = {'ppline': 't_ppline_error', 'pppragma': 't_pppragma_error', 'INITIAL': 't_error'}
| apache-2.0 |
gnarula/eden_deployment | modules/unit_tests/modules/s3layouts.py | 15 | 3100 | # -*- coding: utf-8 -*-
#
# Layouts Unit Tests
#
# To run this script use:
# python web2py.py -S eden -M -R applications/eden/modules/unit_tests/modules/s3layouts.py
#
import unittest
from gluon import current
from s3.s3layouts import homepage, S3AddResourceLink
# =============================================================================
class LayoutTests(unittest.TestCase):
    """ Layout Tests """
    # -------------------------------------------------------------------------
    def testHomepageFunction(self):
        """ Test homepage() navigation item """
        # Test existing module
        hp = homepage("pr")
        self.assertTrue(hp is not None)
        # Test non-existent (deactivated) module
        hp = homepage("nonexistent")
        self.assertTrue(hp is not None)
        self.assertFalse(hp.check_active())
        # An inactive homepage item must render to an empty string.
        rendered_hp = hp.xml()
        self.assertEqual(rendered_hp, "")
    # -------------------------------------------------------------------------
    def testAddResourceLink(self):
        """ Test AddResourceLink """
        auth = current.auth
        deployment_settings = current.deployment_settings
        comment = S3AddResourceLink(c="pr", f="person")
        # If the module is active, the comment should always be active
        self.assertEqual(comment.check_active(),
                         deployment_settings.has_module("pr"))
        self.assertEqual(comment.method, "create")
        # Label should fall back to CRUD string
        from s3.s3crud import S3CRUD
        crud_string = S3CRUD.crud_string("pr_person", "label_create")
        self.assertEqual(comment.label, crud_string)
        # Only exercise the activation round-trip if "inv" is deployed.
        if "inv" in deployment_settings.modules:
            comment = S3AddResourceLink(c="inv", f="inv_item")
            # Deactivate module
            inv = deployment_settings.modules["inv"]
            del deployment_settings.modules["inv"]
            # Comment should auto-deactivate
            self.assertFalse(comment.check_active())
            # Restore module
            deployment_settings.modules["inv"] = inv
            # Comment should auto-reactivate
            self.assertTrue(comment.check_active())
            # Without authentication the link is hidden (renders empty).
            self.assertFalse(comment.check_permission())
            self.assertEqual(comment.xml(), "")
            auth.s3_impersonate("admin@example.com")
            self.assertTrue(comment.check_permission())
            output = comment.xml()
            self.assertTrue(type(output) is str)
            self.assertNotEqual(output, "")
            auth.s3_impersonate(None)
# =============================================================================
def run_suite(*test_classes):
""" Run the test suite """
loader = unittest.TestLoader()
suite = unittest.TestSuite()
for test_class in test_classes:
tests = loader.loadTestsFromTestCase(test_class)
suite.addTests(tests)
if suite is not None:
unittest.TextTestRunner().run(suite)
return
# Allow running this test module directly as a script.
if __name__ == "__main__":
    run_suite(
        LayoutTests,
    )
| mit |
blueboxgroup/nova | nova/tests/unit/objects/test_network_request.py | 53 | 4041 | # Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import objects
from nova.tests.unit.objects import test_objects
FAKE_UUID = '0C5C9AD2-F967-4E92-A7F3-24410F697440'
class _TestNetworkRequestObject(object):
    """Shared NetworkRequest test cases, run against both the local and
    the remote object backend by the subclasses below.
    """
    def test_basic(self):
        # Plain attribute assignment must be accepted without error.
        request = objects.NetworkRequest()
        request.network_id = '456'
        request.address = '1.2.3.4'
        request.port_id = FAKE_UUID
    def test_load(self):
        # Unset fields default to None on access.
        request = objects.NetworkRequest()
        self.assertIsNone(request.port_id)
    def test_to_tuple_neutron(self):
        request = objects.NetworkRequest(network_id='123',
                                         address='1.2.3.4',
                                         port_id=FAKE_UUID,
                                         )
        # Neutron tuples are (network_id, address, port_id, pci_request_id).
        with mock.patch('nova.utils.is_neutron', return_value=True):
            self.assertEqual(('123', '1.2.3.4', FAKE_UUID, None),
                             request.to_tuple())
    def test_to_tuple_nova(self):
        request = objects.NetworkRequest(network_id='123',
                                         address='1.2.3.4',
                                         port_id=FAKE_UUID)
        # Nova-network tuples are just (network_id, address).
        with mock.patch('nova.utils.is_neutron', return_value=False):
            self.assertEqual(('123', '1.2.3.4'),
                             request.to_tuple())
    def test_from_tuple_neutron(self):
        request = objects.NetworkRequest.from_tuple(
            ('123', '1.2.3.4', FAKE_UUID, None))
        self.assertEqual('123', request.network_id)
        self.assertEqual('1.2.3.4', str(request.address))
        self.assertEqual(FAKE_UUID, request.port_id)
    def test_from_tuple_neutron_without_pci_request_id(self):
        # A 3-tuple without pci_request_id must also be accepted.
        request = objects.NetworkRequest.from_tuple(
            ('123', '1.2.3.4', FAKE_UUID))
        self.assertEqual('123', request.network_id)
        self.assertEqual('1.2.3.4', str(request.address))
        self.assertEqual(FAKE_UUID, request.port_id)
    def test_from_tuple_nova(self):
        request = objects.NetworkRequest.from_tuple(
            ('123', '1.2.3.4'))
        self.assertEqual('123', request.network_id)
        self.assertEqual('1.2.3.4', str(request.address))
        self.assertIsNone(request.port_id)
    @mock.patch('nova.utils.is_neutron', return_value=True)
    def test_list_as_tuples(self, is_neutron):
        requests = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(network_id='123'),
                     objects.NetworkRequest(network_id='456')])
        self.assertEqual(
            [('123', None, None, None), ('456', None, None, None)],
            requests.as_tuples())
    def test_is_single_unspecified(self):
        # Only a single, fully-unspecified request reports True.
        requests = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(network_id='123')])
        self.assertFalse(requests.is_single_unspecified)
        requests = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(),
                     objects.NetworkRequest()])
        self.assertFalse(requests.is_single_unspecified)
        requests = objects.NetworkRequestList(
            objects=[objects.NetworkRequest()])
        self.assertTrue(requests.is_single_unspecified)
class TestNetworkRequestObject(test_objects._LocalTest,
                               _TestNetworkRequestObject):
    # Runs the shared tests against the local (in-process) object backend.
    pass
class TestNetworkRequestRemoteObject(test_objects._RemoteTest,
                                     _TestNetworkRequestObject):
    # Runs the shared tests through the remote (RPC) object backend.
    pass
| apache-2.0 |
takeshineshiro/nova | nova/virt/virtapi.py | 88 | 1026 | # Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
class VirtAPI(object):
    """Interface through which virt drivers reach back into the compute
    manager; concrete implementations are supplied by the manager.
    """

    def provider_fw_rule_get_all(self, context):
        """Get the provider firewall rules.

        :param context: security context
        """
        raise NotImplementedError()

    @contextlib.contextmanager
    def wait_for_instance_event(self, instance, event_names, deadline=300,
                                error_callback=None):
        """Context manager that waits for external instance events."""
        raise NotImplementedError()
| apache-2.0 |
hgl888/chromium-crosswalk-efl | base/PRESUBMIT.py | 33 | 2047 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for src/base.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def _CheckNoInterfacesInBase(input_api, output_api):
"""Checks to make sure no files in libbase.a have |@interface|."""
pattern = input_api.re.compile(r'^\s*@interface', input_api.re.MULTILINE)
files = []
for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile):
if (f.LocalPath().startswith('base/') and
not "/test/" in f.LocalPath() and
not f.LocalPath().endswith('_unittest.mm') and
not f.LocalPath().endswith('mac/sdk_forward_declarations.h')):
contents = input_api.ReadFile(f)
if pattern.search(contents):
files.append(f)
if len(files):
return [ output_api.PresubmitError(
'Objective-C interfaces or categories are forbidden in libbase. ' +
'See http://groups.google.com/a/chromium.org/group/chromium-dev/' +
'browse_thread/thread/efb28c10435987fd',
files) ]
return []
def _CommonChecks(input_api, output_api):
  """Checks common to both upload and commit."""
  return list(_CheckNoInterfacesInBase(input_api, output_api))
def CheckChangeOnUpload(input_api, output_api):
  """Presubmit entry point run when a change is uploaded for review."""
  return list(_CommonChecks(input_api, output_api))
def CheckChangeOnCommit(input_api, output_api):
  """Presubmit entry point run when a change is committed."""
  return list(_CommonChecks(input_api, output_api))
def GetPreferredTryMasters(project, change):
  """Map each try-server master to its default swarming release builder."""
  builders = (
      ('tryserver.chromium.linux', 'linux_chromium_rel_swarming'),
      ('tryserver.chromium.mac', 'mac_chromium_rel_swarming'),
      ('tryserver.chromium.win', 'win_chromium_rel_swarming'),
  )
  return dict((master, {bot: set(['defaulttests'])})
              for master, bot in builders)
| bsd-3-clause |
petrus-v/odoo | addons/stock_invoice_directly/__init__.py | 374 | 1085 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import stock_invoice_directly
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
chaeplin/p2pool-drk | p2pool/util/graph.py | 226 | 7325 | from __future__ import absolute_import
from __future__ import division
import math
from p2pool.util import math as math2
class DataViewDescription(object):
    """Static shape of a data view: how many bins and, derived from the
    requested total time span, how wide each bin is."""
    def __init__(self, bin_count, total_width):
        self.bin_count = bin_count
        # total_width is the full span covered; each bin gets an equal slice.
        self.bin_width = total_width/bin_count
def _shift(x, shift, pad_item):
    """Shift list x right by `shift` slots (left when negative), padding the
    vacated slots with pad_item.  The result always has len(x) items.
    NOTE(review): assumes math2.clip clamps into [0, len(x)] -- confirm in
    p2pool.util.math.
    """
    lead = math2.clip(shift, (0, len(x)))
    trail = math2.clip(-shift, (0, len(x)))
    kept = x[trail:len(x) - lead]
    return [pad_item]*lead + kept + [pad_item]*trail
combine_bins = math2.add_dicts_ext(lambda (a1, b1), (a2, b2): (a1+a2, b1+b2), (0, 0))
nothing = object()  # sentinel meaning "no squash key was supplied"
def keep_largest(n, squash_key=nothing, key=lambda x: x, add_func=lambda a, b: a+b):
    """Return a function that trims a dict down to its n largest values
    (ranked by key()), folding each evicted entry into the squash_key entry
    via add_func when a squash key was given.
    NOTE: Python 2 only -- uses dict.iteritems and a tuple-parameter lambda.
    """
    def _(d):
        # Sort so the squash_key entry (if any) ranks first, then by value;
        # pops therefore evict the smallest non-squash entries first.
        items = sorted(d.iteritems(), key=lambda (k, v): (k != squash_key, key(v)), reverse=True)
        while len(items) > n:
            k, v = items.pop()
            if squash_key is not nothing:
                items[-1] = squash_key, add_func(items[-1][1], v)
        return dict(items)
    return _
def _shift_bins_so_t_is_not_past_end(bins, last_bin_end, bin_width, t):
    """Roll the bin list forward just enough that time t falls inside the
    newest bin; returns (new_bins, new_last_bin_end)."""
    needed = int(math.ceil((t - last_bin_end)/bin_width))
    shift = needed if needed > 0 else 0
    return _shift(bins, shift, {}), last_bin_end + shift*bin_width
class DataView(object):
    """A rolling window of equal-width time bins for one data stream.
    Each bin maps key -> (total, sample_count); bins[0] is the newest.
    NOTE: Python 2 only (iteritems/itervalues, tuple-parameter def, map).
    """
    def __init__(self, desc, ds_desc, last_bin_end, bins):
        assert len(bins) == desc.bin_count
        self.desc = desc  # DataViewDescription: bin_count / bin_width
        self.ds_desc = ds_desc  # owning DataStreamDescription
        self.last_bin_end = last_bin_end  # timestamp of the newest bin's end
        self.bins = bins
    def _add_datum(self, t, value):
        # Normalize scalars to the single-key 'null' form used internally.
        if not self.ds_desc.multivalues:
            value = {'null': value}
        elif self.ds_desc.multivalue_undefined_means_0 and 'null' not in value:
            value = dict(value, null=0) # use null to hold sample counter
        # Roll the window forward so t is not past the newest bin.
        self.bins, self.last_bin_end = _shift_bins_so_t_is_not_past_end(self.bins, self.last_bin_end, self.desc.bin_width, t)
        bin = int(math.floor((self.last_bin_end - t)/self.desc.bin_width))
        assert bin >= 0
        # Data older than the whole window is silently dropped.
        if bin < self.desc.bin_count:
            self.bins[bin] = self.ds_desc.keep_largest_func(combine_bins(self.bins[bin], dict((k, (v, 1)) for k, v in value.iteritems())))
    def get_data(self, t):
        """Render the window at time t as [(center, value, width, default)]
        per bin; gauges average their samples, counters divide by bin width
        to yield a rate."""
        bins, last_bin_end = _shift_bins_so_t_is_not_past_end(self.bins, self.last_bin_end, self.desc.bin_width, t)
        assert last_bin_end - self.desc.bin_width <= t <= last_bin_end
        def _((i, bin)):
            # The newest bin is clipped at t, so its width can be partial.
            left, right = last_bin_end - self.desc.bin_width*(i + 1), min(t, last_bin_end - self.desc.bin_width*i)
            center, width = (left+right)/2, right-left
            if self.ds_desc.is_gauge and self.ds_desc.multivalue_undefined_means_0:
                # Average over the largest per-key sample count in the bin.
                real_count = max([0] + [count for total, count in bin.itervalues()])
                if real_count == 0:
                    val = None
                else:
                    val = dict((k, total/real_count) for k, (total, count) in bin.iteritems())
                default = 0
            elif self.ds_desc.is_gauge and not self.ds_desc.multivalue_undefined_means_0:
                val = dict((k, total/count) for k, (total, count) in bin.iteritems())
                default = None
            else:
                # Counter stream: report totals as a rate per unit time.
                val = dict((k, total/width) for k, (total, count) in bin.iteritems())
                default = 0
            if not self.ds_desc.multivalues:
                val = None if val is None else val.get('null', default)
            return center, val, width, default
        return map(_, enumerate(bins))
class DataStreamDescription(object):
    """Configuration shared by a DataStream and the DataViews built from it
    (gauge vs. counter semantics, multivalue handling, per-bin trimming)."""
    def __init__(self, dataview_descriptions, is_gauge=True, multivalues=False, multivalues_keep=20, multivalues_squash_key=None, multivalue_undefined_means_0=False, default_func=None):
        self.dataview_descriptions = dataview_descriptions
        self.is_gauge = is_gauge
        self.multivalues = multivalues
        # Keep only the multivalues_keep largest keys per bin: gauges rank by
        # mean (total/count), counters by total.  Python-2-only lambda syntax.
        self.keep_largest_func = keep_largest(multivalues_keep, multivalues_squash_key, key=lambda (t, c): t/c if self.is_gauge else t, add_func=lambda (a1, b1), (a2, b2): (a1+a2, b1+b2))
        self.multivalue_undefined_means_0 = multivalue_undefined_means_0
        self.default_func = default_func
class DataStream(object):
    """One named metric plus the DataViews that aggregate it."""
    def __init__(self, desc, dataviews):
        self.desc = desc
        self.dataviews = dataviews
    def add_datum(self, t, value=1):
        # Fan the datum out to every view; dv_name is intentionally unused.
        for dv_name, dv in self.dataviews.iteritems():
            dv._add_datum(t, value)
class HistoryDatabase(object):
    """The full set of DataStreams, (de)serializable to a plain-dict `obj`
    so history survives restarts.  NOTE: Python 2 only (iteritems, map)."""
    @classmethod
    def from_obj(cls, datastream_descriptions, obj={}):
        # obj defaults to a shared mutable {}; it is only read here, never
        # mutated, so the classic mutable-default pitfall does not bite.
        def convert_bin(bin):
            # Upgrade legacy (total, count) bins to the {key: (total, count)}
            # dict form; already-dict bins pass through untouched.
            if isinstance(bin, dict):
                return bin
            total, count = bin
            if not isinstance(total, dict):
                total = {'null': total}
            return dict((k, (v, count)) for k, v in total.iteritems()) if count else {}
        def get_dataview(ds_name, ds_desc, dv_name, dv_desc):
            if ds_name in obj:
                ds_data = obj[ds_name]
                if dv_name in ds_data:
                    dv_data = ds_data[dv_name]
                    if dv_data['bin_width'] == dv_desc.bin_width and len(dv_data['bins']) == dv_desc.bin_count:
                        return DataView(dv_desc, ds_desc, dv_data['last_bin_end'], map(convert_bin, dv_data['bins']))
                # NOTE(review): when ds_name IS present but the stored view is
                # missing or mismatched, control falls through and returns
                # None instead of a fresh DataView -- looks suspicious;
                # confirm against upstream p2pool before relying on it.
            elif ds_desc.default_func is None:
                return DataView(dv_desc, ds_desc, 0, dv_desc.bin_count*[{}])
            else:
                return ds_desc.default_func(ds_name, ds_desc, dv_name, dv_desc, obj)
        return cls(dict(
            (ds_name, DataStream(ds_desc, dict(
                (dv_name, get_dataview(ds_name, ds_desc, dv_name, dv_desc))
                for dv_name, dv_desc in ds_desc.dataview_descriptions.iteritems()
            )))
            for ds_name, ds_desc in datastream_descriptions.iteritems()
        ))
    def __init__(self, datastreams):
        self.datastreams = datastreams
    def to_obj(self):
        """Serialize every view of every stream to nested plain dicts."""
        return dict((ds_name, dict((dv_name, dict(last_bin_end=dv.last_bin_end, bin_width=dv.desc.bin_width, bins=dv.bins))
            for dv_name, dv in ds.dataviews.iteritems())) for ds_name, ds in self.datastreams.iteritems())
def make_multivalue_migrator(multivalue_keys, post_func=lambda bins: bins):
    """Build a default_func that merges several legacy single-value streams
    (named by multivalue_keys: {new_key: old_stream_name}) into one
    multivalue DataView, optionally post-processing the merged bins.
    NOTE: Python 2 only (iteritems/itervalues, xrange)."""
    def _(ds_name, ds_desc, dv_name, dv_desc, obj):
        if not obj:
            last_bin_end = 0
            bins = dv_desc.bin_count*[{}]
        else:
            # Pull the matching view from each legacy stream, substituting an
            # empty view for streams absent from the serialized state.
            inputs = dict((k, obj.get(v, {dv_name: dict(bins=[{}]*dv_desc.bin_count, last_bin_end=0)})[dv_name]) for k, v in multivalue_keys.iteritems())
            last_bin_end = max(inp['last_bin_end'] for inp in inputs.itervalues()) if inputs else 0
            assert all(len(inp['bins']) == dv_desc.bin_count for inp in inputs.itervalues())
            # Align every input window onto the same last_bin_end timestamp.
            inputs = dict((k, dict(zip(['bins', 'last_bin_end'], _shift_bins_so_t_is_not_past_end(v['bins'], v['last_bin_end'], dv_desc.bin_width, last_bin_end)))) for k, v in inputs.iteritems())
            assert len(set(inp['last_bin_end'] for inp in inputs.itervalues())) <= 1
            bins = post_func([dict((k, v['bins'][i]['null']) for k, v in inputs.iteritems() if 'null' in v['bins'][i]) for i in xrange(dv_desc.bin_count)])
        return DataView(dv_desc, ds_desc, last_bin_end, bins)
    return _
| gpl-3.0 |
pointhi/searx | searx/engines/github.py | 7 | 1321 | """
Github (It)
@website https://github.com/
@provide-api yes (https://developer.github.com/v3/)
@using-api yes
@results JSON
@stable yes (using api)
@parse url, title, content
"""
from urllib import urlencode
from json import loads
from cgi import escape
# engine dependent config
categories = ['it']

# search-url
search_url = 'https://api.github.com/search/repositories?sort=stars&order=desc&{query}'  # noqa
accept_header = 'application/vnd.github.preview.text-match+json'


def request(query, params):
    """Fill in `params` with the GitHub repository-search URL and the
    preview Accept header, then hand the dict back to searx."""
    query_string = urlencode({'q': query})
    params['url'] = search_url.format(query=query_string)
    params['headers']['Accept'] = accept_header
    return params
# get response from search-request
def response(resp):
    """Parse the GitHub search JSON payload into searx result dicts."""
    payload = loads(resp.text)
    # no 'items' key means an error payload or no hits at all
    if 'items' not in payload:
        return []
    results = []
    for item in payload['items']:
        description = item['description']
        results.append({
            'url': item['html_url'],
            'title': item['name'],
            # descriptions can be null; escape and clip real ones to 500 chars
            'content': escape(description[:500]) if description else '',
        })
    return results
| agpl-3.0 |
rupran/ansible | lib/ansible/modules/windows/win_msg.py | 32 | 3434 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Jon Hawkesworth (@jhawkesworth) <figs@unity.demon.co.uk>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
# Module metadata consumed by ansible-doc and the plugin loader.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}

# Option documentation rendered by ansible-doc (YAML inside a raw string).
DOCUMENTATION = r'''
---
module: win_msg
version_added: "2.3"
short_description: Sends a message to logged in users on Windows hosts.
description:
    - Wraps the msg.exe command in order to send messages to Windows hosts.
options:
  to:
    description:
      - Who to send the message to. Can be a username, sessionname or sessionid.
    default: '*'
  display_seconds:
    description:
      - How long to wait for receiver to acknowledge message, in seconds.
    default: 10
  wait:
    description:
      - Whether to wait for users to respond.  Module will only wait for the number of seconds specified in display_seconds or 10 seconds if not specified.
        However, if I(wait) is true, the message is sent to each logged on user in turn, waiting for the user to either press 'ok' or for
        the timeout to elapse before moving on to the next user.
    required: false
    default: false
  msg:
    description:
      - The text of the message to be displayed.
    default: Hello world!
author: "Jon Hawkesworth (@jhawkesworth)"
notes:
   - This module must run on a windows host, so ensure your play targets windows
     hosts, or delegates to a windows host.
   - Messages are only sent to the local host where the module is run.
   - The module does not support sending to users listed in a file.
   - Setting wait to true can result in long run times on systems with many logged in users.
'''

EXAMPLES = r'''
# Warn logged in users of impending upgrade
win_msg:
  display_seconds: 60
  msg: "Automated upgrade about to start.  Please save your work and log off before {{ deployment_start_time }}"
'''

# Return-value documentation.  Fixed typo: "Test of the message" -> "Text".
RETURN = r'''
msg:
    description: Text of the message that was sent.
    returned: changed
    type: string
    sample: "Automated upgrade about to start.  Please save your work and log off before 22 July 2016 18:00:00"
display_seconds:
    description: Value of display_seconds module parameter.
    returned: success
    type: string
    sample: 10
runtime_seconds:
    description: How long the module took to run on the remote windows host.
    returned: success
    type: string
    sample: 22 July 2016 17:45:51
sent_localtime:
    description: local time from windows host when the message was sent.
    returned: success
    type: string
    sample: 22 July 2016 17:45:51
wait:
    description: Value of wait module parameter.
    returned: success
    type: boolean
    sample: false
'''
| gpl-3.0 |
lcostantino/healing-os | healing/handler_manager.py | 1 | 7067 | # -*- encoding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Load plugins
"""
from stevedore import extension
from healing import exceptions
from healing import task_scheduler as scheduler
from healing.handler_plugins import plugin_config
from healing.openstack.common import log
from healing.openstack.common import threadgroup
LOG = log.getLogger(__name__)
CURRENT_HANDLER = None
class HandlerManager(object):
    """Loads healing handler plugins and their restriction plugins through
    stevedore entry points, and dispatches plugin executions (optionally in
    a small thread pool) after checking configured restrictions."""
    def __init__(self):
        # Handler plugins are instantiated eagerly; restriction plugins are
        # only instantiated on demand (invoke_on_load=False).
        self.mgr = extension.ExtensionManager(namespace='healing.handlers',
                                              invoke_on_load=True,
                                              invoke_args=(),)
        self.restrictions = extension.ExtensionManager(
                                namespace='healing.handler_restrictions',
                                invoke_on_load=False,
                                invoke_args=(),)
        self.setup_config()
        self.thread_pool = threadgroup.ThreadGroup(thread_pool_size=5)
    def setup_config(self):
        """Build the plugin configuration manager from each restriction's
        declared CFG_PARAMS and the list of loaded handler names."""
        plain_data = {}
        for x in self.restrictions:
            plain_data[x.name] = x.plugin.CFG_PARAMS
        plugin_names = [x.name for x in self.mgr]
        self.config_manager = plugin_config.setup_config(plain_data,
                                                         plugin_names)
    def plugin_list(self):
        """Return [{'name': ..., 'description': ...}] for loaded handlers."""
        data_plugins = []
        for x in self.mgr:
            data_plugins.append({'name': x.name,
                                 'description': x.plugin.DESCRIPTION})
        return data_plugins
    def can_execute(self, name, *args, **kwargs):
        """Check the restrictions configured for handler `name`.

        Returns True if no restrictions are configured, or as soon as any
        single restriction passes (restrictions are OR-ed, not AND-ed);
        results cannot currently be combined or marked mandatory.  A
        restriction that raises is logged and treated as a failure.
        """
        checks = self.config_manager.get_restriction_config_for(name)
        if not checks:
            return True
        for x in checks:
            try:
                if not self.run_restriction(config=x.get('config'),
                                            name=x.get('name'),
                                            *args, **kwargs):
                    LOG.warning('Failed check for %s due to restriction %s',
                                name, x.get('name'))
                else:
                    LOG.info('Restriction ok %s: ', x.get('name'))
                    return True
            except Exception as e:
                LOG.exception(e)
        return False
    def start_plugins_group(self, ctx, plugin_group, block=False):
        """Start a batch of plugins on the thread pool and wait for them.

        :param ctx: request context passed to each plugin's start()
        :param plugin_group: list of ActionData objects naming the plugins
        :param block: passed through to each TaskRunner; a blocking task
            holds its thread until the action finishes, so this call can be
            slow until an async variant exists
        :returns: number of plugins actually started

        Plugins whose restriction checks fail (CannotStartPlugin) are
        silently skipped.  This call blocks until every launched thread
        finishes.
        """
        run_plugins = []
        runners = []  # NOTE(review): never used below -- dead local
        for x in plugin_group:
            try:
                plug = self._get_and_check_plugin(x.name, action=x)
                if plug:
                    run_plugins.append((plug, x))
            except exceptions.CannotStartPlugin:
                pass
        for plug_obj,data in run_plugins:
            # encapsulate in heat task ( why?... maybe it's usefull later)
            # but, anyways, clients are not ready for yield ops so...
            #this may block if check_for_status, we planned to have this check
            #in another service async, but.
            #runner.append(task)
            # we could launch the tasks, and do a check_if_finished at the end,
            # but if you need a task after the another it will break...
            # The thread will block until the action finish if it's blocking,
            # so it may impact the operation time on each sleep.
            # for poc is ok, but ....
            # Also, now that we have a notification listener, it could be used
            # to track action states and report real progress without blocking
            task = scheduler.TaskRunner(plug_obj.start, ctx=ctx, action=data,
                                        block=block)
            self.thread_pool.add_thread(task.start)
        self.thread_pool.wait()
        return len(run_plugins)
    def _get_and_check_plugin(self, name, *args, **kwargs):
        """Instantiate handler `name`, run its restriction checks, and
        return the instance -- or None when a restriction blocks it.

        If kwargs contains omit_checks, restrictions are not applied.
        Any error while loading/preparing the plugin is re-raised as
        CannotStartPlugin.
        """
        try:
            plugin = self.get_plugin(name)()
            plugin.prepare_for_checks(*args, **kwargs)
            if not kwargs.get('omit_checks', False):
                if not self.can_execute(name, last_action=plugin.last_action,
                                        *args, **kwargs):
                    return None
            return plugin
        except Exception as e:
            #add pluginnotfound exception or something
            LOG.exception(e)
            raise exceptions.CannotStartPlugin(name=name)
        # NOTE(review): unreachable -- every path above returns or raises.
        return None
    def start_plugin(self, name, *args, **kwargs):
        """Run a single handler synchronously after restriction checks.

        :param name: handler name (should be data.name)
        :param args: mandatory positionals -> ctx and data
        If kwargs contains omit_checks, restrictions are not applied.
        :returns: the plugin's start() result, or None if blocked.
        """
        #TODO: add with reraise exception
        plugin = self._get_and_check_plugin(name, *args, **kwargs)
        if plugin:
            return plugin.start(*args, **kwargs)
        return None
    def check_plugin_name(self, name):
        # Raises KeyError (from stevedore) when the handler is unknown.
        return self.mgr[name]
    def get_plugin(self, name):
        """Return the handler plugin class for `name` or raise NotFound."""
        try:
            return self.mgr[name].plugin
        except Exception:
            raise exceptions.NotFoundException('Plugin %s not found' % name)
    def run_restriction(self, name, *args, **kwargs):
        """Run one restriction; False means the restriction did not pass.
        Unknown restrictions are treated as passing."""
        restriction = self.get_restriction(name)
        if restriction:
            return restriction().can_execute(*args, **kwargs)
        return True
    def get_restriction(self, name):
        """Return the restriction plugin class for `name`, or None."""
        try:
            return self.restrictions[name].plugin
        except Exception as e:
            LOG.exception(e)
            return None
def get_plugin_handler():
    """Return the process-wide HandlerManager, building it on first use."""
    global CURRENT_HANDLER
    if CURRENT_HANDLER is None:
        CURRENT_HANDLER = HandlerManager()
    return CURRENT_HANDLER
| apache-2.0 |
kswiat/django | django/core/validators.py | 29 | 10826 | from __future__ import unicode_literals
import re
from django.core.exceptions import ValidationError
from django.utils.deconstruct import deconstructible
from django.utils.translation import ugettext_lazy as _, ungettext_lazy
from django.utils.encoding import force_text
from django.utils.ipv6 import is_valid_ipv6_address
from django.utils import six
from django.utils.six.moves.urllib.parse import urlsplit, urlunsplit
# These values, if given to validate(), will trigger the self.required check.
EMPTY_VALUES = (None, '', [], (), {})
@deconstructible
class RegexValidator(object):
    """Validate that a value matches (or, with inverse_match=True, does NOT
    match) a regular expression, raising ValidationError otherwise."""
    regex = ''
    message = _('Enter a valid value.')
    code = 'invalid'
    inverse_match = False
    flags = 0

    def __init__(self, regex=None, message=None, code=None, inverse_match=None, flags=None):
        # Only override the class-level defaults for arguments actually given.
        if regex is not None:
            self.regex = regex
        if message is not None:
            self.message = message
        if code is not None:
            self.code = code
        if inverse_match is not None:
            self.inverse_match = inverse_match
        if flags is not None:
            self.flags = flags
        if self.flags and not isinstance(self.regex, six.string_types):
            raise TypeError("If the flags are set, regex must be a regular expression string.")
        # Compile the regex if it was not passed pre-compiled.
        if isinstance(self.regex, six.string_types):
            self.regex = re.compile(self.regex, self.flags)

    def __call__(self, value):
        """Raise ValidationError when the match outcome equals inverse_match."""
        matched = bool(self.regex.search(force_text(value)))
        # The original spelled this `not (a is not b)`, which is exactly
        # `a is b` -- identity against the bool singletons.
        if self.inverse_match is matched:
            raise ValidationError(self.message, code=self.code)

    def __eq__(self, other):
        if not isinstance(other, RegexValidator):
            return False
        return (self.regex.pattern == other.regex.pattern and
                self.regex.flags == other.regex.flags and
                self.message == other.message and
                self.code == other.code and
                self.inverse_match == other.inverse_match)

    def __ne__(self, other):
        return not (self == other)
@deconstructible
class URLValidator(RegexValidator):
    """Validate URLs: the scheme is checked against `schemes`, the rest
    against the class regex, with an IDN (punycode) retry for the domain
    part when the plain match fails."""
    regex = re.compile(
        r'^(?:[a-z0-9\.\-]*)://' # scheme is validated separately
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}(?<!-)\.?)|' # domain...
        r'localhost|' # localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|' # ...or ipv4
        r'\[?[A-F0-9]*:[A-F0-9:]+\]?)' # ...or ipv6
        r'(?::\d+)?' # optional port
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)
    message = _('Enter a valid URL.')
    schemes = ['http', 'https', 'ftp', 'ftps']

    def __init__(self, schemes=None, **kwargs):
        super(URLValidator, self).__init__(**kwargs)
        if schemes is not None:
            self.schemes = schemes

    def __call__(self, value):
        value = force_text(value)
        # Check first if the scheme is valid.
        scheme = value.split('://')[0].lower()
        if scheme not in self.schemes:
            raise ValidationError(self.message, code=self.code)
        # Then check the full URL.
        try:
            super(URLValidator, self).__call__(value)
        except ValidationError as e:
            # Trivial case failed. Try for a possible IDN domain.
            if not value:
                raise
            scheme, netloc, path, query, fragment = urlsplit(value)
            try:
                netloc = netloc.encode('idna').decode('ascii')  # IDN -> ACE
            except UnicodeError:  # invalid domain part
                raise e
            url = urlunsplit((scheme, netloc, path, query, fragment))
            super(URLValidator, self).__call__(url)
        # (The original had a dead `else: url = value` branch here; the name
        # was never read afterwards, so it has been removed.)
def validate_integer(value):
    """Raise ValidationError unless int(value) succeeds."""
    try:
        int(value)
    except (TypeError, ValueError):
        raise ValidationError(_('Enter a valid integer.'), code='invalid')
@deconstructible
class EmailValidator(object):
    """Validate an email address: local part and domain by regex, with a
    whitelist for bare hostnames and an IDN / literal-IP fallback for the
    domain part."""
    message = _('Enter a valid email address.')
    code = 'invalid'
    user_regex = re.compile(
        r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*$"  # dot-atom
        r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"$)',  # quoted-string
        re.IGNORECASE)
    domain_regex = re.compile(
        # max length of the domain is 249: 254 (max email length) minus one
        # period, two characters for the TLD, @ sign, & one character before @.
        r'(?:[A-Z0-9](?:[A-Z0-9-]{0,247}[A-Z0-9])?\.)+(?:[A-Z]{2,6}|[A-Z0-9-]{2,}(?<!-))$',
        re.IGNORECASE)
    literal_regex = re.compile(
        # literal form, ipv4 or ipv6 address (SMTP 4.1.3)
        r'\[([A-f0-9:\.]+)\]$',
        re.IGNORECASE)
    domain_whitelist = ['localhost']  # domains accepted without regex checks
    def __init__(self, message=None, code=None, whitelist=None):
        if message is not None:
            self.message = message
        if code is not None:
            self.code = code
        if whitelist is not None:
            self.domain_whitelist = whitelist
    def __call__(self, value):
        value = force_text(value)
        if not value or '@' not in value:
            raise ValidationError(self.message, code=self.code)
        # rsplit: only the LAST @ separates user from domain.
        user_part, domain_part = value.rsplit('@', 1)
        if not self.user_regex.match(user_part):
            raise ValidationError(self.message, code=self.code)
        if (domain_part not in self.domain_whitelist and
                not self.validate_domain_part(domain_part)):
            # Try for possible IDN domain-part
            try:
                domain_part = domain_part.encode('idna').decode('ascii')
                if self.validate_domain_part(domain_part):
                    return
            except UnicodeError:
                pass
            raise ValidationError(self.message, code=self.code)
    def validate_domain_part(self, domain_part):
        """Return True when the domain is a valid hostname or a bracketed
        IPv4/IPv6 literal."""
        if self.domain_regex.match(domain_part):
            return True
        literal_match = self.literal_regex.match(domain_part)
        if literal_match:
            ip_address = literal_match.group(1)
            try:
                validate_ipv46_address(ip_address)
                return True
            except ValidationError:
                pass
        return False
    def __eq__(self, other):
        return (
            isinstance(other, EmailValidator) and
            (self.domain_whitelist == other.domain_whitelist) and
            (self.message == other.message) and
            (self.code == other.code)
        )
# Module-level singleton used by form/model fields.
validate_email = EmailValidator()
slug_re = re.compile(r'^[-a-zA-Z0-9_]+$')
validate_slug = RegexValidator(
    slug_re,
    _("Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens."),
    'invalid'
)
# Dotted quad with each octet constrained to 0-255.
ipv4_re = re.compile(r'^(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}$')
validate_ipv4_address = RegexValidator(ipv4_re, _('Enter a valid IPv4 address.'), 'invalid')
def validate_ipv6_address(value):
    """Raise ValidationError unless value is a valid IPv6 address."""
    # Delegates to django.utils.ipv6 rather than a regex.
    if not is_valid_ipv6_address(value):
        raise ValidationError(_('Enter a valid IPv6 address.'), code='invalid')
def validate_ipv46_address(value):
    """Accept either a valid IPv4 or a valid IPv6 address; raise a combined
    ValidationError when the value is neither."""
    for validator in (validate_ipv4_address, validate_ipv6_address):
        try:
            validator(value)
            return
        except ValidationError:
            continue
    raise ValidationError(_('Enter a valid IPv4 or IPv6 address.'), code='invalid')
# protocol name -> (validator list, error message) used by GenericIPAddressField.
ip_address_validator_map = {
    'both': ([validate_ipv46_address], _('Enter a valid IPv4 or IPv6 address.')),
    'ipv4': ([validate_ipv4_address], _('Enter a valid IPv4 address.')),
    'ipv6': ([validate_ipv6_address], _('Enter a valid IPv6 address.')),
}
def ip_address_validators(protocol, unpack_ipv4):
    """
    Depending on the given parameters returns the appropriate validators for
    the GenericIPAddressField.
    This code is here, because it is exactly the same for the model and the form field.
    """
    # unpack_ipv4 only makes sense when both protocols are accepted.
    if unpack_ipv4 and protocol != 'both':
        raise ValueError(
            "You can only use `unpack_ipv4` if `protocol` is set to 'both'")
    key = protocol.lower()
    if key not in ip_address_validator_map:
        raise ValueError("The protocol '%s' is unknown. Supported: %s"
                         % (protocol, list(ip_address_validator_map)))
    return ip_address_validator_map[key]
# Raw string: '\d' in a non-raw literal is an invalid escape sequence
# (DeprecationWarning, later SyntaxWarning, on Python 3); the compiled
# pattern is unchanged.
comma_separated_int_list_re = re.compile(r'^[\d,]+$')
validate_comma_separated_integer_list = RegexValidator(
    comma_separated_int_list_re,
    _('Enter only digits separated by commas.'),
    'invalid'
)
@deconstructible
class BaseValidator(object):
    """Compare a cleaned value against a limit; subclasses override
    `compare` (raise when it returns True) and optionally `clean` by
    assigning lambdas as class attributes."""
    compare = lambda self, a, b: a is not b
    clean = lambda self, x: x
    # message accepts %(limit_value)s, %(show_value)s and %(value)s params.
    message = _('Ensure this value is %(limit_value)s (it is %(show_value)s).')
    code = 'limit_value'
    def __init__(self, limit_value, message=None):
        self.limit_value = limit_value
        if message:
            self.message = message
    def __call__(self, value):
        cleaned = self.clean(value)
        params = {'limit_value': self.limit_value, 'show_value': cleaned, 'value': value}
        if self.compare(cleaned, self.limit_value):
            raise ValidationError(self.message, code=self.code, params=params)
    def __eq__(self, other):
        return (
            isinstance(other, self.__class__) and
            (self.limit_value == other.limit_value)
            and (self.message == other.message)
            and (self.code == other.code)
        )
@deconstructible
class MaxValueValidator(BaseValidator):
    """Fail when the value is greater than limit_value."""
    compare = lambda self, a, b: a > b
    message = _('Ensure this value is less than or equal to %(limit_value)s.')
    code = 'max_value'
@deconstructible
class MinValueValidator(BaseValidator):
    """Fail when the value is less than limit_value."""
    compare = lambda self, a, b: a < b
    message = _('Ensure this value is greater than or equal to %(limit_value)s.')
    code = 'min_value'
@deconstructible
class MinLengthValidator(BaseValidator):
    """Fail when len(value) is less than limit_value."""
    compare = lambda self, a, b: a < b
    clean = lambda self, x: len(x)
    message = ungettext_lazy(
        'Ensure this value has at least %(limit_value)d character (it has %(show_value)d).',
        'Ensure this value has at least %(limit_value)d characters (it has %(show_value)d).',
        'limit_value')
    code = 'min_length'
@deconstructible
class MaxLengthValidator(BaseValidator):
    """Fail when len(value) is greater than limit_value."""
    compare = lambda self, a, b: a > b
    clean = lambda self, x: len(x)
    message = ungettext_lazy(
        'Ensure this value has at most %(limit_value)d character (it has %(show_value)d).',
        'Ensure this value has at most %(limit_value)d characters (it has %(show_value)d).',
        'limit_value')
    code = 'max_length'
| bsd-3-clause |
blademainer/intellij-community | python/lib/Lib/mimetools.py | 92 | 6841 | """Various tools used by MIME-reading or MIME-writing programs."""
import os
import rfc822
import tempfile
__all__ = ["Message","choose_boundary","encode","decode","copyliteral",
"copybinary"]
class Message(rfc822.Message):
    """A derived class of rfc822.Message that knows about MIME headers and
    contains some hooks for decoding encoded and multipart messages."""
    def __init__(self, fp, seekable = 1):
        rfc822.Message.__init__(self, fp, seekable)
        self.encodingheader = \
            self.getheader('content-transfer-encoding')
        self.typeheader = \
            self.getheader('content-type')
        self.parsetype()
        self.parseplist()
    def parsetype(self):
        # Split Content-Type into type/subtype, defaulting to text/plain and
        # stashing any ';'-separated parameters for parseplist().
        str = self.typeheader
        if str is None:
            str = 'text/plain'
        if ';' in str:
            i = str.index(';')
            self.plisttext = str[i:]
            str = str[:i]
        else:
            self.plisttext = ''
        fields = str.split('/')
        for i in range(len(fields)):
            fields[i] = fields[i].strip().lower()
        self.type = '/'.join(fields)
        self.maintype = fields[0]
        self.subtype = '/'.join(fields[1:])
    def parseplist(self):
        # Parse ';key=value' parameters from the Content-Type remainder into
        # self.plist as 'key=value' strings (keys lowercased).
        str = self.plisttext
        self.plist = []
        while str[:1] == ';':
            str = str[1:]
            if ';' in str:
                # XXX Should parse quotes!
                end = str.index(';')
            else:
                end = len(str)
            f = str[:end]
            if '=' in f:
                i = f.index('=')
                f = f[:i].strip().lower() + \
                    '=' + f[i+1:].strip()
            self.plist.append(f.strip())
            str = str[end:]
    def getplist(self):
        """Return the Content-Type parameter list as 'key=value' strings."""
        return self.plist
    def getparam(self, name):
        """Return the (unquoted) value of parameter `name`, or None."""
        name = name.lower() + '='
        n = len(name)
        for p in self.plist:
            if p[:n] == name:
                return rfc822.unquote(p[n:])
        return None
    def getparamnames(self):
        """Return the lowercased names of all Content-Type parameters."""
        result = []
        for p in self.plist:
            i = p.find('=')
            if i >= 0:
                result.append(p[:i].lower())
        return result
    def getencoding(self):
        """Return the transfer encoding, lowercased; default '7bit'."""
        if self.encodingheader is None:
            return '7bit'
        return self.encodingheader.lower()
    def gettype(self):
        """Return the full 'maintype/subtype' string."""
        return self.type
    def getmaintype(self):
        return self.maintype
    def getsubtype(self):
        return self.subtype
# Utility functions
# -----------------
# Python 2 module names; Python 3 renamed these to _thread/_dummy_thread.
try:
    import thread
except ImportError:
    import dummy_thread as thread
_counter_lock = thread.allocate_lock()
del thread
_counter = 0
def _get_next_counter():
    """Return a process-unique, monotonically increasing integer
    (thread-safe via _counter_lock)."""
    global _counter
    _counter_lock.acquire()
    _counter += 1
    result = _counter
    _counter_lock.release()
    return result
_prefix = None  # cached "hostid.uid.pid" part, computed once per process
def choose_boundary():
    """Return a string usable as a multipart boundary.
    The string chosen is unique within a single program run, and
    incorporates the user id (if available), process id (if available),
    and current time. So it's very unlikely the returned string appears
    in message text, but there's no guarantee.
    The boundary contains dots so you have to quote it in the header."""
    global _prefix
    import time
    if _prefix is None:
        import socket
        try:
            hostid = socket.gethostbyname(socket.gethostname())
        except socket.gaierror:
            # Unresolvable hostname: fall back to the loopback address.
            hostid = '127.0.0.1'
        try:
            uid = repr(os.getuid())
        except AttributeError:
            # Platforms without getuid (e.g. Windows).
            uid = '1'
        try:
            pid = repr(os.getpid())
        except AttributeError:
            pid = '1'
        _prefix = hostid + '.' + uid + '.' + pid
    return "%s.%.3f.%d" % (_prefix, time.time(), _get_next_counter())
# Subroutines for decoding some common content-transfer-types
def decode(input, output, encoding):
    """Decode common content-transfer-encodings (base64, quopri, uuencode).

    Reads from file object `input`, writes to `output`; '7bit'/'8bit' copy
    verbatim.  Unknown encodings fall back to the external-command table
    below.  NOTE: Python 2 only (`raise X, msg` syntax).
    """
    if encoding == 'base64':
        import base64
        return base64.decode(input, output)
    if encoding == 'quoted-printable':
        import quopri
        return quopri.decode(input, output)
    if encoding in ('uuencode', 'x-uuencode', 'uue', 'x-uue'):
        import uu
        return uu.decode(input, output)
    if encoding in ('7bit', '8bit'):
        return output.write(input.read())
    if encoding in decodetab:
        pipethrough(input, decodetab[encoding], output)
    else:
        raise ValueError, \
              'unknown Content-Transfer-Encoding: %s' % encoding
def encode(input, output, encoding):
    """Encode common content-transfer-encodings (base64, quopri, uuencode).

    Mirror of decode(): reads `input`, writes `output`; '7bit'/'8bit' copy
    verbatim; unknown encodings go through the external-command table.
    NOTE: Python 2 only (`raise X, msg` syntax).
    """
    if encoding == 'base64':
        import base64
        return base64.encode(input, output)
    if encoding == 'quoted-printable':
        import quopri
        return quopri.encode(input, output, 0)
    if encoding in ('uuencode', 'x-uuencode', 'uue', 'x-uue'):
        import uu
        return uu.encode(input, output)
    if encoding in ('7bit', '8bit'):
        return output.write(input.read())
    if encoding in encodetab:
        pipethrough(input, encodetab[encoding], output)
    else:
        raise ValueError, \
              'unknown Content-Transfer-Encoding: %s' % encoding
# The following is no longer used for standard encodings
# XXX This requires that uudecode and mmencode are in $PATH
# Shell pipeline that rewrites the uuencode header so the payload lands in a
# predictable temp file, then cats and removes it.
# NOTE(review): the fixed /tmp/@uu.$$ name is predictable (symlink-attack
# prone); legacy code -- confirm before any reuse.
uudecode_pipe = '''(
TEMP=/tmp/@uu.$$
sed "s%^begin [0-7][0-7]* .*%begin 600 $TEMP%" | uudecode
cat $TEMP
rm $TEMP
)'''
# Maps Content-Transfer-Encoding names to external decode commands; consulted
# by decode() for encodings without an in-process implementation.
decodetab = {
    'uuencode': uudecode_pipe,
    'x-uuencode': uudecode_pipe,
    'uue': uudecode_pipe,
    'x-uue': uudecode_pipe,
    'quoted-printable': 'mmencode -u -q',
    'base64': 'mmencode -u -b',
    }
# Counterpart table of external encode commands, used by encode().
encodetab = {
    'x-uuencode': 'uuencode tempfile',
    'uuencode': 'uuencode tempfile',
    'x-uue': 'uuencode tempfile',
    'uue': 'uuencode tempfile',
    'quoted-printable': 'mmencode -q',
    'base64': 'mmencode -b',
    }
def pipeto(input, command):
    """Feed *input*, line by line, into shell *command* via a write pipe."""
    writer = os.popen(command, 'w')
    copyliteral(input, writer)
    writer.close()
def pipethrough(input, command, output):
    """Run *input* through shell *command* and copy its stdout to *output*.

    Stages the input in a temp file so the command can read it from disk.
    """
    fd, staging_path = tempfile.mkstemp()
    staging = os.fdopen(fd, 'w')
    copyliteral(input, staging)
    staging.close()
    reader = os.popen('%s <%s' % (command, staging_path), 'r')
    copybinary(reader, output)
    reader.close()
    os.unlink(staging_path)
def copyliteral(input, output):
    """Copy *input* to *output* one line at a time until EOF."""
    while True:
        chunk = input.readline()
        if not chunk:
            break
        output.write(chunk)
def copybinary(input, output):
    """Copy *input* to *output* in fixed-size binary chunks until EOF."""
    chunk_size = 8192
    while True:
        data = input.read(chunk_size)
        if not data:
            break
        output.write(data)
| apache-2.0 |
tedi3231/openerp | openerp/addons/l10n_bo/__init__.py | 52 | 1453 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 Cubic ERP - Teradata SAC. (http://cubicerp.com).
#
# WARNING: This program as such is intended to be used by professional
# programmers who take on the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
iwm911/plaso | plaso/formatters/msie_webcache.py | 1 | 3243 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Formatters for the MSIE WebCache ESE database events."""
from plaso.lib import eventdata
class MsieWebCacheContainerEventFormatter(eventdata.ConditionalEventFormatter):
  """Formatter for a MSIE WebCache ESE database Container_# table record."""

  # Event data type this formatter is registered for.
  DATA_TYPE = 'msie:webcache:container'

  # Conditional pieces: each line is emitted only when its attribute is set.
  FORMAT_STRING_PIECES = [
      u'Entry identifier: {entry_identifier}',
      u'Container identifier: {container_identifier}',
      u'Cache identifier: {cache_identifier}',
      u'URL: {url}',
      u'Redirect URL: {redirect_url}',
      u'Access count: {access_count}',
      u'Sync count: {sync_count}',
      u'Filename: {cached_filename}',
      u'File extension: {file_extension}',
      u'Cached file size: {cached_file_size}',
      u'Request headers: {request_headers}',
      u'Response headers: {response_headers}']

  # Compact variant used for one-line output.
  FORMAT_STRING_SHORT_PIECES = [
      u'URL: {url}']

  SOURCE_LONG = 'MSIE WebCache container record'
  SOURCE_SHORT = 'WEBHIST'
class MsieWebCacheContainersEventFormatter(eventdata.ConditionalEventFormatter):
  """Formatter for a MSIE WebCache ESE database Containers table record."""

  # Event data type this formatter is registered for.
  DATA_TYPE = 'msie:webcache:containers'

  # Conditional pieces: each line is emitted only when its attribute is set.
  FORMAT_STRING_PIECES = [
      u'Container identifier: {container_identifier}',
      u'Set identifier: {set_identifier}',
      u'Name: {name}',
      u'Directory: {directory}',
      u'Table: Container_{container_identifier}']

  # Compact variant used for one-line output.
  FORMAT_STRING_SHORT_PIECES = [
      u'Directory: {directory}']

  SOURCE_LONG = 'MSIE WebCache containers record'
  SOURCE_SHORT = 'WEBHIST'
class MsieWebCacheLeakFilesEventFormatter(eventdata.ConditionalEventFormatter):
  """Formatter for a MSIE WebCache ESE database LeakFiles table record."""

  # Event data type this formatter is registered for.
  DATA_TYPE = 'msie:webcache:leak_file'

  FORMAT_STRING_PIECES = [
      u'Leak identifier: {leak_identifier}',
      u'Filename: {cached_filename}']

  FORMAT_STRING_SHORT_PIECES = [
      u'Filename: {cached_filename}']

  # Fixed copy-paste error: this class formats LeakFiles records, but the
  # long source description previously read "partitions".
  SOURCE_LONG = 'MSIE WebCache leak files record'
  SOURCE_SHORT = 'WEBHIST'
class MsieWebCachePartitionsEventFormatter(eventdata.ConditionalEventFormatter):
  """Formatter for a MSIE WebCache ESE database Partitions table record."""

  # Event data type this formatter is registered for.
  DATA_TYPE = 'msie:webcache:partitions'

  # Conditional pieces: each line is emitted only when its attribute is set.
  FORMAT_STRING_PIECES = [
      u'Partition identifier: {partition_identifier}',
      u'Partition type: {partition_type}',
      u'Directory: {directory}',
      u'Table identifier: {table_identifier}']

  # Compact variant used for one-line output.
  FORMAT_STRING_SHORT_PIECES = [
      u'Directory: {directory}']

  SOURCE_LONG = 'MSIE WebCache partitions record'
  SOURCE_SHORT = 'WEBHIST'
| apache-2.0 |
g12mcgov/home-assistant | homeassistant/components/sensor/rpi_gpio.py | 7 | 3869 | # -*- coding: utf-8 -*-
"""
homeassistant.components.sensor.rpi_gpio
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Allows to configure a binary state sensor using RPi GPIO.
Note: To use RPi GPIO, Home Assistant must be run as root.
sensor:
platform: rpi_gpio
pull_mode: "UP"
value_high: "Active"
value_low: "Inactive"
ports:
11: PIR Office
12: PIR Bedroom
Variables:
pull_mode
*Optional
The internal pull to use (UP or DOWN). Default is UP.
value_high
*Optional
The value of the sensor when the port is HIGH. Default is "HIGH".
value_low
*Optional
The value of the sensor when the port is LOW. Default is "LOW".
bouncetime
*Optional
The time in milliseconds for port debouncing. Default is 50ms.
ports
*Required
An array specifying the GPIO ports to use and the name to use in the frontend.
"""
import logging
from homeassistant.helpers.entity import Entity
try:
import RPi.GPIO as GPIO
except ImportError:
GPIO = None
from homeassistant.const import (DEVICE_DEFAULT_NAME,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP)
DEFAULT_PULL_MODE = "UP"
DEFAULT_VALUE_HIGH = "HIGH"
DEFAULT_VALUE_LOW = "LOW"
DEFAULT_BOUNCETIME = 50
REQUIREMENTS = ['RPi.GPIO==0.5.11']
_LOGGER = logging.getLogger(__name__)
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
    """ Sets up the Raspberry PI GPIO ports. """
    if GPIO is None:
        _LOGGER.error('RPi.GPIO not available. rpi_gpio ports ignored.')
        return
    # pylint: disable=no-member
    GPIO.setmode(GPIO.BCM)
    pull_mode = config.get('pull_mode', DEFAULT_PULL_MODE)
    value_high = config.get('value_high', DEFAULT_VALUE_HIGH)
    value_low = config.get('value_low', DEFAULT_VALUE_LOW)
    bouncetime = config.get('bouncetime', DEFAULT_BOUNCETIME)
    ports = config.get('ports')

    # One binary sensor entity per configured GPIO port.
    entities = [
        RPiGPIOSensor(port_name, port_num, pull_mode,
                      value_high, value_low, bouncetime)
        for port_num, port_name in ports.items()]
    add_devices(entities)

    def cleanup_gpio(event):
        """ Stuff to do before stop home assistant. """
        # pylint: disable=no-member
        GPIO.cleanup()

    def prepare_gpio(event):
        """ Stuff to do when home assistant starts. """
        hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, cleanup_gpio)

    hass.bus.listen_once(EVENT_HOMEASSISTANT_START, prepare_gpio)
# pylint: disable=too-many-arguments, too-many-instance-attributes
class RPiGPIOSensor(Entity):
    """ Sets up the Raspberry PI GPIO ports. """
    def __init__(self, port_name, port_num, pull_mode,
                 value_high, value_low, bouncetime):
        # pylint: disable=no-member
        self._name = port_name or DEVICE_DEFAULT_NAME
        self._port = port_num
        # Map the config string to the RPi.GPIO pull-up/down constant;
        # anything other than "DOWN" falls back to pull-up.
        self._pull = GPIO.PUD_DOWN if pull_mode == "DOWN" else GPIO.PUD_UP
        self._vhigh = value_high
        self._vlow = value_low
        self._bouncetime = bouncetime
        GPIO.setup(self._port, GPIO.IN, pull_up_down=self._pull)
        # Read the initial port state once at startup.
        self._state = self._vhigh if GPIO.input(self._port) else self._vlow
        def edge_callback(channel):
            """ port changed state """
            # pylint: disable=no-member
            self._state = self._vhigh if GPIO.input(channel) else self._vlow
            self.update_ha_state()
        # Fire on both rising and falling edges, debounced in hardware-driver.
        GPIO.add_event_detect(
            self._port,
            GPIO.BOTH,
            callback=edge_callback,
            bouncetime=self._bouncetime)
    @property
    def should_poll(self):
        """ No polling needed. """
        # State is pushed via the GPIO edge callback above.
        return False
    @property
    def name(self):
        """ The name of the sensor. """
        return self._name
    @property
    def state(self):
        """ Returns the state of the entity. """
        return self._state
| mit |
fengyuanjs/catawampus | dm/management_server.py | 6 | 5844 | #!/usr/bin/python
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TR-069 has mandatory attribute names that don't comply with policy
#pylint: disable-msg=C6409
"""Implementation of tr-181 Device.ManagementServer hierarchy of objects.
Handles the Device.ManagementServer portion of TR-181, as described
in http://www.broadband-forum.org/cwmp/tr-181-2-2-0.html
"""
__author__ = 'dgentry@google.com (Denton Gentry)'
import tr.core
import tr.tr098_v1_4
import tr.tr181_v2_2
BASEMGMT181 = tr.tr181_v2_2.Device_v2_2.Device.ManagementServer
BASE98IGD = tr.tr098_v1_4.InternetGatewayDevice_v1_10.InternetGatewayDevice
BASEMGMT98 = BASE98IGD.ManagementServer
class ManagementServer181(BASEMGMT181):
  """Implementation of tr-181 Device.ManagementServer."""

  # Attribute names transparently proxied to the wrapped management object;
  # everything else is handled by the tr-181 base class.
  MGMTATTRS = frozenset([
      'CWMPRetryIntervalMultiplier', 'CWMPRetryMinimumWaitInterval',
      'ConnectionRequestPassword', 'ConnectionRequestURL',
      'ConnectionRequestUsername', 'DefaultActiveNotificationThrottle',
      'EnableCWMP', 'ParameterKey', 'Password', 'PeriodicInformEnable',
      'PeriodicInformInterval', 'PeriodicInformTime', 'URL', 'Username'])
  def __init__(self, mgmt):
    """Proxy object for tr-181 ManagementServer support.
    All requests for active, supported parameters pass through to the
    underlying management server implementation.
    Args:
      mgmt: the real management configuration object.
    """
    super(ManagementServer181, self).__init__()
    self.mgmt = mgmt
    # Parameters this device does not implement; Unexport removes them from
    # the exported data model.
    self.Unexport('DownloadProgressURL')
    self.Unexport('KickURL')
    self.Unexport('NATDetected')
    self.Unexport('STUNMaximumKeepAlivePeriod')
    self.Unexport('STUNMinimumKeepAlivePeriod')
    self.Unexport('STUNPassword')
    self.Unexport('STUNServerAddress')
    self.Unexport('STUNServerPort')
    self.Unexport('STUNUsername')
    self.Unexport('UDPConnectionRequestAddress')
    self.ManageableDeviceList = {}
    self.ManageableDeviceNumberOfEntries = 0
  def StartTransaction(self):
    # Transaction control is delegated to the wrapped object.
    self.mgmt.StartTransaction()
  def AbandonTransaction(self):
    self.mgmt.AbandonTransaction()
  def CommitTransaction(self):
    self.mgmt.CommitTransaction()
  @property
  def STUNEnable(self):
    # STUN is not supported by this device.
    return False
  @property
  def UpgradesManaged(self):
    # The ACS manages firmware upgrades.
    return True
  def __getattr__(self, name):
    # NOTE(review): raising KeyError here (and in __delattr__) deviates from
    # the getattr/hasattr protocol, which expects AttributeError; confirm no
    # caller relies on KeyError before changing.
    if name in self.MGMTATTRS:
      return getattr(self.mgmt, name)
    else:
      raise KeyError('No such attribute %s' % name)
  def __setattr__(self, name, value):
    # Proxied attributes are written through to self.mgmt; note this relies
    # on 'mgmt' itself not being in MGMTATTRS.
    if name in self.MGMTATTRS:
      setattr(self.mgmt, name, value)
    else:
      BASEMGMT181.__setattr__(self, name, value)
  def __delattr__(self, name):
    if name in self.MGMTATTRS:
      return delattr(self.mgmt, name)
    else:
      return BASEMGMT181.__delattr__(self, name)
class ManagementServer98(BASEMGMT98):
  """Implementation of tr-98 InternetGatewayDevice.ManagementServer."""

  # Attribute names transparently proxied to the wrapped management object;
  # keep in sync with ManagementServer181.MGMTATTRS.
  MGMTATTRS = frozenset([
      'CWMPRetryIntervalMultiplier', 'CWMPRetryMinimumWaitInterval',
      'ConnectionRequestPassword', 'ConnectionRequestURL',
      'ConnectionRequestUsername', 'DefaultActiveNotificationThrottle',
      'EnableCWMP', 'ParameterKey', 'Password', 'PeriodicInformEnable',
      'PeriodicInformInterval', 'PeriodicInformTime', 'URL', 'Username'])
  def __init__(self, mgmt):
    """Proxy object for tr-98 ManagementServer support.
    All requests for active, supported parameters pass through to the
    underlying management server implementation.
    Args:
      mgmt: the real management configuration object.
    """
    super(ManagementServer98, self).__init__()
    self.mgmt = mgmt
    # Parameters this device does not implement; Unexport removes them from
    # the exported data model.
    self.Unexport('AliasBasedAddressing')
    self.Unexport('AutoCreateInstances')
    self.Unexport('DownloadProgressURL')
    self.Unexport('InstanceMode')
    self.Unexport('KickURL')
    self.Unexport('ManageableDeviceNotificationLimit')
    self.Unexport('NATDetected')
    self.Unexport('STUNEnable')
    self.Unexport('STUNMaximumKeepAlivePeriod')
    self.Unexport('STUNMinimumKeepAlivePeriod')
    self.Unexport('STUNPassword')
    self.Unexport('STUNServerAddress')
    self.Unexport('STUNServerPort')
    self.Unexport('STUNUsername')
    self.Unexport('UDPConnectionRequestAddress')
    self.Unexport('UDPConnectionRequestAddressNotificationLimit')
    self.EmbeddedDeviceList = {}
    self.ManageableDeviceList = {}
    self.VirtualDeviceList = {}
  def StartTransaction(self):
    # Transaction control is delegated to the wrapped object.
    self.mgmt.StartTransaction()
  def AbandonTransaction(self):
    self.mgmt.AbandonTransaction()
  def CommitTransaction(self):
    self.mgmt.CommitTransaction()
  @property
  def ManageableDeviceNumberOfEntries(self):
    return 0
  @property
  def UpgradesManaged(self):
    # The ACS manages firmware upgrades.
    return True
  @property
  def VirtualDeviceNumberOfEntries(self):
    return 0
  def __getattr__(self, name):
    # NOTE(review): raising KeyError here (and in __delattr__) deviates from
    # the getattr/hasattr protocol, which expects AttributeError; confirm no
    # caller relies on KeyError before changing.
    if name in self.MGMTATTRS:
      return getattr(self.mgmt, name)
    else:
      raise KeyError('No such attribute %s' % name)
  def __setattr__(self, name, value):
    # Proxied attributes are written through to self.mgmt.
    if name in self.MGMTATTRS:
      return setattr(self.mgmt, name, value)
    else:
      return BASEMGMT98.__setattr__(self, name, value)
  def __delattr__(self, name):
    if name in self.MGMTATTRS:
      return delattr(self.mgmt, name)
    else:
      return BASEMGMT98.__delattr__(self, name)
def main():
  """No-op entry point; this module is intended to be imported."""
  return None


if __name__ == '__main__':
  main()
| apache-2.0 |
nekulin/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/email/quoprimime.py | 54 | 10839 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Ben Gertzfield
# Contact: email-sig@python.org
"""Quoted-printable content transfer encoding per RFCs 2045-2047.
This module handles the content transfer encoding method defined in RFC 2045
to encode US ASCII-like 8-bit data called `quoted-printable'. It is used to
safely encode text that is in a character set similar to the 7-bit US ASCII
character set, but that includes some 8-bit characters that are normally not
allowed in email bodies or headers.
Quoted-printable is very space-inefficient for encoding binary files; use the
email.base64MIME module for that instead.
This module provides an interface to encode and decode both headers and bodies
with quoted-printable encoding.
RFC 2045 defines a method for including character set information in an
`encoded-word' in a header. This method is commonly used for 8-bit real names
in To:/From:/Cc: etc. fields, as well as Subject: lines.
This module does not do the line wrapping or end-of-line character
conversion necessary for proper internationalized headers; it only
does dumb encoding and decoding. To deal with the various line
wrapping issues, use the email.Header module.
"""
__all__ = [
'body_decode',
'body_encode',
'body_quopri_check',
'body_quopri_len',
'decode',
'decodestring',
'encode',
'encodestring',
'header_decode',
'header_encode',
'header_quopri_check',
'header_quopri_len',
'quote',
'unquote',
]
import re
from string import hexdigits
from email.utils import fix_eols
CRLF = '\r\n'
NL = '\n'
# See also Charset.py
MISC_LEN = 7
hqre = re.compile(r'[^-a-zA-Z0-9!*+/ ]')
bqre = re.compile(r'[^ !-<>-~\t]')
# Helpers
def header_quopri_check(c):
    """Return True if the character should be escaped with header quopri."""
    return hqre.match(c) is not None
def body_quopri_check(c):
    """Return True if the character should be escaped with body quopri."""
    return bqre.match(c) is not None
def header_quopri_len(s):
    """Return the length of str when it is encoded with header quopri."""
    # Characters needing escape expand to three bytes ("=XX"); others stay 1.
    return sum(3 if hqre.match(ch) else 1 for ch in s)
def body_quopri_len(str):
    """Return the length of str when it is encoded with body quopri."""
    # NOTE(review): the parameter shadows the builtin 'str'; kept unchanged
    # for interface compatibility with existing keyword callers.
    return sum(3 if bqre.match(ch) else 1 for ch in str)
def _max_append(L, s, maxlen, extra=''):
if not L:
L.append(s.lstrip())
elif len(L[-1]) + len(s) <= maxlen:
L[-1] += extra + s
else:
L.append(s.lstrip())
def unquote(s):
    """Turn a string in the form =AB to the ASCII character with value 0xab"""
    code = int(s[1:3], 16)
    return chr(code)
def quote(c):
    """Return the quoted-printable escape ("=XX") for character c."""
    return '={0:02X}'.format(ord(c))
def header_encode(header, charset="iso-8859-1", keep_eols=False,
                  maxlinelen=76, eol=NL):
    """Encode a single header line with quoted-printable (like) encoding.
    Defined in RFC 2045, this `Q' encoding is similar to quoted-printable, but
    used specifically for email header fields to allow charsets with mostly 7
    bit characters (and some 8 bit) to remain more or less readable in non-RFC
    2045 aware mail clients.
    charset names the character set to use to encode the header.  It defaults
    to iso-8859-1.
    The resulting string will be in the form:
    "=?charset?q?I_f=E2rt_in_your_g=E8n=E8ral_dire=E7tion?\\n
      =?charset?q?Silly_=C8nglish_Kn=EEghts?="
    with each line wrapped safely at, at most, maxlinelen characters (defaults
    to 76 characters).  If maxlinelen is None, the entire string is encoded in
    one chunk with no splitting.
    End-of-line characters (\\r, \\n, \\r\\n) will be automatically converted
    to the canonical email line separator \\r\\n unless the keep_eols
    parameter is True (the default is False).
    Each line of the header will be terminated in the value of eol, which
    defaults to "\\n".  Set this to "\\r\\n" if you are using the result of
    this function directly in email.
    """
    # Return empty headers unchanged
    if not header:
        return header
    if not keep_eols:
        header = fix_eols(header)
    # Quopri encode each line, in encoded chunks no greater than maxlinelen in
    # length, after the RFC chrome is added in.
    quoted = []
    if maxlinelen is None:
        # An obnoxiously large number that's good enough
        max_encoded = 100000
    else:
        # Per-chunk budget: subtract the "=?charset?q?...?=" wrapper overhead.
        max_encoded = maxlinelen - len(charset) - MISC_LEN - 1
    for c in header:
        # Space may be represented as _ instead of =20 for readability
        if c == ' ':
            _max_append(quoted, '_', max_encoded)
        # These characters can be included verbatim
        elif not hqre.match(c):
            _max_append(quoted, c, max_encoded)
        # Otherwise, replace with hex value like =E2
        else:
            _max_append(quoted, "=%02X" % ord(c), max_encoded)
    # Now add the RFC chrome to each encoded chunk and glue the chunks
    # together.  BAW: should we be able to specify the leading whitespace in
    # the joiner?
    joiner = eol + ' '
    return joiner.join(['=?%s?q?%s?=' % (charset, line) for line in quoted])
def encode(body, binary=False, maxlinelen=76, eol=NL):
    """Encode with quoted-printable, wrapping at maxlinelen characters.
    If binary is False (the default), end-of-line characters will be converted
    to the canonical email end-of-line sequence \\r\\n.  Otherwise they will
    be left verbatim.
    Each line of encoded text will end with eol, which defaults to "\\n".  Set
    this to "\\r\\n" if you will be using the result of this function directly
    in an email.
    Each line will be wrapped at, at most, maxlinelen characters (defaults to
    76 characters).  Long lines will have the `soft linefeed' quoted-printable
    character "=" appended to them, so the decoded text will be identical to
    the original text.
    """
    if not body:
        return body
    if not binary:
        body = fix_eols(body)
    # BAW: We're accumulating the body text by string concatenation.  That
    # can't be very efficient, but I don't have time now to rewrite it.  It
    # just feels like this algorithm could be more efficient.
    encoded_body = ''
    lineno = -1
    # Preserve line endings here so we can check later to see an eol needs to
    # be added to the output later.
    lines = body.splitlines(1)
    for line in lines:
        # But strip off line-endings for processing this line.
        if line.endswith(CRLF):
            line = line[:-2]
        elif line[-1] in CRLF:
            line = line[:-1]
        lineno += 1
        encoded_line = ''
        # 'prev' tracks the last raw character seen; trailing whitespace
        # needs special handling per RFC 2045.
        prev = None
        linelen = len(line)
        # Now we need to examine every character to see if it needs to be
        # quopri encoded.  BAW: again, string concatenation is inefficient.
        for j in range(linelen):
            c = line[j]
            prev = c
            if bqre.match(c):
                c = quote(c)
            elif j+1 == linelen:
                # Check for whitespace at end of line; special case
                if c not in ' \t':
                    encoded_line += c
                prev = c
                continue
            # Check to see to see if the line has reached its maximum length
            if len(encoded_line) + len(c) >= maxlinelen:
                # Soft line break: "=" + eol continues the logical line.
                encoded_body += encoded_line + '=' + eol
                encoded_line = ''
            encoded_line += c
        # Now at end of line..
        if prev and prev in ' \t':
            # Special case for whitespace at end of file
            if lineno + 1 == len(lines):
                prev = quote(prev)
                if len(encoded_line) + len(prev) > maxlinelen:
                    encoded_body += encoded_line + '=' + eol + prev
                else:
                    encoded_body += encoded_line + prev
            # Just normal whitespace at end of line
            else:
                encoded_body += encoded_line + prev + '=' + eol
            encoded_line = ''
        # Now look at the line we just finished and it has a line ending, we
        # need to add eol to the end of the line.
        if lines[lineno].endswith(CRLF) or lines[lineno][-1] in CRLF:
            encoded_body += encoded_line + eol
        else:
            encoded_body += encoded_line
        encoded_line = ''
    return encoded_body
# For convenience and backwards compatibility w/ standard base64 module
body_encode = encode
encodestring = encode
# BAW: I'm not sure if the intent was for the signature of this function to be
# the same as base64MIME.decode() or not...
def decode(encoded, eol=NL):
    """Decode a quoted-printable string.
    Lines are separated with eol, which defaults to \\n.
    """
    if not encoded:
        return encoded
    # BAW: see comment in encode() above.  Again, we're building up the
    # decoded string with string concatenation, which could be done much more
    # efficiently.
    decoded = ''
    for line in encoded.splitlines():
        line = line.rstrip()
        if not line:
            decoded += eol
            continue
        i = 0
        n = len(line)
        while i < n:
            c = line[i]
            if c != '=':
                decoded += c
                i += 1
            # Otherwise, c == "=".  Are we at the end of the line?  If so, add
            # a soft line break.
            elif i+1 == n:
                # Soft break: swallow the "=" and join with the next line.
                i += 1
                continue
            # Decode if in form =AB
            elif i+2 < n and line[i+1] in hexdigits and line[i+2] in hexdigits:
                decoded += unquote(line[i:i+3])
                i += 3
            # Otherwise, not in form =AB, pass literally
            else:
                decoded += c
                i += 1
            if i == n:
                decoded += eol
    # Special case if original string did not end with eol
    if not encoded.endswith(eol) and decoded.endswith(eol):
        decoded = decoded[:-1]
    return decoded
# For convenience and backwards compatibility w/ standard base64 module
body_decode = decode
decodestring = decode
def _unquote_match(match):
    """Turn a match in the form =AB to the ASCII character with value 0xab"""
    return unquote(match.group(0))
# Header decoding is done a bit differently
def header_decode(s):
    """Decode a string encoded with RFC 2045 MIME header `Q' encoding.
    This function does not parse a full MIME header value encoded with
    quoted-printable (like =?iso-8895-1?q?Hello_World?=) -- please use
    the high level email.Header class for that functionality.
    """
    s = s.replace('_', ' ')
    # Match only valid hex escapes.  The previous pattern r'=\w{2}' also
    # matched non-hex sequences such as '=zz', making unquote() raise
    # ValueError on malformed input; modern email.quoprimime uses the same
    # hex-only pattern as below.
    return re.sub(r'=[a-fA-F0-9]{2}', _unquote_match, s)
| apache-2.0 |
dlt-rilmta/hunlp-GATE | attic/playground/choosana/chooseana.py | 3 | 3326 | #!/usr/bin/env python
# Input: .stem file (tsv: surface form, lemma, pos-tag, morphanas delimited by "||")
# Output: .stem file with only 1 ana in the last column (see heuristics below) and the lemma column is the lemma from the chosen ana
import sys
def get_lemma(ana, surf):
    '''Return the lemma for an ana: either the lemma it contains or the surf if it's not the proper format (lemma/morphinfo)
    Attempt to assemble lemma if it's a compound.
    '''
    if '+' in ana:
        # Compound: glue together the lemma part of every member.
        assembled = ''.join(part.split('/')[0] for part in ana.split('+'))
        return assembled if assembled else surf
    fields = ana.split('/')
    # Require a non-empty lemma followed by morph info; otherwise fall back.
    if len(fields) > 1 and fields[0]:
        return fields[0]
    return surf
def get_minimal_partition(l, k):
    '''for a list l, return the sublist whose elements all have with the minimal value using key function k'''
    ordered = sorted(l, key=k)
    # Smallest key value; IndexError on an empty list, as before.
    smallest = k(ordered[0])
    return [item for item in ordered if k(item) == smallest]
if len(sys.argv) != 2:
    sys.exit('Missing input file name')
# Process the .stem file line by line, narrowing the '||'-separated analyses
# down to a single one via successive filters.
for line in open(sys.argv[1]):
    line = line.rstrip()
    # Blank lines and comments pass through unchanged.
    if not line or line.startswith('#'):
        print(line)
        continue
    t = line.split('\t')
    if len(t) != 4:
        sys.stderr.write('Incorrect line format: ' + line)
        break
    surf = t[0]
    pos = t[2]
    anas = list(set(t[3].split('||'))) # 0th filter: uniq the anas
    if len(anas) == 1: # there was only 1 ana anyway: nothing to do
        print('\t'.join([surf, get_lemma(anas[0], surf), pos, anas[0]]))
        continue
    # 1st filter: filter out anas that don't end with /PoS-tag or don't contain PoS-tag
    anas2 = [x for x in anas if x.endswith('/' + pos)]
    if not anas2: # if no anas left that end with /postag:
        anas2 = [x for x in anas if pos in x ] # try again w/ more relaxed criterion: anas should only contain postag
        if not anas2: # still no anas left with pos:
            #sys.stderr.write('WARNING: no anas with PoS-tag: ' + line + '\n')
            anas2 = anas # give up on filtering using PoS-tag, carry on with all anas
    if len(anas2) == 1: # only 1 left: we're done
        print('\t'.join([surf, get_lemma(anas2[0], surf), pos, anas2[0]]))
        continue
    # 2nd filter: filter out anas that have more number of compounding operations than minimal ana (w/ respect to no. of comp. ops.)
    anas3 = get_minimal_partition(anas2, lambda x: x.count('+'))
    if len(anas3) == 1: # only 1 left: we're done
        print('\t'.join([surf, get_lemma(anas3[0], surf), pos, anas3[0]]))
        continue
    # 3rd filter: filter out anas that have more number of derivation operations than minimal ana (w/ respect to no. of deriv. ops.)
    anas4 = get_minimal_partition(anas3, lambda x: x.count('['))
    if len(anas4) == 1: # only 1 left: we're done
        print('\t'.join([surf, get_lemma(anas4[0], surf), pos, anas4[0]]))
        continue
    # 4th filter: choose anas with the longest lemma
    anas5 = sorted(anas4, key=lambda x: len(x.split('/')[0]), reverse=True)
    if len(anas5) == 1: # only 1 left: we're done
        print('\t'.join([surf, get_lemma(anas5[0], surf), pos, anas5[0]]))
        continue
    # 5th filter: choose 1st ana whose lemma has more similar initial capitalization to surface form
    anas6 = sorted(anas5, key=lambda x: 1 if x.split('/')[0][0] == surf[0] else 0, reverse=True)
    print('\t'.join([surf, get_lemma(anas6[0], surf), pos, anas6[0]]))
| gpl-3.0 |
jumpojoy/neutron | neutron/tests/unit/plugins/ml2/test_plugin.py | 3 | 83924 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import fixtures
import mock
import six
import testtools
import uuid
import webob
from oslo_db import exception as db_exc
from oslo_utils import uuidutils
from sqlalchemy.orm import exc as sqla_exc
from neutron.callbacks import registry
from neutron.common import constants
from neutron.common import exceptions as exc
from neutron.common import utils
from neutron import context
from neutron.db import api as db_api
from neutron.db import db_base_plugin_v2 as base_plugin
from neutron.db import l3_db
from neutron.db import models_v2
from neutron.extensions import external_net
from neutron.extensions import multiprovidernet as mpnet
from neutron.extensions import portbindings
from neutron.extensions import providernet as pnet
from neutron import manager
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2.common import exceptions as ml2_exc
from neutron.plugins.ml2 import config
from neutron.plugins.ml2 import db as ml2_db
from neutron.plugins.ml2 import driver_api
from neutron.plugins.ml2 import driver_context
from neutron.plugins.ml2.drivers import type_vlan
from neutron.plugins.ml2 import models
from neutron.plugins.ml2 import plugin as ml2_plugin
from neutron.services.qos import qos_consts
from neutron.tests import base
from neutron.tests.unit import _test_extension_portbindings as test_bindings
from neutron.tests.unit.agent import test_securitygroups_rpc as test_sg_rpc
from neutron.tests.unit.db import test_allowedaddresspairs_db as test_pair
from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin
from neutron.tests.unit.extensions import test_extra_dhcp_opt as test_dhcpopts
from neutron.tests.unit.plugins.ml2.drivers import mechanism_logger as \
mech_logger
from neutron.tests.unit.plugins.ml2.drivers import mechanism_test as mech_test
config.cfg.CONF.import_opt('network_vlan_ranges',
'neutron.plugins.ml2.drivers.type_vlan',
group='ml2_type_vlan')
PLUGIN_NAME = 'neutron.plugins.ml2.plugin.Ml2Plugin'
DEVICE_OWNER_COMPUTE = 'compute:None'
HOST = 'fake_host'
# TODO(marun) - Move to somewhere common for reuse
class PluginConfFixture(fixtures.Fixture):
    """Plugin configuration shared across the unit and functional tests."""

    def __init__(self, plugin_name, parent_setup=None):
        super(PluginConfFixture, self).__init__()
        self.parent_setup = parent_setup
        self.plugin_name = plugin_name

    def _setUp(self):
        # Nothing to do unless a parent setup callable was supplied.
        if not self.parent_setup:
            return
        self.parent_setup()
class Ml2ConfFixture(PluginConfFixture):
    """PluginConfFixture preconfigured with the ML2 core plugin."""

    def __init__(self, parent_setup=None):
        # Pin the plugin name to ML2; callers only supply the parent setup.
        super(Ml2ConfFixture, self).__init__(PLUGIN_NAME, parent_setup)
class Ml2PluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase):
    """Base test case that stands up the ML2 plugin with test drivers."""

    # Mechanism drivers loaded for every ML2 unit test.
    _mechanism_drivers = ['logger', 'test']
    l3_plugin = ('neutron.tests.unit.extensions.test_l3.'
                 'TestL3NatServicePlugin')
    def setup_parent(self):
        """Perform parent setup with the common plugin configuration class."""
        service_plugins = {'l3_plugin_name': self.l3_plugin}
        # Ensure that the parent setup can be called without arguments
        # by the common configuration setUp.
        parent_setup = functools.partial(
            super(Ml2PluginV2TestCase, self).setUp,
            plugin=PLUGIN_NAME,
            service_plugins=service_plugins,
        )
        self.useFixture(Ml2ConfFixture(parent_setup))
        self.port_create_status = 'DOWN'
    def setUp(self):
        # Enable the test mechanism driver to ensure that
        # we can successfully call through to all mechanism
        # driver apis.
        config.cfg.CONF.set_override('mechanism_drivers',
                                     self._mechanism_drivers,
                                     group='ml2')
        # Two physical networks, each with its own VLAN range, so tests can
        # exercise multi-segment and provider-network behavior.
        self.physnet = 'physnet1'
        self.vlan_range = '1:100'
        self.vlan_range2 = '200:300'
        self.physnet2 = 'physnet2'
        self.phys_vrange = ':'.join([self.physnet, self.vlan_range])
        self.phys2_vrange = ':'.join([self.physnet2, self.vlan_range2])
        config.cfg.CONF.set_override('network_vlan_ranges',
                                     [self.phys_vrange, self.phys2_vrange],
                                     group='ml2_type_vlan')
        self.setup_parent()
        self.driver = ml2_plugin.Ml2Plugin()
        self.context = context.get_admin_context()
class TestMl2BulkToggleWithoutBulkless(Ml2PluginV2TestCase):
    """Verify native bulk support when no 'bulkless' driver is loaded."""

    _mechanism_drivers = ['logger', 'test']

    def test_bulk_enabled_with_bulk_drivers(self):
        # With only bulk-capable drivers loaded, native bulk operations
        # must not be skipped.
        self.assertFalse(self._skip_native_bulk)
class TestMl2SupportedQosRuleTypes(Ml2PluginV2TestCase):
    """Tests for aggregation of QoS rule types across mechanism drivers."""

    def test_empty_driver_list(self, *mocks):
        # NOTE(review): *mocks is unused here (no patch decorators on this
        # test) -- presumably left over from a copied signature.
        mech_drivers_mock = mock.PropertyMock(return_value=[])
        # No drivers loaded -> no supported rule types.
        with mock.patch.object(self.driver.mechanism_manager,
                               'ordered_mech_drivers',
                               new_callable=mech_drivers_mock):
            self.assertEqual(
                [], self.driver.mechanism_manager.supported_qos_rule_types)

    def test_no_rule_types_in_common(self):
        # The default 'logger' and 'test' drivers share no QoS rule types,
        # so the intersection is empty.
        self.assertEqual(
            [], self.driver.mechanism_manager.supported_qos_rule_types)

    @mock.patch.object(mech_logger.LoggerMechanismDriver,
                       'supported_qos_rule_types',
                       new_callable=mock.PropertyMock,
                       create=True)
    @mock.patch.object(mech_test.TestMechanismDriver,
                       'supported_qos_rule_types',
                       new_callable=mock.PropertyMock,
                       create=True)
    def test_rule_type_in_common(self, *mocks):
        # make sure both plugins have the same supported qos rule types
        for mock_ in mocks:
            mock_.return_value = qos_consts.VALID_RULE_TYPES
        self.assertEqual(
            qos_consts.VALID_RULE_TYPES,
            self.driver.mechanism_manager.supported_qos_rule_types)

    @mock.patch.object(mech_test.TestMechanismDriver,
                       'supported_qos_rule_types',
                       new_callable=mock.PropertyMock,
                       return_value=qos_consts.VALID_RULE_TYPES,
                       create=True)
    @mock.patch.object(mech_logger.LoggerMechanismDriver,
                       '_supports_port_binding',
                       new_callable=mock.PropertyMock,
                       return_value=False)
    def test_rule_types_with_driver_that_does_not_implement_binding(self,
                                                                    *mocks):
        # A driver that does not bind ports must not restrict the
        # advertised rule types.
        self.assertEqual(
            qos_consts.VALID_RULE_TYPES,
            self.driver.mechanism_manager.supported_qos_rule_types)
class TestMl2BasicGet(test_plugin.TestBasicGet,
                      Ml2PluginV2TestCase):
    """Run the shared basic GET tests against the ML2 plugin."""
    pass
class TestMl2V2HTTPResponse(test_plugin.TestV2HTTPResponse,
                            Ml2PluginV2TestCase):
    """Run the shared v2 HTTP response tests against the ML2 plugin."""
    pass
class TestMl2NetworksV2(test_plugin.TestNetworksV2,
                        Ml2PluginV2TestCase):
    """Network API tests against ML2, plus provider-network lookups."""

    def setUp(self, plugin=None):
        super(TestMl2NetworksV2, self).setUp()
        # provider networks
        self.pnets = [{'name': 'net1',
                       pnet.NETWORK_TYPE: 'vlan',
                       pnet.PHYSICAL_NETWORK: 'physnet1',
                       pnet.SEGMENTATION_ID: 1,
                       'tenant_id': 'tenant_one'},
                      {'name': 'net2',
                       pnet.NETWORK_TYPE: 'vlan',
                       pnet.PHYSICAL_NETWORK: 'physnet2',
                       pnet.SEGMENTATION_ID: 210,
                       'tenant_id': 'tenant_one'},
                      {'name': 'net3',
                       pnet.NETWORK_TYPE: 'vlan',
                       pnet.PHYSICAL_NETWORK: 'physnet2',
                       pnet.SEGMENTATION_ID: 220,
                       'tenant_id': 'tenant_one'}
                      ]
        # multiprovider networks
        self.mp_nets = [{'name': 'net4',
                         mpnet.SEGMENTS:
                         [{pnet.NETWORK_TYPE: 'vlan',
                           pnet.PHYSICAL_NETWORK: 'physnet2',
                           pnet.SEGMENTATION_ID: 1},
                          {pnet.NETWORK_TYPE: 'vlan',
                           pnet.PHYSICAL_NETWORK: 'physnet2',
                           pnet.SEGMENTATION_ID: 202}],
                         'tenant_id': 'tenant_one'}
                        ]
        self.nets = self.mp_nets + self.pnets

    def test_port_delete_helper_tolerates_failure(self):
        # _delete_ports must swallow "already gone" errors per port.
        plugin = manager.NeutronManager.get_plugin()
        with mock.patch.object(plugin, "delete_port",
                               side_effect=exc.PortNotFound(port_id="123")):
            plugin._delete_ports(mock.MagicMock(), [mock.MagicMock()])
        with mock.patch.object(plugin, "delete_port",
                               side_effect=sqla_exc.ObjectDeletedError(None)):
            plugin._delete_ports(mock.MagicMock(), [mock.MagicMock()])

    def test_subnet_delete_helper_tolerates_failure(self):
        # Same tolerance as above, for _delete_subnets.
        plugin = manager.NeutronManager.get_plugin()
        with mock.patch.object(plugin, "delete_subnet",
                               side_effect=exc.SubnetNotFound(subnet_id="1")):
            plugin._delete_subnets(mock.MagicMock(), [mock.MagicMock()])
        with mock.patch.object(plugin, "delete_subnet",
                               side_effect=sqla_exc.ObjectDeletedError(None)):
            plugin._delete_subnets(mock.MagicMock(), [mock.MagicMock()])

    def _create_and_verify_networks(self, networks):
        """Create each network through the API and verify echoed attrs."""
        for net_idx, net in enumerate(networks):
            # create
            req = self.new_create_request('networks',
                                          {'network': net})
            # verify
            network = self.deserialize(self.fmt,
                                       req.get_response(self.api))['network']
            if mpnet.SEGMENTS not in net:
                # Single-segment request: every attribute round-trips and
                # no multiprovider attribute appears in the response.
                for k, v in six.iteritems(net):
                    self.assertEqual(net[k], network[k])
                    self.assertNotIn(mpnet.SEGMENTS, network)
            else:
                # Multiprovider request: segments round-trip in order.
                segments = network[mpnet.SEGMENTS]
                expected_segments = net[mpnet.SEGMENTS]
                self.assertEqual(len(expected_segments), len(segments))
                for expected, actual in zip(expected_segments, segments):
                    self.assertEqual(expected, actual)

    def _lookup_network_by_segmentation_id(self, seg_id, num_expected_nets):
        """List networks filtered by segmentation_id and check the count."""
        params_str = "%s=%s" % (pnet.SEGMENTATION_ID, seg_id)
        net_req = self.new_list_request('networks', None,
                                        params=params_str)
        networks = self.deserialize(self.fmt, net_req.get_response(self.api))
        if num_expected_nets:
            self.assertIsNotNone(networks)
            self.assertEqual(num_expected_nets, len(networks['networks']))
        else:
            self.assertIsNone(networks)
        return networks

    def test_list_networks_with_segmentation_id(self):
        self._create_and_verify_networks(self.pnets)
        # verify we can find the network that we expect
        lookup_vlan_id = 1
        expected_net = [n for n in self.pnets
                        if n[pnet.SEGMENTATION_ID] == lookup_vlan_id].pop()
        networks = self._lookup_network_by_segmentation_id(lookup_vlan_id, 1)
        # verify all provider attributes
        network = networks['networks'][0]
        for attr in pnet.ATTRIBUTES:
            self.assertEqual(expected_net[attr], network[attr])

    def test_list_mpnetworks_with_segmentation_id(self):
        self._create_and_verify_networks(self.nets)
        # get all networks with seg_id=1 (including multisegment networks)
        lookup_vlan_id = 1
        networks = self._lookup_network_by_segmentation_id(lookup_vlan_id, 2)
        # get the mpnet
        networks = [n for n in networks['networks'] if mpnet.SEGMENTS in n]
        network = networks.pop()
        # verify attributes of the looked up item
        segments = network[mpnet.SEGMENTS]
        expected_segments = self.mp_nets[0][mpnet.SEGMENTS]
        self.assertEqual(len(expected_segments), len(segments))
        for expected, actual in zip(expected_segments, segments):
            self.assertEqual(expected, actual)

    def test_create_network_segment_allocation_fails(self):
        # A RetryRequest raised from segment allocation is retried
        # MAX_RETRIES times before surfacing as a 500.
        plugin = manager.NeutronManager.get_plugin()
        with mock.patch.object(
            plugin.type_manager, 'create_network_segments',
            side_effect=db_exc.RetryRequest(ValueError())
        ) as f:
            data = {'network': {'tenant_id': 'sometenant', 'name': 'dummy',
                                'admin_state_up': True, 'shared': False}}
            req = self.new_create_request('networks', data)
            res = req.get_response(self.api)
            self.assertEqual(500, res.status_int)
            # Initial attempt plus MAX_RETRIES retries.
            self.assertEqual(db_api.MAX_RETRIES + 1, f.call_count)
class TestExternalNetwork(Ml2PluginV2TestCase):
    """Verify the segment type chosen for router:external networks."""

    def _create_external_network(self):
        """Create a router:external network and return the API response."""
        data = {'network': {'name': 'net1',
                            'router:external': 'True',
                            'tenant_id': 'tenant_one'}}
        network_req = self.new_create_request('networks', data)
        network = self.deserialize(self.fmt,
                                   network_req.get_response(self.api))
        return network

    def test_external_network_type_none(self):
        config.cfg.CONF.set_default('external_network_type',
                                    None,
                                    group='ml2')
        network = self._create_external_network()
        # For external network, expected network type to be
        # tenant_network_types which is by default 'local'.
        self.assertEqual(p_const.TYPE_LOCAL,
                         network['network'][pnet.NETWORK_TYPE])
        # No physical network specified, expected 'None'.
        self.assertIsNone(network['network'][pnet.PHYSICAL_NETWORK])
        # External network will not have a segmentation id.
        self.assertIsNone(network['network'][pnet.SEGMENTATION_ID])
        # External network will not have multiple segments.
        self.assertNotIn(mpnet.SEGMENTS, network['network'])

    def test_external_network_type_vlan(self):
        config.cfg.CONF.set_default('external_network_type',
                                    p_const.TYPE_VLAN,
                                    group='ml2')
        network = self._create_external_network()
        # For external network, expected network type to be 'vlan'.
        self.assertEqual(p_const.TYPE_VLAN,
                         network['network'][pnet.NETWORK_TYPE])
        # Physical network is expected.
        self.assertIsNotNone(network['network'][pnet.PHYSICAL_NETWORK])
        # External network will have a segmentation id.
        self.assertIsNotNone(network['network'][pnet.SEGMENTATION_ID])
        # External network will not have multiple segments.
        self.assertNotIn(mpnet.SEGMENTS, network['network'])
class TestMl2NetworksWithVlanTransparencyAndMTU(TestMl2NetworksV2):
    """Network tests with path/segment MTU and VLAN transparency enabled."""

    def setUp(self, plugin=None):
        # Constrain both MTU sources to the same value so the advertised
        # network MTU is deterministic, and enable VLAN transparency.
        config.cfg.CONF.set_override('path_mtu', 1000, group='ml2')
        config.cfg.CONF.set_override('segment_mtu', 1000, group='ml2')
        config.cfg.CONF.set_override('advertise_mtu', True)
        config.cfg.CONF.set_override('vlan_transparent', True)
        super(TestMl2NetworksWithVlanTransparencyAndMTU, self).setUp(plugin)

    def test_create_network_vlan_transparent_and_mtu(self):
        data = {'network': {'name': 'net1',
                            mpnet.SEGMENTS:
                            [{pnet.NETWORK_TYPE: 'vlan',
                              pnet.PHYSICAL_NETWORK: 'physnet1'}],
                            'tenant_id': 'tenant_one'}}
        network_req = self.new_create_request('networks', data)
        res = network_req.get_response(self.api)
        self.assertEqual(201, res.status_int)
        network = self.deserialize(self.fmt, res)['network']
        # (expected, actual) argument order, matching the convention used
        # throughout this module so failure messages read correctly.
        self.assertEqual(1000, network['mtu'])
        self.assertIn('vlan_transparent', network)
class TestMl2SubnetsV2(test_plugin.TestSubnetsV2,
                       Ml2PluginV2TestCase):
    """Subnet API tests against ML2, plus a delete-race regression test."""

    def test_delete_subnet_race_with_dhcp_port_creation(self):
        with self.network() as network:
            with self.subnet(network=network) as subnet:
                subnet_id = subnet['subnet']['id']
                # Mutable cell so the closure below can count invocations.
                attempt = [0]

                def check_and_create_ports(context, subnet_id):
                    """A method to emulate race condition.

                    Adds dhcp port in the middle of subnet delete
                    """
                    # Only interfere on the first check; subsequent checks
                    # report no allocations so the delete can complete.
                    if attempt[0] > 0:
                        return False
                    attempt[0] += 1
                    data = {'port': {'network_id': network['network']['id'],
                                     'tenant_id':
                                     network['network']['tenant_id'],
                                     'name': 'port1',
                                     'admin_state_up': 1,
                                     'device_owner':
                                     constants.DEVICE_OWNER_DHCP,
                                     'fixed_ips': [{'subnet_id': subnet_id}]}}
                    port_req = self.new_create_request('ports', data)
                    port_res = port_req.get_response(self.api)
                    self.assertEqual(201, port_res.status_int)
                    return (context.session.query(models_v2.IPAllocation).
                            filter_by(subnet_id=subnet_id).
                            join(models_v2.Port).first())

                plugin = manager.NeutronManager.get_plugin()
                # we mock _subnet_check_ip_allocations with method
                # that creates DHCP port 'in the middle' of subnet_delete
                # causing retry this way subnet is deleted on the
                # second attempt
                with mock.patch.object(plugin, '_subnet_check_ip_allocations',
                                       side_effect=check_and_create_ports):
                    req = self.new_delete_request('subnets', subnet_id)
                    res = req.get_response(self.api)
                    self.assertEqual(204, res.status_int)
class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
    """Port API tests against ML2: status, notifications, bulk, DVR, retries."""

    def test_update_port_status_build(self):
        with self.port() as port:
            self.assertEqual('DOWN', port['port']['status'])
            self.assertEqual('DOWN', self.port_create_status)

    def test_update_port_status_short_id(self):
        # update_port_status must resolve a truncated port id to the full
        # id before looking up binding levels.
        ctx = context.get_admin_context()
        plugin = manager.NeutronManager.get_plugin()
        with self.port() as port:
            with mock.patch.object(ml2_db, 'get_binding_levels',
                                   return_value=[]) as mock_gbl:
                port_id = port['port']['id']
                short_id = port_id[:11]
                plugin.update_port_status(ctx, short_id, 'UP')
                mock_gbl.assert_called_once_with(mock.ANY, port_id, mock.ANY)

    def test_update_port_fixed_ip_changed(self):
        # Changing a fixed IP must trigger a security-group member update.
        ctx = context.get_admin_context()
        plugin = manager.NeutronManager.get_plugin()
        with self.port() as port, mock.patch.object(
                plugin.notifier,
                'security_groups_member_updated') as sg_member_update:
            port['port']['fixed_ips'][0]['ip_address'] = '10.0.0.3'
            plugin.update_port(ctx, port['port']['id'], port)
            self.assertTrue(sg_member_update.called)

    def test_update_port_status_with_network(self):
        # Passing the network avoids a redundant get_network lookup.
        ctx = context.get_admin_context()
        plugin = manager.NeutronManager.get_plugin()
        with self.port() as port:
            net = plugin.get_network(ctx, port['port']['network_id'])
            with mock.patch.object(plugin, 'get_network') as get_net:
                plugin.update_port_status(ctx, port['port']['id'], 'UP',
                                          network=net)
                self.assertFalse(get_net.called)

    def test_update_port_mac(self):
        self.check_update_port_mac(
            host_arg={portbindings.HOST_ID: HOST},
            arg_list=(portbindings.HOST_ID,))

    def test_update_non_existent_port(self):
        ctx = context.get_admin_context()
        plugin = manager.NeutronManager.get_plugin()
        data = {'port': {'admin_state_up': False}}
        self.assertRaises(exc.PortNotFound, plugin.update_port, ctx,
                          'invalid-uuid', data)

    def test_delete_non_existent_port(self):
        # Deleting a missing port with l3_port_check=False is a no-op that
        # only logs; it must not raise.
        ctx = context.get_admin_context()
        plugin = manager.NeutronManager.get_plugin()
        with mock.patch.object(ml2_plugin.LOG, 'debug') as log_debug:
            plugin.delete_port(ctx, 'invalid-uuid', l3_port_check=False)
            log_debug.assert_has_calls([
                mock.call(_("Deleting port %s"), 'invalid-uuid'),
                mock.call(_("The port '%s' was deleted"), 'invalid-uuid')
            ])

    def test_l3_cleanup_on_net_delete(self):
        # Deleting an external network must also remove its floating IPs.
        l3plugin = manager.NeutronManager.get_service_plugins().get(
            p_const.L3_ROUTER_NAT)
        kwargs = {'arg_list': (external_net.EXTERNAL,),
                  external_net.EXTERNAL: True}
        with self.network(**kwargs) as n:
            with self.subnet(network=n, cidr='200.0.0.0/22'):
                l3plugin.create_floatingip(
                    context.get_admin_context(),
                    {'floatingip': {'floating_network_id': n['network']['id'],
                                    'tenant_id': n['network']['tenant_id']}}
                )
            self._delete('networks', n['network']['id'])
            flips = l3plugin.get_floatingips(context.get_admin_context())
            self.assertFalse(flips)

    def test_create_ports_bulk_port_binding_failure(self):
        ctx = context.get_admin_context()
        with self.network() as net:
            plugin = manager.NeutronManager.get_plugin()
            with mock.patch.object(plugin, '_bind_port_if_needed',
                side_effect=ml2_exc.MechanismDriverError(
                    method='create_port_bulk')) as _bind_port_if_needed:
                res = self._create_port_bulk(self.fmt, 2, net['network']['id'],
                                             'test', True, context=ctx)
                self.assertTrue(_bind_port_if_needed.called)
                # We expect a 500 as we injected a fault in the plugin
                self._validate_behavior_on_bulk_failure(
                    res, 'ports', webob.exc.HTTPServerError.code)

    def test_create_ports_bulk_with_sec_grp(self):
        ctx = context.get_admin_context()
        plugin = manager.NeutronManager.get_plugin()
        with self.network() as net,\
                mock.patch.object(plugin.notifier,
                                  'security_groups_member_updated') as m_upd,\
                mock.patch.object(plugin.notifier,
                                  'security_groups_provider_updated') as p_upd:
            res = self._create_port_bulk(self.fmt, 3, net['network']['id'],
                                         'test', True, context=ctx)
            ports = self.deserialize(self.fmt, res)
            used_sg = ports['ports'][0]['security_groups']
            m_upd.assert_called_once_with(ctx, used_sg)
            self.assertFalse(p_upd.called)

    def _check_security_groups_provider_updated_args(self, p_upd_mock, net_id):
        """Assert the provider-updated notification covered all net ports."""
        query_params = "network_id=%s" % net_id
        network_ports = self._list('ports', query_params=query_params)
        network_ports_ids = [port['id'] for port in network_ports['ports']]
        self.assertTrue(p_upd_mock.called)
        p_upd_args = p_upd_mock.call_args
        ports_ids = p_upd_args[0][1]
        self.assertEqual(sorted(network_ports_ids), sorted(ports_ids))

    def test_create_ports_bulk_with_sec_grp_member_provider_update(self):
        ctx = context.get_admin_context()
        plugin = manager.NeutronManager.get_plugin()
        with self.network() as net,\
                mock.patch.object(plugin.notifier,
                                  'security_groups_member_updated') as m_upd,\
                mock.patch.object(plugin.notifier,
                                  'security_groups_provider_updated') as p_upd:
            net_id = net['network']['id']
            data = [{
                    'network_id': net_id,
                    'tenant_id': self._tenant_id
                    },
                    {
                    'network_id': net_id,
                    'tenant_id': self._tenant_id,
                    'device_owner': constants.DEVICE_OWNER_DHCP
                    }
                    ]
            res = self._create_bulk_from_list(self.fmt, 'port',
                                              data, context=ctx)
            ports = self.deserialize(self.fmt, res)
            used_sg = ports['ports'][0]['security_groups']
            m_upd.assert_called_once_with(ctx, used_sg)
            self._check_security_groups_provider_updated_args(p_upd, net_id)
            m_upd.reset_mock()
            p_upd.reset_mock()
            # With only trusted (DHCP) ports, no member update is expected.
            data[0]['device_owner'] = constants.DEVICE_OWNER_DHCP
            self._create_bulk_from_list(self.fmt, 'port',
                                        data, context=ctx)
            self.assertFalse(m_upd.called)
            self._check_security_groups_provider_updated_args(p_upd, net_id)

    def test_create_ports_bulk_with_sec_grp_provider_update_ipv6(self):
        ctx = context.get_admin_context()
        plugin = manager.NeutronManager.get_plugin()
        fake_prefix = '2001:db8::/64'
        fake_gateway = 'fe80::1'
        with self.network() as net:
            with self.subnet(net,
                             gateway_ip=fake_gateway,
                             cidr=fake_prefix,
                             ip_version=6) as snet_v6,\
                    mock.patch.object(
                        plugin.notifier,
                        'security_groups_member_updated') as m_upd,\
                    mock.patch.object(
                        plugin.notifier,
                        'security_groups_provider_updated') as p_upd:
                net_id = net['network']['id']
                data = [{
                        'network_id': net_id,
                        'tenant_id': self._tenant_id,
                        'fixed_ips': [{'subnet_id': snet_v6['subnet']['id']}],
                        'device_owner': constants.DEVICE_OWNER_ROUTER_INTF
                        }
                        ]
                self._create_bulk_from_list(self.fmt, 'port',
                                            data, context=ctx)
                self.assertFalse(m_upd.called)
                self._check_security_groups_provider_updated_args(
                    p_upd, net_id)

    def test_delete_port_no_notify_in_disassociate_floatingips(self):
        ctx = context.get_admin_context()
        plugin = manager.NeutronManager.get_plugin()
        l3plugin = manager.NeutronManager.get_service_plugins().get(
            p_const.L3_ROUTER_NAT)
        with self.port() as port,\
                mock.patch.object(
                    l3plugin,
                    'disassociate_floatingips') as disassociate_floatingips,\
                mock.patch.object(registry, 'notify') as notify:

            port_id = port['port']['id']
            plugin.delete_port(ctx, port_id)

            # check that no notification was requested while under
            # transaction
            disassociate_floatingips.assert_has_calls([
                mock.call(ctx, port_id, do_notify=False)
            ])

            # check that notifier was still triggered
            # NOTE: 'call_counts' was a typo -- Mock auto-creates unknown
            # attributes, so the old assertion was always truthy and
            # verified nothing. 'call_count' is the real attribute.
            self.assertTrue(notify.call_count)

    def test_check_if_compute_port_serviced_by_dvr(self):
        self.assertTrue(utils.is_dvr_serviced('compute:None'))

    def test_check_if_lbaas_vip_port_serviced_by_dvr(self):
        self.assertTrue(utils.is_dvr_serviced(
            constants.DEVICE_OWNER_LOADBALANCER))

    def test_check_if_lbaasv2_vip_port_serviced_by_dvr(self):
        self.assertTrue(utils.is_dvr_serviced(
            constants.DEVICE_OWNER_LOADBALANCERV2))

    def test_check_if_dhcp_port_serviced_by_dvr(self):
        self.assertTrue(utils.is_dvr_serviced(constants.DEVICE_OWNER_DHCP))

    def test_check_if_port_not_serviced_by_dvr(self):
        self.assertFalse(utils.is_dvr_serviced(
            constants.DEVICE_OWNER_ROUTER_INTF))

    def test_disassociate_floatingips_do_notify_returns_nothing(self):
        ctx = context.get_admin_context()
        l3plugin = manager.NeutronManager.get_service_plugins().get(
            p_const.L3_ROUTER_NAT)
        with self.port() as port:

            port_id = port['port']['id']
            # check that nothing is returned when notifications are handled
            # by the called method
            self.assertIsNone(l3plugin.disassociate_floatingips(ctx, port_id))

    def test_create_port_tolerates_db_deadlock(self):
        # A DBDeadlock from get_network_segments must be retried, not
        # surfaced to the caller.
        ctx = context.get_admin_context()
        with self.network() as net:
            with self.subnet(network=net) as subnet:
                segments = ml2_db.get_network_segments(ctx.session,
                                                       net['network']['id'])
                with mock.patch('neutron.plugins.ml2.plugin.'
                                'db.get_network_segments') as get_seg_mock:
                    get_seg_mock.side_effect = [db_exc.DBDeadlock, segments,
                                                segments, segments]
                    with self.port(subnet=subnet) as port:
                        self.assertTrue(port['port']['id'])
                        self.assertEqual(4, get_seg_mock.call_count)

    def test_delete_port_tolerates_db_deadlock(self):
        ctx = context.get_admin_context()
        plugin = manager.NeutronManager.get_plugin()
        with self.port() as port:
            port_db, binding = ml2_db.get_locked_port_and_binding(
                ctx.session, port['port']['id'])
            with mock.patch('neutron.plugins.ml2.plugin.'
                            'db.get_locked_port_and_binding') as lock:
                lock.side_effect = [db_exc.DBDeadlock,
                                    (port_db, binding)]
                req = self.new_delete_request('ports', port['port']['id'])
                res = req.get_response(self.api)
                self.assertEqual(204, res.status_int)
                self.assertEqual(2, lock.call_count)
                self.assertRaises(
                    exc.PortNotFound, plugin.get_port, ctx, port['port']['id'])
class TestMl2PluginOnly(Ml2PluginV2TestCase):
    """For testing methods that don't call drivers"""

    def _test_check_mac_update_allowed(self, vif_type, expect_change=True):
        """Exercise _check_mac_update_allowed against a fake binding."""
        plugin = manager.NeutronManager.get_plugin()
        port = {'mac_address': "fake_mac", 'id': "fake_id"}
        if expect_change:
            new_attrs = {"mac_address": "dummy_mac"}
        else:
            # Same MAC as the port -> no change requested.
            new_attrs = {"mac_address": port['mac_address']}
        binding = mock.Mock()
        binding.vif_type = vif_type
        mac_changed = plugin._check_mac_update_allowed(port, new_attrs,
                                                       binding)
        self.assertEqual(expect_change, mac_changed)

    def test_check_mac_update_allowed_if_no_mac_change(self):
        self._test_check_mac_update_allowed(portbindings.VIF_TYPE_UNBOUND,
                                            expect_change=False)

    def test_check_mac_update_allowed_unless_bound(self):
        # Changing the MAC of a bound port must raise PortBound.
        with testtools.ExpectedException(exc.PortBound):
            self._test_check_mac_update_allowed(portbindings.VIF_TYPE_OVS)

    def test__device_to_port_id_prefix_names(self):
        # Known interface-name prefixes are stripped to recover the id.
        input_output = [('sg-abcdefg', 'abcdefg'),
                        ('tap123456', '123456'),
                        ('qvo567890', '567890')]
        for device, expected in input_output:
            self.assertEqual(expected,
                             ml2_plugin.Ml2Plugin._device_to_port_id(
                                 self.context, device))

    def test__device_to_port_id_mac_address(self):
        # A MAC address resolves to the id of the port owning that MAC.
        with self.port() as p:
            mac = p['port']['mac_address']
            port_id = p['port']['id']
            self.assertEqual(port_id,
                             ml2_plugin.Ml2Plugin._device_to_port_id(
                                 self.context, mac))

    def test__device_to_port_id_not_uuid_not_mac(self):
        # Unrecognized device strings pass through unchanged.
        dev = '1234567'
        self.assertEqual(dev, ml2_plugin.Ml2Plugin._device_to_port_id(
            self.context, dev))

    def test__device_to_port_id_UUID(self):
        port_id = uuidutils.generate_uuid()
        self.assertEqual(port_id, ml2_plugin.Ml2Plugin._device_to_port_id(
            self.context, port_id))
class TestMl2DvrPortsV2(TestMl2PortsV2):
    """Port tests re-run with a mocked DVR-capable L3 service plugin."""

    def setUp(self):
        super(TestMl2DvrPortsV2, self).setUp()
        extensions = ['router',
                      constants.L3_AGENT_SCHEDULER_EXT_ALIAS,
                      constants.L3_DISTRIBUTED_EXT_ALIAS]
        self.plugin = manager.NeutronManager.get_plugin()
        # Mocked L3 plugin that advertises the DVR-related extensions.
        self.l3plugin = mock.Mock()
        type(self.l3plugin).supported_extension_aliases = (
            mock.PropertyMock(return_value=extensions))
        self.service_plugins = {'L3_ROUTER_NAT': self.l3plugin}

    def _test_delete_dvr_serviced_port(self, device_owner, floating_ip=False):
        """Delete a DVR-serviced port; verify namespace-cleanup calls."""
        ns_to_delete = {'host': 'myhost', 'agent_id': 'vm_l3_agent',
                        'router_id': 'my_router'}
        fip_set = set()
        if floating_ip:
            fip_set.add(ns_to_delete['router_id'])

        with mock.patch.object(manager.NeutronManager,
                               'get_service_plugins',
                               return_value=self.service_plugins),\
                self.port(device_owner=device_owner) as port,\
                mock.patch.object(registry, 'notify') as notify,\
                mock.patch.object(self.l3plugin,
                                  'disassociate_floatingips',
                                  return_value=fip_set),\
                mock.patch.object(
                    self.l3plugin,
                    'dvr_deletens_if_no_port',
                    return_value=[ns_to_delete]) as dvr_delns_ifno_port:

            port_id = port['port']['id']
            self.plugin.delete_port(self.context, port_id)
            self.assertTrue(notify.call_count)
            dvr_delns_ifno_port.assert_called_once_with(self.context,
                                                        port['port']['id'])

    def test_delete_last_vm_port(self):
        self._test_delete_dvr_serviced_port(device_owner='compute:None')

    def test_delete_last_vm_port_with_floatingip(self):
        self._test_delete_dvr_serviced_port(device_owner='compute:None',
                                            floating_ip=True)

    def test_delete_lbaas_vip_port(self):
        self._test_delete_dvr_serviced_port(
            device_owner=constants.DEVICE_OWNER_LOADBALANCER)

    def test_delete_lbaasv2_vip_port(self):
        self._test_delete_dvr_serviced_port(
            device_owner=constants.DEVICE_OWNER_LOADBALANCERV2)

    def test_concurrent_csnat_port_delete(self):
        plugin = manager.NeutronManager.get_service_plugins()[
            p_const.L3_ROUTER_NAT]
        r = plugin.create_router(
            self.context,
            {'router': {'name': 'router', 'admin_state_up': True}})
        with self.subnet() as s:
            p = plugin.add_router_interface(self.context, r['id'],
                                            {'subnet_id': s['subnet']['id']})

        # lie to turn the port into an SNAT interface
        with self.context.session.begin():
            rp = self.context.session.query(l3_db.RouterPort).filter_by(
                port_id=p['port_id']).first()
            rp.port_type = constants.DEVICE_OWNER_ROUTER_SNAT

        # take the port away before csnat gets a chance to delete it
        # to simulate a concurrent delete
        orig_get_ports = plugin._core_plugin.get_ports

        def get_ports_with_delete_first(*args, **kwargs):
            plugin._core_plugin.delete_port(self.context,
                                            p['port_id'],
                                            l3_port_check=False)
            return orig_get_ports(*args, **kwargs)
        plugin._core_plugin.get_ports = get_ports_with_delete_first

        # This should be able to handle a concurrent delete without raising
        # an exception
        router = plugin._get_router(self.context, r['id'])
        plugin.delete_csnat_router_interface_ports(self.context, router)
class TestMl2PortBinding(Ml2PluginV2TestCase,
                         test_bindings.PortBindingsTestCase):
    """Port-binding tests: binding profiles, host ids, DVR bindings."""

    # Test case does not set binding:host_id, so ml2 does not attempt
    # to bind port
    VIF_TYPE = portbindings.VIF_TYPE_UNBOUND
    HAS_PORT_FILTER = False
    ENABLE_SG = True
    FIREWALL_DRIVER = test_sg_rpc.FIREWALL_HYBRID_DRIVER

    def setUp(self, firewall_driver=None):
        # NOTE(review): 'firewall_driver' is accepted but unused here --
        # presumably kept for signature compatibility with the mixin.
        test_sg_rpc.set_firewall_driver(self.FIREWALL_DRIVER)
        config.cfg.CONF.set_override(
            'enable_security_group', self.ENABLE_SG,
            group='SECURITYGROUP')
        super(TestMl2PortBinding, self).setUp()

    def _check_port_binding_profile(self, port, profile=None):
        """Assert the port carries the expected binding profile ({} if None)."""
        self.assertIn('id', port)
        self.assertIn(portbindings.PROFILE, port)
        value = port[portbindings.PROFILE]
        self.assertEqual(profile or {}, value)

    def test_create_port_binding_profile(self):
        self._test_create_port_binding_profile({'a': 1, 'b': 2})

    def test_update_port_binding_profile(self):
        self._test_update_port_binding_profile({'c': 3})

    def test_create_port_binding_profile_too_big(self):
        # An oversized profile must be rejected with a 400; depending on
        # the client plumbing this surfaces as a status or an exception.
        s = 'x' * 5000
        profile_arg = {portbindings.PROFILE: {'d': s}}
        try:
            with self.port(expected_res_status=400,
                           arg_list=(portbindings.PROFILE,),
                           **profile_arg):
                pass
        except webob.exc.HTTPClientError:
            pass

    def test_remove_port_binding_profile(self):
        profile = {'e': 5}
        profile_arg = {portbindings.PROFILE: profile}
        with self.port(arg_list=(portbindings.PROFILE,),
                       **profile_arg) as port:
            self._check_port_binding_profile(port['port'], profile)
            port_id = port['port']['id']
            # Updating with None clears the profile back to {}.
            profile_arg = {portbindings.PROFILE: None}
            port = self._update('ports', port_id,
                                {'port': profile_arg})['port']
            self._check_port_binding_profile(port)
            port = self._show('ports', port_id)['port']
            self._check_port_binding_profile(port)

    def test_return_on_concurrent_delete_and_binding(self):
        # create a port and delete it so we have an expired mechanism context
        with self.port() as port:
            plugin = manager.NeutronManager.get_plugin()
            binding = ml2_db.get_locked_port_and_binding(self.context.session,
                                                         port['port']['id'])[1]
            binding['host'] = 'test'
            mech_context = driver_context.PortContext(
                plugin, self.context, port['port'],
                plugin.get_network(self.context, port['port']['network_id']),
                binding, None)
        # Simulate the port vanishing (concurrent delete) during binding.
        with mock.patch(
            'neutron.plugins.ml2.plugin.' 'db.get_locked_port_and_binding',
            return_value=(None, None)) as glpab_mock,\
                mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin.'
                           '_make_port_dict') as mpd_mock:
            plugin._bind_port_if_needed(mech_context)
            # called during deletion to get port
            self.assertTrue(glpab_mock.mock_calls)
            # should have returned before calling _make_port_dict
            self.assertFalse(mpd_mock.mock_calls)

    def test_port_binding_profile_not_changed(self):
        profile = {'e': 5}
        profile_arg = {portbindings.PROFILE: profile}
        with self.port(arg_list=(portbindings.PROFILE,),
                       **profile_arg) as port:
            self._check_port_binding_profile(port['port'], profile)
            port_id = port['port']['id']
            # An unrelated update must leave the profile intact.
            state_arg = {'admin_state_up': True}
            port = self._update('ports', port_id,
                                {'port': state_arg})['port']
            self._check_port_binding_profile(port, profile)
            port = self._show('ports', port_id)['port']
            self._check_port_binding_profile(port, profile)

    def test_update_port_binding_host_id_none(self):
        with self.port() as port:
            plugin = manager.NeutronManager.get_plugin()
            binding = ml2_db.get_locked_port_and_binding(self.context.session,
                                                         port['port']['id'])[1]
            binding['host'] = 'test'
            mech_context = driver_context.PortContext(
                plugin, self.context, port['port'],
                plugin.get_network(self.context, port['port']['network_id']),
                binding, None)
        with mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin.'
                        '_update_port_dict_binding') as update_mock:
            attrs = {portbindings.HOST_ID: None}
            plugin._process_port_binding(mech_context, attrs)
            self.assertTrue(update_mock.mock_calls)
            # A None host id is normalized to the empty string.
            self.assertEqual('', binding.host)

    def test_update_port_binding_host_id_not_changed(self):
        with self.port() as port:
            plugin = manager.NeutronManager.get_plugin()
            binding = ml2_db.get_locked_port_and_binding(self.context.session,
                                                         port['port']['id'])[1]
            binding['host'] = 'test'
            mech_context = driver_context.PortContext(
                plugin, self.context, port['port'],
                plugin.get_network(self.context, port['port']['network_id']),
                binding, None)
        with mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin.'
                        '_update_port_dict_binding') as update_mock:
            # Updating only the profile must not touch the host.
            attrs = {portbindings.PROFILE: {'e': 5}}
            plugin._process_port_binding(mech_context, attrs)
            self.assertTrue(update_mock.mock_calls)
            self.assertEqual('test', binding.host)

    def test_process_dvr_port_binding_update_router_id(self):
        host_id = 'host'
        binding = models.DVRPortBinding(
            port_id='port_id',
            host=host_id,
            router_id='old_router_id',
            vif_type=portbindings.VIF_TYPE_OVS,
            vnic_type=portbindings.VNIC_NORMAL,
            status=constants.PORT_STATUS_DOWN)
        plugin = manager.NeutronManager.get_plugin()
        mock_network = {'id': 'net_id'}
        mock_port = {'id': 'port_id'}
        context = mock.Mock()
        new_router_id = 'new_router'
        attrs = {'device_id': new_router_id, portbindings.HOST_ID: host_id}
        with mock.patch.object(plugin, '_update_port_dict_binding'):
            with mock.patch.object(ml2_db, 'get_network_segments',
                                   return_value=[]):
                mech_context = driver_context.PortContext(
                    self, context, mock_port, mock_network, binding, None)
                plugin._process_dvr_port_binding(mech_context, context, attrs)
                # device_id becomes the binding's new router id; host kept.
                self.assertEqual(new_router_id,
                                 mech_context._binding.router_id)
                self.assertEqual(host_id, mech_context._binding.host)

    def test_update_dvr_port_binding_on_non_existent_port(self):
        # No DVR binding should be created for a port that does not exist.
        plugin = manager.NeutronManager.get_plugin()
        port = {
            'id': 'foo_port_id',
            'binding:host_id': 'foo_host',
        }
        with mock.patch.object(ml2_db, 'ensure_dvr_port_binding') as mock_dvr:
            plugin.update_dvr_port_binding(
                self.context, 'foo_port_id', {'port': port})
        self.assertFalse(mock_dvr.called)
class TestMl2PortBindingNoSG(TestMl2PortBinding):
    """Re-run the binding tests with security groups disabled."""
    HAS_PORT_FILTER = False
    ENABLE_SG = False
    FIREWALL_DRIVER = test_sg_rpc.FIREWALL_NOOP_DRIVER
class TestMl2PortBindingHost(Ml2PluginV2TestCase,
                             test_bindings.PortBindingsHostTestCaseMixin):
    """Run the shared binding:host_id tests against the ML2 plugin."""
    pass
class TestMl2PortBindingVnicType(Ml2PluginV2TestCase,
                                 test_bindings.PortBindingsVnicTestCaseMixin):
    """Run the shared binding:vnic_type tests against the ML2 plugin."""
    pass
class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
    """Tests for dynamic segment allocation and multi-segment networks."""

    def setUp(self, plugin=None):
        # NOTE(review): 'plugin' is accepted but ignored -- presumably kept
        # for signature compatibility with sibling test cases.
        super(TestMultiSegmentNetworks, self).setUp()
def test_allocate_dynamic_segment(self):
data = {'network': {'name': 'net1',
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
segment = {driver_api.NETWORK_TYPE: 'vlan',
driver_api.PHYSICAL_NETWORK: 'physnet1'}
network_id = network['network']['id']
self.driver.type_manager.allocate_dynamic_segment(
self.context.session, network_id, segment)
dynamic_segment = ml2_db.get_dynamic_segment(self.context.session,
network_id,
'physnet1')
self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE])
self.assertEqual('physnet1',
dynamic_segment[driver_api.PHYSICAL_NETWORK])
self.assertTrue(dynamic_segment[driver_api.SEGMENTATION_ID] > 0)
segment2 = {driver_api.NETWORK_TYPE: 'vlan',
driver_api.SEGMENTATION_ID: 1234,
driver_api.PHYSICAL_NETWORK: 'physnet3'}
self.driver.type_manager.allocate_dynamic_segment(
self.context.session, network_id, segment2)
dynamic_segment = ml2_db.get_dynamic_segment(self.context.session,
network_id,
segmentation_id='1234')
self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE])
self.assertEqual('physnet3',
dynamic_segment[driver_api.PHYSICAL_NETWORK])
self.assertEqual(dynamic_segment[driver_api.SEGMENTATION_ID], 1234)
def test_allocate_dynamic_segment_multiple_physnets(self):
data = {'network': {'name': 'net1',
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
segment = {driver_api.NETWORK_TYPE: 'vlan',
driver_api.PHYSICAL_NETWORK: 'physnet1'}
network_id = network['network']['id']
self.driver.type_manager.allocate_dynamic_segment(
self.context.session, network_id, segment)
dynamic_segment = ml2_db.get_dynamic_segment(self.context.session,
network_id,
'physnet1')
self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE])
self.assertEqual('physnet1',
dynamic_segment[driver_api.PHYSICAL_NETWORK])
dynamic_segmentation_id = dynamic_segment[driver_api.SEGMENTATION_ID]
self.assertTrue(dynamic_segmentation_id > 0)
dynamic_segment1 = ml2_db.get_dynamic_segment(self.context.session,
network_id,
'physnet1')
dynamic_segment1_id = dynamic_segment1[driver_api.SEGMENTATION_ID]
self.assertEqual(dynamic_segmentation_id, dynamic_segment1_id)
segment2 = {driver_api.NETWORK_TYPE: 'vlan',
driver_api.PHYSICAL_NETWORK: 'physnet2'}
self.driver.type_manager.allocate_dynamic_segment(
self.context.session, network_id, segment2)
dynamic_segment2 = ml2_db.get_dynamic_segment(self.context.session,
network_id,
'physnet2')
dynamic_segmentation2_id = dynamic_segment2[driver_api.SEGMENTATION_ID]
self.assertNotEqual(dynamic_segmentation_id, dynamic_segmentation2_id)
    def test_allocate_release_dynamic_segment(self):
        # Allocate a dynamic vlan segment, release it by segment id, and
        # verify the (network, physnet) lookup then returns None.
        data = {'network': {'name': 'net1',
                            'tenant_id': 'tenant_one'}}
        network_req = self.new_create_request('networks', data)
        network = self.deserialize(self.fmt,
                                   network_req.get_response(self.api))
        segment = {driver_api.NETWORK_TYPE: 'vlan',
                   driver_api.PHYSICAL_NETWORK: 'physnet1'}
        network_id = network['network']['id']
        self.driver.type_manager.allocate_dynamic_segment(
            self.context.session, network_id, segment)
        dynamic_segment = ml2_db.get_dynamic_segment(self.context.session,
                                                     network_id,
                                                     'physnet1')
        self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE])
        self.assertEqual('physnet1',
                         dynamic_segment[driver_api.PHYSICAL_NETWORK])
        dynamic_segmentation_id = dynamic_segment[driver_api.SEGMENTATION_ID]
        self.assertTrue(dynamic_segmentation_id > 0)
        self.driver.type_manager.release_dynamic_segment(
            self.context.session, dynamic_segment[driver_api.ID])
        self.assertIsNone(ml2_db.get_dynamic_segment(
            self.context.session, network_id, 'physnet1'))
def test_create_network_provider(self):
data = {'network': {'name': 'net1',
pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1,
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
self.assertEqual('vlan', network['network'][pnet.NETWORK_TYPE])
self.assertEqual('physnet1', network['network'][pnet.PHYSICAL_NETWORK])
self.assertEqual(1, network['network'][pnet.SEGMENTATION_ID])
self.assertNotIn(mpnet.SEGMENTS, network['network'])
def test_create_network_single_multiprovider(self):
data = {'network': {'name': 'net1',
mpnet.SEGMENTS:
[{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1}],
'tenant_id': 'tenant_one'}}
net_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt, net_req.get_response(self.api))
self.assertEqual('vlan', network['network'][pnet.NETWORK_TYPE])
self.assertEqual('physnet1', network['network'][pnet.PHYSICAL_NETWORK])
self.assertEqual(1, network['network'][pnet.SEGMENTATION_ID])
self.assertNotIn(mpnet.SEGMENTS, network['network'])
# Tests get_network()
net_req = self.new_show_request('networks', network['network']['id'])
network = self.deserialize(self.fmt, net_req.get_response(self.api))
self.assertEqual('vlan', network['network'][pnet.NETWORK_TYPE])
self.assertEqual('physnet1', network['network'][pnet.PHYSICAL_NETWORK])
self.assertEqual(1, network['network'][pnet.SEGMENTATION_ID])
self.assertNotIn(mpnet.SEGMENTS, network['network'])
def test_create_network_multiprovider(self):
data = {'network': {'name': 'net1',
mpnet.SEGMENTS:
[{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1},
{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 2}],
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
segments = network['network'][mpnet.SEGMENTS]
for segment_index, segment in enumerate(data['network']
[mpnet.SEGMENTS]):
for field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
pnet.SEGMENTATION_ID]:
self.assertEqual(segment.get(field),
segments[segment_index][field])
# Tests get_network()
net_req = self.new_show_request('networks', network['network']['id'])
network = self.deserialize(self.fmt, net_req.get_response(self.api))
segments = network['network'][mpnet.SEGMENTS]
for segment_index, segment in enumerate(data['network']
[mpnet.SEGMENTS]):
for field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
pnet.SEGMENTATION_ID]:
self.assertEqual(segment.get(field),
segments[segment_index][field])
def test_create_network_with_provider_and_multiprovider_fail(self):
data = {'network': {'name': 'net1',
mpnet.SEGMENTS:
[{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1}],
pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1,
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
res = network_req.get_response(self.api)
self.assertEqual(400, res.status_int)
def test_create_network_duplicate_full_segments(self):
data = {'network': {'name': 'net1',
mpnet.SEGMENTS:
[{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1},
{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1}],
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
res = network_req.get_response(self.api)
self.assertEqual(400, res.status_int)
def test_create_network_duplicate_partial_segments(self):
data = {'network': {'name': 'net1',
mpnet.SEGMENTS:
[{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1'},
{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1'}],
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
res = network_req.get_response(self.api)
self.assertEqual(201, res.status_int)
def test_release_network_segments(self):
data = {'network': {'name': 'net1',
'admin_state_up': True,
'shared': False,
pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1,
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
res = network_req.get_response(self.api)
network = self.deserialize(self.fmt, res)
network_id = network['network']['id']
segment = {driver_api.NETWORK_TYPE: 'vlan',
driver_api.PHYSICAL_NETWORK: 'physnet2'}
self.driver.type_manager.allocate_dynamic_segment(
self.context.session, network_id, segment)
dynamic_segment = ml2_db.get_dynamic_segment(self.context.session,
network_id,
'physnet2')
self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE])
self.assertEqual('physnet2',
dynamic_segment[driver_api.PHYSICAL_NETWORK])
self.assertTrue(dynamic_segment[driver_api.SEGMENTATION_ID] > 0)
with mock.patch.object(type_vlan.VlanTypeDriver,
'release_segment') as rs:
req = self.new_delete_request('networks', network_id)
res = req.get_response(self.api)
self.assertEqual(2, rs.call_count)
self.assertEqual(ml2_db.get_network_segments(
self.context.session, network_id), [])
self.assertIsNone(ml2_db.get_dynamic_segment(
self.context.session, network_id, 'physnet2'))
    def test_release_segment_no_type_driver(self):
        # When a segment's network type has no registered type driver,
        # release_network_segments must log an error rather than raise.
        data = {'network': {'name': 'net1',
                            'admin_state_up': True,
                            'shared': False,
                            pnet.NETWORK_TYPE: 'vlan',
                            pnet.PHYSICAL_NETWORK: 'physnet1',
                            pnet.SEGMENTATION_ID: 1,
                            'tenant_id': 'tenant_one'}}
        network_req = self.new_create_request('networks', data)
        res = network_req.get_response(self.api)
        network = self.deserialize(self.fmt, res)
        network_id = network['network']['id']
        # 'faketype' is deliberately not a registered type driver.
        segment = {driver_api.NETWORK_TYPE: 'faketype',
                   driver_api.PHYSICAL_NETWORK: 'physnet1',
                   driver_api.ID: 1}
        with mock.patch('neutron.plugins.ml2.managers.LOG') as log:
            with mock.patch('neutron.plugins.ml2.managers.db') as db:
                db.get_network_segments.return_value = (segment,)
                self.driver.type_manager.release_network_segments(
                    self.context.session, network_id)
                log.error.assert_called_once_with(
                    "Failed to release segment '%s' because "
                    "network type is not supported.", segment)
def test_create_provider_fail(self):
segment = {pnet.NETWORK_TYPE: None,
pnet.PHYSICAL_NETWORK: 'phys_net',
pnet.SEGMENTATION_ID: None}
with testtools.ExpectedException(exc.InvalidInput):
self.driver.type_manager._process_provider_create(segment)
    def test_create_network_plugin(self):
        # create_network must propagate a MechanismDriverError raised from
        # the mechanism manager's precommit phase.
        # NOTE(review): the exception is constructed with
        # method='create_network_postcommit' although *precommit* is
        # patched -- looks like a slip in the message only; confirm
        # against MechanismDriverError's expected 'method' semantics.
        data = {'network': {'name': 'net1',
                            'admin_state_up': True,
                            'shared': False,
                            pnet.NETWORK_TYPE: 'vlan',
                            pnet.PHYSICAL_NETWORK: 'physnet1',
                            pnet.SEGMENTATION_ID: 1,
                            'tenant_id': 'tenant_one'}}
        def raise_mechanism_exc(*args, **kwargs):
            raise ml2_exc.MechanismDriverError(
                method='create_network_postcommit')
        with mock.patch('neutron.plugins.ml2.managers.MechanismManager.'
                        'create_network_precommit', new=raise_mechanism_exc):
            with testtools.ExpectedException(ml2_exc.MechanismDriverError):
                self.driver.create_network(self.context, data)
def test_extend_dictionary_no_segments(self):
network = dict(name='net_no_segment', id='5', tenant_id='tenant_one')
self.driver.type_manager.extend_network_dict_provider(self.context,
network)
self.assertIsNone(network[pnet.NETWORK_TYPE])
self.assertIsNone(network[pnet.PHYSICAL_NETWORK])
self.assertIsNone(network[pnet.SEGMENTATION_ID])
class TestMl2AllowedAddressPairs(Ml2PluginV2TestCase,
                                 test_pair.TestAllowedAddressPairs):
    """Run the allowed-address-pairs test suite against the ML2 plugin."""
    _extension_drivers = ['port_security']

    def setUp(self, plugin=None):
        # Enable the port_security extension driver before the plugin
        # is initialized by the base setUp.
        config.cfg.CONF.set_override('extension_drivers',
                                     self._extension_drivers,
                                     group='ml2')
        # NOTE(review): super() is anchored at TestAllowedAddressPairs,
        # so setUp continues *past* it in the MRO and injects the ML2
        # plugin name -- presumably deliberate; confirm against the base
        # test class hierarchy.
        super(test_pair.TestAllowedAddressPairs, self).setUp(
            plugin=PLUGIN_NAME)
class DHCPOptsTestCase(test_dhcpopts.TestExtraDhcpOpt):
    """Run the extra-DHCP-options test suite against the ML2 plugin."""
    def setUp(self, plugin=None):
        # NOTE(review): super() is anchored at ExtraDhcpOptDBTestCase so
        # setUp resolution continues past it in the MRO while forcing the
        # ML2 plugin -- confirm this matches the base class hierarchy.
        super(test_dhcpopts.ExtraDhcpOptDBTestCase, self).setUp(
            plugin=PLUGIN_NAME)
class Ml2PluginV2FaultyDriverTestCase(test_plugin.NeutronDbPluginV2TestCase):
    """Base fixture loading the 'test' and 'logger' mechanism drivers."""
    def setUp(self):
        # Enable the test mechanism driver to ensure that
        # we can successfully call through to all mechanism
        # driver apis.
        config.cfg.CONF.set_override('mechanism_drivers',
                                     ['test', 'logger'],
                                     group='ml2')
        super(Ml2PluginV2FaultyDriverTestCase, self).setUp(PLUGIN_NAME)
        # Ports are expected to be created DOWN in these scenarios.
        self.port_create_status = 'DOWN'
class TestFaultyMechansimDriver(Ml2PluginV2FaultyDriverTestCase):
    """Exercise plugin behaviour when a mechanism driver postcommit fails.

    NOTE(review): the class name misspells "Mechanism"; kept as-is since
    renaming would change test discovery/reporting.

    Common pattern: patch one TestMechanismDriver *_postcommit hook to
    raise MechanismDriverError, then verify (a) whether the REST
    operation reports the failure and (b) that the other loaded driver
    (the logger driver) is still invoked where applicable.
    """

    def test_create_network_faulty(self):
        # A create postcommit failure surfaces as a 500 and the network
        # must not be left behind.
        with mock.patch.object(mech_test.TestMechanismDriver,
                               'create_network_postcommit',
                               side_effect=ml2_exc.MechanismDriverError):
            tenant_id = str(uuid.uuid4())
            data = {'network': {'name': 'net1',
                                'tenant_id': tenant_id}}
            req = self.new_create_request('networks', data)
            res = req.get_response(self.api)
            self.assertEqual(500, res.status_int)
            error = self.deserialize(self.fmt, res)
            self.assertEqual('MechanismDriverError',
                             error['NeutronError']['type'])
            query_params = "tenant_id=%s" % tenant_id
            nets = self._list('networks', query_params=query_params)
            self.assertFalse(nets['networks'])

    def test_delete_network_faulty(self):
        # A delete postcommit failure must not block the deletion itself
        # (204) and the remaining drivers are still called.
        with mock.patch.object(mech_test.TestMechanismDriver,
                               'delete_network_postcommit',
                               side_effect=ml2_exc.MechanismDriverError):
            with mock.patch.object(mech_logger.LoggerMechanismDriver,
                                   'delete_network_postcommit') as dnp:
                data = {'network': {'name': 'net1',
                                    'tenant_id': 'tenant_one'}}
                network_req = self.new_create_request('networks', data)
                network_res = network_req.get_response(self.api)
                self.assertEqual(201, network_res.status_int)
                network = self.deserialize(self.fmt, network_res)
                net_id = network['network']['id']
                req = self.new_delete_request('networks', net_id)
                res = req.get_response(self.api)
                self.assertEqual(204, res.status_int)
                # Test if other mechanism driver was called
                self.assertTrue(dnp.called)
                self._show('networks', net_id,
                           expected_code=webob.exc.HTTPNotFound.code)

    def test_update_network_faulty(self):
        # An update postcommit failure reports 500, but the DB change has
        # already been committed: the new name persists.
        with mock.patch.object(mech_test.TestMechanismDriver,
                               'update_network_postcommit',
                               side_effect=ml2_exc.MechanismDriverError):
            with mock.patch.object(mech_logger.LoggerMechanismDriver,
                                   'update_network_postcommit') as unp:
                data = {'network': {'name': 'net1',
                                    'tenant_id': 'tenant_one'}}
                network_req = self.new_create_request('networks', data)
                network_res = network_req.get_response(self.api)
                self.assertEqual(201, network_res.status_int)
                network = self.deserialize(self.fmt, network_res)
                net_id = network['network']['id']
                new_name = 'a_brand_new_name'
                data = {'network': {'name': new_name}}
                req = self.new_update_request('networks', data, net_id)
                res = req.get_response(self.api)
                self.assertEqual(500, res.status_int)
                error = self.deserialize(self.fmt, res)
                self.assertEqual('MechanismDriverError',
                                 error['NeutronError']['type'])
                # Test if other mechanism driver was called
                self.assertTrue(unp.called)
                net = self._show('networks', net_id)
                self.assertEqual(new_name, net['network']['name'])
                self._delete('networks', net_id)

    def test_create_subnet_faulty(self):
        # A create_subnet postcommit failure rolls the subnet back.
        with mock.patch.object(mech_test.TestMechanismDriver,
                               'create_subnet_postcommit',
                               side_effect=ml2_exc.MechanismDriverError):
            with self.network() as network:
                net_id = network['network']['id']
                data = {'subnet': {'network_id': net_id,
                                   'cidr': '10.0.20.0/24',
                                   'ip_version': '4',
                                   'name': 'subnet1',
                                   'tenant_id':
                                   network['network']['tenant_id'],
                                   'gateway_ip': '10.0.20.1'}}
                req = self.new_create_request('subnets', data)
                res = req.get_response(self.api)
                self.assertEqual(500, res.status_int)
                error = self.deserialize(self.fmt, res)
                self.assertEqual('MechanismDriverError',
                                 error['NeutronError']['type'])
                query_params = "network_id=%s" % net_id
                subnets = self._list('subnets', query_params=query_params)
                self.assertFalse(subnets['subnets'])

    def test_delete_subnet_faulty(self):
        # A delete_subnet postcommit failure must not block the deletion;
        # other drivers are still invoked.
        with mock.patch.object(mech_test.TestMechanismDriver,
                               'delete_subnet_postcommit',
                               side_effect=ml2_exc.MechanismDriverError):
            with mock.patch.object(mech_logger.LoggerMechanismDriver,
                                   'delete_subnet_postcommit') as dsp:
                with self.network() as network:
                    data = {'subnet': {'network_id':
                                       network['network']['id'],
                                       'cidr': '10.0.20.0/24',
                                       'ip_version': '4',
                                       'name': 'subnet1',
                                       'tenant_id':
                                       network['network']['tenant_id'],
                                       'gateway_ip': '10.0.20.1'}}
                    subnet_req = self.new_create_request('subnets', data)
                    subnet_res = subnet_req.get_response(self.api)
                    self.assertEqual(201, subnet_res.status_int)
                    subnet = self.deserialize(self.fmt, subnet_res)
                    subnet_id = subnet['subnet']['id']
                    req = self.new_delete_request('subnets', subnet_id)
                    res = req.get_response(self.api)
                    self.assertEqual(204, res.status_int)
                    # Test if other mechanism driver was called
                    self.assertTrue(dsp.called)
                    self._show('subnets', subnet_id,
                               expected_code=webob.exc.HTTPNotFound.code)

    def test_update_subnet_faulty(self):
        # An update_subnet postcommit failure reports 500, but the DB
        # change persists; the subnet must be cleaned up afterwards.
        with mock.patch.object(mech_test.TestMechanismDriver,
                               'update_subnet_postcommit',
                               side_effect=ml2_exc.MechanismDriverError):
            with mock.patch.object(mech_logger.LoggerMechanismDriver,
                                   'update_subnet_postcommit') as usp:
                with self.network() as network:
                    data = {'subnet': {'network_id':
                                       network['network']['id'],
                                       'cidr': '10.0.20.0/24',
                                       'ip_version': '4',
                                       'name': 'subnet1',
                                       'tenant_id':
                                       network['network']['tenant_id'],
                                       'gateway_ip': '10.0.20.1'}}
                    subnet_req = self.new_create_request('subnets', data)
                    subnet_res = subnet_req.get_response(self.api)
                    self.assertEqual(201, subnet_res.status_int)
                    subnet = self.deserialize(self.fmt, subnet_res)
                    subnet_id = subnet['subnet']['id']
                    new_name = 'a_brand_new_name'
                    data = {'subnet': {'name': new_name}}
                    req = self.new_update_request('subnets', data, subnet_id)
                    res = req.get_response(self.api)
                    self.assertEqual(500, res.status_int)
                    error = self.deserialize(self.fmt, res)
                    self.assertEqual('MechanismDriverError',
                                     error['NeutronError']['type'])
                    # Test if other mechanism driver was called
                    self.assertTrue(usp.called)
                    subnet = self._show('subnets', subnet_id)
                    self.assertEqual(new_name, subnet['subnet']['name'])
                    self._delete('subnets', subnet['subnet']['id'])

    def test_create_port_faulty(self):
        # A create_port postcommit failure rolls the port back.
        with mock.patch.object(mech_test.TestMechanismDriver,
                               'create_port_postcommit',
                               side_effect=ml2_exc.MechanismDriverError):
            with self.network() as network:
                net_id = network['network']['id']
                data = {'port': {'network_id': net_id,
                                 'tenant_id':
                                 network['network']['tenant_id'],
                                 'name': 'port1',
                                 'admin_state_up': 1,
                                 'fixed_ips': []}}
                req = self.new_create_request('ports', data)
                res = req.get_response(self.api)
                self.assertEqual(500, res.status_int)
                error = self.deserialize(self.fmt, res)
                self.assertEqual('MechanismDriverError',
                                 error['NeutronError']['type'])
                query_params = "network_id=%s" % net_id
                ports = self._list('ports', query_params=query_params)
                self.assertFalse(ports['ports'])

    def test_update_port_faulty(self):
        # NOTE(review): unlike update_network/update_subnet above, a
        # failing update_port postcommit still yields 200 -- presumably
        # port postcommit errors are tolerated by design; confirm against
        # the ML2 plugin's update_port error handling.
        with mock.patch.object(mech_test.TestMechanismDriver,
                               'update_port_postcommit',
                               side_effect=ml2_exc.MechanismDriverError):
            with mock.patch.object(mech_logger.LoggerMechanismDriver,
                                   'update_port_postcommit') as upp:
                with self.network() as network:
                    data = {'port': {'network_id': network['network']['id'],
                                     'tenant_id':
                                     network['network']['tenant_id'],
                                     'name': 'port1',
                                     'admin_state_up': 1,
                                     'fixed_ips': []}}
                    port_req = self.new_create_request('ports', data)
                    port_res = port_req.get_response(self.api)
                    self.assertEqual(201, port_res.status_int)
                    port = self.deserialize(self.fmt, port_res)
                    port_id = port['port']['id']
                    new_name = 'a_brand_new_name'
                    data = {'port': {'name': new_name}}
                    req = self.new_update_request('ports', data, port_id)
                    res = req.get_response(self.api)
                    self.assertEqual(200, res.status_int)
                    # Test if other mechanism driver was called
                    self.assertTrue(upp.called)
                    port = self._show('ports', port_id)
                    self.assertEqual(new_name, port['port']['name'])
                    self._delete('ports', port['port']['id'])

    def test_update_dvr_router_interface_port(self):
        """Test validate dvr router interface update succeeds."""
        host_id = 'host'
        binding = models.DVRPortBinding(
            port_id='port_id',
            host=host_id,
            router_id='old_router_id',
            vif_type=portbindings.VIF_TYPE_OVS,
            vnic_type=portbindings.VNIC_NORMAL,
            status=constants.PORT_STATUS_DOWN)
        with mock.patch.object(
            mech_test.TestMechanismDriver,
            'update_port_postcommit',
            side_effect=ml2_exc.MechanismDriverError) as port_post,\
            mock.patch.object(
                mech_test.TestMechanismDriver,
                'update_port_precommit') as port_pre,\
            mock.patch.object(ml2_db,
                              'get_dvr_port_bindings') as dvr_bindings:
            dvr_bindings.return_value = [binding]
            port_pre.return_value = True
            with self.network() as network:
                with self.subnet(network=network) as subnet:
                    subnet_id = subnet['subnet']['id']
                    data = {'port': {
                        'network_id': network['network']['id'],
                        'tenant_id':
                        network['network']['tenant_id'],
                        'name': 'port1',
                        'device_owner':
                        'network:router_interface_distributed',
                        'admin_state_up': 1,
                        'fixed_ips':
                        [{'subnet_id': subnet_id}]}}
                    port_req = self.new_create_request('ports', data)
                    port_res = port_req.get_response(self.api)
                    self.assertEqual(201, port_res.status_int)
                    port = self.deserialize(self.fmt, port_res)
                    port_id = port['port']['id']
                    new_name = 'a_brand_new_name'
                    data = {'port': {'name': new_name}}
                    req = self.new_update_request('ports', data, port_id)
                    res = req.get_response(self.api)
                    self.assertEqual(200, res.status_int)
                    # Both pre- and postcommit ran for every DVR binding,
                    # and the update succeeded despite the post failure.
                    self.assertTrue(dvr_bindings.called)
                    self.assertTrue(port_pre.called)
                    self.assertTrue(port_post.called)
                    port = self._show('ports', port_id)
                    self.assertEqual(new_name, port['port']['name'])
class TestMl2PluginCreateUpdateDeletePort(base.BaseTestCase):
    """Verify port CRUD registry notifications fire outside transactions.

    registry.notify is patched so each emitted event re-checks that the
    mocked session transaction was exited as many times as it was
    entered, i.e. no transaction is open when the callback runs.
    """

    def setUp(self):
        super(TestMl2PluginCreateUpdateDeletePort, self).setUp()
        self.context = mock.MagicMock()
        self.notify_p = mock.patch('neutron.callbacks.registry.notify')
        self.notify = self.notify_p.start()
        # Fix: the patcher was never stopped, leaking the patched
        # registry.notify into subsequent tests; undo it on cleanup.
        self.addCleanup(self.notify_p.stop)

    def _ensure_transaction_is_closed(self):
        # Invoked from the notify side effect: every __enter__ on the
        # (mocked) session transaction must have a matching __exit__.
        transaction = self.context.session.begin(subtransactions=True)
        enter_count = transaction.__enter__.call_count
        # Renamed from 'exit' to avoid shadowing the builtin.
        exit_count = transaction.__exit__.call_count
        self.assertEqual(enter_count, exit_count)

    def _create_plugin_for_create_update_port(self, new_host_port):
        # Build an Ml2Plugin with all collaborators mocked; the notify
        # mock asserts transaction closure on every emitted event.
        plugin = ml2_plugin.Ml2Plugin()
        plugin.extension_manager = mock.Mock()
        plugin.type_manager = mock.Mock()
        plugin.mechanism_manager = mock.Mock()
        plugin.notifier = mock.Mock()
        plugin._get_host_port_if_changed = mock.Mock(
            return_value=new_host_port)
        plugin._check_mac_update_allowed = mock.Mock(return_value=True)
        self.notify.side_effect = (
            lambda r, e, t, **kwargs: self._ensure_transaction_is_closed())
        return plugin

    def _test__get_host_port_if_changed(
            self, mech_context, attrs=None, expected_retval=None):
        # Instantiate the plugin with __init__ suppressed so that only
        # _get_host_port_if_changed is exercised.
        with mock.patch.object(ml2_plugin.Ml2Plugin,
                               '__init__',
                               return_value=None):
            plugin = ml2_plugin.Ml2Plugin()
        test_return = plugin._get_host_port_if_changed(mech_context, attrs)
        self.assertEqual(expected_retval, test_return)

    def test__get_host_port_if_changed_no_attrs(self):
        # No update attrs at all -> no host change to report.
        mech_context = mock.Mock()
        mech_context._binding.host = 'Host-1'
        self._test__get_host_port_if_changed(
            mech_context, attrs=None, expected_retval=None)

    def test__get_host_port_if_changed_no_binding_change(self):
        mech_context = mock.Mock()
        mech_context._binding.host = 'Host-1'
        mech_context.current = {
            'id': 'fake-id',
            'mac_address': '2a:2b:2c:2d:2e:2f'
        }
        # Attrs without a host key: not a binding change.
        attrs = {'mac_address': '0a:0b:0c:0d:0e:0f'}
        self._test__get_host_port_if_changed(
            mech_context, attrs=attrs, expected_retval=None)
        # Attrs with the same host: also not a binding change.
        attrs = {
            portbindings.HOST_ID: 'Host-1',
            'mac_address': '0a:0b:0c:0d:0e:0f',
        }
        self._test__get_host_port_if_changed(
            mech_context, attrs=attrs, expected_retval=None)

    def test__get_host_port_if_changed_with_binding_removed(self):
        # Clearing the host id counts as a change: the port is returned.
        expected_return = {
            'id': 'fake-id',
            portbindings.HOST_ID: None,
            'mac_address': '2a:2b:2c:2d:2e:2f'
        }
        mech_context = mock.Mock()
        mech_context._binding.host = 'Host-1'
        mech_context.current = expected_return
        attrs = {portbindings.HOST_ID: None}
        self._test__get_host_port_if_changed(
            mech_context, attrs=attrs, expected_retval=expected_return)

    def test__get_host_port_if_changed_with_binding_added(self):
        # Setting a new host id counts as a change: the port is returned.
        expected_return = {
            'id': 'fake-id',
            portbindings.HOST_ID: 'host-1',
            'mac_address': '2a:2b:2c:2d:2e:2f'
        }
        mech_context = mock.Mock()
        mech_context.current = expected_return
        attrs = {portbindings.HOST_ID: 'host-1'}
        self._test__get_host_port_if_changed(
            mech_context, attrs=attrs, expected_retval=expected_return)

    def test_create_port_rpc_outside_transaction(self):
        with mock.patch.object(ml2_plugin.Ml2Plugin, '__init__') as init,\
                mock.patch.object(base_plugin.NeutronDbPluginV2,
                                  'create_port'):
            init.return_value = None
            new_host_port = mock.Mock()
            plugin = self._create_plugin_for_create_update_port(new_host_port)
            plugin.create_port(self.context, mock.MagicMock())
            # after_create must fire exactly once, outside the txn (the
            # notify side effect checks transaction closure).
            kwargs = {'context': self.context, 'port': new_host_port}
            self.notify.assert_called_once_with('port', 'after_create',
                                                plugin, **kwargs)

    def test_update_port_rpc_outside_transaction(self):
        port_id = 'fake_id'
        net_id = 'mynet'
        original_port_db = models_v2.Port(
            id=port_id,
            tenant_id='tenant',
            network_id=net_id,
            mac_address='08:00:01:02:03:04',
            admin_state_up=True,
            status='ACTIVE',
            device_id='vm_id',
            device_owner='compute:None')
        binding = mock.Mock()
        binding.port_id = port_id
        binding.host = 'vm_host'
        binding.vnic_type = portbindings.VNIC_NORMAL
        binding.profile = ''
        binding.vif_type = ''
        binding.vif_details = ''
        with mock.patch.object(ml2_plugin.Ml2Plugin, '__init__') as init,\
                mock.patch.object(ml2_db, 'get_locked_port_and_binding',
                                  return_value=(original_port_db, binding)),\
                mock.patch.object(base_plugin.NeutronDbPluginV2,
                                  'update_port'):
            init.return_value = None
            new_host_port = mock.Mock()
            plugin = self._create_plugin_for_create_update_port(new_host_port)
            original_port = plugin._make_port_dict(original_port_db)
            plugin.update_port(self.context, port_id, mock.MagicMock())
            # after_update must fire once, outside the txn, and carry the
            # pre-update port snapshot.
            kwargs = {
                'context': self.context,
                'port': new_host_port,
                'mac_address_updated': True,
                'original_port': original_port,
            }
            self.notify.assert_called_once_with('port', 'after_update',
                                                plugin, **kwargs)

    def test_notify_outside_of_delete_transaction(self):
        self.notify.side_effect = (
            lambda r, e, t, **kwargs: self._ensure_transaction_is_closed())
        l3plugin = mock.Mock()
        l3plugin.supported_extension_aliases = [
            'router', constants.L3_AGENT_SCHEDULER_EXT_ALIAS,
            constants.L3_DISTRIBUTED_EXT_ALIAS
        ]
        with mock.patch.object(ml2_plugin.Ml2Plugin,
                               '__init__',
                               return_value=None),\
                mock.patch.object(manager.NeutronManager,
                                  'get_service_plugins',
                                  return_value={'L3_ROUTER_NAT': l3plugin}):
            plugin = self._create_plugin_for_create_update_port(mock.Mock())
            # Set backend manually here since __init__ was mocked
            plugin.set_ipam_backend()
            # deleting the port will call registry.notify, which will
            # run the transaction balancing function defined in this test
            plugin.delete_port(self.context, 'fake_id')
            self.assertTrue(self.notify.call_count)
| apache-2.0 |
lexus24/w16b_test | static/Brython3.1.3-20150514-095342/Lib/fractions.py | 722 | 23203 | # Originally contributed by Sjoerd Mullender.
# Significantly modified by Jeffrey Yasskin <jyasskin at gmail.com>.
"""Fraction, infinite-precision, real numbers."""
from decimal import Decimal
import math
import numbers
import operator
import re
import sys
__all__ = ['Fraction', 'gcd']
def gcd(a, b):
    """Calculate the Greatest Common Divisor of a and b.

    Unless b==0, the result will have the same sign as b (so that when
    b is divided by it, the result comes out positive).
    """
    # Euclid's algorithm, expressed recursively: gcd(a, b) reduces to
    # gcd(b, a mod b) until the second operand reaches zero.
    if b == 0:
        return a
    return gcd(b, a % b)
# Constants related to the hash implementation; hash(x) is based
# on the reduction of x modulo the prime _PyHASH_MODULUS.
_PyHASH_MODULUS = sys.hash_info.modulus
# Value to be used for rationals that reduce to infinity modulo
# _PyHASH_MODULUS.
_PyHASH_INF = sys.hash_info.inf
_RATIONAL_FORMAT = re.compile(r"""
\A\s* # optional whitespace at the start, then
(?P<sign>[-+]?) # an optional sign, then
(?=\d|\.\d) # lookahead for digit or .digit
(?P<num>\d*) # numerator (possibly empty)
(?: # followed by
(?:/(?P<denom>\d+))? # an optional denominator
| # or
(?:\.(?P<decimal>\d*))? # an optional fractional part
(?:E(?P<exp>[-+]?\d+))? # and optional exponent
)
\s*\Z # and optional whitespace to finish
""", re.VERBOSE | re.IGNORECASE)
class Fraction(numbers.Rational):
"""This class implements rational numbers.
In the two-argument form of the constructor, Fraction(8, 6) will
produce a rational number equivalent to 4/3. Both arguments must
be Rational. The numerator defaults to 0 and the denominator
defaults to 1 so that Fraction(3) == 3 and Fraction() == 0.
Fractions can also be constructed from:
- numeric strings similar to those accepted by the
float constructor (for example, '-2.3' or '1e10')
- strings of the form '123/456'
- float and Decimal instances
- other Rational instances (including integers)
"""
__slots__ = ('_numerator', '_denominator')
# We're immutable, so use __new__ not __init__
    def __new__(cls, numerator=0, denominator=None):
        """Constructs a Rational.

        Takes a string like '3/2' or '1.5', another Rational instance, a
        numerator/denominator pair, or a float.

        Examples
        --------
        >>> Fraction(10, -8)
        Fraction(-5, 4)
        >>> Fraction(Fraction(1, 7), 5)
        Fraction(1, 35)
        >>> Fraction(Fraction(1, 7), Fraction(2, 3))
        Fraction(3, 14)
        >>> Fraction('314')
        Fraction(314, 1)
        >>> Fraction('-35/4')
        Fraction(-35, 4)
        >>> Fraction('3.1415') # conversion from numeric string
        Fraction(6283, 2000)
        >>> Fraction('-47e-2') # string may include a decimal exponent
        Fraction(-47, 100)
        >>> Fraction(1.47) # direct construction from float (exact conversion)
        Fraction(6620291452234629, 4503599627370496)
        >>> Fraction(2.25)
        Fraction(9, 4)
        >>> Fraction(Decimal('1.47'))
        Fraction(147, 100)
        """
        self = super(Fraction, cls).__new__(cls)
        if denominator is None:
            # Single-argument forms: Rational, float, Decimal, or string.
            if isinstance(numerator, numbers.Rational):
                self._numerator = numerator.numerator
                self._denominator = numerator.denominator
                return self
            elif isinstance(numerator, float):
                # Exact conversion from float
                value = Fraction.from_float(numerator)
                self._numerator = value._numerator
                self._denominator = value._denominator
                return self
            elif isinstance(numerator, Decimal):
                value = Fraction.from_decimal(numerator)
                self._numerator = value._numerator
                self._denominator = value._denominator
                return self
            elif isinstance(numerator, str):
                # Handle construction from strings.
                m = _RATIONAL_FORMAT.match(numerator)
                if m is None:
                    raise ValueError('Invalid literal for Fraction: %r' %
                                     numerator)
                numerator = int(m.group('num') or '0')
                denom = m.group('denom')
                if denom:
                    denominator = int(denom)
                else:
                    # No explicit denominator: fold the fractional part
                    # and exponent (if any) into numerator/denominator.
                    denominator = 1
                    decimal = m.group('decimal')
                    if decimal:
                        scale = 10**len(decimal)
                        numerator = numerator * scale + int(decimal)
                        denominator *= scale
                    exp = m.group('exp')
                    if exp:
                        exp = int(exp)
                        if exp >= 0:
                            numerator *= 10**exp
                        else:
                            denominator *= 10**-exp
                if m.group('sign') == '-':
                    numerator = -numerator
            else:
                raise TypeError("argument should be a string "
                                "or a Rational instance")
        elif (isinstance(numerator, numbers.Rational) and
            isinstance(denominator, numbers.Rational)):
            # Two Rationals: cross-multiply into a single int/int pair.
            numerator, denominator = (
                numerator.numerator * denominator.denominator,
                denominator.numerator * numerator.denominator
                )
        else:
            raise TypeError("both arguments should be "
                            "Rational instances")
        if denominator == 0:
            raise ZeroDivisionError('Fraction(%s, 0)' % numerator)
        # Normalize: divide through by the gcd so the stored pair is in
        # lowest terms (with the denominator's sign folded by gcd()).
        g = gcd(numerator, denominator)
        self._numerator = numerator // g
        self._denominator = denominator // g
        return self
    @classmethod
    def from_float(cls, f):
        """Converts a finite float to a rational number, exactly.

        Beware that Fraction.from_float(0.3) != Fraction(3, 10).
        """
        # Accept integral numbers directly; reject any other non-float.
        if isinstance(f, numbers.Integral):
            return cls(f)
        elif not isinstance(f, float):
            raise TypeError("%s.from_float() only takes floats, not %r (%s)" %
                            (cls.__name__, f, type(f).__name__))
        # NaN and infinities have no exact rational representation.
        if math.isnan(f):
            raise ValueError("Cannot convert %r to %s." % (f, cls.__name__))
        if math.isinf(f):
            raise OverflowError("Cannot convert %r to %s." % (f, cls.__name__))
        # float.as_integer_ratio() yields the exact (numerator, denominator).
        return cls(*f.as_integer_ratio())
    @classmethod
    def from_decimal(cls, dec):
        """Converts a finite Decimal instance to a rational number, exactly."""
        from decimal import Decimal
        if isinstance(dec, numbers.Integral):
            dec = Decimal(int(dec))
        elif not isinstance(dec, Decimal):
            raise TypeError(
                "%s.from_decimal() only takes Decimals, not %r (%s)" %
                (cls.__name__, dec, type(dec).__name__))
        # Infinities and NaNs have no exact rational representation.
        if dec.is_infinite():
            raise OverflowError(
                "Cannot convert %s to %s." % (dec, cls.__name__))
        if dec.is_nan():
            raise ValueError("Cannot convert %s to %s." % (dec, cls.__name__))
        # Rebuild the value from its (sign, digits, exponent) triple:
        # value == (-1)**sign * digits * 10**exp.
        sign, digits, exp = dec.as_tuple()
        digits = int(''.join(map(str, digits)))
        if sign:
            digits = -digits
        if exp >= 0:
            return cls(digits * 10 ** exp)
        else:
            return cls(digits, 10 ** -exp)
    def limit_denominator(self, max_denominator=1000000):
        """Closest Fraction to self with denominator at most max_denominator.

        >>> Fraction('3.141592653589793').limit_denominator(10)
        Fraction(22, 7)
        >>> Fraction('3.141592653589793').limit_denominator(100)
        Fraction(311, 99)
        >>> Fraction(4321, 8765).limit_denominator(10000)
        Fraction(4321, 8765)
        """
        # Algorithm notes: For any real number x, define a *best upper
        # approximation* to x to be a rational number p/q such that:
        #
        #   (1) p/q >= x, and
        #   (2) if p/q > r/s >= x then s > q, for any rational r/s.
        #
        # Define *best lower approximation* similarly.  Then it can be
        # proved that a rational number is a best upper or lower
        # approximation to x if, and only if, it is a convergent or
        # semiconvergent of the (unique shortest) continued fraction
        # associated to x.
        #
        # To find a best rational approximation with denominator <= M,
        # we find the best upper and lower approximations with
        # denominator <= M and take whichever of these is closer to x.
        # In the event of a tie, the bound with smaller denominator is
        # chosen.  If both denominators are equal (which can happen
        # only when max_denominator == 1 and self is midway between
        # two integers) the lower bound---i.e., the floor of self, is
        # taken.
        if max_denominator < 1:
            raise ValueError("max_denominator should be at least 1")
        if self._denominator <= max_denominator:
            return Fraction(self)
        # Walk the continued-fraction expansion, tracking the last two
        # convergents p0/q0 and p1/q1, until q would exceed the limit.
        p0, q0, p1, q1 = 0, 1, 1, 0
        n, d = self._numerator, self._denominator
        while True:
            a = n//d
            q2 = q0+a*q1
            if q2 > max_denominator:
                break
            p0, q0, p1, q1 = p1, q1, p0+a*p1, q2
            n, d = d, n-a*d
        # k picks the largest semiconvergent still within the limit;
        # bound2 is the last full convergent.
        k = (max_denominator-q0)//q1
        bound1 = Fraction(p0+k*p1, q0+k*q1)
        bound2 = Fraction(p1, q1)
        if abs(bound2 - self) <= abs(bound1-self):
            return bound2
        else:
            return bound1
    @property
    def numerator(a):
        # Read-only reduced numerator ('a' plays the role of self; kept
        # terse to match the arithmetic helpers in this module).
        return a._numerator
    @property
    def denominator(a):
        # Read-only reduced denominator (always positive after __new__'s
        # normalization via gcd).
        return a._denominator
def __repr__(self):
"""repr(self)"""
return ('Fraction(%s, %s)' % (self._numerator, self._denominator))
def __str__(self):
"""str(self)"""
if self._denominator == 1:
return str(self._numerator)
else:
return '%s/%s' % (self._numerator, self._denominator)
    def _operator_fallbacks(monomorphic_operator, fallback_operator):
        """Generates forward and reverse operators given a purely-rational
        operator and a function from the operator module.
        Use this like:
        __op__, __rop__ = _operator_fallbacks(just_rational_op, operator.op)
        In general, we want to implement the arithmetic operations so
        that mixed-mode operations either call an implementation whose
        author knew about the types of both arguments, or convert both
        to the nearest built in type and do the operation there. In
        Fraction, that means that we define __add__ and __radd__ as:
        def __add__(self, other):
            # Both types have numerators/denominator attributes,
            # so do the operation directly
            if isinstance(other, (int, Fraction)):
                return Fraction(self.numerator * other.denominator +
                                other.numerator * self.denominator,
                                self.denominator * other.denominator)
            # float and complex don't have those operations, but we
            # know about those types, so special case them.
            elif isinstance(other, float):
                return float(self) + other
            elif isinstance(other, complex):
                return complex(self) + other
            # Let the other type take over.
            return NotImplemented
        def __radd__(self, other):
            # radd handles more types than add because there's
            # nothing left to fall back to.
            if isinstance(other, numbers.Rational):
                return Fraction(self.numerator * other.denominator +
                                other.numerator * self.denominator,
                                self.denominator * other.denominator)
            elif isinstance(other, Real):
                return float(other) + float(self)
            elif isinstance(other, Complex):
                return complex(other) + complex(self)
            return NotImplemented
        There are 5 different cases for a mixed-type addition on
        Fraction. I'll refer to all of the above code that doesn't
        refer to Fraction, float, or complex as "boilerplate". 'r'
        will be an instance of Fraction, which is a subtype of
        Rational (r : Fraction <: Rational), and b : B <:
        Complex. The first three involve 'r + b':
        1. If B <: Fraction, int, float, or complex, we handle
           that specially, and all is well.
        2. If Fraction falls back to the boilerplate code, and it
           were to return a value from __add__, we'd miss the
           possibility that B defines a more intelligent __radd__,
           so the boilerplate should return NotImplemented from
           __add__. In particular, we don't handle Rational
           here, even though we could get an exact answer, in case
           the other type wants to do something special.
        3. If B <: Fraction, Python tries B.__radd__ before
           Fraction.__add__. This is ok, because it was
           implemented with knowledge of Fraction, so it can
           handle those instances before delegating to Real or
           Complex.
        The next two situations describe 'b + r'. We assume that b
        didn't know about Fraction in its implementation, and that it
        uses similar boilerplate code:
        4. If B <: Rational, then __radd_ converts both to the
           builtin rational type (hey look, that's us) and
           proceeds.
        5. Otherwise, __radd__ tries to find the nearest common
           base ABC, and fall back to its builtin type. Since this
           class doesn't subclass a concrete type, there's no
           implementation to fall back to, so we need to try as
           hard as possible to return an actual value, or the user
           will get a TypeError.
        """
        # Forward operator (e.g. __add__): 'a' is a Fraction; dispatch on
        # the concrete type of 'b' exactly as described in the docstring.
        def forward(a, b):
            if isinstance(b, (int, Fraction)):
                return monomorphic_operator(a, b)
            elif isinstance(b, float):
                return fallback_operator(float(a), b)
            elif isinstance(b, complex):
                return fallback_operator(complex(a), b)
            else:
                # Unknown type: give b.__radd__ (etc.) a chance.
                return NotImplemented
        forward.__name__ = '__' + fallback_operator.__name__ + '__'
        forward.__doc__ = monomorphic_operator.__doc__
        # Reverse operator (e.g. __radd__): 'b' is the Fraction, 'a' is
        # the left operand. Uses ABCs rather than concrete types because
        # there is nothing left to fall back to after this.
        def reverse(b, a):
            if isinstance(a, numbers.Rational):
                # Includes ints.
                return monomorphic_operator(a, b)
            elif isinstance(a, numbers.Real):
                return fallback_operator(float(a), float(b))
            elif isinstance(a, numbers.Complex):
                return fallback_operator(complex(a), complex(b))
            else:
                return NotImplemented
        reverse.__name__ = '__r' + fallback_operator.__name__ + '__'
        reverse.__doc__ = monomorphic_operator.__doc__
        return forward, reverse
def _add(a, b):
"""a + b"""
return Fraction(a.numerator * b.denominator +
b.numerator * a.denominator,
a.denominator * b.denominator)
__add__, __radd__ = _operator_fallbacks(_add, operator.add)
def _sub(a, b):
"""a - b"""
return Fraction(a.numerator * b.denominator -
b.numerator * a.denominator,
a.denominator * b.denominator)
__sub__, __rsub__ = _operator_fallbacks(_sub, operator.sub)
def _mul(a, b):
"""a * b"""
return Fraction(a.numerator * b.numerator, a.denominator * b.denominator)
__mul__, __rmul__ = _operator_fallbacks(_mul, operator.mul)
def _div(a, b):
"""a / b"""
return Fraction(a.numerator * b.denominator,
a.denominator * b.numerator)
__truediv__, __rtruediv__ = _operator_fallbacks(_div, operator.truediv)
    def __floordiv__(a, b):
        """a // b"""
        # Delegate to exact true division (mixed types handled by the
        # operator fallbacks), then floor the result.
        return math.floor(a / b)
    def __rfloordiv__(b, a):
        """a // b"""
        # Reflected form: 'b' is the Fraction, 'a' the left operand.
        return math.floor(a / b)
def __mod__(a, b):
"""a % b"""
div = a // b
return a - b * div
def __rmod__(b, a):
"""a % b"""
div = a // b
return a - b * div
    def __pow__(a, b):
        """a ** b
        If b is not an integer, the result will be a float or complex
        since roots are generally irrational. If b is an integer, the
        result will be rational.
        """
        if isinstance(b, numbers.Rational):
            if b.denominator == 1:
                # Integral exponent: the result is exactly representable.
                power = b.numerator
                if power >= 0:
                    return Fraction(a._numerator ** power,
                                    a._denominator ** power)
                else:
                    # Negative exponent: raise the reciprocal instead.
                    return Fraction(a._denominator ** -power,
                                    a._numerator ** -power)
            else:
                # A fractional power will generally produce an
                # irrational number.
                return float(a) ** float(b)
        else:
            # Non-rational exponent (float, complex, ...): convert the
            # base and let the builtin power take over.
            return float(a) ** b
    def __rpow__(b, a):
        """a ** b  (reflected: 'b' is the Fraction exponent)."""
        if b._denominator == 1 and b._numerator >= 0:
            # If a is an int, keep it that way if possible.
            return a ** b._numerator
        if isinstance(a, numbers.Rational):
            # Promote the base to Fraction and reuse __pow__.
            return Fraction(a.numerator, a.denominator) ** b
        if b._denominator == 1:
            # Negative integral exponent with a non-rational base: keep
            # the exponent an int rather than degrading it to float.
            return a ** b._numerator
        return a ** float(b)
    def __pos__(a):
        """+a: Coerces a subclass instance to Fraction"""
        # Always returns a plain Fraction, even for subclasses.
        return Fraction(a._numerator, a._denominator)
    def __neg__(a):
        """-a"""
        # The sign lives on the numerator; denominator stays positive.
        return Fraction(-a._numerator, a._denominator)
    def __abs__(a):
        """abs(a)"""
        # Denominator is always positive, so abs() only touches the numerator.
        return Fraction(abs(a._numerator), a._denominator)
def __trunc__(a):
"""trunc(a)"""
if a._numerator < 0:
return -(-a._numerator // a._denominator)
else:
return a._numerator // a._denominator
    def __floor__(a):
        """Will be math.floor(a) in 3.0."""
        # Floor division already rounds toward negative infinity.
        return a.numerator // a.denominator
    def __ceil__(a):
        """Will be math.ceil(a) in 3.0."""
        # The negations cleverly convince floordiv to return the ceiling:
        # ceil(x) == -floor(-x).
        return -(-a.numerator // a.denominator)
    def __round__(self, ndigits=None):
        """Will be round(self, ndigits) in 3.0.
        Rounds half toward even.
        """
        if ndigits is None:
            # Round to the nearest integer, ties to even (banker's rounding).
            floor, remainder = divmod(self.numerator, self.denominator)
            if remainder * 2 < self.denominator:
                return floor
            elif remainder * 2 > self.denominator:
                return floor + 1
            # Deal with the half case:
            elif floor % 2 == 0:
                return floor
            else:
                return floor + 1
        # Scale by a power of ten, round to an integer, scale back.
        shift = 10**abs(ndigits)
        # See _operator_fallbacks.forward to check that the results of
        # these operations will always be Fraction and therefore have
        # round().
        if ndigits > 0:
            return Fraction(round(self * shift), shift)
        else:
            return Fraction(round(self / shift) * shift)
    def __hash__(self):
        """hash(self)"""
        # XXX since this method is expensive, consider caching the result
        # In order to make sure that the hash of a Fraction agrees
        # with the hash of a numerically equal integer, float or
        # Decimal instance, we follow the rules for numeric hashes
        # outlined in the documentation. (See library docs, 'Built-in
        # Types').
        # dinv is the inverse of self._denominator modulo the prime
        # _PyHASH_MODULUS, or 0 if self._denominator is divisible by
        # _PyHASH_MODULUS. (Fermat's little theorem: d**(p-2) == d**-1 mod p.)
        dinv = pow(self._denominator, _PyHASH_MODULUS - 2, _PyHASH_MODULUS)
        if not dinv:
            hash_ = _PyHASH_INF
        else:
            hash_ = abs(self._numerator) * dinv % _PyHASH_MODULUS
        result = hash_ if self >= 0 else -hash_
        # -1 is reserved by CPython as an error sentinel; map it to -2.
        return -2 if result == -1 else result
    def __eq__(a, b):
        """a == b"""
        if isinstance(b, numbers.Rational):
            # Both are in lowest terms, so component-wise comparison works.
            return (a._numerator == b.numerator and
                    a._denominator == b.denominator)
        if isinstance(b, numbers.Complex) and b.imag == 0:
            # A complex with no imaginary part compares as its real part.
            b = b.real
        if isinstance(b, float):
            if math.isnan(b) or math.isinf(b):
                # comparisons with an infinity or nan should behave in
                # the same way for any finite a, so treat a as zero.
                return 0.0 == b
            else:
                # Exact comparison: convert the float, don't round self.
                return a == a.from_float(b)
        else:
            # Since a doesn't know how to compare with b, let's give b
            # a chance to compare itself with a.
            return NotImplemented
    def _richcmp(self, other, op):
        """Helper for comparison operators, for internal use only.
        Implement comparison between a Rational instance `self`, and
        either another Rational instance or a float `other`. If
        `other` is not a Rational instance or a float, return
        NotImplemented. `op` should be one of the six standard
        comparison operators.
        """
        # convert other to a Rational instance where reasonable.
        if isinstance(other, numbers.Rational):
            # a/b op c/d  <=>  a*d op b*c  (denominators are positive,
            # so cross-multiplying preserves the comparison direction).
            return op(self._numerator * other.denominator,
                      self._denominator * other.numerator)
        if isinstance(other, float):
            if math.isnan(other) or math.isinf(other):
                # Any finite self compares to inf/nan like 0.0 does.
                return op(0.0, other)
            else:
                # Exact: convert the float to a Fraction, don't round self.
                return op(self, self.from_float(other))
        else:
            return NotImplemented
    # The four ordered comparisons all funnel through _richcmp with the
    # matching function from the operator module.
    def __lt__(a, b):
        """a < b"""
        return a._richcmp(b, operator.lt)
    def __gt__(a, b):
        """a > b"""
        return a._richcmp(b, operator.gt)
    def __le__(a, b):
        """a <= b"""
        return a._richcmp(b, operator.le)
    def __ge__(a, b):
        """a >= b"""
        return a._richcmp(b, operator.ge)
def __bool__(a):
"""a != 0"""
return a._numerator != 0
    # support for pickling, copy, and deepcopy
    def __reduce__(self):
        # Pickle as (class, ('num/den',)): the constructor accepts the
        # string form, so this round-trips subclasses too.
        return (self.__class__, (str(self),))
    def __copy__(self):
        # Only share the instance when it is exactly a Fraction; a
        # subclass might have added mutable state.
        if type(self) == Fraction:
            return self # I'm immutable; therefore I am my own clone
        return self.__class__(self._numerator, self._denominator)
    def __deepcopy__(self, memo):
        # Numerator/denominator are ints (immutable), so no recursion
        # into `memo` is needed.
        if type(self) == Fraction:
            return self # My components are also immutable
        return self.__class__(self._numerator, self._denominator)
| agpl-3.0 |
AlexanderSavelyev/rdkit | Code/GraphMol/SLNParse/Wrap/testSLN.py | 6 | 2617 | # $Id$
#
# Copyright (c) 2008, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Created by Greg Landrum, September 2006
#
from rdkit import Chem
from rdkit.Chem import rdSLNParse
from rdkit import Geometry
from rdkit import RDConfig
import unittest
import os,sys
class TestCase(unittest.TestCase):
  """Regression tests for the rdSLNParse wrappers.

  Uses assertTrue/assertEqual instead of the deprecated failUnless
  alias, which was removed in Python 3.12.
  """

  def setUp(self):
    # Directory holding SLN test-data files (kept for parity with the
    # C++ test suite, even though these cases don't read from it).
    self.dataDir = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol',
                                'SLNParse', 'testData')

  def test1Basics(self):
    # Plain molecules: ethane, then an aromatic six-ring with a closure.
    m1 = rdSLNParse.MolFromSLN('CH3CH3')
    self.assertTrue(m1)
    self.assertEqual(m1.GetNumAtoms(), 2)
    m1 = rdSLNParse.MolFromSLN('C[1]H:CH:CH:CH:CH:CH:@1')
    self.assertTrue(m1)
    self.assertEqual(m1.GetNumAtoms(), 6)

  def test2Queries(self):
    # Query SLN: carbon with two H/C neighbors, any-bonded to oxygen.
    patt = rdSLNParse.MolFromQuerySLN('C[HC=2]~O')
    self.assertTrue(patt)
    self.assertEqual(patt.GetNumAtoms(), 2)
    m = Chem.MolFromSmiles('COCC=O')
    self.assertTrue(m.HasSubstructMatch(patt))
    ms = m.GetSubstructMatches(patt)
    self.assertEqual(len(ms), 1)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
| bsd-3-clause |
ehashman/oh-mainline | vendor/packages/gdata/samples/docs/samplerunner.py | 39 | 1484 | #!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sample running boilerplate."""
__author__ = 'afshar@google.com (Ali Afshar)'
def Run(source_file):
"""Load a source file and run a sample from it."""
source = open(source_file).read()
global_dict = {'__file__': source_file}
exec source in global_dict
samples = [global_dict[k] for k in global_dict if k.endswith('Sample')]
lines = source.splitlines()
for i, sample in enumerate(samples):
print str(i).rjust(2), sample.__name__, '-', sample.__doc__
try:
i = int(raw_input('Select sample: ').strip())
sample = samples[i]
print '-' * 80
print 'def', '%s():' % sample.__name__
# print each line until a blank one (or eof).
for line in lines[sample.func_code.co_firstlineno:]:
if not line:
break
print line
print '-' * 80
sample()
except (ValueError, IndexError):
print 'Bad selection.'
| agpl-3.0 |
Arcanemagus/plexpy | lib/html5lib/treewalkers/etree.py | 658 | 4613 | from __future__ import absolute_import, division, unicode_literals
try:
from collections import OrderedDict
except ImportError:
try:
from ordereddict import OrderedDict
except ImportError:
OrderedDict = dict
import gettext
_ = gettext.gettext
import re
from six import text_type
from . import _base
from ..utils import moduleFactoryFactory
tag_regexp = re.compile("{([^}]*)}(.*)")
def getETreeBuilder(ElementTreeImplementation):
    """Build a TreeWalker class bound to one ElementTree implementation.

    Returns locals() so moduleFactoryFactory can turn the result into a
    per-implementation module.
    """
    ElementTree = ElementTreeImplementation
    # The comment "tag" is the Comment factory function itself; capture
    # it once so comment nodes can be recognized by identity.
    ElementTreeCommentType = ElementTree.Comment("asd").tag
    class TreeWalker(_base.NonRecursiveTreeWalker):
        """Given the particular ElementTree representation, this implementation,
        to avoid using recursion, returns "nodes" as tuples with the following
        content:
        1. The current element
        2. The index of the element relative to its parent
        3. A stack of ancestor elements
        4. A flag "text", "tail" or None to indicate if the current node is a
           text node; either the text or tail of the current element (1)
        """
        def getNodeDetails(self, node):
            if isinstance(node, tuple):  # It might be the root Element
                elt, key, parents, flag = node
                if flag in ("text", "tail"):
                    return _base.TEXT, getattr(elt, flag)
                else:
                    node = elt
            if not(hasattr(node, "tag")):
                # An ElementTree (not Element) was passed in; unwrap it.
                node = node.getroot()
            if node.tag in ("DOCUMENT_ROOT", "DOCUMENT_FRAGMENT"):
                return (_base.DOCUMENT,)
            elif node.tag == "<!DOCTYPE>":
                return (_base.DOCTYPE, node.text,
                        node.get("publicId"), node.get("systemId"))
            elif node.tag == ElementTreeCommentType:
                return _base.COMMENT, node.text
            else:
                assert type(node.tag) == text_type, type(node.tag)
                # This is assumed to be an ordinary element
                match = tag_regexp.match(node.tag)
                if match:
                    namespace, tag = match.groups()
                else:
                    namespace = None
                    tag = node.tag
                attrs = OrderedDict()
                for name, value in list(node.attrib.items()):
                    # Attribute names may also carry a {namespace} prefix.
                    match = tag_regexp.match(name)
                    if match:
                        attrs[(match.group(1), match.group(2))] = value
                    else:
                        attrs[(None, name)] = value
                return (_base.ELEMENT, namespace, tag,
                        attrs, len(node) or node.text)
        def getFirstChild(self, node):
            if isinstance(node, tuple):
                element, key, parents, flag = node
            else:
                element, key, parents, flag = node, None, [], None
            if flag in ("text", "tail"):
                # Text pseudo-nodes have no children.
                return None
            else:
                if element.text:
                    # element.text is the first "child" in document order.
                    return element, key, parents, "text"
                elif len(element):
                    parents.append(element)
                    return element[0], 0, parents, None
                else:
                    return None
        def getNextSibling(self, node):
            if isinstance(node, tuple):
                element, key, parents, flag = node
            else:
                return None
            if flag == "text":
                # After the text comes the first child element, if any.
                if len(element):
                    parents.append(element)
                    return element[0], 0, parents, None
                else:
                    return None
            else:
                # After an element comes its tail text, then the next
                # sibling element in the parent.
                if element.tail and flag != "tail":
                    return element, key, parents, "tail"
                elif key < len(parents[-1]) - 1:
                    return parents[-1][key + 1], key + 1, parents, None
                else:
                    return None
        def getParentNode(self, node):
            if isinstance(node, tuple):
                element, key, parents, flag = node
            else:
                return None
            if flag == "text":
                if not parents:
                    return element
                else:
                    # Re-point at the element itself (drop the text flag).
                    return element, key, parents, None
            else:
                parent = parents.pop()
                if not parents:
                    return parent
                else:
                    return parent, list(parents[-1]).index(parent), parents, None
    return locals()
# Memoizing factory: returns a cached treewalker "module" per
# ElementTree implementation passed in.
getETreeModule = moduleFactoryFactory(getETreeBuilder)
| gpl-3.0 |
ostrokach/biskit | Biskit/difflib_old.py | 1 | 40386 | #! /usr/bin/env python2.2
"""
Older version of difflib. Here due to compability problems.
"""
from __future__ import generators
"""
Module difflib -- helpers for computing deltas between objects.
Function get_close_matches(word, possibilities, n=3, cutoff=0.6):
Use SequenceMatcher to return list of the best "good enough" matches.
Function ndiff(a, b):
Return a delta: the difference between `a` and `b` (lists of strings).
Function restore(delta, which):
Return one of the two sequences that generated an ndiff delta.
Class SequenceMatcher:
A flexible class for comparing pairs of sequences of any type.
Class Differ:
For producing human-readable deltas from sequences of lines of text.
"""
__all__ = ['get_close_matches', 'ndiff', 'restore', 'SequenceMatcher',
'Differ', 'IS_CHARACTER_JUNK', 'IS_LINE_JUNK']
class SequenceMatcher:
"""
SequenceMatcher is a flexible class for comparing pairs of sequences of
any type, so long as the sequence elements are hashable. The basic
algorithm predates, and is a little fancier than, an algorithm
published in the late 1980's by Ratcliff and Obershelp under the
hyperbolic name "gestalt pattern matching". The basic idea is to find
the longest contiguous matching subsequence that contains no "junk"
elements (R-O doesn't address junk). The same idea is then applied
recursively to the pieces of the sequences to the left and to the right
of the matching subsequence. This does not yield minimal edit
sequences, but does tend to yield matches that "look right" to people.
SequenceMatcher tries to compute a "human-friendly diff" between two
sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
longest *contiguous* & junk-free matching subsequence. That's what
catches peoples' eyes. The Windows(tm) windiff has another interesting
notion, pairing up elements that appear uniquely in each sequence.
That, and the method here, appear to yield more intuitive difference
reports than does diff. This method appears to be the least vulnerable
to synching up on blocks of "junk lines", though (like blank lines in
ordinary text files, or maybe "<P>" lines in HTML files). That may be
because this is the only method of the 3 that has a *concept* of
"junk" <wink>.
Example, comparing two strings, and considering blanks to be "junk":
>>> s = SequenceMatcher(lambda x: x == " ",
... "private Thread currentThread;",
... "private volatile Thread currentThread;")
>>>
.ratio() returns a float in [0, 1], measuring the "similarity" of the
sequences. As a rule of thumb, a .ratio() value over 0.6 means the
sequences are close matches:
>>> print round(s.ratio(), 3)
... 0.866
>>>
If you're only interested in where the sequences match,
.get_matching_blocks() is handy:
>>> for block in s.get_matching_blocks():
>>> print "a[%d] and b[%d] match for %d elements" % block
a[0] and b[0] match for 8 elements
a[8] and b[17] match for 6 elements
a[14] and b[23] match for 15 elements
a[29] and b[38] match for 0 elements
>>>
Note that the last tuple returned by .get_matching_blocks() is always a
dummy, (len(a), len(b), 0), and this is the only case in which the last
tuple element (number of elements matched) is 0.
If you want to know how to change the first sequence into the second,
use .get_opcodes():
>>> for opcode in s.get_opcodes():
... print "%6s a[%d:%d] b[%d:%d]" % opcode
equal a[0:8] b[0:8]
insert a[8:8] b[8:17]
equal a[8:14] b[17:23]
equal a[14:29] b[23:38]
See the Differ class for a fancy human-friendly file differencer, which
uses SequenceMatcher both to compare sequences of lines, and to compare
sequences of characters within similar (near-matching) lines.
See also function get_close_matches() in this module, which shows how
simple code building on SequenceMatcher can be used to do useful work.
Timing: Basic R-O is cubic time worst case and quadratic time expected
case. SequenceMatcher is quadratic time for the worst case and has
expected-case behavior dependent in a complicated way on how many
elements the sequences have in common; best case time is linear.
Methods::
__init__(isjunk=None, a='', b='')
Construct a SequenceMatcher.
set_seqs(a, b)
Set the two sequences to be compared.
set_seq1(a)
Set the first sequence to be compared.
set_seq2(b)
Set the second sequence to be compared.
find_longest_match(alo, ahi, blo, bhi)
Find longest matching block in a[alo:ahi] and b[blo:bhi].
get_matching_blocks()
Return list of triples describing matching subsequences.
get_opcodes()
Return list of 5-tuples describing how to turn a into b.
ratio()
Return a measure of the sequences' similarity (float in [0,1]).
quick_ratio()
Return an upper bound on .ratio() relatively quickly.
real_quick_ratio()
Return an upper bound on ratio() very quickly.
"""
    def __init__(self, isjunk=None, a='', b=''):
        """Construct a SequenceMatcher.
        Optional arg isjunk is None (the default), or a one-argument
        function that takes a sequence element and returns true iff the
        element is junk. None is equivalent to passing "lambda x: 0", i.e.
        no elements are considered to be junk. For example, pass
        lambda x: x in " \\t"
        if you're comparing lines as sequences of characters, and don't
        want to synch up on blanks or hard tabs.
        Optional arg a is the first of two sequences to be compared. By
        default, an empty string. The elements of a must be hashable. See
        also .set_seqs() and .set_seq1().
        Optional arg b is the second of two sequences to be compared. By
        default, an empty string. The elements of b must be hashable. See
        also .set_seqs() and .set_seq2().
        """
        # Members:
        # a
        #      first sequence
        # b
        #      second sequence; differences are computed as "what do
        #      we need to do to 'a' to change it into 'b'?"
        # b2j
        #      for x in b, b2j[x] is a list of the indices (into b)
        #      at which x appears; junk elements do not appear
        # b2jhas
        #      b2j.has_key
        # fullbcount
        #      for x in b, fullbcount[x] == the number of times x
        #      appears in b; only materialized if really needed (used
        #      only for computing quick_ratio())
        # matching_blocks
        #      a list of (i, j, k) triples, where a[i:i+k] == b[j:j+k];
        #      ascending & non-overlapping in i and in j; terminated by
        #      a dummy (len(a), len(b), 0) sentinel
        # opcodes
        #      a list of (tag, i1, i2, j1, j2) tuples, where tag is
        #      one of
        #          'replace'   a[i1:i2] should be replaced by b[j1:j2]
        #          'delete'    a[i1:i2] should be deleted
        #          'insert'    b[j1:j2] should be inserted
        #          'equal'     a[i1:i2] == b[j1:j2]
        # isjunk
        #      a user-supplied function taking a sequence element and
        #      returning true iff the element is "junk" -- this has
        #      subtle but helpful effects on the algorithm, which I'll
        #      get around to writing up someday <0.9 wink>.
        #      DON'T USE!  Only __chain_b uses this.  Use isbjunk.
        # isbjunk
        #      for x in b, isbjunk(x) == isjunk(x) but much faster;
        #      it's really the has_key method of a hidden dict.
        #      DOES NOT WORK for x in a!
        self.isjunk = isjunk
        # Sentinels: set_seq1/set_seq2 use identity checks against these.
        self.a = self.b = None
        self.set_seqs(a, b)
def set_seqs(self, a, b):
"""Set the two sequences to be compared.
>>> s = SequenceMatcher()
>>> s.set_seqs("abcd", "bcde")
>>> s.ratio()
0.75
"""
self.set_seq1(a)
self.set_seq2(b)
def set_seq1(self, a):
"""Set the first sequence to be compared.
The second sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq1("bcde")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against
many sequences, use .set_seq2(S) once and call .set_seq1(x)
repeatedly for each of the other sequences.
See also set_seqs() and set_seq2().
"""
if a is self.a:
return
self.a = a
self.matching_blocks = self.opcodes = None
def set_seq2(self, b):
"""Set the second sequence to be compared.
The first sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq2("abcd")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against
many sequences, use .set_seq2(S) once and call .set_seq1(x)
repeatedly for each of the other sequences.
See also set_seqs() and set_seq1().
"""
if b is self.b:
return
self.b = b
self.matching_blocks = self.opcodes = None
self.fullbcount = None
self.__chain_b()
    # For each element x in b, set b2j[x] to a list of the indices in
    # b where x appears; the indices are in increasing order; note that
    # the number of times x appears in b is len(b2j[x]) ...
    # when self.isjunk is defined, junk elements don't show up in this
    # map at all, which stops the central find_longest_match method
    # from starting any matching block at a junk element ...
    # also creates the fast isbjunk function ...
    # note that this is only called when b changes; so for cross-product
    # kinds of matches, it's best to call set_seq2 once, then set_seq1
    # repeatedly
    def __chain_b(self):
        # Because isjunk is a user-defined (not C) function, and we test
        # for junk a LOT, it's important to minimize the number of calls.
        # Before the tricks described here, __chain_b was by far the most
        # time-consuming routine in the whole module!  If anyone sees
        # Jim Roskind, thank him again for profile.py -- I never would
        # have guessed that.
        # The first trick is to build b2j ignoring the possibility
        # of junk.  I.e., we don't call isjunk at all yet.  Throwing
        # out the junk later is much cheaper than building b2j "right"
        # from the start.
        b = self.b
        self.b2j = b2j = {}
        # Bind the bound method once; looked up per element below.
        self.b2jhas = b2jhas = b2j.has_key
        for i in xrange(len(b)):
            elt = b[i]
            if b2jhas(elt):
                b2j[elt].append(i)
            else:
                b2j[elt] = [i]
        # Now b2j.keys() contains elements uniquely, and especially when
        # the sequence is a string, that's usually a good deal smaller
        # than len(string).  The difference is the number of isjunk calls
        # saved.
        isjunk, junkdict = self.isjunk, {}
        if isjunk:
            for elt in b2j.keys():
                if isjunk(elt):
                    junkdict[elt] = 1   # value irrelevant; it's a set
                    del b2j[elt]
        # Now for x in b, isjunk(x) == junkdict.has_key(x), but the
        # latter is much faster.  Note too that while there may be a
        # lot of junk in the sequence, the number of *unique* junk
        # elements is probably small.  So the memory burden of keeping
        # this dict alive is likely trivial compared to the size of b2j.
        self.isbjunk = junkdict.has_key
    def find_longest_match(self, alo, ahi, blo, bhi):
        """
        Find longest matching block in a[alo:ahi] and b[blo:bhi].
        If isjunk is not defined::
          Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
              alo <= i <= i+k <= ahi
              blo <= j <= j+k <= bhi
          and for all (i',j',k') meeting those conditions,
              k >= k'
              i <= i'
              and if i == i', j <= j'
        In other words, of all maximal matching blocks, return one that
        starts earliest in a, and of all those maximal matching blocks that
        start earliest in a, return the one that starts earliest in b.
        >>> s = SequenceMatcher(None, " abcd", "abcd abcd")
        >>> s.find_longest_match(0, 5, 0, 9)
        (0, 4, 5)
        If isjunk is defined, first the longest matching block is
        determined as above, but with the additional restriction that no
        junk element appears in the block.  Then that block is extended as
        far as possible by matching (only) junk elements on both sides.  So
        the resulting block never matches on junk except as identical junk
        happens to be adjacent to an "interesting" match.
        Here's the same example as before, but considering blanks to be
        junk.  That prevents " abcd" from matching the " abcd" at the tail
        end of the second sequence directly.  Instead only the "abcd" can
        match, and matches the leftmost "abcd" in the second sequence:
        >>> s = SequenceMatcher(lambda x: x==" ", " abcd", "abcd abcd")
        >>> s.find_longest_match(0, 5, 0, 9)
        (1, 0, 4)
        If no blocks match, return (alo, blo, 0).
        >>> s = SequenceMatcher(None, "ab", "c")
        >>> s.find_longest_match(0, 2, 0, 1)
        (0, 0, 0)
        """
        # CAUTION:  stripping common prefix or suffix would be incorrect.
        # E.g.,
        #    ab
        #    acab
        # Longest matching block is "ab", but if common prefix is
        # stripped, it's "a" (tied with "b").  UNIX(tm) diff does so
        # strip, so ends up claiming that ab is changed to acab by
        # inserting "ca" in the middle.  That's minimal but unintuitive:
        # "it's obvious" that someone inserted "ac" at the front.
        # Windiff ends up at the same place as diff, but by pairing up
        # the unique 'b's and then matching the first two 'a's.
        a, b, b2j, isbjunk = self.a, self.b, self.b2j, self.isbjunk
        besti, bestj, bestsize = alo, blo, 0
        # find longest junk-free match
        # during an iteration of the loop, j2len[j] = length of longest
        # junk-free match ending with a[i-1] and b[j]
        j2len = {}
        nothing = []
        for i in xrange(alo, ahi):
            # look at all instances of a[i] in b; note that because
            # b2j has no junk keys, the loop is skipped if a[i] is junk
            j2lenget = j2len.get
            newj2len = {}
            for j in b2j.get(a[i], nothing):
                # a[i] matches b[j]
                if j < blo:
                    continue
                if j >= bhi:
                    break
                # Extend the match ending at (i-1, j-1) by one element.
                k = newj2len[j] = j2lenget(j-1, 0) + 1
                if k > bestsize:
                    besti, bestj, bestsize = i-k+1, j-k+1, k
            j2len = newj2len
        # Now that we have a wholly interesting match (albeit possibly
        # empty!), we may as well suck up the matching junk on each
        # side of it too.  Can't think of a good reason not to, and it
        # saves post-processing the (possibly considerable) expense of
        # figuring out what to do with it.  In the case of an empty
        # interesting match, this is clearly the right thing to do,
        # because no other kind of match is possible in the regions.
        while besti > alo and bestj > blo and \
              isbjunk(b[bestj-1]) and \
              a[besti-1] == b[bestj-1]:
            besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
        while besti+bestsize < ahi and bestj+bestsize < bhi and \
              isbjunk(b[bestj+bestsize]) and \
              a[besti+bestsize] == b[bestj+bestsize]:
            bestsize = bestsize + 1
        return besti, bestj, bestsize
    def get_matching_blocks(self):
        """Return list of triples describing matching subsequences.
        Each triple is of the form (i, j, n), and means that
        a[i:i+n] == b[j:j+n].  The triples are monotonically increasing in
        i and in j.
        The last triple is a dummy, (len(a), len(b), 0), and is the only
        triple with n==0.
        >>> s = SequenceMatcher(None, "abxcd", "abcd")
        >>> s.get_matching_blocks()
        [(0, 0, 2), (3, 2, 2), (5, 4, 0)]
        >>>
        """
        # Result is cached; invalidated by set_seq1/set_seq2.
        if self.matching_blocks is not None:
            return self.matching_blocks
        self.matching_blocks = []
        la, lb = len(self.a), len(self.b)
        # Recursively collect matches over the full ranges, then append
        # the (len(a), len(b), 0) sentinel.
        self.__helper(0, la, 0, lb, self.matching_blocks)
        self.matching_blocks.append( (la, lb, 0) )
        return self.matching_blocks
    # builds list of matching blocks covering a[alo:ahi] and
    # b[blo:bhi], appending them in increasing order to answer
    def __helper(self, alo, ahi, blo, bhi, answer):
        # Find the best block, then recurse on the regions to its left
        # and right -- the classic Ratcliff-Obershelp divide and conquer.
        i, j, k = x = self.find_longest_match(alo, ahi, blo, bhi)
        # a[alo:i] vs b[blo:j] unknown
        # a[i:i+k] same as b[j:j+k]
        # a[i+k:ahi] vs b[j+k:bhi] unknown
        if k:
            if alo < i and blo < j:
                self.__helper(alo, i, blo, j, answer)
            answer.append(x)
            if i+k < ahi and j+k < bhi:
                self.__helper(i+k, ahi, j+k, bhi, answer)
    def get_opcodes(self):
        """
        Return list of 5-tuples describing how to turn a into b.
        Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
        has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
        tuple preceding it, and likewise for j1 == the previous j2.
        The tags are strings, with these meanings:
        - 'replace': a[i1:i2] should be replaced by b[j1:j2]
        - 'delete': a[i1:i2] should be deleted.
        Note that j1==j2 in this case.
        - 'insert': b[j1:j2] should be inserted at a[i1:i1].
        Note that i1==i2 in this case.
        - 'equal': a[i1:i2] == b[j1:j2]
        >>> a = "qabxcd"
        >>> b = "abycdf"
        >>> s = SequenceMatcher(None, a, b)
        >>> for tag, i1, i2, j1, j2 in s.get_opcodes():
        ... print ("%7s a[%d:%d] (%s) b[%d:%d] (%s)" %
        ... (tag, i1, i2, a[i1:i2], j1, j2, b[j1:j2]))
        delete a[0:1] (q) b[0:0] ()
        equal a[1:3] (ab) b[0:2] (ab)
        replace a[3:4] (x) b[2:3] (y)
        equal a[4:6] (cd) b[3:5] (cd)
        insert a[6:6] () b[5:6] (f)
        """
        # Return the cached result if available; reset when sequences change.
        if self.opcodes is not None:
            return self.opcodes
        i = j = 0
        self.opcodes = answer = []
        for ai, bj, size in self.get_matching_blocks():
            # invariant: we've pumped out correct diffs to change
            # a[:i] into b[:j], and the next matching block is
            # a[ai:ai+size] == b[bj:bj+size]. So we need to pump
            # out a diff to change a[i:ai] into b[j:bj], pump out
            # the matching block, and move (i,j) beyond the match
            tag = ''
            if i < ai and j < bj:
                tag = 'replace'
            elif i < ai:
                tag = 'delete'
            elif j < bj:
                tag = 'insert'
            if tag:
                answer.append( (tag, i, ai, j, bj) )
            i, j = ai+size, bj+size
            # the list of matching blocks is terminated by a
            # sentinel with size 0
            if size:
                answer.append( ('equal', ai, i, bj, j) )
        return answer
def ratio(self):
"""Return a measure of the sequences' similarity (float in [0,1]).
Where T is the total number of elements in both sequences, and
M is the number of matches, this is 2,0*M / T.
Note that this is 1 if the sequences are identical, and 0 if
they have nothing in common.
.ratio() is expensive to compute if you haven't already computed
.get_matching_blocks() or .get_opcodes(), in which case you may
want to try .quick_ratio() or .real_quick_ratio() first to get an
upper bound.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.quick_ratio()
0.75
>>> s.real_quick_ratio()
1.0
"""
matches = reduce(lambda sum, triple: sum + triple[-1],
self.get_matching_blocks(), 0)
return 2.0 * matches / (len(self.a) + len(self.b))
def quick_ratio(self):
"""Return an upper bound on ratio() relatively quickly.
This isn't defined beyond that it is an upper bound on .ratio(), and
is faster to compute.
"""
# viewing a and b as multisets, set matches to the cardinality
# of their intersection; this counts the number of matches
# without regard to order, so is clearly an upper bound
if self.fullbcount is None:
self.fullbcount = fullbcount = {}
for elt in self.b:
fullbcount[elt] = fullbcount.get(elt, 0) + 1
fullbcount = self.fullbcount
# avail[x] is the number of times x appears in 'b' less the
# number of times we've seen it in 'a' so far ... kinda
avail = {}
availhas, matches = avail.has_key, 0
for elt in self.a:
if availhas(elt):
numb = avail[elt]
else:
numb = fullbcount.get(elt, 0)
avail[elt] = numb - 1
if numb > 0:
matches = matches + 1
return 2.0 * matches / (len(self.a) + len(self.b))
def real_quick_ratio(self):
"""Return an upper bound on ratio() very quickly.
This isn't defined beyond that it is an upper bound on .ratio(), and
is faster to compute than either .ratio() or .quick_ratio().
"""
la, lb = len(self.a), len(self.b)
# can't have more matches than the number of elements in the
# shorter sequence
return 2.0 * min(la, lb) / (la + lb)
def get_close_matches(word, possibilities, n=3, cutoff=0.6):
    """Use SequenceMatcher to return list of the best "good enough" matches.
    word is a sequence for which close matches are desired (typically a
    string).
    possibilities is a list of sequences against which to match word
    (typically a list of strings).
    Optional arg n (default 3) is the maximum number of close matches to
    return. n must be > 0.
    Optional arg cutoff (default 0.6) is a float in [0, 1]. Possibilities
    that don't score at least that similar to word are ignored.
    The best (no more than n) matches among the possibilities are returned
    in a list, sorted by similarity score, most similar first.
    >>> get_close_matches("appel", ["ape", "apple", "peach", "puppy"])
    ['apple', 'ape']
    >>> import keyword as _keyword
    >>> get_close_matches("wheel", _keyword.kwlist)
    ['while']
    >>> get_close_matches("apple", _keyword.kwlist)
    []
    >>> get_close_matches("accept", _keyword.kwlist)
    ['except']
    """
    # %r renders the same text as the Python-2-only backtick-repr syntax
    # did, but remains valid source on Python 3 as well.
    if not n > 0:
        raise ValueError("n must be > 0: %r" % (n,))
    if not 0.0 <= cutoff <= 1.0:
        raise ValueError("cutoff must be in [0.0, 1.0]: %r" % (cutoff,))
    result = []
    s = SequenceMatcher()
    s.set_seq2(word)
    for x in possibilities:
        s.set_seq1(x)
        # Cheap upper bounds first: only pay for the full ratio() when the
        # candidate could still clear the cutoff.
        if s.real_quick_ratio() >= cutoff and \
           s.quick_ratio() >= cutoff and \
           s.ratio() >= cutoff:
            result.append((s.ratio(), x))
    # Sort by score.
    result.sort()
    # Retain only the best n.
    result = result[-n:]
    # Move best-scorer to head of list.
    result.reverse()
    # Strip scores.
    return [x for score, x in result]
def _count_leading(line, ch):
"""
Return number of `ch` characters at the start of `line`.
Example:
>>> _count_leading(' abc', ' ')
3
"""
i, n = 0, len(line)
while i < n and line[i] == ch:
i += 1
return i
class Differ:
    r"""
    Differ is a class for comparing sequences of lines of text, and
    producing human-readable differences or deltas. Differ uses
    SequenceMatcher both to compare sequences of lines, and to compare
    sequences of characters within similar (near-matching) lines.
    Each line of a Differ delta begins with a two-letter code::
    '- ' line unique to sequence 1
    '+ ' line unique to sequence 2
    ' ' line common to both sequences
    '? ' line not present in either input sequence
    Lines beginning with '? ' attempt to guide the eye to intraline
    differences, and were not present in either input sequence. These lines
    can be confusing if the sequences contain tab characters.
    Note that Differ makes no claim to produce a *minimal* diff. To the
    contrary, minimal diffs are often counter-intuitive, because they synch
    up anywhere possible, sometimes accidental matches 100 pages apart.
    Restricting synch points to contiguous matches preserves some notion of
    locality, at the occasional cost of producing a longer diff.
    Example: Comparing two texts.
    First we set up the texts, sequences of individual single-line strings
    ending with newlines (such sequences can also be obtained from the
    `readlines()` method of file-like objects):
    >>> text1 = ''' 1. Beautiful is better than ugly.
    ... 2. Explicit is better than implicit.
    ... 3. Simple is better than complex.
    ... 4. Complex is better than complicated.
    ... '''.splitlines(1)
    >>> len(text1)
    4
    >>> text1[0][-1]
    '\n'
    >>> text2 = ''' 1. Beautiful is better than ugly.
    ... 3. Simple is better than complex.
    ... 4. Complicated is better than complex.
    ... 5. Flat is better than nested.
    ... '''.splitlines(1)
    Next we instantiate a Differ object:
    >>> d = Differ()
    Note that when instantiating a Differ object we may pass functions to
    filter out line and character 'junk'. See Differ.__init__ for details.
    Finally, we compare the two:
    >>> result = list(d.compare(text1, text2))
    'result' is a list of strings, so let's pretty-print it:
    >>> from pprint import pprint as _pprint
    >>> _pprint(result)
    [' 1. Beautiful is better than ugly.\n',
    '- 2. Explicit is better than implicit.\n',
    '- 3. Simple is better than complex.\n',
    '+ 3. Simple is better than complex.\n',
    '? ++\n',
    '- 4. Complex is better than complicated.\n',
    '? ^ ---- ^\n',
    '+ 4. Complicated is better than complex.\n',
    '? ++++ ^ ^\n',
    '+ 5. Flat is better than nested.\n']
    As a single multi-line string it looks like this:
    >>> print ''.join(result),
    1. Beautiful is better than ugly.
    - 2. Explicit is better than implicit.
    - 3. Simple is better than complex.
    + 3. Simple is better than complex.
    ? ++
    - 4. Complex is better than complicated.
    ? ^ ---- ^
    + 4. Complicated is better than complex.
    ? ++++ ^ ^
    + 5. Flat is better than nested.
    Methods::
    __init__(linejunk=None, charjunk=None)
    Construct a text differencer, with optional filters.
    compare(a, b)
    Compare two sequences of lines; generate the resulting delta.
    """
    def __init__(self, linejunk=None, charjunk=None):
        """
        Construct a text differencer, with optional filters.
        The two optional keyword parameters are for filter functions:
        - `linejunk`: A function that should accept a single string argument,
        and return true iff the string is junk. The module-level function
        `IS_LINE_JUNK` may be used to filter out lines without visible
        characters, except for at most one splat ('#').
        - `charjunk`: A function that should accept a string of length 1. The
        module-level function `IS_CHARACTER_JUNK` may be used to filter out
        whitespace characters (a blank or tab; **note**: bad idea to include
        newline in this!).
        """
        # Filters are stored as-is; None means "nothing is junk".
        self.linejunk = linejunk
        self.charjunk = charjunk
    def compare(self, a, b):
        r"""
        Compare two sequences of lines; generate the resulting delta.
        Each sequence must contain individual single-line strings ending with
        newlines. Such sequences can be obtained from the `readlines()` method
        of file-like objects. The delta generated also consists of newline-
        terminated strings, ready to be printed as-is via the writeline()
        method of a file-like object.
        Example:
        >>> print ''.join(Differ().compare('one\ntwo\nthree\n'.splitlines(1),
        ... 'ore\ntree\nemu\n'.splitlines(1))),
        - one
        ? ^
        + ore
        ? ^
        - two
        - three
        ? -
        + tree
        + emu
        """
        # Line-level matching drives the delta; each opcode tag is routed
        # to the generator that renders that region's delta lines.
        cruncher = SequenceMatcher(self.linejunk, a, b)
        for tag, alo, ahi, blo, bhi in cruncher.get_opcodes():
            if tag == 'replace':
                g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
            elif tag == 'delete':
                g = self._dump('-', a, alo, ahi)
            elif tag == 'insert':
                g = self._dump('+', b, blo, bhi)
            elif tag == 'equal':
                g = self._dump(' ', a, alo, ahi)
            else:
                raise ValueError, 'unknown tag ' + `tag`
            for line in g:
                yield line
    def _dump(self, tag, x, lo, hi):
        """Generate comparison results for a same-tagged range."""
        for i in xrange(lo, hi):
            yield '%s %s' % (tag, x[i])
    def _plain_replace(self, a, alo, ahi, b, blo, bhi):
        # Fallback used when no "similar enough" line pair exists: emit all
        # deletions and all insertions as two plain runs.
        assert alo < ahi and blo < bhi
        # dump the shorter block first -- reduces the burden on short-term
        # memory if the blocks are of very different sizes
        if bhi - blo < ahi - alo:
            first = self._dump('+', b, blo, bhi)
            second = self._dump('-', a, alo, ahi)
        else:
            first = self._dump('-', a, alo, ahi)
            second = self._dump('+', b, blo, bhi)
        for g in first, second:
            for line in g:
                yield line
    def _fancy_replace(self, a, alo, ahi, b, blo, bhi):
        r"""
        When replacing one block of lines with another, search the blocks
        for *similar* lines; the best-matching pair (if any) is used as a
        synch point, and intraline difference marking is done on the
        similar pair. Lots of work, but often worth it.
        Example:
        >>> d = Differ()
        >>> d._fancy_replace(['abcDefghiJkl\n'], 0, 1, ['abcdefGhijkl\n'], 0, 1)
        >>> print ''.join(d.results),
        - abcDefghiJkl
        ? ^ ^ ^
        + abcdefGhijkl
        ? ^ ^ ^
        """
        # don't synch up unless the lines have a similarity score of at
        # least cutoff; best_ratio tracks the best score seen so far
        best_ratio, cutoff = 0.74, 0.75
        cruncher = SequenceMatcher(self.charjunk)
        eqi, eqj = None, None # 1st indices of equal lines (if any)
        # search for the pair that matches best without being identical
        # (identical lines must be junk lines, & we don't want to synch up
        # on junk -- unless we have to)
        for j in xrange(blo, bhi):
            bj = b[j]
            cruncher.set_seq2(bj)
            for i in xrange(alo, ahi):
                ai = a[i]
                if ai == bj:
                    if eqi is None:
                        eqi, eqj = i, j
                    continue
                cruncher.set_seq1(ai)
                # computing similarity is expensive, so use the quick
                # upper bounds first -- have seen this speed up messy
                # compares by a factor of 3.
                # note that ratio() is only expensive to compute the first
                # time it's called on a sequence pair; the expensive part
                # of the computation is cached by cruncher
                if cruncher.real_quick_ratio() > best_ratio and \
                   cruncher.quick_ratio() > best_ratio and \
                   cruncher.ratio() > best_ratio:
                    best_ratio, best_i, best_j = cruncher.ratio(), i, j
        if best_ratio < cutoff:
            # no non-identical "pretty close" pair
            if eqi is None:
                # no identical pair either -- treat it as a straight replace
                for line in self._plain_replace(a, alo, ahi, b, blo, bhi):
                    yield line
                return
            # no close pair, but an identical pair -- synch up on that
            best_i, best_j, best_ratio = eqi, eqj, 1.0
        else:
            # there's a close pair, so forget the identical pair (if any)
            eqi = None
        # a[best_i] very similar to b[best_j]; eqi is None iff they're not
        # identical
        # pump out diffs from before the synch point
        for line in self._fancy_helper(a, alo, best_i, b, blo, best_j):
            yield line
        # do intraline marking on the synch pair
        aelt, belt = a[best_i], b[best_j]
        if eqi is None:
            # pump out a '-', '?', '+', '?' quad for the synched lines
            atags = btags = ""
            cruncher.set_seqs(aelt, belt)
            for tag, ai1, ai2, bj1, bj2 in cruncher.get_opcodes():
                la, lb = ai2 - ai1, bj2 - bj1
                if tag == 'replace':
                    atags += '^' * la
                    btags += '^' * lb
                elif tag == 'delete':
                    atags += '-' * la
                elif tag == 'insert':
                    btags += '+' * lb
                elif tag == 'equal':
                    atags += ' ' * la
                    btags += ' ' * lb
                else:
                    raise ValueError, 'unknown tag ' + `tag`
            for line in self._qformat(aelt, belt, atags, btags):
                yield line
        else:
            # the synch pair is identical
            yield ' ' + aelt
        # pump out diffs from after the synch point
        for line in self._fancy_helper(a, best_i+1, ahi, b, best_j+1, bhi):
            yield line
    def _fancy_helper(self, a, alo, ahi, b, blo, bhi):
        # Render the leftover region around a synch point: recurse into
        # _fancy_replace when both sides are non-empty, otherwise dump the
        # one-sided remainder as pure deletions or insertions.
        g = []
        if alo < ahi:
            if blo < bhi:
                g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
            else:
                g = self._dump('-', a, alo, ahi)
        elif blo < bhi:
            g = self._dump('+', b, blo, bhi)
        for line in g:
            yield line
    def _qformat(self, aline, bline, atags, btags):
        r"""
        Format "?" output and deal with leading tabs.
        Example:
        >>> d = Differ()
        >>> d._qformat('\tabcDefghiJkl\n', '\t\tabcdefGhijkl\n',
        ... ' ^ ^ ^ ', '+ ^ ^ ^ ')
        >>> for line in d.results: print repr(line)
        ...
        '- \tabcDefghiJkl\n'
        '? \t ^ ^ ^\n'
        '+ \t\tabcdefGhijkl\n'
        '? \t ^ ^ ^\n'
        """
        # Can hurt, but will probably help most of the time.
        common = min(_count_leading(aline, "\t"),
                     _count_leading(bline, "\t"))
        common = min(common, _count_leading(atags[:common], " "))
        atags = atags[common:].rstrip()
        btags = btags[common:].rstrip()
        yield "- " + aline
        if atags:
            yield "? %s%s\n" % ("\t" * common, atags)
        yield "+ " + bline
        if btags:
            yield "? %s%s\n" % ("\t" * common, btags)
# With respect to junk, an earlier version of ndiff simply refused to
# *start* a match with a junk element. The result was cases like this:
# before: private Thread currentThread;
# after: private volatile Thread currentThread;
# If you consider whitespace to be junk, the longest contiguous match
# not starting with junk is "e Thread currentThread". So ndiff reported
# that "e volatil" was inserted between the 't' and the 'e' in "private".
# While an accurate view, to people that's absurd. The current version
# looks for matching blocks that are entirely junk-free, then extends the
# longest one of those as far as possible but only with matching junk.
# So now "currentThread" is matched, then extended to suck up the
# preceding blank; then "private" is matched, and extended to suck up the
# following blank; then "Thread" is matched; and finally ndiff reports
# that "volatile " was inserted before "Thread". The only quibble
# remaining is that perhaps it was really the case that " volatile"
# was inserted after "private". I can live with that <wink>.
import re
def IS_LINE_JUNK(line, pat=re.compile(r"\s*#?\s*$").match):
    r"""
    Return true iff `line` is ignorable: blank, or whitespace containing at
    most a single '#'.
    For example, '\n' and '  #  \n' are junk, while 'hello\n' is not.
    """
    # `pat` is pre-bound to a compiled pattern's match method at definition
    # time, so the regex is compiled exactly once.
    found = pat(line)
    return found is not None
def IS_CHARACTER_JUNK(ch, ws=" \t"):
    r"""
    Return true iff `ch` is an ignorable character: a space or a tab.
    Newline is deliberately NOT in the default `ws` set, so '\n' and any
    visible character are reported as non-junk.
    """
    return ws.find(ch) != -1
del re
def ndiff(a, b, linejunk=IS_LINE_JUNK, charjunk=IS_CHARACTER_JUNK):
    r"""
    Compare `a` and `b` (lists of strings); return a `Differ`-style delta.
    Optional keyword parameters `linejunk` and `charjunk` are for filter
    functions (or None):
    - linejunk: A function that should accept a single string argument, and
    return true iff the string is junk. The default is module-level function
    IS_LINE_JUNK, which filters out lines without visible characters, except
    for at most one splat ('#').
    - charjunk: A function that should accept a string of length 1. The
    default is module-level function IS_CHARACTER_JUNK, which filters out
    whitespace characters (a blank or tab; note: bad idea to include newline
    in this!).
    Tools/scripts/ndiff.py is a command-line front-end to this function.
    Example:
    >>> diff = ndiff('one\ntwo\nthree\n'.splitlines(1),
    ... 'ore\ntree\nemu\n'.splitlines(1))
    >>> print ''.join(diff),
    - one
    ? ^
    + ore
    ? ^
    - two
    - three
    ? -
    + tree
    + emu
    """
    # Thin convenience wrapper: Differ.compare is a generator, so the
    # delta is produced lazily.
    return Differ(linejunk, charjunk).compare(a, b)
def restore(delta, which):
    r"""
    Generate one of the two sequences that generated a delta.
    Given a `delta` produced by `Differ.compare()` or `ndiff()`, extract
    lines originating from file 1 or 2 (parameter `which`), stripping off line
    prefixes.
    Examples:
    >>> diff = ndiff('one\ntwo\nthree\n'.splitlines(1),
    ... 'ore\ntree\nemu\n'.splitlines(1))
    >>> diff = list(diff)
    >>> print ''.join(restore(diff, 1)),
    one
    two
    three
    >>> print ''.join(restore(diff, 2)),
    ore
    tree
    emu
    """
    # Map 1 -> lines unique to sequence 1 ('- '), 2 -> sequence 2 ('+ ').
    # int() may also raise ValueError for non-numeric input, which
    # propagates to the caller unchanged.
    try:
        tag = {1: "- ", 2: "+ "}[int(which)]
    except KeyError:
        raise ValueError, ('unknown delta choice (must be 1 or 2): %r'
                           % which)
    # Keep lines common to both sequences plus those tagged for `which`;
    # '? ' guide lines are dropped because they match neither prefix.
    prefixes = (" ", tag)
    for line in delta:
        if line[:2] in prefixes:
            yield line[2:]
################
## empty test ##
import Biskit.test as BT
class Test(BT.BiskitTest):
    """Mock test"""
    # Empty placeholder so the Biskit test collector finds a Test class in
    # this module; it intentionally performs no checks.
    pass
def _test():
    # Run every doctest example in the difflib module and return the
    # (failure_count, test_count) result from doctest.testmod.
    import doctest, difflib
    return doctest.testmod(difflib)
if __name__ == "__main__":
    _test()
| gpl-3.0 |
iut-ibk/DynaMind-ToolBox | DynaMind-Performance-Assessment/3rdparty/CD3Waterbalance/Module/helped to develop other modules/Flow_Routing.py | 2 | 4712 | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 02 08:41:08 2014
@author: Acer
"""
import sys
import pycd3
import math
class NodeFactory(pycd3.INodeFactory):
    # Factory exposing one Python node class to the pycd3 node registry;
    # the host application calls createNode() to instantiate it.
    def __init__(self, node):
        pycd3.INodeFactory.__init__(self)
        # `node` is the wrapped pycd3.Node subclass (a class, not an instance).
        self.node = node
        print "NodeFactory.__init__"
    def getNodeName(self):
        # The node's registry name is simply its Python class name.
        print "NodeFactory.getName"
        return self.node.__name__
    def createNode(self):
        print "NodeFactory.createNode"
        n = self.node()
        # __disown__ transfers object ownership to the C++ side so Python's
        # GC does not destroy the node (SWIG convention — confirm).
        n.__disown__()
        print "NodeFactory.disowned"
        return n
    def getSource(self):
        # NOTE(review): reports "Practice.py" although this file is
        # Flow_Routing.py — looks like a stale copy/paste; confirm.
        print "NodeFactory.getSource"
        return "Practice.py"
class Muskingum(pycd3.Node):
    # pycd3 node implementing Muskingum flow routing of rain/inflow over a
    # cascade of equally sized sub-reaches of a rectangular catchment.
    def __init__(self):
        pycd3.Node.__init__(self)
        # Flow objects backing the node's ports.
        self.rain = pycd3.Flow()
        self.runoff = pycd3.Flow()
        self.inflow = pycd3.Flow()
        #dir (self.inf)
        print "init node"
        self.addInPort("rain", self.rain)
        self.addInPort("inflow", self.inflow)
        self.addOutPort("runoff", self.runoff)
        #Catchment area + fraction info of pervious and impervious parts
        self.area_width = pycd3.Double(10)
        self.addParameter("area_width [m]", self.area_width)
        self.area_length = pycd3.Double(100)
        self.addParameter("area_length [m]", self.area_length)
        self.perv_area = pycd3.Double(0.4)
        self.addParameter("perv_area [-]", self.perv_area)
        self.imp_area_stormwater = pycd3.Double(0.4)
        self.addParameter("imp_area_stormwater [-]", self.imp_area_stormwater)
        self.imp_area_raintank = pycd3.Double(1)
        self.addParameter("imp_area_raintank [-]", self.imp_area_raintank)
        #number of subareas for flowconcentration
        self.amount_subareas = pycd3.Double(1)
        self.addParameter("amount_subareas [-]", self.amount_subareas)
        #Muskingum parameters K flowtime for entire catchment
        self.muskingum_veloc = pycd3.Double(0.4)
        self.addParameter("muskingum_vel [m/s]", self.muskingum_veloc)
        self.muskingum_X = pycd3.Double(0.07)
        self.addParameter("muskingum_X [-]", self.muskingum_X)
    def init(self, start, stop, dt):
        # Called once before simulation; precomputes routing coefficients
        # from the parameters and zeroes the per-subreach storages.
        print start
        print stop
        print dt
        #calculation catchment area
        self.area_property = self.area_length * self.area_width
        #calculating the K values for a single subreach
        self.muskingum_K_single_subreach = (self.area_length/self.amount_subareas)/self.muskingum_veloc
        #calculating the Muskingum coefficients
        self.C_x=(dt/2-self.muskingum_K_single_subreach*self.muskingum_X)/(dt/2+self.muskingum_K_single_subreach*(1-self.muskingum_X))
        self.C_y=(1/(dt/2+self.muskingum_K_single_subreach*(1-self.muskingum_X)))
        #preparing the storage coefficients for the stored Volume in each subreach
        self.Q_i_storage_1 = []
        self.Q_i_storage_2 = []
        for i in range(self.amount_subareas):
            self.Q_i_storage_1.append(0)
            self.Q_i_storage_2.append(0)
        return True
    def f(self, current, dt):
        # Per-timestep routing step; returns dt to advance the simulation.
        #dividing are in 'amout_subareas' parts (same size)
        self.subarea_size = self.area_property*self.imp_area_raintank/self.amount_subareas
        #preparing the flow array
        self.Q_i = []
        for i in range(self.amount_subareas):
            self.Q_i.append(0)
        #calculating the flow in for each subreach
        # NOTE(review): this if/else sits AFTER the fill loop, so it only
        # runs once with i == amount_subareas-1; a Muskingum cascade would
        # normally update every subreach per step — confirm whether the
        # block was meant to be inside a loop over the subreaches.
        if i==0:
            self.Q_i[i]=(self.inflow[0]*1000+self.rain[0]*self.subarea_size)*self.C_x+self.Q_i_storage_2[i]*self.C_y
            self.Q_i_storage_2[i]=self.Q_i[i]*(1-self.C_x)*dt+self.Q_i_storage_1[i]*(1-self.C_y*dt)
            self.Q_i_storage_1[i]=self.Q_i_storage_2[i]
        else:
            self.Q_i[i]=(self.Q_i[i-1]+self.rain[0]*self.subarea_size)*self.C_x+self.Q_i_storage_2[i]*self.C_y
            self.Q_i_storage_2[i]=self.Q_i[i]*(1-self.C_x)*dt+self.Q_i_storage_1[i]*(1-self.C_y*dt)
            self.Q_i_storage_1[i]=self.Q_i_storage_2[i]
        #represents the inflow in knot
        self.runoff[0]=self.Q_i[-1] /1000
        return dt
    def getClassName(self):
        #print "getClassName"
        return "Muskingum"
def register(nr):
    # Register every pycd3.Node subclass defined so far with the given node
    # registry so the host application can instantiate them by name.
    for c in pycd3.Node.__subclasses__():
        nf = NodeFactory(c)
        # __disown__ hands the factory's lifetime over to the C++ registry
        # (SWIG ownership convention — confirm).
        nf.__disown__()
        nr.addNodeFactory(nf)
# def test():
# nr = pycd3.NodeRegistry()
# nf = NodeFactory(Household).__disown__()
# nr.addNodeFactory(nf)
# node = nr.createNode("Household")
#test()
| gpl-2.0 |
spezi77/android_kernel_google_msm | tools/perf/scripts/python/syscall-counts.py | 11181 | 1522 | # system call counts
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
# Usage string shown when too many command-line arguments are supplied.
usage = "perf script -s syscall-counts.py [comm]\n";
# Optional command-name filter: when set, only syscalls issued by a task
# with this comm are counted.
for_comm = None
if len(sys.argv) > 2:
	sys.exit(usage)
if len(sys.argv) > 1:
	for_comm = sys.argv[1]
# Maps syscall id -> occurrence count (autodict auto-vivifies entries).
syscalls = autodict()
# Called by perf once before event processing starts.
def trace_begin():
	print "Press control+C to stop and show the summary"
# Called by perf after the last event; prints the accumulated summary.
def trace_end():
	print_syscall_totals()
# Handler perf invokes for every raw_syscalls:sys_enter tracepoint event;
# the signature is dictated by the perf scripting interface.
def raw_syscalls__sys_enter(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	id, args):
	# Skip events from other commands when a [comm] filter was given.
	if for_comm is not None:
		if common_comm != for_comm:
			return
	try:
		syscalls[id] += 1
	except TypeError:
		# First hit for this id: the autodict leaf is not an int yet,
		# so += raises TypeError and we initialize the counter instead.
		syscalls[id] = 1
# Print a table of syscall names and counts, most frequent first.
def print_syscall_totals():
    if for_comm is not None:
	    print "\nsyscall events for %s:\n\n" % (for_comm),
    else:
	    print "\nsyscall events:\n\n",
    print "%-40s %10s\n" % ("event", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
                                 "-----------"),
    # Sort by (count, id) descending; the Python-2-only tuple-parameter
    # lambda unpacks each (id, count) pair from iteritems().
    for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
				  reverse = True):
	    print "%-40s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
switchboardOp/ansible | lib/ansible/modules/cloud/cloudstack/cs_staticnat.py | 18 | 7667 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_staticnat
short_description: Manages static NATs on Apache CloudStack based clouds.
description:
- Create, update and remove static NATs.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
ip_address:
description:
- Public IP address the static NAT is assigned to.
required: true
vm:
description:
- Name of virtual machine which we make the static NAT for.
- Required if C(state=present).
required: false
default: null
vm_guest_ip:
description:
- VM guest NIC secondary IP address for the static NAT.
required: false
default: false
network:
description:
- Network the IP address is related to.
required: false
default: null
version_added: "2.2"
vpc:
description:
- VPC the network related to.
required: false
default: null
version_added: "2.3"
state:
description:
- State of the static NAT.
required: false
default: 'present'
choices: [ 'present', 'absent' ]
domain:
description:
- Domain the static NAT is related to.
required: false
default: null
account:
description:
- Account the static NAT is related to.
required: false
default: null
project:
description:
- Name of the project the static NAT is related to.
required: false
default: null
zone:
description:
- Name of the zone in which the virtual machine is in.
- If not set, default zone is used.
required: false
default: null
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# create a static NAT: 1.2.3.4 -> web01
- local_action:
module: cs_staticnat
ip_address: 1.2.3.4
vm: web01
# remove a static NAT
- local_action:
module: cs_staticnat
ip_address: 1.2.3.4
state: absent
'''
RETURN = '''
---
id:
description: UUID of the ip_address.
returned: success
type: string
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
ip_address:
description: Public IP address.
returned: success
type: string
sample: 1.2.3.4
vm_name:
description: Name of the virtual machine.
returned: success
type: string
sample: web-01
vm_display_name:
description: Display name of the virtual machine.
returned: success
type: string
sample: web-01
vm_guest_ip:
description: IP of the virtual machine.
returned: success
type: string
sample: 10.101.65.152
zone:
description: Name of zone the static NAT is related to.
returned: success
type: string
sample: ch-gva-2
project:
description: Name of project the static NAT is related to.
returned: success
type: string
sample: Production
account:
description: Account the static NAT is related to.
returned: success
type: string
sample: example account
domain:
description: Domain the static NAT is related to.
returned: success
type: string
sample: example domain
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together,
)
class AnsibleCloudStackStaticNat(AnsibleCloudStack):
    """Implements create/update/remove of a CloudStack static NAT rule."""
    def __init__(self, module):
        super(AnsibleCloudStackStaticNat, self).__init__(module)
        # Maps CloudStack API result keys to the module's return value keys.
        self.returns = {
            'virtualmachinedisplayname': 'vm_display_name',
            'virtualmachinename': 'vm_name',
            'ipaddress': 'ip_address',
            'vmipaddress': 'vm_guest_ip',
        }
    def create_static_nat(self, ip_address):
        # Enable static NAT for the VM on the given public IP.  Marks the
        # result changed even in check mode (the API call is skipped then).
        self.result['changed'] = True
        args = {
            'virtualmachineid': self.get_vm(key='id'),
            'ipaddressid': ip_address['id'],
            'vmguestip': self.get_vm_guest_ip(),
            'networkid': self.get_network(key='id')
        }
        if not self.module.check_mode:
            self.query_api('enableStaticNat', **args)
            # reset ip address and query new values
            self.ip_address = None
            ip_address = self.get_ip_address()
        return ip_address
    def update_static_nat(self, ip_address):
        # Re-point an existing static NAT: CloudStack has no "update" call,
        # so the rule is disabled and re-enabled with the new arguments.
        args = {
            'virtualmachineid': self.get_vm(key='id'),
            'ipaddressid': ip_address['id'],
            'vmguestip': self.get_vm_guest_ip(),
            'networkid': self.get_network(key='id')
        }
        # make an alias, so we can use has_changed()
        ip_address['vmguestip'] = ip_address['vmipaddress']
        if self.has_changed(args, ip_address, ['vmguestip', 'virtualmachineid']):
            self.result['changed'] = True
            if not self.module.check_mode:
                # NOTE(review): this path always polls the disable job,
                # while absent_static_nat honors poll_async — confirm
                # whether the asymmetry is intentional.
                res = self.query_api('disableStaticNat', ipaddressid=ip_address['id'])
                self.poll_job(res, 'staticnat')
                self.query_api('enableStaticNat', **args)
                # reset ip address and query new values
                self.ip_address = None
                ip_address = self.get_ip_address()
        return ip_address
    def present_static_nat(self):
        # Ensure the static NAT exists; create it or converge an existing one.
        ip_address = self.get_ip_address()
        if not ip_address['isstaticnat']:
            ip_address = self.create_static_nat(ip_address)
        else:
            ip_address = self.update_static_nat(ip_address)
        return ip_address
    def absent_static_nat(self):
        # Ensure no static NAT is enabled on the IP address.
        ip_address = self.get_ip_address()
        if ip_address['isstaticnat']:
            self.result['changed'] = True
            if not self.module.check_mode:
                res = self.query_api('disableStaticNat', ipaddressid=ip_address['id'])
                poll_async = self.module.params.get('poll_async')
                if poll_async:
                    self.poll_job(res, 'staticnat')
        return ip_address
def main():
    # Build the module's argument spec on top of the shared CloudStack spec.
    argument_spec = cs_argument_spec()
    argument_spec.update(dict(
        ip_address=dict(required=True),
        vm=dict(),
        vm_guest_ip=dict(),
        network=dict(),
        vpc=dict(),
        state=dict(choices=['present', 'absent'], default='present'),
        zone=dict(),
        domain=dict(),
        account=dict(),
        project=dict(),
        poll_async=dict(type='bool', default=True),
    ))
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=cs_required_together(),
        supports_check_mode=True
    )
    # Converge towards the requested state and report the resulting NAT.
    acs_static_nat = AnsibleCloudStackStaticNat(module)
    state = module.params.get('state')
    if state in ['absent']:
        ip_address = acs_static_nat.absent_static_nat()
    else:
        ip_address = acs_static_nat.present_static_nat()
    result = acs_static_nat.get_result(ip_address)
    module.exit_json(**result)
if __name__ == '__main__':
    main()
main()
| gpl-3.0 |
merfii/PythonExercises | renzongxian/0017/0017.py | 40 | 1684 | # Source:https://github.com/Show-Me-the-Code/show-me-the-code
# Author:renzongxian
# Date:2014-12-23
# Python 3.4
"""
第 0017 题: 将 第 0014 题中的 student.xls 文件中的内容写到 student.xml 文件中,如
下所示:
<?xml version="1.0" encoding="UTF-8"?>
<root>
<students>
<!--
学生信息表
"id" : [名字, 数学, 语文, 英文]
-->
{
"1" : ["张三", 150, 120, 100],
"2" : ["李四", 90, 99, 95],
"3" : ["王五", 60, 66, 68]
}
</students>
</root>
"""
import xlrd
from xml.dom import minidom, Node
def open_xls():
    # Read the "student" sheet of student.xls and return a dict mapping the
    # 1-based row number (as a string) to the row's values without the first
    # cell (the id column).  xlrd returns numeric cells as floats, so they
    # are coerced back to int here; int() truncates — assumes the sheet
    # holds whole-number scores only (confirm against the source file).
    excel = xlrd.open_workbook("student.xls")
    student_sheet = excel.sheet_by_name("student")
    sheet_content = {}
    for row in range(student_sheet.nrows):
        row_value = student_sheet.row_values(row)
        for i in range(len(row_value)):
            if type(row_value[i]) == float:
                row_value[i] = int(row_value[i])
        sheet_content[str(row+1)] = row_value[1:]
    return sheet_content
def build_xml(content):
    """Write `content` (dict of student rows) into student.xml.

    The dict's repr is embedded as text inside <root><students>, preceded
    by an explanatory XML comment, matching the layout the exercise asks
    for.  The file is written UTF-8 encoded.
    """
    # Create Dom Object
    doc = minidom.Document()
    # Create root tag
    root = doc.createElement('root')
    doc.appendChild(root)
    # Create 'students' tag
    students = doc.createElement('students')
    root.appendChild(students)
    # Create comment element
    students.appendChild(doc.createComment("学生信息表\"id\" : [名字, 数学, 语文, 英文]"))
    # Create text element
    students.appendChild(doc.createTextNode(str(content)))
    # Write with an explicit UTF-8 encoding — the data contains Chinese
    # text, so relying on the platform default encoding can raise
    # UnicodeEncodeError — and use a context manager so the file handle is
    # closed even if the write fails (the original leaked it on error).
    with open('student.xml', 'w', encoding='utf-8') as student_xml:
        student_xml.write(doc.toprettyxml())
# Script entry point: convert student.xls into student.xml.
if __name__ == '__main__':
    _content = open_xls()
    build_xml(_content)
| mit |
steebchen/youtube-dl | youtube_dl/extractor/chaturbate.py | 33 | 2693 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import ExtractorError
class ChaturbateIE(InfoExtractor):
    # youtube-dl extractor for live Chaturbate rooms; the video id is the
    # room (performer) name taken from the URL path.
    _VALID_URL = r'https?://(?:[^/]+\.)?chaturbate\.com/(?P<id>[^/?#]+)'
    _TESTS = [{
        'url': 'https://www.chaturbate.com/siswet19/',
        'info_dict': {
            'id': 'siswet19',
            'ext': 'mp4',
            'title': 're:^siswet19 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
            'age_limit': 18,
            'is_live': True,
        },
        'params': {
            'skip_download': True,
        },
        'skip': 'Room is offline',
    }, {
        'url': 'https://en.chaturbate.com/siswet19/',
        'only_matching': True,
    }]
    # Error text the site shows for offline rooms.
    _ROOM_OFFLINE = 'Room is currently offline'
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # Collect every quoted .m3u8 URL from the page; for each one also
        # try the variant without the '_fast' suffix, deduplicating.
        m3u8_urls = []
        for m in re.finditer(
                r'(["\'])(?P<url>http.+?\.m3u8.*?)\1', webpage):
            m3u8_fast_url, m3u8_no_fast_url = m.group('url'), m.group(
                'url').replace('_fast', '')
            for m3u8_url in (m3u8_fast_url, m3u8_no_fast_url):
                if m3u8_url not in m3u8_urls:
                    m3u8_urls.append(m3u8_url)
        if not m3u8_urls:
            # No stream found: try to surface the site's own error message
            # (offline notice, ban text, ...) before giving up.
            error = self._search_regex(
                [r'<span[^>]+class=(["\'])desc_span\1[^>]*>(?P<error>[^<]+)</span>',
                 r'<div[^>]+id=(["\'])defchat\1[^>]*>\s*<p><strong>(?P<error>[^<]+)<'],
                webpage, 'error', group='error', default=None)
            if not error:
                if any(p in webpage for p in (
                        self._ROOM_OFFLINE, 'offline_tipping', 'tip_offline')):
                    error = self._ROOM_OFFLINE
            if error:
                raise ExtractorError(error, expected=True)
            raise ExtractorError('Unable to find stream URL')
        formats = []
        for m3u8_url in m3u8_urls:
            m3u8_id = 'fast' if '_fast' in m3u8_url else 'slow'
            formats.extend(self._extract_m3u8_formats(
                m3u8_url, video_id, ext='mp4',
                # ffmpeg skips segments for fast m3u8
                preference=-10 if m3u8_id == 'fast' else None,
                m3u8_id=m3u8_id, fatal=False, live=True))
        self._sort_formats(formats)
        return {
            'id': video_id,
            'title': self._live_title(video_id),
            'thumbnail': 'https://roomimg.stream.highwebmedia.com/ri/%s.jpg' % video_id,
            'age_limit': self._rta_search(webpage),
            'is_live': True,
            'formats': formats,
        }
| unlicense |
kalahbrown/HueBigSQL | apps/filebrowser/src/filebrowser/lib/rwx.py | 34 | 2192 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Utilities for dealing with file modes.
import stat
def filetype(mode):
    """
    Classify a stat mode as "link", "dir", "file" or "unknown".
    @param mode: file mode from "stat" command.
    """
    if stat.S_ISLNK(mode):
        return "link"
    if stat.S_ISDIR(mode):
        return "dir"
    if stat.S_ISREG(mode):
        return "file"
    return "unknown"
def rwxtype(mode):
    """ Returns l/d/-/? for use in "rwx" style strings. """
    # Check in the same order as filetype(): link, dir, regular file.
    for predicate, type_char in ((stat.S_ISLNK, "l"),
                                 (stat.S_ISDIR, "d"),
                                 (stat.S_ISREG, "-")):
        if predicate(mode):
            return type_char
    return "?"
# Permission bits in "rwxrwxrwx" display order -- user r/w/x, group r/w/x,
# other r/w/x -- followed by the sticky bit.
BITS = (stat.S_IRUSR, stat.S_IWUSR, stat.S_IXUSR,
        stat.S_IRGRP, stat.S_IWGRP, stat.S_IXGRP,
        stat.S_IROTH, stat.S_IWOTH, stat.S_IXOTH,
        stat.S_ISVTX)

def expand_mode(mode):
    """Expand a numeric mode into a list of ten booleans, one per BITS entry.

    Returns a real list (the previous ``map(...)`` form yields a lazy
    iterator on Python 3, which breaks callers such as rwx() that slice
    and index the result).
    """
    return [bool(mode & bit) for bit in BITS]

def compress_mode(tup):
    """Inverse of expand_mode: fold a sequence of booleans back into a mode.

    Uses |= rather than += so a repeated bit cannot corrupt the result.
    """
    mode = 0
    for is_set, bit in zip(tup, BITS):
        if is_set:
            mode |= bit
    return mode
def rwx(mode, aclBit=False):
    """
    Returns "rwx"-style string like that ls would give you.
    I couldn't find much extant code along these lines;
    this is similar in spirit to the google-able "pathinfo.py".

    @param mode: numeric file mode (from stat).
    @param aclBit: when True, append '+' to flag an extended ACL.
    """
    # list() so slicing/indexing below also works if expand_mode returns a
    # lazy iterator (``map`` object on Python 3) rather than a list.
    bools = list(expand_mode(mode))
    s = list("rwxrwxrwxt")
    # Replace each unset permission's letter with '-'; the last entry is
    # the sticky bit, handled separately below.
    for (i, v) in enumerate(bools[:-1]):
        if not v:
            s[i] = "-"
    # Sticky bit should either be 't' or no char.
    if not bools[-1]:
        s = s[:-1]
    return rwxtype(mode) + "".join(s) + ('+' if aclBit else '')
| apache-2.0 |
amgowano/oppia | core/storage/user/gae_models_test.py | 6 | 4504 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from core.platform import models
from core.tests import test_utils
(user_models,) = models.Registry.import_models([models.NAMES.user])
class ExplorationUserDataModelTest(test_utils.GenericTestBase):
    """Tests for the ExplorationUserDataModel class."""
    # Fixed timestamp so rated_on / draft-updated comparisons are deterministic.
    DATETIME_OBJECT = datetime.datetime.strptime('2016-02-16', '%Y-%m-%d')
    USER_ID = 'user_id'
    EXP_ID_ONE = 'exp_id_one'
    EXP_ID_TWO = 'exp_id_two'
    def setUp(self):
        # Seed one model instance; the id format is '<user_id>.<exploration_id>'.
        super(ExplorationUserDataModelTest, self).setUp()
        user_models.ExplorationUserDataModel(
            id='%s.%s' % (self.USER_ID, self.EXP_ID_ONE), user_id=self.USER_ID,
            exploration_id=self.EXP_ID_ONE, rating=2,
            rated_on=self.DATETIME_OBJECT,
            draft_change_list={'new_content': {}},
            draft_change_list_last_updated=self.DATETIME_OBJECT,
            draft_change_list_exp_version=3).put()
    def test_create_success(self):
        # create() should store an instance retrievable by '<user_id>.<exp_id>'.
        user_models.ExplorationUserDataModel.create(
            self.USER_ID, self.EXP_ID_TWO).put()
        retrieved_object = user_models.ExplorationUserDataModel.get_by_id(
            '%s.%s' % (self.USER_ID, self.EXP_ID_TWO))
        self.assertEqual(retrieved_object.user_id, self.USER_ID)
        self.assertEqual(retrieved_object.exploration_id, self.EXP_ID_TWO)
    def test_get_success(self):
        # get() should return the instance seeded in setUp with every field
        # round-tripped intact.
        retrieved_object = user_models.ExplorationUserDataModel.get(
            self.USER_ID, self.EXP_ID_ONE)
        self.assertEqual(retrieved_object.user_id, self.USER_ID)
        self.assertEqual(retrieved_object.exploration_id, self.EXP_ID_ONE)
        self.assertEqual(retrieved_object.rating, 2)
        self.assertEqual(retrieved_object.rated_on, self.DATETIME_OBJECT)
        self.assertEqual(
            retrieved_object.draft_change_list, {'new_content': {}})
        self.assertEqual(retrieved_object.draft_change_list_last_updated,
                         self.DATETIME_OBJECT)
        self.assertEqual(retrieved_object.draft_change_list_exp_version, 3)
    def test_get_failure(self):
        # get() should return None (not raise) for an unknown exploration id.
        retrieved_object = user_models.ExplorationUserDataModel.get(
            self.USER_ID, 'unknown_exp_id')
        self.assertEqual(retrieved_object, None)
class UserQueryModelTests(test_utils.GenericTestBase):
    """Tests for UserQueryModel."""
    def test_instance_stores_correct_data(self):
        # Store a query with every filter field set, then verify each field
        # round-trips unchanged.
        submitter_id = 'submitter'
        query_id = 'qid'
        inactive_in_last_n_days = 5
        created_at_least_n_exps = 1
        created_fewer_than_n_exps = 3
        edited_at_least_n_exps = 2
        edited_fewer_than_n_exps = 5
        has_not_logged_in_for_n_days = 10
        user_models.UserQueryModel(
            id=query_id,
            inactive_in_last_n_days=inactive_in_last_n_days,
            created_at_least_n_exps=created_at_least_n_exps,
            created_fewer_than_n_exps=created_fewer_than_n_exps,
            edited_at_least_n_exps=edited_at_least_n_exps,
            edited_fewer_than_n_exps=edited_fewer_than_n_exps,
            has_not_logged_in_for_n_days=has_not_logged_in_for_n_days,
            submitter_id=submitter_id).put()
        query_model = user_models.UserQueryModel.get(query_id)
        self.assertEqual(query_model.submitter_id, submitter_id)
        self.assertEqual(
            query_model.inactive_in_last_n_days, inactive_in_last_n_days)
        self.assertEqual(
            query_model.has_not_logged_in_for_n_days,
            has_not_logged_in_for_n_days)
        self.assertEqual(
            query_model.created_at_least_n_exps, created_at_least_n_exps)
        self.assertEqual(
            query_model.created_fewer_than_n_exps, created_fewer_than_n_exps)
        self.assertEqual(
            query_model.edited_at_least_n_exps, edited_at_least_n_exps)
        self.assertEqual(
            query_model.edited_fewer_than_n_exps, edited_fewer_than_n_exps)
| apache-2.0 |
edevil/django | django/contrib/gis/tests/distapp/models.py | 36 | 1383 | from django.contrib.gis.db import models
from django.contrib.gis.tests.utils import gisfield_may_be_null
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class NamedModel(models.Model):
    """Abstract base for the distapp test models: adds a display name and a
    GeoManager so subclasses support spatial queryset lookups."""
    name = models.CharField(max_length=30)
    objects = models.GeoManager()
    class Meta:
        abstract = True
        app_label = 'distapp'
    def __str__(self):
        return self.name
class SouthTexasCity(NamedModel):
    "City model on projected coordinate system for South Texas."
    point = models.PointField(srid=32140)  # projected SRID 32140
class SouthTexasCityFt(NamedModel):
    "Same City model as above, but U.S. survey feet are the units."
    point = models.PointField(srid=2278)  # like SouthTexasCity, but foot-based units
class AustraliaCity(NamedModel):
    "City model for Australia, using WGS84."
    point = models.PointField()  # default SRID (WGS84 geographic coords)
class CensusZipcode(NamedModel):
    "Model for a few South Texas ZIP codes (in original Census NAD83)."
    poly = models.PolygonField(srid=4269)  # NAD83, per the docstring
class SouthTexasZipcode(NamedModel):
    "Model for a few South Texas ZIP codes."
    # gisfield_may_be_null toggles NULL support per the test backend's capabilities.
    poly = models.PolygonField(srid=32140, null=gisfield_may_be_null)
class Interstate(NamedModel):
    "Geodetic model for U.S. Interstates."
    path = models.LineStringField()  # default SRID (geodetic coords)
class SouthTexasInterstate(NamedModel):
    "Projected model for South Texas Interstates."
    path = models.LineStringField(srid=32140)  # same projected SRID as SouthTexasCity
| bsd-3-clause |
hjy846/scrapy-web | crawler/scripts/stat_residences.py | 1 | 2032 | #! -*- coding: utf-8 -*-
import sys
import time
import urlparse
import os
from datetime import datetime, timedelta
import json
import pymongo
import logging
from scrapy.conf import settings
from collections import defaultdict
reload(sys)
sys.setdefaultencoding('utf-8')
def get_param(sys):
    """Resolve the crawl date from the command line.

    The first argument may be 'today', 'yesterday', or an explicit date
    string; when absent it defaults to 'today'.  Returns a dict of the
    form {'crawl_date': 'YYYY-MM-DD'} (explicit strings pass through
    unchanged).

    @param sys: the sys module, or any object exposing a compatible argv.
    """
    # Determine the requested date keyword (or literal date string).
    crawl_date = sys.argv[1] if len(sys.argv) >= 2 else 'today'
    # Take a single "now" snapshot so today/yesterday stay consistent
    # (the old code called datetime.now() twice).
    now = datetime.now()
    today_str = now.strftime('%Y-%m-%d')
    yesterday_str = (now - timedelta(days=1)).strftime('%Y-%m-%d')
    if crawl_date == 'today':
        crawl_date = today_str
    elif crawl_date == 'yesterday':
        crawl_date = yesterday_str
    elif not crawl_date:
        # An empty argument falls back to today, as before.
        crawl_date = today_str
    return {'crawl_date': crawl_date}
def stat_residence():
    """Count residences per building and print the top 100 (Python 2 script).

    Scans every document in the module-level COLLECTION, groups by the
    building field, then prints '<building> <count>' in descending count
    order followed by the number of distinct buildings.
    """
    result = COLLECTION.find()
    res_dict = defaultdict(int)
    for res in result:
        try:
            building = res['info']['building']
        except Exception as e:
            # Fall back to the first token of building_name when the
            # 'building' key is absent in this document.
            building = res['info']['building_name'].split()[0]
        res_dict[building] += 1
    # Sort by count, descending (negated count as the sort key).
    sort_res_dict = sorted(res_dict.items(), key = lambda x:-x[1])
    for i in sort_res_dict[:100]:
        print i[0], i[1]
    print len(sort_res_dict)
# Module-level setup: resolve the crawl date and open the MongoDB handles
# used by stat_residence().
PARAMS = get_param(sys)
SERVER = settings['MONGODB_SERVER']
PORT = settings['MONGODB_PORT']
DB = settings['MONGODB_DB']
RAW_DB = settings['MONGODB_DB_RAW']
CONNECTION = pymongo.MongoClient(SERVER, PORT)
# NOTE: RAW_DB and DB are rebound here from database *names* (strings) to
# actual database handles on the client.
RAW_DB = CONNECTION[RAW_DB]
DB = CONNECTION[DB]
# Raw collections are named after the crawl date.
RAW_COLLECTION = RAW_DB[PARAMS['crawl_date']]
IMAGE_COLLECTION = RAW_DB[settings['MONGODB_COLLECTION_IMAGE']]
COLLECTION = DB[settings['MONGODB_COLLECTION_ALL_RESIDENCES']]
COLLECTION_NEW_ADD = DB[settings['MONGODB_COLLECTION_NEW_ADD']]
COLLECTION_RESIDENCE_NUM_BY_DAY = DB[settings['MONGODB_COLLECTION_RESIDENCE_NUM_BY_DAY']]
if __name__ == '__main__':
    stat_residence()
| gpl-3.0 |
pnorman/mapnik | scons/scons-local-2.4.1/SCons/Defaults.py | 6 | 18809 | """SCons.Defaults
Builders and other things for the local site. Here's where we'll
duplicate the functionality of autoconf until we move it into the
installation procedure or use something like qmconf.
The code that reads the registry to find MSVC components was borrowed
from distutils.msvccompiler.
"""
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import division
__revision__ = "src/engine/SCons/Defaults.py rel_2.4.1:3453:73fefd3ea0b0 2015/11/09 03:25:05 bdbaddog"
import os
import errno
import shutil
import stat
import time
import sys
import SCons.Action
import SCons.Builder
import SCons.CacheDir
import SCons.Environment
import SCons.PathList
import SCons.Subst
import SCons.Tool
# A placeholder for a default Environment (for fetching source files
# from source code management systems and the like). This must be
# initialized later, after the top-level directory is set by the calling
# interface.
_default_env = None
# Lazily instantiate the default environment so the overhead of creating
# it doesn't apply when it's not needed.
def _fetch_DefaultEnvironment(*args, **kw):
    """
    Returns the already-created default construction environment.

    Accepts (and ignores) arbitrary arguments so it can transparently
    replace DefaultEnvironment() once the environment exists.
    """
    global _default_env
    return _default_env
def DefaultEnvironment(*args, **kw):
    """
    Initial public entry point for creating the default construction
    Environment.
    After creating the environment, we overwrite our name
    (DefaultEnvironment) with the _fetch_DefaultEnvironment() function,
    which more efficiently returns the initialized default construction
    environment without checking for its existence.
    (This function still exists with its _default_check because someone
    else (*cough* Script/__init__.py *cough*) may keep a reference
    to this function. So we can't use the fully functional idiom of
    having the name originally be a something that *only* creates the
    construction environment and then overwrites the name.)
    """
    global _default_env
    if not _default_env:
        import SCons.Util
        _default_env = SCons.Environment.Environment(*args, **kw)
        # Prefer content (MD5) signatures when an md5 implementation is
        # available; otherwise fall back to timestamp comparison.
        if SCons.Util.md5:
            _default_env.Decider('MD5')
        else:
            _default_env.Decider('timestamp-match')
        global DefaultEnvironment
        DefaultEnvironment = _fetch_DefaultEnvironment
        # The default environment never uses a derived-file cache.
        _default_env._CacheDir_path = None
    return _default_env
# Emitters for setting the shared attribute on object files,
# and an action for checking that all of the source files
# going into a shared library are, in fact, shared.
def StaticObjectEmitter(target, source, env):
    """Mark every target node as non-shared (attributes.shared = None)
    and pass the (target, source) pair through unchanged."""
    for node in target:
        node.attributes.shared = None
    return (target, source)
def SharedObjectEmitter(target, source, env):
    """Mark every target node as shared (attributes.shared = 1)
    and pass the (target, source) pair through unchanged."""
    for node in target:
        node.attributes.shared = 1
    return (target, source)
def SharedFlagChecker(source, target, env):
    """Pre-link check: unless $STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME is
    set, refuse to link any object not built as shared into a shared
    target (nodes without a .shared attribute count as static)."""
    same = env.subst('$STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME')
    if same == '0' or same == '' or same == 'False':
        for src in source:
            try:
                shared = src.attributes.shared
            except AttributeError:
                shared = None
            if not shared:
                # NOTE(review): SCons.Errors is not imported at the top of
                # this module -- presumably made reachable by the other
                # SCons imports; verify before relying on it.
                raise SCons.Errors.UserError("Source file: %s is static and is not compatible with shared target: %s" % (src, target[0]))
SharedCheck = SCons.Action.Action(SharedFlagChecker, None)
# Some people were using these variable name before we made
# SourceFileScanner part of the public interface. Don't break their
# SConscript files until we've given them some fair warning and a
# transition period.
CScan = SCons.Tool.CScanner
DScan = SCons.Tool.DScanner
LaTeXScan = SCons.Tool.LaTeXScanner
ObjSourceScan = SCons.Tool.SourceFileScanner
ProgScan = SCons.Tool.ProgramScanner
# These aren't really tool scanners, so they don't quite belong with
# the rest of those in Tool/__init__.py, but I'm not sure where else
# they should go. Leave them here for now.
import SCons.Scanner.Dir
DirScanner = SCons.Scanner.Dir.DirScanner()
DirEntryScanner = SCons.Scanner.Dir.DirEntryScanner()
# Actions for common languages.
# Each Action pairs a command-line variable (e.g. $CCCOM) with its optional
# display-string variable (e.g. $CCCOMSTR) shown during the build.
CAction = SCons.Action.Action("$CCCOM", "$CCCOMSTR")
ShCAction = SCons.Action.Action("$SHCCCOM", "$SHCCCOMSTR")
CXXAction = SCons.Action.Action("$CXXCOM", "$CXXCOMSTR")
ShCXXAction = SCons.Action.Action("$SHCXXCOM", "$SHCXXCOMSTR")
DAction = SCons.Action.Action("$DCOM", "$DCOMSTR")
ShDAction = SCons.Action.Action("$SHDCOM", "$SHDCOMSTR")
ASAction = SCons.Action.Action("$ASCOM", "$ASCOMSTR")
ASPPAction = SCons.Action.Action("$ASPPCOM", "$ASPPCOMSTR")
LinkAction = SCons.Action.Action("$LINKCOM", "$LINKCOMSTR")
ShLinkAction = SCons.Action.Action("$SHLINKCOM", "$SHLINKCOMSTR")
LdModuleLinkAction = SCons.Action.Action("$LDMODULECOM", "$LDMODULECOMSTR")
# Common tasks that we allow users to perform in platform-independent
# ways by creating ActionFactory instances.
ActionFactory = SCons.Action.ActionFactory
def get_paths_str(dest):
    """Return a printable form of *dest*: a quoted path, or -- when *dest*
    is a list -- a bracketed, comma-separated list of quoted paths."""
    # If dest is a list, we need to manually call str() on each element
    if SCons.Util.is_List(dest):
        quoted = ['"' + str(element) + '"' for element in dest]
        return '[' + ', '.join(quoted) + ']'
    return '"' + str(dest) + '"'
def chmod_func(dest, mode):
    """Apply numeric *mode* to each path in *dest* (a single node or a list),
    invalidating SCons' cached node info for the destination first."""
    SCons.Node.FS.invalidate_node_memos(dest)
    if not SCons.Util.is_List(dest):
        dest = [dest]
    for element in dest:
        os.chmod(str(element), mode)
def chmod_strfunc(dest, mode):
    """Build-output display form of the Chmod action (mode shown in octal)."""
    return 'Chmod(%s, 0%o)' % (get_paths_str(dest), mode)
Chmod = ActionFactory(chmod_func, chmod_strfunc)
def copy_func(dest, src, symlinks=True):
    """
    If symlinks (is true), then a symbolic link will be
    shallow copied and recreated as a symbolic link; otherwise, copying
    a symbolic link will be equivalent to copying the symbolic link's
    final target regardless of symbolic link depth.
    """
    dest = str(dest)
    src = str(src)
    SCons.Node.FS.invalidate_node_memos(dest)
    if SCons.Util.is_List(src) and os.path.isdir(dest):
        # A list of files copied into an existing directory.
        for file in src:
            shutil.copy2(file, dest)
        return 0
    elif os.path.islink(src):
        if symlinks:
            # Recreate the link itself rather than copying its target.
            return os.symlink(os.readlink(src), dest)
        else:
            return copy_func(dest, os.path.realpath(src))
    elif os.path.isfile(src):
        return shutil.copy2(src, dest)
    else:
        # Anything else (a directory): recursive tree copy.
        return shutil.copytree(src, dest, symlinks)
Copy = ActionFactory(
    copy_func,
    lambda dest, src, symlinks=True: 'Copy("%s", "%s")' % (dest, src)
)
def delete_func(dest, must_exist=0):
    """Remove each entry in *dest* (file, link, or directory tree).
    Missing entries are silently skipped unless must_exist is true."""
    SCons.Node.FS.invalidate_node_memos(dest)
    if not SCons.Util.is_List(dest):
        dest = [dest]
    for entry in dest:
        entry = str(entry)
        # os.path.exists returns False with broken links that exist
        entry_exists = os.path.exists(entry) or os.path.islink(entry)
        if not entry_exists and not must_exist:
            continue
        # os.path.isdir returns True when entry is a link to a dir
        if os.path.isdir(entry) and not os.path.islink(entry):
            shutil.rmtree(entry, 1)
            continue
        os.unlink(entry)
def delete_strfunc(dest, must_exist=0):
    """Build-output display form of the Delete action."""
    return 'Delete(%s)' % get_paths_str(dest)
Delete = ActionFactory(delete_func, delete_strfunc)
def mkdir_func(dest):
    """Create each directory in *dest* (including parents); an
    already-existing directory is not an error.  Python 2 syntax."""
    SCons.Node.FS.invalidate_node_memos(dest)
    if not SCons.Util.is_List(dest):
        dest = [dest]
    for entry in dest:
        try:
            os.makedirs(str(entry))
        except os.error, e:
            # NOTE(review): 'p' is assigned but never used.
            p = str(entry)
            # errno 183 is Windows' ERROR_ALREADY_EXISTS.
            if (e.args[0] == errno.EEXIST or
                (sys.platform=='win32' and e.args[0]==183)) \
                and os.path.isdir(str(entry)):
                pass # not an error if already exists
            else:
                raise
Mkdir = ActionFactory(mkdir_func,
                      lambda dir: 'Mkdir(%s)' % get_paths_str(dir))
def move_func(dest, src):
    """Move *src* to *dest*, invalidating cached node info for both."""
    SCons.Node.FS.invalidate_node_memos(dest)
    SCons.Node.FS.invalidate_node_memos(src)
    shutil.move(src, dest)
Move = ActionFactory(move_func,
                     lambda dest, src: 'Move("%s", "%s")' % (dest, src),
                     convert=str)
def touch_func(dest):
    """Update the access/modification times of every file in *dest*
    (single node or list), creating any file that does not yet exist --
    like the touch(1) utility.
    """
    SCons.Node.FS.invalidate_node_memos(dest)
    if not SCons.Util.is_List(dest):
        dest = [dest]
    for entry in dest:
        # Avoid shadowing the (Python 2) builtin 'file'.
        path = str(entry)
        mtime = int(time.time())
        if os.path.exists(path):
            atime = os.path.getatime(path)
        else:
            # Create the file and close it immediately; the previous code
            # leaked the open file handle (relying on GC to close it).
            open(path, 'w').close()
            atime = mtime
        os.utime(path, (atime, mtime))
Touch = ActionFactory(touch_func,
                      lambda file: 'Touch(%s)' % get_paths_str(file))
# Internal utility functions
def _concat(prefix, list, suffix, env, f=lambda x: x, target=None, source=None):
    """
    Creates a new list from 'list' by first interpolating each element
    in the list using the 'env' dictionary and then calling f on the
    list, and finally calling _concat_ixes to concatenate 'prefix' and
    'suffix' onto each element of the list.
    """
    # NOTE(review): 'list' and 'l' shadow builtins; kept as-is because the
    # parameter names are part of this long-standing public signature.
    if not list:
        return list
    # Substitute construction variables / path names, then let the caller's
    # transform f (default: identity) post-process the result.
    l = f(SCons.PathList.PathList(list).subst_path(env, target, source))
    if l is not None:
        list = l
    return _concat_ixes(prefix, list, suffix, env)
def _concat_ixes(prefix, list, suffix, env):
    """
    Creates a new list from 'list' by concatenating the 'prefix' and
    'suffix' arguments onto each element of the list.  A trailing space
    on 'prefix' or leading space on 'suffix' will cause them to be put
    into separate list elements rather than being concatenated.
    """
    result = []
    # ensure that prefix and suffix are strings
    prefix = str(env.subst(prefix, SCons.Subst.SUBST_RAW))
    suffix = str(env.subst(suffix, SCons.Subst.SUBST_RAW))
    for x in list:
        # File nodes pass through untouched (no prefix/suffix applied).
        if isinstance(x, SCons.Node.FS.File):
            result.append(x)
            continue
        x = str(x)
        if x:
            if prefix:
                if prefix[-1] == ' ':
                    # Trailing space: emit the prefix as its own element.
                    result.append(prefix[:-1])
                elif x[:len(prefix)] != prefix:
                    # Only prepend if the element doesn't already carry it.
                    x = prefix + x
            result.append(x)
            if suffix:
                if suffix[0] == ' ':
                    # Leading space: emit the suffix as its own element.
                    result.append(suffix[1:])
                elif x[-len(suffix):] != suffix:
                    result[-1] = result[-1]+suffix
    return result
def _stripixes(prefix, itms, suffix, stripprefixes, stripsuffixes, env, c=None):
    """
    This is a wrapper around _concat()/_concat_ixes() that checks for
    the existence of prefixes or suffixes on list items and strips them
    where it finds them.  This is used by tools (like the GNU linker)
    that need to turn something like 'libfoo.a' into '-lfoo'.
    """
    if not itms:
        return itms
    if not callable(c):
        env_c = env['_concat']
        if env_c != _concat and callable(env_c):
            # There's a custom _concat() method in the construction
            # environment, and we've allowed people to set that in
            # the past (see test/custom-concat.py), so preserve the
            # backwards compatibility.
            c = env_c
        else:
            c = _concat_ixes
    # Expand construction variables in the strip lists before comparing.
    stripprefixes = list(map(env.subst, SCons.Util.flatten(stripprefixes)))
    stripsuffixes = list(map(env.subst, SCons.Util.flatten(stripsuffixes)))
    stripped = []
    for l in SCons.PathList.PathList(itms).subst_path(env, None, None):
        # File nodes are passed through untouched.
        if isinstance(l, SCons.Node.FS.File):
            stripped.append(l)
            continue
        if not SCons.Util.is_String(l):
            l = str(l)
        for stripprefix in stripprefixes:
            lsp = len(stripprefix)
            if l[:lsp] == stripprefix:
                l = l[lsp:]
                # Do not strip more than one prefix
                break
        for stripsuffix in stripsuffixes:
            lss = len(stripsuffix)
            if l[-lss:] == stripsuffix:
                l = l[:-lss]
                # Do not strip more than one suffix
                break
        stripped.append(l)
    # Finally re-attach the requested prefix/suffix to the stripped names.
    return c(prefix, stripped, suffix, env)
def processDefines(defs):
    """process defines, resolving strings, lists, dictionaries, into a list of
    strings

    Each entry becomes either 'NAME' or 'NAME=VALUE'.  None entries in a
    list are skipped.  (iteritems() below is a Python 2 idiom, consistent
    with the rest of this module.)
    """
    if SCons.Util.is_List(defs):
        l = []
        for d in defs:
            if d is None:
                continue
            elif SCons.Util.is_List(d) or isinstance(d, tuple):
                # (NAME, VALUE) pairs; extra elements beyond the first two
                # are ignored.
                if len(d) >= 2:
                    l.append(str(d[0]) + '=' + str(d[1]))
                else:
                    l.append(str(d[0]))
            elif SCons.Util.is_Dict(d):
                for macro,value in d.iteritems():
                    if value is not None:
                        l.append(str(macro) + '=' + str(value))
                    else:
                        l.append(str(macro))
            elif SCons.Util.is_String(d):
                l.append(str(d))
            else:
                raise SCons.Errors.UserError("DEFINE %s is not a list, dict, string or None."%repr(d))
    elif SCons.Util.is_Dict(defs):
        # The items in a dictionary are stored in random order, but
        # if the order of the command-line options changes from
        # invocation to invocation, then the signature of the command
        # line will change and we'll get random unnecessary rebuilds.
        # Consequently, we have to sort the keys to ensure a
        # consistent order...
        l = []
        for k,v in sorted(defs.items()):
            if v is None:
                l.append(str(k))
            else:
                l.append(str(k) + '=' + str(v))
    else:
        # A bare scalar becomes a single-element list.
        l = [str(defs)]
    return l
def _defines(prefix, defs, suffix, env, c=_concat_ixes):
    """A wrapper around _concat_ixes that turns a list or string
    into a list of C preprocessor command-line definitions.

    *defs* is normalised via processDefines() and path-substituted before
    the prefix/suffix (e.g. '-D') is attached by *c*.
    """
    return c(prefix, env.subst_path(processDefines(defs)), suffix, env)
class NullCmdGenerator(object):
    """A do-nothing command generator: calling an instance simply echoes
    back the command it was constructed with, untouched.

    Useful as a stand-in for real command generators when no generation
    is wanted, e.g.:

        env["DO_NOTHING"] = NullCmdGenerator
        env["LINKCOM"] = "${DO_NOTHING('$LINK $SOURCES $TARGET')}"
    """
    def __init__(self, cmd):
        # The literal command to hand back on every call.
        self.cmd = cmd
    def __call__(self, target, source, env, for_signature=None):
        # All arguments are ignored; the "generated" command is fixed.
        return self.cmd
class Variable_Method_Caller(object):
    """A class for finding a construction variable on the stack and
    calling one of its methods.
    We use this to support "construction variables" in our string
    eval()s that actually stand in for methods--specifically, use
    of "RDirs" in call to _concat that should actually execute the
    "TARGET.RDirs" method.  (We used to support this by creating a little
    "build dictionary" that mapped RDirs to the method, but this got in
    the way of Memoizing construction environments, because we had to
    create new environment objects to hold the variables.)
    """
    def __init__(self, variable, method):
        # Local-variable name to search for, and the method name to call
        # on its value once found.
        self.variable = variable
        self.method = method
    def __call__(self, *args, **kw):
        # Deliberately raise and catch an exception purely to obtain a
        # traceback object, which gives us access to the caller's frame.
        try: 1//0
        except ZeroDivisionError:
            # Don't start iterating with the current stack-frame to
            # prevent creating reference cycles (f_back is safe).
            frame = sys.exc_info()[2].tb_frame.f_back
        variable = self.variable
        # Walk up the call stack until a frame defines the variable with a
        # truthy value, then dispatch to its method.
        while frame:
            if variable in frame.f_locals:
                v = frame.f_locals[variable]
                if v:
                    method = getattr(v, self.method)
                    return method(*args, **kw)
            frame = frame.f_back
        return None
# If env[version_var] is defined and truthy, return env[flags_var];
# in every other case (missing keys included) return None.
def __libversionflags(env, version_var, flags_var):
    """Return ``env[flags_var]`` when ``env[version_var]`` exists and is
    truthy; otherwise None (a missing *flags_var* also yields None)."""
    result = None
    try:
        if env[version_var]:
            result = env[flags_var]
    except KeyError:
        result = None
    return result
# Baseline contents of every construction environment: empty builder/scanner
# registries, default config paths, the suffix tables from SCons.Tool, and
# the internal helper functions referenced by command-line templates below.
ConstructionEnvironment = {
    'BUILDERS'      : {},
    'SCANNERS'      : [],
    'CONFIGUREDIR'  : '#/.sconf_temp',
    'CONFIGURELOG'  : '#/config.log',
    'CPPSUFFIXES'   : SCons.Tool.CSuffixes,
    'DSUFFIXES'     : SCons.Tool.DSuffixes,
    'ENV'           : {},
    'IDLSUFFIXES'   : SCons.Tool.IDLSuffixes,
#    'LATEXSUFFIXES' : SCons.Tool.LaTeXSuffixes, # moved to the TeX tools generate functions
    '_concat'       : _concat,
    '_defines'      : _defines,
    '_stripixes'    : _stripixes,
    '_LIBFLAGS'     : '${_concat(LIBLINKPREFIX, LIBS, LIBLINKSUFFIX, __env__)}',
    '_LIBDIRFLAGS'  : '$( ${_concat(LIBDIRPREFIX, LIBPATH, LIBDIRSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)',
    '_CPPINCFLAGS'  : '$( ${_concat(INCPREFIX, CPPPATH, INCSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)',
    '_CPPDEFFLAGS'  : '${_defines(CPPDEFPREFIX, CPPDEFINES, CPPDEFSUFFIX, __env__)}',
    '__libversionflags'      : __libversionflags,
    '__SHLIBVERSIONFLAGS'    : '${__libversionflags(__env__,"SHLIBVERSION","_SHLIBVERSIONFLAGS")}',
    '__LDMODULEVERSIONFLAGS' : '${__libversionflags(__env__,"LDMODULEVERSION","_LDMODULEVERSIONFLAGS")}',
    '__DSHLIBVERSIONFLAGS'   : '${__libversionflags(__env__,"DSHLIBVERSION","_DSHLIBVERSIONFLAGS")}',
    'TEMPFILE'      : NullCmdGenerator,
    # Variable_Method_Caller entries resolve e.g. "RDirs" in command
    # templates to the corresponding method on the TARGET node.
    'Dir'           : Variable_Method_Caller('TARGET', 'Dir'),
    'Dirs'          : Variable_Method_Caller('TARGET', 'Dirs'),
    'File'          : Variable_Method_Caller('TARGET', 'File'),
    'RDirs'         : Variable_Method_Caller('TARGET', 'RDirs'),
}
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| lgpl-2.1 |
aquavitae/rst2pdf | rst2pdf/pygments_code_block_directive.py | 2 | 13783 | # -*- coding: utf-8 -*-
# $URL$
# $Date$
# $Revision$
# :Author: a Pygments author|contributor; Felix Wiemann; Guenter Milde
# :Date: $Date$
# :Copyright: This module has been placed in the public domain.
#
# This is a merge of `Using Pygments in ReST documents`_ from the pygments_
# documentation, and a `proof of concept`_ by Felix Wiemann.
#
# ========== ===========================================================
# 2007-06-01 Removed redundancy from class values.
# 2007-06-04 Merge of successive tokens of same type
# (code taken from pygments.formatters.others).
# 2007-06-05 Separate docutils formatter script
# Use pygments' CSS class names (like the html formatter)
# allowing the use of pygments-produced style sheets.
# 2007-06-07 Merge in the formatting of the parsed tokens
# (misnamed as docutils_formatter) as class DocutilsInterface
# 2007-06-08 Failsave implementation (fallback to a standard literal block
# if pygments not found)
# ========== ===========================================================
#
# ::
"""
Define and register a code-block directive using pygments
"""
# Requirements
# ------------
# ::
import codecs
from docutils import nodes
from docutils.parsers.rst import directives
try:
import pygments
from pygments.lexers import get_lexer_by_name
from pygments.formatters.html import _get_ttype_class
except ImportError:
pass
from .log import log
# Customisation
# -------------
#
# Do not insert inline nodes for the following tokens.
# (You could add e.g. Token.Punctuation like ``['', 'p']``.) ::
unstyled_tokens = ['']
# DocutilsInterface
# -----------------
#
# This interface class combines code from
# pygments.formatters.html and pygments.formatters.others.
#
# It does not require anything of docutils and could also become a part of
# pygments::
class DocutilsInterface(object):
    """
    Parse `code` string and yield "classified" tokens.

    Arguments

      code        -- string of source code to parse
      language    -- formal language the code is written in
      custom_args -- optional dict of extra keyword arguments for the lexer

    Merge subsequent tokens of the same token-type.

    Yields the tokens as ``(ttype_class, value)`` tuples,
    where ttype_class is taken from pygments.token.STANDARD_TYPES and
    corresponds to the class argument used in pygments html output.
    """
    def __init__(self, code, language, custom_args=None):
        self.code = code
        self.language = language
        self.custom_args = custom_args or {}
    def lex(self):
        """Return the pygments token stream for self.code.

        Falls back to the plain 'text' lexer when the requested language
        is unknown (or given as None / 'none').
        """
        # Get lexer for language (use text as fallback)
        try:
            if self.language and str(self.language).lower() != 'none':
                lexer = get_lexer_by_name(self.language.lower(),
                                          **self.custom_args)
            else:
                lexer = get_lexer_by_name('text', **self.custom_args)
        except ValueError:
            log.info("no pygments lexer for %s, using 'text'" % self.language)
            # what happens if pygments isn't present ?
            lexer = get_lexer_by_name('text')
        return pygments.lex(self.code, lexer)
    def join(self, tokens):
        """
        Join subsequent tokens of same token-type.
        """
        tokens = iter(tokens)
        try:
            (lasttype, lastval) = next(tokens)
        except StopIteration:
            # Empty token stream: nothing to yield.  Letting next() raise
            # StopIteration inside this generator would surface as a
            # RuntimeError on Python 3.7+ (PEP 479).
            return
        for ttype, value in tokens:
            if ttype is lasttype:
                lastval += value
            else:
                yield(lasttype, lastval)
                (lasttype, lastval) = (ttype, value)
        yield(lasttype, lastval)
    def __iter__(self):
        """
        Parse code string and yield "clasified" tokens
        """
        try:
            tokens = self.lex()
        except IOError:
            log.info("Pygments lexer not found, using fallback")
            # Emit the whole code block as a single unstyled token.
            yield ('', self.code)
            return
        for ttype, value in self.join(tokens):
            yield (_get_ttype_class(ttype), value)
# code_block_directive
# --------------------
# ::
def code_block_directive(name, arguments, options, content, lineno,
                       content_offset, block_text, state, state_machine):
    """Parse and classify content of a code_block.

    The code is taken from the directive body or, when the ``include``
    option is given, from an external file.  The start-at / start-after /
    end-at / end-before options narrow the included text, ``linenos`` adds
    line numbers, and pygments (via DocutilsInterface) classifies the
    tokens.  Returns a one-element list holding the literal_block node.
    """
    if 'include' in options:
        try:
            if 'encoding' in options:
                encoding = options['encoding']
            else:
                encoding = 'utf-8'
            content = codecs.open(options['include'], 'r', encoding).read().rstrip()
        except (IOError, UnicodeError): # no file or problem finding it or reading it
            log.error('Error reading file: "%s" L %s' % (options['include'], lineno))
            content = ''
        line_offset = 0
        if content:
            # here we define the start-at and end-at options
            # so that limit is included in extraction
            # this is different than the start-after directive of docutils
            # (docutils/parsers/rst/directives/misc.py L73+)
            # which excludes the beginning
            # the reason is we want to be able to define a start-at like
            #   def mymethod(self)
            # and have such a definition included
            after_text = options.get('start-at', None)
            if after_text:
                # skip content in include_text before *and NOT incl.* a matching text
                after_index = content.find(after_text)
                if after_index < 0:
                    raise state_machine.reporter.severe('Problem with "start-at" option of "%s" '
                        'code-block directive:\nText not found.' % options['start-at'])
                # patch mmueller start
                # Move the after_index to the beginning of the line with the
                # match.
                for char in content[after_index:0:-1]:
                    # codecs always opens binary. This works with '\n', '\r' and
                    # '\r\n'. We are going backwards, so '\n' is found first
                    # in '\r\n'.
                    # Going with .splitlines() seems more appropriate
                    # but needs a few more changes.
                    if char == '\n' or char == '\r':
                        break
                    after_index -= 1
                # patch mmueller end
                # BUG FIX: count the skipped lines *before* truncating
                # `content`.  The previous code sliced `content` first and
                # then took `content[:after_index]` of the already-shortened
                # string, so the reported line offset was wrong.
                line_offset = len(content[:after_index].splitlines())
                content = content[after_index:]
            after_text = options.get('start-after', None)
            if after_text:
                # skip content in include_text before *and incl.* a matching text
                after_index = content.find(after_text)
                if after_index < 0:
                    raise state_machine.reporter.severe('Problem with "start-after" option of "%s" '
                        'code-block directive:\nText not found.' % options['start-after'])
                line_offset = len(content[:after_index + len(after_text)].splitlines())
                content = content[after_index + len(after_text):]
            # same changes here for the same reason
            before_text = options.get('end-at', None)
            if before_text:
                # skip content in include_text after *and incl.* a matching text
                before_index = content.find(before_text)
                if before_index < 0:
                    raise state_machine.reporter.severe('Problem with "end-at" option of "%s" '
                        'code-block directive:\nText not found.' % options['end-at'])
                content = content[:before_index + len(before_text)]
            before_text = options.get('end-before', None)
            if before_text:
                # skip content in include_text after *and NOT incl.* a matching text
                before_index = content.find(before_text)
                if before_index < 0:
                    raise state_machine.reporter.severe('Problem with "end-before" option of "%s" '
                        'code-block directive:\nText not found.' % options['end-before'])
                content = content[:before_index]
    else:
        # Directive body: content is a docutils StringList of lines.
        line_offset = options.get('linenos_offset')
        content = '\n'.join(content)
    # Expand tabs; the pygments spelling 'tabsize' wins over 'tab-width'.
    if 'tabsize' in options:
        tabw = options['tabsize']
    else:
        tabw = int(options.get('tab-width', 8))
    content = content.replace('\t', ' ' * tabw)
    withln = "linenos" in options
    if "linenos_offset" not in options:
        line_offset = 0
    language = arguments[0]
    # create a literal block element and set class argument
    code_block = nodes.literal_block(classes=["code", language])
    if withln:
        # Right-align line numbers in a field wide enough for the last line.
        lineno = 1 + line_offset
        total_lines = content.count('\n') + 1 + line_offset
        lnwidth = len(str(total_lines))
        fstr = "\n%%%dd " % lnwidth
        code_block += nodes.inline(fstr[1:] % lineno, fstr[1:] % lineno, classes=['linenumber'])
    # parse content with pygments and add to code_block element
    for cls, value in DocutilsInterface(content, language, options):
        if withln and "\n" in value:
            # Split on the "\n"s
            values = value.split("\n")
            # The first piece, pass as-is
            code_block += nodes.Text(values[0], values[0])
            # On the second and later pieces, insert \n and linenos
            linenos = list(range(lineno, lineno + len(values)))
            for chunk, ln in list(zip(values, linenos))[1:]:
                if ln <= total_lines:
                    code_block += nodes.inline(fstr % ln, fstr % ln, classes=['linenumber'])
                code_block += nodes.Text(chunk, chunk)
            lineno += len(values) - 1
        elif cls in unstyled_tokens:
            # insert as Text to decrease the verbosity of the output.
            code_block += nodes.Text(value, value)
        else:
            code_block += nodes.inline(value, value, classes=["pygments-" + cls])
    return [code_block]
# Custom argument validators
# --------------------------
# ::
#
# Move to separated module??
def zero_or_positive_int(argument):
    """
    Convert a string into a non-negative python integer.

    None is a special case; it is regarded as zero.
    (Directive option conversion function.)
    """
    if argument is None or argument == '0':
        return 0
    return directives.positive_int(argument)
def string_list(argument):
    """
    Convert a space- or comma-separated list of values into a python list
    of strings.  (Directive option conversion function.)

    Based in positive_int_list of docutils.parsers.rst.directives
    """
    # Split on commas if any are present, otherwise on runs of whitespace.
    separator = ',' if ',' in argument else None
    return argument.split(separator)
def string_bool(argument):
    """
    Convert "True"/"true"/"False"/"false" into python boolean values.
    (Directive option conversion function; raises ValueError otherwise.)
    """
    if argument is None:
        raise ValueError('argument required but none supplied; '
                         'choose from "True" or "False"')
    lowered = argument.lower()
    if lowered == 'true':
        return True
    if lowered == 'false':
        return False
    raise ValueError('"%s" unknown; choose from "True" or "False"' %
                     argument)
def csharp_unicodelevel(argument):
    """Validate the CSharp ``unicodelevel`` option (none/basic/full)."""
    valid = ('none', 'basic', 'full')
    return directives.choice(argument, valid)
def lhs_litstyle(argument):
    """Validate the literate-Haskell ``litstyle`` option (bird/latex)."""
    valid = ('bird', 'latex')
    return directives.choice(argument, valid)
def raw_compress(argument):
    """Validate the Raw lexer ``compress`` option (gz/bz2)."""
    valid = ('gz', 'bz2')
    return directives.choice(argument, valid)
# Register Directive
# ------------------
# ::
# Docutils directive attributes: one required argument (the language),
# no optional arguments, final argument may contain whitespace; a body
# is allowed; and the option validators below.
code_block_directive.arguments = (1, 0, 1)
code_block_directive.content = 1
code_block_directive.options = {
    'include': directives.unchanged_required,
    'start-at': directives.unchanged_required,
    'end-at': directives.unchanged_required,
    'start-after': directives.unchanged_required,
    'end-before': directives.unchanged_required,
    'linenos': directives.unchanged,
    'linenos_offset': zero_or_positive_int,
    'tab-width': directives.unchanged,
    # generic  (lexer options passed straight through to pygments)
    'stripnl' : string_bool,
    'stripall': string_bool,
    'ensurenl': string_bool,
    'tabsize' : directives.positive_int,
    'encoding': directives.encoding,
    # Lua
    'func_name_hightlighting':string_bool,
    'disabled_modules': string_list,
    # Python Console
    'python3': string_bool,
    # Delphi
    'turbopascal':string_bool,
    'delphi' :string_bool,
    'freepascal': string_bool,
    'units': string_list,
    # Modula2
    'pim' : string_bool,
    'iso' : string_bool,
    'objm2' : string_bool,
    'gm2ext': string_bool,
    # CSharp
    'unicodelevel' : csharp_unicodelevel,
    # Literate haskell
    'litstyle' : lhs_litstyle,
    # Raw
    'compress': raw_compress,
    # Rst
    'handlecodeblocks': string_bool,
    # Php
    'startinline': string_bool,
    'funcnamehighlighting': string_bool,
    'disabledmodules': string_list,
    }
# .. _doctutils: http://docutils.sf.net/
# .. _pygments: http://pygments.org/
# .. _Using Pygments in ReST documents: http://pygments.org/docs/rstdirective/
# .. _proof of concept:
# http://article.gmane.org/gmane.text.docutils.user/3689
#
# Test output
# -----------
#
# If called from the command line, call the docutils publisher to render the
# input::
# When run as a script: register the directive and run the docutils HTML
# publisher over stdin so the rendered output can be inspected manually.
if __name__ == '__main__':
    from docutils.core import publish_cmdline, default_description
    from docutils.parsers.rst import directives
    directives.register_directive('code-block', code_block_directive)
    description = "code-block directive test output" + default_description
    # Best-effort locale setup so docutils messages honour the user's locale.
    try:
        import locale
        locale.setlocale(locale.LC_ALL, '')
    except Exception:
        pass
    publish_cmdline(writer_name='html', description=description)
| mit |
mcanthony/rethinkdb | scripts/pprint_sandbox.py | 48 | 11079 | # Copyright 2015 RethinkDB, all rights reserved.
from functools import reduce
# from . import ast
class Document(object):
    """Base class for documents to be pretty printed."""
    # Marker base class only: concrete nodes (Text, Cond, Concat, Group,
    # Nest) implement width() and are interpreted structurally below.
    pass
class Text(Document):
    """Textual element in a document."""
    def __init__(self, text):
        self._text = text
    def __str__(self):
        return "Text('%s')" % self._text
    def width(self):
        # Flat (one-line) width is simply the text length.
        return len(self._text)
# The canonical empty document.
empty = Text('')
class Cond(Document):
    """Emit either left or tail, newline, right depending on line width"""
    def __init__(self, left, right, tail=''):
        self._left = left      # one-line (fitting) rendering
        self._right = right    # rendering placed after the line break
        self._tail = tail      # emitted just before the line break
    def __str__(self):
        return "Cond('%s','%s','%s')" % (self._left, self._right, self._tail)
    def width(self):
        # Only the one-line variant counts toward fitting decisions.
        return len(self._left)
# Standard break points: a breakable space and a breakable dot, both
# emitting a trailing backslash continuation when the line is split.
br = Cond(' ', '', ' \\') # Python backslash
dot = Cond('.', '.', ' \\') # Python backslash
class Concat(Document):
    """Concatenation of several sub-documents."""

    def __init__(self, *args):
        self._docs = args

    def __str__(self):
        rendered = ",".join(str(doc) for doc in self._docs)
        return "Concat(%s)" % rendered

    def width(self):
        from operator import add
        # Flat width is the sum of the children's widths (like the original
        # reduce, this raises if there are no children at all).
        return reduce(add, [doc.width() for doc in self._docs])
class Group(Document):
    """Specify unit whose linebreaks are interpreted consistently."""
    def __init__(self, child):
        self._child = child
    def __str__(self):
        return "Group(%s)" % str(self._child)
    def width(self):
        # A group is exactly as wide as its single child.
        return self._child.width()
class Nest(Document):
    """Concatenate N documents with consistent indentation."""

    def __init__(self, *args):
        self._docs = args

    def __str__(self):
        rendered = ",".join(str(doc) for doc in self._docs)
        return "Nest(%s)" % rendered

    def width(self):
        from operator import add
        # Flat width is the sum of the children's widths (like the original
        # reduce, this raises if there are no children at all).
        return reduce(add, [doc.width() for doc in self._docs])
def CommaSep(*args):
    """Join documents with ',' + breakable space inside a Nest.

    With no arguments the empty document is returned.
    """
    if not args:
        return empty
    pieces = [args[0]]
    for doc in args[1:]:
        pieces.extend((Text(','), br, doc))
    return Nest(*pieces)
def ArgList(*args):
    """Wrap a comma-separated list of documents in parentheses."""
    inner = CommaSep(*args)
    return Concat(Text('('), inner, Text(')'))
def DotList(*args):
    """Chain documents with breakable '.' separators (method-chain style)."""
    initial, rest = args[0], args[1:]
    docs = []
    for doc in rest:
        docs.append(dot)
        docs.append(doc)
    if len(docs) > 1:
        docs[0] = Text('.') # prevent breaking before first dot
    return Concat(initial, Nest(*docs))
def Call(name, *args):
    """Render a function call: `name` followed by a parenthesized arg list."""
    return Concat(Text(name), ArgList(*args))
class TerriblePrettyPrinter(object):
    """A terrible, inefficient pretty printer for comparison."""
    def __init__(self, width):
        # Maximum line width for rendered output.
        self._width = width
    def render(self, document):
        """Render `document` to a string within self._width columns."""
        return self._format(False, self._width, 0, document)[0]
    def _format(self, hasFit, widthLeft, nestIndent, document):
        # Recursively render `document`, returning (text, remaining width).
        #   hasFit     -- an enclosing group is known to fit on this line
        #   widthLeft  -- columns remaining on the current line
        #   nestIndent -- indentation column of the innermost Nest
        if isinstance(document, Text):
            return (document._text, widthLeft - len(document._text))
        elif isinstance(document, Cond) and hasFit:
            # Group fits: emit the one-line variant.
            return (document._left, widthLeft - len(document._left))
        elif isinstance(document, Cond) and not hasFit:
            # Group does not fit: tail, newline, indent, then right part.
            return ('%s\n%s%s' % (document._tail, ' ' * nestIndent,
                                  document._right),
                    self._width - nestIndent - len(document._right))
        elif isinstance(document, Concat):
            # Thread the remaining width through the children left to right.
            width = widthLeft
            s = ""
            for subelement in document._docs:
                s1, width = self._format(hasFit, width, nestIndent, subelement)
                s += s1
            return (s, width)
        elif isinstance(document, Group):
            # Decide once, here, whether the whole group fits flat.
            newFit = hasFit or document._child.width() <= widthLeft
            return self._format(newFit, widthLeft, nestIndent, document._child)
        elif isinstance(document, Nest):
            # Indent continuation lines at the current column; each child
            # re-evaluates whether it fits on the remaining width.
            currentPos = self._width - widthLeft
            s = ""
            for subelement in document._docs:
                newFit = hasFit or subelement.width() <= widthLeft
                s1, widthLeft = self._format(newFit, widthLeft,
                                             currentPos, subelement)
                s += s1
            return (s, widthLeft)
        else:
            raise RuntimeError("invalid argument %s" % document)
# Sample documents for the demos below: doc1 is a tiny nested group,
# doc2 mimics a chained ReQL-style expression.
doc1 = Group(Concat(Text("A"), br, Group(Concat(Text("B"), br, Text("C")))))
doc2 = DotList(Text('r'), Call('expr', Text('5')),
               Call('add', DotList(Text('r'), Call('expr', Text('7')),
                                   Call('frob'))),
               Call('mul', DotList(Text('r'), Call('expr', Text('17'))),
                    Call('mul', DotList(Text('r'), Call('expr', Text('17')))),
                    Call('mul', DotList(Text('r'), Call('expr', Text('17'))))))
# print(TerriblePrettyPrinter(5).render(doc1))
# print(TerriblePrettyPrinter(3).render(doc1))
# print(TerriblePrettyPrinter(1).render(doc1))
# print(TerriblePrettyPrinter(5).render(doc2))
# print(TerriblePrettyPrinter(10).render(doc2))
# print(TerriblePrettyPrinter(80).render(doc2))
class Streamer(object):
    """Base class for elements of the flattened document stream."""
    pass
class TE(Streamer):
    """Stream element: a run of text, optionally annotated with its hpos."""

    def __init__(self, string, hpos=None):
        self._string = string
        self._hpos = hpos

    def __str__(self):
        if self._hpos is None:
            return "TE('%s')" % self._string
        return "TE(%d,'%s')" % (self._hpos, self._string)
class CD(Streamer):
    """Stream element: a conditional break (left/right/tail), with hpos."""

    def __init__(self, left, right, tail, hpos=None):
        self._left = left
        self._right = right
        self._tail = tail
        self._hpos = hpos

    def __str__(self):
        if self._hpos is None:
            return "CD('%s','%s','%s')" % (self._left, self._right, self._tail)
        return "CD('%s','%s','%s',%d)" % (self._left, self._right,
                                          self._tail, self._hpos)
class NBeg(Streamer):
    """Stream marker: start of a Nest (indentation unit)."""

    def __init__(self, hpos=None):
        self._hpos = hpos

    def __str__(self):
        return "NBeg" if self._hpos is None else "NBeg(%d)" % self._hpos
class NEnd(Streamer):
    """Stream marker: end of a Nest (indentation unit)."""

    def __init__(self, hpos=None):
        self._hpos = hpos

    def __str__(self):
        return "NEnd" if self._hpos is None else "NEnd(%d)" % self._hpos
class GBeg(Streamer):
    """Stream marker: start of a Group (consistent-break unit)."""

    def __init__(self, hpos=None):
        self._hpos = hpos

    def __str__(self):
        return "GBeg" if self._hpos is None else "GBeg(%d)" % self._hpos
class GEnd(Streamer):
    """Stream marker: end of a Group (consistent-break unit)."""

    def __init__(self, hpos=None):
        self._hpos = hpos

    def __str__(self):
        return "GEnd" if self._hpos is None else "GEnd(%d)" % self._hpos
def genStream(document):
    """Linearize a Document tree into a flat stream of Streamer events.

    Uses an explicit stack instead of recursion; GEnd/NEnd markers are
    pushed on the stack so they are emitted after the node's children.
    """
    pending = [document]
    while pending:
        node = pending.pop()
        if isinstance(node, Text):
            yield TE(node._text)
        elif isinstance(node, Cond):
            yield CD(node._left, node._right, node._tail)
        elif isinstance(node, Concat):
            # Push children reversed so they pop in document order.
            pending.extend(reversed(node._docs))
        elif isinstance(node, Group):
            yield GBeg()
            pending.append(GEnd())
            pending.append(node._child)
        elif isinstance(node, Nest):
            yield NBeg()
            yield GBeg()
            pending.append(NEnd())
            pending.append(GEnd())
            pending.extend(reversed(node._docs))
        elif isinstance(node, (GEnd, NEnd)):
            # Deferred close markers placed on the stack above.
            yield node
        else:
            raise RuntimeError("invalid thing seen %s" % node)
# for elt in genStream(doc2):
# print("> %s" % elt)
def annotateStream(stream):
    """Re-emit the stream with each element tagged with the running hpos.

    The horizontal position advances only on text (TE) and on the flat
    variant of a conditional break (CD's left part).
    """
    hpos = 0
    for item in stream:
        if isinstance(item, TE):
            hpos += len(item._string)
            yield TE(item._string, hpos)
        elif isinstance(item, CD):
            hpos += len(item._left)
            yield CD(item._left, item._right, item._tail, hpos)
        elif isinstance(item, GBeg):
            yield GBeg(hpos)
        elif isinstance(item, GEnd):
            yield GEnd(hpos)
        elif isinstance(item, NBeg):
            yield NBeg(hpos)
        elif isinstance(item, NEnd):
            yield NEnd(hpos)
# for elt in annotateStream(genStream(doc2)):
# print(">> %s" % elt)
def trackActualPosition(stream):
    """Buffer group contents until each group's end position is known.

    Every GBeg is re-emitted annotated with the hpos of its matching GEnd,
    which is exactly what the renderer needs to decide whether the group
    fits on the current line.
    """
    buffered = []
    for item in stream:
        if isinstance(item, GBeg):
            buffered.append([])
        elif isinstance(item, GEnd):
            group = buffered.pop()
            annotated = [GBeg(item._hpos)] + group + [item]
            if buffered:
                # Nested group: fold into the enclosing buffer.
                buffered[-1].extend(annotated)
            else:
                # Topmost group: emit directly.
                for element in annotated:
                    yield element
        elif not buffered:
            yield item
        else:
            buffered[-1].append(item)
# for elt in trackActualPosition(annotateStream(genStream(doc2))):
# print(">! %s" % elt)
# Kiselyov adds a pruning step; this is overly complicated, useless in our
# environment, and requires that we guarantee that all documents have nonzero
# length, which I'm not prepared to do. So we use trackActualPosition instead.
def format(width, stream):
    """Render an annotated, position-tracked stream at the given width.

    NOTE(review): this intentionally shadows the builtin ``format``;
    callers below use the name, so it is kept.
    """
    fittingElements = 0   # depth of enclosing groups known to fit flat
    rightEdge = width     # largest annotated hpos that still fits the line
    hpos = 0              # current column (monotonic stream position basis)
    result = ""
    indent = [0]          # stack of Nest indentation columns
    for element in stream:
        if isinstance(element, TE):
            result += element._string
            hpos += len(element._string)
        elif isinstance(element, CD):
            indentation = indent[-1]
            if fittingElements == 0:
                # Break: tail, newline, indent, then the "right" text.
                result += "%s\n%s%s" % (element._tail, ' ' * indentation,
                                        element._right)
                fittingElements = 0
                hpos = indentation + len(element._right)
                # Re-anchor the fitting window at the fresh line position.
                rightEdge = (width - hpos) + element._hpos
            else:
                result += element._left
                hpos += len(element._left)
        elif isinstance(element, GBeg):
            # A group fits when its (annotated) end position is in-window.
            if fittingElements != 0 or element._hpos <= rightEdge:
                fittingElements += 1
            else:
                fittingElements = 0
        elif isinstance(element, GEnd):
            fittingElements = max(fittingElements - 1, 0)
        elif isinstance(element, NBeg):
            indent.append(hpos)
        elif isinstance(element, NEnd):
            indent.pop()
    return result
def pprint(width, document):
    """Pretty-print `document`, breaking lines to fit `width` columns."""
    stream = genStream(document)
    annotated = annotateStream(stream)
    return format(width, trackActualPosition(annotated))
print(" " * 4 + "|")
print(pprint(5, doc2))
print("-" * 20)
print(" " * 39 + "|")
print(pprint(40, doc2))
print("-" * 20)
print(" " * 79 + "|")
print(pprint(80, doc2))
| agpl-3.0 |
xxsergzzxx/python-for-android | python-modules/zope/zope/interface/tests/test_interface.py | 50 | 16077 | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Test Interface implementation
"""
import doctest
import unittest
import sys
class InterfaceTests(unittest.TestCase):
    """Exercise zope.interface declaration, lookup, and invariant machinery.

    NOTE(review): this file uses Python 2 syntax (``except Invalid, error``)
    and deprecated TestCase aliases (assert_, failUnless, assertEquals).
    """
    # Builds a small interface chain _I1 <- _I1_ <- _I1__ <- _I2 used by the
    # name/description lookup tests below.
    def _makeDerivedInterface(self):
        from zope.interface import Interface
        from zope.interface import Attribute
        class _I1(Interface):
            a1 = Attribute("This is an attribute")
            def f11():
                pass
            def f12():
                pass
            f12.optional = 1
        class _I1_(_I1):
            pass
        class _I1__(_I1_):
            pass
        class _I2(_I1__):
            def f21():
                pass
            def f22():
                pass
            f23 = f22
        return _I2
    def testInterfaceSetOnAttributes(self):
        from zope.interface.tests.unitfixtures import FooInterface
        self.assertEqual(FooInterface['foobar'].interface,
                         FooInterface)
        self.assertEqual(FooInterface['aMethod'].interface,
                         FooInterface)
    def testClassImplements(self):
        from zope.interface.tests.unitfixtures import A
        from zope.interface.tests.unitfixtures import B
        from zope.interface.tests.unitfixtures import C
        from zope.interface.tests.unitfixtures import D
        from zope.interface.tests.unitfixtures import E
        from zope.interface.tests.unitfixtures import I1
        from zope.interface.tests.unitfixtures import I2
        from zope.interface.tests.unitfixtures import IC
        self.assert_(IC.implementedBy(C))
        self.assert_(I1.implementedBy(A))
        self.assert_(I1.implementedBy(B))
        self.assert_(not I1.implementedBy(C))
        self.assert_(I1.implementedBy(D))
        self.assert_(I1.implementedBy(E))
        self.assert_(not I2.implementedBy(A))
        self.assert_(I2.implementedBy(B))
        self.assert_(not I2.implementedBy(C))
        # No longer after interfacegeddon
        # self.assert_(not I2.implementedBy(D))
        self.assert_(not I2.implementedBy(E))
    def testUtil(self):
        from zope.interface import implementedBy
        from zope.interface import providedBy
        from zope.interface.tests.unitfixtures import A
        from zope.interface.tests.unitfixtures import B
        from zope.interface.tests.unitfixtures import C
        from zope.interface.tests.unitfixtures import I1
        from zope.interface.tests.unitfixtures import I2
        from zope.interface.tests.unitfixtures import IC
        self.assert_(IC in implementedBy(C))
        self.assert_(I1 in implementedBy(A))
        self.assert_(not I1 in implementedBy(C))
        self.assert_(I2 in implementedBy(B))
        self.assert_(not I2 in implementedBy(C))
        self.assert_(IC in providedBy(C()))
        self.assert_(I1 in providedBy(A()))
        self.assert_(not I1 in providedBy(C()))
        self.assert_(I2 in providedBy(B()))
        self.assert_(not I2 in providedBy(C()))
    def testObjectImplements(self):
        from zope.interface.tests.unitfixtures import A
        from zope.interface.tests.unitfixtures import B
        from zope.interface.tests.unitfixtures import C
        from zope.interface.tests.unitfixtures import D
        from zope.interface.tests.unitfixtures import E
        from zope.interface.tests.unitfixtures import I1
        from zope.interface.tests.unitfixtures import I2
        from zope.interface.tests.unitfixtures import IC
        self.assert_(IC.providedBy(C()))
        self.assert_(I1.providedBy(A()))
        self.assert_(I1.providedBy(B()))
        self.assert_(not I1.providedBy(C()))
        self.assert_(I1.providedBy(D()))
        self.assert_(I1.providedBy(E()))
        self.assert_(not I2.providedBy(A()))
        self.assert_(I2.providedBy(B()))
        self.assert_(not I2.providedBy(C()))
        # Not after interface geddon
        # self.assert_(not I2.providedBy(D()))
        self.assert_(not I2.providedBy(E()))
    def testDeferredClass(self):
        from zope.interface.tests.unitfixtures import A
        from zope.interface.exceptions import BrokenImplementation
        a = A()
        self.assertRaises(BrokenImplementation, a.ma)
    def testInterfaceExtendsInterface(self):
        from zope.interface.tests.unitfixtures import BazInterface
        from zope.interface.tests.unitfixtures import BarInterface
        from zope.interface.tests.unitfixtures import BobInterface
        from zope.interface.tests.unitfixtures import FunInterface
        self.assert_(BazInterface.extends(BobInterface))
        self.assert_(BazInterface.extends(BarInterface))
        self.assert_(BazInterface.extends(FunInterface))
        self.assert_(not BobInterface.extends(FunInterface))
        self.assert_(not BobInterface.extends(BarInterface))
        self.assert_(BarInterface.extends(FunInterface))
        self.assert_(not BarInterface.extends(BazInterface))
    def testVerifyImplementation(self):
        from zope.interface.verify import verifyClass
        from zope.interface import Interface
        from zope.interface.tests.unitfixtures import Foo
        from zope.interface.tests.unitfixtures import FooInterface
        from zope.interface.tests.unitfixtures import I1
        self.assert_(verifyClass(FooInterface, Foo))
        self.assert_(Interface.providedBy(I1))
    def test_names(self):
        iface = self._makeDerivedInterface()
        names = list(iface.names())
        names.sort()
        self.assertEqual(names, ['f21', 'f22', 'f23'])
        all = list(iface.names(all=True))
        all.sort()
        self.assertEqual(all, ['a1', 'f11', 'f12', 'f21', 'f22', 'f23'])
    def test_namesAndDescriptions(self):
        iface = self._makeDerivedInterface()
        names = [nd[0] for nd in iface.namesAndDescriptions()]
        names.sort()
        self.assertEqual(names, ['f21', 'f22', 'f23'])
        names = [nd[0] for nd in iface.namesAndDescriptions(1)]
        names.sort()
        self.assertEqual(names, ['a1', 'f11', 'f12', 'f21', 'f22', 'f23'])
        for name, d in iface.namesAndDescriptions(1):
            self.assertEqual(name, d.__name__)
    def test_getDescriptionFor(self):
        iface = self._makeDerivedInterface()
        self.assertEqual(iface.getDescriptionFor('f11').__name__, 'f11')
        self.assertEqual(iface.getDescriptionFor('f22').__name__, 'f22')
        self.assertEqual(iface.queryDescriptionFor('f33', self), self)
        self.assertRaises(KeyError, iface.getDescriptionFor, 'f33')
    def test___getitem__(self):
        iface = self._makeDerivedInterface()
        self.assertEqual(iface['f11'].__name__, 'f11')
        self.assertEqual(iface['f22'].__name__, 'f22')
        self.assertEqual(iface.get('f33', self), self)
        self.assertRaises(KeyError, iface.__getitem__, 'f33')
    def test___contains__(self):
        iface = self._makeDerivedInterface()
        self.failUnless('f11' in iface)
        self.failIf('f33' in iface)
    def test___iter__(self):
        iface = self._makeDerivedInterface()
        names = list(iter(iface))
        names.sort()
        self.assertEqual(names, ['a1', 'f11', 'f12', 'f21', 'f22', 'f23'])
    def testAttr(self):
        iface = self._makeDerivedInterface()
        description = iface.getDescriptionFor('a1')
        self.assertEqual(description.__name__, 'a1')
        self.assertEqual(description.__doc__, 'This is an attribute')
    def testFunctionAttributes(self):
        # Make sure function attributes become tagged values.
        from zope.interface import Interface
        class ITest(Interface):
            def method():
                pass
            method.optional = 1
        method = ITest['method']
        self.assertEqual(method.getTaggedValue('optional'), 1)
    # Exercises interface invariants end-to-end, including mutation of the
    # 'invariants' tagged value (restored at the bottom of the test).
    def testInvariant(self):
        from zope.interface.exceptions import Invalid
        from zope.interface import directlyProvides
        from zope.interface.tests.unitfixtures import BarGreaterThanFoo
        from zope.interface.tests.unitfixtures import ifFooThenBar
        from zope.interface.tests.unitfixtures import IInvariant
        from zope.interface.tests.unitfixtures import InvariantC
        from zope.interface.tests.unitfixtures import ISubInvariant
        # set up
        o = InvariantC()
        directlyProvides(o, IInvariant)
        # a helper
        def errorsEqual(self, o, error_len, error_msgs, iface=None):
            if iface is None:
                iface = IInvariant
            self.assertRaises(Invalid, iface.validateInvariants, o)
            e = []
            try:
                iface.validateInvariants(o, e)
            except Invalid, error:
                self.assertEquals(error.args[0], e)
            else:
                self._assert(0) # validateInvariants should always raise
                # Invalid
            self.assertEquals(len(e), error_len)
            msgs = [error.args[0] for error in e]
            msgs.sort()
            for msg in msgs:
                self.assertEquals(msg, error_msgs.pop(0))
        # the tests
        self.assertEquals(IInvariant.getTaggedValue('invariants'),
                          [ifFooThenBar])
        self.assertEquals(IInvariant.validateInvariants(o), None)
        o.bar = 27
        self.assertEquals(IInvariant.validateInvariants(o), None)
        o.foo = 42
        self.assertEquals(IInvariant.validateInvariants(o), None)
        del o.bar
        errorsEqual(self, o, 1, ['If Foo, then Bar!'])
        # nested interfaces with invariants:
        self.assertEquals(ISubInvariant.getTaggedValue('invariants'),
                          [BarGreaterThanFoo])
        o = InvariantC()
        directlyProvides(o, ISubInvariant)
        o.foo = 42
        # even though the interface has changed, we should still only have one
        # error.
        errorsEqual(self, o, 1, ['If Foo, then Bar!'], ISubInvariant)
        # however, if we set foo to 0 (Boolean False) and bar to a negative
        # number then we'll get the new error
        o.foo = 2
        o.bar = 1
        errorsEqual(self, o, 1, ['Please, Boo MUST be greater than Foo!'],
                    ISubInvariant)
        # and if we set foo to a positive number and boo to 0, we'll
        # get both errors!
        o.foo = 1
        o.bar = 0
        errorsEqual(self, o, 2, ['If Foo, then Bar!',
                                 'Please, Boo MUST be greater than Foo!'],
                    ISubInvariant)
        # for a happy ending, we'll make the invariants happy
        o.foo = 1
        o.bar = 2
        self.assertEquals(IInvariant.validateInvariants(o), None) # woohoo
        # now we'll do two invariants on the same interface,
        # just to make sure that a small
        # multi-invariant interface is at least minimally tested.
        o = InvariantC()
        directlyProvides(o, IInvariant)
        o.foo = 42
        old_invariants = IInvariant.getTaggedValue('invariants')
        invariants = old_invariants[:]
        invariants.append(BarGreaterThanFoo) # if you really need to mutate,
        # then this would be the way to do it. Probably a bad idea, though. :-)
        IInvariant.setTaggedValue('invariants', invariants)
        #
        # even though the interface has changed, we should still only have one
        # error.
        errorsEqual(self, o, 1, ['If Foo, then Bar!'])
        # however, if we set foo to 0 (Boolean False) and bar to a negative
        # number then we'll get the new error
        o.foo = 2
        o.bar = 1
        errorsEqual(self, o, 1, ['Please, Boo MUST be greater than Foo!'])
        # and if we set foo to a positive number and boo to 0, we'll
        # get both errors!
        o.foo = 1
        o.bar = 0
        errorsEqual(self, o, 2, ['If Foo, then Bar!',
                                 'Please, Boo MUST be greater than Foo!'])
        # for another happy ending, we'll make the invariants happy again
        o.foo = 1
        o.bar = 2
        self.assertEquals(IInvariant.validateInvariants(o), None) # bliss
        # clean up
        IInvariant.setTaggedValue('invariants', old_invariants)
    def test___doc___element(self):
        from zope.interface import Interface
        from zope.interface import Attribute
        class I(Interface):
            "xxx"
        self.assertEqual(I.__doc__, "xxx")
        self.assertEqual(list(I), [])
        class I(Interface):
            "xxx"
            __doc__ = Attribute('the doc')
        self.assertEqual(I.__doc__, "")
        self.assertEqual(list(I), ['__doc__'])
    def testIssue228(self):
        from zope.interface import Interface
        # Test for http://collector.zope.org/Zope3-dev/228
        if sys.version[0] == '3':
            # No old style classes in Python 3, so the test becomes moot.
            return
        class I(Interface):
            "xxx"
        class Bad:
            __providedBy__ = None
        # Old style classes don't have a '__class__' attribute
        self.failUnlessRaises(AttributeError, I.providedBy, Bad)
# Decorator syntax requires Python 2.4+, hence the version guard.
if sys.version_info >= (2, 4):
    def test_invariant_as_decorator():
        """Invariants can be defined in line with the @invariant decorator.
        >>> from zope.interface.exceptions import Invalid
        >>> from zope.interface import Interface
        >>> from zope.interface import Attribute
        >>> from zope.interface import implements
        >>> from zope.interface import invariant
        >>> class IRange(Interface):
        ...     min = Attribute("Lower bound")
        ...     max = Attribute("Upper bound")
        ...
        ...     @invariant
        ...     def range_invariant(ob):
        ...         if ob.max < ob.min:
        ...             raise Invalid('max < min')
        >>> class Range(object):
        ...     implements(IRange)
        ...
        ...     def __init__(self, min, max):
        ...         self.min, self.max = min, max
        >>> from zope.interface.exceptions import Invalid
        >>> IRange.validateInvariants(Range(1,2))
        >>> IRange.validateInvariants(Range(1,1))
        >>> try:
        ...     IRange.validateInvariants(Range(2,1))
        ... except Invalid, e:
        ...     str(e)
        'max < min'
        """
# Regression doctest: Specification.get()'s cache must be invalidated when
# an interface's __bases__ are reassigned.
def test_description_cache_management():
    """ See https://bugs.launchpad.net/zope.interface/+bug/185974
    There was a bug where the cache used by Specification.get() was not
    cleared when the bases were changed.
    >>> from zope.interface import Interface
    >>> from zope.interface import Attribute
    >>> class I1(Interface):
    ...     a = Attribute('a')
    >>> class I2(I1):
    ...     pass
    >>> class I3(I2):
    ...     pass
    >>> I3.get('a') is I1.get('a')
    True
    >>> I2.__bases__ = (Interface,)
    >>> I3.get('a') is None
    True
    """
def test_suite():
    """Build the full suite: unit tests plus every doctest-based suite."""
    suite = unittest.makeSuite(InterfaceTests)
    suite.addTest(doctest.DocTestSuite("zope.interface.interface"))
    if sys.version_info >= (2, 4):
        # The decorator-syntax doctests in this module need Python 2.4+.
        suite.addTest(doctest.DocTestSuite())
    flags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
    for readme in ('../README.txt', '../README.ru.txt'):
        suite.addTest(doctest.DocFileSuite(
            readme,
            globs={'__name__': '__main__'},
            optionflags=flags,
            ))
    return suite
| apache-2.0 |
mancoast/CPythonPyc_test | crash/261_test_trace.py | 51 | 22301 | # Testing the line trace facility.
from test import test_support
import unittest
import sys
import difflib
import gc
# A very basic example. If this fails, we're in deep trouble.
# Simplest fixture: one call, one traced line, one return.  NOTE: every
# .events list below holds (lineno, event) pairs relative to the function's
# own def line (compare_events normalizes via line_offset), so no lines may
# be added or removed *inside* these fixture functions.
def basic():
    return 1
basic.events = [(0, 'call'),
                (1, 'line'),
                (1, 'return')]
# Many of the tests below are tricky because they involve pass statements.
# If there is implicit control flow around a pass statement (in an except
# clause or else clause) under what conditions do you set a line number
# following that clause?
# The entire "while 0:" statement is optimized away. No code
# exists for it, so the line numbers skip directly from "del x"
# to "x = 1".
def arigo_example():
    x = 1
    del x
    while 0:
        pass
    x = 1
arigo_example.events = [(0, 'call'),
                        (1, 'line'),
                        (2, 'line'),
                        (5, 'line'),
                        (5, 'return')]
# check that lines consisting of just one instruction get traced:
# (each of the three body lines compiles to a single bytecode operation)
def one_instr_line():
    x = 1
    del x
    x = 1
one_instr_line.events = [(0, 'call'),
                         (1, 'line'),
                         (2, 'line'),
                         (3, 'line'),
                         (3, 'return')]
# Both branches of the if are executed across the two loop iterations; the
# trailing "# N" comments give each line's expected relative line number.
def no_pop_tops():      # 0
    x = 1               # 1
    for a in range(2):  # 2
        if a:           # 3
            x = 1       # 4
        else:           # 5
            x = 1       # 6
no_pop_tops.events = [(0, 'call'),
                      (1, 'line'),
                      (2, 'line'),
                      (3, 'line'),
                      (6, 'line'),
                      (2, 'line'),
                      (3, 'line'),
                      (4, 'line'),
                      (2, 'line'),
                      (2, 'return')]
# The while body never runs (condition is immediately false), so the trace
# jumps straight from the test to the final assignment.
def no_pop_blocks():
    y = 1
    while not y:
        bla
    x = 1
no_pop_blocks.events = [(0, 'call'),
                        (1, 'line'),
                        (2, 'line'),
                        (4, 'line'),
                        (4, 'return')]
# Cross-function fixture: call.events uses *negative* offsets to refer to
# `called`, which must stay exactly three lines above `call` -- do not
# insert or delete lines between these two functions.
def called(): # line -3
    x = 1
def call():   # line 0
    called()
call.events = [(0, 'call'),
               (1, 'line'),
               (-3, 'call'),
               (-2, 'line'),
               (-2, 'return'),
               (1, 'return')]
# Exception propagation fixture: negative offsets in test_raise.events refer
# to `raises`, which must stay exactly three lines above `test_raise`.
def raises():
    raise Exception
def test_raise():
    try:
        raises()
    except Exception, exc:
        x = 1
test_raise.events = [(0, 'call'),
                     (1, 'line'),
                     (2, 'line'),
                     (-3, 'call'),
                     (-2, 'line'),
                     (-2, 'exception'),
                     (-2, 'return'),
                     (2, 'exception'),
                     (3, 'line'),
                     (4, 'line'),
                     (4, 'return')]
# Installs the trace function from *inside* a callee (and retroactively on
# the caller's frame), so only the caller's 'return' event is recorded.
def _settrace_and_return(tracefunc):
    sys.settrace(tracefunc)
    sys._getframe().f_back.f_trace = tracefunc
def settrace_and_return(tracefunc):
    _settrace_and_return(tracefunc)
settrace_and_return.events = [(1, 'return')]
# Same as above, but the callee raises immediately after installing the
# trace function, so the caller records the exception handling path.
def _settrace_and_raise(tracefunc):
    sys.settrace(tracefunc)
    sys._getframe().f_back.f_trace = tracefunc
    raise RuntimeError
def settrace_and_raise(tracefunc):
    try:
        _settrace_and_raise(tracefunc)
    except RuntimeError, exc:
        pass
settrace_and_raise.events = [(2, 'exception'),
                             (3, 'line'),
                             (4, 'line'),
                             (4, 'return')]
# implicit return example
# This test is interesting because of the else: pass
# part of the code. The code generated for the true
# part of the if contains a jump past the else branch.
# The compiler then generates an implicit "return None"
# Internally, the compiler visits the pass statement
# and stores its line number for use on the next instruction.
# The next instruction is the implicit return None.
def ireturn_example():
    a = 5
    b = 5
    if a == b:
        b = a+1
    else:
        pass
ireturn_example.events = [(0, 'call'),
                          (1, 'line'),
                          (2, 'line'),
                          (3, 'line'),
                          (4, 'line'),
                          (6, 'line'),
                          (6, 'return')]
# Tight loop with while(1) example (SF #765624)
# The loop body (line 5) must be traced on every iteration until the
# IndexError escapes it.
def tightloop_example():
    items = range(0, 3)
    try:
        i = 0
        while 1:
            b = items[i]; i+=1
    except IndexError:
        pass
tightloop_example.events = [(0, 'call'),
                            (1, 'line'),
                            (2, 'line'),
                            (3, 'line'),
                            (4, 'line'),
                            (5, 'line'),
                            (5, 'line'),
                            (5, 'line'),
                            (5, 'line'),
                            (5, 'exception'),
                            (6, 'line'),
                            (7, 'line'),
                            (7, 'return')]
# Same as above but with the loop header and body on one line, so a
# single line event covers each iteration.
def tighterloop_example():
    items = range(1, 4)
    try:
        i = 0
        while 1: i = items[i]
    except IndexError:
        pass
tighterloop_example.events = [(0, 'call'),
                              (1, 'line'),
                              (2, 'line'),
                              (3, 'line'),
                              (4, 'line'),
                              (4, 'line'),
                              (4, 'line'),
                              (4, 'line'),
                              (4, 'exception'),
                              (5, 'line'),
                              (6, 'line'),
                              (6, 'return')]
# any() abandons the generator before exhaustion; the trace must still
# see the generator's 'finally' run.  The negative offsets reach back
# into generator_function(), so the spacing of these two defs (and the
# blank line inside generator_example) is significant.
def generator_function():
    try:
        yield True
        "continued"
    finally:
        "finally"
def generator_example():
    # any() will leave the generator before its end
    x = any(generator_function())

    # the following lines were not traced
    for x in range(10):
        y = x
generator_example.events = ([(0, 'call'),
                             (2, 'line'),
                             (-6, 'call'),
                             (-5, 'line'),
                             (-4, 'line'),
                             (-4, 'return'),
                             (-4, 'call'),
                             (-4, 'exception'),
                             (-1, 'line'),
                             (-1, 'return')] +
                            [(5, 'line'), (6, 'line')] * 10 +
                            [(5, 'line'), (5, 'return')])
class Tracer:
    """Trace callback that records every (line number, event name) pair."""

    def __init__(self):
        # Accumulated (f_lineno, event) tuples, in arrival order.
        self.events = []

    def trace(self, frame, event, arg):
        """Standard trace callback: log the event, keep tracing."""
        record = (frame.f_lineno, event)
        self.events += [record]
        return self.trace

    def traceWithGenexp(self, frame, event, arg):
        """Like trace(), but first evaluates a generator expression
        (regression check for issue #1265)."""
        (o for o in [1])
        self.events += [(frame.f_lineno, event)]
        return self.trace
class TraceTestCase(unittest.TestCase):
    """Runs each sample function under a Tracer and compares the
    recorded events (relative to co_firstlineno) with the function's
    .events attribute."""

    # Disable gc collection when tracing, otherwise the
    # deallocators may be traced as well.
    def setUp(self):
        self.using_gc = gc.isenabled()
        gc.disable()

    def tearDown(self):
        if self.using_gc:
            gc.enable()

    def compare_events(self, line_offset, events, expected_events):
        # Normalize absolute line numbers to offsets from the function's
        # first line before comparing.
        events = [(l - line_offset, e) for (l, e) in events]
        if events != expected_events:
            self.fail(
                "events did not match expectation:\n" +
                "\n".join(difflib.ndiff([str(x) for x in expected_events],
                                        [str(x) for x in events])))

    def run_and_compare(self, func, events):
        # Trace func() and check the events against the given list.
        tracer = Tracer()
        sys.settrace(tracer.trace)
        func()
        sys.settrace(None)
        self.compare_events(func.func_code.co_firstlineno,
                            tracer.events, events)

    def run_test(self, func):
        self.run_and_compare(func, func.events)

    def run_test2(self, func):
        # Variant for functions that install the trace function
        # themselves (the settrace_and_* samples).
        tracer = Tracer()
        func(tracer.trace)
        sys.settrace(None)
        self.compare_events(func.func_code.co_firstlineno,
                            tracer.events, func.events)

    def set_and_retrieve_none(self):
        sys.settrace(None)
        assert sys.gettrace() is None

    def set_and_retrieve_func(self):
        def fn(*args):
            pass

        sys.settrace(fn)
        try:
            assert sys.gettrace() is fn
        finally:
            sys.settrace(None)

    # One test per sample function defined at module level above.
    def test_01_basic(self):
        self.run_test(basic)
    def test_02_arigo(self):
        self.run_test(arigo_example)
    def test_03_one_instr(self):
        self.run_test(one_instr_line)
    def test_04_no_pop_blocks(self):
        self.run_test(no_pop_blocks)
    def test_05_no_pop_tops(self):
        self.run_test(no_pop_tops)
    def test_06_call(self):
        self.run_test(call)
    def test_07_raise(self):
        self.run_test(test_raise)
    def test_08_settrace_and_return(self):
        self.run_test2(settrace_and_return)
    def test_09_settrace_and_raise(self):
        self.run_test2(settrace_and_raise)
    def test_10_ireturn(self):
        self.run_test(ireturn_example)
    def test_11_tightloop(self):
        self.run_test(tightloop_example)
    def test_12_tighterloop(self):
        self.run_test(tighterloop_example)

    def test_13_genexp(self):
        self.run_test(generator_example)
        # issue1265: if the trace function contains a generator,
        # and if the traced function contains another generator
        # that is not completely exhausted, the trace stopped.
        # Worse: the 'finally' clause was not invoked.
        tracer = Tracer()
        sys.settrace(tracer.traceWithGenexp)
        generator_example()
        sys.settrace(None)
        self.compare_events(generator_example.__code__.co_firstlineno,
                            tracer.events, generator_example.events)

    def test_14_onliner_if(self):
        # Line layout of the nested function is significant -- do not
        # insert lines inside its body.
        def onliners():
            if True: False
            else: True
            return 0
        self.run_and_compare(
            onliners,
            [(0, 'call'),
             (1, 'line'),
             (3, 'line'),
             (3, 'return')])

    def test_15_loops(self):
        # issue1750076: "while" expression is skipped by debugger
        def for_example():
            for x in range(2):
                pass
        self.run_and_compare(
            for_example,
            [(0, 'call'),
             (1, 'line'),
             (2, 'line'),
             (1, 'line'),
             (2, 'line'),
             (1, 'line'),
             (1, 'return')])

        def while_example():
            # While expression should be traced on every loop
            x = 2
            while x > 0:
                x -= 1
        self.run_and_compare(
            while_example,
            [(0, 'call'),
             (2, 'line'),
             (3, 'line'),
             (4, 'line'),
             (3, 'line'),
             (4, 'line'),
             (3, 'line'),
             (3, 'return')])

    def test_16_blank_lines(self):
        # A body preceded by 256 blank lines still reports the right
        # line number.
        exec("def f():\n" + "\n" * 256 + " pass")
        self.run_and_compare(
            f,
            [(0, 'call'),
             (257, 'line'),
             (257, 'return')])
class RaisingTraceFuncTestCase(unittest.TestCase):
    """Checks that exceptions raised *by the trace function itself* are
    handled without corrupting the interpreter's recursion counter."""

    def trace(self, frame, event, arg):
        """A trace function that raises an exception in response to a
        specific trace event."""
        if event == self.raiseOnEvent:
            raise ValueError # just something that isn't RuntimeError
        else:
            return self.trace

    def f(self):
        """The function to trace; raises an exception if that's the case
        we're testing, so that the 'exception' trace event fires."""
        if self.raiseOnEvent == 'exception':
            x = 0
            y = 1/x
        else:
            return 1

    def run_test_for_event(self, event):
        """Tests that an exception raised in response to the given event is
        handled OK."""
        self.raiseOnEvent = event
        try:
            # One more iteration than the recursion limit: if the
            # recursion counter leaked, this loop would hit RuntimeError.
            for i in xrange(sys.getrecursionlimit() + 1):
                sys.settrace(self.trace)
                try:
                    self.f()
                except ValueError:
                    pass
                else:
                    self.fail("exception not thrown!")
        except RuntimeError:
            self.fail("recursion counter not reset")

    # Test the handling of exceptions raised by each kind of trace event.
    def test_call(self):
        self.run_test_for_event('call')
    def test_line(self):
        self.run_test_for_event('line')
    def test_return(self):
        self.run_test_for_event('return')
    def test_exception(self):
        self.run_test_for_event('exception')

    def test_trash_stack(self):
        # g() checks f's line offset, so f's body layout is significant.
        def f():
            for i in range(5):
                print i # line tracing will raise an exception at this line

        def g(frame, why, extra):
            if (why == 'line' and
                frame.f_lineno == f.func_code.co_firstlineno + 2):
                raise RuntimeError, "i am crashing"
            return g

        sys.settrace(g)
        try:
            f()
        except RuntimeError:
            # the test is really that this doesn't segfault:
            import gc
            gc.collect()
        else:
            self.fail("exception not propagated")
# 'Jump' tests: assigning to frame.f_lineno within a trace function
# moves the execution position - it's how debuggers implement a Jump
# command (aka. "Set next statement").

class JumpTracer:
    """Defines a trace function that jumps from one place to another,
    with the source and destination lines of the jump being defined by
    the 'jump' property of the function under test."""

    def __init__(self, function):
        self.function = function
        # (jumpFrom, jumpTo) are offsets relative to the function's
        # first line.
        self.jumpFrom = function.jump[0]
        self.jumpTo = function.jump[1]
        self.done = False

    def trace(self, frame, event, arg):
        # Only jump once, and only inside the function under test.
        if not self.done and frame.f_code == self.function.func_code:
            firstLine = frame.f_code.co_firstlineno
            if frame.f_lineno == firstLine + self.jumpFrom:
                # Cope with non-integer self.jumpTo (because of
                # no_jump_to_non_integers below).
                try:
                    frame.f_lineno = firstLine + self.jumpTo
                except TypeError:
                    frame.f_lineno = self.jumpTo
                self.done = True
        return self.trace
# The first set of 'jump' tests are for things that are allowed:
# Each function's .jump is (fromOffset, toOffset) relative to its first
# line and .output is the expected list -- body layout is significant.

def jump_simple_forwards(output):
    output.append(1)
    output.append(2)
    output.append(3)
jump_simple_forwards.jump = (1, 3)
jump_simple_forwards.output = [3]

def jump_simple_backwards(output):
    output.append(1)
    output.append(2)
jump_simple_backwards.jump = (2, 1)
jump_simple_backwards.output = [1, 1, 2]

def jump_out_of_block_forwards(output):
    for i in 1, 2:
        output.append(2)
        for j in [3]: # Also tests jumping over a block
            output.append(4)
    output.append(5)
jump_out_of_block_forwards.jump = (3, 5)
jump_out_of_block_forwards.output = [2, 5]

def jump_out_of_block_backwards(output):
    output.append(1)
    for i in [1]:
        output.append(3)
        for j in [2]: # Also tests jumping over a block
            output.append(5)
            output.append(6)
    output.append(7)
jump_out_of_block_backwards.jump = (6, 1)
jump_out_of_block_backwards.output = [1, 3, 5, 1, 3, 5, 6, 7]

def jump_to_codeless_line(output):
    output.append(1)
    # Jumping to this line should skip to the next one.
    output.append(3)
jump_to_codeless_line.jump = (1, 2)
jump_to_codeless_line.output = [3]

def jump_to_same_line(output):
    output.append(1)
    output.append(2)
    output.append(3)
jump_to_same_line.jump = (2, 2)
jump_to_same_line.output = [1, 2, 3]

# Tests jumping within a finally block, and over one.
def jump_in_nested_finally(output):
    try:
        output.append(2)
    finally:
        output.append(4)
        try:
            output.append(6)
        finally:
            output.append(8)
        output.append(9)
jump_in_nested_finally.jump = (4, 9)
jump_in_nested_finally.output = [2, 9]
# The second set of 'jump' tests are for things that are not allowed:
# each function expects the jump to raise ValueError and records whether
# the error message mentions the right reason.

def no_jump_too_far_forwards(output):
    try:
        output.append(2)
        output.append(3)
    except ValueError, e:
        output.append('after' in str(e))
no_jump_too_far_forwards.jump = (3, 6)
no_jump_too_far_forwards.output = [2, True]

def no_jump_too_far_backwards(output):
    try:
        output.append(2)
        output.append(3)
    except ValueError, e:
        output.append('before' in str(e))
no_jump_too_far_backwards.jump = (3, -1)
no_jump_too_far_backwards.output = [2, True]

# Test each kind of 'except' line.
def no_jump_to_except_1(output):
    try:
        output.append(2)
    except:
        e = sys.exc_info()[1]
        output.append('except' in str(e))
no_jump_to_except_1.jump = (2, 3)
no_jump_to_except_1.output = [True]

def no_jump_to_except_2(output):
    try:
        output.append(2)
    except ValueError:
        e = sys.exc_info()[1]
        output.append('except' in str(e))
no_jump_to_except_2.jump = (2, 3)
no_jump_to_except_2.output = [True]

def no_jump_to_except_3(output):
    try:
        output.append(2)
    except ValueError, e:
        output.append('except' in str(e))
no_jump_to_except_3.jump = (2, 3)
no_jump_to_except_3.output = [True]

def no_jump_to_except_4(output):
    try:
        output.append(2)
    except (ValueError, RuntimeError), e:
        output.append('except' in str(e))
no_jump_to_except_4.jump = (2, 3)
no_jump_to_except_4.output = [True]

def no_jump_forwards_into_block(output):
    try:
        output.append(2)
        for i in 1, 2:
            output.append(4)
    except ValueError, e:
        output.append('into' in str(e))
no_jump_forwards_into_block.jump = (2, 4)
no_jump_forwards_into_block.output = [True]

def no_jump_backwards_into_block(output):
    try:
        for i in 1, 2:
            output.append(3)
        output.append(4)
    except ValueError, e:
        output.append('into' in str(e))
no_jump_backwards_into_block.jump = (4, 3)
no_jump_backwards_into_block.output = [3, 3, True]

def no_jump_into_finally_block(output):
    try:
        try:
            output.append(3)
            x = 1
        finally:
            output.append(6)
    except ValueError, e:
        output.append('finally' in str(e))
no_jump_into_finally_block.jump = (4, 6)
no_jump_into_finally_block.output = [3, 6, True]  # The 'finally' still runs

def no_jump_out_of_finally_block(output):
    try:
        try:
            output.append(3)
        finally:
            output.append(5)
            output.append(6)
    except ValueError, e:
        output.append('finally' in str(e))
no_jump_out_of_finally_block.jump = (5, 1)
no_jump_out_of_finally_block.output = [3, True]

# This verifies the line-numbers-must-be-integers rule.
def no_jump_to_non_integers(output):
    try:
        output.append(2)
    except ValueError, e:
        output.append('integer' in str(e))
no_jump_to_non_integers.jump = (2, "Spam")
no_jump_to_non_integers.output = [True]

# This verifies that you can't set f_lineno via _getframe or similar
# trickery.
def no_jump_without_trace_function():
    try:
        previous_frame = sys._getframe().f_back
        previous_frame.f_lineno = previous_frame.f_lineno
    except ValueError, e:
        # This is the exception we wanted; make sure the error message
        # talks about trace functions.
        if 'trace' not in str(e):
            raise
    else:
        # Something's wrong - the expected exception wasn't raised.
        raise RuntimeError, "Trace-function-less jump failed to fail"
class JumpTestCase(unittest.TestCase):
    """Runs each jump sample under a JumpTracer and compares the values
    it appended to 'output' against the sample's .output attribute."""

    def compare_jump_output(self, expected, received):
        if received != expected:
            self.fail( "Outputs don't match:\n" +
                       "Expected: " + repr(expected) + "\n" +
                       "Received: " + repr(received))

    def run_test(self, func):
        # The tracer reads func.jump to decide where to move f_lineno.
        tracer = JumpTracer(func)
        sys.settrace(tracer.trace)
        output = []
        func(output)
        sys.settrace(None)
        self.compare_jump_output(func.output, output)

    def test_01_jump_simple_forwards(self):
        self.run_test(jump_simple_forwards)
    def test_02_jump_simple_backwards(self):
        self.run_test(jump_simple_backwards)
    def test_03_jump_out_of_block_forwards(self):
        self.run_test(jump_out_of_block_forwards)
    def test_04_jump_out_of_block_backwards(self):
        self.run_test(jump_out_of_block_backwards)
    def test_05_jump_to_codeless_line(self):
        self.run_test(jump_to_codeless_line)
    def test_06_jump_to_same_line(self):
        self.run_test(jump_to_same_line)
    def test_07_jump_in_nested_finally(self):
        self.run_test(jump_in_nested_finally)
    def test_08_no_jump_too_far_forwards(self):
        self.run_test(no_jump_too_far_forwards)
    def test_09_no_jump_too_far_backwards(self):
        self.run_test(no_jump_too_far_backwards)
    def test_10_no_jump_to_except_1(self):
        self.run_test(no_jump_to_except_1)
    def test_11_no_jump_to_except_2(self):
        self.run_test(no_jump_to_except_2)
    def test_12_no_jump_to_except_3(self):
        self.run_test(no_jump_to_except_3)
    def test_13_no_jump_to_except_4(self):
        self.run_test(no_jump_to_except_4)
    def test_14_no_jump_forwards_into_block(self):
        self.run_test(no_jump_forwards_into_block)
    def test_15_no_jump_backwards_into_block(self):
        self.run_test(no_jump_backwards_into_block)
    def test_16_no_jump_into_finally_block(self):
        self.run_test(no_jump_into_finally_block)
    def test_17_no_jump_out_of_finally_block(self):
        self.run_test(no_jump_out_of_finally_block)
    def test_18_no_jump_to_non_integers(self):
        self.run_test(no_jump_to_non_integers)
    def test_19_no_jump_without_trace_function(self):
        # Needs no tracer; the sample checks the error itself.
        no_jump_without_trace_function()
def test_main():
    # Standard Python-2 test-suite entry point (regrtest calls this).
    test_support.run_unittest(
        TraceTestCase,
        RaisingTraceFuncTestCase,
        JumpTestCase
    )

if __name__ == "__main__":
    test_main()
| gpl-3.0 |
yuezh/azure-linux-extensions | OSPatching/azure/http/httpclient.py | 46 | 8556 | #-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import base64
import os
import sys
if sys.version_info < (3,):
from httplib import (
HTTPSConnection,
HTTPConnection,
HTTP_PORT,
HTTPS_PORT,
)
from urlparse import urlparse
else:
from http.client import (
HTTPSConnection,
HTTPConnection,
HTTP_PORT,
HTTPS_PORT,
)
from urllib.parse import urlparse
from azure.http import HTTPError, HTTPResponse
from azure import _USER_AGENT_STRING, _update_request_uri_query
class _HTTPClient(object):
    '''
    Takes the request and sends it to cloud service and returns the response.
    '''

    def __init__(self, service_instance, cert_file=None, account_name=None,
                 account_key=None, protocol='https'):
        '''
        service_instance: service client instance.
        cert_file:
            certificate file name/location. This is only used in hosted
            service management.
        account_name: the storage account.
        account_key:
            the storage account access key.
        '''
        self.service_instance = service_instance
        # status/respheader/message mirror the last response (also
        # returned via HTTPResponse); kept for backwards compatibility.
        self.status = None
        self.respheader = None
        self.message = None
        self.cert_file = cert_file
        self.account_name = account_name
        self.account_key = account_key
        self.protocol = protocol
        # Optional HTTP CONNECT proxy settings (see set_proxy).
        self.proxy_host = None
        self.proxy_port = None
        self.proxy_user = None
        self.proxy_password = None
        self.use_httplib = self.should_use_httplib()

    def should_use_httplib(self):
        # Returns True when the stdlib http client should be used, False
        # to fall back to the winhttp wrapper (Windows cert-store certs).
        if sys.platform.lower().startswith('win') and self.cert_file:
            # On Windows, auto-detect between Windows Store Certificate
            # (winhttp) and OpenSSL .pem certificate file (httplib).
            #
            # We used to only support certificates installed in the Windows
            # Certificate Store.
            #   cert_file example: CURRENT_USER\my\CertificateName
            #
            # We now support using an OpenSSL .pem certificate file,
            # for a consistent experience across all platforms.
            #   cert_file example: account\certificate.pem
            #
            # When using OpenSSL .pem certificate file on Windows, make sure
            # you are on CPython 2.7.4 or later.

            # If it's not an existing file on disk, then treat it as a path in
            # the Windows Certificate Store, which means we can't use httplib.
            if not os.path.isfile(self.cert_file):
                return False

        return True

    def set_proxy(self, host, port, user, password):
        '''
        Sets the proxy server host and port for the HTTP CONNECT Tunnelling.

        host: Address of the proxy. Ex: '192.168.0.100'
        port: Port of the proxy. Ex: 6000
        user: User for proxy authorization.
        password: Password for proxy authorization.
        '''
        self.proxy_host = host
        self.proxy_port = port
        self.proxy_user = user
        self.proxy_password = password

    def get_uri(self, request):
        ''' Return the target uri for the request.'''
        protocol = request.protocol_override \
            if request.protocol_override else self.protocol
        port = HTTP_PORT if protocol == 'http' else HTTPS_PORT
        return protocol + '://' + request.host + ':' + str(port) + request.path

    def get_connection(self, request):
        ''' Create connection for the request. '''
        protocol = request.protocol_override \
            if request.protocol_override else self.protocol
        target_host = request.host
        target_port = HTTP_PORT if protocol == 'http' else HTTPS_PORT

        if not self.use_httplib:
            # winhttp path: the wrapper handles proxying itself; it only
            # needs the proxy endpoint for the later set_tunnel call.
            import azure.http.winhttp
            connection = azure.http.winhttp._HTTPConnection(
                target_host, cert_file=self.cert_file, protocol=protocol)
            proxy_host = self.proxy_host
            proxy_port = self.proxy_port
        else:
            # An explicit 'host:port' in the request overrides the
            # protocol's default port.
            if ':' in target_host:
                target_host, _, target_port = target_host.rpartition(':')
            if self.proxy_host:
                # Connect to the proxy; the real target becomes the
                # CONNECT tunnel endpoint.
                proxy_host = target_host
                proxy_port = target_port
                host = self.proxy_host
                port = self.proxy_port
            else:
                host = target_host
                port = target_port

            if protocol == 'http':
                connection = HTTPConnection(host, int(port))
            else:
                connection = HTTPSConnection(
                    host, int(port), cert_file=self.cert_file)

            if self.proxy_host:
                headers = None
                if self.proxy_user and self.proxy_password:
                    auth = base64.encodestring(
                        "{0}:{1}".format(self.proxy_user, self.proxy_password))
                    headers = {'Proxy-Authorization': 'Basic {0}'.format(auth)}
                connection.set_tunnel(proxy_host, int(proxy_port), headers)

        return connection

    def send_request_headers(self, connection, request_headers):
        if self.use_httplib:
            if self.proxy_host:
                # When tunnelling, replace the buffered Host header with
                # the tunnel endpoint so the target sees the right host.
                for i in connection._buffer:
                    if i.startswith("Host: "):
                        connection._buffer.remove(i)
                connection.putheader(
                    'Host', "{0}:{1}".format(connection._tunnel_host,
                                             connection._tunnel_port))

        for name, value in request_headers:
            if value:
                connection.putheader(name, value)

        connection.putheader('User-Agent', _USER_AGENT_STRING)
        connection.endheaders()

    def send_request_body(self, connection, request_body):
        if request_body:
            assert isinstance(request_body, bytes)
            connection.send(request_body)
        elif (not isinstance(connection, HTTPSConnection) and
              not isinstance(connection, HTTPConnection)):
            # The winhttp connection requires an explicit empty send.
            connection.send(None)

    def perform_request(self, request):
        ''' Sends request to cloud service server and return the response. '''
        connection = self.get_connection(request)
        try:
            connection.putrequest(request.method, request.path)

            if not self.use_httplib:
                if self.proxy_host and self.proxy_user:
                    connection.set_proxy_credentials(
                        self.proxy_user, self.proxy_password)

            self.send_request_headers(connection, request.headers)
            self.send_request_body(connection, request.body)

            resp = connection.getresponse()
            self.status = int(resp.status)
            self.message = resp.reason
            self.respheader = headers = resp.getheaders()

            # for consistency across platforms, make header names lowercase
            for i, value in enumerate(headers):
                headers[i] = (value[0].lower(), value[1])

            respbody = None
            if resp.length is None:
                respbody = resp.read()
            elif resp.length > 0:
                respbody = resp.read(resp.length)

            response = HTTPResponse(
                int(resp.status), resp.reason, headers, respbody)
            if self.status == 307:
                # Temporary redirect: follow it by recursing with the
                # rewritten host/path.  NOTE(review): no redirect-loop
                # guard here -- a cyclic 307 would recurse indefinitely.
                new_url = urlparse(dict(headers)['location'])
                request.host = new_url.hostname
                request.path = new_url.path
                request.path, request.query = _update_request_uri_query(request)
                return self.perform_request(request)
            if self.status >= 300:
                raise HTTPError(self.status, self.message,
                                self.respheader, respbody)

            return response
        finally:
            connection.close()
| apache-2.0 |
SaschaMester/delicium | testing/scripts/common.py | 13 | 4437 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import contextlib
import json
import os
import subprocess
import sys
import tempfile
# Directory containing this script, and the repository root two levels up.
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
SRC_DIR = os.path.abspath(
    os.path.join(SCRIPT_DIR, os.path.pardir, os.path.pardir))

# run-webkit-tests returns the number of failures as the return
# code, but caps the return code at 101 to avoid overflow or colliding
# with reserved values from the shell.
MAX_FAILURES_EXIT_STATUS = 101
def run_script(argv, funcs):
    """Parse the standard recipe-script command line and dispatch.

    argv:  the argument list (without the program name).
    funcs: dict mapping subcommand name ('run', 'compile_targets') to a
           callable taking the parsed namespace.
    Returns whatever the dispatched callable returns.
    """
    def load_json_file(path):
        with open(path) as fp:
            return json.load(fp)

    parser = argparse.ArgumentParser()
    # TODO(phajdan.jr): Make build-config-fs required after passing it in recipe.
    parser.add_argument('--build-config-fs')
    parser.add_argument('--paths', type=load_json_file, default={})
    # Properties describe the environment of the build, and are the same per
    # script invocation.
    parser.add_argument('--properties', type=load_json_file, default={})
    # Args contains per-invocation arguments that potentially change the
    # behavior of the script.
    parser.add_argument('--args', type=load_json_file, default=[])

    subcommands = parser.add_subparsers()

    run_cmd = subcommands.add_parser('run')
    run_cmd.add_argument(
        '--output', type=argparse.FileType('w'), required=True)
    run_cmd.add_argument('--filter-file', type=argparse.FileType('r'))
    run_cmd.set_defaults(func=funcs['run'])

    compile_cmd = subcommands.add_parser('compile_targets')
    compile_cmd.add_argument(
        '--output', type=argparse.FileType('w'), required=True)
    compile_cmd.set_defaults(func=funcs['compile_targets'])

    parsed = parser.parse_args(argv)
    return parsed.func(parsed)
def run_command(argv):
    # Run a subprocess, echoing the command and its exit code to the
    # build log, and return the exit code.  (Python 2 'print' syntax.)
    print 'Running %r' % argv
    rc = subprocess.call(argv)
    print 'Command %r returned exit code %d' % (argv, rc)
    return rc
def run_runtest(cmd_args, runtest_args):
    """Invoke build/scripts/slave/runtest.py (via runit.py) under xvfb,
    forwarding the build environment from cmd_args plus runtest_args."""
    runit = os.path.join(
        cmd_args.paths['build'], 'scripts', 'tools', 'runit.py')
    runtest = os.path.join(
        cmd_args.paths['build'], 'scripts', 'slave', 'runtest.py')
    props = cmd_args.properties
    base_cmd = [
        sys.executable,
        runit,
        '--show-path',
        sys.executable,
        runtest,
        '--target', cmd_args.build_config_fs,
        '--xvfb',
        '--builder-name', props['buildername'],
        '--slave-name', props['slavename'],
        '--build-number', str(props['buildnumber']),
        '--build-properties', json.dumps(props),
    ]
    return run_command(base_cmd + runtest_args)
@contextlib.contextmanager
def temporary_file():
    """Yield the path of a fresh temporary file, deleting it on exit."""
    handle, file_path = tempfile.mkstemp()
    # Callers reopen the file by path; keep no descriptor of our own.
    os.close(handle)
    try:
        yield file_path
    finally:
        os.remove(file_path)
def parse_common_test_results(json_results, test_separator='/'):
    # Flattens the nested 'tests' trie of a JSON results file and sorts
    # every test into one of six buckets: (unexpected_)passes/failures/
    # flakes.  (Python 2 code: uses dict.iteritems.)
    def convert_trie_to_flat_paths(trie, prefix=None):
        # Also see webkitpy.layout_tests.layout_package.json_results_generator
        result = {}
        for name, data in trie.iteritems():
            if prefix:
                name = prefix + test_separator + name
            # A node without 'actual'/'expected' keys is an inner trie
            # node; recurse to reach the leaves.
            if len(data) and not 'actual' in data and not 'expected' in data:
                result.update(convert_trie_to_flat_paths(data, name))
            else:
                result[name] = data

        return result

    results = {
        'passes': {},
        'unexpected_passes': {},
        'failures': {},
        'unexpected_failures': {},
        'flakes': {},
        'unexpected_flakes': {},
    }

    # TODO(dpranke): crbug.com/357866 - we should simplify the handling of
    # both the return code and parsing the actual results, below.

    passing_statuses = ('PASS', 'SLOW', 'NEEDSREBASELINE',
                        'NEEDSMANUALREBASELINE')

    for test, result in convert_trie_to_flat_paths(
            json_results['tests']).iteritems():
        key = 'unexpected_' if result.get('is_unexpected') else ''
        data = result['actual']
        actual_results = data.split()
        last_result = actual_results[-1]
        expected_results = result['expected'].split()

        # Multiple recorded results ending in a pass => the test flaked.
        if (len(actual_results) > 1 and
            (last_result in expected_results or last_result in passing_statuses)):
            key += 'flakes'
        elif last_result in passing_statuses:
            key += 'passes'
            # TODO(dpranke): crbug.com/357867 ...  Why are we assigning result
            # instead of actual_result here. Do we even need these things to be
            # hashes, or just lists?
            data = result
        else:
            key += 'failures'
        results[key][test] = data

    return results
| bsd-3-clause |
2Habibie/ctocpp | c2cpp/pmake.py | 1 | 1606 | #!/usr/bin/env python
"""
C to C++ Translator
Convert a C program or whole project to C++
Copyright (C) 2001-2009 Denis Sureau
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
webmaster@scriptol.com
http://www.scriptol.com
PMAKE
Compile a list of sources
"""
import os
import string
import sys
# remove unwanted codes from lines
def chop(n):
while (len(n) > 1) & (n[-1] in ("\n", "\r")):
n = n[0:-1]
return n
# Top-level script body (Python 2): compile every .c/.C source listed in
# cdlist.prj with Borland bcc32, logging compiler chatter to 'test'.
path = os.getcwd()
# read the list of files
fic = open("cdlist.prj","r")
liste = fic.readlines()
fic.close()
# Redirect our own prints into the 'test' log file.
sortie = open("test", "w")
sys.stdout = sortie
# scan the list of sources and compile each .C one
for n in liste:
    n = chop(n)
    if os.path.isdir(n): continue
    node, ext = os.path.splitext(n)
    # Uppercase the extension so '.c' and '.C' both match below.
    ext = string.upper(ext)
    if ext in [ ".c", ".C" ]:
        print "compiling " + n,
        os.system("bcc32 -c " + node)
sortie.close()
| gpl-2.0 |
h3biomed/ansible | lib/ansible/executor/discovery/python_target.py | 84 | 1234 | # Copyright: (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# FUTURE: this could be swapped out for our bundled version of distro to move more complete platform
# logic to the targets, so long as we maintain Py2.6 compat and don't need to do any kind of script assembly
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import platform
import io
import os
def read_utf8_file(path, encoding='utf-8'):
    """Return the decoded text of *path*, or None when it is unreadable
    or missing (callers treat absence as a normal condition)."""
    if not os.access(path, os.R_OK):
        return None
    with io.open(path, 'r', encoding=encoding) as handle:
        return handle.read()
def get_platform_info():
    """Collect best-effort OS identification data for interpreter
    discovery: the legacy platform.dist() tuple (when available) and
    the raw os-release file contents."""
    info = {'platform_dist_result': []}
    # platform.dist() was removed in Python 3.8; probe before calling.
    if hasattr(platform, 'dist'):
        info['platform_dist_result'] = platform.dist()

    content = read_utf8_file('/etc/os-release')
    # try to fall back to /usr/lib/os-release
    if not content:
        content = read_utf8_file('/usr/lib/os-release')
    info['osrelease_content'] = content

    return info
def main():
    # Emit the platform facts as JSON on stdout; the controller-side
    # discovery code parses this output.
    info = get_platform_info()
    print(json.dumps(info))


if __name__ == '__main__':
    main()
| gpl-3.0 |
bitesofcode/projexui | projexui/widgets/ximageslider/ximagescene.py | 2 | 1916 | """ [desc] """
# define authorship information
__authors__ = ['Eric Hulser']
__author__ = ','.join(__authors__)
__credits__ = []
__copyright__ = 'Copyright (c) 2011, Projex Software'
__license__ = 'LGPL'
import random
from projexui.qt.QtCore import Qt, QRectF
from projexui.qt.QtGui import QGraphicsScene,\
QLinearGradient,\
QColor
class XImageScene(QGraphicsScene):
    """Graphics scene backing XImageSlider: computes per-item tilt
    angles for the cover-flow effect and paints a fade-out gradient
    over the viewport edges."""

    def __init__(self, slider):
        super(XImageScene, self).__init__()
        # The owning XImageSlider view; used for coordinate mapping,
        # palette and viewport geometry.
        self._slider = slider

    def angle(self, item):
        """Return the tilt angle (degrees) for *item*, proportional to
        its horizontal distance from the viewport center and clamped to
        the range [-60, 60]."""
        center = item.pos() + item.boundingRect().center()
        vcenter = self._slider.mapFromScene(center)
        dx = vcenter.x() - self._slider.rect().center().x()
        w = item.boundingRect().width()
        base = float(w * 2)
        # Zero-width item: no meaningful angle, avoid division by zero.
        if base == 0:
            return 0
        # BUG FIX: the original had the clamps inverted
        # (max(-60, ...) for positive dx and min(60, ...) for negative
        # dx), which never limited the magnitude.  Cap at +60 on the
        # right and -60 on the left.
        if dx > 0:
            return min(60, 60 * dx / base)
        else:
            return max(-60, 60 * dx / base)

    def drawForeground(self, painter, rect):
        """Paint a horizontal gradient over the viewport: opaque base
        color at the left/right edges fading to transparent in the
        middle, so items appear to fade out as they scroll away."""
        palette = self._slider.palette()
        color = palette.color(palette.Base)

        # Work in scene coordinates covering the visible viewport; the
        # 'rect' parameter is intentionally replaced.
        trans = self._slider.viewportTransform()
        rect = trans.mapRect(self._slider.rect())
        width = rect.width()
        rect.setX(abs(rect.x()))
        rect.setWidth(width)

        clear = QColor(0, 0, 0, 0)
        grad = QLinearGradient()
        grad.setStart(rect.left(), 0)
        grad.setFinalStop(rect.right(), 0)
        grad.setColorAt(0.0, color)
        grad.setColorAt(0.3, clear)
        grad.setColorAt(0.7, clear)
        grad.setColorAt(1.0, color)

        painter.setBrush(grad)
        painter.setPen(Qt.NoPen)
        painter.drawRect(rect)

    def recalculate(self):
        # Placeholder kept for API compatibility; layout is driven by
        # the slider.
        pass
mikewiebe-ansible/ansible | lib/ansible/modules/cloud/rackspace/rax_cbs_attachments.py | 102 | 6575 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rax_cbs_attachments
short_description: Manipulate Rackspace Cloud Block Storage Volume Attachments
description:
- Manipulate Rackspace Cloud Block Storage Volume Attachments
version_added: 1.6
options:
device:
description:
- The device path to attach the volume to, e.g. /dev/xvde.
- Before 2.4 this was a required field. Now it can be left to null to auto assign the device name.
volume:
description:
- Name or id of the volume to attach/detach
required: true
server:
description:
- Name or id of the server to attach/detach
required: true
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
required: true
wait:
description:
- wait for the volume to be in 'in-use'/'available' state before returning
type: bool
default: 'no'
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
author:
- "Christopher H. Laco (@claco)"
- "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Attach a Block Storage Volume
gather_facts: False
hosts: local
connection: local
tasks:
- name: Storage volume attach request
local_action:
module: rax_cbs_attachments
credentials: ~/.raxpub
volume: my-volume
server: my-server
device: /dev/xvdd
region: DFW
wait: yes
state: present
register: my_volume
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.rax import (NON_CALLABLES,
rax_argument_spec,
rax_find_server,
rax_find_volume,
rax_required_together,
rax_to_dict,
setup_rax_module,
)
def cloud_block_storage_attachments(module, state, volume, server, device,
                                    wait, wait_timeout):
    """Attach or detach a Rackspace Cloud Block Storage volume.

    Depending on ``state`` ('present' or 'absent'), attaches the named
    volume to the named server or detaches it, optionally waiting for the
    volume to reach the 'in-use'/'available' status.  Never returns
    normally: always exits via module.exit_json()/module.fail_json().
    """
    cbs = pyrax.cloud_blockstorage
    cs = pyrax.cloudservers

    if cbs is None or cs is None:
        module.fail_json(msg='Failed to instantiate client. This '
                             'typically indicates an invalid region or an '
                             'incorrectly capitalized region name.')

    changed = False
    instance = {}

    volume = rax_find_volume(module, pyrax, volume)

    if not volume:
        module.fail_json(msg='No matching storage volumes were found')

    if state == 'present':
        server = rax_find_server(module, pyrax, server)

        if (volume.attachments and
                volume.attachments[0]['server_id'] == server.id):
            # already attached to the requested server: nothing to do
            changed = False
        elif volume.attachments:
            module.fail_json(msg='Volume is attached to another server')
        else:
            try:
                volume.attach_to_instance(server, mountpoint=device)
                changed = True
            except Exception as e:
                # use str(e), not e.message: the .message attribute does
                # not exist on Python 3 exceptions
                module.fail_json(msg='%s' % e)

            volume.get()

        for key, value in vars(volume).items():
            if (isinstance(value, NON_CALLABLES) and
                    not key.startswith('_')):
                instance[key] = value

        result = dict(changed=changed)

        if volume.status == 'error':
            result['msg'] = '%s failed to build' % volume.id
        elif wait:
            attempts = wait_timeout // 5
            pyrax.utils.wait_until(volume, 'status', 'in-use',
                                   interval=5, attempts=attempts)
            volume.get()

        result['volume'] = rax_to_dict(volume)

        if 'msg' in result:
            module.fail_json(**result)
        else:
            module.exit_json(**result)

    elif state == 'absent':
        server = rax_find_server(module, pyrax, server)

        if (volume.attachments and
                volume.attachments[0]['server_id'] == server.id):
            try:
                volume.detach()
                if wait:
                    pyrax.utils.wait_until(volume, 'status', 'available',
                                           interval=3, attempts=0,
                                           verbose=False)
                changed = True
            except Exception as e:
                # use str(e), not e.message: the .message attribute does
                # not exist on Python 3 exceptions
                module.fail_json(msg='%s' % e)

            volume.get()
            changed = True
        elif volume.attachments:
            module.fail_json(msg='Volume is attached to another server')

        result = dict(changed=changed, volume=rax_to_dict(volume))

        if volume.status == 'error':
            result['msg'] = '%s failed to build' % volume.id

        if 'msg' in result:
            module.fail_json(**result)
        else:
            module.exit_json(**result)

    module.exit_json(changed=changed, volume=instance)
def main():
    """Module entry point: build the argument spec, validate input and
    hand off to the attachment worker."""
    argument_spec = rax_argument_spec()
    argument_spec.update(
        dict(
            device=dict(required=False),
            volume=dict(required=True),
            server=dict(required=True),
            state=dict(default='present', choices=['present', 'absent']),
            wait=dict(type='bool', default=False),
            wait_timeout=dict(type='int', default=300)
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together()
    )

    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')

    params = module.params
    device, volume, server = (params.get('device'), params.get('volume'),
                              params.get('server'))
    state, wait, wait_timeout = (params.get('state'), params.get('wait'),
                                 params.get('wait_timeout'))

    setup_rax_module(module, pyrax)

    cloud_block_storage_attachments(module, state, volume, server, device,
                                    wait, wait_timeout)
if __name__ == '__main__':
main()
| gpl-3.0 |
xubenben/scikit-learn | sklearn/linear_model/tests/test_sparse_coordinate_descent.py | 244 | 9986 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet,
LassoCV, ElasticNetCV)
def test_sparse_coef():
    # Check that the sparse_coef_ property works
    clf = ElasticNet()
    clf.coef_ = [1, 2, 3]
    # sparse_coef_ must expose coef_ as a scipy sparse matrix
    assert_true(sp.isspmatrix(clf.sparse_coef_))
    assert_equal(clf.sparse_coef_.toarray().tolist()[0], clf.coef_)
def test_normalize_option():
    # Check that the normalize option in enet works: fitting on dense and
    # on sparse input must give the same result.
    # Start from a dense array so that clf_dense actually exercises the
    # dense code path (previously X was already a csc_matrix, so both
    # estimators went through the sparse path).
    X = np.array([[-1], [0], [1]])
    y = [-1, 0, 1]
    clf_dense = ElasticNet(fit_intercept=True, normalize=True)
    clf_sparse = ElasticNet(fit_intercept=True, normalize=True)
    clf_dense.fit(X, y)
    X = sp.csc_matrix(X)
    clf_sparse.fit(X, y)
    assert_almost_equal(clf_dense.dual_gap_, 0)
    assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_)
def test_lasso_zero():
    # The sparse Lasso must cope with an all-zero design matrix without
    # crashing and return the trivial solution.
    X = sp.csc_matrix((3, 1))
    y = [0, 0, 0]
    T = np.array([[1], [2], [3]])
    model = Lasso().fit(X, y)
    predictions = model.predict(T)
    assert_array_almost_equal(model.coef_, [0])
    assert_array_almost_equal(predictions, [0, 0, 0])
    assert_almost_equal(model.dual_gap_, 0)
def test_enet_toy_list_input():
    # Test ElasticNet for various values of alpha and l1_ratio with list X
    X = sp.csc_matrix(np.array([[-1], [0], [1]]))
    Y = [-1, 0, 1]  # just a straight line
    T = np.array([[2], [3], [4]])  # test sample

    def check(model, coef, preds, decimal):
        # Common assertions: coefficients, predictions, converged dual gap.
        pred = model.predict(T)
        assert_array_almost_equal(model.coef_, coef, decimal)
        assert_array_almost_equal(pred, preds, decimal)
        assert_almost_equal(model.dual_gap_, 0)

    # alpha=0 should match unregularized least squares; this is
    # discouraged (it warns) but must still work.
    clf = ElasticNet(alpha=0, l1_ratio=1.0)
    ignore_warnings(clf.fit)(X, Y)
    check(clf, [1], [2, 3, 4], 6)

    clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
    clf.fit(X, Y)
    check(clf, [0.50819], [1.0163, 1.5245, 2.0327], 3)

    clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
    clf.fit(X, Y)
    check(clf, [0.45454], [0.9090, 1.3636, 1.8181], 3)
def test_enet_toy_explicit_sparse_input():
    # Test ElasticNet for various values of alpha and l1_ratio with sparse X

    # training samples: the identity function sampled at -1, 0, 1
    X = sp.lil_matrix((3, 1))
    X[0, 0] = -1
    # X[1, 0] is left at its implicit zero
    X[2, 0] = 1
    Y = [-1, 0, 1]

    # test samples
    T = sp.lil_matrix((3, 1))
    T[0, 0] = 2
    T[1, 0] = 3
    T[2, 0] = 4

    def check(model, coef, preds, decimal):
        # Common assertions: coefficients, predictions, converged dual gap.
        pred = model.predict(T)
        assert_array_almost_equal(model.coef_, coef, decimal)
        assert_array_almost_equal(pred, preds, decimal)
        assert_almost_equal(model.dual_gap_, 0)

    # alpha=0 should behave like the unregularized fit (warns, so the
    # warning is silenced)
    clf = ElasticNet(alpha=0, l1_ratio=1.0)
    ignore_warnings(clf.fit)(X, Y)
    check(clf, [1], [2, 3, 4], 6)

    clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
    clf.fit(X, Y)
    check(clf, [0.50819], [1.0163, 1.5245, 2.0327], 3)

    clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
    clf.fit(X, Y)
    check(clf, [0.45454], [0.9090, 1.3636, 1.8181], 3)
def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42,
                     positive=False, n_targets=1):
    """Build an ill-posed sparse linear regression problem.

    Many noisy features, comparatively few samples; only the first
    ``n_informative`` features influence the targets.  Returns the design
    matrix as a CSC sparse matrix and ``y`` as a 1-D array when
    ``n_targets == 1``, 2-D otherwise.
    """
    rng = np.random.RandomState(seed)

    # ground-truth coefficients: only the leading features matter
    # (the order of the random draws below is part of the contract for a
    # given seed and must not change)
    w = rng.randn(n_features, n_targets)
    w[n_informative:] = 0.0
    if positive:
        w = np.abs(w)

    X = rng.randn(n_samples, n_features)
    mask = rng.uniform(size=(n_samples, n_features))
    X[mask > 0.5] = 0.0  # zero out roughly half of the input signal

    # noiseless training labels
    y = np.dot(X, w)
    X = sp.csc_matrix(X)
    if n_targets == 1:
        y = np.ravel(y)
    return X, y
def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):
    # Fit the same ElasticNet on sparse and dense views of one dataset and
    # check that both converge to the same, sparse, accurate solution.
    n_samples, n_features, max_iter = 100, 100, 1000
    n_informative = 10

    X, y = make_sparse_data(n_samples, n_features, n_informative,
                            positive=positive)

    half = n_samples // 2
    X_train, X_test = X[half:], X[:half]
    y_train, y_test = y[half:], y[:half]

    def fit_enet(train_data):
        # one converged, well-scoring fit on the given representation
        model = ElasticNet(alpha=alpha, l1_ratio=0.8,
                           fit_intercept=fit_intercept, max_iter=max_iter,
                           tol=1e-7, positive=positive, warm_start=True)
        model.fit(train_data, y_train)
        assert_almost_equal(model.dual_gap_, 0, 4)
        assert_greater(model.score(X_test, y_test), 0.85)
        return model

    s_clf = fit_enet(X_train)
    d_clf = fit_enet(X_train.toarray())

    # sparse and dense solvers must agree
    assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
    assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)

    # the estimated coefficients should themselves be sparse
    assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative)
def test_sparse_enet_not_as_toy_dataset():
    # Exercise every combination used by the original four explicit calls.
    for alpha, fit_intercept, positive in [(0.1, False, False),
                                           (0.1, True, False),
                                           (1e-3, False, True),
                                           (1e-3, True, True)]:
        _test_sparse_enet_not_as_toy_dataset(alpha=alpha,
                                             fit_intercept=fit_intercept,
                                             positive=positive)
def test_sparse_lasso_not_as_toy_dataset():
    # Fit the same Lasso on sparse and dense views of one dataset and check
    # both converge to an accurate, sparse solution.
    n_samples = 100
    max_iter = 1000
    n_informative = 10
    X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)

    half = n_samples // 2
    X_train, X_test = X[half:], X[:half]
    y_train, y_test = y[half:], y[:half]

    def fit_lasso(train_data):
        # one converged, well-scoring fit on the given representation
        model = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter,
                      tol=1e-7)
        model.fit(train_data, y_train)
        assert_almost_equal(model.dual_gap_, 0, 4)
        assert_greater(model.score(X_test, y_test), 0.85)
        return model

    s_clf = fit_lasso(X_train)
    fit_lasso(X_train.toarray())  # dense version must converge the same way

    # exactly the informative coefficients should be non-zero
    assert_equal(np.sum(s_clf.coef_ != 0.0), n_informative)
def test_enet_multitarget():
    # A multi-target fit must reproduce each per-target single fit.
    n_targets = 3
    X, y = make_sparse_data(n_targets=n_targets)

    # XXX: There is a bug when precompute is not None!
    estimator = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None)
    estimator.fit(X, y)
    coef = estimator.coef_
    intercept = estimator.intercept_
    dual_gap = estimator.dual_gap_

    for k in range(n_targets):
        estimator.fit(X, y[:, k])
        assert_array_almost_equal(coef[k, :], estimator.coef_)
        assert_array_almost_equal(intercept[k], estimator.intercept_)
        assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_path_parameters():
    # Check that ElasticNetCV honors its path parameters and that the MSE
    # path is identical for sparse and dense input.
    X, y = make_sparse_data()
    max_iter = 50
    n_alphas = 10
    cv_model = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
                            l1_ratio=0.5, fit_intercept=False)
    ignore_warnings(cv_model.fit)(X, y)  # new params
    assert_almost_equal(0.5, cv_model.l1_ratio)
    assert_equal(n_alphas, cv_model.n_alphas)
    assert_equal(n_alphas, len(cv_model.alphas_))
    sparse_mse_path = cv_model.mse_path_
    # compare with dense data
    ignore_warnings(cv_model.fit)(X.toarray(), y)
    assert_almost_equal(cv_model.mse_path_, sparse_mse_path)
def test_same_output_sparse_dense_lasso_and_enet_cv():
    # LassoCV / ElasticNetCV must select the same model on sparse and
    # dense representations of the same data, with and without normalize.
    X, y = make_sparse_data(n_samples=40, n_features=10)

    def check_cv_estimator(factory):
        # fit two fresh estimators, one per representation, and compare
        sparse_cv = factory()
        ignore_warnings(sparse_cv.fit)(X, y)
        dense_cv = factory()
        ignore_warnings(dense_cv.fit)(X.toarray(), y)
        assert_almost_equal(sparse_cv.alpha_, dense_cv.alpha_, 7)
        assert_almost_equal(sparse_cv.intercept_, dense_cv.intercept_, 7)
        assert_array_almost_equal(sparse_cv.mse_path_, dense_cv.mse_path_)
        assert_array_almost_equal(sparse_cv.alphas_, dense_cv.alphas_)

    for normalize in [True, False]:
        check_cv_estimator(lambda: ElasticNetCV(max_iter=100, cv=5,
                                                normalize=normalize))
        check_cv_estimator(lambda: LassoCV(max_iter=100, cv=4,
                                           normalize=normalize))
| bsd-3-clause |
GarySparrow/mFlaskWeb | venv/Lib/site-packages/pygments/lexers/_asy_builtins.py | 48 | 27321 | # -*- coding: utf-8 -*-
"""
pygments.lexers._asy_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file contains the asy-function names and asy-variable names of
Asymptote.
Do not edit the ASYFUNCNAME and ASYVARNAME sets by hand.
TODO: perl/python script in Asymptote SVN similar to asy-list.pl but only
for function and variable names.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
ASYFUNCNAME = set((
'AND',
'Arc',
'ArcArrow',
'ArcArrows',
'Arrow',
'Arrows',
'Automatic',
'AvantGarde',
'BBox',
'BWRainbow',
'BWRainbow2',
'Bar',
'Bars',
'BeginArcArrow',
'BeginArrow',
'BeginBar',
'BeginDotMargin',
'BeginMargin',
'BeginPenMargin',
'Blank',
'Bookman',
'Bottom',
'BottomTop',
'Bounds',
'Break',
'Broken',
'BrokenLog',
'Ceil',
'Circle',
'CircleBarIntervalMarker',
'Cos',
'Courier',
'CrossIntervalMarker',
'DefaultFormat',
'DefaultLogFormat',
'Degrees',
'Dir',
'DotMargin',
'DotMargins',
'Dotted',
'Draw',
'Drawline',
'Embed',
'EndArcArrow',
'EndArrow',
'EndBar',
'EndDotMargin',
'EndMargin',
'EndPenMargin',
'Fill',
'FillDraw',
'Floor',
'Format',
'Full',
'Gaussian',
'Gaussrand',
'Gaussrandpair',
'Gradient',
'Grayscale',
'Helvetica',
'Hermite',
'HookHead',
'InOutTicks',
'InTicks',
'J',
'Label',
'Landscape',
'Left',
'LeftRight',
'LeftTicks',
'Legend',
'Linear',
'Link',
'Log',
'LogFormat',
'Margin',
'Margins',
'Mark',
'MidArcArrow',
'MidArrow',
'NOT',
'NewCenturySchoolBook',
'NoBox',
'NoMargin',
'NoModifier',
'NoTicks',
'NoTicks3',
'NoZero',
'NoZeroFormat',
'None',
'OR',
'OmitFormat',
'OmitTick',
'OutTicks',
'Ox',
'Oy',
'Palatino',
'PaletteTicks',
'Pen',
'PenMargin',
'PenMargins',
'Pentype',
'Portrait',
'RadialShade',
'Rainbow',
'Range',
'Relative',
'Right',
'RightTicks',
'Rotate',
'Round',
'SQR',
'Scale',
'ScaleX',
'ScaleY',
'ScaleZ',
'Seascape',
'Shift',
'Sin',
'Slant',
'Spline',
'StickIntervalMarker',
'Straight',
'Symbol',
'Tan',
'TeXify',
'Ticks',
'Ticks3',
'TildeIntervalMarker',
'TimesRoman',
'Top',
'TrueMargin',
'UnFill',
'UpsideDown',
'Wheel',
'X',
'XEquals',
'XOR',
'XY',
'XYEquals',
'XYZero',
'XYgrid',
'XZEquals',
'XZZero',
'XZero',
'XZgrid',
'Y',
'YEquals',
'YXgrid',
'YZ',
'YZEquals',
'YZZero',
'YZero',
'YZgrid',
'Z',
'ZX',
'ZXgrid',
'ZYgrid',
'ZapfChancery',
'ZapfDingbats',
'_cputime',
'_draw',
'_eval',
'_image',
'_labelpath',
'_projection',
'_strokepath',
'_texpath',
'aCos',
'aSin',
'aTan',
'abort',
'abs',
'accel',
'acos',
'acosh',
'acot',
'acsc',
'add',
'addArrow',
'addMargins',
'addSaveFunction',
'addnode',
'addnodes',
'addpenarc',
'addpenline',
'addseg',
'adjust',
'alias',
'align',
'all',
'altitude',
'angabscissa',
'angle',
'angpoint',
'animate',
'annotate',
'anticomplementary',
'antipedal',
'apply',
'approximate',
'arc',
'arcarrowsize',
'arccircle',
'arcdir',
'arcfromcenter',
'arcfromfocus',
'arclength',
'arcnodesnumber',
'arcpoint',
'arcsubtended',
'arcsubtendedcenter',
'arctime',
'arctopath',
'array',
'arrow',
'arrow2',
'arrowbase',
'arrowbasepoints',
'arrowsize',
'asec',
'asin',
'asinh',
'ask',
'assert',
'asy',
'asycode',
'asydir',
'asyfigure',
'asyfilecode',
'asyinclude',
'asywrite',
'atan',
'atan2',
'atanh',
'atbreakpoint',
'atexit',
'atime',
'attach',
'attract',
'atupdate',
'autoformat',
'autoscale',
'autoscale3',
'axes',
'axes3',
'axialshade',
'axis',
'axiscoverage',
'azimuth',
'babel',
'background',
'bangles',
'bar',
'barmarksize',
'barsize',
'basealign',
'baseline',
'bbox',
'beep',
'begin',
'beginclip',
'begingroup',
'beginpoint',
'between',
'bevel',
'bezier',
'bezierP',
'bezierPP',
'bezierPPP',
'bezulate',
'bibliography',
'bibliographystyle',
'binarytree',
'binarytreeNode',
'binomial',
'binput',
'bins',
'bisector',
'bisectorpoint',
'blend',
'boutput',
'box',
'bqe',
'breakpoint',
'breakpoints',
'brick',
'buildRestoreDefaults',
'buildRestoreThunk',
'buildcycle',
'bulletcolor',
'canonical',
'canonicalcartesiansystem',
'cartesiansystem',
'case1',
'case2',
'case3',
'cbrt',
'cd',
'ceil',
'center',
'centerToFocus',
'centroid',
'cevian',
'change2',
'changecoordsys',
'checkSegment',
'checkconditionlength',
'checker',
'checklengths',
'checkposition',
'checktriangle',
'choose',
'circle',
'circlebarframe',
'circlemarkradius',
'circlenodesnumber',
'circumcenter',
'circumcircle',
'clamped',
'clear',
'clip',
'clipdraw',
'close',
'cmyk',
'code',
'colatitude',
'collect',
'collinear',
'color',
'colorless',
'colors',
'colorspace',
'comma',
'compassmark',
'complement',
'complementary',
'concat',
'concurrent',
'cone',
'conic',
'conicnodesnumber',
'conictype',
'conj',
'connect',
'containmentTree',
'contains',
'contour',
'contour3',
'controlSpecifier',
'convert',
'coordinates',
'coordsys',
'copy',
'cos',
'cosh',
'cot',
'countIntersections',
'cputime',
'crop',
'cropcode',
'cross',
'crossframe',
'crosshatch',
'crossmarksize',
'csc',
'cubicroots',
'curabscissa',
'curlSpecifier',
'curpoint',
'currentarrow',
'currentexitfunction',
'currentmomarrow',
'currentpolarconicroutine',
'curve',
'cut',
'cutafter',
'cutbefore',
'cyclic',
'cylinder',
'debugger',
'deconstruct',
'defaultdir',
'defaultformat',
'defaultpen',
'defined',
'degenerate',
'degrees',
'delete',
'deletepreamble',
'determinant',
'diagonal',
'diamond',
'diffdiv',
'dir',
'dirSpecifier',
'dirtime',
'display',
'distance',
'divisors',
'do_overpaint',
'dot',
'dotframe',
'dotsize',
'downcase',
'draw',
'drawAll',
'drawDoubleLine',
'drawFermion',
'drawGhost',
'drawGluon',
'drawMomArrow',
'drawPhoton',
'drawScalar',
'drawVertex',
'drawVertexBox',
'drawVertexBoxO',
'drawVertexBoxX',
'drawVertexO',
'drawVertexOX',
'drawVertexTriangle',
'drawVertexTriangleO',
'drawVertexX',
'drawarrow',
'drawarrow2',
'drawline',
'drawtick',
'duplicate',
'elle',
'ellipse',
'ellipsenodesnumber',
'embed',
'embed3',
'empty',
'enclose',
'end',
'endScript',
'endclip',
'endgroup',
'endl',
'endpoint',
'endpoints',
'eof',
'eol',
'equation',
'equations',
'erase',
'erasestep',
'erf',
'erfc',
'error',
'errorbar',
'errorbars',
'eval',
'excenter',
'excircle',
'exit',
'exitXasyMode',
'exitfunction',
'exp',
'expfactors',
'expi',
'expm1',
'exradius',
'extend',
'extension',
'extouch',
'fabs',
'factorial',
'fermat',
'fft',
'fhorner',
'figure',
'file',
'filecode',
'fill',
'filldraw',
'filloutside',
'fillrule',
'filltype',
'find',
'finite',
'finiteDifferenceJacobian',
'firstcut',
'firstframe',
'fit',
'fit2',
'fixedscaling',
'floor',
'flush',
'fmdefaults',
'fmod',
'focusToCenter',
'font',
'fontcommand',
'fontsize',
'foot',
'format',
'frac',
'frequency',
'fromCenter',
'fromFocus',
'fspline',
'functionshade',
'gamma',
'generate_random_backtrace',
'generateticks',
'gergonne',
'getc',
'getint',
'getpair',
'getreal',
'getstring',
'gettriple',
'gluon',
'gouraudshade',
'graph',
'graphic',
'gray',
'grestore',
'grid',
'grid3',
'gsave',
'halfbox',
'hatch',
'hdiffdiv',
'hermite',
'hex',
'histogram',
'history',
'hline',
'hprojection',
'hsv',
'hyperbola',
'hyperbolanodesnumber',
'hyperlink',
'hypot',
'identity',
'image',
'incenter',
'incentral',
'incircle',
'increasing',
'incrementposition',
'indexedTransform',
'indexedfigure',
'initXasyMode',
'initdefaults',
'input',
'inradius',
'insert',
'inside',
'integrate',
'interactive',
'interior',
'interp',
'interpolate',
'intersect',
'intersection',
'intersectionpoint',
'intersectionpoints',
'intersections',
'intouch',
'inverse',
'inversion',
'invisible',
'is3D',
'isDuplicate',
'isogonal',
'isogonalconjugate',
'isotomic',
'isotomicconjugate',
'isparabola',
'italic',
'item',
'key',
'kurtosis',
'kurtosisexcess',
'label',
'labelaxis',
'labelmargin',
'labelpath',
'labels',
'labeltick',
'labelx',
'labelx3',
'labely',
'labely3',
'labelz',
'labelz3',
'lastcut',
'latex',
'latitude',
'latticeshade',
'layer',
'layout',
'ldexp',
'leastsquares',
'legend',
'legenditem',
'length',
'lift',
'light',
'limits',
'line',
'linear',
'linecap',
'lineinversion',
'linejoin',
'linemargin',
'lineskip',
'linetype',
'linewidth',
'link',
'list',
'lm_enorm',
'lm_evaluate_default',
'lm_lmdif',
'lm_lmpar',
'lm_minimize',
'lm_print_default',
'lm_print_quiet',
'lm_qrfac',
'lm_qrsolv',
'locale',
'locate',
'locatefile',
'location',
'log',
'log10',
'log1p',
'logaxiscoverage',
'longitude',
'lookup',
'magnetize',
'makeNode',
'makedraw',
'makepen',
'map',
'margin',
'markangle',
'markangleradius',
'markanglespace',
'markarc',
'marker',
'markinterval',
'marknodes',
'markrightangle',
'markuniform',
'mass',
'masscenter',
'massformat',
'math',
'max',
'max3',
'maxbezier',
'maxbound',
'maxcoords',
'maxlength',
'maxratio',
'maxtimes',
'mean',
'medial',
'median',
'midpoint',
'min',
'min3',
'minbezier',
'minbound',
'minipage',
'minratio',
'mintimes',
'miterlimit',
'momArrowPath',
'momarrowsize',
'monotonic',
'multifigure',
'nativeformat',
'natural',
'needshipout',
'newl',
'newpage',
'newslide',
'newton',
'newtree',
'nextframe',
'nextnormal',
'nextpage',
'nib',
'nodabscissa',
'none',
'norm',
'normalvideo',
'notaknot',
'nowarn',
'numberpage',
'nurb',
'object',
'offset',
'onpath',
'opacity',
'opposite',
'orientation',
'orig_circlenodesnumber',
'orig_circlenodesnumber1',
'orig_draw',
'orig_ellipsenodesnumber',
'orig_ellipsenodesnumber1',
'orig_hyperbolanodesnumber',
'orig_parabolanodesnumber',
'origin',
'orthic',
'orthocentercenter',
'outformat',
'outline',
'outprefix',
'output',
'overloadedMessage',
'overwrite',
'pack',
'pad',
'pairs',
'palette',
'parabola',
'parabolanodesnumber',
'parallel',
'partialsum',
'path',
'path3',
'pattern',
'pause',
'pdf',
'pedal',
'periodic',
'perp',
'perpendicular',
'perpendicularmark',
'phantom',
'phi1',
'phi2',
'phi3',
'photon',
'piecewisestraight',
'point',
'polar',
'polarconicroutine',
'polargraph',
'polygon',
'postcontrol',
'postscript',
'pow10',
'ppoint',
'prc',
'prc0',
'precision',
'precontrol',
'prepend',
'print_random_addresses',
'project',
'projection',
'purge',
'pwhermite',
'quadrant',
'quadraticroots',
'quantize',
'quarticroots',
'quotient',
'radialshade',
'radians',
'radicalcenter',
'radicalline',
'radius',
'rand',
'randompath',
'rd',
'readline',
'realmult',
'realquarticroots',
'rectangle',
'rectangular',
'rectify',
'reflect',
'relabscissa',
'relative',
'relativedistance',
'reldir',
'relpoint',
'reltime',
'remainder',
'remark',
'removeDuplicates',
'rename',
'replace',
'report',
'resetdefaultpen',
'restore',
'restoredefaults',
'reverse',
'reversevideo',
'rf',
'rfind',
'rgb',
'rgba',
'rgbint',
'rms',
'rotate',
'rotateO',
'rotation',
'round',
'roundbox',
'roundedpath',
'roundrectangle',
'samecoordsys',
'sameside',
'sample',
'save',
'savedefaults',
'saveline',
'scale',
'scale3',
'scaleO',
'scaleT',
'scaleless',
'scientific',
'search',
'searchtree',
'sec',
'secondaryX',
'secondaryY',
'seconds',
'section',
'sector',
'seek',
'seekeof',
'segment',
'sequence',
'setpens',
'sgn',
'sgnd',
'sharpangle',
'sharpdegrees',
'shift',
'shiftless',
'shipout',
'shipout3',
'show',
'side',
'simeq',
'simpson',
'sin',
'single',
'sinh',
'size',
'size3',
'skewness',
'skip',
'slant',
'sleep',
'slope',
'slopefield',
'solve',
'solveBVP',
'sort',
'sourceline',
'sphere',
'split',
'sqrt',
'square',
'srand',
'standardizecoordsys',
'startScript',
'startTrembling',
'stdev',
'step',
'stickframe',
'stickmarksize',
'stickmarkspace',
'stop',
'straight',
'straightness',
'string',
'stripdirectory',
'stripextension',
'stripfile',
'strokepath',
'subdivide',
'subitem',
'subpath',
'substr',
'sum',
'surface',
'symmedial',
'symmedian',
'system',
'tab',
'tableau',
'tan',
'tangent',
'tangential',
'tangents',
'tanh',
'tell',
'tensionSpecifier',
'tensorshade',
'tex',
'texcolor',
'texify',
'texpath',
'texpreamble',
'texreset',
'texshipout',
'texsize',
'textpath',
'thick',
'thin',
'tick',
'tickMax',
'tickMax3',
'tickMin',
'tickMin3',
'ticklabelshift',
'ticklocate',
'tildeframe',
'tildemarksize',
'tile',
'tiling',
'time',
'times',
'title',
'titlepage',
'topbox',
'transform',
'transformation',
'transpose',
'tremble',
'trembleFuzz',
'tremble_circlenodesnumber',
'tremble_circlenodesnumber1',
'tremble_draw',
'tremble_ellipsenodesnumber',
'tremble_ellipsenodesnumber1',
'tremble_hyperbolanodesnumber',
'tremble_marknodes',
'tremble_markuniform',
'tremble_parabolanodesnumber',
'triangle',
'triangleAbc',
'triangleabc',
'triangulate',
'tricoef',
'tridiagonal',
'trilinear',
'trim',
'trueMagnetize',
'truepoint',
'tube',
'uncycle',
'unfill',
'uniform',
'unit',
'unitrand',
'unitsize',
'unityroot',
'unstraighten',
'upcase',
'updatefunction',
'uperiodic',
'upscale',
'uptodate',
'usepackage',
'usersetting',
'usetypescript',
'usleep',
'value',
'variance',
'variancebiased',
'vbox',
'vector',
'vectorfield',
'verbatim',
'view',
'vline',
'vperiodic',
'vprojection',
'warn',
'warning',
'windingnumber',
'write',
'xaxis',
'xaxis3',
'xaxis3At',
'xaxisAt',
'xequals',
'xinput',
'xlimits',
'xoutput',
'xpart',
'xscale',
'xscaleO',
'xtick',
'xtick3',
'xtrans',
'yaxis',
'yaxis3',
'yaxis3At',
'yaxisAt',
'yequals',
'ylimits',
'ypart',
'yscale',
'yscaleO',
'ytick',
'ytick3',
'ytrans',
'zaxis3',
'zaxis3At',
'zero',
'zero3',
'zlimits',
'zpart',
'ztick',
'ztick3',
'ztrans'
))
ASYVARNAME = set((
'AliceBlue',
'Align',
'Allow',
'AntiqueWhite',
'Apricot',
'Aqua',
'Aquamarine',
'Aspect',
'Azure',
'BeginPoint',
'Beige',
'Bisque',
'Bittersweet',
'Black',
'BlanchedAlmond',
'Blue',
'BlueGreen',
'BlueViolet',
'Both',
'Break',
'BrickRed',
'Brown',
'BurlyWood',
'BurntOrange',
'CCW',
'CW',
'CadetBlue',
'CarnationPink',
'Center',
'Centered',
'Cerulean',
'Chartreuse',
'Chocolate',
'Coeff',
'Coral',
'CornflowerBlue',
'Cornsilk',
'Crimson',
'Crop',
'Cyan',
'Dandelion',
'DarkBlue',
'DarkCyan',
'DarkGoldenrod',
'DarkGray',
'DarkGreen',
'DarkKhaki',
'DarkMagenta',
'DarkOliveGreen',
'DarkOrange',
'DarkOrchid',
'DarkRed',
'DarkSalmon',
'DarkSeaGreen',
'DarkSlateBlue',
'DarkSlateGray',
'DarkTurquoise',
'DarkViolet',
'DeepPink',
'DeepSkyBlue',
'DefaultHead',
'DimGray',
'DodgerBlue',
'Dotted',
'Draw',
'E',
'ENE',
'EPS',
'ESE',
'E_Euler',
'E_PC',
'E_RK2',
'E_RK3BS',
'Emerald',
'EndPoint',
'Euler',
'Fill',
'FillDraw',
'FireBrick',
'FloralWhite',
'ForestGreen',
'Fuchsia',
'Gainsboro',
'GhostWhite',
'Gold',
'Goldenrod',
'Gray',
'Green',
'GreenYellow',
'Honeydew',
'HookHead',
'Horizontal',
'HotPink',
'I',
'IgnoreAspect',
'IndianRed',
'Indigo',
'Ivory',
'JOIN_IN',
'JOIN_OUT',
'JungleGreen',
'Khaki',
'LM_DWARF',
'LM_MACHEP',
'LM_SQRT_DWARF',
'LM_SQRT_GIANT',
'LM_USERTOL',
'Label',
'Lavender',
'LavenderBlush',
'LawnGreen',
'LeftJustified',
'LeftSide',
'LemonChiffon',
'LightBlue',
'LightCoral',
'LightCyan',
'LightGoldenrodYellow',
'LightGreen',
'LightGrey',
'LightPink',
'LightSalmon',
'LightSeaGreen',
'LightSkyBlue',
'LightSlateGray',
'LightSteelBlue',
'LightYellow',
'Lime',
'LimeGreen',
'Linear',
'Linen',
'Log',
'Logarithmic',
'Magenta',
'Mahogany',
'Mark',
'MarkFill',
'Maroon',
'Max',
'MediumAquamarine',
'MediumBlue',
'MediumOrchid',
'MediumPurple',
'MediumSeaGreen',
'MediumSlateBlue',
'MediumSpringGreen',
'MediumTurquoise',
'MediumVioletRed',
'Melon',
'MidPoint',
'MidnightBlue',
'Min',
'MintCream',
'MistyRose',
'Moccasin',
'Move',
'MoveQuiet',
'Mulberry',
'N',
'NE',
'NNE',
'NNW',
'NW',
'NavajoWhite',
'Navy',
'NavyBlue',
'NoAlign',
'NoCrop',
'NoFill',
'NoSide',
'OldLace',
'Olive',
'OliveDrab',
'OliveGreen',
'Orange',
'OrangeRed',
'Orchid',
'Ox',
'Oy',
'PC',
'PaleGoldenrod',
'PaleGreen',
'PaleTurquoise',
'PaleVioletRed',
'PapayaWhip',
'Peach',
'PeachPuff',
'Periwinkle',
'Peru',
'PineGreen',
'Pink',
'Plum',
'PowderBlue',
'ProcessBlue',
'Purple',
'RK2',
'RK3',
'RK3BS',
'RK4',
'RK5',
'RK5DP',
'RK5F',
'RawSienna',
'Red',
'RedOrange',
'RedViolet',
'Rhodamine',
'RightJustified',
'RightSide',
'RosyBrown',
'RoyalBlue',
'RoyalPurple',
'RubineRed',
'S',
'SE',
'SSE',
'SSW',
'SW',
'SaddleBrown',
'Salmon',
'SandyBrown',
'SeaGreen',
'Seashell',
'Sepia',
'Sienna',
'Silver',
'SimpleHead',
'SkyBlue',
'SlateBlue',
'SlateGray',
'Snow',
'SpringGreen',
'SteelBlue',
'Suppress',
'SuppressQuiet',
'Tan',
'TeXHead',
'Teal',
'TealBlue',
'Thistle',
'Ticksize',
'Tomato',
'Turquoise',
'UnFill',
'VERSION',
'Value',
'Vertical',
'Violet',
'VioletRed',
'W',
'WNW',
'WSW',
'Wheat',
'White',
'WhiteSmoke',
'WildStrawberry',
'XYAlign',
'YAlign',
'Yellow',
'YellowGreen',
'YellowOrange',
'addpenarc',
'addpenline',
'align',
'allowstepping',
'angularsystem',
'animationdelay',
'appendsuffix',
'arcarrowangle',
'arcarrowfactor',
'arrow2sizelimit',
'arrowangle',
'arrowbarb',
'arrowdir',
'arrowfactor',
'arrowhookfactor',
'arrowlength',
'arrowsizelimit',
'arrowtexfactor',
'authorpen',
'axis',
'axiscoverage',
'axislabelfactor',
'background',
'backgroundcolor',
'backgroundpen',
'barfactor',
'barmarksizefactor',
'basealign',
'baselinetemplate',
'beveljoin',
'bigvertexpen',
'bigvertexsize',
'black',
'blue',
'bm',
'bottom',
'bp',
'brown',
'bullet',
'byfoci',
'byvertices',
'camerafactor',
'chartreuse',
'circlemarkradiusfactor',
'circlenodesnumberfactor',
'circleprecision',
'circlescale',
'cm',
'codefile',
'codepen',
'codeskip',
'colorPen',
'coloredNodes',
'coloredSegments',
'conditionlength',
'conicnodesfactor',
'count',
'cputimeformat',
'crossmarksizefactor',
'currentcoordsys',
'currentlight',
'currentpatterns',
'currentpen',
'currentpicture',
'currentposition',
'currentprojection',
'curvilinearsystem',
'cuttings',
'cyan',
'darkblue',
'darkbrown',
'darkcyan',
'darkgray',
'darkgreen',
'darkgrey',
'darkmagenta',
'darkolive',
'darkred',
'dashdotted',
'dashed',
'datepen',
'dateskip',
'debuggerlines',
'debugging',
'deepblue',
'deepcyan',
'deepgray',
'deepgreen',
'deepgrey',
'deepmagenta',
'deepred',
'default',
'defaultControl',
'defaultS',
'defaultbackpen',
'defaultcoordsys',
'defaultfilename',
'defaultformat',
'defaultmassformat',
'defaultpen',
'diagnostics',
'differentlengths',
'dot',
'dotfactor',
'dotframe',
'dotted',
'doublelinepen',
'doublelinespacing',
'down',
'duplicateFuzz',
'ellipsenodesnumberfactor',
'eps',
'epsgeo',
'epsilon',
'evenodd',
'extendcap',
'fermionpen',
'figureborder',
'figuremattpen',
'firstnode',
'firststep',
'foregroundcolor',
'fuchsia',
'fuzz',
'gapfactor',
'ghostpen',
'gluonamplitude',
'gluonpen',
'gluonratio',
'gray',
'green',
'grey',
'hatchepsilon',
'havepagenumber',
'heavyblue',
'heavycyan',
'heavygray',
'heavygreen',
'heavygrey',
'heavymagenta',
'heavyred',
'hline',
'hwratio',
'hyperbolanodesnumberfactor',
'identity4',
'ignore',
'inXasyMode',
'inch',
'inches',
'includegraphicscommand',
'inf',
'infinity',
'institutionpen',
'intMax',
'intMin',
'invert',
'invisible',
'itempen',
'itemskip',
'itemstep',
'labelmargin',
'landscape',
'lastnode',
'left',
'legendhskip',
'legendlinelength',
'legendmargin',
'legendmarkersize',
'legendmaxrelativewidth',
'legendvskip',
'lightblue',
'lightcyan',
'lightgray',
'lightgreen',
'lightgrey',
'lightmagenta',
'lightolive',
'lightred',
'lightyellow',
'linemargin',
'lm_infmsg',
'lm_shortmsg',
'longdashdotted',
'longdashed',
'magenta',
'magneticPoints',
'magneticRadius',
'mantissaBits',
'markangleradius',
'markangleradiusfactor',
'markanglespace',
'markanglespacefactor',
'mediumblue',
'mediumcyan',
'mediumgray',
'mediumgreen',
'mediumgrey',
'mediummagenta',
'mediumred',
'mediumyellow',
'middle',
'minDistDefault',
'minblockheight',
'minblockwidth',
'mincirclediameter',
'minipagemargin',
'minipagewidth',
'minvertexangle',
'miterjoin',
'mm',
'momarrowfactor',
'momarrowlength',
'momarrowmargin',
'momarrowoffset',
'momarrowpen',
'monoPen',
'morepoints',
'nCircle',
'newbulletcolor',
'ngraph',
'nil',
'nmesh',
'nobasealign',
'nodeMarginDefault',
'nodesystem',
'nomarker',
'nopoint',
'noprimary',
'nullpath',
'nullpen',
'numarray',
'ocgindex',
'oldbulletcolor',
'olive',
'orange',
'origin',
'overpaint',
'page',
'pageheight',
'pagemargin',
'pagenumberalign',
'pagenumberpen',
'pagenumberposition',
'pagewidth',
'paleblue',
'palecyan',
'palegray',
'palegreen',
'palegrey',
'palemagenta',
'palered',
'paleyellow',
'parabolanodesnumberfactor',
'perpfactor',
'phi',
'photonamplitude',
'photonpen',
'photonratio',
'pi',
'pink',
'plain',
'plus',
'preamblenodes',
'pt',
'purple',
'r3',
'r4a',
'r4b',
'randMax',
'realDigits',
'realEpsilon',
'realMax',
'realMin',
'red',
'relativesystem',
'reverse',
'right',
'roundcap',
'roundjoin',
'royalblue',
'salmon',
'saveFunctions',
'scalarpen',
'sequencereal',
'settings',
'shipped',
'signedtrailingzero',
'solid',
'springgreen',
'sqrtEpsilon',
'squarecap',
'squarepen',
'startposition',
'stdin',
'stdout',
'stepfactor',
'stepfraction',
'steppagenumberpen',
'stepping',
'stickframe',
'stickmarksizefactor',
'stickmarkspacefactor',
'textpen',
'ticksize',
'tildeframe',
'tildemarksizefactor',
'tinv',
'titlealign',
'titlepagepen',
'titlepageposition',
'titlepen',
'titleskip',
'top',
'trailingzero',
'treeLevelStep',
'treeMinNodeWidth',
'treeNodeStep',
'trembleAngle',
'trembleFrequency',
'trembleRandom',
'tremblingMode',
'undefined',
'unitcircle',
'unitsquare',
'up',
'urlpen',
'urlskip',
'version',
'vertexpen',
'vertexsize',
'viewportmargin',
'viewportsize',
'vline',
'white',
'wye',
'xformStack',
'yellow',
'ylabelwidth',
'zerotickfuzz',
'zerowinding'
))
| mit |
tianhao64/pyvmomi | tests/test_container_view.py | 3 | 1740 | # VMware vSphere Python SDK
# Copyright (c) 2008-2015 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tests
from pyVim import connect
from pyVmomi import vim
class ContainerViewTests(tests.VCRTestBase):
    """Exercise vSphere ContainerView traversal against a recorded session."""

    # The VCR cassette replays the recorded SOAP traffic, so no live
    # vCenter ('vcsa') is contacted when the fixture already exists.
    @tests.VCRTestBase.my_vcr.use_cassette('basic_container_view.yaml',
                                           cassette_library_dir=tests.fixtures_path,
                                           record_mode='once')
    def test_basic_container_view(self):
        """Walk all Datacenters under rootFolder and count their datastores."""
        # see: http://python3porting.com/noconv.html
        si = connect.SmartConnect(host='vcsa',
                                  user='my_user',
                                  pwd='my_password')
        content = si.RetrieveContent()
        # Recursive view (third arg True) over every Datacenter in the tree.
        datacenter_object_view = content.viewManager.CreateContainerView(
            content.rootFolder, [vim.Datacenter], True)
        for datacenter in datacenter_object_view.view:
            datastores = datacenter.datastore
            # NOTE (hartsocks): the object handle here is a managed object
            # reference, until we ask for more details, no other detail is
            # transmitted. Our sample fixture is quite small.
            self.assertEqual(1, len(datastores))
        # Views hold server-side state; destroy explicitly when done.
        datacenter_object_view.Destroy()
| apache-2.0 |
scienceopen/pyrinex | src/georinex/hatanaka.py | 1 | 1490 | """
handle Hatanka CRINEX files
NOTE: This is a candidate for importlib.resources in Python >= 3.7
"""
import subprocess
import shutil
from pathlib import Path
from typing.io import TextIO
from .build import build
def crxexe(path: Path = Path(__file__).parent / "rnxcmp") -> str:
    """
    Determine if the CRINEX converter is available, building it on demand.

    Don't use LRU_CACHE to allow for build-on-demand.

    Parameters
    ----------
    path: pathlib.Path
        directory searched for the crx2rnx executable

    Returns
    -------
    exe: str
        fullpath to crx2rnx executable

    Raises
    ------
    RuntimeError
        if the converter cannot be found, built, or executed.
    """
    exe = shutil.which("crx2rnx", path=str(path))
    if not exe:
        # Try compiling the bundled rnxcmp sources, then look again.
        if build() != 0:
            # FIX: corrected the misspelling "Hatanka" in this error message.
            raise RuntimeError("could not build Hatanaka converter. Do you have a C compiler?")
        exe = shutil.which("crx2rnx", path=str(path))
        if not exe:
            raise RuntimeError("Hatanaka converter is broken or missing.")
    # Sanity check the binary. crx2rnx -h exits with returncode == 1,
    # so inspect stderr for the usage banner instead of checking the code.
    ret = subprocess.run([exe, "-h"], stderr=subprocess.PIPE, universal_newlines=True)
    if ret.stderr.startswith("Usage"):
        return exe
    raise RuntimeError("Hatanaka converter is broken.")
def opencrx(f: TextIO) -> str:
    """
    Decompress an open Hatanaka CRINEX stream to plain RINEX text.

    The stream is read eagerly into a string first, because of a quirk
    where gzip.open() even with 'rt' doesn't decompress until read.
    """
    converter = crxexe()
    raw = f.read()
    return subprocess.check_output([converter, "-"], input=raw,
                                   universal_newlines=True)
| mit |
prashantmital/profiler-tools | timer.py | 1 | 1534 | from __future__ import division, print_function
from timeit import default_timer
class Timer(object):
    """Utility class used for monitoring code execution time.

    Offers two usage patterns: as a context manager (``with Timer() as t:``)
    or as a regular object whose ``start``/``stop`` methods are called
    explicitly. Read the measured interval from the ``elapsed`` property.
    """

    def __init__(self, start=False):
        """Initialize the Timer and optionally start it.

        Parameters
        ----------
        start : bool
            Flag indicating whether to start the timer immediately.
        """
        self.reset(start=start)

    def __enter__(self):
        # Entering the context always restarts the measurement from zero.
        self.reset(start=True)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.stop()

    def reset(self, start=False):
        """Reset the Timer and then optionally start it.

        Parameters
        ----------
        start : bool
            Flag indicating whether to start the timer.
        """
        self._start, self._stop = 0, 0
        self._stopped = True
        if start:
            self.start()

    def start(self):
        """Start the Timer."""
        self._start = default_timer()
        self._stopped = False

    def stop(self):
        """Stop the Timer."""
        self._stop = default_timer()
        self._stopped = True

    @property
    def elapsed(self):
        """Elapsed time in seconds between start and stop.

        Stops the Timer implicitly if it has not been stopped yet.
        """
        # FIX: stop and fall through instead of recursing back into this
        # property, which the original did for no benefit.
        if not self._stopped:
            self.stop()
        return self._stop - self._start
| mit |
SnappleCap/oh-mainline | vendor/packages/python-social-auth/social/backends/yandex.py | 78 | 3022 | """
Yandex OpenID and OAuth2 support.
This contribution adds support for Yandex.ru OpenID service in the form
openid.yandex.ru/user. Username is retrieved from the identity url.
If username is not specified, OpenID 2.0 url used for authentication.
"""
from social.p3 import urlsplit
from social.backends.open_id import OpenIdAuth
from social.backends.oauth import BaseOAuth2
class YandexOpenId(OpenIdAuth):
    """Yandex OpenID authentication backend"""
    name = 'yandex-openid'
    URL = 'http://openid.yandex.ru'

    def get_user_id(self, details, response):
        # Prefer the email as the stable user id; fall back to the
        # OpenID identity URL when the provider returned no email.
        return details['email'] or response.identity_url

    def get_user_details(self, response):
        """Generate username from identity url"""
        values = super(YandexOpenId, self).get_user_details(response)
        # The identity URL path (e.g. openid.yandex.ru/<user>/) carries the
        # username when the provider supplied none directly.
        values['username'] = values.get('username') or\
                             urlsplit(response.identity_url)\
                                .path.strip('/')
        values['email'] = values.get('email', '')
        return values
class YandexOAuth2(BaseOAuth2):
    """Legacy Yandex OAuth2 authentication backend"""
    name = 'yandex-oauth2'
    AUTHORIZATION_URL = 'https://oauth.yandex.com/authorize'
    ACCESS_TOKEN_URL = 'https://oauth.yandex.com/token'
    ACCESS_TOKEN_METHOD = 'POST'
    REDIRECT_STATE = False

    def get_user_details(self, response):
        """Map the Yandex profile payload onto the common details dict."""
        fullname, first_name, last_name = self.get_user_names(
            response.get('real_name') or response.get('display_name') or ''
        )
        return {'username': response.get('display_name'),
                # Fall back to the first entry of the 'emails' list when
                # no default email was set on the account.
                'email': response.get('default_email') or
                         response.get('emails', [''])[0],
                'fullname': fullname,
                'first_name': first_name,
                'last_name': last_name}

    def user_data(self, access_token, response, *args, **kwargs):
        """Fetch the raw profile JSON from Yandex's login info endpoint."""
        return self.get_json('https://login.yandex.ru/info',
                             params={'oauth_token': access_token,
                                     'format': 'json'})
class YaruOAuth2(BaseOAuth2):
    """Ya.ru OAuth2 backend.

    NOTE(review): identical to YandexOAuth2 except for ``name`` — the two
    could share a base class, kept duplicated here to match upstream.
    """
    name = 'yaru'
    AUTHORIZATION_URL = 'https://oauth.yandex.com/authorize'
    ACCESS_TOKEN_URL = 'https://oauth.yandex.com/token'
    ACCESS_TOKEN_METHOD = 'POST'
    REDIRECT_STATE = False

    def get_user_details(self, response):
        """Map the Yandex profile payload onto the common details dict."""
        fullname, first_name, last_name = self.get_user_names(
            response.get('real_name') or response.get('display_name') or ''
        )
        return {'username': response.get('display_name'),
                'email': response.get('default_email') or
                         response.get('emails', [''])[0],
                'fullname': fullname,
                'first_name': first_name,
                'last_name': last_name}

    def user_data(self, access_token, response, *args, **kwargs):
        """Fetch the raw profile JSON from Yandex's login info endpoint."""
        return self.get_json('https://login.yandex.ru/info',
                             params={'oauth_token': access_token,
                                     'format': 'json'})
| agpl-3.0 |
rohitwaghchaure/alec_frappe5_erpnext | erpnext/accounts/doctype/account/account.py | 6 | 7577 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr, cint
from frappe import throw, _
from frappe.model.document import Document
class Account(Document):
    """Chart-of-accounts node (ledger or group) in ERPNext."""

    # Field used by frappe's nested-set-model helpers to locate the parent.
    nsm_parent_field = 'parent_account'

    def onload(self):
        """Tell the client whether the current user may (un)freeze accounts."""
        frozen_accounts_modifier = frappe.db.get_value("Accounts Settings", "Accounts Settings",
            "frozen_accounts_modifier")
        if not frozen_accounts_modifier or frozen_accounts_modifier in frappe.get_roles():
            self.get("__onload").can_freeze_account = True

    def autoname(self):
        # Document name is "<account name> - <company abbreviation>".
        self.name = self.account_name.strip() + ' - ' + \
            frappe.db.get_value("Company", self.company, "abbr")

    def validate(self):
        """Run all consistency checks before save."""
        self.validate_parent()
        self.validate_root_details()
        self.set_root_and_report_type()
        self.validate_mandatory()
        self.validate_warehouse_account()
        self.validate_frozen_accounts_modifier()
        self.validate_balance_must_be_debit_or_credit()

    def validate_parent(self):
        """Fetch Parent Details and validate parent account"""
        if self.parent_account:
            par = frappe.db.get_value("Account", self.parent_account,
                ["name", "is_group", "company"], as_dict=1)
            if not par:
                throw(_("Account {0}: Parent account {1} does not exist").format(self.name, self.parent_account))
            elif par.name == self.name:
                throw(_("Account {0}: You can not assign itself as parent account").format(self.name))
            elif not par.is_group:
                # Only group accounts may have children.
                throw(_("Account {0}: Parent account {1} can not be a ledger").format(self.name, self.parent_account))
            elif par.company != self.company:
                throw(_("Account {0}: Parent account {1} does not belong to company: {2}")
                    .format(self.name, self.parent_account, self.company))

    def set_root_and_report_type(self):
        """Inherit report_type/root_type from parent and push to descendants."""
        if self.parent_account:
            par = frappe.db.get_value("Account", self.parent_account, ["report_type", "root_type"], as_dict=1)

            if par.report_type:
                self.report_type = par.report_type
            if par.root_type:
                self.root_type = par.root_type

        if self.is_group:
            db_value = frappe.db.get_value("Account", self.name, ["report_type", "root_type"], as_dict=1)
            if db_value:
                # Propagate a changed type to the whole subtree (lft/rgt
                # are the nested-set-model bounds of this node).
                if self.report_type != db_value.report_type:
                    frappe.db.sql("update `tabAccount` set report_type=%s where lft > %s and rgt < %s",
                        (self.report_type, self.lft, self.rgt))
                if self.root_type != db_value.root_type:
                    frappe.db.sql("update `tabAccount` set root_type=%s where lft > %s and rgt < %s",
                        (self.root_type, self.lft, self.rgt))

    def validate_root_details(self):
        # does not exists parent
        if frappe.db.exists("Account", self.name):
            if not frappe.db.get_value("Account", self.name, "parent_account"):
                throw(_("Root cannot be edited."))

    def validate_frozen_accounts_modifier(self):
        """Only the configured role may toggle the freeze_account flag."""
        old_value = frappe.db.get_value("Account", self.name, "freeze_account")
        if old_value and old_value != self.freeze_account:
            frozen_accounts_modifier = frappe.db.get_value('Accounts Settings', None, 'frozen_accounts_modifier')
            if not frozen_accounts_modifier or \
                frozen_accounts_modifier not in frappe.get_roles():
                    throw(_("You are not authorized to set Frozen value"))

    def validate_balance_must_be_debit_or_credit(self):
        """Disallow a 'Balance Must Be' setting that the current balance violates."""
        from erpnext.accounts.utils import get_balance_on
        if not self.get("__islocal") and self.balance_must_be:
            account_balance = get_balance_on(self.name)
            if account_balance > 0 and self.balance_must_be == "Credit":
                frappe.throw(_("Account balance already in Debit, you are not allowed to set 'Balance Must Be' as 'Credit'"))
            elif account_balance < 0 and self.balance_must_be == "Debit":
                frappe.throw(_("Account balance already in Credit, you are not allowed to set 'Balance Must Be' as 'Debit'"))

    def convert_group_to_ledger(self):
        """Demote a group account to a ledger; only legal when it is empty."""
        if self.check_if_child_exists():
            throw(_("Account with child nodes cannot be converted to ledger"))
        elif self.check_gle_exists():
            throw(_("Account with existing transaction cannot be converted to ledger"))
        else:
            self.is_group = 0
            self.save()
            return 1

    def convert_ledger_to_group(self):
        """Promote a ledger account to a group; only legal when unused."""
        if self.check_gle_exists():
            throw(_("Account with existing transaction can not be converted to group."))
        elif self.account_type:
            throw(_("Cannot covert to Group because Account Type is selected."))
        else:
            self.is_group = 1
            self.save()
            return 1

    # Check if any previous balance exists
    def check_gle_exists(self):
        return frappe.db.get_value("GL Entry", {"account": self.name})

    def check_if_child_exists(self):
        # docstatus != 2 excludes cancelled documents.
        return frappe.db.sql("""select name from `tabAccount` where parent_account = %s
            and docstatus != 2""", self.name)

    def validate_mandatory(self):
        if not self.report_type:
            throw(_("Report Type is mandatory"))

        if not self.root_type:
            throw(_("Root Type is mandatory"))

    def validate_warehouse_account(self):
        """Enforce a valid warehouse link on Warehouse-type accounts."""
        # Perpetual inventory off => no warehouse account constraints apply.
        if not cint(frappe.defaults.get_global_default("auto_accounting_for_stock")):
            return

        if self.account_type == "Warehouse":
            if not self.warehouse:
                throw(_("Warehouse is mandatory if account type is Warehouse"))

            old_warehouse = cstr(frappe.db.get_value("Account", self.name, "warehouse"))
            if old_warehouse != cstr(self.warehouse):
                # Re-linking: neither the old nor new warehouse may already
                # have stock ledger entries.
                if old_warehouse:
                    self.validate_warehouse(old_warehouse)
                if self.warehouse:
                    self.validate_warehouse(self.warehouse)

    def validate_warehouse(self, warehouse):
        if frappe.db.get_value("Stock Ledger Entry", {"warehouse": warehouse}):
            throw(_("Stock entries exist against warehouse {0}, hence you cannot re-assign or modify Warehouse").format(warehouse))

    def update_nsm_model(self):
        """update lft, rgt indices for nested set model"""
        import frappe
        import frappe.utils.nestedset
        frappe.utils.nestedset.update_nsm(self)

    def on_update(self):
        self.update_nsm_model()

    def validate_trash(self):
        """checks gl entries and if child exists"""
        if not self.parent_account:
            throw(_("Root account can not be deleted"))

        if self.check_gle_exists():
            throw(_("Account with existing transaction can not be deleted"))
        if self.check_if_child_exists():
            throw(_("Child account exists for this account. You can not delete this account."))

    def on_trash(self):
        self.validate_trash()
        self.update_nsm_model()

    def before_rename(self, old, new, merge=False):
        """Return the new document name; when merging, verify compatibility."""
        # Add company abbr if not provided
        from erpnext.setup.doctype.company.company import get_name_with_abbr
        new_account = get_name_with_abbr(new, self.company)

        # Validate properties before merging
        if merge:
            if not frappe.db.exists("Account", new):
                throw(_("Account {0} does not exist").format(new))

            val = list(frappe.db.get_value("Account", new_account,
                ["is_group", "root_type", "company"]))

            if val != [self.is_group, self.root_type, self.company]:
                throw(_("""Merging is only possible if following properties are same in both records. Is Group, Root Type, Company"""))

        return new_account

    def after_rename(self, old, new, merge=False):
        if not merge:
            # Keep account_name in sync with the renamed document
            # (strip the trailing " - <abbr>" suffix).
            frappe.db.set_value("Account", new, "account_name",
                " - ".join(new.split(" - ")[:-1]))
        else:
            # Merged into an existing node: rebuild the nested-set indices.
            from frappe.utils.nestedset import rebuild_tree
            rebuild_tree("Account", "parent_account")
def get_parent_account(doctype, txt, searchfield, start, page_len, filters):
    """Link-field query: group accounts of a company whose name matches *txt*."""
    # NOTE(review): `searchfield` is interpolated into the SQL string via %
    # formatting rather than parameterized. Frappe normally supplies a
    # trusted column name here, but confirm callers cannot pass arbitrary
    # input (injection risk). The data values themselves are parameterized.
    return frappe.db.sql("""select name from tabAccount
        where is_group = 1 and docstatus != 2 and company = %s
        and %s like %s order by name limit %s, %s""" %
        ("%s", searchfield, "%s", "%s", "%s"),
        (filters["company"], "%%%s%%" % txt, start, page_len), as_list=1)
| agpl-3.0 |
zooba/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/setuptools/build_meta.py | 5 | 9387 | """A PEP 517 interface to setuptools
Previously, when a user or a command line tool (let's call it a "frontend")
needed to make a request of setuptools to take a certain action, for
example, generating a list of installation requirements, the frontend would
would call "setup.py egg_info" or "setup.py bdist_wheel" on the command line.
PEP 517 defines a different method of interfacing with setuptools. Rather
than calling "setup.py" directly, the frontend should:
1. Set the current directory to the directory with a setup.py file
2. Import this module into a safe python interpreter (one in which
setuptools can potentially set global variables or crash hard).
3. Call one of the functions defined in PEP 517.
What each function does is defined in PEP 517. However, here is a "casual"
definition of the functions (this definition should not be relied on for
bug reports or API stability):
- `build_wheel`: build a wheel in the folder and return the basename
- `get_requires_for_build_wheel`: get the `setup_requires` to build
- `prepare_metadata_for_build_wheel`: get the `install_requires`
- `build_sdist`: build an sdist in the folder and return the basename
- `get_requires_for_build_sdist`: get the `setup_requires` to build
Again, this is not a formal definition! Just a "taste" of the module.
"""
import io
import os
import sys
import tokenize
import shutil
import contextlib
import setuptools
import distutils
from setuptools.py31compat import TemporaryDirectory
from pkg_resources import parse_requirements
# Public surface of this module: the PEP 517 hook functions plus the
# legacy backend and the exception used to report setup_requires.
__all__ = ['get_requires_for_build_sdist',
           'get_requires_for_build_wheel',
           'prepare_metadata_for_build_wheel',
           'build_wheel',
           'build_sdist',
           '__legacy__',
           'SetupRequirementsError']
class SetupRequirementsError(BaseException):
    """Carries the setup_requires specifiers out of a setup.py run.

    Deliberately derives from BaseException so that a broad
    ``except Exception`` inside a project's setup.py cannot swallow it.
    """
    def __init__(self, specifiers):
        # List of requirement specifier strings (e.g. 'wheel>=0.31').
        self.specifiers = specifiers
class Distribution(setuptools.dist.Distribution):
    """Distribution that reports setup_requires instead of fetching eggs."""

    def fetch_build_eggs(self, specifiers):
        # Instead of downloading the build dependencies, surface them to
        # the PEP 517 frontend by raising through the setup.py run.
        specifier_list = list(map(str, parse_requirements(specifiers)))

        raise SetupRequirementsError(specifier_list)

    @classmethod
    @contextlib.contextmanager
    def patch(cls):
        """
        Replace
        distutils.dist.Distribution with this class
        for the duration of this context.
        """
        orig = distutils.core.Distribution
        distutils.core.Distribution = cls
        try:
            yield
        finally:
            # Always restore the original class, even if setup.py raised.
            distutils.core.Distribution = orig
def _to_str(s):
"""
Convert a filename to a string (on Python 2, explicitly
a byte string, not Unicode) as distutils checks for the
exact type str.
"""
if sys.version_info[0] == 2 and not isinstance(s, str):
# Assume it's Unicode, as that's what the PEP says
# should be provided.
return s.encode(sys.getfilesystemencoding())
return s
def _get_immediate_subdirectories(a_dir):
return [name for name in os.listdir(a_dir)
if os.path.isdir(os.path.join(a_dir, name))]
def _file_with_extension(directory, extension):
matching = (
f for f in os.listdir(directory)
if f.endswith(extension)
)
file, = matching
return file
def _open_setup_script(setup_script):
if not os.path.exists(setup_script):
# Supply a default setup.py
return io.StringIO(u"from setuptools import setup; setup()")
return getattr(tokenize, 'open', open)(setup_script)
class _BuildMetaBackend(object):
    """Implementation of the PEP 517 hooks on top of ``setup.py``.

    Each hook fakes a command line in ``sys.argv`` and executes the
    project's setup script in-process via :meth:`run_setup`.
    """

    def _fix_config(self, config_settings):
        # Normalize config_settings so '--global-option' is always present.
        config_settings = config_settings or {}
        config_settings.setdefault('--global-option', [])
        return config_settings

    def _get_build_requires(self, config_settings, requirements):
        """Run ``setup.py egg_info`` under the patched Distribution and
        collect any setup_requires it tried to fetch."""
        config_settings = self._fix_config(config_settings)

        sys.argv = sys.argv[:1] + ['egg_info'] + \
            config_settings["--global-option"]
        try:
            with Distribution.patch():
                self.run_setup()
        except SetupRequirementsError as e:
            # The patched Distribution raises instead of downloading eggs;
            # what it collected are exactly the build requirements.
            requirements += e.specifiers

        return requirements

    def run_setup(self, setup_script='setup.py'):
        """Execute the setup script in this process, as ``__main__``."""
        # Note that we can reuse our build directory between calls
        # Correctness comes first, then optimization later
        __file__ = setup_script
        __name__ = '__main__'

        with _open_setup_script(__file__) as f:
            # NOTE(review): raw strings mean this replaces the literal two
            # characters backslash-r-backslash-n, not CRLF line endings —
            # matches upstream setuptools, but looks suspicious.
            code = f.read().replace(r'\r\n', r'\n')

        exec(compile(code, __file__, 'exec'), locals())

    def get_requires_for_build_wheel(self, config_settings=None):
        # 'wheel' itself is always required to build a wheel.
        config_settings = self._fix_config(config_settings)
        return self._get_build_requires(config_settings, requirements=['wheel'])

    def get_requires_for_build_sdist(self, config_settings=None):
        config_settings = self._fix_config(config_settings)
        return self._get_build_requires(config_settings, requirements=[])

    def prepare_metadata_for_build_wheel(self, metadata_directory,
                                         config_settings=None):
        """Generate .dist-info via ``setup.py dist_info`` and return its name."""
        sys.argv = sys.argv[:1] + ['dist_info', '--egg-base',
                                   _to_str(metadata_directory)]
        self.run_setup()

        # dist_info may nest the .dist-info one directory deep; walk down
        # single-child directories until it is found.
        dist_info_directory = metadata_directory
        while True:
            dist_infos = [f for f in os.listdir(dist_info_directory)
                          if f.endswith('.dist-info')]

            if (len(dist_infos) == 0 and
                    len(_get_immediate_subdirectories(dist_info_directory)) == 1):

                dist_info_directory = os.path.join(
                    dist_info_directory, os.listdir(dist_info_directory)[0])
                continue

            assert len(dist_infos) == 1
            break

        # PEP 517 requires that the .dist-info directory be placed in the
        # metadata_directory. To comply, we MUST copy the directory to the root
        if dist_info_directory != metadata_directory:
            shutil.move(
                os.path.join(dist_info_directory, dist_infos[0]),
                metadata_directory)
            shutil.rmtree(dist_info_directory, ignore_errors=True)

        return dist_infos[0]

    def build_wheel(self, wheel_directory, config_settings=None,
                    metadata_directory=None):
        """Run ``setup.py bdist_wheel`` and return the wheel's basename."""
        config_settings = self._fix_config(config_settings)
        wheel_directory = os.path.abspath(wheel_directory)

        # Build the wheel in a temporary directory, then copy to the target
        with TemporaryDirectory(dir=wheel_directory) as tmp_dist_dir:
            sys.argv = (sys.argv[:1] +
                        ['bdist_wheel', '--dist-dir', tmp_dist_dir] +
                        config_settings["--global-option"])
            self.run_setup()

            wheel_basename = _file_with_extension(tmp_dist_dir, '.whl')
            wheel_path = os.path.join(wheel_directory, wheel_basename)
            if os.path.exists(wheel_path):
                # os.rename will fail overwriting on non-unix env
                os.remove(wheel_path)
            os.rename(os.path.join(tmp_dist_dir, wheel_basename), wheel_path)

        return wheel_basename

    def build_sdist(self, sdist_directory, config_settings=None):
        """Run ``setup.py sdist`` (gztar) and return the archive's basename."""
        config_settings = self._fix_config(config_settings)
        sdist_directory = os.path.abspath(sdist_directory)
        sys.argv = sys.argv[:1] + ['sdist', '--formats', 'gztar'] + \
            config_settings["--global-option"] + \
            ["--dist-dir", sdist_directory]
        self.run_setup()

        return _file_with_extension(sdist_directory, '.tar.gz')
class _BuildMetaLegacyBackend(_BuildMetaBackend):
    """Compatibility backend for setuptools

    This is a version of setuptools.build_meta that endeavors to maintain backwards
    compatibility with pre-PEP 517 modes of invocation. It exists as a temporary
    bridge between the old packaging mechanism and the new packaging mechanism,
    and will eventually be removed.
    """
    def run_setup(self, setup_script='setup.py'):
        """Run the setup script with its own directory on ``sys.path``."""
        # In order to maintain compatibility with scripts assuming that
        # the setup.py script is in a directory on the PYTHONPATH, inject
        # '' into sys.path. (pypa/setuptools#1642)
        sys_path = list(sys.path)           # Save the original path

        script_dir = os.path.dirname(os.path.abspath(setup_script))
        if script_dir not in sys.path:
            sys.path.insert(0, script_dir)

        try:
            super(_BuildMetaLegacyBackend,
                  self).run_setup(setup_script=setup_script)
        finally:
            # While PEP 517 frontends should be calling each hook in a fresh
            # subprocess according to the standard (and thus it should not be
            # strictly necessary to restore the old sys.path), we'll restore
            # the original path so that the path manipulation does not persist
            # within the hook after run_setup is called.
            sys.path[:] = sys_path
# The primary backend
_BACKEND = _BuildMetaBackend()

# Module-level PEP 517 hooks, bound to the primary backend instance.
get_requires_for_build_wheel = _BACKEND.get_requires_for_build_wheel
get_requires_for_build_sdist = _BACKEND.get_requires_for_build_sdist
prepare_metadata_for_build_wheel = _BACKEND.prepare_metadata_for_build_wheel
build_wheel = _BACKEND.build_wheel
build_sdist = _BACKEND.build_sdist

# The legacy backend
__legacy__ = _BuildMetaLegacyBackend()
| apache-2.0 |
etal/fammer | fammerlib/build.py | 2 | 18868 | "Build alignments and profiles in a directory tree of sequence sets."
import itertools
import logging
import os
import subprocess
from glob import glob
from os.path import basename, isdir, isfile, join
from Bio import SeqIO
from Bio import AlignIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from biocma import biocma
from biofrills import consensus, alnutils
from ._share import write_fasta
from .tasks import Task, ext, noext, sh, which, is_empty
from . import tmalign
class Result(object):
    """Bundle of the build Tasks produced for one directory level.

    Holds the task for the main alignment, the PDB seed alignment, and,
    when requested, the HMMer profile task (.hmm) and/or the MAPGAPS
    tasks (.cma and .tpl).
    """

    def __init__(self, aln, pdbseq=None, hmm=None, cma=None, tpl=None):
        # Only the alignment task is mandatory; profile tasks are
        # attached later depending on the command-line options.
        self.aln = aln
        self.pdbseq = pdbseq
        self.hmm = hmm
        self.cma = cma
        self.tpl = tpl

    def __str__(self):
        # A Result prints as its main alignment task.
        return str(self.aln)

    # ENH: __cmp__ or __eq__ and __le__ and full_ordering
    # ENH: build() and clean() methods?
def cmd_build(args):
    """Entry point for the 'build' command: build all alignments/profiles
    under args.basedir, optionally emitting HMMer or MAPGAPS profiles."""
    # Verify: 'base' directory exists
    assert isdir(args.basedir)
    base = args.basedir.rstrip('/')
    # Write the directory layout as a Newick tree
    if args.tree:
        tree_str = treeify_subdirs(base) + ';\n'
        with open(base + '.nwk', 'w+') as outfile:
            outfile.write(tree_str)
        logging.info("Wrote %s.nwk", base)

    # Build all alignments, plus HMM .hmm or MAPGAPS .cma/.tpl
    base_result = taskify_subdirs(base, args.hmmer, args.mapgaps, args.pdb, 0)
    # HMM profiles
    if args.hmmer:
        # Building the combined profile DB pulls in the whole task tree.
        T_base_all_hmm = Task(base + '_all.hmm',
                              action=all_hmm,
                              depends=base_result.hmm,
                              )
        T_base_all_hmm.build()
        # Clean everything
        if args.clean:
            T_base_all_hmm.clean()
    # MAPGAPS profiles
    if args.mapgaps:
        T_mg_run_map = Task(base + '.mpa',
                            action=mg_run_map,
                            depends=[base_result.cma, base_result.tpl])
        T_mg_run_map.build()
        if args.clean:
            T_mg_run_map.clean()
    if not args.hmmer and not args.mapgaps:
        # Just build the alignments
        base_result.aln.build()
        if args.clean:
            base_result.aln.clean()
# To build the HMM (and CMA) profiles
def taskify_subdirs(topdir, hmmer, mapgaps, use_pdb, level):
    """Recursively define tasks for a directory tree.

    Return a task to build the HMM profile for topdir.

    this.hmm
    this.aln
    this.fasta -- consenses from each group/family
    this/subdir1.hmm (recurse)
    this/subdir2.hmm (recurse)
    this/...
    this/[others.hmm]
        --> task: hmmemit from each of deez

    How do we know which sub-hmms to build?
        subdirs of this/ --> group names --> [this/groupname.hmm]
        this/*.fasta --> [this/family.hmm]

    If mapgaps is True, build CMA profiles and templates too.
    """
    this = topdir.rstrip('/')
    # NOTE(review): under Python 3 `filter` returns a one-shot iterator,
    # which would break both the `in subdirs` test and the reuse below;
    # this code assumes Python 2 list semantics -- confirm before porting.
    subdirs = filter(isdir,
                     [join(topdir, sd) for sd in sorted(os.listdir(topdir))])
    # Each subtask pair: (HMM, CMA)-making tasks for the subdir
    # Groups with their own families within -- recurse
    subtask_group_results = [taskify_subdirs(sd, hmmer, mapgaps, use_pdb, level+1)
                             for sd in sorted(subdirs)]
    # Families / tips of the profile tree -- build from scratch
    subtask_family_results = []
    subfamily_fastas = glob(join(topdir, '*.fasta'))
    for subfa in sorted(subfamily_fastas):
        # Skip the group sequence sets, they're already covered
        if noext(subfa) in subdirs:
            continue
        subresult = Result(Task(ext(subfa, 'aln'),
                                action=align_fasta_mafft,
                                depends=subfa,
                                cleans=ext(subfa, 'seq')))  # mafft
                                # cleans=[subfa + '.1.fas', subfa + '.2.fas']))  # prank
        if hmmer:
            subresult.hmm = Task(ext(subfa, 'hmm'),
                                 action=aln2hmm,
                                 depends=subresult.aln,
                                 cleans=ext(subfa, 'stk'))
        if mapgaps:
            subresult.cma = Task(ext(subfa, 'cma'),
                                 action=mg_aln2cma,
                                 kwargs={'level': level+1},
                                 depends=subresult.aln,
                                 cleans=[ext(subfa, 'cons.cma'),
                                         ext(subfa, 'cons_iron.cma')])
        subtask_family_results.append(subresult)
    subtask_family_results.sort(key=str)  # Needed? We sort FASTAs above

    # Structural alignment of PDBs in this dir; reuse subfamily PDB alignments
    these_pdbs = glob(join(topdir, '*.pdb'))
    sub_pdb_seqs = [sgr.pdbseq for sgr in subtask_group_results]
    task_pdbseq = Task(this + '.pdb.seq',
                       action=align_pdbs,
                       kwargs={'use_pdb': use_pdb},
                       depends=these_pdbs + sub_pdb_seqs)

    # Aggregate those profile consensus sequences & make a meta-profile
    # (the PDB seed alignment task is always the LAST dependency;
    # align_profiles relies on that ordering).
    result = Result(Task(this + '.aln',
                         action=align_profiles,
                         kwargs={'use_pdb': use_pdb},
                         depends=[r.aln
                                  for r in (subtask_group_results +
                                            subtask_family_results)
                                  ] + [task_pdbseq],
                         cleans=ext(map(str, subtask_group_results +
                                        subtask_family_results),
                                    'cons.seq') + [this + '.families.fa',
                                                   this + '.families.seq',
                                                   this + '.seq']),
                    pdbseq=task_pdbseq)
    if hmmer:
        result.hmm = Task(this + '.hmm',
                          action=aln2hmm,
                          depends=([result.aln] +
                                   [sgr.hmm for sgr in subtask_group_results] +
                                   [sfr.hmm for sfr in subtask_family_results]),
                          cleans=this + '.stk')
    if mapgaps:
        result.tpl = Task(this + '.tpl',
                          action=mg_aln2cma,
                          kwargs={'level': level},
                          depends=result.aln,
                          cleans=[this + '.cons.cma', this + '.cons_iron.cma'])
        result.cma = Task(this + '.cma',
                          action=mg_cat_cma,
                          depends=list(itertools.chain(*[
                              (sgr.tpl, sgr.cma)
                              for sgr in subtask_group_results]
                          )) + [sfr.cma for sfr in subtask_family_results])
    return result
# Actions
def align_fasta_mafft(task):
    """Align a FASTA file with MAFFT. Clustal output.

    Writes task.target as a single-row ("pressed") Clustal file built
    from MAFFT's FASTA output (task.depends[0] with a .seq extension).
    """
    seq = ext(task.depends[0], 'seq')
    sh("mafft --quiet --amino --reorder --maxiterate 1000 "
       "--genafpair --ep 0.123 %s > %s"
       % (task.depends[0], seq))
    # Convert FASTA to "pressed" (single-row) Clustal
    records = list(SeqIO.parse(seq, 'fasta'))
    # Check for 'X' characters in the sequences -- these cause problems
    for rec in records:
        if 'X' in str(rec.seq):
            # FIX: logging.warn is a deprecated alias of logging.warning
            logging.warning('Sequence %r contains unknown residue X', rec.id)
    max_id_len = max(len(r.id) for r in records)
    with open(task.target, 'w+') as outfile:
        outfile.write('CLUSTAL X (-like) multiple sequence alignment\n\n')
        outfile.writelines(
            ['%s %s\n' % (rec.id.ljust(max_id_len), rec.seq)
             for rec in records])
def align_fasta_prank(task):
    """Align a FASTA file with PRANK. Clustal output.

    Cleans: [input].fasta.{1,2}.fas
    """
    # PRANK's "-twice" run leaves its final alignment in <input>.2.fas.
    seq = task.depends[0] + '.2.fas'
    sh("prank -d=%s -o=%s -twice -quiet" % (task.depends[0], task.depends[0]))
    # Convert FASTA to "pressed" (single-row) Clustal
    records = list(SeqIO.parse(seq, 'fasta'))
    # Check for 'X' characters in the sequences -- these cause problems
    for rec in records:
        if 'X' in str(rec.seq):
            # FIX: logging.warn is a deprecated alias of logging.warning
            logging.warning('Sequence %r contains unknown residue X', rec.id)
    max_id_len = max(len(r.id) for r in records)
    with open(task.target, 'w+') as outfile:
        outfile.write('CLUSTAL X (-like) multiple sequence alignment\n\n')
        outfile.writelines(
            ['%s %s\n' % (rec.id.ljust(max_id_len), rec.seq)
             for rec in records])
def align_pdbs(task, sub_pdb_seqs=(), use_pdb=None):
    """Create a structure-based sequence alignment from PDB files.

    Inputs are PDB files and FASTA alignments (of previously aligned PDBs).

    NOTE(review): the sub_pdb_seqs parameter is shadowed immediately below;
    the real inputs come from task.depends.
    """
    if not use_pdb or not task.depends:
        # Just touch the '.pdb.seq' file; don't use TM-align
        with open(task.target, 'a'):
            return

    # Partition dependencies into raw PDBs and prior seed alignments.
    pdbs = []
    sub_pdb_seqs = []
    for elem in task.depends:
        if str(elem).endswith('.pdb'):
            pdbs.append(elem)
        else:
            sub_pdb_seqs.append(str(elem))
    # Scan existing PDB alignments to choose a reference PDB from each
    # sub_pdb_seqs = filter(isfile, sub_pdb_seqs)
    for sub_pdb_fname in map(str, sub_pdb_seqs):
        if is_empty(sub_pdb_fname):
            # Dummy seed alignment -- look in that dir for a .pdb
            # ENH - recursively
            sub_pdbs = glob(join(sub_pdb_fname[:-4], '*.pdb'))
            if sub_pdbs:
                logging.info("Picked up %d neglected PDBs: %s",
                             len(sub_pdbs), ' '.join(sub_pdbs))
                pdbs.extend(sub_pdbs)
            continue
        # Pick the record with the highest TM-score as this seed's reference.
        best_tmscore = -1
        best_pdb = None
        for rec in SeqIO.parse(sub_pdb_fname, 'fasta'):
            # Extract TM-score
            for token in rec.description.split():
                if token.startswith('TM-score'):
                    try:
                        this_tmscore = float(token.split('=', 1)[1])
                        if this_tmscore > best_tmscore:
                            best_tmscore = this_tmscore
                            best_pdb = rec.id
                    except Exception:
                        # FIX: logging.warn -> logging.warning (deprecated
                        # alias) here and below.
                        logging.warning("PDB seq parsing issue: %s",
                                        rec.description)
                    finally:
                        # Only the first TM-score token per record matters.
                        break
        if best_pdb is None:
            logging.warning("Empty PDB alignment: %s", sub_pdb_fname)
        else:
            logging.info("Best PDB of %s: %s", sub_pdb_fname, best_pdb)
            pdbs.append(best_pdb)

    pdbseedfnames = [seed for seed in map(str, sub_pdb_seqs)
                     if isfile(seed) and not is_empty(seed)]
    try:
        mustang_tmpfname = '_tmp_mustang.afasta'
        if len(pdbs) > 1 and which(['mustang']):
            # Align PDBs with MUSTANG.
            subprocess.check_call(['mustang',
                                   '-o', '_tmp_mustang',
                                   '-F', 'fasta',
                                   '-s', 'OFF',
                                   '-i'] + pdbs)
            pdbseedfnames.append(mustang_tmpfname)
        # This is where the magic happens.
        records = tmalign.align_structs(pdbs, pdbseedfnames)
    finally:
        # MUSTANG's scratch file must not leak even if TM-align fails.
        if isfile(mustang_tmpfname):
            os.remove(mustang_tmpfname)
    # SeqIO.write(records, task.target, 'fasta')
    write_fasta(records, task.target)
    # if not records:
    #     logging.info("Created empty PDB alignment %s", task.target)
def align_profiles(task, use_pdb=None):
    """Align several FASTA files with MAFFT. Clustal output.

    Cleans: [depends].cons.seq, [target].families.fa, [target].families.seq,
    [target].seq
    """
    seeds, singles = [], []
    # PDB alignment -- include as a seed profile if requested
    # (taskify_subdirs always appends the PDB seed task last).
    subalignments, pdb_seed = task.depends[:-1], str(task.depends[-1])
    if use_pdb and not is_empty(pdb_seed):
        seeds.append(pdb_seed)
    else:
        logging.info("Empty PDB alignment: %s", pdb_seed)
    # Get subfamily and subgroup consensus sequences/profiles
    for subaln in subalignments:
        aln = AlignIO.read(str(subaln), 'clustal')
        # with open(task.target, 'w+') as outfile:
        with open(ext(subaln, 'cons.seq'), 'w+') as outfile:
            outfile.write(">%s consensus\n" % basename(noext(subaln)))
            cons_seq = consensus.consensus(aln, trim_ends=False,
                                           gap_threshold=0.6)
            if isdir(noext(subaln)):
                # Group profiles: include the subfamily consenses, too
                outfile.write(cons_seq + "\n")
                for record in aln:
                    outfile.write(">%s\n" % record.id)
                    outfile.write("%s\n" % record.seq)
            else:
                # Ungapped family consensus sequences
                outfile.write(cons_seq.replace('-', '') + "\n")
    # Merge the sequences and profiles
    for subconsseq in ext(subalignments, 'cons.seq'):
        # Strip the '.cons.seq' suffix (9 chars) to recover the dir name.
        if isdir(subconsseq[:-9]):
            # Group
            seeds.append(subconsseq)
        else:
            singles.append(subconsseq)
    # First, align/merge the single family consensus sequences
    famfa = ext(task.target, 'families.fa')
    allseq = ext(task.target, 'seq')
    assert singles or seeds, \
        'No .fasta files found to build %s' % task.target
    if singles:
        sh("cat %s > %s" % (' '.join(singles), famfa))
    if seeds:
        # Align the families with the groups
        sh("mafft --quiet --amino --globalgenafpair --maxiterate 1000 %s %s > %s"
           % (' '.join(['--seed '+s for s in seeds]), famfa, allseq))
        # XXX fast version
        # sh("mafft --quiet --amino --auto %s %s > %s"
        #    % (' '.join(['--seed '+s for s in seeds]), famfa, allseq))
    else:
        # No group profiles -- just align the families
        sh("mafft --quiet --amino --globalgenafpair --maxiterate 1000 %s > %s"
           % (famfa, allseq))
    # Convert FASTA to "pressed" (single-row) Clustal
    records = [rec for rec in SeqIO.parse(allseq, 'fasta')
               # Drop PDB-derived sequences
               # if ':' not in rec.id
               if 'TMalign' not in rec.description and
               'TM-score' not in rec.description and
               not rec.id.endswith('.pdb')
               ]
    records = list(alnutils.remove_empty_cols(records))
    if seeds:
        # MAFFT prefixes seed alignments with '_seed_' -- get rid of that
        for rec in records:
            if rec.id.startswith('_seed_'):
                rec.id = rec.id[6:]
    try:
        max_id_len = max(len(r.id) for r in records)
    except ValueError:
        # Common effup
        raise ValueError("Profile alignment failed for %s.\nInputs: %s"
                         % (task.target, ' '.join(map(str, task.depends))))
    with open(task.target, 'w+') as outfile:
        outfile.write('CLUSTAL X (-like) multiple sequence alignment\n\n')
        outfile.writelines(
            ['%s %s\n' % (rec.id.ljust(max_id_len), rec.seq)
             for rec in records])
def aln2hmm(task):
    """Build an HMM profile (.hmm) from a Clustal alignment.

    The alignment is first converted to Stockholm format, which is the
    input format ``hmmbuild`` expects.

    Cleans: .stk
    """
    source_aln = str(task.depends[0])
    stockholm_fname = ext(task.depends[0], 'stk')
    SeqIO.convert(source_aln, 'clustal', stockholm_fname, 'stockholm')
    sh('hmmbuild %s %s' % (task.target, stockholm_fname))
def cat_sub_consenses(task):
    """Write every subfamily consensus sequence into one FASTA file."""
    with open(task.target, 'w+') as out:
        for sub_aln_fname in ext(task.depends, 'aln'):
            base = noext(sub_aln_fname)
            alignment = AlignIO.read(str(sub_aln_fname), 'clustal')
            cons = consensus.consensus(alignment, trim_ends=False,
                                       gap_threshold=0.6)
            out.write(">%s consensus\n" % base)
            out.write(cons + "\n")
            if isdir(base):
                # This subfamily is itself a group profile: append its own
                # subfamily consenses as well.
                with open(ext(sub_aln_fname, 'fasta')) as subfam_file:
                    out.write(subfam_file.read())
def all_hmm(task):
    """Concatenate every HMM profile into one database and press it."""
    root = noext(task.depends[-1])
    # Concatenate the top-level profile plus all nested ones found under
    # the directory tree, skipping previously built *_all.hmm databases.
    cat_cmd = ("cat %s.hmm `find %s/ -name '*.hmm' | grep -v '_all.hmm'` > %s"
               % (root, root, task.target))
    sh(cat_cmd)
    sh("hmmpress -f %s" % task.target)
# MAPGAPS actions
def mg_aln2cma(task, level=None):
    """Convert an alignment profile to CMA (or .tpl).

    Depends: .aln
    Cleans: .cons.cma, .cons_iron.cma

    ``level`` is passed straight through to ``biocma.ChainMultiAlignment``
    (None = unspecified hierarchy level).
    """
    base = noext(task.target)
    name = basename(base)
    # Add consensus back to the subfamily-consensus seq set (.aln)
    # to produce a CMA (.cons.cma)
    aln = AlignIO.read(str(task.depends[0]), 'clustal')
    cons_rec = SeqRecord(Seq(consensus.consensus(aln, trim_ends=False,
                                                 gap_threshold=0.6)),
                         id=name, description=name + ' consensus')
    # NOTE(review): pokes Biopython's private ``_records`` list to place the
    # consensus first -- there is no public insert API in this version.
    aln._records.insert(0, cons_rec)
    # Tidy up the CMA
    cmaln = biocma.ChainMultiAlignment(aln, level=level)
    biocma.write([cmaln], task.target, do_iron=True)
    # -------------
    ### OR (HMMer only):
    ### hmmemit consensus & reuse the .stk (done for .hmm) directly
    ### see hmmalign --mapali option to include original .stk
    ### .stk is no longer for 'clean'; original must be retained
    # stk = ext(task.target, 'stk')
    # cons_fa = ext(task.target, 'cons.fa')
    # sh('hmmalign --amino %s %s > %s'
    #    % (task.depends[1], cons_fa, stk))
    # SeqIO.convert(stk, 'stockholm', cons_fa, 'fasta')
    # sh("press < %s > %s" % (cons_fa, cons_seq))
    # sh("fa2cma %s > %s" % (cons_seq, base + '.cons.cma'))
    # -------------
def mg_cat_cma(task):
    """Concatenate subfamily MAPGAPS profiles.

    Depends: .cma of each subfamily, .tpl of this & sub-groups.
    """
    assert task.depends, 'No CMA files were given'
    cma_paths = ' '.join(str(dep) for dep in task.depends)
    sh('cat %s > %s' % (cma_paths, task.target))
def mg_run_map(task):
    """Build/compile the complete set of MAPGAPS profiles."""
    profile_base = noext(task.target)
    sh("run_map %s" % profile_base)
# Tree
def treeify_subdirs(topdir):
    """Build a Newick string from the directory tree structure.

    Internal nodes = names of non-empty dirs
    External nodes = names of .fasta files
    """
    # Full paths of directories under topdir. NB: materialized as a *list*.
    # Under Python 3 ``filter()`` returns a one-shot iterator, so the old
    # code exhausted it in the recursion below and the membership test
    # ``noext(fafname) not in subdirs`` then matched nothing, duplicating
    # every group directory's .fasta file as a tip.
    subdirs = [path
               for path in (join(topdir, sd)
                            for sd in sorted(os.listdir(topdir)))
               if isdir(path)]
    # Do internal nodes first, then the tips
    # Internal nodes: subtree Newick strings
    subtree_strs = [treeify_subdirs(sd) for sd in sorted(subdirs)]
    # Tips: string names (basename minus trailing .fasta), excluding files
    # that correspond to a subdirectory (those are internal nodes).
    tip_names = [basename(fafname)[:-len('.fasta')]
                 for fafname in glob(join(topdir, '*.fasta'))
                 if noext(fafname) not in subdirs]
    tip_names.sort(key=str)
    return '(%s)%s' % (
        ','.join(subtree_strs + tip_names),
        basename(topdir.rstrip('/')))
| bsd-2-clause |
dbbhattacharya/kitsune | vendor/packages/Werkzeug/werkzeug/debug/repr.py | 7 | 8275 | # -*- coding: utf-8 -*-
"""
werkzeug.debug.repr
~~~~~~~~~~~~~~~~~~~
This module implements object representations for debugging purposes.
Unlike the default repr these reprs expose a lot more information and
produce HTML instead of ASCII.
Together with the CSS and JavaScript files of the debugger this gives
a colorful and more compact output.
:copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD.
"""
import sys
import re
from traceback import format_exception_only
try:
from collections import deque
except ImportError:
deque = None
from werkzeug.utils import escape
from werkzeug.debug.utils import render_template
# Sentinel meaning "no argument supplied" for dump() below.
missing = object()
# Matches runs of blank lines, i.e. paragraph breaks in pydoc output.
_paragraph_re = re.compile(r'(?:\r\n|\r|\n){2,}')
# Type of a compiled regex object (not exposed directly by the `re` module).
RegexType = type(_paragraph_re)
def debug_repr(obj):
    """Creates a debug repr of an object as HTML unicode string."""
    generator = DebugReprGenerator()
    return generator.repr(obj)
def dump(obj=missing):
    """Print the object details to ``stdout._write`` (for the interactive
    console of the web debugger).
    """
    generator = DebugReprGenerator()
    if obj is not missing:
        html = generator.dump_object(obj)
    else:
        # Called with no argument: dump the *caller's* local variables.
        html = generator.dump_locals(sys._getframe(1).f_locals)
    sys.stdout._write(html)
class _Helper(object):
    """Displays an HTML version of the normal help, for the interactive
    debugger only because it requires a patched sys.stdout.
    """

    def __call__(self, topic=None):
        sys.stdout._write(self.get_help(topic))

    def get_help(self, topic):
        """Render the help page for *topic* (generic page when None)."""
        title = None
        text = None
        if topic is not None:
            import pydoc
            # pydoc writes into the patched stdout; reset() collects it.
            pydoc.help(topic)
            help_text = sys.stdout.reset().decode('utf-8', 'ignore')
            paragraphs = _paragraph_re.split(help_text)
            if len(paragraphs) <= 1:
                title = 'Help'
                text = paragraphs[0]
            else:
                # First paragraph is the heading, the rest is the body.
                title = paragraphs[0]
                text = '\n\n'.join(paragraphs[1:])
        return render_template('help_command.html', title=title, text=text)
# Singleton replacement for the builtin ``help`` inside the debugger console.
helper = _Helper()
def _add_subclass_info(inner, obj, base):
if isinstance(base, tuple):
for base in base:
if type(obj) is base:
return inner
elif type(obj) is base:
return inner
module = ''
if obj.__class__.__module__ not in ('__builtin__', 'exceptions'):
module = '<span class="module">%s.</span>' % obj.__class__.__module__
return '%s%s(%s)' % (module, obj.__class__.__name__, inner)
class DebugReprGenerator(object):
    """Generates HTML debug representations of arbitrary objects.

    Cycle detection is done with an explicit identity stack so that
    self-referential containers render as e.g. ``[...]`` instead of
    recursing forever.

    NOTE(review): this is Python-2-only code (``unicode``, ``basestring``,
    ``long``, ``dict.iteritems``, ``str.decode``).
    """
    def __init__(self):
        # Stack of objects currently being repr'd (for cycle detection).
        self._stack = []
    def _sequence_repr_maker(left, right, base=object(), limit=8):
        # Class-body factory (note: no ``self``); builds the repr method for
        # one sequence type and is deleted from the class afterwards.
        def proxy(self, obj, recursive):
            if recursive:
                # Cycle detected: show an ellipsis instead of descending.
                return _add_subclass_info(left + '...' + right, obj, base)
            buf = [left]
            have_extended_section = False
            for idx, item in enumerate(obj):
                if idx:
                    buf.append(', ')
                if idx == limit:
                    # Items past `limit` go into a collapsible span.
                    buf.append('<span class="extended">')
                    have_extended_section = True
                buf.append(self.repr(item))
            if have_extended_section:
                buf.append('</span>')
            buf.append(right)
            return _add_subclass_info(u''.join(buf), obj, base)
        return proxy
    list_repr = _sequence_repr_maker('[', ']', list)
    tuple_repr = _sequence_repr_maker('(', ')', tuple)
    set_repr = _sequence_repr_maker('set([', '])', set)
    frozenset_repr = _sequence_repr_maker('frozenset([', '])', frozenset)
    if deque is not None:
        deque_repr = _sequence_repr_maker('<span class="module">collections.'
                                          '</span>deque([', '])', deque)
    # The factory is only needed while the class body executes.
    del _sequence_repr_maker
    def regex_repr(self, obj):
        # Render the pattern in r'' / ur'' literal form (py2 codec).
        pattern = repr(obj.pattern).decode('string-escape', 'ignore')
        if pattern[:1] == 'u':
            pattern = 'ur' + pattern[1:]
        else:
            pattern = 'r' + pattern
        return u're.compile(<span class="string regex">%s</span>)' % pattern
    def string_repr(self, obj, limit=70):
        """Repr a str/unicode, collapsing anything past `limit` chars."""
        buf = ['<span class="string">']
        escaped = escape(obj)
        # Split the repr into a visible part and a collapsible tail.
        a = repr(escaped[:limit])
        b = repr(escaped[limit:])
        if isinstance(obj, unicode):
            buf.append('u')
            a = a[1:]
            b = b[1:]
        if b != "''":
            # Join the two repr halves, dropping the inner quote chars.
            buf.extend((a[:-1], '<span class="extended">', b[1:], '</span>'))
        else:
            buf.append(a)
        buf.append('</span>')
        return _add_subclass_info(u''.join(buf), obj, (str, unicode))
    def dict_repr(self, d, recursive, limit=5):
        if recursive:
            return _add_subclass_info(u'{...}', d, dict)
        buf = ['{']
        have_extended_section = False
        for idx, (key, value) in enumerate(d.iteritems()):
            if idx:
                buf.append(', ')
            if idx == limit - 1:
                buf.append('<span class="extended">')
                have_extended_section = True
            buf.append('<span class="pair"><span class="key">%s</span>: '
                       '<span class="value">%s</span></span>' %
                       (self.repr(key), self.repr(value)))
        if have_extended_section:
            buf.append('</span>')
        buf.append('}')
        return _add_subclass_info(u''.join(buf), d, dict)
    def object_repr(self, obj):
        # Last-resort: escape the plain repr() of the object.
        return u'<span class="object">%s</span>' % \
            escape(repr(obj).decode('utf-8', 'replace'))
    def dispatch_repr(self, obj, recursive):
        """Route *obj* to the type-specific repr method."""
        if obj is helper:
            # The console help object renders as the full help page.
            return helper.get_help(None)
        if isinstance(obj, (int, long, float, complex)):
            return u'<span class="number">%r</span>' % obj
        if isinstance(obj, basestring):
            return self.string_repr(obj)
        if isinstance(obj, RegexType):
            return self.regex_repr(obj)
        if isinstance(obj, list):
            return self.list_repr(obj, recursive)
        if isinstance(obj, tuple):
            return self.tuple_repr(obj, recursive)
        if isinstance(obj, set):
            return self.set_repr(obj, recursive)
        if isinstance(obj, frozenset):
            return self.frozenset_repr(obj, recursive)
        if isinstance(obj, dict):
            return self.dict_repr(obj, recursive)
        if deque is not None and isinstance(obj, deque):
            return self.deque_repr(obj, recursive)
        return self.object_repr(obj)
    def fallback_repr(self):
        """Repr shown when the object's own repr machinery raised."""
        try:
            info = ''.join(format_exception_only(*sys.exc_info()[:2]))
        except:
            info = '?'
        return u'<span class="brokenrepr"><broken repr (%s)>' \
               u'</span>' % escape(info.decode('utf-8', 'ignore').strip())
    def repr(self, obj):
        """Repr *obj*, tracking identity on the stack to detect cycles."""
        recursive = False
        for item in self._stack:
            if item is obj:
                recursive = True
                break
        self._stack.append(obj)
        try:
            try:
                return self.dispatch_repr(obj, recursive)
            except:
                # Never let a broken __repr__ take down the debugger.
                return self.fallback_repr()
        finally:
            self._stack.pop()
    def dump_object(self, obj):
        """Render the object-details page (dict contents or attributes)."""
        repr = items = None
        if isinstance(obj, dict):
            title = 'Contents of'
            items = []
            for key, value in obj.iteritems():
                if not isinstance(key, basestring):
                    # Non-string keys: fall back to attribute dumping.
                    items = None
                    break
                items.append((key, self.repr(value)))
        if items is None:
            items = []
            repr = self.repr(obj)
            for key in dir(obj):
                try:
                    items.append((key, self.repr(getattr(obj, key))))
                except:
                    # Skip attributes whose access raises.
                    pass
            title = 'Details for'
        title += ' ' + object.__repr__(obj)[1:-1]
        return render_template('dump_object.html', items=items,
                               title=title, repr=repr)
    def dump_locals(self, d):
        """Render a frame's local variables as a dump page."""
        items = [(key, self.repr(value)) for key, value in d.items()]
        return render_template('dump_object.html', items=items,
                               title='Local variables in frame', repr=None)
| bsd-3-clause |
umaic/oraculo | umaic/pipelines.py | 1 | 2162 | # Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/topics/item-pipeline.html
#from scrapy import log
from twisted.enterprise import adbapi
import datetime
import time
from scrapy.exceptions import DropItem
class DuplicatesPipeline(object):
    """Drop items whose title has already been seen during this crawl."""

    def __init__(self):
        # Titles of every item that has passed through so far.
        self.titles_seen = set()

    def process_item(self, item, spider):
        """Pass *item* through once; raise DropItem on a repeated title."""
        title = item['title']
        if title not in self.titles_seen:
            self.titles_seen.add(title)
            return item
        raise DropItem("Duplicate item found: %s" % item)
class MySQLStorePipeline(object):
    """Store scraped news items in MySQL, skipping titles already present."""

    def __init__(self, db, user, passwd):
        self.db = db
        self.user = user
        self.passwd = passwd

    @classmethod
    def from_crawler(cls, crawler):
        """Build the pipeline from the crawler's DB_* settings."""
        settings = crawler.settings
        return cls(
            db=settings.get('DB_NAME'),
            user=settings.get('DB_USER'),
            passwd=settings.get('DB_PASSWD')
        )

    def open_spider(self, spider):
        # Twisted ADBAPI pool: queries run on threads, off the reactor loop.
        conn_kwargs = dict(db=self.db, user=self.user, passwd=self.passwd)
        self.dbpool = adbapi.ConnectionPool('MySQLdb', **conn_kwargs)

    def close_spider(self, spider):
        self.dbpool.close()

    def process_item(self, item, spider):
        # Fire-and-forget: the insert runs in the pool's thread while the
        # item continues down the pipeline immediately.
        self.dbpool.runInteraction(self._conditional_insert, item)
        return item

    def _conditional_insert(self, tx, item):
        """Insert *item* into the `news` table unless its title exists."""
        tx.execute("SELECT * FROM news WHERE title = %s", (item['title'], ))
        existing = tx.fetchone()
        if not existing:
            tx.execute(
                "INSERT INTO news (title, link, description, labels, source, cdate) "
                "VALUES (%s, %s, %s, %s, %s, %s)",
                (item['title'],
                 item['link'],
                 item['description'],
                 item['labels'],
                 item['source'],
                 item['cdate'])
            )
        # NOTE(review): DB errors from the deferred are currently unhandled
        # (the original code carried a commented-out logging errback).
| gpl-3.0 |
bolkedebruin/airflow | tests/providers/google/cloud/operators/test_functions_system.py | 1 | 1325 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from tests.providers.google.cloud.utils.gcp_authenticator import GCP_FUNCTION_KEY
from tests.test_utils.gcp_system_helpers import CLOUD_DAG_FOLDER, provide_gcp_context, skip_gcp_system
from tests.test_utils.system_tests_class import SystemTest
@skip_gcp_system(GCP_FUNCTION_KEY, require_local_executor=True)
class GcpFunctionExampleDagsSystemTest(SystemTest):
    """System test: runs the example Cloud Functions DAG end-to-end.

    Skipped unless credentials for GCP_FUNCTION_KEY are available.
    """
    @provide_gcp_context(GCP_FUNCTION_KEY)
    def test_run_example_dag_function(self):
        # Executes the whole example DAG against real GCP infrastructure.
        self.run_dag('example_gcp_function', CLOUD_DAG_FOLDER)
| apache-2.0 |
# Transliteration table for one 255-slot Unicode page. The first line of the
# original assignment was corrupted by fused dataset metadata; this restores
# a valid definition producing the identical tuple. No codepoint on this page
# has an ASCII approximation, so every slot is the '[?]' placeholder; slots
# 0x80-0xF3 (except 0x9A) carry a trailing space, matching the original
# generated table byte for byte.
data = tuple(
    '[?] ' if 0x80 <= codepoint <= 0xF3 and codepoint != 0x9A else '[?]'
    for codepoint in range(0xFF)
)
| gpl-2.0 |
openstack/refstack | refstack/api/controllers/products.py | 2 | 11674 | # Copyright (c) 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Product controller."""
import json
import uuid
from oslo_db.exception import DBReferenceError
from oslo_log import log
import pecan
from pecan.secure import secure
import six
from refstack.api import constants as const
from refstack.api.controllers import validation
from refstack.api import utils as api_utils
from refstack.api import validators
from refstack import db
LOG = log.getLogger(__name__)
class VersionsController(validation.BaseRestControllerWithValidation):
    """/v1/products/<product_id>/versions handler."""
    __validator__ = validators.ProductVersionValidator
    @pecan.expose('json')
    def get(self, id):
        """Get all versions for a product."""
        product = db.get_product(id)
        vendor_id = product['organization_id']
        # Non-public products are visible only to foundation/vendor admins.
        is_admin = (api_utils.check_user_is_foundation_admin() or
                    api_utils.check_user_is_vendor_admin(vendor_id))
        if not product['public'] and not is_admin:
            pecan.abort(403, 'Forbidden.')
        allowed_keys = ['id', 'product_id', 'version', 'cpid']
        return db.get_product_versions(id, allowed_keys=allowed_keys)
    @pecan.expose('json')
    def get_one(self, id, version_id):
        """Get specific version information."""
        product = db.get_product(id)
        vendor_id = product['organization_id']
        is_admin = (api_utils.check_user_is_foundation_admin() or
                    api_utils.check_user_is_vendor_admin(vendor_id))
        if not product['public'] and not is_admin:
            pecan.abort(403, 'Forbidden.')
        allowed_keys = ['id', 'product_id', 'version', 'cpid']
        return db.get_product_version(version_id, allowed_keys=allowed_keys)
    @secure(api_utils.is_authenticated)
    @pecan.expose('json')
    def post(self, id):
        """'secure' decorator doesn't work at store_item. it must be here."""
        # Stash the product id for store_item(), which is invoked by the
        # validation base class.
        self.product_id = id
        return super(VersionsController, self).post()
    @pecan.expose('json')
    def store_item(self, version_info):
        """Add a new version for the product."""
        if (not api_utils.check_user_is_product_admin(self.product_id) and
                not api_utils.check_user_is_foundation_admin()):
            pecan.abort(403, 'Forbidden.')
        creator = api_utils.get_user_id()
        pecan.response.status = 201
        allowed_keys = ['id', 'product_id', 'version', 'cpid']
        return db.add_product_version(self.product_id, version_info['version'],
                                      creator, version_info.get('cpid'),
                                      allowed_keys)
    @secure(api_utils.is_authenticated)
    @pecan.expose('json', method='PUT')
    def put(self, id, version_id, **kw):
        """Update details for a specific version.
        Endpoint: /v1/products/<product_id>/versions/<version_id>
        """
        if (not api_utils.check_user_is_product_admin(id) and
                not api_utils.check_user_is_foundation_admin()):
            pecan.abort(403, 'Forbidden.')
        # Only 'cpid' is updatable on a version.
        version_info = {'id': version_id}
        if 'cpid' in kw:
            version_info['cpid'] = kw['cpid']
        version = db.update_product_version(version_info)
        pecan.response.status = 200
        return version
    @secure(api_utils.is_authenticated)
    @pecan.expose('json')
    def delete(self, id, version_id):
        """Delete a product version.
        Endpoint: /v1/products/<product_id>/versions/<version_id>
        """
        if (not api_utils.check_user_is_product_admin(id) and
                not api_utils.check_user_is_foundation_admin()):
            pecan.abort(403, 'Forbidden.')
        try:
            version = db.get_product_version(version_id,
                                             allowed_keys=['version'])
            # The empty version is created implicitly with the product and
            # anchors basic product/test association; it must be kept.
            if not version['version']:
                pecan.abort(400, 'Can not delete the empty version as it is '
                                 'used for basic product/test association. '
                                 'This version was implicitly created with '
                                 'the product, and so it cannot be deleted '
                                 'explicitly.')
            db.delete_product_version(version_id)
        except DBReferenceError:
            # Foreign-key constraint: tests still reference this version.
            pecan.abort(400, 'Unable to delete. There are still tests '
                             'associated to this product version.')
        pecan.response.status = 204
class ProductsController(validation.BaseRestControllerWithValidation):
    """/v1/products handler."""
    __validator__ = validators.ProductValidator
    _custom_actions = {
        "action": ["POST"],
    }
    # Nested resource: /v1/products/<product_id>/versions
    versions = VersionsController()
    @pecan.expose('json')
    def get(self):
        """Get information of all products."""
        filters = api_utils.parse_input_params(['organization_id'])
        allowed_keys = ['id', 'name', 'description', 'product_ref_id', 'type',
                        'product_type', 'public', 'organization_id']
        user = api_utils.get_user_id()
        is_admin = user in db.get_foundation_users()
        try:
            if is_admin:
                # Foundation admins see (and can manage) everything.
                products = db.get_products(allowed_keys=allowed_keys,
                                           filters=filters)
                for s in products:
                    s['can_manage'] = True
            else:
                # Merge public products with those the user owns; ownership
                # wins for the can_manage flag.
                result = dict()
                filters['public'] = True
                products = db.get_products(allowed_keys=allowed_keys,
                                           filters=filters)
                for s in products:
                    _id = s['id']
                    result[_id] = s
                    result[_id]['can_manage'] = False
                filters.pop('public')
                products = db.get_products_by_user(user,
                                                   allowed_keys=allowed_keys,
                                                   filters=filters)
                for s in products:
                    _id = s['id']
                    if _id not in result:
                        result[_id] = s
                    result[_id]['can_manage'] = True
                products = list(result.values())
        except Exception as ex:
            LOG.exception('An error occurred during '
                          'operation with database: %s' % ex)
            pecan.abort(400)
        products.sort(key=lambda x: x['name'])
        return {'products': products}
    @pecan.expose('json')
    def get_one(self, id):
        """Get information about product."""
        allowed_keys = ['id', 'name', 'description',
                        'product_ref_id', 'product_type',
                        'public', 'properties', 'created_at', 'updated_at',
                        'organization_id', 'created_by_user', 'type']
        product = db.get_product(id, allowed_keys=allowed_keys)
        vendor_id = product['organization_id']
        is_admin = (api_utils.check_user_is_foundation_admin() or
                    api_utils.check_user_is_vendor_admin(vendor_id))
        if not is_admin and not product['public']:
            pecan.abort(403, 'Forbidden.')
        if not is_admin:
            # Strip fields that only admins may see.
            admin_only_keys = ['created_by_user', 'created_at', 'updated_at',
                               'properties']
            for key in list(product):
                if key in admin_only_keys:
                    product.pop(key)
        product['can_manage'] = is_admin
        return product
    @secure(api_utils.is_authenticated)
    @pecan.expose('json')
    def post(self):
        """'secure' decorator doesn't work at store_item. it must be here."""
        return super(ProductsController, self).post()
    @pecan.expose('json')
    def store_item(self, product):
        """Handler for storing item. Should return new item id."""
        creator = api_utils.get_user_id()
        # Distros are 'software'; everything else is a cloud product.
        product['type'] = (const.SOFTWARE
                           if product['product_type'] == const.DISTRO
                           else const.CLOUD)
        if product['type'] == const.SOFTWARE:
            product['product_ref_id'] = six.text_type(uuid.uuid4())
        vendor_id = product.pop('organization_id', None)
        if not vendor_id:
            # find or create default vendor for new product
            # TODO(andrey-mp): maybe just fill with info here and create
            # at DB layer in one transaction
            default_vendor_name = 'vendor_' + creator
            vendors = db.get_organizations_by_user(creator)
            for v in vendors:
                if v['name'] == default_vendor_name:
                    vendor_id = v['id']
                    break
            else:
                # for/else: no existing default vendor found -> create one.
                vendor = {'name': default_vendor_name}
                vendor = db.add_organization(vendor, creator)
                vendor_id = vendor['id']
        product['organization_id'] = vendor_id
        product = db.add_product(product, creator)
        return {'id': product['id']}
    @secure(api_utils.is_authenticated)
    @pecan.expose('json', method='PUT')
    def put(self, id, **kw):
        """Handler for update item. Should return full info with updates."""
        product = db.get_product(id)
        vendor_id = product['organization_id']
        vendor = db.get_organization(vendor_id)
        is_admin = (api_utils.check_user_is_foundation_admin()
                    or api_utils.check_user_is_vendor_admin(vendor_id))
        if not is_admin:
            pecan.abort(403, 'Forbidden.')
        # Copy only the updatable fields that were actually supplied.
        product_info = {'id': id}
        if 'name' in kw:
            product_info['name'] = kw['name']
        if 'description' in kw:
            product_info['description'] = kw['description']
        if 'product_ref_id' in kw:
            product_info['product_ref_id'] = kw['product_ref_id']
        if 'public' in kw:
            # user can mark product as public only if
            # his/her vendor is public(official)
            public = api_utils.str_to_bool(kw['public'])
            if (vendor['type'] not in (const.OFFICIAL_VENDOR, const.FOUNDATION)
                    and public):
                pecan.abort(403, 'Forbidden.')
            product_info['public'] = public
        if 'properties' in kw:
            product_info['properties'] = json.dumps(kw['properties'])
        db.update_product(product_info)
        pecan.response.status = 200
        # Re-read so the response reflects the stored state.
        product = db.get_product(id)
        product['can_manage'] = True
        return product
    @secure(api_utils.is_authenticated)
    @pecan.expose('json')
    def delete(self, id):
        """Delete product."""
        if (not api_utils.check_user_is_foundation_admin() and
                not api_utils.check_user_is_product_admin(id)):
            pecan.abort(403, 'Forbidden.')
        try:
            db.delete_product(id)
        except DBReferenceError:
            # Foreign-key constraint: tests still reference its versions.
            pecan.abort(400, 'Unable to delete. There are still tests '
                             'associated to versions of this product.')
        pecan.response.status = 204
| apache-2.0 |
BlueDragonX/pyramid_couchauth_example | setup.py | 1 | 1333 | # Copyright (c) 2011-2012 Ryan Bourgeois <bluedragonx@gmail.com>
#
# This project is free software according to the BSD-modified license. Refer to
# the LICENSE file for complete details.
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
requires = ['pyramid',
'WebError',
'pyramid_couchauth',
'py_bcrypt']
setup(name='pyramid_couchauth_example',
version='0.1',
description='Working pyramid_couchauth example.',
long_description='A full pyramid project implementing pyramid_couchauth as the authentication/authorization mechanism.',
classifiers=[
"Programming Language :: Python",
"Framework :: Pylons",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
author='Ryan Bourgeois',
author_email='bluedragonx@gmail.com',
url='https://github.com/BlueDragonX/pyramid_couchauth_example',
keywords='web pyramid pylons couchdb',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=requires,
tests_require=requires,
test_suite="pyramid_couchauth_example",
entry_points = """\
[paste.app_factory]
main = pyramid_couchauth_example:main
""",
)
| bsd-3-clause |
Leila20/django | tests/auth_tests/test_admin_multidb.py | 8 | 1495 | from django.conf.urls import url
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.db import connections
from django.test import TestCase, mock, override_settings
from django.urls import reverse
class Router(object):
    """Database router that sends every read and write to ``target_db``.

    The test case flips the class-level attribute to point successive
    operations at different databases.
    """
    # Alias of the database all queries should use (class-level so the
    # tests can switch it globally).
    target_db = None

    def db_for_read(self, model, **hints):
        return self.target_db

    def db_for_write(self, model, **hints):
        # Writes follow exactly the same routing as reads.
        return self.db_for_read(model, **hints)
# Stand-alone admin site (instead of the default django.contrib.admin.site)
# registering only the User model, mounted by the URLconf below.
site = admin.AdminSite(name='test_adminsite')
site.register(User, admin_class=UserAdmin)
urlpatterns = [
    url(r'^admin/', site.urls),
]
@override_settings(ROOT_URLCONF=__name__, DATABASE_ROUTERS=['%s.Router' % __name__])
class MultiDatabaseTests(TestCase):
    """Check the auth admin honors the active database router."""
    multi_db = True
    @classmethod
    def setUpTestData(cls):
        # Create one superuser per configured database, routing each
        # creation to its own database via the class-level Router switch.
        cls.superusers = {}
        for db in connections:
            Router.target_db = db
            cls.superusers[db] = User.objects.create_superuser(
                username='admin', password='something', email='test@test.org',
            )
    @mock.patch('django.contrib.auth.admin.transaction')
    def test_add_view(self, mock):
        for db in connections:
            Router.target_db = db
            self.client.force_login(self.superusers[db])
            self.client.post(reverse('test_adminsite:auth_user_add'), {
                'username': 'some_user',
                'password1': 'helloworld',
                'password2': 'helloworld',
            })
            # The add view must open its transaction on the routed database.
            mock.atomic.assert_called_with(using=db)
| bsd-3-clause |
bitspill/electrum-doged | lib/tests/test_bitcoin.py | 9 | 7678 | import unittest
import sys
from ecdsa.util import number_to_string
from lib.bitcoin import (
generator_secp256k1, point_to_ser, public_key_to_bc_address, EC_KEY,
bip32_root, bip32_public_derivation, bip32_private_derivation, pw_encode,
pw_decode, Hash, public_key_from_private_key, address_from_private_key,
is_valid, is_private_key, xpub_from_xprv)
try:
import ecdsa
except ImportError:
sys.exit("Error: python-ecdsa does not seem to be installed. Try 'sudo pip install ecdsa'")
class Test_bitcoin(unittest.TestCase):
def test_crypto(self):
for message in ["Chancellor on brink of second bailout for banks", chr(255)*512]:
self._do_test_crypto(message)
def _do_test_crypto(self, message):
G = generator_secp256k1
_r = G.order()
pvk = ecdsa.util.randrange( pow(2,256) ) %_r
Pub = pvk*G
pubkey_c = point_to_ser(Pub,True)
#pubkey_u = point_to_ser(Pub,False)
addr_c = public_key_to_bc_address(pubkey_c)
#addr_u = public_key_to_bc_address(pubkey_u)
#print "Private key ", '%064x'%pvk
eck = EC_KEY(number_to_string(pvk,_r))
#print "Compressed public key ", pubkey_c.encode('hex')
enc = EC_KEY.encrypt_message(message, pubkey_c)
dec = eck.decrypt_message(enc)
assert dec == message
#print "Uncompressed public key", pubkey_u.encode('hex')
#enc2 = EC_KEY.encrypt_message(message, pubkey_u)
dec2 = eck.decrypt_message(enc)
assert dec2 == message
signature = eck.sign_message(message, True, addr_c)
#print signature
EC_KEY.verify_message(addr_c, signature, message)
def test_bip32(self):
# see https://en.bitcoin.it/wiki/BIP_0032_TestVectors
xpub, xprv = self._do_test_bip32("000102030405060708090a0b0c0d0e0f", "m/0'/1/2'/2/1000000000", testnet=False)
assert xpub == "xpub6H1LXWLaKsWFhvm6RVpEL9P4KfRZSW7abD2ttkWP3SSQvnyA8FSVqNTEcYFgJS2UaFcxupHiYkro49S8yGasTvXEYBVPamhGW6cFJodrTHy"
assert xprv == "xprvA41z7zogVVwxVSgdKUHDy1SKmdb533PjDz7J6N6mV6uS3ze1ai8FHa8kmHScGpWmj4WggLyQjgPie1rFSruoUihUZREPSL39UNdE3BBDu76"
xpub, xprv = self._do_test_bip32("fffcf9f6f3f0edeae7e4e1dedbd8d5d2cfccc9c6c3c0bdbab7b4b1aeaba8a5a29f9c999693908d8a8784817e7b7875726f6c696663605d5a5754514e4b484542","m/0/2147483647'/1/2147483646'/2", testnet=False)
assert xpub == "xpub6FnCn6nSzZAw5Tw7cgR9bi15UV96gLZhjDstkXXxvCLsUXBGXPdSnLFbdpq8p9HmGsApME5hQTZ3emM2rnY5agb9rXpVGyy3bdW6EEgAtqt"
assert xprv == "xprvA2nrNbFZABcdryreWet9Ea4LvTJcGsqrMzxHx98MMrotbir7yrKCEXw7nadnHM8Dq38EGfSh6dqA9QWTyefMLEcBYJUuekgW4BYPJcr9E7j"
def test_bip32_testnet(self):
xpub, xprv = self._do_test_bip32("000102030405060708090a0b0c0d0e0f", "m/0'/1/2'/2/1000000000", testnet=True)
assert xpub == "tpubDHNy3kAG39ThyiwwsgoKY4iRenXDRtce8qdCFJZXPMCJg5dsCUHayp84raLTpvyiNA9sXPob5rgqkKvkN8S7MMyXbnEhGJMW64Cf4vFAoaF"
assert xprv == "tprv8kgvuL81tmn36Fv9z38j8f4K5m1HGZRjZY2QxnXDy5PuqbP6a5TzoKWCgTcGHBu66W3TgSbAu2yX6sPza5FkHmy564Sh6gmCPUNeUt4yj2x"
xpub, xprv = self._do_test_bip32("fffcf9f6f3f0edeae7e4e1dedbd8d5d2cfccc9c6c3c0bdbab7b4b1aeaba8a5a29f9c999693908d8a8784817e7b7875726f6c696663605d5a5754514e4b484542","m/0/2147483647'/1/2147483646'/2", testnet=True)
assert xpub == "tpubDG9qJLc8hq8PMG7y4sQEodLSocEkfj4mGrUC75b7G76mDoqybcUXvmvRsruvLeF14mhixobZwZP6LwqeFePKU83Sv8ZnxWdHBb6VzE6zbvC"
assert xprv == "tprv8jTo9vZtZTSiTo6BBDjeQDgLEaipWPsrhYsQpZYoqqJNPKbCyDewkHJZhkoSHiWYCUf1Gm4TFzQxcG4D6s1J9Hsn4whDK7QYyHHokJeUuac"
def _do_test_bip32(self, seed, sequence, testnet):
xprv, xpub = bip32_root(seed.decode('hex'), testnet)
assert sequence[0:2] == "m/"
path = 'm'
sequence = sequence[2:]
for n in sequence.split('/'):
child_path = path + '/' + n
if n[-1] != "'":
xpub2 = bip32_public_derivation(xpub, path, child_path, testnet)
xprv, xpub = bip32_private_derivation(xprv, path, child_path, testnet)
if n[-1] != "'":
assert xpub == xpub2
path = child_path
return xpub, xprv
def test_aes_homomorphic(self):
    """pw_decode(pw_encode(x, pw), pw) recovers the original payload.

    NOTE(review): despite the historical method name, this is a
    round-trip (encrypt-then-decrypt) test, not a homomorphism check.
    """
    payload = u'\u66f4\u7a33\u5b9a\u7684\u4ea4\u6613\u5e73\u53f0'
    password = u'secret'
    enc = pw_encode(payload, password)
    dec = pw_decode(enc, password)
    self.assertEqual(dec, payload)
def test_aes_encode_without_password(self):
    """When not passed a password, pw_encode is a no-op on the payload."""
    payload = u'\u66f4\u7a33\u5b9a\u7684\u4ea4\u6613\u5e73\u53f0'
    enc = pw_encode(payload, None)
    self.assertEqual(payload, enc)
def test_aes_deencode_without_password(self):
    """When not passed a password, pw_decode is a no-op on the payload."""
    payload = u'\u66f4\u7a33\u5b9a\u7684\u4ea4\u6613\u5e73\u53f0'
    enc = pw_decode(payload, None)
    self.assertEqual(payload, enc)
def test_aes_decode_with_invalid_password(self):
    """pw_decode raises an Exception when supplied an invalid password."""
    payload = u"blah"
    encrypted = pw_encode(payload, u"uber secret")
    with self.assertRaises(Exception):
        pw_decode(encrypted, u"not the password")
def test_hash(self):
    """Make sure the Hash function does sha256 twice."""
    payload = u"test"
    # Raw bytes of sha256(sha256("test")).
    expected = '\x95MZI\xfdp\xd9\xb8\xbc\xdb5\xd2R&x)\x95\x7f~\xf7\xfalt\xf8\x84\x19\xbd\xc5\xe8"\t\xf4'
    result = Hash(payload)
    self.assertEqual(expected, result)
def test_xpub_from_xprv(self):
    """We can derive the xpub key from a xprv (mainnet headers)."""
    # Taken from test vectors in https://en.bitcoin.it/wiki/BIP_0032_TestVectors
    xpub = "xpub6H1LXWLaKsWFhvm6RVpEL9P4KfRZSW7abD2ttkWP3SSQvnyA8FSVqNTEcYFgJS2UaFcxupHiYkro49S8yGasTvXEYBVPamhGW6cFJodrTHy"
    xprv = "xprvA41z7zogVVwxVSgdKUHDy1SKmdb533PjDz7J6N6mV6uS3ze1ai8FHa8kmHScGpWmj4WggLyQjgPie1rFSruoUihUZREPSL39UNdE3BBDu76"
    result = xpub_from_xprv(xprv)
    self.assertEqual(result, xpub)
def test_xpub_from_xprv_testnet(self):
    """We can derive the xpub key from a xprv using testnet headers."""
    # Same BIP32 vector as above, serialized with tpub/tprv prefixes.
    xpub = "tpubDHNy3kAG39ThyiwwsgoKY4iRenXDRtce8qdCFJZXPMCJg5dsCUHayp84raLTpvyiNA9sXPob5rgqkKvkN8S7MMyXbnEhGJMW64Cf4vFAoaF"
    xprv = "tprv8kgvuL81tmn36Fv9z38j8f4K5m1HGZRjZY2QxnXDy5PuqbP6a5TzoKWCgTcGHBu66W3TgSbAu2yX6sPza5FkHmy564Sh6gmCPUNeUt4yj2x"
    result = xpub_from_xprv(xprv, testnet=True)
    self.assertEqual(result, xpub)
class Test_keyImport(unittest.TestCase):
    """ The keys used in this class are TEST keys from
    https://en.bitcoin.it/wiki/BIP_0032_TestVectors"""

    # WIF-encoded private key, its compressed public key (hex), and the
    # corresponding mainnet address.
    private_key = "TAD8rebzCEyYBZWCqjsKxeH9YjenLqX55MNgqGyeQkHdN5T7ejYH"
    public_key_hex = "0220d43256bdb32c7517bb0e3f086f54ec351d2299a5808b6a36c7ba434094c8ef"
    main_address = "LYUdH72gHL4gcW8pPwaJm4uCFbkCXABAZW"

    def test_public_key_from_private_key(self):
        # The public key derived from the WIF key must match the known hex.
        result = public_key_from_private_key(self.private_key)
        self.assertEqual(self.public_key_hex, result)

    def test_address_from_private_key(self):
        # The address derived from the WIF key must match the known address.
        result = address_from_private_key(self.private_key)
        self.assertEqual(self.main_address, result)

    def test_is_valid_address(self):
        self.assertTrue(is_valid(self.main_address))
        self.assertFalse(is_valid("not an address"))

    def test_is_private_key(self):
        self.assertTrue(is_private_key(self.private_key))
        # A public key is not a private key.
        self.assertFalse(is_private_key(self.public_key_hex))
| gpl-3.0 |
harterj/moose | python/MooseDocs/extensions/ifelse.py | 5 | 8666 | #* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import sys
import re
import importlib
import collections
import logging
import moosetree
import mooseutils
import MooseDocs
from ..base import Extension, components
from ..base.readers import MarkdownReader
from ..common import exceptions
from ..tree import tokens
from . import command, appsyntax
LOG = logging.getLogger(__name__)
# A token for keeping track of if/elif/else statements. If the token has children than the statement
# is True and the content within the token should be tokenized and displayed
Statement = tokens.newToken('Statement')
Condition = tokens.newToken('Condition', command=None, content=None, function=None)
def make_extension(**kwargs):
    """MooseDocs entry point for instantiating the ifelse extension."""
    return IfElseExtension(**kwargs)
def hasMooseApp(ext, app):
    """Module function for searching for the existence of a registered application name."""
    # Delegates to the extension; note the misspelled method name is the
    # extension's actual public API.
    return ext.hasRegistredApp(app)
def hasSubmodule(ext, name):
    """Module function for testing if an application has a submodule ending with the given name."""
    return ext.hasSubmodule(name)
class IfElseExtension(command.CommandExtension):
    """
    Allows the if/elif/else statements to control content.
    """

    @staticmethod
    def defaultConfig():
        """Extend the base config with the 'modules' search-path option."""
        config = command.CommandExtension.defaultConfig()
        config['modules'] = (list(), "A list of python modules to search for functions; by default the 'ifelse.py' extension is included. All functions called must accept the extension as the first argument.")
        return config

    def __init__(self, *args, **kwargs):
        command.CommandExtension.__init__(self, *args, **kwargs)

        # List of registered apps, see preExecute
        # NOTE(review): '_registerd' spelling kept; it is used throughout.
        self._registerd_apps = set()
        self._current_app = None

        # Build list of modules for function searching and include this file by default
        self._modules = list()
        self._modules.append(sys.modules[__name__])
        for name in self.get('modules'):
            try:
                self._modules.append(importlib.import_module(name))
            except ImportError as e:
                msg = "Failed to import the supplied '{}' module.\n{}"
                raise exceptions.MooseDocsException(msg, name, e)

    def preExecute(self):
        """Populate a list of registered applications."""
        # Locate the appsyntax extension (if loaded) to obtain the syntax tree.
        syntax = None
        for ext in self.translator.extensions:
            if isinstance(ext, appsyntax.AppSyntaxExtension):
                syntax = ext.syntax
                break
        if syntax is not None:
            for node in moosetree.iterate(syntax):
                self._registerd_apps.update(node.groups())

    def hasRegistredApp(self, name):
        """Helper for the 'hasMooseApp' function.

        NOTE(review): the misspelled name is public API (called from the
        module-level hasMooseApp); kept for compatibility.
        """
        if not self._registerd_apps:
            # Without complete syntax the check is meaningless; disable self.
            msg = "The 'hasMooseApp' function requires the 'appsyntax' extension to have complete syntax, the 'ifelse' extension is being disabled."
            self.setActive(False)
            LOG.warning(msg)
        return name in self._registerd_apps

    def hasSubmodule(self, name):
        """Helper for the 'hasSubmodule' function."""
        # Queries git for all (recursive) submodules of the repository.
        status = mooseutils.git_submodule_info(MooseDocs.ROOT_DIR, '--recursive')
        return any([repo.endswith(name) for repo in status.keys()])

    def extend(self, reader, renderer):
        """Register the if/elif/else markdown commands with the reader."""
        self.requires(command)
        self.addCommand(reader, IfCommand())
        self.addCommand(reader, ElifCommand())
        self.addCommand(reader, ElseCommand())

    def getFunction(self, func_name):
        """Find a function in the list of loaded modules."""
        for mod in self._modules:
            func = getattr(mod, func_name, None)
            if func is not None:
                return func
        msg = "Unable to locate function '{}' in the listed modules, the loaded modules include:\n{}"
        raise exceptions.MooseDocsException(msg, func_name, ' \n'.join([m.__name__ for m in self._modules]))
class IfCommandBase(command.CommandComponent):
    """
    Base for if/elif commands that require the evaluation of a function
    """
    SUBCOMMAND = None

    # Matches "[!]name(args)" anchored at end of line, e.g.
    # "!hasSubmodule('foo')"; a leading '!' negates the result.
    FUNCTION_RE = re.compile(r'(?P<not>!*)(?P<function>\w+)(?P<args>\(.*?\))$', flags=re.MULTILINE|re.UNICODE)

    @staticmethod
    def defaultSettings():
        """Add the required 'function' setting."""
        settings = command.CommandComponent.defaultSettings()
        settings['function'] = (None, "The function---with arguments---to evaluate. This setting is +required+.")
        return settings

    def createTokenHelper(self, parent, info, page):
        """Create the Condition token for an 'if' or 'elif' command.

        Returns (condition, group), where group is the lexer match group
        (inline vs. block) that holds the conditional content.
        """
        group = MarkdownReader.INLINE if MarkdownReader.INLINE in info else MarkdownReader.BLOCK
        command = info['command']
        function = self.settings['function']

        # Must supply 'function'
        if function is None:
            msg = "The 'function' setting is required."
            raise exceptions.MooseDocsException(msg)

        # 'if' creates a statement that contains Condition tokens
        if command == 'if':
            parent = Statement(parent)
        elif parent.children:
            # 'elif' attaches to the Statement created by the leading 'if'.
            parent = parent.children[-1]

        if parent.name != 'Statement':
            msg = "The 'Condition' being created is out of place, it must be in sequence with an " \
                  "an 'if' and 'elif' condition(s)."
            raise exceptions.MooseDocsException(msg)

        condition = Condition(parent, command=command, content=info[group], function=function)
        return condition, group

    def evaluateFunction(self):
        """Helper for evaluating the 'function' setting.

        SECURITY NOTE(review): the argument list is passed to eval();
        markdown sources are assumed to be trusted content.
        """
        # Separate function name from arguments
        function = self.settings['function']
        match = IfCommand.FUNCTION_RE.search(function)
        if match is None:
            msg = "Invalid expression for 'function' setting: {}"
            raise exceptions.MooseDocsException(msg, function)

        # Locate and evaluate the function
        func = self.extension.getFunction(match.group('function'))
        args = eval(match.group('args')[:-1] + ',)') # add trailing ',' to always create tuple
        value = func(self.extension, *args)

        # Require that an actual 'bool' is returned to avoid un-intended operation, for example
        # if a function is returned it would evaluate to True.
        if not isinstance(value, bool):
            msg = "The return value from the function '{}' must be a 'bool' type, but '{}' returned."
            raise exceptions.MooseDocsException(msg, match.group('function'), type(value))

        # NOTE(review): the regex allows repeated '!', but only exactly one
        # '!' negates; '!!func()' and '!!!func()' both yield the raw value.
        return not value if match.group('not') == '!' else value
class IfCommand(IfCommandBase):
    COMMAND = 'if'

    def createToken(self, parent, info, page):
        """Create the leading 'if' Condition; drop its content when False."""
        condition, group = IfCommandBase.createTokenHelper(self, parent, info, page)

        # If the condition is not met, then remove content by setting it to None
        # (reaches into the lexer's name-mangled private match storage).
        if not self.evaluateFunction():
            info._LexerInformation__match[group] = None
        return condition
class ElifCommand(IfCommandBase):
    COMMAND = 'elif'

    def createToken(self, parent, info, page):
        """Create an 'elif' Condition; drop content when an earlier branch
        already matched or this branch's function evaluates False."""
        condition, group = IfCommandBase.createTokenHelper(self, parent, info, page)

        # Condition has already been satisfied if any sibling has content
        satisfied = any(bool(c.children) for c in condition.siblings)

        # If a previous condition is met or this condition is not met, remove content
        # (clears the lexer's name-mangled private match storage).
        if satisfied or not self.evaluateFunction():
            info._LexerInformation__match[group] = None
        return condition
class ElseCommand(command.CommandComponent):
    COMMAND = 'else'
    SUBCOMMAND = None

    def createToken(self, parent, info, page):
        """Create the trailing 'else' Condition of an if/elif/else chain."""
        group = MarkdownReader.INLINE if MarkdownReader.INLINE in info else MarkdownReader.BLOCK

        # Locate the Statement created by the preceding 'if' and its last
        # Condition; both must exist for an 'else' to be legal here.
        statement = parent.children[-1] if len(parent) > 0 else None
        prev = statement.children[-1] if (statement is not None and len(statement) > 0) else None
        if prev is None:
            msg = "The 'else' command must follow an 'if' or 'elif' condition."
            raise exceptions.MooseDocsException(msg)

        # Condition has already been satisfied if any sibling has content
        condition = Condition(parent.children[-1], command=info['command'], content=info[group])
        satisfied = any(bool(c.children) for c in condition.siblings)
        if satisfied:
            # A previous branch matched: drop this branch's content
            # (clears the lexer's name-mangled private match storage).
            info._LexerInformation__match[group] = None
        return condition
| lgpl-2.1 |
CoherentLabs/depot_tools | third_party/logilab/astroid/__pkginfo__.py | 1 | 1761 | # copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of astroid.
#
# astroid is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# astroid is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with astroid. If not, see <http://www.gnu.org/licenses/>.
"""astroid packaging information"""
# Distribution and importable module names.
distname = 'astroid'
modname = 'astroid'

# Version kept as a tuple of ints; the string form is derived from it.
numversion = (1, 2, 1)
version = '.'.join([str(num) for num in numversion])

install_requires = ['logilab-common >= 0.60.0']

# Project metadata consumed by setup.py / packaging tools.
license = 'LGPL'
author = 'Logilab'
author_email = 'python-projects@lists.logilab.org'
mailinglist = "mailto://%s" % author_email
web = 'http://bitbucket.org/logilab/astroid'
description = "rebuild a new abstract syntax tree from Python's ast"

from os.path import join  # NOTE(review): mid-file import kept as-is

# Extra data directories shipped with the package.
include_dirs = ['brain',
                join('test', 'regrtest_data'),
                join('test', 'data'), join('test', 'data2')
                ]

classifiers = ["Topic :: Software Development :: Libraries :: Python Modules",
               "Topic :: Software Development :: Quality Assurance",
               "Programming Language :: Python",
               "Programming Language :: Python :: 2",
               "Programming Language :: Python :: 3",
               ]
| bsd-3-clause |
bearstech/ansible | lib/ansible/modules/network/avi/avi_cloud.py | 46 | 9205 | #!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_cloud
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of Cloud Avi RESTful Object
description:
- This module is used to configure Cloud object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
apic_configuration:
description:
- Apicconfiguration settings for cloud.
apic_mode:
description:
- Boolean flag to set apic_mode.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
aws_configuration:
description:
- Awsconfiguration settings for cloud.
cloudstack_configuration:
description:
- Cloudstackconfiguration settings for cloud.
dhcp_enabled:
description:
- Select the ip address management scheme.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
dns_provider_ref:
description:
- Dns profile for the cloud.
- It is a reference to an object of type ipamdnsproviderprofile.
docker_configuration:
description:
- Dockerconfiguration settings for cloud.
east_west_dns_provider_ref:
description:
- Dns profile for east-west services.
- It is a reference to an object of type ipamdnsproviderprofile.
east_west_ipam_provider_ref:
description:
- Ipam profile for east-west services.
- Warning - please use virtual subnets in this ipam profile that do not conflict with the underlay networks or any overlay networks in the cluster.
- For example in aws and gcp, 169.254.0.0/16 is used for storing instance metadata.
- Hence, it should not be used in this profile.
- It is a reference to an object of type ipamdnsproviderprofile.
enable_vip_static_routes:
description:
- Use static routes for vip side network resolution during virtualservice placement.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
ipam_provider_ref:
description:
- Ipam profile for the cloud.
- It is a reference to an object of type ipamdnsproviderprofile.
license_type:
description:
- If no license type is specified then default license enforcement for the cloud type is chosen.
- The default mappings are container cloud is max ses, openstack and vmware is cores and linux it is sockets.
- Enum options - LIC_BACKEND_SERVERS, LIC_SOCKETS, LIC_CORES, LIC_HOSTS.
linuxserver_configuration:
description:
- Linuxserverconfiguration settings for cloud.
mesos_configuration:
description:
- Mesosconfiguration settings for cloud.
mtu:
description:
- Mtu setting for the cloud.
- Default value when not specified in API or module is interpreted by Avi Controller as 1500.
name:
description:
- Name of the object.
required: true
nsx_configuration:
description:
- Configuration parameters for nsx manager.
- Field introduced in 17.1.1.
obj_name_prefix:
description:
- Default prefix for all automatically created objects in this cloud.
- This prefix can be overridden by the se-group template.
openstack_configuration:
description:
- Openstackconfiguration settings for cloud.
oshiftk8s_configuration:
description:
- Oshiftk8sconfiguration settings for cloud.
prefer_static_routes:
description:
- Prefer static routes over interface routes during virtualservice placement.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
proxy_configuration:
description:
- Proxyconfiguration settings for cloud.
rancher_configuration:
description:
- Rancherconfiguration settings for cloud.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
vca_configuration:
description:
- Vcloudairconfiguration settings for cloud.
vcenter_configuration:
description:
- Vcenterconfiguration settings for cloud.
vtype:
description:
- Cloud type.
- Enum options - CLOUD_NONE, CLOUD_VCENTER, CLOUD_OPENSTACK, CLOUD_AWS, CLOUD_VCA, CLOUD_APIC, CLOUD_MESOS, CLOUD_LINUXSERVER, CLOUD_DOCKER_UCP,
- CLOUD_RANCHER, CLOUD_OSHIFT_K8S.
- Default value when not specified in API or module is interpreted by Avi Controller as CLOUD_NONE.
required: true
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Create a VMware cloud with write access mode
avi_cloud:
username: ''
controller: ''
password: ''
apic_mode: false
dhcp_enabled: true
enable_vip_static_routes: false
license_type: LIC_CORES
mtu: 1500
name: VCenter Cloud
prefer_static_routes: false
tenant_ref: admin
vcenter_configuration:
datacenter_ref: /api/vimgrdcruntime/datacenter-2-10.10.20.100
management_network: /api/vimgrnwruntime/dvportgroup-103-10.10.20.100
password: password
privilege: WRITE_ACCESS
username: user
vcenter_url: 10.10.20.100
vtype: CLOUD_VCENTER
'''
RETURN = '''
obj:
description: Cloud (api/cloud) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
    """Ansible module entry point: create/update/delete an Avi Cloud object."""
    # Argument spec mirrors the Cloud API object; only 'name' and 'vtype'
    # are required, everything else is optional configuration.
    argument_specs = dict(
        state=dict(default='present',
                   choices=['absent', 'present']),
        apic_configuration=dict(type='dict',),
        apic_mode=dict(type='bool',),
        aws_configuration=dict(type='dict',),
        cloudstack_configuration=dict(type='dict',),
        dhcp_enabled=dict(type='bool',),
        dns_provider_ref=dict(type='str',),
        docker_configuration=dict(type='dict',),
        east_west_dns_provider_ref=dict(type='str',),
        east_west_ipam_provider_ref=dict(type='str',),
        enable_vip_static_routes=dict(type='bool',),
        ipam_provider_ref=dict(type='str',),
        license_type=dict(type='str',),
        linuxserver_configuration=dict(type='dict',),
        mesos_configuration=dict(type='dict',),
        mtu=dict(type='int',),
        name=dict(type='str', required=True),
        nsx_configuration=dict(type='dict',),
        obj_name_prefix=dict(type='str',),
        openstack_configuration=dict(type='dict',),
        oshiftk8s_configuration=dict(type='dict',),
        prefer_static_routes=dict(type='bool',),
        proxy_configuration=dict(type='dict',),
        rancher_configuration=dict(type='dict',),
        tenant_ref=dict(type='str',),
        url=dict(type='str',),
        uuid=dict(type='str',),
        vca_configuration=dict(type='dict',),
        vcenter_configuration=dict(type='dict',),
        vtype=dict(type='str', required=True),
    )
    # Mix in the shared Avi connection options (controller, credentials, ...).
    argument_specs.update(avi_common_argument_spec())
    module = AnsibleModule(
        argument_spec=argument_specs, supports_check_mode=True)
    if not HAS_AVI:
        # The Avi SDK import failed at module load time; report a clear error.
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    # Delegate the actual CRUD handling to the generic Avi API helper.
    return avi_ansible_api(module, 'cloud',
                           set([]))


if __name__ == '__main__':
    main()
| gpl-3.0 |
diagramsoftware/bank-payment | account_banking_mandate/models/payment_line.py | 4 | 2623 | # -*- coding: utf-8 -*-
# © 2014 Compassion CH - Cyril Sester <csester@compassion.ch>
# © 2014 Serv. Tecnol. Avanzados - Pedro M. Baeza
# © 2015 Akretion - Alexis de Lattre <alexis.delattre@akretion.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import models, fields, api, exceptions, _
class PaymentLine(models.Model):
    """payment.line extension adding SEPA direct-debit mandate support."""
    _inherit = 'payment.line'

    # Mandate backing this debit line; only 'valid' mandates may be picked.
    mandate_id = fields.Many2one(
        comodel_name='account.banking.mandate', string='Direct Debit Mandate',
        domain=[('state', '=', 'valid')])

    @api.model
    def create(self, vals=None):
        """If the customer invoice has a mandate, take it
        otherwise, take the first valid mandate of the bank account
        """
        if vals is None:
            vals = {}
        partner_bank_id = vals.get('bank_id')
        move_line_id = vals.get('move_line_id')
        # Mandate auto-selection only applies to debit orders where the
        # caller did not already choose a mandate.
        if (self.env.context.get('search_payment_order_type') == 'debit' and
                'mandate_id' not in vals):
            if move_line_id:
                line = self.env['account.move.line'].browse(move_line_id)
                # Prefer the mandate attached to the customer invoice.
                if (line.invoice and line.invoice.type == 'out_invoice' and
                        line.invoice.mandate_id):
                    vals.update({
                        'mandate_id': line.invoice.mandate_id.id,
                        'bank_id': line.invoice.mandate_id.partner_bank_id.id,
                    })
            # Fall back to the first valid mandate of the bank account.
            if partner_bank_id and 'mandate_id' not in vals:
                mandates = self.env['account.banking.mandate'].search(
                    [('partner_bank_id', '=', partner_bank_id),
                     ('state', '=', 'valid')])
                if mandates:
                    vals['mandate_id'] = mandates[0].id
        return super(PaymentLine, self).create(vals)

    @api.one
    @api.constrains('mandate_id', 'bank_id')
    def _check_mandate_bank_link(self):
        # The chosen mandate must belong to the chosen bank account.
        if (self.mandate_id and self.bank_id and
                self.mandate_id.partner_bank_id.id !=
                self.bank_id.id):
            raise exceptions.Warning(
                _("The payment line with reference '%s' has the bank account "
                  "'%s' which is not attached to the mandate '%s' (this "
                  "mandate is attached to the bank account '%s').") %
                (self.name,
                 self.env['res.partner.bank'].name_get(
                     [self.bank_id.id])[0][1],
                 self.mandate_id.unique_mandate_reference,
                 self.env['res.partner.bank'].name_get(
                     [self.mandate_id.partner_bank_id.id])[0][1]))
| agpl-3.0 |
Khan/rbtools | rbtools/clients/__init__.py | 1 | 6084 | import logging
import sys
from rbtools.utils.process import die
# The clients are lazy loaded via load_scmclients()
SCMCLIENTS = None
class SCMClient(object):
    """
    A base representation of an SCM tool for fetching repository information
    and generating diffs.

    Subclasses override the query/diff methods for a concrete SCM; the base
    implementations return "not supported" values (None).
    """
    def __init__(self, user_config=None, configs=None, options=None):
        """Store the user/system config dicts and parsed command options.

        BUGFIX: ``configs`` previously defaulted to ``[]``, a shared mutable
        default argument, so mutations on one instance's ``configs`` leaked
        into every other instance. A fresh list is now created per instance.
        """
        self.user_config = user_config
        self.configs = configs if configs is not None else []
        self.options = options

    def get_repository_info(self):
        """Return repository info for the current checkout, or None if this
        client does not recognize the working directory."""
        return None

    def check_options(self):
        """Validate client-specific command-line options (no-op by default)."""
        pass

    def scan_for_server(self, repository_info):
        """
        Scans the current directory on up to find a .reviewboard file
        containing the server path.

        The user config is consulted first, then each additional config in
        order; the first config yielding a server URL wins.
        """
        server_url = None

        if self.user_config:
            server_url = self._get_server_from_config(self.user_config,
                                                      repository_info)

        if not server_url:
            for config in self.configs:
                server_url = self._get_server_from_config(config,
                                                          repository_info)
                if server_url:
                    break

        return server_url

    def diff(self, args):
        """
        Returns the generated diff and optional parent diff for this
        repository.

        The returned tuple is (diff_string, parent_diff_string)
        """
        return (None, None)

    def diff_between_revisions(self, revision_range, args, repository_info):
        """
        Returns the generated diff between revisions in the repository.
        """
        return (None, None)

    def _get_server_from_config(self, config, repository_info):
        """Extract a Review Board server URL from one config dict.

        Supports a flat REVIEWBOARD_URL entry, or a TREES mapping of
        repository path -> {'REVIEWBOARD_URL': ...}. Returns None when the
        config names no server for this repository.
        """
        if 'REVIEWBOARD_URL' in config:
            return config['REVIEWBOARD_URL']
        elif 'TREES' in config:
            trees = config['TREES']
            if not isinstance(trees, dict):
                die("Warning: 'TREES' in config file is not a dict!")

            # If repository_info is a list, check if any one entry is in trees.
            path = None
            if isinstance(repository_info.path, list):
                for path in repository_info.path:
                    if path in trees:
                        break
                else:
                    path = None
            elif repository_info.path in trees:
                path = repository_info.path

            if path and 'REVIEWBOARD_URL' in trees[path]:
                return trees[path]['REVIEWBOARD_URL']

        return None
class RepositoryInfo(object):
    """Describes a checkout of a source code repository.

    Holds the repository path, the base path of the checkout within the
    repository, and capability flags describing optional SCM features.
    """
    def __init__(self, path=None, base_path=None, supports_changesets=False,
                 supports_parent_diffs=False, supports_updating_commits=False):
        self.path = path
        self.base_path = base_path
        self.supports_changesets = supports_changesets
        self.supports_parent_diffs = supports_parent_diffs
        self.supports_updating_commits = supports_updating_commits
        logging.debug("repository info: %s", self)

    def __str__(self):
        details = (self.path, self.base_path, self.supports_changesets)
        return "Path: %s, Base path: %s, Supports changesets: %s" % details

    def set_base_path(self, base_path):
        """Replace the base path, normalizing it to start with a slash."""
        normalized = base_path if base_path.startswith('/') else '/' + base_path
        logging.debug("changing repository info base_path from %s to %s",
                      self.base_path, normalized)
        self.base_path = normalized

    def find_server_repository_info(self, server):
        """
        Try to find the repository from the list of repositories on the server.

        For Subversion, this could be a repository with a different URL. For
        all other clients, this is a noop.
        """
        return self
def load_scmclients(options):
    """Instantiate every known SCM client into the global SCMCLIENTS list.

    The client modules are imported here (rather than at module load time)
    so importing this package stays cheap and optional backends are only
    pulled in when actually needed.
    """
    global SCMCLIENTS

    from rbtools.clients.clearcase import ClearCaseClient
    from rbtools.clients.cvs import CVSClient
    from rbtools.clients.git import GitClient
    from rbtools.clients.mercurial import MercurialClient
    from rbtools.clients.perforce import PerforceClient
    from rbtools.clients.plastic import PlasticClient
    from rbtools.clients.svn import SVNClient

    SCMCLIENTS = [
        CVSClient(options=options),
        ClearCaseClient(options=options),
        GitClient(options=options),
        MercurialClient(options=options),
        PerforceClient(options=options),
        PlasticClient(options=options),
        SVNClient(options=options),
    ]
def scan_usable_client(options):
    """Find the first SCM client that recognizes the working directory.

    Returns (repository_info, tool) on success; exits the process with an
    error message when no client matches or incompatible options were used.
    NOTE(review): this module uses Python 2 print statements.
    """
    from rbtools.clients.perforce import PerforceClient

    repository_info = None
    tool = None

    if SCMCLIENTS is None:
        load_scmclients(options)

    # Try to find the SCM Client we're going to be working with.
    for tool in SCMCLIENTS:
        repository_info = tool.get_repository_info()
        if repository_info:
            break

    if not repository_info:
        if options.repository_url:
            print "No supported repository could be accessed at the supplied "\
                  "url."
        else:
            print "The current directory does not contain a checkout from a"
            print "supported source code repository."
        sys.exit(1)

    # Verify that options specific to an SCM Client have not been mis-used.
    if options.change_only and not repository_info.supports_changesets:
        sys.stderr.write("The --change-only option is not valid for the "
                         "current SCM client.\n")
        sys.exit(1)

    if options.parent_branch and not repository_info.supports_parent_diffs:
        sys.stderr.write("The --parent option is not valid for the "
                         "current SCM client.\n")
        sys.exit(1)

    # --p4-* options only make sense when Perforce was detected.
    if ((options.p4_client or options.p4_port) and
        not isinstance(tool, PerforceClient)):
        sys.stderr.write("The --p4-client and --p4-port options are not valid "
                         "for the current SCM client.\n")
        sys.exit(1)

    return (repository_info, tool)
| mit |
weinbe58/QuSpin | docs/downloads/ce24628b1934b1aacd9f1ac29dfbc30e/example14.py | 3 | 3789 | from __future__ import print_function, division
#
import sys,os
os.environ['KMP_DUPLICATE_LIB_OK']='True' # uncomment this line if omp error occurs on OSX for python 3
os.environ['OMP_NUM_THREADS']='1' # set number of OpenMP threads to run in parallel
os.environ['MKL_NUM_THREADS']='1' # set number of MKL threads to run in parallel
#
quspin_path = os.path.join(os.getcwd(),"../../")
sys.path.insert(0,quspin_path)
###########################################################################
# example 14 #
# In this script we demonstrate how to use the user_basis to construct #
# a spin-1/2 Hamiltonian on a restricted Hilbert space where a spin-up #
# on a given lattice site must be preceded and succeeded by a spin-down. #
###########################################################################
from quspin.operators import hamiltonian
from quspin.basis import spin_basis_1d # Hilbert space spin basis_1d
from quspin.basis.user import user_basis # Hilbert space user basis
from quspin.basis.user import pre_check_state_sig_32,op_sig_32,map_sig_32 # user basis data types
from numba import carray,cfunc # numba helper functions
from numba import uint32,int32 # numba data types
import numpy as np
#
N = 10 # total number of lattice sites
#
###### function to call when applying operators
@cfunc(op_sig_32, locals=dict(s=int32,b=uint32))
def op(op_struct_ptr,op_str,ind,N,args):
    """Numba-compiled kernel applying a single-site Pauli operator.

    Mutates op_struct (state bits and matrix element) in place; returns
    0 on success and -1 for an unrecognised operator character.
    """
    # using struct pointer to pass op_struct_ptr back to C++ see numba Records
    op_struct = carray(op_struct_ptr,1)[0]
    err = 0
    ind = N - ind - 1 # convention for QuSpin for mapping from bits to sites.
    s = (((op_struct.state>>ind)&1)<<1)-1 # spin value at the site: -1 or +1
    b = (1<<ind) # bit mask selecting site `ind`
    #
    if op_str==120: # "x" is integer value 120 (check with ord("x"))
        op_struct.state ^= b
    elif op_str==121: # "y" is integer value 121 (check with ord("y"))
        op_struct.state ^= b
        op_struct.matrix_ele *= 1.0j*s
    elif op_str==122: # "z" is integer value 122 (check with ord("z"))
        op_struct.matrix_ele *= s
    else:
        op_struct.matrix_ele = 0
        err = -1
    #
    return err
#
op_args=np.array([],dtype=np.uint32)
#
###### function to filter states/project states out of the basis
#
@cfunc(pre_check_state_sig_32,
    locals=dict(s_shift_left=uint32,s_shift_right=uint32), )
def pre_check_state(s,N,args):
    """ imposes that a bit with 1 must be preceded and followed by 0,
    i.e. a particle on a given site must have empty neighboring sites.
    #
    Works only for lattices of up to N=32 sites (otherwise, change mask)
    #
    """
    mask = (0xffffffff >> (32 - N)) # works for lattices of up to 32 sites
    # cycle bits left by 1 periodically
    s_shift_left = (((s << 1) & mask) | ((s >> (N - 1)) & mask))
    #
    # cycle bits right by 1 periodically
    s_shift_right = (((s >> 1) & mask) | ((s << (N - 1)) & mask))
    #
    # keep the state only if no occupied site has an occupied neighbor
    return (((s_shift_right|s_shift_left)&s))==0
#
pre_check_state_args=None
#
###### construct user_basis
# define maps dict
maps = dict() # no symmetries to apply.
# define op_dict
op_dict = dict(op=op,op_args=op_args)
# define pre_check_state
pre_check_state=(pre_check_state,pre_check_state_args) # None gives a null pinter to args
# create user basis
basis = user_basis(np.uint32,N,op_dict,allowed_ops=set("xyz"),sps=2,
pre_check_state=pre_check_state,Ns_block_est=300000,**maps)
# print basis
print(basis)
#
###### construct Hamiltonian
# site-coupling lists
h_list = [[1.0,i] for i in range(N)]
# operator string lists
static = [["x",h_list],]
# compute Hamiltonian, no checks have been implemented
no_checks=dict(check_symm=False, check_pcon=False, check_herm=False)
H = hamiltonian(static,[],basis=basis,dtype=np.float64,**no_checks) | bsd-3-clause |
gnmathur/aMAZEd | binary_tree.py | 1 | 2219 | """
Maze generation using the binary tree algorithm
MIT License
Copyright (c) 2017 Gaurav Mathur
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from cell import Cell
from grid import Grid
import random
class BinaryTreeMaze:
    """
    Implements a 'perfect' maze using the binary tree algorithm. It has two
    major biases:
      a. There is always a 'diagonal' path from the south-west starting point
         to the north-east end.
      b. The north-most row and the east-most column are always unbroken
         corridors.
    """
    def __init__(self):
        pass

    @staticmethod
    def create(grid):
        """Carve passages into `grid` in place.

        For every cell, link it to one randomly chosen neighbor among its
        northern and eastern neighbors (when at least one exists).
        """
        for cell in grid.each_cell():
            # Candidate neighbors this algorithm is allowed to link to.
            neighbors = []
            if cell.cellNorth is not None:
                neighbors.append(cell.cellNorth)
            if cell.cellEast is not None:
                neighbors.append(cell.cellEast)
            if neighbors:
                # random.choice is the idiomatic equivalent of indexing
                # with randint(0, len(neighbors) - 1).
                cell.link(random.choice(neighbors))
if __name__ == "__main__":
    # int(input(...)) works on both Python 2 and 3 and, unlike the bare
    # Python 2 input(), does not eval() arbitrary user input.
    nRows = int(input("Enter number of rows: "))
    nColumns = int(input("Enter number of columns: "))
    g = Grid(nRows, nColumns)
    BinaryTreeMaze.create(g)
    # print(single_arg) is valid in both Python 2 and 3.
    print(g)
| mit |
aqavi-paracha/coinsbazar | contrib/bitrpc/bitrpc.py | 1 | 7841 | from jsonrpc import ServiceProxy
import sys
import string

# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======

# Connect to the local bitcoind JSON-RPC endpoint, embedding credentials
# in the URL only when a password was configured above.
if rpcpass == "":
    access = ServiceProxy("http://127.0.0.1:8332")
else:
    access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:8332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a CoinsBazar address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a CoinsBazar address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported" | mit |
aps-sids/ansible-modules-extras | clustering/consul_acl.py | 43 | 9694 | #!/usr/bin/python
#
# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
module: consul_acl
short_description: "manipulate consul acl keys and rules"
description:
- allows the addition, modification and deletion of ACL keys and associated
rules in a consul cluster via the agent. For more details on using and
configuring ACLs, see https://www.consul.io/docs/internals/acl.html.
requirements:
- "python >= 2.6"
- python-consul
- pyhcl
- requests
version_added: "2.0"
author: "Steve Gargan (@sgargan)"
options:
mgmt_token:
description:
- a management token is required to manipulate the acl lists
state:
description:
- whether the ACL pair should be present or absent, defaults to present
required: false
choices: ['present', 'absent']
type:
description:
- the type of token that should be created, either management or
client, defaults to client
choices: ['client', 'management']
name:
description:
- the name that should be associated with the acl key, this is opaque
to Consul
required: false
token:
description:
- the token key indentifying an ACL rule set. If generated by consul
this will be a UUID.
required: false
rules:
description:
- an list of the rules that should be associated with a given key/token.
required: false
host:
description:
- host of the consul agent defaults to localhost
required: false
default: localhost
port:
description:
- the port on which the consul agent is running
required: false
default: 8500
"""
EXAMPLES = '''
- name: create an acl token with rules
consul_acl:
mgmt_token: 'some_management_acl'
host: 'consul1.mycluster.io'
name: 'Foo access'
rules:
- key: 'foo'
policy: read
- key: 'private/foo'
policy: deny
- name: remove a token
consul_acl:
mgmt_token: 'some_management_acl'
host: 'consul1.mycluster.io'
token: '172bd5c8-9fe9-11e4-b1b0-3c15c2c9fd5e'
state: absent
'''
import sys
try:
import consul
from requests.exceptions import ConnectionError
python_consul_installed = True
except ImportError, e:
python_consul_installed = False
try:
import hcl
pyhcl_installed = True
except ImportError:
pyhcl_installed = False
from requests.exceptions import ConnectionError
def execute(module):
state = module.params.get('state')
if state == 'present':
update_acl(module)
else:
remove_acl(module)
def update_acl(module):
rules = module.params.get('rules')
state = module.params.get('state')
token = module.params.get('token')
token_type = module.params.get('token_type')
mgmt = module.params.get('mgmt_token')
name = module.params.get('name')
consul = get_consul_api(module, mgmt)
changed = False
try:
if token:
existing_rules = load_rules_for_token(module, consul, token)
supplied_rules = yml_to_rules(module, rules)
print existing_rules
print supplied_rules
changed = not existing_rules == supplied_rules
if changed:
y = supplied_rules.to_hcl()
token = consul.acl.update(
token,
name=name,
type=token_type,
rules=supplied_rules.to_hcl())
else:
try:
rules = yml_to_rules(module, rules)
if rules.are_rules():
rules = rules.to_json()
else:
rules = None
token = consul.acl.create(
name=name, type=token_type, rules=rules)
changed = True
except Exception, e:
module.fail_json(
msg="No token returned, check your managment key and that \
the host is in the acl datacenter %s" % e)
except Exception, e:
module.fail_json(msg="Could not create/update acl %s" % e)
module.exit_json(changed=changed,
token=token,
rules=rules,
name=name,
type=token_type)
def remove_acl(module):
state = module.params.get('state')
token = module.params.get('token')
mgmt = module.params.get('mgmt_token')
consul = get_consul_api(module, token=mgmt)
changed = token and consul.acl.info(token)
if changed:
token = consul.acl.destroy(token)
module.exit_json(changed=changed, token=token)
def load_rules_for_token(module, consul_api, token):
try:
rules = Rules()
info = consul_api.acl.info(token)
if info and info['Rules']:
rule_set = to_ascii(info['Rules'])
for rule in hcl.loads(rule_set).values():
for key, policy in rule.iteritems():
rules.add_rule(Rule(key, policy['policy']))
return rules
except Exception, e:
module.fail_json(
msg="Could not load rule list from retrieved rule data %s, %s" % (
token, e))
return json_to_rules(module, loaded)
def to_ascii(unicode_string):
if isinstance(unicode_string, unicode):
return unicode_string.encode('ascii', 'ignore')
return unicode_string
def yml_to_rules(module, yml_rules):
rules = Rules()
if yml_rules:
for rule in yml_rules:
if not('key' in rule or 'policy' in rule):
module.fail_json(msg="a rule requires a key and a policy.")
rules.add_rule(Rule(rule['key'], rule['policy']))
return rules
template = '''key "%s" {
policy = "%s"
}'''
class Rules:
def __init__(self):
self.rules = {}
def add_rule(self, rule):
self.rules[rule.key] = rule
def are_rules(self):
return len(self.rules) > 0
def to_json(self):
rules = {}
for key, rule in self.rules.iteritems():
rules[key] = {'policy': rule.policy}
return json.dumps({'keys': rules})
def to_hcl(self):
rules = ""
for key, rule in self.rules.iteritems():
rules += template % (key, rule.policy)
return to_ascii(rules)
def __eq__(self, other):
if not (other or isinstance(other, self.__class__)
or len(other.rules) == len(self.rules)):
return False
for name, other_rule in other.rules.iteritems():
if not name in self.rules:
return False
rule = self.rules[name]
if not (rule and rule == other_rule):
return False
return True
def __str__(self):
return self.to_hcl()
class Rule:
def __init__(self, key, policy):
self.key = key
self.policy = policy
def __eq__(self, other):
return (isinstance(other, self.__class__)
and self.key == other.key
and self.policy == other.policy)
def __hash__(self):
return hash(self.key) ^ hash(self.policy)
def __str__(self):
return '%s %s' % (self.key, self.policy)
def get_consul_api(module, token=None):
if not token:
token = token = module.params.get('token')
return consul.Consul(host=module.params.get('host'),
port=module.params.get('port'),
token=token)
def test_dependencies(module):
if not python_consul_installed:
module.fail_json(msg="python-consul required for this module. "\
"see http://python-consul.readthedocs.org/en/latest/#installation")
if not pyhcl_installed:
module.fail_json( msg="pyhcl required for this module."\
" see https://pypi.python.org/pypi/pyhcl")
def main():
argument_spec = dict(
mgmt_token=dict(required=True),
host=dict(default='localhost'),
name=dict(required=False),
port=dict(default=8500, type='int'),
rules=dict(default=None, required=False, type='list'),
state=dict(default='present', choices=['present', 'absent']),
token=dict(required=False),
token_type=dict(
required=False, choices=['client', 'management'], default='client')
)
module = AnsibleModule(argument_spec, supports_check_mode=False)
test_dependencies(module)
try:
execute(module)
except ConnectionError, e:
module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
module.params.get('host'), module.params.get('port'), str(e)))
except Exception, e:
module.fail_json(msg=str(e))
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
JohnUrban/squiggler | squiggler/dna_replication_functions.py | 1 | 15698 | #
import numpy as np
from hmm import posterior_decoding, prob_data, compare_statepath, max_and_index
'''
3 state HMM Notes
In each emission matrix, the emissions for A,C, and G should be the same.
It is the emissions for T, T1, and T2 that change.
In the unlabeled state, it is predominantly T.
In label 1 state, it is predominantly T with an increase of T1 up to ~10% of Ts.
In label 2 state, it is predominantly T with an increase of T2 up to ~10% of Ts.
---
The transitions from/to the 3 states:
In unlabeled state, self-to-self should be quite high and self-to-other should be equivalent for label1 and label2.
--> In the big picture, one is equally as likely to be 5' to the label 1 state as to the label 2 state.
--> active origins ~200kb apart on average in yeast (although all origins are 35kb apart) [[Lengronne et al, NAR, 2001, "Monitoring S phase ..."]]
--> When randomly sampling in between active origins, on average you will be 100kb 5' to next active origin
--> However, the label from the fork from that origin is equally likely to be anywhere within that 100 kb
--> Thus, on average one is ~50kb away from label
--> So starting in the unlabeled state, the probability of seeing a labeled state is 1/50kb
--> i.e. prob of not seeing label and staying in unlabeled state is 49.999kb/50kb
--> self-to-self = 0.99998
--> self to either label = 0.00002
--> self to specific label = 0.00002/2 = 0.00001
In label 1 (or 2), self-to-self should be high (but not as high as unlabel-to-unlabel since these are shorter stretches)
--> prob of staying in label is proportional to mean length of stretch that is labeled (which is proportional to labeling time)
--> Assume 1000 bp labels
--> Then self-to-self = 999/1000 = 0.999
--> And self-to-other = 0.001
--> And self-to-specific-other = 0.001/2 = 0.0005
---
The init:
--> over-estimating ~400 replication forks in the genome
---(means ~every other origin is used at 2 forks per origin (200 origins used))
--> assuming 100% of cells in S-phase
--> assuming 1000 bp labeled per label per fork * 2 labeling periods = 2000 bp labeled per fork
-- which is 4000 bp label per activated origin (2 forks)
--> 200 origins * 2 forks/origin * 2000 label/fork = 800,000; 100*0.8e6/12e6 = 6.667% of DNA would be labeled 3.337% per label)
--> assuming only 30% of cells in S-phase, then 0.3* 6.667 = 2% of DNA would have label (1% per label)
--> For development, I will just assume 1% of DNA for each label
'''
# 3-state HMM parameters (state 0 = unlabeled, 1 = label-1, 2 = label-2)
# derived from the estimates in the module docstring above.
init = np.array([0.8,0.1,0.1])
# Rows are from-state, columns are to-state; self-transitions dominate.
tran = np.array([[0.99998,0.00001,0.00001],[0.0005,0.999,0.0005],[0.0005,0.0005,0.999]])
# Emission columns: A, C, G, T, T1 (written 'X'), T2 (written 'Y').
emis = np.array([[0.25,0.25,0.25,0.24,0.005,0.005],[0.25,0.25,0.25,0.22,0.025,0.005],[0.25,0.25,0.25,0.22,0.005,0.025]])
## if change above, remember to copy/paste new values into viterbi_test
def gen_labeled_seq_from_reference(inseq, emis, init, tran):
    '''Walk the HMM along a reference sequence, relabeling its Ts.

    inseq is a DNA sequence -- assumes only A,C,G,T are present.
    init is the initial prob matrix of starting in s1,s2,s3 (the 3 states)
    --> a list (or np array) of length 3 that sums to 1
    emis are emission probabilities (A,C,G,T,T1,T2) for the 3 states
    --> a 3x6 array whose rows sum to 1
    tran is the transition matrix: from row-state to column-state (3x3)

    Returns (outseq, statepath). Note: T1 is written as "X" and T2 as "Y",
    so outseq is composed of A,C,G,T,X,Y.
    '''
    statenums = range(len(init))
    # Per-state probabilities over (T, T1, T2) only, renormalized to 1.
    label_emits = emis[:, 3:] / emis[:, 3:].sum(axis=1)[:, None]
    # Ensure uppercase so the 'T' comparison below matches.
    inseq = inseq.upper()
    outseq = ''
    current_state = np.random.choice(statenums, p=init)
    statepath = []
    for b in inseq:
        statepath.append(current_state)
        # BUG FIX: the original used `b is 'T'`, an identity comparison that
        # only works because CPython interns short strings; use equality.
        if b == 'T':
            # Only Ts can carry a label: stay T or become X (T1) / Y (T2).
            outseq += np.random.choice(['T', 'X', 'Y'],
                                       p=label_emits[current_state, :])
        else:
            outseq += b
        current_state = np.random.choice(statenums, p=tran[current_state, :])
    return outseq, statepath
def gen_labeled_seq_denovo(length, emis, init, tran):
    '''Sample `length` symbols (A,C,G,T,X,Y) and the hidden state path
    directly from the HMM parameters (no reference sequence needed).'''
    alphabet = ["A", "C", "G", "T", "X", "Y"]
    n_states = range(len(init))
    # Draw the starting state, then alternate emit/transition.
    state = np.random.choice(n_states, p=init)
    symbols = []
    statepath = []
    for _ in range(length):
        statepath.append(state)
        symbols.append(np.random.choice(alphabet, p=emis[state, :]))
        state = np.random.choice(n_states, p=tran[state, :])
    return ''.join(symbols), statepath
def gen_labeled_seq(emis, init, tran, inseq=None, length=None):
    '''Normalize the HMM parameters and generate a labeled sequence.

    Either inseq or length must be defined; when both are given, inseq
    takes precedence and length is ignored.
    '''
    assert inseq or length
    # Work on float copies whose rows form proper probability distributions.
    emis = emis.astype(float)
    emis = emis / emis.sum(axis=1)[:, None]
    init = init.astype(float)
    init = init / init.sum()
    tran = tran.astype(float)
    tran = tran / tran.sum(axis=1)[:, None]
    if inseq:
        return gen_labeled_seq_from_reference(inseq, emis, init, tran)
    return gen_labeled_seq_denovo(length, emis, init, tran)
def nt_counts(inseq):
    """Count each of the six symbols in inseq (zero counts included)."""
    counts = {symbol: 0 for symbol in "ACGTXY"}
    for symbol in inseq:
        counts[symbol] += 1
    return counts
def state_counts(statepath):
    """Count occurrences of each hidden state (0, 1, 2) in statepath."""
    counts = {0: 0, 1: 0, 2: 0}
    for state in statepath:
        counts[state] += 1
    return counts
def nt_proportions(inseq):
    """Return the fraction of inseq made up of each of A,C,G,T,X,Y.

    An empty sequence yields all-zero proportions instead of raising
    ZeroDivisionError (the original divided by len(inseq) unconditionally).
    """
    bases = nt_counts(inseq)
    if not inseq:
        # All counts are already 0; nothing to normalize.
        return bases
    seqlen = float(len(inseq))
    for key in bases.keys():
        bases[key] = bases[key] / seqlen
    return bases
def test_gen_labeled_seq(inseq=None, length=None):
    """Smoke-test gen_labeled_seq using the module-level HMM parameters.

    Returns (symbol proportions of the generated sequence, state counts of
    the generated state path).
    """
    assert inseq or length
    if inseq:
        seq, statepath = gen_labeled_seq(emis, init, tran, inseq=inseq)
    else:
        seq, statepath = gen_labeled_seq(emis, init, tran, length=length)
    return nt_proportions(seq), state_counts(statepath)
def nt2intdict():
    """Map each emitted symbol to its column index in the emission matrix."""
    return dict(zip("ACGTXY", range(6)))
def forward_seq(emis, tran, init, emitted_seq, num_states = 3, num_emits=None, nt2int = nt2intdict()):
    """Scaled HMM forward algorithm over a symbol string.

    Returns (Forward, scalefactors): Forward[s, k] is the rescaled forward
    probability of state s at position k; scalefactors row 0 holds each
    column's scale factor and row 1 the cumulative log of those factors,
    so scalefactors[1, -1] is the log-likelihood of emitted_seq.
    """
    ## t, e, and i are np.matrix objects
    states = range(num_states)  # NOTE(review): unused -- kept for parity
    if num_emits == None:
        num_emits = len(emitted_seq)
    Forward = np.zeros([num_states,num_emits])
    scalefactors = np.zeros([2,num_emits])
    # initial step: start distribution times emission of the first symbol
    Forward[:, 0] = np.multiply(init,emis[:,nt2int[emitted_seq[0]]])
    ## scale to prevent underflow -- keep track of scaling
    scalefactors[0,0] = sum(Forward[:,0])
    scalefactors[1,0] = np.log(scalefactors[0,0])
    Forward[:,0] = Forward[:,0]/scalefactors[0,0]
    ## iterate: propagate through the transition matrix, then rescale
    for k in range(1, num_emits):
        emit = emis[:,nt2int[emitted_seq[k]]]
        Forward[:,k] = np.multiply(emit, np.dot(Forward[:,k-1],tran))
        scalefactors[0,k] = sum(Forward[:,k])
        scalefactors[1,k] = np.log(scalefactors[0,k]) + scalefactors[1,k-1]
        Forward[:,k] = Forward[:,k]/scalefactors[0,k]
    return Forward, scalefactors
def backward_seq(emis, tran, init, emitted_seq, num_states = 3, num_emits=None, nt2int = nt2intdict()):
    """Scaled HMM backward algorithm over a symbol string.

    Returns (Backward, scalefactors): Backward[s, k] is the rescaled
    backward probability of state s at position k; scalefactors row 0 holds
    each column's scale factor and row 1 the cumulative log of the factors
    accumulated from the sequence end toward position k.
    """
    ## t, e, and i are np.matrix objects
    states = range(num_states)  # NOTE(review): unused -- kept for parity
    if num_emits == None:
        num_emits = len(emitted_seq)
    Backward = np.zeros([num_states,num_emits])
    scalefactors = np.zeros([2,num_emits])
    end = num_emits - 1
    # initial step: backward probabilities at the last position are 1
    Backward[:, end] = 1
    ## scale to prevent underflow -- keep track of scaling
    scalefactors[0,end] = sum(Backward[:,end])
    scalefactors[1,end] = np.log(scalefactors[0,end])
    Backward[:,end] = Backward[:,end]/scalefactors[0,end]
    ## iterate from the penultimate position back to 0
    for k in range(end-1, -1, -1):
        emit = emis[:,nt2int[emitted_seq[k+1]]] #ep.pdf(emitted_data[k+1])
        a = np.multiply(Backward[:,k+1], emit).transpose()
        Backward[:,k] = np.dot(tran, a).transpose()
        scalefactors[0,k] = sum(Backward[:,k])
        scalefactors[1,k] = np.log(scalefactors[0,k]) + scalefactors[1,k+1]
        Backward[:,k] = Backward[:,k]/scalefactors[0,k]
    return Backward, scalefactors
def viterbi_seq(emis, tran, init, emitted_seq, num_states=3, num_emits=None, logprobs=False, nt2int = nt2intdict()):
    """Viterbi decoding in log space.

    Returns (viterbi_path, viterbi_prob): the most probable state path for
    emitted_seq and its log probability. Set logprobs=True if init/tran are
    already log-transformed. Relies on max_and_index (imported from hmm) to
    pick the best predecessor per state.
    """
    # Silence log(0) warnings from zero-probability entries.
    np.seterr(divide='ignore')
    states = range(num_states)  # NOTE(review): unused -- kept for parity
    if num_emits == None:
        num_emits = len(emitted_seq)
    if not logprobs:
        init = np.log(init)
        tran = np.log(tran)
    pointer = np.zeros([num_emits, num_states])
    Viterbi = np.zeros([num_states, num_emits])
    ## need to add log_probs instead of multiply probs to prevent underflow
    Viterbi[:,0] = init + np.log(emis[:, nt2int[emitted_seq[0]]])
    pointer[0,:] = 1
    for j in range(1,num_emits):
        # selection[s, s'] = score of being in s' at j-1 and moving to s.
        selection = Viterbi[:,j-1] + tran.transpose()
        maxstates = np.apply_along_axis(max_and_index, 1, selection)
        Viterbi[:,j] = np.log(emis[:,nt2int[emitted_seq[j]]]) + maxstates[:,1]
        pointer[j,:] = maxstates[:,0]
    end = num_emits - 1
    # path init: start the traceback from the best final state
    viterbi_path = np.zeros(num_emits).astype(int)
    viterbi_path[end] = Viterbi[:,end].argmax()
    # prob of the best path
    viterbi_prob = Viterbi[viterbi_path[end], end]
    # path iter: follow back-pointers from the end to the start
    for j in range(end,0,-1):
        viterbi_path[j-1] = pointer[j,viterbi_path[j]]
    return viterbi_path, viterbi_prob
def generate_tran(num_states=3, high_self2self=True, self2self_factor=15, equal_other=True):
    """Draw a random row-stochastic transition matrix.

    With high_self2self, each diagonal entry is boosted by self2self_factor;
    with equal_other, all off-diagonal entries of a row are first flattened
    to a single randomly chosen entry of that row before the boost is
    written back. Rows are normalized to sum to 1 (1e-100 avoids zero rows).
    """
    tran = np.zeros([num_states,num_states])
    for i in range(num_states):
        tran[i,:] = np.random.randint(0,100,num_states)
    if high_self2self:
        for i in range(num_states):
            # Remember the boosted self-transition before flattening the row.
            s2s = tran[i,i]*self2self_factor
            if equal_other:
                tran[i,:] = tran[i,np.random.choice(num_states)]
            tran[i,i] = s2s
    tran = tran+1e-100
    tran = tran/tran.sum(axis=1)[:,None]
    return tran
def generate_init(num_states=3):
    """Draw a random initial-state distribution of length num_states."""
    weights = np.random.randint(0, 100, num_states).astype(float)
    # Tiny offset prevents an all-zero draw from producing a 0/0 row.
    weights = weights + 1e-100
    return weights / weights.sum()
def generate_emis(num_states=3, num_symbols=6, keep_ACG_constant=True, ensure_each_T_has_max=True):
    """Draw a random row-stochastic emission matrix (A,C,G,T,T1,T2).

    keep_ACG_constant copies one random row's A/C/G block into all rows and
    rescales the T/T1/T2 block so row totals match. ensure_each_T_has_max
    forces the largest T-block value into column T for state 0, T1 for
    state 1 and T2 for state 2 (mirroring the labeling scheme). Rows are
    normalized to sum to 1 (1e-100 avoids zero rows).
    """
    emis = np.zeros([num_states,num_symbols])
    for i in range(num_states):
        emis[i,:] = np.random.randint(0,100,num_symbols)
    if keep_ACG_constant and num_symbols == 6:
        rowclone = np.random.choice(num_states)
        emis[:,0:3] = emis[rowclone,0:3]
        # Rescale each row's T-block so every row keeps the cloned total.
        emis[:,3:] = emis[:,3:]*emis[rowclone,3:].sum()/emis[:,3:].sum(axis=1)[:,None]
    if ensure_each_T_has_max and num_symbols == 6 and num_states == 3:
        # State 0: T gets the max; the two smaller values are shuffled.
        order = sorted(emis[0,3:])[:2]
        np.random.shuffle(order)
        emis[0,3] = max(emis[0,3:])
        emis[0,4:] = order
        # State 1: T1 ('X') gets the max.
        order = sorted(emis[1,3:])[:2]
        np.random.shuffle(order)
        emis[1,4] = max(emis[1,3:])
        emis[1,3] = order[0]
        emis[1,5] = order[1]
        # State 2: T2 ('Y') gets the max.
        order = sorted(emis[2,3:])[:2]
        np.random.shuffle(order)
        emis[2,5] = max(emis[2,3:])
        emis[2,3] = order[0]
        emis[2,4] = order[1]
    emis = emis+1e-100
    emis = emis/emis.sum(axis=1)[:,None]
    return emis
def viterbi_test(randomize=False, length=1000, num_states=3, num_symbols=6, detailed=False, keep_ACG_constant=True, ensure_each_T_has_max=True, high_self2self=True, self2self_factor=15, equal_other=True):
    """Generate a sequence from the HMM, Viterbi-decode it, and score the
    decoded path against the truth and against trivial baselines (all-0,
    all-1, all-2 and a uniformly random path).

    Returns (accuracy, test_is_best, pct-point gap to the best baseline,
    pct of the achievable improvement captured).
    """
    if randomize:
        init = generate_init()
        tran = generate_tran(high_self2self=high_self2self, self2self_factor=self2self_factor, equal_other=equal_other)
        # NOTE(review): ensure_each_T_has_max is fed keep_ACG_constant here,
        # not the ensure_each_T_has_max parameter -- looks like a typo;
        # confirm intent before changing.
        emis = generate_emis(keep_ACG_constant=keep_ACG_constant, ensure_each_T_has_max=keep_ACG_constant)
        ## print emis; print
        ## print tran.sum(axis=1)
        ## print init.sum()
    else:
        # Fixed parameters -- keep in sync with the module-level constants.
        init = np.array([0.8,0.1,0.1])
        tran = np.array([[0.99998,0.00001,0.00001],[0.0005,0.999,0.0005],[0.0005,0.0005,0.999]])
        emis = np.array([[0.25,0.25,0.25,0.24,0.005,0.005],[0.25,0.25,0.25,0.22,0.025,0.005],[0.25,0.25,0.25,0.22,0.005,0.025]])
    ans=gen_labeled_seq(emis, init, tran, length=length)
    v=viterbi_seq(emis, tran, init, ans[0])
    # compare_statepath(...)[2] is the percent agreement between two paths.
    test = compare_statepath(ans[1], v[0])[2]
    all0 = compare_statepath(ans[1], np.zeros(length))[2]
    all1 = compare_statepath(ans[1], np.ones(length))[2]
    all2 = compare_statepath(ans[1], np.ones(length)*2)[2]
    allrand = compare_statepath(ans[1], np.random.choice([0,1,2], size=length))[2]
    test_is_best = test >= all0 and test >= all1 and test >= all2 and test >= allrand
    pct_point_diff_from_next = test-max(all0,all1,all2,allrand)
    # Guard the division when a baseline already scores a perfect 100.
    if 100-max(all0,all1,all2,allrand) == 0:
        pct_as_good_as_poss = 100*(1e-10+pct_point_diff_from_next)/(100-max(all0,all1,all2,allrand)+1e-10)
    else:
        pct_as_good_as_poss = 100*pct_point_diff_from_next/(100-max(all0,all1,all2,allrand))
    if detailed:
        print ans[1]
        print v[0]
        print test, all0, all1, all2, allrand, "test_is_best = ", test_is_best, pct_point_diff_from_next, pct_as_good_as_poss
    else:
        print test, all0, all1, all2, allrand, "test_is_best = ", test_is_best, pct_point_diff_from_next, pct_as_good_as_poss
    return test, test_is_best, pct_point_diff_from_next, pct_as_good_as_poss
def test_viterbi(iterations=10, randomize=False, length=1000, random_but_close_to_expt=None, keep_ACG_constant=True, ensure_each_T_has_max=True, high_self2self=True, self2self_factor=15, equal_other=True):
    """Run viterbi_test `iterations` times and collect its four outputs.

    When randomize is set, random_but_close_to_expt=True forces parameters
    that resemble the expected experiment, False fully relaxes them, and
    None leaves the caller's keyword settings untouched.
    Returns (accuracy, best, pct_best, diff) arrays of length `iterations`.
    """
    accuracy = np.zeros(iterations)
    best = np.zeros(iterations)
    pct_best = np.zeros(iterations)
    diff = np.zeros(iterations)
    if randomize and random_but_close_to_expt is not None:
        if random_but_close_to_expt:
            keep_ACG_constant=True; ensure_each_T_has_max=True; high_self2self=True; self2self_factor=15; equal_other=True
        else:
            keep_ACG_constant=False; ensure_each_T_has_max=False; high_self2self=False; self2self_factor=1; equal_other=False
    for i in range(iterations):
        accuracy[i], best[i], pct_best[i], diff[i] = viterbi_test(randomize, length, keep_ACG_constant=keep_ACG_constant, ensure_each_T_has_max=ensure_each_T_has_max, high_self2self=high_self2self, self2self_factor=self2self_factor, equal_other=equal_other)
    return accuracy, best, pct_best, diff
def analyze_accuracy(ans):
    """Summarize an accuracy array as (median, mean, std, min, max)."""
    summary = (np.median(ans), ans.mean(), ans.std(), ans.min(), ans.max())
    return summary
def analyze_best(ans):
    """Return the percentage of truthy entries in ans."""
    n_best = sum(ans)
    return 100 * n_best / len(ans)
def analyze_answer(ans):
    """Pretty-print the summary of a test_viterbi result tuple.

    ans[0] is the per-iteration accuracy array; ans[1] the per-iteration
    'method was best' flags.
    """
    a=analyze_accuracy(ans[0])
    b=analyze_best(ans[1])
    print "Median accuracy:", a[0]
    print "Mean accuracy:", a[1]
    print "Stdev accuracy:", a[2]
    print "Min accuracy:", a[3]
    print "Max accuracy:", a[4]
    print "Method better than arbitrary AND random states in", b, "% of the trials."
    print
##NEXT
## use profile to segment statepath into origins, terms, free-float type 1 (X->Y), free-float type2 (Y->X)
## define these as 4 profiles that all go back to background
## The 4 profiles share states, but these states can just be redundant
## e.g. origin profile can have Y->X->Y ..... I will think about tomorrow...
## perhaps can just compress each segment of statepath into single emissions
| mit |
david-abel/name_generator | name_gen_app/views.py | 1 | 1198 | from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.forms import UserCreationForm
from django.template import loader, RequestContext
from name_gen_app.models import NGram
import pickle
import name_generator
# Load the five pre-computed n-gram count tables once at import time so
# every request can reuse them.
# NOTE(review): pickle.load executes arbitrary code if these files are ever
# replaced with untrusted data -- assumed to be trusted repo artifacts.
with open('ngrams/unigram_counts.pickle', 'rb') as handle:
    unigram_counts = pickle.load(handle)
with open('ngrams/bigram_counts.pickle', 'rb') as handle:
    bigram_counts = pickle.load(handle)
with open('ngrams/trigram_counts.pickle', 'rb') as handle:
    trigram_counts = pickle.load(handle)
with open('ngrams/fourgram_counts.pickle', 'rb') as handle:
    fourgram_counts = pickle.load(handle)
with open('ngrams/fivegram_counts.pickle', 'rb') as handle:
    fivegram_counts = pickle.load(handle)
# Create your views here.
def home(request):
    """Render the landing page with a freshly generated candidate name.

    The weights bias the generator toward higher-order n-grams; the five
    n-gram tables are the module-level pickles loaded at import time.
    """
    weights = [1, 3, 5, 7, 10]
    candidate_name = name_generator.generate_name(
        weights, unigram_counts, bigram_counts, trigram_counts,
        fourgram_counts, fivegram_counts)
    # Use the render() shortcut (already imported at the top of this file):
    # it builds the RequestContext itself. Passing a RequestContext to
    # Template.render() raises TypeError on modern Django.
    return render(request, 'name_gen.html', {'candidate_name': candidate_name})
vijayendrabvs/ssl-neutron | neutron/extensions/quotasv2.py | 8 | 5436 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from oslo.config import cfg
import webob
from neutron.api import extensions
from neutron.api.v2.attributes import convert_to_int
from neutron.api.v2 import base
from neutron.api.v2 import resource
from neutron.common import exceptions as n_exc
from neutron.manager import NeutronManager
from neutron.openstack.common import importutils
from neutron import quota
from neutron import wsgi
RESOURCE_NAME = 'quota'
RESOURCE_COLLECTION = RESOURCE_NAME + "s"
QUOTAS = quota.QUOTAS
DB_QUOTA_DRIVER = 'neutron.db.quota_db.DbQuotaDriver'
EXTENDED_ATTRIBUTES_2_0 = {
RESOURCE_COLLECTION: {}
}
class QuotaSetsController(wsgi.Controller):
    """WSGI controller exposing per-tenant quota management (v2 API)."""

    def __init__(self, plugin):
        self._resource_name = RESOURCE_NAME
        self._plugin = plugin
        # The quota driver is configurable; all quota reads/writes below
        # are delegated to it.
        self._driver = importutils.import_class(
            cfg.CONF.QUOTAS.quota_driver
        )
        # EXTENDED_ATTRIBUTES_2_0 is populated lazily on the first update().
        self._update_extended_attributes = True

    def _update_attributes(self):
        """Register every tracked quota resource as a writable attribute."""
        for quota_resource in QUOTAS.resources.iterkeys():
            # NOTE(review): iterkeys() is Python 2 only.
            attr_dict = EXTENDED_ATTRIBUTES_2_0[RESOURCE_COLLECTION]
            # Quotas can only be PUT, as integers in [-1, sys.maxsize].
            # (-1 presumably means "unlimited" -- confirm driver semantics.)
            attr_dict[quota_resource] = {'allow_post': False,
                                         'allow_put': True,
                                         'convert_to': convert_to_int,
                                         'validate': {'type:range':
                                                      [-1, sys.maxsize]},
                                         'is_visible': True}
        self._update_extended_attributes = False

    def _get_quotas(self, request, tenant_id):
        """Return *tenant_id*'s quota set via the configured driver."""
        return self._driver.get_tenant_quotas(
            request.context, QUOTAS.resources, tenant_id)

    def create(self, request, body=None):
        """Quotas cannot be created; reject POST outright."""
        msg = _('POST requests are not supported on this resource.')
        raise webob.exc.HTTPNotImplemented(msg)

    def index(self, request):
        """List quotas for all tenants (admin only)."""
        context = request.context
        self._check_admin(context)
        return {self._resource_name + "s":
                self._driver.get_all_quotas(context, QUOTAS.resources)}

    def tenant(self, request):
        """Retrieve the tenant info in context."""
        context = request.context
        if not context.tenant_id:
            raise n_exc.QuotaMissingTenant()
        return {'tenant': {'tenant_id': context.tenant_id}}

    def show(self, request, id):
        """Show one tenant's quotas; cross-tenant reads require admin."""
        if id != request.context.tenant_id:
            self._check_admin(request.context,
                              reason=_("Non-admin is not authorised "
                                       "to access quotas for another tenant"))
        return {self._resource_name: self._get_quotas(request, id)}

    def _check_admin(self, context,
                     reason=_("Only admin can view or configure quota")):
        # Raises rather than returning so callers cannot ignore the check.
        if not context.is_admin:
            raise n_exc.AdminRequired(reason=reason)

    def delete(self, request, id):
        """Delete tenant *id*'s quota values (admin only)."""
        self._check_admin(request.context)
        self._driver.delete_tenant_quota(request.context, id)

    def update(self, request, id, body=None):
        """Update individual quota limits for tenant *id* (admin only)."""
        self._check_admin(request.context)
        if self._update_extended_attributes:
            self._update_attributes()
        # Validate/convert the request body against the attribute map.
        body = base.Controller.prepare_request_body(
            request.context, body, False, self._resource_name,
            EXTENDED_ATTRIBUTES_2_0[RESOURCE_COLLECTION])
        for key, value in body[self._resource_name].items():
            self._driver.update_quota_limit(request.context, id, key, value)
        return {self._resource_name: self._get_quotas(request, id)}
class Quotasv2(extensions.ExtensionDescriptor):
    """API extension descriptor advertising quota management support."""

    @classmethod
    def get_name(cls):
        return "Quota management support"

    @classmethod
    def get_alias(cls):
        return RESOURCE_COLLECTION

    @classmethod
    def get_description(cls):
        # Only the DB-backed driver provides per-tenant quotas, so the
        # advertised description depends on the configured driver.
        description = 'Expose functions for quotas management'
        if cfg.CONF.QUOTAS.quota_driver != DB_QUOTA_DRIVER:
            return description
        return description + ' per tenant'

    @classmethod
    def get_namespace(cls):
        return "http://docs.openstack.org/network/ext/quotas-sets/api/v2.0"

    @classmethod
    def get_updated(cls):
        return "2012-07-29T10:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Returns Ext Resources."""
        quota_controller = resource.Resource(
            QuotaSetsController(NeutronManager.get_plugin()),
            faults=base.FAULT_MAP)
        quota_resource = extensions.ResourceExtension(
            Quotasv2.get_alias(),
            quota_controller,
            collection_actions={'tenant': 'GET'})
        return [quota_resource]

    def get_extended_resources(self, version):
        # Quota attributes only exist in the v2.0 API.
        return EXTENDED_ATTRIBUTES_2_0 if version == "2.0" else {}
| apache-2.0 |
535521469/crawler_sth | scrapyd/website.py | 2 | 4652 | from datetime import datetime
from twisted.web import resource, static
from twisted.application.service import IServiceCollection
from scrapy.utils.misc import load_object
from .interfaces import IPoller, IEggStorage, ISpiderScheduler
from . import webservice
class Root(resource.Resource):
    """Top-level Twisted Web resource.

    Wires up the HTML pages (Home, Jobs), the static log/item file
    trees, and every webservice listed in the [services] config section.
    """

    def __init__(self, config, app):
        resource.Resource.__init__(self)
        self.debug = config.getboolean('debug', False)
        self.runner = config.get('runner')
        logsdir = config.get('logs_dir')
        itemsdir = config.get('items_dir')
        self.app = app
        self.putChild('', Home(self))
        # Crawl logs and scraped items are served directly from disk.
        self.putChild('logs', static.File(logsdir, 'text/plain'))
        self.putChild('items', static.File(itemsdir, 'text/plain'))
        self.putChild('jobs', Jobs(self))
        # Each [services] entry maps a URL child name to a service class.
        services = config.items('services', ())
        for servName, servClsName in services:
            servCls = load_object(servClsName)
            self.putChild(servName, servCls(self))
        self.update_projects()

    def update_projects(self):
        """Propagate the current project list to the poller and scheduler."""
        self.poller.update_projects()
        self.scheduler.update_projects()

    @property
    def launcher(self):
        # Looked up lazily by name -- presumably the launcher service is
        # registered after this resource is built; TODO confirm.
        app = IServiceCollection(self.app, self.app)
        return app.getServiceNamed('launcher')

    @property
    def scheduler(self):
        return self.app.getComponent(ISpiderScheduler)

    @property
    def eggstorage(self):
        return self.app.getComponent(IEggStorage)

    @property
    def poller(self):
        return self.app.getComponent(IPoller)
class Home(resource.Resource):
    """Landing page: lists available projects and links to the other views."""

    def __init__(self, root):
        resource.Resource.__init__(self)
        self.root = root

    def render_GET(self, txrequest):
        """Render the static landing page with the current project list."""
        vars = {
            'projects': ', '.join(self.root.scheduler.list_projects()),
        }
        # Fixed: the Items and Logs list entries were missing their
        # closing </a> tags.
        return """
<html>
<head><title>Scrapyd</title></head>
<body>
<h1>Scrapyd</h1>
<p>Available projects: <b>%(projects)s</b></p>
<ul>
<li><a href="/jobs">Jobs</a></li>
<li><a href="/items/">Items</a></li>
<li><a href="/logs/">Logs</a></li>
<li><a href="http://doc.scrapy.org/en/latest/topics/scrapyd.html">Documentation</a></li>
</ul>
<h2>How to schedule a spider?</h2>
<p>To schedule a spider you need to use the API (this web UI is only for
monitoring)</p>
<p>Example using <a href="http://curl.haxx.se/">curl</a>:</p>
<p><code>curl http://localhost:6800/schedule.json -d project=default -d spider=somespider</code></p>
<p>For more information about the API, see the <a href="http://doc.scrapy.org/en/latest/topics/scrapyd.html">Scrapyd documentation</a></p>
</body>
</html>
""" % vars
class Jobs(resource.Resource):
    """HTML table of pending, running, and finished jobs."""

    def __init__(self, root):
        resource.Resource.__init__(self)
        self.root = root

    def render(self, txrequest):
        """Render the three job tables (pending/running/finished) as HTML."""
        # Fixed: the original emitted "</title></title>" (so <head> was
        # never closed) and left the column-header <th> cells outside a
        # <tr> element.
        s = "<html><head><title>Scrapyd</title></head>"
        s += "<body>"
        s += "<h1>Jobs</h1>"
        s += "<p><a href='..'>Go back</a></p>"
        s += "<table border='1'>"
        s += "<tr><th>Project</th><th>Spider</th><th>Job</th><th>PID</th><th>Runtime</th><th>Log</th><th>Items</th></tr>"
        s += "<tr><th colspan='7' style='background-color: #ddd'>Pending</th></tr>"
        # Pending jobs: only project/spider/job are known (no PID yet).
        for project, queue in self.root.poller.queues.items():
            for m in queue.list():
                s += "<tr>"
                s += "<td>%s</td>" % project
                s += "<td>%s</td>" % str(m['name'])
                s += "<td>%s</td>" % str(m['_job'])
                s += "</tr>"
        s += "<tr><th colspan='7' style='background-color: #ddd'>Running</th></tr>"
        for p in self.root.launcher.processes.values():
            s += "<tr>"
            for a in ['project', 'spider', 'job', 'pid']:
                s += "<td>%s</td>" % getattr(p, a)
            s += "<td>%s</td>" % (datetime.now() - p.start_time)
            s += "<td><a href='/logs/%s/%s/%s.log'>Log</a></td>" % (p.project, p.spider, p.job)
            s += "<td><a href='/items/%s/%s/%s.jl'>Items</a></td>" % (p.project, p.spider, p.job)
            s += "</tr>"
        s += "<tr><th colspan='7' style='background-color: #ddd'>Finished</th></tr>"
        for p in self.root.launcher.finished:
            s += "<tr>"
            for a in ['project', 'spider', 'job']:
                s += "<td>%s</td>" % getattr(p, a)
            # Empty PID cell: the process is gone.
            s += "<td></td>"
            s += "<td>%s</td>" % (p.end_time - p.start_time)
            s += "<td><a href='/logs/%s/%s/%s.log'>Log</a></td>" % (p.project, p.spider, p.job)
            s += "<td><a href='/items/%s/%s/%s.jl'>Items</a></td>" % (p.project, p.spider, p.job)
            s += "</tr>"
        s += "</table>"
        s += "</body>"
        s += "</html>"
        return s
| bsd-3-clause |
beiko-lab/gengis | bin/Lib/curses/ascii.py | 24 | 2706 | """Constants and membership tests for ASCII characters"""
NUL = 0x00 # ^@
SOH = 0x01 # ^A
STX = 0x02 # ^B
ETX = 0x03 # ^C
EOT = 0x04 # ^D
ENQ = 0x05 # ^E
ACK = 0x06 # ^F
BEL = 0x07 # ^G
BS = 0x08 # ^H
TAB = 0x09 # ^I
HT = 0x09 # ^I
LF = 0x0a # ^J
NL = 0x0a # ^J
VT = 0x0b # ^K
FF = 0x0c # ^L
CR = 0x0d # ^M
SO = 0x0e # ^N
SI = 0x0f # ^O
DLE = 0x10 # ^P
DC1 = 0x11 # ^Q
DC2 = 0x12 # ^R
DC3 = 0x13 # ^S
DC4 = 0x14 # ^T
NAK = 0x15 # ^U
SYN = 0x16 # ^V
ETB = 0x17 # ^W
CAN = 0x18 # ^X
EM = 0x19 # ^Y
SUB = 0x1a # ^Z
ESC = 0x1b # ^[
FS = 0x1c # ^\
GS = 0x1d # ^]
RS = 0x1e # ^^
US = 0x1f # ^_
SP = 0x20 # space
DEL = 0x7f # delete
controlnames = [
"NUL", "SOH", "STX", "ETX", "EOT", "ENQ", "ACK", "BEL",
"BS", "HT", "LF", "VT", "FF", "CR", "SO", "SI",
"DLE", "DC1", "DC2", "DC3", "DC4", "NAK", "SYN", "ETB",
"CAN", "EM", "SUB", "ESC", "FS", "GS", "RS", "US",
"SP"
]
def _ctoi(c):
if type(c) == type(""):
return ord(c)
else:
return c
def isalnum(c): return isalpha(c) or isdigit(c)
def isalpha(c): return isupper(c) or islower(c)
def isascii(c): return _ctoi(c) <= 127 # ?
def isblank(c): return _ctoi(c) in (8,32)
def iscntrl(c): return _ctoi(c) <= 31
def isdigit(c): return _ctoi(c) >= 48 and _ctoi(c) <= 57
def isgraph(c): return _ctoi(c) >= 33 and _ctoi(c) <= 126
def islower(c): return _ctoi(c) >= 97 and _ctoi(c) <= 122
def isprint(c): return _ctoi(c) >= 32 and _ctoi(c) <= 126
def ispunct(c): return _ctoi(c) != 32 and not isalnum(c)
def isspace(c): return _ctoi(c) in (9, 10, 11, 12, 13, 32)
def isupper(c): return _ctoi(c) >= 65 and _ctoi(c) <= 90
def isxdigit(c): return isdigit(c) or \
(_ctoi(c) >= 65 and _ctoi(c) <= 70) or (_ctoi(c) >= 97 and _ctoi(c) <= 102)
def isctrl(c): return _ctoi(c) < 32
def ismeta(c): return _ctoi(c) > 127
def ascii(c):
if type(c) == type(""):
return chr(_ctoi(c) & 0x7f)
else:
return _ctoi(c) & 0x7f
def ctrl(c):
if type(c) == type(""):
return chr(_ctoi(c) & 0x1f)
else:
return _ctoi(c) & 0x1f
def alt(c):
if type(c) == type(""):
return chr(_ctoi(c) | 0x80)
else:
return _ctoi(c) | 0x80
def unctrl(c):
bits = _ctoi(c)
if bits == 0x7f:
rep = "^?"
elif isprint(bits & 0x7f):
rep = chr(bits & 0x7f)
else:
rep = "^" + chr(((bits & 0x7f) | 0x20) + 0x20)
if bits & 0x80:
return "!" + rep
return rep
| gpl-3.0 |
aregee/moksaya | profiles/authorization.py | 1 | 4172 | import logging
from tastypie.authorization import DjangoAuthorization , Authorization
from tastypie.exceptions import Unauthorized
from guardian.shortcuts import get_objects_for_user
from guardian.core import ObjectPermissionChecker
logger = logging.getLogger(__name__)
class GuardianAuthorization(Authorization):
    """Tastypie authorization backed by django-guardian object permissions.

    :create_permission_code:
        the permission code that signifies the user can create one of these objects
    :view_permission_code:
        the permission code that signifies the user can view the detail
    :update_permission_code:
        the permission code that signifies the user can update one of these objects
    :remove_permission_code:
        the permission code that signifies the user can remove one of these objects
    :kwargs:
        other permission codes

    Example::

        class Something(models.Model):
            name = models.CharField()

        class SomethingResource(ModelResource):
            class Meta:
                queryset = Something.objects.all()
                authorization = GuardianAuthorization(
                    view_permission_code = 'can_view',
                    create_permission_code = 'can_create',
                    update_permission_code = 'can_update',
                    delete_permission_code = 'can_delete'
                    )
    """

    def __init__(self, *args, **kwargs):
        # Permission codenames are configurable per resource; defaults
        # follow the conventional "can_*" naming.
        self.view_permission_code = kwargs.pop("view_permission_code", 'can_view')
        self.create_permission_code = kwargs.pop("create_permission_code", 'can_create')
        self.update_permission_code = kwargs.pop("update_permission_code", 'can_update')
        self.delete_permission_code = kwargs.pop("delete_permission_code", 'can_delete')
        super(GuardianAuthorization, self).__init__(*args, **kwargs)

    def generic_base_check(self, object_list, bundle):
        """
        Raise Unauthorized if either:
            a) the `object_list.model` doesn't have a `_meta` attribute
            b) the `bundle.request` object doesn't have a `user` attribute
        (Despite earlier wording that it "returns False", this raises on
        failure and returns True on success.)
        """
        klass = self.base_checks(bundle.request, object_list.model)
        if klass is False:
            raise Unauthorized("You are not allowed to access that resource.")
        return True

    def generic_item_check(self, object_list, bundle, permission):
        """Raise Unauthorized unless the user holds *permission* on bundle.obj."""
        if not self.generic_base_check(object_list, bundle):
            raise Unauthorized("You are not allowed to access that resource.")
        checker = ObjectPermissionChecker(bundle.request.user)
        if not checker.has_perm(permission, bundle.obj):
            raise Unauthorized("You are not allowed to access that resource.")
        return True

    def generic_list_check(self, object_list, bundle, permission):
        """Filter *object_list* down to objects the user holds *permission* on."""
        if not self.generic_base_check(object_list, bundle):
            raise Unauthorized("You are not allowed to access that resource.")
        return get_objects_for_user(bundle.request.user, object_list, permission)

    # NOTE(review): the CRUD hooks below use plain ownership checks
    # (obj.user == request.user) instead of the generic_* guardian helpers
    # above -- confirm whether per-object permissions were meant to apply.

    def read_list(self, object_list, bundle):
        # This assumes a ``QuerySet`` from ``ModelResource``.
        return object_list.filter(user=bundle.request.user)

    def read_detail(self, object_list, bundle):
        # Is the requested object owned by the user?
        return bundle.obj.user == bundle.request.user

    def create_list(self, object_list, bundle):
        # Assuming their auto-assigned to ``user``.
        return object_list

    def create_detail(self, object_list, bundle):
        return object_list  # bundle.obj.user == bundle.request.user

    def update_list(self, object_list, bundle):
        allowed = []
        # Since they may not all be saved, iterate over them.
        for obj in object_list:
            if obj.user == bundle.request.user:
                allowed.append(obj)
        return allowed

    def update_detail(self, object_list, bundle):
        return bundle.obj.user == bundle.request.user

    def delete_list(self, object_list, bundle):
        # Sorry user, no deletes for you!
        raise Unauthorized("Sorry, no deletes.")

    def delete_detail(self, object_list, bundle):
        raise Unauthorized("Sorry, no deletes.")
| mit |
rackerlabs/instrumented-ceilometer | ceilometer/tests/volume/test_notifications.py | 1 | 7306 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometer.openstack.common import test
from ceilometer.volume import notifications
NOTIFICATION_VOLUME_EXISTS = {
u'_context_roles': [u'admin'],
u'_context_request_id': u'req-7ef29a5d-adeb-48a8-b104-59c05361aa27',
u'_context_quota_class': None,
u'event_type': u'volume.exists',
u'timestamp': u'2012-09-21 09:29:10.620731',
u'message_id': u'e0e6a5ad-2fc9-453c-b3fb-03fe504538dc',
u'_context_auth_token': None,
u'_context_is_admin': True,
u'_context_project_id': None,
u'_context_timestamp': u'2012-09-21T09:29:10.266928',
u'_context_read_deleted': u'no',
u'_context_user_id': None,
u'_context_remote_address': None,
u'publisher_id': u'volume.ubuntu-VirtualBox',
u'payload': {u'status': u'available',
u'audit_period_beginning': u'2012-09-20 00:00:00',
u'display_name': u'volume1',
u'tenant_id': u'6c97f1ecf17047eab696786d56a0bff5',
u'created_at': u'2012-09-20 15:05:16',
u'snapshot_id': None,
u'volume_type': None,
u'volume_id': u'84c363b9-9854-48dc-b949-fe04263f4cf0',
u'audit_period_ending': u'2012-09-21 00:00:00',
u'user_id': u'4d2fa4b76a4a4ecab8c468c8dea42f89',
u'launched_at': u'2012-09-20 15:05:23',
u'size': 2},
u'priority': u'INFO'
}
NOTIFICATION_VOLUME_DELETE = {
u'_context_roles': [u'Member', u'admin'],
u'_context_request_id': u'req-6ba8ccb4-1093-4a39-b029-adfaa3fc7ceb',
u'_context_quota_class': None,
u'event_type': u'volume.delete.start',
u'timestamp': u'2012-09-21 10:24:13.168630',
u'message_id': u'f6e6bc1f-fcd5-41e1-9a86-da7d024f03d9',
u'_context_auth_token': u'277c6899de8a4b3d999f3e2e4c0915ff',
u'_context_is_admin': True,
u'_context_project_id': u'6c97f1ecf17047eab696786d56a0bff5',
u'_context_timestamp': u'2012-09-21T10:23:54.741228',
u'_context_read_deleted': u'no',
u'_context_user_id': u'4d2fa4b76a4a4ecab8c468c8dea42f89',
u'_context_remote_address': u'192.168.22.101',
u'publisher_id': u'volume.ubuntu-VirtualBox',
u'payload': {u'status': u'deleting',
u'volume_type_id': None,
u'display_name': u'abc',
u'tenant_id': u'6c97f1ecf17047eab696786d56a0bff5',
u'created_at': u'2012-09-21 10:10:47',
u'snapshot_id': None,
u'volume_id': u'3b761164-84b4-4eb3-8fcb-1974c641d6ef',
u'user_id': u'4d2fa4b76a4a4ecab8c468c8dea42f89',
u'launched_at': u'2012-09-21 10:10:50',
u'size': 3},
u'priority': u'INFO'}
NOTIFICATION_VOLUME_RESIZE = {
u'_context_roles': [u'Member', u'admin'],
u'_context_request_id': u'req-6ba8ccb4-1093-4a39-b029-adfaa3fc7ceb',
u'_context_quota_class': None,
u'event_type': u'volume.resize.end',
u'timestamp': u'2012-09-21 10:24:13.168630',
u'message_id': u'b5814258-3425-4eb7-b6b7-bf4811203e58',
u'_context_auth_token': u'277c6899de8a4b3d999f3e2e4c0915ff',
u'_context_is_admin': True,
u'_context_project_id': u'6c97f1ecf17047eab696786d56a0bff5',
u'_context_timestamp': u'2012-09-21T10:02:27.134211',
u'_context_read_deleted': u'no',
u'_context_user_id': u'4d2fa4b76a4a4ecab8c468c8dea42f89',
u'_context_remote_address': u'192.168.22.101',
u'publisher_id': u'volume.ubuntu-VirtualBox',
u'payload': {u'status': u'extending',
u'volume_type_id': None,
u'display_name': u'abc',
u'tenant_id': u'6c97f1ecf17047eab696786d56a0bff5',
u'created_at': u'2012-09-21 10:10:47',
u'snapshot_id': None,
u'volume_id': u'3b761164-84b4-4eb3-8fcb-1974c641d6ef',
u'user_id': u'4d2fa4b76a4a4ecab8c468c8dea42f89',
u'launched_at': u'2012-09-21 10:10:50',
u'size': 3},
u'priority': u'INFO'}
class TestNotifications(test.BaseTestCase):
    """Sample generation from cinder volume notifications.

    The six original test methods were three copy-pasted pairs; the
    shared logic now lives in two private helpers so each fixture is
    checked identically.  Test method names are unchanged.
    """

    def _verify_common_sample(self, s, name, notification):
        """Assert the fields shared by every sample built from *notification*."""
        self.assertFalse(s is None)
        self.assertEqual(s.name, name)
        self.assertEqual(s.resource_id, notification['payload']['volume_id'])
        self.assertEqual(s.timestamp, notification['timestamp'])
        metadata = s.resource_metadata
        self.assertEqual(metadata.get('host'), notification['publisher_id'])

    def _check_volume(self, notification):
        """Exactly one 'volume' sample with volume == 1 is produced."""
        v = notifications.Volume()
        samples = list(v.process_notification(notification))
        self.assertEqual(len(samples), 1)
        s = samples[0]
        self._verify_common_sample(s, 'volume', notification)
        self.assertEqual(s.volume, 1)

    def _check_volume_size(self, notification):
        """Exactly one 'volume.size' sample carrying the payload size."""
        v = notifications.VolumeSize()
        samples = list(v.process_notification(notification))
        self.assertEqual(len(samples), 1)
        s = samples[0]
        self._verify_common_sample(s, 'volume.size', notification)
        self.assertEqual(s.volume, notification['payload']['size'])

    def test_volume_exists(self):
        self._check_volume(NOTIFICATION_VOLUME_EXISTS)

    def test_volume_size_exists(self):
        self._check_volume_size(NOTIFICATION_VOLUME_EXISTS)

    def test_volume_delete(self):
        self._check_volume(NOTIFICATION_VOLUME_DELETE)

    def test_volume_size_delete(self):
        self._check_volume_size(NOTIFICATION_VOLUME_DELETE)

    def test_volume_resize(self):
        self._check_volume(NOTIFICATION_VOLUME_RESIZE)

    def test_volume_size_resize(self):
        self._check_volume_size(NOTIFICATION_VOLUME_RESIZE)
| apache-2.0 |
LeartS/odoo | addons/note_pad/__openerp__.py | 120 | 1678 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP module manifest for the "Memos pad" addon.
{
    'name': 'Memos pad',
    'version': '0.1',
    'category': 'Tools',
    'description': """
This module update memos inside OpenERP for using an external pad
=================================================================
Use for update your text memo in real time with the following user that you invite.
""",
    'author': 'OpenERP SA',
    'website': 'http://openerp.com',
    'summary': 'Sticky memos, Collaborative',
    # Needs both the pad integration and the base note app.
    'depends': [
        'mail',
        'pad',
        'note',
    ],
    'data': [
        'note_pad_view.xml',
    ],
    'installable': True,
    'application': False,
    # Not installed automatically even when all dependencies are present.
    'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
shravan-achar/servo | tests/wpt/css-tests/tools/pywebsocket/src/test/client_for_testing.py | 451 | 39706 | #!/usr/bin/env python
#
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""WebSocket client utility for testing.
This module contains helper methods for performing handshake, frame
sending/receiving as a WebSocket client.
This is code for testing mod_pywebsocket. Keep this code independent from
mod_pywebsocket. Don't import e.g. Stream class for generating frame for
testing. Using util.hexify, etc. that are not related to protocol processing
is allowed.
Note:
This code is far from robust, e.g., we cut corners in handshake.
"""
import base64
import errno
import logging
import os
import random
import re
import socket
import struct
import time
from mod_pywebsocket import common
from mod_pywebsocket import util
DEFAULT_PORT = 80
DEFAULT_SECURE_PORT = 443
# Opcodes introduced in IETF HyBi 01 for the new framing format
OPCODE_CONTINUATION = 0x0
OPCODE_CLOSE = 0x8
OPCODE_PING = 0x9
OPCODE_PONG = 0xa
OPCODE_TEXT = 0x1
OPCODE_BINARY = 0x2
# Strings used for handshake
_UPGRADE_HEADER = 'Upgrade: websocket\r\n'
_UPGRADE_HEADER_HIXIE75 = 'Upgrade: WebSocket\r\n'
_CONNECTION_HEADER = 'Connection: Upgrade\r\n'
WEBSOCKET_ACCEPT_UUID = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
# Status codes
STATUS_NORMAL_CLOSURE = 1000
STATUS_GOING_AWAY = 1001
STATUS_PROTOCOL_ERROR = 1002
STATUS_UNSUPPORTED_DATA = 1003
STATUS_NO_STATUS_RECEIVED = 1005
STATUS_ABNORMAL_CLOSURE = 1006
STATUS_INVALID_FRAME_PAYLOAD_DATA = 1007
STATUS_POLICY_VIOLATION = 1008
STATUS_MESSAGE_TOO_BIG = 1009
STATUS_MANDATORY_EXT = 1010
STATUS_INTERNAL_ENDPOINT_ERROR = 1011
STATUS_TLS_HANDSHAKE = 1015
# Extension tokens
_DEFLATE_FRAME_EXTENSION = 'deflate-frame'
# TODO(bashi): Update after mux implementation finished.
_MUX_EXTENSION = 'mux_DO_NOT_USE'
_PERMESSAGE_DEFLATE_EXTENSION = 'permessage-deflate'
def _method_line(resource):
return 'GET %s HTTP/1.1\r\n' % resource
def _sec_origin_header(origin):
return 'Sec-WebSocket-Origin: %s\r\n' % origin.lower()
def _origin_header(origin):
# 4.1 13. concatenation of the string "Origin:", a U+0020 SPACE character,
# and the /origin/ value, converted to ASCII lowercase, to /fields/.
return 'Origin: %s\r\n' % origin.lower()
def _format_host_header(host, port, secure):
# 4.1 9. Let /hostport/ be an empty string.
# 4.1 10. Append the /host/ value, converted to ASCII lowercase, to
# /hostport/
hostport = host.lower()
# 4.1 11. If /secure/ is false, and /port/ is not 80, or if /secure/
# is true, and /port/ is not 443, then append a U+003A COLON character
# (:) followed by the value of /port/, expressed as a base-ten integer,
# to /hostport/
if ((not secure and port != DEFAULT_PORT) or
(secure and port != DEFAULT_SECURE_PORT)):
hostport += ':' + str(port)
# 4.1 12. concatenation of the string "Host:", a U+0020 SPACE
# character, and /hostport/, to /fields/.
return 'Host: %s\r\n' % hostport
# TODO(tyoshino): Define a base class and move these shared methods to that.
def receive_bytes(socket, length):
received_bytes = []
remaining = length
while remaining > 0:
new_received_bytes = socket.recv(remaining)
if not new_received_bytes:
raise Exception(
'Connection closed before receiving requested length '
'(requested %d bytes but received only %d bytes)' %
(length, length - remaining))
received_bytes.append(new_received_bytes)
remaining -= len(new_received_bytes)
return ''.join(received_bytes)
# TODO(tyoshino): Now the WebSocketHandshake class diverts these methods. We
# should move to HTTP parser as specified in RFC 6455. For HyBi 00 and
# Hixie 75, pack these methods as some parser class.
def _read_fields(socket):
# 4.1 32. let /fields/ be a list of name-value pairs, initially empty.
fields = {}
while True:
# 4.1 33. let /name/ and /value/ be empty byte arrays
name = ''
value = ''
# 4.1 34. read /name/
name = _read_name(socket)
if name is None:
break
# 4.1 35. read spaces
# TODO(tyoshino): Skip only one space as described in the spec.
ch = _skip_spaces(socket)
# 4.1 36. read /value/
value = _read_value(socket, ch)
# 4.1 37. read a byte from the server
ch = receive_bytes(socket, 1)
if ch != '\n': # 0x0A
raise Exception(
'Expected LF but found %r while reading value %r for header '
'%r' % (ch, name, value))
# 4.1 38. append an entry to the /fields/ list that has the name
# given by the string obtained by interpreting the /name/ byte
# array as a UTF-8 stream and the value given by the string
# obtained by interpreting the /value/ byte array as a UTF-8 byte
# stream.
fields.setdefault(name, []).append(value)
# 4.1 39. return to the "Field" step above
return fields
def _read_name(socket):
# 4.1 33. let /name/ be empty byte arrays
name = ''
while True:
# 4.1 34. read a byte from the server
ch = receive_bytes(socket, 1)
if ch == '\r': # 0x0D
return None
elif ch == '\n': # 0x0A
raise Exception(
'Unexpected LF when reading header name %r' % name)
elif ch == ':': # 0x3A
return name
elif ch >= 'A' and ch <= 'Z': # range 0x31 to 0x5A
ch = chr(ord(ch) + 0x20)
name += ch
else:
name += ch
def _skip_spaces(socket):
# 4.1 35. read a byte from the server
while True:
ch = receive_bytes(socket, 1)
if ch == ' ': # 0x20
continue
return ch
def _read_value(socket, ch):
# 4.1 33. let /value/ be empty byte arrays
value = ''
# 4.1 36. read a byte from server.
while True:
if ch == '\r': # 0x0D
return value
elif ch == '\n': # 0x0A
raise Exception(
'Unexpected LF when reading header value %r' % value)
else:
value += ch
ch = receive_bytes(socket, 1)
def read_frame_header(socket):
    """Read and parse a WebSocket (RFC 6455) frame header from *socket*.

    Returns a tuple (fin, rsv1, rsv2, rsv3, opcode, payload_length).

    Raises:
        Exception: the server set the mask bit, or the 8-byte extended
            payload length exceeds 2^63 - 1.
    """
    received = receive_bytes(socket, 2)

    # First byte: FIN flag, three reserved bits, and the 4-bit opcode.
    first_byte = ord(received[0])
    fin = (first_byte >> 7) & 1
    rsv1 = (first_byte >> 6) & 1
    rsv2 = (first_byte >> 5) & 1
    rsv3 = (first_byte >> 4) & 1
    opcode = first_byte & 0xf

    # Second byte: mask flag and the 7-bit base payload length.
    second_byte = ord(received[1])
    mask = (second_byte >> 7) & 1
    payload_length = second_byte & 0x7f

    if mask != 0:
        # Server-to-client frames must never be masked.
        raise Exception(
            'Mask bit must be 0 for frames coming from server')

    if payload_length == 127:
        # Base length 127: the real length is in the next 8 bytes
        # (network byte order) and must fit in 63 bits.
        extended_payload_length = receive_bytes(socket, 8)
        payload_length = struct.unpack(
            '!Q', extended_payload_length)[0]
        if payload_length > 0x7FFFFFFFFFFFFFFF:
            raise Exception('Extended payload length >= 2^63')
    elif payload_length == 126:
        # Base length 126: the real length is in the next 2 bytes.
        extended_payload_length = receive_bytes(socket, 2)
        payload_length = struct.unpack(
            '!H', extended_payload_length)[0]

    return fin, rsv1, rsv2, rsv3, opcode, payload_length
class _TLSSocket(object):
    """Wrapper for a TLS connection.

    Exposes the minimal send/recv/close surface this module uses.
    NOTE(review): socket.ssl() is the legacy pre-`ssl`-module API,
    removed in Python 3 -- confirm the targeted interpreter.
    """

    def __init__(self, raw_socket):
        self._ssl = socket.ssl(raw_socket)

    def send(self, bytes):
        return self._ssl.write(bytes)

    def recv(self, size=-1):
        return self._ssl.read(size)

    def close(self):
        # Nothing to do.
        pass
class HttpStatusException(Exception):
    """Raised when the opening handshake yields an unexpected HTTP status.

    The offending status code is available as the ``status`` attribute;
    the message is carried by the base Exception.
    """

    def __init__(self, name, status):
        self.status = status
        super(HttpStatusException, self).__init__(name)
class WebSocketHandshake(object):
    """Opening handshake processor for the WebSocket protocol (RFC 6455)."""
    def __init__(self, options):
        self._logger = util.get_class_logger(self)
        self._options = options
    def handshake(self, socket):
        """Performs the client side of the opening handshake on /socket/.

        Raises:
            HttpStatusException: the server replied with a non-101 status.
            HandshakeException: Sec-WebSocket-Accept was malformed.
            Exception: any other handshake failure.
        """
        self._socket = socket
        request_line = _method_line(self._options.resource)
        self._logger.debug('Opening handshake Request-Line: %r', request_line)
        self._socket.sendall(request_line)
        fields = []
        fields.append(_UPGRADE_HEADER)
        fields.append(_CONNECTION_HEADER)
        fields.append(_format_host_header(
            self._options.server_host,
            self._options.server_port,
            self._options.use_tls))
        # BUGFIX: compare with == instead of the identity test "is 8",
        # which only worked by CPython's small-integer caching.
        if self._options.version == 8:
            fields.append(_sec_origin_header(self._options.origin))
        else:
            fields.append(_origin_header(self._options.origin))
        # Sec-WebSocket-Key is 16 random bytes, base64-encoded
        # (RFC 6455 section 4.1).
        original_key = os.urandom(16)
        key = base64.b64encode(original_key)
        self._logger.debug(
            'Sec-WebSocket-Key: %s (%s)', key, util.hexify(original_key))
        fields.append('Sec-WebSocket-Key: %s\r\n' % key)
        fields.append('Sec-WebSocket-Version: %d\r\n' % self._options.version)
        # Setting up extensions.
        if len(self._options.extensions) > 0:
            fields.append('Sec-WebSocket-Extensions: %s\r\n' %
                          ', '.join(self._options.extensions))
        self._logger.debug('Opening handshake request headers: %r', fields)
        for field in fields:
            self._socket.sendall(field)
        self._socket.sendall('\r\n')
        self._logger.info('Sent opening handshake request')
        # Read the Status-Line up to and including the terminating LF.
        field = ''
        while True:
            ch = receive_bytes(self._socket, 1)
            field += ch
            if ch == '\n':
                break
        self._logger.debug('Opening handshake Response-Line: %r', field)
        if len(field) < 7 or not field.endswith('\r\n'):
            raise Exception('Wrong status line: %r' % field)
        m = re.match('[^ ]* ([^ ]*) .*', field)
        if m is None:
            raise Exception(
                'No HTTP status code found in status line: %r' % field)
        code = m.group(1)
        if not re.match('[0-9][0-9][0-9]', code):
            raise Exception(
                'HTTP status code %r is not three digit in status line: %r' %
                (code, field))
        if code != '101':
            raise HttpStatusException(
                'Expected HTTP status code 101 but found %r in status line: '
                '%r' % (code, field), int(code))
        fields = _read_fields(self._socket)
        # The header section must end with a bare LF (the CR was already
        # consumed by _read_fields).
        ch = receive_bytes(self._socket, 1)
        if ch != '\n':  # 0x0A
            raise Exception('Expected LF but found: %r' % ch)
        self._logger.debug('Opening handshake response headers: %r', fields)
        # Check /fields/
        if len(fields['upgrade']) != 1:
            raise Exception(
                'Multiple Upgrade headers found: %s' % fields['upgrade'])
        if len(fields['connection']) != 1:
            raise Exception(
                'Multiple Connection headers found: %s' % fields['connection'])
        if fields['upgrade'][0] != 'websocket':
            raise Exception(
                'Unexpected Upgrade header value: %s' % fields['upgrade'][0])
        if fields['connection'][0].lower() != 'upgrade':
            raise Exception(
                'Unexpected Connection header value: %s' %
                fields['connection'][0])
        if len(fields['sec-websocket-accept']) != 1:
            raise Exception(
                'Multiple Sec-WebSocket-Accept headers found: %s' %
                fields['sec-websocket-accept'])
        accept = fields['sec-websocket-accept'][0]
        # Validate: the accept value must be the base64 encoding of a
        # 20-byte SHA-1 digest.
        try:
            decoded_accept = base64.b64decode(accept)
        # BUGFIX: drop the unused exception binding (was "except
        # TypeError, e:", Python 2-only syntax that shadowed nothing).
        except TypeError:
            raise HandshakeException(
                'Illegal value for header Sec-WebSocket-Accept: ' + accept)
        if len(decoded_accept) != 20:
            raise HandshakeException(
                'Decoded value of Sec-WebSocket-Accept is not 20-byte long')
        self._logger.debug('Actual Sec-WebSocket-Accept: %r (%s)',
                           accept, util.hexify(decoded_accept))
        # The expected value is SHA-1 of key + fixed GUID (RFC 6455 4.2.2).
        original_expected_accept = util.sha1_hash(
            key + WEBSOCKET_ACCEPT_UUID).digest()
        expected_accept = base64.b64encode(original_expected_accept)
        self._logger.debug('Expected Sec-WebSocket-Accept: %r (%s)',
                           expected_accept,
                           util.hexify(original_expected_accept))
        if accept != expected_accept:
            raise Exception(
                'Invalid Sec-WebSocket-Accept header: %r (expected) != %r '
                '(actual)' % (accept, expected_accept))
        server_extensions_header = fields.get('sec-websocket-extensions')
        accepted_extensions = []
        if server_extensions_header is not None:
            accepted_extensions = common.parse_extensions(
                ', '.join(server_extensions_header))
        # Scan accepted extension list to check if there is any unrecognized
        # extensions or extensions we didn't request in it. Then, for
        # extensions we request, parse them and store parameters. They will be
        # used later by each extension.
        deflate_frame_accepted = False
        mux_accepted = False
        for extension in accepted_extensions:
            if extension.name() == _DEFLATE_FRAME_EXTENSION:
                if self._options.use_deflate_frame:
                    deflate_frame_accepted = True
                    continue
            if extension.name() == _MUX_EXTENSION:
                if self._options.use_mux:
                    mux_accepted = True
                    continue
            if extension.name() == _PERMESSAGE_DEFLATE_EXTENSION:
                checker = self._options.check_permessage_deflate
                if checker:
                    checker(extension)
                    continue
            raise Exception(
                'Received unrecognized extension: %s' % extension.name())
        # Let all extensions check the response for extension request.
        if (self._options.use_deflate_frame and
            not deflate_frame_accepted):
            raise Exception('%s extension not accepted' %
                            _DEFLATE_FRAME_EXTENSION)
        if self._options.use_mux and not mux_accepted:
            raise Exception('%s extension not accepted' % _MUX_EXTENSION)
class WebSocketHybi00Handshake(object):
    """Opening handshake processor for the WebSocket protocol version HyBi 00.
    """
    def __init__(self, options, draft_field):
        self._logger = util.get_class_logger(self)
        self._options = options
        # Value sent as the Sec-WebSocket-Draft header.
        self._draft_field = draft_field
    def handshake(self, socket):
        """Performs the HyBi 00 opening handshake on /socket/.

        Raises:
            HttpStatusException: the server replied with a non-101 status.
            Exception: any other handshake failure.
        """
        self._socket = socket
        # 4.1 5. send request line.
        request_line = _method_line(self._options.resource)
        self._logger.debug('Opening handshake Request-Line: %r', request_line)
        self._socket.sendall(request_line)
        # 4.1 6. Let /fields/ be an empty list of strings.
        fields = []
        # 4.1 7. Add the string "Upgrade: WebSocket" to /fields/.
        fields.append(_UPGRADE_HEADER_HIXIE75)
        # 4.1 8. Add the string "Connection: Upgrade" to /fields/.
        fields.append(_CONNECTION_HEADER)
        # 4.1 9-12. Add Host: field to /fields/.
        fields.append(_format_host_header(
            self._options.server_host,
            self._options.server_port,
            self._options.use_tls))
        # 4.1 13. Add Origin: field to /fields/.
        fields.append(_origin_header(self._options.origin))
        # TODO: 4.1 14 Add Sec-WebSocket-Protocol: field to /fields/.
        # TODO: 4.1 15 Add cookie headers to /fields/.
        # 4.1 16-23. Add Sec-WebSocket-Key<n> to /fields/.
        self._number1, key1 = self._generate_sec_websocket_key()
        self._logger.debug('Number1: %d', self._number1)
        fields.append('Sec-WebSocket-Key1: %s\r\n' % key1)
        self._number2, key2 = self._generate_sec_websocket_key()
        # BUGFIX: this used to log self._number1 again; log number2.
        self._logger.debug('Number2: %d', self._number2)
        fields.append('Sec-WebSocket-Key2: %s\r\n' % key2)
        fields.append('Sec-WebSocket-Draft: %s\r\n' % self._draft_field)
        # 4.1 24. For each string in /fields/, in a random order: send the
        # string, encoded as UTF-8, followed by a UTF-8 encoded U+000D CARRIAGE
        # RETURN U+000A LINE FEED character pair (CRLF).
        random.shuffle(fields)
        self._logger.debug('Opening handshake request headers: %r', fields)
        for field in fields:
            self._socket.sendall(field)
        # 4.1 25. send a UTF-8-encoded U+000D CARRIAGE RETURN U+000A LINE FEED
        # character pair (CRLF).
        self._socket.sendall('\r\n')
        # 4.1 26. let /key3/ be a string consisting of eight random bytes (or
        # equivalently, a random 64 bit integer encoded in a big-endian order).
        self._key3 = self._generate_key3()
        # 4.1 27. send /key3/ to the server.
        self._socket.sendall(self._key3)
        self._logger.debug(
            'Key3: %r (%s)', self._key3, util.hexify(self._key3))
        self._logger.info('Sent opening handshake request')
        # 4.1 28. Read bytes from the server until either the connection
        # closes, or a 0x0A byte is read. let /field/ be these bytes, including
        # the 0x0A bytes.
        field = ''
        while True:
            ch = receive_bytes(self._socket, 1)
            field += ch
            if ch == '\n':
                break
        self._logger.debug('Opening handshake Response-Line: %r', field)
        # if /field/ is not at least seven bytes long, or if the last
        # two bytes aren't 0x0D and 0x0A respectively, or if it does not
        # contain at least two 0x20 bytes, then fail the WebSocket connection
        # and abort these steps.
        if len(field) < 7 or not field.endswith('\r\n'):
            raise Exception('Wrong status line: %r' % field)
        m = re.match('[^ ]* ([^ ]*) .*', field)
        if m is None:
            raise Exception('No code found in status line: %r' % field)
        # 4.1 29. let /code/ be the substring of /field/ that starts from the
        # byte after the first 0x20 byte, and ends with the byte before the
        # second 0x20 byte.
        code = m.group(1)
        # 4.1 30. if /code/ is not three bytes long, or if any of the bytes in
        # /code/ are not in the range 0x30 to 0x90, then fail the WebSocket
        # connection and abort these steps.
        if not re.match('[0-9][0-9][0-9]', code):
            raise Exception(
                'HTTP status code %r is not three digit in status line: %r' %
                (code, field))
        # 4.1 31. if /code/, interpreted as UTF-8, is "101", then move to the
        # next step.
        if code != '101':
            raise HttpStatusException(
                'Expected HTTP status code 101 but found %r in status line: '
                '%r' % (code, field), int(code))
        # 4.1 32-39. read fields into /fields/
        fields = _read_fields(self._socket)
        self._logger.debug('Opening handshake response headers: %r', fields)
        # 4.1 40. _Fields processing_
        # read a byte from server
        ch = receive_bytes(self._socket, 1)
        if ch != '\n':  # 0x0A
            raise Exception('Expected LF but found %r' % ch)
        # 4.1 41. check /fields/
        if len(fields['upgrade']) != 1:
            raise Exception(
                'Multiple Upgrade headers found: %s' % fields['upgrade'])
        if len(fields['connection']) != 1:
            raise Exception(
                'Multiple Connection headers found: %s' % fields['connection'])
        if len(fields['sec-websocket-origin']) != 1:
            # BUGFIX: the message used to index the misspelled key
            # 'sec-sebsocket-origin', raising KeyError instead of reporting.
            raise Exception(
                'Multiple Sec-WebSocket-Origin headers found: %s' %
                fields['sec-websocket-origin'])
        if len(fields['sec-websocket-location']) != 1:
            # BUGFIX: same misspelling ('sec-sebsocket-location') as above.
            raise Exception(
                'Multiple Sec-WebSocket-Location headers found: %s' %
                fields['sec-websocket-location'])
        # TODO(ukai): protocol
        # if the entry's name is "upgrade"
        # if the value is not exactly equal to the string "WebSocket",
        # then fail the WebSocket connection and abort these steps.
        if fields['upgrade'][0] != 'WebSocket':
            raise Exception(
                'Unexpected Upgrade header value: %s' % fields['upgrade'][0])
        # if the entry's name is "connection"
        # if the value, converted to ASCII lowercase, is not exactly equal
        # to the string "upgrade", then fail the WebSocket connection and
        # abort these steps.
        if fields['connection'][0].lower() != 'upgrade':
            raise Exception(
                'Unexpected Connection header value: %s' %
                fields['connection'][0])
        # TODO(ukai): check origin, location, cookie, ..
        # 4.1 42. let /challenge/ be the concatenation of /number_1/,
        # expressed as a big endian 32 bit integer, /number_2/, expressed
        # as big endian 32 bit integer, and the eight bytes of /key_3/ in the
        # order they were sent on the wire.
        challenge = struct.pack('!I', self._number1)
        challenge += struct.pack('!I', self._number2)
        challenge += self._key3
        self._logger.debug(
            'Challenge: %r (%s)', challenge, util.hexify(challenge))
        # 4.1 43. let /expected/ be the MD5 fingerprint of /challenge/ as a
        # big-endian 128 bit string.
        expected = util.md5_hash(challenge).digest()
        self._logger.debug(
            'Expected challenge response: %r (%s)',
            expected, util.hexify(expected))
        # 4.1 44. read sixteen bytes from the server.
        # let /reply/ be those bytes.
        reply = receive_bytes(self._socket, 16)
        self._logger.debug(
            'Actual challenge response: %r (%s)', reply, util.hexify(reply))
        # 4.1 45. if /reply/ does not exactly equal /expected/, then fail
        # the WebSocket connection and abort these steps.
        if expected != reply:
            raise Exception(
                'Bad challenge response: %r (expected) != %r (actual)' %
                (expected, reply))
        # 4.1 46. The *WebSocket connection is established*.
    def _generate_sec_websocket_key(self):
        """Returns a (number, key) pair built per spec steps 4.1 16-23."""
        # 4.1 16. let /spaces_n/ be a random integer from 1 to 12 inclusive.
        spaces = random.randint(1, 12)
        # 4.1 17. let /max_n/ be the largest integer not greater than
        # 4,294,967,295 divided by /spaces_n/.
        # BUGFIX: explicit floor division; identical on Python 2 but keeps
        # the intended integer result under "from __future__ import
        # division" or Python 3.
        maxnum = 4294967295 // spaces
        # 4.1 18. let /number_n/ be a random integer from 0 to /max_n/
        # inclusive.
        number = random.randint(0, maxnum)
        # 4.1 19. let /product_n/ be the result of multiplying /number_n/ and
        # /spaces_n/ together.
        product = number * spaces
        # 4.1 20. let /key_n/ be a string consisting of /product_n/, expressed
        # in base ten using the numerals in the range U+0030 DIGIT ZERO (0) to
        # U+0039 DIGIT NINE (9).
        key = str(product)
        # 4.1 21. insert between one and twelve random characters from the
        # range U+0021 to U+002F and U+003A to U+007E into /key_n/ at random
        # positions.
        available_chars = range(0x21, 0x2f + 1) + range(0x3a, 0x7e + 1)
        n = random.randint(1, 12)
        for _ in xrange(n):
            ch = random.choice(available_chars)
            pos = random.randint(0, len(key))
            key = key[0:pos] + chr(ch) + key[pos:]
        # 4.1 22. insert /spaces_n/ U+0020 SPACE characters into /key_n/ at
        # random positions other than start or end of the string.
        for _ in xrange(spaces):
            pos = random.randint(1, len(key) - 1)
            key = key[0:pos] + ' ' + key[pos:]
        return number, key
    def _generate_key3(self):
        # 4.1 26. let /key3/ be a string consisting of eight random bytes (or
        # equivalently, a random 64 bit integer encoded in a big-endian order).
        return ''.join([chr(random.randint(0, 255)) for _ in xrange(8)])
class WebSocketHixie75Handshake(object):
    """WebSocket handshake processor for IETF Hixie 75."""
    # Hixie 75 servers reply with this fixed Status-Line plus headers.
    _EXPECTED_RESPONSE = (
        'HTTP/1.1 101 Web Socket Protocol Handshake\r\n' +
        _UPGRADE_HEADER_HIXIE75 +
        _CONNECTION_HEADER)
    def __init__(self, options):
        self._logger = util.get_class_logger(self)
        self._options = options
    def _skip_headers(self):
        """Consume bytes until the CRLFCRLF terminating the header section.

        Small state machine: /pos/ tracks how many terminator bytes have
        been matched so far.
        """
        terminator = '\r\n\r\n'
        pos = 0
        while pos < len(terminator):
            received = receive_bytes(self._socket, 1)
            if received == terminator[pos]:
                pos += 1
            elif received == terminator[0]:
                # A stray CR restarts the match at offset 1.
                pos = 1
            else:
                pos = 0
    def handshake(self, socket):
        """Performs the Hixie 75 opening handshake on /socket/.

        Raises:
            Exception: the response did not start with the expected
                fixed Status-Line and headers.
        """
        self._socket = socket
        request_line = _method_line(self._options.resource)
        self._logger.debug('Opening handshake Request-Line: %r', request_line)
        self._socket.sendall(request_line)
        headers = _UPGRADE_HEADER_HIXIE75 + _CONNECTION_HEADER
        headers += _format_host_header(
            self._options.server_host,
            self._options.server_port,
            self._options.use_tls)
        headers += _origin_header(self._options.origin)
        self._logger.debug('Opening handshake request headers: %r', headers)
        self._socket.sendall(headers)
        self._socket.sendall('\r\n')
        self._logger.info('Sent opening handshake request')
        # Compare the response byte-by-byte against the fixed expectation.
        for expected_char in WebSocketHixie75Handshake._EXPECTED_RESPONSE:
            received = receive_bytes(self._socket, 1)
            if expected_char != received:
                raise Exception('Handshake failure')
        # We cut corners and skip other headers.
        self._skip_headers()
class WebSocketStream(object):
    """Frame processor for the WebSocket protocol (RFC 6455)."""
    def __init__(self, socket, handshake):
        self._handshake = handshake
        self._socket = socket
        # Filters applied to application data part of data frames.
        self._outgoing_frame_filter = None
        self._incoming_frame_filter = None
        if self._handshake._options.use_deflate_frame:
            self._outgoing_frame_filter = (
                util._RFC1979Deflater(None, False))
            self._incoming_frame_filter = util._RFC1979Inflater()
        # True while a fragmented message is in flight (a non-final frame
        # was sent but the final frame hasn't gone out yet).
        self._fragmented = False
    def _mask_hybi(self, s):
        """Returns a 4-byte masking nonce followed by /s/ XORed with it,
        as required for client-to-server frames (RFC 6455 section 5.3)."""
        # TODO(tyoshino): os.urandom does open/read/close for every call. If
        # performance matters, change this to some library call that generates
        # cryptographically secure pseudo random number sequence.
        masking_nonce = os.urandom(4)
        result = [masking_nonce]
        count = 0
        for c in s:
            result.append(chr(ord(c) ^ ord(masking_nonce[count])))
            count = (count + 1) % len(masking_nonce)
        return ''.join(result)
    def send_frame_of_arbitrary_bytes(self, header, body):
        # The caller-supplied header is sent verbatim; only the body is
        # masked. Used by tests that need to craft malformed frames.
        self._socket.sendall(header + self._mask_hybi(body))
    def send_data(self, payload, frame_type, end=True, mask=True,
                  rsv1=0, rsv2=0, rsv3=0):
        """Builds and sends one frame carrying /payload/.

        /frame_type/ becomes the opcode only for the first frame of a
        message; while fragmented, OPCODE_CONTINUATION is used instead.
        """
        if self._outgoing_frame_filter is not None:
            payload = self._outgoing_frame_filter.filter(payload)
        if self._fragmented:
            opcode = OPCODE_CONTINUATION
        else:
            opcode = frame_type
        if end:
            self._fragmented = False
            fin = 1
        else:
            self._fragmented = True
            fin = 0
        if self._handshake._options.use_deflate_frame:
            # deflate-frame signals compression through the RSV1 bit.
            rsv1 = 1
        if mask:
            mask_bit = 1 << 7
        else:
            mask_bit = 0
        header = chr(fin << 7 | rsv1 << 6 | rsv2 << 5 | rsv3 << 4 | opcode)
        payload_length = len(payload)
        # Length is encoded in 7 bits, 7+16 bits, or 7+64 bits
        # (RFC 6455 section 5.2).
        if payload_length <= 125:
            header += chr(mask_bit | payload_length)
        elif payload_length < 1 << 16:
            header += chr(mask_bit | 126) + struct.pack('!H', payload_length)
        elif payload_length < 1 << 63:
            header += chr(mask_bit | 127) + struct.pack('!Q', payload_length)
        else:
            raise Exception('Too long payload (%d byte)' % payload_length)
        if mask:
            payload = self._mask_hybi(payload)
        self._socket.sendall(header + payload)
    def send_binary(self, payload, end=True, mask=True):
        self.send_data(payload, OPCODE_BINARY, end, mask)
    def send_text(self, payload, end=True, mask=True):
        # Text payloads are UTF-8 encoded on the wire.
        self.send_data(payload.encode('utf-8'), OPCODE_TEXT, end, mask)
    def _assert_receive_data(self, payload, opcode, fin, rsv1, rsv2, rsv3):
        """Reads one frame and verifies every header field plus the payload
        against the expectations; raises Exception on any mismatch."""
        (actual_fin, actual_rsv1, actual_rsv2, actual_rsv3, actual_opcode,
         payload_length) = read_frame_header(self._socket)
        if actual_opcode != opcode:
            raise Exception(
                'Unexpected opcode: %d (expected) vs %d (actual)' %
                (opcode, actual_opcode))
        if actual_fin != fin:
            raise Exception(
                'Unexpected fin: %d (expected) vs %d (actual)' %
                (fin, actual_fin))
        # None means "whatever the current configuration implies".
        if rsv1 is None:
            rsv1 = 0
            if self._handshake._options.use_deflate_frame:
                rsv1 = 1
        if rsv2 is None:
            rsv2 = 0
        if rsv3 is None:
            rsv3 = 0
        if actual_rsv1 != rsv1:
            raise Exception(
                'Unexpected rsv1: %r (expected) vs %r (actual)' %
                (rsv1, actual_rsv1))
        if actual_rsv2 != rsv2:
            raise Exception(
                'Unexpected rsv2: %r (expected) vs %r (actual)' %
                (rsv2, actual_rsv2))
        if actual_rsv3 != rsv3:
            raise Exception(
                'Unexpected rsv3: %r (expected) vs %r (actual)' %
                (rsv3, actual_rsv3))
        received = receive_bytes(self._socket, payload_length)
        if self._incoming_frame_filter is not None:
            received = self._incoming_frame_filter.filter(received)
        if len(received) != len(payload):
            raise Exception(
                'Unexpected payload length: %d (expected) vs %d (actual)' %
                (len(payload), len(received)))
        if payload != received:
            raise Exception(
                'Unexpected payload: %r (expected) vs %r (actual)' %
                (payload, received))
    def assert_receive_binary(self, payload, opcode=OPCODE_BINARY, fin=1,
                              rsv1=None, rsv2=None, rsv3=None):
        self._assert_receive_data(payload, opcode, fin, rsv1, rsv2, rsv3)
    def assert_receive_text(self, payload, opcode=OPCODE_TEXT, fin=1,
                            rsv1=None, rsv2=None, rsv3=None):
        self._assert_receive_data(payload.encode('utf-8'), opcode, fin, rsv1,
                                  rsv2, rsv3)
    def _build_close_frame(self, code, reason, mask):
        """Returns the bytes of a close frame; code None means empty body."""
        frame = chr(1 << 7 | OPCODE_CLOSE)
        if code is not None:
            body = struct.pack('!H', code) + reason.encode('utf-8')
        else:
            body = ''
        if mask:
            frame += chr(1 << 7 | len(body)) + self._mask_hybi(body)
        else:
            frame += chr(len(body)) + body
        return frame
    def send_close(self, code, reason):
        self._socket.sendall(
            self._build_close_frame(code, reason, True))
    def assert_receive_close(self, code, reason):
        # Server close frames are unmasked, so we can compare the exact
        # byte image.
        expected_frame = self._build_close_frame(code, reason, False)
        actual_frame = receive_bytes(self._socket, len(expected_frame))
        if actual_frame != expected_frame:
            raise Exception(
                'Unexpected close frame: %r (expected) vs %r (actual)' %
                (expected_frame, actual_frame))
class WebSocketStreamHixie75(object):
    """Frame processor for the WebSocket protocol version Hixie 75 and HyBi 00.

    Text frames are 0x00 <utf-8 payload> 0xff; there is no masking and
    no binary frame support in these protocol versions.
    """
    _CLOSE_FRAME = '\xff\x00'
    def __init__(self, socket, unused_handshake):
        self._socket = socket
    def send_frame_of_arbitrary_bytes(self, header, body):
        # No masking in this protocol version; bytes go out verbatim.
        self._socket.sendall(header + body)
    def send_data(self, payload, unused_frame_type, unused_end, unused_mask):
        # BUGFIX: renamed the misspelled parameter 'unused_frame_typem'.
        # Callers (Client.send_message) pass these arguments positionally.
        frame = ''.join(['\x00', payload, '\xff'])
        self._socket.sendall(frame)
    def send_binary(self, unused_payload, unused_end, unused_mask):
        # Binary frames do not exist in hixie75; kept as a silent no-op so
        # the shared Client code can call it unconditionally.
        pass
    def send_text(self, payload, unused_end, unused_mask):
        encoded_payload = payload.encode('utf-8')
        frame = ''.join(['\x00', encoded_payload, '\xff'])
        self._socket.sendall(frame)
    def assert_receive_binary(self, payload, opcode=OPCODE_BINARY, fin=1,
                              rsv1=0, rsv2=0, rsv3=0):
        raise Exception('Binary frame is not supported in hixie75')
    def assert_receive_text(self, payload):
        """Reads one text frame and checks it carries exactly /payload/."""
        received = receive_bytes(self._socket, 1)
        if received != '\x00':
            raise Exception(
                'Unexpected frame type: %d (expected) vs %d (actual)' %
                (0, ord(received)))
        received = receive_bytes(self._socket, len(payload) + 1)
        if received[-1] != '\xff':
            raise Exception(
                'Termination expected: 0xff (expected) vs %r (actual)' %
                received)
        if received[0:-1] != payload:
            raise Exception(
                'Unexpected payload: %r (expected) vs %r (actual)' %
                (payload, received[0:-1]))
    def send_close(self, code, reason):
        # code/reason cannot be transmitted in this protocol version.
        self._socket.sendall(self._CLOSE_FRAME)
    def assert_receive_close(self, unused_code, unused_reason):
        closing = receive_bytes(self._socket, len(self._CLOSE_FRAME))
        if closing != self._CLOSE_FRAME:
            raise Exception('Didn\'t receive closing handshake')
class ClientOptions(object):
    """Mutable bag of settings consumed by Client and the handshake classes."""
    def __init__(self):
        # Protocol version (13 == RFC 6455).
        self.version = 13
        # Connection target and transport settings.
        self.server_host = ''
        self.server_port = -1
        self.use_tls = False
        self.socket_timeout = 1000
        # Opening handshake request parameters.
        self.origin = ''
        self.resource = ''
        # Extension negotiation state.
        self.extensions = []
        self.use_deflate_frame = False  # deflate-application-data
        self.use_mux = False            # multiplexing
    def enable_deflate_frame(self):
        """Request the deflate-frame extension during the handshake."""
        self.use_deflate_frame = True
        self.extensions.append(_DEFLATE_FRAME_EXTENSION)
    def enable_mux(self):
        """Request the mux extension during the handshake."""
        self.use_mux = True
        self.extensions.append(_MUX_EXTENSION)
def connect_socket_with_retry(host, port, timeout, use_tls,
                              retry=10, sleep_sec=0.1):
    """Connects to (host, port), retrying while the connection is refused.

    Returns a connected socket (wrapped in _TLSSocket when use_tls is
    set), or None after /retry/ consecutive ECONNREFUSED failures.
    """
    retry_count = 0
    while retry_count < retry:
        try:
            s = socket.socket()
            s.settimeout(timeout)
            s.connect((host, port))
            if use_tls:
                return _TLSSocket(s)
            return s
        except socket.error, e:
            # Only ECONNREFUSED is retried (server not accepting yet);
            # any other socket error propagates to the caller.
            if e.errno != errno.ECONNREFUSED:
                raise
            else:
                retry_count = retry_count + 1
                time.sleep(sleep_sec)
    return None
class Client(object):
    """WebSocket client.

    Composition of a handshake processor (protocol-version specific) and
    a stream class that handles framing after the handshake.
    """
    def __init__(self, options, handshake, stream_class):
        self._logger = util.get_class_logger(self)
        self._options = options
        self._socket = None
        self._handshake = handshake
        self._stream_class = stream_class
    def connect(self):
        """Connects, runs the opening handshake and sets up the stream."""
        self._socket = connect_socket_with_retry(
            self._options.server_host,
            self._options.server_port,
            self._options.socket_timeout,
            self._options.use_tls)
        self._handshake.handshake(self._socket)
        self._stream = self._stream_class(self._socket, self._handshake)
        self._logger.info('Connection established')
    def send_frame_of_arbitrary_bytes(self, header, body):
        self._stream.send_frame_of_arbitrary_bytes(header, body)
    def send_message(self, message, end=True, binary=False, raw=False,
                     mask=True):
        # raw=True sends the payload as a text frame without UTF-8
        # re-encoding; binary=True sends a binary frame.
        if binary:
            self._stream.send_binary(message, end, mask)
        elif raw:
            self._stream.send_data(message, OPCODE_TEXT, end, mask)
        else:
            self._stream.send_text(message, end, mask)
    def assert_receive(self, payload, binary=False):
        if binary:
            self._stream.assert_receive_binary(payload)
        else:
            self._stream.assert_receive_text(payload)
    def send_close(self, code=STATUS_NORMAL_CLOSURE, reason=''):
        self._stream.send_close(code, reason)
    def assert_receive_close(self, code=STATUS_NORMAL_CLOSURE, reason=''):
        self._stream.assert_receive_close(code, reason)
    def close_socket(self):
        self._socket.close()
    def assert_connection_closed(self):
        """Checks that the server closed the TCP connection.

        Both a clean EOF (receive_bytes raising its "Connection closed
        before receiving requested length" message) and a connection
        reset (ECONNRESET / WSAECONNRESET) count as closed; anything
        else — including successfully reading a byte — is a failure.
        """
        try:
            read_data = receive_bytes(self._socket, 1)
        except Exception, e:
            if str(e).find(
                'Connection closed before receiving requested length ') == 0:
                return
            try:
                # Python 2 socket.error instances unpack to
                # (errno, message); non-socket exceptions will raise here.
                error_number, message = e
                for error_name in ['ECONNRESET', 'WSAECONNRESET']:
                    if (error_name in dir(errno) and
                        error_number == getattr(errno, error_name)):
                        return
            except:
                # Not an (errno, message) pair; re-raise the original.
                raise e
            raise e
        raise Exception('Connection is not closed (Read: %r)' % read_data)
def create_client(options):
    """Builds a Client speaking the RFC 6455 (HyBi 13) protocol."""
    handshake = WebSocketHandshake(options)
    return Client(options, handshake, WebSocketStream)
def create_client_hybi00(options):
    """Builds a Client speaking the HyBi 00 draft protocol."""
    handshake = WebSocketHybi00Handshake(options, '0')
    return Client(options, handshake, WebSocketStreamHixie75)
def create_client_hixie75(options):
    """Builds a Client speaking the Hixie 75 draft protocol."""
    handshake = WebSocketHixie75Handshake(options)
    return Client(options, handshake, WebSocketStreamHixie75)
# vi:sts=4 sw=4 et
| mpl-2.0 |
lunafeng/django | django/conf/locale/fi/formats.py | 504 | 1390 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. E Y'
# Finnish convention separates hours and minutes with a period, not a colon.
TIME_FORMAT = 'G.i'
# The escaped letters render the literal word "kello" ("at ... o'clock").
DATETIME_FORMAT = r'j. E Y \k\e\l\l\o G.i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.n.Y'
SHORT_DATETIME_FORMAT = 'j.n.Y G.i'
FIRST_DAY_OF_WEEK = 1  # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
    '%d.%m.%Y',  # '20.3.2014'
    '%d.%m.%y',  # '20.3.14'
]
DATETIME_INPUT_FORMATS = [
    '%d.%m.%Y %H.%M.%S',  # '20.3.2014 14.30.59'
    '%d.%m.%Y %H.%M.%S.%f',  # '20.3.2014 14.30.59.000200'
    '%d.%m.%Y %H.%M',  # '20.3.2014 14.30'
    '%d.%m.%Y',  # '20.3.2014'
    '%d.%m.%y %H.%M.%S',  # '20.3.14 14.30.59'
    '%d.%m.%y %H.%M.%S.%f',  # '20.3.14 14.30.59.000200'
    '%d.%m.%y %H.%M',  # '20.3.14 14.30'
    '%d.%m.%y',  # '20.3.14'
]
TIME_INPUT_FORMATS = [
    '%H.%M.%S',  # '14.30.59'
    '%H.%M.%S.%f',  # '14.30.59.000200'
    '%H.%M',  # '14.30'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0'  # Non-breaking space
NUMBER_GROUPING = 3
| bsd-3-clause |
huggingface/transformers | tests/test_tokenization_bert_generation.py | 1 | 9497 | # coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from transformers import BertGenerationTokenizer
from transformers.file_utils import cached_property
from transformers.testing_utils import require_sentencepiece, require_torch, slow
from .test_tokenization_common import TokenizerTesterMixin
# SentencePiece marks word boundaries with U+2581 LOWER ONE EIGHTH BLOCK.
SPIECE_UNDERLINE = "▁"
# Tiny 1000-piece SentencePiece model checked into the test fixtures.
SAMPLE_VOCAB = os.path.join(os.path.dirname(os.path.abspath(__file__)), "fixtures/test_sentencepiece.model")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = BertGenerationTokenizer
test_rust_tokenizer = False
test_sentencepiece = True
    def setUp(self):
        """Build a tokenizer from the fixture model and save it to the
        temp dir so the common mixin tests can reload it from disk."""
        super().setUp()
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def test_convert_token_and_id(self):
"""Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
token = "<s>"
token_id = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def test_get_vocab(self):
vocab_keys = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0], "<unk>")
self.assertEqual(vocab_keys[1], "<s>")
self.assertEqual(vocab_keys[-1], "<pad>")
self.assertEqual(len(vocab_keys), 1_002)
    def test_vocab_size(self):
        """The fixture SentencePiece model declares exactly 1000 pieces."""
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)
    def test_full_tokenizer(self):
        """End-to-end tokenize/encode/decode checks against the fixture
        SentencePiece model, including unknown-token handling."""
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        # "9" and "é" are not in the 1000-piece fixture vocab: both map
        # to id 0 (the unknown token).
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )
        # Converting back renders the unknown ids as the "<unk>" token.
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
    @cached_property
    def big_tokenizer(self):
        """Full pretrained tokenizer, downloaded once per test class."""
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
@slow
def test_tokenization_base_easy_symbols(self):
symbols = "Hello World!"
original_tokenizer_encodings = [18536, 2260, 101]
self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def test_tokenization_base_hard_symbols(self):
symbols = 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        """Smoke test: tokenizer tensor output feeds a randomly initialized
        BertGenerationEncoder without shape or dtype errors."""
        import torch
        from transformers import BertGenerationConfig, BertGenerationEncoder
        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )
        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)
        # The embedding table must cover every id the tokenizer can emit.
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
def test_tokenizer_integration(self):
    """End-to-end check of encode output (ids + attention masks) against pinned values for the checkpoint."""
    # fmt: off
    expected_encoding = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
    # fmt: on

    self.tokenizer_integration_test_util(
        expected_encoding=expected_encoding,
        model_name="google/bert_for_seq_generation_L-24_bbc_encoder",
        revision="c817d1fd1be2ffa69431227a1fe320544943d4db",
    )
| apache-2.0 |
release-engineering/releng-sop | tests/test_release_data.py | 1 | 3981 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Tests for configuration data.
"""
from __future__ import unicode_literals
import unittest
import os
import sys
DIR = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(DIR, ".."))
from releng_sop.common import Environment, Release, ConfigError # noqa: E402
RELEASES_DIR = os.path.join(DIR, "releases")
ENVIRONMENTS_DIR = os.path.join(DIR, "environments")
class TestReleaseData(unittest.TestCase):
    """Test release configuration data found in RELEASES_DIR."""

    longMessage = True

    def _get_releases(self):
        """Return the release ids for every ``*.json`` file in RELEASES_DIR."""
        return [fn[:-5] for fn in os.listdir(RELEASES_DIR) if fn.endswith(".json")]

    def test_releases(self):
        """Read all json files from RELEASES_DIR, and check data structure."""
        for release_id in self._get_releases():
            details = "\n\nrelease_id: %s" % release_id
            release = Release(release_id, config_dirs=[RELEASES_DIR])

            # Top-level sections; "scls" is optional and may be absent.
            top_level = ["distgit", "koji", "scls", "signing"]
            actual = sorted(release)
            if "scls" not in actual:
                top_level.remove("scls")
            self.assertEqual(actual, top_level, details)

            # Per-section keys.
            self.assertEqual(sorted(release["distgit"]), ["branch"], details)

            koji_keys = [
                "tag_bootstrap",
                "tag_build",
                "tag_buildrequires",
                "tag_candidate",
                "tag_compose",
                "tag_override",
                "tag_pending",
                "tag_release",
                "tag_temp_override",
                "target",
            ]
            self.assertEqual(sorted(release["koji"]), koji_keys, details)

            self.assertEqual(
                sorted(release["signing"]), ["sigkey_beta", "sigkey_gold"], details)
class TestEnvironmentData(unittest.TestCase):
    """Test environment configuration data found in ENVIRONMENTS_DIR."""

    longMessage = True

    def _get_environments(self):
        """Return the environment ids for every ``*.json`` file in ENVIRONMENTS_DIR."""
        return [fn[:-5] for fn in os.listdir(ENVIRONMENTS_DIR) if fn.endswith(".json")]

    def test_environments(self):
        """Read all json files from ENVIRONMENTS_DIR, and check data structure."""
        expected = [
            "distgit_server",
            "koji_profile",
            "pdc_server",
            "pulp_server",
            "rpmsign_class",
        ]
        for env_id in self._get_environments():
            env = Environment(env_id, config_dirs=[ENVIRONMENTS_DIR])
            self.assertEqual(sorted(env), expected, "\n\nenv_id: %s" % env_id)
class TestConfigDataNotFound(unittest.TestCase):
    """ConfigError raised if config data is not found."""

    def test_release_not_found(self):
        """Raise ConfigError when release data is not found."""
        with self.assertRaises(ConfigError):
            Release("no-such-release", config_dirs=[RELEASES_DIR])

    def test_environment_not_found(self):
        """Raise ConfigError when environment configuration is not found."""
        with self.assertRaises(ConfigError):
            Environment("no-such-environment", config_dirs=[ENVIRONMENTS_DIR])
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| mit |
nicolargo/intellij-community | plugins/hg4idea/testData/bin/mercurial/archival.py | 94 | 10394 | # archival.py - revision archival for mercurial
#
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from i18n import _
from node import hex
import match as matchmod
import cmdutil
import scmutil, util, encoding
import cStringIO, os, tarfile, time, zipfile
import zlib, gzip
import struct
import error
# from unzip source code:
_UNX_IFREG = 0x8000
_UNX_IFLNK = 0xa000
def tidyprefix(dest, kind, prefix):
    '''choose prefix to use for names in archive. make sure prefix is
    safe for consumers.'''
    if prefix:
        prefix = util.normpath(prefix)
    else:
        if not isinstance(dest, str):
            raise ValueError('dest must be string if no prefix')
        # Default the prefix to the archive file name minus its extension.
        prefix = os.path.basename(dest)
        lower = prefix.lower()
        for sfx in exts.get(kind, []):
            if lower.endswith(sfx):
                prefix = prefix[:-len(sfx)]
                break
    lpfx = os.path.normpath(util.localpath(prefix))
    prefix = util.pconvert(lpfx)
    if not prefix.endswith('/'):
        prefix += '/'
    # Refuse prefixes that could let entries escape the extraction directory.
    if prefix.startswith('../') or os.path.isabs(lpfx) or '/../' in prefix:
        raise util.Abort(_('archive prefix contains illegal components'))
    return prefix
# Archive kind -> file name extensions stripped when deriving the default prefix.
exts = {
    'tar': ['.tar'],
    'tbz2': ['.tbz2', '.tar.bz2'],
    'tgz': ['.tgz', '.tar.gz'],
    'zip': ['.zip'],
    }
def guesskind(dest):
    # Infer the archive kind from the destination file name extension,
    # or None when no known extension matches.
    for kind, extensions in exts.iteritems():
        if util.any(dest.endswith(ext) for ext in extensions):
            return kind
    return None
class tarit(object):
    '''write archive to tar file or stream. can write uncompressed,
    or compress with gzip or bzip2.'''

    class GzipFileWithTime(gzip.GzipFile):
        # GzipFile subclass that lets the caller control the timestamp
        # written into the gzip header (for reproducible archives).

        def __init__(self, *args, **kw):
            timestamp = None
            if 'timestamp' in kw:
                timestamp = kw.pop('timestamp')
            if timestamp is None:
                self.timestamp = time.time()
            else:
                self.timestamp = timestamp
            gzip.GzipFile.__init__(self, *args, **kw)

        def _write_gzip_header(self):
            self.fileobj.write('\037\213')  # magic header
            self.fileobj.write('\010')  # compression method
            # Python 2.6 introduced self.name and deprecated self.filename
            try:
                fname = self.name
            except AttributeError:
                fname = self.filename
            if fname and fname.endswith('.gz'):
                fname = fname[:-3]
            flags = 0
            if fname:
                flags = gzip.FNAME
            self.fileobj.write(chr(flags))
            # Use the caller-supplied timestamp instead of "now".
            gzip.write32u(self.fileobj, long(self.timestamp))
            self.fileobj.write('\002')
            self.fileobj.write('\377')
            if fname:
                self.fileobj.write(fname + '\000')

    def __init__(self, dest, mtime, kind=''):
        self.mtime = mtime
        self.fileobj = None

        def taropen(name, mode, fileobj=None):
            if kind == 'gz':
                # Wrap the destination in our timestamp-aware gzip stream.
                mode = mode[0]
                if not fileobj:
                    fileobj = open(name, mode + 'b')
                gzfileobj = self.GzipFileWithTime(name, mode + 'b',
                                                  zlib.Z_BEST_COMPRESSION,
                                                  fileobj, timestamp=mtime)
                self.fileobj = gzfileobj
                return tarfile.TarFile.taropen(name, mode, gzfileobj)
            else:
                return tarfile.open(name, mode + kind, fileobj)

        if isinstance(dest, str):
            self.z = taropen(dest, mode='w:')
        else:
            # Python 2.5-2.5.1 have a regression that requires a name arg
            self.z = taropen(name='', mode='w|', fileobj=dest)

    def addfile(self, name, mode, islink, data):
        i = tarfile.TarInfo(name)
        i.mtime = self.mtime
        i.size = len(data)
        if islink:
            # Symlinks store their target in linkname and carry no payload.
            i.type = tarfile.SYMTYPE
            i.mode = 0777
            i.linkname = data
            data = None
            i.size = 0
        else:
            i.mode = mode
            data = cStringIO.StringIO(data)
        self.z.addfile(i, data)

    def done(self):
        self.z.close()
        if self.fileobj:
            self.fileobj.close()
class tellable(object):
    '''provide tell method for zipfile.ZipFile when writing to http
    response file object.'''

    def __init__(self, fp):
        self.fp = fp
        self.offset = 0  # number of bytes written so far

    def __getattr__(self, key):
        # Delegate every other attribute to the wrapped file object.
        return getattr(self.fp, key)

    def write(self, s):
        self.fp.write(s)
        self.offset += len(s)

    def tell(self):
        return self.offset
class zipit(object):
    '''write archive to zip file or stream. can write uncompressed,
    or compressed with deflate.'''

    def __init__(self, dest, mtime, compress=True):
        if not isinstance(dest, str):
            # ZipFile needs tell(); wrap stream-like destinations that lack it.
            try:
                dest.tell()
            except (AttributeError, IOError):
                dest = tellable(dest)
        self.z = zipfile.ZipFile(dest, 'w',
                                 compress and zipfile.ZIP_DEFLATED or
                                 zipfile.ZIP_STORED)

        # Python's zipfile module emits deprecation warnings if we try
        # to store files with a date before 1980.
        epoch = 315532800  # calendar.timegm((1980, 1, 1, 0, 0, 0, 1, 1, 0))
        if mtime < epoch:
            mtime = epoch

        self.mtime = mtime
        self.date_time = time.gmtime(mtime)[:6]

    def addfile(self, name, mode, islink, data):
        i = zipfile.ZipInfo(name, self.date_time)
        i.compress_type = self.z.compression
        # unzip will not honor unix file modes unless file creator is
        # set to unix (id 3).
        i.create_system = 3
        ftype = _UNX_IFREG
        if islink:
            mode = 0777
            ftype = _UNX_IFLNK
        # Unix mode/type bits live in the high 16 bits of external_attr.
        i.external_attr = (mode | ftype) << 16L
        # add "extended-timestamp" extra block, because zip archives
        # without this will be extracted with unexpected timestamp,
        # if TZ is not configured as GMT
        i.extra += struct.pack('<hhBl',
                               0x5455,  # block type: "extended-timestamp"
                               1 + 4,  # size of this block
                               1,  # "modification time is present"
                               int(self.mtime))  # last modification (UTC)
        self.z.writestr(i, data)

    def done(self):
        self.z.close()
class fileit(object):
    '''write archive as files in directory.'''

    def __init__(self, name, mtime):
        self.basedir = name
        self.opener = scmutil.opener(self.basedir)

    def addfile(self, name, mode, islink, data):
        if islink:
            self.opener.symlink(data, name)
            return
        f = self.opener(name, "w", atomictemp=True)
        f.write(data)
        f.close()
        # The opener does not set modes; apply the requested one explicitly.
        destfile = os.path.join(self.basedir, name)
        os.chmod(destfile, mode)

    def done(self):
        pass
# Archive kind -> factory taking (dest, mtime); "uzip" is an uncompressed zip.
archivers = {
    'files': fileit,
    'tar': tarit,
    'tbz2': lambda name, mtime: tarit(name, mtime, 'bz2'),
    'tgz': lambda name, mtime: tarit(name, mtime, 'gz'),
    'uzip': lambda name, mtime: zipit(name, mtime, False),
    'zip': zipit,
    }
def archive(repo, dest, node, kind, decode=True, matchfn=None,
            prefix=None, mtime=None, subrepos=False):
    '''create archive of repo as it was at node.

    dest can be name of directory, name of archive file, or file
    object to write archive to.

    kind is type of archive to create.

    decode tells whether to put files through decode filters from
    hgrc.

    matchfn is function to filter names of files to write to archive.

    prefix is name of path to put before every archive member.

    Returns the number of files written; raises error.Abort when no
    file matched.'''

    if kind == 'files':
        if prefix:
            raise util.Abort(_('cannot give prefix when archiving to files'))
    else:
        prefix = tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    if kind not in archivers:
        raise util.Abort(_("unknown archive type '%s'") % kind)

    ctx = repo[node]
    archiver = archivers[kind](dest, mtime or ctx.date()[0])

    # Unless disabled, add a .hg_archival.txt metadata file describing
    # the archived revision (repo id, node, branch, tag info).
    if repo.ui.configbool("ui", "archivemeta", True):
        def metadata():
            base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
                repo[0].hex(), hex(node), encoding.fromlocal(ctx.branch()))

            tags = ''.join('tag: %s\n' % t for t in ctx.tags()
                           if repo.tagtype(t) == 'global')
            if not tags:
                # No global tag on this revision: record the latest tag and
                # its distance instead, via the template engine.
                repo.ui.pushbuffer()
                opts = {'template': '{latesttag}\n{latesttagdistance}',
                        'style': '', 'patch': None, 'git': None}
                cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
                ltags, dist = repo.ui.popbuffer().split('\n')
                tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
                tags += 'latesttagdistance: %s\n' % dist

            return base + tags

        name = '.hg_archival.txt'
        if not matchfn or matchfn(name):
            write(name, 0644, False, metadata)

    if matchfn:
        files = [f for f in ctx.manifest().keys() if matchfn(f)]
    else:
        files = ctx.manifest().keys()
    total = len(files)
    if total:
        files.sort()
        repo.ui.progress(_('archiving'), 0, unit=_('files'), total=total)
        for i, f in enumerate(files):
            ff = ctx.flags(f)
            # 'x' flag -> executable mode; 'l' flag -> symlink.
            write(f, 'x' in ff and 0755 or 0644, 'l' in ff, ctx[f].data)
            repo.ui.progress(_('archiving'), i + 1, item=f,
                             unit=_('files'), total=total)
        repo.ui.progress(_('archiving'), None)

    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.sub(subpath)
            submatch = matchmod.narrowmatcher(subpath, matchfn)
            total += sub.archive(repo.ui, archiver, prefix, submatch)

    if total == 0:
        raise error.Abort(_('no files match the archive pattern'))

    archiver.done()
    return total
| apache-2.0 |
sharkykh/SickRage | sickrage/system/Restart.py | 8 | 1104 | # coding=utf-8
# This file is part of SickRage.
#
# URL: https://sickrage.github.io
# Git: https://github.com/SickRage/SickRage.git
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import sickbeard
from sickbeard.event_queue import Events
class Restart(object):
    """Namespace object used to trigger an application restart."""

    def __init__(self):
        pass

    @staticmethod
    def restart(pid):
        """Queue a RESTART event if ``pid`` matches the running SickRage PID.

        :return: True when the restart event was queued, False otherwise.
        """
        if str(pid) == str(sickbeard.PID):
            sickbeard.events.put(Events.SystemEvent.RESTART)
            return True
        return False
| gpl-3.0 |
bigbrozer/license_manager_plugins | backend/lstc.py | 1 | 3311 | #===============================================================================
# -*- coding: UTF-8 -*-
# Module : backend.lstc
# Author : Vincent BESANCON aka 'v!nZ' <besancon.vincent@gmail.com>
# Description : Utility functions to get info from LSTC (LS-Dyna) license server.
#-------------------------------------------------------------------------------
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#===============================================================================
# TODO: Check how to group status() and expiration() as a single function.
import re
import subprocess
from nagios.errorlevels import NagiosOk
# Plugin configuration
import config
#-------------------------------------------------------------------------------
# Exceptions
#-------------------------------------------------------------------------------
class LstcStatusError(Exception):
    """Exception raised when lstc_qrun encounters an error.

    Attributes:
        errmsg: human-readable error message extracted from lstc_qrun output.
        retcode: exit status of the lstc_qrun process.
        license: the license server specification that was queried.
    """

    def __init__(self, error_msg, retcode, license):
        # Pass the message to Exception.__init__ so str(e) and e.args are
        # populated (the previous implementation left them empty).
        # Parameter name "license" is kept for backward compatibility even
        # though it shadows the builtin.
        super(LstcStatusError, self).__init__(error_msg)
        self.errmsg = error_msg
        self.retcode = retcode
        self.license = license
#-------------------------------------------------------------------------------
# Lstc related
#-------------------------------------------------------------------------------
def status(license_port):
    """Execute a 'lstc_qrun -s' command using lstc_qrun on a remote server"""
    cmdline = [config.LSTCQRUN_PATH, "-s", license_port]
    # stderr is merged into stdout so the error scan below sees both streams.
    cmd = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    cmd_output = cmd.communicate()[0]

    # Warn for error message if any
    error_pattern = re.compile('.*ERROR (.*)')
    error_match = error_pattern.search(cmd_output)
    if error_match:
        error_message = error_match.group(1)
        raise LstcStatusError(error_message, cmd.returncode, license_port)

    # Check return code
    # NOTE(review): a zero exit apparently means "nothing running or queued";
    # that state is signalled by raising NagiosOk (plugin-style control flow).
    if cmd.returncode == 0:
        raise NagiosOk("There is no program running or queued.")

    return cmd_output.split('\n')
def expiration(license_port):
    """Execute a 'lstc_qrun -r -s' command using lstc_qrun on a remote server"""
    cmdline = [config.LSTCQRUN_PATH, "-r", "-s", license_port]
    cmd = subprocess.Popen(cmdline, stdout=subprocess.PIPE)
    cmd_output = cmd.communicate()[0].split('\n')

    # Check return code
    if cmd.returncode != 0:
        # Get error message from the last output line, falling back to a
        # generic message when it does not match the ERROR pattern.
        error_pattern = re.compile('.*ERROR (.*)')
        error_match = error_pattern.search(cmd_output[-1])
        if error_match: error_message = error_match.group(1).title()
        else: error_message = "License server not available !"
        raise LstcStatusError(error_message, cmd.returncode, license_port)

    return cmd_output
| gpl-3.0 |
demonchild2112/travis-test | grr/core/grr_response_core/lib/util/compat/yaml_test.py | 2 | 5906 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import collections
import io
from absl.testing import absltest
from typing import Text
from grr_response_core.lib.util.compat import yaml # pylint: disable=g-import-not-at-top
# TODO: Add tests with 4-byte unicode characters once we switch to
# a proper YAML library.
class ParseTest(absltest.TestCase):
  """Tests for `yaml.Parse` (single-document parsing)."""

  def testSimpleDict(self):
    parsed = yaml.Parse("{ 'foo': 'bar', 'quux': 42 }")
    expected = {"foo": "bar", "quux": 42}
    self.assertEqual(parsed, expected)

  def testComplexDict(self):
    parsed = yaml.Parse("""
foo.bar:
  quux: [4, 8, 15, 16, 23, 42]
  thud:
  - blargh
  - norf
foo.baz:
- 3.14
- 1.62
""")
    expected = {
        "foo.bar": {
            "quux": [4, 8, 15, 16, 23, 42],
            "thud": ["blargh", "norf"],
        },
        "foo.baz": [3.14, 1.62],
    }
    self.assertEqual(parsed, expected)

  def testUnicode(self):
    # Non-ASCII keys and values must round-trip as unicode text.
    parsed = yaml.Parse("""
gęsi:
- zbożowa
- krótkodzioba
- białoczelna
grzebiące:
- jarząbek
- głuszec
- bażant
""")
    expected = {
        "gęsi": ["zbożowa", "krótkodzioba", "białoczelna"],
        "grzebiące": ["jarząbek", "głuszec", "bażant"],
    }
    self.assertEqual(parsed, expected)

  def testUnicodeTags(self):
    # Explicit !!python/unicode tags must also yield plain unicode strings.
    parsed = yaml.Parse("""
!!python/unicode żółć: !!python/unicode jaźń
!!python/unicode kość: !!python/unicode łoś
""")
    expected = {
        "żółć": "jaźń",
        "kość": "łoś",
    }
    self.assertEqual(parsed, expected)

  def testStringsAreUnicodeObjects(self):
    self.assertIsInstance(yaml.Parse("\"foo\""), Text)
class ParseManyTest(absltest.TestCase):
  """Tests for `yaml.ParseMany` (multi-document parsing)."""

  def testMultipleDicts(self):
    parsed = yaml.ParseMany("""
foo: 42
bar: 108
---
quux: norf
thud: blargh
""")
    expected = [
        {
            "foo": 42,
            "bar": 108,
        },
        {
            "quux": "norf",
            "thud": "blargh",
        },
    ]
    self.assertEqual(parsed, expected)

  def testUnicode(self):
    parsed = yaml.ParseMany("""
gąszcz: żuk
---
gęstwina: chrabąszcz
""")
    expected = [
        {
            "gąszcz": "żuk"
        },
        {
            "gęstwina": "chrabąszcz"
        },
    ]
    self.assertEqual(parsed, expected)
class ReadFromFileTest(absltest.TestCase):
  """Tests for `yaml.ReadFromFile` (parsing from a file-like object)."""

  def testSimple(self):
    buf = io.StringIO("""
foo: bar
""")
    expected = {
        "foo": "bar",
    }
    self.assertEqual(yaml.ReadFromFile(buf), expected)

  def testUnicode(self):
    buf = io.StringIO("['Ł', 'Ż', 'Ź', 'Ó']")
    self.assertEqual(yaml.ReadFromFile(buf), ["Ł", "Ż", "Ź", "Ó"])
class ReadManyFromFileTest(absltest.TestCase):
  """Tests for `yaml.ReadManyFromFile` (multi-document file parsing)."""

  def testSimple(self):
    buf = io.StringIO("""
foo: bar
---
quux: norf
---
thud: blargh
""")
    expected = [
        {
            "foo": "bar",
        },
        {
            "quux": "norf",
        },
        {
            "thud": "blargh",
        },
    ]
    self.assertEqual(yaml.ReadManyFromFile(buf), expected)

  def testUnicode(self):
    buf = io.StringIO("""
- Ą
- Ę
---
- Ś
- Ć
""")
    self.assertEqual(yaml.ReadManyFromFile(buf), [["Ą", "Ę"], ["Ś", "Ć"]])
class DumpTest(absltest.TestCase):
  """Tests for `yaml.Dump` (single-document serialization)."""

  def testSimpleDict(self):
    dumped = yaml.Dump({
        "foo": "bar",
        "quux": 42,
    })
    expected = """\
foo: bar
quux: 42
"""
    self.assertEqual(dumped, expected)

  def testComplexDict(self):
    dumped = yaml.Dump({
        "foo.bar": {
            "quux": [4, 8, 15, 16, 23, 42],
            "thud": ["blargh", "norf"],
        },
        "foo.baz": [3.14, 1.62],
    })
    expected = """\
foo.bar:
  quux:
  - 4
  - 8
  - 15
  - 16
  - 23
  - 42
  thud:
  - blargh
  - norf
foo.baz:
- 3.14
- 1.62
"""
    self.assertEqual(dumped, expected)

  def testUnicode(self):
    # OrderedDict pins key order so the expected output is deterministic.
    data = collections.OrderedDict()
    data["gęsi"] = ["zbożowa", "krótkodzioba", "białoczelna"]
    data["grzebiące"] = ["jarząbek", "głuszec", "bażant"]
    dumped = yaml.Dump(data)
    expected = """\
gęsi:
- zbożowa
- krótkodzioba
- białoczelna
grzebiące:
- jarząbek
- głuszec
- bażant
"""
    self.assertEqual(dumped, expected)
class DumpManyTest(absltest.TestCase):
  """Tests for `yaml.DumpMany` (multi-document serialization)."""

  def testMultipleDicts(self):
    dumped = yaml.DumpMany([
        collections.OrderedDict([("foo", 42), ("bar", 108)]),
        collections.OrderedDict([("quux", "norf"), ("thud", "blargh")]),
    ])
    expected = """\
foo: 42
bar: 108
---
quux: norf
thud: blargh
"""
    self.assertEqual(dumped, expected)

  def testUnicode(self):
    dumped = yaml.DumpMany([
        {
            "gąszcz": "żuk"
        },
        {
            "gęstwina": "chrabąszcz"
        },
    ])
    expected = """\
gąszcz: żuk
---
gęstwina: chrabąszcz
"""
    self.assertEqual(dumped, expected)
class WriteToFileTest(absltest.TestCase):
  """Tests for `yaml.WriteToFile` (serialization into a file-like object)."""

  def testSimple(self):
    buf = io.StringIO()
    yaml.WriteToFile(["foo", "bar", "baz"], buf)
    expected = """\
- foo
- bar
- baz
"""
    self.assertEqual(buf.getvalue(), expected)

  def testUnicode(self):
    buf = io.StringIO()
    yaml.WriteToFile({"śpiączka": "własność"}, buf)
    expected = """\
śpiączka: własność
"""
    self.assertEqual(buf.getvalue(), expected)
class WriteManyToFileTest(absltest.TestCase):
  """Tests for `yaml.WriteManyToFile` (multi-document file serialization)."""

  def testSimple(self):
    buf = io.StringIO()
    yaml.WriteManyToFile([["foo", "bar"], ["quux", "norf"]], buf)
    expected = """\
- foo
- bar
---
- quux
- norf
"""
    self.assertEqual(buf.getvalue(), expected)

  def testUnicode(self):
    buf = io.StringIO()
    yaml.WriteManyToFile([{"żałość": "nędza"}, {"ból": "udręka"}], buf)
    expected = """\
żałość: nędza
---
ból: udręka
"""
    self.assertEqual(buf.getvalue(), expected)
if __name__ == "__main__":
  # Run under the absl test runner when executed directly.
  absltest.main()
| apache-2.0 |
vfulco/ansible | lib/ansible/cli/vault.py | 34 | 4763 | # (c) 2014, James Tanner <tanner.jc@gmail.com>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# ansible-vault is a script that encrypts/decrypts YAML files. See
# http://docs.ansible.com/playbooks_vault.html for more details.
import os
import sys
import traceback
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.parsing.vault import VaultEditor
from ansible.cli import CLI
from ansible.utils.display import Display
class VaultCLI(CLI):
    """ Vault command line class """

    VALID_ACTIONS = ("create", "decrypt", "edit", "encrypt", "rekey", "view")

    # Default cipher used when the options object does not supply one.
    CIPHER = 'AES256'

    def __init__(self, args, display=None):

        self.vault_pass = None
        super(VaultCLI, self).__init__(args, display)

    def parse(self):
        # Build the option parser, select the action and validate arguments.
        self.parser = CLI.base_parser(
            vault_opts=True,
            usage = "usage: %%prog [%s] [--help] [options] vaultfile.yml" % "|".join(self.VALID_ACTIONS),
            epilog = "\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
        )

        self.set_action()

        # options specific to self.actions
        if self.action == "create":
            self.parser.set_usage("usage: %prog create [options] file_name")
        elif self.action == "decrypt":
            self.parser.set_usage("usage: %prog decrypt [options] file_name")
        elif self.action == "edit":
            self.parser.set_usage("usage: %prog edit [options] file_name")
        elif self.action == "view":
            self.parser.set_usage("usage: %prog view [options] file_name")
        elif self.action == "encrypt":
            self.parser.set_usage("usage: %prog encrypt [options] file_name")
        elif self.action == "rekey":
            self.parser.set_usage("usage: %prog rekey [options] file_name")

        self.options, self.args = self.parser.parse_args()
        self.display.verbosity = self.options.verbosity

        if len(self.args) == 0 or len(self.args) > 1:
            raise AnsibleOptionsError("Vault requires a single filename as a parameter")

    def run(self):
        # Resolve the vault password (from file or interactive prompt),
        # then dispatch to the matching execute_<action> method.
        super(VaultCLI, self).run()

        if self.options.vault_password_file:
            # read vault_pass from a file
            self.vault_pass = CLI.read_vault_password_file(self.options.vault_password_file)
        else:
            self.vault_pass, _= self.ask_vault_passwords(ask_vault_pass=True, ask_new_vault_pass=False, confirm_new=False)

        if not self.vault_pass:
            raise AnsibleOptionsError("A password is required to use Ansible's Vault")

        self.execute()

    def execute_create(self):
        # Create a new encrypted file and open it in an editor.
        cipher = getattr(self.options, 'cipher', self.CIPHER)
        this_editor = VaultEditor(cipher, self.vault_pass, self.args[0])
        this_editor.create_file()

    def execute_decrypt(self):
        # Decrypt each given file in place.
        cipher = getattr(self.options, 'cipher', self.CIPHER)
        for f in self.args:
            this_editor = VaultEditor(cipher, self.vault_pass, f)
            this_editor.decrypt_file()

        self.display.display("Decryption successful")

    def execute_edit(self):
        for f in self.args:
            this_editor = VaultEditor(None, self.vault_pass, f)
            this_editor.edit_file()

    def execute_view(self):
        for f in self.args:
            this_editor = VaultEditor(None, self.vault_pass, f)
            this_editor.view_file()

    def execute_encrypt(self):
        # Encrypt each given plaintext file in place.
        cipher = getattr(self.options, 'cipher', self.CIPHER)
        for f in self.args:
            this_editor = VaultEditor(cipher, self.vault_pass, f)
            this_editor.encrypt_file()

        self.display.display("Encryption successful")

    def execute_rekey(self):
        # Re-encrypt file(s) with a newly prompted password; validate all
        # paths up front so a missing file aborts before any work is done.
        for f in self.args:
            if not (os.path.isfile(f)):
                raise AnsibleError(f + " does not exist")

        __, new_password = self.ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=True, confirm_new=True)

        for f in self.args:
            this_editor = VaultEditor(None, self.vault_pass, f)
            this_editor.rekey_file(new_password)

        self.display.display("Rekey successful")
| gpl-3.0 |
class RequestHeader:
    """A single, immutable request header (name/value pair)."""

    def __init__(self, name, value):
        # A header without a non-blank name is meaningless; reject it early.
        if name is None or not name.strip():
            raise ValueError("name is required")
        self._name = name
        self._value = value

    @property
    def name(self):
        """
        :return: The header name.
        """
        return self._name

    @property
    def value(self):
        """
        :return: The un-encoded value; bytes are decoded as UTF-8.
        """
        raw = self._value
        if isinstance(raw, bytes):
            return raw.decode('utf-8')
        return raw

    def __str__(self):
        return "%s:%s" % (self._name, self._value)
def get_header_value(headers, header_name):
    """
    :return: The value of the header with the given name, or None if there
             was no such header.
    """
    if headers is None:
        return None
    wanted = header_name.lower()
    if isinstance(headers, dict):
        # Dict entries are returned as-is (no RequestHeader decoding applied).
        for key in headers:
            if key.lower() == wanted:
                return headers[key]
    else:
        for header in headers:
            if header.name.lower() == wanted:
                return header.value
    return None
def get_header(headers, header_name):
    """
    :return: The header with the given name, or None if there was no such
             header.
    """
    if headers is None:
        return None
    wanted = header_name.lower()
    if isinstance(headers, dict):
        # Wrap the matching dict entry so callers get a uniform return type.
        for key in headers:
            if key.lower() == wanted:
                return RequestHeader(key, headers[key])
    else:
        for header in headers:
            if header.name.lower() == wanted:
                return header
    return None
| mit |
himmih/cluedo | venv/lib/python2.7/site-packages/setuptools/command/upload_docs.py | 390 | 6782 | # -*- coding: utf-8 -*-
"""upload_docs
Implements a Distutils 'upload_docs' subcommand (upload documentation to
PyPI's pythonhosted.org).
"""
from base64 import standard_b64encode
from distutils import log
from distutils.errors import DistutilsOptionError
from distutils.command.upload import upload
import os
import socket
import zipfile
import tempfile
import sys
import shutil
from setuptools.compat import httplib, urlparse, unicode, iteritems, PY3
from pkg_resources import iter_entry_points
errors = 'surrogateescape' if PY3 else 'strict'
# This is not just a replacement for byte literals
# but works as a general purpose encoder
# This is not just a replacement for byte literals
# but works as a general purpose encoder
def b(s, encoding='utf-8'):
    # Text objects are encoded to bytes with the module-level error handler;
    # anything else (already bytes, etc.) passes through unchanged.
    return s.encode(encoding, errors) if isinstance(s, unicode) else s
class upload_docs(upload):
description = 'Upload documentation to PyPI'
user_options = [
('repository=', 'r',
"url of repository [default: %s]" % upload.DEFAULT_REPOSITORY),
('show-response', None,
'display full response text from server'),
('upload-dir=', None, 'directory to upload'),
]
boolean_options = upload.boolean_options
def has_sphinx(self):
    # Predicate for sub_commands: True when no explicit upload dir was given
    # and a 'build_sphinx' distutils command entry point is installed.
    # NOTE: falls through to an implicit None (falsy) when Sphinx is absent.
    if self.upload_dir is None:
        for ep in iter_entry_points('distutils.commands', 'build_sphinx'):
            return True
# Run build_sphinx first when available and no upload dir was specified.
sub_commands = [('build_sphinx', has_sphinx)]
def initialize_options(self):
    upload.initialize_options(self)
    # Resolved in finalize_options; target_dir is derived from upload_dir
    # or the Sphinx/build output directory.
    self.upload_dir = None
    self.target_dir = None
def finalize_options(self):
    upload.finalize_options(self)
    if self.upload_dir is None:
        if self.has_sphinx():
            # Default to Sphinx's build output directory.
            build_sphinx = self.get_finalized_command('build_sphinx')
            self.target_dir = build_sphinx.builder_target_dir
        else:
            build = self.get_finalized_command('build')
            self.target_dir = os.path.join(build.build_base, 'docs')
    else:
        self.ensure_dirname('upload_dir')
        self.target_dir = self.upload_dir
    self.announce('Using upload directory %s' % self.target_dir)
def create_zipfile(self, filename):
    # Zip the whole target_dir tree into `filename`; raises
    # DistutilsOptionError when the directory root contains no files.
    zip_file = zipfile.ZipFile(filename, "w")
    try:
        self.mkpath(self.target_dir)  # just in case
        for root, dirs, files in os.walk(self.target_dir):
            if root == self.target_dir and not files:
                raise DistutilsOptionError(
                    "no files found in upload directory '%s'"
                    % self.target_dir)
            for name in files:
                full = os.path.join(root, name)
                # Archive entry names are relative to the upload directory.
                relative = root[len(self.target_dir):].lstrip(os.path.sep)
                dest = os.path.join(relative, name)
                zip_file.write(full, dest)
    finally:
        zip_file.close()
def run(self):
    # Run sub commands
    for cmd_name in self.get_sub_commands():
        self.run_command(cmd_name)

    tmp_dir = tempfile.mkdtemp()
    name = self.distribution.metadata.get_name()
    zip_file = os.path.join(tmp_dir, "%s.zip" % name)
    try:
        self.create_zipfile(zip_file)
        self.upload_file(zip_file)
    finally:
        # Always remove the temporary zip, even if the upload failed.
        shutil.rmtree(tmp_dir)
def upload_file(self, filename):
f = open(filename, 'rb')
content = f.read()
f.close()
meta = self.distribution.metadata
data = {
':action': 'doc_upload',
'name': meta.get_name(),
'content': (os.path.basename(filename), content),
}
# set up the authentication
credentials = b(self.username + ':' + self.password)
credentials = standard_b64encode(credentials)
if PY3:
credentials = credentials.decode('ascii')
auth = "Basic " + credentials
# Build up the MIME payload for the POST data
boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
sep_boundary = b('\n--') + b(boundary)
end_boundary = sep_boundary + b('--')
body = []
for key, values in iteritems(data):
title = '\nContent-Disposition: form-data; name="%s"' % key
# handle multiple entries for the same name
if not isinstance(values, list):
values = [values]
for value in values:
if type(value) is tuple:
title += '; filename="%s"' % value[0]
value = value[1]
else:
value = b(value)
body.append(sep_boundary)
body.append(b(title))
body.append(b("\n\n"))
body.append(value)
if value and value[-1:] == b('\r'):
body.append(b('\n')) # write an extra newline (lurve Macs)
body.append(end_boundary)
body.append(b("\n"))
body = b('').join(body)
self.announce("Submitting documentation to %s" % (self.repository),
log.INFO)
# build the Request
# We can't use urllib2 since we need to send the Basic
# auth right with the first request
schema, netloc, url, params, query, fragments = \
urlparse(self.repository)
assert not params and not query and not fragments
if schema == 'http':
conn = httplib.HTTPConnection(netloc)
elif schema == 'https':
conn = httplib.HTTPSConnection(netloc)
else:
raise AssertionError("unsupported schema " + schema)
data = ''
try:
conn.connect()
conn.putrequest("POST", url)
content_type = 'multipart/form-data; boundary=%s' % boundary
conn.putheader('Content-type', content_type)
conn.putheader('Content-length', str(len(body)))
conn.putheader('Authorization', auth)
conn.endheaders()
conn.send(body)
except socket.error as e:
self.announce(str(e), log.ERROR)
return
r = conn.getresponse()
if r.status == 200:
self.announce('Server response (%s): %s' % (r.status, r.reason),
log.INFO)
elif r.status == 301:
location = r.getheader('Location')
if location is None:
location = 'https://pythonhosted.org/%s/' % meta.get_name()
self.announce('Upload successful. Visit %s' % location,
log.INFO)
else:
self.announce('Upload failed (%s): %s' % (r.status, r.reason),
log.ERROR)
if self.show_response:
print('-' * 75, r.read(), '-' * 75)
| apache-2.0 |
yawnosnorous/python-for-android | python-modules/twisted/twisted/plugins/twisted_reactors.py | 62 | 1420 | # Copyright (c) 2006 Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.application.reactors import Reactor
# Reactor plugin declarations. Each Reactor(...) records a short name
# (selectable via "twistd --reactor=<name>"), the module implementing it,
# and a human-readable description.
default = Reactor(
    'default', 'twisted.internet.default',
    'The best reactor for the current platform.')
select = Reactor(
    'select', 'twisted.internet.selectreactor', 'select(2)-based reactor.')
# GUI-toolkit integration reactors.
wx = Reactor(
    'wx', 'twisted.internet.wxreactor', 'wxPython integration reactor.')
gtk = Reactor(
    'gtk', 'twisted.internet.gtkreactor', 'Gtk1 integration reactor.')
gtk2 = Reactor(
    'gtk2', 'twisted.internet.gtk2reactor', 'Gtk2 integration reactor.')
glib2 = Reactor(
    'glib2', 'twisted.internet.glib2reactor',
    'GLib2 event-loop integration reactor.')
glade = Reactor(
    'debug-gui', 'twisted.manhole.gladereactor',
    'Semi-functional debugging/introspection reactor.')
# Platform-specific event-notification reactors.
win32er = Reactor(
    'win32', 'twisted.internet.win32eventreactor',
    'Win32 WaitForMultipleObjects-based reactor.')
poll = Reactor(
    'poll', 'twisted.internet.pollreactor', 'poll(2)-based reactor.')
epoll = Reactor(
    'epoll', 'twisted.internet.epollreactor', 'epoll(4)-based reactor.')
cf = Reactor(
    'cf' , 'twisted.internet.cfreactor',
    'CoreFoundation integration reactor.')
kqueue = Reactor(
    'kqueue', 'twisted.internet.kqreactor', 'kqueue(2)-based reactor.')
iocp = Reactor(
    'iocp', 'twisted.internet.iocpreactor',
    'Win32 IO Completion Ports-based reactor.')
| apache-2.0 |
RJVB/audacity | lib-src/lv2/lv2/plugins/eg01-amp.lv2/waflib/Tools/d_scan.py | 292 | 3029 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import re
from waflib import Utils,Logs
def filter_comments(filename):
	"""Read *filename* and return its contents as a list of code fragments
	with D-language comments removed (// line comments, /* */ block
	comments and nesting /+ +/ comments); string and character literals
	are copied verbatim."""
	txt=Utils.readf(filename)
	i=0
	buf=[]
	max=len(txt)
	begin=0
	while i<max:
		c=txt[i]
		if c=='"'or c=="'":
			# String/char literal: keep it intact, honouring backslash
			# escapes, until the matching delimiter.
			buf.append(txt[begin:i])
			delim=c
			i+=1
			while i<max:
				c=txt[i]
				if c==delim:break
				elif c=='\\':
					# Skip the escaped character as well.
					i+=1
				i+=1
			i+=1
			begin=i
		elif c=='/':
			# Possible comment start: flush the code seen so far.
			buf.append(txt[begin:i])
			i+=1
			if i==max:break
			c=txt[i]
			if c=='+':
				# /+ ... +/ comments nest in D: track the depth.
				i+=1
				nesting=1
				c=None
				while i<max:
					prev=c
					c=txt[i]
					if prev=='/'and c=='+':
						nesting+=1
						c=None
					elif prev=='+'and c=='/':
						nesting-=1
						if nesting==0:break
						c=None
					i+=1
			elif c=='*':
				# Classic /* ... */ block comment (no nesting).
				i+=1
				c=None
				while i<max:
					prev=c
					c=txt[i]
					if prev=='*'and c=='/':break
					i+=1
			elif c=='/':
				# Line comment: skip to end of line.
				i+=1
				while i<max and txt[i]!='\n':
					i+=1
			else:
				# Lone '/' (e.g. division operator): keep it as code.
				begin=i-1
				continue
			i+=1
			begin=i
			# Replace the comment with a single space separator.
			buf.append(' ')
		else:
			i+=1
	buf.append(txt[begin:])
	return buf
class d_parser(object):
	"""Recursive scanner that follows D 'import' statements to build the
	dependency closure of a source file."""
	def __init__(self,env,incpaths):
		# Module names already processed (avoids rescanning).
		self.allnames=[]
		self.re_module=re.compile("module\s+([^;]+)")
		self.re_import=re.compile("import\s+([^;]+)")
		self.re_import_bindings=re.compile("([^:]+):(.*)")
		self.re_import_alias=re.compile("[^=]+=(.+)")
		self.env=env
		# Nodes resolved on disk, and dotted names that could not be found.
		self.nodes=[]
		self.names=[]
		self.incpaths=incpaths
	def tryfind(self,filename):
		"""Resolve a dotted module name to a .d file under the include
		paths and queue it for scanning, or record it as unresolved."""
		found=0
		for n in self.incpaths:
			found=n.find_resource(filename.replace('.','/')+'.d')
			if found:
				self.nodes.append(found)
				self.waiting.append(found)
				break
		if not found:
			if not filename in self.names:
				self.names.append(filename)
	def get_strings(self,code):
		"""Return the module names imported by *code*; handles selective
		imports ('mod : sym, sym') and aliases ('alias = mod')."""
		self.module=''
		lst=[]
		mod_name=self.re_module.search(code)
		if mod_name:
			# Remember this file's own module name, whitespace stripped.
			self.module=re.sub('\s+','',mod_name.group(1))
		import_iterator=self.re_import.finditer(code)
		if import_iterator:
			for import_match in import_iterator:
				import_match_str=re.sub('\s+','',import_match.group(1))
				# Selective import: only the part before ':' names a module.
				bindings_match=self.re_import_bindings.match(import_match_str)
				if bindings_match:
					import_match_str=bindings_match.group(1)
				matches=import_match_str.split(',')
				for match in matches:
					alias_match=self.re_import_alias.match(match)
					if alias_match:
						# Aliased import: the real module is right of '='.
						match=alias_match.group(1)
					lst.append(match)
		return lst
	def start(self,node):
		"""Breadth-first scan starting from *node*."""
		self.waiting=[node]
		while self.waiting:
			nd=self.waiting.pop(0)
			self.iter(nd)
	def iter(self,node):
		"""Scan one file: strip comments, collect imports, resolve each."""
		path=node.abspath()
		code="".join(filter_comments(path))
		names=self.get_strings(code)
		for x in names:
			if x in self.allnames:continue
			self.allnames.append(x)
			self.tryfind(x)
def scan(self):
	"""Task scanner: walk self.inputs[0] for D imports and return the
	(resolved nodes, unresolved names) pair expected by waf."""
	parser=d_parser(self.env,self.generator.includes_nodes)
	start_node=self.inputs[0]
	parser.start(start_node)
	deps=parser.nodes
	missing=parser.names
	if Logs.verbose:
		Logs.debug('deps: deps for %s: %r; unresolved %r'%(str(start_node),deps,missing))
	return(deps,missing)
| gpl-2.0 |
waheedahmed/edx-platform | common/lib/i18n_tests/test_extract_and_generate.py | 16 | 4697 | """
This test tests that i18n extraction (`paver i18n_extract -v`) works properly.
"""
from datetime import datetime, timedelta
import os
import random
import re
import sys
import string
import subprocess
from unittest import TestCase
from mock import patch
from polib import pofile
from pytz import UTC
from i18n import extract
from i18n import generate
from i18n import dummy
from i18n.config import CONFIGURATION
class TestGenerate(TestCase):
    """
    Tests functionality of i18n/generate.py
    """
    # .po files produced by the extraction step.
    generated_files = ('django-partial.po', 'djangojs-partial.po', 'mako.po')
    @classmethod
    def setUpClass(cls):
        # Extraction + dummy generation are slow, so run them once for the
        # whole class rather than per test.
        super(TestGenerate, cls).setUpClass()
        sys.stderr.write(
            "\nThis test tests that i18n extraction (`paver i18n_extract`) works properly. "
            "If you experience failures, please check that all instances of `gettext` and "
            "`ngettext` are used correctly. You can also try running `paver i18n_extract -v` "
            "locally for more detail.\n"
        )
        sys.stderr.write(
            "\nExtracting i18n strings and generating dummy translations; "
            "this may take a few minutes\n"
        )
        sys.stderr.flush()
        extract.main(verbosity=0)
        dummy.main(verbosity=0)
    @classmethod
    def tearDownClass(cls):
        # Clear the Esperanto & RTL directories of any test artifacts
        cmd = "git checkout conf/locale/eo conf/locale/rtl"
        sys.stderr.write("Cleaning up dummy language directories: " + cmd)
        sys.stderr.flush()
        returncode = subprocess.call(cmd, shell=True)
        assert returncode == 0
        super(TestGenerate, cls).tearDownClass()
    def setUp(self):
        super(TestGenerate, self).setUp()
        # Subtract 1 second to help comparisons with file-modify time succeed,
        # since os.path.getmtime() is not millisecond-accurate
        self.start_time = datetime.now(UTC) - timedelta(seconds=1)
    def test_merge(self):
        """
        Tests merge script on English source files.
        """
        # Merge into a throwaway random filename, then clean it up.
        filename = os.path.join(CONFIGURATION.source_messages_dir, random_name())
        generate.merge(CONFIGURATION.source_locale, target=filename)
        self.assertTrue(os.path.exists(filename))
        os.remove(filename)
    # Patch dummy_locales to not have esperanto present
    @patch.object(CONFIGURATION, 'dummy_locales', ['fake2'])
    def test_main(self):
        """
        Runs generate.main() which should merge source files,
        then compile all sources in all configured languages.
        Validates output by checking all .mo files in all configured languages.
        .mo files should exist, and be recently created (modified
        after start of test suite)
        """
        generate.main(verbosity=0, strict=False)
        for locale in CONFIGURATION.translated_locales:
            for filename in ('django', 'djangojs'):
                mofile = filename + '.mo'
                path = os.path.join(CONFIGURATION.get_messages_dir(locale), mofile)
                exists = os.path.exists(path)
                self.assertTrue(exists, msg='Missing file in locale %s: %s' % (locale, mofile))
                self.assertGreaterEqual(
                    datetime.fromtimestamp(os.path.getmtime(path), UTC),
                    self.start_time,
                    msg='File not recently modified: %s' % path
                )
            # Segmenting means that the merge headers don't work the way they
            # used to, so don't make this check for now. I'm not sure if we'll
            # get the merge header back eventually, or delete this code eventually.
            # self.assert_merge_headers(locale)
    def assert_merge_headers(self, locale):
        """
        This is invoked by test_main to ensure that it runs after
        calling generate.main().
        There should be exactly three merge comment headers
        in our merged .po file. This counts them to be sure.
        A merge comment looks like this:
        # #-#-#-#-# django-partial.po (0.1a) #-#-#-#-#
        """
        path = os.path.join(CONFIGURATION.get_messages_dir(locale), 'django.po')
        pof = pofile(path)
        # re.M: match the marker at the start of any line in the header.
        pattern = re.compile('^#-#-#-#-#', re.M)
        match = pattern.findall(pof.header)
        self.assertEqual(
            len(match),
            3,
            msg="Found %s (should be 3) merge comments in the header for %s" % (len(match), path)
        )
def random_name(size=6):
    """Build a random file name such as 'test-4BZ81W'.

    The suffix is *size* characters drawn uniformly from uppercase
    letters and digits, appended to the fixed 'test-' prefix.
    """
    alphabet = string.ascii_uppercase + string.digits
    suffix = ''.join(random.choice(alphabet) for _ in range(size))
    return 'test-' + suffix
| agpl-3.0 |
Bachaco-ve/odoo | addons/procurement_jit_stock/__openerp__.py | 241 | 1738 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Just In Time Scheduling with Stock',
'version': '1.0',
'category': 'Base',
'description': """
If you install this module, it can make sure that not only
the ship of pick-pack-ship will be created in batch, but
the pick and the pack also. (which will dramatically improve performance)
Will be removed from Saas-6 and will be put in procurement_jit
over there, where procurement_jit will depend on stock
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/manufacturing',
'depends': ['procurement_jit', 'stock'],
'data': [],
'demo': [],
'test': [],
'installable': True,
'auto_install': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Dzess/ALFIRT | alfirt.opencv/src/classification/SURFFlannMatcher.py | 1 | 6726 | '''
Created on 30-08-2011
@author: Ankhazam
Based on find_obj.py OpenCV2 sample
'''
import numpy as np
import cv2
from common import anorm
class SURFFlannMatcher(object):
'''
Main recognition and training module.
'''
FLANN_INDEX_KDTREE = 1 # OpenCV bug: flann enums are missing
flann_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=4)
def __init__(self, trainedObjects, surfThreshold=400):
'''
Constructor
@param trainedObjects: List of @see: TrainedObject used as recognition DB
@param surfThreshold: Threshold that was used to train the objects (if equal for all)
'''
self.trainedObjects = trainedObjects
self.surfThreshold = surfThreshold
self.surf = cv2.SURF(self.surfThreshold)
def addTrainedObject(self, trainedObject):
'''
Extends the loaded database with a new @see: TrainedObject
'''
self.trainedObjects.append(trainedObject)
def __matchUsingBruteforce(self, desc1, desc2, r_threshold=0.75):
res = []
for i in xrange(len(desc1)):
dist = anorm(desc2 - desc1[i])
n1, n2 = dist.argsort()[:2]
r = dist[n1] / dist[n2]
if r < r_threshold:
res.append((i, n1))
return np.array(res)
def __matchUsingFlann(self, desc1, desc2, r_threshold=0.6):
'''
Internal flann descriptors matcher in order to find the best match.
@param desc1, desc2: SURF features descriptors of currently processed object orientation and the test image.
@param r_threshold: Tunnable threshold for kNN normalized distance inside the descriptors space.
'''
flann = cv2.flann_Index(desc2, self.flann_params)
idx2, dist = flann.knnSearch(desc1, 2, params={}) # bug: need to provide empty dict
mask = dist[:, 0] / dist[:, 1] < r_threshold
idx1 = np.arange(len(desc1))
pairs = np.int32(zip(idx1, idx2[:, 0]))
return pairs[mask]
def __matchWithGivenflann(self, desc1, flannIndex, r_threshold=0.4):
'''
Internal flann descriptors matcher in order to find the best match.
@param desc1: SURF features descriptors of currently processed object orientation and the test image.
@param flannIndex: PreGenerated FlannIndex to be used for searching
@param r_threshold: Tunnable threshold for kNN normalized distance inside the descriptors space.
@return: Array of matched points.
'''
idx2, dist = flannIndex.knnSearch(desc1, 2, params={}) # bug: need to provide empty dict
mask = dist[:, 0] / dist[:, 1] < r_threshold
idx1 = np.arange(len(desc1))
pairs = np.int32(zip(idx1, idx2[:, 0]))
return pairs[mask]
def matchObject(self, image, matchMethod="flann", useRansac = 1, surfThreshold=None):
'''
Finds best match for each object in the database.
@param image: Image with object(s) to be found.
@param matchMetod: flann or brute to select the matcher
@param useRansac: 1/0 defining the optional use of RANSAC in homography matrix search.
@param surfThreshold: Threshold for Hessian detector in SURF method used for training the objects.
This method adapts however this threshold automatically basing on the read from each TrainedObject.
@return: List of tuples (TrainedObject, bestMatchOrientationIndex,
homographyStatus, homographyMatrix,
(matchedPointsInTrained, matchedPointsInTest) )
'''
# create new surf extractor only if needed
if (surfThreshold is not None) and (surfThreshold != self.surfThreshold):
self.surfThreshold = surfThreshold
self.surf = cv2.SURF(self.surfThreshold)
kp, desc = self.surf.detect(image, None, False)
print "Found SURF features of the passed image"
desc.shape = (-1, self.surf.descriptorSize())
print "Created shape from descriptor"
flannIndex = cv2.flann_Index(desc, self.flann_params)
print "Found flann index"
# list of (TrainedObject, bestMatchOrientationIndex, homographyStatus, homographyMatrix)
bestMatches = list()
# simple searching for best matched orientation
for trainedObject in self.trainedObjects:
# we need to recreate the flann index if objects are trained with different thresholds
if (trainedObject.surfThreshold != self.surfThreshold) :
self.surfThreshold = trainedObject.surfThreshold
self.surf = cv2.SURF(self.surfThreshold)
kp, desc = self.surf.detect(image, None, False)
desc.shape = (-1, self.surf.descriptorSize())
flannIndex = cv2.flann_Index(desc, self.flann_params)
# (TrainedObject, bestMatchOrientationIndex, homographyStatus, homographyMatrix)
bestMatchObject = None
ind = 0
for orientation in trainedObject.orientations:
# we are using flannMatcher, can change to bruteForce'''
if matchMethod == "flann":
matchResult = self.__matchWithGivenflann(orientation[2], flannIndex) # optimized with preGenerated FlannIndex
else:
matchResult = self.__matchUsingBruteforce(orientation[2], desc) # we can use Brute
if len(matchResult) > 10:
matched_p1 = np.array([orientation[1][i].pt for i, j in matchResult])
matched_p2 = np.array([kp[j].pt for i, j in matchResult])
#print len(matched_p1), len(matched_p2)
H, status = cv2.findHomography(matched_p1, matched_p2, (0, cv2.RANSAC)[useRansac], 5.0)
#print "Orientation name: ", orientation[0].name
#print '%d / %d inliers/matched' % (np.sum(status), len(status))
if ((bestMatchObject is None and np.sum(status) > 0)
or (np.sum(status) > np.sum(bestMatchObject[2])
or (np.sum(status) == np.sum(bestMatchObject[2]) and len(status) > len(bestMatchObject[2])))
) :
bestMatchObject = (trainedObject, ind, status, H, (matched_p1, matched_p2))
ind += 1
# appends to the results the best match for each TrainedObject
if bestMatchObject is not None:
bestMatches.append(bestMatchObject)
return bestMatches
| mit |
amontefusco/gnuradio-amontefusco | gnuradio-core/src/python/gnuradio/gr/qa_fft_filter.py | 6 | 9413 | #!/usr/bin/env python
#
# Copyright 2004,2005,2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
import sys
import random
def make_random_complex_tuple(L):
    """Return a tuple of L pseudo-random complex samples whose real and
    imaginary parts are each drawn uniformly from (-1000, 1000)."""
    return tuple(complex(random.uniform(-1000, 1000),
                         random.uniform(-1000, 1000))
                 for _ in range(L))
def make_random_float_tuple(L):
    """Return a tuple of L pseudo-random whole-valued floats, each made
    by truncating a uniform draw from (-1000, 1000) to an integer."""
    return tuple(float(int(random.uniform(-1000, 1000))) for _ in range(L))
def reference_filter_ccc(dec, taps, input):
    """
    Compute the expected output of a complex FIR filter by running the
    conventional time-domain gr.fir_filter_ccc over *input*.
    @param dec: decimation factor
    @param taps: complex filter taps
    @param input: tuple of complex input samples
    @return: tuple of filtered output samples
    """
    tb = gr.top_block()
    #src = gr.vector_source_c(((0,) * (len(taps) - 1)) + input)
    src = gr.vector_source_c(input)
    op = gr.fir_filter_ccc(dec, taps)
    dst = gr.vector_sink_c()
    tb.connect(src, op, dst)
    tb.run()
    return dst.data()
def reference_filter_fff(dec, taps, input):
    """
    Compute the expected output of a float FIR filter by running the
    conventional time-domain gr.fir_filter_fff over *input*.
    @param dec: decimation factor
    @param taps: float filter taps
    @param input: tuple of float input samples
    @return: tuple of filtered output samples
    """
    tb = gr.top_block()
    #src = gr.vector_source_f(((0,) * (len(taps) - 1)) + input)
    src = gr.vector_source_f(input)
    op = gr.fir_filter_fff(dec, taps)
    dst = gr.vector_sink_f()
    tb.connect(src, op, dst)
    tb.run()
    return dst.data()
def print_complex(x):
    """Write each value of *x* to stdout as a fixed-width '(re,imj), '
    pair, then terminate the line with a single newline."""
    for item in x:
        item = complex(item)
        sys.stdout.write("(%6.3f,%6.3fj), " % (item.real, item.imag))
    sys.stdout.write('\n')
class test_fft_filter(gr_unittest.TestCase):
    """QA for gr.fft_filter_ccc / gr.fft_filter_fff: outputs are checked
    against the conventional time-domain reference filters defined above.
    Methods prefixed 'xtest_' are deliberately disabled (not collected)."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def assert_fft_ok2(self, expected_result, result_data):
        # Compare only as many samples as the filter actually produced.
        expected_result = expected_result[:len(result_data)]
        self.assertComplexTuplesAlmostEqual2 (expected_result, result_data,
                                              abs_eps=1e-9, rel_eps=4e-4)
    def assert_fft_float_ok2(self, expected_result, result_data, abs_eps=1e-9, rel_eps=4e-4):
        expected_result = expected_result[:len(result_data)]
        self.assertFloatTuplesAlmostEqual2 (expected_result, result_data,
                                            abs_eps, rel_eps)
    #def test_ccc_000(self):
    #    self.assertRaises (RuntimeError, gr.fft_filter_ccc, 2, (1,))
    def test_ccc_001(self):
        # Identity tap: output must equal the (complex) input.
        tb = gr.top_block()
        src_data = (0,1,2,3,4,5,6,7)
        taps = (1,)
        expected_result = tuple([complex(x) for x in (0,1,2,3,4,5,6,7)])
        src = gr.vector_source_c(src_data)
        op = gr.fft_filter_ccc(1, taps)
        dst = gr.vector_sink_c()
        tb.connect(src, op, dst)
        tb.run()
        result_data = dst.data()
        #print 'expected:', expected_result
        #print 'results: ', result_data
        self.assertComplexTuplesAlmostEqual (expected_result, result_data, 5)
    def test_ccc_002(self):
        # Single gain-of-2 tap: output must be the input doubled.
        tb = gr.top_block()
        src_data = (0,1,2,3,4,5,6,7)
        taps = (2,)
        expected_result = tuple([2 * complex(x) for x in (0,1,2,3,4,5,6,7)])
        src = gr.vector_source_c(src_data)
        op = gr.fft_filter_ccc(1, taps)
        dst = gr.vector_sink_c()
        tb.connect(src, op, dst)
        tb.run()
        result_data = dst.data()
        #print 'expected:', expected_result
        #print 'results: ', result_data
        self.assertComplexTuplesAlmostEqual (expected_result, result_data, 5)
    def test_ccc_004(self):
        # Random taps/data, no decimation, compared to the reference filter.
        random.seed(0)
        for i in xrange(25):
            # sys.stderr.write("\n>>> Loop = %d\n" % (i,))
            src_len = 4*1024
            src_data = make_random_complex_tuple(src_len)
            ntaps = int(random.uniform(2, 1000))
            taps = make_random_complex_tuple(ntaps)
            expected_result = reference_filter_ccc(1, taps, src_data)
            src = gr.vector_source_c(src_data)
            op = gr.fft_filter_ccc(1, taps)
            dst = gr.vector_sink_c()
            tb = gr.top_block()
            tb.connect(src, op, dst)
            tb.run()
            result_data = dst.data()
            del tb
            self.assert_fft_ok2(expected_result, result_data)
    def test_ccc_005(self):
        # Random taps/data with decimation factors 1..25.
        random.seed(0)
        for i in xrange(25):
            # sys.stderr.write("\n>>> Loop = %d\n" % (i,))
            dec = i + 1
            src_len = 4*1024
            src_data = make_random_complex_tuple(src_len)
            ntaps = int(random.uniform(2, 100))
            taps = make_random_complex_tuple(ntaps)
            expected_result = reference_filter_ccc(dec, taps, src_data)
            src = gr.vector_source_c(src_data)
            op = gr.fft_filter_ccc(dec, taps)
            dst = gr.vector_sink_c()
            tb = gr.top_block()
            tb.connect(src, op, dst)
            tb.run()
            del tb
            result_data = dst.data()
            self.assert_fft_ok2(expected_result, result_data)
    # ----------------------------------------------------------------
    # test _fff version
    # ----------------------------------------------------------------
    def test_fff_001(self):
        # Identity tap: output must equal the (float) input.
        tb = gr.top_block()
        src_data = (0,1,2,3,4,5,6,7)
        taps = (1,)
        expected_result = tuple([float(x) for x in (0,1,2,3,4,5,6,7)])
        src = gr.vector_source_f(src_data)
        op = gr.fft_filter_fff(1, taps)
        dst = gr.vector_sink_f()
        tb.connect(src, op, dst)
        tb.run()
        result_data = dst.data()
        #print 'expected:', expected_result
        #print 'results: ', result_data
        self.assertFloatTuplesAlmostEqual (expected_result, result_data, 5)
    def test_fff_002(self):
        # Single gain-of-2 tap: output must be the input doubled.
        tb = gr.top_block()
        src_data = (0,1,2,3,4,5,6,7)
        taps = (2,)
        expected_result = tuple([2 * float(x) for x in (0,1,2,3,4,5,6,7)])
        src = gr.vector_source_f(src_data)
        op = gr.fft_filter_fff(1, taps)
        dst = gr.vector_sink_f()
        tb.connect(src, op, dst)
        tb.run()
        result_data = dst.data()
        #print 'expected:', expected_result
        #print 'results: ', result_data
        self.assertFloatTuplesAlmostEqual (expected_result, result_data, 5)
    # NOTE: the 'xtest_' prefix below keeps these slow/loose-tolerance
    # float tests out of the normal run.
    def xtest_fff_003(self):
        random.seed(0)
        for i in xrange(25):
            sys.stderr.write("\n>>> Loop = %d\n" % (i,))
            src_len = 4096
            src_data = make_random_float_tuple(src_len)
            ntaps = int(random.uniform(2, 1000))
            taps = make_random_float_tuple(ntaps)
            expected_result = reference_filter_fff(1, taps, src_data)
            src = gr.vector_source_f(src_data)
            op = gr.fft_filter_fff(1, taps)
            dst = gr.vector_sink_f()
            tb = gr.top_block()
            tb.connect(src, op, dst)
            tb.run()
            result_data = dst.data()
            #print "src_len =", src_len, " ntaps =", ntaps
            try:
                self.assert_fft_float_ok2(expected_result, result_data, abs_eps=1.0)
            except:
                # Dump both sequences to files for offline comparison.
                expected = open('expected', 'w')
                for x in expected_result:
                    expected.write(`x` + '\n')
                actual = open('actual', 'w')
                for x in result_data:
                    actual.write(`x` + '\n')
                raise
    def xtest_fff_004(self):
        random.seed(0)
        for i in xrange(25):
            sys.stderr.write("\n>>> Loop = %d\n" % (i,))
            src_len = 4*1024
            src_data = make_random_float_tuple(src_len)
            ntaps = int(random.uniform(2, 1000))
            taps = make_random_float_tuple(ntaps)
            expected_result = reference_filter_fff(1, taps, src_data)
            src = gr.vector_source_f(src_data)
            op = gr.fft_filter_fff(1, taps)
            dst = gr.vector_sink_f()
            tb = gr.top_block()
            tb.connect(src, op, dst)
            tb.run()
            result_data = dst.data()
            self.assert_fft_float_ok2(expected_result, result_data, abs_eps=2.0)
    def xtest_fff_005(self):
        random.seed(0)
        for i in xrange(25):
            sys.stderr.write("\n>>> Loop = %d\n" % (i,))
            dec = i + 1
            src_len = 4*1024
            src_data = make_random_float_tuple(src_len)
            ntaps = int(random.uniform(2, 100))
            taps = make_random_float_tuple(ntaps)
            expected_result = reference_filter_fff(dec, taps, src_data)
            src = gr.vector_source_f(src_data)
            op = gr.fft_filter_fff(dec, taps)
            dst = gr.vector_sink_f()
            tb = gr.top_block()
            tb.connect(src, op, dst)
            tb.run()
            result_data = dst.data()
            self.assert_fft_float_ok2(expected_result, result_data)
# Allow running this file directly via GNU Radio's unittest wrapper.
if __name__ == '__main__':
    gr_unittest.main ()
| gpl-3.0 |
newerthcom/savagerebirth | libs/python-2.72/Lib/test/test_calendar.py | 35 | 28101 | import calendar
import unittest
from test import test_support
import locale
result_2004_text = """
2004
January February March
Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su
1 2 3 4 1 1 2 3 4 5 6 7
5 6 7 8 9 10 11 2 3 4 5 6 7 8 8 9 10 11 12 13 14
12 13 14 15 16 17 18 9 10 11 12 13 14 15 15 16 17 18 19 20 21
19 20 21 22 23 24 25 16 17 18 19 20 21 22 22 23 24 25 26 27 28
26 27 28 29 30 31 23 24 25 26 27 28 29 29 30 31
April May June
Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su
1 2 3 4 1 2 1 2 3 4 5 6
5 6 7 8 9 10 11 3 4 5 6 7 8 9 7 8 9 10 11 12 13
12 13 14 15 16 17 18 10 11 12 13 14 15 16 14 15 16 17 18 19 20
19 20 21 22 23 24 25 17 18 19 20 21 22 23 21 22 23 24 25 26 27
26 27 28 29 30 24 25 26 27 28 29 30 28 29 30
31
July August September
Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su
1 2 3 4 1 1 2 3 4 5
5 6 7 8 9 10 11 2 3 4 5 6 7 8 6 7 8 9 10 11 12
12 13 14 15 16 17 18 9 10 11 12 13 14 15 13 14 15 16 17 18 19
19 20 21 22 23 24 25 16 17 18 19 20 21 22 20 21 22 23 24 25 26
26 27 28 29 30 31 23 24 25 26 27 28 29 27 28 29 30
30 31
October November December
Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su
1 2 3 1 2 3 4 5 6 7 1 2 3 4 5
4 5 6 7 8 9 10 8 9 10 11 12 13 14 6 7 8 9 10 11 12
11 12 13 14 15 16 17 15 16 17 18 19 20 21 13 14 15 16 17 18 19
18 19 20 21 22 23 24 22 23 24 25 26 27 28 20 21 22 23 24 25 26
25 26 27 28 29 30 31 29 30 27 28 29 30 31
"""
result_2004_html = """
<?xml version="1.0" encoding="ascii"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=ascii" />
<link rel="stylesheet" type="text/css" href="calendar.css" />
<title>Calendar for 2004</title>
</head>
<body>
<table border="0" cellpadding="0" cellspacing="0" class="year">
<tr><th colspan="3" class="year">2004</th></tr><tr><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">January</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="thu">1</td><td class="fri">2</td><td class="sat">3</td><td class="sun">4</td></tr>
<tr><td class="mon">5</td><td class="tue">6</td><td class="wed">7</td><td class="thu">8</td><td class="fri">9</td><td class="sat">10</td><td class="sun">11</td></tr>
<tr><td class="mon">12</td><td class="tue">13</td><td class="wed">14</td><td class="thu">15</td><td class="fri">16</td><td class="sat">17</td><td class="sun">18</td></tr>
<tr><td class="mon">19</td><td class="tue">20</td><td class="wed">21</td><td class="thu">22</td><td class="fri">23</td><td class="sat">24</td><td class="sun">25</td></tr>
<tr><td class="mon">26</td><td class="tue">27</td><td class="wed">28</td><td class="thu">29</td><td class="fri">30</td><td class="sat">31</td><td class="noday"> </td></tr>
</table>
</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">February</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="sun">1</td></tr>
<tr><td class="mon">2</td><td class="tue">3</td><td class="wed">4</td><td class="thu">5</td><td class="fri">6</td><td class="sat">7</td><td class="sun">8</td></tr>
<tr><td class="mon">9</td><td class="tue">10</td><td class="wed">11</td><td class="thu">12</td><td class="fri">13</td><td class="sat">14</td><td class="sun">15</td></tr>
<tr><td class="mon">16</td><td class="tue">17</td><td class="wed">18</td><td class="thu">19</td><td class="fri">20</td><td class="sat">21</td><td class="sun">22</td></tr>
<tr><td class="mon">23</td><td class="tue">24</td><td class="wed">25</td><td class="thu">26</td><td class="fri">27</td><td class="sat">28</td><td class="sun">29</td></tr>
</table>
</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">March</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="mon">1</td><td class="tue">2</td><td class="wed">3</td><td class="thu">4</td><td class="fri">5</td><td class="sat">6</td><td class="sun">7</td></tr>
<tr><td class="mon">8</td><td class="tue">9</td><td class="wed">10</td><td class="thu">11</td><td class="fri">12</td><td class="sat">13</td><td class="sun">14</td></tr>
<tr><td class="mon">15</td><td class="tue">16</td><td class="wed">17</td><td class="thu">18</td><td class="fri">19</td><td class="sat">20</td><td class="sun">21</td></tr>
<tr><td class="mon">22</td><td class="tue">23</td><td class="wed">24</td><td class="thu">25</td><td class="fri">26</td><td class="sat">27</td><td class="sun">28</td></tr>
<tr><td class="mon">29</td><td class="tue">30</td><td class="wed">31</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
</table>
</td></tr><tr><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">April</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="thu">1</td><td class="fri">2</td><td class="sat">3</td><td class="sun">4</td></tr>
<tr><td class="mon">5</td><td class="tue">6</td><td class="wed">7</td><td class="thu">8</td><td class="fri">9</td><td class="sat">10</td><td class="sun">11</td></tr>
<tr><td class="mon">12</td><td class="tue">13</td><td class="wed">14</td><td class="thu">15</td><td class="fri">16</td><td class="sat">17</td><td class="sun">18</td></tr>
<tr><td class="mon">19</td><td class="tue">20</td><td class="wed">21</td><td class="thu">22</td><td class="fri">23</td><td class="sat">24</td><td class="sun">25</td></tr>
<tr><td class="mon">26</td><td class="tue">27</td><td class="wed">28</td><td class="thu">29</td><td class="fri">30</td><td class="noday"> </td><td class="noday"> </td></tr>
</table>
</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">May</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="sat">1</td><td class="sun">2</td></tr>
<tr><td class="mon">3</td><td class="tue">4</td><td class="wed">5</td><td class="thu">6</td><td class="fri">7</td><td class="sat">8</td><td class="sun">9</td></tr>
<tr><td class="mon">10</td><td class="tue">11</td><td class="wed">12</td><td class="thu">13</td><td class="fri">14</td><td class="sat">15</td><td class="sun">16</td></tr>
<tr><td class="mon">17</td><td class="tue">18</td><td class="wed">19</td><td class="thu">20</td><td class="fri">21</td><td class="sat">22</td><td class="sun">23</td></tr>
<tr><td class="mon">24</td><td class="tue">25</td><td class="wed">26</td><td class="thu">27</td><td class="fri">28</td><td class="sat">29</td><td class="sun">30</td></tr>
<tr><td class="mon">31</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
</table>
</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">June</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="tue">1</td><td class="wed">2</td><td class="thu">3</td><td class="fri">4</td><td class="sat">5</td><td class="sun">6</td></tr>
<tr><td class="mon">7</td><td class="tue">8</td><td class="wed">9</td><td class="thu">10</td><td class="fri">11</td><td class="sat">12</td><td class="sun">13</td></tr>
<tr><td class="mon">14</td><td class="tue">15</td><td class="wed">16</td><td class="thu">17</td><td class="fri">18</td><td class="sat">19</td><td class="sun">20</td></tr>
<tr><td class="mon">21</td><td class="tue">22</td><td class="wed">23</td><td class="thu">24</td><td class="fri">25</td><td class="sat">26</td><td class="sun">27</td></tr>
<tr><td class="mon">28</td><td class="tue">29</td><td class="wed">30</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
</table>
</td></tr><tr><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">July</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="thu">1</td><td class="fri">2</td><td class="sat">3</td><td class="sun">4</td></tr>
<tr><td class="mon">5</td><td class="tue">6</td><td class="wed">7</td><td class="thu">8</td><td class="fri">9</td><td class="sat">10</td><td class="sun">11</td></tr>
<tr><td class="mon">12</td><td class="tue">13</td><td class="wed">14</td><td class="thu">15</td><td class="fri">16</td><td class="sat">17</td><td class="sun">18</td></tr>
<tr><td class="mon">19</td><td class="tue">20</td><td class="wed">21</td><td class="thu">22</td><td class="fri">23</td><td class="sat">24</td><td class="sun">25</td></tr>
<tr><td class="mon">26</td><td class="tue">27</td><td class="wed">28</td><td class="thu">29</td><td class="fri">30</td><td class="sat">31</td><td class="noday"> </td></tr>
</table>
</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">August</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="sun">1</td></tr>
<tr><td class="mon">2</td><td class="tue">3</td><td class="wed">4</td><td class="thu">5</td><td class="fri">6</td><td class="sat">7</td><td class="sun">8</td></tr>
<tr><td class="mon">9</td><td class="tue">10</td><td class="wed">11</td><td class="thu">12</td><td class="fri">13</td><td class="sat">14</td><td class="sun">15</td></tr>
<tr><td class="mon">16</td><td class="tue">17</td><td class="wed">18</td><td class="thu">19</td><td class="fri">20</td><td class="sat">21</td><td class="sun">22</td></tr>
<tr><td class="mon">23</td><td class="tue">24</td><td class="wed">25</td><td class="thu">26</td><td class="fri">27</td><td class="sat">28</td><td class="sun">29</td></tr>
<tr><td class="mon">30</td><td class="tue">31</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
</table>
</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">September</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="noday"> </td><td class="wed">1</td><td class="thu">2</td><td class="fri">3</td><td class="sat">4</td><td class="sun">5</td></tr>
<tr><td class="mon">6</td><td class="tue">7</td><td class="wed">8</td><td class="thu">9</td><td class="fri">10</td><td class="sat">11</td><td class="sun">12</td></tr>
<tr><td class="mon">13</td><td class="tue">14</td><td class="wed">15</td><td class="thu">16</td><td class="fri">17</td><td class="sat">18</td><td class="sun">19</td></tr>
<tr><td class="mon">20</td><td class="tue">21</td><td class="wed">22</td><td class="thu">23</td><td class="fri">24</td><td class="sat">25</td><td class="sun">26</td></tr>
<tr><td class="mon">27</td><td class="tue">28</td><td class="wed">29</td><td class="thu">30</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
</table>
</td></tr><tr><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">October</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="fri">1</td><td class="sat">2</td><td class="sun">3</td></tr>
<tr><td class="mon">4</td><td class="tue">5</td><td class="wed">6</td><td class="thu">7</td><td class="fri">8</td><td class="sat">9</td><td class="sun">10</td></tr>
<tr><td class="mon">11</td><td class="tue">12</td><td class="wed">13</td><td class="thu">14</td><td class="fri">15</td><td class="sat">16</td><td class="sun">17</td></tr>
<tr><td class="mon">18</td><td class="tue">19</td><td class="wed">20</td><td class="thu">21</td><td class="fri">22</td><td class="sat">23</td><td class="sun">24</td></tr>
<tr><td class="mon">25</td><td class="tue">26</td><td class="wed">27</td><td class="thu">28</td><td class="fri">29</td><td class="sat">30</td><td class="sun">31</td></tr>
</table>
</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">November</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="mon">1</td><td class="tue">2</td><td class="wed">3</td><td class="thu">4</td><td class="fri">5</td><td class="sat">6</td><td class="sun">7</td></tr>
<tr><td class="mon">8</td><td class="tue">9</td><td class="wed">10</td><td class="thu">11</td><td class="fri">12</td><td class="sat">13</td><td class="sun">14</td></tr>
<tr><td class="mon">15</td><td class="tue">16</td><td class="wed">17</td><td class="thu">18</td><td class="fri">19</td><td class="sat">20</td><td class="sun">21</td></tr>
<tr><td class="mon">22</td><td class="tue">23</td><td class="wed">24</td><td class="thu">25</td><td class="fri">26</td><td class="sat">27</td><td class="sun">28</td></tr>
<tr><td class="mon">29</td><td class="tue">30</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
</table>
</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">December</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="noday"> </td><td class="wed">1</td><td class="thu">2</td><td class="fri">3</td><td class="sat">4</td><td class="sun">5</td></tr>
<tr><td class="mon">6</td><td class="tue">7</td><td class="wed">8</td><td class="thu">9</td><td class="fri">10</td><td class="sat">11</td><td class="sun">12</td></tr>
<tr><td class="mon">13</td><td class="tue">14</td><td class="wed">15</td><td class="thu">16</td><td class="fri">17</td><td class="sat">18</td><td class="sun">19</td></tr>
<tr><td class="mon">20</td><td class="tue">21</td><td class="wed">22</td><td class="thu">23</td><td class="fri">24</td><td class="sat">25</td><td class="sun">26</td></tr>
<tr><td class="mon">27</td><td class="tue">28</td><td class="wed">29</td><td class="thu">30</td><td class="fri">31</td><td class="noday"> </td><td class="noday"> </td></tr>
</table>
</td></tr></table></body>
</html>
"""
class OutputTestCase(unittest.TestCase):
def normalize_calendar(self, s):
# Filters out locale dependent strings
def neitherspacenordigit(c):
return not c.isspace() and not c.isdigit()
lines = []
for line in s.splitlines(False):
# Drop texts, as they are locale dependent
if line and not filter(neitherspacenordigit, line):
lines.append(line)
return lines
def test_output(self):
self.assertEqual(
self.normalize_calendar(calendar.calendar(2004)),
self.normalize_calendar(result_2004_text)
)
def test_output_textcalendar(self):
self.assertEqual(
calendar.TextCalendar().formatyear(2004).strip(),
result_2004_text.strip()
)
def test_output_htmlcalendar(self):
self.assertEqual(
calendar.HTMLCalendar().formatyearpage(2004).strip(),
result_2004_html.strip()
)
class CalendarTestCase(unittest.TestCase):
def test_isleap(self):
# Make sure that the return is right for a few years, and
# ensure that the return values are 1 or 0, not just true or
# false (see SF bug #485794). Specific additional tests may
# be appropriate; this tests a single "cycle".
self.assertEqual(calendar.isleap(2000), 1)
self.assertEqual(calendar.isleap(2001), 0)
self.assertEqual(calendar.isleap(2002), 0)
self.assertEqual(calendar.isleap(2003), 0)
def test_setfirstweekday(self):
self.assertRaises(ValueError, calendar.setfirstweekday, 'flabber')
self.assertRaises(ValueError, calendar.setfirstweekday, -1)
self.assertRaises(ValueError, calendar.setfirstweekday, 200)
orig = calendar.firstweekday()
calendar.setfirstweekday(calendar.SUNDAY)
self.assertEqual(calendar.firstweekday(), calendar.SUNDAY)
calendar.setfirstweekday(calendar.MONDAY)
self.assertEqual(calendar.firstweekday(), calendar.MONDAY)
calendar.setfirstweekday(orig)
def test_enumerateweekdays(self):
self.assertRaises(IndexError, calendar.day_abbr.__getitem__, -10)
self.assertRaises(IndexError, calendar.day_name.__getitem__, 10)
self.assertEqual(len([d for d in calendar.day_abbr]), 7)
def test_days(self):
for attr in "day_name", "day_abbr":
value = getattr(calendar, attr)
self.assertEqual(len(value), 7)
self.assertEqual(len(value[:]), 7)
# ensure they're all unique
self.assertEqual(len(set(value)), 7)
# verify it "acts like a sequence" in two forms of iteration
self.assertEqual(value[::-1], list(reversed(value)))
def test_months(self):
for attr in "month_name", "month_abbr":
value = getattr(calendar, attr)
self.assertEqual(len(value), 13)
self.assertEqual(len(value[:]), 13)
self.assertEqual(value[0], "")
# ensure they're all unique
self.assertEqual(len(set(value)), 13)
# verify it "acts like a sequence" in two forms of iteration
self.assertEqual(value[::-1], list(reversed(value)))
def test_localecalendars(self):
# ensure that Locale{Text,HTML}Calendar resets the locale properly
# (it is still not thread-safe though)
old_october = calendar.TextCalendar().formatmonthname(2010, 10, 10)
try:
calendar.LocaleTextCalendar(locale='').formatmonthname(2010, 10, 10)
except locale.Error:
# cannot set the system default locale -- skip rest of test
return
calendar.LocaleHTMLCalendar(locale='').formatmonthname(2010, 10)
new_october = calendar.TextCalendar().formatmonthname(2010, 10, 10)
self.assertEquals(old_october, new_october)
class MonthCalendarTestCase(unittest.TestCase):
    """Shared machinery for checking monthcalendar() week layouts.

    Subclasses must define a ``firstweekday`` class attribute.
    """

    def setUp(self):
        self.oldfirstweekday = calendar.firstweekday()
        calendar.setfirstweekday(self.firstweekday)

    def tearDown(self):
        # Restore the module-global first weekday so other tests are
        # unaffected.
        calendar.setfirstweekday(self.oldfirstweekday)

    def check_weeks(self, year, month, weeks):
        """Assert each week row holds the expected count of real days.

        ``weeks`` gives, per week row, how many cells belong to the
        month (non-zero entries of monthcalendar()'s output).
        """
        cal = calendar.monthcalendar(year, month)
        self.assertEqual(len(cal), len(weeks))
        # FIX: the original used the Python-2-only xrange() to index both
        # sequences; zip() iterates them in lockstep on Python 2 and 3.
        for expected, row in zip(weeks, cal):
            self.assertEqual(expected, sum(day != 0 for day in row))
class MondayTestCase(MonthCalendarTestCase):
    """Week-layout checks with Monday as the first day of the week."""

    firstweekday = calendar.MONDAY

    def test_february(self):
        # (year, per-week day counts): 28- and 29-day Februaries that
        # begin on a Monday, a Tuesday and a Sunday.
        for year, weeks in (
            (1999, (7, 7, 7, 7)),        # 28 days, starts Monday
            (2005, (6, 7, 7, 7, 1)),     # 28 days, starts Tuesday
            (1987, (1, 7, 7, 7, 6)),     # 28 days, starts Sunday
            (1988, (7, 7, 7, 7, 1)),     # 29 days, starts Monday
            (1972, (6, 7, 7, 7, 2)),     # 29 days, starts Tuesday
            (2004, (1, 7, 7, 7, 7)),     # 29 days, starts Sunday
        ):
            self.check_weeks(year, 2, weeks)

    def test_april(self):
        # A 30-day month beginning on assorted weekdays.
        for year, weeks in (
            (1935, (7, 7, 7, 7, 2)),     # starts Monday
            (1975, (6, 7, 7, 7, 3)),     # starts Tuesday
            (1945, (1, 7, 7, 7, 7, 1)),  # starts Sunday
            (1995, (2, 7, 7, 7, 7)),     # starts Saturday
            (1994, (3, 7, 7, 7, 6)),     # starts Friday
        ):
            self.check_weeks(year, 4, weeks)

    def test_december(self):
        # A 31-day month beginning on assorted weekdays.
        for year, weeks in (
            (1980, (7, 7, 7, 7, 3)),     # starts Monday
            (1987, (6, 7, 7, 7, 4)),     # starts Tuesday
            (1968, (1, 7, 7, 7, 7, 2)),  # starts Sunday
            (1988, (4, 7, 7, 7, 6)),     # starts Thursday
            (2017, (3, 7, 7, 7, 7)),     # starts Friday
            (2068, (2, 7, 7, 7, 7, 1)),  # starts Saturday
        ):
            self.check_weeks(year, 12, weeks)
class SundayTestCase(MonthCalendarTestCase):
    """Week-layout checks with Sunday as the first day of the week."""

    firstweekday = calendar.SUNDAY

    def test_february(self):
        # (year, per-week day counts): 28- and 29-day Februaries that
        # begin on a Sunday, a Monday and a Saturday.
        for year, weeks in (
            (2009, (7, 7, 7, 7)),        # 28 days, starts Sunday
            (1999, (6, 7, 7, 7, 1)),     # 28 days, starts Monday
            (1997, (1, 7, 7, 7, 6)),     # 28 days, starts Saturday
            (2004, (7, 7, 7, 7, 1)),     # 29 days, starts Sunday
            (1960, (6, 7, 7, 7, 2)),     # 29 days, starts Monday
            (1964, (1, 7, 7, 7, 7)),     # 29 days, starts Saturday
        ):
            self.check_weeks(year, 2, weeks)

    def test_april(self):
        # A 30-day month beginning on assorted weekdays.
        for year, weeks in (
            (1923, (7, 7, 7, 7, 2)),     # starts Sunday
            (1918, (6, 7, 7, 7, 3)),     # starts Monday
            (1950, (1, 7, 7, 7, 7, 1)),  # starts Saturday
            (1960, (2, 7, 7, 7, 7)),     # starts Friday
            (1909, (3, 7, 7, 7, 6)),     # starts Thursday
        ):
            self.check_weeks(year, 4, weeks)

    def test_december(self):
        # A 31-day month beginning on assorted weekdays.
        for year, weeks in (
            (2080, (7, 7, 7, 7, 3)),     # starts Sunday
            (1941, (6, 7, 7, 7, 4)),     # starts Monday
            (1923, (1, 7, 7, 7, 7, 2)),  # starts Saturday
            (1948, (4, 7, 7, 7, 6)),     # starts Wednesday
            (1927, (3, 7, 7, 7, 7)),     # starts Thursday
            (1995, (2, 7, 7, 7, 7, 1)),  # starts Friday
        ):
            self.check_weeks(year, 12, weeks)
class MonthRangeTestCase(unittest.TestCase):
    """Boundary tests for calendar.monthrange()."""

    def test_january(self):
        # Valid lower boundary: January 2004 began on a Thursday (3)
        # and has 31 days.
        weekday, ndays = calendar.monthrange(2004, 1)
        self.assertEqual((weekday, ndays), (3, 31))

    def test_february_leap(self):
        # February of a leap year: begins on Sunday (6), 29 days.
        weekday, ndays = calendar.monthrange(2004, 2)
        self.assertEqual((weekday, ndays), (6, 29))

    def test_february_nonleap(self):
        # February of a non-leap year: begins on Monday (0), 28 days.
        weekday, ndays = calendar.monthrange(2010, 2)
        self.assertEqual((weekday, ndays), (0, 28))

    def test_december(self):
        # Valid upper boundary: December 2004 begins on Wednesday (2),
        # 31 days.
        weekday, ndays = calendar.monthrange(2004, 12)
        self.assertEqual((weekday, ndays), (2, 31))

    def test_zeroth_month(self):
        # Low invalid boundary: month numbers start at 1.
        with self.assertRaises(calendar.IllegalMonthError):
            calendar.monthrange(2004, 0)

    def test_thirteenth_month(self):
        # High invalid boundary: month numbers end at 12.
        with self.assertRaises(calendar.IllegalMonthError):
            calendar.monthrange(2004, 13)
class LeapdaysTestCase(unittest.TestCase):
    """Tests for calendar.leapdays() over assorted year ranges."""

    def _check(self, y1, y2, expected):
        # Small helper so every case reads identically.
        self.assertEqual(calendar.leapdays(y1, y2), expected)

    def test_no_range(self):
        # Two identical years form an empty range: no leap days.
        self._check(2010, 2010, 0)

    def test_no_leapdays(self):
        # No leap year falls inside the range.
        self._check(2010, 2011, 0)

    def test_no_leapdays_upper_boundary(self):
        # The upper bound is exclusive, so leap year 2012 is not counted.
        self._check(2010, 2012, 0)

    def test_one_leapday_lower_boundary(self):
        # The lower bound is inclusive, so leap year 2012 is counted.
        self._check(2012, 2013, 1)

    def test_several_leapyears_in_range(self):
        self._check(1997, 2020, 5)
def test_main():
    """Run every test case in this module through the test_support driver."""
    cases = (
        OutputTestCase,
        CalendarTestCase,
        MondayTestCase,
        SundayTestCase,
        MonthRangeTestCase,
        LeapdaysTestCase,
    )
    test_support.run_unittest(*cases)


if __name__ == "__main__":
    test_main()
| gpl-2.0 |
vincent-noel/SigNetSim | signetsim/views/edit/ModelEventsForm.py | 2 | 4068 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2017 Vincent Noel (vincent.noel@butantan.gov.br)
#
# This file is part of libSigNetSim.
#
# libSigNetSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# libSigNetSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with libSigNetSim. If not, see <http://www.gnu.org/licenses/>.
""" ModelEventsForm.py
This file ...
"""
from .ModelParentForm import ModelParentForm
from signetsim.managers.models import renameSbmlIdInModelDependencies
class ModelEventsForm(ModelParentForm):
    # Form handler for SBML events: parses an HTTP POST describing an event
    # (trigger, assignments, options) and writes the values back onto an
    # event object via save().
    # NOTE(review): `request` is presumably a Django HttpRequest --
    # readAssignments() inspects request.POST directly; confirm against the
    # calling view.  The read*/addError/printErrors helpers come from
    # ModelParentForm and accumulate errors rather than raising.

    def __init__(self, parent):
        """Initialize all form fields to their defaults.

        `parent` is the enclosing model form; it supplies the SBML model
        (getSbmlModel) and the variable list (listOfVariables).
        """
        ModelParentForm.__init__(self, parent)
        self.name = None
        self.sbmlId = None
        # Trigger formula (string); empty until read from the request.
        self.trigger = ""
        # Optional math expressions for the event.
        self.delay = None
        self.priority = None
        # Trigger flags (SBML trigger initial value / persistence).
        self.initialvalue = False
        self.persistent = True
        # List of (variable index, expression string) pairs; placeholder
        # entry until readAssignments() replaces it.
        self.assignments = [(None, None)]
        self.useValuesFromTriggerTime = False

    def save(self, event):
        """Write the parsed form values into `event`.

        If the SBML id changed, references to the old id elsewhere in the
        model are renamed first, then the event itself is updated.
        """
        event.setName(self.name)
        if event.getSbmlId() != self.sbmlId:
            # Keep the rest of the model consistent with the new id.
            renameSbmlIdInModelDependencies(
                self.parent.getSbmlModel(), event.getSbmlId(), self.sbmlId
            )
            event.setSbmlId(self.sbmlId)
        event.setTrigger(self.trigger)
        event.setDelay(self.delay)
        event.setPriority(self.priority)
        event.setTriggerInitialValue(self.initialvalue)
        event.setTriggerPersistent(self.persistent)
        event.setUseValuesFromTriggerTime(self.useValuesFromTriggerTime)
        # Rebuild the assignment list from scratch: any assignments the
        # event previously had are discarded.
        event.listOfEventAssignments = []
        for assignment in self.assignments:
            t_assignment = event.addEventAssignment()
            # assignment[0] is an index into the parent's variable list.
            t_variable = self.parent.listOfVariables[assignment[0]]
            t_assignment.setVariable(t_variable)
            t_assignment.getDefinition().setPrettyPrintMathFormula(assignment[1])

    def read(self, request):
        """Read and validate every event field from the POSTed form."""
        self.id = self.readInt(request, 'event_id',
            "The indice of the event", required=False)
        self.name = self.readASCIIString(request, 'event_name',
            "The name of the event", required=False)
        self.sbmlId = self.readASCIIString(request, 'event_sbmlid',
            "The identifier of the event")
        self.readTrigger(request)
        self.readAssignments(request)
        self.readOptions(request)
        # Report any errors accumulated by the read* helpers above.
        self.printErrors()

    def readTrigger(self, request):
        """Read the trigger condition (a math formula) from the form."""
        self.trigger = self.readMath(request, 'event_trigger',
            "The trigger of the event")

    def readAssignments(self, request):
        """Read the numbered assignment fields until one is missing.

        Fields are named event_assignment_<i>_id / _expression; the loop
        stops at the first index not present in the POST data.
        """
        nb_assignment = 0
        while ("event_assignment_%d_id" % nb_assignment) in request.POST:
            # Drop the placeholder default only once we know the form
            # actually carries assignments.
            if nb_assignment == 0:
                self.assignments = []
            t_assignment = self.readInt(request,
                ('event_assignment_%d_id' % nb_assignment),
                "The variable for the assignment #%d" % nb_assignment,
                max_value=len(self.parent.listOfVariables))
            t_assignment_expression = self.readMath(request,
                ('event_assignment_%d_expression' % nb_assignment),
                "The assignment for the assignment #%d" % nb_assignment)
            if t_assignment is not None:
                self.assignments.append((t_assignment, t_assignment_expression))
            nb_assignment += 1
        # The form requires at least one assignment.
        if nb_assignment < 1:
            self.addError("Event need at least one assignment")

    def readOptions(self, request):
        """Read the optional delay/priority expressions and trigger flags."""
        self.priority = self.readMath(request, 'event_priority',
            "The priority of the event", required=False)
        self.delay = self.readMath(request, 'event_delay',
            "The delay of the event", required=False)
        self.persistent = self.readOnOff(request, 'event_persistent',
            "The persistent property of the event")
        self.initialvalue = self.readOnOff(request, 'event_initialvalue',
            "The initial value of the event")
        self.useValuesFromTriggerTime = self.readOnOff(request,
            'event_usetriggertime',
            "The use values from trigger time property of the event")
| agpl-3.0 |
nuagenetworks/vspk-python | vspk/v6/nuingressadvfwdtemplate.py | 1 | 21736 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUPermissionsFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from .fetchers import NUIngressAdvFwdEntryTemplatesFetcher
from .fetchers import NUJobsFetcher
from bambou import NURESTObject
class NUIngressAdvFwdTemplate(NURESTObject):
    """ Represents a IngressAdvFwdTemplate in the VSD

        Notes:
            Create a table for ingress forwarding policy rules. These include flow redirect rules and forwarding class override rules.
    """

    # REST endpoint names for this resource.
    __rest_name__ = "ingressadvfwdtemplate"
    __resource_name__ = "ingressadvfwdtemplates"

    ## Constants
    # Allowed values for the policyState, priorityType and entityScope
    # attributes (they match the `choices=` lists registered in __init__).
    CONST_POLICY_STATE_DRAFT = "DRAFT"

    CONST_PRIORITY_TYPE_MIDDLE_FIREWALL = "MIDDLE_FIREWALL"

    CONST_PRIORITY_TYPE_TOP_FIREWALL = "TOP_FIREWALL"

    CONST_POLICY_STATE_LIVE = "LIVE"

    CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"

    CONST_PRIORITY_TYPE_NONE = "NONE"

    CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"

    CONST_PRIORITY_TYPE_BOTTOM_FIREWALL = "BOTTOM_FIREWALL"

    CONST_PRIORITY_TYPE_TOP = "TOP"

    CONST_PRIORITY_TYPE_BOTTOM = "BOTTOM"

    def __init__(self, **kwargs):
        """ Initializes a IngressAdvFwdTemplate instance

            Notes:
                You can specify all parameters while calling this methods.
                A special argument named `data` will enable you to load the
                object from a Python dictionary

            Examples:
                >>> ingressadvfwdtemplate = NUIngressAdvFwdTemplate(id=u'xxxx-xxx-xxx-xxx', name=u'IngressAdvFwdTemplate')
                >>> ingressadvfwdtemplate = NUIngressAdvFwdTemplate(data=my_dict)
        """

        super(NUIngressAdvFwdTemplate, self).__init__()

        # Read/Write Attributes
        # Private backing fields for the properties defined below; all
        # start as None and are filled by _compute_args()/the REST layer.
        self._name = None
        self._last_updated_by = None
        self._last_updated_date = None
        self._active = None
        self._default_allow_ip = None
        self._default_allow_non_ip = None
        self._description = None
        self._embedded_metadata = None
        self._entity_scope = None
        self._policy_state = None
        self._creation_date = None
        self._priority = None
        self._priority_type = None
        self._associated_live_entity_id = None
        self._associated_virtual_firewall_policy_id = None
        self._auto_generate_priority = None
        self._owner = None
        self._external_id = None

        # NOTE(review): expose_attribute() presumably registers the mapping
        # between each local attribute and its remote (VSD API) name for
        # (de)serialization -- defined in bambou.NURESTObject; confirm there.
        self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False)
        self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="active", remote_name="active", attribute_type=bool, is_required=False, is_unique=False)
        self.expose_attribute(local_name="default_allow_ip", remote_name="defaultAllowIP", attribute_type=bool, is_required=False, is_unique=False)
        self.expose_attribute(local_name="default_allow_non_ip", remote_name="defaultAllowNonIP", attribute_type=bool, is_required=False, is_unique=False)
        self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False)
        self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
        self.expose_attribute(local_name="policy_state", remote_name="policyState", attribute_type=str, is_required=False, is_unique=False, choices=[u'DRAFT', u'LIVE'])
        self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="priority", remote_name="priority", attribute_type=int, is_required=False, is_unique=False)
        self.expose_attribute(local_name="priority_type", remote_name="priorityType", attribute_type=str, is_required=False, is_unique=False, choices=[u'BOTTOM', u'BOTTOM_FIREWALL', u'MIDDLE_FIREWALL', u'NONE', u'TOP', u'TOP_FIREWALL'])
        self.expose_attribute(local_name="associated_live_entity_id", remote_name="associatedLiveEntityID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="associated_virtual_firewall_policy_id", remote_name="associatedVirtualFirewallPolicyID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="auto_generate_priority", remote_name="autoGeneratePriority", attribute_type=bool, is_required=False, is_unique=False)
        self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False)
        # externalID is the only attribute flagged unique.
        self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)

        # Fetchers
        # Child-object fetchers for the template's child relationships.
        self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")

        self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")

        self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")

        self.ingress_adv_fwd_entry_templates = NUIngressAdvFwdEntryTemplatesFetcher.fetcher_with_object(parent_object=self, relationship="child")

        self.jobs = NUJobsFetcher.fetcher_with_object(parent_object=self, relationship="child")

        # Apply any keyword arguments (or a `data` dict) to the attributes
        # exposed above.
        self._compute_args(**kwargs)
# Properties
@property
def name(self):
""" Get name value.
Notes:
The name of the entity
"""
return self._name
@name.setter
def name(self, value):
""" Set name value.
Notes:
The name of the entity
"""
self._name = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def last_updated_date(self):
""" Get last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
return self._last_updated_date
@last_updated_date.setter
def last_updated_date(self, value):
""" Set last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
self._last_updated_date = value
@property
def active(self):
""" Get active value.
Notes:
If enabled, it means that this ACL or QOS entry is active
"""
return self._active
@active.setter
def active(self, value):
""" Set active value.
Notes:
If enabled, it means that this ACL or QOS entry is active
"""
self._active = value
@property
def default_allow_ip(self):
""" Get default_allow_ip value.
Notes:
If enabled a default ACL of Allow All is added as the last entry in the list of ACL entries
This attribute is named `defaultAllowIP` in VSD API.
"""
return self._default_allow_ip
@default_allow_ip.setter
def default_allow_ip(self, value):
""" Set default_allow_ip value.
Notes:
If enabled a default ACL of Allow All is added as the last entry in the list of ACL entries
This attribute is named `defaultAllowIP` in VSD API.
"""
self._default_allow_ip = value
@property
def default_allow_non_ip(self):
""" Get default_allow_non_ip value.
Notes:
If enabled, non ip traffic will be dropped
This attribute is named `defaultAllowNonIP` in VSD API.
"""
return self._default_allow_non_ip
@default_allow_non_ip.setter
def default_allow_non_ip(self, value):
""" Set default_allow_non_ip value.
Notes:
If enabled, non ip traffic will be dropped
This attribute is named `defaultAllowNonIP` in VSD API.
"""
self._default_allow_non_ip = value
@property
def description(self):
""" Get description value.
Notes:
A description of the entity
"""
return self._description
@description.setter
def description(self, value):
    """Setter for `description`.

    A free-form description of the entity.
    """
    self._description = value
@property
def embedded_metadata(self):
    """Getter for the `embeddedMetadata` VSD API attribute.

    Metadata objects associated with this entity. Populated with a
    list of Metadata objects only when the API request sets the
    special flag enabling the embedded Metadata feature; at most the
    system-configured maximum number of Metadata objects is returned.
    """
    return self._embedded_metadata
@embedded_metadata.setter
def embedded_metadata(self, value):
    """Setter for the `embeddedMetadata` VSD API attribute.

    Metadata objects associated with this entity. Populated with a
    list of Metadata objects only when the API request sets the
    special flag enabling the embedded Metadata feature; at most the
    system-configured maximum number of Metadata objects is returned.
    """
    self._embedded_metadata = value
@property
def entity_scope(self):
    """Getter for the `entityScope` VSD API attribute.

    Specifies whether the entity's scope is Data center or
    Enterprise level.
    """
    return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
    """Setter for the `entityScope` VSD API attribute.

    Specifies whether the entity's scope is Data center or
    Enterprise level.
    """
    self._entity_scope = value
@property
def policy_state(self):
    """Getter for the `policyState` VSD API attribute.

    No further description is provided by the VSD API.
    """
    return self._policy_state
@policy_state.setter
def policy_state(self, value):
    """Setter for the `policyState` VSD API attribute.

    No further description is provided by the VSD API.
    """
    self._policy_state = value
@property
def creation_date(self):
    """Getter for the `creationDate` VSD API attribute.

    Time stamp recording when this object was created.
    """
    return self._creation_date
@creation_date.setter
def creation_date(self, value):
    """Setter for the `creationDate` VSD API attribute.

    Time stamp recording when this object was created.
    """
    self._creation_date = value
@property
def priority(self):
    """Getter for `priority`.

    Priority of the ACL entry; determines the ordering of entries.
    """
    return self._priority
@priority.setter
def priority(self, value):
    """Setter for `priority`.

    Priority of the ACL entry; determines the ordering of entries.
    """
    self._priority = value
@property
def priority_type(self):
    """Getter for the `priorityType` VSD API attribute.

    One of TOP, BOTTOM, TOP_FIREWALL, BOTTOM_FIREWALL,
    MIDDLE_FIREWALL or NONE. TOP and BOTTOM ACL policies can only be
    defined and managed at the template level; NONE is usable on both
    the template and instantiated level. The *_FIREWALL values
    represent ACLs derived from Virtual Firewall Policies. These
    allow careful control of ACL priority handling.
    """
    return self._priority_type
@priority_type.setter
def priority_type(self, value):
    """Setter for the `priorityType` VSD API attribute.

    One of TOP, BOTTOM, TOP_FIREWALL, BOTTOM_FIREWALL,
    MIDDLE_FIREWALL or NONE. TOP and BOTTOM ACL policies can only be
    defined and managed at the template level; NONE is usable on both
    the template and instantiated level. The *_FIREWALL values
    represent ACLs derived from Virtual Firewall Policies. These
    allow careful control of ACL priority handling.
    """
    self._priority_type = value
@property
def associated_live_entity_id(self):
    """Getter for the `associatedLiveEntityID` VSD API attribute.

    In draft mode, the ACL entry refers to this LiveEntity; in
    non-drafted mode this is null.
    """
    return self._associated_live_entity_id
@associated_live_entity_id.setter
def associated_live_entity_id(self, value):
    """Setter for the `associatedLiveEntityID` VSD API attribute.

    In draft mode, the ACL entry refers to this LiveEntity; in
    non-drafted mode this is null.
    """
    self._associated_live_entity_id = value
@property
def associated_virtual_firewall_policy_id(self):
    """Getter for the `associatedVirtualFirewallPolicyID` VSD API attribute.

    ID of the Virtual Firewall Policy, when this object was created
    as part of a Virtual Firewall Policy creation.
    """
    return self._associated_virtual_firewall_policy_id
@associated_virtual_firewall_policy_id.setter
def associated_virtual_firewall_policy_id(self, value):
    """Setter for the `associatedVirtualFirewallPolicyID` VSD API attribute.

    ID of the Virtual Firewall Policy, when this object was created
    as part of a Virtual Firewall Policy creation.
    """
    self._associated_virtual_firewall_policy_id = value
@property
def auto_generate_priority(self):
    """Getter for the `autoGeneratePriority` VSD API attribute.

    Controls how ACL entry priorities are generated when not
    specified: if 'false', the priority is the current highest
    priority incremented by 100; if 'true', a random priority is
    generated, which is advised when creating many entries
    concurrently without specifying priorities. A random priority
    places the entry randomly in the existing list of ACL entries,
    so it is advised to enable this only for allow rules.
    """
    return self._auto_generate_priority
@auto_generate_priority.setter
def auto_generate_priority(self, value):
    """Setter for the `autoGeneratePriority` VSD API attribute.

    Controls how ACL entry priorities are generated when not
    specified: if 'false', the priority is the current highest
    priority incremented by 100; if 'true', a random priority is
    generated, which is advised when creating many entries
    concurrently without specifying priorities. A random priority
    places the entry randomly in the existing list of ACL entries,
    so it is advised to enable this only for allow rules.
    """
    self._auto_generate_priority = value
@property
def owner(self):
    """Getter for `owner`.

    Identifies the user that created this object.
    """
    return self._owner
@owner.setter
def owner(self, value):
    """Setter for `owner`.

    Identifies the user that created this object.
    """
    self._owner = value
@property
def external_id(self):
    """Getter for the `externalID` VSD API attribute.

    External object ID, used for integration with third-party
    systems.
    """
    return self._external_id
@external_id.setter
def external_id(self, value):
    """Setter for the `externalID` VSD API attribute.

    External object ID, used for integration with third-party
    systems.
    """
    self._external_id = value
## Custom methods
def is_template(self):
    """Report whether this object is a template.

    Returns:
        (bool): always True — this class models a template object.
    """
    return True
def is_from_template(self):
    """Report whether this object was instantiated from a template.

    Note:
        The object has to be fetched first. Otherwise it does not
        carry the parent information this check relies on.

    Returns:
        (bool): True if the object has a parent and its own rest_name
            differs from the parent's type — i.e. it was instantiated
            from a template rather than being the template itself.
    """
    # bool() prevents leaking a falsy non-bool (e.g. self.parent is
    # None) out of the short-circuit `and` expression; truthiness is
    # unchanged for all callers.
    return bool(self.parent and self.rest_name != self.parent_type)
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.