repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
cpennington/edx-platform | openedx/core/djangoapps/django_comment_common/utils.py | 5 | 6316 | # pylint: disable=missing-docstring
"""
Common comment client utility functions.
"""
import six
from contracts import new_contract
from openedx.core.djangoapps.course_groups.cohorts import get_legacy_discussion_settings
from openedx.core.djangoapps.django_comment_common.models import (
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_GROUP_MODERATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_STUDENT,
CourseDiscussionSettings,
Role
)
from openedx.core.lib.cache_utils import request_cached
new_contract('basestring', six.string_types[0])
class ThreadContext(object):
    """ An enumeration that represents the context of a thread. Used primarily by the comments service. """
    # STANDALONE threads live outside any course-wide listing (e.g. inline
    # team discussions); COURSE threads belong to the course-wide forums.
    STANDALONE = 'standalone'
    COURSE = 'course'
# Base permissions granted to every enrolled learner's forum role.
STUDENT_ROLE_PERMISSIONS = ["vote", "update_thread", "follow_thread", "unfollow_thread",
                            "update_comment", "create_sub_comment", "unvote", "create_thread",
                            "follow_commentable", "unfollow_commentable", "create_comment", ]

# Extra permissions for moderators (on top of the student set, which is
# granted via Role.inherit_permissions in seed_permissions_roles).
MODERATOR_ROLE_PERMISSIONS = ["edit_content", "delete_thread", "openclose_thread",
                              "endorse_comment", "delete_comment", "see_all_cohorts"]

# Group moderators get the "group_"-scoped variants: same actions, but only
# within their own cohort/enrollment group.
GROUP_MODERATOR_ROLE_PERMISSIONS = ["group_edit_content", "group_delete_thread", "group_openclose_thread",
                                    "group_endorse_comment", "group_delete_comment"]

# Administrators additionally manage who the moderators are.
ADMINISTRATOR_ROLE_PERMISSIONS = ["manage_moderator"]

# Permissions implicitly held by global staff (not stored as a Role row).
GLOBAL_STAFF_ROLE_PERMISSIONS = ["see_all_cohorts"]
def _save_forum_role(course_key, name):
    """
    Fetch (or create) the forum role called `name` for `course_key`, making
    sure an existing row's course_id is updated to match the key that was
    actually passed in.
    """
    forum_role, was_created = Role.objects.get_or_create(name=name, course_id=course_key)
    if not was_created:
        # A pre-existing row may carry a stale course_id; re-point and persist.
        forum_role.course_id = course_key
        forum_role.save()
    return forum_role
def seed_permissions_roles(course_key):
    """
    Create the forum roles for a course and grant each role its permissions.
    """
    administrator_role = _save_forum_role(course_key, FORUM_ROLE_ADMINISTRATOR)
    moderator_role = _save_forum_role(course_key, FORUM_ROLE_MODERATOR)
    group_moderator_role = _save_forum_role(course_key, FORUM_ROLE_GROUP_MODERATOR)
    community_ta_role = _save_forum_role(course_key, FORUM_ROLE_COMMUNITY_TA)
    student_role = _save_forum_role(course_key, FORUM_ROLE_STUDENT)

    # Grant each role its own base permission set.
    base_permissions = (
        (student_role, STUDENT_ROLE_PERMISSIONS),
        (moderator_role, MODERATOR_ROLE_PERMISSIONS),
        (group_moderator_role, GROUP_MODERATOR_ROLE_PERMISSIONS),
        (administrator_role, ADMINISTRATOR_ROLE_PERMISSIONS),
    )
    for role, permissions in base_permissions:
        for permission in permissions:
            role.add_permission(permission)

    # Inheritance chain: moderators and group moderators extend students;
    # community TAs and administrators extend moderators.
    moderator_role.inherit_permissions(student_role)
    group_moderator_role.inherit_permissions(student_role)
    # For now, Community TA == Moderator, except for the styling.
    community_ta_role.inherit_permissions(moderator_role)
    administrator_role.inherit_permissions(moderator_role)
def are_permissions_roles_seeded(course_id):
    """
    Returns whether the forums permissions for a course have been provisioned
    in the database.

    Arguments:
        course_id: the course key whose forum roles should be checked.

    Returns:
        bool: True only when every expected role exists and has every
        permission it is supposed to carry (including inherited ones).
    """
    try:
        administrator_role = Role.objects.get(name=FORUM_ROLE_ADMINISTRATOR, course_id=course_id)
        moderator_role = Role.objects.get(name=FORUM_ROLE_MODERATOR, course_id=course_id)
        group_moderator_role = Role.objects.get(name=FORUM_ROLE_GROUP_MODERATOR, course_id=course_id)
        student_role = Role.objects.get(name=FORUM_ROLE_STUDENT, course_id=course_id)
    except Role.DoesNotExist:
        # Narrowed from a bare `except:`: a missing role means seeding never
        # ran (or ran partially); any other error should propagate rather
        # than be silently reported as "not seeded".
        return False

    # Moderator-level roles must also carry the inherited student/moderator
    # permissions (mirrors the inheritance set up in seed_permissions_roles).
    expected = (
        (student_role, STUDENT_ROLE_PERMISSIONS),
        (moderator_role, MODERATOR_ROLE_PERMISSIONS + STUDENT_ROLE_PERMISSIONS),
        (group_moderator_role, GROUP_MODERATOR_ROLE_PERMISSIONS + STUDENT_ROLE_PERMISSIONS),
        (administrator_role,
         ADMINISTRATOR_ROLE_PERMISSIONS + MODERATOR_ROLE_PERMISSIONS + STUDENT_ROLE_PERMISSIONS),
    )
    for role, permissions in expected:
        for permission in permissions:
            if not role.has_permission(permission):
                return False
    return True
@request_cached()
def get_course_discussion_settings(course_key):
    """
    Return the `CourseDiscussionSettings` for a course, creating the row on
    first access.

    When no row exists yet, defaults are migrated from the legacy
    cohort-based discussion settings. The result is cached per request via
    `request_cached`.
    """
    try:
        course_discussion_settings = CourseDiscussionSettings.objects.get(course_id=course_key)
    except CourseDiscussionSettings.DoesNotExist:
        legacy_discussion_settings = get_legacy_discussion_settings(course_key)
        # get_or_create (rather than create) — presumably to tolerate a
        # concurrent request inserting the row between the get() above and
        # here; verify against callers if this matters.
        course_discussion_settings, _ = CourseDiscussionSettings.objects.get_or_create(
            course_id=course_key,
            defaults={
                'always_divide_inline_discussions': legacy_discussion_settings['always_cohort_inline_discussions'],
                'divided_discussions': legacy_discussion_settings['cohorted_discussions'],
                'division_scheme': CourseDiscussionSettings.COHORT if legacy_discussion_settings['is_cohorted']
                                   else CourseDiscussionSettings.NONE
            }
        )
    return course_discussion_settings
def set_course_discussion_settings(course_key, **kwargs):
    """
    Set discussion settings for a course.

    Arguments:
        course_key: CourseKey
        always_divide_inline_discussions (bool): If inline discussions should always be divided.
        divided_discussions (list): List of discussion ids.
        division_scheme (str): `CourseDiscussionSettings.NONE`, `CourseDiscussionSettings.COHORT`,
            or `CourseDiscussionSettings.ENROLLMENT_TRACK`

    Returns:
        A CourseDiscussionSettings object.
    """
    # Recognized settings and the type each one must have; any other kwarg
    # is silently ignored (matching historical behavior).
    expected_types = {
        'division_scheme': six.string_types[0],
        'always_divide_inline_discussions': bool,
        'divided_discussions': list,
    }

    settings = get_course_discussion_settings(course_key)
    for name, expected_type in expected_types.items():
        if name not in kwargs:
            continue
        value = kwargs[name]
        if not isinstance(value, expected_type):
            raise ValueError(
                u"Incorrect field type for `{}`. Type must be `{}`".format(name, expected_type.__name__)
            )
        setattr(settings, name, value)
    settings.save()
    return settings
| agpl-3.0 |
mscherer/ansible-modules-core | cloud/openstack/os_server.py | 6 | 21338 | #!/usr/bin/python
# coding: utf-8 -*-
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
# Copyright (c) 2013, John Dewey <john@dewey.ws>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
from shade import meta
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_server
short_description: Create/Delete Compute Instances from OpenStack
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- Create or Remove compute instances from OpenStack.
options:
name:
description:
- Name that has to be given to the instance
required: true
image:
description:
- The name or id of the base image to boot.
required: true
image_exclude:
description:
- Text to use to filter image names, for the case, such as HP, where
there are multiple image names matching the common identifying
portions. image_exclude is a negative match filter - it is text that
may not exist in the image name. Defaults to "(deprecated)"
flavor:
description:
- The name or id of the flavor in which the new instance has to be
created. Mutually exclusive with flavor_ram
required: false
default: 1
flavor_ram:
description:
- The minimum amount of ram in MB that the flavor in which the new
instance has to be created must have. Mutually exclusive with flavor.
required: false
default: 1
flavor_include:
description:
- Text to use to filter flavor names, for the case, such as Rackspace,
where there are multiple flavors that have the same ram count.
flavor_include is a positive match filter - it must exist in the
flavor name.
key_name:
description:
- The key pair name to be used when creating a instance
required: false
default: None
security_groups:
description:
- Names of the security groups to which the instance should be
added. This may be a YAML list or a comma separated string.
required: false
default: None
network:
description:
- Name or ID of a network to attach this instance to. A simpler
version of the nics parameter, only one of network or nics should
be supplied.
required: false
default: None
nics:
description:
- A list of networks to which the instance's interface should
be attached. Networks may be referenced by net-id/net-name/port-id
or port-name.
- 'Also this accepts a string containing a list of (net/port)-(id/name)
Eg: nics: "net-id=uuid-1,port-name=myport"
Only one of network or nics should be supplied.'
required: false
default: None
auto_ip:
description:
- Ensure instance has public ip however the cloud wants to do that
required: false
default: 'yes'
aliases: ['auto_floating_ip', 'public_ip']
floating_ips:
description:
- list of valid floating IPs that pre-exist to assign to this node
required: false
default: None
floating_ip_pools:
description:
- Name of floating IP pool from which to choose a floating IP
required: false
default: None
meta:
description:
- 'A list of key value pairs that should be provided as a metadata to
the new instance or a string containing a list of key-value pairs.
Eg: meta: "key1=value1,key2=value2"'
required: false
default: None
wait:
description:
- If the module should wait for the instance to be created.
required: false
default: 'yes'
timeout:
description:
- The amount of time the module should wait for the instance to get
into active state.
required: false
default: 180
config_drive:
description:
- Whether to boot the server with config drive enabled
required: false
default: 'no'
userdata:
description:
- Opaque blob of data which is made available to the instance
required: false
default: None
boot_from_volume:
description:
- Should the instance boot from a persistent volume created based on
the image given. Mututally exclusive with boot_volume.
required: false
default: false
volume_size:
description:
- The size of the volume to create in GB if booting from volume based
on an image.
boot_volume:
description:
- Volume name or id to use as the volume to boot from. Implies
boot_from_volume. Mutually exclusive with image and boot_from_volume.
required: false
default: None
aliases: ['root_volume']
terminate_volume:
description:
- If true, delete volume when deleting instance (if booted from volume)
default: false
volumes:
description:
- A list of preexisting volumes names or ids to attach to the instance
required: false
default: []
scheduler_hints:
description:
- Arbitrary key/value pairs to the scheduler for custom use
required: false
default: None
version_added: "2.1"
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
delete_fip:
description:
- When I(state) is absent and this option is true, any floating IP
associated with the instance will be deleted along with the instance.
required: false
default: false
version_added: "2.2"
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Creates a new instance and attaches to a network and passes metadata to
# the instance
- os_server:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
username: admin
password: admin
project_name: admin
name: vm1
image: 4f905f38-e52a-43d2-b6ec-754a13ffb529
key_name: ansible_key
timeout: 200
flavor: 4
nics:
- net-id: 34605f38-e52a-25d2-b6ec-754a13ffb723
- net-name: another_network
meta:
hostname: test1
group: uge_master
# Creates a new instance in HP Cloud AE1 region availability zone az2 and
# automatically assigns a floating IP
- name: launch a compute instance
hosts: localhost
tasks:
- name: launch an instance
os_server:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
username: username
password: Equality7-2521
project_name: username-project1
name: vm1
region_name: region-b.geo-1
availability_zone: az2
image: 9302692b-b787-4b52-a3a6-daebb79cb498
key_name: test
timeout: 200
flavor: 101
security_groups: default
auto_ip: yes
# Creates a new instance in named cloud mordred availability zone az2
# and assigns a pre-known floating IP
- name: launch a compute instance
hosts: localhost
tasks:
- name: launch an instance
os_server:
state: present
cloud: mordred
name: vm1
availability_zone: az2
image: 9302692b-b787-4b52-a3a6-daebb79cb498
key_name: test
timeout: 200
flavor: 101
floating_ips:
- 12.34.56.79
# Creates a new instance with 4G of RAM on Ubuntu Trusty, ignoring
# deprecated images
- name: launch a compute instance
hosts: localhost
tasks:
- name: launch an instance
os_server:
name: vm1
state: present
cloud: mordred
region_name: region-b.geo-1
image: Ubuntu Server 14.04
image_exclude: deprecated
flavor_ram: 4096
# Creates a new instance with 4G of RAM on Ubuntu Trusty on a Performance node
- name: launch a compute instance
hosts: localhost
tasks:
- name: launch an instance
os_server:
name: vm1
cloud: rax-dfw
state: present
image: Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)
flavor_ram: 4096
flavor_include: Performance
# Creates a new instance and attaches to multiple network
- name: launch a compute instance
hosts: localhost
tasks:
- name: launch an instance with a string
os_server:
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
username: admin
password: admin
project_name: admin
name: vm1
image: 4f905f38-e52a-43d2-b6ec-754a13ffb529
key_name: ansible_key
timeout: 200
flavor: 4
nics: "net-id=4cb08b20-62fe-11e5-9d70-feff819cdc9f,net-id=542f0430-62fe-11e5-9d70-feff819cdc9f..."
# Creates a new instance and attaches to a network and passes metadata to
# the instance
- os_server:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
username: admin
password: admin
project_name: admin
name: vm1
image: 4f905f38-e52a-43d2-b6ec-754a13ffb529
key_name: ansible_key
timeout: 200
flavor: 4
nics:
- net-id: 34605f38-e52a-25d2-b6ec-754a13ffb723
- net-name: another_network
meta: "hostname=test1,group=uge_master"
# Creates a new instance and attaches to a specific network
- os_server:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
username: admin
password: admin
project_name: admin
name: vm1
image: 4f905f38-e52a-43d2-b6ec-754a13ffb529
key_name: ansible_key
timeout: 200
flavor: 4
network: another_network
# Creates a new instance with 4G of RAM on a 75G Ubuntu Trusty volume
- name: launch a compute instance
hosts: localhost
tasks:
- name: launch an instance
os_server:
name: vm1
state: present
cloud: mordred
region_name: ams01
image: Ubuntu Server 14.04
flavor_ram: 4096
boot_from_volume: True
volume_size: 75
# Creates a new instance with 2 volumes attached
- name: launch a compute instance
hosts: localhost
tasks:
- name: launch an instance
os_server:
name: vm1
state: present
cloud: mordred
region_name: ams01
image: Ubuntu Server 14.04
flavor_ram: 4096
volumes:
- photos
- music
'''
def _exit_hostvars(module, cloud, server, changed=True):
    """Exit the module successfully, reporting the server and its hostvars."""
    host_facts = meta.get_hostvars_from_server(cloud, server)
    module.exit_json(changed=changed,
                     server=server,
                     id=server.id,
                     openstack=host_facts)
def _parse_nics(nics):
for net in nics:
if type(net) == str:
for nic in net.split(','):
yield dict((nic.split('='),))
else:
yield net
def _network_args(module, cloud):
    """
    Convert the module's `nics` parameter into the list-of-dicts form that
    shade's create_server expects ({'net-id': ...} / {'port-id': ...}),
    resolving net-name and port-name references through the cloud API.

    Calls module.fail_json (which exits) on malformed input or when a named
    network/port cannot be found.
    """
    args = []
    nics = module.params['nics']

    # isinstance instead of `type(x) != list` — accepts list subclasses and
    # is the idiomatic type check.
    if not isinstance(nics, list):
        module.fail_json(msg='The \'nics\' parameter must be a list.')

    for net in _parse_nics(nics):
        if not isinstance(net, dict):
            module.fail_json(
                msg='Each entry in the \'nics\' parameter must be a dict.')

        if net.get('net-id'):
            args.append(net)
        elif net.get('net-name'):
            by_name = cloud.get_network(net['net-name'])
            if not by_name:
                module.fail_json(
                    msg='Could not find network by net-name: %s' %
                    net['net-name'])
            args.append({'net-id': by_name['id']})
        elif net.get('port-id'):
            args.append(net)
        elif net.get('port-name'):
            by_name = cloud.get_port(net['port-name'])
            if not by_name:
                module.fail_json(
                    msg='Could not find port by port-name: %s' %
                    net['port-name'])
            args.append({'port-id': by_name['id']})
    return args
def _delete_server(module, cloud):
    """
    Delete the server named in the module parameters and exit the module.

    Honors the `wait`/`timeout` parameters and, when `delete_fip` is set,
    also releases any floating IPs attached to the instance.
    """
    try:
        cloud.delete_server(
            module.params['name'], wait=module.params['wait'],
            timeout=module.params['timeout'],
            delete_ips=module.params['delete_fip'])
    except Exception as e:
        # str(e) rather than e.message: BaseException.message was removed
        # in Python 3 (deprecated since 2.6) and not every exception sets it.
        module.fail_json(msg="Error in deleting vm: %s" % str(e))
    module.exit_json(changed=True, result='deleted')
def _create_server(module, cloud):
    """
    Boot a new server from the module parameters and exit the module with
    its facts. Never returns: _exit_hostvars calls module.exit_json.
    """
    flavor = module.params['flavor']
    flavor_ram = module.params['flavor_ram']
    flavor_include = module.params['flavor_include']

    image_id = None
    if not module.params['boot_volume']:
        # Only resolve an image when not booting from an existing volume.
        image_id = cloud.get_image_id(
            module.params['image'], module.params['image_exclude'])

    if flavor:
        flavor_dict = cloud.get_flavor(flavor)
        if not flavor_dict:
            module.fail_json(msg="Could not find flavor %s" % flavor)
    else:
        # No explicit flavor: pick one by minimum RAM, filtered by
        # flavor_include when given.
        flavor_dict = cloud.get_flavor_by_ram(flavor_ram, flavor_include)
        if not flavor_dict:
            module.fail_json(msg="Could not find any matching flavor")

    nics = _network_args(module, cloud)

    if type(module.params['meta']) is str:
        # Accept the "k1=v1,k2=v2" shorthand and normalize it into a dict.
        metas = {}
        for kv_str in module.params['meta'].split(","):
            k, v = kv_str.split("=")
            metas[k] = v
        module.params['meta'] = metas

    bootkwargs = dict(
        name=module.params['name'],
        image=image_id,
        flavor=flavor_dict['id'],
        nics=nics,
        meta=module.params['meta'],
        security_groups=module.params['security_groups'],
        userdata=module.params['userdata'],
        config_drive=module.params['config_drive'],
    )
    # Only forward optional parameters the user actually set, so shade's
    # own defaults apply otherwise.
    for optional_param in (
            'key_name', 'availability_zone', 'network',
            'scheduler_hints', 'volume_size', 'volumes'):
        if module.params[optional_param]:
            bootkwargs[optional_param] = module.params[optional_param]

    server = cloud.create_server(
        ip_pool=module.params['floating_ip_pools'],
        ips=module.params['floating_ips'],
        auto_ip=module.params['auto_ip'],
        boot_volume=module.params['boot_volume'],
        boot_from_volume=module.params['boot_from_volume'],
        terminate_volume=module.params['terminate_volume'],
        wait=module.params['wait'], timeout=module.params['timeout'],
        **bootkwargs
    )

    _exit_hostvars(module, cloud, server)
def _delete_floating_ip_list(cloud, server, extra_ips):
    """Detach every address in `extra_ips` from the server via nova."""
    for address in extra_ips:
        cloud.nova_client.servers.remove_floating_ip(server=server.id,
                                                     address=address)
def _check_floating_ips(module, cloud, server):
    """
    Reconcile the server's floating IPs with the requested configuration.

    Returns a (changed, server) tuple; `server` may be a refreshed object
    when addresses were added by shade.
    """
    changed = False
    auto_ip = module.params['auto_ip']
    floating_ips = module.params['floating_ips']
    floating_ip_pools = module.params['floating_ip_pools']

    if floating_ip_pools or floating_ips or auto_ip:
        ips = openstack_find_nova_addresses(server.addresses, 'floating')

        if not ips:
            # If we're configured to have a floating but we don't have one,
            # let's add one
            server = cloud.add_ips_to_server(
                server,
                auto_ip=auto_ip,
                ips=floating_ips,
                ip_pool=floating_ip_pools,
                wait=module.params['wait'],
                timeout=module.params['timeout'],
            )
            changed = True
        elif floating_ips:
            # we were configured to have specific ips, let's make sure we have
            # those
            missing_ips = []
            for ip in floating_ips:
                if ip not in ips:
                    missing_ips.append(ip)
            if missing_ips:
                server = cloud.add_ip_list(server, missing_ips,
                                           wait=module.params['wait'],
                                           timeout=module.params['timeout'])
                changed = True

            # Any attached floating IP not in the requested list is removed,
            # so the final state matches the spec exactly.
            extra_ips = []
            for ip in ips:
                if ip not in floating_ips:
                    extra_ips.append(ip)
            if extra_ips:
                _delete_floating_ip_list(cloud, server, extra_ips)
                changed = True

    return (changed, server)
def _get_server_state(module, cloud):
    """
    Look up the current server and short-circuit the module when no work
    is needed.

    Exits the module directly when the server already exists and
    state=present (after reconciling floating IPs), or when it does not
    exist and state=absent. Otherwise returns True so the caller can
    create or delete the server.
    """
    state = module.params['state']
    server = cloud.get_server(module.params['name'])
    if server and state == 'present':
        if server.status not in ('ACTIVE', 'SHUTOFF', 'PAUSED', 'SUSPENDED'):
            # Transitional/error states (BUILD, ERROR, ...) are not safe to
            # report as "present"; fail rather than guess.
            module.fail_json(
                msg="The instance is available but not Active state: "
                    + server.status)
        (ip_changed, server) = _check_floating_ips(module, cloud, server)
        _exit_hostvars(module, cloud, server, ip_changed)
    if server and state == 'absent':
        return True
    if state == 'absent':
        module.exit_json(changed=False, result="not present")
    return True
def main():
    """
    Ansible module entry point: build the argument spec, validate parameter
    combinations, and create or delete the requested OpenStack server.
    """
    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
        image=dict(default=None),
        image_exclude=dict(default='(deprecated)'),
        flavor=dict(default=None),
        flavor_ram=dict(default=None, type='int'),
        flavor_include=dict(default=None),
        key_name=dict(default=None),
        security_groups=dict(default=['default'], type='list'),
        network=dict(default=None),
        nics=dict(default=[], type='list'),
        meta=dict(default=None, type='raw'),
        userdata=dict(default=None, aliases=['user_data']),
        config_drive=dict(default=False, type='bool'),
        auto_ip=dict(default=True, type='bool', aliases=['auto_floating_ip', 'public_ip']),
        floating_ips=dict(default=None, type='list'),
        floating_ip_pools=dict(default=None, type='list'),
        volume_size=dict(default=False, type='int'),
        boot_from_volume=dict(default=False, type='bool'),
        boot_volume=dict(default=None, aliases=['root_volume']),
        terminate_volume=dict(default=False, type='bool'),
        volumes=dict(default=[], type='list'),
        scheduler_hints=dict(default=None, type='dict'),
        state=dict(default='present', choices=['absent', 'present']),
        delete_fip=dict(default=False, type='bool'),
    )

    module_kwargs = openstack_module_kwargs(
        mutually_exclusive=[
            ['auto_ip', 'floating_ips'],
            ['auto_ip', 'floating_ip_pools'],
            ['floating_ips', 'floating_ip_pools'],
            ['flavor', 'flavor_ram'],
            ['image', 'boot_volume'],
            ['boot_from_volume', 'boot_volume'],
            ['nics', 'network'],
        ],
        required_if=[
            ('boot_from_volume', True, ['volume_size', 'image']),
        ],
    )
    module = AnsibleModule(argument_spec, **module_kwargs)

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    state = module.params['state']
    image = module.params['image']
    boot_volume = module.params['boot_volume']
    flavor = module.params['flavor']
    flavor_ram = module.params['flavor_ram']

    if state == 'present':
        # Creating a server needs a boot source and a flavor (by id/name or
        # by minimum RAM); these checks can't be expressed in required_if.
        if not (image or boot_volume):
            module.fail_json(
                msg="Parameter 'image' or 'boot_volume' is required "
                    "if state == 'present'"
            )
        if not flavor and not flavor_ram:
            module.fail_json(
                msg="Parameter 'flavor' or 'flavor_ram' is required "
                    "if state == 'present'"
            )

    try:
        cloud_params = dict(module.params)
        # userdata is consumed by create_server, not by the cloud
        # constructor, so strip it from the connection parameters.
        cloud_params.pop('userdata', None)
        cloud = shade.openstack_cloud(**cloud_params)

        if state == 'present':
            # _get_server_state exits the module when the server already
            # exists; otherwise fall through and create it.
            _get_server_state(module, cloud)
            _create_server(module, cloud)
        elif state == 'absent':
            _get_server_state(module, cloud)
            _delete_server(module, cloud)
    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e), extra_data=e.extra_data)

# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *

if __name__ == '__main__':
    main()
| gpl-3.0 |
drglove/SickRage | lib/github/PullRequest.py | 72 | 23575 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Michael Stead <michael.stead@gmail.com> #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2013 martinqt <m.ki2@laposte.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
import github.PaginatedList
import github.PullRequestMergeStatus
import github.NamedUser
import github.PullRequestPart
import github.PullRequestComment
import github.File
import github.IssueComment
import github.Commit
class PullRequest(github.GithubObject.CompletableGithubObject):
    """
    This class represents PullRequests. The reference can be found here http://developer.github.com/v3/pulls/
    """

    # Every property below follows the same lazy-completion pattern:
    # _completeIfNotSet triggers a full GET of the object the first time an
    # attribute missing from the original (possibly partial) API payload is
    # read; afterwards the cached `.value` is returned.

    @property
    def additions(self):
        """
        :type: integer
        """
        self._completeIfNotSet(self._additions)
        return self._additions.value

    @property
    def assignee(self):
        """
        :type: :class:`github.NamedUser.NamedUser`
        """
        self._completeIfNotSet(self._assignee)
        return self._assignee.value

    @property
    def base(self):
        """
        :type: :class:`github.PullRequestPart.PullRequestPart`
        """
        self._completeIfNotSet(self._base)
        return self._base.value

    @property
    def body(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._body)
        return self._body.value

    @property
    def changed_files(self):
        """
        :type: integer
        """
        self._completeIfNotSet(self._changed_files)
        return self._changed_files.value

    @property
    def closed_at(self):
        """
        :type: datetime.datetime
        """
        self._completeIfNotSet(self._closed_at)
        return self._closed_at.value

    @property
    def comments(self):
        """
        :type: integer
        """
        self._completeIfNotSet(self._comments)
        return self._comments.value

    @property
    def comments_url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._comments_url)
        return self._comments_url.value

    @property
    def commits(self):
        """
        :type: integer
        """
        self._completeIfNotSet(self._commits)
        return self._commits.value

    @property
    def commits_url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._commits_url)
        return self._commits_url.value

    @property
    def created_at(self):
        """
        :type: datetime.datetime
        """
        self._completeIfNotSet(self._created_at)
        return self._created_at.value

    @property
    def deletions(self):
        """
        :type: integer
        """
        self._completeIfNotSet(self._deletions)
        return self._deletions.value

    @property
    def diff_url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._diff_url)
        return self._diff_url.value

    @property
    def head(self):
        """
        :type: :class:`github.PullRequestPart.PullRequestPart`
        """
        self._completeIfNotSet(self._head)
        return self._head.value

    @property
    def html_url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._html_url)
        return self._html_url.value

    @property
    def id(self):
        """
        :type: integer
        """
        self._completeIfNotSet(self._id)
        return self._id.value

    @property
    def issue_url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._issue_url)
        return self._issue_url.value

    @property
    def merge_commit_sha(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._merge_commit_sha)
        return self._merge_commit_sha.value

    @property
    def mergeable(self):
        """
        :type: bool
        """
        self._completeIfNotSet(self._mergeable)
        return self._mergeable.value

    @property
    def mergeable_state(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._mergeable_state)
        return self._mergeable_state.value

    @property
    def merged(self):
        """
        :type: bool
        """
        self._completeIfNotSet(self._merged)
        return self._merged.value

    @property
    def merged_at(self):
        """
        :type: datetime.datetime
        """
        self._completeIfNotSet(self._merged_at)
        return self._merged_at.value

    @property
    def merged_by(self):
        """
        :type: :class:`github.NamedUser.NamedUser`
        """
        self._completeIfNotSet(self._merged_by)
        return self._merged_by.value

    @property
    def milestone(self):
        """
        :type: :class:`github.Milestone.Milestone`
        """
        self._completeIfNotSet(self._milestone)
        return self._milestone.value

    @property
    def number(self):
        """
        :type: integer
        """
        self._completeIfNotSet(self._number)
        return self._number.value

    @property
    def patch_url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._patch_url)
        return self._patch_url.value

    @property
    def review_comment_url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._review_comment_url)
        return self._review_comment_url.value

    @property
    def review_comments(self):
        """
        :type: integer
        """
        self._completeIfNotSet(self._review_comments)
        return self._review_comments.value

    @property
    def review_comments_url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._review_comments_url)
        return self._review_comments_url.value

    @property
    def state(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._state)
        return self._state.value

    @property
    def title(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._title)
        return self._title.value

    @property
    def updated_at(self):
        """
        :type: datetime.datetime
        """
        self._completeIfNotSet(self._updated_at)
        return self._updated_at.value

    @property
    def url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._url)
        return self._url.value

    @property
    def user(self):
        """
        :type: :class:`github.NamedUser.NamedUser`
        """
        self._completeIfNotSet(self._user)
        return self._user.value
    def create_comment(self, body, commit_id, path, position):
        """
        :calls: `POST /repos/:owner/:repo/pulls/:number/comments <http://developer.github.com/v3/pulls/comments>`_
        :param body: string
        :param commit_id: :class:`github.Commit.Commit`
        :param path: string
        :param position: integer
        :rtype: :class:`github.PullRequestComment.PullRequestComment`
        """
        # Backward-compatible alias: delegates to the explicitly named method.
        return self.create_review_comment(body, commit_id, path, position)
    def create_review_comment(self, body, commit_id, path, position):
        """
        :calls: `POST /repos/:owner/:repo/pulls/:number/comments <http://developer.github.com/v3/pulls/comments>`_
        :param body: string
        :param commit_id: :class:`github.Commit.Commit`
        :param path: string
        :param position: integer
        :rtype: :class:`github.PullRequestComment.PullRequestComment`
        """
        # NOTE: `unicode` and `long` make this Python 2 only.
        assert isinstance(body, (str, unicode)), body
        assert isinstance(commit_id, github.Commit.Commit), commit_id
        assert isinstance(path, (str, unicode)), path
        assert isinstance(position, (int, long)), position
        post_parameters = {
            "body": body,
            # The API wants the commit SHA, not the Commit object itself.
            "commit_id": commit_id._identity,
            "path": path,
            "position": position,
        }
        headers, data = self._requester.requestJsonAndCheck(
            "POST",
            self.url + "/comments",
            input=post_parameters
        )
        return github.PullRequestComment.PullRequestComment(self._requester, headers, data, completed=True)
    def create_issue_comment(self, body):
        """
        :calls: `POST /repos/:owner/:repo/issues/:number/comments <http://developer.github.com/v3/issues/comments>`_
        :param body: string
        :rtype: :class:`github.IssueComment.IssueComment`
        """
        assert isinstance(body, (str, unicode)), body
        post_parameters = {
            "body": body,
        }
        # Issue comments live under the repo's /issues/ URL, so strip the
        # trailing /pulls/:number from self.url by walking up two levels.
        headers, data = self._requester.requestJsonAndCheck(
            "POST",
            self._parentUrl(self._parentUrl(self.url)) + "/issues/" + str(self.number) + "/comments",
            input=post_parameters
        )
        return github.IssueComment.IssueComment(self._requester, headers, data, completed=True)
def edit(self, title=github.GithubObject.NotSet, body=github.GithubObject.NotSet, state=github.GithubObject.NotSet):
"""
:calls: `PATCH /repos/:owner/:repo/pulls/:number <http://developer.github.com/v3/pulls>`_
:param title: string
:param body: string
:param state: string
:rtype: None
"""
assert title is github.GithubObject.NotSet or isinstance(title, (str, unicode)), title
assert body is github.GithubObject.NotSet or isinstance(body, (str, unicode)), body
assert state is github.GithubObject.NotSet or isinstance(state, (str, unicode)), state
post_parameters = dict()
if title is not github.GithubObject.NotSet:
post_parameters["title"] = title
if body is not github.GithubObject.NotSet:
post_parameters["body"] = body
if state is not github.GithubObject.NotSet:
post_parameters["state"] = state
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
self.url,
input=post_parameters
)
self._useAttributes(data)
    def get_comment(self, id):
        """
        :calls: `GET /repos/:owner/:repo/pulls/comments/:number <http://developer.github.com/v3/pulls/comments>`_
        :param id: integer
        :rtype: :class:`github.PullRequestComment.PullRequestComment`
        """
        # Backward-compatible alias: delegates to the explicitly named method.
        return self.get_review_comment(id)
def get_review_comment(self, id):
"""
:calls: `GET /repos/:owner/:repo/pulls/comments/:number <http://developer.github.com/v3/pulls/comments>`_
:param id: integer
:rtype: :class:`github.PullRequestComment.PullRequestComment`
"""
assert isinstance(id, (int, long)), id
headers, data = self._requester.requestJsonAndCheck(
"GET",
self._parentUrl(self.url) + "/comments/" + str(id)
)
return github.PullRequestComment.PullRequestComment(self._requester, headers, data, completed=True)
    def get_comments(self):
        """
        Alias for :meth:`get_review_comments`.

        :calls: `GET /repos/:owner/:repo/pulls/:number/comments <http://developer.github.com/v3/pulls/comments>`_
        :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.PullRequestComment.PullRequestComment`
        """
        return self.get_review_comments()
def get_review_comments(self):
"""
:calls: `GET /repos/:owner/:repo/pulls/:number/comments <http://developer.github.com/v3/pulls/comments>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.PullRequestComment.PullRequestComment`
"""
return github.PaginatedList.PaginatedList(
github.PullRequestComment.PullRequestComment,
self._requester,
self.url + "/comments",
None
)
def get_commits(self):
"""
:calls: `GET /repos/:owner/:repo/pulls/:number/commits <http://developer.github.com/v3/pulls>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Commit.Commit`
"""
return github.PaginatedList.PaginatedList(
github.Commit.Commit,
self._requester,
self.url + "/commits",
None
)
def get_files(self):
"""
:calls: `GET /repos/:owner/:repo/pulls/:number/files <http://developer.github.com/v3/pulls>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.File.File`
"""
return github.PaginatedList.PaginatedList(
github.File.File,
self._requester,
self.url + "/files",
None
)
def get_issue_comment(self, id):
"""
:calls: `GET /repos/:owner/:repo/issues/comments/:id <http://developer.github.com/v3/issues/comments>`_
:param id: integer
:rtype: :class:`github.IssueComment.IssueComment`
"""
assert isinstance(id, (int, long)), id
headers, data = self._requester.requestJsonAndCheck(
"GET",
self._parentUrl(self._parentUrl(self.url)) + "/issues/comments/" + str(id)
)
return github.IssueComment.IssueComment(self._requester, headers, data, completed=True)
def get_issue_comments(self):
"""
:calls: `GET /repos/:owner/:repo/issues/:number/comments <http://developer.github.com/v3/issues/comments>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.IssueComment.IssueComment`
"""
return github.PaginatedList.PaginatedList(
github.IssueComment.IssueComment,
self._requester,
self._parentUrl(self._parentUrl(self.url)) + "/issues/" + str(self.number) + "/comments",
None
)
def is_merged(self):
"""
:calls: `GET /repos/:owner/:repo/pulls/:number/merge <http://developer.github.com/v3/pulls>`_
:rtype: bool
"""
status, headers, data = self._requester.requestJson(
"GET",
self.url + "/merge"
)
return status == 204
def merge(self, commit_message=github.GithubObject.NotSet):
"""
:calls: `PUT /repos/:owner/:repo/pulls/:number/merge <http://developer.github.com/v3/pulls>`_
:param commit_message: string
:rtype: :class:`github.PullRequestMergeStatus.PullRequestMergeStatus`
"""
assert commit_message is github.GithubObject.NotSet or isinstance(commit_message, (str, unicode)), commit_message
post_parameters = dict()
if commit_message is not github.GithubObject.NotSet:
post_parameters["commit_message"] = commit_message
headers, data = self._requester.requestJsonAndCheck(
"PUT",
self.url + "/merge",
input=post_parameters
)
return github.PullRequestMergeStatus.PullRequestMergeStatus(self._requester, headers, data, completed=True)
def _initAttributes(self):
self._additions = github.GithubObject.NotSet
self._assignee = github.GithubObject.NotSet
self._base = github.GithubObject.NotSet
self._body = github.GithubObject.NotSet
self._changed_files = github.GithubObject.NotSet
self._closed_at = github.GithubObject.NotSet
self._comments = github.GithubObject.NotSet
self._comments_url = github.GithubObject.NotSet
self._commits = github.GithubObject.NotSet
self._commits_url = github.GithubObject.NotSet
self._created_at = github.GithubObject.NotSet
self._deletions = github.GithubObject.NotSet
self._diff_url = github.GithubObject.NotSet
self._head = github.GithubObject.NotSet
self._html_url = github.GithubObject.NotSet
self._id = github.GithubObject.NotSet
self._issue_url = github.GithubObject.NotSet
self._merge_commit_sha = github.GithubObject.NotSet
self._mergeable = github.GithubObject.NotSet
self._mergeable_state = github.GithubObject.NotSet
self._merged = github.GithubObject.NotSet
self._merged_at = github.GithubObject.NotSet
self._merged_by = github.GithubObject.NotSet
self._milestone = github.GithubObject.NotSet
self._number = github.GithubObject.NotSet
self._patch_url = github.GithubObject.NotSet
self._review_comment_url = github.GithubObject.NotSet
self._review_comments = github.GithubObject.NotSet
self._review_comments_url = github.GithubObject.NotSet
self._state = github.GithubObject.NotSet
self._title = github.GithubObject.NotSet
self._updated_at = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
self._user = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "additions" in attributes: # pragma no branch
self._additions = self._makeIntAttribute(attributes["additions"])
if "assignee" in attributes: # pragma no branch
self._assignee = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["assignee"])
if "base" in attributes: # pragma no branch
self._base = self._makeClassAttribute(github.PullRequestPart.PullRequestPart, attributes["base"])
if "body" in attributes: # pragma no branch
self._body = self._makeStringAttribute(attributes["body"])
if "changed_files" in attributes: # pragma no branch
self._changed_files = self._makeIntAttribute(attributes["changed_files"])
if "closed_at" in attributes: # pragma no branch
self._closed_at = self._makeDatetimeAttribute(attributes["closed_at"])
if "comments" in attributes: # pragma no branch
self._comments = self._makeIntAttribute(attributes["comments"])
if "comments_url" in attributes: # pragma no branch
self._comments_url = self._makeStringAttribute(attributes["comments_url"])
if "commits" in attributes: # pragma no branch
self._commits = self._makeIntAttribute(attributes["commits"])
if "commits_url" in attributes: # pragma no branch
self._commits_url = self._makeStringAttribute(attributes["commits_url"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "deletions" in attributes: # pragma no branch
self._deletions = self._makeIntAttribute(attributes["deletions"])
if "diff_url" in attributes: # pragma no branch
self._diff_url = self._makeStringAttribute(attributes["diff_url"])
if "head" in attributes: # pragma no branch
self._head = self._makeClassAttribute(github.PullRequestPart.PullRequestPart, attributes["head"])
if "html_url" in attributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "issue_url" in attributes: # pragma no branch
self._issue_url = self._makeStringAttribute(attributes["issue_url"])
if "merge_commit_sha" in attributes: # pragma no branch
self._merge_commit_sha = self._makeStringAttribute(attributes["merge_commit_sha"])
if "mergeable" in attributes: # pragma no branch
self._mergeable = self._makeBoolAttribute(attributes["mergeable"])
if "mergeable_state" in attributes: # pragma no branch
self._mergeable_state = self._makeStringAttribute(attributes["mergeable_state"])
if "merged" in attributes: # pragma no branch
self._merged = self._makeBoolAttribute(attributes["merged"])
if "merged_at" in attributes: # pragma no branch
self._merged_at = self._makeDatetimeAttribute(attributes["merged_at"])
if "merged_by" in attributes: # pragma no branch
self._merged_by = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["merged_by"])
if "milestone" in attributes: # pragma no branch
self._milestone = self._makeClassAttribute(github.Milestone.Milestone, attributes["milestone"])
if "number" in attributes: # pragma no branch
self._number = self._makeIntAttribute(attributes["number"])
if "patch_url" in attributes: # pragma no branch
self._patch_url = self._makeStringAttribute(attributes["patch_url"])
if "review_comment_url" in attributes: # pragma no branch
self._review_comment_url = self._makeStringAttribute(attributes["review_comment_url"])
if "review_comments" in attributes: # pragma no branch
self._review_comments = self._makeIntAttribute(attributes["review_comments"])
if "review_comments_url" in attributes: # pragma no branch
self._review_comments_url = self._makeStringAttribute(attributes["review_comments_url"])
if "state" in attributes: # pragma no branch
self._state = self._makeStringAttribute(attributes["state"])
if "title" in attributes: # pragma no branch
self._title = self._makeStringAttribute(attributes["title"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
if "user" in attributes: # pragma no branch
self._user = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["user"])
| gpl-3.0 |
vrutkovs/behave | features/steps/context_steps.py | 2 | 2843 | # -*- coding: utf-8 -*-
"""
Step definition for Context object tests.
EXAMPLE
Scenario: Show that Context parameter
Given I set the parameter "person" to "Alice" in the behave context
Then the behave context should have a parameter named "person"
And the behave context object should contain:
| Parameter | Value |
| person | "Alice" |
Scenario: Show that Context parameter are not present in next scenario
Then the behave context should not have a parameter named "person"
"""
from behave import given, when, then, step
from hamcrest import assert_that, equal_to
# -----------------------------------------------------------------------------
# STEPS:
# -----------------------------------------------------------------------------
@step(u'I set the context parameter "{param_name}" to "{value}"')
def step_set_behave_context_parameter_to(context, param_name, value):
    """Store *value* on the behave context under the attribute *param_name*."""
    setattr(context, param_name, value)
@step(u'the parameter "{param_name}" exists in the behave context')
def step_behave_context_parameter_exists(context, param_name):
    """Assert that the behave context has an attribute named *param_name*."""
    # Attach a message so a failing step names the missing parameter
    # instead of raising a bare AssertionError.
    assert hasattr(context, param_name), \
        "ENSURE: behave context has parameter: %s" % param_name
@step(u'the parameter "{param_name}" does not exist in the behave context')
def step_behave_context_parameter_not_exists(context, param_name):
    """Assert that the behave context has no attribute named *param_name*."""
    # Attach a message so a failing step names the unexpected parameter
    # instead of raising a bare AssertionError.
    assert not hasattr(context, param_name), \
        "ENSURE: behave context has no parameter: %s" % param_name
@given(u'the behave context has a parameter "{param_name}"')
def given_behave_context_has_parameter_named(context, param_name):
    """Given-phrase alias for the generic existence check."""
    step_behave_context_parameter_exists(context, param_name)
@given(u'the behave context does not have a parameter "{param_name}"')
def given_behave_context_does_not_have_parameter_named(context, param_name):
    """Given-phrase alias for the generic non-existence check."""
    step_behave_context_parameter_not_exists(context, param_name)
@step(u'the behave context should have a parameter "{param_name}"')
def step_behave_context_should_have_parameter_named(context, param_name):
    """Should-phrase alias for the generic existence check."""
    step_behave_context_parameter_exists(context, param_name)
@step(u'the behave context should not have a parameter "{param_name}"')
def step_behave_context_should_not_have_parameter_named(context, param_name):
    """Should-phrase alias for the generic non-existence check."""
    step_behave_context_parameter_not_exists(context, param_name)
@then(u'the behave context should contain')
def then_behave_context_should_contain_with_table(context):
    """Verify the context holds every Parameter/Value pair of the step table."""
    assert context.table, "ENSURE: table is provided."
    for row in context.table.rows:
        param_name = row["Parameter"]
        param_value = row["Value"]
        # Table values may be written quoted; compare against the bare text.
        if param_value.startswith('"') and param_value.endswith('"'):
            param_value = param_value[1:-1]
        # Check existence first (with a useful message) so a missing parameter
        # fails clearly instead of reading via the getattr(..., None) fallback
        # and then tripping a bare assert afterwards.
        assert hasattr(context, param_name), \
            "ENSURE: context has parameter: %s" % param_name
        actual = str(getattr(context, param_name))
        assert_that(actual, equal_to(param_value))
@given(u'the behave context contains')
def given_behave_context_contains_with_table(context):
    """Given-phrase alias: reuse the then-step's table check as a precondition."""
    then_behave_context_should_contain_with_table(context)
zimmermegan/smarda | nltk-3.0.3/nltk/stem/__init__.py | 7 | 1253 | # Natural Language Toolkit: Stemmers
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Trevor Cohn <tacohn@cs.mu.oz.au>
# Edward Loper <edloper@gmail.com>
# Steven Bird <stevenbird1@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
NLTK Stemmers
Interfaces used to remove morphological affixes from words, leaving
only the word stem. Stemming algorithms aim to remove those affixes
required for eg. grammatical role, tense, derivational morphology
leaving only the stem of the word. This is a difficult problem due to
irregular words (eg. common verbs in English), complicated
morphological rules, and part-of-speech and sense ambiguities
(eg. ``ceil-`` is not the stem of ``ceiling``).
StemmerI defines a standard interface for stemmers.
"""
from nltk.stem.api import StemmerI
from nltk.stem.regexp import RegexpStemmer
from nltk.stem.lancaster import LancasterStemmer
from nltk.stem.isri import ISRIStemmer
from nltk.stem.porter import PorterStemmer
from nltk.stem.snowball import SnowballStemmer
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.stem.rslp import RSLPStemmer
if __name__ == "__main__":
    import doctest
    # Run the package's doctests; whitespace is normalized so examples may wrap.
    doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
| mit |
vianuevm/findingnorris | flask/lib/python2.7/site-packages/pip/vcs/bazaar.py | 280 | 4427 | from __future__ import absolute_import
import logging
import os
import tempfile
import re
# TODO: Get this into six.moves.urllib.parse
try:
from urllib import parse as urllib_parse
except ImportError:
import urlparse as urllib_parse
from pip.utils import rmtree, display_path
from pip.vcs import vcs, VersionControl
from pip.download import path_to_url
logger = logging.getLogger(__name__)
class Bazaar(VersionControl):
    """pip VCS backend for GNU Bazaar (``bzr``) repositories."""
    name = 'bzr'
    dirname = '.bzr'
    repo_name = 'branch'
    # URL schemes this backend claims; the 'bzr+' prefix is stripped by the
    # base class before the URL reaches the bzr client.
    schemes = (
        'bzr', 'bzr+http', 'bzr+https', 'bzr+ssh', 'bzr+sftp', 'bzr+ftp',
        'bzr+lp',
    )
    def __init__(self, url=None, *args, **kwargs):
        super(Bazaar, self).__init__(url, *args, **kwargs)
        # Python >= 2.7.4, 3.3 doesn't have uses_fragment or non_hierarchical
        # Register lp but do not expose as a scheme to support bzr+lp.
        if getattr(urllib_parse, 'uses_fragment', None):
            urllib_parse.uses_fragment.extend(['lp'])
            urllib_parse.non_hierarchical.extend(['lp'])
    def export(self, location):
        """
        Export the Bazaar repository at the url to the destination location
        """
        # Check out into a temp dir first, then 'bzr export' from there.
        temp_dir = tempfile.mkdtemp('-export', 'pip-')
        self.unpack(temp_dir)
        if os.path.exists(location):
            # Remove the location to make sure Bazaar can export it correctly
            rmtree(location)
        try:
            self.run_command(['export', location], cwd=temp_dir,
                             show_stdout=False)
        finally:
            rmtree(temp_dir)
    def switch(self, dest, url, rev_options):
        """Point the existing checkout at *dest* to a different branch URL."""
        self.run_command(['switch', url], cwd=dest)
    def update(self, dest, rev_options):
        """Pull new revisions into the existing checkout at *dest*."""
        self.run_command(['pull', '-q'] + rev_options, cwd=dest)
    def obtain(self, dest):
        """Branch the repository into *dest*, optionally at a pinned revision."""
        url, rev = self.get_url_rev()
        if rev:
            rev_options = ['-r', rev]
            rev_display = ' (to revision %s)' % rev
        else:
            rev_options = []
            rev_display = ''
        if self.check_destination(dest, url, rev_options, rev_display):
            logger.info(
                'Checking out %s%s to %s',
                url,
                rev_display,
                display_path(dest),
            )
            self.run_command(['branch', '-q'] + rev_options + [url, dest])
    def get_url_rev(self):
        # hotfix the URL scheme after removing bzr+ from bzr+ssh:// readd it
        url, rev = super(Bazaar, self).get_url_rev()
        if url.startswith('ssh://'):
            url = 'bzr+' + url
        return url, rev
    def get_url(self, location):
        """Return the branch URL of the checkout at *location*, or None."""
        # 'bzr info' prints one of these labels followed by the branch URL.
        urls = self.run_command(['info'], show_stdout=False, cwd=location)
        for line in urls.splitlines():
            line = line.strip()
            for x in ('checkout of branch: ',
                      'parent branch: '):
                if line.startswith(x):
                    repo = line.split(x)[1]
                    if self._is_local_repository(repo):
                        return path_to_url(repo)
                    return repo
        return None
    def get_revision(self, location):
        """Return the current revision number of the checkout at *location*."""
        revision = self.run_command(
            ['revno'], show_stdout=False, cwd=location)
        return revision.splitlines()[-1]
    def get_tag_revs(self, location):
        """Return a mapping of revision -> tag name parsed from 'bzr tags'."""
        tags = self.run_command(
            ['tags'], show_stdout=False, cwd=location)
        tag_revs = []
        for line in tags.splitlines():
            # Each line is '<tag> <revision>'.
            tags_match = re.search(r'([.\w-]+)\s*(.*)$', line)
            if tags_match:
                tag = tags_match.group(1)
                rev = tags_match.group(2)
                tag_revs.append((rev.strip(), tag.strip()))
        return dict(tag_revs)
    def get_src_requirement(self, dist, location, find_tags):
        """Build a 'bzr+URL@rev#egg=name' requirement string for *dist*."""
        repo = self.get_url(location)
        if not repo:
            return None
        if not repo.lower().startswith('bzr:'):
            repo = 'bzr+' + repo
        egg_project_name = dist.egg_name().split('-', 1)[0]
        current_rev = self.get_revision(location)
        tag_revs = self.get_tag_revs(location)
        if current_rev in tag_revs:
            # It's a tag
            full_egg_name = '%s-%s' % (egg_project_name, tag_revs[current_rev])
        else:
            full_egg_name = '%s-dev_r%s' % (dist.egg_name(), current_rev)
        return '%s@%s#egg=%s' % (repo, current_rev, full_egg_name)
vcs.register(Bazaar)
| mit |
ric2b/Vivaldi-browser | chromium/build/android/adb_install_apk.py | 6 | 5379 | #!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility script to install APKs from the command line quickly."""
import argparse
import glob
import logging
import os
import sys
import devil_chromium
from devil.android import apk_helper
from devil.android import device_blacklist
from devil.android import device_errors
from devil.android import device_utils
from devil.utils import run_tests_helper
from pylib import constants
def main():
    """Parse command-line options and install the APK (plus any split APKs)
    on every selected, healthy device in parallel."""
    parser = argparse.ArgumentParser()
    apk_group = parser.add_mutually_exclusive_group(required=True)
    apk_group.add_argument('--apk', dest='apk_name',
                           help='DEPRECATED The name of the apk containing the'
                                ' application (with the .apk extension).')
    apk_group.add_argument('apk_path', nargs='?',
                           help='The path to the APK to install.')
    # TODO(jbudorick): Remove once no clients pass --apk_package
    parser.add_argument('--apk_package', help='DEPRECATED unused')
    parser.add_argument('--split',
                        action='append',
                        dest='splits',
                        help='A glob matching the apk splits. '
                             'Can be specified multiple times.')
    parser.add_argument('--keep_data',
                        action='store_true',
                        default=False,
                        help='Keep the package data when installing '
                             'the application.')
    parser.add_argument('--debug', action='store_const', const='Debug',
                        dest='build_type',
                        default=os.environ.get('BUILDTYPE', 'Debug'),
                        help='If set, run test suites under out/Debug. '
                             'Default is env var BUILDTYPE or Debug')
    parser.add_argument('--release', action='store_const', const='Release',
                        dest='build_type',
                        help='If set, run test suites under out/Release. '
                             'Default is env var BUILDTYPE or Debug.')
    parser.add_argument('-d', '--device', dest='devices', action='append',
                        default=[],
                        help='Target device for apk to install on. Enter multiple'
                             ' times for multiple devices.')
    parser.add_argument('--adb-path', type=os.path.abspath,
                        help='Absolute path to the adb binary to use.')
    parser.add_argument('--blacklist-file', help='Device blacklist JSON file.')
    parser.add_argument('-v', '--verbose', action='count',
                        help='Enable verbose logging.')
    parser.add_argument('--downgrade', action='store_true',
                        help='If set, allows downgrading of apk.')
    parser.add_argument('--timeout', type=int,
                        default=device_utils.DeviceUtils.INSTALL_DEFAULT_TIMEOUT,
                        help='Seconds to wait for APK installation. '
                             '(default: %(default)s)')
    args = parser.parse_args()
    run_tests_helper.SetLogLevel(args.verbose)
    constants.SetBuildType(args.build_type)
    devil_chromium.Initialize(
        output_directory=constants.GetOutDirectory(),
        adb_path=args.adb_path)
    # Resolve the APK: accept a bare name (deprecated) or a path, and fall
    # back to looking under the output directory's apks/ folder.
    apk = args.apk_path or args.apk_name
    if not apk.endswith('.apk'):
        apk += '.apk'
    if not os.path.exists(apk):
        apk = os.path.join(constants.GetOutDirectory(), 'apks', apk)
        if not os.path.exists(apk):
            parser.error('%s not found.' % apk)
    if args.splits:
        # Collect only split APKs that belong to the same package as the base.
        splits = []
        base_apk_package = apk_helper.ApkHelper(apk).GetPackageName()
        for split_glob in args.splits:
            apks = [f for f in glob.glob(split_glob) if f.endswith('.apk')]
            if not apks:
                logging.warning('No apks matched for %s.', split_glob)
            for f in apks:
                helper = apk_helper.ApkHelper(f)
                if (helper.GetPackageName() == base_apk_package
                    and helper.GetSplitName()):
                    splits.append(f)
    blacklist = (device_blacklist.Blacklist(args.blacklist_file)
                 if args.blacklist_file
                 else None)
    devices = device_utils.DeviceUtils.HealthyDevices(blacklist=blacklist,
                                                      device_arg=args.devices)
    def blacklisting_install(device):
        # Install on one device; on failure/timeouts, blacklist it (if a
        # blacklist file was given) instead of aborting the whole run.
        try:
            if args.splits:
                device.InstallSplitApk(apk, splits, reinstall=args.keep_data,
                                       allow_downgrade=args.downgrade)
            else:
                device.Install(apk, reinstall=args.keep_data,
                               allow_downgrade=args.downgrade,
                               timeout=args.timeout)
        except (device_errors.CommandFailedError,
                device_errors.DeviceUnreachableError):
            logging.exception('Failed to install %s', apk)
            if blacklist:
                blacklist.Extend([str(device)], reason='install_failure')
                logging.warning('Blacklisting %s', str(device))
        except device_errors.CommandTimeoutError:
            logging.exception('Timed out while installing %s', apk)
            if blacklist:
                blacklist.Extend([str(device)], reason='install_timeout')
                logging.warning('Blacklisting %s', str(device))
    device_utils.DeviceUtils.parallel(devices).pMap(blacklisting_install)
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
foreni-packages/webslayer | recursos/combinator.py | 8 | 1923 |
#from __future__ import generators
"""xpermutations.py
Generators for calculating a) the permutations of a sequence and
b) the combinations and selections of a number of elements from a
sequence. Uses Python 2.2 generators.
Similar solutions found also in comp.lang.python
Keywords: generator, combination, permutation, selection
See also: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/105962
See also: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66463
See also: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66465
"""
def xcombinations(items, n):
    """Yield every length-*n* ordered arrangement of *items*, each as a list.

    Equivalent to itertools.permutations(items, n), except that lists
    rather than tuples are produced. Uses range() instead of the
    Python-2-only xrange() so the module also runs on Python 3.
    """
    if n == 0:
        yield []
    else:
        # Pick each element in turn as the head, then recurse on the rest.
        for i in range(len(items)):
            for tail in xcombinations(items[:i] + items[i + 1:], n - 1):
                yield [items[i]] + tail
def xuniqueCombinations(items, n):
    """Yield every length-*n* unordered combination of *items*, each as a list.

    Equivalent to itertools.combinations(items, n), except that lists
    rather than tuples are produced. Uses range() instead of the
    Python-2-only xrange() so the module also runs on Python 3.
    """
    if n == 0:
        yield []
    else:
        # Only recurse on the elements *after* i, so order never repeats.
        for i in range(len(items)):
            for tail in xuniqueCombinations(items[i + 1:], n - 1):
                yield [items[i]] + tail
def xselections(items, n):
    """Yield every length-*n* selection of *items* with repetition, as lists.

    Equivalent to itertools.product(items, repeat=n), except that lists
    rather than tuples are produced. Uses range() instead of the
    Python-2-only xrange() so the module also runs on Python 3.
    """
    if n == 0:
        yield []
    else:
        # Elements may repeat, so recurse on the full sequence each time.
        for i in range(len(items)):
            for tail in xselections(items, n - 1):
                yield [items[i]] + tail
def xpermutations(items):
    """Return a generator of every full-length permutation of *items*."""
    return xcombinations(items, len(items))
# Ad-hoc demo code (Python 2 print syntax); most examples are commented out.
#if __name__=="__main__":
    #print "Permutations of 'love'"
    #for p in xpermutations(['l','o','v','e']): print ''.join(p)
    #print
# NOTE(review): the banner below says "2 letters" but the live demo draws
# 4-element arrangements from a 10-character pool — confirm which is intended.
print "Combinations of 2 letters from 'love'"
#for c in xcombinations(['l','o','v','e','a','b','c','d','1','2'],4): print ''.join(c)
c= xcombinations(['l','o','v','e','a','b','c','d','1','2'],4)
for x in c:
    print ''.join(x)
#print
#print "Unique Combinations of 2 letters from 'love'"
#for uc in xuniqueCombinations(['l','o','v','e'],2): print ''.join(uc)
#print
#print "Selections of 2 letters from 'love'"
#for s in xselections(['l','o','v','e'],2): print ''.join(s)
#print
#print map(''.join, list(xpermutations('abcdefghijklmn')))
| gpl-2.0 |
ubic135/odoo-design | doc/_themes/odoodoc/github.py | 95 | 2492 | import inspect
import importlib
import os.path
from urlparse import urlunsplit
def setup(app):
    """Sphinx extension entry point: register the github config values, the
    html-page-context hook, and a linkcode resolver for Python objects."""
    app.add_config_value('github_user', None, 'env')
    app.add_config_value('github_project', None, 'env')
    app.connect('html-page-context', add_doc_link)
    def linkcode_resolve(domain, info):
        """ Resolves provided object to corresponding github URL
        """
        # TODO: js?
        if domain != 'py':
            return None
        if not (app.config.github_user and app.config.github_project):
            return None
        module, fullname = info['module'], info['fullname']
        # TODO: attributes/properties don't have modules, maybe try to look
        # them up based on their cached host object?
        if not module:
            return None
        # Walk the dotted path down from the imported module to the object.
        obj = importlib.import_module(module)
        for item in fullname.split('.'):
            obj = getattr(obj, item, None)
            if obj is None:
                return None
        # get original from decorated methods
        try: obj = getattr(obj, '_orig')
        except AttributeError: pass
        try:
            obj_source_path = inspect.getsourcefile(obj)
            _, line = inspect.getsourcelines(obj)
        except (TypeError, IOError):
            # obj doesn't have a module, or something
            return None
        # Link relative to the repository root (parent of the openerp package).
        import openerp
        project_root = os.path.join(os.path.dirname(openerp.__file__), '..')
        return make_github_link(
            app,
            os.path.relpath(obj_source_path, project_root),
            line)
    app.config.linkcode_resolve = linkcode_resolve
def make_github_link(app, path, line=None, mode="blob"):
    """Return an absolute https URL to *path* on GitHub.

    The repository comes from the ``github_user``/``github_project`` config
    values, the branch from the documented version (``master`` when unset);
    *line* adds an ``#L<n>`` fragment and *mode* selects the GitHub view
    (``blob``, ``edit``, ...).
    """
    cfg = app.config
    segments = (
        cfg.github_user,
        cfg.github_project,
        mode,
        cfg.version or 'master',
        path,
    )
    fragment = '' if line is None else 'L%d' % line
    return urlunsplit(
        ('https', 'github.com', '/' + '/'.join(segments), '', fragment))
def add_doc_link(app, pagename, templatename, context, doctree):
    """ Add github_link function linking to the current page on github """
    # Both settings are required to build a repository URL. The original
    # check read `if not app.config.github_user and app.config.github_project`,
    # which binds `not` to the first operand only — it skipped the hook when
    # only the project was set, yet installed it when BOTH were unset. Mirror
    # the correct parenthesised test used by linkcode_resolve above.
    if not (app.config.github_user and app.config.github_project):
        return
    # can't use functools.partial because 3rd positional is line not mode
    # NOTE(review): the lambda's default mode='mode' produces a '/mode/' path
    # segment — looks suspicious; confirm templates always pass an explicit mode.
    context['github_link'] = lambda mode='mode': make_github_link(
        app, 'doc/%s%s' % (pagename, app.config.source_suffix), mode=mode)
| agpl-3.0 |
falkolab/titanium_mobile | node_modules/node-gyp/gyp/pylib/gyp/ordered_dict.py | 2354 | 10366 | # Unmodified from http://code.activestate.com/recipes/576693/
# other than to add MIT license header (as specified on page, but not in code).
# Linked from Python documentation here:
# http://docs.python.org/2/library/collections.html#collections.OrderedDict
#
# This should be deleted once Py2.7 is available on all bots, see
# http://crbug.com/241769.
#
# Copyright (c) 2009 Raymond Hettinger.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
# Suppress 'OrderedDict.update: Method has no argument':
# pylint: disable=E0211
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
    def __repr__(self, _repr_running={}):
        'od.__repr__() <==> repr(od)'
        # NOTE: the mutable default `_repr_running` is intentional. It is a
        # process-wide registry of (dict id, thread id) pairs acting as a
        # recursion guard so self-referencing dicts render as '...' instead
        # of recursing forever.
        call_key = id(self), _get_ident()
        if call_key in _repr_running:
            return '...'
        _repr_running[call_key] = 1
        try:
            if not self:
                return '%s()' % (self.__class__.__name__,)
            return '%s(%r)' % (self.__class__.__name__, self.items())
        finally:
            # Always unregister, even if item repr raised, so later calls
            # start from a clean state.
            del _repr_running[call_key]
    def __reduce__(self):
        'Return state information for pickling'
        # Capture ordered (key, value) pairs; the constructor rebuilds from
        # this list so order survives a pickle round-trip.
        items = [[k, self[k]] for k in self]
        inst_dict = vars(self).copy()
        # Strip the attributes every plain OrderedDict instance carries so
        # only extra (subclass) instance state is pickled.
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.
        '''
        if isinstance(other, OrderedDict):
            # items() returns plain lists here, so '==' also compares the
            # order of the pairs, not just their contents.
            return len(self)==len(other) and self.items() == other.items()
        # Fall back to plain dict equality for any other mapping.
        return dict.__eq__(self, other)
    def __ne__(self, other):
        # Defined explicitly because Python 2 does not derive != from ==.
        return not self == other
    # -- the following methods are only used in Python 2.7 --

    def viewkeys(self):
        "od.viewkeys() -> a set-like object providing a view on od's keys"
        # KeysView is the collections ABC view type; the view stays live as
        # the dict changes.
        return KeysView(self)
    def viewvalues(self):
        "od.viewvalues() -> an object providing a view on od's values"
        # Dynamic (non-set-like) view over the values, per the 2.7 dict API.
        return ValuesView(self)
    def viewitems(self):
        "od.viewitems() -> a set-like object providing a view on od's items"
        # Dynamic set-like view over (key, value) pairs, per the 2.7 dict API.
        return ItemsView(self)
| apache-2.0 |
touilleMan/mongoengine | mongoengine/base/metaclasses.py | 28 | 18503 | import warnings
from mongoengine.common import _import_class
from mongoengine.errors import InvalidDocumentError
from mongoengine.python_support import PY3
from mongoengine.queryset import (DO_NOTHING, DoesNotExist,
MultipleObjectsReturned,
QuerySetManager)
from mongoengine.base.common import _document_registry, ALLOW_INHERITANCE
from mongoengine.base.fields import BaseField, ComplexBaseField, ObjectIdField
__all__ = ('DocumentMetaclass', 'TopLevelDocumentMetaclass')
class DocumentMetaclass(type):
    """Metaclass for all documents.

    Responsibilities: merge ``meta``/``_meta`` from bases, collect the
    ``BaseField`` attributes declared on the class and its bases into
    ``_fields`` (plus the db-field name maps), build the ``_cls`` dotted
    inheritance hierarchy, register the class in ``_document_registry`` and
    validate reverse-delete-rule / cached-reference configuration.
    """

    def __new__(cls, name, bases, attrs):
        """Assemble and validate class attributes before creating the type."""
        flattened_bases = cls._get_bases(bases)
        super_new = super(DocumentMetaclass, cls).__new__
        # If a base class just call super
        metaclass = attrs.get('my_metaclass')
        if metaclass and issubclass(metaclass, DocumentMetaclass):
            return super_new(cls, name, bases, attrs)

        attrs['_is_document'] = attrs.get('_is_document', False)
        attrs['_cached_reference_fields'] = []

        # EmbeddedDocuments could have meta data for inheritance
        if 'meta' in attrs:
            attrs['_meta'] = attrs.pop('meta')

        # EmbeddedDocuments should inherit meta data
        if '_meta' not in attrs:
            meta = MetaDict()
            for base in flattened_bases[::-1]:
                # Add any mixin metadata from plain objects
                if hasattr(base, 'meta'):
                    meta.merge(base.meta)
                elif hasattr(base, '_meta'):
                    meta.merge(base._meta)
            attrs['_meta'] = meta
            attrs['_meta']['abstract'] = False  # 789: EmbeddedDocument shouldn't inherit abstract

        # Documents that allow inheritance need a hidden _cls StringField
        # to discriminate subclasses stored in the same collection.
        if attrs['_meta'].get('allow_inheritance', ALLOW_INHERITANCE):
            StringField = _import_class('StringField')
            attrs['_cls'] = StringField()

        # Handle document Fields

        # Merge all fields from subclasses
        doc_fields = {}
        for base in flattened_bases[::-1]:
            if hasattr(base, '_fields'):
                doc_fields.update(base._fields)

            # Standard object mixin - merge in any Fields
            if not hasattr(base, '_meta'):
                base_fields = {}
                for attr_name, attr_value in base.__dict__.iteritems():
                    if not isinstance(attr_value, BaseField):
                        continue
                    attr_value.name = attr_name
                    if not attr_value.db_field:
                        attr_value.db_field = attr_name
                    base_fields[attr_name] = attr_value
                doc_fields.update(base_fields)

        # Discover any document fields
        field_names = {}
        for attr_name, attr_value in attrs.iteritems():
            if not isinstance(attr_value, BaseField):
                continue
            attr_value.name = attr_name
            if not attr_value.db_field:
                attr_value.db_field = attr_name
            doc_fields[attr_name] = attr_value

            # Count names to ensure no db_field redefinitions
            field_names[attr_value.db_field] = field_names.get(
                attr_value.db_field, 0) + 1

        # Ensure no duplicate db_fields
        duplicate_db_fields = [k for k, v in field_names.items() if v > 1]
        if duplicate_db_fields:
            msg = ("Multiple db_fields defined for: %s " %
                   ", ".join(duplicate_db_fields))
            raise InvalidDocumentError(msg)

        # Set _fields and db_field maps
        attrs['_fields'] = doc_fields
        attrs['_db_field_map'] = dict([(k, getattr(v, 'db_field', k))
                                       for k, v in doc_fields.iteritems()])
        attrs['_reverse_db_field_map'] = dict(
            (v, k) for k, v in attrs['_db_field_map'].iteritems())

        # Fields keep a creation counter, so sorting on it reproduces the
        # order in which they were declared on the class.
        attrs['_fields_ordered'] = tuple(i[1] for i in sorted(
                                         (v.creation_counter, v.name)
                                         for v in doc_fields.itervalues()))

        #
        # Set document hierarchy
        #
        superclasses = ()
        class_name = [name]
        for base in flattened_bases:
            if (not getattr(base, '_is_base_cls', True) and
               not getattr(base, '_meta', {}).get('abstract', True)):
                # Collate hierarchy for _cls and _subclasses
                class_name.append(base.__name__)

            if hasattr(base, '_meta'):
                # Warn if allow_inheritance isn't set and prevent
                # inheritance of classes where inheritance is set to False
                allow_inheritance = base._meta.get('allow_inheritance',
                                                   ALLOW_INHERITANCE)
                if (allow_inheritance is not True and
                   not base._meta.get('abstract')):
                    raise ValueError('Document %s may not be subclassed' %
                                     base.__name__)

        # Get superclasses from last base superclass
        document_bases = [b for b in flattened_bases
                          if hasattr(b, '_class_name')]
        if document_bases:
            superclasses = document_bases[0]._superclasses
            superclasses += (document_bases[0]._class_name, )

        _cls = '.'.join(reversed(class_name))
        attrs['_class_name'] = _cls
        attrs['_superclasses'] = superclasses
        attrs['_subclasses'] = (_cls, )
        attrs['_types'] = attrs['_subclasses']  # TODO depreciate _types

        # Create the new_class
        new_class = super_new(cls, name, bases, attrs)

        # Set _subclasses: every ancestor document learns about this class.
        for base in document_bases:
            if _cls not in base._subclasses:
                base._subclasses += (_cls,)
                base._types = base._subclasses   # TODO depreciate _types

        (Document, EmbeddedDocument, DictField,
         CachedReferenceField) = cls._import_classes()

        if issubclass(new_class, Document):
            new_class._collection = None

        # Add class to the _document_registry
        _document_registry[new_class._class_name] = new_class

        # In Python 2, User-defined methods objects have special read-only
        # attributes 'im_func' and 'im_self' which contain the function obj
        # and class instance object respectively.  With Python 3 these special
        # attributes have been replaced by __func__ and __self__.  The Blinker
        # module continues to use im_func and im_self, so the code below
        # copies __func__ into im_func and __self__ into im_self for
        # classmethod objects in Document derived classes.
        if PY3:
            for key, val in new_class.__dict__.items():
                if isinstance(val, classmethod):
                    f = val.__get__(new_class)
                    if hasattr(f, '__func__') and not hasattr(f, 'im_func'):
                        f.__dict__.update({'im_func': getattr(f, '__func__')})
                    if hasattr(f, '__self__') and not hasattr(f, 'im_self'):
                        f.__dict__.update({'im_self': getattr(f, '__self__')})

        # Handle delete rules
        for field in new_class._fields.itervalues():
            f = field
            if f.owner_document is None:
                f.owner_document = new_class
            delete_rule = getattr(f, 'reverse_delete_rule', DO_NOTHING)
            if isinstance(f, CachedReferenceField):

                if issubclass(new_class, EmbeddedDocument):
                    raise InvalidDocumentError(
                        "CachedReferenceFields is not allowed in EmbeddedDocuments")
                if not f.document_type:
                    raise InvalidDocumentError(
                        "Document is not available to sync")

                if f.auto_sync:
                    f.start_listener()

                f.document_type._cached_reference_fields.append(f)

            if isinstance(f, ComplexBaseField) and hasattr(f, 'field'):
                # For container fields the delete rule lives on the inner
                # field, so re-read it (and validate) from there.
                delete_rule = getattr(f.field,
                                      'reverse_delete_rule',
                                      DO_NOTHING)
                if isinstance(f, DictField) and delete_rule != DO_NOTHING:
                    msg = ("Reverse delete rules are not supported "
                           "for %s (field: %s)" %
                           (field.__class__.__name__, field.name))
                    raise InvalidDocumentError(msg)

                f = field.field

            if delete_rule != DO_NOTHING:
                if issubclass(new_class, EmbeddedDocument):
                    msg = ("Reverse delete rules are not supported for "
                           "EmbeddedDocuments (field: %s)" % field.name)
                    raise InvalidDocumentError(msg)
                f.document_type.register_delete_rule(new_class,
                                                     field.name, delete_rule)

            if (field.name and hasattr(Document, field.name) and
               EmbeddedDocument not in new_class.mro()):
                msg = ("%s is a document method and not a valid "
                       "field name" % field.name)
                raise InvalidDocumentError(msg)

        return new_class

    def add_to_class(self, name, value):
        """Attach *value* to the class under *name* (plain setattr helper)."""
        setattr(self, name, value)

    @classmethod
    def _get_bases(cls, bases):
        """Return the full, de-duplicated base-class chain as a BasesTuple."""
        if isinstance(bases, BasesTuple):
            return bases
        seen = []
        bases = cls.__get_bases(bases)
        # seen.append() returns None (falsy), so this keeps first occurrences
        # while recording every base it has already yielded.
        unique_bases = (b for b in bases if not (b in seen or seen.append(b)))
        return BasesTuple(unique_bases)

    @classmethod
    def __get_bases(cls, bases):
        """Depth-first generator over all ancestors, skipping ``object``."""
        for base in bases:
            if base is object:
                continue
            yield base
            for child_base in cls.__get_bases(base.__bases__):
                yield child_base

    @classmethod
    def _import_classes(cls):
        """Late-import helper classes to avoid circular imports at load time."""
        Document = _import_class('Document')
        EmbeddedDocument = _import_class('EmbeddedDocument')
        DictField = _import_class('DictField')
        CachedReferenceField = _import_class('CachedReferenceField')
        return Document, EmbeddedDocument, DictField, CachedReferenceField
class TopLevelDocumentMetaclass(DocumentMetaclass):
    """Metaclass for top-level documents (i.e. documents that have their own
    collection in the database).

    Extends DocumentMetaclass with collection naming, index specs, the
    default ``objects`` QuerySetManager, primary-key (``id``) handling and
    per-class DoesNotExist / MultipleObjectsReturned exception types.
    """

    def __new__(cls, name, bases, attrs):
        flattened_bases = cls._get_bases(bases)
        super_new = super(TopLevelDocumentMetaclass, cls).__new__

        # Set default _meta data if base class, otherwise get user defined meta
        if attrs.get('my_metaclass') == TopLevelDocumentMetaclass:
            # defaults
            attrs['_meta'] = {
                'abstract': True,
                'max_documents': None,
                'max_size': None,
                'ordering': [],  # default ordering applied at runtime
                'indexes': [],  # indexes to be ensured at runtime
                'id_field': None,
                'index_background': False,
                'index_drop_dups': False,
                'index_opts': None,
                'delete_rules': None,
                'allow_inheritance': None,
            }
            attrs['_is_base_cls'] = True
            attrs['_meta'].update(attrs.get('meta', {}))
        else:
            attrs['_meta'] = attrs.get('meta', {})
            # Explicitly set abstract to false unless set
            attrs['_meta']['abstract'] = attrs['_meta'].get('abstract', False)
            attrs['_is_base_cls'] = False

        # Set flag marking as document class - as opposed to an object mixin
        attrs['_is_document'] = True

        # Ensure queryset_class is inherited
        if 'objects' in attrs:
            manager = attrs['objects']
            if hasattr(manager, 'queryset_class'):
                attrs['_meta']['queryset_class'] = manager.queryset_class

        # Clean up top level meta
        if 'meta' in attrs:
            del attrs['meta']

        # Find the parent document class
        parent_doc_cls = [b for b in flattened_bases
                          if b.__class__ == TopLevelDocumentMetaclass]
        parent_doc_cls = None if not parent_doc_cls else parent_doc_cls[0]

        # Prevent classes setting collection different to their parents
        # If parent wasn't an abstract class
        if (parent_doc_cls and 'collection' in attrs.get('_meta', {}) and
           not parent_doc_cls._meta.get('abstract', True)):
            msg = "Trying to set a collection on a subclass (%s)" % name
            warnings.warn(msg, SyntaxWarning)
            del attrs['_meta']['collection']

        # Ensure abstract documents have abstract bases
        if attrs.get('_is_base_cls') or attrs['_meta'].get('abstract'):
            if (parent_doc_cls and
               not parent_doc_cls._meta.get('abstract', False)):
                msg = "Abstract document cannot have non-abstract base"
                raise ValueError(msg)
            # Abstract classes need none of the collection / pk machinery
            # below, so bail out early.
            return super_new(cls, name, bases, attrs)

        # Merge base class metas.
        # Uses a special MetaDict that handles various merging rules
        meta = MetaDict()
        for base in flattened_bases[::-1]:
            # Add any mixin metadata from plain objects
            if hasattr(base, 'meta'):
                meta.merge(base.meta)
            elif hasattr(base, '_meta'):
                meta.merge(base._meta)

            # Set collection in the meta if its callable
            if (getattr(base, '_is_document', False) and
               not base._meta.get('abstract')):
                collection = meta.get('collection', None)
                if callable(collection):
                    meta['collection'] = collection(base)

        meta.merge(attrs.get('_meta', {}))  # Top level meta

        # Only simple classes (direct subclasses of Document)
        # may set allow_inheritance to False
        simple_class = all([b._meta.get('abstract')
                            for b in flattened_bases if hasattr(b, '_meta')])
        if (not simple_class and meta['allow_inheritance'] is False and
           not meta['abstract']):
            raise ValueError('Only direct subclasses of Document may set '
                             '"allow_inheritance" to False')

        # Set default collection name: CamelCase class name -> snake_case.
        if 'collection' not in meta:
            meta['collection'] = ''.join('_%s' % c if c.isupper() else c
                                         for c in name).strip('_').lower()
        attrs['_meta'] = meta

        # Call super and get the new class
        new_class = super_new(cls, name, bases, attrs)
        meta = new_class._meta

        # Set index specifications
        meta['index_specs'] = new_class._build_index_specs(meta['indexes'])

        # If collection is a callable - call it and set the value
        collection = meta.get('collection')
        if callable(collection):
            new_class._meta['collection'] = collection(new_class)

        # Provide a default queryset unless exists or one has been set
        if 'objects' not in dir(new_class):
            new_class.objects = QuerySetManager()

        # Validate the fields and set primary key if needed
        for field_name, field in new_class._fields.iteritems():
            if field.primary_key:
                # Ensure only one primary key is set
                current_pk = new_class._meta.get('id_field')
                if current_pk and current_pk != field_name:
                    raise ValueError('Cannot override primary key field')

                # Set primary key
                if not current_pk:
                    new_class._meta['id_field'] = field_name
                    new_class.id = field

        # Set primary key if not defined by the document
        new_class._auto_id_field = getattr(parent_doc_cls,
                                           '_auto_id_field', False)
        if not new_class._meta.get('id_field'):
            # After 0.10, find not existing names, instead of overwriting
            id_name, id_db_name = cls.get_auto_id_names(new_class)
            new_class._auto_id_field = True
            new_class._meta['id_field'] = id_name
            new_class._fields[id_name] = ObjectIdField(db_field=id_db_name)
            new_class._fields[id_name].name = id_name
            new_class.id = new_class._fields[id_name]
            new_class._db_field_map[id_name] = id_db_name
            new_class._reverse_db_field_map[id_db_name] = id_name
            # Prepend id field to _fields_ordered
            new_class._fields_ordered = (id_name, ) + new_class._fields_ordered

        # Merge in exceptions with parent hierarchy so e.g.
        # Sub.DoesNotExist subclasses Base.DoesNotExist.
        exceptions_to_merge = (DoesNotExist, MultipleObjectsReturned)
        module = attrs.get('__module__')
        for exc in exceptions_to_merge:
            name = exc.__name__
            parents = tuple(getattr(base, name) for base in flattened_bases
                            if hasattr(base, name)) or (exc,)
            # Create new exception and set to new_class
            exception = type(name, parents, {'__module__': module})
            setattr(new_class, name, exception)

        return new_class

    @classmethod
    def get_auto_id_names(cls, new_class):
        """Return an unused (attribute name, db field name) pair for the pk.

        Prefers ('id', '_id'); falls back to 'auto_id_0', 'auto_id_1', ...
        when the document already defines fields with those names.
        """
        id_name, id_db_name = ('id', '_id')
        if id_name not in new_class._fields and \
           id_db_name not in (v.db_field for v in new_class._fields.values()):
            return id_name, id_db_name
        id_basename, id_db_basename, i = 'auto_id', '_auto_id', 0
        while id_name in new_class._fields or \
              id_db_name in (v.db_field for v in new_class._fields.values()):
            id_name = '{0}_{1}'.format(id_basename, i)
            id_db_name = '{0}_{1}'.format(id_db_basename, i)
            i += 1
        return id_name, id_db_name
class MetaDict(dict):
    """Dictionary specialised for document meta options.

    Keys listed in ``_merge_options`` (currently only ``indexes``) are
    concatenated on merge instead of being overwritten.
    """
    _merge_options = ('indexes',)

    def merge(self, new_options):
        for key, value in new_options.iteritems():
            if key in self._merge_options:
                # Accumulate list-valued options across the hierarchy.
                self[key] = self.get(key, []) + value
            else:
                self[key] = value
class BasesTuple(tuple):
    """Special tuple subclass used to mark an already-flattened bases chain
    during introspection in ``DocumentMetaclass.__new__``.
    """
| mit |
MySQLOnRocksDB/mysql-5.6 | xtrabackup/test/python/subunit/progress_model.py | 85 | 4056 | #
# subunit: extensions to Python unittest to get test results from subprocesses.
# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
#
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
# project source as Apache-2.0 and BSD. You may not use this file except in
# compliance with one of these two licences.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# license you chose for the specific language governing permissions and
# limitations under that license.
#
"""Support for dealing with progress state."""
class ProgressModel(object):
    """A model of progress indicators as subunit defines it.

    An instance represents one logical operation that is progressing. The
    operation may consist of many steps, and a step may supply its own
    nested progress information: push() opens a nested subtask and pop()
    returns to the enclosing one. pos() and width() always report an
    *overall* summary rather than the innermost subtask, which is what most
    user interfaces want to display.

    The default state is 0/0, meaning overall progress is unknown; renderers
    should treat any 0 denominator as "indeterminate".

    :ivar: _tasks. This private attribute stores the subtasks. Each is a
        list [pos, width, overall_numerator, overall_denominator], where the
        overall fields snapshot the summary state at the time of the push.
    """

    def __init__(self):
        """Create a ProgressModel with no progress data (summary 0/0)."""
        self._tasks = []
        self.push()

    def adjust_width(self, offset):
        """Add *offset* to the width of the current subtask."""
        frame = self._tasks[-1]
        frame[1] = frame[1] + offset

    def advance(self):
        """Advance the current subtask by one step."""
        frame = self._tasks[-1]
        frame[0] = frame[0] + 1

    def pop(self):
        """Discard the current subtask, restoring the state before push()."""
        del self._tasks[-1]

    def pos(self):
        """Return how far through the operation has progressed overall."""
        if not self._tasks:
            return 0
        frame = self._tasks[-1]
        if len(self._tasks) == 1:
            return frame[0]
        # Scale the snapshotted overall position into the current subtask's
        # units (falling back to 1 when the subtask width is unknown).
        return frame[2] * (frame[1] or 1) + frame[0]

    def push(self):
        """Open a new subtask.

        The overall summary is unchanged by a push: subsequent adjust_width,
        advance and set_width calls only move progress within the range a
        single advance() would previously have covered. Call pop() to return
        to the enclosing task.
        """
        snapshot = [0, 0, self.pos(), self.width()]
        self._tasks.append(snapshot)

    def set_width(self, width):
        """Set the width (total step count) of the current subtask."""
        self._tasks[-1][1] = width

    def width(self):
        """Return the total overall width of the operation."""
        if not self._tasks:
            return 0
        frame = self._tasks[-1]
        if len(self._tasks) == 1:
            return frame[1]
        # Scale the snapshotted overall width by the current subtask width
        # (or keep it unchanged when the subtask width is unknown).
        return frame[3] * (frame[1] or 1)
| gpl-2.0 |
canavandl/bokeh | bokeh/charts/builder/donut_builder.py | 31 | 8206 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plot is a simple way.
This is the Donut class which lets you build your Donut charts just passing
the arguments to the Chart class and calling the proper functions.
It also add a new chained stacked method.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division
from math import pi
import pandas as pd
from ..utils import cycle_colors, polar_to_cartesian
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, GlyphRenderer, Range1d
from ...models.glyphs import AnnularWedge, Text, Wedge
from ...properties import Any, Bool, Either, List
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def Donut(values, cat=None, width=800, height=800, xgrid=False, ygrid=False, **kws):
    """ Create a Donut chart using
    :class:`DonutBuilder <bokeh.charts.builder.donut_builder.DonutBuilder>`
    to render the geometry from ``values`` and ``cat``.

    Args:
        values (iterable): iterable 2d representing the data series
            values matrix.
        cat (list or bool, optional): list of strings representing the
            categories. Defaults to None.

    In addition to the parameters specific to this chart,
    :ref:`userguide_charts_generic_arguments` are also accepted as keyword
    parameters.

    Returns:
        a new :class:`Chart <bokeh.charts.Chart>`

    Example:
        ``Donut([[2., 5., 3.], [4., 1., 4.], [6., 4., 3.]],
        ['cpu1', 'cpu2', 'cpu3'])``
    """
    # Fold the explicit chart options into the keyword set and delegate the
    # heavy lifting to the generic builder factory.
    kws.update(cat=cat, width=width, height=height, xgrid=xgrid, ygrid=ygrid)
    return create_and_build(DonutBuilder, values, **kws)
class DonutBuilder(Builder):
    """This is the Donut class and it is in charge of plotting
    Donut chart in an easy and intuitive way.

    Essentially, it provides a way to ingest the data, make the proper
    calculations and push the references into a source object.
    We additionally make calculations for the donut slices and angles.
    And finally add the needed glyphs (Wedges and AnnularWedges) taking
    the references from the source.
    """

    cat = Either(Bool, List(Any), help="""
    List of string representing the categories. (Defaults to None.)
    """)

    def _process_data(self):
        """Take the chart data from self._values.

        It calculates the chart properties accordingly (start/end angles).
        Then build a dict containing references to all the calculated
        points to be used by the Wedge glyph inside the ``_yield_renderers`` method.
        """
        dd = dict(zip(self._values.keys(), self._values.values()))
        self._df = df = pd.DataFrame(dd)
        self._groups = df.index = self.cat
        df.columns = self._values.keys()

        # Get the sum per category
        aggregated = df.T.sum()

        # Get the total (sum of all categories)
        self._total_units = total = aggregated.sum()
        # Fraction of the full circle (2*pi) covered by x units.
        radians = lambda x: 2*pi*(x/total)
        angles = aggregated.map(radians).cumsum()

        end_angles = angles.tolist()
        start_angles = [0] + end_angles[:-1]

        colors = cycle_colors(self.cat, self.palette)
        self.set_and_get("", "colors", colors)
        self.set_and_get("", "end", end_angles)
        self.set_and_get("", "start", start_angles)

    def _set_sources(self):
        """Push the Donut data into the ColumnDataSource and calculate
        the proper ranges.
        """
        self._source = ColumnDataSource(self._data)
        # Fixed [-2, 2] ranges keep the donut centered regardless of data.
        self.x_range = Range1d(start=-2, end=2)
        self.y_range = Range1d(start=-2, end=2)

    def draw_central_wedge(self):
        """Draw the central part of the donut wedge from donut.source and
        its calculated start and end angles.
        """
        glyph = Wedge(
            x=0, y=0, radius=1, start_angle="start", end_angle="end",
            line_color="white", line_width=2, fill_color="colors"
        )
        yield GlyphRenderer(data_source=self._source, glyph=glyph)

    def draw_central_descriptions(self):
        """Draw the descriptions to be placed on the central part of the
        donut wedge
        """
        text = ["%s" % cat for cat in self.cat]
        # Labels sit at radius 0.7, halfway between slice start/end angles.
        x, y = polar_to_cartesian(0.7, self._data["start"], self._data["end"])
        text_source = ColumnDataSource(dict(text=text, x=x, y=y))
        glyph = Text(
            x="x", y="y", text="text",
            text_align="center", text_baseline="middle"
        )
        yield GlyphRenderer(data_source=text_source, glyph=glyph)

    def draw_external_ring(self, colors=None):
        """Draw the external part of the donut wedge from donut.source
        and its related descriptions

        Each category's outer ring is subdivided proportionally to its
        per-series values, with labels placed at each sub-slice midpoint.
        """
        if colors is None:
            colors = cycle_colors(self.cat, self.palette)

        first = True
        for i, (cat, start_angle, end_angle) in enumerate(zip(
                self.cat, self._data['start'], self._data['end'])):
            # NOTE(review): DataFrame.ix is deprecated pandas API - confirm
            # the pinned pandas version still supports it.
            details = self._df.ix[i]
            radians = lambda x: 2*pi*(x/self._total_units)
            angles = details.map(radians).cumsum() + start_angle
            end = angles.tolist() + [end_angle]
            start = [start_angle] + end[:-1]
            base_color = colors[i]
            #fill = [ base_color.lighten(i*0.05) for i in range(len(details) + 1) ]
            fill = [base_color for i in range(len(details) + 1)]
            text = [rowlabel for rowlabel in details.index]
            x, y = polar_to_cartesian(1.25, start, end)

            source = ColumnDataSource(dict(start=start, end=end, fill=fill))
            glyph = AnnularWedge(
                x=0, y=0, inner_radius=1, outer_radius=1.5,
                start_angle="start", end_angle="end",
                line_color="white", line_width=2,
                fill_color="fill"
            )
            yield GlyphRenderer(data_source=source, glyph=glyph)

            # Flip labels on the left half of the circle so they read upright.
            text_angle = [(start[i]+end[i])/2 for i in range(len(start))]
            text_angle = [angle + pi if pi/2 < angle < 3*pi/2 else angle
                          for angle in text_angle]

            if first and text:
                text.insert(0, '')
                offset = pi / 48
                text_angle.insert(0, text_angle[0] - offset)
                start.insert(0, start[0] - offset)
                end.insert(0, end[0] - offset)
                x, y = polar_to_cartesian(1.25, start, end)
                first = False

            data = dict(text=text, x=x, y=y, angle=text_angle)
            text_source = ColumnDataSource(data)
            glyph = Text(
                x="x", y="y", text="text", angle="angle",
                text_align="center", text_baseline="middle"
            )
            yield GlyphRenderer(data_source=text_source, glyph=glyph)

    def _yield_renderers(self):
        """Use the AnnularWedge and Wedge glyphs to display the wedges.

        Takes reference points from data loaded at the ColumnDataSurce.
        """
        # build the central round area of the donut
        renderers = []
        renderers += self.draw_central_wedge()

        # write central descriptions
        renderers += self.draw_central_descriptions()

        # build external donut ring
        renderers += self.draw_external_ring()

        return renderers
| bsd-3-clause |
zding5/Microblog-Flask | flask/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/contrib/ntlmpool.py | 1010 | 4507 | """
NTLM authenticating pool, contributed by erikcederstran
Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
"""
try:
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
from logging import getLogger
from ntlm import ntlm
from urllib3 import HTTPSConnectionPool
log = getLogger(__name__)
class NTLMConnectionPool(HTTPSConnectionPool):
    """
    Implements an NTLM authentication version of an urllib3 connection pool

    The NTLM handshake (negotiate -> challenge -> authenticate) is performed
    once per new connection in ``_new_conn``; the authenticated socket is
    then kept open for subsequent requests.
    """

    scheme = 'https'

    def __init__(self, user, pw, authurl, *args, **kwargs):
        """
        authurl is a random URL on the server that is protected by NTLM.
        user is the Windows user, probably in the DOMAIN\\username format.
        pw is the password for the user.
        """
        super(NTLMConnectionPool, self).__init__(*args, **kwargs)
        self.authurl = authurl
        self.rawuser = user
        # NOTE(review): assumes 'user' contains a 'DOMAIN\\username'
        # separator; a bare username would raise IndexError below - confirm
        # callers always pass the domain-qualified form.
        user_parts = user.split('\\', 1)
        self.domain = user_parts[0].upper()
        self.user = user_parts[1]
        self.pw = pw

    def _new_conn(self):
        # Performs the NTLM handshake that secures the connection. The socket
        # must be kept open while requests are performed.
        self.num_connections += 1
        log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s' %
                  (self.num_connections, self.host, self.authurl))

        headers = {}
        headers['Connection'] = 'Keep-Alive'
        req_header = 'Authorization'
        resp_header = 'www-authenticate'

        conn = HTTPSConnection(host=self.host, port=self.port)

        # Send negotiation message (NTLM type 1)
        headers[req_header] = (
            'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
        log.debug('Request headers: %s' % headers)
        conn.request('GET', self.authurl, None, headers)
        res = conn.getresponse()
        reshdr = dict(res.getheaders())
        log.debug('Response status: %s %s' % (res.status, res.reason))
        log.debug('Response headers: %s' % reshdr)
        log.debug('Response data: %s [...]' % res.read(100))

        # Remove the reference to the socket, so that it can not be closed by
        # the response object (we want to keep the socket open)
        res.fp = None

        # Server should respond with a challenge message (NTLM type 2)
        auth_header_values = reshdr[resp_header].split(', ')
        auth_header_value = None
        for s in auth_header_values:
            if s[:5] == 'NTLM ':
                auth_header_value = s[5:]
        if auth_header_value is None:
            raise Exception('Unexpected %s response header: %s' %
                            (resp_header, reshdr[resp_header]))

        # Send authentication message (NTLM type 3)
        ServerChallenge, NegotiateFlags = \
            ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
        auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
                                                         self.user,
                                                         self.domain,
                                                         self.pw,
                                                         NegotiateFlags)
        headers[req_header] = 'NTLM %s' % auth_msg
        log.debug('Request headers: %s' % headers)
        conn.request('GET', self.authurl, None, headers)
        res = conn.getresponse()
        log.debug('Response status: %s %s' % (res.status, res.reason))
        log.debug('Response headers: %s' % dict(res.getheaders()))
        log.debug('Response data: %s [...]' % res.read()[:100])
        if res.status != 200:
            if res.status == 401:
                raise Exception('Server rejected request: wrong '
                                'username or password')
            raise Exception('Wrong server response: %s %s' %
                            (res.status, res.reason))

        # Keep the authenticated socket alive (see note above).
        res.fp = None
        log.debug('Connection established')
        return conn

    def urlopen(self, method, url, body=None, headers=None, retries=3,
                redirect=True, assert_same_host=True):
        # Force keep-alive so the NTLM-authenticated connection is reused.
        if headers is None:
            headers = {}
        headers['Connection'] = 'Keep-Alive'
        return super(NTLMConnectionPool, self).urlopen(method, url, body,
                                                       headers, retries,
                                                       redirect,
                                                       assert_same_host)
| mit |
gdementen/pycel | src/pycel/tokenizer.py | 1 | 29107 | #========================================================================
# Description: Tokenise an Excel formula using an implementation of
# E. W. Bachtal's algorithm, found here:
#
# http://ewbi.blogs.com/develops/2004/12/excel_formula_p.html
#
# Tested with Python v2.5 (win32)
# Author: Robin Macharg
# Copyright: Algorithm (c) E. W. Bachtal, this implementation (c) R. Macharg
#
# CVS Info:
# $Header: T:\\cvsarchive/Excel\040export\040&\040import\040XML/ExcelXMLTransform/EWBI_Javascript_port/jsport.py,v 1.5 2006/12/07 13:41:08 rmacharg Exp $
#
# Modification History
#
# Date Author Comment
# =======================================================================
# 2006/11/29 - RMM - Made strictly class-based.
# Added parse, render and pretty print methods
# 2006/11 - RMM - RMM = Robin Macharg
# Created
# 2011/10 - Dirk Gorissen - Patch to support scientific notation
#========================================================================
import re
import collections
#========================================================================
# Class: ExcelParserTokens
# Description: Inheritable container for token definitions
#
# Attributes: Self explanatory
#
# Methods: None
#========================================================================
class ExcelParserTokens:
    """Namespace of token type/subtype constants shared by the tokenizer.

    ``TOK_TYPE_*`` classify a token's role in the formula; ``TOK_SUBTYPE_*``
    refine it (e.g. an operand may be text, a number or a range).
    """

    # Token types
    TOK_TYPE_NOOP = "noop"
    TOK_TYPE_OPERAND = "operand"
    TOK_TYPE_FUNCTION = "function"
    TOK_TYPE_SUBEXPR = "subexpression"
    TOK_TYPE_ARGUMENT = "argument"
    TOK_TYPE_OP_PRE = "operator-prefix"
    TOK_TYPE_OP_IN = "operator-infix"
    TOK_TYPE_OP_POST = "operator-postfix"
    TOK_TYPE_WSPACE = "white-space"
    TOK_TYPE_UNKNOWN = "unknown"

    # Token subtypes
    TOK_SUBTYPE_START = "start"
    TOK_SUBTYPE_STOP = "stop"
    TOK_SUBTYPE_TEXT = "text"
    TOK_SUBTYPE_NUMBER = "number"
    TOK_SUBTYPE_LOGICAL = "logical"
    TOK_SUBTYPE_ERROR = "error"
    TOK_SUBTYPE_RANGE = "range"
    TOK_SUBTYPE_MATH = "math"
    TOK_SUBTYPE_CONCAT = "concatenate"
    TOK_SUBTYPE_INTERSECT = "intersect"
    TOK_SUBTYPE_UNION = "union"
#========================================================================
# Class: f_token
# Description: Encapsulate a formula token
#
# Attributes: tvalue -
# ttype - See token definitions, above, for values
# tsubtype - See token definitions, above, for values
#
# Methods: f_token - __init__()
#========================================================================
class f_token:
    """A single formula token: raw text plus a type/subtype classification."""
    def __init__(self, value, type, subtype):
        # Attribute names keep the historical "t" prefix used throughout
        # the parser (tvalue/ttype/tsubtype).
        self.ttype = type
        self.tsubtype = subtype
        self.tvalue = value
    def __str__(self):
        # A token renders as its raw source text.
        return self.tvalue
#========================================================================
# Class: f_tokens
# Description: An ordered list of tokens
# Attributes: items - Ordered list
# index - Current position in the list
#
# Methods: f_tokens - __init__()
# f_token - add() - Add a token to the end of the list
# None - addRef() - Add a token to the end of the list
# None - reset() - reset the index to -1
# Boolean - BOF() - End of list?
# Boolean - EOF() - Beginning of list?
# Boolean - moveNext() - Move the index along one
# f_token/None - current() - Return the current token
# f_token/None - next() - Return the next token (leave the index unchanged)
# f_token/None - previous() - Return the previous token (leave the index unchanged)
#========================================================================
class f_tokens:
    """An ordered token list with a movable cursor.

    The cursor (``index``) starts at -1, i.e. before the first item, so a
    first moveNext() lands on item 0.
    """
    def __init__(self):
        self.items = []
        self.index = -1
    def add(self, value, type, subtype=""):
        """Create a token, append it and return it; falsy subtypes become ""."""
        token = f_token(value, type, subtype or "")
        self.addRef(token)
        return token
    def addRef(self, token):
        """Append an existing token instance as-is."""
        self.items.append(token)
    def reset(self):
        """Rewind the cursor to before the first token."""
        self.index = -1
    def BOF(self):
        """True while the cursor sits at (or before) the first token."""
        return self.index <= 0
    def EOF(self):
        """True once the cursor has reached the last token."""
        return self.index >= len(self.items) - 1
    def moveNext(self):
        """Advance the cursor one step; False when already at the end."""
        if not self.EOF():
            self.index = self.index + 1
            return True
        return False
    def current(self):
        """Token under the cursor, or None before the first move."""
        return None if self.index == -1 else self.items[self.index]
    def next(self):
        """Peek at the following token without moving; None at the end."""
        return None if self.EOF() else self.items[self.index + 1]
    def previous(self):
        """Peek at the preceding token without moving; None near the start."""
        return None if self.index < 1 else self.items[self.index - 1]
#========================================================================
# Class: f_tokenStack
# Inherits: ExcelParserTokens - a list of token values
# Description: A LIFO stack of tokens
#
# Attributes: items - Ordered list
#
# Methods: f_tokenStack - __init__()
# None - push(token) - Push a token onto the stack
# f_token/None - pop() - Pop a token off the stack
# f_token/None - token() - Non-destructively return the top item on the stack
# String - type() - Return the top token's type
# String - subtype() - Return the top token's subtype
# String - value() - Return the top token's value
#========================================================================
class f_tokenStack(ExcelParserTokens):
    """A LIFO stack of f_token instances used while tokenizing.

    Note that pop() deliberately discards the stored token's value and
    returns a fresh STOP token of the same type: the stack only records
    which construct (function/subexpression/array) still needs closing.
    """
    def __init__(self):
        self.items = []
    def push(self, token):
        """Push a token onto the stack."""
        self.items.append(token)
    def pop(self):
        """Pop the top token and return a matching end-of-construct marker."""
        token = self.items.pop()
        return f_token("", token.ttype, self.TOK_SUBTYPE_STOP)
    def token(self):
        """Return the top token without removing it, or None when empty."""
        # Replaces the old and/or "ternary hack"; behaviour is identical.
        return self.items[-1] if self.items else None
    def value(self):
        """tvalue of the top token, or "" when the stack is empty."""
        top = self.token()
        return top.tvalue if top else ""
    def type(self):
        """ttype of the top token, or "" when the stack is empty."""
        # The original bound an unused local `t` here; removed.
        top = self.token()
        return top.ttype if top else ""
    def subtype(self):
        """tsubtype of the top token, or "" when the stack is empty."""
        top = self.token()
        return top.tsubtype if top else ""
#========================================================================
# Class: ExcelParser
# Description: Parse an Excel formula into a stream of tokens
# Attributes:
#
# Methods: f_tokens - getTokens(formula) - return a token stream (list)
#========================================================================
class ExcelParser(ExcelParserTokens):
    """Tokenizer for Excel formulas (port of E. W. Bachtal's JS algorithm).

    getTokens() runs three passes:
      1. a character-level scan driven by the inString/inPath/inRange/inError
         state flags (order of the checks is significant),
      2. a pass that drops insignificant white-space, keeping it only where
         it denotes Excel's range-intersection operator,
      3. a fix-up pass classifying operator/operand subtypes and rewriting
         unary +/- tokens.
    """
    def getTokens(self, formula):
        """Return an f_tokens stream for the given formula string."""
        # Closures over `formula`/`offset`; offset is rebound in the loops below.
        def currentChar():
            return formula[offset]
        def doubleChar():
            return formula[offset:offset+2]
        def nextChar():
            # JavaScript returns an empty string if the index is out of bounds,
            # Python throws an IndexError. We mimic this behaviour here.
            try:
                formula[offset+1]
            except IndexError:
                return ""
            else:
                return formula[offset+1]
        def EOF():
            return offset >= len(formula)
        tokens = f_tokens()
        tokenStack = f_tokenStack()
        offset = 0
        token = ""
        inString = False
        inPath = False
        inRange = False
        inError = False
        # Strip leading spaces and at most one leading "=" before scanning.
        while (len(formula) > 0):
            if (formula[0] == " "):
                formula = formula[1:]
            else:
                if (formula[0] == "="):
                    formula = formula[1:]
                break;
        # state-dependent character evaluation (order is important)
        while not EOF():
            # double-quoted strings
            # embeds are doubled
            # end marks token
            if inString:
                if currentChar() == "\"":
                    if nextChar() == "\"":
                        token += "\""
                        offset += 1
                    else:
                        inString = False
                        tokens.add(token, self.TOK_TYPE_OPERAND, self.TOK_SUBTYPE_TEXT)
                        token = ""
                else:
                    token += currentChar()
                offset += 1
                continue
            # single-quoted strings (links)
            # embeds are double
            # end does not mark a token
            if inPath:
                if currentChar() == "'":
                    if nextChar() == "'":
                        token += "'"
                        offset += 1
                    else:
                        inPath = False
                else:
                    token += currentChar()
                offset += 1;
                continue;
            # bracketed strings (range offset or linked workbook name)
            # no embeds (changed to "()" by Excel)
            # end does not mark a token
            if inRange:
                if currentChar() == "]":
                    inRange = False
                token += currentChar()
                offset += 1
                continue
            # error values
            # end marks a token, determined from absolute list of values
            if inError:
                token += currentChar()
                offset += 1
                if ",#NULL!,#DIV/0!,#VALUE!,#REF!,#NAME?,#NUM!,#N/A,".find("," + token + ",") != -1:
                    inError = False
                    tokens.add(token, self.TOK_TYPE_OPERAND, self.TOK_SUBTYPE_ERROR)
                    token = ""
                continue;
            # scientific notation check: "+"/"-" after e.g. "1.5e" belongs to
            # the number, not to an operator
            regexSN = '^[1-9]{1}(\.[0-9]+)?[eE]{1}$';
            if (("+-").find(currentChar()) != -1):
                if len(token) > 1:
                    if re.match(regexSN,token):
                        token += currentChar();
                        offset += 1;
                        continue;
            # independent character evaluation (order not important)
            #
            # establish state-dependent character evaluations
            if currentChar() == "\"":
                if len(token) > 0:
                    # not expected
                    tokens.add(token, self.TOK_TYPE_UNKNOWN)
                    token = ""
                inString = True
                offset += 1
                continue
            if currentChar() == "'":
                if len(token) > 0:
                    # not expected
                    tokens.add(token, self.TOK_TYPE_UNKNOWN)
                    token = ""
                inPath = True
                offset += 1
                continue
            if (currentChar() == "["):
                inRange = True
                token += currentChar()
                offset += 1
                continue
            if (currentChar() == "#"):
                if (len(token) > 0):
                    # not expected
                    tokens.add(token, self.TOK_TYPE_UNKNOWN)
                    token = ""
                inError = True
                token += currentChar()
                offset += 1
                continue
            # mark start and end of arrays and array rows
            if (currentChar() == "{"):
                if (len(token) > 0):
                    # not expected
                    tokens.add(token, self.TOK_TYPE_UNKNOWN)
                    token = ""
                tokenStack.push(tokens.add("ARRAY", self.TOK_TYPE_FUNCTION, self.TOK_SUBTYPE_START))
                tokenStack.push(tokens.add("ARRAYROW", self.TOK_TYPE_FUNCTION, self.TOK_SUBTYPE_START))
                offset += 1
                continue
            if (currentChar() == ";"):
                if (len(token) > 0):
                    tokens.add(token, self.TOK_TYPE_OPERAND)
                    token = ""
                tokens.addRef(tokenStack.pop())
                tokens.add(",", self.TOK_TYPE_ARGUMENT)
                tokenStack.push(tokens.add("ARRAYROW", self.TOK_TYPE_FUNCTION, self.TOK_SUBTYPE_START))
                offset += 1
                continue
            if (currentChar() == "}"):
                if (len(token) > 0):
                    tokens.add(token, self.TOK_TYPE_OPERAND)
                    token = ""
                tokens.addRef(tokenStack.pop())
                tokens.addRef(tokenStack.pop())
                offset += 1
                continue
            # trim white-space
            if (currentChar() == " "):
                if (len(token) > 0):
                    tokens.add(token, self.TOK_TYPE_OPERAND)
                    token = ""
                tokens.add("", self.TOK_TYPE_WSPACE)
                offset += 1
                while ((currentChar() == " ") and (not EOF())):
                    offset += 1
                continue
            # multi-character comparators
            if (",>=,<=,<>,".find("," + doubleChar() + ",") != -1):
                if (len(token) > 0):
                    tokens.add(token, self.TOK_TYPE_OPERAND)
                    token = ""
                tokens.add(doubleChar(), self.TOK_TYPE_OP_IN, self.TOK_SUBTYPE_LOGICAL)
                offset += 2
                continue
            # standard infix operators
            if ("+-*/^&=><".find(currentChar()) != -1):
                if (len(token) > 0):
                    tokens.add(token, self.TOK_TYPE_OPERAND)
                    token = ""
                tokens.add(currentChar(), self.TOK_TYPE_OP_IN)
                offset += 1
                continue
            # standard postfix operators
            if ("%".find(currentChar()) != -1):
                if (len(token) > 0):
                    tokens.add(token, self.TOK_TYPE_OPERAND)
                    token = ""
                tokens.add(currentChar(), self.TOK_TYPE_OP_POST)
                offset += 1
                continue
            # start subexpression or function
            if (currentChar() == "("):
                if (len(token) > 0):
                    tokenStack.push(tokens.add(token, self.TOK_TYPE_FUNCTION, self.TOK_SUBTYPE_START))
                    token = ""
                else:
                    tokenStack.push(tokens.add("", self.TOK_TYPE_SUBEXPR, self.TOK_SUBTYPE_START))
                offset += 1
                continue
            # function, subexpression, array parameters
            if (currentChar() == ","):
                if (len(token) > 0):
                    tokens.add(token, self.TOK_TYPE_OPERAND)
                    token = ""
                if (not (tokenStack.type() == self.TOK_TYPE_FUNCTION)):
                    # a comma outside a function call is Excel's union operator
                    tokens.add(currentChar(), self.TOK_TYPE_OP_IN, self.TOK_SUBTYPE_UNION)
                else:
                    tokens.add(currentChar(), self.TOK_TYPE_ARGUMENT)
                offset += 1
                continue
            # stop subexpression
            if (currentChar() == ")"):
                if (len(token) > 0):
                    tokens.add(token, self.TOK_TYPE_OPERAND)
                    token = ""
                tokens.addRef(tokenStack.pop())
                offset += 1
                continue
            # token accumulation
            token += currentChar()
            offset += 1
        # dump remaining accumulation
        if (len(token) > 0):
            tokens.add(token, self.TOK_TYPE_OPERAND)
        # move all tokens to a new collection, excluding all unnecessary white-space tokens
        tokens2 = f_tokens()
        while (tokens.moveNext()):
            token = tokens.current();
            if (token.ttype == self.TOK_TYPE_WSPACE):
                if ((tokens.BOF()) or (tokens.EOF())):
                    pass
                elif (not(
                        ((tokens.previous().ttype == self.TOK_TYPE_FUNCTION) and (tokens.previous().tsubtype == self.TOK_SUBTYPE_STOP)) or
                        ((tokens.previous().ttype == self.TOK_TYPE_SUBEXPR) and (tokens.previous().tsubtype == self.TOK_SUBTYPE_STOP)) or
                        (tokens.previous().ttype == self.TOK_TYPE_OPERAND)
                      )
                    ):
                    pass
                elif (not(
                        ((tokens.next().ttype == self.TOK_TYPE_FUNCTION) and (tokens.next().tsubtype == self.TOK_SUBTYPE_START)) or
                        ((tokens.next().ttype == self.TOK_TYPE_SUBEXPR) and (tokens.next().tsubtype == self.TOK_SUBTYPE_START)) or
                        (tokens.next().ttype == self.TOK_TYPE_OPERAND)
                      )
                    ):
                    pass
                else:
                    # white-space between two operand-like tokens is Excel's
                    # range-intersection operator
                    tokens2.add(token.tvalue, self.TOK_TYPE_OP_IN, self.TOK_SUBTYPE_INTERSECT)
                continue
            tokens2.addRef(token);
        # switch infix "-" operator to prefix when appropriate, switch infix "+" operator to noop when appropriate, identify operand
        # and infix-operator subtypes, pull "@" from in front of function names
        while (tokens2.moveNext()):
            token = tokens2.current()
            if ((token.ttype == self.TOK_TYPE_OP_IN) and (token.tvalue == "-")):
                if (tokens2.BOF()):
                    token.ttype = self.TOK_TYPE_OP_PRE
                elif (
                        ((tokens2.previous().ttype == self.TOK_TYPE_FUNCTION) and (tokens2.previous().tsubtype == self.TOK_SUBTYPE_STOP)) or
                        ((tokens2.previous().ttype == self.TOK_TYPE_SUBEXPR) and (tokens2.previous().tsubtype == self.TOK_SUBTYPE_STOP)) or
                        (tokens2.previous().ttype == self.TOK_TYPE_OP_POST) or
                        (tokens2.previous().ttype == self.TOK_TYPE_OPERAND)
                     ):
                    token.tsubtype = self.TOK_SUBTYPE_MATH;
                else:
                    token.ttype = self.TOK_TYPE_OP_PRE
                continue
            if ((token.ttype == self.TOK_TYPE_OP_IN) and (token.tvalue == "+")):
                if (tokens2.BOF()):
                    token.ttype = self.TOK_TYPE_NOOP
                elif (
                        ((tokens2.previous().ttype == self.TOK_TYPE_FUNCTION) and (tokens2.previous().tsubtype == self.TOK_SUBTYPE_STOP)) or
                        ((tokens2.previous().ttype == self.TOK_TYPE_SUBEXPR) and (tokens2.previous().tsubtype == self.TOK_SUBTYPE_STOP)) or
                        (tokens2.previous().ttype == self.TOK_TYPE_OP_POST) or
                        (tokens2.previous().ttype == self.TOK_TYPE_OPERAND)
                     ):
                    token.tsubtype = self.TOK_SUBTYPE_MATH
                else:
                    token.ttype = self.TOK_TYPE_NOOP
                continue
            if ((token.ttype == self.TOK_TYPE_OP_IN) and (len(token.tsubtype) == 0)):
                if (("<>=").find(token.tvalue[0:1]) != -1):
                    token.tsubtype = self.TOK_SUBTYPE_LOGICAL
                elif (token.tvalue == "&"):
                    token.tsubtype = self.TOK_SUBTYPE_CONCAT
                else:
                    token.tsubtype = self.TOK_SUBTYPE_MATH
                continue
            if ((token.ttype == self.TOK_TYPE_OPERAND) and (len(token.tsubtype) == 0)):
                # anything that does not parse as a number and is not a
                # boolean literal is assumed to be a range reference
                try:
                    float(token.tvalue)
                except ValueError, e:
                    if ((token.tvalue == 'TRUE') or (token.tvalue == 'FALSE')):
                        token.tsubtype = self.TOK_SUBTYPE_LOGICAL
                    else:
                        token.tsubtype = self.TOK_SUBTYPE_RANGE
                else:
                    token.tsubtype = self.TOK_SUBTYPE_NUMBER
                continue
            if (token.ttype == self.TOK_TYPE_FUNCTION):
                if (token.tvalue[0:1] == "@"):
                    token.tvalue = token.tvalue[1:]
                continue
        tokens2.reset();
        # move all tokens to a new collection, excluding all noops
        tokens = f_tokens()
        while (tokens2.moveNext()):
            if (tokens2.current().ttype != self.TOK_TYPE_NOOP):
                tokens.addRef(tokens2.current())
        tokens.reset()
        return tokens
    def parse(self, formula):
        """Tokenize `formula` and store the stream on self.tokens."""
        self.tokens = self.getTokens(formula)
    def render(self):
        """Re-render the parsed token stream back into formula text."""
        output = ""
        if self.tokens:
            for t in self.tokens.items:
                if t.ttype == self.TOK_TYPE_FUNCTION and t.tsubtype == self.TOK_SUBTYPE_START: output += t.tvalue + "("
                elif t.ttype == self.TOK_TYPE_FUNCTION and t.tsubtype == self.TOK_SUBTYPE_STOP: output += ")"
                elif t.ttype == self.TOK_TYPE_SUBEXPR and t.tsubtype == self.TOK_SUBTYPE_START: output += "("
                elif t.ttype == self.TOK_TYPE_SUBEXPR and t.tsubtype == self.TOK_SUBTYPE_STOP: output += ")"
                # TODO: add in RE substitution of " with "" for strings
                elif t.ttype == self.TOK_TYPE_OPERAND and t.tsubtype == self.TOK_SUBTYPE_TEXT: output += "\"" + t.tvalue + "\""
                elif t.ttype == self.TOK_TYPE_OP_IN and t.tsubtype == self.TOK_SUBTYPE_INTERSECT: output += " "
                else: output += t.tvalue
        return output
    def prettyprint(self):
        """Return an indented, one-token-per-line dump for debugging."""
        indent = 0
        output = ""
        if self.tokens:
            for t in self.tokens.items:
                #print "'",t.ttype,t.tsubtype,t.tvalue,"'"
                if (t.tsubtype == self.TOK_SUBTYPE_STOP):
                    indent -= 1
                output += " "*indent + t.tvalue + " <" + t.ttype +"> <" + t.tsubtype + ">" + "\n"
                if (t.tsubtype == self.TOK_SUBTYPE_START):
                    indent += 1;
        return output
class Operator:
    """Operator metadata for the shunting-yard pass: the symbol, its
    precedence (higher binds tighter) and its associativity."""
    def __init__(self, value, precedence, associativity):
        self.associativity = associativity
        self.precedence = precedence
        self.value = value
class ASTNode(object):
    """Base AST node wrapping a single parser token."""
    def __init__(self, token):
        super(ASTNode, self).__init__()
        self.token = token
    def emit(self):
        """Return the token's source text.

        The original body evaluated ``self.token.tvalue`` without returning
        it (a missing ``return``), so emit() always yielded None.
        """
        return self.token.tvalue
    def __str__(self):
        return self.token.tvalue
class OperatorNode(ASTNode):
    """AST node for an operator token (prefix/infix/postfix).

    The pass-through ``__init__(*args)`` override was redundant and has been
    removed; construction is inherited from ASTNode unchanged.
    """
    def emit(self):
        # Code generation for operators is not implemented here.
        pass
class RangeNode(ASTNode):
    """AST node for a cell/range reference operand.

    The pass-through ``__init__(*args)`` override was redundant and has been
    removed; construction is inherited from ASTNode unchanged.
    """
    def emit(self):
        # Code generation for ranges is not implemented here.
        pass
class FunctionNode(ASTNode):
    # AST node for a function call. num_args starts at 0 and is assigned by
    # shunting_yard() once the call's argument count is known.
    def __init__(self,*args):
        super(FunctionNode,self).__init__(*args)
        self.num_args = 0  # number of arguments, filled in after parsing
    def emit(self):
        # Code generation for function calls is not implemented here.
        pass
def create_node(t):
    """Map a parser token to the matching AST node class.

    The ttype checks are mutually exclusive, so their order is irrelevant;
    anything unrecognised falls back to the plain ASTNode wrapper.
    """
    if t.ttype == "function":
        return FunctionNode(t)
    if t.ttype == "operator":
        return OperatorNode(t)
    if t.ttype == "operand" and t.tsubtype == "range":
        return RangeNode(t)
    return ASTNode(t)
def shunting_yard(expression):
    """Convert an Excel formula string into RPN output (a deque of AST nodes)
    using Dijkstra's shunting-yard algorithm.

    Function argument counts are tracked with the were_values/arg_count
    bookkeeping lists and stored on each FunctionNode as num_args.
    Note: this is Python 2 code (bare print statements) and emits debug
    output as a side effect.
    """
    #remove leading =
    if expression.startswith('='):
        expression = expression[1:]
    p = ExcelParser();
    p.parse(expression)
    # insert tokens for '(' and ')', to make things cleaner below
    tokens = []
    for t in p.tokens.items:
        if t.ttype == "function" and t.tsubtype == "start":
            t.tsubtype = ""
            tokens.append(t)
            tokens.append(f_token('(','arglist','start'))
        elif t.ttype == "function" and t.tsubtype == "stop":
            #t.tsubtype = ""
            #tokens.append(t)
            tokens.append(f_token(')','arglist','stop'))
        elif t.ttype == "subexpression" and t.tsubtype == "start":
            t.tvalue = '('
            tokens.append(t)
        elif t.ttype == "subexpression" and t.tsubtype == "stop":
            t.tvalue = ')'
            tokens.append(t)
        else:
            tokens.append(t)
    print "tokens: ", "|".join([x.tvalue for x in tokens])
    # Precedence table; higher number binds tighter.
    #http://office.microsoft.com/en-us/excel-help/calculation-operators-and-precedence-HP010078886.aspx
    operators = {}
    operators[':'] = Operator(':',8,'left')
    operators[''] = Operator(' ',8,'left')
    operators[','] = Operator(',',8,'left')
    operators['u-'] = Operator('u-',7,'left') #unary negation
    operators['%'] = Operator('%',6,'left')
    operators['^'] = Operator('^',5,'left')
    operators['*'] = Operator('*',4,'left')
    operators['/'] = Operator('/',4,'left')
    operators['+'] = Operator('+',3,'left')
    operators['-'] = Operator('-',3,'left')
    operators['&'] = Operator('&',2,'left')
    operators['='] = Operator('=',1,'left')
    operators['<'] = Operator('<',1,'left')
    operators['>'] = Operator('>',1,'left')
    operators['<='] = Operator('<=',1,'left')
    operators['>='] = Operator('>=',1,'left')
    operators['<>'] = Operator('<>',1,'left')
    output = collections.deque()
    stack = []
    were_values = []
    arg_count = []
    # Debug helpers: dump the current output queue / operator stack.
    def po():
        print "output: ", "|".join([x.tvalue for x in output])
    def so():
        print "stack:", "|".join([x.tvalue for x in stack])
    for t in tokens:
        if t.ttype == "operand":
            output.append(create_node(t))
            if were_values:
                were_values.pop()
                were_values.append(True)
        elif t.ttype == "function":
            stack.append(t)
            arg_count.append(0)
            if were_values:
                were_values.pop()
                were_values.append(True)
            were_values.append(False)
        elif t.ttype == "argument":
            # Pop operators until the argument list's opening marker.
            while stack and (stack[-1].tsubtype != "start"):
                output.append(create_node(stack.pop()))
            if were_values.pop(): arg_count[-1] += 1
            were_values.append(False)
            if not len(stack):
                raise Exception("Mismatched or misplaced parentheses")
        elif t.ttype.startswith('operator'):
            # Prefix "-" uses the dedicated unary-negation precedence.
            if t.ttype.endswith('-prefix') and t.tvalue =="-":
                o1 = operators['u-']
            else:
                o1 = operators[t.tvalue]
            while stack and stack[-1].ttype.startswith('operator'):
                if stack[-1].ttype.endswith('-prefix') and stack[-1].tvalue =="-":
                    o2 = operators['u-']
                else:
                    o2 = operators[stack[-1].tvalue]
                if ( (o1.associativity == "left" and o1.precedence <= o2.precedence)
                     or
                     (o1.associativity == "right" and o1.precedence < o2.precedence) ):
                    output.append(create_node(stack.pop()))
                else:
                    break
            stack.append(t)
        elif t.tsubtype == "start":
            stack.append(t)
        elif t.tsubtype == "stop":
            while stack and stack[-1].tsubtype != "start":
                output.append(create_node(stack.pop()))
            if not stack:
                raise Exception("Mismatched or misplaced parentheses")
            stack.pop()
            # A function name directly below the "(" means this closes a call:
            # finalize its argument count.
            if stack and stack[-1].ttype == "function":
                f = create_node(stack.pop())
                a = arg_count.pop()
                w = were_values.pop()
                if w: a += 1
                f.num_args = a
                print f, "has ",a," args"
                output.append(f)
    while stack:
        if stack[-1].tsubtype == "start" or stack[-1].tsubtype == "stop":
            raise Exception("Mismatched or misplaced parentheses")
        output.append(create_node(stack.pop()))
    #print "Stack is: ", "|".join(stack)
    #print "Ouput is: ", "|".join([x.tvalue for x in output])
    return output
| gpl-3.0 |
naveenvhegde/python-telegram-bot | tests/test_sticker.py | 7 | 4864 | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015 Leandro Toledo de Souza <leandrotoeldodesouza@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains a object that represents Tests for Telegram Sticker"""
import os
import unittest
import sys
sys.path.append('.')
import telegram
from tests.base import BaseTest
class StickerTest(BaseTest, unittest.TestCase):
    """This object represents Tests for Telegram Sticker."""
    def setUp(self):
        # Fixture data describing a sticker previously uploaded to Telegram.
        # NOTE(review): the file_id is presumably only valid for the bot
        # account configured in BaseTest -- confirm before reusing elsewhere.
        self.sticker_file_id = 'BQADAQADHAADyIsGAAFZfq1bphjqlgI'
        self.width = 510
        self.height = 512
        self.thumb = {'width': 90,
                      'height': 90,
                      'file_id': 'BQADAQADoQADHyP1B0mzJMVyzcB0Ag',
                      'file_size': 2364}
        self.file_size = 39518
        # JSON payload mirroring the fixture, used by the de_json tests.
        self.json_dict = {
            'file_id': self.sticker_file_id,
            'width': self.width,
            'height': self.height,
            'thumb': self.thumb,
            'file_size': self.file_size
        }
    def test_send_sticker_file(self):
        # Intentionally empty: uploading a sticker file is not covered here.
        pass
    def test_send_sticker_resend(self):
        """Test telegram.Bot sendSticker method"""
        # Re-sends an existing sticker by file_id against the live Bot API.
        print('Testing bot.sendSticker - Resend by file_id')
        message = self._bot.sendSticker(chat_id=self._chat_id,
                                        sticker=self.sticker_file_id)
        sticker = message.sticker
        self.assertEqual(sticker.file_id, self.sticker_file_id)
        self.assertEqual(sticker.width, self.width)
        self.assertEqual(sticker.height, self.height)
        self.assertTrue(isinstance(sticker.thumb, telegram.PhotoSize))
        self.assertEqual(sticker.file_size, self.file_size)
    def test_sticker_de_json(self):
        """Test Sticker.de_json() method"""
        print('Testing Sticker.de_json()')
        sticker = telegram.Sticker.de_json(self.json_dict)
        self.assertEqual(sticker.file_id, self.sticker_file_id)
        self.assertEqual(sticker.width, self.width)
        self.assertEqual(sticker.height, self.height)
        self.assertTrue(isinstance(sticker.thumb, telegram.PhotoSize))
        self.assertEqual(sticker.file_size, self.file_size)
    def test_sticker_to_json(self):
        """Test Sticker.to_json() method"""
        print('Testing Sticker.to_json()')
        sticker = telegram.Sticker.de_json(self.json_dict)
        self.assertTrue(self.is_json(sticker.to_json()))
    def test_sticker_to_dict(self):
        """Test Sticker.to_dict() method"""
        print('Testing Sticker.to_dict()')
        sticker = telegram.Sticker.de_json(self.json_dict)
        self.assertEqual(sticker['file_id'], self.sticker_file_id)
        self.assertEqual(sticker['width'], self.width)
        self.assertEqual(sticker['height'], self.height)
        self.assertTrue(isinstance(sticker['thumb'], telegram.PhotoSize))
        self.assertEqual(sticker['file_size'], self.file_size)
    def test_error_send_sticker_empty_file(self):
        # Sending a null (os.devnull) file must raise TelegramError.
        print('Testing bot.sendSticker - Null file')
        json_dict = self.json_dict
        del(json_dict['file_id'])
        json_dict['sticker'] = open(os.devnull, 'rb')
        self.assertRaises(telegram.TelegramError,
                          lambda: self._bot.sendSticker(chat_id=self._chat_id,
                                                        **json_dict))
    def test_error_send_sticker_empty_file_id(self):
        # Sending an empty file_id string must raise TelegramError.
        print('Testing bot.sendSticker - Empty file_id')
        json_dict = self.json_dict
        del(json_dict['file_id'])
        json_dict['sticker'] = ''
        self.assertRaises(telegram.TelegramError,
                          lambda: self._bot.sendSticker(chat_id=self._chat_id,
                                                        **json_dict))
    def test_error_sticker_without_required_args(self):
        # Omitting the required sticker argument must raise TypeError.
        print('Testing bot.sendSticker - Without required arguments')
        json_dict = self.json_dict
        del(json_dict['file_id'])
        self.assertRaises(TypeError,
                          lambda: self._bot.sendSticker(chat_id=self._chat_id,
                                                        **json_dict))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
invisiblek/android_kernel_htc_msm8974 | tools/perf/scripts/python/check-perf-trace.py | 11214 | 2503 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
    # Called once by perf before any events are processed.
    print "trace_begin"
    pass
def trace_end():
    # Called once by perf after the last event; report event types that had
    # no dedicated handler.
    print_unhandled()
# Handler for irq:softirq_entry events; exercises symbol_str() on the
# softirq vector field.
def irq__softirq_entry(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    vec):
        print_header(event_name, common_cpu, common_secs, common_nsecs,
            common_pid, common_comm)
        print_uncommon(context)
        # trailing comma: py2 print without newline
        print "vec=%s\n" % \
        (symbol_str("irq__softirq_entry", "vec", vec)),
# Handler for kmem:kmalloc events; exercises flag_str() on gfp_flags.
def kmem__kmalloc(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    call_site, ptr, bytes_req, bytes_alloc,
    gfp_flags):
        print_header(event_name, common_cpu, common_secs, common_nsecs,
            common_pid, common_comm)
        print_uncommon(context)
        # trailing comma: py2 print without newline
        print "call_site=%u, ptr=%u, bytes_req=%u, " \
        "bytes_alloc=%u, gfp_flags=%s\n" % \
        (call_site, ptr, bytes_req, bytes_alloc,
        flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
    # Count events that have no dedicated handler. NOTE(review): on the
    # first occurrence, `unhandled` (an autodict from Core) presumably
    # auto-creates a non-numeric node, so += raises TypeError rather than
    # KeyError -- hence the except clause seeding the count with 1.
    try:
        unhandled[event_name] += 1
    except TypeError:
        unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
    # One fixed-width line per event; trailing comma suppresses the newline.
    print "%-20s %5u %05u.%09u %8u %-20s " % \
    (event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
    # Dump the trace fields perf does not pass as handler arguments,
    # fetched from the raw event context.
    print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
        % (common_pc(context), trace_flag_str(common_flags(context)), \
        common_lock_depth(context))
def print_unhandled():
    # Print a name/count table of events that had no dedicated handler;
    # silent when every event was handled.
    keys = unhandled.keys()
    if not keys:
        return
    print "\nunhandled events:\n\n",
    print "%-40s %10s\n" % ("event", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
        "-----------"),
    for event_name in keys:
        print "%-40s %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
alex/warehouse | tests/unit/utils/test_http.py | 1 | 2794 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from warehouse.utils.http import is_safe_url, is_valid_uri
# (MOSTLY) FROM https://github.com/django/django/blob/
# 011a54315e46acdf288003566b8570440f5ac985/tests/utils_tests/test_http.py
class TestIsSafeUrl:
    """Port of Django's is_safe_url tests: URLs that could send the browser
    off-host (other schemes, scheme-relative forms, backslash tricks) or
    execute script must be rejected; same-host/relative URLs must pass."""

    @pytest.mark.parametrize(
        "url",
        [
            None,
            'http://example.com', 'http:///example.com',
            'https://example.com', 'ftp://exampel.com',
            r'\\example.com', r'\\\example.com', r'/\\/example.com',
            r'\\\example.com', r'\\example.com', r'\\//example.com',
            r'/\/example.com', r'\/example.com', r'/\example.com',
            'http:///example.com', 'http:/\//example.com',
            'http:\/example.com', 'http:/\example.com',
            'javascript:alert("XSS")', '\njavascript:alert(x)',
            '\x08//example.com', '\n',
        ],
    )
    def test_rejects_bad_url(self, url):
        """Off-host, foreign-scheme and script-bearing URLs are unsafe."""
        assert not is_safe_url(url, host="testserver")

    @pytest.mark.parametrize(
        "url",
        [
            '/view/?param=http://example.com',
            '/view/?param=https://example.com',
            '/view?param=ftp://exampel.com',
            'view/?param=//example.com',
            'https://testserver/', 'HTTPS://testserver/',
            '//testserver/', '/url%20with%20spaces/',
        ],
    )
    def test_accepts_good_url(self, url):
        """Relative URLs and absolute URLs on the expected host are safe."""
        assert is_safe_url(url, host="testserver")
class TestIsValidURI:
    """is_valid_uri accepts well-formed http(s) URIs by default and honours
    the require_scheme/allowed_schemes overrides."""

    @pytest.mark.parametrize(
        "uri",
        ["https://example.com/", "http://example.com/",
         "https://sub.example.com/path?query#thing"],
    )
    def test_valid(self, uri):
        """Ordinary http/https URIs validate."""
        assert is_valid_uri(uri)

    @pytest.mark.parametrize(
        "uri",
        ["javascript:alert(0)", "UNKNOWN", "ftp://example.com/"],
    )
    def test_invalid(self, uri):
        """Non-http(s) schemes and bare words are rejected by default."""
        assert not is_valid_uri(uri)

    def test_plain_schemes(self):
        """Any scheme passes when allowed_schemes is emptied explicitly."""
        assert is_valid_uri("ftp://example.com/", require_scheme=True,
                            allowed_schemes=[])
| apache-2.0 |
undoware/neutron-drive | google_appengine/lib/django_1_2/tests/regressiontests/utils/datetime_safe.py | 43 | 2087 | import unittest
from datetime import date as original_date, datetime as original_datetime
from django.utils.datetime_safe import date, datetime
class DatetimeTests(unittest.TestCase):
    """Checks django.utils.datetime_safe against the stdlib date/datetime.

    The safe variants must compare equal to the stdlib originals and,
    unlike the stdlib on some platforms, support strftime() for years
    before 1900. Uses assertEqual throughout: this class previously mixed
    it with the deprecated assertEquals alias.
    """
    def setUp(self):
        self.just_safe = (1900, 1, 1)  # first year strftime handles everywhere
        self.just_unsafe = (1899, 12, 31, 23, 59, 59)  # one second earlier
        self.really_old = (20, 1, 1)
        self.more_recent = (2006, 1, 1)
    def test_compare_datetimes(self):
        """Safe and stdlib objects are interchangeable for equality/strftime in range."""
        self.assertEqual(original_datetime(*self.more_recent), datetime(*self.more_recent))
        self.assertEqual(original_datetime(*self.really_old), datetime(*self.really_old))
        self.assertEqual(original_date(*self.more_recent), date(*self.more_recent))
        self.assertEqual(original_date(*self.really_old), date(*self.really_old))
        self.assertEqual(original_date(*self.just_safe).strftime('%Y-%m-%d'), date(*self.just_safe).strftime('%Y-%m-%d'))
        self.assertEqual(original_datetime(*self.just_safe).strftime('%Y-%m-%d'), datetime(*self.just_safe).strftime('%Y-%m-%d'))
    def test_safe_strftime(self):
        """strftime() works on both sides of the 1900 boundary."""
        self.assertEqual(date(*self.just_unsafe[:3]).strftime('%Y-%m-%d (weekday %w)'), '1899-12-31 (weekday 0)')
        self.assertEqual(date(*self.just_safe).strftime('%Y-%m-%d (weekday %w)'), '1900-01-01 (weekday 1)')
        self.assertEqual(datetime(*self.just_unsafe).strftime('%Y-%m-%d %H:%M:%S (weekday %w)'), '1899-12-31 23:59:59 (weekday 0)')
        self.assertEqual(datetime(*self.just_safe).strftime('%Y-%m-%d %H:%M:%S (weekday %w)'), '1900-01-01 00:00:00 (weekday 1)')
        # %y will error before this date
        self.assertEqual(date(*self.just_safe).strftime('%y'), '00')
        self.assertEqual(datetime(*self.just_safe).strftime('%y'), '00')
        self.assertEqual(date(1850, 8, 2).strftime("%Y/%m/%d was a %A"), '1850/08/02 was a Friday')
    def test_zero_padding(self):
        """
        Regression for #12524
        Check that pre-1000AD dates are padded with zeros if necessary
        """
        self.assertEqual(date(1, 1, 1).strftime("%Y/%m/%d was a %A"), '0001/01/01 was a Monday')
| bsd-3-clause |
brianjgeiger/osf.io | framework/transactions/handlers.py | 9 | 2560 | # -*- coding: utf-8 -*-
from rest_framework import status as http_status
import logging
from framework.exceptions import HTTPError
from django.db import transaction
from flask import request, current_app, has_request_context, _request_ctx_stack
from werkzeug.local import LocalProxy
LOCK_ERROR_CODE = http_status.HTTP_400_BAD_REQUEST
NO_AUTO_TRANSACTION_ATTR = '_no_auto_transaction'
logger = logging.getLogger(__name__)
def _get_current_atomic():
    """Return the ``transaction.atomic`` block attached to the active Flask
    request context, or ``None`` when called outside a request."""
    if not has_request_context():
        return None
    ctx = _request_ctx_stack.top
    return getattr(ctx, 'current_atomic', None)
current_atomic = LocalProxy(_get_current_atomic)
def no_auto_transaction(func):
    # Decorator marking a view function so the before/after/teardown
    # handlers below skip wrapping it in an automatic database transaction.
    setattr(func, NO_AUTO_TRANSACTION_ATTR, True)
    return func
def view_has_annotation(attr):
    # Return the given marker attribute from the view function handling the
    # current request (False when the view has no such attribute).
    try:
        endpoint = request.url_rule.endpoint
    except (RuntimeError, AttributeError):
        # No request context, or no URL rule matched -> no annotation.
        return False
    view = current_app.view_functions[endpoint]
    return getattr(view, attr, False)
def transaction_before_request():
    """Setup transaction before handling the request.
    """
    if view_has_annotation(NO_AUTO_TRANSACTION_ATTR):
        # View opted out of automatic transactions.
        return None
    ctx = _request_ctx_stack.top
    atomic = transaction.atomic()
    # Enter the atomic block manually; the matching __exit__ happens in
    # transaction_after_request or transaction_teardown_request.
    atomic.__enter__()
    ctx.current_atomic = atomic
def transaction_after_request(response, base_status_code_error=500):
    """Teardown transaction after handling the request. Rollback if an
    uncaught exception occurred, else commit. If the commit fails due to a lock
    error, rollback and return error response.
    """
    if view_has_annotation(NO_AUTO_TRANSACTION_ATTR):
        return response
    if response.status_code >= base_status_code_error:
        # Construct an error in order to trigger rollback in transaction.atomic().__exit__
        exc_type = HTTPError
        exc_value = HTTPError(response.status_code)
        current_atomic.__exit__(exc_type, exc_value, None)
    else:
        # Success path: exiting with no exception commits the transaction.
        current_atomic.__exit__(None, None, None)
    return response
def transaction_teardown_request(error=None):
    """Rollback transaction on uncaught error. This code should never be
    reached in debug mode, since uncaught errors are raised for use in the
    Werkzeug debugger.
    """
    if view_has_annotation(NO_AUTO_TRANSACTION_ATTR):
        return
    if error is not None and current_atomic:
        # Exiting the atomic block with the error rolls the transaction back.
        current_atomic.__exit__(error.__class__, error, None)
# Flask hook name -> handler mapping, consumed when registering the
# transaction handlers on the application.
handlers = {
    'before_request': transaction_before_request,
    'after_request': transaction_after_request,
    'teardown_request': transaction_teardown_request,
}
| apache-2.0 |
noba3/KoTos | addons/script.module.requests/lib/requests/sessions.py | 165 | 24544 | # -*- coding: utf-8 -*-
"""
requests.session
~~~~~~~~~~~~~~~~
This module provides a Session object to manage and persist settings across
requests (cookies, auth, proxies).
"""
import os
from collections import Mapping
from datetime import datetime
from .auth import _basic_auth_str
from .compat import cookielib, OrderedDict, urljoin, urlparse
from .cookies import (
cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies)
from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT
from .hooks import default_hooks, dispatch_hook
from .utils import to_key_val_list, default_headers, to_native_string
from .exceptions import (
TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError)
from .packages.urllib3._collections import RecentlyUsedContainer
from .structures import CaseInsensitiveDict
from .adapters import HTTPAdapter
from .utils import (
requote_uri, get_environ_proxies, get_netrc_auth, should_bypass_proxies,
get_auth_from_url
)
from .status_codes import codes
# formerly defined here, reexposed here for backward compatibility
from .models import REDIRECT_STATI
REDIRECT_CACHE_SIZE = 1000
def merge_setting(request_setting, session_setting, dict_class=OrderedDict):
    """
    Determines appropriate setting for a given request, taking into account
    the explicit setting on that request, and the setting in the session.
    If a setting is a dictionary, they will be merged together using
    `dict_class`.
    """
    # If either side is absent, the other wins outright.
    if session_setting is None:
        return request_setting
    if request_setting is None:
        return session_setting

    # Non-mapping settings (e.g. a `verify` bool/path) cannot be merged;
    # the request-level value simply takes precedence.
    both_mappings = (isinstance(session_setting, Mapping)
                     and isinstance(request_setting, Mapping))
    if not both_mappings:
        return request_setting

    # Layer request values on top of session values.
    merged_setting = dict_class(to_key_val_list(session_setting))
    merged_setting.update(to_key_val_list(request_setting))

    # A value of None acts as a deletion marker. Collect the keys first so
    # the dict is never mutated while being iterated.
    for key in [k for (k, v) in merged_setting.items() if v is None]:
        del merged_setting[key]

    return merged_setting
def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict):
    """
    Properly merges both requests and session hooks.

    This is necessary because when request_hooks == {'response': []}, the
    merge breaks Session hooks entirely.
    """
    def _effectively_empty(hooks):
        # A hooks dict whose 'response' list is empty is indistinguishable
        # from "no hooks set", so treat both the same.
        return hooks is None or hooks.get('response') == []

    if _effectively_empty(session_hooks):
        return request_hooks
    if _effectively_empty(request_hooks):
        return session_hooks

    return merge_setting(request_hooks, session_hooks, dict_class)
class SessionRedirectMixin(object):
    """Redirect-resolution behaviour mixed into :class:`Session`."""

    def resolve_redirects(self, resp, req, stream=False, timeout=None,
                          verify=True, cert=None, proxies=None, **adapter_kwargs):
        """Receives a Response. Returns a generator of Responses.

        Each yielded Response is the result of following one redirect hop,
        up to ``self.max_redirects`` hops.
        """
        i = 0
        hist = []  # keep track of history

        while resp.is_redirect:
            prepared_request = req.copy()

            if i > 0:
                # Update history and keep track of redirects.
                hist.append(resp)
                new_hist = list(hist)
                resp.history = new_hist

            try:
                resp.content  # Consume socket so it can be released
            except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
                resp.raw.read(decode_content=False)

            if i >= self.max_redirects:
                raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects)

            # Release the connection back into the pool.
            resp.close()

            url = resp.headers['location']
            method = req.method

            # Handle redirection without scheme (see: RFC 1808 Section 4)
            if url.startswith('//'):
                parsed_rurl = urlparse(resp.url)
                url = '%s:%s' % (parsed_rurl.scheme, url)

            # The scheme should be lower case...
            parsed = urlparse(url)
            url = parsed.geturl()

            # Facilitate relative 'location' headers, as allowed by RFC 7231.
            # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
            # Compliant with RFC3986, we percent encode the url.
            if not parsed.netloc:
                url = urljoin(resp.url, requote_uri(url))
            else:
                url = requote_uri(url)

            prepared_request.url = to_native_string(url)
            # Cache the url, unless it redirects to itself.
            if resp.is_permanent_redirect and req.url != prepared_request.url:
                self.redirect_cache[req.url] = prepared_request.url

            # http://tools.ietf.org/html/rfc7231#section-6.4.4
            if (resp.status_code == codes.see_other and
                    method != 'HEAD'):
                method = 'GET'

            # Do what the browsers do, despite standards...
            # First, turn 302s into GETs.
            if resp.status_code == codes.found and method != 'HEAD':
                method = 'GET'

            # Second, if a POST is responded to with a 301, turn it into a GET.
            # This bizarre behaviour is explained in Issue 1704.
            if resp.status_code == codes.moved and method == 'POST':
                method = 'GET'

            prepared_request.method = method

            # https://github.com/kennethreitz/requests/issues/1084
            # A redirect that may change the method must not resend the
            # original request body.
            if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect):
                if 'Content-Length' in prepared_request.headers:
                    del prepared_request.headers['Content-Length']

                prepared_request.body = None

            # Drop the stale Cookie header; fresh cookies are prepared below.
            headers = prepared_request.headers
            try:
                del headers['Cookie']
            except KeyError:
                pass

            # Extract any cookies sent on the response to the cookiejar
            # in the new request. Because we've mutated our copied prepared
            # request, use the old one that we haven't yet touched.
            extract_cookies_to_jar(prepared_request._cookies, req, resp.raw)
            prepared_request._cookies.update(self.cookies)
            prepared_request.prepare_cookies(prepared_request._cookies)

            # Rebuild auth and proxy information.
            proxies = self.rebuild_proxies(prepared_request, proxies)
            self.rebuild_auth(prepared_request, resp)

            # Override the original request.
            req = prepared_request

            resp = self.send(
                req,
                stream=stream,
                timeout=timeout,
                verify=verify,
                cert=cert,
                proxies=proxies,
                allow_redirects=False,
                **adapter_kwargs
            )

            extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)

            i += 1
            yield resp

    def rebuild_auth(self, prepared_request, response):
        """
        When being redirected we may want to strip authentication from the
        request to avoid leaking credentials. This method intelligently removes
        and reapplies authentication where possible to avoid credential loss.
        """
        headers = prepared_request.headers
        url = prepared_request.url

        if 'Authorization' in headers:
            # If we get redirected to a new host, we should strip out any
            # authentication headers.
            original_parsed = urlparse(response.request.url)
            redirect_parsed = urlparse(url)

            if (original_parsed.hostname != redirect_parsed.hostname):
                del headers['Authorization']

        # .netrc might have more auth for us on our new host.
        new_auth = get_netrc_auth(url) if self.trust_env else None
        if new_auth is not None:
            prepared_request.prepare_auth(new_auth)

        return

    def rebuild_proxies(self, prepared_request, proxies):
        """
        This method re-evaluates the proxy configuration by considering the
        environment variables. If we are redirected to a URL covered by
        NO_PROXY, we strip the proxy configuration. Otherwise, we set missing
        proxy keys for this URL (in case they were stripped by a previous
        redirect).

        This method also replaces the Proxy-Authorization header where
        necessary.

        :returns: the (possibly updated) proxy mapping for this request.
        """
        headers = prepared_request.headers
        url = prepared_request.url
        scheme = urlparse(url).scheme
        new_proxies = proxies.copy() if proxies is not None else {}

        if self.trust_env and not should_bypass_proxies(url):
            environ_proxies = get_environ_proxies(url)

            proxy = environ_proxies.get(scheme)

            if proxy:
                new_proxies.setdefault(scheme, environ_proxies[scheme])

        if 'Proxy-Authorization' in headers:
            del headers['Proxy-Authorization']

        # Re-derive proxy credentials from the (possibly new) proxy URL.
        try:
            username, password = get_auth_from_url(new_proxies[scheme])
        except KeyError:
            username, password = None, None

        if username and password:
            headers['Proxy-Authorization'] = _basic_auth_str(username, password)

        return new_proxies
class Session(SessionRedirectMixin):
    """A Requests session.

    Provides cookie persistence, connection-pooling, and configuration.

    Basic Usage::

      >>> import requests
      >>> s = requests.Session()
      >>> s.get('http://httpbin.org/get')
      <Response [200]>

    Or as a context manager::

      >>> with requests.Session() as s:
      >>>     s.get('http://httpbin.org/get')
      <Response [200]>
    """

    # Attributes captured by __getstate__ when pickling a Session.
    __attrs__ = [
        'headers', 'cookies', 'auth', 'proxies', 'hooks', 'params', 'verify',
        'cert', 'prefetch', 'adapters', 'stream', 'trust_env',
        'max_redirects',
    ]

    def __init__(self):

        #: A case-insensitive dictionary of headers to be sent on each
        #: :class:`Request <Request>` sent from this
        #: :class:`Session <Session>`.
        self.headers = default_headers()

        #: Default Authentication tuple or object to attach to
        #: :class:`Request <Request>`.
        self.auth = None

        #: Dictionary mapping protocol or protocol and host to the URL of the proxy
        #: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to
        #: be used on each :class:`Request <Request>`.
        self.proxies = {}

        #: Event-handling hooks.
        self.hooks = default_hooks()

        #: Dictionary of querystring data to attach to each
        #: :class:`Request <Request>`. The dictionary values may be lists for
        #: representing multivalued query parameters.
        self.params = {}

        #: Stream response content default.
        self.stream = False

        #: SSL Verification default.
        self.verify = True

        #: SSL certificate default.
        self.cert = None

        #: Maximum number of redirects allowed. If the request exceeds this
        #: limit, a :class:`TooManyRedirects` exception is raised.
        self.max_redirects = DEFAULT_REDIRECT_LIMIT

        #: Trust environment settings for proxy configuration, default
        #: authentication and similar.
        self.trust_env = True

        #: A CookieJar containing all currently outstanding cookies set on this
        #: session. By default it is a
        #: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but
        #: may be any other ``cookielib.CookieJar`` compatible object.
        self.cookies = cookiejar_from_dict({})

        # Default connection adapters.
        self.adapters = OrderedDict()
        self.mount('https://', HTTPAdapter())
        self.mount('http://', HTTPAdapter())

        # Only store 1000 redirects to prevent using infinite memory
        self.redirect_cache = RecentlyUsedContainer(REDIRECT_CACHE_SIZE)

    def __enter__(self):
        """Support ``with Session() as s:`` usage."""
        return self

    def __exit__(self, *args):
        # Close all adapters (and their pooled connections) on context exit.
        self.close()

    def prepare_request(self, request):
        """Constructs a :class:`PreparedRequest <PreparedRequest>` for
        transmission and returns it. The :class:`PreparedRequest` has settings
        merged from the :class:`Request <Request>` instance and those of the
        :class:`Session`.

        :param request: :class:`Request` instance to prepare with this
            session's settings.
        """
        cookies = request.cookies or {}

        # Bootstrap CookieJar.
        if not isinstance(cookies, cookielib.CookieJar):
            cookies = cookiejar_from_dict(cookies)

        # Merge with session cookies
        merged_cookies = merge_cookies(
            merge_cookies(RequestsCookieJar(), self.cookies), cookies)

        # Set environment's basic authentication if not explicitly set.
        auth = request.auth
        if self.trust_env and not auth and not self.auth:
            auth = get_netrc_auth(request.url)

        p = PreparedRequest()
        p.prepare(
            method=request.method.upper(),
            url=request.url,
            files=request.files,
            data=request.data,
            json=request.json,
            headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict),
            params=merge_setting(request.params, self.params),
            auth=merge_setting(auth, self.auth),
            cookies=merged_cookies,
            hooks=merge_hooks(request.hooks, self.hooks),
        )
        return p

    def request(self, method, url,
        params=None,
        data=None,
        headers=None,
        cookies=None,
        files=None,
        auth=None,
        timeout=None,
        allow_redirects=True,
        proxies=None,
        hooks=None,
        stream=None,
        verify=None,
        cert=None,
        json=None):
        """Constructs a :class:`Request <Request>`, prepares it and sends it.
        Returns :class:`Response <Response>` object.

        :param method: method for the new :class:`Request` object.
        :param url: URL for the new :class:`Request` object.
        :param params: (optional) Dictionary or bytes to be sent in the query
            string for the :class:`Request`.
        :param data: (optional) Dictionary, bytes, or file-like object to send
            in the body of the :class:`Request`.
        :param json: (optional) json to send in the body of the
            :class:`Request`.
        :param headers: (optional) Dictionary of HTTP Headers to send with the
            :class:`Request`.
        :param cookies: (optional) Dict or CookieJar object to send with the
            :class:`Request`.
        :param files: (optional) Dictionary of ``'filename': file-like-objects``
            for multipart encoding upload.
        :param auth: (optional) Auth tuple or callable to enable
            Basic/Digest/Custom HTTP Auth.
        :param timeout: (optional) How long to wait for the server to send
            data before giving up, as a float, or a :ref:`(connect timeout,
            read timeout) <timeouts>` tuple.
        :type timeout: float or tuple
        :param allow_redirects: (optional) Set to True by default.
        :type allow_redirects: bool
        :param proxies: (optional) Dictionary mapping protocol or protocol and
            hostname to the URL of the proxy.
        :param stream: (optional) whether to immediately download the response
            content. Defaults to ``False``.
        :param verify: (optional) whether the SSL cert will be verified.
            A CA_BUNDLE path can also be provided. Defaults to ``True``.
        :param cert: (optional) if String, path to ssl client cert file (.pem).
            If Tuple, ('cert', 'key') pair.
        """
        # Create the Request.
        req = Request(
            method = method.upper(),
            url = url,
            headers = headers,
            files = files,
            data = data or {},
            json = json,
            params = params or {},
            auth = auth,
            cookies = cookies,
            hooks = hooks,
        )
        prep = self.prepare_request(req)

        proxies = proxies or {}

        settings = self.merge_environment_settings(
            prep.url, proxies, stream, verify, cert
        )

        # Send the request.
        send_kwargs = {
            'timeout': timeout,
            'allow_redirects': allow_redirects,
        }
        send_kwargs.update(settings)
        resp = self.send(prep, **send_kwargs)

        return resp

    def get(self, url, **kwargs):
        """Sends a GET request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        """
        kwargs.setdefault('allow_redirects', True)
        return self.request('GET', url, **kwargs)

    def options(self, url, **kwargs):
        """Sends a OPTIONS request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        """
        kwargs.setdefault('allow_redirects', True)
        return self.request('OPTIONS', url, **kwargs)

    def head(self, url, **kwargs):
        """Sends a HEAD request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        """
        # HEAD requests do not follow redirects by default.
        kwargs.setdefault('allow_redirects', False)
        return self.request('HEAD', url, **kwargs)

    def post(self, url, data=None, json=None, **kwargs):
        """Sends a POST request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
        :param json: (optional) json to send in the body of the :class:`Request`.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        """
        return self.request('POST', url, data=data, json=json, **kwargs)

    def put(self, url, data=None, **kwargs):
        """Sends a PUT request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        """
        return self.request('PUT', url, data=data, **kwargs)

    def patch(self, url, data=None, **kwargs):
        """Sends a PATCH request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        """
        return self.request('PATCH', url,  data=data, **kwargs)

    def delete(self, url, **kwargs):
        """Sends a DELETE request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        """
        return self.request('DELETE', url, **kwargs)

    def send(self, request, **kwargs):
        """Send a given PreparedRequest.

        :param request: the :class:`PreparedRequest` to transmit.
        :returns: the final :class:`Response`, with any redirect history
            attached to ``Response.history``.
        """
        # Set defaults that the hooks can utilize to ensure they always have
        # the correct parameters to reproduce the previous request.
        kwargs.setdefault('stream', self.stream)
        kwargs.setdefault('verify', self.verify)
        kwargs.setdefault('cert', self.cert)
        kwargs.setdefault('proxies', self.proxies)

        # It's possible that users might accidentally send a Request object.
        # Guard against that specific failure case.
        if not isinstance(request, PreparedRequest):
            raise ValueError('You can only send PreparedRequests.')

        # Follow cached permanent redirects up front; checked_urls breaks out
        # of any cycle within the cache.
        checked_urls = set()
        while request.url in self.redirect_cache:
            checked_urls.add(request.url)
            new_url = self.redirect_cache.get(request.url)
            if new_url in checked_urls:
                break
            request.url = new_url

        # Set up variables needed for resolve_redirects and dispatching of hooks
        allow_redirects = kwargs.pop('allow_redirects', True)
        stream = kwargs.get('stream')
        hooks = request.hooks

        # Get the appropriate adapter to use
        adapter = self.get_adapter(url=request.url)

        # Start time (approximately) of the request
        start = datetime.utcnow()

        # Send the request
        r = adapter.send(request, **kwargs)

        # Total elapsed time of the request (approximately)
        r.elapsed = datetime.utcnow() - start

        # Response manipulation hooks
        r = dispatch_hook('response', hooks, r, **kwargs)

        # Persist cookies
        if r.history:

            # If the hooks create history then we want those cookies too
            for resp in r.history:
                extract_cookies_to_jar(self.cookies, resp.request, resp.raw)

        extract_cookies_to_jar(self.cookies, request, r.raw)

        # Redirect resolving generator.
        gen = self.resolve_redirects(r, request, **kwargs)

        # Resolve redirects if allowed.
        history = [resp for resp in gen] if allow_redirects else []

        # Shuffle things around if there's history.
        if history:
            # Insert the first (original) request at the start
            history.insert(0, r)
            # Get the last request made
            r = history.pop()
            r.history = history

        # Unless streaming, eagerly consume the body before returning.
        if not stream:
            r.content

        return r

    def merge_environment_settings(self, url, proxies, stream, verify, cert):
        """Check the environment and merge it with some settings.

        :returns: dict of ``verify``/``proxies``/``stream``/``cert`` values
            suitable for passing to :meth:`send`.
        """
        # Gather clues from the surrounding environment.
        if self.trust_env:
            # Set environment's proxies.
            env_proxies = get_environ_proxies(url) or {}
            for (k, v) in env_proxies.items():
                proxies.setdefault(k, v)

            # Look for requests environment configuration and be compatible
            # with cURL.
            if verify is True or verify is None:
                verify = (os.environ.get('REQUESTS_CA_BUNDLE') or
                          os.environ.get('CURL_CA_BUNDLE'))

        # Merge all the kwargs.
        proxies = merge_setting(proxies, self.proxies)
        stream = merge_setting(stream, self.stream)
        verify = merge_setting(verify, self.verify)
        cert = merge_setting(cert, self.cert)

        return {'verify': verify, 'proxies': proxies, 'stream': stream,
                'cert': cert}

    def get_adapter(self, url):
        """Returns the appropriate connection adapter for the given URL."""
        for (prefix, adapter) in self.adapters.items():
            if url.lower().startswith(prefix):
                return adapter

        # Nothing matches :-/
        raise InvalidSchema("No connection adapters were found for '%s'" % url)

    def close(self):
        """Closes all adapters and as such the session"""
        for v in self.adapters.values():
            v.close()

    def mount(self, prefix, adapter):
        """Registers a connection adapter to a prefix.

        Adapters are sorted in descending order by key length."""
        self.adapters[prefix] = adapter
        # Re-insert shorter prefixes so they move to the end of the
        # OrderedDict; get_adapter then matches the longest prefix first.
        keys_to_move = [k for k in self.adapters if len(k) < len(prefix)]

        for key in keys_to_move:
            self.adapters[key] = self.adapters.pop(key)

    def __getstate__(self):
        # redirect_cache is not directly picklable; snapshot it as a dict.
        state = dict((attr, getattr(self, attr, None)) for attr in self.__attrs__)
        state['redirect_cache'] = dict(self.redirect_cache)
        return state

    def __setstate__(self, state):
        # Restore plain attributes, then rebuild the bounded redirect cache.
        redirect_cache = state.pop('redirect_cache', {})
        for attr, value in state.items():
            setattr(self, attr, value)
        self.redirect_cache = RecentlyUsedContainer(REDIRECT_CACHE_SIZE)
        for redirect, to in redirect_cache.items():
            self.redirect_cache[redirect] = to
def session():
    """Returns a :class:`Session` for context-management.

    Kept for backwards compatibility; constructing :class:`Session`
    directly is equivalent.
    """
    new_session = Session()
    return new_session
| gpl-2.0 |
JunHe77/bigtop | bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/params_windows.py | 6 | 2999 | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
from resource_management import *
from resource_management.libraries import functions
import os
from status_params import *
# server configurations
config = Script.get_config()

# On Windows a single service account runs all Hadoop daemons, so the
# YARN/HDFS/smoke users all collapse to the cluster-wide hadoop user.
hadoop_user = config["configurations"]["cluster-env"]["hadoop.user.name"]
yarn_user = hadoop_user
hdfs_user = hadoop_user
smokeuser = hadoop_user

# Install locations come from environment variables set by the Hadoop
# installation (will raise KeyError if the environment is not provisioned).
config_dir = os.environ["HADOOP_CONF_DIR"]
hadoop_home = os.environ["HADOOP_HOME"]
yarn_home = os.environ["HADOOP_YARN_HOME"]

hadoop_ssl_enabled = default("/configurations/core-site/hadoop.ssl.enabled", False)

# Kerberos security is on only when the authentication mode says so.
_authentication = config['configurations']['core-site']['hadoop.security.authentication']
security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))

# ResourceManager web UI endpoints (host:port); the port is taken from the
# configured webapp address.
rm_host = config['clusterHostInfo']['rm_host'][0]
rm_port = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'].split(':')[-1]
rm_https_port = "8090"
rm_webui_address = format("{rm_host}:{rm_port}")
rm_webui_https_address = format("{rm_host}:{rm_https_port}")

# MapReduce JobHistory server web UI endpoint.
hs_host = config['clusterHostInfo']['hs_host'][0]
hs_port = config['configurations']['mapred-site']['mapreduce.jobhistory.webapp.address'].split(':')[-1]
hs_webui_address = format("{hs_host}:{hs_port}")

hadoop_mapred2_jar_location = os.path.join(os.environ["HADOOP_COMMON_HOME"], "share", "hadoop", "mapreduce")
hadoopMapredExamplesJarName = "hadoop-mapreduce-examples-2.*.jar"

# NodeManager decommissioning: hosts excluded from the cluster and the file
# the ResourceManager reads them from.
exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
update_files_only = default("/commandParams/update_files_only",False)

nm_hosts = default("/clusterHostInfo/nm_hosts", [])
#incude file
include_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.include-path", None)
include_hosts = None
manage_include_files = default("/configurations/yarn-site/manage.include.files", False)
if include_file_path and manage_include_files:
    # Include list = all NodeManagers that are not being decommissioned.
    include_hosts = list(set(nm_hosts) - set(exclude_hosts))
| apache-2.0 |
alb-i986/selenium | py/test/selenium/webdriver/common/clear_tests.py | 3 | 3263 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import pytest
from selenium.common.exceptions import InvalidElementStateException
@pytest.mark.ignore_chrome
class ClearTests(unittest.TestCase):
    """WebElement.clear() behaviour tests.

    clear() must empty writable inputs/areas and raise
    InvalidElementStateException on disabled or read-only elements.
    The repetitive try/self.fail/except pattern is replaced with the
    unittest assertRaises context manager, which is equivalent but gives
    a standard, clearer failure message when no exception is raised.
    """

    def testWritableTextInputShouldClear(self):
        """clear() empties a writable text <input>."""
        self._loadPage("readOnlyPage")
        element = self.driver.find_element_by_id("writableTextInput")
        element.clear()
        self.assertEqual("", element.get_attribute("value"))

    def testTextInputShouldNotClearWhenDisabled(self):
        """clear() on a disabled text input must raise."""
        self._loadPage("readOnlyPage")
        element = self.driver.find_element_by_id("textInputnotenabled")
        self.assertFalse(element.is_enabled())
        with self.assertRaises(InvalidElementStateException):
            element.clear()

    def testTextInputShouldNotClearWhenReadOnly(self):
        """clear() on a read-only text input must raise."""
        self._loadPage("readOnlyPage")
        element = self.driver.find_element_by_id("readOnlyTextInput")
        with self.assertRaises(InvalidElementStateException):
            element.clear()

    def testWritableTextAreaShouldClear(self):
        """clear() empties a writable <textarea>."""
        self._loadPage("readOnlyPage")
        element = self.driver.find_element_by_id("writableTextArea")
        element.clear()
        self.assertEqual("", element.get_attribute("value"))

    def testTextAreaShouldNotClearWhenDisabled(self):
        """clear() on a disabled <textarea> must raise."""
        self._loadPage("readOnlyPage")
        element = self.driver.find_element_by_id("textAreaNotenabled")
        with self.assertRaises(InvalidElementStateException):
            element.clear()

    def testTextAreaShouldNotClearWhenReadOnly(self):
        """clear() on a read-only <textarea> must raise."""
        self._loadPage("readOnlyPage")
        element = self.driver.find_element_by_id("textAreaReadOnly")
        with self.assertRaises(InvalidElementStateException):
            element.clear()

    def testContentEditableAreaShouldClear(self):
        """clear() empties a contenteditable element's text."""
        self._loadPage("readOnlyPage")
        element = self.driver.find_element_by_id("content-editable")
        element.clear()
        self.assertEqual("", element.text)

    # -- page-loading helpers ------------------------------------------------

    def _pageURL(self, name):
        """Absolute URL for a test page served by the test webserver."""
        return self.webserver.where_is(name + '.html')

    def _loadSimplePage(self):
        self._loadPage("simpleTest")

    def _loadPage(self, name):
        self.driver.get(self._pageURL(name))
SteveHNH/ansible | test/units/modules/remote_management/oneview/hpe_test_utils.py | 21 | 5225 | # -*- coding: utf-8 -*-
#
# Copyright (2016-2017) Hewlett Packard Enterprise Development LP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import yaml
from mock import Mock, patch
from oneview_module_loader import ONEVIEW_MODULE_UTILS_PATH
from hpOneView.oneview_client import OneViewClient
class OneViewBaseTestCase(object):
    """Shared scaffolding for OneView Ansible module unit tests.

    Subclasses call :meth:`configure_mocks` to replace the OneView client
    and AnsibleModule with mocks before each test.
    """
    # Mock for OneViewClient.from_json_file (set in configure_mocks)
    mock_ov_client_from_json_file = None
    # Module class under test
    testing_class = None
    # Mock standing in for the AnsibleModule instance
    mock_ansible_module = None
    # Mock standing in for the OneViewClient instance
    mock_ov_client = None
    # Ansible module (python module object) resolved from testing_class
    testing_module = None
    # Parsed EXAMPLES yaml from the module under test
    EXAMPLES = None

    def configure_mocks(self, test_case, testing_class):
        """
        Preload mocked OneViewClient instance and AnsibleModule

        Args:
            test_case (object): class instance (self) that are inheriting from OneViewBaseTestCase
            testing_class (object): class being tested
        """
        self.testing_class = testing_class

        # Define OneView Client Mock (FILE)
        patcher_json_file = patch.object(OneViewClient, 'from_json_file')
        test_case.addCleanup(patcher_json_file.stop)
        self.mock_ov_client_from_json_file = patcher_json_file.start()

        # Define OneView Client Mock
        self.mock_ov_client = self.mock_ov_client_from_json_file.return_value

        # Define Ansible Module Mock
        patcher_ansible = patch(ONEVIEW_MODULE_UTILS_PATH + '.AnsibleModule')
        test_case.addCleanup(patcher_ansible.stop)
        mock_ansible_module = patcher_ansible.start()
        self.mock_ansible_module = Mock()
        mock_ansible_module.return_value = self.mock_ansible_module

        self.__set_module_examples()

    def test_main_function_should_call_run_method(self):
        """main() must instantiate the module class and call run()."""
        self.mock_ansible_module.params = {'config': 'config.json'}
        main_func = getattr(self.testing_module, 'main')

        with patch.object(self.testing_class, "run") as mock_run:
            main_func()
            # NOTE(review): assert_called_once requires a sufficiently recent
            # mock library; on very old versions it is a silent no-op.
            mock_run.assert_called_once()

    def __set_module_examples(self):
        # Resolve the actual ansible module object from the class under
        # test (last component of its dotted module path).
        ansible = __import__('ansible')
        testing_module = self.testing_class.__module__.split('.')[-1]
        self.testing_module = getattr(ansible.modules.remote_management.oneview, testing_module)

        try:
            # Load scenarios from module examples (Also checks if it is a valid yaml)
            self.EXAMPLES = yaml.load(self.testing_module.EXAMPLES, yaml.SafeLoader)
        except yaml.scanner.ScannerError:
            message = "Something went wrong while parsing yaml from {}.EXAMPLES".format(self.testing_class.__module__)
            raise Exception(message)
class FactsParamsTestCase(OneViewBaseTestCase):
    """
    FactsParamsTestCase has common test for classes that support pass additional
    parameters when retrieving all resources.
    """

    def configure_client_mock(self, resorce_client):
        """
        Args:
            resorce_client: Resource client that is being called
        """
        # (parameter name keeps the historical 'resorce_client' spelling to
        # avoid breaking subclasses that call it by keyword)
        self.resource_client = resorce_client

    def __validations(self):
        # Fail fast with an explicit message if the subclass forgot either
        # configure_mocks or configure_client_mock.
        if not self.testing_class:
            raise Exception("Mocks are not configured, you must call 'configure_mocks' before running this test.")

        if not self.resource_client:
            raise Exception(
                "Mock for the client not configured, you must call 'configure_client_mock' before running this test.")

    def test_should_get_all_using_filters(self):
        """Module must forward all pagination/filter params to get_all."""
        self.__validations()
        self.resource_client.get_all.return_value = []

        params_get_all_with_filters = dict(
            config='config.json',
            name=None,
            params={
                'start': 1,
                'count': 3,
                'sort': 'name:descending',
                'filter': 'purpose=General',
                'query': 'imported eq true'
            })
        self.mock_ansible_module.params = params_get_all_with_filters

        self.testing_class().run()

        self.resource_client.get_all.assert_called_once_with(start=1, count=3, sort='name:descending',
                                                             filter='purpose=General',
                                                             query='imported eq true')

    def test_should_get_all_without_params(self):
        """Without a params dict, get_all must be called with no arguments."""
        self.__validations()
        self.resource_client.get_all.return_value = []

        params_get_all_with_filters = dict(
            config='config.json',
            name=None
        )
        self.mock_ansible_module.params = params_get_all_with_filters

        self.testing_class().run()

        self.resource_client.get_all.assert_called_once_with()
| gpl-3.0 |
4eek/edx-platform | common/djangoapps/external_auth/djangostore.py | 224 | 3356 | """A openid store using django cache"""
from openid.store.interface import OpenIDStore
from openid.store import nonce
from django.core.cache import cache
import logging
import time
# Cache timeouts (seconds). Nonces outlive the allowed clock skew so replays
# inside the acceptance window are still caught.
DEFAULT_ASSOCIATIONS_TIMEOUT = 60
DEFAULT_NONCE_TIMEOUT = 600

# Key-namespace prefixes so OpenID entries cannot collide with other cache
# users sharing the same Django cache backend.
ASSOCIATIONS_KEY_PREFIX = 'openid.provider.associations.'
NONCE_KEY_PREFIX = 'openid.provider.nonce.'

log = logging.getLogger('DjangoOpenIDStore')
def get_url_key(server_url):
    """Cache key under which all associations for *server_url* are stored."""
    return ASSOCIATIONS_KEY_PREFIX + server_url
def get_nonce_key(server_url, timestamp, salt):
    """Cache key identifying a single (server_url, timestamp, salt) nonce."""
    return '%s%s.%s.%s' % (NONCE_KEY_PREFIX, server_url, timestamp, salt)
class DjangoOpenIDStore(OpenIDStore):
    """OpenID store backed by the Django cache.

    Associations are kept per endpoint as a dict of ``handle -> association``
    under a single cache key; nonces are individual short-lived cache keys.
    Expiry is delegated to the cache timeouts, so the cleanup* hooks are
    no-ops.
    """

    def __init__(self):
        log.info('DjangoStore cache:' + str(cache.__class__))

    def storeAssociation(self, server_url, assoc):
        """Add (or replace) the association for ``assoc.handle``."""
        key = get_url_key(server_url)
        log.info('storeAssociation {0}'.format(key))
        associations = cache.get(key, {})
        associations[assoc.handle] = assoc
        cache.set(key, associations, DEFAULT_ASSOCIATIONS_TIMEOUT)

    def getAssociation(self, server_url, handle=None):
        """Return the association for ``handle``, or the longest-lived valid
        association for ``server_url`` when ``handle`` is None.

        Expired entries found during lookup are evicted as a side effect.
        """
        key = get_url_key(server_url)
        log.info('getAssociation {0}'.format(key))
        associations = cache.get(key, {})
        assoc = None
        if handle is None:
            # Pick the association that stays valid the longest.
            # BUGFIX: the original iterated the dict's keys (handle strings)
            # instead of the association objects, passed the lambda as the
            # py2 cmp argument instead of key=, and then indexed the sort
            # *method* (valid_assocs.sort[0]) -- this branch could never
            # return an association.
            valid_assocs = [a for a in associations.values()
                            if a.getExpiresIn() > 0]
            if valid_assocs:
                valid_assocs.sort(key=lambda a: a.getExpiresIn(),
                                  reverse=True)
                assoc = valid_assocs[0]
        else:
            assoc = associations.get(handle)

        # check expiration and remove if it has expired
        if assoc and assoc.getExpiresIn() <= 0:
            if handle is None:
                cache.delete(key)
            else:
                associations.pop(handle)
                cache.set(key, associations, DEFAULT_ASSOCIATIONS_TIMEOUT)
            assoc = None
        return assoc

    def removeAssociation(self, server_url, handle):
        """Remove an association; with ``handle=None`` the whole endpoint
        entry is dropped. Returns True if anything was removed."""
        key = get_url_key(server_url)
        log.info('removeAssociation {0}'.format(key))
        associations = cache.get(key, {})
        removed = False
        if associations:
            if handle is None:
                cache.delete(key)
                removed = True
            else:
                assoc = associations.pop(handle, None)
                if assoc:
                    cache.set(key, associations, DEFAULT_ASSOCIATIONS_TIMEOUT)
                    removed = True
        return removed

    def useNonce(self, server_url, timestamp, salt):
        """Return True the first time a (url, timestamp, salt) nonce is seen
        inside the allowed clock-skew window, False otherwise.

        NOTE: returns False for a fresh nonce and True for a replay, which is
        the inverse of the python-openid interface convention -- confirm
        against the calling code before changing.
        """
        key = get_nonce_key(server_url, timestamp, salt)
        log.info('useNonce {0}'.format(key))
        if abs(timestamp - time.time()) > nonce.SKEW:
            return False
        anonce = cache.get(key)
        found = False
        if anonce is None:
            # First sighting: mark the nonce as used.
            cache.set(key, '-', DEFAULT_NONCE_TIMEOUT)
            found = False
        else:
            found = True
        return found

    def cleanupNonces(self):
        # not necesary, keys will timeout
        return 0

    def cleanupAssociations(self):
        # not necesary, keys will timeout
        return 0
| agpl-3.0 |
krazybean/randomaas | lib/python2.6/site-packages/jinja2/testsuite/debug.py | 415 | 1935 | # -*- coding: utf-8 -*-
"""
jinja2.testsuite.debug
~~~~~~~~~~~~~~~~~~~~~~
Tests the debug system.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import unittest
from jinja2.testsuite import JinjaTestCase, filesystem_loader
from jinja2 import Environment, TemplateSyntaxError
env = Environment(loader=filesystem_loader)
class DebugTestCase(JinjaTestCase):
    """Checks that Jinja rewrites tracebacks to point at template source.

    NOTE: the regexes below match the literal source text of these test
    methods (e.g. the lambda line), so the method bodies must not be
    reformatted without updating the expected patterns.
    """

    def test_runtime_error(self):
        # A ZeroDivisionError raised inside a template callback must show
        # both the template frame and the original Python frame.
        def test():
            tmpl.render(fail=lambda: 1 / 0)
        tmpl = env.get_template('broken.html')
        self.assert_traceback_matches(test, r'''
  File ".*?broken.html", line 2, in (top-level template code|<module>)
    \{\{ fail\(\) \}\}
  File ".*?debug.pyc?", line \d+, in <lambda>
    tmpl\.render\(fail=lambda: 1 / 0\)
ZeroDivisionError: (int(eger)? )?division (or modulo )?by zero
''')

    def test_syntax_error(self):
        # XXX: the .*? is necessary for python3 which does not hide
        # some of the stack frames we don't want to show.  Not sure
        # what's up with that, but that is not that critical.  Should
        # be fixed though.
        self.assert_traceback_matches(lambda: env.get_template('syntaxerror.html'), r'''(?sm)
  File ".*?syntaxerror.html", line 4, in (template|<module>)
    \{% endif %\}.*?
(jinja2\.exceptions\.)?TemplateSyntaxError: Encountered unknown tag 'endif'. Jinja was looking for the following tags: 'endfor' or 'else'. The innermost block that needs to be closed is 'for'.
''')

    def test_regular_syntax_error(self):
        # A TemplateSyntaxError raised directly (not during compilation)
        # must still carry its line number into the formatted traceback.
        def test():
            raise TemplateSyntaxError('wtf', 42)
        self.assert_traceback_matches(test, r'''
  File ".*debug.pyc?", line \d+, in test
    raise TemplateSyntaxError\('wtf', 42\)
(jinja2\.exceptions\.)?TemplateSyntaxError: wtf
  line 42''')
def suite():
    """Collect the debug tests into a unittest suite."""
    # use a local name that does not shadow the function itself
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(DebugTestCase))
    return tests
| apache-2.0 |
mollstam/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/libxml2-2.9.1/python/tests/tstxpath.py | 37 | 1465 | #!/usr/bin/python -u
import sys
import libxml2
#memory debug specific
libxml2.debugMemory(1)  # enable libxml2 allocation tracking for the leak check below

# records which XPath function name foo() observed via the evaluation context
called = ""
def foo(ctx, x):
    """XPath extension function: records the name of the invoked XPath
    function (via the evaluation context) and returns x + 1."""
    global called
    # exercise access to the XPath evaluation contexts
    parser_ctxt = libxml2.xpathParserContext(_obj=ctx)
    eval_ctxt = parser_ctxt.context()
    called = eval_ctxt.function()
    return x + 1
def bar(ctxt, x):
    """XPath extension function: returns x + 2 formatted as a decimal string."""
    total = x + 2
    # %d truncates XPath's float numbers to an integer string
    return "%d" % total
# parse the fixture document and sanity-check a plain XPath query first
doc = libxml2.parseFile("tst.xml")
ctxt = doc.xpathNewContext()
res = ctxt.xpathEval("//*")
if len(res) != 2:
    print("xpath query: wrong node set size")
    sys.exit(1)
if res[0].name != "doc" or res[1].name != "foo":
    print("xpath query: wrong node set value")
    sys.exit(1)
# register the two Python extension functions (default namespace)
libxml2.registerXPathFunction(ctxt._o, "foo", None, foo)
libxml2.registerXPathFunction(ctxt._o, "bar", None, bar)
# hammer the numeric extension to catch refcount/marshalling bugs
i = 10000
while i > 0:
    res = ctxt.xpathEval("foo(1)")
    if res != 2:
        print("xpath extension failure")
        sys.exit(1)
    i = i - 1
# same for the string-returning extension
i = 10000
while i > 0:
    res = ctxt.xpathEval("bar(1)")
    if res != "3":
        # BUG FIX: the original never interpolated `res` into the message
        print("xpath extension failure got %s expecting '3'" % res)
        sys.exit(1)
    i = i - 1
doc.freeDoc()
ctxt.xpathFreeContext()
if called != "foo":
    print("xpath function: failed to access the context")
    print("xpath function: %s" % (called))
    sys.exit(1)
#memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
    print("OK")
else:
    print("Memory leak %d bytes" % (libxml2.debugMemory(1)))
    libxml2.dumpMemory()
| mit |
Chilledheart/chromium | tools/telemetry/telemetry/value/skip.py | 13 | 1784 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import value as value_module
class SkipValue(value_module.Value):
  """Value recorded when a page is skipped instead of being measured."""

  def __init__(self, page, reason, description=None):
    """A value representing a skipped page.

    Args:
      page: The skipped page object.
      reason: The string reason the page was skipped.
    """
    super(SkipValue, self).__init__(page, 'skip', '', True, description, None)
    self._reason = reason

  def __repr__(self):
    return 'SkipValue(%s, %s)' % (self.page.display_name, self._reason)

  @property
  def reason(self):
    return self._reason

  def GetBuildbotDataType(self, output_context):
    # Skips produce no buildbot output of any kind.
    return None

  def GetBuildbotValue(self):
    return None

  def GetChartAndTraceNameForPerPageResult(self):
    return None

  def GetRepresentativeNumber(self):
    return None

  def GetRepresentativeString(self):
    return None

  @staticmethod
  def GetJSONTypeName():
    return 'skip'

  def AsDict(self):
    result = super(SkipValue, self).AsDict()
    result['reason'] = self._reason
    return result

  @staticmethod
  def FromDict(value_dict, page_dict):
    kwargs = value_module.Value.GetConstructorKwArgs(value_dict, page_dict)
    # The SkipValue constructor supplies name/units itself; drop optional
    # keys that Value may have populated but SkipValue does not accept.
    for key in ('name', 'units'):
      del kwargs[key]
    kwargs.pop('important', None)
    kwargs.pop('tir_label', None)
    kwargs['reason'] = value_dict['reason']
    return SkipValue(**kwargs)

  @classmethod
  def MergeLikeValuesFromSamePage(cls, values):
    assert False, 'Should not be called.'

  @classmethod
  def MergeLikeValuesFromDifferentPages(cls, values):
    assert False, 'Should not be called.'
| bsd-3-clause |
buenajuan300/android_kernel_samsung_grandprimevelte | tools/perf/scripts/python/net_dropmonitor.py | 2669 | 1738 | # Monitor the system for dropped packets and proudce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# accumulated drop counts, keyed by str(location)
drop_log = {}
# sorted list of (address, symbol_name) tuples loaded from /proc/kallsyms
kallsyms = []
def get_kallsyms_table():
	"""Load /proc/kallsyms into the global, sorted (address, name) list."""
	global kallsyms
	try:
		# BUG FIX: use a context manager so the file handle is always
		# closed (the original leaked the open file object).
		with open("/proc/kallsyms", "r") as f:
			for line in f:
				fields = line.split()
				# fields: hex-address, type, symbol-name, ...
				kallsyms.append((int(fields[0], 16), fields[2]))
	except IOError:
		# /proc/kallsyms may be unreadable (non-Linux, restricted perms);
		# leave the table empty so get_sym() degrades gracefully.
		return
	kallsyms.sort()
def get_sym(sloc, symtab=None):
	"""Resolve an address to (symbol_name, offset) by binary search.

	sloc may be an int or a decimal string. symtab defaults to the
	module-level kallsyms table; passing an explicit sorted
	[(address, name), ...] list generalizes the lookup (and eases testing).
	Returns (None, 0) when the address precedes every known symbol.
	"""
	if symtab is None:
		symtab = kallsyms
	loc = int(sloc)

	# Invariant: symtab[i][0] <= loc for all 0 <= i <= start
	#            symtab[i][0] > loc for all end <= i < len(symtab)
	start, end = -1, len(symtab)
	while end != start + 1:
		pivot = (start + end) // 2
		if loc < symtab[pivot][0]:
			end = pivot
		else:
			start = pivot
	# Now (start == -1 or symtab[start][0] <= loc)
	# and (start == len(symtab) - 1 or loc < symtab[start + 1][0])
	if start >= 0:
		symloc, name = symtab[start]
		return (name, loc - symloc)
	else:
		return (None, 0)
def print_drop_table():
	"""Dump the accumulated drop counts, resolving addresses to symbols."""
	print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
	for i in drop_log.keys():
		(sym, off) = get_sym(i)
		# fall back to the raw address when the symbol is unknown
		if sym == None:
			sym = i
		print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
	"""Called by perf once before event processing starts."""
	print "Starting trace (Ctrl-C to dump results)"
def trace_end():
	"""Called by perf after the last event: load symbols, then report."""
	print "Gathering kallsyms data"
	get_kallsyms_table()
	print_drop_table()
# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
		   skbaddr, location, protocol):
	"""Per-event hook: tally one dropped packet against its drop location."""
	slocation = str(location)
	drop_log[slocation] = drop_log.get(slocation, 0) + 1
| gpl-2.0 |
dav1x/ansible | lib/ansible/modules/network/cumulus/_cl_ports.py | 70 | 7259 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Cumulus Networks <ce-ceng@cumulusnetworks.com>
#
# This file is part of Ansible
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cl_ports
version_added: "2.1"
author: "Cumulus Networks (@CumulusNetworks)"
short_description: Configure Cumulus Switch port attributes (ports.conf)
deprecated: Deprecated in 2.3. Use M(nclu) instead.
description:
- Set the initial port attribute defined in the Cumulus Linux ports.conf,
file. This module does not do any error checking at the moment. Be careful
to not include ports that do not exist on the switch. Carefully read the
original ports.conf file for any exceptions or limitations.
For more details go the Configure Switch Port Attribute Documentation at
U(http://docs.cumulusnetworks.com).
options:
speed_10g:
description:
- List of ports to run initial run at 10G.
speed_40g:
description:
- List of ports to run initial run at 40G.
speed_4_by_10g:
description:
- List of 40G ports that will be unganged to run as 4 10G ports.
speed_40g_div_4:
description:
- List of 10G ports that will be ganged to form a 40G port.
'''
EXAMPLES = '''
# Use cl_ports module to manage the switch attributes defined in the
# ports.conf file on Cumulus Linux
## Unganged port configuration on certain ports
- name: configure ports.conf setup
cl_ports:
speed_4_by_10g:
- swp1
- swp32
speed_40g:
- swp2-31
## Unganged port configuration on certain ports
- name: configure ports.conf setup
cl_ports:
speed_4_by_10g:
- swp1-3
- swp6
speed_40g:
- swp4-5
- swp7-32
'''
RETURN = '''
changed:
description: whether the interface was changed
returned: changed
type: bool
sample: True
msg:
description: human-readable report of success or failure
returned: always
type: string
sample: "interface bond0 config updated"
'''
PORTS_CONF = '/etc/cumulus/ports.conf'


def hash_existing_ports_conf(module):
    """Parse PORTS_CONF into module.ports_conf_hash as {port_num: speed}.

    Returns False when the file does not exist yet; on a read error the
    module fails via fail_json.
    """
    module.ports_conf_hash = {}
    if not os.path.exists(PORTS_CONF):
        return False
    try:
        # BUG FIX: read via a context manager so the file handle is always
        # closed (the original open().readlines() leaked it).
        with open(PORTS_CONF) as ports_file:
            existing_ports_conf = ports_file.readlines()
    except IOError:
        error_msg = get_exception()
        _msg = "Failed to open %s: %s" % (PORTS_CONF, error_msg)
        module.fail_json(msg=_msg)
        return  # for testing only should return on module.fail_json
    for _line in existing_ports_conf:
        # lines look like "<port>=<speed>", e.g. "1=40G"
        _m0 = re.match(r'^(\d+)=(\w+)', _line)
        if _m0:
            _portnum = int(_m0.group(1))
            _speed = _m0.group(2)
            module.ports_conf_hash[_portnum] = _speed
def generate_new_ports_conf_hash(module):
    """Build module.new_ports_hash as {port_num: speed_string} from the
    module parameters (port lists such as ['swp1-3', 'swp6'])."""
    new_ports_conf_hash = {}
    # maps ansible option names to the ports.conf speed strings
    convert_hash = {
        'speed_40g_div_4': '40G/4',
        'speed_4_by_10g': '4x10G',
        'speed_10g': '10G',
        'speed_40g': '40G'
    }
    for k in module.params.keys():
        port_range = module.params[k]
        port_setting = convert_hash[k]
        if port_range:
            # drop falsy entries such as '' or None
            port_range = [x for x in port_range if x]
            for port_str in port_range:
                port_range_str = port_str.replace('swp', '').split('-')
                if len(port_range_str) == 1:
                    # single port, e.g. 'swp6'
                    new_ports_conf_hash[int(port_range_str[0])] = \
                        port_setting
                else:
                    # BUG FIX: materialize the int conversion as a list;
                    # on Python 3 a map() object is not subscriptable.
                    int_range = [int(x) for x in port_range_str]
                    portnum_range = range(int_range[0], int_range[1] + 1)
                    for i in portnum_range:
                        new_ports_conf_hash[i] = port_setting
    module.new_ports_hash = new_ports_conf_hash
def compare_new_and_old_port_conf_hash(module):
    """Merge module.new_ports_hash into module.ports_conf_hash.

    Returns True when the merge changed anything, False when the config is
    already up to date; fails the module if the merge introduced unknown
    port numbers.
    """
    ports_conf_hash_copy = module.ports_conf_hash.copy()
    module.ports_conf_hash.update(module.new_ports_hash)
    port_num_length = len(module.ports_conf_hash.keys())
    orig_port_num_length = len(ports_conf_hash_copy.keys())
    if port_num_length != orig_port_num_length:
        # BUG FIX: corrected the "two few" typo in the user-facing message
        module.fail_json(msg="Port numbering is wrong. \
            Too many or too few ports configured")
        return False
    elif ports_conf_hash_copy == module.ports_conf_hash:
        return False
    return True
def make_copy_of_orig_ports_conf(module):
    """Save a one-time backup of ports.conf as ports.conf.orig.

    Does nothing if the backup already exists; fails the module when the
    copy cannot be written.
    """
    if os.path.exists(PORTS_CONF + '.orig'):
        return
    try:
        shutil.copyfile(PORTS_CONF, PORTS_CONF + '.orig')
    except IOError:
        error_msg = get_exception()
        _msg = "Failed to save the original %s: %s" % (PORTS_CONF, error_msg)
        module.fail_json(msg=_msg)
        return  # for testing only
def write_to_ports_conf(module):
    """
    use tempfile to first write out config in temp file
    then write to actual location. may help prevent file
    corruption. Ports.conf is a critical file for Cumulus.
    Don't want to corrupt this file under any circumstance.
    """
    # BUG FIX: open the temp file in text mode; the default 'w+b' makes the
    # str writes below fail on Python 3 (harmless on Python 2).
    temp = tempfile.NamedTemporaryFile(mode='w+')
    try:
        try:
            temp.write('# Managed By Ansible\n')
            for k in sorted(module.ports_conf_hash.keys()):
                port_setting = module.ports_conf_hash[k]
                _str = "%s=%s\n" % (k, port_setting)
                temp.write(_str)
            # flush buffered data so the copy below sees the full contents
            temp.flush()
            shutil.copyfile(temp.name, PORTS_CONF)
        except IOError:
            error_msg = get_exception()
            module.fail_json(
                msg="Failed to write to %s: %s" % (PORTS_CONF, error_msg))
    finally:
        temp.close()
def main():
    """Ansible entry point: reconcile ports.conf with the requested layout."""
    module = AnsibleModule(
        argument_spec=dict(
            speed_40g_div_4=dict(type='list'),
            speed_4_by_10g=dict(type='list'),
            speed_10g=dict(type='list'),
            speed_40g=dict(type='list')
        ),
        required_one_of=[['speed_40g_div_4',
                          'speed_4_by_10g',
                          'speed_10g',
                          'speed_40g']]
    )
    hash_existing_ports_conf(module)
    generate_new_ports_conf_hash(module)
    if not compare_new_and_old_port_conf_hash(module):
        # nothing to do: requested layout matches what is on disk
        module.exit_json(changed=False, msg='No change in /etc/ports.conf')
        return
    # back up the original once, then rewrite the file atomically
    make_copy_of_orig_ports_conf(module)
    write_to_ports_conf(module)
    module.exit_json(changed=True, msg="/etc/cumulus/ports.conf changed")
# import module snippets
from ansible.module_utils.basic import *
# from ansible.module_utils.urls import *
import os
import tempfile
import shutil
if __name__ == '__main__':
main()
| gpl-3.0 |
tafaRU/odoo | addons/payment_buckaroo/models/buckaroo.py | 102 | 8291 | # -*- coding: utf-'8' "-*-"
from hashlib import sha1
import logging
import urlparse
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment_buckaroo.controllers.main import BuckarooController
from openerp.osv import osv, fields
from openerp.tools.float_utils import float_compare
_logger = logging.getLogger(__name__)
class AcquirerBuckaroo(osv.Model):
    """payment.acquirer extension implementing the Buckaroo HTML checkout:
    URL selection, credentials columns, signature generation and form
    value construction."""
    _inherit = 'payment.acquirer'

    def _get_buckaroo_urls(self, cr, uid, environment, context=None):
        """ Buckaroo URLs
        """
        # 'prod' hits the live checkout; anything else uses Buckaroo's
        # test environment
        if environment == 'prod':
            return {
                'buckaroo_form_url': 'https://checkout.buckaroo.nl/html/',
            }
        else:
            return {
                'buckaroo_form_url': 'https://testcheckout.buckaroo.nl/html/',
            }

    def _get_providers(self, cr, uid, context=None):
        # register 'buckaroo' alongside the providers declared upstream
        providers = super(AcquirerBuckaroo, self)._get_providers(cr, uid, context=context)
        providers.append(['buckaroo', 'Buckaroo'])
        return providers

    _columns = {
        'brq_websitekey': fields.char('WebsiteKey', required_if_provider='buckaroo'),
        'brq_secretkey': fields.char('SecretKey', required_if_provider='buckaroo'),
    }

    def _buckaroo_generate_digital_sign(self, acquirer, inout, values):
        """ Generate the shasign for incoming or outgoing communications.

        :param browse acquirer: the payment.acquirer browse record. It should
                                have a shakey in shaky out
        :param string inout: 'in' (openerp contacting buckaroo) or 'out' (buckaroo
                             contacting openerp).
        :param dict values: transaction values

        :return string: shasign
        """
        assert inout in ('in', 'out')
        assert acquirer.provider == 'buckaroo'
        # fixed, ordered key list used for outgoing ('in') signatures
        keys = "add_returndata Brq_amount Brq_culture Brq_currency Brq_invoicenumber Brq_return Brq_returncancel Brq_returnerror Brq_returnreject brq_test Brq_websitekey".split()

        def get_value(key):
            if values.get(key):
                return values[key]
            return ''

        if inout == 'out':
            # the signature itself is never part of the signed payload
            if 'BRQ_SIGNATURE' in values:
                del values['BRQ_SIGNATURE']
            items = sorted((k.upper(), v) for k, v in values.items())
            sign = ''.join('%s=%s' % (k, v) for k, v in items)
        else:
            sign = ''.join('%s=%s' % (k,get_value(k)) for k in keys)
        #Add the pre-shared secret key at the end of the signature
        sign = sign + acquirer.brq_secretkey
        # NOTE(review): parse_qsl returns a list of tuples, which sha1()
        # cannot hash — this branch looks broken (sha1 expects bytes/str);
        # confirm the intended conversion before relying on it.
        if isinstance(sign, str):
            sign = urlparse.parse_qsl(sign)
        shasign = sha1(sign).hexdigest()
        return shasign

    def buckaroo_form_generate_values(self, cr, uid, id, partner_values, tx_values, context=None):
        """Build the POST values for the Buckaroo checkout form, including
        return/cancel/error/reject URLs and the request signature."""
        base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
        acquirer = self.browse(cr, uid, id, context=context)
        buckaroo_tx_values = dict(tx_values)
        buckaroo_tx_values.update({
            'Brq_websitekey': acquirer.brq_websitekey,
            'Brq_amount': tx_values['amount'],
            'Brq_currency': tx_values['currency'] and tx_values['currency'].name or '',
            'Brq_invoicenumber': tx_values['reference'],
            'brq_test' : True,
            'Brq_return': '%s' % urlparse.urljoin(base_url, BuckarooController._return_url),
            'Brq_returncancel': '%s' % urlparse.urljoin(base_url, BuckarooController._cancel_url),
            'Brq_returnerror': '%s' % urlparse.urljoin(base_url, BuckarooController._exception_url),
            'Brq_returnreject': '%s' % urlparse.urljoin(base_url, BuckarooController._reject_url),
            'Brq_culture': 'en-US',
        })
        if buckaroo_tx_values.get('return_url'):
            buckaroo_tx_values['add_returndata'] = {'return_url': '%s' % buckaroo_tx_values.pop('return_url')}
        else:
            buckaroo_tx_values['add_returndata'] = ''
        buckaroo_tx_values['Brq_signature'] = self._buckaroo_generate_digital_sign(acquirer, 'in', buckaroo_tx_values)
        return partner_values, buckaroo_tx_values

    def buckaroo_get_form_action_url(self, cr, uid, id, context=None):
        # form posts to the environment-appropriate checkout URL
        acquirer = self.browse(cr, uid, id, context=context)
        return self._get_buckaroo_urls(cr, uid, acquirer.environment, context=context)['buckaroo_form_url']
class TxBuckaroo(osv.Model):
    """payment.transaction extension handling Buckaroo form feedback:
    locating the transaction, validating parameters and mapping Buckaroo
    status codes onto transaction states."""
    _inherit = 'payment.transaction'

    # buckaroo status
    _buckaroo_valid_tx_status = [190]
    _buckaroo_pending_tx_status = [790, 791, 792, 793]
    _buckaroo_cancel_tx_status = [890, 891]
    _buckaroo_error_tx_status = [490, 491, 492]
    _buckaroo_reject_tx_status = [690]

    _columns = {
        'buckaroo_txnid': fields.char('Transaction ID'),
    }

    # --------------------------------------------------
    # FORM RELATED METHODS
    # --------------------------------------------------

    def _buckaroo_form_get_tx_from_data(self, cr, uid, data, context=None):
        """ Given a data dict coming from buckaroo, verify it and find the related
        transaction record. """
        reference, pay_id, shasign = data.get('BRQ_INVOICENUMBER'), data.get('BRQ_PAYMENT'), data.get('BRQ_SIGNATURE')
        if not reference or not pay_id or not shasign:
            error_msg = 'Buckaroo: received data with missing reference (%s) or pay_id (%s) or shashign (%s)' % (reference, pay_id, shasign)
            _logger.error(error_msg)
            raise ValidationError(error_msg)
        # the invoice number must map to exactly one known transaction
        tx_ids = self.search(cr, uid, [('reference', '=', reference)], context=context)
        if not tx_ids or len(tx_ids) > 1:
            error_msg = 'Buckaroo: received data for reference %s' % (reference)
            if not tx_ids:
                error_msg += '; no order found'
            else:
                error_msg += '; multiple order found'
            _logger.error(error_msg)
            raise ValidationError(error_msg)
        tx = self.pool['payment.transaction'].browse(cr, uid, tx_ids[0], context=context)
        #verify shasign
        shasign_check = self.pool['payment.acquirer']._buckaroo_generate_digital_sign(tx.acquirer_id, 'out' ,data)
        if shasign_check.upper() != shasign.upper():
            error_msg = 'Buckaroo: invalid shasign, received %s, computed %s, for data %s' % (shasign, shasign_check, data)
            _logger.error(error_msg)
            raise ValidationError(error_msg)
        return tx

    def _buckaroo_form_get_invalid_parameters(self, cr, uid, tx, data, context=None):
        """Return a list of (name, received, expected) tuples for every
        feedback field that disagrees with the stored transaction."""
        invalid_parameters = []
        if tx.acquirer_reference and data.get('BRQ_TRANSACTIONS') != tx.acquirer_reference:
            invalid_parameters.append(('Transaction Id', data.get('BRQ_TRANSACTIONS'), tx.acquirer_reference))
        # check what is buyed
        if float_compare(float(data.get('BRQ_AMOUNT', '0.0')), tx.amount, 2) != 0:
            invalid_parameters.append(('Amount', data.get('BRQ_AMOUNT'), '%.2f' % tx.amount))
        if data.get('BRQ_CURRENCY') != tx.currency_id.name:
            invalid_parameters.append(('Currency', data.get('BRQ_CURRENCY'), tx.currency_id.name))
        return invalid_parameters

    def _buckaroo_form_validate(self, cr, uid, tx, data, context=None):
        """Map the Buckaroo status code in the feedback onto the
        transaction state (done/pending/cancel, anything else -> error)."""
        status_code = int(data.get('BRQ_STATUSCODE','0'))
        if status_code in self._buckaroo_valid_tx_status:
            tx.write({
                'state': 'done',
                'buckaroo_txnid': data.get('BRQ_TRANSACTIONS'),
            })
            return True
        elif status_code in self._buckaroo_pending_tx_status:
            tx.write({
                'state': 'pending',
                'buckaroo_txnid': data.get('BRQ_TRANSACTIONS'),
            })
            return True
        elif status_code in self._buckaroo_cancel_tx_status:
            tx.write({
                'state': 'cancel',
                'buckaroo_txnid': data.get('BRQ_TRANSACTIONS'),
            })
            return True
        else:
            # error and reject codes (and anything unknown) end up here
            error = 'Buckaroo: feedback error'
            _logger.info(error)
            tx.write({
                'state': 'error',
                'state_message': error,
                'buckaroo_txnid': data.get('BRQ_TRANSACTIONS'),
            })
            return False
| agpl-3.0 |
daStrauss/subsurface | src/expts/spfTest.py | 1 | 1422 | '''
Created on Sep 4, 2012
Copyright © 2013
The Board of Trustees of The Leland Stanford Junior University.
All Rights Reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: dstrauss
'''
import numpy as np
D = {'solverType':'splitField', 'flavor':'TE', 'numRuns':3, 'expt':'spfTest'}


def getMyVars(parseNumber, D):
    '''routine to return the parameters to test at the current iteration.'''
    D['numProcs'] = 16
    # background conductivity per run index; run 2 deliberately leaves
    # bkgSig unset (it was commented out in earlier revisions)
    sigma_by_run = {0: 0.005, 1: 0.0048}
    if parseNumber in sigma_by_run:
        D['bkgSig'] = sigma_by_run[parseNumber]
        D['bkgNo'] = 100
    elif parseNumber == 2:
        D['bkgNo'] = 100
    return D
| apache-2.0 |
Hackplayers/Empire-mod-Hackplayers | lib/modules/powershell/situational_awareness/network/bloodhound.py | 10 | 6498 | from lib.common import helpers
class Module:
    """Empire module wrapper that builds a PowerShell Invoke-BloodHound
    command line from the configured options and the bundled script."""

    def __init__(self, mainMenu, params=[]):
        # NOTE(review): the mutable default `params=[]` is shared across
        # calls; it is only read here, so it is harmless in practice.
        self.info = {
            'Name': 'Invoke-BloodHound',
            'Author': ['@harmj0y', '@_wald0', '@cptjesus'],
            'Description': ('Execute BloodHound data collection.'),
            'Background' : True,
            'OutputExtension' : None,
            'NeedsAdmin' : False,
            'OpsecSafe' : False,
            'Language' : 'powershell',
            'MinLanguageVersion' : '2',
            'Comments': [
                'https://bit.ly/getbloodhound'
            ]
        }
        # any options needed by the module, settable during runtime
        self.options = {
            # format:
            #   value_name : {description, required, default_value}
            'Agent' : {
                'Description'   :   'Agent to run module on.',
                'Required'      :   True,
                'Value'         :   ''
            },
            'ComputerName' : {
                'Description'   :   'Array of one or more computers to enumerate',
                'Required'      :   False,
                'Value'         :   ''
            },
            'ComputerADSpath' : {
                'Description'   :   'The LDAP source to search through for computers, e.g. "LDAP://OU=secret,DC=testlab,DC=local"',
                'Required'      :   False,
                'Value'         :   ''
            },
            'UserADSPath' : {
                'Description'   :   'The LDAP source to search through for users/groups, e.g. "LDAP://OU=secret,DC=testlab,DC=local"',
                'Required'      :   False,
                'Value'         :   ''
            },
            'Domain' : {
                'Description'   :   'The domain to use for the query, defaults to the current domain.',
                'Required'      :   False,
                'Value'         :   ''
            },
            'DomainController' : {
                'Description'   :   'Domain controller to reflect LDAP queries through.',
                'Required'      :   False,
                'Value'         :   ''
            },
            'CollectionMethod' : {
                'Description'   :   "The method to collect data. 'Group', 'ComputerOnly', 'LocalGroup', 'GPOLocalGroup', 'Session', 'LoggedOn', 'Trusts, 'Stealth', or 'Default'.",
                'Required'      :   True,
                'Value'         :   'Default'
            },
            'SearchForest' : {
                'Description'   :   'Switch. Search all domains in the forest.',
                'Required'      :   False,
                'Value'         :   ''
            },
            'CSVFolder' : {
                'Description'   :   'The CSV folder to use for output, defaults to the current folder location.',
                'Required'      :   False,
                'Value'         :   '$(Get-Location)'
            },
            'CSVPrefix' : {
                'Description'   :   'A prefix for all CSV files.',
                'Required'      :   False,
                'Value'         :   ''
            },
            'URI' : {
                'Description'   :   'The BloodHound neo4j URL location (http://host:port/)',
                'Required'      :   False,
                'Value'         :   ''
            },
            'UserPass' : {
                'Description'   :   'The "user:password" for the BloodHound neo4j instance',
                'Required'      :   False,
                'Value'         :   ''
            },
            'GlobalCatalog' : {
                'Description'   :   'The global catalog location to resolve user memberships from.',
                'Required'      :   False,
                'Value'         :   ''
            },
            'SkipGCDeconfliction' : {
                'Description'   :   'Switch. Skip global catalog enumeration for session deconfliction',
                'Required'      :   False,
                'Value'         :   ''
            },
            'Threads' : {
                'Description'   :   'The maximum concurrent threads to execute.',
                'Required'      :   True,
                'Value'         :   '20'
            },
            'Throttle' : {
                'Description'   :   'The number of cypher queries to queue up for neo4j RESTful API ingestion.',
                'Required'      :   True,
                'Value'         :   '1000'
            }
        }

        # save off a copy of the mainMenu object to access external functionality
        #   like listeners/agent handlers/etc.
        self.mainMenu = mainMenu

        for param in params:
            # parameter format is [Name, Value]
            option, value = param
            if option in self.options:
                self.options[option]['Value'] = value

    def generate(self, obfuscate=False, obfuscationCommand=""):
        """Return the full PowerShell payload: the BloodHound source plus an
        Invoke-BloodHound invocation built from the option values."""
        moduleName = self.info["Name"]

        # read in the common module source code
        moduleSource = self.mainMenu.installPath + "/data/module_source/situational_awareness/network/BloodHound.ps1"
        if obfuscate:
            helpers.obfuscate_module(moduleSource=moduleSource, obfuscationCommand=obfuscationCommand)
            moduleSource = moduleSource.replace("module_source", "obfuscated_module_source")

        # TODO: just CSV output for this bloodhound version? no output to file?
        try:
            f = open(moduleSource, 'r')
        except:
            print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
            return ""

        moduleCode = f.read()
        f.close()

        script = "%s\n" %(moduleCode)
        scriptEnd = moduleName

        # append every non-empty option as a -Name Value (or bare switch)
        for option,values in self.options.iteritems():
            if option.lower() != "agent":
                if values['Value'] and values['Value'] != '':
                    if values['Value'].lower() == "true":
                        # if we're just adding a switch
                        scriptEnd += " -" + str(option)
                    else:
                        scriptEnd += " -" + str(option) + " " + str(values['Value'])

        scriptEnd += ' | Out-String | %{$_ + \"`n\"};"`n'+str(moduleName)+' completed!"'
        if obfuscate:
            scriptEnd = helpers.obfuscate(self.mainMenu.installPath, psScript=scriptEnd, obfuscationCommand=obfuscationCommand)
        script += scriptEnd
        return script
| bsd-3-clause |
hsiaoyi0504/scikit-learn | sklearn/linear_model/__init__.py | 270 | 3096 | """
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV,
LassoLarsIC)
from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from .logistic import (LogisticRegression, LogisticRegressionCV,
logistic_regression_path)
from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV)
from .passive_aggressive import PassiveAggressiveClassifier
from .passive_aggressive import PassiveAggressiveRegressor
from .perceptron import Perceptron
from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression,
lasso_stability_path)
from .ransac import RANSACRegressor
from .theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'RandomizedLasso',
'RandomizedLogisticRegression',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lasso_path',
'lasso_stability_path',
'logistic_regression_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor']
| bsd-3-clause |
alshedivat/tensorflow | tensorflow/contrib/estimator/python/estimator/boosted_trees.py | 19 | 1341 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""boosted_trees python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.contrib.estimator.python.estimator import boosted_trees
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
# Re-export every non-dunder name so `from ... import *` keeps working
# through this deprecated shim module.
boosted_trees.__all__ = [
    s for s in dir(boosted_trees) if not s.startswith('__')
]
from tensorflow_estimator.contrib.estimator.python.estimator.boosted_trees import *
| apache-2.0 |
loopCM/chromium | tools/telemetry/telemetry/page/page_test_unittest.py | 33 | 4010 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from telemetry.page import page as page_module
from telemetry.page import page_test
from telemetry.page.actions import all_page_actions
from telemetry.page.actions import page_action
def _CreatePage(test_filename):
  """Build a Page for a file:// URL under the unittest_data directory."""
  file_url = 'file:///' + os.path.join('..', '..', 'unittest_data',
                                       test_filename)
  return page_module.Page(file_url, None,
                          base_dir=os.path.dirname(__file__))
class DoNothingPageTest(page_test.PageTest):
  """A PageTest whose measurement phase is intentionally a no-op; it exists
  so tests can exercise action dispatch in isolation."""

  def __init__(self, action_name_to_run=''):
    super(DoNothingPageTest, self).__init__('DoNothing', action_name_to_run)

  def DoNothing(self, page, tab, results):
    pass
class AppendAction(page_action.PageAction):
  """Test action that appends True to the list bound as its 'var' param."""

  def RunAction(self, page, tab, previous_action):
    self.var.append(True)
class WrapAppendAction(page_action.PageAction):
  """Test action that runs the previous action itself, bracketing it with
  'before'/'after' markers in its 'var' list."""

  def RunsPreviousAction(self):
    # Tells the framework this action drives the previous one directly.
    return True

  def RunAction(self, page, tab, previous_action):
    self.var.append('before')
    previous_action.WillRunAction(page, tab)
    previous_action.RunAction(page, tab, None)
    self.var.append('after')
class PageTestUnitTest(unittest.TestCase):
  """Covers PageTest action dispatch: direct actions, references to named
  actions, repeat counts, and the previous-action wrapping protocol."""

  def setUp(self):
    super(PageTestUnitTest, self).setUp()
    # Register the two synthetic actions used throughout these tests.
    all_page_actions.RegisterClassForTest('append', AppendAction)
    all_page_actions.RegisterClassForTest('wrap_append', WrapAppendAction)
    self._page_test = DoNothingPageTest('action_to_run')
    self._page = _CreatePage('blank.html')

  def testRunActions(self):
    # A plain action attached to the page runs exactly once.
    action_called = []
    action_to_run = [
      { 'action': 'append', 'var': action_called }
    ]
    setattr(self._page, 'action_to_run', action_to_run)
    self._page_test.Run(None, self._page, None, None)
    self.assertTrue(action_called)

  def testPreviousAction(self):
    # wrap_append runs the action listed before it, surrounding its effect.
    action_list = []
    action_to_run = [
      { 'action': 'append', 'var': action_list },
      { 'action': 'wrap_append', 'var': action_list }
    ]
    setattr(self._page, 'action_to_run', action_to_run)
    self._page_test.Run(None, self._page, None, None)
    self.assertEqual(action_list, ['before', True, 'after'])

  def testReferenceAction(self):
    # Actions may be named indirectly via page attributes.
    action_list = []
    action_to_run = [
      { 'action': 'referenced_action_1' },
      { 'action': 'referenced_action_2' }
    ]
    referenced_action_1 = { 'action': 'append', 'var': action_list }
    referenced_action_2 = { 'action': 'wrap_append', 'var': action_list }
    setattr(self._page, 'action_to_run', action_to_run)
    setattr(self._page, 'referenced_action_1', referenced_action_1)
    setattr(self._page, 'referenced_action_2', referenced_action_2)
    self._page_test.Run(None, self._page, None, None)
    self.assertEqual(action_list, ['before', True, 'after'])

  def testRepeatAction(self):
    action_list = []
    action_to_run = { 'action': 'append', 'var': action_list, 'repeat': 10 }
    setattr(self._page, 'action_to_run', action_to_run)
    self._page_test.Run(None, self._page, None, None)
    self.assertEqual(len(action_list), 10)

  def testRepeatReferenceAction(self):
    # 'repeat' on a reference repeats the whole referenced sequence.
    action_list = []
    action_to_run = { 'action': 'referenced_action', 'repeat': 2 }
    referenced_action = [
      { 'action': 'append', 'var': action_list },
      { 'action': 'wrap_append', 'var': action_list }
    ]
    setattr(self._page, 'action_to_run', action_to_run)
    setattr(self._page, 'referenced_action', referenced_action)
    self._page_test.Run(None, self._page, None, None)
    self.assertEqual(action_list,
                     ['before', True, 'after', 'before', True, 'after'])

  def testRepeatPreviousActionFails(self):
    # An action that consumes the previous action cannot itself be repeated.
    action_list = []
    action_to_run = { 'action': 'wrap_append', 'var': action_list, 'repeat': 2 }
    setattr(self._page, 'action_to_run', action_to_run)
    self.assertRaises(page_action.PageActionFailed,
                      lambda: self._page_test.Run(None, self._page, None, None))
| bsd-3-clause |
amyvmiwei/kbengine | kbe/res/scripts/common/Lib/test/test_isinstance.py | 90 | 10171 | # Tests some corner cases with isinstance() and issubclass(). While these
# tests use new style classes and properties, they actually do whitebox
# testing of error conditions uncovered when using extension types.
import unittest
from test import support
import sys
class TestIsInstanceExceptions(unittest.TestCase):
    """Whitebox tests for exception handling inside PyObject_IsInstance.

    The nested helper classes deliberately raise from __bases__/__class__
    property getters to drive isinstance() down specific C error paths, so
    their exact structure must be preserved.
    """
    # Test to make sure that an AttributeError when accessing the instance's
    # class's bases is masked. This was actually a bug in Python 2.2 and
    # 2.2.1 where the exception wasn't caught but it also wasn't being cleared
    # (leading to an "undetected error" in the debug build). Set up is,
    # isinstance(inst, cls) where:
    #
    # - cls isn't a type, or a tuple
    # - cls has a __bases__ attribute
    # - inst has a __class__ attribute
    # - inst.__class__ has no __bases__ attribute
    #
    # Sounds complicated, I know, but this mimics a situation where an
    # extension type raises an AttributeError when its __bases__ attribute is
    # gotten. In that case, isinstance() should return False.
    def test_class_has_no_bases(self):
        class I(object):
            def getclass(self):
                # This must return an object that has no __bases__ attribute
                return None
            __class__ = property(getclass)
        class C(object):
            def getbases(self):
                return ()
            __bases__ = property(getbases)
        self.assertEqual(False, isinstance(I(), C()))
    # Like above except that inst.__class__.__bases__ raises an exception
    # other than AttributeError
    def test_bases_raises_other_than_attribute_error(self):
        class E(object):
            def getbases(self):
                raise RuntimeError
            __bases__ = property(getbases)
        class I(object):
            def getclass(self):
                return E()
            __class__ = property(getclass)
        class C(object):
            def getbases(self):
                return ()
            __bases__ = property(getbases)
        self.assertRaises(RuntimeError, isinstance, I(), C())
    # Here's a situation where getattr(cls, '__bases__') raises an exception.
    # If that exception is not AttributeError, it should not get masked
    def test_dont_mask_non_attribute_error(self):
        class I: pass
        class C(object):
            def getbases(self):
                raise RuntimeError
            __bases__ = property(getbases)
        self.assertRaises(RuntimeError, isinstance, I(), C())
    # Like above, except that getattr(cls, '__bases__') raises an
    # AttributeError, which /should/ get masked as a TypeError
    def test_mask_attribute_error(self):
        class I: pass
        class C(object):
            def getbases(self):
                raise AttributeError
            __bases__ = property(getbases)
        self.assertRaises(TypeError, isinstance, I(), C())
    # check that we don't mask non AttributeErrors
    # see: http://bugs.python.org/issue1574217
    def test_isinstance_dont_mask_non_attribute_error(self):
        class C(object):
            def getclass(self):
                raise RuntimeError
            __class__ = property(getclass)
        c = C()
        self.assertRaises(RuntimeError, isinstance, c, bool)
        # test another code path
        class D: pass
        self.assertRaises(RuntimeError, isinstance, c, D)
# These tests are similar to above, but tickle certain code paths in
# issubclass() instead of isinstance() -- really PyObject_IsSubclass()
# vs. PyObject_IsInstance().
class TestIsSubclassExceptions(unittest.TestCase):
    """Whitebox tests for exception handling inside PyObject_IsSubclass."""
    def test_dont_mask_non_attribute_error(self):
        class C(object):
            def getbases(self):
                raise RuntimeError
            __bases__ = property(getbases)
        class S(C): pass
        self.assertRaises(RuntimeError, issubclass, C(), S())
    def test_mask_attribute_error(self):
        # An AttributeError from __bases__ must surface as TypeError.
        class C(object):
            def getbases(self):
                raise AttributeError
            __bases__ = property(getbases)
        class S(C): pass
        self.assertRaises(TypeError, issubclass, C(), S())
    # Like above, but test the second branch, where the __bases__ of the
    # second arg (the cls arg) is tested. This means the first arg must
    # return a valid __bases__, and it's okay for it to be a normal --
    # unrelated by inheritance -- class.
    def test_dont_mask_non_attribute_error_in_cls_arg(self):
        class B: pass
        class C(object):
            def getbases(self):
                raise RuntimeError
            __bases__ = property(getbases)
        self.assertRaises(RuntimeError, issubclass, B, C())
    def test_mask_attribute_error_in_cls_arg(self):
        class B: pass
        class C(object):
            def getbases(self):
                raise AttributeError
            __bases__ = property(getbases)
        self.assertRaises(TypeError, issubclass, B, C())
# meta classes for creating abstract classes and instances
class AbstractClass(object):
    """Stand-in "class": an object whose __bases__ is chosen at creation.

    isinstance()/issubclass() only require a __bases__ attribute on their
    class argument, so instances of this type can pose as classes.
    """

    def __init__(self, bases):
        # Tuple of (pseudo-)base classes reported through __bases__.
        self.bases = bases

    def getbases(self):
        return self.bases

    @property
    def __bases__(self):
        return self.getbases()

    def __call__(self):
        # "Instantiating" the pseudo-class yields a pseudo-instance whose
        # __class__ points back at this object.
        return AbstractInstance(self)
class AbstractInstance(object):
    """Pseudo-instance that reports a chosen object as its __class__."""

    def __init__(self, klass):
        # Object to pose as this instance's class.
        self.klass = klass

    def getclass(self):
        return self.klass

    @property
    def __class__(self):
        return self.getclass()
# abstract classes
# Module-level fixtures: a fake base "class" and a fake subclass of it,
# built from AbstractClass so isinstance/issubclass walk their __bases__.
AbstractSuper = AbstractClass(bases=())
AbstractChild = AbstractClass(bases=(AbstractSuper,))
# normal classes
class Super:
    """Plain base-class fixture for the isinstance/issubclass matrix."""
    pass
class Child(Super):
    """Plain subclass of Super."""
    pass
# new-style classes
class NewSuper(object):
    """Explicit new-style base-class fixture."""
    pass
class NewChild(NewSuper):
    """New-style subclass of NewSuper."""
    pass
class TestIsInstanceIsSubclass(unittest.TestCase):
    """Exhaustive normal/abstract combinations for isinstance/issubclass."""
    # Tests to ensure that isinstance and issubclass work on abstract
    # classes and instances. Before the 2.2 release, TypeErrors were
    # raised when boolean values should have been returned. The bug was
    # triggered by mixing 'normal' classes and instances were with
    # 'abstract' classes and instances. This case tries to test all
    # combinations.
    def test_isinstance_normal(self):
        # normal instances
        self.assertEqual(True, isinstance(Super(), Super))
        self.assertEqual(False, isinstance(Super(), Child))
        self.assertEqual(False, isinstance(Super(), AbstractSuper))
        self.assertEqual(False, isinstance(Super(), AbstractChild))
        self.assertEqual(True, isinstance(Child(), Super))
        self.assertEqual(False, isinstance(Child(), AbstractSuper))
    def test_isinstance_abstract(self):
        # abstract instances
        self.assertEqual(True, isinstance(AbstractSuper(), AbstractSuper))
        self.assertEqual(False, isinstance(AbstractSuper(), AbstractChild))
        self.assertEqual(False, isinstance(AbstractSuper(), Super))
        self.assertEqual(False, isinstance(AbstractSuper(), Child))
        self.assertEqual(True, isinstance(AbstractChild(), AbstractChild))
        self.assertEqual(True, isinstance(AbstractChild(), AbstractSuper))
        self.assertEqual(False, isinstance(AbstractChild(), Super))
        self.assertEqual(False, isinstance(AbstractChild(), Child))
    def test_subclass_normal(self):
        # normal classes
        self.assertEqual(True, issubclass(Super, Super))
        self.assertEqual(False, issubclass(Super, AbstractSuper))
        self.assertEqual(False, issubclass(Super, Child))
        self.assertEqual(True, issubclass(Child, Child))
        self.assertEqual(True, issubclass(Child, Super))
        self.assertEqual(False, issubclass(Child, AbstractSuper))
    def test_subclass_abstract(self):
        # abstract classes
        self.assertEqual(True, issubclass(AbstractSuper, AbstractSuper))
        self.assertEqual(False, issubclass(AbstractSuper, AbstractChild))
        self.assertEqual(False, issubclass(AbstractSuper, Child))
        self.assertEqual(True, issubclass(AbstractChild, AbstractChild))
        self.assertEqual(True, issubclass(AbstractChild, AbstractSuper))
        self.assertEqual(False, issubclass(AbstractChild, Super))
        self.assertEqual(False, issubclass(AbstractChild, Child))
    def test_subclass_tuple(self):
        # test with a tuple as the second argument classes
        self.assertEqual(True, issubclass(Child, (Child,)))
        self.assertEqual(True, issubclass(Child, (Super,)))
        self.assertEqual(False, issubclass(Super, (Child,)))
        self.assertEqual(True, issubclass(Super, (Child, Super)))
        self.assertEqual(False, issubclass(Child, ()))
        self.assertEqual(True, issubclass(Super, (Child, (Super,))))
        self.assertEqual(True, issubclass(NewChild, (NewChild,)))
        self.assertEqual(True, issubclass(NewChild, (NewSuper,)))
        self.assertEqual(False, issubclass(NewSuper, (NewChild,)))
        self.assertEqual(True, issubclass(NewSuper, (NewChild, NewSuper)))
        self.assertEqual(False, issubclass(NewChild, ()))
        self.assertEqual(True, issubclass(NewSuper, (NewChild, (NewSuper,))))
        self.assertEqual(True, issubclass(int, (int, (float, int))))
        self.assertEqual(True, issubclass(str, (str, (Child, NewChild, str))))
    def test_subclass_recursion_limit(self):
        # make sure that issubclass raises RuntimeError before the C stack is
        # blown
        self.assertRaises(RuntimeError, blowstack, issubclass, str, str)
    def test_isinstance_recursion_limit(self):
        # make sure that isinstance raises RuntimeError before the C stack is
        # blown
        self.assertRaises(RuntimeError, blowstack, isinstance, '', str)
def blowstack(fxn, arg, compare_to):
    """Call fxn(arg, <deeply nested tuple>) to force a recursion error.

    Builds a tuple nested deeper than the interpreter's recursion limit so
    that isinstance()/issubclass() must raise RuntimeError (RecursionError
    on Python 3.5+) while unwinding it, instead of blowing the C stack.
    """
    nested = (compare_to,)
    for _ in range(sys.getrecursionlimit() + 5):
        nested = (nested,)
    fxn(arg, nested)
def test_main():
    """Entry point used by the regrtest framework to run all test cases."""
    support.run_unittest(
        TestIsInstanceExceptions,
        TestIsSubclassExceptions,
        TestIsInstanceIsSubclass
    )
if __name__ == '__main__':
    test_main()
| lgpl-3.0 |
WillisXChen/django-oscar | tests/integration/basket/view_tests.py | 13 | 1792 | from django.contrib.messages import get_messages
from django.test import RequestFactory, TestCase
from django.utils import six
from oscar.apps.basket import views
from oscar.test.factories import BasketFactory, VoucherFactory
from oscar.test.utils import RequestFactory
class TestVoucherAddView(TestCase):
    """Tests for oscar's VoucherAddView (attach a voucher to a basket).

    Note: RequestFactory here is oscar.test.utils.RequestFactory (the later
    import shadows django.test's), which accepts the ``basket`` kwarg.
    """
    def test_get(self):
        # GET is not a valid method for this view; it redirects (302).
        request = RequestFactory().get('/')
        view = views.VoucherAddView.as_view()
        response = view(request)
        self.assertEqual(response.status_code, 302)
    def _get_voucher_message(self, request):
        # Flatten the django.contrib.messages attached to the request into a
        # single newline-separated string (used for assertion failure output).
        return '\n'.join(six.text_type(m.message) for m in get_messages(request))
    def test_post_valid(self):
        basket = BasketFactory()
        voucher = VoucherFactory()
        self.assertTrue(voucher.is_active())
        data = {
            'code': voucher.code
        }
        request = RequestFactory().post('/', data=data, basket=basket)
        view = views.VoucherAddView.as_view()
        response = view(request)
        self.assertEqual(response.status_code, 302)
        # Re-fetch the voucher to observe the updated usage counter.
        voucher = voucher.__class__.objects.get(pk=voucher.pk)
        self.assertEqual(voucher.num_basket_additions, 1, msg=self._get_voucher_message(request))
class TestVoucherRemoveView(TestCase):
    """Tests for oscar's VoucherRemoveView (detach a voucher from a basket)."""
    def test_post_valid(self):
        basket = BasketFactory()
        voucher = VoucherFactory()
        basket.vouchers.add(voucher)
        data = {
            'code': voucher.code
        }
        request = RequestFactory().post('/', data=data, basket=basket)
        view = views.VoucherRemoveView.as_view()
        response = view(request, pk=voucher.pk)
        self.assertEqual(response.status_code, 302)
        voucher = voucher.__class__.objects.get(pk=voucher.pk)
        # NOTE(review): the voucher was attached via basket.vouchers.add(),
        # not the add view, so removal decrements the counter from 0 to -1
        # here — confirm this is the intended fixture behaviour.
        self.assertEqual(voucher.num_basket_additions, -1)
| bsd-3-clause |
NickRuiz/wikitrans-pootle | external_apps/profiles/urls.py | 6 | 1644 | """
URLConf for Django user profile management.
Recommended usage is to use a call to ``include()`` in your project's
root URLConf to include this URLConf for any URL beginning with
'/profiles/'.
If the default behavior of the profile views is acceptable to you,
simply use a line like this in your root URLConf to set up the default
URLs for profiles::
(r'^profiles/', include('profiles.urls')),
But if you'd like to customize the behavior (e.g., by passing extra
arguments to the various views) or split up the URLs, feel free to set
up your own URL patterns for these views instead. If you do, it's a
good idea to keep the name ``profiles_profile_detail`` for the pattern
which points to the ``profile_detail`` view, since several views use
``reverse()`` with that name to generate a default post-submission
redirect. If you don't use that name, remember to explicitly pass
``success_url`` to those views.
"""
from django.conf.urls.defaults import *
from profiles import views
# URL routes for the profiles app. The detail pattern keeps the name
# 'profiles_profile_detail' because other views reverse() it for their
# default post-submission redirect (see module docstring above).
urlpatterns = patterns('',
                       url(r'^create/?$',
                           views.create_profile,
                           name='profiles_create_profile'),
                       url(r'^edit/?$',
                           views.edit_profile,
                           name='profiles_edit_profile'),
                       url(r'^(?P<username>[^/]+)/?$',
                           views.profile_detail,
                           name='profiles_profile_detail'),
                       url(r'^$',
                           views.profile_list,
                           name='profiles_profile_list'),
                       )
| gpl-2.0 |
lmazuel/ansible | lib/ansible/modules/network/nxos/nxos_vtp_domain.py | 46 | 5924 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module release metadata consumed by ansible-doc and the plugin loader.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_vtp_domain
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages VTP domain configuration.
description:
- Manages VTP domain configuration.
author:
- Gabriele Gerbino (@GGabriele)
notes:
- VTP feature must be active on the device to use this module.
- This module is used to manage only VTP domain names.
- VTP domain names are case-sensible.
- If it's never been configured before, VTP version is set to 1 by default.
Otherwise, it leaves the previous configured version untouched.
Use M(nxos_vtp_version) to change it.
- Use this in combination with M(nxos_vtp_password) and M(nxos_vtp_version)
to fully manage VTP operations.
options:
domain:
description:
- VTP domain name.
required: true
'''
EXAMPLES = '''
# ENSURE VTP DOMAIN IS CONFIGURED
- nxos_vtp_domain:
domain: ntc
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"domain": "ntc"}
existing:
description:
- k/v pairs of existing vtp domain
returned: always
type: dict
sample: {"domain": "testing", "version": "2", "vtp_password": "\"}
end_state:
description: k/v pairs of vtp domain after module execution
returned: always
type: dict
sample: {"domain": "ntc", "version": "2", "vtp_password": "\"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["vtp domain ntc"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
import re
def execute_show_command(command, module, command_type='cli_show'):
    """Run a single show command on the device and return its output.

    For CLI transport, non-'status' commands request JSON output by
    appending ' | json'. The previous implementation left ``body``
    undefined (raising UnboundLocalError) for any transport other than
    'cli'/'nxapi'; since both branches otherwise did exactly the same
    thing, they are merged here and any transport now returns the result
    of run_commands().

    :param command: show command string to execute
    :param module: AnsibleModule instance (provides params and connection)
    :param command_type: kept for backward compatibility; not used here
    :return: list of command results as returned by run_commands()
    """
    if module.params['transport'] == 'cli' and 'status' not in command:
        command += ' | json'
    cmds = [command]
    return run_commands(module, cmds)
def flatten_list(command_lists):
    """Flatten a mixed sequence by one level.

    List elements are expanded in place (one level only, matching how the
    command batches are assembled in main()); any other element is kept
    as-is.
    """
    flattened = []
    for entry in command_lists:
        flattened.extend(entry if isinstance(entry, list) else [entry])
    return flattened
def get_vtp_config(module):
    """Parse 'show vtp status' output into a dict.

    :param module: AnsibleModule instance
    :return: {'domain': ..., 'version': ..., 'vtp_password': ...} when both
        a domain and a version could be parsed, otherwise an empty dict.
    """
    command = 'show vtp status'
    body = execute_show_command(
        command, module, command_type='cli_show_ascii')[0]
    vtp_parsed = {}
    if body:
        # Raw strings: '\s'/'\d' inside plain string literals are invalid
        # escape sequences (DeprecationWarning, later a SyntaxWarning/Error).
        version_regex = r'.*VTP version running\s+:\s+(?P<version>\d).*'
        domain_regex = r'.*VTP Domain Name\s+:\s+(?P<domain>\S+).*'
        try:
            match_version = re.match(version_regex, body, re.DOTALL)
            version = match_version.groupdict()['version']
        except AttributeError:
            # re.match returned None -> no version line in the output.
            version = ''
        try:
            match_domain = re.match(domain_regex, body, re.DOTALL)
            domain = match_domain.groupdict()['domain']
        except AttributeError:
            domain = ''
        if domain and version:
            vtp_parsed['domain'] = domain
            vtp_parsed['version'] = version
            vtp_parsed['vtp_password'] = get_vtp_password(module)
    return vtp_parsed
def get_vtp_password(module):
    """Return the device's VTP password as a string, or '' when unset."""
    body = execute_show_command('show vtp password', module)[0]
    password = body['passwd']
    return str(password) if password else ""
def main():
    """Module entry point: ensure the requested VTP domain is configured."""
    argument_spec = dict(
        domain=dict(type='str', required=True),
    )
    argument_spec.update(nxos_argument_spec)
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    warnings = list()
    check_args(module, warnings)
    domain = module.params['domain']
    existing = get_vtp_config(module)
    end_state = existing
    args = dict(domain=domain)
    changed = False
    # Proposed state = non-None module args; delta = what differs on-device.
    proposed = dict((k, v) for k, v in args.items() if v is not None)
    delta = dict(set(proposed.items()).difference(existing.items()))
    commands = []
    if delta:
        commands.append(['vtp domain {0}'.format(domain)])
    cmds = flatten_list(commands)
    if cmds:
        if module.check_mode:
            # Check mode: report the would-be change without touching the device.
            module.exit_json(changed=True, commands=cmds)
        else:
            changed = True
            load_config(module, cmds)
            # Re-read the device so end_state reflects the applied config.
            end_state = get_vtp_config(module)
            if 'configure' in cmds:
                cmds.pop(0)
    results = {}
    results['proposed'] = proposed
    results['existing'] = existing
    results['end_state'] = end_state
    results['updates'] = cmds
    results['changed'] = changed
    results['warnings'] = warnings
    module.exit_json(**results)
| gpl-3.0 |
j0nathan33/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/condenast.py | 11 | 4939 | # coding: utf-8
from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from ..utils import (
compat_urllib_parse,
orderedSet,
compat_urllib_parse_urlparse,
compat_urlparse,
)
class CondeNastIE(InfoExtractor):
    """
    Condé Nast is a media group, some of its sites use a custom HTML5 player
    that works the same in all of them.
    """
    # The keys are the supported sites and the values are the name to be shown
    # to the user and in the extractor description.
    _SITES = {
        'wired': 'WIRED',
        'gq': 'GQ',
        'vogue': 'Vogue',
        'glamour': 'Glamour',
        'wmagazine': 'W Magazine',
        'vanityfair': 'Vanity Fair',
        'cnevids': 'Condé Nast',
    }
    _VALID_URL = r'http://(video|www|player)\.(?P<site>%s)\.com/(?P<type>watch|series|video|embed)/(?P<id>[^/?#]+)' % '|'.join(_SITES.keys())
    IE_DESC = 'Condé Nast media group: %s' % ', '.join(sorted(_SITES.values()))
    _TEST = {
        'url': 'http://video.wired.com/watch/3d-printed-speakers-lit-with-led',
        'md5': '1921f713ed48aabd715691f774c451f7',
        'info_dict': {
            'id': '5171b343c2b4c00dd0c1ccb3',
            'ext': 'mp4',
            'title': '3D Printed Speakers Lit With LED',
            'description': 'Check out these beautiful 3D printed LED speakers. You can\'t actually buy them, but LumiGeek is working on a board that will let you make you\'re own.',
        }
    }
    def _extract_series(self, url, webpage):
        # A /series/ page lists episode thumbnails; turn each /watch/ link
        # into a nested CondeNast url_result and wrap them in a playlist.
        title = self._html_search_regex(r'<div class="cne-series-info">.*?<h1>(.+?)</h1>',
                                        webpage, 'series title', flags=re.DOTALL)
        url_object = compat_urllib_parse_urlparse(url)
        base_url = '%s://%s' % (url_object.scheme, url_object.netloc)
        m_paths = re.finditer(r'<p class="cne-thumb-title">.*?<a href="(/watch/.+?)["\?]',
                              webpage, flags=re.DOTALL)
        paths = orderedSet(m.group(1) for m in m_paths)
        build_url = lambda path: compat_urlparse.urljoin(base_url, path)
        entries = [self.url_result(build_url(path), 'CondeNast') for path in paths]
        return self.playlist_result(entries, playlist_title=title)
    def _extract_video(self, webpage, url_type):
        # Embed pages carry no description of their own.
        if url_type != 'embed':
            description = self._html_search_regex(
                [
                    r'<div class="cne-video-description">(.+?)</div>',
                    r'<div class="video-post-content">(.+?)</div>',
                ],
                webpage, 'description', fatal=False, flags=re.DOTALL)
        else:
            description = None
        # The player config is an inline JS object; pull the ids out of it
        # and query the loader endpoint for the actual video metadata.
        params = self._search_regex(r'var params = {(.+?)}[;,]', webpage,
                                    'player params', flags=re.DOTALL)
        video_id = self._search_regex(r'videoId: [\'"](.+?)[\'"]', params, 'video id')
        player_id = self._search_regex(r'playerId: [\'"](.+?)[\'"]', params, 'player id')
        target = self._search_regex(r'target: [\'"](.+?)[\'"]', params, 'target')
        data = compat_urllib_parse.urlencode({'videoId': video_id,
                                              'playerId': player_id,
                                              'target': target,
                                              })
        base_info_url = self._search_regex(r'url = [\'"](.+?)[\'"][,;]',
                                           webpage, 'base info url',
                                           default='http://player.cnevids.com/player/loader.js?')
        info_url = base_info_url + data
        info_page = self._download_webpage(info_url, video_id,
                                           'Downloading video info')
        video_info = self._search_regex(r'var video = ({.+?});', info_page, 'video info')
        video_info = json.loads(video_info)
        formats = [{
            'format_id': '%s-%s' % (fdata['type'].split('/')[-1], fdata['quality']),
            'url': fdata['src'],
            'ext': fdata['type'].split('/')[-1],
            'quality': 1 if fdata['quality'] == 'high' else 0,
        } for fdata in video_info['sources'][0]]
        self._sort_formats(formats)
        return {
            'id': video_id,
            'formats': formats,
            'title': video_info['title'],
            'thumbnail': video_info['poster_frame'],
            'description': description,
        }
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        site = mobj.group('site')
        url_type = mobj.group('type')
        item_id = mobj.group('id')
        self.to_screen('Extracting from %s with the Condé Nast extractor' % self._SITES[site])
        webpage = self._download_webpage(url, item_id)
        if url_type == 'series':
            return self._extract_series(url, webpage)
        else:
            return self._extract_video(webpage, url_type)
| gpl-3.0 |
Josue-Martinez-Moreno/trackeddy | examples/random_field.py | 1 | 8259 | import time
tic=time.time()
import matplotlib
matplotlib.use('Agg')
import trackeddy
import trackeddy.tracking as ttrack
from trackeddy.geometryfunc import *
from pylab import *
import random
import pdb
import cmocean as cm
import matplotlib.gridspec as gridspec
import trackeddy.utils.field_generator as fg
import importlib
importlib.reload(ttrack)
# Synthetic-field setup: t time steps, each an independent random eddy field.
# NOTE(review): xx/yy are 200-point generation axes while data is 300x300 —
# confirm Generate_field's output grid size before changing either.
t = 1000
n = 13
xx = linspace(10,12,200)
yy = linspace(10,12,200)
#print("Generate field")
#gf=fg.Generate_field(0.1,0.1,n,xx,yy,'Nint')
#data = gf.assemble_field(t)
data = zeros((t,300,300))
for tt in range(t):
    print(tt)
    # A fresh field with a random number of eddies (5-15) per time step.
    gf=fg.Generate_field(0.1,0.1,randint(5, 15),xx,yy,'Nint')
    data[tt,:,:] = gf.assemble_field(1)
##
x = linspace(10,12,300)
y = linspace(10,12,300)
################################################################################
################################################################################
#################################### FLAT ######################################
################################################################################
################################################################################
# Flat background: track and reconstruct eddies on the raw synthetic field.
preferences={'ellipse':0.85,'eccentricity':0.85,'gaussian':0.8}
eddytd={}
eddytdn={}
t0 = 0
t = 1000
# Positive-amplitude eddies: scan closed contours from 0.05 up to the max.
levels = {'max':data.max(),'min':0.05,'step':0.05}
eddytd = trackeddy.tracking.analyseddyzt(data,x,y,t0,t,1,levels,preferences=preferences,areamap='',mask='',maskopt='forcefit'\
        ,destdir='',physics='',diagnostics=False,plotdata=False,pprint=True,debug=False)
####
# Negative-amplitude eddies: scan from -0.05 down to the field minimum.
levels = {'max':data.min(),'min':-0.05,'step':-0.05}
eddytdn = trackeddy.tracking.analyseddyzt(data,x,y,t0,t,1,levels,preferences=preferences,areamap='',mask='',maskopt='forcefit'\
        ,destdir='',physics='',diagnostics=False,plotdata=False,pprint=True,debug=False)
pos_f = reconstruct_syntetic(shape(data),x,y,eddytd)
neg_f = reconstruct_syntetic(shape(data),x,y,eddytdn)
f_field = pos_f+neg_f
# Side-by-side snapshots of the original field and its reconstruction.
for tt in range(t0,t):
    f = plt.figure()
    gs = gridspec.GridSpec(2, 1)
    ax1 = plt.subplot(gs[0])
    ax1.pcolormesh(x,y,data[tt,:,:],vmin=-1,vmax=1,cmap=cm.cm.balance)
    ax2 = plt.subplot(gs[1])
    ax2.pcolormesh(f_field[tt,:,:],vmin=-1,vmax=1,cmap=cm.cm.balance)
    ax2.contour(f_field[tt,:,:])
    ax1.set_title('Assamble: %03d' % tt)
    plt.savefig('time_%03d.png' %tt)
################################################################################
################################################################################
#################################### WAVE ######################################
################################################################################
################################################################################
# Wave background: superpose a propagating sinusoid on the eddy field.
# NOTE(review): the loop below rebinds `t` (loop variable shadows the
# time-step count), so after it runs t == 999 and later sections iterate
# one step short — confirm and rename the loop variable if unintended.
amplitude = 1
frequency = 20
phase = 1
waves = zeros(shape(data))
X,Y = meshgrid(x,y)
for t in range(0,t):
    r = X+y/10
    waves[t,:,:] = 0.3*sin(r*frequency-t + phase)
wave_data = waves+data
# NOTE(review): analyseddyzt below is run on `data`, not `wave_data`; if
# the intent is to test tracking against the wavy background, these calls
# should probably receive wave_data. Confirm before relying on m_ke_w.
levels = {'max':data.max(),'min':0.05,'step':0.05}
eddytd=ttrack.analyseddyzt(data,x,y,0,t,1,levels,preferences=preferences,areamap='',mask='',maskopt='forcefit'\
        ,destdir='',physics='',diagnostics=False,plotdata=False,pprint=True)
levels = {'max':data.min(),'min':-0.05,'step':-0.05}
eddytdn=ttrack.analyseddyzt(data,x,y,0,t,1,levels,preferences=preferences,areamap='',mask='',maskopt='forcefit'\
        ,destdir='',physics='',diagnostics=False,plotdata=False,pprint=True)
pos_w = reconstruct_syntetic(shape(data),x,y,eddytd)
neg_w = reconstruct_syntetic(shape(data),x,y,eddytdn)
w_field = pos_w+neg_w
################################################################################
################################################################################
#################################### JETS ######################################
################################################################################
################################################################################
# Jet background: meandering zonal jets added to the eddy field; wave
# numbers and phase are re-randomised every time step.
k_y = 3
phase = 1
k_x = 2
jets = zeros(shape(data))
for t in range(0,t):
    r = Y
    k_y=random.uniform(2, 3)
    phase=random.uniform(0, 1)
    k_x=random.uniform(1, 2)
    amp=0.3
    jets[t,:,:] = amp*cos((k_y*(k_y*Y+phase+sin(k_x*X-t))))
jet_data = jets+data
# NOTE(review): as in the wave section, `data` (not jet_data) is analysed
# below — confirm before relying on m_ke_j.
levels = {'max':data.max(),'min':0.05,'step':0.05}
eddytd=ttrack.analyseddyzt(data,x,y,0,t,1,levels,preferences=preferences,areamap='',mask='',maskopt='forcefit'\
        ,destdir='',physics='',diagnostics=False,plotdata=False,pprint=True)
levels = {'max':data.min(),'min':-0.05,'step':-0.05}
eddytdn=ttrack.analyseddyzt(data,x,y,0,t,1,levels,preferences=preferences,areamap='',mask='',maskopt='forcefit'\
        ,destdir='',physics='',diagnostics=False,plotdata=False,pprint=True)
pos_f = reconstruct_syntetic(shape(data),x,y,eddytd)
neg_f = reconstruct_syntetic(shape(data),x,y,eddytdn)
j_field = pos_f+neg_f
################################################################################
################################################################################
##################################### KE #######################################
################################################################################
################################################################################
# Mean kinetic energy per time step for the control field and each
# reconstruction (velocities derived from the SSH-like fields).
m_ke_c = []
m_ke_f = []
m_ke_w = []
m_ke_j = []
for tt in range(shape(data)[0]):
    u_c,v_c = geovelfield( data[tt,:,:] ,x,y)
    u_f,v_f = geovelfield(f_field[tt,:,:],x,y)
    u_w,v_w = geovelfield(w_field[tt,:,:],x,y)
    u_j,v_j = geovelfield(j_field[tt,:,:],x,y)
    ke_c = KE(u_c,v_c)
    ke_f = KE(u_f,v_f)
    ke_w = KE(u_w,v_w)
    ke_j = KE(u_j,v_j)
    m_ke_c.append(mean(ke_c))
    m_ke_f.append(mean(ke_f))
    m_ke_w.append(mean(ke_w))
    m_ke_j.append(mean(ke_j))
################################################################################
################################################################################
#################################### PLOT ######################################
################################################################################
################################################################################
import seaborn as sns
import pandas as pd
from scipy.stats import spearmanr,linregress
# Joint KDE of control vs reconstructed KE for each background, with the
# 1:1 line and a least-squares fit; R near 1 means faithful reconstruction.
# NOTE(review): `data` is rebound below to the stacked KE matrix, clobbering
# the field array — harmless here (only the KE lists are used afterwards),
# but rename if the field is ever needed again.
figure(dpi=300)
data=np.vstack([m_ke_c,m_ke_f]).T
df = pd.DataFrame(data, columns=[r"$KE_c$", r"$KE_r$"])
g1 = sns.jointplot(x=r"$KE_c$", y=r"$KE_r$", data=df, kind="kde",cmap='Blues',joint_kws={'shade_lowest':False})
lims = [100, 0]
g1.ax_joint.plot(lims, lims, '--k')
s,i,r,p,std=linregress(m_ke_c,m_ke_f)
x0=0
y0=s*x0+i
x1=100
y1=s*x1+i
g1.ax_joint.plot([x0,x1], [y0,y1], '-.b')
g1.ax_joint.text(60,20,r'R = %2f' % r, color='b')
g1.ax_marg_x.set_xlim(0,100)
g1.ax_marg_y.set_ylim(0,100)
print('estimate flat: ',mean([abs(y0/100),abs(1-y1/100)]))
plt.savefig('e_vs_e.png')
# Same plot for the wave background.
figure(dpi=300)
data=np.vstack([m_ke_c,m_ke_w]).T
df = pd.DataFrame(data, columns=[r"$KE_c$", r"$KE_r$"])
g1 = sns.jointplot(x=r"$KE_c$", y=r"$KE_r$", data=df, kind="kde",cmap='Blues',joint_kws={'shade_lowest':False})
lims = [100, 0]
g1.ax_joint.plot(lims, lims, '--k')
s,i,r,p,std=linregress(m_ke_c,m_ke_w)
x0=0
y0=s*x0+i
x1=100
y1=s*x1+i
g1.ax_joint.plot([x0,x1], [y0,y1], '-.b')
g1.ax_joint.text(60,20,r'R = %2f' % r, color='b')
g1.ax_marg_x.set_xlim(0,100)
g1.ax_marg_y.set_ylim(0,100)
print('estimate sin: ',mean([abs(y0/100),abs(1-y1/100)]))
plt.savefig('w_vs_e.png')
# Same plot for the jet background.
figure(dpi=300)
data=np.vstack([m_ke_c,m_ke_j]).T
df = pd.DataFrame(data, columns=[r"$KE_c$", r"$KE_r$"])
g1 = sns.jointplot(x=r"$KE_c$", y=r"$KE_r$", data=df, kind="kde",cmap='Blues',joint_kws={'shade_lowest':False})
lims = [100, 0]
g1.ax_joint.plot(lims, lims, '--k')
s,i,r,p,std=linregress(m_ke_c,m_ke_j)
x0=0
y0=s*x0+i
x1=100
y1=s*x1+i
g1.ax_joint.plot([x0,x1], [y0,y1], '-.b')
g1.ax_joint.text(60,20,r'R = %2f' % r, color='b')
g1.ax_marg_x.set_xlim(0,100)
g1.ax_marg_y.set_ylim(0,100)
print('estimate jet: ',mean([abs(y0/100),abs(1-y1/100)]))
plt.savefig('j_vs_e.png')
# for ii in range(0,30):
#     plt.figure()
#     plt.pcolormesh(af[ii])
#     plt.savefig('%03d.png' %ii)
#     plt.show()
toc=time.time()
print("######## ELAPSED TIME: ###########")
print("######## %2f s ###########" % (toc-tic))
incaser/odoo-odoo | addons/l10n_in_hr_payroll/l10n_in_hr_payroll.py | 332 | 13610 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
from calendar import isleap
from openerp.tools.translate import _
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
DATETIME_FORMAT = "%Y-%m-%d"
class hr_contract(osv.osv):
    """
    Employee contract allows to add different values in fields.
    Fields are used in salary rule computation.
    """
    _inherit = 'hr.contract'
    _description = 'HR Contract'
    # India-payroll-specific contract fields; salary rules read these when
    # computing payslips (the help strings document the business meaning).
    # NOTE: 'driver_salay' (sic) is the existing DB column name — renaming
    # it would require a migration, so the typo is kept.
    _columns = {
        'tds': fields.float('TDS', digits_compute=dp.get_precision('Payroll'), help="Amount for Tax Deduction at Source"),
        'driver_salay': fields.boolean('Driver Salary', help="Check this box if you provide allowance for driver"),
        'medical_insurance': fields.float('Medical Insurance', digits_compute=dp.get_precision('Payroll'), help="Deduction towards company provided medical insurance"),
        'voluntary_provident_fund': fields.float('Voluntary Provident Fund (%)', digits_compute=dp.get_precision('Payroll'), help="VPF is a safe option wherein you can contribute more than the PF ceiling of 12% that has been mandated by the government and VPF computed as percentage(%)"),
        'house_rent_allowance_metro_nonmetro': fields.float('House Rent Allowance (%)', digits_compute=dp.get_precision('Payroll'), help="HRA is an allowance given by the employer to the employee for taking care of his rental or accommodation expenses for metro city it is 50% and for non metro 40%. \nHRA computed as percentage(%)"),
        'supplementary_allowance': fields.float('Supplementary Allowance', digits_compute=dp.get_precision('Payroll')),
    }
class payroll_advice(osv.osv):
    '''
    Bank Advice: a payment instruction listing, per employee, the net
    salary amount the bank should transfer for a payroll period.
    '''
    _name = 'hr.payroll.advice'
    _description = 'Bank Advice'
    _columns = {
        'name':fields.char('Name', readonly=True, required=True, states={'draft': [('readonly', False)]},),
        'note': fields.text('Description'),
        'date': fields.date('Date', readonly=True, required=True, states={'draft': [('readonly', False)]}, help="Advice Date is used to search Payslips"),
        'state':fields.selection([
            ('draft', 'Draft'),
            ('confirm', 'Confirmed'),
            ('cancel', 'Cancelled'),
        ], 'Status', select=True, readonly=True),
        'number': fields.char('Reference', readonly=True),
        'line_ids': fields.one2many('hr.payroll.advice.line', 'advice_id', 'Employee Salary', states={'draft': [('readonly', False)]}, readonly=True, copy=True),
        'chaque_nos': fields.char('Cheque Numbers'),
        'neft': fields.boolean('NEFT Transaction', help="Check this box if your company use online transfer for salary"),
        'company_id':fields.many2one('res.company', 'Company', required=True, readonly=True, states={'draft': [('readonly', False)]}),
        'bank_id':fields.many2one('res.bank', 'Bank', readonly=True, states={'draft': [('readonly', False)]}, help="Select the Bank from which the salary is going to be paid"),
        'batch_id': fields.many2one('hr.payslip.run', 'Batch', readonly=True)
    }
    _defaults = {
        'date': lambda * a: time.strftime('%Y-%m-%d'),
        'state': lambda * a: 'draft',
        'company_id': lambda self, cr, uid, context: \
            self.pool.get('res.users').browse(cr, uid, uid,
                context=context).company_id.id,
        'note': "Please make the payroll transfer from above account number to the below mentioned account numbers towards employee salaries:"
    }

    def compute_advice(self, cr, uid, ids, context=None):
        """
        Advice - Create Advice lines in Payment Advice and
        compute Advice lines.
        @param cr: the current row, from the database cursor,
        @param uid: the current user's ID for security checks,
        @param ids: List of Advice's IDs
        @param context: A standard dictionary for contextual values
        @return: True
        """
        payslip_pool = self.pool.get('hr.payslip')
        advice_line_pool = self.pool.get('hr.payroll.advice.line')
        payslip_line_pool = self.pool.get('hr.payslip.line')
        for advice in self.browse(cr, uid, ids, context=context):
            # Recomputing: drop any previously generated lines first.
            old_line_ids = advice_line_pool.search(cr, uid, [('advice_id', '=', advice.id)], context=context)
            if old_line_ids:
                advice_line_pool.unlink(cr, uid, old_line_ids, context=context)
            # Every confirmed payslip whose period covers the advice date.
            slip_ids = payslip_pool.search(cr, uid, [('date_from', '<=', advice.date), ('date_to', '>=', advice.date), ('state', '=', 'done')], context=context)
            for slip in payslip_pool.browse(cr, uid, slip_ids, context=context):
                # BUG FIX: this used 'and', which only raised when the account
                # AND the account number were both missing; an account without
                # a number slipped through. 'or' matches the identical check
                # in hr_payslip_run.create_advice.
                if not slip.employee_id.bank_account_id or not slip.employee_id.bank_account_id.acc_number:
                    raise osv.except_osv(_('Error!'), _('Please define bank account for the %s employee') % (slip.employee_id.name))
                line_ids = payslip_line_pool.search(cr, uid, [ ('slip_id', '=', slip.id), ('code', '=', 'NET')], context=context)
                if line_ids:
                    # One advice line per payslip, carrying the NET amount.
                    line = payslip_line_pool.browse(cr, uid, line_ids, context=context)[0]
                    advice_line = {
                        'advice_id': advice.id,
                        'name': slip.employee_id.bank_account_id.acc_number,
                        'employee_id': slip.employee_id.id,
                        'bysal': line.total
                    }
                    advice_line_pool.create(cr, uid, advice_line, context=context)
            # Link all matched payslips back to this advice.
            payslip_pool.write(cr, uid, slip_ids, {'advice_id': advice.id}, context=context)
        return True

    def confirm_sheet(self, cr, uid, ids, context=None):
        """
        Confirm the Advice and assign it a 'PAY/<mm-yyyy>/<seq>' reference.
        @param cr: the current row, from the database cursor,
        @param uid: the current user's ID for security checks,
        @param ids: List of confirm Advice's IDs
        @param context: A standard dictionary for contextual values
        @return: True
        @raise osv.except_osv: if an advice has no lines.
        """
        seq_obj = self.pool.get('ir.sequence')
        for advice in self.browse(cr, uid, ids, context=context):
            if not advice.line_ids:
                raise osv.except_osv(_('Error!'), _('You can not confirm Payment advice without advice lines.'))
            advice_date = datetime.strptime(advice.date, DATETIME_FORMAT)
            advice_year = advice_date.strftime('%m') + '-' + advice_date.strftime('%Y')
            number = seq_obj.get(cr, uid, 'payment.advice')
            sequence_num = 'PAY' + '/' + advice_year + '/' + number
            self.write(cr, uid, [advice.id], {'number': sequence_num, 'state': 'confirm'}, context=context)
        return True

    def set_to_draft(self, cr, uid, ids, context=None):
        """Resets Advice as draft.
        """
        return self.write(cr, uid, ids, {'state':'draft'}, context=context)

    def cancel_sheet(self, cr, uid, ids, context=None):
        """Marks Advice as cancelled.
        """
        return self.write(cr, uid, ids, {'state':'cancel'}, context=context)

    def onchange_company_id(self, cr, uid, ids, company_id=False, context=None):
        """Default the bank to the company partner's first bank account."""
        res = {}
        if company_id:
            company = self.pool.get('res.company').browse(cr, uid, [company_id], context=context)[0]
            if company.partner_id.bank_ids:
                res.update({'bank_id': company.partner_id.bank_ids[0].bank.id})
        return {
            'value':res
        }
class hr_payslip_run(osv.osv):
    _inherit = 'hr.payslip.run'
    _description = 'Payslip Batches'
    _columns = {
        'available_advice': fields.boolean('Made Payment Advice?',
            help="If this box is checked which means that Payment Advice exists for current batch",
            readonly=False, copy=False),
    }

    def draft_payslip_run(self, cr, uid, ids, context=None):
        """Reset the batch to draft and clear the advice flag so a new
        payment advice may be generated for it."""
        res = super(hr_payslip_run, self).draft_payslip_run(cr, uid, ids, context=context)
        self.write(cr, uid, ids, {'available_advice': False}, context=context)
        return res

    def create_advice(self, cr, uid, ids, context=None):
        """Process every payslip of the batch, then create one payment
        advice per batch with a line holding each employee's NET pay.

        @raise osv.except_osv: if an advice already exists for the batch or
            an employee lacks a usable bank account.
        """
        payslip_pool = self.pool.get('hr.payslip')
        payslip_line_pool = self.pool.get('hr.payslip.line')
        advice_pool = self.pool.get('hr.payroll.advice')
        advice_line_pool = self.pool.get('hr.payroll.advice.line')
        users = self.pool.get('res.users').browse(cr, uid, [uid], context=context)
        for run in self.browse(cr, uid, ids, context=context):
            if run.available_advice:
                raise osv.except_osv(_('Error!'), _("Payment advice already exists for %s, 'Set to Draft' to create a new advice.") %(run.name))
            advice_data = {
                'batch_id': run.id,
                'company_id': users[0].company_id.id,
                'name': run.name,
                'date': run.date_end,
                'bank_id': users[0].company_id.bank_ids and users[0].company_id.bank_ids[0].id or False
            }
            advice_id = advice_pool.create(cr, uid, advice_data, context=context)
            slip_ids = []
            for slip_id in run.slip_ids:
                # TODO is it necessary to interleave the calls ?
                payslip_pool.signal_workflow(cr, uid, [slip_id.id], 'hr_verify_sheet')
                payslip_pool.signal_workflow(cr, uid, [slip_id.id], 'process_sheet')
                slip_ids.append(slip_id.id)
            for slip in payslip_pool.browse(cr, uid, slip_ids, context=context):
                if not slip.employee_id.bank_account_id or not slip.employee_id.bank_account_id.acc_number:
                    raise osv.except_osv(_('Error!'), _('Please define bank account for the %s employee') % (slip.employee_id.name))
                line_ids = payslip_line_pool.search(cr, uid, [('slip_id', '=', slip.id), ('code', '=', 'NET')], context=context)
                if line_ids:
                    line = payslip_line_pool.browse(cr, uid, line_ids, context=context)[0]
                    advice_line = {
                        'advice_id': advice_id,
                        'name': slip.employee_id.bank_account_id.acc_number,
                        'employee_id': slip.employee_id.id,
                        'bysal': line.total
                    }
                    advice_line_pool.create(cr, uid, advice_line, context=context)
        # FIX: pass the context through like every other ORM call in this
        # module (it was silently dropped before).
        return self.write(cr, uid, ids, {'available_advice' : True}, context=context)
class payroll_advice_line(osv.osv):
    '''
    Bank Advice Lines
    '''
    def onchange_employee_id(self, cr, uid, ids, employee_id=False, context=None):
        # Pre-fill the bank account number and IFSC code from the selected
        # employee's bank account record; cleared fields when no employee.
        res = {}
        hr_obj = self.pool.get('hr.employee')
        if not employee_id:
            return {'value': res}
        employee = hr_obj.browse(cr, uid, [employee_id], context=context)[0]
        res.update({'name': employee.bank_account_id.acc_number , 'ifsc_code': employee.bank_account_id.bank_bic or ''})
        return {'value': res}

    _name = 'hr.payroll.advice.line'
    _description = 'Bank Advice Lines'
    _columns = {
        'advice_id': fields.many2one('hr.payroll.advice', 'Bank Advice'),
        # 'name' holds the employee's bank account number (see
        # onchange_employee_id and the advice-line dicts built elsewhere
        # in this module).
        'name': fields.char('Bank Account No.', size=25, required=True),
        'ifsc_code': fields.char('IFSC Code', size=16),
        'employee_id': fields.many2one('hr.employee', 'Employee', required=True),
        # Net salary amount to transfer ("by salary").
        'bysal': fields.float('By Salary', digits_compute=dp.get_precision('Payroll')),
        'debit_credit': fields.char('C/D', size=3, required=False),
        'company_id': fields.related('advice_id', 'company_id', type='many2one', required=False, relation='res.company', string='Company', store=True),
        # Mirrors the parent advice's NEFT flag.
        'ifsc': fields.related('advice_id', 'neft', type='boolean', string='IFSC'),
    }
    _defaults = {
        # Salary payments are credits by default.
        'debit_credit': 'C',
    }
class hr_payslip(osv.osv):
    '''
    Employee Pay Slip
    '''
    _inherit = 'hr.payslip'
    _description = 'Pay Slips'

    _columns = {
        # Payment advice this payslip was included in; copy=False so
        # duplicating a payslip does not carry the link over.
        'advice_id': fields.many2one('hr.payroll.advice', 'Bank Advice', copy=False)
    }
class res_company(osv.osv):
    # Extends the company with a payroll-related flag.
    _inherit = 'res.company'

    _columns = {
        'dearness_allowance': fields.boolean('Dearness Allowance', help="Check this box if your company provide Dearness Allowance to employee")
    }
    _defaults = {
        # Enabled by default for new companies.
        'dearness_allowance': True,
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
protito/nyas-space-quest | cocos2d/tools/performance-analyze/convertor.py | 63 | 5245 | #!/usr/bin/python
#-*- coding: UTF-8 -*-
# ----------------------------------------------------------------------------
# Convert the performance test result from json files to excel.
#
# Author: Bill Zhang
#
# License: MIT
# ----------------------------------------------------------------------------
'''
Convert the performance test result from json files to excel.
'''
import xlwt
import os
import json
from argparse import ArgumentParser
# Cell styles, expressed as xlwt "easyxf" format strings.
DEFAULT_STYLE = 'borders: left thin, right thin, top thin, bottom thin;'
CONDITION_STYLE = 'pattern: pattern solid, fore_color light_green;'
RESULT_STYLE = 'pattern: pattern solid, fore_color light_yellow;'

# Top-level JSON keys that describe the test run itself rather than a test
# case; Convertor.convert_file skips them when generating sheets.
BASE_KEYS = [
    'osVersion',
    'fileVersion',
    'timeStamp',
    'engineVersion',
    'device'
]

# Keys of each per-test-case object inside the JSON file.
KEY_CONDITION_HEADERS = "conditionHeaders"
KEY_RESULT_HEADERS = "resultHeaders"
KEY_RESULTS = "results"

# Top-left cell where the generated table starts.
START_COL_INDEX = 0
START_ROW_INDEX = 0
class KnownException(Exception):
    """Anticipated error (e.g. bad input path); the __main__ handler prints
    its message instead of letting the traceback propagate."""
    pass
class Convertor:
    """Converts performance-test result JSON files into .xls workbooks."""

    def __init__(self, src_path, output_path=None):
        """
        Args:
            src_path: a JSON file, or a folder containing JSON files.
            output_path: destination folder for the .xls files; defaults to
                the source folder (or the source file's folder).
        Raises:
            KnownException: if src_path does not exist.
        """
        self.src_path = self.change_to_abspath(src_path)
        if not os.path.exists(self.src_path):
            raise KnownException('%s is not existed!' % self.src_path)
        if output_path is None:
            # not specified output path, default use source path
            if os.path.isfile(self.src_path):
                self.output_path = os.path.dirname(self.src_path)
            else:
                self.output_path = self.src_path
        else:
            self.output_path = self.change_to_abspath(output_path)

    def change_to_abspath(self, path):
        """Return `path` expanded (~), made absolute and normalized."""
        ret = os.path.expanduser(path)
        if not os.path.isabs(ret):
            ret = os.path.abspath(ret)
        return os.path.normpath(ret)

    def get_col_width(self, col_str):
        """Column width for `col_str` in xlwt units (1/256 of a character)."""
        return 256 * (len(col_str) + 1)

    def convert_file(self, file_path):
        """Convert one JSON result file into '<basename>.xls' inside
        self.output_path, overwriting any existing output file."""
        # FIX: use a context manager so the handle is closed even if
        # json.load raises (the original leaked it on parse errors).
        with open(file_path) as f:
            testData = json.load(f)

        basename, _ = os.path.splitext(os.path.basename(file_path))
        dst_file_path = os.path.join(self.output_path, "%s.xls" % basename)
        if os.path.isfile(dst_file_path):
            os.remove(dst_file_path)

        workbook = xlwt.Workbook(encoding = 'ascii')
        default_style = xlwt.Style.easyxf(DEFAULT_STYLE)
        con_style = xlwt.Style.easyxf("%s%s" % (DEFAULT_STYLE, CONDITION_STYLE))
        ret_style = xlwt.Style.easyxf("%s%s" % (DEFAULT_STYLE, RESULT_STYLE))
        for key in testData.keys():
            if key in BASE_KEYS:
                # run-level metadata, not a test case
                continue

            # create a sheet for the test case
            sheetObj = workbook.add_sheet(key)

            # get test case data
            caseInfo = testData[key]

            # Add headers for the test case
            condHeaders = caseInfo[KEY_CONDITION_HEADERS]
            retHeaders = caseInfo[KEY_RESULT_HEADERS]
            curRow = START_ROW_INDEX
            curCol = START_COL_INDEX
            col_widths = {}
            for header in (condHeaders + retHeaders):
                sheetObj.write(curRow, curCol, header, default_style)
                col_width = self.get_col_width(header)
                col_widths[curCol] = col_width
                sheetObj.col(curCol).width = col_width
                curCol += 1

            # One row per result; condition columns get the green style,
            # result columns the yellow one. Columns grow to fit the
            # widest cell seen so far.
            rets = caseInfo[KEY_RESULTS]
            for retInfo in rets:
                curRow += 1
                curCol = START_COL_INDEX
                for ret in retInfo:
                    if (curCol - START_COL_INDEX) < len(condHeaders):
                        use_style = con_style
                    else:
                        use_style = ret_style
                    sheetObj.write(curRow, curCol, ret, use_style)
                    new_width = self.get_col_width(ret)
                    old_width = col_widths[curCol]
                    if new_width > old_width:
                        sheetObj.col(curCol).width = new_width
                        col_widths[curCol] = new_width
                    curCol += 1

        workbook.save(dst_file_path)
        print("%s is generated." % dst_file_path)

    def do_convert(self):
        """Convert the source file, or every *.json file in the source
        folder, creating the output folder if needed."""
        if not os.path.exists(self.output_path):
            os.makedirs(self.output_path)

        if os.path.isfile(self.src_path):
            self.convert_file(self.src_path)
        else:
            for f in os.listdir(self.src_path):
                full_path = os.path.join(self.src_path, f)
                ignore, ext = os.path.splitext(f)
                if os.path.isfile(full_path) and ext == '.json':
                    self.convert_file(full_path)
if __name__ == '__main__':
    parser = ArgumentParser(description="Performance test data convertor.")
    parser.add_argument('-s', dest='src_path', required=True, help='Specify the json file path or the folder path of json files.')
    parser.add_argument('-o', dest='output_path', help='Specify the output path of excel files.')
    (args, unknown) = parser.parse_known_args()

    try:
        convertor = Convertor(args.src_path, args.output_path)
        convertor.do_convert()
    except KnownException as e:
        # FIX: catch the specific type directly instead of catching
        # Exception and string-comparing __class__.__name__. Anticipated
        # errors print their message; anything else propagates with a
        # full traceback, exactly as before.
        print(' '.join(e.args))
| apache-2.0 |
zhuwenping/python-for-android | python3-alpha/extra_modules/gdata/apps/multidomain/data.py | 102 | 12845 | #!/usr/bin/python2.4
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data model classes for the Multidomain Provisioning API."""
__author__ = 'Claudio Cherubino <ccherubino@google.com>'
import gdata.apps
import gdata.apps.apps_property_entry
import gdata.apps_property
import gdata.data
# This is required to work around a naming conflict between the Google
# Spreadsheets API and Python's built-in property function
pyproperty = property

# Names of the apps:property elements used by the entries below.
# The apps:property firstName of a user entry
USER_FIRST_NAME = 'firstName'

# The apps:property lastName of a user entry
USER_LAST_NAME = 'lastName'

# The apps:property userEmail of a user entry
USER_EMAIL = 'userEmail'

# The apps:property password of a user entry
USER_PASSWORD = 'password'

# The apps:property hashFunction of a user entry
USER_HASH_FUNCTION = 'hashFunction'

# The apps:property isChangePasswordAtNextLogin of a user entry
USER_CHANGE_PASSWORD = 'isChangePasswordAtNextLogin'

# The apps:property agreedToTerms of a user entry
USER_AGREED_TO_TERMS = 'agreedToTerms'

# The apps:property isSuspended of a user entry
USER_SUSPENDED = 'isSuspended'

# The apps:property isAdmin of a user entry
USER_ADMIN = 'isAdmin'

# The apps:property ipWhitelisted of a user entry
USER_IP_WHITELISTED = 'ipWhitelisted'

# The apps:property quotaInGb of a user entry
USER_QUOTA = 'quotaInGb'

# The apps:property newEmail of a user rename request entry
USER_NEW_EMAIL = 'newEmail'

# The apps:property aliasEmail of an alias entry
ALIAS_EMAIL = 'aliasEmail'
class UserEntry(gdata.apps.apps_property_entry.AppsPropertyEntry):
    """Represents a User in object form."""

    def GetFirstName(self):
        """Get the first name of the User object.

        Returns:
            The first name of this User object as a string or None.
        """
        return self._GetProperty(USER_FIRST_NAME)

    def SetFirstName(self, value):
        """Set the first name of this User object.

        Args:
            value: string The new first name to give this object.
        """
        self._SetProperty(USER_FIRST_NAME, value)

    first_name = pyproperty(GetFirstName, SetFirstName)

    def GetLastName(self):
        """Get the last name of the User object.

        Returns:
            The last name of this User object as a string or None.
        """
        return self._GetProperty(USER_LAST_NAME)

    def SetLastName(self, value):
        """Set the last name of this User object.

        Args:
            value: string The new last name to give this object.
        """
        self._SetProperty(USER_LAST_NAME, value)

    last_name = pyproperty(GetLastName, SetLastName)

    def GetEmail(self):
        """Get the email address of the User object.

        Returns:
            The email address of this User object as a string or None.
        """
        return self._GetProperty(USER_EMAIL)

    def SetEmail(self, value):
        """Set the email address of this User object.

        Args:
            value: string The new email address to give this object.
        """
        self._SetProperty(USER_EMAIL, value)

    email = pyproperty(GetEmail, SetEmail)

    def GetPassword(self):
        """Get the password of the User object.

        Returns:
            The password of this User object as a string or None.
        """
        return self._GetProperty(USER_PASSWORD)

    def SetPassword(self, value):
        """Set the password of this User object.

        Args:
            value: string The new password to give this object.
        """
        self._SetProperty(USER_PASSWORD, value)

    password = pyproperty(GetPassword, SetPassword)

    def GetHashFunction(self):
        """Get the hash function of the User object.

        Returns:
            The hash function of this User object as a string or None.
        """
        return self._GetProperty(USER_HASH_FUNCTION)

    def SetHashFunction(self, value):
        """Set the hash function of this User object.

        Args:
            value: string The new hash function to give this object.
        """
        self._SetProperty(USER_HASH_FUNCTION, value)

    hash_function = pyproperty(GetHashFunction, SetHashFunction)

    def GetChangePasswordAtNextLogin(self):
        """Get the change password at next login flag of the User object.

        Returns:
            The change password at next login flag of this User object as a
            string or None.
        """
        return self._GetProperty(USER_CHANGE_PASSWORD)

    def SetChangePasswordAtNextLogin(self, value):
        """Set the change password at next login flag of this User object.

        Args:
            value: string The new change password at next login flag to give
                this object.
        """
        self._SetProperty(USER_CHANGE_PASSWORD, value)

    change_password_at_next_login = pyproperty(GetChangePasswordAtNextLogin,
                                               SetChangePasswordAtNextLogin)

    def GetAgreedToTerms(self):
        """Get the agreed to terms flag of the User object.

        Returns:
            The agreed to terms flag of this User object as a string or None.
        """
        return self._GetProperty(USER_AGREED_TO_TERMS)

    # NOTE(review): read-only property, yet __init__ assigns to
    # self.agreed_to_terms when the argument is given, which would raise
    # AttributeError. Not changed here to keep the public surface identical
    # to upstream — confirm against the served API before adding a setter.
    agreed_to_terms = pyproperty(GetAgreedToTerms)

    def GetSuspended(self):
        """Get the suspended flag of the User object.

        Returns:
            The suspended flag of this User object as a string or None.
        """
        return self._GetProperty(USER_SUSPENDED)

    def SetSuspended(self, value):
        """Set the suspended flag of this User object.

        Args:
            value: string The new suspended flag to give this object.
        """
        self._SetProperty(USER_SUSPENDED, value)

    suspended = pyproperty(GetSuspended, SetSuspended)

    def GetIsAdmin(self):
        """Get the isAdmin flag of the User object.

        Returns:
            The isAdmin flag of this User object as a string or None.
        """
        return self._GetProperty(USER_ADMIN)

    def SetIsAdmin(self, value):
        """Set the isAdmin flag of this User object.

        Args:
            value: string The new isAdmin flag to give this object.
        """
        self._SetProperty(USER_ADMIN, value)

    is_admin = pyproperty(GetIsAdmin, SetIsAdmin)

    def GetIpWhitelisted(self):
        """Get the ipWhitelisted flag of the User object.

        Returns:
            The ipWhitelisted flag of this User object as a string or None.
        """
        return self._GetProperty(USER_IP_WHITELISTED)

    def SetIpWhitelisted(self, value):
        """Set the ipWhitelisted flag of this User object.

        Args:
            value: string The new ipWhitelisted flag to give this object.
        """
        self._SetProperty(USER_IP_WHITELISTED, value)

    ip_whitelisted = pyproperty(GetIpWhitelisted, SetIpWhitelisted)

    def GetQuota(self):
        """Get the quota of the User object.

        Returns:
            The quota of this User object as a string or None.
        """
        return self._GetProperty(USER_QUOTA)

    def SetQuota(self, value):
        """Set the quota of this User object.

        Args:
            value: string The new quota to give this object.
        """
        self._SetProperty(USER_QUOTA, value)

    # BUG FIX: this was pyproperty(GetQuota, GetQuota) — the getter wired in
    # as the setter — so 'user.quota = x' silently discarded x. The setter
    # must be SetQuota, matching every other property in this class.
    quota = pyproperty(GetQuota, SetQuota)

    def __init__(self, uri=None, email=None, first_name=None, last_name=None,
                 password=None, hash_function=None, change_password=None,
                 agreed_to_terms=None, suspended=None, is_admin=None,
                 ip_whitelisted=None, quota=None, *args, **kwargs):
        """Constructs a new UserEntry object with the given arguments.

        Args:
            uri: string (optional) The uri of of this object for HTTP requests.
            email: string (optional) The email address of the user.
            first_name: string (optional) The first name of the user.
            last_name: string (optional) The last name of the user.
            password: string (optional) The password of the user.
            hash_function: string (optional) The name of the function used to
                hash the password.
            change_password: Boolean (optional) Whether or not the user must
                change password at first login.
            agreed_to_terms: Boolean (optional) Whether or not the user has
                agreed to the Terms of Service.
            suspended: Boolean (optional) Whether or not the user is suspended.
            is_admin: Boolean (optional) Whether or not the user has
                administrator privileges.
            ip_whitelisted: Boolean (optional) Whether or not the user's ip is
                whitelisted.
            quota: string (optional) The value (in GB) of the user's quota.
            args: The other parameters to pass to gdata.entry.GDEntry
                constructor.
            kwargs: The other parameters to pass to gdata.entry.GDEntry
                constructor.
        """
        super(UserEntry, self).__init__(*args, **kwargs)
        if uri:
            self.uri = uri
        if email:
            self.email = email
        if first_name:
            self.first_name = first_name
        if last_name:
            self.last_name = last_name
        if password:
            self.password = password
        if hash_function:
            self.hash_function = hash_function
        if change_password is not None:
            self.change_password_at_next_login = str(change_password)
        if agreed_to_terms is not None:
            self.agreed_to_terms = str(agreed_to_terms)
        if suspended is not None:
            self.suspended = str(suspended)
        if is_admin is not None:
            self.is_admin = str(is_admin)
        if ip_whitelisted is not None:
            self.ip_whitelisted = str(ip_whitelisted)
        if quota:
            self.quota = quota
class UserFeed(gdata.data.GDFeed):
    """Represents a feed of UserEntry objects."""

    # Override entry so that this feed knows how to type its list of entries.
    entry = [UserEntry]
class UserRenameRequest(gdata.apps.apps_property_entry.AppsPropertyEntry):
    """Entry describing a request to rename a user account's email address."""

    def GetNewEmail(self):
        """Return the requested replacement email address.

        Returns:
            The replacement address as a string, or None when unset.
        """
        return self._GetProperty(USER_NEW_EMAIL)

    def SetNewEmail(self, value):
        """Record the replacement email address on this entry.

        Args:
            value: string The replacement address for the target user.
        """
        self._SetProperty(USER_NEW_EMAIL, value)

    new_email = pyproperty(GetNewEmail, SetNewEmail)

    def __init__(self, new_email=None, *args, **kwargs):
        """Build a rename-request entry.

        Args:
            new_email: string (optional) Replacement address for the target
                user; stored only when non-empty.
            args: Forwarded to the gdata.entry.GDEntry constructor.
            kwargs: Forwarded to the gdata.entry.GDEntry constructor.
        """
        super(UserRenameRequest, self).__init__(*args, **kwargs)
        if new_email:
            self.new_email = new_email
class AliasEntry(gdata.apps.apps_property_entry.AppsPropertyEntry):
    """Entry pairing a user's primary address with one alias address."""

    def GetUserEmail(self):
        """Return the primary (user) email address of this alias entry.

        Returns:
            The user address as a string, or None when unset.
        """
        return self._GetProperty(USER_EMAIL)

    def SetUserEmail(self, value):
        """Record the primary (user) email address on this entry.

        Args:
            value: string The user address the alias points at.
        """
        self._SetProperty(USER_EMAIL, value)

    user_email = pyproperty(GetUserEmail, SetUserEmail)

    def GetAliasEmail(self):
        """Return the alias email address of this entry.

        Returns:
            The alias address as a string, or None when unset.
        """
        return self._GetProperty(ALIAS_EMAIL)

    def SetAliasEmail(self, value):
        """Record the alias email address on this entry.

        Args:
            value: string The alias address to store.
        """
        self._SetProperty(ALIAS_EMAIL, value)

    alias_email = pyproperty(GetAliasEmail, SetAliasEmail)

    def __init__(self, user_email=None, alias_email=None, *args, **kwargs):
        """Build an alias entry.

        Args:
            user_email: string (optional) Primary address; stored only when
                non-empty.
            alias_email: string (optional) Alias address; stored only when
                non-empty.
            args: Forwarded to the gdata.entry.GDEntry constructor.
            kwargs: Forwarded to the gdata.entry.GDEntry constructor.
        """
        super(AliasEntry, self).__init__(*args, **kwargs)
        if user_email:
            self.user_email = user_email
        if alias_email:
            self.alias_email = alias_email
class AliasFeed(gdata.data.GDFeed):
    """Represents a feed of AliasEntry objects."""

    # Override entry so that this feed knows how to type its list of entries.
    entry = [AliasEntry]
| apache-2.0 |
jamdin/jdiner-mobile-byte3 | lib/numpy/core/_mx_datetime_parser.py | 84 | 33252 | #-*- coding: latin-1 -*-
"""
Date/Time string parsing module.
This code is a slightly modified version of Parser.py found in mx.DateTime
version 3.0.0
As such, it is subject to the terms of the eGenix public license version 1.1.0.
FIXME: Add license.txt to NumPy
"""
__all__ = ['date_from_string', 'datetime_from_string']
import types
import re
import datetime as dt
class RangeError(Exception):
    # NOTE(review): no raise site is visible in this chunk; judging by the
    # name it flags date/time components outside their valid range --
    # confirm against the rest of the module.
    pass

# Enable to produce debugging output
_debug = 0
# REs for matching date and time parts in a string; These REs
# parse a superset of ARPA, ISO, American and European style dates.
# Timezones are supported via the Timezone submodule.
_year = '(?P<year>-?\d+\d(?!:))'
_fullyear = '(?P<year>-?\d+\d\d(?!:))'
_year_epoch = '(?:' + _year + '(?P<epoch> *[ABCDE\.]+)?)'
_fullyear_epoch = '(?:' + _fullyear + '(?P<epoch> *[ABCDE\.]+)?)'
_relyear = '(?:\((?P<relyear>[-+]?\d+)\))'
_month = '(?P<month>\d?\d(?!:))'
_fullmonth = '(?P<month>\d\d(?!:))'
_litmonth = ('(?P<litmonth>'
'jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec|'
'mär|mae|mrz|mai|okt|dez|'
'fev|avr|juin|juil|aou|aoû|déc|'
'ene|abr|ago|dic|'
'out'
')[a-z,\.;]*')
litmonthtable = {
# English
'jan':1, 'feb':2, 'mar':3, 'apr':4, 'may':5, 'jun':6,
'jul':7, 'aug':8, 'sep':9, 'oct':10, 'nov':11, 'dec':12,
# German
'mär':3, 'mae':3, 'mrz':3, 'mai':5, 'okt':10, 'dez':12,
# French
'fev':2, 'avr':4, 'juin':6, 'juil':7, 'aou':8, 'aoû':8,
'déc':12,
# Spanish
'ene':1, 'abr':4, 'ago':8, 'dic':12,
# Portuguese
'out':10,
}
_relmonth = '(?:\((?P<relmonth>[-+]?\d+)\))'
_day = '(?P<day>\d?\d(?!:))'
_usday = '(?P<day>\d?\d(?!:))(?:st|nd|rd|th|[,\.;])?'
_fullday = '(?P<day>\d\d(?!:))'
_litday = ('(?P<litday>'
'mon|tue|wed|thu|fri|sat|sun|'
'die|mit|don|fre|sam|son|'
'lun|mar|mer|jeu|ven|sam|dim|'
'mie|jue|vie|sab|dom|'
'pri|seg|ter|cua|qui'
')[a-z]*')
litdaytable = {
# English
'mon':0, 'tue':1, 'wed':2, 'thu':3, 'fri':4, 'sat':5, 'sun':6,
# German
'die':1, 'mit':2, 'don':3, 'fre':4, 'sam':5, 'son':6,
# French
'lun':0, 'mar':1, 'mer':2, 'jeu':3, 'ven':4, 'sam':5, 'dim':6,
# Spanish
'mie':2, 'jue':3, 'vie':4, 'sab':5, 'dom':6,
# Portuguese
'pri':0, 'seg':1, 'ter':2, 'cua':3, 'qui':4,
}
_relday = '(?:\((?P<relday>[-+]?\d+)\))'
_hour = '(?P<hour>[012]?\d)'
_minute = '(?P<minute>[0-6]\d)'
_second = '(?P<second>[0-6]\d(?:[.,]\d+)?)'
_days = '(?P<days>\d*\d(?:[.,]\d+)?)'
_hours = '(?P<hours>\d*\d(?:[.,]\d+)?)'
_minutes = '(?P<minutes>\d*\d(?:[.,]\d+)?)'
_seconds = '(?P<seconds>\d*\d(?:[.,]\d+)?)'
_reldays = '(?:\((?P<reldays>[-+]?\d+(?:[.,]\d+)?)\))'
_relhours = '(?:\((?P<relhours>[-+]?\d+(?:[.,]\d+)?)\))'
_relminutes = '(?:\((?P<relminutes>[-+]?\d+(?:[.,]\d+)?)\))'
_relseconds = '(?:\((?P<relseconds>[-+]?\d+(?:[.,]\d+)?)\))'
_sign = '(?:(?P<sign>[-+]) *)'
_week = 'W(?P<week>\d?\d)'
_zone = '(?P<zone>[A-Z]+|[+-]\d\d?:?(?:\d\d)?)'
_ampm = '(?P<ampm>[ap][m.]+)'
_time = (_hour + ':' + _minute + '(?::' + _second + '|[^:]|$) *'
+ _ampm + '? *' + _zone + '?')
_isotime = _hour + ':?' + _minute + ':?' + _second + '? *' + _zone + '?'
_yeardate = _year
_weekdate = _year + '-?(?:' + _week + '-?' + _day + '?)?'
_eurodate = _day + '\.' + _month + '\.' + _year_epoch + '?'
_usdate = _month + '/' + _day + '(?:/' + _year_epoch + '|[^/]|$)'
_altusdate = _month + '-' + _day + '-' + _fullyear_epoch
_isodate = _year + '-' + _month + '-?' + _day + '?(?!:)'
_altisodate = _year + _fullmonth + _fullday + '(?!:)'
_usisodate = _fullyear + '/' + _fullmonth + '/' + _fullday
_litdate = ('(?:'+ _litday + ',? )? *' +
_usday + ' *' +
'[- ] *(?:' + _litmonth + '|'+ _month +') *[- ] *' +
_year_epoch + '?')
_altlitdate = ('(?:'+ _litday + ',? )? *' +
_litmonth + '[ ,.a-z]+' +
_usday +
'(?:[ a-z]+' + _year_epoch + ')?')
_eurlitdate = ('(?:'+ _litday + ',?[ a-z]+)? *' +
'(?:'+ _usday + '[ a-z]+)? *' +
_litmonth +
'(?:[ ,.a-z]+' + _year_epoch + ')?')
_relany = '[*%?a-zA-Z]+'
_relisodate = ('(?:(?:' + _relany + '|' + _year + '|' + _relyear + ')-' +
'(?:' + _relany + '|' + _month + '|' + _relmonth + ')-' +
'(?:' + _relany + '|' + _day + '|' + _relday + '))')
_asctime = ('(?:'+ _litday + ',? )? *' +
_usday + ' *' +
'[- ] *(?:' + _litmonth + '|'+ _month +') *[- ]' +
'(?:[0-9: ]+)' +
_year_epoch + '?')
_relisotime = ('(?:(?:' + _relany + '|' + _hour + '|' + _relhours + '):' +
'(?:' + _relany + '|' + _minute + '|' + _relminutes + ')' +
'(?::(?:' + _relany + '|' + _second + '|' + _relseconds + '))?)')
_isodelta1 = (_sign + '?' +
_days + ':' + _hours + ':' + _minutes + ':' + _seconds)
_isodelta2 = (_sign + '?' +
_hours + ':' + _minutes + ':' + _seconds)
_isodelta3 = (_sign + '?' +
_hours + ':' + _minutes)
_litdelta = (_sign + '?' +
'(?:' + _days + ' *d[a-z]*[,; ]*)?' +
'(?:' + _hours + ' *h[a-z]*[,; ]*)?' +
'(?:' + _minutes + ' *m[a-z]*[,; ]*)?' +
'(?:' + _seconds + ' *s[a-z]*[,; ]*)?')
_litdelta2 = (_sign + '?' +
'(?:' + _days + ' *d[a-z]*[,; ]*)?' +
_hours + ':' + _minutes + '(?::' + _seconds + ')?')
_timeRE = re.compile(_time, re.I)
_isotimeRE = re.compile(_isotime, re.I)
_isodateRE = re.compile(_isodate, re.I)
_altisodateRE = re.compile(_altisodate, re.I)
_usisodateRE = re.compile(_usisodate, re.I)
_yeardateRE = re.compile(_yeardate, re.I)
_eurodateRE = re.compile(_eurodate, re.I)
_usdateRE = re.compile(_usdate, re.I)
_altusdateRE = re.compile(_altusdate, re.I)
_litdateRE = re.compile(_litdate, re.I)
_altlitdateRE = re.compile(_altlitdate, re.I)
_eurlitdateRE = re.compile(_eurlitdate, re.I)
_relisodateRE = re.compile(_relisodate, re.I)
_asctimeRE = re.compile(_asctime, re.I)
_isodelta1RE = re.compile(_isodelta1)
_isodelta2RE = re.compile(_isodelta2)
_isodelta3RE = re.compile(_isodelta3)
_litdeltaRE = re.compile(_litdelta)
_litdelta2RE = re.compile(_litdelta2)
_relisotimeRE = re.compile(_relisotime, re.I)
# Available date parsers; the names correspond to the 'style' values
# reported by _parse_date ('euro', 'us', 'iso', 'altiso', 'usiso', ...).
_date_formats = ('euro',
                 'usiso', 'us', 'altus',
                 'iso', 'altiso',
                 'lit', 'altlit', 'eurlit',
                 'year', 'unknown')

# Available time parsers
_time_formats = ('standard',
                 'iso',
                 'unknown')
# Numeric UTC offset: optional sign, 1-2 hour digits, optional ':',
# optional 2-digit minutes. Any further digits land in 'extra', which
# utc_offset() treats as an illegal offset.
_zoneoffset = ('(?:'
               '(?P<zonesign>[+-])?'
               '(?P<hours>\d\d?)'
               ':?'
               '(?P<minutes>\d\d)?'
               '(?P<extra>\d+)?'
               ')'
               )

_zoneoffsetRE = re.compile(_zoneoffset)
_zonetable = {
# Timezone abbreviations
# Std Summer
# Standards
'UT':0,
'UTC':0,
'GMT':0,
# A few common timezone abbreviations
'CET':1, 'CEST':2, 'CETDST':2, # Central European
'MET':1, 'MEST':2, 'METDST':2, # Mean European
'MEZ':1, 'MESZ':2, # Mitteleuropäische Zeit
'EET':2, 'EEST':3, 'EETDST':3, # Eastern Europe
'WET':0, 'WEST':1, 'WETDST':1, # Western Europe
'MSK':3, 'MSD':4, # Moscow
'IST':5.5, # India
'JST':9, # Japan
'KST':9, # Korea
'HKT':8, # Hong Kong
# US time zones
'AST':-4, 'ADT':-3, # Atlantic
'EST':-5, 'EDT':-4, # Eastern
'CST':-6, 'CDT':-5, # Central
'MST':-7, 'MDT':-6, # Midwestern
'PST':-8, 'PDT':-7, # Pacific
# Australian time zones
'CAST':9.5, 'CADT':10.5, # Central
'EAST':10, 'EADT':11, # Eastern
'WAST':8, 'WADT':9, # Western
'SAST':9.5, 'SADT':10.5, # Southern
# US military time zones
'Z': 0,
'A': 1,
'B': 2,
'C': 3,
'D': 4,
'E': 5,
'F': 6,
'G': 7,
'H': 8,
'I': 9,
'K': 10,
'L': 11,
'M': 12,
'N':-1,
'O':-2,
'P':-3,
'Q':-4,
'R':-5,
'S':-6,
'T':-7,
'U':-8,
'V':-9,
'W':-10,
'X':-11,
'Y':-12
}
def utc_offset(zone):
    """ utc_offset(zonestring)
        Return the UTC time zone offset in minutes.
        zone must be string and can either be given as +-HH:MM,
        +-HHMM, +-HH numeric offset or as time zone
        abbreviation. Daylight saving time must be encoded into the
        zone offset.
        Timezone abbreviations are treated case-insensitive.
        Raises ValueError for malformed or unknown zone strings.
    """
    if not zone:
        # Empty string (or None) is treated as UTC.
        return 0
    uzone = zone.upper()
    if uzone in _zonetable:
        # Table stores offsets in (possibly fractional) hours.
        return _zonetable[uzone]*60
    offset = _zoneoffsetRE.match(zone)
    if not offset:
        # Callable raise form (valid on both Python 2 and 3);
        # fixed typo "unkown" -> "unknown" in the message.
        raise ValueError('wrong format or unknown time zone: "%s"' % zone)
    zonesign,hours,minutes,extra = offset.groups()
    if extra:
        # Trailing digits beyond HH:MM cannot be a valid offset.
        raise ValueError('illegal time zone offset: "%s"' % zone)
    offset = int(hours or 0) * 60 + int(minutes or 0)
    if zonesign == '-':
        offset = -offset
    return offset
def add_century(year):
    """ Sliding window approach to the Y2K problem: adds a suitable
        century to the given year and returns it as integer.
        The window used depends on the current year. If adding the current
        century to the given year gives a year within the range
        current_year-70...current_year+30 [both inclusive], then the
        current century is added. Otherwise the century (current + 1 or
        - 1) producing the least difference is chosen.
    """
    # Query the clock once so the year and century are consistent
    # (the original called dt.datetime.now() twice).
    current_year = dt.datetime.now().year
    # Floor division keeps the century an int on Python 3 as well;
    # identical to the old "/" under Python 2 integer division.
    current_century = (current_year // 100) * 100
    if year > 99:
        # Already a full year: take it as-is.
        return year
    year = year + current_century
    diff = year - current_year
    if -70 <= diff <= 30:
        return year
    elif diff < -70:
        return year + 100
    else:
        return year - 100
def _parse_date(text):
    """
    Parses the date part given in text and returns a tuple
    (text,day,month,year,style) with the following meanings:
    * text gives the original text without the date part
    * day,month,year give the parsed date
    * style gives information about which parser was successful:
    'euro' - the European date parser
    'us' - the US date parser
    'altus' - the alternative US date parser (with '-' instead of '/')
    'iso' - the ISO date parser
    'altiso' - the alternative ISO date parser (without '-')
    'usiso' - US style ISO date parser (yyyy/mm/dd)
    'lit' - the US literal date parser
    'altlit' - the alternative US literal date parser
    'eurlit' - the European literal date parser
    'unknown' - no date part was found, defaultdate was used
    Formats may be set to a tuple of style strings specifying which of the above
    parsers to use and in which order to try them.
    Default is to try all of them in the above order.
    ``defaultdate`` provides the defaults to use in case no date part is found.
    Most other parsers default to the current year January 1 if some of these
    date parts are missing.
    If ``'unknown'`` is not given in formats and the date cannot be parsed,
    a :exc:`ValueError` is raised.
    NOTE(review): despite the mentions of ``formats``/``defaultdate`` above,
    this function only takes ``text``; the parser order is the module-level
    _date_formats and the default date is always "now" -- confirm against
    the original mx.DateTime API this was derived from.
    """
    match = None
    style = ''
    formats = _date_formats
    us_formats=('us', 'altus')
    iso_formats=('iso', 'altiso', 'usiso')
    now=dt.datetime.now
    # Apply parsers in the order given in formats; the first one whose
    # regex matches (and survives the sanity checks) wins.
    for format in formats:
        if format == 'euro':
            # European style date
            match = _eurodateRE.search(text)
            if match is not None:
                day,month,year,epoch = match.groups()
                if year:
                    if len(year) == 2:
                        # Y2K problem:
                        year = add_century(int(year))
                    else:
                        year = int(year)
                else:
                    defaultdate = now()
                    year = defaultdate.year
                if epoch and 'B' in epoch:
                    # BC epoch marker: astronomical year numbering (1 BC == 0)
                    year = -year + 1
                month = int(month)
                day = int(day)
                # Could have mistaken euro format for us style date
                # which uses month, day order
                if month > 12 or month == 0:
                    match = None
                    continue
                break
        elif format == 'year':
            # just a year specified
            match = _yeardateRE.match(text)
            if match is not None:
                year = match.groups()[0]
                if year:
                    if len(year) == 2:
                        # Y2K problem:
                        year = add_century(int(year))
                    else:
                        year = int(year)
                else:
                    defaultdate = now()
                    year = defaultdate.year
                # Year-only dates default to January 1st
                day = 1
                month = 1
                break
        elif format in iso_formats:
            # ISO style date
            if format == 'iso':
                match = _isodateRE.search(text)
            elif format == 'altiso':
                match = _altisodateRE.search(text)
                # Avoid mistaking ISO time parts ('Thhmmss') for dates
                if match is not None:
                    left, right = match.span()
                    if left > 0 and \
                       text[left - 1:left] == 'T':
                        match = None
                        continue
            else:
                match = _usisodateRE.search(text)
            if match is not None:
                year,month,day = match.groups()
                if len(year) == 2:
                    # Y2K problem:
                    year = add_century(int(year))
                else:
                    year = int(year)
                # Default to January 1st
                if not month:
                    month = 1
                else:
                    month = int(month)
                if not day:
                    day = 1
                else:
                    day = int(day)
                break
        elif format in us_formats:
            # US style date
            if format == 'us':
                match = _usdateRE.search(text)
            else:
                match = _altusdateRE.search(text)
            if match is not None:
                month,day,year,epoch = match.groups()
                if year:
                    if len(year) == 2:
                        # Y2K problem:
                        year = add_century(int(year))
                    else:
                        year = int(year)
                else:
                    defaultdate = now()
                    year = defaultdate.year
                if epoch and 'B' in epoch:
                    year = -year + 1
                # Default to 1 if no day is given
                if day:
                    day = int(day)
                else:
                    day = 1
                month = int(month)
                # Could have mistaken us format for euro style date
                # which uses day, month order
                if month > 12 or month == 0:
                    match = None
                    continue
                break
        elif format == 'lit':
            # US style literal date; int conversion of these groups is
            # deferred to the post-processing block below.
            match = _litdateRE.search(text)
            if match is not None:
                litday,day,litmonth,month,year,epoch = match.groups()
                break
        elif format == 'altlit':
            # Alternative US style literal date
            match = _altlitdateRE.search(text)
            if match is not None:
                litday,litmonth,day,year,epoch = match.groups()
                month = '<missing>'
                break
        elif format == 'eurlit':
            # European style literal date
            match = _eurlitdateRE.search(text)
            if match is not None:
                litday,day,litmonth,year,epoch = match.groups()
                month = '<missing>'
                break
        elif format == 'unknown':
            # No date part: use defaultdate
            defaultdate = now()
            year = defaultdate.year
            month = defaultdate.month
            day = defaultdate.day
            style = format
            break
    # Check success
    if match is not None:
        # Remove date from text
        left, right = match.span()
        # Disabled debug output (Python 2 print statement)
        if 0 and _debug:
            print 'parsed date:',repr(text[left:right]),\
                  'giving:',year,month,day
        text = text[:left] + text[right:]
        style = format
    elif not style:
        # Not recognized: raise an error
        raise ValueError, 'unknown date format: "%s"' % text
    # Literal date post-processing: convert the still-string groups
    # captured by the 'lit'/'altlit'/'eurlit' branches above.
    if style in ('lit', 'altlit', 'eurlit'):
        if 0 and _debug: print match.groups()
        # Default to current year, January 1st
        if not year:
            defaultdate = now()
            year = defaultdate.year
        else:
            if len(year) == 2:
                # Y2K problem:
                year = add_century(int(year))
            else:
                year = int(year)
            if epoch and 'B' in epoch:
                year = -year + 1
        if litmonth:
            # litmonthtable is defined earlier in this module and maps
            # lower-cased month names/abbreviations to month numbers.
            litmonth = litmonth.lower()
            try:
                month = litmonthtable[litmonth]
            except KeyError:
                raise ValueError,\
                      'wrong month name: "%s"' % litmonth
        elif month:
            month = int(month)
        else:
            month = 1
        if day:
            day = int(day)
        else:
            day = 1
    #print '_parse_date:',text,day,month,year,style
    return text,day,month,year,style
def _parse_time(text):
    """ Parses a time part given in text and returns a tuple
        (text,hour,minute,second,offset,style) with the following
        meanings:
        * text gives the original text without the time part
        * hour,minute,second give the parsed time
        * offset gives the time zone UTC offset in minutes
        * style gives information about which parser was successful:
        'standard' - the standard parser
        'iso' - the ISO time format parser
        'unknown' - no time part was found
        formats may be set to a tuple specifying the parsers to use:
        'standard' - standard time format with ':' delimiter
        'iso' - ISO time format (superset of 'standard')
        'unknown' - default to 0:00:00, 0 zone offset
        If 'unknown' is not given in formats and the time cannot be
        parsed, a ValueError is raised.
        NOTE(review): like _parse_date, the actual parser order is fixed
        by the module-level _time_formats; there is no formats parameter.
    """
    match = None
    style = ''
    formats=_time_formats
    # Apply parsers in the order given in formats
    for format in formats:
        # Standard format
        if format == 'standard':
            match = _timeRE.search(text)
            if match is not None:
                hour,minute,second,ampm,zone = match.groups()
                style = 'standard'
                break
        # ISO format
        # NOTE(review): this is a plain "if", not "elif"; harmless because
        # a 'standard' match breaks out of the loop, but worth confirming.
        if format == 'iso':
            match = _isotimeRE.search(text)
            if match is not None:
                hour,minute,second,zone = match.groups()
                ampm = None
                style = 'iso'
                break
        # Default handling
        elif format == 'unknown':
            hour,minute,second,offset = 0,0,0.0,0
            style = 'unknown'
            break
    if not style:
        # If no default handling should be applied, raise an error
        raise ValueError, 'unknown time format: "%s"' % text
    # Post-processing
    if match is not None:
        if zone:
            # Convert to UTC offset (minutes)
            offset = utc_offset(zone)
        else:
            offset = 0
        hour = int(hour)
        if ampm:
            if ampm[0] in ('p', 'P'):
                # 12pm = midday
                if hour < 12:
                    hour = hour + 12
            else:
                # 12am = midnight
                if hour >= 12:
                    hour = hour - 12
        if minute:
            minute = int(minute)
        else:
            minute = 0
        if not second:
            second = 0.0
        else:
            # Accept decimal comma as well as decimal point
            if ',' in second:
                second = second.replace(',', '.')
            second = float(second)
        # Remove time from text
        left,right = match.span()
        # Disabled debug output (Python 2 print statement)
        if 0 and _debug:
            print 'parsed time:',repr(text[left:right]),\
                  'giving:',hour,minute,second,offset
        text = text[:left] + text[right:]
    #print '_parse_time:',text,hour,minute,second,offset,style
    return text,hour,minute,second,offset,style
###
def datetime_from_string(text):
    """ datetime_from_string(text, [formats, defaultdate])
        Returns a datetime instance reflecting the date and time given
        in text. In case a timezone is given, the returned instance
        will point to the corresponding UTC time value. Otherwise, the
        value is set as given in the string.
        formats may be set to a tuple of strings specifying which of
        the following parsers to use and in which order to try
        them. Default is to try all of them in the order given below:
        'euro' - the European date parser
        'us' - the US date parser
        'altus' - the alternative US date parser (with '-' instead of '/')
        'iso' - the ISO date parser
        'altiso' - the alternative ISO date parser (without '-')
        'usiso' - US style ISO date parser (yyyy/mm/dd)
        'lit' - the US literal date parser
        'altlit' - the alternative US literal date parser
        'eurlit' - the European literal date parser
        'unknown' - if no date part is found, use defaultdate
        defaultdate provides the defaults to use in case no date part
        is found. Most of the parsers default to the current year
        January 1 if some of these date parts are missing.
        If 'unknown' is not given in formats and the date cannot
        be parsed, a ValueError is raised.
        time_formats may be set to a tuple of strings specifying which
        of the following parsers to use and in which order to try
        them. Default is to try all of them in the order given below:
        'standard' - standard time format HH:MM:SS (with ':' delimiter)
        'iso' - ISO time format (superset of 'standard')
        'unknown' - default to 00:00:00 in case the time format
        cannot be parsed
        Defaults to 00:00:00.00 for time parts that are not included
        in the textual representation.
        If 'unknown' is not given in time_formats and the time cannot
        be parsed, a ValueError is raised.
        NOTE(review): the [formats, defaultdate] arguments documented
        above are not part of this stripped-down signature.
    """
    origtext = text
    # First attempt: time first, then date on the remaining text.
    text,hour,minute,second,offset,timestyle = _parse_time(origtext)
    text,day,month,year,datestyle = _parse_date(text)
    if 0 and _debug:
        print 'tried time/date on %s, date=%s, time=%s' % (origtext,
                                                           datestyle,
                                                           timestyle)
    # If this fails, try the ISO order (date, then time)
    if timestyle in ('iso', 'unknown'):
        text,day,month,year,datestyle = _parse_date(origtext)
        text,hour,minute,second,offset,timestyle = _parse_time(text)
        if 0 and _debug:
            print 'tried ISO on %s, date=%s, time=%s' % (origtext,
                                                         datestyle,
                                                         timestyle)
    try:
        # Split fractional seconds into whole seconds + microseconds;
        # subtracting the offset normalizes the result to UTC.
        microsecond = int(round(1000000 * (second % 1)))
        second = int(second)
        return dt.datetime(year,month,day,hour,minute,second, microsecond) - \
               dt.timedelta(minutes=offset)
    except ValueError, why:
        raise RangeError,\
              'Failed to parse "%s": %s' % (origtext, why)
def date_from_string(text):
    """ date_from_string(text, [formats, defaultdate])
        Returns a datetime instance reflecting the date given in
        text. A possibly included time part is ignored.
        formats and defaultdate work just like for
        datetime_from_string().
        NOTE(review): as with datetime_from_string(), the optional
        arguments shown above are not part of this signature.
    """
    # Only the date components are used; the time-of-day is dropped.
    _text,day,month,year,datestyle = _parse_date(text)
    try:
        return dt.datetime(year,month,day)
    except ValueError, why:
        raise RangeError,\
              'Failed to parse "%s": %s' % (text, why)
def validateDateTimeString(text):
    """ validateDateTimeString(text, [formats, defaultdate])
        Validates the given text and returns 1/0 depending on whether
        text includes parseable date and time values or not.
        formats works just like for datetime_from_string() and defines
        the order of date/time parsers to apply. It defaults to the
        same list of parsers as for datetime_from_string().
        XXX Undocumented !
    """
    # Only ValueError is treated as "invalid"; RangeError (raised by
    # datetime_from_string for out-of-range values) propagates.
    try:
        datetime_from_string(text)
    except ValueError, why:
        return 0
    return 1
def validateDateString(text):
    """ validateDateString(text, [formats, defaultdate])
        Validates the given text and returns 1/0 depending on whether
        text includes a parseable date value or not.
        formats works just like for datetime_from_string() and defines
        the order of date/time parsers to apply. It defaults to the
        same list of parsers as for datetime_from_string().
        XXX Undocumented !
    """
    # Mirrors validateDateTimeString() but only requires a date part.
    try:
        date_from_string(text)
    except ValueError, why:
        return 0
    return 1
### Tests
def _test():
    """ Self-test: parse a fixed list of (input, expected) pairs and
        report any mismatches on stdout (Python 2 print statements).
        Expected value 'ignore' means the input is allowed to fail.
    """
    import sys
    t = dt.datetime.now()
    _date = t.strftime('%Y-%m-%d')
    print 'Testing DateTime Parser...'
    l = [
        # Literal formats
        ('Sun Nov 6 08:49:37 1994', '1994-11-06 08:49:37.00'),
        ('sun nov 6 08:49:37 1994', '1994-11-06 08:49:37.00'),
        ('sUN NOV 6 08:49:37 1994', '1994-11-06 08:49:37.00'),
        ('Sunday, 06-Nov-94 08:49:37 GMT', '1994-11-06 08:49:37.00'),
        ('Sun, 06 Nov 1994 08:49:37 GMT', '1994-11-06 08:49:37.00'),
        ('06-Nov-94 08:49:37', '1994-11-06 08:49:37.00'),
        ('06-Nov-94', '1994-11-06 00:00:00.00'),
        ('06-NOV-94', '1994-11-06 00:00:00.00'),
        ('November 19 08:49:37', '%s-11-19 08:49:37.00' % t.year),
        ('Nov. 9', '%s-11-09 00:00:00.00' % t.year),
        ('Sonntag, der 6. November 1994, 08:49:37 GMT', '1994-11-06 08:49:37.00'),
        ('6. November 2001, 08:49:37', '2001-11-06 08:49:37.00'),
        ('sep 6', '%s-09-06 00:00:00.00' % t.year),
        ('sep 6 2000', '2000-09-06 00:00:00.00'),
        ('September 29', '%s-09-29 00:00:00.00' % t.year),
        ('Sep. 29', '%s-09-29 00:00:00.00' % t.year),
        ('6 sep', '%s-09-06 00:00:00.00' % t.year),
        ('29 September', '%s-09-29 00:00:00.00' % t.year),
        ('29 Sep.', '%s-09-29 00:00:00.00' % t.year),
        ('sep 6 2001', '2001-09-06 00:00:00.00'),
        ('Sep 6, 2001', '2001-09-06 00:00:00.00'),
        ('September 6, 2001', '2001-09-06 00:00:00.00'),
        ('sep 6 01', '2001-09-06 00:00:00.00'),
        ('Sep 6, 01', '2001-09-06 00:00:00.00'),
        ('September 6, 01', '2001-09-06 00:00:00.00'),
        ('30 Apr 2006 20:19:00', '2006-04-30 20:19:00.00'),
        # ISO formats
        ('1994-11-06 08:49:37', '1994-11-06 08:49:37.00'),
        ('010203', '2001-02-03 00:00:00.00'),
        ('2001-02-03 00:00:00.00', '2001-02-03 00:00:00.00'),
        ('2001-02 00:00:00.00', '2001-02-01 00:00:00.00'),
        ('2001-02-03', '2001-02-03 00:00:00.00'),
        ('2001-02', '2001-02-01 00:00:00.00'),
        ('20000824/2300', '2000-08-24 23:00:00.00'),
        ('20000824/0102', '2000-08-24 01:02:00.00'),
        ('20000824', '2000-08-24 00:00:00.00'),
        ('20000824/020301', '2000-08-24 02:03:01.00'),
        ('20000824 020301', '2000-08-24 02:03:01.00'),
        ('20000824T020301', '2000-08-24 02:03:01.00'),
        ('20000824 020301', '2000-08-24 02:03:01.00'),
        ('2000-08-24 02:03:01.00', '2000-08-24 02:03:01.00'),
        ('T020311', '%s 02:03:11.00' % _date),
        ('2003-12-9', '2003-12-09 00:00:00.00'),
        ('03-12-9', '2003-12-09 00:00:00.00'),
        ('003-12-9', '0003-12-09 00:00:00.00'),
        ('0003-12-9', '0003-12-09 00:00:00.00'),
        ('2003-1-9', '2003-01-09 00:00:00.00'),
        ('03-1-9', '2003-01-09 00:00:00.00'),
        ('003-1-9', '0003-01-09 00:00:00.00'),
        ('0003-1-9', '0003-01-09 00:00:00.00'),
        # US formats
        ('06/11/94 08:49:37', '1994-06-11 08:49:37.00'),
        ('11/06/94 08:49:37', '1994-11-06 08:49:37.00'),
        ('9/23/2001', '2001-09-23 00:00:00.00'),
        ('9-23-2001', '2001-09-23 00:00:00.00'),
        ('9/6', '%s-09-06 00:00:00.00' % t.year),
        ('09/6', '%s-09-06 00:00:00.00' % t.year),
        ('9/06', '%s-09-06 00:00:00.00' % t.year),
        ('09/06', '%s-09-06 00:00:00.00' % t.year),
        ('9/6/2001', '2001-09-06 00:00:00.00'),
        ('09/6/2001', '2001-09-06 00:00:00.00'),
        ('9/06/2001', '2001-09-06 00:00:00.00'),
        ('09/06/2001', '2001-09-06 00:00:00.00'),
        ('9-6-2001', '2001-09-06 00:00:00.00'),
        ('09-6-2001', '2001-09-06 00:00:00.00'),
        ('9-06-2001', '2001-09-06 00:00:00.00'),
        ('09-06-2001', '2001-09-06 00:00:00.00'),
        ('2002/05/28 13:10:56.114700 GMT+2', '2002-05-28 13:10:56.114700'),
        ('1970/01/01', '1970-01-01 00:00:00.00'),
        ('20021025 12:00 PM', '2002-10-25 12:00:00.00'),
        ('20021025 12:30 PM', '2002-10-25 12:30:00.00'),
        ('20021025 12:00 AM', '2002-10-25 00:00:00.00'),
        ('20021025 12:30 AM', '2002-10-25 00:30:00.00'),
        ('20021025 1:00 PM', '2002-10-25 13:00:00.00'),
        ('20021025 2:00 AM', '2002-10-25 02:00:00.00'),
        ('Thursday, February 06, 2003 12:40 PM', '2003-02-06 12:40:00.00'),
        ('Mon, 18 Sep 2006 23:03:00', '2006-09-18 23:03:00.00'),
        # European formats
        ('6.11.2001, 08:49:37', '2001-11-06 08:49:37.00'),
        ('06.11.2001, 08:49:37', '2001-11-06 08:49:37.00'),
        ('06.11. 08:49:37', '%s-11-06 08:49:37.00' % t.year),
        #('21/12/2002', '2002-12-21 00:00:00.00'),
        #('21/08/2002', '2002-08-21 00:00:00.00'),
        #('21-08-2002', '2002-08-21 00:00:00.00'),
        #('13/01/03', '2003-01-13 00:00:00.00'),
        #('13/1/03', '2003-01-13 00:00:00.00'),
        #('13/1/3', '2003-01-13 00:00:00.00'),
        #('13/01/3', '2003-01-13 00:00:00.00'),
        # Time only formats
        ('01:03', '%s 01:03:00.00' % _date),
        ('01:03:11', '%s 01:03:11.00' % _date),
        ('01:03:11.50', '%s 01:03:11.500000' % _date),
        ('01:03:11.50 AM', '%s 01:03:11.500000' % _date),
        ('01:03:11.50 PM', '%s 13:03:11.500000' % _date),
        ('01:03:11.50 a.m.', '%s 01:03:11.500000' % _date),
        ('01:03:11.50 p.m.', '%s 13:03:11.500000' % _date),
        # Invalid formats
        ('6..2001, 08:49:37', '%s 08:49:37.00' % _date),
        ('9//2001', 'ignore'),
        ('06--94 08:49:37', 'ignore'),
        ('20-03 00:00:00.00', 'ignore'),
        ('9/2001', 'ignore'),
        ('9-6', 'ignore'),
        ('09-6', 'ignore'),
        ('9-06', 'ignore'),
        ('09-06', 'ignore'),
        ('20000824/23', 'ignore'),
        ('November 1994 08:49:37', 'ignore'),
        ]
    # Add Unicode versions (Python 2 only; the 'unicode' builtin does
    # not exist on Python 3, so the NameError branch skips this there)
    try:
        unicode
    except NameError:
        pass
    else:
        k = []
        for text, result in l:
            k.append((unicode(text), result))
        l.extend(k)
    for text, reference in l:
        try:
            value = datetime_from_string(text)
        # Bare except: any parse failure is turned into its message
        # (NOTE(review): reference is never None in the list above,
        # so the 'continue' branch looks unreachable -- confirm)
        except:
            if reference is None:
                continue
            else:
                value = str(sys.exc_info()[1])
        valid_datetime = validateDateTimeString(text)
        valid_date = validateDateString(text)
        if reference[-3:] == '.00': reference = reference[:-3]
        if str(value) != reference and \
           not reference == 'ignore':
            print 'Failed to parse "%s"' % text
            print ' expected: %s' % (reference or '<exception>')
            print ' parsed: %s' % value
        elif _debug:
            print 'Parsed "%s" successfully' % text
        if _debug:
            if not valid_datetime:
                print ' "%s" failed date/time validation' % text
            if not valid_date:
                print ' "%s" failed date validation' % text
    et = dt.datetime.now()
    # NOTE(review): timedelta.seconds drops days and microseconds;
    # total_seconds() would be the full elapsed time.
    print 'done. (after %f seconds)' % ((et-t).seconds)
# Run the self-test when this module is executed directly.
if __name__ == '__main__':
    _test()
| apache-2.0 |
olympian94/gstudio | gnowsys-ndf/gnowsys_ndf/ndf/management/commands/log_script.py | 11 | 10518 | from django.core.management.base import BaseCommand, CommandError
import os
# Shared log target: every log_* helper below appends to this module-level
# file handle; the log_iteration_*_file_start helpers reopen it on other paths.
log_file_path = os.path.join(os.path.dirname(__file__), '../../static/ndf/wikidata/iteration_1.txt')
my_log = open(log_file_path, "w")
def log_class_created(label, log_flag):
    """
    Record in the log file that a Class was created.
    Parameters being passed -
    1)label- name of the item
    2)log_flag - controls indentation to make the log file readable
    """
    global my_log
    # One leading "=" plus log_flag more (assumes log_flag >= 0; the
    # original counting loop never terminated for negative values).
    marker = "=" * (log_flag + 1)
    padded = u' '.join((label, ' '))
    mylabel = padded.encode('utf-8').strip()
    my_log.write(marker + (mylabel) + "---Class CREATED\n")
def log_class_exists(label, log_flag):
    """
    Record in the log file that a Class already exists.
    Parameters being passed -
    1)label- name of the item
    2)log_flag - controls indentation to make the log file readable
    """
    global my_log
    # One leading "=" plus log_flag more (assumes log_flag >= 0).
    marker = "=" * (log_flag + 1)
    padded = u' '.join((label, ' '))
    mylabel = padded.encode('utf-8').strip()
    my_log.write(marker + unicode(mylabel) + "---Class EXISTS\n")
def log_topic_created(label, log_flag):
    """
    Function to write message in log file when topic is created.
    Parameters being passed -
    1)label- name of the item
    2)log_flag - controls indentation to make the log file readable
    """
    global my_log
    captcha = "#"
    while log_flag != 0:
        captcha += "#"
        log_flag-=1
    mylabel = u' '.join((label, ' ')).encode('utf-8').strip()
    try:
        my_log.write(str(captcha) + unicode(mylabel) + "---Topic CREATED\n")
    except UnicodeDecodeError:
        # unicode() cannot decode non-ASCII bytes; log a placeholder.
        # Fixed typo in the message: "cannnot" -> "cannot".
        my_log.write(str(captcha) + str("Label cannot be printed") + "---Topic CREATED\n")
def log_topic_exists(label, log_flag):
    """
    Function to write message in log file if topic already exists.
    Parameters being passed -
    1)label- name of the item
    2)log_flag - controls indentation to make the log file readable
    """
    global my_log
    captcha = "#"
    while log_flag != 0:
        captcha += "#"
        log_flag-=1
    mylabel = u' '.join((label, ' ')).encode('utf-8').strip()
    try:
        # Fixed: this helper reports an EXISTING topic, but the original
        # success path wrote "---Topic CREATED" (the except branch and all
        # sibling *_exists helpers write EXISTS).
        my_log.write(str(captcha) + unicode(mylabel) + "---Topic EXISTS\n")
    except UnicodeDecodeError:
        # Fixed typo in the message: "cannnot" -> "cannot".
        my_log.write(str(captcha) + str("Label cannot be printed") + "---Topic EXISTS\n")
def log_attributeType_created(label, log_flag):
    """
    Record in the log file that an AttributeType was created.
    Parameters being passed -
    1)label- name of the item
    2)log_flag - controls indentation to make the log file readable
    """
    global my_log
    # log_flag spaces followed by the "-" marker (assumes log_flag >= 1;
    # the original counting loop never terminated below that).
    pad = " " * log_flag + "-"
    padded = u' '.join((label, ' '))
    mylabel = padded.encode('utf-8').strip()
    my_log.write(pad + unicode(mylabel) + "---AttributeType CREATED\n")
def log_attributeType_exists(label, log_flag):
    """
    Record in the log file that an AttributeType already exists.
    Parameters being passed -
    1)label- name of the item
    2)log_flag - controls indentation to make the log file readable
    """
    global my_log
    # log_flag spaces followed by the "-" marker (assumes log_flag >= 1).
    pad = " " * log_flag + "-"
    padded = u' '.join((label, ' '))
    mylabel = padded.encode('utf-8').strip()
    my_log.write(pad + unicode(mylabel) + "---AttributeType EXISTS\n")
def log_attribute_created(label, log_flag):
    """
    Record in the log file that an Attribute was created.
    Parameters being passed -
    1)label- name of the item
    2)log_flag - controls indentation to make the log file readable
    """
    global my_log
    # log_flag spaces followed by the "@" marker (assumes log_flag >= 1).
    pad = " " * log_flag + "@"
    padded = u' '.join((label, ' '))
    mylabel = padded.encode('utf-8').strip()
    my_log.write(pad + unicode(mylabel) + "---Attribute CREATED\n")
def log_attribute_exists(label, log_flag):
    """
    Record in the log file that an Attribute already exists.
    Parameters being passed -
    1)label- name of the item
    2)log_flag - controls indentation to make the log file readable
    """
    global my_log
    # log_flag spaces followed by the "@" marker (assumes log_flag >= 1).
    pad = " " * log_flag + "@"
    padded = u' '.join((label, ' '))
    mylabel = padded.encode('utf-8').strip()
    my_log.write(pad + unicode(mylabel) + "---Attribute EXISTS\n")
def log_relationType_created(label, log_flag):
    """
    Record in the log file that a RelationType was created.
    Parameters being passed -
    1)label- name of the item
    2)log_flag - controls indentation to make the log file readable
    """
    global my_log
    # log_flag spaces followed by the "$" marker (assumes log_flag >= 1).
    pad = " " * log_flag + "$"
    padded = u' '.join((label, ' '))
    mylabel = padded.encode('utf-8').strip()
    my_log.write(pad + unicode(mylabel) + "---RelationType CREATED\n")
def log_relationType_exists(label, log_flag):
    """
    Record in the log file that a RelationType already exists.
    Parameters being passed -
    1)label- name of the item
    2)log_flag - controls indentation to make the log file readable
    """
    global my_log
    # log_flag spaces followed by the "$" marker (assumes log_flag >= 1).
    pad = " " * log_flag + "$"
    padded = u' '.join((label, ' '))
    mylabel = padded.encode('utf-8').strip()
    my_log.write(pad + unicode(mylabel) + "---RelationType EXISTS\n")
def log_relation_created(label, log_flag):
    """
    Record in the log file that a Relation was created.
    Parameters being passed -
    1)label- name of the item
    2)log_flag - controls indentation to make the log file readable
    """
    global my_log
    # log_flag spaces followed by the "*" marker (assumes log_flag >= 1).
    pad = " " * log_flag + "*"
    padded = u' '.join((label, ' '))
    mylabel = padded.encode('utf-8').strip()
    my_log.write(pad + unicode(mylabel) + "---Relation CREATED\n")
def log_relation_exists(label, log_flag):
    """
    Record in the log file that a Relation already exists.
    Parameters being passed -
    1)label- name of the item
    2)log_flag - controls indentation to make the log file readable
    """
    global my_log
    # log_flag spaces followed by the "*" marker (assumes log_flag >= 1).
    pad = " " * log_flag + "*"
    padded = u' '.join((label, ' '))
    mylabel = padded.encode('utf-8').strip()
    my_log.write(pad + unicode(mylabel) + "---Relation EXISTS\n")
def log_inner_topic_start(log_flag):
    """
    Write a separator marking the start of an inner topic.
    1)log_flag - controls indentation to make the log file readable
    """
    global my_log
    my_log.write("\n")
    # One leading "-" plus log_flag more (assumes log_flag >= 0).
    rule = "-" * (log_flag + 1)
    my_log.write(rule + "-----------------------------------------------------------------------\n")
def log_inner_topic_end(log_flag):
    """
    Write a separator marking the end of an inner topic.
    1)log_flag - controls indentation to make the log file readable
    """
    global my_log
    my_log.write("\n")
    # One leading "-" plus log_flag more (assumes log_flag >= 0).
    rule = "-" * (log_flag + 1)
    my_log.write(rule + "_______________________________________________________________________\n")
def log_class_done(log_flag):
    """
    Write a separator marking the end of a class section.
    1)log_flag - controls indentation to make the log file readable
    """
    global my_log
    my_log.write("\n")
    # One leading "_" plus log_flag more (assumes log_flag >= 0).
    rule = "_" * (log_flag + 1)
    my_log.write(rule + "_______________________________________________________________________\n")
def log_outer_topic(log_flag):
    """
    Write a double separator marking an outer topic boundary.
    1)log_flag - controls indentation to make the log file readable
    """
    global my_log
    my_log.write("\n")
    # One leading "-" plus log_flag more (assumes log_flag >= 0); the
    # dashed rule is written twice to make the boundary stand out.
    rule = "-" * (log_flag + 1)
    my_log.write(rule + "-----------------------------------------------------------------------\n")
    my_log.write(rule + "-----------------------------------------------------------------------\n")
def log_iteration_1_file_start():
    """
    Begin logging for Iteration 1.
    Reopens static/ndf/wikidata/iteration_1.txt and writes the header.
    """
    global my_log
    my_log.close()
    target = os.path.join(os.path.dirname(__file__), '../../static/ndf/wikidata/iteration_1.txt')
    my_log = open(target, "w")
    my_log.write("Iteration 1. Creating the GSystemType Classes\n\n")
def log_iteration_1_file_complete():
    """
    Finish Iteration 1: write the footer and close the log file.
    """
    # NOTE(review): unlike its siblings this helper has no "global my_log"
    # statement; it only reads the module global, so none is needed.
    footer = "\nEnd of file\n"
    my_log.write(footer)
    my_log.close()
def log_iteration_2_file_start():
    """
    Begin logging for Iteration 2.
    Reopens static/ndf/wikidata/iteration_2.txt and writes the header.
    """
    global my_log
    my_log.close()
    target = os.path.join(os.path.dirname(__file__), '../../static/ndf/wikidata/iteration_2.txt')
    my_log = open(target, "w")
    my_log.write("Iteration 2. Creating the GSystem Topics - WikiTopics and their attributes\n\n")
def log_iteration_2_file_complete():
    """
    Finish Iteration 2: write the footer and close the log file.
    """
    global my_log
    footer = "\nEnd of file\n"
    my_log.write(footer)
    my_log.close()
def log_iteration_3_file_start():
    """
    Begin logging for Iteration 3.
    Reopens static/ndf/wikidata/iteration_3.txt and writes the header.
    """
    global my_log
    my_log.close()
    target = os.path.join(os.path.dirname(__file__), '../../static/ndf/wikidata/iteration_3.txt')
    my_log = open(target, "w")
    my_log.write("Iteration 3. Creating the Relationtypes and GRelations. The Topics have already been created.\n\n")
def log_iteration_3_file_complete():
    """
    Finish Iteration 3: write the footer and close the log file.
    """
    global my_log
    footer = "\nEnd of file\n"
    my_log.write(footer)
    my_log.close()
# Django management command entry point (invoked via manage.py).
# NOTE: uses a Python 2 print statement; this module predates Python 3.
class Command(BaseCommand):
    def handle(self, *args, **options):
        """
        This is the default method required to make this file run as a script in Django.
        """
        print "\n Yeah the log_script is working. \n"
| agpl-3.0 |
Designist/sympy | sympy/polys/polyroots.py | 37 | 32381 | """Algorithms for computing symbolic roots of polynomials. """
from __future__ import print_function, division
import math
from sympy.core.symbol import Dummy, Symbol, symbols
from sympy.core import S, I, pi
from sympy.core.compatibility import ordered
from sympy.core.mul import expand_2arg, Mul
from sympy.core.power import Pow
from sympy.core.relational import Eq
from sympy.core.sympify import sympify
from sympy.core.numbers import Rational, igcd, comp
from sympy.core.exprtools import factor_terms
from sympy.core.logic import fuzzy_not
from sympy.ntheory import divisors, isprime, nextprime
from sympy.functions import exp, sqrt, im, cos, acos, Piecewise
from sympy.functions.elementary.miscellaneous import root
from sympy.polys.polytools import Poly, cancel, factor, gcd_list, discriminant
from sympy.polys.specialpolys import cyclotomic_poly
from sympy.polys.polyerrors import (PolynomialError, GeneratorsNeeded,
DomainError)
from sympy.polys.polyquinticconst import PolyQuintic
from sympy.polys.rationaltools import together
from sympy.simplify import simplify, powsimp
from sympy.utilities import public
from sympy.core.compatibility import reduce, range
def roots_linear(f):
    """Returns a list of roots of a linear polynomial."""
    # a1*x + a0 == 0  =>  x == -a0/a1
    value = -f.nth(0)/f.nth(1)
    domain = f.get_domain()
    if not domain.is_Numerical:
        # Symbolic coefficients: present the root in a canonical form.
        value = factor(value) if domain.is_Composite else simplify(value)
    return [value]
def roots_quadratic(f):
    """Returns a list of roots of a quadratic polynomial. If the domain is ZZ
    then the roots will be sorted with negatives coming before positives.
    The ordering will be the same for any numerical coefficients as long as
    the assumptions tested are correct, otherwise the ordering will not be
    sorted (but will be canonical).
    """
    a, b, c = f.all_coeffs()
    dom = f.get_domain()
    def _sqrt(d):
        # remove squares from square root since both will be represented
        # in the results; a similar thing is happening in roots() but
        # must be duplicated here because not all quadratics are binomials
        co = []
        other = []
        for di in Mul.make_args(d):
            if di.is_Pow and di.exp.is_Integer and di.exp % 2 == 0:
                co.append(Pow(di.base, di.exp//2))
            else:
                other.append(di)
        if co:
            d = Mul(*other)
            co = Mul(*co)
            return co*sqrt(d)
        return sqrt(d)
    def _simplify(expr):
        # Canonicalize symbolic coefficients: factor over composite
        # domains, otherwise use the generic simplifier.
        if dom.is_Composite:
            return factor(expr)
        else:
            return simplify(expr)
    if c is S.Zero:
        # x*(a*x + b) == 0: one root is 0, the other -b/a
        r0, r1 = S.Zero, -b/a
        if not dom.is_Numerical:
            r1 = _simplify(r1)
        elif r1.is_negative:
            # keep negative root first for numerical domains
            r0, r1 = r1, r0
    elif b is S.Zero:
        # a*x**2 + c == 0: roots are +-sqrt(-c/a)
        r = -c/a
        if not dom.is_Numerical:
            r = _simplify(r)
        R = _sqrt(r)
        r0 = -R
        r1 = R
    else:
        # General case via the quadratic formula: B +- sqrt(d)/(2a)
        d = b**2 - 4*a*c
        A = 2*a
        B = -b/A
        if not dom.is_Numerical:
            d = _simplify(d)
            B = _simplify(B)
        D = factor_terms(_sqrt(d)/A)
        r0 = B - D
        r1 = B + D
        if a.is_negative:
            r0, r1 = r1, r0
        elif not dom.is_Numerical:
            r0, r1 = [expand_2arg(i) for i in (r0, r1)]
    return [r0, r1]
def roots_cubic(f, trig=False):
    """Returns a list of roots of a cubic polynomial.
    When ``trig`` is True and the discriminant is known positive,
    the three real roots are returned in trigonometric form.
    References
    ==========
    [1] https://en.wikipedia.org/wiki/Cubic_function, General formula for roots,
    (accessed November 17, 2014).
    """
    if trig:
        # Trigonometric method: valid when the discriminant D > 0
        # (three distinct real roots).
        a, b, c, d = f.all_coeffs()
        p = (3*a*c - b**2)/3/a**2
        q = (2*b**3 - 9*a*b*c + 27*a**2*d)/(27*a**3)
        D = 18*a*b*c*d - 4*b**3*d + b**2*c**2 - 4*a*c**3 - 27*a**2*d**2
        # "== True" deliberately distinguishes a provably-positive D
        # from an undecidable symbolic comparison.
        if (D > 0) == True:
            rv = []
            for k in range(3):
                rv.append(2*sqrt(-p/3)*cos(acos(3*q/2/p*sqrt(-3/p))/3 - k*2*pi/3))
            return [i - b/3/a for i in rv]
    # Cardano's method on the monic polynomial x**3 + a*x**2 + b*x + c
    _, a, b, c = f.monic().all_coeffs()
    if c is S.Zero:
        # Zero constant term: factor out x and solve the quadratic.
        x1, x2 = roots([1, a, b], multiple=True)
        return [x1, S.Zero, x2]
    # Depressed cubic t**3 + p*t + q via the shift x = t - a/3
    p = b - a**2/3
    q = c - a*b/3 + 2*a**3/27
    pon3 = p/3
    aon3 = a/3
    u1 = None
    if p is S.Zero:
        if q is S.Zero:
            # Triple root
            return [-aon3]*3
        if q.is_real:
            if q.is_positive:
                u1 = -root(q, 3)
            elif q.is_negative:
                u1 = root(-q, 3)
    elif q is S.Zero:
        # t*(t**2 + p) == 0
        y1, y2 = roots([1, 0, p], multiple=True)
        return [tmp - aon3 for tmp in [y1, S.Zero, y2]]
    elif q.is_real and q.is_negative:
        u1 = -root(-q/2 + sqrt(q**2/4 + pon3**3), 3)
    # coeff is the imaginary part of the primitive cube roots of unity
    coeff = I*sqrt(3)/2
    if u1 is None:
        # Sign of q could not be decided: fall back to the general
        # (Wikipedia) formula with explicit cube roots of unity.
        u1 = S(1)
        u2 = -S.Half + coeff
        u3 = -S.Half - coeff
        a, b, c, d = S(1), a, b, c
        D0 = b**2 - 3*a*c
        D1 = 2*b**3 - 9*a*b*c + 27*a**2*d
        C = root((D1 + sqrt(D1**2 - 4*D0**3))/2, 3)
        return [-(b + uk*C + D0/C/uk)/3/a for uk in [u1, u2, u3]]
    u2 = u1*(-S.Half + coeff)
    u3 = u1*(-S.Half - coeff)
    if p is S.Zero:
        return [u1 - aon3, u2 - aon3, u3 - aon3]
    soln = [
        -u1 + pon3/u1 - aon3,
        -u2 + pon3/u2 - aon3,
        -u3 + pon3/u3 - aon3
    ]
    return soln
def _roots_quartic_euler(p, q, r, a):
    """
    Descartes-Euler solution of the quartic equation

    Parameters
    ==========

    p, q, r: coefficients of ``x**4 + p*x**2 + q*x + r``
    a: shift of the roots

    Notes
    =====
    This is a helper function for ``roots_quartic``.

    Look for solutions of the form ::

      ``x1 = sqrt(R) - sqrt(A + B*sqrt(R))``
      ``x2 = -sqrt(R) - sqrt(A - B*sqrt(R))``
      ``x3 = -sqrt(R) + sqrt(A - B*sqrt(R))``
      ``x4 = sqrt(R) + sqrt(A + B*sqrt(R))``

    To satisfy the quartic equation one must have
    ``p = -2*(R + A); q = -4*B*R; r = (R - A)**2 - B**2*R``
    so that ``R`` must satisfy the Descartes-Euler resolvent equation
    ``64*R**3 + 32*p*R**2 + (4*p**2 - 16*r)*R - q**2 = 0``

    If the resolvent does not have a rational solution, return None;
    in that case it is likely that the Ferrari method gives a simpler
    solution.

    Examples
    ========

    >>> from sympy import S
    >>> from sympy.polys.polyroots import _roots_quartic_euler
    >>> p, q, r = -S(64)/5, -S(512)/125, -S(1024)/3125
    >>> _roots_quartic_euler(p, q, r, S(0))[0]
    -sqrt(32*sqrt(5)/125 + 16/5) + 4*sqrt(5)/5
    """
    # solve the resolvent equation
    x = Symbol('x')
    eq = 64*x**3 + 32*p*x**2 + (4*p**2 - 16*r)*x - q**2
    xsols = list(roots(Poly(eq, x), cubics=False).keys())
    # Keep only rational resolvent roots; otherwise bail out and let the
    # caller fall back to the Ferrari method.
    xsols = [sol for sol in xsols if sol.is_rational]
    if not xsols:
        return None
    # Any rational solution works; take the largest.
    R = max(xsols)
    c1 = sqrt(R)
    B = -q*c1/(4*R)
    A = -R - p/2
    c2 = sqrt(A + B)
    c3 = sqrt(A - B)
    # Shift all four roots by -a (the caller depressed the quartic).
    return [c1 - c2 - a, -c1 - c3 - a, -c1 + c3 - a, c1 + c2 - a]
def roots_quartic(f):
    r"""
    Returns a list of roots of a quartic polynomial.

    There are many references for solving quartic expressions available [1-5].
    This reviewer has found that many of them require one to select from among
    2 or more possible sets of solutions and that some solutions work when one
    is searching for real roots but don't work when searching for complex roots
    (though this is not always stated clearly). The following routine has been
    tested and found to be correct for 0, 2 or 4 complex roots.

    The quasisymmetric case solution [6] looks for quartics that have the form
    `x**4 + A*x**3 + B*x**2 + C*x + D = 0` where `(C/A)**2 = D`.

    Although no general solution that is always applicable for all
    coefficients is known to this reviewer, certain conditions are tested
    to determine the simplest 4 expressions that can be returned:
      1) `f = c + a*(a**2/8 - b/2) == 0`
      2) `g = d - a*(a*(3*a**2/256 - b/16) + c/4) = 0`
      3) if `f != 0` and `g != 0` and `p = -d + a*c/4 - b**2/12` then
        a) `p == 0`
        b) `p != 0`

    Examples
    ========

    >>> from sympy import Poly, symbols, I
    >>> from sympy.polys.polyroots import roots_quartic

    >>> r = roots_quartic(Poly('x**4-6*x**3+17*x**2-26*x+20'))

    >>> # 4 complex roots: 1+-I*sqrt(3), 2+-I
    >>> sorted(str(tmp.evalf(n=2)) for tmp in r)
    ['1.0 + 1.7*I', '1.0 - 1.7*I', '2.0 + 1.0*I', '2.0 - 1.0*I']

    References
    ==========

    1. http://mathforum.org/dr.math/faq/faq.cubic.equations.html
    2. http://en.wikipedia.org/wiki/Quartic_function#Summary_of_Ferrari.27s_method
    3. http://planetmath.org/encyclopedia/GaloisTheoreticDerivationOfTheQuarticFormula.html
    4. http://staff.bath.ac.uk/masjhd/JHD-CA.pdf
    5. http://www.albmath.org/files/Math_5713.pdf
    6. http://www.statemaster.com/encyclopedia/Quartic-equation
    7. eqworld.ipmnet.ru/en/solutions/ae/ae0108.pdf
    """
    # Work with the monic form x**4 + a*x**3 + b*x**2 + c*x + d.
    _, a, b, c, d = f.monic().all_coeffs()

    if not d:
        # Zero constant term: x = 0 plus the roots of the cubic factor.
        return [S.Zero] + roots([1, a, b, c], multiple=True)
    elif (c/a)**2 == d:
        # Quasisymmetric case [6]: factor into two quadratics x**2 - z*x + m.
        x, m = f.gen, c/a

        g = Poly(x**2 + a*x + b - 2*m, x)

        z1, z2 = roots_quadratic(g)

        h1 = Poly(x**2 - z1*x + m, x)
        h2 = Poly(x**2 - z2*x + m, x)

        r1 = roots_quadratic(h1)
        r2 = roots_quadratic(h2)

        return r1 + r2
    else:
        # Depress the quartic: x = y - a/4 gives y**4 + e*y**2 + f*y + g.
        # Note: the original computed a2/e/f/g twice with identical
        # expressions; the redundant recomputation has been removed.
        a2 = a**2
        e = b - 3*a2/8
        f = c + a*(a2/8 - b/2)
        g = d - a*(a*(3*a2/256 - b/16) + c/4)
        aon4 = a/4

        if f is S.Zero:
            # Biquadratic: y**4 + e*y**2 + g = 0, solve for y**2.
            y1, y2 = [sqrt(tmp) for tmp in
                      roots([1, e, g], multiple=True)]
            return [tmp - aon4 for tmp in [-y1, -y2, y1, y2]]
        if g is S.Zero:
            # y = 0 is a root of the depressed quartic.
            y = [S.Zero] + roots([1, 0, e, f], multiple=True)
            return [tmp - aon4 for tmp in y]
        else:
            # Descartes-Euler method, see [7]
            sols = _roots_quartic_euler(e, f, g, aon4)
            if sols:
                return sols
            # Ferrari method, see [1, 2]
            p = -e**2/12 - g
            q = -e**3/108 + e*g/3 - f**2/8
            TH = Rational(1, 3)

            def _ans(y):
                # Assemble the four roots from a solution y of the resolvent.
                w = sqrt(e + 2*y)
                arg1 = 3*e + 2*y
                arg2 = 2*f/w
                ans = []
                for s in [-1, 1]:
                    root = sqrt(-(arg1 + s*arg2))
                    for t in [-1, 1]:
                        ans.append((s*w - t*root)/2 - aon4)
                return ans

            # p == 0 case
            y1 = -5*e/6 - q**TH
            if p.is_zero:
                return _ans(y1)

            # if p != 0 then u below is not 0
            root = sqrt(q**2/4 + p**3/27)
            r = -q/2 + root  # or -q/2 - root
            u = r**TH  # primary root of solve(x**3 - r, x)
            y2 = -5*e/6 + u - p/u/3
            if fuzzy_not(p.is_zero):
                return _ans(y2)

            # Could not decide whether p is zero: return a Piecewise so the
            # caller can sort it out once the coefficient values are known.
            return [Piecewise((a1, Eq(p, 0)), (a2, True))
                    for a1, a2 in zip(_ans(y1), _ans(y2))]
def roots_binomial(f):
    """Returns a list of roots of a binomial polynomial. If the domain is ZZ
    then the roots will be sorted with negatives coming before positives.
    The ordering will be the same for any numerical coefficients as long as
    the assumptions tested are correct, otherwise the ordering will not be
    sorted (but will be canonical).
    """
    n = f.degree()

    # Binomial a*x**n + b: the roots are the n-th roots of -b/a.
    a, b = f.nth(n), f.nth(0)
    base = -cancel(b/a)
    alpha = root(base, n)

    if alpha.is_number:
        alpha = alpha.expand(complex=True)

    # define some parameters that will allow us to order the roots.
    # If the domain is ZZ this is guaranteed to return roots sorted
    # with reals before non-real roots and non-real sorted according
    # to real part and imaginary part, e.g. -1, 1, -1 + I, 2 - I
    neg = base.is_negative
    even = n % 2 == 0
    if neg:
        # `big` records whether |base| < 1, which affects the sort order
        # of conjugate pairs.  `even == True` (not just truthy) guards the
        # three-valued is_positive result.
        if even == True and (base + 1).is_positive:
            big = True
        else:
            big = False

    # get the indices in the right order so the computed
    # roots will be sorted when the domain is ZZ
    ks = []
    imax = n//2
    if even:
        ks.append(imax)
        imax -= 1
    if not neg:
        ks.append(0)
    for i in range(imax, 0, -1):
        if neg:
            ks.extend([i, -i])
        else:
            ks.extend([-i, i])
    if neg:
        ks.append(0)
        if big:
            # NOTE(review): `pair` is reversed but never written back into
            # `ks`, so this loop appears to be a no-op — presumably
            # `ks[i: i + 2] = pair` was intended.  Confirm against the
            # sorted-ordering guarantee in the docstring before changing.
            for i in range(0, len(ks), 2):
                pair = ks[i: i + 2]
                pair = list(reversed(pair))

    # compute the roots
    roots, d = [], 2*I*pi/n
    for k in ks:
        # Multiply the principal root by each n-th root of unity.
        zeta = exp(k*d).expand(complex=True)
        roots.append((alpha*zeta).expand(power_base=False))

    return roots
def _inv_totient_estimate(m):
    """Find ``(L, U)`` such that ``L <= phi^-1(m) <= U``.

    Examples
    ========

    >>> from sympy.polys.polyroots import _inv_totient_estimate

    >>> _inv_totient_estimate(192)
    (192, 840)
    >>> _inv_totient_estimate(400)
    (400, 1750)
    """
    # Primes p that can divide an inverse totient of m satisfy (p - 1) | m.
    candidates = [d + 1 for d in divisors(m) if isprime(d + 1)]

    prod_primes = 1
    prod_less_one = 1
    for p in candidates:
        prod_primes *= p
        prod_less_one *= p - 1

    lower = m
    upper = int(math.ceil(m*(float(prod_primes)/prod_less_one)))

    # Tighten the upper bound using consecutive small primes whose running
    # product stays within the first estimate.
    running = 2
    q = 2
    small = []
    while running <= upper:
        q = nextprime(q)
        small.append(q)
        running *= q
    running //= q  # the last prime pushed the product past the bound

    denom = 1
    for q in small[:-1]:
        denom *= q - 1

    upper = int(math.ceil(m*(float(running)/denom)))

    return lower, upper
def roots_cyclotomic(f, factor=False):
    """Compute roots of cyclotomic polynomials.

    If ``factor`` is False the roots are returned directly as complex
    exponentials; otherwise they are read off from the linear factors of
    ``f`` over the extension by a primitive root of -1.
    """
    # f is assumed cyclotomic; find its index n.  The degree of the n-th
    # cyclotomic polynomial is phi(n), so n lies within the inverse-totient
    # bounds (L, U) of deg(f).
    L, U = _inv_totient_estimate(f.degree())

    for n in range(L, U + 1):
        g = cyclotomic_poly(n, f.gen, polys=True)

        if f == g:
            break
    else:  # pragma: no cover
        raise RuntimeError("failed to find index of a cyclotomic polynomial")

    roots = []

    if not factor:
        # get the indices in the right order so the computed
        # roots will be sorted
        h = n//2
        # ks are the exponents coprime to n (primitive n-th roots of unity).
        ks = [i for i in range(1, n + 1) if igcd(i, n) == 1]
        ks.sort(key=lambda x: (x, -1) if x <= h else (abs(x - n), 1))
        d = 2*I*pi/n
        for k in reversed(ks):
            roots.append(exp(k*d).expand(complex=True))
    else:
        # Factor over QQ(root(-1, n)); each linear factor gives one root
        # as the negated trailing coefficient.
        g = Poly(f, extension=root(-1, n))

        for h, _ in ordered(g.factor_list()[1]):
            roots.append(-h.TC())

    return roots
def roots_quintic(f):
    """
    Calculate exact roots of a solvable quintic.

    Returns a list of the five roots if ``f`` is an irreducible, solvable
    quintic of the form ``x**5 + p*x**3 + q*x**2 + r*x + s`` (possibly
    after dividing out a rational leading coefficient), and an empty list
    otherwise so the caller can fall back to other methods.
    """
    result = []
    coeff_5, coeff_4, p, q, r, s = f.all_coeffs()

    # Eqn must be of the form x^5 + px^3 + qx^2 + rx + s
    if coeff_4:
        return result

    if coeff_5 != 1:
        # Only rescale when all resulting coefficients stay rational.
        l = [p/coeff_5, q/coeff_5, r/coeff_5, s/coeff_5]
        if not all(coeff.is_Rational for coeff in l):
            return result
        f = Poly(f/coeff_5)
    quintic = PolyQuintic(f)

    # Eqn standardized. Algo for solving starts here
    if not f.is_irreducible:
        return result

    f20 = quintic.f20
    # Check if f20 has linear factors over domain Z
    if f20.is_irreducible:
        return result

    # Now, we know that f is solvable
    for _factor in f20.factor_list()[1]:
        if _factor[0].is_linear:
            theta = _factor[0].root(0)
            break
    d = discriminant(f)
    delta = sqrt(d)
    # zeta = a fifth root of unity
    zeta1, zeta2, zeta3, zeta4 = quintic.zeta
    T = quintic.T(theta, d)
    tol = S(1e-10)
    alpha = T[1] + T[2]*delta
    alpha_bar = T[1] - T[2]*delta
    beta = T[3] + T[4]*delta
    beta_bar = T[3] - T[4]*delta

    disc = alpha**2 - 4*beta
    disc_bar = alpha_bar**2 - 4*beta_bar

    l0 = quintic.l0(theta)

    l1 = _quintic_simplify((-alpha + sqrt(disc)) / S(2))
    l4 = _quintic_simplify((-alpha - sqrt(disc)) / S(2))

    l2 = _quintic_simplify((-alpha_bar + sqrt(disc_bar)) / S(2))
    l3 = _quintic_simplify((-alpha_bar - sqrt(disc_bar)) / S(2))

    order = quintic.order(theta, d)
    test = (order*delta.n()) - ( (l1.n() - l4.n())*(l2.n() - l3.n()) )
    # Comparing floats
    if not comp(test, 0, tol):
        l2, l3 = l3, l2

    # Now we have correct order of l's
    R1 = l0 + l1*zeta1 + l2*zeta2 + l3*zeta3 + l4*zeta4
    R2 = l0 + l3*zeta1 + l1*zeta2 + l4*zeta3 + l2*zeta4
    R3 = l0 + l2*zeta1 + l4*zeta2 + l1*zeta3 + l3*zeta4
    R4 = l0 + l4*zeta1 + l3*zeta2 + l2*zeta3 + l1*zeta4

    Res = [None, [None]*5, [None]*5, [None]*5, [None]*5]
    Res_n = [None, [None]*5, [None]*5, [None]*5, [None]*5]
    sol = Symbol('sol')

    # Simplifying improves performance a lot for exact expressions
    R1 = _quintic_simplify(R1)
    R2 = _quintic_simplify(R2)
    R3 = _quintic_simplify(R3)
    R4 = _quintic_simplify(R4)

    # Solve imported here. Causing problems if imported as 'solve'
    # and hence the changed name
    from sympy.solvers.solvers import solve as _solve
    a, b = symbols('a b', cls=Dummy)
    # The five fifth roots of a + I*b, as expressions in a and b.
    _sol = _solve( sol**5 - a - I*b, sol)
    for i in range(5):
        _sol[i] = factor(_sol[i])
    R1 = R1.as_real_imag()
    R2 = R2.as_real_imag()
    R3 = R3.as_real_imag()
    R4 = R4.as_real_imag()

    for i, root in enumerate(_sol):
        Res[1][i] = _quintic_simplify(root.subs({ a: R1[0], b: R1[1] }))
        Res[2][i] = _quintic_simplify(root.subs({ a: R2[0], b: R2[1] }))
        Res[3][i] = _quintic_simplify(root.subs({ a: R3[0], b: R3[1] }))
        Res[4][i] = _quintic_simplify(root.subs({ a: R4[0], b: R4[1] }))

    for i in range(1, 5):
        for j in range(5):
            Res_n[i][j] = Res[i][j].n()
            Res[i][j] = _quintic_simplify(Res[i][j])
    r1 = Res[1][0]
    r1_n = Res_n[1][0]

    # Pick r4 so that r1*r4 is (numerically) real.
    for i in range(5):
        if comp(im(r1_n*Res_n[4][i]), 0, tol):
            r4 = Res[4][i]
            break

    # Now we have various Res values. Each will be a list of five
    # values. We have to pick one r value from those five for each Res.
    # (The original computed quintic.uv(theta, d) twice and an unused
    # math.sqrt(5); the dead code has been removed.)
    u, v = quintic.uv(theta, d)
    testplus = (u + v*delta*sqrt(5)).n()
    testminus = (u - v*delta*sqrt(5)).n()

    # Evaluated numbers suffixed with _n
    # We will use evaluated numbers for calculation. Much faster.
    r4_n = r4.n()
    r2 = r3 = None

    for i in range(5):
        r2temp_n = Res_n[2][i]
        for j in range(5):
            # Again storing away the exact number and using
            # evaluated numbers in computations
            r3temp_n = Res_n[3][j]
            if( comp( r1_n*r2temp_n**2 + r4_n*r3temp_n**2 - testplus, 0, tol) and
                comp( r3temp_n*r1_n**2 + r2temp_n*r4_n**2 - testminus, 0, tol ) ):
                r2 = Res[2][i]
                r3 = Res[3][j]
                break
        if r2:
            break

    # Now, we have r's so we can get roots
    x1 = (r1 + r2 + r3 + r4)/5
    x2 = (r1*zeta4 + r2*zeta3 + r3*zeta2 + r4*zeta1)/5
    x3 = (r1*zeta3 + r2*zeta1 + r3*zeta4 + r4*zeta2)/5
    x4 = (r1*zeta2 + r2*zeta4 + r3*zeta1 + r4*zeta3)/5
    x5 = (r1*zeta1 + r2*zeta2 + r3*zeta3 + r4*zeta4)/5
    result = [x1, x2, x3, x4, x5]

    # Now check if solutions are distinct
    saw = set()
    for r in result:
        r = r.n(2)
        if r in saw:
            # Roots were identical. Abort, return []
            # and fall back to usual solve
            return []
        saw.add(r)
    return result
def _quintic_simplify(expr):
    """Canonicalize a quintic-root subexpression: combine powers, cancel
    common factors, then put the result over a common denominator."""
    return together(cancel(powsimp(expr)))
def _integer_basis(poly):
    """Compute coefficient basis for a polynomial over integers.

    Returns the integer ``div`` such that substituting ``x = div*y``
    ``p(x) = m*q(y)`` where the coefficients of ``q`` are smaller
    than those of ``p``.

    For example ``x**5 + 512*x + 1024 = 0``
    with ``div = 4`` becomes ``y**5 + 2*y + 1 = 0``

    Returns the integer ``div`` or ``None`` if there is no possible scaling.

    Examples
    ========

    >>> from sympy.polys import Poly
    >>> from sympy.abc import x
    >>> from sympy.polys.polyroots import _integer_basis
    >>> p = Poly(x**5 + 512*x + 1024, x, domain='ZZ')
    >>> _integer_basis(p)
    4
    """
    monoms, coeffs = list(zip(*poly.terms()))

    # Flatten the univariate monomials to plain exponents and work with
    # absolute coefficient values.
    monoms, = list(zip(*monoms))
    coeffs = list(map(abs, coeffs))

    # Scaling only helps when the leading coefficient is smaller than the
    # trailing one; in that case reverse so the large coefficients pair
    # with large exponents.
    if coeffs[0] < coeffs[-1]:
        coeffs = list(reversed(coeffs))
        n = monoms[0]
        monoms = [n - i for i in reversed(monoms)]
    else:
        return None

    # The leading term (now last) needs no divisibility check.
    monoms = monoms[:-1]
    coeffs = coeffs[:-1]

    # Candidate divisors, largest first (skip the trivial divisor 1).
    divs = reversed(divisors(gcd_list(coeffs))[1:])

    try:
        div = next(divs)
    except StopIteration:
        return None

    # Accept the first div such that div**monom divides every coefficient;
    # the inner for-else falls through to `return div` only when no
    # coefficient failed the check.
    while True:
        for monom, coeff in zip(monoms, coeffs):
            if coeff % div**monom != 0:
                try:
                    div = next(divs)
                except StopIteration:
                    return None
                else:
                    break
        else:
            return div
def preprocess_roots(poly):
    """Try to get rid of symbolic coefficients from ``poly``.

    Returns ``(coeff, poly)`` such that the roots of the original
    polynomial are ``coeff`` times the roots of the returned one.
    """
    coeff = S.One

    # Clearing denominators can fail for exotic domains; in that case
    # return the polynomial untouched.
    try:
        _, poly = poly.clear_denoms(convert=True)
    except DomainError:
        return coeff, poly

    poly = poly.primitive()[1]
    poly = poly.retract()

    # TODO: This is fragile. Figure out how to make this independent of construct_domain().
    if poly.get_domain().is_Poly and all(c.is_term for c in poly.rep.coeffs()):
        poly = poly.inject()

        strips = list(zip(*poly.monoms()))
        gens = list(poly.gens[1:])

        # base: exponents of the main generator; strips: exponents of each
        # coefficient generator across the terms.
        base, strips = strips[0], strips[1:]

        for gen, strip in zip(list(gens), strips):
            reverse = False

            if strip[0] < strip[-1]:
                strip = reversed(strip)
                reverse = True

            ratio = None

            # A generator can be eliminated only if its exponents are a
            # constant integer multiple of the main generator's exponents.
            for a, b in zip(base, strip):
                if not a and not b:
                    continue
                elif not a or not b:
                    break
                elif b % a != 0:
                    break
                else:
                    _ratio = b // a

                    if ratio is None:
                        ratio = _ratio
                    elif ratio != _ratio:
                        break
            else:
                # Consistent ratio found: substitute gen = 1 and fold the
                # generator into the root scaling coefficient.
                if reverse:
                    ratio = -ratio

                poly = poly.eval(gen, 1)
                coeff *= gen**(-ratio)
                gens.remove(gen)

        if gens:
            poly = poly.eject(*gens)

    # Over ZZ, additionally try an integer rescaling x -> div*y to shrink
    # the coefficients (see _integer_basis).
    if poly.is_univariate and poly.get_domain().is_ZZ:
        basis = _integer_basis(poly)

        if basis is not None:
            n = poly.degree()

            def func(k, coeff):
                return coeff//basis**(n - k[0])

            poly = poly.termwise(func)
            coeff *= basis

    return coeff, poly
@public
def roots(f, *gens, **flags):
    """
    Computes symbolic roots of a univariate polynomial.

    Given a univariate polynomial f with symbolic coefficients (or
    a list of the polynomial's coefficients), returns a dictionary
    with its roots and their multiplicities.

    Only roots expressible via radicals will be returned.  To get
    a complete set of roots use RootOf class or numerical methods
    instead. By default cubic and quartic formulas are used in
    the algorithm. To disable them because of unreadable output
    set ``cubics=False`` or ``quartics=False`` respectively. If cubic
    roots are real but are expressed in terms of complex numbers
    (casus irreducibilis [1]) the ``trig`` flag can be set to True to
    have the solutions returned in terms of cosine and inverse cosine
    functions.

    To get roots from a specific domain set the ``filter`` flag with
    one of the following specifiers: Z, Q, R, I, C. By default all
    roots are returned (this is equivalent to setting ``filter='C'``).

    By default a dictionary is returned giving a compact result in
    case of multiple roots. However to get a list containing all
    those roots set the ``multiple`` flag to True; the list will
    have identical roots appearing next to each other in the result.
    (For a given Poly, the all_roots method will give the roots in
    sorted numerical order.)

    Examples
    ========

    >>> from sympy import Poly, roots
    >>> from sympy.abc import x, y

    >>> roots(x**2 - 1, x)
    {-1: 1, 1: 1}

    >>> p = Poly(x**2-1, x)
    >>> roots(p)
    {-1: 1, 1: 1}

    >>> p = Poly(x**2-y, x, y)

    >>> roots(Poly(p, x))
    {-sqrt(y): 1, sqrt(y): 1}

    >>> roots(x**2 - y, x)
    {-sqrt(y): 1, sqrt(y): 1}

    >>> roots([1, 0, -1])
    {-1: 1, 1: 1}

    References
    ==========

    1. http://en.wikipedia.org/wiki/Cubic_function#Trigonometric_.28and_hyperbolic.29_method
    """
    from sympy.polys.polytools import to_rational_coeffs
    flags = dict(flags)

    auto = flags.pop('auto', True)
    cubics = flags.pop('cubics', True)
    trig = flags.pop('trig', False)
    quartics = flags.pop('quartics', True)
    quintics = flags.pop('quintics', False)
    multiple = flags.pop('multiple', False)
    filter = flags.pop('filter', None)
    predicate = flags.pop('predicate', None)

    if isinstance(f, list):
        # A plain coefficient list: build a Poly in a fresh dummy variable.
        if gens:
            raise ValueError('redundant generators given')

        x = Dummy('x')

        poly, i = {}, len(f) - 1

        for coeff in f:
            poly[i], i = sympify(coeff), i - 1

        f = Poly(poly, x, field=True)
    else:
        try:
            f = Poly(f, *gens, **flags)
            # BUGFIX: `f.length` (a bound method) was compared to 2, which
            # is always False; it must be called.  Also `others` was never
            # initialized and the loop appended to a misspelled `other`.
            if f.length() == 2 and f.degree() != 1:
                # check for foo**n factors in the constant
                n = f.degree()
                npow_bases = []
                others = []
                expr = f.as_expr()
                con = expr.as_independent(*gens)[0]
                for p in Mul.make_args(con):
                    if p.is_Pow and not p.exp % n:
                        npow_bases.append(p.base**(p.exp/n))
                    else:
                        others.append(p)
                if npow_bases:
                    # Replace the foo**n factor by a dummy, solve, then
                    # substitute the base back into the roots.
                    b = Mul(*npow_bases)
                    B = Dummy()
                    d = roots(Poly(expr - con + B**n*Mul(*others), *gens,
                        **flags), *gens, **flags)
                    rv = {}
                    for k, v in d.items():
                        rv[k.subs(B, b)] = v
                    return rv

        except GeneratorsNeeded:
            if multiple:
                return []
            else:
                return {}

    if f.is_multivariate:
        raise PolynomialError('multivariate polynomials are not supported')

    def _update_dict(result, root, k):
        # Accumulate multiplicity k for the given root.
        if root in result:
            result[root] += k
        else:
            result[root] = k

    def _try_decompose(f):
        """Find roots using functional decomposition. """
        factors, roots = f.decompose(), []

        for root in _try_heuristics(factors[0]):
            roots.append(root)

        for factor in factors[1:]:
            previous, roots = list(roots), []

            for root in previous:
                g = factor - Poly(root, f.gen)

                for root in _try_heuristics(g):
                    roots.append(root)

        return roots

    def _try_heuristics(f):
        """Find roots using formulas and some tricks. """
        if f.is_ground:
            return []
        if f.is_monomial:
            return [S(0)]*f.degree()

        if f.length() == 2:
            if f.degree() == 1:
                return list(map(cancel, roots_linear(f)))
            else:
                return roots_binomial(f)

        result = []

        # Peel off a rational root at +-1 if present.
        for i in [-1, 1]:
            if not f.eval(i):
                f = f.quo(Poly(f.gen - i, f.gen))
                result.append(i)
                break

        n = f.degree()

        if n == 1:
            result += list(map(cancel, roots_linear(f)))
        elif n == 2:
            result += list(map(cancel, roots_quadratic(f)))
        elif f.is_cyclotomic:
            result += roots_cyclotomic(f)
        elif n == 3 and cubics:
            result += roots_cubic(f, trig=trig)
        elif n == 4 and quartics:
            result += roots_quartic(f)
        elif n == 5 and quintics:
            result += roots_quintic(f)

        return result

    # Factor out x**k; k is the multiplicity of the zero root.
    (k,), f = f.terms_gcd()

    if not k:
        zeros = {}
    else:
        zeros = {S(0): k}

    coeff, f = preprocess_roots(f)

    if auto and f.get_domain().has_Ring:
        f = f.to_field()

    rescale_x = None
    translate_x = None

    result = {}

    if not f.is_ground:
        if not f.get_domain().is_Exact:
            # Inexact coefficients: go straight to numerical roots.
            for r in f.nroots():
                _update_dict(result, r, 1)
        elif f.degree() == 1:
            result[roots_linear(f)[0]] = 1
        elif f.length() == 2:
            roots_fun = roots_quadratic if f.degree() == 2 else roots_binomial
            for r in roots_fun(f):
                _update_dict(result, r, 1)
        else:
            _, factors = Poly(f.as_expr()).factor_list()
            if len(factors) == 1 and f.degree() == 2:
                for r in roots_quadratic(f):
                    _update_dict(result, r, 1)
            else:
                if len(factors) == 1 and factors[0][1] == 1:
                    if f.get_domain().is_EX:
                        # Try to rewrite with rational coefficients first,
                        # remembering how to undo the transformation.
                        res = to_rational_coeffs(f)
                        if res:
                            if res[0] is None:
                                translate_x, f = res[2:]
                            else:
                                rescale_x, f = res[1], res[-1]
                            result = roots(f)
                            if not result:
                                for root in _try_decompose(f):
                                    _update_dict(result, root, 1)
                    else:
                        for root in _try_decompose(f):
                            _update_dict(result, root, 1)
                else:
                    for factor, k in factors:
                        for r in _try_heuristics(Poly(factor, f.gen, field=True)):
                            _update_dict(result, r, k)

    if coeff is not S.One:
        # Undo the scaling applied by preprocess_roots.
        _result, result = result, {}

        for root, k in _result.items():
            result[coeff*root] = k

    result.update(zeros)

    if filter not in [None, 'C']:
        handlers = {
            'Z': lambda r: r.is_Integer,
            'Q': lambda r: r.is_Rational,
            'R': lambda r: r.is_real,
            'I': lambda r: r.is_imaginary,
        }

        try:
            query = handlers[filter]
        except KeyError:
            raise ValueError("Invalid filter: %s" % filter)

        # Iterate over a copy since entries may be deleted.
        for zero in dict(result).keys():
            if not query(zero):
                del result[zero]

    if predicate is not None:
        for zero in dict(result).keys():
            if not predicate(zero):
                del result[zero]

    # Undo the rational-coefficient transformations, if any were applied.
    if rescale_x:
        result1 = {}
        for k, v in result.items():
            result1[k*rescale_x] = v
        result = result1
    if translate_x:
        result1 = {}
        for k, v in result.items():
            result1[k + translate_x] = v
        result = result1

    if not multiple:
        return result
    else:
        zeros = []

        for zero in ordered(result):
            zeros.extend([zero]*result[zero])

        return zeros
def root_factors(f, *gens, **args):
    """
    Returns all factors of a univariate polynomial.

    Examples
    ========

    >>> from sympy.abc import x, y
    >>> from sympy.polys.polyroots import root_factors

    >>> root_factors(x**2 - y, x)
    [x - sqrt(y), x + sqrt(y)]
    """
    opts = dict(args)
    zero_filter = opts.pop('filter', None)

    F = Poly(f, *gens, **opts)
    if not F.is_Poly:
        # Not a polynomial in the given generators: nothing to factor.
        return [f]
    if F.is_multivariate:
        raise ValueError('multivariate polynomials are not supported')

    gen = F.gens[0]
    zeros = roots(F, filter=zero_filter)

    if not zeros:
        factors = [F]
    else:
        # One linear factor (gen - zero) per root, repeated per multiplicity.
        factors = []
        covered = 0
        for zero, multiplicity in ordered(zeros.items()):
            factors.extend([Poly(gen - zero, gen)]*multiplicity)
            covered += multiplicity

        if covered < F.degree():
            # Some roots were not expressible; append the remaining cofactor.
            product = reduce(lambda p, q: p*q, factors)
            factors.append(F.quo(product))

    if not isinstance(f, Poly):
        factors = [factor.as_expr() for factor in factors]

    return factors
| bsd-3-clause |
vicentzhong/ycmd | ycmd/completers/typescript/typescript_completer.py | 10 | 9494 | #!/usr/bin/env python
#
# Copyright (C) 2015 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
import json
import logging
import os
import subprocess
from threading import Lock
from tempfile import NamedTemporaryFile
from ycmd import responses
from ycmd import utils
from ycmd.completers.completer import Completer
# Error shown (and raised) when the tsserver executable is not on the PATH.
BINARY_NOT_FOUND_MESSAGE = ( 'tsserver not found. '
                             'TypeScript 1.5 or higher is required' )

# Above this many candidates, skip the per-entry 'completionEntryDetails'
# request and return the cheaper summary entries to keep completion fast.
MAX_DETAILED_COMPLETIONS = 100

# Module-level logger for this completer.
_logger = logging.getLogger( __name__ )
class TypeScriptCompleter( Completer ):
  """
  Completer for TypeScript.
  It uses TSServer which is bundled with TypeScript 1.5

  See the protocol here:
  https://github.com/Microsoft/TypeScript/blob/2cb0dfd99dc2896958b75e44303d8a7a32e5dc33/src/server/protocol.d.ts
  """

  def __init__( self, user_options ):
    super( TypeScriptCompleter, self ).__init__( user_options )

    # Used to prevent threads from concurrently reading and writing to
    # the tsserver process' stdout and stdin
    self._lock = Lock()

    binarypath = utils.PathToFirstExistingExecutable( [ 'tsserver' ] )
    if not binarypath:
      _logger.error( BINARY_NOT_FOUND_MESSAGE )
      raise RuntimeError( BINARY_NOT_FOUND_MESSAGE )

    # Each request sent to tsserver must have a sequence id.
    # Responses contain the id sent in the corresponding request.
    self._sequenceid = 0

    # TSServer ignores the fact that newlines are two characters on Windows
    # (\r\n) instead of one on other platforms (\n), so we use the
    # universal_newlines option to convert those newlines to \n. See the issue
    # https://github.com/Microsoft/TypeScript/issues/3403
    # TODO: remove this option when the issue is fixed.
    # We also need to redirect the error stream to the output one on Windows.
    self._tsserver_handle = utils.SafePopen( binarypath,
                                             stdout = subprocess.PIPE,
                                             stdin = subprocess.PIPE,
                                             stderr = subprocess.STDOUT,
                                             universal_newlines = True )

    _logger.info( 'Enabling typescript completion' )


  def _SendRequest( self, command, arguments = None ):
    """Send a request message to TSServer and return its sequence id.

    Callers must hold self._lock.
    """
    seq = self._sequenceid
    self._sequenceid += 1
    request = {
      'seq': seq,
      'type': 'request',
      'command': command
    }
    if arguments:
      request[ 'arguments' ] = arguments
    self._tsserver_handle.stdin.write( json.dumps( request ) )
    self._tsserver_handle.stdin.write( "\n" )
    return seq


  def _ReadResponse( self, expected_seq ):
    """Read the response message matching expected_seq from TSServer.

    Event messages received in the meantime are dispatched to _HandleEvent
    and reading continues. Callers must hold self._lock.
    """
    # The headers are pretty similar to HTTP.
    # At the time of writing, 'Content-Length' is the only supplied header.
    headers = {}
    while True:
      headerline = self._tsserver_handle.stdout.readline().strip()
      if not headerline:
        break
      key, value = headerline.split( ':', 1 )
      headers[ key.strip() ] = value.strip()

    # The response message is a JSON object which comes back on one line.
    # Since this might change in the future, we use the 'Content-Length'
    # header.
    if 'Content-Length' not in headers:
      raise RuntimeError( "Missing 'Content-Length' header" )
    contentlength = int( headers[ 'Content-Length' ] )
    message = json.loads( self._tsserver_handle.stdout.read( contentlength ) )

    msgtype = message[ 'type' ]
    if msgtype == 'event':
      self._HandleEvent( message )
      return self._ReadResponse( expected_seq )
    if msgtype != 'response':
      # Typo fixed: was 'Unsuported'.
      raise RuntimeError( 'Unsupported message type {0}'.format( msgtype ) )
    if int( message[ 'request_seq' ] ) != expected_seq:
      raise RuntimeError( 'Request sequence mismatch' )
    if not message[ 'success' ]:
      raise RuntimeError( message[ 'message' ] )

    return message


  def _HandleEvent( self, event ):
    """Handle event message from TSServer."""
    # We ignore events for now since we don't have a use for them.
    eventname = event[ 'event' ]
    # Typo fixed: was 'Recieved'.
    _logger.info( 'Received {0} event from tsserver'.format( eventname ) )


  def _Reload( self, request_data ):
    """
    Synchronize TSServer's view of the file to
    the contents of the unsaved buffer.
    """
    filename = request_data[ 'filepath' ]
    contents = request_data[ 'file_data' ][ filename ][ 'contents' ]
    tmpfile = NamedTemporaryFile( delete=False )
    tmpfile.write( utils.ToUtf8IfNeeded( contents ) )
    tmpfile.close()
    try:
      seq = self._SendRequest( 'reload', {
        'file': filename,
        'tmpfile': tmpfile.name
      })
      self._ReadResponse( seq )
    finally:
      # Always remove the temporary file, even when tsserver reports an
      # error, so repeated reloads do not leak files in the temp directory.
      os.unlink( tmpfile.name )


  def SupportedFiletypes( self ):
    return [ 'typescript' ]


  def ComputeCandidatesInner( self, request_data ):
    with self._lock:
      self._Reload( request_data )
      seq = self._SendRequest( 'completions', {
        'file': request_data[ 'filepath' ],
        'line': request_data[ 'line_num' ],
        'offset': request_data[ 'column_num' ]
      })

      entries = self._ReadResponse( seq )[ 'body' ]

      # A less detailed version of the completion data is returned
      # if there are too many entries. This improves responsiveness.
      if len( entries ) > MAX_DETAILED_COMPLETIONS:
        return [ _ConvertCompletionData(e) for e in entries ]

      names = []
      namelength = 0
      for e in entries:
        name = e[ 'name' ]
        namelength = max( namelength, len( name ) )
        names.append( name )

      seq = self._SendRequest( 'completionEntryDetails', {
        'file': request_data[ 'filepath' ],
        'line': request_data[ 'line_num' ],
        'offset': request_data[ 'column_num' ],
        'entryNames': names
      })
      detailed_entries = self._ReadResponse( seq )[ 'body' ]
      return [ _ConvertDetailedCompletionData( e, namelength )
               for e in detailed_entries ]


  def OnBufferVisit( self, request_data ):
    filename = request_data[ 'filepath' ]
    with self._lock:
      self._SendRequest( 'open', { 'file': filename } )


  def OnBufferUnload( self, request_data ):
    filename = request_data[ 'filepath' ]
    with self._lock:
      self._SendRequest( 'close', { 'file': filename } )


  def OnFileReadyToParse( self, request_data ):
    with self._lock:
      self._Reload( request_data )


  def DefinedSubcommands( self ):
    return [ 'GoToDefinition',
             'GetType' ]


  def OnUserCommand( self, arguments, request_data ):
    command = arguments[ 0 ]
    if command == 'GoToDefinition':
      return self._GoToDefinition( request_data )
    if command == 'GetType':
      return self._GetType( request_data )
    raise ValueError( self.UserCommandsHelpMessage() )


  def _GoToDefinition( self, request_data ):
    """Jump to the definition of the identifier under the cursor."""
    with self._lock:
      self._Reload( request_data )
      seq = self._SendRequest( 'definition', {
        'file': request_data[ 'filepath' ],
        'line': request_data[ 'line_num' ],
        'offset': request_data[ 'column_num' ]
      })

      filespans = self._ReadResponse( seq )[ 'body' ]
      if not filespans:
        raise RuntimeError( 'Could not find definition' )

      span = filespans[ 0 ]
      return responses.BuildGoToResponse(
        filepath = span[ 'file' ],
        line_num = span[ 'start' ][ 'line' ],
        column_num = span[ 'start' ][ 'offset' ]
      )


  def _GetType( self, request_data ):
    """Return the type of the identifier under the cursor as a message."""
    with self._lock:
      self._Reload( request_data )
      seq = self._SendRequest( 'quickinfo', {
        'file': request_data[ 'filepath' ],
        'line': request_data[ 'line_num' ],
        'offset': request_data[ 'column_num' ]
      })

      info = self._ReadResponse( seq )[ 'body' ]
      return responses.BuildDisplayMessageResponse( info[ 'displayString' ] )


  def Shutdown( self ):
    with self._lock:
      self._SendRequest( 'exit' )
def _ConvertCompletionData( completion_data ):
  """Build a summary completion entry from a raw tsserver 'completions'
  entry; the entry kind doubles as the extra data."""
  name = utils.ToUtf8IfNeeded( completion_data[ 'name' ] )
  kind = utils.ToUtf8IfNeeded( completion_data[ 'kind' ] )
  return responses.BuildCompletionData(
    insertion_text = name,
    menu_text = name,
    kind = kind,
    extra_data = kind
  )
def _ConvertDetailedCompletionData( completion_data, padding = 0 ):
  """Build a detailed completion entry from a 'completionEntryDetails'
  response, left-justifying the name to ``padding`` characters so the
  signatures line up in the completion menu."""
  name = completion_data[ 'name' ]
  signature = ''.join( part[ 'text' ]
                       for part in completion_data[ 'displayParts' ] )
  menu_text = '{0} {1}'.format( name.ljust( padding ), signature )
  return responses.BuildCompletionData(
    insertion_text = utils.ToUtf8IfNeeded( name ),
    menu_text = utils.ToUtf8IfNeeded( menu_text ),
    kind = utils.ToUtf8IfNeeded( completion_data[ 'kind' ] )
  )
| gpl-3.0 |
Lujeni/ansible | lib/ansible/modules/cloud/vmware/vmware_vm_info.py | 13 | 10816 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2018, Fedor Vompe <f.vompe () comptek.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_vm_info
short_description: Return basic info pertaining to a VMware machine guest
description:
- Return basic information pertaining to a vSphere or ESXi virtual machine guest.
- Cluster name as fact is added in version 2.7.
- This module was called C(vmware_vm_facts) before Ansible 2.9. The usage did not change.
version_added: '2.0'
author:
- Joseph Callen (@jcpowermac)
- Abhijeet Kasurde (@Akasurde)
- Fedor Vompe (@sumkincpp)
notes:
- Tested on ESXi 6.7, vSphere 5.5 and vSphere 6.5
- From 2.8 and onwards, information are returned as list of dict instead of dict.
requirements:
- python >= 2.6
- PyVmomi
options:
vm_type:
description:
- If set to C(vm), then information are gathered for virtual machines only.
- If set to C(template), then information are gathered for virtual machine templates only.
- If set to C(all), then information are gathered for all virtual machines and virtual machine templates.
required: False
default: 'all'
choices: [ all, vm, template ]
version_added: 2.5
type: str
show_attribute:
description:
- Attributes related to VM guest shown in information only when this is set C(true).
default: no
type: bool
version_added: 2.8
folder:
description:
- Specify a folder location of VMs to gather information from.
- 'Examples:'
- ' folder: /ha-datacenter/vm'
- ' folder: ha-datacenter/vm'
- ' folder: /datacenter1/vm'
- ' folder: datacenter1/vm'
- ' folder: /datacenter1/vm/folder1'
- ' folder: datacenter1/vm/folder1'
- ' folder: /folder1/datacenter1/vm'
- ' folder: folder1/datacenter1/vm'
- ' folder: /folder1/datacenter1/vm/folder2'
type: str
version_added: 2.9
show_tag:
description:
- Tags related to virtual machine are shown if set to C(True).
default: False
type: bool
version_added: 2.9
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Gather all registered virtual machines
vmware_vm_info:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
delegate_to: localhost
register: vminfo
- debug:
var: vminfo.virtual_machines
- name: Gather only registered virtual machine templates
vmware_vm_info:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
vm_type: template
delegate_to: localhost
register: template_info
- debug:
var: template_info.virtual_machines
- name: Gather only registered virtual machines
vmware_vm_info:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
vm_type: vm
delegate_to: localhost
register: vm_info
- debug:
var: vm_info.virtual_machines
- name: Get UUID from given VM Name
block:
- name: Get virtual machine info
vmware_vm_info:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
folder: "/datacenter/vm/folder"
delegate_to: localhost
register: vm_info
- debug:
msg: "{{ item.uuid }}"
with_items:
- "{{ vm_info.virtual_machines | json_query(query) }}"
vars:
query: "[?guest_name=='DC0_H0_VM0']"
- name: Get Tags from given VM Name
block:
- name: Get virtual machine info
vmware_vm_info:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
folder: "/datacenter/vm/folder"
delegate_to: localhost
register: vm_info
- debug:
msg: "{{ item.tags }}"
with_items:
- "{{ vm_info.virtual_machines | json_query(query) }}"
vars:
query: "[?guest_name=='DC0_H0_VM0']"
'''
RETURN = r'''
virtual_machines:
description: list of dictionary of virtual machines and their information
returned: success
type: list
sample: [
{
"guest_name": "ubuntu_t",
"cluster": null,
"esxi_hostname": "10.76.33.226",
"guest_fullname": "Ubuntu Linux (64-bit)",
"ip_address": "",
"mac_address": [
"00:50:56:87:a5:9a"
],
"power_state": "poweredOff",
"uuid": "4207072c-edd8-3bd5-64dc-903fd3a0db04",
"vm_network": {
"00:50:56:87:a5:9a": {
"ipv4": [
"10.76.33.228"
],
"ipv6": []
}
},
"attributes": {
"job": "backup-prepare"
},
"tags": [
{
"category_id": "urn:vmomi:InventoryServiceCategory:b316cc45-f1a9-4277-811d-56c7e7975203:GLOBAL",
"category_name": "cat_0001",
"description": "",
"id": "urn:vmomi:InventoryServiceTag:43737ec0-b832-4abf-abb1-fd2448ce3b26:GLOBAL",
"name": "tag_0001"
}
]
}
]
'''
try:
from pyVmomi import vim
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import PyVmomi, get_all_objs, vmware_argument_spec, _get_vm_prop
from ansible.module_utils.vmware_rest_client import VmwareRestClient
class VmwareVmInfo(PyVmomi):
    """Collects configuration and runtime information about vSphere/ESXi VMs."""
    def __init__(self, module):
        super(VmwareVmInfo, self).__init__(module)
    def get_tag_info(self, vm_dynamic_obj):
        # Tags live in the vSphere REST API rather than pyVmomi, so a
        # separate REST client session is opened for the lookup.
        vmware_client = VmwareRestClient(self.module)
        return vmware_client.get_tags_for_vm(vm_mid=vm_dynamic_obj._moId)
    def get_vm_attributes(self, vm):
        # Join the global custom-field definitions against the values set on
        # this VM, yielding {field name: value}.
        return dict((x.name, v.value) for x in self.custom_field_mgr
                    for v in vm.customValue if x.key == v.key)
    # https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/getallvms.py
    def get_all_virtual_machines(self):
        """
        Get all virtual machines and related configurations information
        """
        folder = self.params.get('folder')
        folder_obj = None
        if folder:
            folder_obj = self.content.searchIndex.FindByInventoryPath(folder)
            if not folder_obj:
                self.module.fail_json(msg="Failed to find folder specified by %(folder)s" % self.params)
        virtual_machines = get_all_objs(self.content, [vim.VirtualMachine], folder=folder_obj)
        _virtual_machines = []
        for vm in virtual_machines:
            # Primary guest IP; empty string when tools report nothing.
            _ip_address = ""
            summary = vm.summary
            if summary.guest is not None:
                _ip_address = summary.guest.ipAddress
                if _ip_address is None:
                    _ip_address = ""
            # MAC addresses of every virtual NIC on the VM.
            _mac_address = []
            all_devices = _get_vm_prop(vm, ('config', 'hardware', 'device'))
            if all_devices:
                for dev in all_devices:
                    if isinstance(dev, vim.vm.device.VirtualEthernetCard):
                        _mac_address.append(dev.macAddress)
            # Per-MAC map of IPv4/IPv6 addresses reported by the guest agent.
            net_dict = {}
            vmnet = _get_vm_prop(vm, ('guest', 'net'))
            if vmnet:
                for device in vmnet:
                    net_dict[device.macAddress] = dict()
                    net_dict[device.macAddress]['ipv4'] = []
                    net_dict[device.macAddress]['ipv6'] = []
                    for ip_addr in device.ipAddress:
                        # NOTE(review): "::" only matches compressed IPv6 forms;
                        # a fully-expanded IPv6 address would be misfiled as
                        # IPv4 — confirm the format guest tools return.
                        if "::" in ip_addr:
                            net_dict[device.macAddress]['ipv6'].append(ip_addr)
                        else:
                            net_dict[device.macAddress]['ipv4'].append(ip_addr)
            esxi_hostname = None
            esxi_parent = None
            if summary.runtime.host:
                esxi_hostname = summary.runtime.host.summary.config.name
                esxi_parent = summary.runtime.host.parent
            # Only report a cluster when the host's parent really is one
            # (standalone hosts have a ComputeResource parent instead).
            cluster_name = None
            if esxi_parent and isinstance(esxi_parent, vim.ClusterComputeResource):
                cluster_name = summary.runtime.host.parent.name
            vm_attributes = dict()
            if self.module.params.get('show_attribute'):
                vm_attributes = self.get_vm_attributes(vm)
            vm_tags = list()
            if self.module.params.get('show_tag'):
                vm_tags = self.get_tag_info(vm)
            virtual_machine = {
                "guest_name": summary.config.name,
                "guest_fullname": summary.config.guestFullName,
                "power_state": summary.runtime.powerState,
                "ip_address": _ip_address,  # Kept for backward compatibility
                "mac_address": _mac_address,  # Kept for backward compatibility
                "uuid": summary.config.uuid,
                "vm_network": net_dict,
                "esxi_hostname": esxi_hostname,
                "cluster": cluster_name,
                "attributes": vm_attributes,
                "tags": vm_tags
            }
            # Filter by the requested object type: vm / template / all.
            vm_type = self.module.params.get('vm_type')
            is_template = _get_vm_prop(vm, ('config', 'template'))
            if vm_type == 'vm' and not is_template:
                _virtual_machines.append(virtual_machine)
            elif vm_type == 'template' and is_template:
                _virtual_machines.append(virtual_machine)
            elif vm_type == 'all':
                _virtual_machines.append(virtual_machine)
        return _virtual_machines
def main():
    """Entry point: parse module arguments, gather VM info and exit.

    Exits the Ansible module with ``virtual_machines`` set to the list
    built by ``VmwareVmInfo.get_all_virtual_machines``; never changes state.
    """
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        vm_type=dict(type='str', choices=['vm', 'all', 'template'], default='all'),
        # Real boolean default for consistency with show_tag below; Ansible
        # coerces the old string 'no' to False, so behaviour is unchanged.
        show_attribute=dict(type='bool', default=False),
        show_tag=dict(type='bool', default=False),
        folder=dict(type='str'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )
    # Legacy alias kept for the 2.9 rename; warn until removal in 2.13.
    if module._name == 'vmware_vm_facts':
        module.deprecate("The 'vmware_vm_facts' module has been renamed to 'vmware_vm_info'", version='2.13')
    vmware_vm_info = VmwareVmInfo(module)
    _virtual_machines = vmware_vm_info.get_all_virtual_machines()
    module.exit_json(changed=False, virtual_machines=_virtual_machines)
if __name__ == '__main__':
    main()
| gpl-3.0 |
bonitadecker77/python-for-android | python3-alpha/extra_modules/oauth2/__init__.py | 46 | 29186 | """
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import base64
import urllib.request, urllib.parse, urllib.error
import time
import random
import urllib.parse
import hmac
import binascii
import httplib2
try:
from urllib.parse import parse_qs
parse_qs # placate pyflakes
except ImportError:
# fall back for Python 2.5
from cgi import parse_qs
try:
from hashlib import sha1
sha = sha1
except ImportError:
# hashlib was added in Python 2.5
import sha
from . import _version
__version__ = _version.__version__
OAUTH_VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
class Error(RuntimeError):
    """Base exception for OAuth failures."""
    def __init__(self, message='OAuth error occurred.'):
        self._message = message
    @property
    def message(self):
        """Compatibility accessor; BaseException.message was deprecated in 2.6."""
        return self._message
    def __str__(self):
        return self.message
class MissingSignature(Error):
    """Raised when a request being verified carries no oauth_signature."""
    pass
def build_authenticate_header(realm=''):
    """Return the optional WWW-Authenticate header for a 401 response."""
    header_value = 'OAuth realm="%s"' % realm
    return {'WWW-Authenticate': header_value}
def build_xoauth_string(url, consumer, token=None):
    """Build an XOAUTH string for use in SMTP/IMAP authentication."""
    request = Request.from_consumer_and_token(consumer, token, "GET", url)
    request.sign_request(SignatureMethod_HMAC_SHA1(), consumer, token)
    params = ['%s="%s"' % (k, escape(v))
              for k, v in sorted(request.items())
              if v is not None]
    return "%s %s %s" % ("GET", url, ','.join(params))
def to_unicode(s):
    """Return *s* as str, decoding UTF-8 bytes; raise TypeError otherwise.

    The original Python 2 code accepted ``unicode`` or UTF-8 ``str``.  The
    automated 2-to-3 conversion left both isinstance checks testing ``str``,
    which made the decode branch unreachable and rejected valid UTF-8 byte
    strings.  This restores the intended behaviour for ``bytes`` input.
    """
    if isinstance(s, str):
        return s
    if not isinstance(s, bytes):
        raise TypeError('You are required to pass either unicode or string here, not: %r (%s)' % (type(s), s))
    try:
        return s.decode('utf-8')
    except UnicodeDecodeError as le:
        raise TypeError('You are required to pass either a unicode object or a utf-8 string here. You passed a Python string object which contained non-utf-8: %r. The UnicodeDecodeError that resulted from attempting to interpret it as utf-8 was: %s' % (s, le,))
def to_utf8(s):
    """Return *s* encoded as UTF-8 bytes (via to_unicode normalization)."""
    unicode_value = to_unicode(s)
    return unicode_value.encode('utf-8')
def to_unicode_if_string(s):
    """Normalize *s* via to_unicode when it is a str; pass others through."""
    return to_unicode(s) if isinstance(s, str) else s
def to_utf8_if_string(s):
    """Encode *s* via to_utf8 when it is a str; pass others through."""
    return to_utf8(s) if isinstance(s, str) else s
def to_unicode_optional_iterator(x):
    """
    Raise TypeError if x is a str containing non-utf8 bytes or if x is
    an iterable which contains such a str.
    """
    if isinstance(x, str):
        return to_unicode(x)
    try:
        items = list(x)
    except TypeError as e:
        # Non-iterable values are passed through unchanged.
        assert 'is not iterable' in str(e)
        return x
    return [to_unicode(item) for item in items]
def to_utf8_optional_iterator(x):
    """
    UTF-8 encode a str, each string element of an iterable, or pass
    non-iterable values through unchanged.
    """
    if isinstance(x, str):
        return to_utf8(x)
    try:
        items = list(x)
    except TypeError as e:
        # Non-iterable values are passed through unchanged.
        assert 'is not iterable' in str(e)
        return x
    return [to_utf8_if_string(item) for item in items]
def escape(s):
    """Percent-encode a URL component, including any '/' characters."""
    encoded = s.encode('utf-8')
    return urllib.parse.quote(encoded, safe='~')
def generate_timestamp():
    """Get seconds since epoch (UTC)."""
    now = time.time()
    return int(now)
def generate_nonce(length=8):
    """Generate a pseudorandom string of decimal digits."""
    digits = [str(random.randint(0, 9)) for _ in range(length)]
    return ''.join(digits)
def generate_verifier(length=8):
    """Generate a pseudorandom verifier string of decimal digits."""
    digits = [str(random.randint(0, 9)) for _ in range(length)]
    return ''.join(digits)
class Consumer(object):
    """A consumer of OAuth-protected services.

    Identifies a registered "third-party" application.  The *key* is sent
    with every request to identify the consumer; the *secret* is used only
    when signing requests, to prove the request really comes from that
    registered consumer.  With these credentials the consumer can ask the
    service provider for a request token, kicking off the OAuth
    authorization process.
    """
    key = None
    secret = None
    def __init__(self, key, secret):
        if key is None or secret is None:
            raise ValueError("Key and secret must be set.")
        self.key = key
        self.secret = secret
    def __str__(self):
        return urllib.parse.urlencode({
            'oauth_consumer_key': self.key,
            'oauth_consumer_secret': self.secret,
        })
class Token(object):
    """An OAuth credential used to request authorization or a protected
    resource.

    A token pairs a *key* (sent with every request to identify the token)
    with a *secret* (used only in the signature, to prove ownership).
    During authorization the consumer first obtains a *request token* that
    the end user approves, then exchanges it for an *access token* used to
    reach protected resources.
    """
    key = None
    secret = None
    callback = None
    callback_confirmed = None
    verifier = None
    def __init__(self, key, secret):
        if key is None or secret is None:
            raise ValueError("Key and secret must be set.")
        self.key = key
        self.secret = secret
    def set_callback(self, callback):
        """Record the callback URL and mark it confirmed (1.0a)."""
        self.callback = callback
        self.callback_confirmed = 'true'
    def set_verifier(self, verifier=None):
        """Store the given verifier, or generate a fresh one."""
        self.verifier = verifier if verifier is not None else generate_verifier()
    def get_callback_url(self):
        """Return the callback URL, with oauth_verifier appended when set."""
        if not (self.callback and self.verifier):
            return self.callback
        scheme, netloc, path, params, query, fragment = urllib.parse.urlparse(self.callback)[:6]
        verifier_part = 'oauth_verifier=%s' % self.verifier
        query = '%s&%s' % (query, verifier_part) if query else verifier_part
        return urllib.parse.urlunparse((scheme, netloc, path, params,
                                        query, fragment))
    def to_string(self):
        """Serialize as a query string suitable for storage.

        The result includes the token's secret, so never expose it to
        third parties.
        """
        data = {
            'oauth_token': self.key,
            'oauth_token_secret': self.secret,
        }
        if self.callback_confirmed is not None:
            data['oauth_callback_confirmed'] = self.callback_confirmed
        return urllib.parse.urlencode(data)
    @staticmethod
    def from_string(s):
        """Deserialize a token from a string produced by `to_string()`."""
        if not len(s):
            raise ValueError("Invalid parameter string.")
        params = parse_qs(s, keep_blank_values=False)
        if not len(params):
            raise ValueError("Invalid parameter string.")
        try:
            key = params['oauth_token'][0]
        except Exception:
            raise ValueError("'oauth_token' not found in OAuth request.")
        try:
            secret = params['oauth_token_secret'][0]
        except Exception:
            raise ValueError("'oauth_token_secret' not found in "
                             "OAuth request.")
        token = Token(key, secret)
        try:
            token.callback_confirmed = params['oauth_callback_confirmed'][0]
        except KeyError:
            pass  # Pre-1.0a response: no callback confirmation present.
        return token
    def __str__(self):
        return self.to_string()
def setter(attr):
    """Build a property from a single setter function.

    The wrapped function's name becomes the key in the instance __dict__;
    the generated getter and deleter operate on that same key, raising
    AttributeError when the value has never been set.
    """
    name = attr.__name__
    def _get(self):
        try:
            return self.__dict__[name]
        except KeyError:
            raise AttributeError(name)
    def _del(self):
        del self.__dict__[name]
    return property(_get, attr, _del)
class Request(dict):
    """The parameters and information for an HTTP request, suitable for
    authorizing with OAuth credentials.
    When a consumer wants to access a service's protected resources, it does
    so using a signed HTTP request identifying itself (the consumer) with its
    key, and providing an access token authorized by the end user to access
    those resources.
    """
    # OAuth protocol version reported as oauth_version.
    version = OAUTH_VERSION
    def __init__(self, method=HTTP_METHOD, url=None, parameters=None,
                 body='', is_form_encoded=False):
        """Initialize from an HTTP method, URL and parameter mapping.

        All keys and values are normalized to unicode on the way in.
        """
        if url is not None:
            self.url = to_unicode(url)
        self.method = method
        if parameters is not None:
            for k, v in list(parameters.items()):
                k = to_unicode(k)
                v = to_unicode_optional_iterator(v)
                self[k] = v
        self.body = body
        self.is_form_encoded = is_form_encoded
    @setter
    def url(self, value):
        """Store the URL and derive self.normalized_url for signing."""
        self.__dict__['url'] = value
        if value is not None:
            scheme, netloc, path, params, query, fragment = urllib.parse.urlparse(value)
            # Exclude default port numbers.
            if scheme == 'http' and netloc[-3:] == ':80':
                netloc = netloc[:-3]
            elif scheme == 'https' and netloc[-4:] == ':443':
                netloc = netloc[:-4]
            if scheme not in ('http', 'https'):
                raise ValueError("Unsupported URL %s (%s)." % (value, scheme))
            # Normalized URL excludes params, query, and fragment.
            self.normalized_url = urllib.parse.urlunparse((scheme, netloc, path, None, None, None))
        else:
            self.normalized_url = None
            self.__dict__['url'] = None
    @setter
    def method(self, value):
        """Store the HTTP method upper-cased, as the signature base requires."""
        self.__dict__['method'] = value.upper()
    def _get_timestamp_nonce(self):
        # Both parameters must already be present; raises KeyError otherwise.
        return self['oauth_timestamp'], self['oauth_nonce']
    def get_nonoauth_parameters(self):
        """Get any non-OAuth parameters."""
        return dict([(k, v) for k, v in list(self.items())
                    if not k.startswith('oauth_')])
    def to_header(self, realm=''):
        """Serialize as a header for an HTTPAuth request."""
        oauth_params = ((k, v) for k, v in list(self.items())
                            if k.startswith('oauth_'))
        stringy_params = ((k, escape(str(v))) for k, v in oauth_params)
        header_params = ('%s="%s"' % (k, v) for k, v in stringy_params)
        params_header = ', '.join(header_params)
        auth_header = 'OAuth realm="%s"' % realm
        if params_header:
            auth_header = "%s, %s" % (auth_header, params_header)
        return {'Authorization': auth_header}
    def to_postdata(self):
        """Serialize as post data for a POST request."""
        d = {}
        for k, v in list(self.items()):
            d[k.encode('utf-8')] = to_utf8_optional_iterator(v)
        # tell urlencode to deal with sequence values and map them correctly
        # to resulting querystring. for example self["k"] = ["v1", "v2"] will
        # result in 'k=v1&k=v2' and not k=%5B%27v1%27%2C+%27v2%27%5D
        return urllib.parse.urlencode(d, True).replace('+', '%20')
    def to_url(self):
        """Serialize as a URL for a GET request."""
        base_url = urllib.parse.urlparse(self.url)
        try:
            query = base_url.query
        except AttributeError:
            # must be python <2.5
            query = base_url[4]
        query = parse_qs(query)
        # Merge this request's parameters into the URL's existing query.
        for k, v in list(self.items()):
            query.setdefault(k, []).append(v)
        try:
            scheme = base_url.scheme
            netloc = base_url.netloc
            path = base_url.path
            params = base_url.params
            fragment = base_url.fragment
        except AttributeError:
            # must be python <2.5
            scheme = base_url[0]
            netloc = base_url[1]
            path = base_url[2]
            params = base_url[3]
            fragment = base_url[5]
        url = (scheme, netloc, path, params,
               urllib.parse.urlencode(query, True), fragment)
        return urllib.parse.urlunparse(url)
    def get_parameter(self, parameter):
        """Return a parameter's value, raising Error when it is absent."""
        ret = self.get(parameter)
        if ret is None:
            raise Error('Parameter not found: %s' % parameter)
        return ret
    def get_normalized_parameters(self):
        """Return a string that contains the parameters that must be signed."""
        items = []
        for key, value in list(self.items()):
            if key == 'oauth_signature':
                continue
            # 1.0a/9.1.1 states that kvp must be sorted by key, then by value,
            # so we unpack sequence values into multiple items for sorting.
            if isinstance(value, str):
                items.append((to_utf8_if_string(key), to_utf8(value)))
            else:
                try:
                    value = list(value)
                except TypeError as e:
                    assert 'is not iterable' in str(e)
                    items.append((to_utf8_if_string(key), to_utf8_if_string(value)))
                else:
                    items.extend((to_utf8_if_string(key), to_utf8_if_string(item)) for item in value)
        # Include any query string parameters from the provided URL
        query = urllib.parse.urlparse(self.url)[4]
        url_items = list(self._split_url_string(query).items())
        url_items = [(to_utf8(k), to_utf8(v)) for k, v in url_items if k != 'oauth_signature' ]
        items.extend(url_items)
        items.sort()
        encoded_str = urllib.parse.urlencode(items)
        # Encode signature parameters per Oauth Core 1.0 protocol
        # spec draft 7, section 3.6
        # (http://tools.ietf.org/html/draft-hammer-oauth-07#section-3.6)
        # Spaces must be encoded with "%20" instead of "+"
        return encoded_str.replace('+', '%20').replace('%7E', '~')
    def sign_request(self, signature_method, consumer, token):
        """Set the signature parameter to the result of sign."""
        if not self.is_form_encoded:
            # according to
            # http://oauth.googlecode.com/svn/spec/ext/body_hash/1.0/oauth-bodyhash.html
            # section 4.1.1 "OAuth Consumers MUST NOT include an
            # oauth_body_hash parameter on requests with form-encoded
            # request bodies."
            # NOTE(review): sha() requires bytes under Python 3, so self.body
            # must be a bytes object at this point — confirm callers.
            self['oauth_body_hash'] = base64.b64encode(sha(self.body).digest())
        if 'oauth_consumer_key' not in self:
            self['oauth_consumer_key'] = consumer.key
        if token and 'oauth_token' not in self:
            self['oauth_token'] = token.key
        self['oauth_signature_method'] = signature_method.name
        self['oauth_signature'] = signature_method.sign(self, consumer, token)
    @classmethod
    def make_timestamp(cls):
        """Get seconds since epoch (UTC)."""
        return str(int(time.time()))
    @classmethod
    def make_nonce(cls):
        """Generate pseudorandom number."""
        return str(random.randint(0, 100000000))
    @classmethod
    def from_request(cls, http_method, http_url, headers=None, parameters=None,
            query_string=None):
        """Combines multiple parameter sources."""
        if parameters is None:
            parameters = {}
        # Headers
        if headers and 'Authorization' in headers:
            auth_header = headers['Authorization']
            # Check that the authorization header is OAuth.
            if auth_header[:6] == 'OAuth ':
                auth_header = auth_header[6:]
                try:
                    # Get the parameters from the header.
                    header_params = cls._split_header(auth_header)
                    parameters.update(header_params)
                except:
                    raise Error('Unable to parse OAuth parameters from '
                        'Authorization header.')
        # GET or POST query string.
        if query_string:
            query_params = cls._split_url_string(query_string)
            parameters.update(query_params)
        # URL parameters.
        param_str = urllib.parse.urlparse(http_url)[4] # query
        url_params = cls._split_url_string(param_str)
        parameters.update(url_params)
        if parameters:
            return cls(http_method, http_url, parameters)
        return None
    @classmethod
    def from_consumer_and_token(cls, consumer, token=None,
            http_method=HTTP_METHOD, http_url=None, parameters=None,
            body='', is_form_encoded=False):
        """Build a request pre-populated with the standard OAuth parameters."""
        if not parameters:
            parameters = {}
        defaults = {
            'oauth_consumer_key': consumer.key,
            'oauth_timestamp': cls.make_timestamp(),
            'oauth_nonce': cls.make_nonce(),
            'oauth_version': cls.version,
        }
        # Caller-supplied parameters win over the generated defaults.
        defaults.update(parameters)
        parameters = defaults
        if token:
            parameters['oauth_token'] = token.key
            if token.verifier:
                parameters['oauth_verifier'] = token.verifier
        return Request(http_method, http_url, parameters, body=body,
                       is_form_encoded=is_form_encoded)
    @classmethod
    def from_token_and_callback(cls, token, callback=None,
            http_method=HTTP_METHOD, http_url=None, parameters=None):
        """Build a request carrying a token (and optional callback URL)."""
        if not parameters:
            parameters = {}
        parameters['oauth_token'] = token.key
        if callback:
            parameters['oauth_callback'] = callback
        return cls(http_method, http_url, parameters)
    @staticmethod
    def _split_header(header):
        """Turn Authorization: header into parameters."""
        params = {}
        parts = header.split(',')
        for param in parts:
            # Ignore realm parameter.
            if param.find('realm') > -1:
                continue
            # Remove whitespace.
            param = param.strip()
            # Split key-value.
            param_parts = param.split('=', 1)
            # Remove quotes and unescape the value.
            params[param_parts[0]] = urllib.parse.unquote(param_parts[1].strip('\"'))
        return params
    @staticmethod
    def _split_url_string(param_str):
        """Turn URL string into parameters."""
        # NOTE(review): parse_qs is fed UTF-8 *bytes* here, so its values are
        # bytes, yet urllib.parse.unquote expects str under Python 3 — this
        # looks like a 2-to-3 porting hazard; confirm with non-ASCII input.
        parameters = parse_qs(param_str.encode('utf-8'), keep_blank_values=True)
        for k, v in list(parameters.items()):
            parameters[k] = urllib.parse.unquote(v[0])
        return parameters
class Client(httplib2.Http):
    """OAuthClient is a worker to attempt to execute a request.

    Subclasses httplib2.Http and OAuth-signs every request before
    dispatching it.
    """
    def __init__(self, consumer, token=None, cache=None, timeout=None,
            proxy_info=None):
        """Create a signing HTTP client.

        consumer -- Consumer credentials used for signing (required).
        token -- optional request/access Token.
        cache, timeout, proxy_info -- passed through to httplib2.Http.
        """
        if consumer is not None and not isinstance(consumer, Consumer):
            raise ValueError("Invalid consumer.")
        if token is not None and not isinstance(token, Token):
            raise ValueError("Invalid token.")
        self.consumer = consumer
        self.token = token
        self.method = SignatureMethod_HMAC_SHA1()
        httplib2.Http.__init__(self, cache=cache, timeout=timeout, proxy_info=proxy_info)
    def set_signature_method(self, method):
        """Replace the default HMAC-SHA1 signature method."""
        if not isinstance(method, SignatureMethod):
            raise ValueError("Invalid signature method.")
        self.method = method
    def request(self, uri, method="GET", body='', headers=None,
        redirections=httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None):
        """Sign the request with the client's OAuth credentials and send it.

        Form-encoded POST bodies carry the OAuth parameters in the body,
        GET requests carry them in the query string, and everything else
        uses the Authorization header.
        """
        DEFAULT_POST_CONTENT_TYPE = 'application/x-www-form-urlencoded'
        if not isinstance(headers, dict):
            headers = {}
        if method == "POST":
            headers['Content-Type'] = headers.get('Content-Type',
                DEFAULT_POST_CONTENT_TYPE)
        is_form_encoded = \
            headers.get('Content-Type') == 'application/x-www-form-urlencoded'
        if is_form_encoded and body:
            parameters = parse_qs(body)
        else:
            parameters = None
        req = Request.from_consumer_and_token(self.consumer,
            token=self.token, http_method=method, http_url=uri,
            parameters=parameters, body=body, is_form_encoded=is_form_encoded)
        req.sign_request(self.method, self.consumer, self.token)
        # urllib.parse.splittype/splithost (used previously) are private,
        # deprecated APIs in Python 3; urlparse yields the same realm for
        # any http(s) URI: "<scheme>://<host[:port]>".
        parts = urllib.parse.urlparse(uri)
        realm = '%s://%s' % (parts.scheme, parts.netloc)
        if is_form_encoded:
            body = req.to_postdata()
        elif method == "GET":
            uri = req.to_url()
        else:
            headers.update(req.to_header(realm=realm))
        return httplib2.Http.request(self, uri, method=method, body=body,
            headers=headers, redirections=redirections,
            connection_type=connection_type)
class Server(object):
    """A skeletal implementation of a service provider, providing protected
    resources to requests from authorized consumers.
    This class implements the logic to check requests for authorization. You
    can use it with your web server or web framework to protect certain
    resources with OAuth.
    """
    # Maximum allowed clock skew for oauth_timestamp, in seconds.
    timestamp_threshold = 300 # In seconds, five minutes.
    version = OAUTH_VERSION
    signature_methods = None
    def __init__(self, signature_methods=None):
        # Registry mapping signature method name -> SignatureMethod instance.
        self.signature_methods = signature_methods or {}
    def add_signature_method(self, signature_method):
        """Register a SignatureMethod under its name; return the registry."""
        self.signature_methods[signature_method.name] = signature_method
        return self.signature_methods
    def verify_request(self, request, consumer, token):
        """Verifies an api call and checks all the parameters."""
        self._check_version(request)
        self._check_signature(request, consumer, token)
        parameters = request.get_nonoauth_parameters()
        return parameters
    def build_authenticate_header(self, realm=''):
        """Optional support for the authenticate header."""
        return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
    def _check_version(self, request):
        """Verify the correct version of the request for this server."""
        version = self._get_version(request)
        if version and version != self.version:
            raise Error('OAuth version %s not supported.' % str(version))
    def _get_version(self, request):
        """Return the version of the request for this server."""
        try:
            version = request.get_parameter('oauth_version')
        except:
            # Absent oauth_version defaults to the protocol version.
            version = OAUTH_VERSION
        return version
    def _get_signature_method(self, request):
        """Figure out the signature with some defaults."""
        try:
            signature_method = request.get_parameter('oauth_signature_method')
        except:
            signature_method = SIGNATURE_METHOD
        try:
            # Get the signature method object.
            signature_method = self.signature_methods[signature_method]
        except:
            signature_method_names = ', '.join(list(self.signature_methods.keys()))
            raise Error('Signature method %s not supported try one of the following: %s' % (signature_method, signature_method_names))
        return signature_method
    def _get_verifier(self, request):
        """Return the oauth_verifier parameter; raises Error when absent."""
        return request.get_parameter('oauth_verifier')
    def _check_signature(self, request, consumer, token):
        """Validate timestamp freshness and the request signature."""
        timestamp, nonce = request._get_timestamp_nonce()
        self._check_timestamp(timestamp)
        signature_method = self._get_signature_method(request)
        try:
            signature = request.get_parameter('oauth_signature')
        except:
            raise MissingSignature('Missing oauth_signature.')
        # Validate the signature.
        valid = signature_method.check(request, consumer, token, signature)
        if not valid:
            key, base = signature_method.signing_base(request, consumer, token)
            # The base string is exposed to help clients debug their signing.
            raise Error('Invalid signature. Expected signature base '
                'string: %s' % base)
    def _check_timestamp(self, timestamp):
        """Verify that timestamp is recentish."""
        timestamp = int(timestamp)
        now = int(time.time())
        lapsed = now - timestamp
        if lapsed > self.timestamp_threshold:
            raise Error('Expired timestamp: given %d and now %s has a '
                'greater difference than threshold %d' % (timestamp, now,
                    self.timestamp_threshold))
class SignatureMethod(object):
    """A way of signing requests.

    The OAuth protocol lets consumers and service providers pick a way to
    sign requests.  Subclass this and implement `signing_base` and `sign`
    to provide a new signing scheme.
    """
    def signing_base(self, request, consumer, token):
        """Calculate the string that needs to be signed.

        Returns a 2-tuple of (signing key, message to sign); the latter is
        also useful in error messages when debugging client signatures.
        """
        raise NotImplementedError
    def sign(self, request, consumer, token):
        """Return the signature for the given request.

        Implementations should build the signed message via their own
        `signing_base()` so failures remain debuggable.
        """
        raise NotImplementedError
    def check(self, request, consumer, token, signature):
        """Return whether `signature` matches the computed signature."""
        expected = self.sign(request, consumer, token)
        return expected == signature
class SignatureMethod_HMAC_SHA1(SignatureMethod):
    """HMAC-SHA1 signing per OAuth Core 1.0 section 9.2."""
    name = 'HMAC-SHA1'
    def signing_base(self, request, consumer, token):
        """Return (signing key, signature base string) for the request."""
        if not hasattr(request, 'normalized_url') or request.normalized_url is None:
            raise ValueError("Base URL for request is not set.")
        sig = (
            escape(request.method),
            escape(request.normalized_url),
            escape(request.get_normalized_parameters()),
        )
        key = '%s&' % escape(consumer.secret)
        if token:
            key += escape(token.secret)
        raw = '&'.join(sig)
        return key, raw
    def sign(self, request, consumer, token):
        """Builds the base signature string."""
        key, raw = self.signing_base(request, consumer, token)
        # hmac.new() requires bytes under Python 3 (the old str arguments
        # raised TypeError).  Both values are ASCII-safe because escape()
        # percent-encodes all non-ASCII input, so UTF-8 encoding is exact.
        hashed = hmac.new(key.encode('utf-8'), raw.encode('utf-8'), sha)
        # Calculate the digest base 64.
        return binascii.b2a_base64(hashed.digest())[:-1]
class SignatureMethod_PLAINTEXT(SignatureMethod):
    # OAuth "PLAINTEXT" signature method: the signature is just the
    # escaped secrets, so it should only be used over a secure channel.
    name = 'PLAINTEXT'

    def signing_base(self, request, consumer, token):
        """Concatenates the consumer key and secret with the token's
        secret.

        Returns ``(sig, sig)`` -- key and message are the same string:
        ``escape(consumer.secret) + '&' [+ escape(token.secret)]``.
        """
        sig = '%s&' % escape(consumer.secret)
        if token:
            sig = sig + escape(token.secret)
        return sig, sig

    def sign(self, request, consumer, token):
        """Return the plaintext signature (identical to the key)."""
        key, raw = self.signing_base(request, consumer, token)
        return raw
| apache-2.0 |
inonit/wagtail | wagtail/bin/wagtail.py | 5 | 2191 | #!/usr/bin/env python
from __future__ import print_function, absolute_import
import os
from optparse import OptionParser
from django.core.management import ManagementUtility
def create_project(parser, options, args):
    """Create a new Wagtail project from the bundled project template.

    ``args`` is the full positional argument list: ``args[0]`` is the
    command name ("start"), ``args[1]`` the project name, and optional
    ``args[2]`` a destination directory.  Exits via ``parser.error()``
    on invalid usage or when the name clashes with an importable module.
    """
    # Validate args
    if len(args) < 2:
        parser.error("Please specify a name for your wagtail installation")
    elif len(args) > 3:
        parser.error("Too many arguments")
    project_name = args[1]
    try:
        dest_dir = args[2]
    except IndexError:
        dest_dir = None
    # Make sure given name is not already in use by another python package/module.
    try:
        __import__(project_name)
    except ImportError:
        pass
    else:
        parser.error("'%s' conflicts with the name of an existing "
                     "Python module and cannot be used as a project "
                     "name. Please try another name." % project_name)
    print("Creating a wagtail project called %(project_name)s" % {'project_name': project_name})
    # Create the project from the wagtail template using startapp

    # First find the path to wagtail
    import wagtail
    wagtail_path = os.path.dirname(wagtail.__file__)
    template_path = os.path.join(wagtail_path, 'project_template')
    # Call django-admin startproject
    utility_args = ['django-admin.py',
                    'startproject',
                    '--template=' + template_path,
                    '--ext=html,rst',
                    project_name]
    if dest_dir:
        utility_args.append(dest_dir)
    utility = ManagementUtility(utility_args)
    utility.execute()
    print("Success! %(project_name)s is created" % {'project_name': project_name})
# Maps sub-command name -> handler(parser, options, args).
COMMANDS = {
    'start': create_project,
}
def main():
    """Entry point: dispatch ``wagtail <command>`` to its handler."""
    # Parse options
    parser = OptionParser(usage="Usage: %prog start project_name [directory]")
    (options, args) = parser.parse_args()
    # Find command
    try:
        command = args[0]
    except IndexError:
        # No command given: show usage and exit cleanly.
        parser.print_help()
        return
    if command in COMMANDS:
        COMMANDS[command](parser, options, args)
    else:
        parser.error("Unrecognised command: " + command)
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
| bsd-3-clause |
mtlchun/edx | openedx/core/djangoapps/content/course_structures/migrations/0001_initial.py | 102 | 1759 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    # South schema migration: creates the CourseStructure table.

    def forwards(self, orm):
        # Adding model 'CourseStructure'
        db.create_table('course_structures_coursestructure', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('created', self.gf('model_utils.fields.AutoCreatedField')(default=datetime.datetime.now)),
            ('modified', self.gf('model_utils.fields.AutoLastModifiedField')(default=datetime.datetime.now)),
            ('course_id', self.gf('xmodule_django.models.CourseKeyField')(unique=True, max_length=255, db_index=True)),
            ('structure_json', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
        ))
        db.send_create_signal('course_structures', ['CourseStructure'])

    def backwards(self, orm):
        # Deleting model 'CourseStructure'
        db.delete_table('course_structures_coursestructure')

    # Frozen ORM snapshot used by South when running this migration.
    models = {
        'course_structures.coursestructure': {
            'Meta': {'object_name': 'CourseStructure'},
            'course_id': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
            'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
            'structure_json': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['course_structures']
from recommonmark.parser import CommonMarkParser
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0,os.path.abspath('..'))
print(sys.path)

# -- Project information -----------------------------------------------------

project = 'bedparse'
copyright = '2019, Tommaso Leonardi'
author = 'Tommaso Leonardi'

# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = 'v0.2.0'

# -- General configuration ---------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.napoleon']

# Include both the class docstring and __init__ docstring for autoclass.
autoclass_content = 'both'

# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
#source_suffix = '.rst'

# NOTE(review): source_parsers was deprecated in Sphinx 1.8 in favour of
# registering the parser via an extension -- confirm the pinned Sphinx
# version before upgrading.
source_parsers = {
    '.md': CommonMarkParser,
}

# The master toctree document.
master_doc = 'index'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['.build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}

# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'bedparsedoc'

# -- Options for LaTeX output ------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'bedparse.tex', 'bedparse Documentation',
     'Tommaso Leonardi', 'manual'),
]

# -- Options for manual page output ------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'bedparse', 'bedparse Documentation',
     [author], 1)
]

# -- Options for Texinfo output ----------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'bedparse', 'bedparse Documentation',
     author, 'bedparse', 'One line description of project.',
     'Miscellaneous'),
]

# -- Options for Epub output -------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = project

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''

# A unique identification for the text.
#
# epub_uid = ''

# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
| mit |
jia200x/RIOT | examples/suit_update/tests/01-run.py | 6 | 6163 | #!/usr/bin/env python3
# Copyright (C) 2019 Inria
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import os
import subprocess
import sys
import tempfile
import time
from testrunner import run
# Default test over loopback interface
COAP_HOST = "[fd00:dead:beef::1]"

# Seconds to wait for a firmware download step / for the manifest check.
UPDATING_TIMEOUT = 10
MANIFEST_TIMEOUT = 15

# Network setup: ethos serial link (default) vs. plain interface.
USE_ETHOS = int(os.getenv("USE_ETHOS", "1"))
TAP = os.getenv("TAP", "riot0")

# Directory served by aiocoap-fileserver; removed again in cleanup().
TMPDIR = tempfile.TemporaryDirectory()
def start_aiocoap_fileserver():
    """Start aiocoap-fileserver serving TMPDIR in the background.

    Returns the Popen handle so the caller can kill it in cleanup().
    """
    # "exec" makes the shell replace itself with the server, so kill()
    # terminates the server rather than an intermediate shell; the
    # command string is static, so shell=True carries no injection risk.
    aiocoap_process = subprocess.Popen(
        "exec aiocoap-fileserver %s" % TMPDIR.name, shell=True
    )
    return aiocoap_process
def cleanup(aiocoap_process):
    """Kill the fileserver and remove the temporary publish directory."""
    aiocoap_process.kill()
    TMPDIR.cleanup()
def notify(coap_server, client_url, version=None):
    """Run ``make suit/notify`` to point the client at the CoAP server.

    Raises AssertionError when the make target exits non-zero.
    """
    cmd = [
        "make",
        "suit/notify",
        "SUIT_COAP_SERVER={}".format(coap_server),
        "SUIT_CLIENT={}".format(client_url),
    ]
    if version is not None:
        cmd.append("SUIT_NOTIFY_VERSION={}".format(version))
    assert not subprocess.call(cmd)
def publish(server_dir, server_url, app_ver, keys='default', latest_name=None):
    """Build and publish a signed manifest + firmware to the fileserver.

    ``keys`` selects the signing key set (e.g. 'invalid_keys' to produce
    a manifest that must fail verification).  Raises AssertionError when
    the make target exits non-zero.
    """
    cmd = [
        "make",
        "suit/publish",
        "SUIT_COAP_FSROOT={}".format(server_dir),
        "SUIT_COAP_SERVER={}".format(server_url),
        "APP_VER={}".format(app_ver),
        "RIOTBOOT_SKIP_COMPILE=1",
        "SUIT_KEY={}".format(keys),
    ]
    if latest_name is not None:
        cmd.append("SUIT_MANIFEST_SIGNED_LATEST={}".format(latest_name))
    assert not subprocess.call(cmd)
def wait_for_update(child):
    """Wait for firmware-download output from the node.

    Returns the index of the matched pattern: 0 while the progress bar
    is still being printed, 1 once flashing reports completion.
    """
    return child.expect([r"Fetching firmware \|[█ ]+\|\s+\d+\%",
                         "riotboot_flashwrite: riotboot flashing "
                         "completed successfully"],
                        timeout=UPDATING_TIMEOUT)
def get_ipv6_addr(child):
    """Read the node's IPv6 address via the 'ifconfig' shell command.

    With ethos (USE_ETHOS != 0) the link-local address with %TAP scope
    suffix is returned; otherwise the global address is used.
    """
    child.expect_exact('>')
    child.sendline('ifconfig')
    if USE_ETHOS == 0:
        # Get device global address
        child.expect(
            r"inet6 addr: (?P<gladdr>[0-9a-fA-F:]+:[A-Fa-f:0-9]+)"
            " scope: global VAL"
        )
        addr = child.match.group("gladdr").lower()
    else:
        # Get device local address
        child.expect_exact("Link type: wired")
        child.expect(
            r"inet6 addr: (?P<lladdr>[0-9a-fA-F:]+:[A-Fa-f:0-9]+)"
            " scope: link VAL"
        )
        addr = "{}%{}".format(child.match.group("lladdr").lower(), TAP)
    return addr
def ping6(client):
    """Ping *client* up to 10 times; abort the whole run on total failure.

    Returns True when at least one ping succeeded (always True when the
    function returns at all, since failure calls sys.exit(1)).
    """
    print("pinging node...")
    ping_ok = False
    for _i in range(10):
        try:
            subprocess.check_call(["ping", "-q", "-c1", "-w1", client])
            ping_ok = True
            break
        except subprocess.CalledProcessError:
            pass
    if not ping_ok:
        print("pinging node failed. aborting test.")
        sys.exit(1)
    else:
        print("pinging node succeeded.")
    return ping_ok
def get_reachable_addr(child):
    """Wait for the node to boot, then return its address in brackets.

    Exits the run (via ping6) when the node is unreachable.
    """
    # Wait for suit_coap thread to start
    child.expect_exact("suit_coap: started.")
    child.expect_exact("Starting the shell")
    # give some time for the network interface to be configured
    time.sleep(1)
    # Get address
    client_addr = get_ipv6_addr(child)
    # Verify address is reachable
    ping6(client_addr)
    return "[{}]".format(client_addr)
def app_version(child):
    """Return the version of the currently running image as an int."""
    # get version of currently running image
    # "Image Version: 0x00000000"
    child.expect(r"Image Version: (?P<app_ver>0x[0-9a-fA-F:]+)\r\n")
    app_ver = int(child.match.group("app_ver"), 16)
    return app_ver
def _test_invalid_version(child, client, app_ver):
    """A manifest with seq_nr <= the running image must be rejected."""
    publish(TMPDIR.name, COAP_HOST, app_ver - 1)
    notify(COAP_HOST, client, app_ver - 1)
    child.expect_exact("suit_coap: trigger received")
    child.expect_exact("suit: verifying manifest signature")
    child.expect_exact("seq_nr <= running image")
def _test_invalid_signature(child, client, app_ver):
    """A manifest signed with the wrong key must fail validation."""
    publish(TMPDIR.name, COAP_HOST, app_ver + 1, 'invalid_keys')
    notify(COAP_HOST, client, app_ver + 1)
    child.expect_exact("suit_coap: trigger received")
    child.expect_exact("suit: verifying manifest signature")
    child.expect_exact("Unable to validate signature")
def _test_successful_update(child, client, app_ver):
    """Publish two successive versions and verify both updates install.

    After each update the node must boot from the other slot; finally
    the node must come back up reachable.
    """
    for version in [app_ver + 1, app_ver + 2]:
        # Trigger update process, verify it validates manifest correctly
        publish(TMPDIR.name, COAP_HOST, version)
        notify(COAP_HOST, client, version)
        child.expect_exact("suit_coap: trigger received")
        child.expect_exact("suit: verifying manifest signature")
        child.expect(
            r"riotboot_flashwrite: initializing update to target slot (\d+)\r\n",
            timeout=MANIFEST_TIMEOUT,
        )
        target_slot = int(child.match.group(1))
        # Wait for update to complete
        while wait_for_update(child) == 0:
            pass
        # Verify running slot
        child.expect(r"running from slot (\d+)\r\n")
        assert target_slot == int(child.match.group(1)), "BOOTED FROM SAME SLOT"
    # Verify the node is reachable after the final update. The returned
    # address is deliberately discarded: only the side-effecting
    # reachability check matters (the original bound it to an unused
    # local 'client', shadowing the parameter).
    get_reachable_addr(child)
def testfunc(child):
    """Top-level test sequence driven by testrunner."""
    # Get current app_ver
    current_app_ver = app_version(child)
    # Verify client is reachable and get address
    client = get_reachable_addr(child)

    def run(func):
        # Run one scenario; print progress dots when output is captured
        # (i.e. not already echoed to stdout).
        if child.logfile == sys.stdout:
            func(child, client, current_app_ver)
        else:
            try:
                func(child, client, current_app_ver)
                print(".", end="", flush=True)
            except Exception as e:
                print("FAILED")
                raise e

    run(_test_invalid_signature)
    run(_test_invalid_version)
    run(_test_successful_update)
    print("TEST PASSED")
if __name__ == "__main__":
    res = 1
    # Pre-initialize so the finally block cannot hit a NameError when
    # start_aiocoap_fileserver() itself raises (the original code
    # referenced an undefined name in that case).
    aiocoap_process = None
    try:
        aiocoap_process = start_aiocoap_fileserver()
        # TODO: wait for coap port to be available
        res = run(testfunc, echo=True)
    except Exception as e:
        print(e)
    finally:
        if aiocoap_process is not None:
            cleanup(aiocoap_process)
    sys.exit(res)
| lgpl-2.1 |
Milad1993/linux | tools/perf/scripts/python/check-perf-trace.py | 1997 | 2539 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
    # Called once by perf before any events are processed.
    print "trace_begin"
    pass
def trace_end():
    # Called once after all events; report any unhandled event counts.
    print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
                       common_secs, common_nsecs, common_pid, common_comm,
                       common_callchain, vec):
    # Handler for irq:softirq_entry; prints the symbolic softirq vector.
    print_header(event_name, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm)
    print_uncommon(context)
    print "vec=%s\n" % \
        (symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
                  common_secs, common_nsecs, common_pid, common_comm,
                  common_callchain, call_site, ptr, bytes_req, bytes_alloc,
                  gfp_flags):
    # Handler for kmem:kmalloc; prints allocation details with the GFP
    # flags rendered symbolically.
    print_header(event_name, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm)
    print_uncommon(context)
    print "call_site=%u, ptr=%u, bytes_req=%u, " \
        "bytes_alloc=%u, gfp_flags=%s\n" % \
        (call_site, ptr, bytes_req, bytes_alloc,
         flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
    # Count events that have no dedicated handler. autodict raises
    # TypeError (not KeyError) for a missing key, hence this except.
    try:
        unhandled[event_name] += 1
    except TypeError:
        unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
    # One-line event prefix; the trailing comma keeps the handler's own
    # fields on the same output line.
    print "%-20s %5u %05u.%09u %8u %-20s " % \
        (event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
    print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
        % (common_pc(context), trace_flag_str(common_flags(context)), \
           common_lock_depth(context))
def print_unhandled():
    # Print an event -> count table for events we did not handle.
    keys = unhandled.keys()
    if not keys:
        return
    print "\nunhandled events:\n\n",
    print "%-40s %10s\n" % ("event", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
                            "-----------"),
    for event_name in keys:
        print "%-40s %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
inclement/vispy | examples/basics/scene/contour.py | 14 | 2330 | # -*- coding: utf-8 -*-
# vispy: gallery 30
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
"""
Simple use of SceneCanvas to display an Image.
"""
import sys
from vispy import scene, app
from vispy.visuals.filters import IsolineFilter
from vispy.io import load_data_file, read_png
# Canvas with the default interactive key bindings.
canvas = scene.SceneCanvas(keys='interactive')
canvas.size = 600, 800
canvas.show()

# Set up a viewbox to display the image with interactive pan/zoom
view = canvas.central_widget.add_view()

interpolation = 'bicubic'
img_data = read_png(load_data_file('mona_lisa/mona_lisa_sm.png'))
image = scene.visuals.Image(img_data, interpolation=interpolation,
                            parent=view.scene, method='impostor')

# Isoline overlay; attached later by the one-shot timer below.
level = 10
iso = IsolineFilter(level=level, width=1., color='white')

# Set 2D camera (the camera will scale to the contents in the scene)
view.camera = scene.PanZoomCamera(aspect=1)
# flip y-axis to have correct alignment
view.camera.flip = (0, 1, 0)
# select face part
view.camera.rect = (160, 130, 240, 200)
canvas.title = ('Spatial Filtering using %s Filter - Isoline %d level'
                % (image.interpolation, iso.level))

# get interpolation functions from Image
names = image.interpolation_functions
act = names.index(interpolation)
# Implement key presses
@canvas.events.key_press.connect
def on_key_press(event):
    """Left/Right cycles interpolation; Up/Down changes the isoline level."""
    # NOTE(review): 'first' is never defined anywhere in this script, and
    # 'level'/'interpolation' are not rebound here -- most of this global
    # list appears unnecessary; confirm before trimming.
    global act, level, first, interpolation
    if event.key in ['Left', 'Right']:
        if event.key == 'Right':
            step = 1
        else:
            step = -1
        act = (act + step) % len(names)
        image.interpolation = names[act]
    if event.key in ['Up', 'Down']:
        iso.level += 1 if event.key == 'Up' else -1
    canvas.title = ('Spatial Filtering using %s Filter - Isoline %d level'
                    % (image.interpolation, iso.level))
    canvas.update()
# attaching of isoline filter via timer
def on_timer1(event):
    # Attach the isoline filter once, after the first frame is drawn.
    image.attach(iso)
    canvas.update()

timer1 = app.Timer('auto', iterations=1, connect=on_timer1, start=True)
# Only start the event loop when run as a script and not interactively.
if __name__ == '__main__' and sys.flags.interactive == 0:
    app.run()
| bsd-3-clause |
1013553207/django | django/utils/jslex.py | 335 | 7778 | """JsLex: a lexer for Javascript"""
# Originally from https://bitbucket.org/ned/jslex
from __future__ import unicode_literals
import re
class Tok(object):
    """
    A specification for a token class.

    Every instance receives a unique, monotonically increasing ``id``
    drawn from a class-level counter; ``next`` optionally names the
    lexer state to switch to after this token matches.
    """
    # Class-wide counter used to hand out unique token ids.
    num = 0

    def __init__(self, name, regex, next=None):
        cls = Tok
        self.id = cls.num
        cls.num = cls.num + 1
        self.name = name
        self.regex = regex
        self.next = next
def literals(choices, prefix="", suffix=""):
    """
    Create a regex from a space-separated list of literal `choices`.

    If provided, `prefix` and `suffix` will be attached to each choice
    individually.  The result is an alternation of the escaped choices.
    """
    alternatives = []
    for choice in choices.split():
        alternatives.append(prefix + re.escape(choice) + suffix)
    return "|".join(alternatives)
class Lexer(object):
    """
    A generic multi-state regex-based lexer.

    ``states`` maps a state name to a list of Tok rules; each state's
    rules are compiled into one alternation regex whose named groups
    (``t<id>``) identify which token matched.
    """
    def __init__(self, states, first):
        self.regexes = {}
        self.toks = {}
        for state, rules in states.items():
            parts = []
            for tok in rules:
                groupid = "t%d" % tok.id
                self.toks[groupid] = tok
                parts.append("(?P<%s>%s)" % (groupid, tok.regex))
            self.regexes[state] = re.compile("|".join(parts), re.MULTILINE | re.VERBOSE)
        self.state = first

    def lex(self, text):
        """
        Lexically analyze `text`.

        Yields pairs (`name`, `tokentext`).  The current state is kept
        on the instance, so a later call resumes in the state reached
        by the previous one.
        """
        end = len(text)
        state = self.state
        regexes = self.regexes
        toks = self.toks
        start = 0
        while start < end:
            for match in regexes[state].finditer(text, start):
                name = match.lastgroup
                tok = toks[name]
                toktext = match.group(name)
                start += len(toktext)
                yield (tok.name, toktext)
                if tok.next:
                    state = tok.next
                # break restarts finditer from the new offset so that a
                # state switch takes effect for the very next token.
                break
        self.state = state
class JsLexer(Lexer):
    """
    A Javascript lexer

    >>> lexer = JsLexer()
    >>> list(lexer.lex("a = 1"))
    [('id', 'a'), ('ws', ' '), ('punct', '='), ('ws', ' '), ('dnum', '1')]

    This doesn't properly handle non-ASCII characters in the Javascript source.
    """
    # Because these tokens are matched as alternatives in a regex, longer
    # possibilities must appear in the list before shorter ones, for example,
    # '>>' before '>'.
    #
    # Note that we don't have to detect malformed Javascript, only properly
    # lex correct Javascript, so much of this is simplified.

    # Details of Javascript lexical structure are taken from
    # http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-262.pdf

    # A useful explanation of automatic semicolon insertion is at
    # http://inimino.org/~inimino/blog/javascript_semicolons

    both_before = [
        Tok("comment", r"/\*(.|\n)*?\*/"),
        Tok("linecomment", r"//.*?$"),
        Tok("ws", r"\s+"),
        Tok("keyword", literals("""
                                break case catch class const continue debugger
                                default delete do else enum export extends
                                finally for function if import in instanceof
                                new return super switch this throw try typeof
                                var void while with
                                """, suffix=r"\b"), next='reg'),
        Tok("reserved", literals("null true false", suffix=r"\b"), next='div'),
        Tok("id", r"""
                  ([a-zA-Z_$ ]|\\u[0-9a-fA-Z]{4})       # first char
                  ([a-zA-Z_$0-9]|\\u[0-9a-fA-F]{4})*    # rest chars
                  """, next='div'),
        Tok("hnum", r"0[xX][0-9a-fA-F]+", next='div'),
        Tok("onum", r"0[0-7]+"),
        Tok("dnum", r"""
                    (   (0|[1-9][0-9]*)     # DecimalIntegerLiteral
                        \.                  # dot
                        [0-9]*              # DecimalDigits-opt
                        ([eE][-+]?[0-9]+)?  # ExponentPart-opt
                    |
                        \.                  # dot
                        [0-9]+              # DecimalDigits
                        ([eE][-+]?[0-9]+)?  # ExponentPart-opt
                    |
                        (0|[1-9][0-9]*)     # DecimalIntegerLiteral
                        ([eE][-+]?[0-9]+)?  # ExponentPart-opt
                    )
                    """, next='div'),
        Tok("punct", literals("""
                              >>>= === !== >>> <<= >>= <= >= == != << >> &&
                              || += -= *= %= &= |= ^=
                              """), next="reg"),
        Tok("punct", literals("++ -- ) ]"), next='div'),
        Tok("punct", literals("{ } ( [ . ; , < > + - * % & | ^ ! ~ ? : ="), next='reg'),
        Tok("string", r'"([^"\\]|(\\(.|\n)))*?"', next='div'),
        Tok("string", r"'([^'\\]|(\\(.|\n)))*?'", next='div'),
    ]

    both_after = [
        Tok("other", r"."),
    ]

    # Two lexer states disambiguate '/': after a value it is division;
    # elsewhere it starts a regex literal.
    states = {
        # slash will mean division
        'div': both_before + [
            Tok("punct", literals("/= /"), next='reg'),
        ] + both_after,

        # slash will mean regex
        'reg': both_before + [
            Tok("regex",
                r"""
                    /                       # opening slash
                    # First character is..
                    (   [^*\\/[]            # anything but * \ / or [
                    |   \\.                 # or an escape sequence
                    |   \[                  # or a class, which has
                            (   [^\]\\]     # anything but \ or ]
                            |   \\.         # or an escape sequence
                            )*              # many times
                        \]
                    )
                    # Following characters are same, except for excluding a star
                    (   [^\\/[]             # anything but \ / or [
                    |   \\.                 # or an escape sequence
                    |   \[                  # or a class, which has
                            (   [^\]\\]     # anything but \ or ]
                            |   \\.         # or an escape sequence
                            )*              # many times
                        \]
                    )*                      # many times
                    /                       # closing slash
                    [a-zA-Z0-9]*            # trailing flags
                """, next='div'),
        ] + both_after,
    }

    def __init__(self):
        # Start in 'reg' state: at the beginning of input a '/' can only
        # open a regex literal.
        super(JsLexer, self).__init__(self.states, 'reg')
def prepare_js_for_gettext(js):
    """
    Convert the Javascript source `js` into something resembling C for
    xgettext.

    What actually happens is that all the regex literals are replaced
    with "REGEX", single-quoted strings become double-quoted, and
    Unicode escapes in identifiers are neutralized.
    """
    def escape_quotes(m):
        """Used in a regex to properly escape double quotes."""
        s = m.group(0)
        if s == '"':
            return r'\"'
        else:
            return s

    lexer = JsLexer()
    c = []
    for name, tok in lexer.lex(js):
        if name == 'regex':
            # C doesn't grok regexes, and they aren't needed for gettext,
            # so just output a string instead.
            tok = '"REGEX"'
        elif name == 'string':
            # C doesn't have single-quoted strings, so make all strings
            # double-quoted.
            if tok.startswith("'"):
                guts = re.sub(r"\\.|.", escape_quotes, tok[1:-1])
                tok = '"' + guts + '"'
        elif name == 'id':
            # C can't deal with Unicode escapes in identifiers. We don't
            # need them for gettext anyway, so replace them with something
            # innocuous
            tok = tok.replace("\\", "U")
        c.append(tok)
    return ''.join(c)
| bsd-3-clause |
rbiswas4/simlib | setup.py | 1 | 1398 | from setuptools import setup
import sys
import os
import re
PACKAGENAME = 'opsimsummary'
packageDir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                          PACKAGENAME)

# Obtain the package version from version.py without importing it.
versionFile = os.path.join(packageDir, 'version.py')
with open(versionFile, 'r') as f:
    s = f.read()
# Look up the string value assigned to __version__ in version.py using regexp
versionRegExp = re.compile("__VERSION__ = \"(.*?)\"")
# Assign to __version__
__version__ = versionRegExp.findall(s)[0]
print(__version__)

# create requirements file
# NOTE(review): genRequirements is computed and printed but the script is
# never executed below -- presumably a leftover; confirm before removing.
setupDir = os.path.join(packageDir, '..', 'setup')
genRequirements = os.path.join(setupDir, 'generate_requirements.py')
print(genRequirements)

setup(# package information
      name=PACKAGENAME,
      version=__version__,
      description='simple repo to study OpSim output summaries',
      long_description=''' ''',
      # What code to include as packages
      packages=[PACKAGENAME],
      # Bug fix: the keyword is 'package_dir'; the original 'packagedir'
      # is not a setuptools option and was silently ignored.
      package_dir={PACKAGENAME: 'opsimsummary'},
      # What data to include as packages
      include_package_data=True,
      package_data={PACKAGENAME: ['example_data/*.dat', 'example_data/*.simlib',
                                  'example_data/enigma_1189_micro.db',
                                  'example_data/opsimv4_feat_micro.db',
                                  'example_data/healpixels_micro.db']}
      )
| mit |
markuskont/salt-syslog | vagrant/lib/win-x64/M2Crypto/RSA.py | 8 | 13128 | """M2Crypto wrapper for OpenSSL RSA API.
Copyright (c) 1999-2004 Ng Pheng Siong. All rights reserved."""
import sys
import util, BIO, Err, m2
class RSAError(Exception): pass
# Register our exception type with the underlying m2 extension and
# re-export the supported padding-mode constants.
m2.rsa_init(RSAError)

no_padding = m2.no_padding
pkcs1_padding = m2.pkcs1_padding
sslv23_padding = m2.sslv23_padding
pkcs1_oaep_padding = m2.pkcs1_oaep_padding
class RSA:
"""
RSA Key Pair.
"""
m2_rsa_free = m2.rsa_free
def __init__(self, rsa, _pyfree=0):
assert m2.rsa_type_check(rsa), "'rsa' type error"
self.rsa = rsa
self._pyfree = _pyfree
def __del__(self):
if getattr(self, '_pyfree', 0):
self.m2_rsa_free(self.rsa)
def __len__(self):
return m2.rsa_size(self.rsa) << 3
def __getattr__(self, name):
if name == 'e':
return m2.rsa_get_e(self.rsa)
elif name == 'n':
return m2.rsa_get_n(self.rsa)
else:
raise AttributeError
def pub(self):
assert self.check_key(), 'key is not initialised'
return m2.rsa_get_e(self.rsa), m2.rsa_get_n(self.rsa)
def public_encrypt(self, data, padding):
assert self.check_key(), 'key is not initialised'
return m2.rsa_public_encrypt(self.rsa, data, padding)
def public_decrypt(self, data, padding):
assert self.check_key(), 'key is not initialised'
return m2.rsa_public_decrypt(self.rsa, data, padding)
def private_encrypt(self, data, padding):
assert self.check_key(), 'key is not initialised'
return m2.rsa_private_encrypt(self.rsa, data, padding)
def private_decrypt(self, data, padding):
assert self.check_key(), 'key is not initialised'
return m2.rsa_private_decrypt(self.rsa, data, padding)
def save_key_bio(self, bio, cipher='aes_128_cbc', callback=util.passphrase_callback):
"""
Save the key pair to an M2Crypto.BIO.BIO object in PEM format.
@type bio: M2Crypto.BIO.BIO
@param bio: M2Crypto.BIO.BIO object to save key to.
@type cipher: string
@param cipher: Symmetric cipher to protect the key. The default
cipher is 'aes_128_cbc'. If cipher is None, then the key is saved
in the clear.
@type callback: Python callable
@param callback: A Python callable object that is invoked
to acquire a passphrase with which to protect the key.
The default is util.passphrase_callback.
"""
if cipher is None:
return m2.rsa_write_key_no_cipher(self.rsa, bio._ptr(), callback)
else:
ciph = getattr(m2, cipher, None)
if ciph is None:
raise RSAError, 'not such cipher %s' % cipher
else:
ciph = ciph()
return m2.rsa_write_key(self.rsa, bio._ptr(), ciph, callback)
def save_key(self, file, cipher='aes_128_cbc', callback=util.passphrase_callback):
"""
Save the key pair to a file in PEM format.
@type file: string
@param file: Name of file to save key to.
@type cipher: string
@param cipher: Symmetric cipher to protect the key. The default
cipher is 'aes_128_cbc'. If cipher is None, then the key is saved
in the clear.
@type callback: Python callable
@param callback: A Python callable object that is invoked
to acquire a passphrase with which to protect the key.
The default is util.passphrase_callback.
"""
bio = BIO.openfile(file, 'wb')
return self.save_key_bio(bio, cipher, callback)
save_pem = save_key
def as_pem(self, cipher='aes_128_cbc', callback=util.passphrase_callback):
"""
Returns the key(pair) as a string in PEM format.
"""
bio = BIO.MemoryBuffer()
self.save_key_bio(bio, cipher, callback)
return bio.read()
def save_key_der_bio(self, bio):
"""
Save the key pair to an M2Crypto.BIO.BIO object in DER format.
@type bio: M2Crypto.BIO.BIO
@param bio: M2Crypto.BIO.BIO object to save key to.
"""
return m2.rsa_write_key_der(self.rsa, bio._ptr())
def save_key_der(self, file):
"""
Save the key pair to a file in DER format.
@type file: str
@param file: Filename to save key to
"""
bio = BIO.openfile(file, 'wb')
return self.save_key_der_bio(bio)
def save_pub_key_bio(self, bio):
"""
Save the public key to an M2Crypto.BIO.BIO object in PEM format.
@type bio: M2Crypto.BIO.BIO
@param bio: M2Crypto.BIO.BIO object to save key to.
"""
return m2.rsa_write_pub_key(self.rsa, bio._ptr())
def save_pub_key(self, file):
"""
Save the public key to a file in PEM format.
@type file: string
@param file: Name of file to save key to.
"""
bio = BIO.openfile(file, 'wb')
return m2.rsa_write_pub_key(self.rsa, bio._ptr())
def check_key(self):
return m2.rsa_check_key(self.rsa)
def sign_rsassa_pss(self, digest, algo='sha1', salt_length=20):
"""
Signs a digest with the private key using RSASSA-PSS
@requires: OpenSSL 0.9.7h or later.
@type digest: str
@param digest: A digest created by using the digest method
@type salt_length: int
@param salt_length: The length of the salt to use
@type algo: str
@param algo: The hash algorithm to use
@return: a string which is the signature
"""
hash = getattr(m2, algo, None)
if hash is None:
raise ValueError('not such hash algorithm %s' % hash_algo)
signature = m2.rsa_padding_add_pkcs1_pss(self.rsa, digest, hash(), salt_length)
return self.private_encrypt(signature, m2.no_padding)
def verify_rsassa_pss(self, data, signature, algo='sha1', salt_length=20):
"""
Verifies the signature RSASSA-PSS
@requires: OpenSSL 0.9.7h or later.
@type data: str
@param data: Data that has been signed
@type signature: str
@param signature: The signature signed with RSASSA-PSS
@type salt_length: int
@param salt_length: The length of the salt that was used
@type algo: str
@param algo: The hash algorithm to use
@return: 1 or 0, depending on whether the signature was
verified or not.
"""
hash = getattr(m2, algo, None)
if hash is None:
raise ValueError('not such hash algorithm %s' % hash_algo)
plain_signature = self.public_decrypt(signature, m2.no_padding)
return m2.rsa_verify_pkcs1_pss(self.rsa, data, plain_signature, hash(), salt_length)
def sign(self, digest, algo='sha1'):
"""
Signs a digest with the private key
@type digest: str
@param digest: A digest created by using the digest method
@type algo: str
@param algo: The method that created the digest.
Legal values are 'sha1','sha224', 'sha256', 'ripemd160',
and 'md5'.
@return: a string which is the signature
"""
digest_type = getattr(m2, 'NID_' + algo, None)
if digest_type is None:
raise ValueError, ('unknown algorithm', algo)
return m2.rsa_sign(self.rsa, digest, digest_type)
def verify(self, data, signature, algo='sha1'):
"""
Verifies the signature with the public key
@type data: str
@param data: Data that has been signed
@type signature: str
@param signature: The signature signed with the private key
@type algo: str
@param algo: The method use to create digest from the data
before it was signed. Legal values are 'sha1','sha224',
'sha256', 'ripemd160', and 'md5'.
@return: True or False, depending on whether the signature was
verified.
"""
digest_type = getattr(m2, 'NID_' + algo, None)
if digest_type is None:
raise ValueError, ('unknown algorithm', algo)
return m2.rsa_verify(self.rsa, data, signature, digest_type)
class RSA_pub(RSA):
    """
    Object interface to an RSA public key.

    Carries only the public half (e, n); every private-key operation
    raises RSAError.
    """
    def __setattr__(self, name, value):
        # (e, n) may only be installed via the new_pub_key() factory,
        # which sets them through the underlying m2 handle.
        if name in ['e', 'n']:
            raise RSAError('use factory function new_pub_key() to set (e, n)')
        else:
            self.__dict__[name] = value
    def private_encrypt(self, *argv):
        """Unsupported: a public key cannot perform private-key encryption."""
        raise RSAError('RSA_pub object has no private key')
    def private_decrypt(self, *argv):
        """Unsupported: a public key cannot perform private-key decryption."""
        raise RSAError('RSA_pub object has no private key')
    def save_key(self, file, *args, **kw):
        """
        Save public key to file.
        """
        return self.save_pub_key(file)
    def save_key_bio(self, bio, *args, **kw):
        """
        Save public key to BIO.
        """
        return self.save_pub_key_bio(bio)
    #save_key_der
    #save_key_der_bio
    def check_key(self):
        """Sanity-check the public key via OpenSSL; returns 1 if consistent."""
        return m2.rsa_check_pub_key(self.rsa)
def rsa_error():
    """Raise RSAError carrying OpenSSL's current error-queue reason string."""
    # Call form of raise works under both Python 2 and Python 3.
    raise RSAError(m2.err_reason_error_string(m2.err_get_error()))
def keygen_callback(p, n, out=sys.stdout):
    """Default status callback for gen_key().

    Writes the progress glyph selected by *p* to *out* and flushes,
    giving visual feedback while a key is being generated.
    """
    glyphs = '.+*\n'
    out.write(glyphs[p])
    out.flush()
def gen_key(bits, e, callback=keygen_callback):
    """
    Generate an RSA key pair.

    @type bits: int
    @param bits: Key length, in bits.

    @type e: int
    @param e: The RSA public exponent.

    @type callback: Python callable
    @param callback: Invoked during key generation, usually to provide
    visual feedback; the default is keygen_callback.

    @rtype: M2Crypto.RSA.RSA
    @return: M2Crypto.RSA.RSA object.
    """
    rsa_ptr = m2.rsa_generate_key(bits, e, callback)
    return RSA(rsa_ptr, 1)
def load_key(file, callback=util.passphrase_callback):
    """
    Load an RSA key pair from file.

    @type file: string
    @param file: Name of a file containing the RSA key pair in PEM format.

    @type callback: Python callable
    @param callback: Invoked to acquire a passphrase with which to unlock
    the key; the default is util.passphrase_callback.

    @rtype: M2Crypto.RSA.RSA
    @return: M2Crypto.RSA.RSA object.
    """
    return load_key_bio(BIO.openfile(file), callback)
def load_key_bio(bio, callback=util.passphrase_callback):
    """
    Load an RSA key pair from an M2Crypto.BIO.BIO object.

    @type bio: M2Crypto.BIO.BIO
    @param bio: BIO object containing the RSA key pair in PEM format.

    @type callback: Python callable
    @param callback: Invoked to acquire a passphrase with which to unlock
    the key; the default is util.passphrase_callback.

    @rtype: M2Crypto.RSA.RSA
    @return: M2Crypto.RSA.RSA object.
    """
    rsa_ptr = m2.rsa_read_key(bio._ptr(), callback)
    if rsa_ptr is None:
        # Translate OpenSSL's error queue into an RSAError.
        rsa_error()
    return RSA(rsa_ptr, 1)
def load_key_string(string, callback=util.passphrase_callback):
    """
    Load an RSA key pair from a string.

    @type string: string
    @param string: String containing the RSA key pair in PEM format.

    @type callback: Python callable
    @param callback: Invoked to acquire a passphrase with which to unlock
    the key; the default is util.passphrase_callback.

    @rtype: M2Crypto.RSA.RSA
    @return: M2Crypto.RSA.RSA object.
    """
    return load_key_bio(BIO.MemoryBuffer(string), callback)
def load_pub_key(file):
    """
    Load an RSA public key from file.

    @type file: string
    @param file: Name of a file containing an RSA public key in PEM format.

    @rtype: M2Crypto.RSA.RSA_pub
    @return: M2Crypto.RSA.RSA_pub object.
    """
    return load_pub_key_bio(BIO.openfile(file))
def load_pub_key_bio(bio):
    """
    Load an RSA public key from an M2Crypto.BIO.BIO object.

    @type bio: M2Crypto.BIO.BIO
    @param bio: BIO object containing an RSA public key in PEM format.

    @rtype: M2Crypto.RSA.RSA_pub
    @return: M2Crypto.RSA.RSA_pub object.
    """
    rsa_ptr = m2.rsa_read_pub_key(bio._ptr())
    if rsa_ptr is None:
        # Translate OpenSSL's error queue into an RSAError.
        rsa_error()
    return RSA_pub(rsa_ptr, 1)
def new_pub_key(e_n):
    """
    Instantiate an RSA_pub object from an (e, n) tuple.

    Callers still pass a single (e, n) tuple; the old tuple-parameter
    signature "def new_pub_key((e, n))" was Python-2-only syntax, so the
    tuple is now unpacked explicitly, with no change to the call site.

    @type e_n: tuple
    @param e_n: (e, n) pair. 'e' is the RSA public exponent and 'n' is
    the RSA composite of primes; each is a string in OpenSSL's MPINT
    format - 4-byte big-endian bit-count followed by the appropriate
    number of bits.

    @rtype: M2Crypto.RSA.RSA_pub
    @return: M2Crypto.RSA.RSA_pub object.
    """
    (e, n) = e_n
    rsa = m2.rsa_new()
    m2.rsa_set_e(rsa, e)
    m2.rsa_set_n(rsa, n)
    return RSA_pub(rsa, 1)
| gpl-3.0 |
transfix/atxcf | atxcf/PriceNetwork.py | 1 | 12082 | """
PriceNetwork module. Maintains a graph representing an asset exchange network to determine
prices of assets relative to each other.
- transfix@sublevels.net - 20160117
"""
import PriceSource
from PriceSource import PriceSourceError
import cache
import settings
from core import _log_error
from settings import get_setting, set_setting, has_creds
from functools import partial
import networkx as nx
import string
import threading
import multiprocessing
import time
import math
import requests.exceptions
class PriceNetworkError(PriceSource.PriceSourceError):
    """Raised when the network cannot produce a price or a path for a market."""
    pass
class PriceNetwork(PriceSource.PriceSource):
    """
    Aggregates several PriceSource backends into one undirected graph
    whose nodes are asset symbols and whose edges are known markets;
    prices between arbitrary assets are chained along graph paths.
    """
    def __init__(self):
        super(PriceNetwork, self).__init__()
        # RLock: several methods re-enter locked methods (e.g. add_source
        # calls _get_price_graph, which calls get_markets).
        self._lock = threading.RLock()
        self._sources = []
        self.init_sources()
        # Built lazily by _get_price_graph() on first use.
        self._price_graph = None
    def init_sources(self):
        """Instantiate the configured PriceSource backends, skipping any
        whose required credentials are missing."""
        with self._lock:
            self._sources = []
            for source_name in get_setting("options", "price_sources",
                                           default=["Bitfinex", "Bittrex",
                                                    "Poloniex", "Conversions",
                                                    "CoinExchange", "Coinigy"]):
                if hasattr(PriceSource, source_name):
                    Source = getattr(PriceSource, source_name)
                    if not Source.requires_creds() or has_creds(Source.__name__):
                        self._sources.append(Source())
    def get_sources(self):
        """Returns the list of active PriceSource instances."""
        return self._sources
    def add_source(self, source):
        """Registers an extra PriceSource and splices its symbols and
        markets into the price graph."""
        G = self._get_price_graph()
        with self._lock:
            G.add_nodes_from(source.get_symbols())
            self._sources.append(source)
            for mkt in source.get_markets():
                from_mkt, to_mkt = mkt.split("/")
                G.add_edge(from_mkt, to_mkt)
    def _get_price_graph(self):
        """Returns the cached networkx symbol/market graph, building it on
        first use from all sources' symbols and "FROM/TO" markets."""
        markets = self.get_markets()
        with self._lock:
            if not self._price_graph:
                G = nx.Graph()
                G.add_nodes_from(self.get_symbols())
                for mkt in markets:
                    from_mkt, to_mkt = mkt.split("/")
                    G.add_edge(from_mkt, to_mkt)
                self._price_graph = G
            return self._price_graph
    def get_symbols(self):
        """
        Returns the set of all known symbols across all price sources.
        Sources that fail are logged and skipped.
        """
        symbols = set()
        with self._lock:
            for source in self.get_sources():
                try:
                    for symbol in source.get_symbols():
                        symbols.add(symbol)
                except Exception as e:
                    _log_error(['PriceNetwork.get_symbols',
                                source._class_name(), str(e)])
        return list(symbols)
    def get_base_symbols(self):
        """
        Returns the set of all known base (pricing) symbols across all
        price sources. Sources that fail are logged and skipped.
        """
        symbols = set()
        with self._lock:
            for source in self._sources:
                try:
                    for symbol in source.get_base_symbols():
                        symbols.add(symbol)
                except Exception as e:
                    _log_error(['PriceNetwork.get_base_symbols',
                                source._class_name(), str(e)])
        return list(symbols)
    def get_markets(self):
        """
        Returns all markets known by all of the price sources.
        """
        mkts = set()
        with self._lock:
            for source in self._sources:
                try:
                    for mkt in source.get_markets():
                        mkts.add(mkt)
                except Exception as e:
                    _log_error(['PriceNetwork.get_markets',
                                source._class_name(), str(e)])
        return list(mkts)
    def get_market_sources(self):
        """
        Returns all market sources with their respective markets,
        as a {source name: market list} dict.
        """
        mkt_srcs = {}
        with self._lock:
            for source in self._sources:
                mkt_srcs.update({source._class_name(): source.get_markets()})
        return mkt_srcs
    def _do_get_price(self, from_asset, to_asset, amount=1.0):
        """
        Helper function for get_price. Averages the unit price over all
        sources that list the market (either direction), caching the
        result; raises PriceNetworkError when no source can price it.
        """
        mkt_key = from_asset + "/" + to_asset
        inv_mkt_key = to_asset + "/" + from_asset
        do_cache = False
        unit_prices = []
        # return from the cache if it is already available
        # NOTE(review): 'cache' is the project's cache module exposing
        # has_key/get_val/set_val -- not a dict; confirm against cache.py.
        if cache.has_key(mkt_key):
            unit_prices.append(cache.get_val(mkt_key))
        else:
            do_cache = True
            with self._lock:
                for source in self._sources:
                    try:
                        mkts = source.get_markets()
                        if mkt_key in mkts or inv_mkt_key in mkts:
                            price = source.get_price(from_asset, to_asset, 1.0)
                            unit_prices.append(float(price))
                    except PriceSourceError as e:
                        _log_error(['PriceNetwork._do_get_price',
                                    source._class_name(), str(e)])
                    except requests.exceptions.ConnectionError as e:
                        _log_error(['PriceNetwork._do_get_price',
                                    source._class_name(), str(e)])
        if len(unit_prices) == 0:
            raise PriceNetworkError("%s: Couldn't determine price of %s/%s" % (self._class_name(),
                                                                               from_asset,
                                                                               to_asset))
        avg_price = math.fsum(unit_prices)/float(len(unit_prices))
        # Make sure to copy it to the cache so future retrievals
        # within 60 seconds are quick.
        if do_cache:
            expire = get_setting("options", "cache_price_expiration", default=60)
            cache.set_val(mkt_key, float(avg_price), expire=expire)
        return avg_price * amount
    def get_shortest_path(self, from_asset, to_asset):
        """
        Returns the shortest path known from_asset to_asset, as a node
        sequence, or None if no path was found.
        """
        if from_asset == to_asset:
            return (from_asset,)
        G = self._get_price_graph()
        # Sometimes the sources may add new markets after the
        # price network is initialized. So lets add them here.
        do_add_edge = False
        if not from_asset in G and from_asset in self.get_symbols():
            G.add_node(from_asset)
            do_add_edge = True
        if not to_asset in G and to_asset in self.get_symbols():
            do_add_edge = True
            G.add_node(to_asset)
        mkt = from_asset + "/" + to_asset
        if do_add_edge and mkt in self.get_markets():
            G.add_edge(from_asset, to_asset)
        sh_p = None
        try:
            sh_p = nx.shortest_path(G, from_asset, to_asset)
        except Exception as e:
            # NOTE(review): the log label 'PriceNetwork.get_price' looks
            # like a stale copy/paste -- should probably say
            # 'get_shortest_path'; confirm before relying on the logs.
            _log_error(['PriceNetwork.get_price',
                        self._class_name(), str(e)])
        return sh_p
    def get_price(self, from_asset, to_asset, amount=1.0):
        """
        Returns how much of to_asset you would have after exchanging it
        for amount of from_asset based on the last price. Saves prices
        in the cache to help with frequent requests for prices.
        """
        if from_asset == to_asset or amount == 0.0:
            return amount
        sh_p = self.get_shortest_path(from_asset, to_asset)
        if not sh_p:
            raise PriceNetworkError("No path from {0} to {1}"
                                    .format(from_asset, to_asset))
        # for each edge in the path, compute the conversion price
        cur_value = float(amount)
        for from_cur, to_cur in zip(sh_p[0:], sh_p[1:]):
            cur_value = self._do_get_price(from_cur, to_cur, cur_value)
        return cur_value
    def price(self, trade_pair_str, value = 1.0):
        # trade_pair_str is a string with a slash separating two
        # asset symbols, like XBT/USD
        # NOTE: string.split() is Python-2-only; the 2/3-compatible
        # equivalent is trade_pair_str.split("/", 1).
        asset_strs = string.split(trade_pair_str,"/",1)
        if len(asset_strs) != 2:
            raise PriceNetworkError("Invalid trade_pair_str %s" % trade_pair_str)
        asset_strs = [cur.strip() for cur in asset_strs]
        return self.get_price(asset_strs[0], asset_strs[1], value)
# Module-level singleton instance; created lazily by _get_price_network().
_pn = None
def init():
    """
    (Re-)initializes the PriceNetwork singleton.
    """
    global _pn
    _pn = PriceNetwork()
def _get_price_network():
    """
    Returns the process-wide PriceNetwork singleton, creating it on
    first use.
    """
    global _pn
    if _pn is None:
        init()
    return _pn
# Public alias for the singleton accessor.
def instance():
    """
    Returns the PriceNetwork singleton. Deprecates _get_price_network.
    """
    return _get_price_network()
def add_source(source):
    """
    Adds a PriceSource instance to the singleton price network.
    """
    instance().add_source(source)
def _do_get_price(value, trade_pair_str):
    """
    Resolve a "FROM/TO" trade-pair string and price *value* units of the
    FROM asset through the singleton PriceNetwork.

    Returns float('NaN') when the network reports no usable price.
    """
    # str.split replaces the Python-2-only string.split(); behavior is
    # identical and works on Python 3 as well.
    asset_strs = trade_pair_str.split("/", 1)
    if len(asset_strs) != 2:
        # Bug fix: CmdError is not defined in this module, so raising it
        # produced a NameError; use the module's own error type instead.
        raise PriceNetworkError("Invalid trade pair %s" % trade_pair_str)
    asset_strs = [cur.strip() for cur in asset_strs]
    pn = instance()
    price = pn.get_price(asset_strs[0], asset_strs[1], value)
    if not price:
        price = float('NaN')
    return price
def get_price(*args, **kwargs):
    """
    Returns a price depending on args:
    - 1 == len(args) -> from/to pair string (aka trade_pair_str)
    - 2 == len(args) -> (value, trade_pair_str)
    - 3 == len(args) -> (value, from_asset, to_asset)
    """
    value = 1.0
    trade_pair_str = ""
    if len(args) == 1:
        # treat args as a from/to pair with amount == 1
        value = 1.0
        trade_pair_str = args[0]
    elif len(args) == 2:
        # treat args as a pair of (value, trade_pair_str)
        value = float(args[0])
        trade_pair_str = args[1]
    elif len(args) == 3:
        # treat args as a triple of (value, from_asset, to_asset)
        value = float(args[0])
        from_asset = args[1].strip()
        to_asset = args[2].strip()
        trade_pair_str = "%s/%s" % (from_asset, to_asset)
    else:
        # Bug fix: CmdError is not defined in this module, so raising it
        # produced a NameError; use the module's own error type instead.
        raise PriceNetworkError("Invalid argument list for command get_price: %s" % str(args))
    return _do_get_price(value, trade_pair_str)
def get_prices(balances, base_asset):
    """
    Given a dict of balances, returns another dict mapping each asset to
    a (balance, price-in-base_asset) pair. Assets with a zero balance or
    a zero price are omitted.
    """
    values = {}
    # .items() replaces the Python-2-only .iteritems(); it works on both
    # Python 2 and Python 3 with identical results.
    for asset, balance in balances.items():
        if balance == 0.0:
            continue
        price = get_price(balance, asset, base_asset)
        if price != 0.0:
            values[asset] = (balance, price)
    return values
def get_nav(balances, base_asset):
    """
    Given a dict of balances, returns the net asset value of the whole
    collection in terms of the base_asset.
    """
    prices = get_prices(balances, base_asset)
    nav = 0.0
    # .items() replaces the Python-2-only .iteritems(); each value is a
    # (balance, value-in-base_asset) pair produced by get_prices().
    for asset, item in prices.items():
        nav += item[1]
    return nav
# Thin module-level wrappers around the PriceNetwork singleton, returning
# sorted copies so callers get stable, display-ready lists.
def get_symbols():
    """
    Returns all asset symbols known by the bot.
    """
    return sorted(instance().get_symbols())
def get_base_symbols():
    """
    Returns all symbols used for pricing.
    """
    return sorted(instance().get_base_symbols())
def get_markets():
    """
    Returns all markets known by the bot.
    """
    return sorted(instance().get_markets())
def get_market_sources():
    """
    Returns the name of all market sources used for pricing.
    """
    # PriceNetwork.get_market_sources() returns a dict keyed by source
    # name, so listing it yields exactly the source names.
    return list(instance().get_market_sources())
def get_all_prices(mkts=None):
    """
    Returns a {market: price} dict for each market in *mkts*; when *mkts*
    is None (or empty), all known markets are priced. Markets whose price
    cannot be determined are logged and omitted from the result.
    """
    quotes = {}
    for market in (mkts or get_markets()):
        try:
            quotes[market] = get_price(market)
        except PriceSourceError as err:
            _log_error(['get_all_prices', '', str(err)])
    return quotes
| mit |
AnkitBarik/inermodz | velocity.py | 1 | 4027 | #!/usr/bin/env python3
# -*- coding: iso-8859-15 -*-
import numpy as np
from .sigma import sigma
from .libzhang import *
from .grid import grid
class vel:
    """
    Velocity field of a single inertial mode of a rotating fluid,
    evaluated on the cylindrical components Us, Up, Uz (and derived
    Cartesian Ux, Uy) over the mesh supplied by grid().

    NOTE(review): assumes grid exposes s3D, z3D, phi3D mesh arrays and
    1-D r/theta/phi coordinate arrays -- confirm against grid.py.
    """
    def __init__(self,m=0,N=0,n=1,l=None,nr=33,nphi=256,ntheta=128,symm='es',norm=False):
        n = n-1 # The definition of n starts from 1 :(
        # N = (l - m - ((l-m)%2))/2
        self.grid = grid(nr=nr, nphi=nphi, ntheta=ntheta)
        self.Us = np.zeros([nphi, ntheta, nr])
        self.Up = np.zeros([nphi, ntheta, nr])
        self.Uz = np.zeros([nphi, ntheta, nr])
        # sigma() returns the eigenfrequencies for (m, N, l, symm);
        # pick the n-th one for this mode.
        sig_arr, N = sigma(m=m, N=N, l=l, symm=symm)
        print(('omega =', sig_arr*2))
        sig = sig_arr[n]
        print(('omega(%d,%d,%d) = %.4f' %(l, m, n+1, sig*2)))
        # When l is given, the equatorial symmetry follows from (l-m) parity.
        if l is not None:
            if (l-m)%2 == 0:
                symm = 'es'
            else:
                symm = 'ea'
        # Equatorially symmetric modes: double sum over (i, j).
        if (symm == 'es') or (symm == 'ES'):
            for i in range(N+1):
                for j in range(N-i+1):
                    C = (-1)**(i+j) * dfactorial(2*(m+N+i+j)-1)/ \
                        ( 2**(j+1) * dfactorial(2*i-1) * factorial(N-i-j) \
                        * factorial(i) * factorial(j) * factorial(m+j) )
                    # The z-velocity term carries z**(2*i-1), which is
                    # only present for i > 0.
                    if i > 0:
                        UTemp = C * sig**(2*i-1) * ( 1 - sig**2)**j * 2*i * \
                            self.grid.s3D**(m+2*j) * self.grid.z3D**(2*i-1)
                        self.Uz = self.Uz + UTemp
                    UTemp = C * sig**(2*i) * (1-sig**2)**(j-1) * (m + m*sig + 2*j*sig)\
                        * self.grid.s3D**(m+2*j-1) * self.grid.z3D**(2*i)
                    self.Us = self.Us + UTemp
                    UTemp = C * sig**(2*i) * (1-sig**2)**(j-1) * (m + m*sig + 2*j)\
                        * self.grid.s3D**(m+2*j-1) * self.grid.z3D**(2*i)
                    self.Up = self.Up + UTemp
            # Apply the azimuthal (e^{i m phi}) dependence and build the
            # Cartesian components from the cylindrical ones.
            self.Us = self.Us * np.sin(m*self.grid.phi3D)
            self.Uz = -self.Uz * np.sin(m*self.grid.phi3D)
            self.Up = self.Up * np.cos(m*self.grid.phi3D)
            self.Ux = self.Us * np.cos(self.grid.phi3D) - self.Up * np.sin(self.grid.phi3D)
            self.Uy = self.Us * np.sin(self.grid.phi3D) + self.Up * np.cos(self.grid.phi3D)
        # Equatorially antisymmetric modes.
        if (symm == 'ea') or (symm == 'EA'):
            for i in range(N+1):
                for j in range(N-i+1):
                    C = (-1)**(i+j) * dfactorial(2*(m+N+i+j)+1)/ \
                        ( 2**(j+1) * dfactorial(2*i+1) * factorial(N-i-j) \
                        * factorial(i) * factorial(j) * factorial(m+j) )
                    # NOTE(review): this exponent z3D**(2*j) breaks the
                    # pattern of the other terms (all powers of i); it may
                    # be a typo for 2*i -- confirm against the analytic
                    # inertial-mode formulas before changing.
                    UTemp = C * sig**(2*i-1) * ( 1 - sig**2)**j * (2*i+1) * \
                        self.grid.s3D**(m+2*j) * self.grid.z3D**(2*j)
                    self.Uz = self.Uz + UTemp
                    UTemp = C * sig**(2*i) * (1-sig**2)**(j-1) * (m + m*sig + 2*j*sig)\
                        * self.grid.s3D**(m+2*j-1) * self.grid.z3D**(2*i+1)
                    self.Us = self.Us + UTemp
                    UTemp = C * sig**(2*i) * (1-sig**2)**(j-1) * (m + m*sig + 2*j)\
                        * self.grid.s3D**(m+2*j-1) * self.grid.z3D**(2*i+1)
                    self.Up = self.Up + UTemp
            self.Us = self.Us * np.sin(m*self.grid.phi3D)
            self.Uz = -self.Uz * np.sin(m*self.grid.phi3D)
            self.Up = self.Up * np.cos(m*self.grid.phi3D)
            self.Ux = self.Us * np.cos(self.grid.phi3D) - self.Up * np.sin(self.grid.phi3D)
            self.Uy = self.Us * np.sin(self.grid.phi3D) + self.Up * np.cos(self.grid.phi3D)
        del UTemp
        # Optionally normalize so the volume-averaged kinetic energy is 1:
        # integrate |U|^2 over phi, theta, r and divide by the shell volume.
        if norm:
            U2 = self.Us**2 + self.Up**2 + self.Uz**2
            U2p = np.trapz(U2, x=self.grid.phi, axis=0)
            U2t = np.trapz(U2p, self.grid.theta, axis=0)
            U2r = np.trapz(U2t, self.grid.r, axis=0)
            Vol = 4./3. * np.pi * (self.grid.r.max()**3 - self.grid.r.min()**3)
            U2r /= Vol
            self.Us /= np.sqrt(U2r)
            self.Up /= np.sqrt(U2r)
            self.Uz /= np.sqrt(U2r)
| gpl-3.0 |
dursk/django | django/contrib/admindocs/utils.py | 411 | 4187 | "Misc. utility functions/classes for admin documentation generator."
import re
from email.errors import HeaderParseError
from email.parser import HeaderParser
from django.core.urlresolvers import reverse
from django.utils.encoding import force_bytes
from django.utils.safestring import mark_safe
try:
import docutils.core
import docutils.nodes
import docutils.parsers.rst.roles
except ImportError:
docutils_is_available = False
else:
docutils_is_available = True
def trim_docstring(docstring):
    """
    Uniformly trim leading/trailing whitespace from docstrings.

    Based on https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation
    """
    if not docstring or not docstring.strip():
        return ''
    # Normalize tabs, then find the common indentation margin over every
    # non-blank line (including the first, which usually sits at column 0).
    lines = docstring.expandtabs().splitlines()
    margin = min(
        len(line) - len(line.lstrip())
        for line in lines
        if line.lstrip()
    )
    first = lines[0].lstrip()
    rest = [line[margin:].rstrip() for line in lines[1:]]
    return "\n".join([first] + rest).strip()
def parse_docstring(docstring):
    """
    Parse out the parts of a docstring. Return (title, body, metadata).
    """
    docstring = trim_docstring(docstring)
    # Paragraphs are separated by one or more blank lines.
    chunks = re.split(r'\n{2,}', docstring)
    title = chunks[0]
    if len(chunks) == 1:
        return title, '', {}
    # Try to interpret the final paragraph as RFC 822 style headers.
    parser = HeaderParser()
    try:
        metadata = parser.parsestr(chunks[-1])
    except HeaderParseError:
        return title, "\n\n".join(chunks[1:]), {}
    metadata = dict(metadata.items())
    if metadata:
        # Headers parsed: the body is everything between title and them.
        body = "\n\n".join(chunks[1:-1])
    else:
        body = "\n\n".join(chunks[1:])
    return title, body, metadata
def parse_rst(text, default_reference_context, thing_being_parsed=None):
    """
    Convert the string from reST to an XHTML fragment.
    """
    overrides = {
        'doctitle_xform': True,
        # NOTE(review): this key looks misspelled ('initial'); kept as-is
        # to avoid changing behavior -- confirm against docutils settings.
        'inital_header_level': 3,
        "default_reference_context": default_reference_context,
        "link_base": reverse('django-admindocs-docroot').rstrip('/'),
        # Disable raw passthrough and file inclusion for safety.
        'raw_enabled': False,
        'file_insertion_enabled': False,
    }
    if thing_being_parsed:
        thing_being_parsed = force_bytes("<%s>" % thing_being_parsed)
    # Wrap ``text`` in some reST that sets the default role to ``cmsreference``,
    # then restores it.
    source = """
.. default-role:: cmsreference
%s
.. default-role::
"""
    parts = docutils.core.publish_parts(source % text,
        source_path=thing_being_parsed, destination_path=None,
        writer_name='html', settings_overrides=overrides)
    return mark_safe(parts['fragment'])
#
# reST roles
#
# URL templates for the custom reST roles registered below; each is
# filled with (link_base, lowercased role text).
ROLES = {
    'model': '%s/models/%s/',
    'view': '%s/views/%s/',
    'template': '%s/templates/%s/',
    'filter': '%s/filters/#%s',
    'tag': '%s/tags/#%s',
}
def create_reference_role(rolename, urlbase):
    """Register a reST role that renders its text as a reference node
    whose URI is *urlbase* filled with (link_base, lowercased text)."""
    def _role(name, rawtext, text, lineno, inliner, options=None, content=None):
        if options is None:
            options = {}
        if content is None:
            content = []
        node = docutils.nodes.reference(
            rawtext,
            text,
            refuri=(urlbase % (
                inliner.document.settings.link_base,
                text.lower(),
            )),
            **options
        )
        return [node], []
    docutils.parsers.rst.roles.register_canonical_role(rolename, _role)
def default_reference_role(name, rawtext, text, lineno, inliner, options=None, content=None):
    """Fallback role: picks the URL template out of ROLES using the
    parser's default_reference_context setting."""
    if options is None:
        options = {}
    if content is None:
        content = []
    context = inliner.document.settings.default_reference_context
    node = docutils.nodes.reference(
        rawtext,
        text,
        refuri=(ROLES[context] % (
            inliner.document.settings.link_base,
            text.lower(),
        )),
        **options
    )
    return [node], []
# Register the custom roles only when docutils could be imported;
# admindocs degrades gracefully without it.
if docutils_is_available:
    docutils.parsers.rst.roles.register_canonical_role('cmsreference', default_reference_role)
    for name, urlbase in ROLES.items():
        create_reference_role(name, urlbase)
| bsd-3-clause |
evamwangi/bc-7-Todo_List | venv/Lib/warnings.py | 265 | 14044 | """Python part of the warnings subsystem."""
# Note: function level imports should *not* be used
# in this module as it may cause import lock deadlock.
# See bug 683658.
import linecache
import sys
import types
__all__ = ["warn", "showwarning", "formatwarning", "filterwarnings",
"resetwarnings", "catch_warnings"]
def warnpy3k(message, category=None, stacklevel=1):
    """Issue a deprecation warning for Python 3.x related changes.

    Warnings are omitted unless Python is started with the -3 option.
    """
    # sys.py3kwarning is a Python-2-only flag set by the -3 option.
    if sys.py3kwarning:
        if category is None:
            category = DeprecationWarning
        warn(message, category, stacklevel+1)
# Default implementation behind the public showwarning() hook.
def _show_warning(message, category, filename, lineno, file=None, line=None):
    """Hook to write a warning to a file; replace if you like."""
    if file is None:
        file = sys.stderr
    try:
        file.write(formatwarning(message, category, filename, lineno, line))
    except IOError:
        pass # the file (probably stderr) is invalid - this warning gets lost.
# Keep a working version around in case the deprecation of the old API is
# triggered.
showwarning = _show_warning
def formatwarning(message, category, filename, lineno, line=None):
    """Function to format a warning the standard way."""
    formatted = "%s:%s: %s: %s\n" % (filename, lineno, category.__name__, message)
    # Fall back to linecache when the caller did not supply the source line.
    if line is None:
        line = linecache.getline(filename, lineno)
    if line:
        formatted += " %s\n" % line.strip()
    return formatted
def filterwarnings(action, message="", category=Warning, module="", lineno=0,
                   append=0):
    """Insert an entry into the list of warnings filters (at the front).

    'action' -- one of "error", "ignore", "always", "default", "module",
                or "once"
    'message' -- a regex that the warning message must match
    'category' -- a class that the warning must be a subclass of
    'module' -- a regex that the module name must match
    'lineno' -- an integer line number, 0 matches all warnings
    'append' -- if true, append to the list of filters
    """
    import re
    assert action in ("error", "ignore", "always", "default", "module",
                      "once"), "invalid action: %r" % (action,)
    # NOTE: basestring and types.ClassType exist only on Python 2; this
    # module is the Python 2 implementation of warnings.
    assert isinstance(message, basestring), "message must be a string"
    assert isinstance(category, (type, types.ClassType)), \
        "category must be a class"
    assert issubclass(category, Warning), "category must be a Warning subclass"
    assert isinstance(module, basestring), "module must be a string"
    assert isinstance(lineno, int) and lineno >= 0, \
        "lineno must be an int >= 0"
    # Filter entries are 5-tuples; message/module regexes are matched
    # case-insensitively / anchored by the callers' conventions.
    item = (action, re.compile(message, re.I), category,
            re.compile(module), lineno)
    if append:
        filters.append(item)
    else:
        filters.insert(0, item)
def simplefilter(action, category=Warning, lineno=0, append=0):
    """Insert a simple entry into the list of warnings filters (at the front).

    A simple filter matches all modules and messages.

    'action' -- one of "error", "ignore", "always", "default", "module",
                or "once"
    'category' -- a class that the warning must be a subclass of
    'lineno' -- an integer line number, 0 matches all warnings
    'append' -- if true, append to the list of filters
    """
    valid_actions = ("error", "ignore", "always", "default", "module", "once")
    assert action in valid_actions, "invalid action: %r" % (action,)
    assert isinstance(lineno, int) and lineno >= 0, "lineno must be an int >= 0"
    # None in the message/module slots means "match anything".
    entry = (action, None, category, None, lineno)
    if append:
        filters.append(entry)
    else:
        filters.insert(0, entry)
def resetwarnings():
    """Clear the list of warning filters, so that no filters are active."""
    # Empty the list in place so existing aliases observe the change.
    del filters[:]
# Raised internally by the -W option helpers (_setoption and friends).
class _OptionError(Exception):
    """Exception used by option processing helpers."""
    pass
# Helper to process -W options passed via sys.warnoptions
# NOTE: uses Python-2-only syntax ("except E, name" and the print
# statement); this is the Python 2 warnings module.
def _processoptions(args):
    for arg in args:
        try:
            _setoption(arg)
        except _OptionError, msg:
            print >>sys.stderr, "Invalid -W option ignored:", msg
# Helper for _processoptions()
def _setoption(arg):
    """Parse one -W option string "action:message:category:module:lineno"
    and install the matching filter; raises _OptionError on bad input."""
    import re
    parts = arg.split(':')
    if len(parts) > 5:
        raise _OptionError("too many fields (max 5): %r" % (arg,))
    # Missing trailing fields default to empty strings.
    while len(parts) < 5:
        parts.append('')
    action, message, category, module, lineno = [s.strip()
                                                 for s in parts]
    action = _getaction(action)
    # In -W syntax, message/module are literal text: escape them so
    # filterwarnings() treats them as plain strings, with module anchored.
    message = re.escape(message)
    category = _getcategory(category)
    module = re.escape(module)
    if module:
        module = module + '$'
    if lineno:
        try:
            lineno = int(lineno)
            if lineno < 0:
                raise ValueError
        except (ValueError, OverflowError):
            raise _OptionError("invalid lineno %r" % (lineno,))
    else:
        lineno = 0
    filterwarnings(action, message, category, module, lineno)
# Helper for _setoption()
def _getaction(action):
if not action:
return "default"
if action == "all": return "always" # Alias
for a in ('default', 'always', 'ignore', 'module', 'once', 'error'):
if a.startswith(action):
return a
raise _OptionError("invalid action: %r" % (action,))
# Helper for _setoption()
def _getcategory(category):
import re
if not category:
return Warning
if re.match("^[a-zA-Z0-9_]+$", category):
try:
cat = eval(category)
except NameError:
raise _OptionError("unknown warning category: %r" % (category,))
else:
i = category.rfind(".")
module = category[:i]
klass = category[i+1:]
try:
m = __import__(module, None, None, [klass])
except ImportError:
raise _OptionError("invalid module name: %r" % (module,))
try:
cat = getattr(m, klass)
except AttributeError:
raise _OptionError("unknown warning category: %r" % (category,))
if not issubclass(cat, Warning):
raise _OptionError("invalid warning category: %r" % (category,))
return cat
# Code typically replaced by _warnings
# Code typically replaced by _warnings
def warn(message, category=None, stacklevel=1):
    """Issue a warning, or maybe ignore it or raise an exception."""
    # Check if message is already a Warning object
    if isinstance(message, Warning):
        category = message.__class__
    # Check category argument
    if category is None:
        category = UserWarning
    assert issubclass(category, Warning)
    # Get context information
    try:
        caller = sys._getframe(stacklevel)
    except ValueError:
        # Stack too shallow for the requested level; fall back to the
        # interpreter's own namespace.
        globals = sys.__dict__
        lineno = 1
    else:
        globals = caller.f_globals
        lineno = caller.f_lineno
    if '__name__' in globals:
        module = globals['__name__']
    else:
        module = "<string>"
    filename = globals.get('__file__')
    if filename:
        fnl = filename.lower()
        # Report the .py source file, not the compiled .pyc/.pyo.
        if fnl.endswith((".pyc", ".pyo")):
            filename = filename[:-1]
    else:
        if module == "__main__":
            try:
                filename = sys.argv[0]
            except AttributeError:
                # embedded interpreters don't have sys.argv, see bug #839151
                filename = '__main__'
        if not filename:
            filename = module
    # Per-module registry of already-issued warnings, used for "once"
    # and "default" de-duplication in warn_explicit().
    registry = globals.setdefault("__warningregistry__", {})
    warn_explicit(message, category, filename, lineno, module, registry,
                  globals)
def warn_explicit(message, category, filename, lineno,
                  module=None, registry=None, module_globals=None):
    """Low-level warning machinery: match (message, category, module,
    lineno) against the filters list and carry out the selected action."""
    lineno = int(lineno)
    if module is None:
        module = filename or "<unknown>"
        if module[-3:].lower() == ".py":
            module = module[:-3] # XXX What about leading pathname?
    if registry is None:
        registry = {}
    if isinstance(message, Warning):
        text = str(message)
        category = message.__class__
    else:
        text = message
        message = category(message)
    key = (text, category, lineno)
    # Quick test for common case
    if registry.get(key):
        return
    # Search the filters
    for item in filters:
        action, msg, cat, mod, ln = item
        if ((msg is None or msg.match(text)) and
            issubclass(category, cat) and
            (mod is None or mod.match(module)) and
            (ln == 0 or lineno == ln)):
            break
    else:
        action = defaultaction
    # Early exit actions
    if action == "ignore":
        registry[key] = 1
        return
    # Prime the linecache for formatting, in case the
    # "file" is actually in a zipfile or something.
    linecache.getlines(filename, module_globals)
    if action == "error":
        raise message
    # Other actions
    if action == "once":
        # Suppress repeats globally via onceregistry.
        registry[key] = 1
        oncekey = (text, category)
        if onceregistry.get(oncekey):
            return
        onceregistry[oncekey] = 1
    elif action == "always":
        pass
    elif action == "module":
        # Suppress repeats per module, regardless of line number.
        registry[key] = 1
        altkey = (text, category, 0)
        if registry.get(altkey):
            return
        registry[altkey] = 1
    elif action == "default":
        registry[key] = 1
    else:
        # Unrecognized actions are errors
        raise RuntimeError(
              "Unrecognized action (%r) in warnings.filters:\n %s" %
              (action, item))
    # Print message and context
    showwarning(message, category, filename, lineno)
class WarningMessage(object):
    """Holds the result of a single showwarning() call."""
    # Attribute names mirroring showwarning()'s argument list.
    _WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
                        "line")
    def __init__(self, message, category, filename, lineno, file=None,
                 line=None):
        # Explicit assignments replace the locals()-based loop; the
        # resulting attributes are identical.
        self.message = message
        self.category = category
        self.filename = filename
        self.lineno = lineno
        self.file = file
        self.line = line
        self._category_name = category.__name__ if category else None
    def __str__(self):
        return ("{message : %r, category : %r, filename : %r, lineno : %s, "
                "line : %r}" % (self.message, self._category_name,
                                self.filename, self.lineno, self.line))
class catch_warnings(object):
    """A context manager that copies and restores the warnings filter upon
    exiting the context.

    The 'record' argument specifies whether warnings should be captured by a
    custom implementation of warnings.showwarning() and be appended to a list
    returned by the context manager. Otherwise None is returned by the context
    manager. The objects appended to the list are arguments whose attributes
    mirror the arguments to showwarning().

    The 'module' argument is to specify an alternative module to the module
    named 'warnings' and imported under that name. This argument is only useful
    when testing the warnings module itself.
    """
    def __init__(self, record=False, module=None):
        """Specify whether to record warnings and if an alternative module
        should be used other than sys.modules['warnings'].

        For compatibility with Python 3.0, please consider all arguments to be
        keyword-only.
        """
        self._record = record
        self._module = sys.modules['warnings'] if module is None else module
        self._entered = False
    def __repr__(self):
        args = []
        if self._record:
            args.append("record=True")
        if self._module is not sys.modules['warnings']:
            args.append("module=%r" % self._module)
        name = type(self).__name__
        return "%s(%s)" % (name, ", ".join(args))
    def __enter__(self):
        # Not reentrant: one instance may only be entered once at a time.
        if self._entered:
            raise RuntimeError("Cannot enter %r twice" % self)
        self._entered = True
        # Snapshot the filter list and install a shallow copy so code run
        # inside the context can mutate filters freely.
        self._filters = self._module.filters
        self._module.filters = self._filters[:]
        self._showwarning = self._module.showwarning
        if self._record:
            log = []
            # Capture warnings into 'log' instead of printing them.
            def showwarning(*args, **kwargs):
                log.append(WarningMessage(*args, **kwargs))
            self._module.showwarning = showwarning
            return log
        else:
            return None
    def __exit__(self, *exc_info):
        if not self._entered:
            raise RuntimeError("Cannot exit %r without entering first" % self)
        # Restore the saved filter list and showwarning hook.
        self._module.filters = self._filters
        self._module.showwarning = self._showwarning
self._module.showwarning = self._showwarning
# filters contains a sequence of filter 5-tuples
# The components of the 5-tuple are:
# - an action: error, ignore, always, default, module, or once
# - a compiled regex that must match the warning message
# - a class representing the warning category
# - a compiled regex that must match the module that is being warned
# - a line number for the line being warning, or 0 to mean any line
# If either if the compiled regexs are None, match anything.
# Prefer the C implementation (_warnings) when available; otherwise fall
# back to pure-Python defaults for the filter list and registries.
_warnings_defaults = False
try:
    from _warnings import (filters, default_action, once_registry,
                           warn, warn_explicit)
    defaultaction = default_action
    onceregistry = once_registry
    _warnings_defaults = True
except ImportError:
    filters = []
    defaultaction = "default"
    onceregistry = {}
# Module initialization
_processoptions(sys.warnoptions)
if not _warnings_defaults:
    # Mirror the default filters the C implementation would install.
    silence = [ImportWarning, PendingDeprecationWarning]
    # Don't silence DeprecationWarning if -3 or -Q was used.
    if not sys.py3kwarning and not sys.flags.division_warning:
        silence.append(DeprecationWarning)
    for cls in silence:
        simplefilter("ignore", category=cls)
    bytes_warning = sys.flags.bytes_warning
    if bytes_warning > 1:
        bytes_action = "error"
    elif bytes_warning:
        bytes_action = "default"
    else:
        bytes_action = "ignore"
    simplefilter(bytes_action, category=BytesWarning, append=1)
del _warnings_defaults
| mit |
saulpw/visidata | visidata/loaders/shp.py | 1 | 3391 | from visidata import *
# requires pyshp
def open_shp(p):
    'Load the shapefile at path *p* as a ShapeSheet.'
    return ShapeSheet(p.name, source=p)

# .dbf attribute tables are read by the same loader
open_dbf = open_shp
# Map from DBF field-type codes to the Python type used for column values.
shptypes = {
  'C': str,    # character
  'N': float,  # numeric
  'L': float,  # logical -- NOTE(review): mapped to float here; confirm intent
  'F': float,  # float
  'D': date,   # date
  'M': str,    # memo
}
def shptype(ftype, declen):
    'Return the Python type for a DBF field code *ftype* with *declen* decimals.'
    base = shptypes[ftype[:1]]
    # numeric fields with zero decimal places are whole numbers
    if base is float and declen == 0:
        return int
    return base
# rowdef: shaperec
class ShapeSheet(Sheet):
    """Sheet of shape records loaded from a shapefile via pyshp."""
    rowtype = 'shapes'
    columns = [
        Column('shapeType', width=0, getter=lambda col,row: row.shape.shapeType)
    ]
    def iterload(self):
        # imported lazily so visidata works without pyshp installed
        import shapefile
        self.sf = shapefile.Reader(str(self.source))
        self.reloadCols()
        for shaperec in Progress(self.sf.iterShapeRecords(), total=self.sf.numRecords):
            yield shaperec
    def reloadCols(self):
        # rebuild columns: static shape columns plus one column per DBF field
        self.columns = []
        for c in ShapeSheet.columns:
            self.addColumn(copy(c))
        for i, (fname, ftype, fieldlen, declen) in enumerate(self.sf.fields[1:]):  # skip DeletionFlag
            # i=i default binds the field index now, avoiding late-binding bugs
            self.addColumn(Column(fname, getter=lambda col,row,i=i: row.record[i], type=shptype(ftype, declen)))
class ShapeMap(InvertedCanvas):
    """Canvas that plots shapefile geometries, colored by the source row key."""
    aspectRatio = 1.0
    filetype = 'geojson'

    @asyncthread
    def reload(self):
        self.reset()

        for row in Progress(self.sourceRows):
            # color according to key
            k = self.source.rowkey(row)

            # dispatch on the shapefile geometry code family
            if row.shape.shapeType in (5, 15, 25):    # polygon variants
                self.polygon(row.shape.points, self.plotColor(k), row)
            elif row.shape.shapeType in (3, 13, 23):  # polyline variants
                self.polyline(row.shape.points, self.plotColor(k), row)
            elif row.shape.shapeType in (1, 11, 21):  # point variants
                x, y = row.shape.points[0]
                self.point(x, y, self.plotColor(k), row)
            else:
                vd.status('notimpl shapeType %s' % row.shape.shapeType)

            # label the shape at the center of its bounding box
            # NOTE(review): point shapes may not carry a bbox -- confirm
            x1, y1, x2, y2 = row.shape.bbox
            textx, texty = (x1+x2)/2, (y1+y2)/2
            disptext = self.textCol.getDisplayValue(row)
            self.label(textx, texty, disptext, self.plotColor(k), row)

        self.refresh()
@ShapeMap.api
def save_geojson(vd, p, vs):
    """Write the plotted polylines of ShapeMap *vs* to path *p* as GeoJSON.

    Every geometry is emitted as a LineString; source row attributes become
    feature properties.
    """
    features = []
    for coords, attr, row in Progress(vs.polylines, 'saving'):
        feat = {
            'type': 'Feature',
            'geometry': {
                'type': 'LineString',
                'coordinates': [[x, y] for x, y in coords],
            },
            'properties': {
                col.name: col.getTypedValue(row) for col in vs.source.visibleCols
            }
        }
        features.append(feat)

    featcoll = {
        'type': 'FeatureCollection',
        'features': features,
    }
    with p.open_text(mode='w') as fp:
        # stream the encoding instead of building one giant string
        for chunk in json.JSONEncoder().iterencode(featcoll):
            fp.write(chunk)
# keyboard bindings: '.' plots the current row, 'g.' plots all rows,
# and Ctrl+S on a map saves it (default format: .geojson)
ShapeSheet.addCommand('.', 'plot-row', 'vd.push(ShapeMap(name+"_map", source=sheet, sourceRows=[cursorRow], textCol=cursorCol))', 'plot geospatial vector in current row')
ShapeSheet.addCommand('g.', 'plot-rows', 'vd.push(ShapeMap(name+"_map", source=sheet, sourceRows=rows, textCol=cursorCol))', 'plot all geospatial vectors in current sheet')
ShapeMap.addCommand('^S', 'save-sheet', 'vd.saveSheets(inputPath("save to: ", value=getDefaultSaveName(sheet)), sheet, confirm_overwrite=options.confirm_overwrite)', 'save current sheet to filename in format determined by extension (default .geojson)')
| gpl-3.0 |
timoc/scaliendb | script/db.py | 2 | 4913 | import os
import sys
import struct
import signal
import string
def read_data(fmt, data):
    """Unpack struct format *fmt* from the head of *data*.

    Returns a tuple (remaining_bytes, field1, field2, ...).
    """
    size = struct.calcsize(fmt)
    fields = struct.unpack(fmt, data[:size])
    return (data[size:],) + fields
class StoragePage:
    """Base class for pages of a chunk file; provides unpack helpers."""

    def __init__(self):
        self.offset = 0  # byte offset of the page within the file
        self.size = 0    # page size in bytes

    def read_data(self, fmt, data):
        """Unpack *fmt* from the head of *data*; return (rest, field, ...)."""
        length = struct.calcsize(fmt)
        fields = struct.unpack(fmt, data[:length])
        return (data[length:],) + fields

    def extract_data(self, length, data):
        """Split *data* after *length* bytes; return (rest, head)."""
        return data[length:], data[:length]
class StorageHeaderPage(StoragePage):
    """Header page of a chunk file: ids, key counts and page locations."""

    def __init__(self):
        self.chunk_id = 0
        self.min_log_segment_id = 0
        self.max_log_segment_id = 0
        self.max_log_command_id = 0
        self.num_keys = 0
        self.use_bloomfilter = False
        self.index_page_offset = 0
        self.index_page_size = 0
        self.bloom_page_offset = 0
        self.bloom_page_size = 0
        self.first_key = ""
        self.last_key = ""
        self.mid_point = ""

    def read(self, file):
        """Parse the 4096-byte header page from *file*; False if truncated."""
        data = file.read(4096)
        if len(data) != 4096:
            return False
        data, size, checksum, version = self.read_data("<III", data)
        # skip text
        text = data[:64]
        data = data[64:]
        data, self.chunk_id, self.min_log_segment_id, self.max_log_segment_id, self.max_log_command_id = \
            self.read_data("<QQQI", data)
        # one-byte flag: 'T' means a bloom filter page is present
        # (Python 2: file.read() returns str, so data[0] is a 1-char string)
        if data[0] == 'T':
            self.use_bloomfilter = True
        data = data[1:]
        data, self.num_keys, self.index_page_offset, self.index_page_size = self.read_data("<QQI", data)
        if self.use_bloomfilter:
            data, self.bloom_page_offset, self.bloom_page_size = self.read_data("<QI", data)
        # three length-prefixed keys: first, last, midpoint
        data, length, = self.read_data("<I", data)
        data, self.first_key, = self.extract_data(length, data)
        data, length, = self.read_data("<I", data)
        data, self.last_key, = self.extract_data(length, data)
        data, length, = self.read_data("<I", data)
        data, self.mid_point, = self.extract_data(length, data)
class StorageDataPage(StoragePage):
    """A key/value data page of a chunk file."""

    def __init__(self, index):
        self.index = index            # ordinal of this data page in the file
        self.size = 0
        self.offset = 0
        self.num_keys = 0
        self.keybuffer_length = 0
        self.checksum = 0
        self.key_values = {}

    def read(self, file):
        """Parse one data page from *file*; returns the number of keys read.

        Verifies that keys appear in strictly increasing order and prints
        "Error" otherwise.
        """
        data = file.read(4096)
        self.size, self.checksum, self.keybuffer_length, self.num_keys = struct.unpack("<IIII", data[:16])
        # pages larger than one 4K block continue in the following blocks
        if self.size > 4096:
            data += file.read(self.size - 4096)
        data = data[16:]
        # print("-- Page index %d, Num keys: %d" % (self.index, self.num_keys))
        i = 0
        prev_key = ""
        while i < self.num_keys:
            # each entry: 1-byte type code, 2-byte key length, key bytes
            type, keylen = struct.unpack("<cH", data[:3])
            data = data[3:]
            key = data[:keylen]
            if key <= prev_key:
                print("Error")
            prev_key = key
            data = data[keylen:]
            # TODO: values
            # if type == 's':
            #     vallen, = struct.unpack("<I", data[:4])
            #     data = data[4:]
            #     value = data[:vallen]
            #     data = data[vallen:]
            #     if self.key_values.has_key(key):
            #         print("Error")
            #         print(file.tell())
            #     self.key_values[key] = value
            # else:
            #     print("Delete key: " + key)
            #     if self.key_values.has_key(key):
            #         self.key_values.pop(key)
            # print(key, value)
            # print(key)
            i += 1
        return i
class StorageIndexPage(StoragePage):
    """Index page of a chunk file: (offset, key) entries for the data pages."""

    def __init__(self):
        self.size = 0
        self.offset = 0
        self.checksum = 0
        self.num_keys = 0

    def read(self, file):
        """Parse the index page from *file* (prints a summary as it goes)."""
        data = file.read(4096)
        self.size, self.checksum, self.num_keys = struct.unpack("<III", data[:12])
        # pages larger than one 4K block continue in the following blocks
        if self.size > 4096:
            data += file.read(self.size - 4096)
        data = data[12:]
        i = 0
        print("Index num_keys: %d, data length: %d" % (self.num_keys, len(data)))
        while i < self.num_keys:
            # each entry: 8-byte page offset, 2-byte key length, key bytes
            # key_offset, keylen = struct.unpack("<QH", data[:10])
            # data = data[10:]
            data, key_offset, keylen = self.read_data("<QH", data)
            key = data[:keylen]
            data = data[keylen:]
            i += 1
            # print("Index keylen: %d" % (keylen))
            # print("Index key: %s" % (key))
class StorageBloomPage(StoragePage):
    """Bloom-filter page of a chunk file."""

    def __init__(self):
        self.size = 0      # total page size in bytes
        self.offset = 0    # byte offset within the file
        self.checksum = 0

    def read(self, file):
        """Parse the bloom page starting at the current file position."""
        raw = file.read(4096)
        self.size, self.checksum = struct.unpack("<II", raw[:8])
        # a page larger than one 4K block continues in the following blocks
        if self.size > 4096:
            raw += file.read(self.size - 4096)
        raw = raw[8:]
        print("-- Bloom page size: %d" % (self.size))
def check_chunk_file(filename):
    """Sanity-check a chunk file: header, all data pages, index, bloom page.

    Prints diagnostics; relies on each page's read() to report key-order
    errors.  NOTE(review): the file handle is never closed explicitly.
    """
    f = open(filename, "rb")
    if os.path.getsize(filename) == 0:
        print("Empty chunk file")
        return
    header_page = StorageHeaderPage()
    header_page.read(f)
    print("num_keys = " + str(header_page.num_keys))
    print(str(header_page.__dict__))
    keys = 0
    index = 0
    # read data pages until we have seen every key promised by the header
    while keys < header_page.num_keys:
        data_page = StorageDataPage(index)
        keys += data_page.read(f)
        index += 1
    #print("Keys: " + str(keys))
    index_page = StorageIndexPage()
    index_page.read(f)
    if header_page.use_bloomfilter:
        bloom_page = StorageBloomPage()
        bloom_page.read(f)
if __name__ == "__main__":
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
check_chunk_file(sys.argv[1])
| agpl-3.0 |
pombredanne/pylearn2 | doc/yaml_tutorial/autoencoder.py | 44 | 2740 | from __future__ import print_function
import numpy
import pickle
class AutoEncoder:
    """Parameter container for a simple autoencoder.

    Parameters
    ----------
    nvis : int
        Number of visible units.
    nhid : int
        Number of hidden units.
    iscale : float
        Scale of the random Gaussian weight initialization.
    activation_fn : callable
        Elementwise nonlinearity (stored, not applied here).
    params : list, optional
        [W, bias_vis, bias_hid] to load instead of random initialization.
    """

    def __init__(self, nvis, nhid, iscale=0.1,
                 activation_fn=numpy.tanh,
                 params=None):
        self.nvis = nvis
        self.nhid = nhid
        self.activation_fn = activation_fn

        if params is None:
            # random init: small Gaussian weights, zero biases
            self.W = iscale * numpy.random.randn(nvis, nhid)
            self.bias_vis = numpy.zeros(nvis)
            self.bias_hid = numpy.zeros(nhid)
        else:
            self.W = params[0]
            self.bias_vis = params[1]
            self.bias_hid = params[2]

    def __str__(self):
        """Human-readable summary of the model configuration."""
        rval = '%s\n' % self.__class__.__name__
        rval += '\tnvis = %i\n' % self.nvis
        rval += '\tnhid = %i\n' % self.nhid
        rval += '\tactivation_fn = %s\n' % str(self.activation_fn)
        rval += '\tmean std(weights) = %.2f\n' % self.W.std(axis=0).mean()
        return rval

    def save(self, fname):
        """Pickle [W, bias_vis, bias_hid] to *fname*.

        BUGFIX: the file is opened in binary mode ('wb') and via a context
        manager.  Pickle data is binary -- text mode corrupts it on Windows
        and fails outright on Python 3 -- and the old code leaked the handle
        if pickle.dump raised.
        """
        with open(fname, 'wb') as fp:
            pickle.dump([self.W, self.bias_vis, self.bias_hid], fp)
if __name__ == '__main__':
    # Demonstration of pylearn2's YAML instantiation features; each example
    # builds an AutoEncoder from a YAML snippet.
    import os
    from StringIO import StringIO
    from pylearn2.config import yaml_parse

    # Example 1: plain keyword instantiation via !obj:
    example1 = """
!obj:yaml_tutorial.autoencoder.AutoEncoder {
   "nvis": 784,
   "nhid": 100,
   "iscale": 0.2,
}
"""
    stream = StringIO()
    stream.write(example1)
    stream.seek(0)
    print('Example 1: building basic auto-encoder.')
    model = yaml_parse.load(stream)
    print(model)
    stream.close()

    # Example 2: YAML anchors (&) and references (*) share a value
    example2 = """
!obj:yaml_tutorial.autoencoder.AutoEncoder {
   "nvis": &nvis 100,
   "nhid": *nvis,
}
"""
    stream = StringIO()
    stream.write(example2)
    stream.seek(0)
    print('Example 2: anchors and references.')
    model = yaml_parse.load(stream)
    print(model)
    stream.close()

    # Example 3: !import resolves a dotted path to a Python callable
    example3 = """
!obj:yaml_tutorial.autoencoder.AutoEncoder {
   "nvis": 784,
   "nhid": 100,
   "iscale": 1.0,
   "activation_fn": !import 'pylearn2.expr.nnet.sigmoid_numpy',
}
"""
    stream = StringIO()
    stream.write(example3)
    stream.seek(0)
    print('Example 3: dynamic imports through !import.')
    model = yaml_parse.load(stream)
    model.save('example3_weights.pkl')
    print(model)
    stream.close()

    # Example 4: !pkl loads pickled data (here: the weights saved above)
    example4 = """
!obj:yaml_tutorial.autoencoder.AutoEncoder {
   "nvis": 784,
   "nhid": 100,
   "params": !pkl: 'example3_weights.pkl',
}
"""
    stream = StringIO()
    stream.write(example4)
    stream.seek(0)
    print('Example 4: loading data with !pkl command.')
    model = yaml_parse.load(stream)
    print(model)
    stream.close()
| bsd-3-clause |
wutron/dlcoal | dlcoal/coal_old.py | 2 | 24948 | """
Coalescent methods
A note about population size. In this code all population sizes N or n are
uncorrected. If you need to compute a coalescent for a diploid species
you must multiply N by 2 before passing it to any of these functions.
"""
#=============================================================================
# imports
from __future__ import division
# python imports
import itertools
from itertools import chain, izip
from math import *
import random
# rasmus imports
from rasmus import treelib, stats, util
from rasmus.symbolic import *
# compbio imports
from . import birthdeath
#=============================================================================
# single coalescent PDFs, CDFs, and sampling functions
def prob_coal(t, k, n):
    """
    Probability density that the first coalescence among 'k' lineages in a
    population of size 'n' occurs at generation 't'.

    The waiting time is exponential with rate C(k,2)/n.
    """
    rate = k * (k - 1) / 2 / n
    return rate * exp(-rate * t)
def sample_coal(k, n):
    """
    Draw a random waiting time until the first coalescence among 'k'
    lineages in a population of size 'n' (exponential, rate C(k,2)/n).
    """
    rate = k * (k - 1) / 2 / n
    return random.expovariate(rate)
def sample_coal_times(k, n):
    """
    Returns a sampling of (k-1) coalescences for 'k' lineages in a
    population of size 'n'.
    """
    times = [0]
    # draw successive waiting times as the lineage count drops k, k-1, ..., 2
    for j in xrange(k, 1, -1):
        times.append(times[-1] + sample_coal(j, n))
    # drop the leading 0 so only the event times are returned
    return times[1:]
def prob_mrca(t, k, n):
    """
    Probability density function of the age 't' of the most recent
    common ancestor (MRCA) of 'k' lineages in a population size 'n'
    """
    s = 0.0
    # series expansion: each term is an exponential density for the stage
    # with i+1 lineages, weighted by the constant mrca_const()
    for i in xrange(1, k):
        lam = (i+1) * i / 2.0 / n  # coalescence rate with i+1 lineages
        s += lam * exp(- lam * t) * mrca_const(i, 1, k-1)
    return s
def cdf_mrca(t, k, n):
    """
    Cumulative probability that the most recent common ancestor (MRCA)
    of 'k' lineages in a population of size 'n' is younger than age 't'.
    """
    # a single lineage is trivially its own MRCA
    if k == 1:
        return 1.0

    s = 0.0
    # series expansion over the number of ancestral lineages i
    # ('range' instead of 'xrange' so the code also runs on Python 3;
    # on Python 2 range() behaves identically here)
    for i in range(1, k+1):
        lam = i * (i-1) / (2.0 * n)
        p = 1.0
        for y in range(1, i):
            p *= (y-k) / (k+y)
        s += exp(-lam * t) * (2*i - 1) * p
    return s
def mrca_const(i, a, b):
    """A constant used in calculating MRCA.

    Product over j in [a, b], j != i, of x_j / (x_j - y) where
    x_j = C(j+1, 2) and y = C(i+1, 2).
    """
    # i+1 choose 2
    y = (i+1) * i / 2.0
    prod = 1.0

    # 'range' instead of 'xrange' so the code also runs on Python 3
    for j in range(a, b+1):
        if j == i:
            continue
        # j+1 choose 2
        x = (j+1) * j / 2.0
        prod *= x / (x - y)
    return prod
def prob_coal_bounded(t, k, n, T):
    """
    Probability density of seeing a coalescence at 't' from 'k' lineages
    in a population of size 'n', conditioned on complete coalescence
    before the bounding time 'T'.
    """
    if t > T:
        return 0.0

    # Unbounded coalescence density, renormalized by the probability that
    # the MRCA of the remaining k-1 lineages falls within the bound.
    # Since cdf_mrca(x, 1, n) == 1.0, this expression also covers k == 2.
    # BUGFIX: a previous k == 2 special case computed prob_coal(t, k, n)
    # without returning it (a dead statement) and was removed.
    return prob_coal(t, k, n) * cdf_mrca(T-t, k-1, n) / \
        cdf_mrca(T, k, n)
def cdf_coal_bounded(t, k, n, T):
    """
    Cumulative density function of seeing a coalescence at 't' from
    'k' lineages in a population of size 'n' with bounding time 'T'.
    """
    i = k - 1
    # coalescence rate while k lineages remain: C(k,2)/n
    lam_i = (i+1)*i/2.0 / n
    C = [mrca_const(j, 1, i-1) for j in xrange(1, i)]
    B = sum(C) / lam_i
    F = [C[j-1] * exp(-(j+1)*j/2.0/n * T) / ((j+1)*j/2.0/n - lam_i)
         for j in xrange(1, i)]
    # NOTE: an unused local 'A = lam_i / n / cdf_mrca(T, k, n)' was removed;
    # the normalization below uses lam_i / cdf_mrca(T, k, n) directly.

    return (lam_i / cdf_mrca(T, k, n) *
            (B * (1-exp(-lam_i * t))
             - sum(F[j-1] * (exp(((j+1)*j/2.0/n - lam_i)*t)-1)
                   for j in xrange(1, i))))
def sample_coal_bounded(k, n, T):
    """
    Sample a coalescent time 't' for 'k' lineages and population 'n'
    on the condition that the MRCA is before 'T'
    """

    # special case
    if k == 2:
        return sample_coal_bounded2(n, T)

    # this code solves this equation for t
    #   cdf(t) - p = 0
    # where p ~ U(0, 1)   (inverse-CDF sampling via root finding)

    import scipy.optimize

    i = k - 1
    p = random.random()

    # compute constants (same series constants as cdf_coal_bounded)
    lam_i = (i+1)*i/2.0 / n
    C = [mrca_const(j, 1, i-1) for j in xrange(1, i)]
    A = lam_i / cdf_mrca(T, k, n)
    B = sum(C) / lam_i
    F = [C[j-1] * exp(-(j+1)*j/2.0/n * T) / ((j+1)*j/2.0/n - lam_i)
         for j in xrange(1, i)]

    # CDF(t) - p; extended linearly outside [0, T] so brentq can bracket
    def f(t):
        if t <= 0:
            return t - p
        if t >= T:
            return 1.0 - p + (t - T)
        return (A * (B * (1-exp(-lam_i * t))
                     - sum(F[j-1] * (exp(((j+1)*j/2.0/n - lam_i)*t)-1)
                           for j in xrange(1, i)))) - p

    return scipy.optimize.brentq(f, 0.0, T, disp=False)
def sample_coal_bounded2(n, T):
    """
    Sample a coalescence time for k=2 lineages in a population of size 'n',
    conditioned on coalescing before time 'T'.

    Equivalent to an exponential(1/n) distribution truncated to [0, T]:
    draw u uniformly from [exp(-T/n), 1] and invert the survival function.
    """
    rate = 1 / n
    lower = exp(-rate * T)
    u = random.uniform(lower, 1.0)
    return -log(u) / rate
def sample_coal_bounded_reject(k, n, T):
    """
    Sample a coalescent time 't' for 'k' lineages and population 'n'
    on the condition that the MRCA is before 'T'

    Uses rejection sampling.  It works but is very inefficient.
    """
    i = k - 1
    consts = [mrca_const(j, 1, i-1) for j in xrange(1, i)]
    x = sum(consts)

    while True:
        # draw an unbounded coalescence time within the bound
        while True:
            t = sample_coal(k, n)
            if t < T:
                break
        if i == 1:
            return t
        # accept with probability 1 - y/x (probability the remaining
        # lineages coalesce within the remaining time T - t)
        y = sum(mrca_const(j, 1, i-1) * exp(-((j+1) * j / 2.0 / n) * (T - t))
                for j in xrange(1, i))
        r = 1 - y / x
        if random.random() < r:
            return t
def prob_coal_counts(u, v, t, n):
    """
    The probability of going from 'u' lineages to 'v' lineages in time 't'
    with population size 'n'
    """
    # time rescaled in units of population size
    T = t / n

    s = 0.0
    for k in xrange(v, u+1):
        a = exp(-k*(k-1)*T/2.0) * (2*k-1)*(-1)**(k-v) / stats.factorial(v) / \
            stats.factorial(k-v) / (k+v-1) * \
            stats.prod((v+y)*(u-y)/(u+y) for y in xrange(k))
        s += a
    return s
def prob_coal_recon_topology(tree, recon, stree, n):
    """
    Returns the log probability of a reconciled gene tree ('tree', 'recon')
    from the coalescent model given a species tree 'stree' and
    population sizes 'n'
    """
    popsizes = init_popsizes(stree, n)

    # log probability
    lnp = 0.0

    nodes = set(tree.postorder())

    # init reverse reconciliation (species node -> list of gene nodes)
    rev_recon = {}
    for node, snode in recon.iteritems():
        if node not in nodes:
            raise Exception("node '%s' not in tree" % node.name)
        rev_recon.setdefault(snode, []).append(node)

    # init lineage counts entering each species branch
    lineages = {}
    for snode in stree:
        if snode.is_leaf():
            lineages[snode] = len([x for x in rev_recon[snode]
                                   if x.is_leaf()])
        else:
            lineages[snode] = 0

    # iterate through species tree branches
    for snode in stree.postorder():
        if snode.parent:
            # non root branch
            u = lineages[snode]

            # subtract number of coals in branch to get lineages leaving it
            v = u - len([x for x in rev_recon.get(snode, [])
                         if not x.is_leaf()])
            lineages[snode.parent] += v

            lnp += log(prob_coal_counts(u, v, snode.dist,
                                        popsizes[snode.name]))
            lnp -= log(num_labeled_histories(u, v))
        else:
            # root branch: everything must coalesce to a single lineage
            u = lineages[snode]
            lnp -= log(num_labeled_histories(u, 1))

    # correct for topologies H(T)
    # find connected subtrees that are in the same species branch
    subtrees = []
    subtree_root = {}
    for node in tree.preorder():
        if node.parent and recon[node] == recon[node.parent]:
            subtree_root[node] = subtree_root[node.parent]
        else:
            subtrees.append(node)
            subtree_root[node] = node

    # find leaves through recursion
    def walk(node, subtree, leaves):
        if node.is_leaf():
            leaves.append(node)
        elif (subtree_root[node.children[0]] != subtree and
              subtree_root[node.children[1]] != subtree):
            leaves.append(node)
        else:
            for child in node.children:
                walk(child, subtree, leaves)

    # apply correction for each subtree
    for subtree in subtrees:
        leaves = []
        for child in subtree.children:
            # NOTE(review): this walks 'subtree' once per child instead of
            # walking 'child' -- looks like it should be
            # walk(child, subtree, leaves); confirm against upstream dlcoal
            walk(subtree, subtree, leaves)
        if len(leaves) > 2:
            lnp += log(birthdeath.num_topology_histories(subtree, leaves))

    return lnp
def num_labeled_histories(nleaves, nroots):
    """Number of labeled histories reducing 'nleaves' lineages to 'nroots'.

    At the step with i lineages there are C(i,2) possible pairs to merge;
    the count is the product of these over i = nroots+1 .. nleaves.
    """
    n = 1.0
    # 'range' instead of 'xrange' so the code also runs on Python 3
    for i in range(nroots + 1, nleaves+1):
        n *= i * (i - 1) / 2.0
    return n
#=============================================================================
# sampling coalescent trees
#
# - normal coalescent
# - fixed time coalescent (may not be complete)
# - bounded coalescent (conditioned on completing in fixed time)
#
def sample_coal_tree(k, n):
    """
    Returns a simulated coalescent tree for 'k' leaves from a population 'n'.
    """
    # draw the k-1 coalescence times, then attach a random topology
    times = [0]
    for j in xrange(k, 1, -1):
        times.append(times[-1] + sample_coal(j, n))
    return make_tree_from_times(times)[0]
def sample_coal_tree_bounded(k, n, T, capped=False):
    """
    Returns a simulated coalescent tree for 'k' leaves from a populations 'n'
    with fixed maximum time 'T'.  The simulation is conditioned on returning
    a tree that completely coaleces before time 'T'.

    capped -- if True an artificial root to the tree.  Used primarily by
              other methods.
    """
    times = [0]
    # each draw is conditioned on finishing within the time remaining
    for j in xrange(k, 1, -1):
        times.append(times[-1] + sample_coal_bounded(j, n, T - times[-1]))
    return make_tree_from_times(times, t=T, capped=capped)[0]
def sample_coal_tree_bounded_reject(k, n, T, capped=False):
    """
    Returns a simulated coalescence tree for 'k' leaves from a population
    'n' with fixed maximum time 'T'.  The simulation is conditioned on
    returning a tree that completely coalesces before time 'T'.

    Uses rejection sampling: whole sets of coalescence times are redrawn
    until the final coalescence falls within the bound.
    """
    # sample times with rejection sampling
    while True:
        times = [0]
        for j in xrange(k, 1, -1):
            times.append(times[-1] + sample_coal(j, n))
        # BUGFIX: the bound parameter is 'T'; this comparison previously
        # read the undefined name 't' and raised NameError at runtime.
        if times[-1] < T:
            break

    return make_tree_from_times(times, t=T, capped=capped)[0]
def sample_coal_tree_fixed(k, n, t, capped=False):
    """
    Returns a simulated coalescence tree for 'k' leaves from a population size
    'n' with a fixed maximum time 't'.

    The return value is the tuple (tree, lineages) where lineages is a set
    of lineages that have not yet coalesced.

    capped -- if True, remaining lineages are added as children to a artificial
              tree root.
    """
    times = [0]
    for j in xrange(k, 1, -1):
        times.append(times[-1] + sample_coal(j, n))
        # stop once a coalescence would fall past the time limit
        if times[-1] > t:
            times.pop()
            break

    return make_tree_from_times(times, k, t, capped=capped)
def init_popsizes(stree, n):
    """
    Build a population-size dict for species tree 'stree'.

    n -- either a single size (int/float) applied to every node of 'stree',
         or a dict already mapping species name to population size, which
         is returned unchanged.
    """
    if isinstance(n, (int, float)):
        return dict.fromkeys(stree.nodes.keys(), n)
    elif isinstance(n, dict):
        return n
    else:
        # TypeError is the idiomatic exception for a bad argument type
        # (still caught by any caller catching Exception)
        raise TypeError("n must be a int or dict.")
def sample_multicoal_tree(stree, n, leaf_counts=None,
                          namefunc=None):
    """
    Returns a gene tree from a multi-species coalescence process

    stree       -- species tree
    n           -- population size (int or dict)
                   If n is a dict it must map from species name to
                   population size.
    leaf_counts -- dict of species names to a starting gene count.
                   Default is 1 gene per extant species.
    namefunc    -- a function that generates new gene names given a species
                   name.
    """

    # initialize vector for how many genes per extant species
    if leaf_counts is None:
        leaf_counts = dict((l, 1) for l in stree.leaf_names())

    # initialize function for generating new gene names
    if namefunc is None:
        spcounts = dict((l, 1) for l in stree.leaf_names())
        def namefunc(sp):
            name = sp + "_" + str(spcounts[sp])
            spcounts[sp] += 1
            return name

    # initialize population sizes
    popsizes = init_popsizes(stree, n)

    # init gene counts
    counts = dict((n.name, 0) for n in stree)
    counts.update(leaf_counts)

    # init reconciliation (gene node -> species node)
    recon = {}

    # subtrees simulated per species branch
    subtrees = {}

    # loop through species tree bottom-up, simulating each branch
    for snode in stree.postorder():
        # simulate population for one branch
        k = counts[snode.name]
        if snode.parent:
            # non basal branch: bounded by the branch length; lineages that
            # do not coalesce are passed up to the parent branch
            subtree, lineages = sample_coal_tree_fixed(k, popsizes[snode.name],
                                                       snode.dist,
                                                       capped=True)
        else:
            # basal branch: run the coalescent to completion
            subtree = sample_coal_tree(k, popsizes[snode.name])
            lineages = subtree.root
        subtrees[snode] = (subtree, lineages)
        if snode.parent:
            counts[snode.parent.name] += len(lineages)
        for node in subtree:
            recon[node] = snode

    # stitch subtrees together
    tree = treelib.Tree()

    # add all nodes to total tree
    for subtree, lineages in subtrees.values():
        tree.merge_names(subtree)
        tree.remove(subtree.root)

    for snode in stree:
        if not snode.is_leaf():
            subtree, lineages = subtrees[snode]

            # get lineages from child subtrees
            lineages2 = chain(*[subtrees[child][1]
                                for child in snode.children])

            # ensure leaves are randomly attached
            leaves = subtree.leaves()
            random.shuffle(leaves)

            # stitch leaves of the subtree to children subtree lineages
            for leaf, lineage in izip(leaves, lineages2):
                tree.add_child(leaf, lineage)

    # set root
    tree.root = subtrees[stree.root][0].root
    tree.add(tree.root)

    # name leaves after the species they reconcile to
    for leaf in tree.leaves():
        tree.rename(leaf.name, namefunc(recon[leaf].name))

    return tree, recon
def make_tree_from_times(times, k=None, t=None, leaves=None, capped=False):
    """
    Returns a Tree from a list of divergence times.

    The topology is choosen by randomly choosing pairs of leaves.
    """

    # initialize k (number of starting lineages)
    if k is None:
        if leaves is not None:
            k = len(leaves)
        else:
            k = len(times)

    tree = treelib.Tree()

    # initialize k children (the starting lineages, at time 0)
    if leaves is None:
        children = set(treelib.TreeNode(tree.new_name()) for i in xrange(k))
    else:
        children = set(treelib.TreeNode(name) for name in leaves)
    for child in children:
        tree.add(child)
        child.data["time"] = 0.0

    # perform random merges, one per coalescence time
    for i in xrange(1, len(times)):
        # make new parent and merge children
        parent = treelib.TreeNode(tree.new_name())
        parent.data["time"] = times[i]
        a, b = random.sample(children, 2)

        tree.add_child(parent, a)
        tree.add_child(parent, b)

        # adjust children set
        children.remove(a)
        children.remove(b)
        children.add(parent)

    # set branch lengths from the difference of node times
    for node in tree:
        if not node.parent:
            if t is not None:
                node.dist = t - node.data["time"]
            else:
                node.dist = 0.0
        else:
            node.dist = node.parent.data["time"] - node.data["time"]

    # for convenience cap the tree for easy drawing/manipulation
    if capped:
        tree.make_root()
        for node in children:
            tree.add_child(tree.root, node)
    else:
        # set root (only well-defined if everything coalesced)
        if len(children) == 1:
            tree.root = list(children)[0]

    # return tree and remaining (uncoalesced) lineages
    return tree, children
#=============================================================================
# allele frequency
def sample_allele_freq(p, n):
    """
    Sample a new allele frequency using starting allele frequency p and
    population size n
    """

    # absorbing boundaries
    if p <= 0.0:
        return 0.0
    if p >= 1.0:
        return 1.0

    if p < 0.05:
        # rare allele: Poisson approximation to the binomial draw
        # NOTE(review): min(..., n) looks suspicious -- a frequency should
        # presumably be capped at 1.0, not n; confirm intent
        return min(float(stats.poissonvariate(p*n))/n, n)
    if p > 0.95:
        return 1.0 - min(float(stats.poissonvariate((1-p)*n))/n, n)

    # common allele: normal approximation with binomial mean/variance
    mu = p * n
    sigma = sqrt(n * p*(1 - p))
    p1 = random.normalvariate(mu, sigma) / n

    # clamp to the valid frequency range
    if p1 < 0:
        return 0.0
    if p1 > 1:
        return 1.0
    return p1
# Legendre polynomial
# Legendre polynomial
def legendre_poly(n):
    """ \frac{1}{2^n n!} d^n/dx^n [(x^2 - 1)^n] """

    # builds the polynomial symbolically (rasmus.symbolic expression trees)
    # from the derivative formula above, then simplifies
    return simplify(('mult', ('scalar', 1.0 / (2 ** n * stats.factorial(n))),
                     derivate(('power', ('add', ('power', ('var', 'x'),
                                                 ('scalar', 2)),
                                         ('scalar', -1)),
                               ('scalar', n)),
                              'x', n)))
def legendre(n, r):
    """Evaluate the n-th Legendre polynomial at r."""
    # substitute x=r into the symbolic polynomial and simplify to a scalar
    l = simplify(assign_vars(legendre_poly(n), {'x': r}))
    assert l[0] == 'scalar'
    return l[1]
def gegenbauer(i, r):
    """Gegenbauer-style term evaluated via the hypergeometric series."""
    return ((i * (i+1)) / 2.0 * hypergeo(i+2, 1 - i, 2, (1 - r) / 2.0))
def gegenbauer2(i, r):
    """Alternate evaluation of gegenbauer() via Legendre polynomials."""
    return ((i * (i+1)) / float((2*i+1)*(1-r*r)) *
            (legendre(i-1, r) - legendre(i+1, r)))
def gegenbauer3(n, a, z):
    """Gegenbauer polynomial C_n^a(z) via its finite series expansion."""
    tot = 0
    for k in xrange(int(n/2)+1):
        tot += ((-1)**k * stats.gamma(n - k + a) / (
            stats.gamma(a) * stats.factorial(k) * stats.factorial(n - 2*k))
            * ((2*z) ** (n - 2*k)))
    return tot
def prob_fix(p, n, t, k=8, esp=0.001):
    """Probability of fixation"""
    r = 1 - 2*p
    prob = p
    # series expansion truncated after k terms, or earlier once a term
    # becomes smaller than the tolerance 'esp'
    for i in xrange(1, k+1):
        term = (.5 * (-1)**i * (legendre(i-1, r) - legendre(i+1, r)) *
                exp(-t * i * (i+1) / (4 * n)))
        if term != 0.0 and abs(term) < esp:
            return prob + term
        prob += term

    return prob
def hypergeo(a, b, c, z, k=100):
    """Hypergeometric function"""
    # accumulate log-magnitudes and signs of the series terms to avoid
    # overflow, then re-exponentiate at the end
    terms = [0.0]
    signs = [1.0]
    for i in xrange(1, k+1):
        # ratio between consecutive series terms
        term = float((i+a-1)*(i+b-1)*z)/(i+c-1)/i
        signs.append(util.sign(term) * signs[-1])
        if term == 0.0:
            break
        terms.append(log(abs(term)) + terms[i-1])
    return sum(s*exp(i) for s, i in zip(signs, terms))
def loghypergeo(a, b, c, z, k=100):
    """
    Hypergeometric function

    Performs computation in log-space; returns (sign, log magnitude).
    """
    terms = [0.0]
    signs = [1.0]
    for i in xrange(1, k+1):
        # ratio between consecutive series terms
        term = float((i+a-1)*(i+b-1)*z)/(i+c-1)/i
        signs.append(util.sign(term) * signs[-1])
        if term == 0.0:
            break
        terms.append(log(abs(term)) + terms[i-1])

    # sum the signed terms entirely in log-space
    sgn = 1
    tot = -util.INF
    for s, t in zip(signs, terms):
        sgn, tot = stats.logadd_sign(sgn, tot, s, t)
    return sgn, tot
def hypergeo_mult(i, z1, z2, k=100):
    """Product of two hypergeometric evaluations used by freq_pdf4()."""
    h1 = hypergeo(1-i, i+2, 2, z1, k)
    h2 = hypergeo(1-i, i+2, 2, z2, k)
    return h1 * h2
def freq_pdf(x, p, n, t, k=8):
    """Density of allele frequency 'x' after time 't', given starting
    frequency 'p' and population size 'n'; series truncated at k terms.
    Computed in log-space via loghypergeo() for numerical stability."""
    # exploit the symmetry of the density to keep x in [0, 0.5]
    if x > 0.5:
        return freq_pdf(1.0-x, 1.0-p, n, t, k)

    q = 1.0 - p
    prob = -util.INF
    sgn = 1
    t4n = t / (4*n)

    for i in xrange(1, k+1):
        #term = (p * q * i * (i+1) * (2*i+1) *
        #        hypergeo(1-i,i+2,2,p) * hypergeo(1-i,i+2,2,x) *
        #        exp(-t * i * (i+1) / (4*n)))

        lcoff = log(p * q * i * (i+1) * (2*i+1))
        s1, h1 = loghypergeo(1-i,i+2,2,p, i+2)
        s2, h2 = loghypergeo(1-i,i+2,2,x, i+2)
        sgn2 = s1 * s2
        term = (lcoff + h1 + h2 - (i * (i+1) * t4n))

        sgn, prob = stats.logadd_sign(sgn, prob, sgn2, term)

    return sgn * exp(prob)
#=============================================================================
if __name__ == "__main__":
from rasmus.common import plotfunc
if 0:
for i in range(5):
print "P_%d(x) = " % i, legendre_poly(i)
print
#========================
# hypergeo speed
a, b, c, z, k = 30, 20, 12, .3, 40
util.tic("hypergeo_fast")
for i in range(100):
hypergeo_fast(a, b, c, z, k)
util.toc()
util.tic("hypergeo")
for i in range(100):
hypergeo(a, b, c, z, k)
util.toc()
util.tic("loghypergeo")
for i in range(100):
loghypergeo(a, b, c, z, k)
util.toc()
if 0:
p0 = .5
k=30
p = plotfunc(lambda x: freq_pdf(x, p0, 1000, 100, k=k),
.01, .99, .01, style="lines")
p.plotfunc(lambda x: freq_pdf(x, p0, 1000, 200, k=k),
.01, .99, .01, style="lines")
p.plotfunc(lambda x: freq_pdf(x, p0, 1000, 500, k=k),
.01, .99, .01, style="lines")
p.plotfunc(lambda x: freq_pdf(x, p0, 1000, 1000, k=k),
.01, .99, .01, style="lines")
p.plotfunc(lambda x: freq_pdf(x, p0, 1000, 2000, k=k),
.01, .99, .01, style="lines")
p.plotfunc(lambda x: freq_pdf(x, p0, 1000, 3000, k=k),
.01, .99, .01, style="lines")
p.enableOutput(True)
p.replot()
#p.plotfunc(lambda x: normalPdf(x, (.5, .1135)),
# .01, .99, .01, style="lines")
if 0:
p0 = .1
p = plotfunc(lambda x: freq_pdf(x, p0, 1000, 100, k=25),
.01, .99, .01, style="lines")
p.plotfunc(lambda x: freq_pdf(x, p0, 1000, 200, k=25),
.01, .99, .01, style="lines")
p.plotfunc(lambda x: freq_pdf(x, p0, 1000, 500, k=25),
.01, .99, .01, style="lines")
p.plotfunc(lambda x: freq_pdf(x, p0, 1000, 1000, k=25),
.01, .99, .01, style="lines")
p.plotfunc(lambda x: freq_pdf(x, p0, 1000, 2000, k=25),
.01, .99, .01, style="lines")
p.plotfunc(lambda x: freq_pdf(x, p0, 1000, 3000, k=25),
.01, .99, .01, style="lines")
p.enableOutput(True)
p.replot()
#p.plotfunc(lambda x: freq_pdf3(x, .5, 1000, 1000/10, k=40),
# .01, .99, .01, style="lines")
if 0:
p0 = .5
k=30
p = plotfunc(lambda x: freq_pdf(x, p0, 1000, 30, k=k),
.01, .99, .01, style="lines")
p.enableOutput(True)
p.replot()
#=============================================================================
# old versions
def hypergeo_old(a, b, c, z, k=100):
    """Hypergeometric function 2F1(a, b; c; z), series truncated at k terms.

    Superseded by hypergeo(), which works in log-space; kept for reference.
    """
    terms = [1.0]
    # each term is the previous one times (a+i-1)(b+i-1)z / ((c+i-1) i)
    # ('range' instead of 'xrange' so the code also runs on Python 3)
    for i in range(1, k+1):
        terms.append(float((i+a-1)*(i+b-1)*z)/(i+c-1)/i * terms[i-1])
    return sum(terms)
def freq_pdf_old(x, p, n, t, k=8):
    """Old log-space allele-frequency density; superseded by freq_pdf().

    NOTE(review): the symmetry shortcut delegates to freq_pdf2 (a different
    implementation) rather than recursing on itself -- confirm intent."""
    if x > 0.5:
        return freq_pdf2(1.0-x, 1.0-p, n, t, k)

    q = 1.0 - p
    prob = -util.INF
    sgn = 1
    t4n = t / (4*n)

    for i in xrange(1, k+1):
        #term = (p * q * i * (i+1) * (2*i+1) *
        #        hypergeo(1-i,i+2,2,p) * hypergeo(1-i,i+2,2,x) *
        #        exp(-t * i * (i+1) / (4*n)))

        lcoff = log(p * q * i * (i+1) * (2*i+1))
        h1 = hypergeo(1-i,i+2,2,p, i+2)
        h2 = hypergeo(1-i,i+2,2,x, i+2)
        sgn2 = util.sign(h1) * util.sign(h2)
        if sgn2 != 0:
            term = (lcoff + log(abs(h1)) + log(abs(h2)) +
                    (- i * (i+1) * t4n))
            sgn, prob = stats.logadd_sign(sgn, prob, sgn2, term)

    return sgn * exp(prob)
def freq_pdf2(x, p, n, t, k=8):
    """Old allele-frequency density via Legendre/Gegenbauer terms.

    NOTE(review): contains a leftover debug 'print term' that writes every
    series term to stdout."""
    r = 1 - 2*p
    z = 1 - 2*x

    prob = 0.0
    for i in xrange(1, k+1):
        term = ((2*i + 1) * (i - r*r) / float(i * (i+1)) *
                gegenbauer(i, r) * gegenbauer(i, z) *
                exp(-t * i * (i+1) / (4*n)))
        print term
        prob += term

    return prob
def freq_pdf3(x, p, n, t, k=8):
    """Old allele-frequency density computed directly (no log-space);
    prone to overflow for large k -- superseded by freq_pdf()."""
    q = 1.0 - p
    prob = 0.0
    for i in xrange(1, k+1):
        term = (p * q * i * (i+1) * (2*i+1) *
                hypergeo(1-i,i+2,2,p,40) * hypergeo(1-i,i+2,2,x,40) *
                exp(-t * i * (i+1) / (4*n)))
        prob += term

    return prob
def freq_pdf4(x, p, n, t, k=8):
    """Old allele-frequency density variant using hypergeo_mult();
    superseded by freq_pdf()."""
    q = 1.0 - p
    prob = 0.0
    for i in xrange(1, k+1):
        term = (p * q * i * (i+1) * (2*i+1) *
                hypergeo_mult(i, p, x, 100) *
                exp(-t * i * (i+1) / (4*n)))
        prob += term

    return prob
def cdf_mrca2(t, k, n):
    """
    Cumulative probability density of the age 't' of the most recent common
    ancestor (MRCA) of 'k' lineages in a population size 'n'

    Old implementation based on mrca_const(); superseded by cdf_mrca().
    """
    if k == 1:
        return 1.0
    s = 0.0
    for i in xrange(1, k):
        lam = (i+1) * i / 2.0 / n
        s += (1 - exp(- lam * t)) * mrca_const(i, 1, k-1)
    return s
| gpl-2.0 |
tinchoss/Python_Android | python/src/Lib/sqlite3/test/dump.py | 69 | 1739 | # Author: Paul Kippes <kippesp@gmail.com>
import unittest
import sqlite3 as sqlite
class DumpTests(unittest.TestCase):
    """Tests for Connection.iterdump() using an in-memory database."""

    def setUp(self):
        self.cx = sqlite.connect(":memory:")
        self.cu = self.cx.cursor()

    def tearDown(self):
        self.cx.close()

    def CheckTableDump(self):
        # schema and data whose dump output we expect verbatim
        expected_sqls = [
                """CREATE TABLE t1(id integer primary key, s1 text, """ \
                """t1_i1 integer not null, i2 integer, unique (s1), """ \
                """constraint t1_idx1 unique (i2));"""
                ,
                """INSERT INTO \"t1\" VALUES(1,'foo',10,20);"""
                ,
                """INSERT INTO \"t1\" VALUES(2,'foo2',30,30);"""
                ,
                """CREATE TABLE t2(id integer, t2_i1 integer, """ \
                """t2_i2 integer, primary key (id),""" \
                """foreign key(t2_i1) references t1(t1_i1));"""
                ,
                """CREATE TRIGGER trigger_1 update of t1_i1 on t1 """ \
                """begin """ \
                """update t2 set t2_i1 = new.t1_i1 where t2_i1 = old.t1_i1; """ \
                """end;"""
                ,
                """CREATE VIEW v1 as select * from t1 left join t2 """ \
                """using (id);"""
                ]
        [self.cu.execute(s) for s in expected_sqls]
        i = self.cx.iterdump()
        actual_sqls = [s for s in i]
        # iterdump() wraps the statements in a transaction
        expected_sqls = ['BEGIN TRANSACTION;'] + expected_sqls + \
            ['COMMIT;']
        [self.assertEqual(expected_sqls[i], actual_sqls[i])
            for i in xrange(len(expected_sqls))]
def suite():
    """Build the suite of DumpTests methods whose names start with 'Check'."""
    dump_suite = unittest.makeSuite(DumpTests, "Check")
    return unittest.TestSuite(dump_suite)
def test():
    """Run the dump tests with a plain text test runner."""
    runner = unittest.TextTestRunner()
    runner.run(suite())

if __name__ == "__main__":
    test()
| apache-2.0 |
lahosken/pants | contrib/cpp/src/python/pants/contrib/cpp/tasks/cpp_task.py | 14 | 1725 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import subprocess
from pants.base.exceptions import TaskError
from pants.task.task import Task
from pants.contrib.cpp.targets.cpp_binary import CppBinary
from pants.contrib.cpp.targets.cpp_library import CppLibrary
from pants.contrib.cpp.targets.cpp_target import CppTarget
from pants.contrib.cpp.toolchain.cpp_toolchain import CppToolchain
class CppTask(Task):
    """Base class for pants tasks that operate on C++ targets."""

    @staticmethod
    def is_cpp(target):
        """True if *target* is any kind of C++ target."""
        return isinstance(target, CppTarget)

    @staticmethod
    def is_library(target):
        """True if *target* is a C++ library."""
        return isinstance(target, CppLibrary)

    @staticmethod
    def is_binary(target):
        """True if *target* is a C++ binary."""
        return isinstance(target, CppBinary)

    @classmethod
    def register_options(cls, register):
        super(CppTask, cls).register_options(register)
        register('--compiler', advanced=True, fingerprint=True,
                 help='Set a specific compiler to use (eg, g++-4.8, clang++)')

    def execute(self):
        raise NotImplementedError('execute must be implemented by subclasses of CppTask')

    def run_command(self, cmd, workunit):
        """Run *cmd*, streaming output into *workunit*; raise TaskError on failure."""
        try:
            self.context.log.debug('Executing: {0}'.format(cmd))
            # TODO: capture stdout/stderr and redirect to log
            subprocess.check_call(cmd, stdout=workunit.output('stdout'), stderr=workunit.output('stderr'))
        except subprocess.CalledProcessError as e:
            raise TaskError('Execution failed: {0}'.format(e))

    @property
    def cpp_toolchain(self):
        """Toolchain configured with the --compiler option (if any)."""
        return CppToolchain(self.get_options().compiler)
| apache-2.0 |
stefanbuenten/nanodegree | p5/tools/startup.py | 9 | 1161 | #!/usr/bin/python
print
print "checking for nltk"
try:
import nltk
except ImportError:
print "you should install nltk before continuing"
print "checking for numpy"
try:
import numpy
except ImportError:
print "you should install numpy before continuing"
print "checking for scipy"
try:
import scipy
except:
print "you should install scipy before continuing"
print "checking for sklearn"
try:
import sklearn
except:
print "you should install sklearn before continuing"
print
print "downloading the Enron dataset (this may take a while)"
print "to check on progress, you can cd up one level, then execute <ls -lthr>"
print "Enron dataset should be last item on the list, along with its current size"
print "download will complete at about 423 MB"
import urllib
url = "https://www.cs.cmu.edu/~./enron/enron_mail_20150507.tgz"
urllib.urlretrieve(url, filename="../enron_mail_20150507.tgz")
print "download complete!"
print
print "unzipping Enron dataset (this may take a while)"
import tarfile
import os
os.chdir("..")
tfile = tarfile.open("enron_mail_20150507.tgz", "r:gz")
tfile.extractall(".")
print "you're ready to go!"
| mit |
MichaelAquilina/S4 | s4/commands/ls_command.py | 2 | 2111 | #! -*- encoding: utf-8 -*-
from datetime import datetime
from tabulate import tabulate
from s4.commands import Command
class LsCommand(Command):
    """List the keys tracked by a sync target, with their local and remote
    (S3) timestamps and the total local size."""

    def run(self):
        """Print a table of keys for the configured target.

        Logs a hint and returns early when no targets exist or the requested
        target is unknown.
        """
        if "targets" not in self.config:
            self.logger.info("You have not added any targets yet")
            self.logger.info('Use the "add" command to do this')
            return
        if self.args.target not in self.config["targets"]:
            # sorted() already returns a list; the redundant list() wrapper
            # was removed.
            all_targets = sorted(self.config["targets"].keys())
            self.logger.info('"%s" is an unknown target', self.args.target)
            self.logger.info("Choices are: %s", all_targets)
            return
        target = self.config["targets"][self.args.target]
        client_1, client_2 = self.get_clients(target)
        sort_by = self.args.sort_by.lower()
        descending = self.args.descending
        # Union of keys known to either side, so deleted entries show up too.
        keys = set(client_1.get_index_keys()) | set(client_2.get_index_keys())
        total_size = 0
        data = []
        for key in sorted(keys):
            self.logger.debug("Processing %s", key)
            entry_1 = client_1.index.get(key, {})
            entry_2 = client_2.index.get(key, {})
            ts_1 = entry_1.get("local_timestamp")
            ts_2 = entry_2.get("local_timestamp")
            if self.args.show_all or ts_1 is not None:
                data.append(
                    (
                        key,
                        datetime.utcfromtimestamp(int(ts_1))
                        if ts_1 is not None
                        else "<deleted>",
                        datetime.utcfromtimestamp(int(ts_2))
                        if ts_2 is not None
                        else None,
                    )
                )
            # NOTE: size is accumulated for every key, including rows that
            # are filtered out of the listing above (matches original
            # behavior).
            size = client_1.get_size(key)
            self.logger.debug("%s size: %s", key, size)
            total_size += size
        headers = ["key", "local", "s3"]
        # Sort rows by the column named in --sort-by.
        data = sorted(data, reverse=descending, key=lambda x: x[headers.index(sort_by)])
        print(tabulate(data, headers=headers))
        print("Total Size: {:.2f}Mb".format(total_size / (1024 * 1024)))
| gpl-3.0 |
akhilaananthram/nupic | nupic/regions/TestNode.py | 34 | 7705 | from pprint import pprint as pp
import numpy
from PyRegion import PyRegion
class TestNode(PyRegion):
  """Test region used to exercise the NuPIC network API.

  Exposes parameters of every scalar/array type, echoes a function of its
  input to its output, and can be told to fail at specific points
  (getSpec/__init__/compute) to test error handling.
  """

  @classmethod
  def getSpec(cls):
    """Return the node spec dict describing inputs, outputs and parameters."""
    # Test hook: raise if a test set the _failIngetSpec attribute.
    if hasattr(TestNode, '_failIngetSpec'):
      assert False, 'Failing in TestNode.getSpec() as requested'
    result = dict(
      description='The node spec of the NuPIC 2 Python TestNode',
      singleNodeOnly=False,
      inputs=dict(
        bottomUpIn=dict(
          description='Primary input for the node',
          dataType='Real64',
          count=0,
          required=True,
          regionLevel=False,
          isDefaultInput=True,
          requireSplitterMap=True
        )
      ),
      outputs=dict(
        bottomUpOut=dict(
          description='Primary output for the node',
          dataType='Real64',
          count=0,
          regionLevel=False,
          isDefaultOutput=True
        )
      ),
      parameters=dict(
        int32Param=dict(
          description='Int32 scalar parameter',
          dataType='Int32',
          count=1,
          constraints='',
          defaultValue='32',
          accessMode='ReadWrite'
        ),
        uint32Param=dict(
          description='UInt32 scalar parameter',
          dataType='UInt32',
          count=1,
          constraints='',
          defaultValue='33',
          accessMode='ReadWrite'
        ),
        int64Param=dict(
          description='Int64 scalar parameter',
          dataType='Int64',
          count=1,
          constraints='',
          defaultValue='64',
          accessMode='ReadWrite'
        ),
        uint64Param=dict(
          description='UInt64 scalar parameter',
          dataType='UInt64',
          count=1,
          constraints='',
          defaultValue='65',
          accessMode='ReadWrite'
        ),
        real32Param=dict(
          description='Real32 scalar parameter',
          dataType='Real32',
          count=1,
          constraints='',
          defaultValue='32.1',
          accessMode='ReadWrite'
        ),
        real64Param=dict(
          description='Real64 scalar parameter',
          dataType='Real64',
          count=1,
          constraints='',
          defaultValue='64.1',
          accessMode='ReadWrite'
        ),
        # NOTE(review): the spec spells these with a lowercase 'array'
        # (real32arrayParam / int64arrayParam) while __init__ uses
        # real32ArrayParam / int64ArrayParam -- confirm which casing
        # consumers rely on.
        real32arrayParam=dict(
          description='Real32 array parameter',
          dataType='Real32',
          count=0, # array
          constraints='',
          defaultValue='',
          accessMode='ReadWrite'
        ),
        int64arrayParam=dict(
          description='Int64 array parameter',
          dataType='Int64',
          count=0, # array
          constraints='',
          defaultValue='',
          accessMode='ReadWrite'
        ),
        stringParam=dict(
          description='String parameter',
          dataType='Byte',
          count=0, # string is conventionally Byte/0
          constraints='',
          defaultValue='nodespec value',
          accessMode='ReadWrite'
        ),
        failInInit=dict(
          description='For testing failure in __init__()',
          dataType='Int32',
          count=1,
          constraints='',
          defaultValue='0',
          accessMode='ReadWrite'
        ),
        failInCompute=dict(
          description='For testing failure in compute()',
          dataType='Int32',
          count=1,
          constraints='',
          defaultValue='0',
          accessMode='ReadWrite'
        ),
      ),
      commands=dict()
    )
    print result
    return result

  def __init__(self, *args, **kwargs):
    """Initialize parameters to deliberate placeholder values, then apply
    any keyword-argument overrides."""
    # Facilitate failing in __init__ to test error handling
    if 'failInInit' in kwargs:
      assert False, 'TestNode.__init__() Failing on purpose as requested'
    # Check if should fail in compute to test error handling
    self._failInCompute = kwargs.pop('failInCompute', False)
    # set these to a bunch of incorrect values, just to make
    # sure they are set correctly by the nodespec.
    self.parameters = dict(
      int32Param=32,
      uint32Param=33,
      int64Param=64,
      uint64Param=65,
      real32Param=32.1,
      real64Param=64.1,
      real32ArrayParam=numpy.arange(10).astype('float32'),
      real64ArrayParam=numpy.arange(10).astype('float64'),
      # Construct int64 array in the same way as in C++
      int64ArrayParam=numpy.arange(4).astype('int64'),
      stringParam="nodespec value")
    for key in kwargs:
      if not key in self.parameters:
        raise Exception("TestNode found keyword %s but there is no parameter with that name" % key)
      self.parameters[key] = kwargs[key]
    self.outputElementCount = 2 # used for computation
    self._delta = 1
    self._iter = 0
    for i in xrange(0,4):
      self.parameters["int64ArrayParam"][i] = i*64

  def getParameter(self, name, index):
    """Return the value of parameter ``name`` (``index`` is ignored)."""
    assert name in self.parameters
    return self.parameters[name]

  def setParameter(self, name, index, value):
    """Set parameter ``name`` to ``value`` (``index`` is ignored)."""
    assert name in self.parameters
    self.parameters[name] = value

  def initialize(self, dims, splitterMaps):
    """Record the 2-D region dimensions and the bottomUpIn splitter map."""
    print 'TestNode.initialize() here.'
    assert len(dims) == 2
    self.dims = dims
    self.nodeCount = dims[0] * dims[1]
    self.splitterMap = splitterMaps['bottomUpIn']
    print 'self.nodeCount:', self.nodeCount
    print 'self.splitterMap:', self.splitterMap
    print

  def _getInputForNode(self, input, index):
    """Return the slice of ``input`` belonging to node ``index`` per the
    splitter map."""
    #from dbgp.client import brk; brk(port=9019)
    #indices = self.splitterMap[index * 8: index * 8 + 8]
    indices = self.splitterMap[index]
    v = []
    for i in indices:
      v.append(input[i])
    return v

  def compute(self, inputs, outputs):
    """Fill bottomUpOut: per node, element 0 is input length plus the
    iteration counter; later elements are node + sum(input) + multiples of
    self._delta."""
    if self._failInCompute:
      assert False, 'TestNode.compute() Failing on purpose as requested'
    print 'TestNode.compute() here.'
    print 'splitter map:',
    pp(self.splitterMap)
    print
    print 'inputs:',
    pp(inputs)
    print
    if not 'bottomUpIn' in inputs:
      bottomUpIn = [0] * 8
    else:
      bottomUpIn = inputs['bottomUpIn']
    bottomUpOut = outputs['bottomUpOut']
    assert len(bottomUpOut) == self.nodeCount * self.outputElementCount
    for node in range(self.nodeCount):
      input = self._getInputForNode(bottomUpIn, node)
      if len(input) > 0:
        try:
          input = numpy.concatenate(input)
        except ValueError: # 0-d dimensioned inputs don't need concatenation
          #from dbgp.client import brk; brk(port=9019)
          pass
      base = node * self.outputElementCount
      bottomUpOut[base] = len(input) + self._iter
      x = sum(input)
      for i in range(1, self.outputElementCount):
        value = node + x + (i - 1) * self._delta
        bottomUpOut[base+i] = value
        print 'index, value:', base+i, value
        print bottomUpOut[:base+i+1]
        print '-----'
    self._iter += 1
    print 'outputs:',
    pp(outputs)
    print

  def getOutputElementCount(self, name):
    """Return the number of elements in the bottomUpOut output."""
    assert name == 'bottomUpOut'
    return self.outputElementCount

  def getParameterArrayCount(self, name, index):
    """Return the length of the named array parameter."""
    assert name.endswith('ArrayParam')
    print 'len(self.parameters[%s]) = %d' % (name, len(self.parameters[name]))
    return len(self.parameters[name])

  def getParameterArray(self, name, index, array):
    """Copy the named array parameter into ``array`` (must match in length
    and dtype)."""
    assert name.endswith('ArrayParam')
    assert name in self.parameters
    v = self.parameters[name]
    assert len(array) == len(v)
    assert array.dtype == v.dtype
    array[:] = v

  def setParameterArray(self, name, index, array):
    """Replace the named array parameter with a copy of ``array``."""
    assert name.endswith('ArrayParam')
    assert name in self.parameters
    assert array.dtype == self.parameters[name].dtype
    self.parameters[name] = numpy.array(array)
def test():
  """Print this region's node spec."""
  from pprint import pprint as pp
  ns = TestNode.getSpec()
  pp(ns)
if __name__=='__main__':
  test()
jounex/hue | desktop/core/ext-py/django-nose-1.3/django_nose/runner.py | 26 | 15930 | """Django test runner that invokes nose.
You can use... ::
NOSE_ARGS = ['list', 'of', 'args']
in settings.py for arguments that you want always passed to nose.
"""
from __future__ import print_function
import os
import sys
from optparse import make_option
from types import MethodType
import django
from django.conf import settings
from django.core import exceptions
from django.core.management.base import BaseCommand
from django.core.management.color import no_style
from django.core.management.commands.loaddata import Command
from django.db import connections, transaction, DEFAULT_DB_ALIAS
from django.db.backends.creation import BaseDatabaseCreation
from django.utils.importlib import import_module
try:
from django.apps import apps
except ImportError:
# Django < 1.7
from django.db.models.loading import cache as apps
import nose.core
from django_nose.plugin import DjangoSetUpPlugin, ResultPlugin, TestReorderer
from django_nose.utils import uses_mysql
# Python < 2.5 compatibility: provide any() when the builtin is missing.
try:
    any
except NameError:
    def any(iterable):
        """Fallback implementation of the any() builtin."""
        for element in iterable:
            if element:
                return True
        return False
# Django version compatibility: DiscoverRunner moved in Django 1.8.
try:
    from django.test.runner import DiscoverRunner
except ImportError:
    # Django < 1.8
    from django.test.simple import DjangoTestSuiteRunner as DiscoverRunner
# Public API of this module.
__all__ = ['BasicNoseRunner', 'NoseTestSuiteRunner']
# This is a table of Django's "manage.py test" options which
# correspond to nosetests options with a different name:
OPTION_TRANSLATION = {'--failfast': '-x',
                      '--nose-verbosity': '--verbosity'}
def translate_option(opt):
    """Map a Django option string onto its nose spelling.

    Handles both bare flags ("--failfast") and "--opt=value" forms; options
    without an entry in OPTION_TRANSLATION pass through unchanged.
    """
    if '=' not in opt:
        return OPTION_TRANSLATION.get(opt, opt)
    long_opt, value = opt.split('=', 1)
    return '%s=%s' % (translate_option(long_opt), value)
# Django v1.2 does not have a _get_test_db_name() function.
if not hasattr(BaseDatabaseCreation, '_get_test_db_name'):
    def _get_test_db_name(self):
        # Backport of the Django >= 1.3 helper: honor TEST_NAME if set,
        # otherwise prefix the real DB name with "test_".
        TEST_DATABASE_PREFIX = 'test_'
        if self.connection.settings_dict['TEST_NAME']:
            return self.connection.settings_dict['TEST_NAME']
        return TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
    BaseDatabaseCreation._get_test_db_name = _get_test_db_name
def _get_plugins_from_settings():
    """Yield an instance of each nose plugin named in settings.NOSE_PLUGINS,
    plus django_nose's own TestReorderer.

    Raises ImproperlyConfigured for paths that are not importable dotted
    class paths.
    """
    plugin_paths = list(getattr(settings, 'NOSE_PLUGINS', []))
    plugin_paths.append('django_nose.plugin.TestReorderer')
    for plug_path in plugin_paths:
        # Split "package.module.Class" into module path and class name.
        try:
            dot = plug_path.rindex('.')
        except ValueError:
            raise exceptions.ImproperlyConfigured(
                "%s isn't a Nose plugin module" % plug_path)
        p_mod, p_classname = plug_path[:dot], plug_path[dot + 1:]
        try:
            mod = import_module(p_mod)
        except ImportError as e:
            raise exceptions.ImproperlyConfigured(
                'Error importing Nose plugin module %s: "%s"' % (p_mod, e))
        try:
            p_class = getattr(mod, p_classname)
        except AttributeError:
            raise exceptions.ImproperlyConfigured(
                'Nose plugin module "%s" does not define a "%s"' %
                (p_mod, p_classname))
        yield p_class()
def _get_options():
    """Return all nose options that don't conflict with django options."""
    cfg_files = nose.core.all_config_files()
    manager = nose.core.DefaultPluginManager()
    config = nose.core.Config(env=os.environ, files=cfg_files, plugins=manager)
    config.plugins.addPlugins(list(_get_plugins_from_settings()))
    options = config.getParser()._get_all_options()
    # copy nose's --verbosity option and rename to --nose-verbosity
    verbosity = [o for o in options if o.get_opt_string() == '--verbosity'][0]
    # Clone every optparse attribute except dest/metavar, which we override.
    verbosity_attrs = dict((attr, getattr(verbosity, attr))
                           for attr in verbosity.ATTRS
                           if attr not in ('dest', 'metavar'))
    options.append(make_option('--nose-verbosity',
                               dest='nose_verbosity',
                               metavar='NOSE_VERBOSITY',
                               **verbosity_attrs))
    # Django 1.6 introduces a "--pattern" option, which is shortened into "-p"
    # do not allow "-p" to collide with nose's "--plugins" option.
    plugins_option = [o for o in options if o.get_opt_string() == '--plugins'][0]
    plugins_option._short_opts.remove('-p')
    # Drop options Django's management command already owns (plus --version
    # and --help).
    django_opts = [opt.dest for opt in BaseCommand.option_list] + ['version']
    return tuple(o for o in options if o.dest not in django_opts and
                 o.action != 'help')
class BasicNoseRunner(DiscoverRunner):
    """Facade that implements a nose runner in the guise of a Django runner

    You shouldn't have to use this directly unless the additions made by
    ``NoseTestSuiteRunner`` really bother you. They shouldn't, because they're
    all off by default.
    """
    # Keep nose itself from collecting this class as a test.
    __test__ = False

    # Replace the builtin command options with the merged django/nose options:
    options = _get_options()

    def run_suite(self, nose_argv):
        """Run nose with ``nose_argv`` plus the django_nose bridging plugins
        and return the collected test result."""
        result_plugin = ResultPlugin()
        plugins_to_add = [DjangoSetUpPlugin(self),
                          result_plugin,
                          TestReorderer()]
        for plugin in _get_plugins_from_settings():
            plugins_to_add.append(plugin)
        try:
            django.setup()
        except AttributeError:
            # Setup isn't necessary in Django < 1.7
            pass
        nose.core.TestProgram(argv=nose_argv, exit=False,
                              addplugins=plugins_to_add)
        return result_plugin.result

    def run_tests(self, test_labels, extra_tests=None):
        """Run the unit tests for all the test names in the provided list.

        Test names specified may be file or module names, and may optionally
        indicate the test case to run by separating the module or file name
        from the test case name with a colon. Filenames may be relative or
        absolute.

        N.B.: The test_labels argument *MUST* be a sequence of
        strings, *NOT* just a string object. (Or you will be
        specifying tests for for each character in your string, and
        not the whole string.

        Examples:

        runner.run_tests( ('test.module',) )
        runner.run_tests(['another.test:TestCase.test_method'])
        runner.run_tests(['a.test:TestCase'])
        runner.run_tests(['/path/to/test/file.py:test_function'])
        runner.run_tests( ('test.module', 'a.test:TestCase') )

        Note: the extra_tests argument is currently ignored. You can
        run old non-nose code that uses it without totally breaking,
        but the extra tests will not be run. Maybe later.

        Returns the number of tests that failed.
        """
        nose_argv = (['nosetests'] + list(test_labels))
        if hasattr(settings, 'NOSE_ARGS'):
            nose_argv.extend(settings.NOSE_ARGS)
        # Skip over 'manage.py test' and any arguments handled by django.
        django_opts = ['--noinput', '--liveserver', '-p', '--pattern']
        for opt in BaseCommand.option_list:
            django_opts.extend(opt._long_opts)
            django_opts.extend(opt._short_opts)
        # Forward the remaining command-line flags, translating the names
        # that nose spells differently.
        nose_argv.extend(translate_option(opt) for opt in sys.argv[1:]
                         if opt.startswith('-')
                         and not any(opt.startswith(d) for d in django_opts))
        # if --nose-verbosity was omitted, pass Django verbosity to nose
        if ('--verbosity' not in nose_argv and
            not any(opt.startswith('--verbosity=') for opt in nose_argv)):
            nose_argv.append('--verbosity=%s' % str(self.verbosity))
        if self.verbosity >= 1:
            print(' '.join(nose_argv))
        result = self.run_suite(nose_argv)
        # suite_result expects the suite as the first argument. Fake it.
        return self.suite_result({}, result)
# Saved reference to the stock loaddata handle() so the monkeypatched
# replacement below can delegate to it.
_old_handle = Command.handle


def _foreign_key_ignoring_handle(self, *fixture_labels, **options):
    """Wrap the stock loaddata to ignore foreign key
    checks so we can load circular references from fixtures.

    This is monkeypatched into place in setup_databases().
    """
    using = options.get('database', DEFAULT_DB_ALIAS)
    commit = options.get('commit', True)
    connection = connections[using]
    # MySQL stinks at loading circular references:
    if uses_mysql(connection):
        cursor = connection.cursor()
        cursor.execute('SET foreign_key_checks = 0')
    _old_handle(self, *fixture_labels, **options)
    if uses_mysql(connection):
        cursor = connection.cursor()
        cursor.execute('SET foreign_key_checks = 1')
    # NOTE(erickt): This breaks installing Hue examples because we use
    # loaddata to install the examples, then run Document.objects.sync() to
    # clean up the database, so we need our connection to be left open.
    #if commit:
    #    connection.close()
def _skip_create_test_db(self, verbosity=1, autoclobber=False, serialize=True):
    """``create_test_db`` implementation that skips both creation and flushing

    The idea is to re-use the perfectly good test DB already created by an
    earlier test run, cutting the time spent before any tests run from 5-13s
    (depending on your I/O luck) down to 3.

    Monkeypatched over ``creation.create_test_db`` by
    ``NoseTestSuiteRunner.setup_databases()``.
    """
    # Notice that the DB supports transactions. Originally, this was done in
    # the method this overrides. The confirm method was added in Django v1.3
    # (https://code.djangoproject.com/ticket/12991) but removed in Django v1.5
    # (https://code.djangoproject.com/ticket/17760). In Django v1.5
    # supports_transactions is a cached property evaluated on access.
    if callable(getattr(self.connection.features, 'confirm', None)):
        # Django v1.3-4
        self.connection.features.confirm()
    elif hasattr(self, "_rollback_works"):
        # Django v1.2 and lower
        can_rollback = self._rollback_works()
        self.connection.settings_dict['SUPPORTS_TRANSACTIONS'] = can_rollback
    return self._get_test_db_name()
def _reusing_db():
"""Return whether the ``REUSE_DB`` flag was passed"""
return os.getenv('REUSE_DB', 'false').lower() in ('true', '1', '')
def _can_support_reuse_db(connection):
"""Return whether it makes any sense to
use REUSE_DB with the backend of a connection."""
# Perhaps this is a SQLite in-memory DB. Those are created implicitly when
# you try to connect to them, so our usual test doesn't work.
return not connection.creation._get_test_db_name() == ':memory:'
def _should_create_database(connection):
    """Decide whether the given DB must be (re)created.

    True when the backend can't support reuse, when the database doesn't
    exist yet, or when the user didn't opt in with REUSE_DB.
    """
    # TODO: Notice when the Model classes change and return True. Worst case,
    # we can generate sqlall and hash it, though it's a bit slow (2 secs) and
    # hits the DB for no good reason. Until we find a faster way, I'm inclined
    # to keep making people explicitly saying REUSE_DB if they want to reuse
    # the DB.
    if not _can_support_reuse_db(connection):
        return True
    # Probe for existence by attempting to connect.
    try:
        connection.cursor()
    except Exception:  # TODO: Be more discerning but still DB agnostic.
        return True
    return not _reusing_db()
def _mysql_reset_sequences(style, connection):
"""Return a list of SQL statements needed to
reset all sequences for Django tables."""
tables = connection.introspection.django_table_names(only_existing=True)
flush_statements = connection.ops.sql_flush(
style, tables, connection.introspection.sequence_list())
# connection.ops.sequence_reset_sql() is not implemented for MySQL,
# and the base class just returns []. TODO: Implement it by pulling
# the relevant bits out of sql_flush().
return [s for s in flush_statements if s.startswith('ALTER')]
# Being overzealous and resetting the sequences on non-empty tables
# like django_content_type seems to be fine in MySQL: adding a row
# afterward does find the correct sequence number rather than
# crashing into an existing row.
class NoseTestSuiteRunner(BasicNoseRunner):
    """A runner that optionally skips DB creation

    Monkeypatches connection.creation to let you skip creating databases if
    they already exist. Your tests will start up much faster.

    To opt into this behavior, set the environment variable ``REUSE_DB`` to
    something that isn't "0" or "false" (case insensitive).
    """

    def _get_models_for_connection(self, connection):
        """Return a list of models for a connection."""
        tables = connection.introspection.get_table_list(connection.cursor())
        return [m for m in apps.get_models() if
                m._meta.db_table in tables]

    def setup_databases(self):
        """Create or reuse the test DB for every configured connection."""
        for alias in connections:
            connection = connections[alias]
            creation = connection.creation
            test_db_name = creation._get_test_db_name()

            # Mess with the DB name so other things operate on a test DB
            # rather than the real one. This is done in create_test_db when
            # we don't monkeypatch it away with _skip_create_test_db.
            orig_db_name = connection.settings_dict['NAME']
            connection.settings_dict['NAME'] = test_db_name

            if _should_create_database(connection):
                # We're not using _skip_create_test_db, so put the DB name
                # back:
                connection.settings_dict['NAME'] = orig_db_name

                # Since we replaced the connection with the test DB, closing
                # the connection will avoid pooling issues with SQLAlchemy. The
                # issue is trying to CREATE/DROP the test database using a
                # connection to a DB that was established with that test DB.
                # MySQLdb doesn't allow it, and SQLAlchemy attempts to reuse
                # the existing connection from its pool.
                connection.close()
            else:
                # Reset auto-increment sequences. Apparently, SUMO's tests are
                # horrid and coupled to certain numbers.
                cursor = connection.cursor()
                style = no_style()

                if uses_mysql(connection):
                    reset_statements = _mysql_reset_sequences(
                        style, connection)
                else:
                    reset_statements = connection.ops.sequence_reset_sql(
                        style, self._get_models_for_connection(connection))

                for reset_statement in reset_statements:
                    cursor.execute(reset_statement)

                # Django v1.3 (https://code.djangoproject.com/ticket/9964)
                # starts using commit_unless_managed() for individual
                # connections. Backwards compatibility for Django 1.2 is to use
                # the generic transaction function.
                transaction.commit_unless_managed(using=connection.alias)

                # Each connection has its own creation object, so this affects
                # only a single connection:
                creation.create_test_db = MethodType(
                    _skip_create_test_db, creation, creation.__class__)

        Command.handle = _foreign_key_ignoring_handle

        # With our class patch, does nothing but return some connection
        # objects:
        return super(NoseTestSuiteRunner, self).setup_databases()

    def teardown_databases(self, *args, **kwargs):
        """Leave those poor, reusable databases alone if REUSE_DB is true."""
        if not _reusing_db():
            return super(NoseTestSuiteRunner, self).teardown_databases(
                *args, **kwargs)
        # else skip tearing down the DB so we can reuse it next time
| apache-2.0 |
pavanky/arrayfire-python | examples/graphics/histogram.py | 3 | 1053 | #!/usr/bin/python
#######################################################
# Copyright (c) 2015, ArrayFire
# All rights reserved.
#
# This file is distributed under 3-clause BSD license.
# The complete license agreement can be obtained at:
# http://arrayfire.com/licenses/BSD-3-Clause
########################################################
import arrayfire as af
import sys
import os
if __name__ == "__main__":
    # First CLI argument (required): path to the input image.
    if (len(sys.argv) == 1):
        raise RuntimeError("Expected to the image as the first argument")
    if not os.path.isfile(sys.argv[1]):
        raise RuntimeError("File %s not found" % sys.argv[1])
    # Optional second argument selects the ArrayFire compute device.
    if (len(sys.argv) > 2):
        af.set_device(int(sys.argv[2]))
    af.info()
    hist_win = af.Window(512, 512, "3D Plot example using ArrayFire")
    img_win = af.Window(480, 640, "Input Image")
    # Load as 8-bit image and build a 256-bin histogram over [0, 255].
    img = af.load_image(sys.argv[1]).as_type(af.Dtype.u8)
    hist = af.histogram(img, 256, 0, 255)
    # Render both windows until either one is closed.
    while (not hist_win.close()) and (not img_win.close()):
        hist_win.hist(hist, 0, 255)
        img_win.image(img)
| bsd-3-clause |
DirkdeDraak/easybuild-framework | easybuild/toolchains/goolf.py | 4 | 1606 | ##
# Copyright 2013-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for goolf compiler toolchain (includes GCC, OpenMPI, OpenBLAS, LAPACK, ScaLAPACK and FFTW).
@author: Kenneth Hoste (Ghent University)
"""
from easybuild.toolchains.gompi import Gompi
from easybuild.toolchains.fft.fftw import Fftw
from easybuild.toolchains.linalg.openblas import OpenBLAS
from easybuild.toolchains.linalg.scalapack import ScaLAPACK
class Goolf(Gompi, OpenBLAS, ScaLAPACK, Fftw):
    """Compiler toolchain with GCC, OpenMPI, OpenBLAS, ScaLAPACK and FFTW."""
    # Toolchain name as it appears in easyconfig files.
    NAME = 'goolf'
    # goolf builds on the gompi (GCC + OpenMPI) subtoolchain.
    SUBTOOLCHAIN = Gompi.NAME
| gpl-2.0 |
Sannoso/baxter_examples | scripts/head_wobbler.py | 5 | 4073 | #!/usr/bin/env python
# Copyright (c) 2013-2015, Rethink Robotics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Rethink Robotics nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import argparse
import random
import rospy
import baxter_interface
from baxter_interface import CHECK_VERSION
class Wobbler(object):
    """'Wobbles' Baxter's head: nods once, then pans to random angles
    for ten seconds before returning control."""

    def __init__(self):
        """Create the head interface and enable the robot, remembering the
        initial enabled state so it can be restored on shutdown."""
        self._done = False
        self._head = baxter_interface.Head()
        # verify robot is enabled
        print("Getting robot state... ")
        self._rs = baxter_interface.RobotEnable(CHECK_VERSION)
        self._init_state = self._rs.state().enabled
        print("Enabling robot... ")
        self._rs.enable()
        print("Running. Ctrl-c to quit")

    def clean_shutdown(self):
        """
        Exits example cleanly by moving head to neutral position and
        maintaining start state
        """
        print("\nExiting example...")
        if self._done:
            self.set_neutral()
        if not self._init_state and self._rs.state().enabled:
            print("Disabling robot...")
            self._rs.disable()

    def set_neutral(self):
        """
        Sets the head back into a neutral pose
        """
        self._head.set_pan(0.0)

    def wobble(self):
        """
        Performs the wobbling
        """
        # BUG FIX: the docstring above was previously placed AFTER the first
        # statement, which made it a dead string literal instead of the
        # method's docstring (PEP 257: a docstring must be the first
        # statement). Behavior is otherwise unchanged.
        self.set_neutral()
        self._head.command_nod()
        command_rate = rospy.Rate(1)
        control_rate = rospy.Rate(100)
        start = rospy.get_time()
        # Pan toward random targets for 10 seconds.
        while not rospy.is_shutdown() and (rospy.get_time() - start < 10.0):
            angle = random.uniform(-1.5, 1.5)
            # Keep commanding the pan until within tolerance of the target.
            while (not rospy.is_shutdown() and
                   not (abs(self._head.pan() - angle) <=
                        baxter_interface.HEAD_PAN_ANGLE_TOLERANCE)):
                self._head.set_pan(angle, speed=30, timeout=0)
                control_rate.sleep()
            command_rate.sleep()
        self._done = True
        rospy.signal_shutdown("Example finished.")
def main():
    """RSDK Head Example: Wobbler
    Nods the head and pans side-to-side towards random angles.
    Demonstrates the use of the baxter_interface.Head class.
    """
    arg_fmt = argparse.RawDescriptionHelpFormatter
    parser = argparse.ArgumentParser(formatter_class=arg_fmt,
                                     description=main.__doc__)
    # No options of our own: parsing only provides -h/--help behavior.
    parser.parse_args(rospy.myargv()[1:])
    print("Initializing node... ")
    rospy.init_node("rsdk_head_wobbler")
    wobbler = Wobbler()
    # Return the head to neutral / restore robot state on Ctrl-C.
    rospy.on_shutdown(wobbler.clean_shutdown)
    print("Wobbling... ")
    wobbler.wobble()
    print("Done.")
if __name__ == '__main__':
    main()
01000101/cloudify-cli | cloudify_cli/config/logger_config.py | 2 | 1456 | # flake8: NOQA
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
"""
Logging configuration (formatters, handlers..)
Note:
-----
This file doe's not include the actual loggers.
The loggers are configured in the config.yaml file
in order to expose them to cli users.
"""
# dictConfig-style logging configuration: formatters and handlers only.
# The loggers themselves are configured in config.yaml (see module docstring).
LOGGER = {
    "version": 1,
    "formatters": {
        # Format for messages written to the rotating log file.
        "file": {
            "format": "%(asctime)s [%(levelname)s] %(message)s"
        },
        # Console output shows the bare message only.
        "console": {
            "format": "%(message)s"
        }
    },
    "handlers": {
        "file": {
            "class": "logging.handlers.RotatingFileHandler",
            "formatter": "file",
            # NOTE(review): maxBytes/backupCount are strings here, but
            # RotatingFileHandler expects integers -- confirm the consumer
            # converts these before passing them to dictConfig.
            "maxBytes": "5000000",
            "backupCount": "20"
        },
        "console": {
            "class": "logging.StreamHandler",
            "stream": "ext://sys.stdout",
            "formatter": "console"
        }
    }
}
| apache-2.0 |
j71200/drone | scripts/fake_balloon.py | 1 | 8489 | #
# fake_balloon.py - creates an imagine of a balloon given a vehicle position, vehicle attitude and balloon position
#
# Start from command line using 'python balloon_finder.py'.
# The program will run for 10 seconds and will create a video file, "balloon_finder.avi", which contains the original video along with
# super-imposed circles showing the where circles were detected
#
# How to use:
# run colour_finder.py to find the best min and max Hue, Saturation and Brightness (aka Value) levels. Enter these into h_low, h_high, etc below
# run this script and hold the object in front of the camera and move it around
# check the balloon_finder.avi file to ensure the super-imposed circles accurately follow the object
import sys
from time import time
import math
import cv2
import numpy
from droneapi.lib import Location
import balloon_config
from balloon_video import balloon_video
import balloon_utils
from position_vector import PositionVector
from find_balloon import balloon_finder
class BalloonSimulator(object):
    """Renders synthetic camera frames containing a coloured balloon.

    Frames are built from the vehicle's position/attitude and the balloon
    location read from the [fake-balloon] config section, so the detection
    pipeline can be exercised without a real camera feed.
    """

    # constructor
    def __init__(self):
        # read fake balloon location from config file
        self.fake_balloon_location = Location(balloon_config.config.get_float('fake-balloon', 'lat',-35.363274),
                                              balloon_config.config.get_float('fake-balloon', 'lon',149.164630),
                                              balloon_config.config.get_float('fake-balloon', 'alt',15))
        # fake balloon's colour is mid way between colour filter's low and high values
        # so the detector is guaranteed to accept the rendered colour
        h = (balloon_finder.filter_low[0] + balloon_finder.filter_high[0]) / 2
        s = (balloon_finder.filter_low[1] + balloon_finder.filter_high[1]) / 2
        v = (balloon_finder.filter_low[2] + balloon_finder.filter_high[2]) / 2
        # convert colour to BGR palette
        fake_balloon_colour_bgr = cv2.cvtColor(numpy.uint8([[[h,s,v]]]),cv2.COLOR_HSV2BGR)
        self.fake_balloon_colour_bgr_scalar = cv2.cv.Scalar(fake_balloon_colour_bgr.item(0), fake_balloon_colour_bgr.item(1), fake_balloon_colour_bgr.item(2))
        # fake balloon is same radius as actual balloon
        self.fake_balloon_radius = balloon_finder.balloon_radius_expected
        # background sky and ground colours
        self.background_sky_colour_bgr = (232, 228, 227)
        self.background_ground_colour_bgr_scalar = cv2.cv.Scalar(87, 145, 158)
        # last iterations balloon radius
        self.last_balloon_radius = 0

    # get_background - returns a background image given a roll and pitch angle
    # vehicle_roll and pitch are in radians
    def get_background(self, vehicle_roll, vehicle_pitch):
        """Return a sky/ground image as seen at the given roll/pitch (radians)."""
        # create sky coloured image
        image = numpy.zeros((balloon_video.img_height, balloon_video.img_width, 3),numpy.uint8)
        image[:] = self.background_sky_colour_bgr
        # create large rectangle which will become the ground
        # (corners are rotated by -roll so the horizon tilts with the vehicle)
        top_left = balloon_utils.rotate_xy(balloon_video.img_center_x-1000, balloon_video.img_center_y, -vehicle_roll)
        top_right = balloon_utils.rotate_xy(balloon_video.img_center_x+1000, balloon_video.img_center_y, -vehicle_roll)
        bot_left = balloon_utils.rotate_xy(balloon_video.img_center_x-1000,balloon_video.img_center_y+1000, -vehicle_roll)
        bot_right = balloon_utils.rotate_xy(balloon_video.img_center_x+1000,balloon_video.img_center_y+1000, -vehicle_roll)
        # calculate vertical pixel shift
        pitch_pixel_shift = balloon_video.angle_to_pixels_y(vehicle_pitch)
        # add pitch adjustment
        top_left = balloon_utils.shift_pixels_down(top_left, pitch_pixel_shift)
        top_right = balloon_utils.shift_pixels_down(top_right, pitch_pixel_shift)
        bot_left = balloon_utils.shift_pixels_down(bot_left, pitch_pixel_shift)
        bot_right = balloon_utils.shift_pixels_down(bot_right, pitch_pixel_shift)
        # draw horizon
        box = numpy.array([top_left, top_right, bot_right, bot_left],numpy.int32)
        cv2.fillConvexPoly(image, box, self.background_ground_colour_bgr_scalar)
        return image

    # draw_fake_balloon - draws fake balloon in the frame at the specified roll, pitch and yaw angle
    #    veh_pos : PositionVector holding the vehicle's offset from home
    #    balloon_pos : PositionVector holding the balloon's offset from home
    #    vehicle roll, pitch and yaw angles should be in radians
    def draw_fake_balloon(self, frame, veh_pos, balloon_pos, vehicle_roll, vehicle_pitch, vehicle_yaw):
        """Draw the balloon onto frame (in place) as seen from veh_pos."""
        # calculate bearing to balloon
        bearing_to_balloon = PositionVector.get_bearing(veh_pos, balloon_pos)
        yaw_to_balloon = balloon_utils.wrap_PI(bearing_to_balloon-vehicle_yaw)
        # calculate earth frame pitch angle from vehicle to balloon
        pitch_to_balloon = vehicle_pitch + PositionVector.get_elevation(veh_pos, balloon_pos)
        #print "Fake Balloon Bearing:%f Pitch:%f Dist:%f" % (math.degrees(bearing_to_balloon), math.degrees(pitch_to_balloon), dist_to_balloon_xy)
        # calculate pixel position of balloon
        balloon_x = balloon_video.angle_to_pixels_x(yaw_to_balloon) + balloon_video.img_center_x
        balloon_y = balloon_video.angle_to_pixels_y(pitch_to_balloon) + balloon_video.img_center_y
        # calculate size of balloon in pixels from distance and size
        dist_to_balloon_xyz = PositionVector.get_distance_xyz(veh_pos, balloon_pos)
        balloon_radius = balloon_utils.get_pixels_from_distance(dist_to_balloon_xyz, balloon_finder.balloon_radius_expected)
        # store balloon radius so main() can compare it with the detector's estimate
        self.last_balloon_radius = balloon_radius
        # draw balloon
        cv2.circle(frame,(balloon_x,balloon_y), balloon_radius, self.fake_balloon_colour_bgr_scalar, -1)

    # get_simulated_frame - returns an image of a simulated background and balloon based upon vehicle position, vehicle attitude and balloon position
    def get_simulated_frame(self, veh_pos, vehicle_roll, vehicle_pitch, vehicle_yaw):
        """Return a complete simulated frame (background plus balloon)."""
        # get balloon position
        balloon_pos = PositionVector.get_from_location(self.fake_balloon_location)
        # get background
        sim_frame = self.get_background(vehicle_roll, vehicle_pitch)
        # draw balloon on background
        self.draw_fake_balloon(sim_frame, veh_pos, balloon_pos, vehicle_roll, vehicle_pitch, vehicle_yaw)
        return sim_frame

    # main - tests this class
    def main(self):
        """Interactive self-test: fly towards the balloon and show each frame.

        Press ESC in the display window to exit.
        """
        # set home to tridge's home field (absolute alt = 270)
        PositionVector.set_home_location(Location(-35.362938,149.165085,0))
        # calculate balloon position
        fake_balloon_pos = PositionVector.get_from_location(self.fake_balloon_location)
        # vehicle attitude and position
        veh_pos = PositionVector(0,0,fake_balloon_pos.z) # at home location
        veh_roll = math.radians(0)   # leaned right 10 deg
        veh_pitch = math.radians(0)  # pitched back at 0 deg
        veh_yaw = PositionVector.get_bearing(veh_pos,fake_balloon_pos) # facing towards fake balloon
        # display positions from home
        print "Vehicle %s" % veh_pos
        print "Balloon %s" % fake_balloon_pos
        # generate simulated frame of balloon 10m north, 2m above vehicle
        img = self.get_simulated_frame(veh_pos, veh_roll, veh_pitch, veh_yaw)
        while(True):
            # move vehicle towards balloon
            veh_pos = veh_pos + (fake_balloon_pos - veh_pos) * 0.01
            # regenerate frame
            img = self.get_simulated_frame(veh_pos, veh_roll, veh_pitch, veh_yaw)
            # look for balloon in image using blob detector
            found_in_image, xpos, ypos, size = balloon_finder.analyse_frame(img)
            # display actual vs real distance
            dist_actual = PositionVector.get_distance_xyz(veh_pos, fake_balloon_pos)
            dist_est = balloon_utils.get_distance_from_pixels(size, balloon_finder.balloon_radius_expected)
            print "Dist Est:%f  Act:%f   Size Est:%f  Act:%f" % (dist_est, dist_actual, size, self.last_balloon_radius)
            # show image
            cv2.imshow("fake balloon", img)
            # wait for keypress
            k = cv2.waitKey(5) & 0xFF
            if k == 27:
                break
        # destroy windows
        cv2.destroyAllWindows()
# declare global instance shared by importers of this module
balloon_sim = BalloonSimulator()

# run the interactive self-test when executed as a script
if __name__ == "__main__":
    balloon_sim.main()
| gpl-3.0 |
benthomasson/ansible | lib/ansible/modules/files/stat.py | 5 | 18891 | #!/usr/bin/python
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Ansible module metadata: stable public interface, maintained by core team.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['stableinterface'],
                    'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: stat
version_added: "1.3"
short_description: Retrieve file or file system status
description:
- Retrieves facts for a file similar to the linux/unix 'stat' command.
- For Windows targets, use the M(win_stat) module instead.
options:
path:
description:
- The full path of the file/object to get the facts of.
required: true
follow:
description:
- Whether to follow symlinks.
choices: [ 'no', 'yes' ]
default: 'no'
get_md5:
description:
- Whether to return the md5 sum of the file.
- Will return None if not a regular file or if we're
unable to use md5 (Common for FIPS-140 compliant systems).
choices: [ 'no', 'yes' ]
default: 'yes'
get_checksum:
description:
- Whether to return a checksum of the file (default sha1).
choices: [ 'no', 'yes' ]
default: 'yes'
version_added: "1.8"
checksum_algorithm:
description:
- Algorithm to determine checksum of file. Will throw an error if the
host is unable to use specified algorithm.
choices: [ sha1, sha224, sha256, sha384, sha512 ]
default: sha1
aliases: [ checksum, checksum_algo ]
version_added: "2.0"
get_mime:
description:
- Use file magic and return data about the nature of the file. this uses
the 'file' utility found on most Linux/Unix systems.
- This will add both `mime_type` and 'charset' fields to the return, if possible.
- In 2.3 this option changed from 'mime' to 'get_mime' and the default changed to 'Yes'.
choices: [ 'no', 'yes' ]
default: 'yes'
version_added: "2.1"
aliases: [ mime, mime_type, mime-type ]
get_attributes:
description:
- Get file attributes using lsattr tool if present.
choices: [ 'no', 'yes' ]
default: 'yes'
version_added: "2.3"
aliases: [ attr, attributes ]
notes:
- For Windows targets, use the M(win_stat) module instead.
author: Bruce Pennypacker (@bpennypacker)
'''
EXAMPLES = '''
# Obtain the stats of /etc/foo.conf, and check that the file still belongs
# to 'root'. Fail otherwise.
- stat:
path: /etc/foo.conf
register: st
- fail:
msg: "Whoops! file ownership has changed"
when: st.stat.pw_name != 'root'
# Determine if a path exists and is a symlink. Note that if the path does
# not exist, and we test sym.stat.islnk, it will fail with an error. So
# therefore, we must test whether it is defined.
# Run this to understand the structure, the skipped ones do not pass the
# check performed by 'when'
- stat:
path: /path/to/something
register: sym
- debug:
msg: "islnk isn't defined (path doesn't exist)"
when: sym.stat.islnk is not defined
- debug:
msg: "islnk is defined (path must exist)"
when: sym.stat.islnk is defined
- debug:
msg: "Path exists and is a symlink"
when: sym.stat.islnk is defined and sym.stat.islnk
- debug:
msg: "Path exists and isn't a symlink"
when: sym.stat.islnk is defined and sym.stat.islnk == False
# Determine if a path exists and is a directory. Note that we need to test
# both that p.stat.isdir actually exists, and also that it's set to true.
- stat:
path: /path/to/something
register: p
- debug:
msg: "Path exists and is a directory"
when: p.stat.isdir is defined and p.stat.isdir
# Don't do md5 checksum
- stat:
path: /path/to/myhugefile
get_md5: no
# Use sha256 to calculate checksum
- stat:
path: /path/to/something
checksum_algorithm: sha256
'''
RETURN = r'''
stat:
description: dictionary containing all the stat data, some platforms might add additional fields
returned: success
type: complex
contains:
exists:
description: if the destination path actually exists or not
returned: success
type: boolean
sample: True
path:
description: The full path of the file/object to get the facts of
returned: success and if path exists
type: string
sample: '/path/to/file'
mode:
description: Unix permissions of the file in octal
returned: success, path exists and user can read stats
type: octal
sample: 1755
isdir:
description: Tells you if the path is a directory
returned: success, path exists and user can read stats
type: boolean
sample: False
ischr:
description: Tells you if the path is a character device
returned: success, path exists and user can read stats
type: boolean
sample: False
isblk:
description: Tells you if the path is a block device
returned: success, path exists and user can read stats
type: boolean
sample: False
isreg:
description: Tells you if the path is a regular file
returned: success, path exists and user can read stats
type: boolean
sample: True
isfifo:
description: Tells you if the path is a named pipe
returned: success, path exists and user can read stats
type: boolean
sample: False
islnk:
description: Tells you if the path is a symbolic link
returned: success, path exists and user can read stats
type: boolean
sample: False
issock:
description: Tells you if the path is a unix domain socket
returned: success, path exists and user can read stats
type: boolean
sample: False
uid:
description: Numeric id representing the file owner
returned: success, path exists and user can read stats
type: int
sample: 1003
gid:
description: Numeric id representing the group of the owner
returned: success, path exists and user can read stats
type: int
sample: 1003
size:
description: Size in bytes for a plain file, amount of data for some special files
returned: success, path exists and user can read stats
type: int
sample: 203
inode:
description: Inode number of the path
returned: success, path exists and user can read stats
type: int
sample: 12758
dev:
description: Device the inode resides on
returned: success, path exists and user can read stats
type: int
sample: 33
nlink:
description: Number of links to the inode (hard links)
returned: success, path exists and user can read stats
type: int
sample: 1
atime:
description: Time of last access
returned: success, path exists and user can read stats
type: float
sample: 1424348972.575
mtime:
description: Time of last modification
returned: success, path exists and user can read stats
type: float
sample: 1424348972.575
ctime:
description: Time of last metadata update or creation (depends on OS)
returned: success, path exists and user can read stats
type: float
sample: 1424348972.575
wusr:
description: Tells you if the owner has write permission
returned: success, path exists and user can read stats
type: boolean
sample: True
rusr:
description: Tells you if the owner has read permission
returned: success, path exists and user can read stats
type: boolean
sample: True
xusr:
description: Tells you if the owner has execute permission
returned: success, path exists and user can read stats
type: boolean
sample: True
wgrp:
description: Tells you if the owner's group has write permission
returned: success, path exists and user can read stats
type: boolean
sample: False
rgrp:
description: Tells you if the owner's group has read permission
returned: success, path exists and user can read stats
type: boolean
sample: True
xgrp:
description: Tells you if the owner's group has execute permission
returned: success, path exists and user can read stats
type: boolean
sample: True
woth:
description: Tells you if others have write permission
returned: success, path exists and user can read stats
type: boolean
sample: False
roth:
description: Tells you if others have read permission
returned: success, path exists and user can read stats
type: boolean
sample: True
xoth:
description: Tells you if others have execute permission
returned: success, path exists and user can read stats
type: boolean
sample: True
isuid:
description: Tells you if the invoking user's id matches the owner's id
returned: success, path exists and user can read stats
type: boolean
sample: False
isgid:
description: Tells you if the invoking user's group id matches the owner's group id
returned: success, path exists and user can read stats
type: boolean
sample: False
lnk_source:
description: Target of the symlink normalized for the remote filesystem
returned: success, path exists and user can read stats and the path is a symbolic link
type: string
sample: /home/foobar/21102015-1445431274-908472971
lnk_target:
description: Target of the symlink. Note that relative paths remain relative
returned: success, path exists and user can read stats and the path is a symbolic link
type: string
sample: ../foobar/21102015-1445431274-908472971
version_added: 2.4
md5:
description: md5 hash of the path
returned: success, path exists and user can read stats and path
supports hashing and md5 is supported
type: string
sample: f88fa92d8cf2eeecf4c0a50ccc96d0c0
checksum:
description: hash of the path
returned: success, path exists, user can read stats, path supports
hashing and supplied checksum algorithm is available
type: string
sample: 50ba294cdf28c0d5bcde25708df53346825a429f
pw_name:
description: User name of owner
returned: success, path exists and user can read stats and installed python supports it
type: string
sample: httpd
gr_name:
description: Group name of owner
returned: success, path exists and user can read stats and installed python supports it
type: string
sample: www-data
mime_type:
description: file magic data or mime-type
returned: success, path exists and user can read stats and
installed python supports it and the `mime` option was true, will
return 'unknown' on error.
type: string
sample: PDF document, version 1.2
charset:
description: file character set or encoding
returned: success, path exists and user can read stats and
installed python supports it and the `mime` option was true, will
return 'unknown' on error.
type: string
sample: us-ascii
readable:
description: Tells you if the invoking user has the right to read the path
returned: success, path exists and user can read the path
type: boolean
sample: False
version_added: 2.2
writeable:
description: Tells you if the invoking user has the right to write the path
returned: success, path exists and user can write the path
type: boolean
sample: False
version_added: 2.2
executable:
        description: Tells you if the invoking user has the right to execute the path
returned: success, path exists and user can execute the path
type: boolean
sample: False
version_added: 2.2
attributes:
description: list of file attributes
returned: success, path exists and user can execute the path
type: list
sample: [ immutable, extent ]
version_added: 2.3
'''
import errno
import grp
import os
import pwd
import stat
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes
def format_output(module, path, st):
    """Translate an os.stat()/os.lstat() result into the 'stat' return dict.

    ``module`` is unused here but kept for signature compatibility with
    callers.  Returns a dict of portable fields, plus any platform-specific
    st_* attributes present on this OS under friendlier names.
    """
    mode = st.st_mode

    # Fields available on every platform.
    output = {
        'exists': True,
        'path': path,
        'mode': "%04o" % stat.S_IMODE(mode),
        'isdir': stat.S_ISDIR(mode),
        'ischr': stat.S_ISCHR(mode),
        'isblk': stat.S_ISBLK(mode),
        'isreg': stat.S_ISREG(mode),
        'isfifo': stat.S_ISFIFO(mode),
        'islnk': stat.S_ISLNK(mode),
        'issock': stat.S_ISSOCK(mode),
        'uid': st.st_uid,
        'gid': st.st_gid,
        'size': st.st_size,
        'inode': st.st_ino,
        'dev': st.st_dev,
        'nlink': st.st_nlink,
        'atime': st.st_atime,
        'mtime': st.st_mtime,
        'ctime': st.st_ctime,
        'wusr': bool(mode & stat.S_IWUSR),
        'rusr': bool(mode & stat.S_IRUSR),
        'xusr': bool(mode & stat.S_IXUSR),
        'wgrp': bool(mode & stat.S_IWGRP),
        'rgrp': bool(mode & stat.S_IRGRP),
        'xgrp': bool(mode & stat.S_IXGRP),
        'woth': bool(mode & stat.S_IWOTH),
        'roth': bool(mode & stat.S_IROTH),
        'xoth': bool(mode & stat.S_IXOTH),
        'isuid': bool(mode & stat.S_ISUID),
        'isgid': bool(mode & stat.S_ISGID),
    }

    # Platform dependent attributes: copy each one that exists on this OS.
    platform_fields = (
        # Some Linux
        ('st_blocks', 'blocks'),
        ('st_blksize', 'block_size'),
        ('st_rdev', 'device_type'),
        ('st_flags', 'flags'),
        # Some Berkley based
        ('st_gen', 'generation'),
        ('st_birthtime', 'birthtime'),
        # RISCOS
        ('st_ftype', 'file_type'),
        ('st_attrs', 'attrs'),
        ('st_obtype', 'object_type'),
        # OS X
        ('st_rsize', 'real_size'),
        ('st_creator', 'creator'),
        ('st_type', 'file_type'),
    )
    for attr_name, out_key in platform_fields:
        if hasattr(st, attr_name):
            output[out_key] = getattr(st, attr_name)

    return output
def main():
    """Entry point: stat 'path' and exit with the gathered facts as JSON.

    A missing path is a *successful* run with stat.exists == False; any
    other OS error fails the module.
    """
    module = AnsibleModule(
        argument_spec=dict(
            path=dict(required=True, type='path'),
            follow=dict(type='bool', default='no'),
            get_md5=dict(type='bool', default='yes'),
            get_checksum=dict(type='bool', default='yes'),
            get_mime=dict(type='bool', default='yes', aliases=['mime', 'mime_type', 'mime-type']),
            get_attributes=dict(type='bool', default='yes', aliases=['attr', 'attributes']),
            checksum_algorithm=dict(type='str', default='sha1',
                                    choices=['sha1', 'sha224', 'sha256', 'sha384', 'sha512'],
                                    aliases=['checksum', 'checksum_algo']),
        ),
        supports_check_mode=True,
    )

    path = module.params.get('path')
    b_path = to_bytes(path, errors='surrogate_or_strict')
    follow = module.params.get('follow')
    get_mime = module.params.get('get_mime')
    get_attr = module.params.get('get_attributes')
    get_md5 = module.params.get('get_md5')
    get_checksum = module.params.get('get_checksum')
    checksum_algorithm = module.params.get('checksum_algorithm')

    # main stat data: follow=yes dereferences symlinks, otherwise stat the
    # link itself
    try:
        if follow:
            st = os.stat(b_path)
        else:
            st = os.lstat(b_path)
    except OSError as e:
        if e.errno == errno.ENOENT:
            output = {'exists': False}
            module.exit_json(changed=False, stat=output)

        module.fail_json(msg=e.strerror)

    # process base results
    output = format_output(module, path, st)

    # resolved permissions of the *invoking* user, not the file owner
    for perm in [('readable', os.R_OK), ('writeable', os.W_OK), ('executable', os.X_OK)]:
        output[perm[0]] = os.access(b_path, perm[1])

    # symlink info
    if output.get('islnk'):
        output['lnk_source'] = os.path.realpath(b_path)
        output['lnk_target'] = os.readlink(b_path)

    try: # user data
        pw = pwd.getpwuid(st.st_uid)
        output['pw_name'] = pw.pw_name
    except:
        pass

    try: # group data
        grp_info = grp.getgrgid(st.st_gid)
        output['gr_name'] = grp_info.gr_name
    except:
        pass

    # checksums: only for regular, readable files
    if output.get('isreg') and output.get('readable'):
        if get_md5:
            # Will fail on FIPS-140 compliant systems
            try:
                output['md5'] = module.md5(b_path)
            except ValueError:
                output['md5'] = None
        if get_checksum:
            output['checksum'] = module.digest_from_file(b_path, checksum_algorithm)

    # try to get mime data if requested
    # NOTE(review): the key set here is 'mimetype', but the RETURN docs above
    # document it as 'mime_type' -- confirm which name consumers rely on.
    if get_mime:
        output['mimetype'] = output['charset'] = 'unknown'
        mimecmd = module.get_bin_path('file')
        if mimecmd:
            mimecmd = [mimecmd, '-i', b_path]
            try:
                rc, out, err = module.run_command(mimecmd)
                if rc == 0:
                    # 'file -i' output looks like: "path: type/subtype; charset=..."
                    mimetype, charset = out.split(':')[1].split(';')
                    output['mimetype'] = mimetype.strip()
                    output['charset'] = charset.split('=')[1].strip()
            except:
                pass

    # try to get attr data
    if get_attr:
        output['version'] = None
        output['attributes'] = []
        output['attr_flags'] = ''
        out = module.get_file_attributes(b_path)
        for x in ('version', 'attributes', 'attr_flags'):
            if x in out:
                output[x] = out[x]

    module.exit_json(changed=False, stat=output)


if __name__ == '__main__':
    main()
| gpl-3.0 |
FabianKnapp/nexmon | buildtools/gcc-arm-none-eabi-5_4-2016q2-osx/arm-none-eabi/share/gdb/python/gdb/command/prompt.py | 68 | 2079 | # Extended prompt.
# Copyright (C) 2011-2015 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""GDB command for working with extended prompts."""
import gdb
import gdb.prompt
class _ExtendedPrompt(gdb.Parameter):

    """Set the extended prompt.

    Usage: set extended-prompt VALUE

    Substitutions are applied to VALUE to compute the real prompt.

    The currently defined substitutions are:

    """
    # Add the prompt library's dynamically generated help to the
    # __doc__ string.
    __doc__ = __doc__ + gdb.prompt.prompt_help()

    set_doc = "Set the extended prompt."
    show_doc = "Show the extended prompt."

    def __init__(self):
        super(_ExtendedPrompt, self).__init__("extended-prompt",
                                              gdb.COMMAND_SUPPORT,
                                              gdb.PARAM_STRING_NOESCAPE)
        self.value = ''
        self.hook_set = False

    def get_show_string(self, pvalue):
        # Compare with equality, not 'is': identity comparison against a
        # string literal relies on interning and is a SyntaxWarning on
        # modern Pythons.
        if self.value != '':
            return "The extended prompt is: " + self.value
        else:
            return "The extended prompt is not set."

    def get_set_string(self):
        # Install the prompt hook lazily, the first time the parameter is set.
        if not self.hook_set:
            gdb.prompt_hook = self.before_prompt_hook
            self.hook_set = True
        return ""

    def before_prompt_hook(self, current):
        # Return the substituted prompt, or None to keep gdb's default.
        if self.value != '':
            return gdb.prompt.substitute_prompt(self.value)
        else:
            return None

_ExtendedPrompt()
| gpl-3.0 |
SANBI-SA/tools-iuc | tools/ncbi_entrez_eutils/esearch.py | 10 | 1665 | #!/usr/bin/env python
from __future__ import print_function
import argparse
import json
import eutils
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='ESearch', epilog='')
    parser.add_argument('db', help='Database to use')
    parser.add_argument('term', help='Query')
    parser.add_argument('--history_file', help='Filter existing history')
    parser.add_argument('--datetype', help='Date type')
    parser.add_argument('--reldate', help='In past N days')
    parser.add_argument('--mindate', help='Minimum date')
    parser.add_argument('--maxdate', help='maximum date')

    # History
    parser.add_argument('--history_out', type=argparse.FileType('w'),
                        help='Output history file')

    parser.add_argument('--user_email', help="User email")
    parser.add_argument('--admin_email', help="Admin email")
    args = parser.parse_args()

    c = eutils.Client(history_file=args.history_file, user_email=args.user_email, admin_email=args.admin_email)

    payload = {
        'db': args.db,
        'term': args.term,
        'retstart': 0,
        'retmax': 20,
        # hmmm @ retmax
    }
    # Restrict the search to a previous query's stored history, if given.
    if args.history_file is not None:
        payload.update(c.get_history())
    # Ask the server to keep the result set so it can be referenced later.
    if args.history_out is not None:
        payload['usehistory'] = 'y'

    # Optional date filters are only sent when the user supplied them.
    for attr in ('datetype', 'reldate', 'mindate', 'maxdate'):
        if getattr(args, attr, None) is not None:
            payload[attr] = getattr(args, attr)

    results = c.search(**payload)

    # Persist the returned history descriptor for downstream e-utils calls.
    if args.history_out is not None:
        history = c.extract_history(results)
        args.history_out.write(json.dumps(history, indent=4))

    print(results)
| mit |
SUSE/teuthology | scripts/schedule.py | 3 | 2686 | import docopt
import teuthology.misc
import teuthology.schedule
import sys
doc = """
usage: teuthology-schedule -h
teuthology-schedule [options] --name <name> [--] [<conf_file> ...]
Schedule ceph integration tests
positional arguments:
<conf_file> Config file to read
optional arguments:
-h, --help Show this help message and exit
-v, --verbose Be more verbose
-b <backend>, --queue-backend <backend>
Queue backend name, use prefix '@'
to append job config to the given
file path as yaml.
[default: beanstalk]
-n <name>, --name <name> Name of suite run the job is part of
-d <desc>, --description <desc> Job description
-o <owner>, --owner <owner> Job owner
-w <worker>, --worker <worker> Which worker to use (type of machine)
[default: plana]
-p <priority>, --priority <priority> Job priority (lower is sooner)
[default: 1000]
-N <num>, --num <num> Number of times to run/queue the job
[default: 1]
--first-in-suite Mark the first job in a suite so suite
can note down the rerun-related info
[default: False]
--last-in-suite Mark the last job in a suite so suite
post-processing can be run
[default: False]
--email <email> Where to send the results of a suite.
Only applies to the last job in a suite.
--timeout <timeout> How many seconds to wait for jobs to
finish before emailing results. Only
applies to the last job in a suite.
--seed <seed> The random seed for rerunning the suite.
Only applies to the last job in a suite.
--subset <subset> The subset option passed to teuthology-suite.
Only applies to the last job in a suite.
--dry-run Instead of scheduling, just output the
job config.
"""
def main(argv=None):
    """Parse scheduler CLI arguments and hand off to teuthology.schedule.

    ``argv`` defaults to the current process arguments.  The previous
    default (``argv=sys.argv[1:]``) was evaluated once at import time,
    freezing the argument list for any long-lived importer; resolve it
    per call instead.
    """
    if argv is None:
        argv = sys.argv[1:]
    args = docopt.docopt(doc, argv=argv)
    teuthology.schedule.main(args)
| mit |
xutianlong/TeamTalk | win-client/3rdParty/src/json/doxybuild.py | 136 | 6798 | """Script to generate doxygen documentation.
"""
import re
import os
import os.path
import sys
import shutil
from devtools import tarball
def find_program(*filenames):
    """find a program in folders path_lst, and sets env[var]
    @param filenames: a list of possible names of the program to search for
    @return: the full path of the filename if found, or '' if filename could not be found
    """
    paths = os.environ.get('PATH', '').split(os.pathsep)
    # On Windows try the usual executable extensions; elsewhere the bare
    # name is the executable.  The previous implementation produced an
    # *empty* suffix list on non-Windows platforms, so no candidate name
    # was ever generated and the search always failed there.
    if 'win32' in sys.platform:
        suffixes = ['', '.exe', '.com', '.bat', '.cmd']
    else:
        suffixes = ['']
    for filename in filenames:
        for name in [filename + ext for ext in suffixes]:
            for directory in paths:
                full_path = os.path.join(directory, name)
                if os.path.isfile(full_path):
                    return full_path
    return ''
def do_subst_in_file(targetfile, sourcefile, dict):
    """Replace all instances of the keys of dict with their values.
    For example, if dict is {'%VERSION%': '1.2345', '%BASE%': 'MyProg'},
    then all instances of %VERSION% in the file will be replaced with 1.2345 etc.

    Note: keys are handed to re.sub, so regex metacharacters in a key are
    interpreted as a pattern rather than matched literally.
    """
    try:
        f = open(sourcefile, 'rb')
        contents = f.read()
        f.close()
    except:
        print "Can't read source file %s"%sourcefile
        raise
    for (k,v) in dict.items():
        # Escape backslashes so re.sub does not treat them as group
        # references in the replacement string.
        v = v.replace('\\','\\\\')
        contents = re.sub(k, v, contents)
    try:
        f = open(targetfile, 'wb')
        f.write(contents)
        f.close()
    except:
        print "Can't write target file %s"%targetfile
        raise
def run_doxygen(doxygen_path, config_file, working_dir, is_silent):
    """Run doxygen on config_file from working_dir; return True on success."""
    config_file = os.path.abspath( config_file )
    doxygen_path = doxygen_path
    old_cwd = os.getcwd()
    try:
        os.chdir( working_dir )
        cmd = [doxygen_path, config_file]
        print 'Running:', ' '.join( cmd )
        try:
            import subprocess
        except:
            # Fallback for ancient Pythons without the subprocess module.
            if os.system( ' '.join( cmd ) ) != 0:
                print 'Documentation generation failed'
                return False
        else:
            if is_silent:
                # Capture output (stderr merged into stdout) so it is only
                # shown on failure.
                process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
            else:
                process = subprocess.Popen( cmd )
            stdout, _ = process.communicate()
            if process.returncode:
                print 'Documentation generation failed:'
                print stdout
                return False
        return True
    finally:
        # Always restore the caller's working directory.
        os.chdir( old_cwd )
def build_doc( options, make_release=False ):
    """Generate the doxygen documentation tree under dist/doxygen.

    When make_release is True, the option combination used for official
    releases is forced (tarball, dot graphs, HTML help, silent run).
    Returns (tarball_path, html_output_dirname) when a tarball is made;
    otherwise returns None implicitly.
    """
    if make_release:
        options.make_tarball = True
        options.with_dot = True
        options.with_html_help = True
        options.with_uml_look = True
        options.open = False
        options.silent = True

    version = open('version','rt').read().strip()
    output_dir = 'dist/doxygen' # relative to doc/doxyfile location.
    if not os.path.isdir( output_dir ):
        os.makedirs( output_dir )
    top_dir = os.path.abspath( '.' )
    html_output_dirname = 'jsoncpp-api-html-' + version
    tarball_path = os.path.join( 'dist', html_output_dirname + '.tar.gz' )
    warning_log_path = os.path.join( output_dir, '../jsoncpp-doxygen-warning.log' )
    html_output_path = os.path.join( output_dir, html_output_dirname )
    def yesno( bool ):
        # Map a truth value onto doxygen's YES/NO config literals.
        return bool and 'YES' or 'NO'
    # Placeholder -> value map applied to doc/doxyfile.in.
    subst_keys = {
        '%JSONCPP_VERSION%': version,
        '%DOC_TOPDIR%': '',
        '%TOPDIR%': top_dir,
        '%HTML_OUTPUT%': os.path.join( '..', output_dir, html_output_dirname ),
        '%HAVE_DOT%': yesno(options.with_dot),
        '%DOT_PATH%': os.path.split(options.dot_path)[0],
        '%HTML_HELP%': yesno(options.with_html_help),
        '%UML_LOOK%': yesno(options.with_uml_look),
        '%WARNING_LOG_PATH%': os.path.join( '..', warning_log_path )
    }
    # Start from a clean output directory on every run.
    if os.path.isdir( output_dir ):
        print 'Deleting directory:', output_dir
        shutil.rmtree( output_dir )
    if not os.path.isdir( output_dir ):
        os.makedirs( output_dir )
    do_subst_in_file( 'doc/doxyfile', 'doc/doxyfile.in', subst_keys )
    ok = run_doxygen( options.doxygen_path, 'doc/doxyfile', 'doc', is_silent=options.silent )
    if not options.silent:
        print open(warning_log_path, 'rb').read()
    index_path = os.path.abspath(os.path.join('doc', subst_keys['%HTML_OUTPUT%'], 'index.html'))
    print 'Generated documentation can be found in:'
    print index_path
    if options.open:
        import webbrowser
        webbrowser.open( 'file://' + index_path )
    if options.make_tarball:
        print 'Generating doc tarball to', tarball_path
        tarball_sources = [
            output_dir,
            'README.txt',
            'LICENSE',
            'NEWS.txt',
            'version'
            ]
        tarball_basedir = os.path.join( output_dir, html_output_dirname )
        tarball.make_tarball( tarball_path, tarball_sources, tarball_basedir, html_output_dirname )
        return tarball_path, html_output_dirname
def main():
    """Parse command-line options and build the documentation."""
    usage = """%prog
    Generates doxygen documentation in build/doxygen.
    Optionaly makes a tarball of the documentation to dist/.
    Must be started in the project top directory.
    """
    from optparse import OptionParser
    parser = OptionParser(usage=usage)
    parser.allow_interspersed_args = False
    parser.add_option('--with-dot', dest="with_dot", action='store_true', default=False,
        help="""Enable usage of DOT to generate collaboration diagram""")
    parser.add_option('--dot', dest="dot_path", action='store', default=find_program('dot'),
        help="""Path to GraphViz dot tool. Must be full qualified path. [Default: %default]""")
    parser.add_option('--doxygen', dest="doxygen_path", action='store', default=find_program('doxygen'),
        help="""Path to Doxygen tool. [Default: %default]""")
    parser.add_option('--with-html-help', dest="with_html_help", action='store_true', default=False,
        help="""Enable generation of Microsoft HTML HELP""")
    parser.add_option('--no-uml-look', dest="with_uml_look", action='store_false', default=True,
        help="""Generates DOT graph without UML look [Default: False]""")
    parser.add_option('--open', dest="open", action='store_true', default=False,
        help="""Open the HTML index in the web browser after generation""")
    parser.add_option('--tarball', dest="make_tarball", action='store_true', default=False,
        help="""Generates a tarball of the documentation in dist/ directory""")
    parser.add_option('-s', '--silent', dest="silent", action='store_true', default=False,
        help="""Hides doxygen output""")
    parser.enable_interspersed_args()
    options, args = parser.parse_args()
    build_doc( options )

if __name__ == '__main__':
    main()
| apache-2.0 |
beezee/GAE-Django-site | django/utils/termcolors.py | 417 | 6885 | """
termcolors.py
"""
color_names = ('black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white')
# ANSI SGR codes: foregrounds are '30'-'37', backgrounds '40'-'47'.
foreground = dict([(color_names[x], '3%s' % x) for x in range(8)])
background = dict([(color_names[x], '4%s' % x) for x in range(8)])

RESET = '0'
opt_dict = {'bold': '1', 'underscore': '4', 'blink': '5', 'reverse': '7', 'conceal': '8'}

def colorize(text='', opts=(), **kwargs):
    """
    Returns your text, enclosed in ANSI graphics codes.

    Depends on the keyword arguments 'fg' and 'bg', and the contents of
    the opts tuple/list.

    Returns the RESET code if no parameters are given.

    Valid colors:
        'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'

    Valid options:
        'bold'
        'underscore'
        'blink'
        'reverse'
        'conceal'
        'noreset' - string will not be auto-terminated with the RESET code

    Examples:
        colorize('hello', fg='red', bg='blue', opts=('blink',))
        colorize()
        colorize('goodbye', opts=('underscore',))
        print(colorize('first line', fg='red', opts=('noreset',)))
        print('this should be red too')
        print(colorize('and so should this'))
        print('this should not be red')
    """
    code_list = []
    if text == '' and len(opts) == 1 and opts[0] == 'reset':
        return '\x1b[%sm' % RESET
    # items() instead of iteritems() so this works on both Python 2 and 3.
    for k, v in kwargs.items():
        if k == 'fg':
            code_list.append(foreground[v])
        elif k == 'bg':
            code_list.append(background[v])
    for o in opts:
        if o in opt_dict:
            code_list.append(opt_dict[o])
    if 'noreset' not in opts:
        text = text + '\x1b[%sm' % RESET
    return ('\x1b[%sm' % ';'.join(code_list)) + text
def make_style(opts=(), **kwargs):
    """
    Returns a function with default parameters for colorize()

    Example:
        bold_red = make_style(opts=('bold',), fg='red')
        print bold_red('hello')
        KEYWORD = make_style(fg='yellow')
        COMMENT = make_style(fg='blue', opts=('bold',))
    """
    def style_func(text):
        # Delegate to colorize() with the captured default style.
        return colorize(text, opts, **kwargs)
    return style_func
NOCOLOR_PALETTE = 'nocolor'
DARK_PALETTE = 'dark'
LIGHT_PALETTE = 'light'
# Mapping of palette name -> {role name -> colorize() keyword arguments}.
# Every palette defines the same role set; the 'nocolor' palette maps each
# role to an empty style and doubles as the canonical list of valid roles.
PALETTES = {
    NOCOLOR_PALETTE: {
        'ERROR': {},
        'NOTICE': {},
        'SQL_FIELD': {},
        'SQL_COLTYPE': {},
        'SQL_KEYWORD': {},
        'SQL_TABLE': {},
        'HTTP_INFO': {},
        'HTTP_SUCCESS': {},
        'HTTP_REDIRECT': {},
        'HTTP_NOT_MODIFIED': {},
        'HTTP_BAD_REQUEST': {},
        'HTTP_NOT_FOUND': {},
        'HTTP_SERVER_ERROR': {},
    },
    # Palette tuned for terminals with a dark background.
    DARK_PALETTE: {
        'ERROR': { 'fg': 'red', 'opts': ('bold',) },
        'NOTICE': { 'fg': 'red' },
        'SQL_FIELD': { 'fg': 'green', 'opts': ('bold',) },
        'SQL_COLTYPE': { 'fg': 'green' },
        'SQL_KEYWORD': { 'fg': 'yellow' },
        'SQL_TABLE': { 'opts': ('bold',) },
        'HTTP_INFO': { 'opts': ('bold',) },
        'HTTP_SUCCESS': { },
        'HTTP_REDIRECT': { 'fg': 'green' },
        'HTTP_NOT_MODIFIED': { 'fg': 'cyan' },
        'HTTP_BAD_REQUEST': { 'fg': 'red', 'opts': ('bold',) },
        'HTTP_NOT_FOUND': { 'fg': 'yellow' },
        'HTTP_SERVER_ERROR': { 'fg': 'magenta', 'opts': ('bold',) },
    },
    # Palette tuned for terminals with a light background.
    LIGHT_PALETTE: {
        'ERROR': { 'fg': 'red', 'opts': ('bold',) },
        'NOTICE': { 'fg': 'red' },
        'SQL_FIELD': { 'fg': 'green', 'opts': ('bold',) },
        'SQL_COLTYPE': { 'fg': 'green' },
        'SQL_KEYWORD': { 'fg': 'blue' },
        'SQL_TABLE': { 'opts': ('bold',) },
        'HTTP_INFO': { 'opts': ('bold',) },
        'HTTP_SUCCESS': { },
        'HTTP_REDIRECT': { 'fg': 'green', 'opts': ('bold',) },
        'HTTP_NOT_MODIFIED': { 'fg': 'green' },
        'HTTP_BAD_REQUEST': { 'fg': 'red', 'opts': ('bold',) },
        'HTTP_NOT_FOUND': { 'fg': 'red' },
        'HTTP_SERVER_ERROR': { 'fg': 'magenta', 'opts': ('bold',) },
    }
}
DEFAULT_PALETTE = DARK_PALETTE
def parse_color_setting(config_string):
    """Parse a DJANGO_COLORS environment variable to produce the system palette

    The general form of a palette definition is:

        "palette;role=fg;role=fg/bg;role=fg,option,option;role=fg/bg,option,option"

    where:
        palette is a named palette; one of 'light', 'dark', or 'nocolor'.
        role is a named style used by Django
        fg is a foreground color.
        bg is a background color.
        option is a display option.

    Specifying a named palette is the same as manually specifying the individual
    definitions for each role. Any individual definitions following the palette
    definition will augment the base palette definition.

    Valid roles:
        'error', 'notice', 'sql_field', 'sql_coltype', 'sql_keyword', 'sql_table',
        'http_info', 'http_success', 'http_redirect', 'http_not_modified',
        'http_bad_request', 'http_not_found', 'http_server_error'

    Valid colors:
        'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'

    Valid options:
        'bold', 'underscore', 'blink', 'reverse', 'conceal'

    Returns None when the parsed configuration defines no colors at all.
    """
    if not config_string:
        return PALETTES[DEFAULT_PALETTE]
    # Split the color configuration into parts
    parts = config_string.lower().split(';')
    palette = PALETTES[NOCOLOR_PALETTE].copy()
    for part in parts:
        if part in PALETTES:
            # A default palette has been specified
            palette.update(PALETTES[part])
        elif '=' in part:
            # Process a palette defining string
            definition = {}
            # Break the definition into the role,
            # plus the list of specific instructions.
            # The role must be in upper case
            role, instructions = part.split('=')
            role = role.upper()
            styles = instructions.split(',')
            styles.reverse()
            # The first instruction can contain a slash
            # to break apart fg/bg.
            colors = styles.pop().split('/')
            colors.reverse()
            fg = colors.pop()
            if fg in color_names:
                definition['fg'] = fg
            if colors and colors[-1] in color_names:
                definition['bg'] = colors[-1]
            # All remaining instructions are options
            opts = tuple(s for s in styles if s in opt_dict.keys())
            if opts:
                definition['opts'] = opts
            # The nocolor palette has all available roles.
            # Use that palette as the basis for determining
            # if the role is valid.
            if role in PALETTES[NOCOLOR_PALETTE] and definition:
                palette[role] = definition
    # If there are no colors specified, return the empty palette.
    if palette == PALETTES[NOCOLOR_PALETTE]:
        return None
    return palette
| bsd-3-clause |
RobinKa/tfga | tests/test_keras.py | 1 | 11142 | import unittest as ut
from io import BytesIO
from tfga.layers import (
GeometricProductDense, GeometricSandwichProductDense,
GeometricProductElementwise, GeometricSandwichProductElementwise,
GeometricProductConv1D,
GeometricAlgebraExp,
GeometricToTensor, GeometricToTensorWithKind,
TensorToGeometric, TensorWithKindToGeometric,
)
from tfga.blades import BladeKind
from tfga import GeometricAlgebra
from tensorflow import keras as ks
import h5py
import tensorflow as tf
# Make tensorflow not take over the entire GPU memory.
# By default TF reserves all GPU memory at startup; with memory growth
# enabled, each visible GPU allocates only what it actually needs.
for gpu in tf.config.experimental.list_physical_devices('GPU'):
    tf.config.experimental.set_memory_growth(gpu, True)
class TestKerasLayers(ut.TestCase):
    """Unit tests for the tfga Keras layers (tensor conversion, dense and
    sandwich geometric products)."""
    def assertTensorsEqual(self, a, b):
        """Assert that tensors a and b are element-wise equal."""
        self.assertTrue(tf.reduce_all(a == b), "%s not equal to %s" % (a, b))
    def test_tensor_to_geometric(self):
        """TensorToGeometric places tensor values at the given blade indices."""
        sta = GeometricAlgebra([1, -1, -1, -1])
        tensor = tf.ones([32, 4])
        # Expected: values embedded at blades 1..4 of the 16 blades, zero elsewhere.
        gt_geom_tensor = tf.concat(
            [tf.zeros([32, 1]), tf.ones([32, 4]), tf.zeros([32, 11])],
            axis=-1
        )
        vector_blade_indices = [1, 2, 3, 4]
        tensor_to_geom_layer = TensorToGeometric(sta, vector_blade_indices)
        self.assertTensorsEqual(tensor_to_geom_layer(tensor), gt_geom_tensor)
    def test_tensor_with_kind_to_geometric(self):
        """TensorWithKindToGeometric embeds a tensor at blades of a given kind."""
        sta = GeometricAlgebra([1, -1, -1, -1])
        tensor = tf.ones([32, 4])
        gt_geom_tensor = tf.concat(
            [tf.zeros([32, 1]), tf.ones([32, 4]), tf.zeros([32, 11])],
            axis=-1
        )
        vector_blade_indices = [1, 2, 3, 4]
        tensor_kind_to_geom_layer = TensorWithKindToGeometric(
            sta, BladeKind.VECTOR)
        self.assertTensorsEqual(
            tensor_kind_to_geom_layer(tensor), gt_geom_tensor)
    def test_geometric_to_tensor(self):
        """GeometricToTensor extracts values at the given blade indices."""
        sta = GeometricAlgebra([1, -1, -1, -1])
        gt_tensor = tf.ones([32, 4])
        geom_tensor = tf.concat(
            [tf.zeros([32, 1]), tf.ones([32, 4]), tf.zeros([32, 11])],
            axis=-1
        )
        vector_blade_indices = [1, 2, 3, 4]
        geom_to_tensor_layer = GeometricToTensor(sta, vector_blade_indices)
        self.assertTensorsEqual(geom_to_tensor_layer(geom_tensor), gt_tensor)
    def test_geometric_to_tensor_with_kind(self):
        """GeometricToTensorWithKind extracts blades of the given kind."""
        sta = GeometricAlgebra([1, -1, -1, -1])
        gt_tensor = tf.ones([32, 4])
        geom_tensor = tf.concat(
            [tf.zeros([32, 1]), tf.ones([32, 4]), tf.zeros([32, 11])],
            axis=-1
        )
        vector_blade_indices = [1, 2, 3, 4]
        geom_to_tensor_kind_layer = GeometricToTensorWithKind(
            sta, BladeKind.VECTOR)
        self.assertTensorsEqual(
            geom_to_tensor_kind_layer(geom_tensor), gt_tensor)
    def test_geometric_product_dense_v_v(self):
        """Dense geometric product of vector inputs with a vector kernel stays
        within the scalar + vector + bivector blades."""
        sta = GeometricAlgebra([1, -1, -1, -1])
        geom_tensor = tf.concat(
            [tf.zeros([32, 6, 1]), tf.ones([32, 6, 4]), tf.zeros([32, 6, 11])],
            axis=-1
        )
        vector_blade_indices = [1, 2, 3, 4]
        geom_prod_layer = GeometricProductDense(
            sta, 8,
            blade_indices_kernel=vector_blade_indices,
            blade_indices_bias=vector_blade_indices,
            bias_initializer=tf.keras.initializers.RandomNormal()
        )
        result = geom_prod_layer(geom_tensor)
        # vector * vector + vector -> scalar + bivector + vector
        expected_result_indices = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertTrue(sta.is_pure(result, expected_result_indices))
    def test_geometric_product_dense_s_mv(self):
        """Dense product of scalar inputs with a multivector kernel yields a
        full (generically nonzero) multivector."""
        sta = GeometricAlgebra([1, -1, -1, -1])
        geom_tensor = tf.concat(
            [tf.ones([20, 6, 1]), tf.zeros([20, 6, 15])],
            axis=-1
        )
        mv_blade_indices = list(range(16))
        geom_prod_layer = GeometricProductDense(
            sta, 8,
            blade_indices_kernel=mv_blade_indices,
            blade_indices_bias=mv_blade_indices
        )
        result = geom_prod_layer(geom_tensor)
        # scalar * multivector + multivector -> multivector
        # Check that nothing is zero (it would be extremely unlikely
        # but not impossible to randomly get a zero here).
        self.assertTrue(tf.reduce_all(result != 0.0))
    def test_geometric_product_dense_sequence(self):
        """Conversion and product layers compose inside a Keras Sequential."""
        sta = GeometricAlgebra([1, -1, -1, -1])
        tensor = tf.ones([20, 6, 4])
        vector_blade_indices = [1, 2, 3, 4]
        mv_blade_indices = list(range(16))
        # vector * vector + vector -> scalar + bivector + vector
        scalar_bivector_blade_indices = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        sequence = tf.keras.Sequential([
            TensorToGeometric(sta, blade_indices=vector_blade_indices),
            GeometricProductDense(
                sta, 8,
                blade_indices_kernel=vector_blade_indices,
                blade_indices_bias=vector_blade_indices,
                bias_initializer=tf.keras.initializers.RandomNormal()
            ),
            GeometricToTensor(sta, blade_indices=scalar_bivector_blade_indices)
        ])
        result = sequence(tensor)
        self.assertEqual(result.shape[-1], len(scalar_bivector_blade_indices))
    def test_geometric_sandwich_product_dense_v_v(self):
        """Sandwich product of vector inputs with a vector kernel stays within
        the vector + trivector blades."""
        sta = GeometricAlgebra([1, -1, -1, -1])
        geom_tensor = tf.concat(
            [tf.zeros([32, 6, 1]), tf.ones([32, 6, 4]), tf.zeros([32, 6, 11])],
            axis=-1
        )
        vector_blade_indices = [1, 2, 3, 4]
        result_indices = tf.concat([
            sta.get_kind_blade_indices(BladeKind.VECTOR),
            sta.get_kind_blade_indices(BladeKind.TRIVECTOR)
        ], axis=0)
        geom_prod_layer = GeometricSandwichProductDense(
            sta, 8,
            blade_indices_kernel=vector_blade_indices,
            blade_indices_bias=result_indices,
            bias_initializer=tf.keras.initializers.RandomNormal()
        )
        result = geom_prod_layer(geom_tensor)
        # vector * vector * ~vector + vector -> vector + trivector
        self.assertTrue(sta.is_pure(result, result_indices))
class TestKerasLayersSerializable(ut.TestCase):
    """Round-trip save/load tests for the tfga Keras layers."""
    def assertTensorsEqual(self, a, b):
        """Assert that tensors a and b are element-wise equal."""
        self.assertTrue(tf.reduce_all(a == b), "%s not equal to %s" % (a, b))
    def _test_layer_serializable(self, layer, inputs):
        """Save a single-layer model to an in-memory HDF5 file, reload it,
        and check that outputs and the recreated algebra match the original."""
        # Create algebra
        algebra = layer.algebra
        # Create model
        model = tf.keras.Sequential([layer])
        # Predict on inputs to compare later
        model_output = model(inputs)
        # Serialize model to virtual file (BytesIO-backed, nothing on disk)
        model_file = h5py.File(BytesIO(), mode="w")
        model.save(model_file)
        # Load model from stream
        loaded_model = tf.keras.models.load_model(model_file)
        # Predict on same inputs as before
        loaded_output = loaded_model(inputs)
        # Check same output for original and loaded model
        self.assertTensorsEqual(model_output, loaded_output)
        # Check same recreated algebra
        self.assertTensorsEqual(
            algebra.metric, loaded_model.layers[0].algebra.metric)
        self.assertTensorsEqual(
            algebra.cayley, loaded_model.layers[0].algebra.cayley)
    def test_geom_dense_serializable(self):
        """GeometricProductDense survives serialization."""
        # Create algebra
        sta = GeometricAlgebra([1, -1, -1, -1])
        vector_blade_indices = [1, 2, 3, 4]
        mv_blade_indices = list(range(16))
        # Create model
        self._test_layer_serializable(GeometricProductDense(
            sta, units=8,
            blade_indices_kernel=mv_blade_indices,
            blade_indices_bias=vector_blade_indices
        ), tf.random.normal([3, 6, sta.num_blades], seed=0))
    def test_sandwich_dense_serializable(self):
        """GeometricSandwichProductDense survives serialization."""
        # Create algebra
        sta = GeometricAlgebra([1, -1, -1, -1])
        vector_blade_indices = [1, 2, 3, 4]
        mv_blade_indices = list(range(16))
        # Create model
        self._test_layer_serializable(GeometricSandwichProductDense(
            sta, units=8,
            blade_indices_kernel=mv_blade_indices,
            blade_indices_bias=vector_blade_indices
        ), tf.random.normal([3, 6, sta.num_blades], seed=0))
    def test_geom_elementwise_serializable(self):
        """GeometricProductElementwise survives serialization."""
        # Create algebra
        sta = GeometricAlgebra([1, -1, -1, -1])
        vector_blade_indices = [1, 2, 3, 4]
        mv_blade_indices = list(range(16))
        # Create model
        self._test_layer_serializable(GeometricProductElementwise(
            sta,
            blade_indices_kernel=mv_blade_indices,
            blade_indices_bias=vector_blade_indices
        ), tf.random.normal([3, 6, sta.num_blades], seed=0))
    def test_sandwich_elementwise_serializable(self):
        """GeometricSandwichProductElementwise survives serialization."""
        # Create algebra
        sta = GeometricAlgebra([1, -1, -1, -1])
        vector_blade_indices = [1, 2, 3, 4]
        mv_blade_indices = list(range(16))
        # Create model
        self._test_layer_serializable(GeometricSandwichProductElementwise(
            sta,
            blade_indices_kernel=mv_blade_indices,
            blade_indices_bias=vector_blade_indices
        ), tf.random.normal([3, 6, sta.num_blades], seed=0))
    def test_geom_prod_conv1d_serializable(self):
        """GeometricProductConv1D survives serialization."""
        # Create algebra
        sta = GeometricAlgebra([1, -1, -1, -1])
        vector_blade_indices = [1, 2, 3, 4]
        mv_blade_indices = list(range(16))
        # Create model
        self._test_layer_serializable(GeometricProductConv1D(
            sta, filters=8, kernel_size=3,
            padding="SAME", stride=2,
            blade_indices_kernel=mv_blade_indices,
            blade_indices_bias=vector_blade_indices
        ), tf.random.normal([3, 8, 4, sta.num_blades], seed=0))
    def test_tensor_to_geom_serializable(self):
        """TensorToGeometric survives serialization."""
        # Create algebra
        sta = GeometricAlgebra([1, -1, -1, -1])
        vector_blade_indices = [1, 2, 3, 4]
        # Create model
        self._test_layer_serializable(TensorToGeometric(
            sta, blade_indices=vector_blade_indices
        ), tf.random.normal([1, 2, 3, len(vector_blade_indices)], seed=0))
    def test_geom_to_tensor_serializable(self):
        """GeometricToTensor survives serialization."""
        # Create algebra
        sta = GeometricAlgebra([1, -1, -1, -1])
        vector_blade_indices = [1, 2, 3, 4]
        # Create model
        self._test_layer_serializable(GeometricToTensor(
            sta, blade_indices=vector_blade_indices
        ), tf.random.normal([1, 2, 3, sta.num_blades], seed=0))
    def test_geom_exp_serializable(self):
        """GeometricAlgebraExp survives serialization."""
        # Create algebra
        ga = GeometricAlgebra([1, 1, 1])
        inputs = ga.from_tensor_with_kind(
            tf.random.normal([3], seed=0), BladeKind.BIVECTOR
        )
        # Create model
        self._test_layer_serializable(GeometricAlgebraExp(
            ga
        ), inputs)
| mit |
damdam-s/OCB | addons/portal_project/project.py | 285 | 1809 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-TODAY OpenERP S.A (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
class portal_project(osv.Model):
    """Extend project.project with an extra 'portal' visibility option.

    NOTE(review): the original docstring here described mail_mail/signin
    URLs, which appears to be a copy-paste from another module; this class
    only extends the project visibility selection.
    """
    _inherit = 'project.project'
    def _get_visibility_selection(self, cr, uid, context=None):
        """ Override to add portal option. """
        selection = super(portal_project, self)._get_visibility_selection(cr, uid, context=context)
        # Insert the portal choice right after the 'public' entry so the
        # ordering of the existing selection values is preserved.
        idx = [item[0] for item in selection].index('public')
        selection.insert((idx + 1), ('portal', _('Customer related project: visible through portal')))
        return selection
        # return [('public', 'All Users'),
        #         ('portal', 'Portal Users and Employees'),
        #         ('employees', 'Employees Only'),
        #         ('followers', 'Followers Only')]
| agpl-3.0 |
kelle/astropy | docs/wcs/examples/programmatic.py | 4 | 1307 | # Set the WCS information manually by setting properties of the WCS
# object.
from __future__ import division, print_function
import numpy
from astropy import wcs
from astropy.io import fits
# Create a new WCS object. The number of axes must be set
# from the start
w = wcs.WCS(naxis=2)
# Set up an "Airy's zenithal" projection
# Vector properties may be set with Python lists, or Numpy arrays
w.wcs.crpix = [-234.75, 8.3393]
w.wcs.cdelt = numpy.array([-0.066667, 0.066667])
w.wcs.crval = [0, -90]
w.wcs.ctype = ["RA---AIR", "DEC--AIR"]
# Extra projection parameter PV2_1 = 45.0 for the AIR projection.
w.wcs.set_pv([(2, 1, 45.0)])
# Some pixel coordinates of interest.
pixcrd = numpy.array([[0, 0], [24, 38], [45, 98]], numpy.float_)
# Convert pixel coordinates to world coordinates.  The second argument is
# the coordinate origin: 1 selects FITS/Fortran-style 1-based pixels.
world = w.wcs_pix2world(pixcrd, 1)
print(world)
# Convert the same coordinates back to pixel coordinates.
pixcrd2 = w.wcs_world2pix(world, 1)
print(pixcrd2)
# These should be the same as the original pixel coordinates, modulo
# some floating-point error.
assert numpy.max(numpy.abs(pixcrd - pixcrd2)) < 1e-6
# Now, write out the WCS object as a FITS header
header = w.to_header()
# header is an astropy.io.fits.Header object.  We can use it to create a new
# PrimaryHDU and write it to a file.
hdu = fits.PrimaryHDU(header=header)
# Save to FITS file
# hdu.writeto('test.fits')
| bsd-3-clause |
lmazuel/ansible | lib/ansible/modules/network/eos/eos_banner.py | 31 | 5804 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module metadata and documentation strings consumed by ansible-doc and the
# Ansible plugin loader.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
# NOTE: the valid 'banner' choices must match the argument_spec in main()
# ('login' and 'motd'); the previous text incorrectly listed 'banner'.
DOCUMENTATION = """
---
module: eos_banner
version_added: "2.3"
author: "Peter Sprygada (@privateip)"
short_description: Manage multiline banners on Arista EOS devices
description:
  - This will configure both login and motd banners on remote devices
    running Arista EOS.  It allows playbooks to add or remove
    banner text from the active running configuration.
extends_documentation_fragment: eos
options:
  banner:
    description:
      - Specifies which banner that should be
        configured on the remote device.
    required: true
    default: null
    choices: ['login', 'motd']
  text:
    description:
      - The banner text that should be
        present in the remote device running configuration.  This argument
        accepts a multiline string. Requires I(state=present).
    default: null
  state:
    description:
      - Specifies whether or not the configuration is
        present in the current devices active running configuration.
    default: present
    choices: ['present', 'absent']
"""
EXAMPLES = """
- name: configure the login banner
  eos_banner:
    banner: login
    text: |
      this is my login banner
      that contains a multiline
      string
    state: present

- name: remove the motd banner
  eos_banner:
    banner: motd
    state: absent
"""
RETURN = """
commands:
  description: The list of configuration mode commands to send to the device
  returned: always
  type: list
  sample:
    - banner login
    - this is my login banner
    - that contains a multiline
    - string
    - EOF
session_name:
  description: The EOS config session name used to load the configuration
  returned: if changes
  type: str
  sample: ansible_1479315771
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.eos import load_config, run_commands
from ansible.module_utils.eos import eos_argument_spec, check_args
def map_obj_to_commands(updates, module):
    """Compute the device commands that turn ``have`` into ``want``.

    ``updates`` is a ``(want, have)`` pair of state dicts.  The returned
    list holds CLI command strings; for the eAPI transport it instead holds
    a single cmd/input dict describing the banner payload.
    """
    want, have = updates
    params = module.params
    banner_cmd = 'banner %s' % params['banner']
    commands = []
    if params['state'] == 'absent':
        # Only emit the removal when a banner is actually configured.
        if have.get('text'):
            commands.append('no ' + banner_cmd)
    elif params['state'] == 'present':
        if want['text'] and want['text'] != have.get('text'):
            if params['transport'] == 'cli':
                # The CLI takes the banner text line by line, ended by EOF.
                commands.append(banner_cmd)
                for line in want['text'].strip().split('\n'):
                    commands.append(line)
                commands.append('EOF')
            else:
                # For EAPI we need to construct a dict with cmd/input
                # key/values for the banner.
                commands.append({'cmd': banner_cmd,
                                 'input': want['text'].strip('\n')})
    return commands
def map_config_to_obj(module):
    """Read the currently configured banner from the device.

    Returns a dict holding the banner name, its text (when one is
    configured) and a 'state' of 'present' or 'absent'.
    """
    banner = module.params['banner']
    output = run_commands(module, ['show banner %s' % banner])
    obj = {'banner': banner, 'state': 'absent'}
    if output:
        response = output[0]
        if module.params['transport'] == 'cli':
            obj['text'] = response
        else:
            # On EAPI the banner text is nested under a response-specific key.
            key = 'loginBanner' if banner == 'login' else 'motd'
            if isinstance(response, dict) and key in response:
                obj['text'] = response[key].strip('\n')
        obj['state'] = 'present'
    return obj
def map_params_to_obj(module):
    """Normalize the module parameters into the desired-state dict."""
    params = module.params
    raw_text = params['text']
    # Strip surrounding whitespace from the banner text; leave falsy
    # values (None / empty string) untouched.
    normalized = str(raw_text).strip() if raw_text else raw_text
    return {
        'banner': params['banner'],
        'text': normalized,
        'state': params['state'],
    }
def main():
    """ main entry point for module execution
    """
    # Module argument schema; eos_argument_spec contributes the shared
    # EOS connection options.
    argument_spec = dict(
        banner=dict(required=True, choices=['login', 'motd']),
        text=dict(),
        state=dict(default='present', choices=['present', 'absent'])
    )
    argument_spec.update(eos_argument_spec)
    # 'text' is mandatory whenever the banner should be present.
    required_if = [('state', 'present', ('text',))]
    module = AnsibleModule(argument_spec=argument_spec,
                           required_if=required_if,
                           supports_check_mode=True)
    warnings = list()
    check_args(module, warnings)
    result = {'changed': False}
    if warnings:
        result['warnings'] = warnings
    # Diff desired state (module params) against device state to build
    # the command list.
    want = map_params_to_obj(module)
    have = map_config_to_obj(module)
    commands = map_obj_to_commands((want, have), module)
    result['commands'] = commands
    if commands:
        # In check mode the config session is loaded but never committed.
        commit = not module.check_mode
        response = load_config(module, commands, commit=commit)
        if response.get('diff') and module._diff:
            result['diff'] = {'prepared': response.get('diff')}
        result['session_name'] = response.get('session')
        result['changed'] = True
    module.exit_json(**result)
if __name__ == '__main__':
    main()
| gpl-3.0 |
rednaxelafx/apache-spark | examples/src/main/python/ml/tokenizer_example.py | 27 | 2044 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# $example on$
from pyspark.ml.feature import Tokenizer, RegexTokenizer
from pyspark.sql.functions import col, udf
from pyspark.sql.types import IntegerType
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
    spark = SparkSession\
        .builder\
        .appName("TokenizerExample")\
        .getOrCreate()
    # $example on$
    sentenceDataFrame = spark.createDataFrame([
        (0, "Hi I heard about Spark"),
        (1, "I wish Java could use case classes"),
        (2, "Logistic,regression,models,are,neat")
    ], ["id", "sentence"])
    # Tokenizer splits on whitespace; RegexTokenizer splits on the given
    # pattern (here: runs of non-word characters).
    tokenizer = Tokenizer(inputCol="sentence", outputCol="words")
    regexTokenizer = RegexTokenizer(inputCol="sentence", outputCol="words", pattern="\\W")
    # alternatively, pattern="\\w+", gaps(False)
    # UDF that counts the tokens produced for each sentence.
    countTokens = udf(lambda words: len(words), IntegerType())
    tokenized = tokenizer.transform(sentenceDataFrame)
    tokenized.select("sentence", "words")\
        .withColumn("tokens", countTokens(col("words"))).show(truncate=False)
    regexTokenized = regexTokenizer.transform(sentenceDataFrame)
    regexTokenized.select("sentence", "words") \
        .withColumn("tokens", countTokens(col("words"))).show(truncate=False)
    # $example off$
    spark.stop()
| apache-2.0 |
YannThorimbert/ThorPy-1.0 | thorpy/elements/_explorerutils/_pathelement.py | 3 | 2278 | import os
from thorpy.elements.text import OneLineText
from thorpy.elements.clickable import Clickable
from thorpy.miscgui.storage import h_store
from thorpy.miscgui import functions, style
class PathElement(OneLineText):
    """Breadcrumb-style display of a filesystem path for the explorer.

    Each path component is rendered as its own clickable element; clicking
    a component truncates the displayed path back to that component and
    refreshes the owning explorer ('father').
    """
    def __init__(self, father, abspath):
        # father: the explorer element that owns this path bar
        # abspath: when truthy, the path is normalized to an absolute path
        OneLineText.__init__(self)
        self.father = father
        self._path = father.path
        self.abspath = abspath
        self._n = None  # number of component elements currently displayed
        self._path_list = self._get_strs()
        self._path = "".join(self._path_list)
    def finish(self):
        """Finalize element construction (delegates to OneLineText)."""
        OneLineText.finish(self)
    def _get_strs(self):
        """Split the current path into its components, each suffixed with '/'."""
        if self.abspath:
            path = os.path.abspath(self._path)
        else:
            path = str(self._path)
        path = os.path.normpath(path)
        path = path.split(os.sep)
        path = [s+"/" for s in path]
        return path
    def _reaction_path(self, n):
        """Handle a click on component ``n``: truncate the path and redraw.

        Clicking the currently-last component (n == self._n) is a no-op.
        NOTE(review): _n is set to len(self._elements) in
        _set_path_elements, so the comparison is against the element
        count, not a component index — confirm the intended semantics.
        """
        if n != self._n:
            self._path_list = self._path_list[0:n+1]
            self._path = "".join(self._path_list)
            ycoord = self._elements[0].get_storer_rect().centery
            self._set_path_elements(ycoord)
            functions.refresh_current_menu()
            self.father._refresh_ddlf()
            self.father.unblit()
            self.father.blit()
            self.father.update()
    def _set_path_elements(self, ycoord=None):
        """Rebuild one Clickable per path component and lay them out in a row."""
        self.remove_all_elements()
        i = 0
        for s in self._path_list:
            e = Clickable(s)
            e.set_style("text")
            e.normal_params.params["font_size"] = style.PATH_FONT_SIZE
            e.press_params.params["font_size"] = style.PATH_FONT_SIZE
            e.finish()
            # Clicking the i-th component truncates the path back to it.
            e.user_func = self._reaction_path
            e.user_params = {"n" : i}
            e.set_jailed(self.father)
            e._lock_jail = True
            self.add_elements([e])
            i += 1
        father = self
        if self.father.is_finished():
            father = self.father
        wtot = h_store(father, self._elements, gap=0, xstart="auto", ycoord=ycoord)
        # If the row overflows the container, right-align it so the deepest
        # path components remain visible.
        if wtot > father.get_storer_rect().width:
            fr = father.get_storer_rect()
            h_store(father, self._elements, gap=0, ycoord=ycoord,
                    xstart=fr.right - wtot-2)
        self._n = len(self._elements)
| mit |
tntnatbry/tensorflow | tensorflow/contrib/layers/python/layers/embedding_ops_test.py | 12 | 23589 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""embedding_ops tests."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import sys
import numpy as np
from tensorflow.contrib.layers.python.layers import embedding_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.platform import test
class SafeEmbeddingLookupSparseTest(test.TestCase):
def _random_weights(self, vocab_size=4, embed_dim=4, num_shards=1):
assert vocab_size > 0
assert embed_dim > 0
assert num_shards > 0
assert num_shards <= vocab_size
embedding_weights = partitioned_variables.create_partitioned_variables(
shape=[vocab_size, embed_dim],
slicing=[num_shards, 1],
initializer=init_ops.truncated_normal_initializer(
mean=0.0, stddev=1.0 / math.sqrt(vocab_size), dtype=dtypes.float32))
for w in embedding_weights:
w.initializer.run()
embedding_weights = [w.eval() for w in embedding_weights]
return embedding_weights
def _ids_and_weights_2d(self):
# Each row demonstrates a test case:
# Row 0: multiple valid ids, 1 invalid id, weighted mean
# Row 1: all ids are invalid (leaving no valid ids after pruning)
# Row 2: no ids to begin with
# Row 3: single id
# Row 4: all ids have <=0 weight
indices = [[0, 0], [0, 1], [0, 2], [1, 0], [3, 0], [4, 0], [4, 1]]
ids = [0, 1, -1, -1, 2, 0, 1]
weights = [1.0, 2.0, 1.0, 1.0, 3.0, 0.0, -0.5]
shape = [5, 4]
sparse_ids = sparse_tensor_lib.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(ids, dtypes.int64),
constant_op.constant(shape, dtypes.int64))
sparse_weights = sparse_tensor_lib.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(weights, dtypes.float32),
constant_op.constant(shape, dtypes.int64))
return sparse_ids, sparse_weights
def _ids_and_weights_3d(self):
# Each (2-D) index demonstrates a test case:
# Index 0, 0: multiple valid ids, 1 invalid id, weighted mean
# Index 0, 1: all ids are invalid (leaving no valid ids after pruning)
# Index 0, 2: no ids to begin with
# Index 1, 0: single id
# Index 1, 1: all ids have <=0 weight
# Index 1, 2: no ids to begin with
indices = [[0, 0, 0], [0, 0, 1], [0, 0, 2], [0, 1, 0], [1, 0, 0], [1, 1, 0],
[1, 1, 1]]
ids = [0, 1, -1, -1, 2, 0, 1]
weights = [1.0, 2.0, 1.0, 1.0, 3.0, 0.0, -0.5]
shape = [2, 3, 4]
sparse_ids = sparse_tensor_lib.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(ids, dtypes.int64),
constant_op.constant(shape, dtypes.int64))
sparse_weights = sparse_tensor_lib.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(weights, dtypes.float32),
constant_op.constant(shape, dtypes.int64))
return sparse_ids, sparse_weights
def test_safe_embedding_lookup_sparse_return_zero_vector(self):
with self.test_session():
embedding_weights = self._random_weights()
sparse_ids, sparse_weights = self._ids_and_weights_2d()
embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
embedding_weights, sparse_ids, sparse_weights).eval())
self.assertAllClose(
embedding_lookup_result,
[(1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) /
3.0, [0] * 4, [0] * 4, embedding_weights[0][2], [0] * 4])
def test_safe_embedding_lookup_sparse_return_special_vector(self):
with self.test_session():
embedding_weights = self._random_weights()
sparse_ids, sparse_weights = self._ids_and_weights_2d()
embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
embedding_weights, sparse_ids, sparse_weights, default_id=3).eval())
self.assertAllClose(
embedding_lookup_result,
[(1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) /
3.0, embedding_weights[0][3], embedding_weights[0][3],
embedding_weights[0][2], embedding_weights[0][3]])
def test_safe_embedding_lookup_sparse_no_weights(self):
with self.test_session():
embedding_weights = self._random_weights()
sparse_ids, _ = self._ids_and_weights_2d()
embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
embedding_weights, sparse_ids, None).eval())
self.assertAllClose(
embedding_lookup_result,
[(embedding_weights[0][0] + embedding_weights[0][1]) / 2.0, [0] * 4,
[0] * 4, embedding_weights[0][2],
(embedding_weights[0][0] + embedding_weights[0][1]) / 2.0])
def test_safe_embedding_lookup_sparse_partitioned(self):
with self.test_session():
embedding_weights = self._random_weights(num_shards=3)
sparse_ids, _ = self._ids_and_weights_2d()
embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
embedding_weights, sparse_ids, None).eval())
embedding_weights = list(itertools.chain(*embedding_weights))
self.assertAllClose(embedding_lookup_result,
[(embedding_weights[0] + embedding_weights[1]) / 2.0,
[0] * 4, [0] * 4, embedding_weights[2],
(embedding_weights[0] + embedding_weights[1]) / 2.0])
  def test_safe_embedding_lookup_sparse_partitioned_inconsistent_weights(self):
    """Dtype-inconsistent inputs must be rejected with ValueError."""
    with self.test_session():
      embedding_weights = self._random_weights(num_shards=3)
      sparse_ids, sparse_weights = self._ids_and_weights_2d()
      # One float64 shard among float32 shards: inconsistent partitions.
      embedding_weights[1] = embedding_weights[1].astype(np.float64)
      self.assertRaises(ValueError, embedding_ops.safe_embedding_lookup_sparse,
                        embedding_weights, sparse_ids)
      # All-float64 weights should also be rejected — presumably because they
      # mismatch the dtype of sparse_weights (TODO confirm against
      # _ids_and_weights_2d).
      embedding_weights = [
          constant_op.constant(
              w, dtype=dtypes.float64) for w in embedding_weights
      ]
      self.assertRaises(ValueError, embedding_ops.safe_embedding_lookup_sparse,
                        embedding_weights, sparse_ids, sparse_weights)
  def test_safe_embedding_lookup_sparse_3d_return_zero_vector(self):
    """3-D lookup: rows without valid ids come back as zero vectors."""
    with self.test_session():
      embedding_weights = self._random_weights()
      sparse_ids, sparse_weights = self._ids_and_weights_3d()
      embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
          embedding_weights, sparse_ids, sparse_weights).eval())
      # First entry is the 1.0/2.0-weighted mean of ids 0 and 1; all other
      # positions without ids are zero-filled.
      self.assertAllClose(
          embedding_lookup_result,
          [[(1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) /
            3.0, [0] * 4, [0] * 4],
           [embedding_weights[0][2], [0] * 4, [0] * 4]])
  def test_safe_embedding_lookup_sparse_3d_return_special_vector(self):
    """3-D lookup: rows without valid ids resolve to the `default_id` row."""
    with self.test_session():
      embedding_weights = self._random_weights()
      sparse_ids, sparse_weights = self._ids_and_weights_3d()
      embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
          embedding_weights, sparse_ids, sparse_weights, default_id=3).eval())
      # Positions without contributing ids receive embedding_weights[0][3]
      # (the default_id row) instead of zeros.
      self.assertAllClose(
          embedding_lookup_result,
          [[(1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) /
            3.0, embedding_weights[0][3], embedding_weights[0][3]], [
                embedding_weights[0][2], embedding_weights[0][3],
                embedding_weights[0][3]
            ]])
  def test_safe_embedding_lookup_sparse_3d_no_weights(self):
    """3-D lookup with sparse_weights=None: ids combine by unweighted mean."""
    with self.test_session():
      embedding_weights = self._random_weights()
      sparse_ids, _ = self._ids_and_weights_3d()
      embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
          embedding_weights, sparse_ids, None).eval())
      # Positions with ids get the plain average of their embedding rows;
      # empty positions stay all-zero.
      self.assertAllClose(
          embedding_lookup_result,
          [[(embedding_weights[0][0] + embedding_weights[0][1]) / 2.0, [0] * 4,
            [0] * 4], [
                embedding_weights[0][2],
                (embedding_weights[0][0] + embedding_weights[0][1]) / 2.0,
                [0] * 4
            ]])
  def test_safe_embedding_lookup_sparse_3d_partitioned(self):
    """3-D lookup over sharded weights matches the unsharded expectation."""
    with self.test_session():
      embedding_weights = self._random_weights(num_shards=3)
      sparse_ids, _ = self._ids_and_weights_3d()
      embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
          embedding_weights, sparse_ids, None).eval())
      # Flatten the shard list so the expected rows can be indexed globally.
      embedding_weights = list(itertools.chain(*embedding_weights))
      self.assertAllClose(embedding_lookup_result,
                          [[(embedding_weights[0] + embedding_weights[1]) / 2.0,
                            [0] * 4, [0] * 4], [
                                embedding_weights[2],
                                (embedding_weights[0] + embedding_weights[1]) /
                                2.0, [0] * 4
                            ]])
  def test_safe_embedding_lookup_sparse_3d_partitioned_inconsistent_weights(
      self):
    """3-D variant: dtype-inconsistent inputs must raise ValueError."""
    with self.test_session():
      embedding_weights = self._random_weights(num_shards=3)
      sparse_ids, sparse_weights = self._ids_and_weights_3d()
      # One float64 shard among float32 shards: inconsistent partitions.
      embedding_weights[1] = embedding_weights[1].astype(np.float64)
      self.assertRaises(ValueError, embedding_ops.safe_embedding_lookup_sparse,
                        embedding_weights, sparse_ids)
      # All-float64 weights should also be rejected — presumably because they
      # mismatch the dtype of sparse_weights (TODO confirm against
      # _ids_and_weights_3d).
      embedding_weights = [
          constant_op.constant(
              w, dtype=dtypes.float64) for w in embedding_weights
      ]
      self.assertRaises(ValueError, embedding_ops.safe_embedding_lookup_sparse,
                        embedding_weights, sparse_ids, sparse_weights)
class ScatteredEmbeddingLookupTest(test.TestCase):
  """Tests for scattered_embedding_lookup*, which hash input values into a
  flat (optionally sharded) weight vector, and for embedding_lookup_unique."""

  def setUp(self):
    # Fixed graph-level seed so randomly initialized weights are reproducible.
    random_seed.set_random_seed(1)

  def _random_weights(self, size=50, num_shards=1):
    """Create a 1-D truncated-normal weight variable split into num_shards."""
    assert size > 0
    assert num_shards > 0
    assert num_shards <= size
    embedding_weights = partitioned_variables.create_partitioned_variables(
        shape=[size],
        slicing=[num_shards],
        initializer=init_ops.truncated_normal_initializer(
            mean=0.0, stddev=1.0, dtype=dtypes.float32))
    for w in embedding_weights:
      # Initialize each shard inside the enclosing test session.
      w.initializer.run()
    return embedding_weights

  def test_scattered_embedding_consistency(self):
    """Equal input values must hash to identical embeddings."""
    with self.test_session():
      embedding_weights = self._random_weights()
      values = constant_op.constant(["foo", "foo"])
      embedding_lookup_result = embedding_ops.scattered_embedding_lookup(
          embedding_weights, values, dimension=10).eval()
      self.assertAllEqual(embedding_lookup_result.shape, [2, 10])
      self.assertAllEqual(embedding_lookup_result[0],
                          embedding_lookup_result[1])

  def test_scattered_embedding_multiple_partition(self):
    """Lookups stay consistent when the weights span 7 shards."""
    with self.test_session():
      embedding_weights = self._random_weights(num_shards=7)
      values = constant_op.constant([4, 4, 5])
      embedding_lookup_result = embedding_ops.scattered_embedding_lookup(
          embedding_weights, values, dimension=5).eval()
      self.assertAllEqual(embedding_lookup_result.shape, [3, 5])
      self.assertAllEqual(embedding_lookup_result[0],
                          embedding_lookup_result[1])
      # Different embedding expected for different value.
      embedding_diff = np.min((embedding_lookup_result[2] -
                               embedding_lookup_result[0])**2)
      self.assertGreater(embedding_diff, 0)

  def test_scattered_embedding_coverage(self):
    """A large output dimension should touch every one of the `size` weights."""
    with self.test_session():
      size = 8
      embedding_weights = self._random_weights(size=size, num_shards=3)
      values = constant_op.constant(["foo"])
      # Large embedding dimension to cover the full range of weights.
      embedding_lookup_result = embedding_ops.scattered_embedding_lookup(
          embedding_weights, values, dimension=100).eval()
      self.assertEqual(len(np.unique(embedding_lookup_result[0])), size)

  def test_scattered_embedding_multi_dimension(self):
    """2-D input values produce a [2, 3, dimension] result, elementwise."""
    with self.test_session():
      embedding_weights = self._random_weights()
      values = constant_op.constant(
          [["foo", "bar", "bar"], ["bar", "bar", "foo"]])
      embedding_lookup_result = embedding_ops.scattered_embedding_lookup(
          embedding_weights, values, dimension=10).eval()
      self.assertAllEqual(embedding_lookup_result.shape, [2, 3, 10])
      # The same value ("foo") at different positions gets the same embedding.
      self.assertAllEqual(embedding_lookup_result[0][0],
                          embedding_lookup_result[1][2])

  def test_scattered_embedding_lookup_sparse(self):
    """Sparse rows combine with the requested combiner ("mean" here)."""
    with self.test_session():
      embedding_weights = self._random_weights(num_shards=3)
      sparse_tensor = sparse_tensor_lib.SparseTensor(
          values=["foo", "bar", "foo", "bar"],
          indices=[[0, 0], [1, 0], [1, 1], [3, 0]],
          dense_shape=[5, 2])
      embedding_lookup_result = (
          embedding_ops.scattered_embedding_lookup_sparse(
              embedding_weights, sparse_tensor, dimension=5, combiner="mean")
          .eval())
      self.assertAllEqual(embedding_lookup_result.shape, [5, 5])
      # Same non-zero embedding for the empty rows filled with a default value.
      self.assertAllEqual(embedding_lookup_result[2],
                          embedding_lookup_result[4])
      embedding_norm = np.sum(embedding_lookup_result[2]**2)
      self.assertGreater(embedding_norm, 0)
      # Row 1 holds both "foo" and "bar", so it must equal their mean.
      self.assertAllEqual(embedding_lookup_result[1], 0.5 * (
          embedding_lookup_result[0] + embedding_lookup_result[3]))

  def test_embedding_lookup_unique(self):
    """embedding_lookup_unique matches plain numpy fancy indexing."""
    d_embed = 5
    n_embed = 10
    idx_shape = (2, 3, 4)
    embeds = np.random.randn(n_embed, d_embed)
    idx = np.random.randint(0, n_embed, idx_shape)
    with self.test_session():
      embedded_np = embeds[idx]
      embedded_tf = embedding_ops.embedding_lookup_unique(embeds, idx).eval()
      self.assertEqual(embedded_np.shape, embedded_tf.shape)
      np.testing.assert_almost_equal(embedded_np, embedded_tf)

  def test_embedding_lookup_unique_param3d(self):
    """Works with 3-D params, with both a bare tensor and a 1-element list,
    and with 1-D as well as 2-D index arrays."""
    embeds = np.random.randn(5, 3, 3)
    idx = np.random.randint(0, 5, 10)
    idx2d = np.random.randint(0, 5, (10, 2))
    with self.test_session():
      embedded_np = embeds[idx]
      embedded_np2d = embeds[idx2d]
      embedded_tf = embedding_ops.embedding_lookup_unique(embeds, idx).eval()
      embedded_tf_lst = embedding_ops.embedding_lookup_unique([embeds],
                                                              idx).eval()
      embedded_tf2d = embedding_ops.embedding_lookup_unique(embeds,
                                                            idx2d).eval()
      self.assertEqual(embedded_np.shape, embedded_tf.shape)
      np.testing.assert_almost_equal(embedded_np, embedded_tf)
      self.assertEqual(embedded_np.shape, embedded_tf_lst.shape)
      np.testing.assert_almost_equal(embedded_np, embedded_tf_lst)
      self.assertEqual(embedded_np2d.shape, embedded_tf2d.shape)
      np.testing.assert_almost_equal(embedded_np2d, embedded_tf2d)
class SampledScatteredEmbeddingLookupTest(test.TestCase):
  """Tests for embedding_ops._sampled_scattered_embedding_lookup, which looks
  up hashed embeddings only at the given sampled_candidates positions."""

  def setUp(self):
    # Fixed graph seed plus a fixed hash key keep hashed lookups deterministic.
    random_seed.set_random_seed(1)
    self._hash_key = 1

  def _random_weights(self, size=50, num_shards=1):
    """Create a 1-D truncated-normal weight variable split into num_shards."""
    assert size > 0
    assert num_shards > 0
    assert num_shards <= size
    embedding_weights = partitioned_variables.create_partitioned_variables(
        shape=[size],
        slicing=[num_shards],
        initializer=init_ops.truncated_normal_initializer(
            mean=0.0, stddev=1.0, dtype=dtypes.float32))
    for w in embedding_weights:
      # Initialize each shard inside the enclosing test session.
      w.initializer.run()
    return embedding_weights

  def test_hashed_embedding_consistency(self):
    """Equal (value, candidate) pairs yield equal weights; differing
    candidates yield differing weights."""
    with self.test_session():
      embedding_weights = self._random_weights()
      values = constant_op.constant(["foo", "foo"])
      # The first three sampled_candidates are equal, so the first three
      # embedding weights will be equal.
      sampled_candidates = constant_op.constant([[1, 3, 4, 6], [1, 3, 4, 7]])
      embedding_lookup_result = (  # pylint: disable=protected-access
          embedding_ops._sampled_scattered_embedding_lookup(
              embedding_weights,
              values,
              sampled_candidates=sampled_candidates,
              hash_key=self._hash_key).eval())
      self.assertAllEqual(embedding_lookup_result.shape, [2, 4])
      self.assertAllEqual(embedding_lookup_result[0][:3],
                          embedding_lookup_result[1][:3])
      self.assertNotEqual(embedding_lookup_result[0][3],
                          embedding_lookup_result[1][3])

  def test_hashed_embedding_multi_dimension(self):
    """2-D values work elementwise; a candidate shape that disagrees with the
    values shape is rejected with InvalidArgumentError."""
    with self.test_session():
      embedding_weights = self._random_weights()
      values = constant_op.constant(
          [["foo", "bar", "bar"], ["bar", "bar", "foo"]])
      sampled_candidates = constant_op.constant(
          [[[1, 3, 4, 6], [1, 7, 8, 9], [1, 7, 8, 9]],
           [[1, 7, 8, 9], [1, 7, 8, 9], [1, 3, 4, 6]]])
      embedding_lookup_result = (  # pylint: disable=protected-access
          embedding_ops._sampled_scattered_embedding_lookup(
              embedding_weights,
              values,
              sampled_candidates=sampled_candidates,
              hash_key=self._hash_key).eval())
      self.assertAllEqual(embedding_lookup_result.shape, [2, 3, 4])
      # Same (value, candidates) pair at two positions → same embedding.
      self.assertAllEqual(embedding_lookup_result[0][0],
                          embedding_lookup_result[1][2])
      # Candidates shaped [2, 2, 4] cannot match values shaped [2, 3].
      invalid_indices = constant_op.constant([[[1, 3, 4, 6], [1, 7, 8, 9]],
                                              [[1, 7, 8, 9], [1, 7, 8, 9]]])
      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError, (
          r"\[The shape of sampled_candidates: \] \[2 2 4\] "
          r"\[ does not match the shape of values: \] \[2 3\]")):
        # pylint: disable=protected-access
        embedding_ops._sampled_scattered_embedding_lookup(
            embedding_weights, values,
            sampled_candidates=invalid_indices).eval()
class SampledScatteredEmbeddingLookupSparseTest(test.TestCase):
  """Tests for embedding_ops._sampled_scattered_embedding_lookup_sparse."""

  def setUp(self):
    # Fixed graph seed plus a fixed hash key keep hashed lookups deterministic.
    random_seed.set_random_seed(1)
    self._hash_key = 1

  def test_output_shape(self):
    """Verifies the shape of the output tensor."""
    with self.test_session():
      sp_values = sparse_tensor_lib.SparseTensor(
          values=["a", "a", "b", "c", "d", "e", "f"],
          indices=[[1, 0], [2, 0], [2, 1], [2, 2], [2, 3], [2, 4], [2, 5]],
          dense_shape=[3, 6])
      params = constant_op.constant([.1, .2, .3])
      # pylint: disable=protected-access
      result = embedding_ops._sampled_scattered_embedding_lookup_sparse(
          params, sp_values, dimension=4, hash_key=self._hash_key)
      # One row per dense row, `dimension` columns.
      self.assertEqual(result.eval().shape, (3, 4))

  def test_output_values(self):
    """Verifies the values in a trivial case."""
    with self.test_session():
      sp_values = sparse_tensor_lib.SparseTensor(
          values=["a"], indices=[[1, 0]], dense_shape=[3, 1])
      params = constant_op.constant([.1, .2, .3])
      # pylint: disable=protected-access
      result = embedding_ops._sampled_scattered_embedding_lookup_sparse(
          params, sp_values, dimension=5, hash_key=self._hash_key)
      # Only row 1 has a value; empty rows are all-zero.
      self.assertAllClose(result.eval(), [[0., 0., 0., 0., 0.],
                                          [.3, .2, .2, .3, .1],
                                          [0., 0., 0., 0., 0.]])

  def test_output_values_with_sampled_candidates(self):
    """Verifies the values for given sampled_candidates."""
    with self.test_session():
      sp_values = sparse_tensor_lib.SparseTensor(
          values=["a", "a", "b", "c", "d", "e", "f"],
          indices=[[1, 0], [2, 0], [2, 1], [2, 2], [2, 3], [2, 4], [2, 5]],
          dense_shape=[3, 6])
      params = constant_op.constant([.1, .2, .3])
      sampled_candidates = [[1, 0], [2, 1], [3, 2]]
      # pylint: disable=protected-access
      sampled_result = embedding_ops._sampled_scattered_embedding_lookup_sparse(
          params,
          sp_values,
          sampled_candidates=constant_op.constant(sampled_candidates),
          hash_key=self._hash_key)
      full_result = embedding_ops._sampled_scattered_embedding_lookup_sparse(
          params, sp_values, dimension=4, hash_key=self._hash_key)
      sampled_result_val = sampled_result.eval()
      full_result_val = full_result.eval()
      self.assertEqual(sampled_result_val.shape, (3, 2))
      # Each sampled row must equal the full result gathered at the
      # requested candidate columns.
      for i in range(len(sampled_candidates)):
        self.assertAllClose(sampled_result_val[i],
                            full_result_val[i, sampled_candidates[i]])

  def test_output_values_with_sign_hash(self):
    """Verifies the values in a trivial case with hash_signs=True."""
    with self.test_session():
      sp_values = sparse_tensor_lib.SparseTensor(
          values=["a"], indices=[[1, 0]], dense_shape=[3, 1])
      params = constant_op.constant([.1, .1, .1])
      # pylint: disable=protected-access
      result = embedding_ops._sampled_scattered_embedding_lookup_sparse(
          params,
          sp_values,
          dimension=4,
          with_sign_hash=True,
          hash_key=self._hash_key)
      # Sign hashing flips the sign of some components deterministically.
      self.assertAllClose(result.eval(), [[0., 0., 0., 0.], [-.1, -.1, -.1, .1],
                                          [0., 0., 0., 0.]])

  def test_distributive_property(self):
    """Verifies the distributive property of matrix multiplication."""
    with self.test_session():
      params = constant_op.constant([.1, .2, .3])
      sp_values_a = sparse_tensor_lib.SparseTensor(
          values=["a"], indices=[[0, 0]], dense_shape=[3, 1])
      sp_values_b = sparse_tensor_lib.SparseTensor(
          values=["b"], indices=[[2, 0]], dense_shape=[3, 1])
      sp_values_c = sparse_tensor_lib.SparseTensor(
          values=["c"], indices=[[2, 0]], dense_shape=[3, 1])
      sp_values = sparse_tensor_lib.SparseTensor(
          values=["a", "b", "c"],
          indices=[[0, 0], [2, 0], [2, 1]],
          dense_shape=[3, 2])
      # pylint: disable=protected-access
      result_a = embedding_ops._sampled_scattered_embedding_lookup_sparse(
          params, sp_values_a, dimension=4, hash_key=self._hash_key)
      result_b = embedding_ops._sampled_scattered_embedding_lookup_sparse(
          params, sp_values_b, dimension=4, hash_key=self._hash_key)
      result_c = embedding_ops._sampled_scattered_embedding_lookup_sparse(
          params, sp_values_c, dimension=4, hash_key=self._hash_key)
      result = embedding_ops._sampled_scattered_embedding_lookup_sparse(
          params, sp_values, dimension=4, hash_key=self._hash_key)
      # Looking up the union of the values must equal the sum of the
      # individual lookups.
      result_abc = math_ops.add_n([result_a, result_b, result_c])
      self.assertAllClose(result.eval(), result_abc.eval())
# Run every test case in this module when executed as a script.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
adedayo/intellij-community | python/lib/Lib/email/Header.py | 92 | 21738 | # Copyright (C) 2002-2006 Python Software Foundation
# Author: Ben Gertzfield, Barry Warsaw
# Contact: email-sig@python.org
"""Header encoding and decoding functionality."""
__all__ = [
'Header',
'decode_header',
'make_header',
]
import re
import binascii
import email.quoprimime
import email.base64mime
from email.errors import HeaderParseError
from email.charset import Charset
NL = '\n'
SPACE = ' '
USPACE = u' '
SPACE8 = ' ' * 8
UEMPTYSTRING = u''
MAXLINELEN = 76
USASCII = Charset('us-ascii')
UTF8 = Charset('utf-8')
# Match encoded-word strings in the form =?charset?q?Hello_World?=
ecre = re.compile(r'''
=\? # literal =?
(?P<charset>[^?]*?) # non-greedy up to the next ? is the charset
\? # literal ?
(?P<encoding>[qb]) # either a "q" or a "b", case insensitive
\? # literal ?
(?P<encoded>.*?) # non-greedy up to the next ?= is the encoded string
\?= # literal ?=
(?=[ \t]|$) # whitespace or the end of the string
''', re.VERBOSE | re.IGNORECASE | re.MULTILINE)
# Field name regexp, including trailing colon, but not separating whitespace,
# according to RFC 2822. Character range is from tilde to exclamation mark.
# For use with .match()
fcre = re.compile(r'[\041-\176]+:$')
# Helpers
_max_append = email.quoprimime._max_append
def decode_header(header):
    """Decode a message header value without converting charset.

    Returns a list of (decoded_string, charset) pairs containing each of the
    decoded parts of the header.  Charset is None for non-encoded parts of the
    header, otherwise a lower-case string containing the name of the character
    set specified in the encoded string.

    An email.Errors.HeaderParseError may be raised when certain decoding error
    occurs (e.g. a base64 decoding exception).
    """
    # Fast path: no RFC 2047 encoded-words anywhere in the header.
    text = str(header)
    if not ecre.search(text):
        return [(text, None)]

    result = []
    for line in text.splitlines():
        if not ecre.search(line):
            # Plain continuation line, no encoded-words on it at all.
            result.append((line, None))
            continue
        # ecre.split yields [plain, charset, encoding, payload, plain, ...]
        pieces = ecre.split(line)
        while pieces:
            plain = pieces.pop(0).strip()
            if plain:
                # Merge consecutive unencoded fragments into one entry,
                # separated by a single space.
                if result and result[-1][1] is None:
                    result[-1] = (result[-1][0] + SPACE + plain, None)
                else:
                    result.append((plain, None))
            if not pieces:
                break
            charset, encoding = [token.lower() for token in pieces[:2]]
            payload = pieces[2]
            decoded_text = None
            if encoding == 'q':
                decoded_text = email.quoprimime.header_decode(payload)
            elif encoding == 'b':
                try:
                    decoded_text = email.base64mime.decode(payload)
                except binascii.Error:
                    # Turn this into a higher level exception.  The lower
                    # level exception is discarded here.
                    raise HeaderParseError
            if decoded_text is None:
                # Unknown encoding letter: keep the payload verbatim.
                decoded_text = payload
            # Merge with the previous entry when the charset repeats.
            if result and result[-1][1] == charset:
                result[-1] = (result[-1][0] + decoded_text, charset)
            else:
                result.append((decoded_text, charset))
            del pieces[0:3]
    return result
def make_header(decoded_seq, maxlinelen=None, header_name=None,
                continuation_ws=' '):
    """Create a Header from a sequence of pairs as returned by decode_header()

    decode_header() takes a header value string and returns a sequence of
    pairs of the format (decoded_string, charset) where charset is the string
    name of the character set.

    This function takes one of those sequence of pairs and returns a Header
    instance.  Optional maxlinelen, header_name, and continuation_ws are as in
    the Header constructor.
    """
    header = Header(maxlinelen=maxlinelen, header_name=header_name,
                    continuation_ws=continuation_ws)
    for string, charset in decoded_seq:
        # A charset of None means us-ascii; Header.append() handles it, so
        # only coerce non-None charset names into Charset instances.
        if charset is not None and not isinstance(charset, Charset):
            charset = Charset(charset)
        header.append(string, charset)
    return header
class Header:
    """A MIME header value that may mix several character sets (RFC 2047)."""

    def __init__(self, s=None, charset=None,
                 maxlinelen=None, header_name=None,
                 continuation_ws=' ', errors='strict'):
        """Create a MIME-compliant header that can contain many character sets.

        Optional s is the initial header value.  If None, the initial header
        value is not set.  You can later append to the header with .append()
        method calls.  s may be a byte string or a Unicode string, but see the
        .append() documentation for semantics.

        Optional charset serves two purposes: it has the same meaning as the
        charset argument to the .append() method.  It also sets the default
        character set for all subsequent .append() calls that omit the charset
        argument.  If charset is not provided in the constructor, the us-ascii
        charset is used both as s's initial charset and as the default for
        subsequent .append() calls.

        The maximum line length can be specified explicit via maxlinelen.  For
        splitting the first line to a shorter value (to account for the field
        header which isn't included in s, e.g. `Subject') pass in the name of
        the field in header_name.  The default maxlinelen is 76.

        continuation_ws must be RFC 2822 compliant folding whitespace (usually
        either a space or a hard tab) which will be prepended to continuation
        lines.

        errors is passed through to the .append() call.
        """
        if charset is None:
            charset = USASCII
        if not isinstance(charset, Charset):
            charset = Charset(charset)
        self._charset = charset
        self._continuation_ws = continuation_ws
        # Tabs count as 8 columns when computing line-length budgets.
        cws_expanded_len = len(continuation_ws.replace('\t', SPACE8))
        # BAW: I believe `chunks' and `maxlinelen' should be non-public.
        self._chunks = []
        if s is not None:
            self.append(s, charset, errors)
        if maxlinelen is None:
            maxlinelen = MAXLINELEN
        if header_name is None:
            # We don't know anything about the field header so the first line
            # is the same length as subsequent lines.
            self._firstlinelen = maxlinelen
        else:
            # The first line should be shorter to take into account the field
            # header.  Also subtract off 2 extra for the colon and space.
            self._firstlinelen = maxlinelen - len(header_name) - 2
        # Second and subsequent lines should subtract off the length in
        # columns of the continuation whitespace prefix.
        self._maxlinelen = maxlinelen - cws_expanded_len

    def __str__(self):
        """A synonym for self.encode()."""
        return self.encode()

    def __unicode__(self):
        """Helper for the built-in unicode function."""
        uchunks = []
        lastcs = None
        for s, charset in self._chunks:
            # We must preserve spaces between encoded and non-encoded word
            # boundaries, which means for us we need to add a space when we go
            # from a charset to None/us-ascii, or from None/us-ascii to a
            # charset.  Only do this for the second and subsequent chunks.
            nextcs = charset
            if uchunks:
                if lastcs not in (None, 'us-ascii'):
                    if nextcs in (None, 'us-ascii'):
                        uchunks.append(USPACE)
                        nextcs = None
                elif nextcs not in (None, 'us-ascii'):
                    uchunks.append(USPACE)
            lastcs = nextcs
            uchunks.append(unicode(s, str(charset)))
        return UEMPTYSTRING.join(uchunks)

    # Rich comparison operators for equality only.  BAW: does it make sense to
    # have or explicitly disable <, <=, >, >= operators?
    def __eq__(self, other):
        # other may be a Header or a string.  Both are fine so coerce
        # ourselves to a string, swap the args and do another comparison.
        return other == self.encode()

    def __ne__(self, other):
        return not self == other

    def append(self, s, charset=None, errors='strict'):
        """Append a string to the MIME header.

        Optional charset, if given, should be a Charset instance or the name
        of a character set (which will be converted to a Charset instance).  A
        value of None (the default) means that the charset given in the
        constructor is used.

        s may be a byte string or a Unicode string.  If it is a byte string
        (i.e. isinstance(s, str) is true), then charset is the encoding of
        that byte string, and a UnicodeError will be raised if the string
        cannot be decoded with that charset.  If s is a Unicode string, then
        charset is a hint specifying the character set of the characters in
        the string.  In this case, when producing an RFC 2822 compliant header
        using RFC 2047 rules, the Unicode string will be encoded using the
        following charsets in order: us-ascii, the charset hint, utf-8.  The
        first character set not to provoke a UnicodeError is used.

        Optional `errors' is passed as the third argument to any unicode() or
        ustr.encode() call.
        """
        if charset is None:
            charset = self._charset
        elif not isinstance(charset, Charset):
            charset = Charset(charset)
        # If the charset is our faux 8bit charset, leave the string unchanged
        if charset <> '8bit':
            # We need to test that the string can be converted to unicode and
            # back to a byte string, given the input and output codecs of the
            # charset.
            if isinstance(s, str):
                # Possibly raise UnicodeError if the byte string can't be
                # converted to a unicode with the input codec of the charset.
                incodec = charset.input_codec or 'us-ascii'
                ustr = unicode(s, incodec, errors)
                # Now make sure that the unicode could be converted back to a
                # byte string with the output codec, which may be different
                # than the iput coded.  Still, use the original byte string.
                outcodec = charset.output_codec or 'us-ascii'
                ustr.encode(outcodec, errors)
            elif isinstance(s, unicode):
                # Now we have to be sure the unicode string can be converted
                # to a byte string with a reasonable output codec.  We want to
                # use the byte string in the chunk.
                for charset in USASCII, charset, UTF8:
                    try:
                        outcodec = charset.output_codec or 'us-ascii'
                        s = s.encode(outcodec, errors)
                        break
                    except UnicodeError:
                        pass
                else:
                    assert False, 'utf-8 conversion failed'
        self._chunks.append((s, charset))

    def _split(self, s, charset, maxlinelen, splitchars):
        # Split up a header safely for use with encode_chunks.
        splittable = charset.to_splittable(s)
        encoded = charset.from_splittable(splittable, True)
        elen = charset.encoded_header_len(encoded)
        # If the line's encoded length first, just return it
        if elen <= maxlinelen:
            return [(encoded, charset)]
        # If we have undetermined raw 8bit characters sitting in a byte
        # string, we really don't know what the right thing to do is.  We
        # can't really split it because it might be multibyte data which we
        # could break if we split it between pairs.  The least harm seems to
        # be to not split the header at all, but that means they could go out
        # longer than maxlinelen.
        if charset == '8bit':
            return [(s, charset)]
        # BAW: I'm not sure what the right test here is.  What we're trying to
        # do is be faithful to RFC 2822's recommendation that ($2.2.3):
        #
        # "Note: Though structured field bodies are defined in such a way that
        #  folding can take place between many of the lexical tokens (and even
        #  within some of the lexical tokens), folding SHOULD be limited to
        #  placing the CRLF at higher-level syntactic breaks."
        #
        # For now, I can only imagine doing this when the charset is us-ascii,
        # although it's possible that other charsets may also benefit from the
        # higher-level syntactic breaks.
        elif charset == 'us-ascii':
            return self._split_ascii(s, charset, maxlinelen, splitchars)
        # BAW: should we use encoded?
        elif elen == len(s):
            # We can split on _maxlinelen boundaries because we know that the
            # encoding won't change the size of the string
            splitpnt = maxlinelen
            first = charset.from_splittable(splittable[:splitpnt], False)
            last = charset.from_splittable(splittable[splitpnt:], False)
        else:
            # Binary search for split point
            first, last = _binsplit(splittable, charset, maxlinelen)
        # first is of the proper length so just wrap it in the appropriate
        # chrome.  last must be recursively split.
        fsplittable = charset.to_splittable(first)
        fencoded = charset.from_splittable(fsplittable, True)
        chunk = [(fencoded, charset)]
        return chunk + self._split(last, charset, self._maxlinelen, splitchars)

    def _split_ascii(self, s, charset, firstlen, splitchars):
        # Delegate to the module-level helper, then pair every resulting
        # line with this charset.
        chunks = _split_ascii(s, firstlen, self._maxlinelen,
                              self._continuation_ws, splitchars)
        return zip(chunks, [charset]*len(chunks))

    def _encode_chunks(self, newchunks, maxlinelen):
        # MIME-encode a header with many different charsets and/or encodings.
        #
        # Given a list of pairs (string, charset), return a MIME-encoded
        # string suitable for use in a header field.  Each pair may have
        # different charsets and/or encodings, and the resulting header will
        # accurately reflect each setting.
        #
        # Each encoding can be email.Utils.QP (quoted-printable, for
        # ASCII-like character sets like iso-8859-1), email.Utils.BASE64
        # (Base64, for non-ASCII like character sets like KOI8-R and
        # iso-2022-jp), or None (no encoding).
        #
        # Each pair will be represented on a separate line; the resulting
        # string will be in the format:
        #
        # =?charset1?q?Mar=EDa_Gonz=E1lez_Alonso?=\n
        #  =?charset2?b?SvxyZ2VuIEL2aW5n?="
        chunks = []
        for header, charset in newchunks:
            if not header:
                continue
            if charset is None or charset.header_encoding is None:
                s = header
            else:
                s = charset.header_encode(header)
            # Don't add more folding whitespace than necessary
            if chunks and chunks[-1].endswith(' '):
                extra = ''
            else:
                extra = ' '
            _max_append(chunks, s, maxlinelen, extra)
        joiner = NL + self._continuation_ws
        return joiner.join(chunks)

    def encode(self, splitchars=';, '):
        """Encode a message header into an RFC-compliant format.

        There are many issues involved in converting a given string for use in
        an email header.  Only certain character sets are readable in most
        email clients, and as header strings can only contain a subset of
        7-bit ASCII, care must be taken to properly convert and encode (with
        Base64 or quoted-printable) header strings.  In addition, there is a
        75-character length limit on any given encoded header field, so
        line-wrapping must be performed, even with double-byte character sets.

        This method will do its best to convert the string to the correct
        character set used in email, and encode and line wrap it safely with
        the appropriate scheme for that character set.

        If the given charset is not known or an error occurs during
        conversion, this function will return the header untouched.

        Optional splitchars is a string containing characters to split long
        ASCII lines on, in rough support of RFC 2822's `highest level
        syntactic breaks'.  This doesn't affect RFC 2047 encoded lines.
        """
        newchunks = []
        maxlinelen = self._firstlinelen
        lastlen = 0
        for s, charset in self._chunks:
            # The first bit of the next chunk should be just long enough to
            # fill the next line.  Don't forget the space separating the
            # encoded words.
            targetlen = maxlinelen - lastlen - 1
            if targetlen < charset.encoded_header_len(''):
                # Stick it on the next line
                targetlen = maxlinelen
            newchunks += self._split(s, charset, targetlen, splitchars)
            lastchunk, lastcharset = newchunks[-1]
            lastlen = lastcharset.encoded_header_len(lastchunk)
        return self._encode_chunks(newchunks, maxlinelen)
def _split_ascii(s, firstlen, restlen, continuation_ws, splitchars):
    """Fold an ASCII header string into a list of lines.

    firstlen is the maximum length of the first line; restlen applies to every
    subsequent line (which the caller will prefix with continuation_ws).
    splitchars lists candidate break characters in priority order.
    """
    lines = []
    maxlen = firstlen
    for line in s.splitlines():
        # Ignore any leading whitespace (i.e. continuation whitespace) already
        # on the line, since we'll be adding our own.
        line = line.lstrip()
        if len(line) < maxlen:
            lines.append(line)
            maxlen = restlen
            continue
        # Attempt to split the line at the highest-level syntactic break
        # possible.  Note that we don't have a lot of smarts about field
        # syntax; we just try to break on semi-colons, then commas, then
        # whitespace.
        for ch in splitchars:
            if ch in line:
                break
        else:
            # There's nothing useful to split the line on, not even spaces, so
            # just append this line unchanged
            lines.append(line)
            maxlen = restlen
            continue
        # Now split the line on the character plus trailing whitespace
        cre = re.compile(r'%s\s*' % ch)
        if ch in ';,':
            eol = ch
        else:
            eol = ''
        joiner = eol + ' '
        joinlen = len(joiner)
        wslen = len(continuation_ws.replace('\t', SPACE8))
        this = []
        linelen = 0
        for part in cre.split(line):
            # Cost of the parts collected so far plus the joiners between
            # them.
            curlen = linelen + max(0, len(this)-1) * joinlen
            partlen = len(part)
            onfirstline = not lines
            # We don't want to split after the field name, if we're on the
            # first line and the field name is present in the header string.
            if ch == ' ' and onfirstline and \
                   len(this) == 1 and fcre.match(this[0]):
                this.append(part)
                linelen += partlen
            elif curlen + partlen > maxlen:
                # This part would overflow: flush what we have and start a
                # fresh line with it.
                if this:
                    lines.append(joiner.join(this) + eol)
                # If this part is longer than maxlen and we aren't already
                # splitting on whitespace, try to recursively split this line
                # on whitespace.
                if partlen > maxlen and ch <> ' ':
                    subl = _split_ascii(part, maxlen, restlen,
                                        continuation_ws, ' ')
                    lines.extend(subl[:-1])
                    this = [subl[-1]]
                else:
                    this = [part]
                linelen = wslen + len(this[-1])
                maxlen = restlen
            else:
                this.append(part)
                linelen += partlen
        # Put any left over parts on a line by themselves
        if this:
            lines.append(joiner.join(this))
    return lines
def _binsplit(splittable, charset, maxlinelen):
i = 0
j = len(splittable)
while i < j:
# Invariants:
# 1. splittable[:k] fits for all k <= i (note that we *assume*,
# at the start, that splittable[:0] fits).
# 2. splittable[:k] does not fit for any k > j (at the start,
# this means we shouldn't look at any k > len(splittable)).
# 3. We don't know about splittable[:k] for k in i+1..j.
# 4. We want to set i to the largest k that fits, with i <= k <= j.
#
m = (i+j+1) >> 1 # ceiling((i+j)/2); i < m <= j
chunk = charset.from_splittable(splittable[:m], True)
chunklen = charset.encoded_header_len(chunk)
if chunklen <= maxlinelen:
# m is acceptable, so is a new lower bound.
i = m
else:
# m is not acceptable, so final i must be < m.
j = m - 1
# i == j. Invariant #1 implies that splittable[:i] fits, and
# invariant #2 implies that splittable[:i+1] does not fit, so i
# is what we're looking for.
first = charset.from_splittable(splittable[:i], False)
last = charset.from_splittable(splittable[i:], False)
return first, last
| apache-2.0 |
Collin-V/TeamTalk | win-client/3rdParty/src/json/scons-tools/srcdist.py | 264 | 5224 | import os
import os.path
from fnmatch import fnmatch
import targz
##def DoxyfileParse(file_contents):
## """
## Parse a Doxygen source file and return a dictionary of all the values.
## Values will be strings and lists of strings.
## """
## data = {}
##
## import shlex
## lex = shlex.shlex(instream = file_contents, posix = True)
## lex.wordchars += "*+./-:"
## lex.whitespace = lex.whitespace.replace("\n", "")
## lex.escape = ""
##
## lineno = lex.lineno
## last_backslash_lineno = lineno
## token = lex.get_token()
## key = token # the first token should be a key
## last_token = ""
## key_token = False
## next_key = False
## new_data = True
##
## def append_data(data, key, new_data, token):
## if new_data or len(data[key]) == 0:
## data[key].append(token)
## else:
## data[key][-1] += token
##
## while token:
## if token in ['\n']:
## if last_token not in ['\\']:
## key_token = True
## elif token in ['\\']:
## pass
## elif key_token:
## key = token
## key_token = False
## else:
## if token == "+=":
## if not data.has_key(key):
## data[key] = list()
## elif token == "=":
## data[key] = list()
## else:
## append_data( data, key, new_data, token )
## new_data = True
##
## last_token = token
## token = lex.get_token()
##
## if last_token == '\\' and token != '\n':
## new_data = False
## append_data( data, key, new_data, '\\' )
##
## # compress lists of len 1 into single strings
## for (k, v) in data.items():
## if len(v) == 0:
## data.pop(k)
##
## # items in the following list will be kept as lists and not converted to strings
## if k in ["INPUT", "FILE_PATTERNS", "EXCLUDE_PATTERNS"]:
## continue
##
## if len(v) == 1:
## data[k] = v[0]
##
## return data
##
##def DoxySourceScan(node, env, path):
## """
## Doxygen Doxyfile source scanner. This should scan the Doxygen file and add
## any files used to generate docs to the list of source files.
## """
## default_file_patterns = [
## '*.c', '*.cc', '*.cxx', '*.cpp', '*.c++', '*.java', '*.ii', '*.ixx',
## '*.ipp', '*.i++', '*.inl', '*.h', '*.hh ', '*.hxx', '*.hpp', '*.h++',
## '*.idl', '*.odl', '*.cs', '*.php', '*.php3', '*.inc', '*.m', '*.mm',
## '*.py',
## ]
##
## default_exclude_patterns = [
## '*~',
## ]
##
## sources = []
##
## data = DoxyfileParse(node.get_contents())
##
## if data.get("RECURSIVE", "NO") == "YES":
## recursive = True
## else:
## recursive = False
##
## file_patterns = data.get("FILE_PATTERNS", default_file_patterns)
## exclude_patterns = data.get("EXCLUDE_PATTERNS", default_exclude_patterns)
##
## for node in data.get("INPUT", []):
## if os.path.isfile(node):
## sources.add(node)
## elif os.path.isdir(node):
## if recursive:
## for root, dirs, files in os.walk(node):
## for f in files:
## filename = os.path.join(root, f)
##
## pattern_check = reduce(lambda x, y: x or bool(fnmatch(filename, y)), file_patterns, False)
## exclude_check = reduce(lambda x, y: x and fnmatch(filename, y), exclude_patterns, True)
##
## if pattern_check and not exclude_check:
## sources.append(filename)
## else:
## for pattern in file_patterns:
## sources.extend(glob.glob("/".join([node, pattern])))
## sources = map( lambda path: env.File(path), sources )
## return sources
##
##
##def DoxySourceScanCheck(node, env):
## """Check if we should scan this file"""
## return os.path.isfile(node.path)
def srcDistEmitter(source, target, env):
    """SCons emitter for the SrcDist builder.

    The tar.gz builder needs no targets or sources beyond what the caller
    supplies, so both lists are passed through unchanged.

    (The ~30 lines of commented-out Doxygen-emitter code that previously
    lived in this body were dead reference material and have been removed.)
    """
    return (target, source)
def generate(env):
    """Register the SrcDist builder on *env*.

    Does nothing when the targz tool is unavailable in this environment.
    (A Doxyfile source scanner used to be wired up here; it remains
    disabled.)
    """
    if not targz.exists(env):
        return
    env['BUILDERS']['SrcDist'] = targz.makeBuilder(srcDistEmitter)
def exists(env):
    """Report whether this tool can operate, i.e. whether targz is usable."""
    return targz.exists(env)
| apache-2.0 |
vaskalas/aiohttp | tests/test_web_cli.py | 4 | 3367 | import pytest
from aiohttp import web
from unittest import mock
@mock.patch("aiohttp.web.ArgumentParser.error", side_effect=SystemExit)
def test_entry_func_empty(error):
argv = [""]
with pytest.raises(SystemExit):
web.main(argv)
error.assert_called_with(
"'entry-func' not in 'module:function' syntax"
)
@mock.patch("aiohttp.web.ArgumentParser.error", side_effect=SystemExit)
def test_entry_func_only_module(error):
argv = ["test"]
with pytest.raises(SystemExit):
web.main(argv)
error.assert_called_with(
"'entry-func' not in 'module:function' syntax"
)
@mock.patch("aiohttp.web.ArgumentParser.error", side_effect=SystemExit)
def test_entry_func_only_function(error):
argv = [":test"]
with pytest.raises(SystemExit):
web.main(argv)
error.assert_called_with(
"'entry-func' not in 'module:function' syntax"
)
@mock.patch("aiohttp.web.ArgumentParser.error", side_effect=SystemExit)
def test_entry_func_only_seperator(error):
argv = [":"]
with pytest.raises(SystemExit):
web.main(argv)
error.assert_called_with(
"'entry-func' not in 'module:function' syntax"
)
@mock.patch("aiohttp.web.ArgumentParser.error", side_effect=SystemExit)
def test_entry_func_relative_module(error):
argv = [".a.b:c"]
with pytest.raises(SystemExit):
web.main(argv)
error.assert_called_with("relative module names not supported")
@mock.patch("aiohttp.web.import_module", side_effect=ImportError)
@mock.patch("aiohttp.web.ArgumentParser.error", side_effect=SystemExit)
def test_entry_func_non_existent_module(error, import_module):
argv = ["alpha.beta:func"]
with pytest.raises(SystemExit):
web.main(argv)
error.assert_called_with("module %r not found" % "alpha.beta")
@mock.patch("aiohttp.web.import_module")
@mock.patch("aiohttp.web.ArgumentParser.error", side_effect=SystemExit)
def test_entry_func_non_existent_attribute(error, import_module):
argv = ["alpha.beta:func"]
module = import_module("alpha.beta")
del module.func
with pytest.raises(SystemExit):
web.main(argv)
error.assert_called_with(
"module %r has no attribute %r" % ("alpha.beta", "func")
)
@mock.patch("aiohttp.web.run_app")
@mock.patch("aiohttp.web.import_module")
def test_entry_func_call(import_module, run_app):
argv = ("-H testhost -P 6666 --extra-optional-eins alpha.beta:func "
"--extra-optional-zwei extra positional args").split()
module = import_module("alpha.beta")
with pytest.raises(SystemExit):
web.main(argv)
module.func.assert_called_with(
("--extra-optional-eins --extra-optional-zwei extra positional "
"args").split()
)
@mock.patch("aiohttp.web.run_app")
@mock.patch("aiohttp.web.import_module")
@mock.patch("aiohttp.web.ArgumentParser.exit", side_effect=SystemExit)
def test_running_application(exit, import_module, run_app):
argv = ("-H testhost -P 6666 --extra-optional-eins alpha.beta:func "
"--extra-optional-zwei extra positional args").split()
module = import_module("alpha.beta")
app = module.func()
with pytest.raises(SystemExit):
web.main(argv)
run_app.assert_called_with(app, host="testhost", port=6666)
exit.assert_called_with(message="Stopped\n")
| apache-2.0 |
cuauv/software | serial/cli/configure.py | 1 | 1455 | #!/usr/bin/env python3
from build import ninja_common
# Build configuration for the auv-serial command-line tool.
build = ninja_common.Build('serial/cli')
# Generate the protobuf C++ sources/headers from the .proto definition
# before anything that includes them is compiled.
build.generate(['program/loader/proto/cpp/AUVFirmware.pb.cc', 'program/loader/proto/cpp/AUVFirmware.pb.h'],
               'serial/cli/program/loader/build_proto.sh',
               ['program/loader/proto/AUVFirmware.proto'])
# Statically linked CLI binary; `implicit` forces the generated protobuf
# header to exist before these sources compile.
build.build_cmd_with_static('auv-serial',
                            [
                             'reset/reset.cpp',
                             'info/info.cpp',
                             'simpled/simpled.cpp',
                             'fakedev/fakedev.cpp',
                             'fakedev/FakeDevice.cpp',
                             'fakedev/DeviceTalker.cpp',
                             'program/loader/MemoryImage.cpp',
                             'program/loader/hex.cpp',
                             'program/loader/AUVFirmware.cpp',
                             'program/loader/auvfw.cpp',
                             'program/loader/proto/cpp/AUVFirmware.pb.cc',
                             'program/dumpFW.cpp',
                             'program/buildFW.cpp',
                             'program/mergeHex.cpp',
                             'program/flash.cpp',
                             'program/BootTalker.cpp',
                             'Command.cpp',
                             'main.cpp',
                            ],
                            implicit=['serial/cli/program/loader/proto/cpp/AUVFirmware.pb.h'],
                            cflags=['-Wall', '-Wextra', '-Werror', '-Iserial/libserial'],
                            auv_deps=['auv-serial'],
                            pkg_confs=['ncurses', 'protobuf-lite'])
| bsd-3-clause |
preds/kata | rolodex/test_rolodex.py | 1 | 2197 | import unittest
from rolodex import Rolodex
class TestRolodex(unittest.TestCase):
    """Test Rolodex class: insertion, normalization and export behavior."""
    def setUp(self): # noqa
        # Fresh Rolodex per test so entries/errors don't leak between tests.
        self.rolodex = Rolodex()
    def do_inserts(self):
        """Insert some mock data."""
        # Rows 0 and 2 are valid formats; rows 1 and 3 are malformed and
        # should be recorded as errors (by input index) rather than entries.
        self.rolodex.insert("Booker T., Washington, 87360, 373 781 7380, yellow")
        self.rolodex.insert("Chandler, Kerri, (623)-668-9293, pink, 123123121")
        self.rolodex.insert("James Murphy, yellow, 83880, 018 154 6474")
        self.rolodex.insert("asdfawefawea")
    def test_no_data_export(self):
        """Test export works without data inserted."""
        self.assertEqual(self.rolodex.export(), {"entries": [], "errors": []})
    def test_insert(self):
        """Test Rolodex.insert()."""
        self.do_inserts()
        # Check the output: 4 rows processed, indices 1 and 3 rejected.
        self.assertEqual(self.rolodex.processed, 4)
        self.assertEqual(self.rolodex.errors, [1, 3])
        # Phone numbers are normalized to dash-separated form.
        entries = [{'color': 'yellow',
                    'lastname': 'Washington',
                    'phonenumber': '373-781-7380',
                    'zipcode': '87360',
                    'firstname': 'Booker T.'},
                   {'color': 'yellow',
                    'lastname': 'Murphy',
                    'phonenumber': '018-154-6474',
                    'zipcode': '83880',
                    'firstname': 'James'}]
        self.assertEqual(self.rolodex.entries, entries)
    def test_export(self):
        """Test the main export function."""
        self.do_inserts()
        d = self.rolodex.export()
        # NOTE(review): export() appears to order entries differently from
        # the raw `entries` list above (Murphy before Washington) — presumably
        # sorted; confirm against the Rolodex implementation.
        expected = {'errors': [1, 3], 'entries': [{'color': 'yellow', 'lastname': 'Murphy', 'phonenumber': '018-154-6474', 'zipcode': '83880', 'firstname': 'James'}, {'color': 'yellow', 'lastname': 'Washington', 'phonenumber': '373-781-7380', 'zipcode': '87360', 'firstname': 'Booker T.'}]} # noqa
        self.assertEqual(d, expected)
    def test_export_json(self):
        """Test the json export function runs."""
        self.do_inserts()
        s = self.rolodex.export_json()
        self.assertTrue(len(s) > 300)  # Basic test to make sure it prints more than an empty entries and error list
# Allow running this test module directly: `python test_rolodex.py`.
if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
KonbOgonb/hlc_r1 | app/dataImporter.py | 1 | 3364 | PATH_TO_DATA = "../tmp/data/data.zip"
# Filename prefixes that identify each entity type inside the data archive.
USERS_PREFIX = "users"
VISITS_PREFIX = "visits"
LOCATIONS_PREFIX = "locations"
ALL_PREFIXES = [USERS_PREFIX, VISITS_PREFIX, LOCATIONS_PREFIX]
# Directory the archive is extracted into.  NOTE: "WORKIGN" is a typo for
# "WORKING"; the name is kept because other code in this module refers to it.
WORKIGN_DIRECTORY = "./local_temp"
from zipfile import ZipFile
import json
from os import listdir
from os.path import isfile, join, exists
from model import User, Location, Visit
from repository import UserRepository, VisitRepository, LocationRepository
import time
class Payload(object):
    """Lightweight object whose attributes mirror the top-level keys of a
    JSON document.

    ``Payload('{"users": [...]}').users`` gives direct attribute access to
    the parsed data by installing the decoded dict as the instance dict.
    """
    def __init__(self, j):
        # Replace the instance dict wholesale with the parsed JSON object.
        self.__dict__ = json.loads(j)
def read_all_data():
    """Unpack the data archive and import every file found inside it.

    Extracts the zip into the working directory, then processes both the
    nested ``data`` subfolder and the working directory itself.
    """
    with ZipFile(PATH_TO_DATA) as archive:
        archive.extractall(WORKIGN_DIRECTORY)
    for folder in (WORKIGN_DIRECTORY + "/data", WORKIGN_DIRECTORY):
        process_all(folder)
def process_all(path):
    """Import every users/locations/visits JSON file found in *path*.

    Loads users first, then locations, then visits.  While loading each
    visits file it also back-fills the ``visits`` id lists on the affected
    users and locations.  Does nothing if *path* does not exist.
    """
    if not exists(path):
        return
    # Partition the directory's files by entity-type filename prefix.
    files = [f for f in listdir(path) if isfile(join(path, f))]
    users_files = [f for f in files if f.startswith(USERS_PREFIX)]
    locations_files = [f for f in files if f.startswith(LOCATIONS_PREFIX)]
    visits_files = [f for f in files if f.startswith(VISITS_PREFIX)]
    user_repository = UserRepository()
    location_repository = LocationRepository()
    visit_repository = VisitRepository()
    print("started loading users", time.time())
    for file_name in users_files:
        with open(join(path, file_name), encoding='utf-8') as f:
            p = Payload(f.read())
            user_repository.add_multi(p.users)
    print("started loading locations", time.time())
    # NOTE(review): purpose of this lookup is unclear — looks like a leftover
    # warm-up/debug call; confirm before removing.
    user_repository.get_item("location1")
    for file_name in locations_files:
        with open(join(path, file_name), encoding='utf-8') as f:
            p = Payload(f.read())
            location_repository.add_multi(p.locations)
    print("started loading visits", time.time())
    for file_name in visits_files:
        with open(join(path, file_name), encoding='utf-8') as f:
            print(file_name)
            p = Payload(f.read())
            visit_repository.add_multi(p.visits)
            # Group the new visit ids by owning user and by location so the
            # reverse references can be updated in bulk below.
            users_to_update = {}
            locations_to_update = {}
            for x in p.visits:
                visit = Visit(x)
                if visit.user in users_to_update:
                    users_to_update[visit.user].append(visit.id)
                else:
                    users_to_update[visit.user] = [visit.id]
                if visit.location in locations_to_update:
                    locations_to_update[visit.location].append(visit.id)
                else:
                    locations_to_update[visit.location] = [visit.id]
            # Fetch the affected entities once, append the new visit ids to
            # each one's ``visits`` list, and write them back in bulk.
            users = user_repository.get_multi(users_to_update.keys())
            locations = location_repository.get_multi(locations_to_update.keys())
            for k,v in users_to_update.items():
                user = users[user_repository.get_key(k)]
                if not user.visits:
                    user.visits = v
                else:
                    user.visits += v
            for k,v in locations_to_update.items():
                location = locations[location_repository.get_key(k)]
                if not location.visits:
                    location.visits = v
                else:
                    location.visits += v
            location_repository.update_multi(locations)
            user_repository.update_multi(users)
    print("finished loading data", time.time())
read_all_data() | mit |
noslenfa/tdjangorest | uw/lib/python2.7/site-packages/IPython/qt/console/mainwindow.py | 2 | 39046 | """The Qt MainWindow for the QtConsole
This is a tabbed pseudo-terminal of IPython sessions, with a menu bar for
common actions.
Authors:
* Evan Patterson
* Min RK
* Erik Tollerud
* Fernando Perez
* Bussonnier Matthias
* Thomas Kluyver
* Paul Ivanov
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# stdlib imports
import json
import re
import sys
import webbrowser
from threading import Thread
# System library imports
from IPython.external.qt import QtGui,QtCore
from IPython.core.magic import magic_escapes
def background(f):
    """Execute *f* on a new thread so the caller is never blocked.

    Returns the already-started ``Thread`` so the caller may join it.
    """
    worker = Thread(target=f)
    worker.start()
    return worker
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class MainWindow(QtGui.QMainWindow):
#---------------------------------------------------------------------------
# 'object' interface
#---------------------------------------------------------------------------
_magic_menu_dict = {}
def __init__(self, app,
confirm_exit=True,
new_frontend_factory=None, slave_frontend_factory=None,
):
""" Create a tabbed MainWindow for managing IPython FrontendWidgets
Parameters
----------
app : reference to QApplication parent
confirm_exit : bool, optional
Whether we should prompt on close of tabs
new_frontend_factory : callable
A callable that returns a new IPythonWidget instance, attached to
its own running kernel.
slave_frontend_factory : callable
A callable that takes an existing IPythonWidget, and returns a new
IPythonWidget instance, attached to the same kernel.
"""
super(MainWindow, self).__init__()
self._kernel_counter = 0
self._app = app
self.confirm_exit = confirm_exit
self.new_frontend_factory = new_frontend_factory
self.slave_frontend_factory = slave_frontend_factory
self.tab_widget = QtGui.QTabWidget(self)
self.tab_widget.setDocumentMode(True)
self.tab_widget.setTabsClosable(True)
self.tab_widget.tabCloseRequested[int].connect(self.close_tab)
self.setCentralWidget(self.tab_widget)
# hide tab bar at first, since we have no tabs:
self.tab_widget.tabBar().setVisible(False)
# prevent focus in tab bar
self.tab_widget.setFocusPolicy(QtCore.Qt.NoFocus)
def update_tab_bar_visibility(self):
""" update visibility of the tabBar depending of the number of tab
0 or 1 tab, tabBar hidden
2+ tabs, tabBar visible
send a self.close if number of tab ==0
need to be called explicitly, or be connected to tabInserted/tabRemoved
"""
if self.tab_widget.count() <= 1:
self.tab_widget.tabBar().setVisible(False)
else:
self.tab_widget.tabBar().setVisible(True)
if self.tab_widget.count()==0 :
self.close()
@property
def next_kernel_id(self):
"""constantly increasing counter for kernel IDs"""
c = self._kernel_counter
self._kernel_counter += 1
return c
@property
def active_frontend(self):
return self.tab_widget.currentWidget()
def create_tab_with_new_frontend(self):
"""create a new frontend and attach it to a new tab"""
widget = self.new_frontend_factory()
self.add_tab_with_frontend(widget)
def create_tab_with_current_kernel(self):
"""create a new frontend attached to the same kernel as the current tab"""
current_widget = self.tab_widget.currentWidget()
current_widget_index = self.tab_widget.indexOf(current_widget)
current_widget_name = self.tab_widget.tabText(current_widget_index)
widget = self.slave_frontend_factory(current_widget)
if 'slave' in current_widget_name:
# don't keep stacking slaves
name = current_widget_name
else:
name = '(%s) slave' % current_widget_name
self.add_tab_with_frontend(widget,name=name)
def close_tab(self,current_tab):
""" Called when you need to try to close a tab.
It takes the number of the tab to be closed as argument, or a reference
to the widget inside this tab
"""
# let's be sure "tab" and "closing widget" are respectively the index
# of the tab to close and a reference to the frontend to close
if type(current_tab) is not int :
current_tab = self.tab_widget.indexOf(current_tab)
closing_widget=self.tab_widget.widget(current_tab)
# when trying to be closed, widget might re-send a request to be
# closed again, but will be deleted when event will be processed. So
# need to check that widget still exists and skip if not. One example
# of this is when 'exit' is sent in a slave tab. 'exit' will be
# re-sent by this function on the master widget, which ask all slave
# widgets to exit
if closing_widget==None:
return
#get a list of all slave widgets on the same kernel.
slave_tabs = self.find_slave_widgets(closing_widget)
keepkernel = None #Use the prompt by default
if hasattr(closing_widget,'_keep_kernel_on_exit'): #set by exit magic
keepkernel = closing_widget._keep_kernel_on_exit
# If signal sent by exit magic (_keep_kernel_on_exit, exist and not None)
# we set local slave tabs._hidden to True to avoid prompting for kernel
# restart when they get the signal. and then "forward" the 'exit'
# to the main window
if keepkernel is not None:
for tab in slave_tabs:
tab._hidden = True
if closing_widget in slave_tabs:
try :
self.find_master_tab(closing_widget).execute('exit')
except AttributeError:
self.log.info("Master already closed or not local, closing only current tab")
self.tab_widget.removeTab(current_tab)
self.update_tab_bar_visibility()
return
kernel_client = closing_widget.kernel_client
kernel_manager = closing_widget.kernel_manager
if keepkernel is None and not closing_widget._confirm_exit:
# don't prompt, just terminate the kernel if we own it
# or leave it alone if we don't
keepkernel = closing_widget._existing
if keepkernel is None: #show prompt
if kernel_client and kernel_client.channels_running:
title = self.window().windowTitle()
cancel = QtGui.QMessageBox.Cancel
okay = QtGui.QMessageBox.Ok
if closing_widget._may_close:
msg = "You are closing the tab : "+'"'+self.tab_widget.tabText(current_tab)+'"'
info = "Would you like to quit the Kernel and close all attached Consoles as well?"
justthis = QtGui.QPushButton("&No, just this Tab", self)
justthis.setShortcut('N')
closeall = QtGui.QPushButton("&Yes, close all", self)
closeall.setShortcut('Y')
# allow ctrl-d ctrl-d exit, like in terminal
closeall.setShortcut('Ctrl+D')
box = QtGui.QMessageBox(QtGui.QMessageBox.Question,
title, msg)
box.setInformativeText(info)
box.addButton(cancel)
box.addButton(justthis, QtGui.QMessageBox.NoRole)
box.addButton(closeall, QtGui.QMessageBox.YesRole)
box.setDefaultButton(closeall)
box.setEscapeButton(cancel)
pixmap = QtGui.QPixmap(self._app.icon.pixmap(QtCore.QSize(64,64)))
box.setIconPixmap(pixmap)
reply = box.exec_()
if reply == 1: # close All
for slave in slave_tabs:
background(slave.kernel_client.stop_channels)
self.tab_widget.removeTab(self.tab_widget.indexOf(slave))
closing_widget.execute("exit")
self.tab_widget.removeTab(current_tab)
background(kernel_client.stop_channels)
elif reply == 0: # close Console
if not closing_widget._existing:
# Have kernel: don't quit, just close the tab
closing_widget.execute("exit True")
self.tab_widget.removeTab(current_tab)
background(kernel_client.stop_channels)
else:
reply = QtGui.QMessageBox.question(self, title,
"Are you sure you want to close this Console?"+
"\nThe Kernel and other Consoles will remain active.",
okay|cancel,
defaultButton=okay
)
if reply == okay:
self.tab_widget.removeTab(current_tab)
elif keepkernel: #close console but leave kernel running (no prompt)
self.tab_widget.removeTab(current_tab)
background(kernel_client.stop_channels)
else: #close console and kernel (no prompt)
self.tab_widget.removeTab(current_tab)
if kernel_client and kernel_client.channels_running:
for slave in slave_tabs:
background(slave.kernel_client.stop_channels)
self.tab_widget.removeTab(self.tab_widget.indexOf(slave))
if kernel_manager:
kernel_manager.shutdown_kernel()
background(kernel_client.stop_channels)
self.update_tab_bar_visibility()
def add_tab_with_frontend(self,frontend,name=None):
""" insert a tab with a given frontend in the tab bar, and give it a name
"""
if not name:
name = 'kernel %i' % self.next_kernel_id
self.tab_widget.addTab(frontend,name)
self.update_tab_bar_visibility()
self.make_frontend_visible(frontend)
frontend.exit_requested.connect(self.close_tab)
def next_tab(self):
self.tab_widget.setCurrentIndex((self.tab_widget.currentIndex()+1))
def prev_tab(self):
self.tab_widget.setCurrentIndex((self.tab_widget.currentIndex()-1))
def make_frontend_visible(self,frontend):
widget_index=self.tab_widget.indexOf(frontend)
if widget_index > 0 :
self.tab_widget.setCurrentIndex(widget_index)
def find_master_tab(self,tab,as_list=False):
"""
Try to return the frontend that owns the kernel attached to the given widget/tab.
Only finds frontend owned by the current application. Selection
based on port of the kernel might be inaccurate if several kernel
on different ip use same port number.
This function does the conversion tabNumber/widget if needed.
Might return None if no master widget (non local kernel)
Will crash IPython if more than 1 masterWidget
When asList set to True, always return a list of widget(s) owning
the kernel. The list might be empty or containing several Widget.
"""
#convert from/to int/richIpythonWidget if needed
if isinstance(tab, int):
tab = self.tab_widget.widget(tab)
km=tab.kernel_client
#build list of all widgets
widget_list = [self.tab_widget.widget(i) for i in range(self.tab_widget.count())]
# widget that are candidate to be the owner of the kernel does have all the same port of the curent widget
# And should have a _may_close attribute
filtered_widget_list = [ widget for widget in widget_list if
widget.kernel_client.connection_file == km.connection_file and
hasattr(widget,'_may_close') ]
# the master widget is the one that may close the kernel
master_widget= [ widget for widget in filtered_widget_list if widget._may_close]
if as_list:
return master_widget
assert(len(master_widget)<=1 )
if len(master_widget)==0:
return None
return master_widget[0]
def find_slave_widgets(self,tab):
"""return all the frontends that do not own the kernel attached to the given widget/tab.
Only find frontends owned by the current application. Selection
based on connection file of the kernel.
This function does the conversion tabNumber/widget if needed.
"""
#convert from/to int/richIpythonWidget if needed
if isinstance(tab, int):
tab = self.tab_widget.widget(tab)
km=tab.kernel_client
#build list of all widgets
widget_list = [self.tab_widget.widget(i) for i in range(self.tab_widget.count())]
# widget that are candidate not to be the owner of the kernel does have all the same port of the curent widget
filtered_widget_list = ( widget for widget in widget_list if
widget.kernel_client.connection_file == km.connection_file)
# Get a list of all widget owning the same kernel and removed it from
# the previous cadidate. (better using sets ?)
master_widget_list = self.find_master_tab(tab, as_list=True)
slave_list = [widget for widget in filtered_widget_list if widget not in master_widget_list]
return slave_list
# Populate the menu bar with common actions and shortcuts
def add_menu_action(self, menu, action, defer_shortcut=False):
"""Add action to menu as well as self
So that when the menu bar is invisible, its actions are still available.
If defer_shortcut is True, set the shortcut context to widget-only,
where it will avoid conflict with shortcuts already bound to the
widgets themselves.
"""
menu.addAction(action)
self.addAction(action)
if defer_shortcut:
action.setShortcutContext(QtCore.Qt.WidgetShortcut)
def init_menu_bar(self):
#create menu in the order they should appear in the menu bar
self.init_file_menu()
self.init_edit_menu()
self.init_view_menu()
self.init_kernel_menu()
self.init_magic_menu()
self.init_window_menu()
self.init_help_menu()
def init_file_menu(self):
self.file_menu = self.menuBar().addMenu("&File")
self.new_kernel_tab_act = QtGui.QAction("New Tab with &New kernel",
self,
shortcut="Ctrl+T",
triggered=self.create_tab_with_new_frontend)
self.add_menu_action(self.file_menu, self.new_kernel_tab_act)
self.slave_kernel_tab_act = QtGui.QAction("New Tab with Sa&me kernel",
self,
shortcut="Ctrl+Shift+T",
triggered=self.create_tab_with_current_kernel)
self.add_menu_action(self.file_menu, self.slave_kernel_tab_act)
self.file_menu.addSeparator()
self.close_action=QtGui.QAction("&Close Tab",
self,
shortcut=QtGui.QKeySequence.Close,
triggered=self.close_active_frontend
)
self.add_menu_action(self.file_menu, self.close_action)
self.export_action=QtGui.QAction("&Save to HTML/XHTML",
self,
shortcut=QtGui.QKeySequence.Save,
triggered=self.export_action_active_frontend
)
self.add_menu_action(self.file_menu, self.export_action, True)
self.file_menu.addSeparator()
printkey = QtGui.QKeySequence(QtGui.QKeySequence.Print)
if printkey.matches("Ctrl+P") and sys.platform != 'darwin':
# Only override the default if there is a collision.
# Qt ctrl = cmd on OSX, so the match gets a false positive on OSX.
printkey = "Ctrl+Shift+P"
self.print_action = QtGui.QAction("&Print",
self,
shortcut=printkey,
triggered=self.print_action_active_frontend)
self.add_menu_action(self.file_menu, self.print_action, True)
if sys.platform != 'darwin':
# OSX always has Quit in the Application menu, only add it
# to the File menu elsewhere.
self.file_menu.addSeparator()
self.quit_action = QtGui.QAction("&Quit",
self,
shortcut=QtGui.QKeySequence.Quit,
triggered=self.close,
)
self.add_menu_action(self.file_menu, self.quit_action)
def init_edit_menu(self):
self.edit_menu = self.menuBar().addMenu("&Edit")
self.undo_action = QtGui.QAction("&Undo",
self,
shortcut=QtGui.QKeySequence.Undo,
statusTip="Undo last action if possible",
triggered=self.undo_active_frontend
)
self.add_menu_action(self.edit_menu, self.undo_action)
self.redo_action = QtGui.QAction("&Redo",
self,
shortcut=QtGui.QKeySequence.Redo,
statusTip="Redo last action if possible",
triggered=self.redo_active_frontend)
self.add_menu_action(self.edit_menu, self.redo_action)
self.edit_menu.addSeparator()
self.cut_action = QtGui.QAction("&Cut",
self,
shortcut=QtGui.QKeySequence.Cut,
triggered=self.cut_active_frontend
)
self.add_menu_action(self.edit_menu, self.cut_action, True)
self.copy_action = QtGui.QAction("&Copy",
self,
shortcut=QtGui.QKeySequence.Copy,
triggered=self.copy_active_frontend
)
self.add_menu_action(self.edit_menu, self.copy_action, True)
self.copy_raw_action = QtGui.QAction("Copy (&Raw Text)",
self,
shortcut="Ctrl+Shift+C",
triggered=self.copy_raw_active_frontend
)
self.add_menu_action(self.edit_menu, self.copy_raw_action, True)
self.paste_action = QtGui.QAction("&Paste",
self,
shortcut=QtGui.QKeySequence.Paste,
triggered=self.paste_active_frontend
)
self.add_menu_action(self.edit_menu, self.paste_action, True)
self.edit_menu.addSeparator()
selectall = QtGui.QKeySequence(QtGui.QKeySequence.SelectAll)
if selectall.matches("Ctrl+A") and sys.platform != 'darwin':
# Only override the default if there is a collision.
# Qt ctrl = cmd on OSX, so the match gets a false positive on OSX.
selectall = "Ctrl+Shift+A"
self.select_all_action = QtGui.QAction("Select &All",
self,
shortcut=selectall,
triggered=self.select_all_active_frontend
)
self.add_menu_action(self.edit_menu, self.select_all_action, True)
def init_view_menu(self):
self.view_menu = self.menuBar().addMenu("&View")
if sys.platform != 'darwin':
# disable on OSX, where there is always a menu bar
self.toggle_menu_bar_act = QtGui.QAction("Toggle &Menu Bar",
self,
shortcut="Ctrl+Shift+M",
statusTip="Toggle visibility of menubar",
triggered=self.toggle_menu_bar)
self.add_menu_action(self.view_menu, self.toggle_menu_bar_act)
fs_key = "Ctrl+Meta+F" if sys.platform == 'darwin' else "F11"
self.full_screen_act = QtGui.QAction("&Full Screen",
self,
shortcut=fs_key,
statusTip="Toggle between Fullscreen and Normal Size",
triggered=self.toggleFullScreen)
self.add_menu_action(self.view_menu, self.full_screen_act)
self.view_menu.addSeparator()
self.increase_font_size = QtGui.QAction("Zoom &In",
self,
shortcut=QtGui.QKeySequence.ZoomIn,
triggered=self.increase_font_size_active_frontend
)
self.add_menu_action(self.view_menu, self.increase_font_size, True)
self.decrease_font_size = QtGui.QAction("Zoom &Out",
self,
shortcut=QtGui.QKeySequence.ZoomOut,
triggered=self.decrease_font_size_active_frontend
)
self.add_menu_action(self.view_menu, self.decrease_font_size, True)
self.reset_font_size = QtGui.QAction("Zoom &Reset",
self,
shortcut="Ctrl+0",
triggered=self.reset_font_size_active_frontend
)
self.add_menu_action(self.view_menu, self.reset_font_size, True)
self.view_menu.addSeparator()
self.clear_action = QtGui.QAction("&Clear Screen",
self,
shortcut='Ctrl+L',
statusTip="Clear the console",
triggered=self.clear_magic_active_frontend)
self.add_menu_action(self.view_menu, self.clear_action)
self.pager_menu = self.view_menu.addMenu("&Pager")
hsplit_action = QtGui.QAction(".. &Horizontal Split",
self,
triggered=lambda: self.set_paging_active_frontend('hsplit'))
vsplit_action = QtGui.QAction(" : &Vertical Split",
self,
triggered=lambda: self.set_paging_active_frontend('vsplit'))
inside_action = QtGui.QAction(" &Inside Pager",
self,
triggered=lambda: self.set_paging_active_frontend('inside'))
self.pager_menu.addAction(hsplit_action)
self.pager_menu.addAction(vsplit_action)
self.pager_menu.addAction(inside_action)
def init_kernel_menu(self):
self.kernel_menu = self.menuBar().addMenu("&Kernel")
# Qt on OSX maps Ctrl to Cmd, and Meta to Ctrl
# keep the signal shortcuts to ctrl, rather than
# platform-default like we do elsewhere.
ctrl = "Meta" if sys.platform == 'darwin' else "Ctrl"
self.interrupt_kernel_action = QtGui.QAction("&Interrupt current Kernel",
self,
triggered=self.interrupt_kernel_active_frontend,
shortcut=ctrl+"+C",
)
self.add_menu_action(self.kernel_menu, self.interrupt_kernel_action)
self.restart_kernel_action = QtGui.QAction("&Restart current Kernel",
self,
triggered=self.restart_kernel_active_frontend,
shortcut=ctrl+"+.",
)
self.add_menu_action(self.kernel_menu, self.restart_kernel_action)
self.kernel_menu.addSeparator()
self.confirm_restart_kernel_action = QtGui.QAction("&Confirm kernel restart",
self,
checkable=True,
checked=self.active_frontend.confirm_restart,
triggered=self.toggle_confirm_restart_active_frontend
)
self.add_menu_action(self.kernel_menu, self.confirm_restart_kernel_action)
self.tab_widget.currentChanged.connect(self.update_restart_checkbox)
def _make_dynamic_magic(self,magic):
"""Return a function `fun` that will execute `magic` on active frontend.
Parameters
----------
magic : string
string that will be executed as is when the returned function is called
Returns
-------
fun : function
function with no parameters, when called will execute `magic` on the
current active frontend at call time
See Also
--------
populate_all_magic_menu : generate the "All Magics..." menu
Notes
-----
`fun` executes `magic` in active frontend at the moment it is triggered,
not the active frontend at the moment it was created.
This function is mostly used to create the "All Magics..." Menu at run time.
"""
# need two level nested function to be sure to pass magic
# to active frontend **at run time**.
def inner_dynamic_magic():
self.active_frontend.execute(magic)
inner_dynamic_magic.__name__ = "dynamics_magic_s"
return inner_dynamic_magic
def populate_all_magic_menu(self, display_data=None):
    """Clean "All Magics..." menu and repopulate it with `display_data`

    Parameters
    ----------
    display_data : dict, optional
        display_data for the magics dict of a MagicsManager, as produced by
        a ``%lsmagic`` user-expression.  The JSON payload is read from
        ``display_data['data']['application/json']``.  When falsy, the
        menus are only cleared.
    """
    # Clear every per-class submenu as well as the flat "All Magics" menu
    # so stale entries from a previous kernel disappear.
    for submenu in self._magic_menu_dict.values():
        submenu.clear()
    self.all_magic_menu.clear()

    if not display_data:
        return
    if display_data['status'] != 'ok':
        # Logger.warn is a deprecated alias; use warning() with lazy %-args.
        self.log.warning("%%lsmagic user-expression failed: %s", display_data)
        return
    # json.loads() requires a string: default to '{}' (not {}) so a missing
    # 'application/json' mimetype yields an empty mapping instead of raising
    # TypeError.
    mdict = json.loads(display_data['data'].get('application/json', '{}'))
    for mtype in sorted(mdict):
        subdict = mdict[mtype]
        prefix = magic_escapes[mtype]
        for name in sorted(subdict):
            mclass = subdict[name]
            magic_menu = self._get_magic_menu(mclass)
            pmagic = prefix + name
            # Adding separate QActions is needed for some window managers:
            # sharing one action object between two menus can misbehave.
            xaction = QtGui.QAction(pmagic,
                self,
                triggered=self._make_dynamic_magic(pmagic)
                )
            xaction_all = QtGui.QAction(pmagic,
                self,
                triggered=self._make_dynamic_magic(pmagic)
                )
            magic_menu.addAction(xaction)
            self.all_magic_menu.addAction(xaction_all)
def update_all_magic_menu(self):
    """ Update the list of magics in the "All Magics..." Menu

    Request the kernel with the list of available magics and populate the
    menu with the list received back
    """
    # Silent execution: the %lsmagic result is routed to the
    # populate_all_magic_menu callback instead of the console output.
    self.active_frontend._silent_exec_callback('get_ipython().magic("lsmagic")',
                                               self.populate_all_magic_menu)
def _get_magic_menu(self,menuidentifier, menulabel=None):
"""return a submagic menu by name, and create it if needed
parameters:
-----------
menulabel : str
Label for the menu
Will infere the menu name from the identifier at creation if menulabel not given.
To do so you have too give menuidentifier as a CamelCassedString
"""
menu = self._magic_menu_dict.get(menuidentifier,None)
if not menu :
if not menulabel:
menulabel = re.sub("([a-zA-Z]+)([A-Z][a-z])","\g<1> \g<2>",menuidentifier)
menu = QtGui.QMenu(menulabel,self.magic_menu)
self._magic_menu_dict[menuidentifier]=menu
self.magic_menu.insertMenu(self.magic_menu_separator,menu)
return menu
def init_magic_menu(self):
    """Create the "Magic" menu: common magics plus the "All Magics..." submenu."""
    self.magic_menu = self.menuBar().addMenu("&Magic")
    self.magic_menu_separator = self.magic_menu.addSeparator()

    self.all_magic_menu = self._get_magic_menu("AllMagics", menulabel="&All Magics...")

    # This action should usually not appear as it will be cleared when menu
    # is updated at first kernel response. Though, it is necessary when
    # connecting through X-forwarding, as in this case, the menu is not
    # auto updated, SO DO NOT DELETE.
    self.pop = QtGui.QAction("&Update All Magic Menu ",
        self, triggered=self.update_all_magic_menu)
    self.add_menu_action(self.all_magic_menu, self.pop)
    # we need to populate the 'Magic Menu' once the kernel has answer at
    # least once let's do it immediately, but it's assured to works
    self.pop.trigger()

    self.reset_action = QtGui.QAction("&Reset",
        self,
        statusTip="Clear all variables from workspace",
        triggered=self.reset_magic_active_frontend)
    self.add_menu_action(self.magic_menu, self.reset_action)

    self.history_action = QtGui.QAction("&History",
        self,
        statusTip="show command history",
        triggered=self.history_magic_active_frontend)
    self.add_menu_action(self.magic_menu, self.history_action)

    self.save_action = QtGui.QAction("E&xport History ",
        self,
        statusTip="Export History as Python File",
        triggered=self.save_magic_active_frontend)
    self.add_menu_action(self.magic_menu, self.save_action)

    self.who_action = QtGui.QAction("&Who",
        self,
        statusTip="List interactive variables",
        triggered=self.who_magic_active_frontend)
    self.add_menu_action(self.magic_menu, self.who_action)

    self.who_ls_action = QtGui.QAction("Wh&o ls",
        self,
        statusTip="Return a list of interactive variables",
        triggered=self.who_ls_magic_active_frontend)
    self.add_menu_action(self.magic_menu, self.who_ls_action)

    self.whos_action = QtGui.QAction("Who&s",
        self,
        statusTip="List interactive variables with details",
        triggered=self.whos_magic_active_frontend)
    self.add_menu_action(self.magic_menu, self.whos_action)
def init_window_menu(self):
    """Create the "Window" menu: min/zoom (OSX only) and tab navigation."""
    self.window_menu = self.menuBar().addMenu("&Window")
    if sys.platform == 'darwin':
        # add min/maximize actions to OSX, which lacks default bindings.
        self.minimizeAct = QtGui.QAction("Mini&mize",
            self,
            shortcut="Ctrl+m",
            statusTip="Minimize the window/Restore Normal Size",
            triggered=self.toggleMinimized)
        # maximize is called 'Zoom' on OSX for some reason
        self.maximizeAct = QtGui.QAction("&Zoom",
            self,
            shortcut="Ctrl+Shift+M",
            statusTip="Maximize the window/Restore Normal Size",
            triggered=self.toggleMaximized)

        self.add_menu_action(self.window_menu, self.minimizeAct)
        self.add_menu_action(self.window_menu, self.maximizeAct)
        self.window_menu.addSeparator()

    # Tab cycling: OSX uses Ctrl+Shift+arrows, other platforms PgUp/PgDown.
    prev_key = "Ctrl+Shift+Left" if sys.platform == 'darwin' else "Ctrl+PgUp"
    self.prev_tab_act = QtGui.QAction("Pre&vious Tab",
        self,
        shortcut=prev_key,
        statusTip="Select previous tab",
        triggered=self.prev_tab)
    self.add_menu_action(self.window_menu, self.prev_tab_act)

    next_key = "Ctrl+Shift+Right" if sys.platform == 'darwin' else "Ctrl+PgDown"
    self.next_tab_act = QtGui.QAction("Ne&xt Tab",
        self,
        shortcut=next_key,
        statusTip="Select next tab",
        triggered=self.next_tab)
    self.add_menu_action(self.window_menu, self.next_tab_act)
def init_help_menu(self):
    """Create the "Help" menu with IPython intro/cheat-sheet/online-help entries."""
    # please keep the Help menu in Mac Os even if empty. It will
    # automatically contain a search field to search inside menus and
    # please keep it spelled in English, as long as Qt Doesn't support
    # a QAction.MenuRole like HelpMenuRole otherwise it will lose
    # this search field functionality
    self.help_menu = self.menuBar().addMenu("&Help")

    # Help Menu
    self.intro_active_frontend_action = QtGui.QAction("&Intro to IPython",
        self,
        triggered=self.intro_active_frontend
        )
    self.add_menu_action(self.help_menu, self.intro_active_frontend_action)

    self.quickref_active_frontend_action = QtGui.QAction("IPython &Cheat Sheet",
        self,
        triggered=self.quickref_active_frontend
        )
    self.add_menu_action(self.help_menu, self.quickref_active_frontend_action)

    self.guiref_active_frontend_action = QtGui.QAction("&Qt Console",
        self,
        triggered=self.guiref_active_frontend
        )
    self.add_menu_action(self.help_menu, self.guiref_active_frontend_action)

    self.onlineHelpAct = QtGui.QAction("Open Online &Help",
        self,
        triggered=self._open_online_help)
    self.add_menu_action(self.help_menu, self.onlineHelpAct)
# minimize/maximize/fullscreen actions:

def toggle_menu_bar(self):
    """Show the menu bar if hidden, hide it if visible."""
    menu_bar = self.menuBar()
    if menu_bar.isVisible():
        menu_bar.setVisible(False)
    else:
        menu_bar.setVisible(True)

def toggleMinimized(self):
    """Minimize the window, or restore it if already minimized."""
    if not self.isMinimized():
        self.showMinimized()
    else:
        self.showNormal()

def _open_online_help(self):
    """Open the online IPython documentation in the default web browser."""
    filename="http://ipython.org/ipython-doc/stable/index.html"
    webbrowser.open(filename, new=1, autoraise=True)

def toggleMaximized(self):
    """Maximize the window, or restore it if already maximized."""
    if not self.isMaximized():
        self.showMaximized()
    else:
        self.showNormal()

# Min/Max imizing while in full screen give a bug
# when going out of full screen, at least on OSX
def toggleFullScreen(self):
    """Toggle full-screen mode, disabling min/zoom actions on OSX while active."""
    if not self.isFullScreen():
        self.showFullScreen()
        if sys.platform == 'darwin':
            self.maximizeAct.setEnabled(False)
            self.minimizeAct.setEnabled(False)
    else:
        self.showNormal()
        if sys.platform == 'darwin':
            self.maximizeAct.setEnabled(True)
            self.minimizeAct.setEnabled(True)
# ---- thin delegates: each forwards a menu action to the active frontend ----

def set_paging_active_frontend(self, paging):
    """Set the paging style ('hsplit', 'vsplit', or 'inside') on the active frontend."""
    self.active_frontend._set_paging(paging)

def close_active_frontend(self):
    """Close the tab that holds the active frontend."""
    self.close_tab(self.active_frontend)

def restart_kernel_active_frontend(self):
    """Ask the active frontend to restart its kernel."""
    self.active_frontend.request_restart_kernel()

def interrupt_kernel_active_frontend(self):
    """Ask the active frontend to interrupt its kernel."""
    self.active_frontend.request_interrupt_kernel()

def toggle_confirm_restart_active_frontend(self):
    """Flip the active frontend's confirm_restart flag and sync the menu checkbox."""
    widget = self.active_frontend
    widget.confirm_restart = not widget.confirm_restart
    self.confirm_restart_kernel_action.setChecked(widget.confirm_restart)

def update_restart_checkbox(self):
    """Sync the confirm-restart checkbox with the (possibly new) active frontend."""
    if self.active_frontend is None:
        return
    widget = self.active_frontend
    self.confirm_restart_kernel_action.setChecked(widget.confirm_restart)

def cut_active_frontend(self):
    """Cut the selection in the active frontend, if cutting is currently allowed."""
    widget = self.active_frontend
    if widget.can_cut():
        widget.cut()

def copy_active_frontend(self):
    """Copy the selection in the active frontend."""
    widget = self.active_frontend
    widget.copy()

def copy_raw_active_frontend(self):
    """Copy the selection without prompts/formatting via the frontend's raw-copy action."""
    self.active_frontend._copy_raw_action.trigger()

def paste_active_frontend(self):
    """Paste into the active frontend, if pasting is currently allowed."""
    widget = self.active_frontend
    if widget.can_paste():
        widget.paste()

def undo_active_frontend(self):
    """Undo the last edit in the active frontend."""
    self.active_frontend.undo()

def redo_active_frontend(self):
    """Redo the last undone edit in the active frontend."""
    self.active_frontend.redo()

# ---- magic shortcuts: run a magic command in the active frontend ----

def reset_magic_active_frontend(self):
    """Run %reset (clear all user variables) in the active frontend."""
    self.active_frontend.execute("%reset")

def history_magic_active_frontend(self):
    """Run %history in the active frontend."""
    self.active_frontend.execute("%history")

def save_magic_active_frontend(self):
    """Trigger the active frontend's export-history-as-Python-file dialog."""
    self.active_frontend.save_magic()

def clear_magic_active_frontend(self):
    """Run %clear (clear the console screen) in the active frontend."""
    self.active_frontend.execute("%clear")

def who_magic_active_frontend(self):
    """Run %who in the active frontend."""
    self.active_frontend.execute("%who")

def who_ls_magic_active_frontend(self):
    """Run %who_ls in the active frontend."""
    self.active_frontend.execute("%who_ls")

def whos_magic_active_frontend(self):
    """Run %whos in the active frontend."""
    self.active_frontend.execute("%whos")

def print_action_active_frontend(self):
    """Trigger the active frontend's print action."""
    self.active_frontend.print_action.trigger()

def export_action_active_frontend(self):
    """Trigger the active frontend's export action."""
    self.active_frontend.export_action.trigger()

def select_all_active_frontend(self):
    """Trigger the active frontend's select-all action."""
    self.active_frontend.select_all_action.trigger()

def increase_font_size_active_frontend(self):
    """Trigger the active frontend's increase-font-size action."""
    self.active_frontend.increase_font_size.trigger()

def decrease_font_size_active_frontend(self):
    """Trigger the active frontend's decrease-font-size action."""
    self.active_frontend.decrease_font_size.trigger()

def reset_font_size_active_frontend(self):
    """Trigger the active frontend's reset-font-size action."""
    self.active_frontend.reset_font_size.trigger()

def guiref_active_frontend(self):
    """Run %guiref (Qt console reference) in the active frontend."""
    self.active_frontend.execute("%guiref")

def intro_active_frontend(self):
    """Run '?' (IPython introduction) in the active frontend."""
    self.active_frontend.execute("?")

def quickref_active_frontend(self):
    """Run %quickref (IPython cheat sheet) in the active frontend."""
    self.active_frontend.execute("%quickref")
#---------------------------------------------------------------------------
# QWidget interface
#---------------------------------------------------------------------------

def closeEvent(self, event):
    """ Forward the close event to every tabs contained by the windows

    If confirm_exit is set, ask the user once for the whole window;
    otherwise close all tabs (stopping kernels started here) immediately.
    """
    if self.tab_widget.count() == 0:
        # no tabs, just close
        event.accept()
        return
    # Do Not loop on the widget count as it change while closing
    title = self.window().windowTitle()
    cancel = QtGui.QMessageBox.Cancel
    okay = QtGui.QMessageBox.Ok

    if self.confirm_exit:
        if self.tab_widget.count() > 1:
            msg = "Close all tabs, stop all kernels, and Quit?"
        else:
            msg = "Close console, stop kernel, and Quit?"
        info = "Kernels not started here (e.g. notebooks) will be left alone."
        closeall = QtGui.QPushButton("&Quit", self)
        closeall.setShortcut('Q')
        box = QtGui.QMessageBox(QtGui.QMessageBox.Question,
                                title, msg)
        box.setInformativeText(info)
        box.addButton(cancel)
        # "Quit" is the default and plays the Yes role in the dialog.
        box.addButton(closeall, QtGui.QMessageBox.YesRole)
        box.setDefaultButton(closeall)
        box.setEscapeButton(cancel)
        pixmap = QtGui.QPixmap(self._app.icon.pixmap(QtCore.QSize(64,64)))
        box.setIconPixmap(pixmap)
        reply = box.exec_()
    else:
        # No confirmation requested: behave as if the user accepted.
        reply = okay

    if reply == cancel:
        event.ignore()
        return

    if reply == okay:
        while self.tab_widget.count() >= 1:
            # prevent further confirmations:
            widget = self.active_frontend
            widget._confirm_exit = False
            self.close_tab(widget)
        event.accept()
| apache-2.0 |
fkorotkov/pants | src/python/pants/backend/graph_info/register.py | 15 | 1582 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.graph_info.tasks.cloc import CountLinesOfCode
from pants.backend.graph_info.tasks.dependees import ReverseDepmap
from pants.backend.graph_info.tasks.filemap import Filemap
from pants.backend.graph_info.tasks.filter import Filter
from pants.backend.graph_info.tasks.list_owners import ListOwners
from pants.backend.graph_info.tasks.listtargets import ListTargets
from pants.backend.graph_info.tasks.minimal_cover import MinimalCover
from pants.backend.graph_info.tasks.pathdeps import PathDeps
from pants.backend.graph_info.tasks.paths import Path, Paths
from pants.backend.graph_info.tasks.sorttargets import SortTargets
from pants.goal.task_registrar import TaskRegistrar as task
def register_goals():
    """Install the graph-info goals: target listing, filtering, and reporting tasks."""
    goal_tasks = (
        ('list', ListTargets),
        ('path', Path),
        ('paths', Paths),
        ('pathdeps', PathDeps),
        ('dependees', ReverseDepmap),
        ('filemap', Filemap),
        ('minimize', MinimalCover),
        ('filter', Filter),
        ('sort', SortTargets),
        ('cloc', CountLinesOfCode),
        ('list-owners', ListOwners),
    )
    # Registration order matches the historical explicit install() calls.
    for goal_name, task_type in goal_tasks:
        task(name=goal_name, action=task_type).install()
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.