repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
laurentb/weboob | modules/supertoinette/module.py | 1 | 2042 | # -*- coding: utf-8 -*-
# Copyright(C) 2013 Julien Veyssier
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from weboob.capabilities.recipe import CapRecipe, Recipe
from weboob.tools.backend import Module
from .browser import SupertoinetteBrowser
__all__ = ['SupertoinetteModule']
class SupertoinetteModule(Module, CapRecipe):
    """Weboob backend module for the Super Toinette recipe website."""
    NAME = 'supertoinette'
    MAINTAINER = u'Julien Veyssier'
    EMAIL = 'julien.veyssier@aiur.fr'
    VERSION = '2.1'
    DESCRIPTION = u'Super Toinette, la cuisine familiale French recipe website'
    LICENSE = 'AGPLv3+'
    BROWSER = SupertoinetteBrowser

    def get_recipe(self, id):
        """Fetch one recipe by its identifier via the browser."""
        return self.browser.get_recipe(id)

    def iter_recipes(self, pattern):
        """Search recipes matching *pattern* (UTF-8 encoded for the site)."""
        return self.browser.iter_recipes(pattern.encode('utf-8'))

    def fill_recipe(self, recipe, fields):
        """Complete a partially-loaded recipe when detail fields are requested.

        Re-fetches the full recipe and copies its detail attributes onto the
        given one, then returns it.
        """
        if 'nb_person' in fields or 'instructions' in fields:
            full = self.get_recipe(recipe.id)
            for attribute in ('picture', 'instructions', 'ingredients',
                              'comments', 'author', 'nb_person',
                              'cooking_time', 'preparation_time'):
                setattr(recipe, attribute, getattr(full, attribute))
        return recipe

    OBJECTS = {
        Recipe: fill_recipe,
    }
| lgpl-3.0 |
devassistant/devassistant | devassistant/assistant_base.py | 8 | 5405 | import os
from devassistant import exceptions
from devassistant import settings
class AssistantBase(object):
    """Common base class for assistants written in Python.

    Provides subassistant discovery and traversal of the assistant tree.

    WARNING: if assigning subassistants in __init__, make sure to override it
    in subclass, so that it doesn't get inherited!"""
    # Some informations about assistant
    # These should all be present:
    name = 'base'
    fullname = 'Base'
    description = ''
    superassistant = None
    role = settings.DEFAULT_ASSISTANT_ROLE
    args = []
    project_type = []
    source_file = ''
    files_dir = os.path.join(os.path.dirname(__file__), 'data', 'files')

    def get_subassistant_classes(self):
        """Return list of classes that are subassistants of this assistant.

        Override in subclasses representing assistants written in Python

        Returns:
            list of classes that are subassistants of this assistant
        """
        return []

    def get_subassistants(self):
        """Return list of instantiated subassistants.

        Usually, this needs not be overriden in subclasses, you should just override
        get_subassistant_classes

        Returns:
            list of instantiated subassistants
        """
        if not hasattr(self, '_subassistants'):
            self._subassistants = []
            # we want to know, if type(self) defines 'get_subassistant_classes',
            # we don't want to inherit it from superclass (would cause recursion)
            if 'get_subassistant_classes' in vars(type(self)):
                for a in self.get_subassistant_classes():
                    self._subassistants.append(a())
        return self._subassistants

    def get_subassistant_tree(self):
        """Returns a tree-like structure representing the assistant hierarchy going down
        from this assistant to leaf assistants.

        For example: [(<This Assistant>,
                       [(<Subassistant 1>, [...]),
                        (<Subassistant 2>, [...])]
                     )]

        Returns:
            a tree-like structure (see above) representing assistant hierarchy going down
            from this assistant to leaf assistants
        """
        if '_tree' not in dir(self):
            # cache the computed tree on the instance
            subassistant_tree = []
            subassistants = self.get_subassistants()
            for subassistant in subassistants:
                subassistant_tree.append(subassistant.get_subassistant_tree())
            self._tree = (self, subassistant_tree)
        return self._tree

    def get_selected_subassistant_path(self, **kwargs):
        """Recursively searches self._tree - has format of (Assistant: [list_of_subassistants]) -
        for specific path from first to last selected subassistants.

        Args:
            kwargs: arguments containing names of the given assistants in form of
                    subassistant_0 = 'name', subassistant_1 = 'another_name', ...

        Returns:
            list of subassistants objects from tree sorted from first to last

        Raises:
            devassistant.exceptions.AssistantNotFoundException: if a named
                subassistant is not found under the current tree node
        """
        path = [self]
        previous_subas_list = None
        currently_searching = self.get_subassistant_tree()[1]

        # len(path) - 1 always points to next subassistant_N, so we can use it to control iteration
        while settings.SUBASSISTANT_N_STRING.format(len(path) - 1) in kwargs and \
                kwargs[settings.SUBASSISTANT_N_STRING.format(len(path) - 1)]:
            for sa, subas_list in currently_searching:
                if sa.name == kwargs[settings.SUBASSISTANT_N_STRING.format(len(path) - 1)]:
                    currently_searching = subas_list
                    path.append(sa)
                    break  # sorry if you shed a tear ;)
            # if the candidate list didn't change, no subassistant matched the name
            if subas_list == previous_subas_list:
                raise exceptions.AssistantNotFoundException(
                    'No assistant {n} after path {p}.'.format(
                        n=kwargs[settings.SUBASSISTANT_N_STRING.format(len(path) - 1)],
                        p=path))
            previous_subas_list = subas_list

        return path

    def is_run_as_leaf(self, **kwargs):
        """Returns True if this assistant was run as last in path, False otherwise."""
        # find the last subassistant_N
        # Fix: initialize leaf_name so that calling with no subassistant_N keys in
        # kwargs returns False instead of raising NameError.
        leaf_name = None
        i = 0
        while i < len(kwargs):  # len(kwargs) is maximum of subassistant_N keys
            if settings.SUBASSISTANT_N_STRING.format(i) in kwargs:
                leaf_name = kwargs[settings.SUBASSISTANT_N_STRING.format(i)]
            i += 1
        return self.name == leaf_name

    def errors(self, **kwargs):
        """Checks whether the command is doable, also checking the arguments
        passed as kwargs. These are supposed to be non-recoverable problems,
        that will abort the whole operation.
        Errors should not be logged, only returned.

        Returns:
            list of errors as strings (empty list with no errors)
        """
        return []

    def dependencies(self, **kwargs):
        """Installs dependencies for this assistant.

        Raises:
            devassistant.exceptions.DependencyException containing the error message
        """
        pass

    def run(self, **kwargs):
        """Actually carries out the command represented by this object.

        Errors should not be logged, but only raised, they shall be logged on higher level.

        Raises:
            devassistant.exceptions.RunException containing the error message
        """
        pass
| gpl-2.0 |
mikewiebe-ansible/ansible | lib/ansible/modules/cloud/amazon/aws_ses_identity.py | 7 | 23415 | #!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: aws_ses_identity
short_description: Manages SES email and domain identity
description:
- This module allows the user to manage verified email and domain identity for SES.
- This covers verifying and removing identities as well as setting up complaint, bounce
and delivery notification settings.
version_added: "2.5"
author: Ed Costello (@orthanc)
options:
identity:
description:
- This is the email address or domain to verify / delete.
- If this contains an '@' then it will be considered an email. Otherwise it will be considered a domain.
required: true
type: str
state:
description: Whether to create(or update) or delete the identity.
default: present
choices: [ 'present', 'absent' ]
type: str
bounce_notifications:
description:
- Setup the SNS topic used to report bounce notifications.
- If omitted, bounce notifications will not be delivered to a SNS topic.
- If bounce notifications are not delivered to a SNS topic, I(feedback_forwarding) must be enabled.
suboptions:
topic:
description:
- The ARN of the topic to send notifications to.
- If omitted, notifications will not be delivered to a SNS topic.
include_headers:
description:
- Whether or not to include headers when delivering to the SNS topic.
- If I(topic) is not specified this will have no impact, but the SES setting is updated even if there is no topic.
type: bool
default: No
type: dict
complaint_notifications:
description:
- Setup the SNS topic used to report complaint notifications.
- If omitted, complaint notifications will not be delivered to a SNS topic.
- If complaint notifications are not delivered to a SNS topic, I(feedback_forwarding) must be enabled.
suboptions:
topic:
description:
- The ARN of the topic to send notifications to.
- If omitted, notifications will not be delivered to a SNS topic.
include_headers:
description:
- Whether or not to include headers when delivering to the SNS topic.
- If I(topic) is not specified this will have no impact, but the SES setting is updated even if there is no topic.
type: bool
default: No
type: dict
delivery_notifications:
description:
- Setup the SNS topic used to report delivery notifications.
- If omitted, delivery notifications will not be delivered to a SNS topic.
suboptions:
topic:
description:
- The ARN of the topic to send notifications to.
- If omitted, notifications will not be delivered to a SNS topic.
include_headers:
description:
- Whether or not to include headers when delivering to the SNS topic.
- If I(topic) is not specified this will have no impact, but the SES setting is updated even if there is no topic.
type: bool
default: No
type: dict
feedback_forwarding:
description:
- Whether or not to enable feedback forwarding.
- This can only be false if both I(bounce_notifications) and I(complaint_notifications) specify SNS topics.
type: 'bool'
default: True
requirements: [ 'botocore', 'boto3' ]
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Ensure example@example.com email identity exists
aws_ses_identity:
identity: example@example.com
state: present
- name: Delete example@example.com email identity
aws_ses_identity:
email: example@example.com
state: absent
- name: Ensure example.com domain identity exists
aws_ses_identity:
identity: example.com
state: present
# Create an SNS topic and send bounce and complaint notifications to it
# instead of emailing the identity owner
- name: Ensure complaints-topic exists
sns_topic:
name: "complaints-topic"
state: present
purge_subscriptions: False
register: topic_info
- name: Deliver feedback to topic instead of owner email
aws_ses_identity:
identity: example@example.com
state: present
complaint_notifications:
topic: "{{ topic_info.sns_arn }}"
include_headers: True
bounce_notifications:
topic: "{{ topic_info.sns_arn }}"
include_headers: False
feedback_forwarding: False
# Create an SNS topic for delivery notifications and leave complaints
# Being forwarded to the identity owner email
- name: Ensure delivery-notifications-topic exists
sns_topic:
name: "delivery-notifications-topic"
state: present
purge_subscriptions: False
register: topic_info
- name: Delivery notifications to topic
aws_ses_identity:
identity: example@example.com
state: present
delivery_notifications:
topic: "{{ topic_info.sns_arn }}"
'''
RETURN = '''
identity:
description: The identity being modified.
returned: success
type: str
sample: example@example.com
identity_arn:
description: The arn of the identity being modified.
returned: success
type: str
sample: arn:aws:ses:us-east-1:12345678:identity/example@example.com
verification_attributes:
description: The verification information for the identity.
returned: success
type: complex
sample: {
"verification_status": "Pending",
"verification_token": "...."
}
contains:
verification_status:
description: The verification status of the identity.
type: str
sample: "Pending"
verification_token:
description: The verification token for a domain identity.
type: str
notification_attributes:
description: The notification setup for the identity.
returned: success
type: complex
sample: {
"bounce_topic": "arn:aws:sns:....",
"complaint_topic": "arn:aws:sns:....",
"delivery_topic": "arn:aws:sns:....",
"forwarding_enabled": false,
"headers_in_bounce_notifications_enabled": true,
"headers_in_complaint_notifications_enabled": true,
"headers_in_delivery_notifications_enabled": true
}
contains:
bounce_topic:
description:
- The ARN of the topic bounce notifications are delivered to.
- Omitted if bounce notifications are not delivered to a topic.
type: str
complaint_topic:
description:
- The ARN of the topic complaint notifications are delivered to.
- Omitted if complaint notifications are not delivered to a topic.
type: str
delivery_topic:
description:
- The ARN of the topic delivery notifications are delivered to.
- Omitted if delivery notifications are not delivered to a topic.
type: str
forwarding_enabled:
description: Whether or not feedback forwarding is enabled.
type: bool
headers_in_bounce_notifications_enabled:
description: Whether or not headers are included in messages delivered to the bounce topic.
type: bool
headers_in_complaint_notifications_enabled:
description: Whether or not headers are included in messages delivered to the complaint topic.
type: bool
headers_in_delivery_notifications_enabled:
description: Whether or not headers are included in messages delivered to the delivery topic.
type: bool
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry, get_aws_connection_info
import time
try:
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
pass # caught by imported HAS_BOTO3
def get_verification_attributes(connection, module, identity, retries=0, retryDelay=10):
    """Fetch the SES verification attributes for *identity*.

    Retries up to *retries* extra times (sleeping *retryDelay* seconds between
    attempts) when the identity is missing from the response. Returns the
    per-identity attributes dict, or None if SES never reported the identity.
    Fails the module run on AWS API errors.
    """
    # Unpredictably get_identity_verification_attributes doesn't include the identity even when we've
    # just registered it. Suspect this is an eventual consistency issue on AWS side.
    # Don't want this complexity exposed users of the module as they'd have to retry to ensure
    # a consistent return from the module.
    # To avoid this we have an internal retry that we use only after registering the identity.
    for attempt in range(0, retries + 1):
        try:
            response = connection.get_identity_verification_attributes(Identities=[identity], aws_retry=True)
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg='Failed to retrieve identity verification attributes for {identity}'.format(identity=identity))
        identity_verification = response['VerificationAttributes']
        if identity in identity_verification:
            break
        time.sleep(retryDelay)
    if identity not in identity_verification:
        return None
    return identity_verification[identity]
def get_identity_notifications(connection, module, identity, retries=0, retryDelay=10):
    """Fetch the SES notification attributes for *identity*.

    Retries up to *retries* extra times (sleeping *retryDelay* seconds between
    attempts) when the identity is missing from the response. Returns the
    per-identity attributes dict, or None if SES never reported the identity
    (treated by callers as "settings unchanged from default"). Fails the
    module run on AWS API errors or on an unexpected identity in the response.
    """
    # Unpredictably get_identity_notifications doesn't include the notifications when we've
    # just registered the identity.
    # Don't want this complexity exposed users of the module as they'd have to retry to ensure
    # a consistent return from the module.
    # To avoid this we have an internal retry that we use only when getting the current notification
    # status for return.
    for attempt in range(0, retries + 1):
        try:
            response = connection.get_identity_notification_attributes(Identities=[identity], aws_retry=True)
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg='Failed to retrieve identity notification attributes for {identity}'.format(identity=identity))
        notification_attributes = response['NotificationAttributes']

        # No clear AWS docs on when this happens, but it appears sometimes identities are not included in
        # in the notification attributes when the identity is first registered. Suspect that this is caused by
        # eventual consistency within the AWS services. It's been observed in builds so we need to handle it.
        #
        # When this occurs, just return None and we'll assume no identity notification settings have been changed
        # from the default which is reasonable if this is just eventual consistency on creation.
        # See: https://github.com/ansible/ansible/issues/36065
        if identity in notification_attributes:
            break
        else:
            # Paranoia check for coding errors, we only requested one identity, so if we get a different one
            # something has gone very wrong.
            if len(notification_attributes) != 0:
                module.fail_json(
                    msg='Unexpected identity found in notification attributes, expected {0} but got {1!r}.'.format(
                        identity,
                        notification_attributes.keys(),
                    )
                )
        time.sleep(retryDelay)
    if identity not in notification_attributes:
        return None
    return notification_attributes[identity]
def desired_topic(module, notification_type):
    """Return the SNS topic ARN requested for *notification_type*, or None.

    Looks up the '<type>_notifications' module parameter; absent or empty
    parameter dicts (and dicts without a 'topic' key) yield None.
    """
    options = module.params.get(notification_type.lower() + '_notifications')
    return options.get('topic', None) if options else None
def update_notification_topic(connection, module, identity, identity_notifications, notification_type):
    """Ensure the SNS topic for *notification_type* matches the module params.

    Compares the current topic (from *identity_notifications*, which may be
    None) with the requested one and updates SES when they differ, honouring
    check mode. Returns True if a change was (or would be) made, else False.
    """
    topic_key = notification_type + 'Topic'
    if identity_notifications is None:
        # If there is no configuration for notifications cannot be being sent to topics
        # hence assume None as the current state.
        current = None
    elif topic_key in identity_notifications:
        current = identity_notifications[topic_key]
    else:
        # If there is information on the notifications setup but no information on the
        # particular notification topic it's pretty safe to assume there's no topic for
        # this notification. AWS API docs suggest this information will always be
        # included but best to be defensive
        current = None
    required = desired_topic(module, notification_type)
    if current != required:
        try:
            if not module.check_mode:
                # NOTE(review): when *required* is None this still calls the API with
                # SnsTopic=None to clear the topic — confirm the SES client accepts that.
                connection.set_identity_notification_topic(Identity=identity, NotificationType=notification_type, SnsTopic=required, aws_retry=True)
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg='Failed to set identity notification topic for {identity} {notification_type}'.format(
                identity=identity,
                notification_type=notification_type,
            ))
        return True
    return False
def update_notification_topic_headers(connection, module, identity, identity_notifications, notification_type):
    """Ensure the include-headers flag for *notification_type* matches the params.

    Compares the current SES setting (from *identity_notifications*, which may
    be None) with the requested 'include_headers' sub-option and updates SES
    when they differ, honouring check mode. Returns True if a change was (or
    would be) made, else False.
    """
    arg_dict = module.params.get(notification_type.lower() + '_notifications')
    header_key = 'HeadersIn' + notification_type + 'NotificationsEnabled'
    if identity_notifications is None:
        # If there is no configuration for topic notifications, headers cannot be being
        # forwarded, hence assume false.
        current = False
    elif header_key in identity_notifications:
        current = identity_notifications[header_key]
    else:
        # AWS API doc indicates that the headers in fields are optional. Unfortunately
        # it's not clear on what this means. But it's a pretty safe assumption that it means
        # headers are not included since most API consumers would interpret absence as false.
        current = False
    if arg_dict is not None and 'include_headers' in arg_dict:
        required = arg_dict['include_headers']
    else:
        # absent sub-option means headers are not requested
        required = False
    if current != required:
        try:
            if not module.check_mode:
                connection.set_identity_headers_in_notifications_enabled(Identity=identity, NotificationType=notification_type, Enabled=required,
                                                                         aws_retry=True)
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg='Failed to set identity headers in notification for {identity} {notification_type}'.format(
                identity=identity,
                notification_type=notification_type,
            ))
        return True
    return False
def update_feedback_forwarding(connection, module, identity, identity_notifications):
    """Ensure SES feedback forwarding matches the 'feedback_forwarding' param.

    Compares the current forwarding state (from *identity_notifications*,
    which may be None) with the requested one and updates SES when they
    differ, honouring check mode. Returns True if a change was (or would be)
    made, else False.
    """
    if identity_notifications is None:
        # AWS requires feedback forwarding to be enabled unless bounces and complaints
        # are being handled by SNS topics. So in the absence of identity_notifications
        # information existing feedback forwarding must be on.
        current = True
    elif 'ForwardingEnabled' in identity_notifications:
        current = identity_notifications['ForwardingEnabled']
    else:
        # If there is information on the notifications setup but no information on the
        # forwarding state it's pretty safe to assume forwarding is off. AWS API docs
        # suggest this information will always be included but best to be defensive
        current = False
    required = module.params.get('feedback_forwarding')
    if current != required:
        try:
            if not module.check_mode:
                connection.set_identity_feedback_forwarding_enabled(Identity=identity, ForwardingEnabled=required, aws_retry=True)
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg='Failed to set identity feedback forwarding for {identity}'.format(identity=identity))
        return True
    return False
def create_mock_notifications_response(module):
    """Build the notification-attributes dict implied by the module params.

    Used in check mode, where SES is never called, to synthesize the response
    the real API would return after the requested changes.
    """
    params = module.params
    response = {
        "ForwardingEnabled": params.get('feedback_forwarding'),
    }
    for kind in ('Bounce', 'Complaint', 'Delivery'):
        options = params.get(kind.lower() + '_notifications')
        if options is not None and 'topic' in options:
            response[kind + 'Topic'] = options['topic']
        # headers flag defaults to False when unspecified
        include_headers = False
        if options is not None and 'include_headers' in options:
            include_headers = options['include_headers']
        response['HeadersIn' + kind + 'NotificationsEnabled'] = include_headers
    return response
def update_identity_notifications(connection, module):
    """Apply all requested notification settings for the identity.

    Syncs the topic and include-headers settings for each notification type
    and the feedback-forwarding flag. Returns a (changed, attributes) tuple
    where attributes is the resulting notification-attributes dict (mocked in
    check mode, re-fetched with retries otherwise) or None if unavailable.
    """
    identity = module.params.get('identity')
    changed = False
    identity_notifications = get_identity_notifications(connection, module, identity)
    for notification_type in ('Bounce', 'Complaint', 'Delivery'):
        changed |= update_notification_topic(connection, module, identity, identity_notifications, notification_type)
        changed |= update_notification_topic_headers(connection, module, identity, identity_notifications, notification_type)
    changed |= update_feedback_forwarding(connection, module, identity, identity_notifications)
    if changed or identity_notifications is None:
        if module.check_mode:
            # no API calls were made, so synthesize the expected state
            identity_notifications = create_mock_notifications_response(module)
        else:
            identity_notifications = get_identity_notifications(connection, module, identity, retries=4)
    return changed, identity_notifications
def validate_params_for_identity_present(module):
    """Fail the module run when feedback forwarding is disabled without both
    bounce and complaint SNS topics configured (an AWS requirement)."""
    if module.params.get('feedback_forwarding') is not False:
        return
    if desired_topic(module, 'Bounce') and desired_topic(module, 'Complaint'):
        return
    module.fail_json(msg="Invalid Parameter Value 'False' for 'feedback_forwarding'. AWS requires "
                         "feedback forwarding to be enabled unless bounces and complaints are handled by SNS topics")
def create_or_update_identity(connection, module, region, account_id):
    """Verify the identity if needed, sync notification settings, and exit.

    Registers *identity* with SES as an email ('@' present) or domain, fails
    on identities in a bad verification state, applies the notification
    settings, and exits the module run with the resulting state.
    """
    identity = module.params.get('identity')
    changed = False
    verification_attributes = get_verification_attributes(connection, module, identity)
    if verification_attributes is None:
        try:
            if not module.check_mode:
                if '@' in identity:
                    connection.verify_email_identity(EmailAddress=identity, aws_retry=True)
                else:
                    connection.verify_domain_identity(Domain=identity, aws_retry=True)
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg='Failed to verify identity {identity}'.format(identity=identity))
        if module.check_mode:
            # nothing was registered, so synthesize the post-registration state
            verification_attributes = {
                "VerificationStatus": "Pending",
            }
        else:
            verification_attributes = get_verification_attributes(connection, module, identity, retries=4)
        changed = True
    elif verification_attributes['VerificationStatus'] not in ('Pending', 'Success'):
        module.fail_json(msg="Identity " + identity + " in bad status " + verification_attributes['VerificationStatus'],
                         verification_attributes=camel_dict_to_snake_dict(verification_attributes))
    if verification_attributes is None:
        # retries exhausted without SES reporting the identity
        module.fail_json(msg='Unable to load identity verification attributes after registering identity.')
    notifications_changed, notification_attributes = update_identity_notifications(connection, module)
    changed |= notifications_changed
    if notification_attributes is None:
        module.fail_json(msg='Unable to load identity notification attributes.')
    identity_arn = 'arn:aws:ses:' + region + ':' + account_id + ':identity/' + identity
    module.exit_json(
        changed=changed,
        identity=identity,
        identity_arn=identity_arn,
        verification_attributes=camel_dict_to_snake_dict(verification_attributes),
        notification_attributes=camel_dict_to_snake_dict(notification_attributes),
    )
def destroy_identity(connection, module):
    """Delete the identity from SES if it exists, then exit the module run.

    A missing identity is treated as already absent (changed=False).
    Honours check mode.
    """
    identity = module.params.get('identity')
    changed = False
    verification_attributes = get_verification_attributes(connection, module, identity)
    if verification_attributes is not None:
        try:
            if not module.check_mode:
                connection.delete_identity(Identity=identity, aws_retry=True)
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg='Failed to delete identity {identity}'.format(identity=identity))
        changed = True

    module.exit_json(
        changed=changed,
        identity=identity,
    )
def get_account_id(module):
    """Return the AWS account id for the credentials in use, via STS.

    Fails the module run if the caller identity cannot be retrieved.
    """
    sts_client = module.client('sts')
    try:
        identity_info = sts_client.get_caller_identity()
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg='Failed to retrieve caller identity')
    return identity_info['Account']
def main():
    """Entry point: parse module arguments and create/update or delete the identity."""
    module = AnsibleAWSModule(
        argument_spec={
            "identity": dict(required=True, type='str'),
            "state": dict(default='present', choices=['present', 'absent']),
            "bounce_notifications": dict(type='dict'),
            "complaint_notifications": dict(type='dict'),
            "delivery_notifications": dict(type='dict'),
            "feedback_forwarding": dict(default=True, type='bool'),
        },
        supports_check_mode=True,
    )

    # The notification dicts have no suboption spec above, so reject unknown
    # keys here by hand.
    for notification_type in ('bounce', 'complaint', 'delivery'):
        param_name = notification_type + '_notifications'
        arg_dict = module.params.get(param_name)
        if arg_dict:
            extra_keys = [x for x in arg_dict.keys() if x not in ('topic', 'include_headers')]
            if extra_keys:
                module.fail_json(msg='Unexpected keys ' + str(extra_keys) + ' in ' + param_name + ' valid keys are topic or include_headers')

    # SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs.
    # Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but
    # the ansible build runs multiple instances of the test in parallel that's caused throttling
    # failures so apply a jittered backoff to call SES calls.
    connection = module.client('ses', retry_decorator=AWSRetry.jittered_backoff())

    state = module.params.get("state")

    if state == 'present':
        region = get_aws_connection_info(module, boto3=True)[0]
        account_id = get_account_id(module)
        validate_params_for_identity_present(module)
        create_or_update_identity(connection, module, region, account_id)
    else:
        destroy_identity(connection, module)


if __name__ == '__main__':
    main()
| gpl-3.0 |
alexthered/kienhoc-platform | lms/djangoapps/courseware/management/commands/tests/test_dump_course.py | 44 | 9075 | # coding=utf-8
"""Tests for Django management commands"""
import json
from nose.plugins.attrib import attr
from path import Path as path
import shutil
from StringIO import StringIO
import tarfile
from tempfile import mkdtemp
import factory
from django.conf import settings
from django.core.management import call_command
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, mixed_store_config
from xmodule.modulestore.tests.django_utils import (
TEST_DATA_MONGO_MODULESTORE, TEST_DATA_SPLIT_MODULESTORE
)
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.xml_importer import import_course_from_xml
DATA_DIR = settings.COMMON_TEST_DATA_ROOT
XML_COURSE_DIRS = ['toy', 'simple', 'open_ended']
MAPPINGS = {
'edX/toy/2012_Fall': 'xml',
'edX/simple/2012_Fall': 'xml',
'edX/open_ended/2012_Fall': 'xml',
}
TEST_DATA_MIXED_XML_MODULESTORE = mixed_store_config(
DATA_DIR, MAPPINGS, include_xml=True, xml_source_dirs=XML_COURSE_DIRS,
)
@attr('shard_1')
class CommandsTestBase(ModuleStoreTestCase):
    """
    Base class for testing different django commands.

    Must be subclassed using override_settings set to the modulestore
    to be tested.
    """
    __test__ = False
    url_name = '2012_Fall'

    def setUp(self):
        super(CommandsTestBase, self).setUp()
        self.test_course_key = modulestore().make_course_key("edX", "simple", "2012_Fall")
        self.loaded_courses = self.load_courses()

    def load_courses(self):
        """Load test courses and return list of ids"""
        store = modulestore()

        # Add a course with a unicode name.
        unique_org = factory.Sequence(lambda n: u'ëḋẌ.%d' % n)
        CourseFactory.create(
            org=unique_org,
            course=u'śíḿṕĺé',
            display_name=u'2012_Fáĺĺ',
            modulestore=store
        )

        courses = store.get_courses()
        # NOTE: if xml store owns these, it won't import them into mongo
        if self.test_course_key not in [c.id for c in courses]:
            import_course_from_xml(
                store, ModuleStoreEnum.UserID.mgmt_command, DATA_DIR, XML_COURSE_DIRS, create_if_not_present=True
            )

        return [course.id for course in store.get_courses()]

    def call_command(self, name, *args, **kwargs):
        """Call management command and return output"""
        out = StringIO()  # To Capture the output of the command
        call_command(name, *args, stdout=out, **kwargs)
        out.seek(0)
        return out.read()

    def test_dump_course_ids(self):
        """dump_course_ids should emit exactly the loaded course ids, one per line."""
        kwargs = {'modulestore': 'default'}
        output = self.call_command('dump_course_ids', **kwargs)
        dumped_courses = output.decode('utf-8').strip().split('\n')
        course_ids = {unicode(course_id) for course_id in self.loaded_courses}
        dumped_ids = set(dumped_courses)
        self.assertEqual(course_ids, dumped_ids)

    def test_correct_course_structure_metadata(self):
        """dump_course_structure should succeed and produce a non-empty dump."""
        course_id = unicode(modulestore().make_course_key('edX', 'open_ended', '2012_Fall'))
        args = [course_id]
        kwargs = {'modulestore': 'default'}

        try:
            output = self.call_command('dump_course_structure', *args, **kwargs)
        except TypeError, exception:
            self.fail(exception)

        dump = json.loads(output)
        self.assertGreater(len(dump.values()), 0)

    def test_dump_course_structure(self):
        """Check categories, children and metadata of a dumped course."""
        args = [unicode(self.test_course_key)]
        kwargs = {'modulestore': 'default'}
        output = self.call_command('dump_course_structure', *args, **kwargs)

        dump = json.loads(output)

        # check that all elements in the course structure have metadata,
        # but not inherited metadata:
        for element in dump.itervalues():
            self.assertIn('metadata', element)
            self.assertIn('children', element)
            self.assertIn('category', element)
            self.assertNotIn('inherited_metadata', element)

        # Check a few elements in the course dump
        test_course_key = self.test_course_key
        parent_id = unicode(test_course_key.make_usage_key('chapter', 'Overview'))
        self.assertEqual(dump[parent_id]['category'], 'chapter')
        self.assertEqual(len(dump[parent_id]['children']), 3)

        child_id = dump[parent_id]['children'][1]
        self.assertEqual(dump[child_id]['category'], 'videosequence')
        self.assertEqual(len(dump[child_id]['children']), 2)

        video_id = unicode(test_course_key.make_usage_key('video', 'Welcome'))
        self.assertEqual(dump[video_id]['category'], 'video')
        self.assertItemsEqual(
            dump[video_id]['metadata'].keys(),
            ['download_video', 'youtube_id_0_75', 'youtube_id_1_0', 'youtube_id_1_25', 'youtube_id_1_5']
        )
        self.assertIn('youtube_id_1_0', dump[video_id]['metadata'])

        # Check if there are the right number of elements
        self.assertEqual(len(dump), 16)

    def test_dump_inherited_course_structure(self):
        """With --inherited, every element carries non-default inherited metadata."""
        args = [unicode(self.test_course_key)]
        kwargs = {'modulestore': 'default', 'inherited': True}
        output = self.call_command('dump_course_structure', *args, **kwargs)
        dump = json.loads(output)
        # check that all elements in the course structure have inherited metadata,
        # and that it contains a particular value as well:
        for element in dump.itervalues():
            self.assertIn('metadata', element)
            self.assertIn('children', element)
            self.assertIn('category', element)
            self.assertIn('inherited_metadata', element)
            self.assertIsNone(element['inherited_metadata']['ispublic'])
            # ... but does not contain inherited metadata containing a default value:
            self.assertNotIn('due', element['inherited_metadata'])

    def test_dump_inherited_course_structure_with_defaults(self):
        """With --inherited_defaults, default-valued inherited metadata is included too."""
        args = [unicode(self.test_course_key)]
        kwargs = {'modulestore': 'default', 'inherited': True, 'inherited_defaults': True}
        output = self.call_command('dump_course_structure', *args, **kwargs)
        dump = json.loads(output)
        # check that all elements in the course structure have inherited metadata,
        # and that it contains a particular value as well:
        for element in dump.itervalues():
            self.assertIn('metadata', element)
            self.assertIn('children', element)
            self.assertIn('category', element)
            self.assertIn('inherited_metadata', element)
            self.assertIsNone(element['inherited_metadata']['ispublic'])
            # ... and contains inherited metadata containing a default value:
            self.assertIsNone(element['inherited_metadata']['due'])

    def test_export_course(self):
        """export_course to a file should produce a valid tarball."""
        tmp_dir = path(mkdtemp())
        self.addCleanup(shutil.rmtree, tmp_dir)
        filename = tmp_dir / 'test.tar.gz'
        self.run_export_course(filename)

        with tarfile.open(filename) as tar_file:
            self.check_export_file(tar_file)

    def test_export_course_stdout(self):
        """export_course to '-' should write the tarball to stdout."""
        output = self.run_export_course('-')
        with tarfile.open(fileobj=StringIO(output)) as tar_file:
            self.check_export_file(tar_file)

    def run_export_course(self, filename):  # pylint: disable=missing-docstring
        args = [unicode(self.test_course_key), filename]
        kwargs = {'modulestore': 'default'}
        return self.call_command('export_course', *args, **kwargs)

    def check_export_file(self, tar_file):  # pylint: disable=missing-docstring
        names = tar_file.getnames()

        # Check if some of the files are present.

        # The rest is of the code should be covered by the tests for
        # xmodule.modulestore.xml_exporter, used by the dump_course command

        assert_in = self.assertIn
        assert_in('edX-simple-2012_Fall', names)
        assert_in('edX-simple-2012_Fall/policies/{}/policy.json'.format(self.url_name), names)
        assert_in('edX-simple-2012_Fall/html/toylab.html', names)
        assert_in('edX-simple-2012_Fall/videosequence/A_simple_sequence.xml', names)
        assert_in('edX-simple-2012_Fall/sequential/Lecture_2.xml', names)
class CommandsXMLTestCase(CommandsTestBase):
    """
    Run the management-command tests against the mixed XML modulestore.
    """
    MODULESTORE = TEST_DATA_MIXED_XML_MODULESTORE
    __test__ = True  # concrete subclass: enable collection of the inherited tests
class CommandsMongoTestCase(CommandsTestBase):
    """
    Run the management-command tests against the mixed mongo modulestore
    with old mongo as the default store.
    """
    MODULESTORE = TEST_DATA_MONGO_MODULESTORE
    __test__ = True  # concrete subclass: enable collection of the inherited tests
class CommandSplitMongoTestCase(CommandsTestBase):
    """
    Run the management-command tests against the mixed mongo modulestore
    with split mongo as the default store.
    """
    MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
    __test__ = True  # concrete subclass: enable collection of the inherited tests
    url_name = 'course'  # used by check_export_file for the policies/.../policy.json path
| agpl-3.0 |
ns950/calibre | src/calibre/db/delete_service.py | 14 | 5468 | #!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import os, tempfile, shutil, errno, time, atexit
from threading import Thread
from Queue import Queue
from calibre.ptempfile import remove_dir
from calibre.utils.filenames import remove_dir_if_empty
from calibre.utils.recycle_bin import delete_tree, delete_file
class DeleteService(Thread):
    ''' Provide a blocking file delete implementation with support for the
    recycle bin. On windows, deleting files to the recycle bin spins the event
    loop, which can cause locking errors in the main thread. We get around this
    by only moving the files/folders to be deleted out of the library in the
    main thread, they are deleted to recycle bin in a separate worker thread.
    This has the added advantage that doing a restore from the recycle bin wont
    cause metadata.db and the file system to get out of sync. Also, deleting
    becomes much faster, since in the common case, the move is done by a simple
    os.rename(). The downside is that if the user quits calibre while a long
    move to recycle bin is happening, the files may not all be deleted.'''
    # Daemon thread: must not keep the interpreter alive at exit.
    daemon = True
    def __init__(self):
        Thread.__init__(self)
        # Queue of staging directories waiting to be sent to the recycle bin.
        # A None entry is the sentinel that tells run() to exit.
        self.requests = Queue()
    def shutdown(self, timeout=20):
        # Ask the worker loop to exit and wait up to `timeout' seconds for it.
        self.requests.put(None)
        self.join(timeout)
    def create_staging(self, library_path):
        # Create a staging directory, preferably as a sibling of the library
        # (so moving into it is a cheap same-filesystem rename), falling back
        # to the system temp location if that fails.
        base_path = os.path.dirname(library_path)
        base = os.path.basename(library_path)
        try:
            ans = tempfile.mkdtemp(prefix=base+' deleted ', dir=base_path)
        except OSError:
            ans = tempfile.mkdtemp(prefix=base+' deleted ')
        # Best-effort cleanup if the process exits before the worker runs.
        atexit.register(remove_dir, ans)
        return ans
    def remove_dir_if_empty(self, path):
        # Remove `path' if it is an empty directory; silently ignore the
        # "not empty" case, re-raise anything else.
        try:
            os.rmdir(path)
        except OSError as e:
            if e.errno == errno.ENOTEMPTY or len(os.listdir(path)) > 0:
                # Some linux systems appear to raise an EPERM instead of an
                # ENOTEMPTY, see https://bugs.launchpad.net/bugs/1240797
                return
            raise
    def delete_books(self, paths, library_path):
        # Stage book directories for deletion; empty parent (author)
        # directories are pruned as well.
        tdir = self.create_staging(library_path)
        self.queue_paths(tdir, paths, delete_empty_parent=True)
    def queue_paths(self, tdir, paths, delete_empty_parent=True):
        try:
            self._queue_paths(tdir, paths, delete_empty_parent=delete_empty_parent)
        except:
            # On any failure, clean up the staging directory before
            # propagating the error to the caller.
            if os.path.exists(tdir):
                shutil.rmtree(tdir, ignore_errors=True)
            raise
    def _queue_paths(self, tdir, paths, delete_empty_parent=True):
        # Move every existing path into `tdir' and enqueue `tdir' for the
        # worker thread. Non-existent paths are skipped.
        requests = []
        for path in paths:
            if os.path.exists(path):
                basename = os.path.basename(path)
                c = 0
                # Find a destination name that does not collide with anything
                # already staged in tdir.
                while True:
                    dest = os.path.join(tdir, basename)
                    if not os.path.exists(dest):
                        break
                    c += 1
                    basename = '%d - %s' % (c, os.path.basename(path))
                try:
                    shutil.move(path, dest)
                except EnvironmentError:
                    if os.path.isdir(path):
                        # shutil.move may have partially copied the directory,
                        # so the subsequent call to move() will fail as the
                        # destination directory already exists
                        raise
                    # Wait a little in case something has locked a file
                    time.sleep(1)
                    shutil.move(path, dest)
                if delete_empty_parent:
                    remove_dir_if_empty(os.path.dirname(path), ignore_metadata_caches=True)
                requests.append(dest)
        if not requests:
            # Nothing was staged: drop the (empty) staging directory instead
            # of queueing it for the worker.
            remove_dir_if_empty(tdir)
        else:
            self.requests.put(tdir)
    def delete_files(self, paths, library_path):
        # Stage individual files for deletion; parents are left alone.
        tdir = self.create_staging(library_path)
        self.queue_paths(tdir, paths, delete_empty_parent=False)
    def run(self):
        # Worker loop: pop staging directories and recycle them until the
        # None sentinel arrives. Errors are printed, never propagated, so the
        # worker keeps servicing subsequent requests.
        while True:
            x = self.requests.get()
            try:
                if x is None:
                    break
                try:
                    self.do_delete(x)
                except:
                    import traceback
                    traceback.print_exc()
            finally:
                self.requests.task_done()
    def wait(self):
        'Blocks until all pending deletes have completed'
        self.requests.join()
    def do_delete(self, tdir):
        # Send every entry inside `tdir' to the recycle bin, then remove
        # `tdir' itself (even if a recycle call fails).
        if os.path.exists(tdir):
            try:
                for x in os.listdir(tdir):
                    x = os.path.join(tdir, x)
                    if os.path.isdir(x):
                        delete_tree(x)
                    else:
                        delete_file(x)
            finally:
                shutil.rmtree(tdir)
# Lazily-created, module-wide DeleteService singleton.
__ds = None
def delete_service():
    '''Return the shared DeleteService, starting it on first use.'''
    global __ds
    if __ds is not None:
        return __ds
    service = DeleteService()
    service.start()
    __ds = service
    return __ds
def shutdown(timeout=20):
    '''Stop the shared DeleteService (if running) and forget it.'''
    global __ds
    if __ds is None:
        return
    __ds.shutdown(timeout)
    __ds = None
def has_jobs():
    '''Return a truthy value when deletes are queued or still in progress.'''
    global __ds
    if __ds is None:
        return False
    pending = __ds.requests
    return (not pending.empty()) or pending.unfinished_tasks
| gpl-3.0 |
proxysh/Safejumper-for-Mac | buildlinux/env32/lib/python2.7/site-packages/obfsproxy/transports/scramblesuit/util.py | 12 | 5294 | """
This module implements several commonly used utility functions.
The implemented functions can be used to swap variables, write and read data
from files and to convert a number to raw text.
"""
import obfsproxy.common.log as logging
import os
import time
import const
import mycrypto
log = logging.get_obfslogger()
def setStateLocation( stateLocation ):
    """
    Set the constant `STATE_LOCATION' to the given `stateLocation'.

    The variable `stateLocation' determines where persistent information (such
    as the server's key material) is stored.  If `stateLocation' is `None', it
    remains to be the current directory.  In general, however, it should be a
    subdirectory of Tor's data directory.
    """

    if stateLocation is None:
        return

    # Normalise to a trailing slash so plain concatenation yields valid paths.
    location = stateLocation if stateLocation.endswith('/') else stateLocation + '/'

    # To be polite, keep our data inside a transport-specific subdirectory.
    location += (const.TRANSPORT_NAME).lower() + '/'

    # Create the full directory path if it does not exist yet.
    if not os.path.exists(location):
        log.info("Creating directory path `%s'." % location)
        os.makedirs(location)

    log.debug("Setting the state location to `%s'." % location)
    const.STATE_LOCATION = location
def isValidHMAC( hmac1, hmac2, key ):
    """
    Compares `hmac1' and `hmac2' after HMACing them again using `key'.

    Returns `True' if the two arguments are equal and `False' otherwise.  To
    prevent timing attacks, double HMAC verification is used: both arguments
    are HMACed again before the (variable-time) string comparison, so the
    comparison leaks nothing about the original HMACs.  The idea is taken
    from:
    https://www.isecpartners.com/blog/2011/february/double-hmac-verification.aspx
    """

    assert len(hmac1) == len(hmac2)

    # Re-HMAC both values; compare the derived values instead of the originals.
    if mycrypto.HMAC_SHA256_128(key, hmac1) != mycrypto.HMAC_SHA256_128(key, hmac2):
        return False

    log.debug("The computed HMAC is valid.")

    return True
def locateMark( mark, payload ):
    """
    Locate the given `mark' in `payload' and return its index.

    The `mark' is placed before the HMAC of a ScrambleSuit authentication
    mechanism and makes it possible to efficiently locate the HMAC.  If the
    `mark' could not be found (or the HMAC after it is not yet complete),
    `None' is returned.
    """

    # The mark can only sit within the padding window at the payload's start.
    searchWindow = const.MAX_PADDING_LENGTH + const.MARK_LENGTH
    index = payload.find(mark, 0, searchWindow)
    if index < 0:
        log.debug("Could not find the mark just yet.")
        return None

    # Make sure the full HMAC already follows the mark.
    trailingBytes = len(payload) - index - const.MARK_LENGTH
    if trailingBytes < const.HMAC_SHA256_128_LENGTH:
        log.debug("Found the mark but the HMAC is still incomplete.")
        return None

    log.debug("Successfully located the mark.")

    return index
def getEpoch( ):
    """
    Return the Unix epoch divided by a constant as string.

    This function returns a coarse-grained version of the Unix epoch.  The
    seconds passed since the epoch are divided by the constant
    `EPOCH_GRANULARITY'.
    """

    # Use explicit floor division: identical to `/' on ints under Python 2,
    # but still an integer (not a float) if ever run under Python 3.
    return str(int(time.time()) // const.EPOCH_GRANULARITY)
def expandedEpoch( ):
    """
    Return [epoch, epoch-1, epoch+1].
    """

    epoch = int(getEpoch())

    return [str(value) for value in (epoch, epoch - 1, epoch + 1)]
def writeToFile( data, fileName ):
    """
    Writes the given `data' to the file specified by `fileName'.

    If an error occurs, the function logs an error message but does not throw
    an exception or return an error code.
    """

    log.debug("Opening `%s' for writing." % fileName)

    try:
        desc = open(fileName, "wb")
        try:
            desc.write(data)
        finally:
            desc.close()
    except IOError as err:
        log.error("Error writing to `%s': %s." % (fileName, err))
def readFromFile( fileName, length=-1 ):
    """
    Read `length' amount of bytes from the given `fileName'

    If `length' equals -1 (the default), the entire file is read and the
    content returned.  If an error occurs, the function logs an error message
    but does not throw an exception or return an error code.  Returns `None'
    when the file does not exist or could not be read.
    """

    if not os.path.exists(fileName):
        log.debug("File `%s' does not exist (yet?)." % fileName)
        return None

    log.debug("Opening `%s' for reading." % fileName)

    data = None
    try:
        with open(fileName, "rb") as desc:
            data = desc.read(length)
    except IOError as err:
        log.error("Error reading from `%s': %s." % (fileName, err))

    return data
def sanitiseBase32( data ):
    """
    Try to sanitise a Base32 string if it's slightly wrong.

    ScrambleSuit's shared secret might be distributed verbally which could
    cause mistakes.  This function fixes simple mistakes, e.g., when a user
    noted "1" rather than "I".
    """

    data = data.upper()

    # Map easily-confused digits to the Base32 letters they were meant to be.
    for wrong, right in (("1", "I"), ("0", "O")):
        if wrong in data:
            log.info("Found a \"%s\" in Base32-encoded \"%s\". Assuming " \
                     "it's actually \"%s\"." % (wrong, data, right))
            data = data.replace(wrong, right)

    return data
| gpl-2.0 |
mollstam/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Twisted-15.2.1/twisted/test/test_doc.py | 8 | 3681 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
import inspect, glob
from os import path
from twisted.trial import unittest
from twisted.python import reflect
from twisted.python.modules import getModule
def errorInFile(f, line=17, name=''):
    """
    Return a filename formatted so emacs will recognize it as an error point

    @param line: Line number in file. Defaults to 17 because that's about how
        long the copyright headers are.
    """
    return ':'.join([f, '%d' % (line,), name])
class DocCoverageTests(unittest.TestCase):
    """
    Looking for docstrings in all modules and packages.
    """
    def setUp(self):
        # Collect the dotted names of every package under twisted; the tests
        # walk this list looking for missing docstrings.
        self.packageNames = []
        for mod in getModule('twisted').walkModules():
            if mod.isPackage():
                self.packageNames.append(mod.name)
    def testModules(self):
        """
        Looking for docstrings in all modules.
        """
        docless = []
        for packageName in self.packageNames:
            if packageName in ('twisted.test',):
                # because some stuff in here behaves oddly when imported
                continue
            try:
                package = reflect.namedModule(packageName)
            except ImportError:
                # This is testing doc coverage, not importability.
                # (Really, I don't want to deal with the fact that I don't
                # have pyserial installed.)
                # print e
                pass
            else:
                docless.extend(self.modulesInPackage(packageName, package))
        self.failIf(docless, "No docstrings in module files:\n"
                    "%s" % ('\n'.join(map(errorInFile, docless)),))
    def modulesInPackage(self, packageName, package):
        # Return the list of .py files in `package' whose modules import
        # cleanly but lack a docstring; modules that fail to import are
        # skipped (importability is not what this test checks).
        docless = []
        directory = path.dirname(package.__file__)
        for modfile in glob.glob(path.join(directory, '*.py')):
            moduleName = inspect.getmodulename(modfile)
            if moduleName == '__init__':
                # These are tested by test_packages.
                continue
            elif moduleName in ('spelunk_gnome','gtkmanhole'):
                # argh special case pygtk evil argh. How does epydoc deal
                # with this?
                continue
            try:
                module = reflect.namedModule('.'.join([packageName,
                                                       moduleName]))
            except Exception:
                # print moduleName, "misbehaved:", e
                pass
            else:
                if not inspect.getdoc(module):
                    docless.append(modfile)
        return docless
    def testPackages(self):
        """
        Looking for docstrings in all packages.
        """
        docless = []
        for packageName in self.packageNames:
            try:
                package = reflect.namedModule(packageName)
            except Exception:
                # This is testing doc coverage, not importability.
                # (Really, I don't want to deal with the fact that I don't
                # have pyserial installed.)
                # print e
                pass
            else:
                if not inspect.getdoc(package):
                    # Report the .py source path, not the compiled .pyc.
                    docless.append(package.__file__.replace('.pyc','.py'))
        self.failIf(docless, "No docstrings for package files\n"
                    "%s" % ('\n'.join(map(errorInFile, docless),)))
    # This test takes a while and doesn't come close to passing. :(
    testModules.skip = "Activate me when you feel like writing docstrings, and fixing GTK crashing bugs."
| mit |
accpy/accpy | accpy/dataio/hdf5.py | 1 | 3590 | # -*- coding: utf-8 -*-
''' accpy.dataio.hdf5
author:
Felix Kramer
'''
from h5py import File as h5pyFile
from h5py._hl.group import Group
from h5py._hl.dataset import Dataset
from time import strftime
from numpy import ndarray
class struct(object):
    """Lightweight record type: keyword arguments become attributes."""
    def __init__(self, **entries):
        for attribute, value in entries.items():
            setattr(self, attribute, value)
def h5save(filename, datadict, timestamp=True):
    ''' save dataset to hdf5 format (for load see print(h5load.__doc__))
    input:
        - desired (path/)filename as string
        - dictionary of data
        - timestamp: if True (default), prepend the current date and time
          to the file name
    return:
        - saves data to "(path/)timestamp_filename.hdf5"
        - complete (path/)filename is returned
    usage-example:
        datadict = {'dataset1' : {'x': array(...), 'y': array(...)},
                    'dataset2' : {'x': array(...), 'y': array(...), 'yerr': array(...)},
                    'parameter1' : 1.337,
                    'list1' : [1, 2, 'c']}
        h5save(filename, datadict)
    '''
    def dict2h5(datadict, h5id):
        # Recursively write `datadict' into the given h5py file/group:
        # plain values become datasets, nested dicts become sub-groups.
        for key, val in datadict.items():
            # '/' is the HDF5 path separator, so escape it in key names.
            if isinstance(key, bytes):
                key = key.decode().replace('/', '|')
            else:
                key = key.replace('/', '|')
            if isinstance(val, (list, tuple, str, bytes, int, float, ndarray)):
                h5id.create_dataset(key, data=val)
            elif isinstance(val, (dict)):
                hdf5_subid = h5id.create_group(key)
                dict2h5(val, hdf5_subid)
            else:
                raise Exception('Data of type {} is not yet supported, sorry for that!'.format(type(val)))
        return
    if timestamp:
        # Prepend 'YYYYmmddHHMMSS_' to the base name, keeping the directory part.
        path = '/'.join(filename.split('/')[:-1] + [''])
        filename = strftime('%Y%m%d%H%M%S') + '_' + filename.split('/')[-1]
        filename = path + filename
    if filename[-5:] != '.hdf5':
        filename += '.hdf5'
    hdf5_fid = h5pyFile(filename, 'w')
    dict2h5(datadict, hdf5_fid)
    hdf5_fid.close()
    return filename
def h5load(filename):
    ''' load a file saved with h5save back into a nested dictionary
    input:
        - filename (as string) of h5save saved file
          ('.hdf5' is appended if missing)
    return:
        - dictionary of saved data
    ALTERNATIVE:
        if the dataset is too large for memory it is also possible to work with it on disk:
        >>> import h5py
        >>> data = h5py.File(filename, 'r')
    '''
    def h52dict(h5id, datadict):
        # Recursively mirror the HDF5 hierarchy into `datadict':
        # datasets become values, groups become nested dictionaries.
        for key, val in h5id.items():
            if isinstance(val, (Dataset)):
                datadict[key] = h5id[key][()]
            elif isinstance(val, (Group)):
                datadict[key] = {}
                h52dict(h5id[key], datadict[key])
            else:
                raise Exception('Data of type {} is not yet supported, sorry for that!'.format(type(val)))
        return
    if filename[-5:] != '.hdf5':
        filename += '.hdf5'
    data = {}
    hdf5_fid = h5pyFile(filename, 'r')
    h52dict(hdf5_fid, data)
    hdf5_fid.close()
    return data
def confsave(filename, listofvars, listofvals):
    # Two parallel lists are used instead of a dict because dicts do not
    # accept numpy arrays as values here; the variable-name list itself is
    # stored under 'listofvars' so confload can recover the pairing.
    fid = h5pyFile(filename, 'w')
    fid.create_dataset('listofvars', data=listofvars)
    for name, value in zip(listofvars, listofvals):
        fid.create_dataset(name, data=value)
    fid.close()
fid = h5pyFile(filename, 'r')
listofvars = list(fid['listofvars'][()])
listofvals = []
for var in listofvars:
listofvals.append(fid[var][()])
fid.close()
return listofvars, listofvals
| gpl-3.0 |
havard024/prego | venv/lib/python2.7/site-packages/django/utils/dates.py | 488 | 2237 | "Commonly-used date structures"
from django.utils.translation import ugettext_lazy as _, pgettext_lazy
# Full weekday names, keyed by weekday number (0=Monday .. 6=Sunday),
# lazily translated.
WEEKDAYS = {
    0:_('Monday'), 1:_('Tuesday'), 2:_('Wednesday'), 3:_('Thursday'), 4:_('Friday'),
    5:_('Saturday'), 6:_('Sunday')
}
# Abbreviated weekday names, same keys as WEEKDAYS, lazily translated.
WEEKDAYS_ABBR = {
    0:_('Mon'), 1:_('Tue'), 2:_('Wed'), 3:_('Thu'), 4:_('Fri'),
    5:_('Sat'), 6:_('Sun')
}
# Reverse map: lowercase English weekday name -> weekday number.
WEEKDAYS_REV = {
    'monday':0, 'tuesday':1, 'wednesday':2, 'thursday':3, 'friday':4,
    'saturday':5, 'sunday':6
}
# Full month names, keyed by month number (1-12), lazily translated.
MONTHS = {
    1:_('January'), 2:_('February'), 3:_('March'), 4:_('April'), 5:_('May'), 6:_('June'),
    7:_('July'), 8:_('August'), 9:_('September'), 10:_('October'), 11:_('November'),
    12:_('December')
}
# Lowercase three-letter month names, keyed by month number, lazily translated.
MONTHS_3 = {
    1:_('jan'), 2:_('feb'), 3:_('mar'), 4:_('apr'), 5:_('may'), 6:_('jun'),
    7:_('jul'), 8:_('aug'), 9:_('sep'), 10:_('oct'), 11:_('nov'), 12:_('dec')
}
# Reverse map: lowercase three-letter English month name -> month number.
MONTHS_3_REV = {
    'jan':1, 'feb':2, 'mar':3, 'apr':4, 'may':5, 'jun':6, 'jul':7, 'aug':8,
    'sep':9, 'oct':10, 'nov':11, 'dec':12
}
MONTHS_AP = { # month names in Associated Press style
    1: pgettext_lazy('abbrev. month', 'Jan.'),
    2: pgettext_lazy('abbrev. month', 'Feb.'),
    3: pgettext_lazy('abbrev. month', 'March'),
    4: pgettext_lazy('abbrev. month', 'April'),
    5: pgettext_lazy('abbrev. month', 'May'),
    6: pgettext_lazy('abbrev. month', 'June'),
    7: pgettext_lazy('abbrev. month', 'July'),
    8: pgettext_lazy('abbrev. month', 'Aug.'),
    9: pgettext_lazy('abbrev. month', 'Sept.'),
    10: pgettext_lazy('abbrev. month', 'Oct.'),
    11: pgettext_lazy('abbrev. month', 'Nov.'),
    12: pgettext_lazy('abbrev. month', 'Dec.')
}
MONTHS_ALT = { # required for long date representation by some locales
    1: pgettext_lazy('alt. month', 'January'),
    2: pgettext_lazy('alt. month', 'February'),
    3: pgettext_lazy('alt. month', 'March'),
    4: pgettext_lazy('alt. month', 'April'),
    5: pgettext_lazy('alt. month', 'May'),
    6: pgettext_lazy('alt. month', 'June'),
    7: pgettext_lazy('alt. month', 'July'),
    8: pgettext_lazy('alt. month', 'August'),
    9: pgettext_lazy('alt. month', 'September'),
    10: pgettext_lazy('alt. month', 'October'),
    11: pgettext_lazy('alt. month', 'November'),
    12: pgettext_lazy('alt. month', 'December')
}
| mit |
jaruba/chromium.src | build/android/gyp/lint.py | 40 | 6987 | #!/usr/bin/env python
#
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs Android's lint tool."""
import optparse
import os
import sys
from xml.dom import minidom
from util import build_utils
_SRC_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', '..', '..'))
def _RunLint(lint_path, config_path, processed_config_path, manifest_path,
             result_path, product_dir, sources, jar_path):
  """Runs Android lint over the given sources and prints issues to stderr.

  Always returns 0: lint findings are reported but never fail the build.
  """
  def _RelativizePath(path):
    """Returns relative path to top-level src dir.
    Args:
      path: A path relative to cwd.
    """
    return os.path.relpath(os.path.abspath(path), _SRC_ROOT)
  def _ProcessConfigFile():
    # Regenerate the processed suppressions file only when the source config
    # is newer, substituting the real product dir for the PRODUCT_DIR token.
    if not build_utils.IsTimeStale(processed_config_path, [config_path]):
      return
    with open(config_path, 'rb') as f:
      content = f.read().replace(
          'PRODUCT_DIR', _RelativizePath(product_dir))
    with open(processed_config_path, 'wb') as f:
      f.write(content)
  def _ProcessResultFile():
    # Substitute the product dir back with the PRODUCT_DIR placeholder so the
    # stored result file is machine-independent.
    with open(result_path, 'rb') as f:
      content = f.read().replace(
          _RelativizePath(product_dir), 'PRODUCT_DIR')
    with open(result_path, 'wb') as f:
      f.write(content)
  def _ParseAndShowResultFile():
    # Pretty-print each issue from lint's XML output to stderr; returns the
    # number of issues found.
    dom = minidom.parse(result_path)
    issues = dom.getElementsByTagName('issue')
    print >> sys.stderr
    for issue in issues:
      issue_id = issue.attributes['id'].value
      message = issue.attributes['message'].value
      location_elem = issue.getElementsByTagName('location')[0]
      path = location_elem.attributes['file'].value
      line = location_elem.getAttribute('line')
      if line:
        error = '%s:%s %s: %s [warning]' % (path, line, message, issue_id)
      else:
        # Issues in class files don't have a line number.
        error = '%s %s: %s [warning]' % (path, message, issue_id)
      print >> sys.stderr, error
      for attr in ['errorLine1', 'errorLine2']:
        error_line = issue.getAttribute(attr)
        if error_line:
          print >> sys.stderr, error_line
    return len(issues)
  with build_utils.TempDir() as temp_dir:
    _ProcessConfigFile()
    cmd = [
        _RelativizePath(lint_path), '-Werror', '--exitcode', '--showall',
        '--config', _RelativizePath(processed_config_path),
        '--classpath', _RelativizePath(jar_path),
        '--xml', _RelativizePath(result_path),
    ]
    # There may be multiple source files with the same basename (but in
    # different directories). It is difficult to determine what part of the path
    # corresponds to the java package, and so instead just link the source files
    # into temporary directories (creating a new one whenever there is a name
    # conflict).
    src_dirs = []
    def NewSourceDir():
      # Create a fresh temp source dir and register it on the lint command line.
      new_dir = os.path.join(temp_dir, str(len(src_dirs)))
      os.mkdir(new_dir)
      src_dirs.append(new_dir)
      cmd.extend(['--sources', _RelativizePath(new_dir)])
      return new_dir
    def PathInDir(d, src):
      return os.path.join(d, os.path.basename(src))
    for src in sources:
      # Pick the first existing dir without a basename collision, or make one.
      src_dir = None
      for d in src_dirs:
        if not os.path.exists(PathInDir(d, src)):
          src_dir = d
          break
      if not src_dir:
        src_dir = NewSourceDir()
      os.symlink(os.path.abspath(src), PathInDir(src_dir, src))
    cmd.append(_RelativizePath(os.path.join(manifest_path, os.pardir)))
    if os.path.exists(result_path):
      os.remove(result_path)
    try:
      build_utils.CheckOutput(cmd, cwd=_SRC_ROOT)
    except build_utils.CalledProcessError as e:
      # Lint exited non-zero: either a usage problem (no result file was
      # written) or genuine findings (result file exists).
      # There is a problem with lint usage
      if not os.path.exists(result_path):
        print 'Something is wrong:'
        print e
        return 0
      # There are actual lint issues
      else:
        try:
          num_issues = _ParseAndShowResultFile()
        except Exception:
          print 'Lint created unparseable xml file...'
          print 'File contents:'
          with open(result_path) as f:
            print f.read()
          return 0
        _ProcessResultFile()
        msg = ('\nLint found %d new issues.\n'
               ' - For full explanation refer to %s\n'
               ' - Wanna suppress these issues?\n'
               '   1. Read comment in %s\n'
               '   2. Run "python %s %s"\n' %
               (num_issues,
                _RelativizePath(result_path),
                _RelativizePath(config_path),
                _RelativizePath(os.path.join(_SRC_ROOT, 'build', 'android',
                                             'lint', 'suppress.py')),
                _RelativizePath(result_path)))
        print >> sys.stderr, msg
        # Lint errors do not fail the build.
        return 0
  return 0
def main():
  """Parses command-line options, optionally runs lint, touches the stamp."""
  parser = optparse.OptionParser()
  build_utils.AddDepfileOption(parser)
  parser.add_option('--lint-path', help='Path to lint executable.')
  parser.add_option('--config-path', help='Path to lint suppressions file.')
  parser.add_option('--processed-config-path',
                    help='Path to processed lint suppressions file.')
  parser.add_option('--manifest-path', help='Path to AndroidManifest.xml')
  parser.add_option('--result-path', help='Path to XML lint result file.')
  parser.add_option('--product-dir', help='Path to product dir.')
  parser.add_option('--src-dirs', help='Directories containing java files.')
  parser.add_option('--java-files', help='Paths to java files.')
  parser.add_option('--jar-path', help='Jar file containing class files.')
  parser.add_option('--stamp', help='Path to touch on success.')
  parser.add_option('--enable', action='store_true',
                    help='Run lint instead of just touching stamp.')
  options, _ = parser.parse_args()
  build_utils.CheckOptions(
      options, parser, required=['lint_path', 'config_path',
                                 'processed_config_path', 'manifest_path',
                                 'result_path', 'product_dir',
                                 'jar_path'])
  rc = 0
  if options.enable:
    # Sources come either from directories to scan or an explicit file list.
    sources = []
    if options.src_dirs:
      src_dirs = build_utils.ParseGypList(options.src_dirs)
      sources = build_utils.FindInDirectories(src_dirs, '*.java')
    elif options.java_files:
      sources = build_utils.ParseGypList(options.java_files)
    else:
      print 'One of --src-dirs or --java-files must be specified.'
      return 1
    rc = _RunLint(options.lint_path, options.config_path,
                  options.processed_config_path,
                  options.manifest_path, options.result_path,
                  options.product_dir, sources, options.jar_path)
  if options.depfile:
    build_utils.WriteDepfile(
        options.depfile,
        build_utils.GetPythonDependencies())
  # Only touch the stamp when lint itself succeeded.
  if options.stamp and not rc:
    build_utils.Touch(options.stamp)
  return rc
# Support running as a stand-alone script.
if __name__ == '__main__':
  sys.exit(main())
| bsd-3-clause |
EUDAT-B2SHARE/invenio-old | modules/oaiharvest/lib/oai_harvest_daemon.py | 1 | 74140 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
OAI Harvest daemon - harvest records from OAI repositories.
If started via CLI with --verb parameters, starts a manual single-shot
harvesting. Otherwise starts a BibSched task for periodical harvesting
of repositories defined in the OAI Harvest admin interface
"""
__revision__ = "$Id$"
import os
import sys
import getopt
import getpass
import re
import time
import calendar
import shutil
import tempfile
import urlparse
import random
from invenio.config import \
CFG_BINDIR, \
CFG_TMPDIR, \
CFG_ETCDIR, \
CFG_INSPIRE_SITE, \
CFG_CERN_SITE, \
CFG_PLOTEXTRACTOR_DOWNLOAD_TIMEOUT, \
CFG_SITE_URL, \
CFG_OAI_FAILED_HARVESTING_STOP_QUEUE, \
CFG_OAI_FAILED_HARVESTING_EMAILS_ADMIN
from invenio.oai_harvest_config import InvenioOAIHarvestWarning
from invenio.dbquery import run_sql
from invenio.bibtask import \
task_get_task_param, \
task_get_option, \
task_set_option, \
write_message, \
task_init, \
task_sleep_now_if_required, \
task_update_progress, \
task_low_level_submission
from invenio.bibrecord import record_extract_oai_id, create_records, \
create_record, record_add_fields, \
record_delete_fields, record_xml_output, \
record_get_field_instances, \
record_modify_subfield, \
record_has_field, field_xml_output
from invenio import oai_harvest_getter
from invenio.errorlib import register_exception
from invenio.plotextractor_getter import harvest_single, make_single_directory
from invenio.plotextractor_converter import untar
from invenio.plotextractor import process_single, get_defaults
from invenio.shellutils import run_shell_command, Timeout
from invenio.textutils import translate_latex2unicode
from invenio.bibedit_utils import record_find_matching_fields
from invenio.bibcatalog import bibcatalog_system
import invenio.template
oaiharvest_templates = invenio.template.load('oai_harvest')
## precompile some often-used regexp for speed reasons:
# content of an <identifier> element (the OAI record identifier)
REGEXP_OAI_ID = re.compile("<identifier.*?>(.*?)<\/identifier>", re.DOTALL)
# body of a <record> element
REGEXP_RECORD = re.compile("<record.*?>(.*?)</record>", re.DOTALL)
# tail of a record following its first <controlfield> element
REGEXP_REFS = re.compile("<record.*?>.*?<controlfield .*?>.*?</controlfield>(.*?)</record>", re.DOTALL)
# an embedded <collaborationauthorlist> element, including its tags
REGEXP_AUTHLIST = re.compile("<collaborationauthorlist.*?</collaborationauthorlist>", re.DOTALL)
# XSLT stylesheet used by the authorlist post-process mode
CFG_OAI_AUTHORLIST_POSTMODE_STYLESHEET = "%s/bibconvert/config/%s" % (CFG_ETCDIR, "authorlist2marcxml.xsl")
def get_nb_records_in_file(filename):
    """
    Return number of records in FILENAME, where FILENAME is either a
    harvested or a converted file. Useful for statistics.

    Returns 0 when the file cannot be read (e.g. does not exist) and -1 on
    any other unexpected error.
    """
    try:
        # Use a context manager so the file handle is closed deterministically
        # instead of relying on garbage collection.
        with open(filename, 'r') as fd:
            return fd.read().count("</record>")
    except IOError:
        return 0  # file not exists and such
    except Exception:
        # Preserve the historical "unknown error" sentinel instead of
        # propagating (callers treat -1 as "could not count").
        return -1
def task_run_core():
"""Run the harvesting task. The row argument is the oaiharvest task
queue row, containing if, arguments, etc.
Return 1 in case of success and 0 in case of failure.
"""
reposlist = []
datelist = []
dateflag = 0
filepath_prefix = "%s/oaiharvest_%s" % (CFG_TMPDIR, str(task_get_task_param("task_id")))
### go ahead: build up the reposlist
if task_get_option("repository") is not None:
### user requests harvesting from selected repositories
write_message("harvesting from selected repositories")
for reposname in task_get_option("repository"):
row = get_row_from_reposname(reposname)
if row == []:
write_message("source name %s is not valid" % (reposname,))
continue
else:
reposlist.append(get_row_from_reposname(reposname))
else:
### user requests harvesting from all repositories
write_message("harvesting from all repositories in the database")
reposlist = get_all_rows_from_db()
### go ahead: check if user requested from-until harvesting
if task_get_option("dates"):
### for each repos simply perform a from-until date harvesting...
### no need to update anything
dateflag = 1
for element in task_get_option("dates"):
datelist.append(element)
error_happened_p = 0 # 0: no error, 1: "recoverable" error (don't stop queue), 2: error (admin intervention needed)
j = 0
for repos in reposlist:
j += 1
task_sleep_now_if_required()
# Extract values from database row (in exact order):
# | id | baseurl | metadataprefix | arguments | comment
# | bibconvertcfgfile | name | lastrun | frequency
# | postprocess | setspecs | bibfilterprogram
source_id = repos[0][0]
baseurl = str(repos[0][1])
metadataprefix = str(repos[0][2])
bibconvert_cfgfile = str(repos[0][5])
reponame = str(repos[0][6])
lastrun = repos[0][7]
frequency = repos[0][8]
postmode = repos[0][9]
setspecs = str(repos[0][10])
bibfilterprogram = str(repos[0][11])
write_message("running in postmode %s" % (postmode,))
downloaded_material_dict = {}
harvested_files_list = []
# Harvest phase
harvestpath = "%s_%d_%s_" % (filepath_prefix, j, time.strftime("%Y%m%d%H%M%S"))
if dateflag == 1:
task_update_progress("Harvesting %s from %s to %s (%i/%i)" % \
(reponame, \
str(datelist[0]),
str(datelist[1]),
j, \
len(reposlist)))
exit_code, file_list = oai_harvest_get(prefix=metadataprefix,
baseurl=baseurl,
harvestpath=harvestpath,
fro=str(datelist[0]),
until=str(datelist[1]),
setspecs=setspecs)
if exit_code == 1 :
write_message("source %s was harvested from %s to %s" % \
(reponame, str(datelist[0]), str(datelist[1])))
harvested_files_list = file_list
else:
write_message("an error occurred while harvesting from source %s for the dates chosen:\n%s\n" % \
(reponame, file_list))
if error_happened_p < 1:
error_happened_p = 1
continue
elif dateflag != 1 and lastrun is None and frequency != 0:
write_message("source %s was never harvested before - harvesting whole repository" % \
(reponame,))
task_update_progress("Harvesting %s (%i/%i)" % \
(reponame,
j, \
len(reposlist)))
exit_code, file_list = oai_harvest_get(prefix=metadataprefix,
baseurl=baseurl,
harvestpath=harvestpath,
setspecs=setspecs)
if exit_code == 1 :
update_lastrun(source_id)
harvested_files_list = file_list
else :
write_message("an error occurred while harvesting from source %s:\n%s\n" % \
(reponame, file_list))
if error_happened_p < 1:
error_happened_p = 1
continue
elif dateflag != 1 and frequency != 0:
### check that update is actually needed,
### i.e. lastrun+frequency>today
timenow = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
lastrundate = re.sub(r'\.[0-9]+$', '',
str(lastrun)) # remove trailing .00
timeinsec = int(frequency) * 60 * 60
updatedue = add_timestamp_and_timelag(lastrundate, timeinsec)
proceed = compare_timestamps_with_tolerance(updatedue, timenow)
if proceed == 0 or proceed == -1 : #update needed!
write_message("source %s is going to be updated" % (reponame,))
fromdate = str(lastrun)
fromdate = fromdate.split()[0] # get rid of time of the day for the moment
task_update_progress("Harvesting %s (%i/%i)" % \
(reponame,
j, \
len(reposlist)))
exit_code, file_list = oai_harvest_get(prefix=metadataprefix,
baseurl=baseurl,
harvestpath=harvestpath,
fro=fromdate,
setspecs=setspecs)
if exit_code == 1 :
update_lastrun(source_id)
harvested_files_list = file_list
else :
write_message("an error occurred while harvesting from source %s:\n%s\n" % \
(reponame, file_list))
if error_happened_p < 1:
error_happened_p = 1
continue
else:
write_message("source %s does not need updating" % (reponame,))
continue
elif dateflag != 1 and frequency == 0:
write_message("source %s has frequency set to 'Never' so it will not be updated" % \
(reponame,))
continue
# Harvesting done, now convert/extract/filter/upload as requested
if len(harvested_files_list) < 1:
write_message("No records harvested for %s" % (reponame,))
continue
# Retrieve all OAI IDs and set active list
harvested_identifier_list = collect_identifiers(harvested_files_list)
active_files_list = harvested_files_list
if len(active_files_list) != len(harvested_identifier_list):
# Harvested files and its identifiers are 'out of sync', abort harvest
write_message("Harvested files miss identifiers for %s" % (reponame,))
continue
write_message("post-harvest processes started")
# Convert phase
if 'c' in postmode:
updated_files_list = []
i = 0
write_message("conversion step started")
for active_file in active_files_list:
i += 1
task_sleep_now_if_required()
task_update_progress("Converting material harvested from %s (%i/%i)" % \
(reponame, \
i, \
len(active_files_list)))
updated_file = "%s.converted" % (active_file.split('.')[0],)
updated_files_list.append(updated_file)
(exitcode, err_msg) = call_bibconvert(config=bibconvert_cfgfile,
harvestpath=active_file,
convertpath=updated_file)
if exitcode == 0:
write_message("harvested file %s was successfully converted" % \
(active_file,))
else:
write_message("an error occurred while converting %s:\n%s" % \
(active_file, err_msg))
error_happened_p = 2
continue
# print stats:
for updated_file in updated_files_list:
write_message("File %s contains %i records." % \
(updated_file,
get_nb_records_in_file(updated_file)))
active_files_list = updated_files_list
write_message("conversion step ended")
# plotextract phase
if 'p' in postmode:
write_message("plotextraction step started")
# Download tarball for each harvested/converted record, then run plotextrator.
# Update converted xml files with generated xml or add it for upload
updated_files_list = []
i = 0
for active_file in active_files_list:
identifiers = harvested_identifier_list[i]
i += 1
task_sleep_now_if_required()
task_update_progress("Extracting plots from harvested material from %s (%i/%i)" % \
(reponame, i, len(active_files_list)))
updated_file = "%s.plotextracted" % (active_file.split('.')[0],)
updated_files_list.append(updated_file)
(exitcode, err_msg) = call_plotextractor(active_file,
updated_file,
identifiers,
downloaded_material_dict,
source_id)
if exitcode == 0:
if err_msg != "":
write_message("plots from %s was extracted, but with some errors:\n%s" % \
(active_file, err_msg))
else:
write_message("plots from %s was successfully extracted" % \
(active_file,))
else:
write_message("an error occurred while extracting plots from %s:\n%s" % \
(active_file, err_msg))
error_happened_p = 2
continue
# print stats:
for updated_file in updated_files_list:
write_message("File %s contains %i records." % \
(updated_file,
get_nb_records_in_file(updated_file)))
active_files_list = updated_files_list
write_message("plotextraction step ended")
# refextract phase
if 'r' in postmode:
updated_files_list = []
i = 0
write_message("refextraction step started")
for active_file in active_files_list:
identifiers = harvested_identifier_list[i]
i += 1
task_sleep_now_if_required()
task_update_progress("Extracting references from material harvested from %s (%i/%i)" % \
(reponame, i, len(active_files_list)))
updated_file = "%s.refextracted" % (active_file.split('.')[0],)
updated_files_list.append(updated_file)
(exitcode, err_msg) = call_refextract(active_file,
updated_file,
identifiers,
downloaded_material_dict,
source_id)
if exitcode == 0:
if err_msg != "":
write_message("references from %s was extracted, but with some errors:\n%s" % \
(active_file, err_msg))
else:
write_message("references from %s was successfully extracted" % \
(active_file,))
else:
write_message("an error occurred while extracting references from %s:\n%s" % \
(active_file, err_msg))
error_happened_p = 2
continue
# print stats:
for updated_file in updated_files_list:
write_message("File %s contains %i records." % \
(updated_file,
get_nb_records_in_file(updated_file)))
active_files_list = updated_files_list
write_message("refextraction step ended")
# authorlist phase
if 'a' in postmode:
write_message("authorlist extraction step started")
# Initialize BibCatalog connection as default user, if possible
if bibcatalog_system is not None:
bibcatalog_response = bibcatalog_system.check_system()
else:
bibcatalog_response = "No ticket system configured"
if bibcatalog_response != "":
write_message("BibCatalog error: %s\n" % (bibcatalog_response,))
updated_files_list = []
i = 0
for active_file in active_files_list:
identifiers = harvested_identifier_list[i]
i += 1
task_sleep_now_if_required()
task_update_progress("Extracting any authorlists from material harvested from %s (%i/%i)" % \
(reponame, i, len(active_files_list)))
updated_file = "%s.authextracted" % (active_file.split('.')[0],)
updated_files_list.append(updated_file)
(exitcode, err_msg) = call_authorlist_extract(active_file,
updated_file,
identifiers,
downloaded_material_dict,
source_id)
if exitcode == 0:
if err_msg != "":
write_message("authorlists from %s was extracted, but with some errors:\n%s" % \
(active_file, err_msg))
else:
write_message("any authorlists from %s was successfully extracted" % \
(active_file,))
else:
write_message("an error occurred while extracting authorlists from %s:\n%s" % \
(active_file, err_msg))
error_happened_p = 2
continue
# print stats:
for updated_file in updated_files_list:
write_message("File %s contains %i records." % \
(updated_file,
get_nb_records_in_file(updated_file)))
active_files_list = updated_files_list
write_message("authorlist extraction step ended")
# fulltext phase
if 't' in postmode:
write_message("full-text attachment step started")
# Attaching fulltext
updated_files_list = []
i = 0
for active_file in active_files_list:
identifiers = harvested_identifier_list[i]
i += 1
task_sleep_now_if_required()
task_update_progress("Attaching fulltext to records harvested from %s (%i/%i)" % \
(reponame, i, len(active_files_list)))
updated_file = "%s.fulltext" % (active_file.split('.')[0],)
updated_files_list.append(updated_file)
(exitcode, err_msg) = call_fulltext(active_file,
updated_file,
identifiers,
downloaded_material_dict,
source_id)
if exitcode == 0:
write_message("fulltext from %s was successfully attached" % \
(active_file,))
else:
write_message("an error occurred while attaching fulltext to %s:\n%s" % \
(active_file, err_msg))
error_happened_p = 2
continue
# print stats:
for updated_file in updated_files_list:
write_message("File %s contains %i records." % \
(updated_file,
get_nb_records_in_file(updated_file)))
active_files_list = updated_files_list
write_message("full-text attachment step ended")
# Filter-phase
if 'f' in postmode:
write_message("filtering step started")
# first call bibfilter:
res = 0
i = 0
for active_file in active_files_list:
i += 1
task_sleep_now_if_required()
task_update_progress("Filtering material harvested from %s (%i/%i)" % \
(reponame, \
i, \
len(active_files_list)))
(exitcode, err_msg) = call_bibfilter(bibfilterprogram, active_file)
if exitcode == 0:
write_message("%s was successfully bibfiltered" % \
(active_file,))
else:
write_message("an error occurred while bibfiltering %s:\n%s" % \
(active_file, err_msg))
error_happened_p = 2
continue
# print stats:
for active_file in active_files_list:
write_message("File %s contains %i records." % \
(active_file + ".insert.xml",
get_nb_records_in_file(active_file + ".insert.xml")))
write_message("File %s contains %i records." % \
(active_file + ".correct.xml",
get_nb_records_in_file(active_file + ".correct.xml")))
write_message("File %s contains %i records." % \
(active_file + ".append.xml",
get_nb_records_in_file(active_file + ".append.xml")))
write_message("File %s contains %i records." % \
(active_file + ".holdingpen.xml",
get_nb_records_in_file(active_file + ".holdingpen.xml")))
write_message("filtering step ended")
# Upload files
if "u" in postmode:
write_message("upload step started")
if 'f' in postmode:
upload_modes = [('.insert.xml', '-i'),
('.correct.xml', '-c'),
('.append.xml', '-a'),
('.holdingpen.xml', '-o')]
else:
upload_modes = [('', '-ir')]
i = 0
last_upload_task_id = -1
# Get a random sequence ID that will allow for the tasks to be
# run in order, regardless if parallel task execution is activated
sequence_id = random.randrange(1, 4294967296)
for active_file in active_files_list:
task_sleep_now_if_required()
i += 1
task_update_progress("Uploading records harvested from %s (%i/%i)" % \
(reponame, \
i, \
len(active_files_list)))
for suffix, mode in upload_modes:
upload_filename = active_file + suffix
if get_nb_records_in_file(upload_filename) == 0:
continue
last_upload_task_id = call_bibupload(upload_filename, \
[mode], \
source_id, \
sequence_id)
if not last_upload_task_id:
error_happened_p = 2
write_message("an error occurred while uploading %s from %s" % \
(upload_filename, reponame))
break
else:
write_message("material harvested from source %s was successfully uploaded" % \
(reponame,))
if len(active_files_list) > 0:
write_message("nothing to upload")
write_message("upload step ended")
if CFG_INSPIRE_SITE:
# Launch BibIndex,Webcoll update task to show uploaded content quickly
bibindex_params = ['-w', 'reportnumber,collection', \
'-P', '6', \
'-I', str(sequence_id), \
'--post-process', 'bst_run_bibtask[taskname="webcoll", user="oaiharvest", P="6", c="HEP"]']
task_low_level_submission("bibindex", "oaiharvest", *tuple(bibindex_params))
write_message("post-harvest processes ended")
if error_happened_p:
if CFG_OAI_FAILED_HARVESTING_STOP_QUEUE == 0 or \
not task_get_task_param("sleeptime") or \
error_happened_p > 1:
# Admin want BibSched to stop, or the task is not set to
# run at a later date: we must stop the queue.
write_message("An error occurred. Task is configured to stop")
return False
else:
# An error happened, but it can be recovered at next run
# (task is re-scheduled) and admin set BibSched to
# continue even after failure.
write_message("An error occurred, but task is configured to continue")
if CFG_OAI_FAILED_HARVESTING_EMAILS_ADMIN:
try:
raise InvenioOAIHarvestWarning("OAIHarvest (task #%s) failed at fully harvesting source(s) %s. BibSched has NOT been stopped, and OAIHarvest will try to recover at next run" % (task_get_task_param("task_id"), ", ".join([repo[0][6] for repo in reposlist]),))
except InvenioOAIHarvestWarning, e:
register_exception(stream='warning', alert_admin=True)
return True
else:
return True
def collect_identifiers(harvested_file_list):
    """Collects all OAI PMH identifiers from each file in the list
    and adds them to a list of identifiers per file.

    Files that cannot be read are skipped with a warning, so the result
    may be shorter than the input list; the caller detects this mismatch
    and aborts the harvest for that source.

    @param harvested_file_list: list of filepaths to harvested files
    @return list of lists, containing each files' identifier list"""
    result = []
    for harvested_file in harvested_file_list:
        try:
            # Context manager guarantees the descriptor is closed even when
            # the read fails (the original leaked it on read errors).
            with open(harvested_file) as fd_active:
                data = fd_active.read()
        except IOError:
            write_message("Error opening harvested file '%s'. Skipping.." % (harvested_file,))
            continue
        result.append(REGEXP_OAI_ID.findall(data))
    return result
def remove_duplicates(harvested_file_list):
    """
    Go through a list of harvested files and remove any duplicate records.

    Each file is rewritten in place; the original content is first moved to
    a "<filename>~" backup, which is left on disk. A record counts as a
    duplicate when its OAI identifier was already seen in ANY of the files
    processed so far, i.e. de-duplication is global across the whole list.
    Records whose OAI identifier cannot be found are dropped as well.

    @param harvested_file_list: list of paths to harvested files to clean
    """
    harvested_identifiers = []
    for harvested_file in harvested_file_list:
        # Firstly, rename original file to temporary name
        try:
            os.rename(harvested_file, "%s~" % (harvested_file,))
        except OSError:
            write_message("Error renaming harvested file '%s'. Skipping.." % (harvested_file,))
            continue
        # Secondly, open files for writing and reading
        try:
            updated_harvested_file = open(harvested_file, 'w')
            original_harvested_file = open("%s~" % (harvested_file,))
        except IOError:
            write_message("Error opening harvested file '%s'. Skipping.." % (harvested_file,))
            continue
        data = original_harvested_file.read()
        original_harvested_file.close()
        # Get and write OAI-PMH XML header data to updated file
        header_index_end = data.find("<ListRecords>") + len("<ListRecords>")
        updated_harvested_file.write("%s\n" % (data[:header_index_end],))
        # By checking the OAI ID we write all records not written previously (in any file)
        harvested_records = REGEXP_RECORD.findall(data)
        for record in harvested_records:
            oai_identifier = REGEXP_OAI_ID.search(record)
            if oai_identifier != None and oai_identifier.group(1) not in harvested_identifiers:
                updated_harvested_file.write("<record>%s</record>\n" % (record,))
                harvested_identifiers.append(oai_identifier.group(1))
        # Close the OAI-PMH envelope that the header slice above opened.
        updated_harvested_file.write("</ListRecords>\n</OAI-PMH>\n")
        updated_harvested_file.close()
def add_timestamp_and_timelag(timestamp,
                              timelag):
    """ Adds a time lag in seconds to a given date (timestamp).
    Returns the resulting date.

    @param timestamp: date string in "%Y-%m-%d %H:%M:%S" format; an
        optional trailing fractional-seconds part (e.g. ".00") is stripped
    @param timelag: number of seconds to add
    @return: resulting date string in "%Y-%m-%d %H:%M:%S" format
    """
    # Drop any trailing fractional seconds so strptime accepts the value.
    clean_stamp = re.sub(r'\.[0-9]+$', '', timestamp)
    # Convert to Unix epoch seconds (UTC), shift by the lag, format back.
    epoch_seconds = calendar.timegm(time.strptime(clean_stamp,
                                                  "%Y-%m-%d %H:%M:%S"))
    return time.strftime("%Y-%m-%d %H:%M:%S",
                         time.gmtime(epoch_seconds + timelag))
def update_lastrun(index):
    """ A method that updates the lastrun of a repository
        successfully harvested

    @param index: id of the oaiHARVEST table row to touch
    @return: 1 on success, or a (0, exception) tuple on database error.
        NOTE(review): the error tuple is truthy, so truth-testing the
        return value does not distinguish success from failure -- confirm
        callers ignore the return value before relying on it.
    """
    try:
        # lastrun is stored as a local-time "%Y-%m-%d %H:%M:%S" string.
        today = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        sql = 'UPDATE oaiHARVEST SET lastrun=%s WHERE id=%s'
        run_sql(sql, (today, index))
        return 1
    except StandardError, e:
        return (0, e)
def oai_harvest_get(prefix, baseurl, harvestpath,
                    fro=None, until=None, setspecs=None,
                    user=None, password=None, cert_file=None,
                    key_file=None, method="POST"):
    """
    Retrieve OAI records from given repository, with given arguments

    @param prefix: OAI metadataPrefix to request
    @param baseurl: base URL of the OAI-PMH repository
    @param harvestpath: filename prefix for the files written to disk
    @param fro: optional "from" date restriction (string)
    @param until: optional "until" date restriction (string)
    @param setspecs: optional space-separated OAI setSpec values
    @param user: optional HTTP auth user name
    @param password: optional HTTP auth password
    @param cert_file: optional SSL client certificate file
    @param key_file: optional SSL client key file
    @param method: HTTP method used for the requests (default "POST")
    @return: (1, list_of_harvested_files) on success, (0, exception) on
        any failure
    """
    try:
        (addressing_scheme, network_location, path, dummy1, \
         dummy2, dummy3) = urlparse.urlparse(baseurl)
        # Use HTTPS whenever the base URL's scheme says so.
        secure = (addressing_scheme == "https")
        http_param_dict = {'verb': "ListRecords",
                           'metadataPrefix': prefix}
        if fro:
            http_param_dict['from'] = fro
        if until:
            http_param_dict['until'] = until
        sets = None
        if setspecs:
            sets = [oai_set.strip() for oai_set in setspecs.split(' ')]
        harvested_files = oai_harvest_getter.harvest(network_location, path, http_param_dict, method, harvestpath,
                                                     sets, secure, user, password, cert_file, key_file)
        # Records may appear in several harvested chunks; de-duplicate
        # them in place before returning.
        remove_duplicates(harvested_files)
        return (1, harvested_files)
    except (StandardError, oai_harvest_getter.InvenioOAIRequestError), e:
        return (0, e)
def call_bibconvert(config, harvestpath, convertpath):
    """ Call BibConvert to convert file given at 'harvestpath' with
    conversion template 'config', and save the result in file at
    'convertpath'.

    Returns status exit code of the conversion, as well as error
    messages, if any
    """
    # run_shell_command returns (exitcode, stdout, stderr); stdout is
    # redirected into convertpath, so only exitcode and stderr matter.
    command_result = run_shell_command(cmd="%s/bibconvert -c %s < %s",
                                       args=(CFG_BINDIR, config, harvestpath),
                                       filename_out=convertpath)
    conversion_exitcode = command_result[0]
    conversion_stderr = command_result[2]
    return (conversion_exitcode, conversion_stderr)
def call_plotextractor(active_file, extracted_file, harvested_identifier_list, \
                       downloaded_files, source_id):
    """
    Function that generates proper MARCXML containing harvested plots for
    each record.
    @param active_file: path to the currently processed file
    @param extracted_file: path to the file where the final results will be saved
    @param harvested_identifier_list: list of OAI identifiers for this active_file
    @param downloaded_files: dict of identifier -> dict mappings for downloaded material.
    @param source_id: the repository identifier
    @type source_id: integer
    @return: exitcode and any error messages as: (exitcode, err_msg)
    """
    all_err_msg = []
    exitcode = 0
    # Read in active file
    recs_fd = open(active_file, 'r')
    records = recs_fd.read()
    recs_fd.close()
    # Find all record
    record_xmls = REGEXP_RECORD.findall(records)
    # Rebuild the file as a complete MARCXML collection document.
    updated_xml = ['<?xml version="1.0" encoding="UTF-8"?>']
    updated_xml.append('<collection>')
    i = 0
    for record_xml in record_xmls:
        current_exitcode = 0
        # Identifiers are positionally aligned with the records found above.
        identifier = harvested_identifier_list[i]
        i += 1
        if identifier not in downloaded_files:
            downloaded_files[identifier] = {}
        # The original record content is always kept, processed or not.
        updated_xml.append("<record>")
        updated_xml.append(record_xml)
        if not oaiharvest_templates.tmpl_should_process_record_with_mode(record_xml, 'p', source_id):
            # We skip this record
            updated_xml.append("</record>")
            continue
        if "tarball" not in downloaded_files[identifier]:
            # Download the source tarball once per identifier and cache it
            # so later post-process steps can reuse it.
            current_exitcode, err_msg, tarball, dummy = \
                        plotextractor_harvest(identifier, active_file, selection=["tarball"])
            if current_exitcode != 0:
                exitcode = current_exitcode
                all_err_msg.append(err_msg)
            else:
                downloaded_files[identifier]["tarball"] = tarball
        if current_exitcode == 0:
            plotextracted_xml_path = process_single(downloaded_files[identifier]["tarball"])
            if plotextracted_xml_path != None:
                # We store the path to the directory the tarball contents live
                downloaded_files[identifier]["tarball-extracted"] = os.path.split(plotextracted_xml_path)[0]
                # Read and grab MARCXML from plotextractor run
                plotsxml_fd = open(plotextracted_xml_path, 'r')
                plotextracted_xml = plotsxml_fd.read()
                plotsxml_fd.close()
                re_list = REGEXP_RECORD.findall(plotextracted_xml)
                if re_list != []:
                    # Append the extracted plot fields inside the record.
                    updated_xml.append(re_list[0])
        updated_xml.append("</record>")
    updated_xml.append('</collection>')
    # Write to file
    file_fd = open(extracted_file, 'w')
    file_fd.write("\n".join(updated_xml))
    file_fd.close()
    if len(all_err_msg) > 0:
        return exitcode, "\n".join(all_err_msg)
    return exitcode, ""
def call_refextract(active_file, extracted_file, harvested_identifier_list,
                    downloaded_files, source_id):
    """
    Function that calls refextractor to extract references and attach them to
    harvested records. It will download the fulltext-pdf for each identifier
    if necessary.
    @param active_file: path to the currently processed file
    @param extracted_file: path to the file where the final results will be saved
    @param harvested_identifier_list: list of OAI identifiers for this active_file
    @param downloaded_files: dict of identifier -> dict mappings for downloaded material.
    @param source_id: the repository identifier
    @type source_id: integer
    @return: exitcode and any error messages as: (exitcode, all_err_msg)
    """
    all_err_msg = []
    exitcode = 0
    flag = ""
    if CFG_INSPIRE_SITE == 1:
        flag = "--inspire"
    # Read in active file
    recs_fd = open(active_file, 'r')
    records = recs_fd.read()
    recs_fd.close()
    # Find all record
    record_xmls = REGEXP_RECORD.findall(records)
    updated_xml = ['<?xml version="1.0" encoding="UTF-8"?>']
    updated_xml.append('<collection>')
    i = 0
    for record_xml in record_xmls:
        current_exitcode = 0
        identifier = harvested_identifier_list[i]
        i += 1
        if identifier not in downloaded_files:
            downloaded_files[identifier] = {}
        updated_xml.append("<record>")
        updated_xml.append(record_xml)
        # NOTE(review): mode 'p' is checked here, same as in the plotextract
        # step; 'r' might be expected for refextract -- confirm intent.
        if not oaiharvest_templates.tmpl_should_process_record_with_mode(record_xml, 'p', source_id):
            # We skip this record
            updated_xml.append("</record>")
            continue
        if "pdf" not in downloaded_files[identifier]:
            # Download the fulltext PDF once per identifier and cache it.
            current_exitcode, err_msg, dummy, pdf = \
                        plotextractor_harvest(identifier, active_file, selection=["pdf"])
            if current_exitcode != 0:
                exitcode = current_exitcode
                all_err_msg.append(err_msg)
            else:
                downloaded_files[identifier]["pdf"] = pdf
        if current_exitcode == 0:
            # NOTE(review): the PDF path is interpolated directly into a
            # shell command string; a path containing shell metacharacters
            # would break the command (potential shell injection). Paths
            # come from plotextractor_harvest -- confirm they are safe.
            current_exitcode, cmd_stdout, err_msg = run_shell_command(cmd="%s/refextract %s -f '%s'" % \
                                                                (CFG_BINDIR, flag, downloaded_files[identifier]["pdf"]))
            if err_msg != "" or current_exitcode != 0:
                exitcode = current_exitcode
                all_err_msg.append("Error extracting references from id: %s\nError:%s" % \
                             (identifier, err_msg))
            else:
                references_xml = REGEXP_REFS.search(cmd_stdout)
                if references_xml:
                    # Append extracted reference fields inside the record.
                    updated_xml.append(references_xml.group(1))
        updated_xml.append("</record>")
    updated_xml.append('</collection>')
    # Write to file
    file_fd = open(extracted_file, 'w')
    file_fd.write("\n".join(updated_xml))
    file_fd.close()
    if len(all_err_msg) > 0:
        return exitcode, "\n".join(all_err_msg)
    return exitcode, ""
def call_authorlist_extract(active_file, extracted_file, harvested_identifier_list,
                            downloaded_files, source_id):
    """
    Function that will look in harvested tarball for any authorlists. If found
    it will extract and convert the authors using a XSLT stylesheet.
    @param active_file: path to the currently processed file
    @type active_file: string
    @param extracted_file: path to the file where the final results will be saved
    @type extracted_file: string
    @param harvested_identifier_list: list of OAI identifiers for this active_file
    @type harvested_identifier_list: list
    @param downloaded_files: dict of identifier -> dict mappings for downloaded material.
    @type downloaded_files: dict
    @param source_id: the repository identifier
    @type source_id: integer
    @return: exitcode and any error messages as: (exitcode, all_err_msg),
        where all_err_msg is a single newline-joined string (empty on
        full success), consistent with the other call_* helpers
    @rtype: tuple
    """
    all_err_msg = []
    exitcode = 0
    # Read in active file
    recs_fd = open(active_file, 'r')
    records = recs_fd.read()
    recs_fd.close()
    # Find all records
    record_xmls = REGEXP_RECORD.findall(records)
    updated_xml = ['<?xml version="1.0" encoding="UTF-8"?>']
    updated_xml.append('<collection>')
    i = 0
    for record_xml in record_xmls:
        current_exitcode = 0
        identifier = harvested_identifier_list[i]
        i += 1
        # NOTE(review): mode 'p' is checked here, same as the plotextract
        # step; 'a' might be expected for the authorlist step -- confirm.
        if not oaiharvest_templates.tmpl_should_process_record_with_mode(record_xml, 'p', source_id):
            # We skip this record
            updated_xml.append("<record>")
            updated_xml.append(record_xml)
            updated_xml.append("</record>")
            continue
        # Grab BibRec instance of current record for later amending
        existing_record, status_code, dummy1 = create_record("<record>%s</record>" % (record_xml,))
        if status_code == 0:
            all_err_msg.append("Error parsing record, skipping authorlist extraction of: %s\n" % \
                               (identifier,))
            updated_xml.append("<record>%s</record>" % (record_xml,))
            continue
        if identifier not in downloaded_files:
            downloaded_files[identifier] = {}
        if "tarball" not in downloaded_files[identifier]:
            # Download the source tarball once per identifier and cache it.
            current_exitcode, err_msg, tarball, dummy = \
                        plotextractor_harvest(identifier, active_file, selection=["tarball"])
            if current_exitcode != 0:
                exitcode = current_exitcode
                all_err_msg.append(err_msg)
            else:
                downloaded_files[identifier]["tarball"] = tarball
        if current_exitcode == 0:
            current_exitcode, err_msg, authorlist_xml_path = authorlist_extract(downloaded_files[identifier]["tarball"], \
                                                                                identifier, downloaded_files)
            if current_exitcode != 0:
                exitcode = current_exitcode
                all_err_msg.append("Error extracting authors from id: %s\nError:%s" % \
                                   (identifier, err_msg))
            elif authorlist_xml_path is not None:
                ## Authorlist found
                # Read and create BibRec
                xml_fd = open(authorlist_xml_path, 'r')
                author_xml = xml_fd.read()
                xml_fd.close()
                authorlist_record = create_records(author_xml)
                if len(authorlist_record) == 1:
                    if authorlist_record[0][0] == None:
                        all_err_msg.append("Error parsing authorlist record for id: %s" % \
                                           (identifier,))
                        # NOTE(review): this "continue" skips the
                        # record_xml_output() append below, so the record
                        # is dropped from the output file -- confirm intent.
                        continue
                    authorlist_record = authorlist_record[0][0]
                # Convert any LaTeX symbols in authornames
                translate_fieldvalues_from_latex(authorlist_record, '100', code='a')
                translate_fieldvalues_from_latex(authorlist_record, '700', code='a')
                # Look for any UNDEFINED fields in authorlist
                key = "UNDEFINED"
                matching_fields = record_find_matching_fields(key, authorlist_record, tag='100') \
                                  + record_find_matching_fields(key, authorlist_record, tag='700')
                if len(matching_fields) > 0 and bibcatalog_system != None:
                    # UNDEFINED found. Create ticket in author queue
                    ticketid = create_authorlist_ticket(matching_fields, identifier)
                    if ticketid:
                        write_message("authorlist RT ticket %d submitted for %s" % (ticketid, identifier))
                    else:
                        all_err_msg.append("Error while submitting RT ticket for %s" % (identifier,))
                # Replace 100,700 fields of original record with extracted fields
                record_delete_fields(existing_record, '100')
                record_delete_fields(existing_record, '700')
                first_author = record_get_field_instances(authorlist_record, '100')
                additional_authors = record_get_field_instances(authorlist_record, '700')
                record_add_fields(existing_record, '100', first_author)
                record_add_fields(existing_record, '700', additional_authors)
        updated_xml.append(record_xml_output(existing_record))
    updated_xml.append('</collection>')
    # Write to file
    file_fd = open(extracted_file, 'w')
    file_fd.write("\n".join(updated_xml))
    file_fd.close()
    if len(all_err_msg) > 0:
        # BUGFIX: join the collected messages into one string like the
        # sibling call_* helpers do -- callers format this value with %s,
        # and the original returned the raw list (printed as its repr).
        return exitcode, "\n".join(all_err_msg)
    return exitcode, ""
def call_fulltext(active_file, extracted_file, harvested_identifier_list,
                  downloaded_files, source_id):
    """
    Function that calls attach FFT tag for full-text pdf to harvested records.
    It will download the fulltext-pdf for each identifier if necessary.
    @param active_file: path to the currently processed file
    @param extracted_file: path to the file where the final results will be saved
    @param harvested_identifier_list: list of OAI identifiers for this active_file
    @param downloaded_files: dict of identifier -> dict mappings for downloaded material.
    @param source_id: the repository identifier
    @type source_id: integer
    @return: exitcode and any error messages as: (exitcode, err_msg)
    """
    all_err_msg = []
    exitcode = 0
    # Read in active file
    recs_fd = open(active_file, 'r')
    records = recs_fd.read()
    recs_fd.close()
    # Set doctype FIXME: Remove when parameters are introduced to post-process steps
    if CFG_INSPIRE_SITE == 1:
        doctype = "arXiv"
    elif CFG_CERN_SITE == 1:
        doctype = ""
    else:
        doctype = ""
    # Find all records
    record_xmls = REGEXP_RECORD.findall(records)
    updated_xml = ['<?xml version="1.0" encoding="UTF-8"?>']
    updated_xml.append('<collection>')
    i = 0
    for record_xml in record_xmls:
        current_exitcode = 0
        identifier = harvested_identifier_list[i]
        i += 1
        if identifier not in downloaded_files:
            downloaded_files[identifier] = {}
        updated_xml.append("<record>")
        updated_xml.append(record_xml)
        # NOTE(review): mode 'p' is checked here, same as the plotextract
        # step; 't' might be expected for the fulltext step -- confirm.
        if not oaiharvest_templates.tmpl_should_process_record_with_mode(record_xml, 'p', source_id):
            # We skip this record
            updated_xml.append("</record>")
            continue
        if "pdf" not in downloaded_files[identifier]:
            # Download the fulltext PDF once per identifier and cache it.
            current_exitcode, err_msg, dummy, pdf = \
                        plotextractor_harvest(identifier, active_file, selection=["pdf"])
            if current_exitcode != 0:
                exitcode = current_exitcode
                all_err_msg.append(err_msg)
            else:
                downloaded_files[identifier]["pdf"] = pdf
        if current_exitcode == 0:
            # Attach the local PDF path via an FFT datafield so bibupload
            # picks the file up during the upload step.
            fulltext_xml = """  <datafield tag="FFT" ind1=" " ind2=" ">
    <subfield code="a">%(url)s</subfield>
    <subfield code="t">%(doctype)s</subfield>
  </datafield>""" % {'url': downloaded_files[identifier]["pdf"],
                     'doctype': doctype}
            updated_xml.append(fulltext_xml)
        updated_xml.append("</record>")
    updated_xml.append('</collection>')
    # Write to file
    file_fd = open(extracted_file, 'w')
    file_fd.write("\n".join(updated_xml))
    file_fd.close()
    if len(all_err_msg) > 0:
        return exitcode, "\n".join(all_err_msg)
    return exitcode, ""
def authorlist_extract(tarball_path, identifier, downloaded_files):
    """
    Try to extract the tarball given, if not already extracted, and look for
    any XML files that could be authorlists. If any is found, use a XSLT stylesheet
    to transform the authorlist into MARCXML author-fields, and return the full path
    of resulting conversion.
    @param tarball_path: path to the tarball to check
    @type tarball_path: string
    @param identifier: OAI Identifier to the current record
    @type identifier: string
    @param downloaded_files: dict of identifier -> dict mappings for downloaded material.
    @type downloaded_files: dict
    @return: path to converted authorlist together with exitcode and any error messages as:
             (exitcode, err_msg, authorlist_path)
    @rtype: tuple
    """
    all_err_msg = []
    exitcode = 0
    if "tarball-extracted" not in downloaded_files[identifier]:
        # tarball has not been extracted
        tar_dir, dummy = get_defaults(tarball=tarball_path, sdir=CFG_TMPDIR, refno_url="")
        try:
            dummy = untar(tarball_path, tar_dir)
        except Timeout:
            all_err_msg.append("Timeout during tarball extraction of %s" % (tarball_path,))
            exitcode = 1
            return exitcode, "\n".join(all_err_msg), None
        # Cache the extraction directory so later steps can reuse it.
        downloaded_files[identifier]["tarball-extracted"] = tar_dir
    # tarball is now surely extracted, so we try to fetch all XML in the folder
    xml_files_list = find_matching_files(downloaded_files[identifier]["tarball-extracted"], \
                                         ["xml"])
    # Try to convert authorlist candidates, returning on first success
    for xml_file in xml_files_list:
        xml_file_fd = open(xml_file, "r")
        xml_content = xml_file_fd.read()
        xml_file_fd.close()
        match = REGEXP_AUTHLIST.findall(xml_content)
        if match != []:
            # Save the matched <collaborationauthorlist> snippet to a
            # temporary file so bibconvert can read it.
            # NOTE(review): this temporary file is not removed here.
            tempfile_fd, temp_authorlist_path = tempfile.mkstemp(suffix=".xml", prefix="authorlist_temp", dir=CFG_TMPDIR)
            os.write(tempfile_fd, match[0])
            os.close(tempfile_fd)
            # Generate file to store conversion results
            newfile_fd, authorlist_resultxml_path = tempfile.mkstemp(suffix=".xml", prefix="authorlist_MARCXML", \
                                                                     dir=downloaded_files[identifier]["tarball-extracted"])
            os.close(newfile_fd)
            exitcode, cmd_stderr = call_bibconvert(config=CFG_OAI_AUTHORLIST_POSTMODE_STYLESHEET, \
                                                   harvestpath=temp_authorlist_path, \
                                                   convertpath=authorlist_resultxml_path)
            if cmd_stderr == "" and exitcode == 0:
                # Success!
                return 0, "", authorlist_resultxml_path
    # No valid authorlist found
    return 0, "", None
def plotextractor_harvest(identifier, active_file, selection=None):
    """
    Function that calls plotextractor library to download selected material,
    i.e. tarball or pdf, for passed identifier. Returns paths to respective files.

    @param identifier: OAI identifier of the record to harvest
    @param active_file: path to the currently processed file
    @param selection: list of materials to harvest. Defaults to
        ["pdf", "tarball"] when not given.
    @return: exitcode, errormessages and paths to harvested tarball and fulltexts
        (exitcode, err_msg, tarball, pdf)
    """
    if selection is None:
        # Build the default inside the function: a mutable default argument
        # would be shared between calls.
        selection = ["pdf", "tarball"]
    all_err_msg = []
    exitcode = 0
    active_dir, active_name = os.path.split(active_file)
    # turn oaiharvest_23_1_20110214161632_converted -> oaiharvest_23_1_material
    # to let harvested material in same folder structure
    active_name = "_".join(active_name.split('_')[:-2]) + "_material"
    extract_path = make_single_directory(active_dir, active_name)
    tarball, pdf = harvest_single(identifier, extract_path, selection)
    # Crude throttle between successive downloads.
    time.sleep(CFG_PLOTEXTRACTOR_DOWNLOAD_TIMEOUT)
    if tarball is None and "tarball" in selection:
        all_err_msg.append("Error harvesting tarball from id: %s %s" %
                           (identifier, extract_path))
        exitcode = 1
    if pdf is None and "pdf" in selection:
        all_err_msg.append("Error harvesting full-text from id: %s %s" %
                           (identifier, extract_path))
        exitcode = 1
    return exitcode, "\n".join(all_err_msg), tarball, pdf
def find_matching_files(basedir, filetypes):
    """
    This function tries to find all files matching given filetypes by looking at
    all the files and filenames in the given directory, including subdirectories.
    A file matches when either the output of the `file` utility mentions the
    filetype or the file extension equals it.

    @param basedir: full path to base directory to search in
    @type basedir: string
    @param filetypes: list of filetypes, extensions
    @type filetypes: list
    @return: list of full paths to all matching files
    @rtype: list
    """
    files_list = []
    for dirpath, dummy0, filenames in os.walk(basedir):
        for filename in filenames:
            full_path = os.path.join(dirpath, filename)
            # Ask the `file` utility once per file; lower-case once up front
            # instead of once per requested filetype.
            dummy1, cmd_out, dummy2 = run_shell_command('file %s', (full_path,))
            cmd_out = cmd_out.lower()
            extension = filename.split('.')[-1].lower()
            for filetype in filetypes:
                if cmd_out.find(filetype) > -1 or extension == filetype:
                    files_list.append(full_path)
                    # break so a file matching several requested filetypes is
                    # only appended once (the original could add duplicates).
                    break
    return files_list
def translate_fieldvalues_from_latex(record, tag, code='', encoding='utf-8'):
    """
    Given a record and field tag, this function will modify the record by
    translating the subfield values of found fields from LaTeX to chosen
    encoding for all the subfields with given code (or all if no code is given).

    @param record: record to modify, in BibRec style structure
    @type record: dict
    @param tag: tag of fields to modify
    @type tag: string
    @param code: restrict the translation to a given subfield code
    @type code: string
    @param encoding: character encoding for the new value. Defaults to UTF-8.
    @type encoding: string
    """
    field_list = record_get_field_instances(record, tag)
    for field in field_list:
        subfields = field[0]
        subfield_index = 0
        for subfield_code, subfield_value in subfields:
            if code == '' or subfield_code == code:
                newvalue = translate_latex2unicode(subfield_value).encode(encoding)
                record_modify_subfield(record, tag, subfield_code, newvalue,
                                       subfield_index, field_position_global=field[4])
            # The index advances for every subfield, matched or not, so it
            # always reflects the subfield's position within the field.
            subfield_index += 1
def create_authorlist_ticket(matching_fields, identifier):
    """
    This function will submit a ticket generated by UNDEFINED affiliations
    in extracted authors from collaboration authorlists.

    @param matching_fields: list of (tag, field_instances) for UNDEFINED nodes
    @type matching_fields: list
    @param identifier: OAI identifier of record
    @type identifier: string
    @return: return the ID of the created ticket, or None on failure
    @rtype: int or None
    """
    if bibcatalog_system is None:
        # No ticketing back-end is configured.
        return None
    subject = "[OAI Harvest] UNDEFINED affiliations for record %s" % (identifier,)
    text = """
Harvested record with identifier %(ident)s has had its authorlist extracted and contains some UNDEFINED affiliations.
To see the record, go here: %(baseurl)s/search?p=%(ident)s
If the record is not there yet, try again later. It may take some time for it to load into the system.
List of unidentified fields:
%(fields)s
""" % {
        'ident': identifier,
        'baseurl': CFG_SITE_URL,
        'fields': "\n".join([field_xml_output(field, tag)
                             for tag, field_instances in matching_fields
                             for field in field_instances])
    }
    queue = "Authors"
    ticketid = bibcatalog_system.ticket_submit(subject=subject, queue=queue)
    # `is None` (identity), consistent with the bibcatalog_system check above;
    # the original used `== None` here.
    if bibcatalog_system.ticket_comment(None, ticketid, text) is None:
        # Commenting failed, but the ticket itself exists: log and return it.
        write_message("Error: commenting on ticket %s failed." % (str(ticketid),))
    return ticketid
def create_oaiharvest_log(task_id, oai_src_id, marcxmlfile):
    """
    Function which creates the harvesting logs from a MARCXML file.

    @param task_id: bibupload task id
    @param oai_src_id: id of the OAI source configuration
    @param marcxmlfile: path to the harvested MARCXML file to log
    """
    file_fd = open(marcxmlfile, "r")
    try:
        # read() with no argument reads the whole file (equivalent to the
        # original read(-1), but clearer).
        xml_content = file_fd.read()
    finally:
        # Close the handle even if reading raises; the original leaked it.
        file_fd.close()
    create_oaiharvest_log_str(task_id, oai_src_id, xml_content)
def create_oaiharvest_log_str(task_id, oai_src_id, xml_content):
"""
Function which creates the harvesting logs
@param task_id bibupload task id
"""
try:
records = create_records(xml_content)
for record in records:
oai_id = record_extract_oai_id(record[0])
query = "INSERT INTO oaiHARVESTLOG (id_oaiHARVEST, oai_id, date_harvested, bibupload_task_id) VALUES (%s, %s, NOW(), %s)"
run_sql(query, (str(oai_src_id), str(oai_id), str(task_id)))
except Exception, msg:
print "Logging exception : %s " % (str(msg),)
def call_bibupload(marcxmlfile, mode=None, oai_src_id= -1, sequence_id=None):
"""
Creates a bibupload task for the task scheduler in given mode
on given file. Returns the generated task id and logs the event
in oaiHARVESTLOGS, also adding any given oai source identifier.
@param marcxmlfile: base-marcxmlfilename to upload
@param mode: mode to upload in
@param oai_src_id: id of current source config
@param sequence_id: sequence-number, if relevant
@return: task_id if successful, otherwise None.
"""
if mode is None:
mode = ["-r", "-i"]
if os.path.exists(marcxmlfile):
try:
args = mode
# Add job with priority 6 (above normal bibedit tasks) and file to upload to arguments
#FIXME: allow per-harvest arguments
args.extend(["-P", "6", marcxmlfile])
if sequence_id:
args.extend(['-I', str(sequence_id)])
task_id = task_low_level_submission("bibupload", "oaiharvest", *tuple(args))
create_oaiharvest_log(task_id, oai_src_id, marcxmlfile)
except Exception, msg:
write_message("An exception during submitting oaiharvest task occured : %s " % (str(msg)))
return None
return task_id
else:
write_message("marcxmlfile %s does not exist" % (marcxmlfile,))
return None
def call_bibfilter(bibfilterprogram, marcxmlfile):
    """
    Call bibfilter program BIBFILTERPROGRAM on MARCXMLFILE, which is usually
    run before uploading records.

    The bibfilter should produce up to four files called MARCXMLFILE.insert.xml,
    MARCXMLFILE.correct.xml, MARCXMLFILE.append.xml and MARCXMLFILE.holdingpen.xml.
    The first file contains parts of MARCXML to be uploaded in insert mode,
    the second file is uploaded in correct mode, third in append mode and the last file
    contains MARCXML to be uploaded into the holding pen.

    @param bibfilterprogram: path to bibfilter script to run
    @param marcxmlfile: base-marcxmlfilename
    @return: exitcode and any error messages as: (exitcode, err_msg)
    """
    all_err_msg = []
    exitcode = 0
    if bibfilterprogram:
        if not os.path.isfile(bibfilterprogram):
            all_err_msg.append("bibfilterprogram %s is not a file" %
                               (bibfilterprogram,))
            exitcode = 1
        elif not os.path.isfile(marcxmlfile):
            all_err_msg.append("marcxmlfile %s is not a file" % (marcxmlfile,))
            exitcode = 1
        else:
            exitcode, dummy, cmd_stderr = run_shell_command(cmd="%s '%s'",
                                                            args=(bibfilterprogram,
                                                                  marcxmlfile))
            if exitcode != 0 or cmd_stderr != "":
                all_err_msg.append("Error while running filtering script on %s\nError:%s" %
                                   (marcxmlfile, cmd_stderr))
    else:
        try:
            all_err_msg.append("no bibfilterprogram defined, copying %s only" %
                               (marcxmlfile,))
            shutil.copy(marcxmlfile, marcxmlfile + ".insert.xml")
        except (IOError, OSError, shutil.Error):
            # Narrowed from a bare `except:` so that programming errors and
            # KeyboardInterrupt are no longer silently swallowed; shutil.copy
            # signals failures with IOError/OSError (shutil.Error for safety).
            all_err_msg.append("cannot copy %s into %s.insert.xml" % (marcxmlfile, marcxmlfile))
            exitcode = 1
    return exitcode, "\n".join(all_err_msg)
def get_row_from_reposname(reposname):
""" Returns all information about a row (OAI source)
from the source name """
try:
sql = """SELECT id, baseurl, metadataprefix, arguments,
comment, bibconvertcfgfile, name, lastrun,
frequency, postprocess, setspecs,
bibfilterprogram
FROM oaiHARVEST WHERE name=%s"""
res = run_sql(sql, (reposname,))
reposdata = []
for element in res:
reposdata.append(element)
return reposdata
except StandardError, e:
return (0, e)
def get_all_rows_from_db():
""" This method retrieves the full database of repositories and returns
a list containing (in exact order):
| id | baseurl | metadataprefix | arguments | comment
| bibconvertcfgfile | name | lastrun | frequency
| postprocess | setspecs | bibfilterprogram
"""
try:
reposlist = []
sql = """SELECT id FROM oaiHARVEST"""
idlist = run_sql(sql)
for index in idlist:
sql = """SELECT id, baseurl, metadataprefix, arguments,
comment, bibconvertcfgfile, name, lastrun,
frequency, postprocess, setspecs,
bibfilterprogram
FROM oaiHARVEST WHERE id=%s""" % index
reposelements = run_sql(sql)
repos = []
for element in reposelements:
repos.append(element)
reposlist.append(repos)
return reposlist
except StandardError, e:
return (0, e)
def compare_timestamps_with_tolerance(timestamp1,
                                      timestamp2,
                                      tolerance=0):
    """Compare two timestamps of the form '2005-03-31 17:37:26' within an
    optional TOLERANCE (seconds).

    Return -1 if TIMESTAMP1 is less than TIMESTAMP2 minus TOLERANCE,
    0 if they are equal within the TOLERANCE limit, and 1 if TIMESTAMP1
    is greater than TIMESTAMP2 plus TOLERANCE.
    """
    def _to_epoch(stamp):
        # Drop any trailing fractional seconds (e.g. '...:26.00'), then
        # convert to Unix epoch seconds.
        stamp = re.sub(r'\.[0-9]+$', '', stamp)
        return calendar.timegm(time.strptime(stamp, "%Y-%m-%d %H:%M:%S"))

    delta = _to_epoch(timestamp1) - _to_epoch(timestamp2)
    if delta < -tolerance:
        return -1
    if delta > tolerance:
        return 1
    return 0
def get_dates(dates):
    """A method to validate and process the dates input by the user
    at the command line.

    @param dates: raw command-line value 'yyyy-mm-dd:yyyy-mm-dd', or empty
    @return: [from_date, until_date] on success, None on any validation error
    """
    if not dates:
        return None
    datestring = dates.split(":")
    if len(datestring) != 2:
        write_message("Dates have invalid format, not "
                      "'yyyy-mm-dd:yyyy-mm-dd'")
        return None
    twodates = []
    for date in datestring:
        ### perform some checks on the date format
        datechunks = date.split("-")
        valid = False
        if len(datechunks) == 3:
            try:
                # All three components must parse as non-zero integers. The
                # original silently skipped a date whose component was zero
                # (e.g. '2004-00-01'), leaving `twodates` short and later
                # crashing with an IndexError on twodates[0]/twodates[1].
                valid = bool(int(datechunks[0]) and int(datechunks[1]) and
                             int(datechunks[2]))
            except StandardError:
                valid = False
        if not valid:
            write_message("Dates have invalid format, not "
                          "'yyyy-mm-dd:yyyy-mm-dd'")
            return None
        twodates.append(date)
    ## final check.. date1 must be smaller than date2
    date1 = str(twodates[0]) + " 01:00:00"
    date2 = str(twodates[1]) + " 01:00:00"
    if compare_timestamps_with_tolerance(date1, date2) != -1:
        write_message("First date must be before second date.")
        return None
    return twodates
def get_repository_names(repositories):
    """Validate and split the comma-separated repository names given on the
    command line. Returns a list of names, or None when no value was given."""
    if not repositories:
        return None
    names = []
    for raw_name in repositories.split(","):
        ### take into account both single word names and multiple word
        ### names (which get wrapped around "" or '')
        cleaned = raw_name.strip()
        if cleaned.startswith("'"):
            cleaned = cleaned.strip("'")
        elif cleaned.startswith('"'):
            cleaned = cleaned.strip('"')
        names.append(cleaned)
    return names
def usage(exitcode=0, msg=""):
    """Write brief usage info to stderr and exit. Only used when run in the
    'manual' harvesting mode."""
    sys.stderr.write("*Manual single-shot harvesting mode*\n")
    if msg:
        sys.stderr.write(msg + "\n")
    sys.exit(exitcode)
def main():
    """Starts the tool.

    If the command line arguments are those of the 'manual' mode, then
    starts a manual one-time harvesting. Else trigger a BibSched task
    for automated harvesting based on the OAIHarvest admin settings.
    """
    # Let's try to parse the arguments as used in manual harvesting:
    try:
        opts, args = getopt.getopt(sys.argv[1:], "o:v:m:p:i:s:f:u:r:x:c:k:w:l:",
                                   ["output=",
                                    "verb=",
                                    "method=",
                                    "metadataPrefix=",
                                    "identifier=",
                                    "set=",
                                    "from=",
                                    "until=",
                                    "resumptionToken=",
                                    "certificate=",
                                    "key=",
                                    "user=",
                                    "password="]
                                   )
        # So everything went smoothly: start harvesting in manual mode
        if len([opt for opt, opt_value in opts if opt in ['-v', '--verb']]) > 0:
            # verb parameter is given
            http_param_dict = {}
            method = "POST"
            output = ""
            user = None
            password = None
            cert_file = None
            key_file = None
            sets = []
            # get options and arguments
            for opt, opt_value in opts:
                if opt in ["-v", "--verb"]:
                    http_param_dict['verb'] = opt_value
                elif opt in ["-m", '--method']:
                    # Only GET/POST are accepted; anything else keeps POST.
                    if opt_value == "GET" or opt_value == "POST":
                        method = opt_value
                elif opt in ["-p", "--metadataPrefix"]:
                    http_param_dict['metadataPrefix'] = opt_value
                elif opt in ["-i", "--identifier"]:
                    http_param_dict['identifier'] = opt_value
                elif opt in ["-s", "--set"]:
                    # Whitespace-separated list of OAI sets.
                    sets = opt_value.split()
                elif opt in ["-f", "--from"]:
                    http_param_dict['from'] = opt_value
                elif opt in ["-u", "--until"]:
                    http_param_dict['until'] = opt_value
                elif opt in ["-r", "--resumptionToken"]:
                    http_param_dict['resumptionToken'] = opt_value
                elif opt in ["-o", "--output"]:
                    output = opt_value
                elif opt in ["-c", "--certificate"]:
                    cert_file = opt_value
                elif opt in ["-k", "--key"]:
                    key_file = opt_value
                elif opt in ["-l", "--user"]:
                    user = opt_value
                elif opt in ["-w", "--password"]:
                    password = opt_value
                elif opt in ["-V", "--version"]:
                    print __revision__
                    sys.exit(0)
                else:
                    usage(1, "Option %s is not allowed" % opt)
            if len(args) > 0:
                # The last positional argument is the base URL to harvest.
                base_url = args[-1]
                if not base_url.lower().startswith('http'):
                    base_url = 'http://' + base_url
                (addressing_scheme, network_location, path, dummy1,
                 dummy2, dummy3) = urlparse.urlparse(base_url)
                secure = (addressing_scheme == "https")
                if (cert_file and not key_file) or \
                   (key_file and not cert_file):
                    # Both are needed if one specified
                    usage(1, "You must specify both certificate and key files")
                if password and not user:
                    # User must be specified when password is given
                    usage(1, "You must specify a username")
                elif user and not password:
                    if not secure:
                        sys.stderr.write("*WARNING* Your password will be sent in clear!\n")
                    try:
                        # Prompt interactively so the password does not end
                        # up in the shell history.
                        password = getpass.getpass()
                    except KeyboardInterrupt, error:
                        sys.stderr.write("\n%s\n" % (error,))
                        sys.exit(0)
                oai_harvest_getter.harvest(network_location, path,
                                           http_param_dict, method,
                                           output, sets, secure, user,
                                           password, cert_file,
                                           key_file)
                sys.stderr.write("Harvesting completed at: %s\n\n" %
                                 time.strftime("%Y-%m-%d %H:%M:%S --> ", time.localtime()))
                return
            else:
                usage(1, "You must specify the URL to harvest")
        else:
            # verb is not given. We will continue with periodic
            # harvesting. But first check if URL parameter is given:
            # if it is, then warn directly now
            if len(args) > 1 or \
               (len(args) == 1 and not args[0].isdigit()):
                usage(1, "You must specify the --verb parameter")
    except getopt.error, e:
        # So could it be that we are using different arguments? Try to
        # start the BibSched task (automated harvesting) and see if it
        # validates
        pass
    # BibSched mode - periodical harvesting
    # Note that the 'help' is common to both manual and automated
    # mode.
    task_set_option("repository", None)
    task_set_option("dates", None)
    task_init(authorization_action='runoaiharvest',
              authorization_msg="oaiharvest Task Submission",
              description="""
Harvest records from OAI sources.
Manual vs automatic harvesting:
- Manual harvesting retrieves records from the specified URL,
with the specified OAI arguments. Harvested records are displayed
on the standard output or saved to a file, but are not integrated
into the repository. This mode is useful to 'play' with OAI
repositories or to build special harvesting scripts.
- Automatic harvesting relies on the settings defined in the OAI
Harvest admin interface to periodically retrieve the repositories
and sets to harvest. It also take care of harvesting only new or
modified records. Records harvested using this mode are converted
and integrated into the repository, according to the settings
defined in the OAI Harvest admin interface.
Examples:
Manual (single-shot) harvesting mode:
Save to /tmp/z.xml records from CDS added/modified between 2004-04-01
and 2004-04-02, in MARCXML:
$ oaiharvest -vListRecords -f2004-04-01 -u2004-04-02 -pmarcxml -o/tmp/z.xml http://cds.cern.ch/oai2d
Automatic (periodical) harvesting mode:
Schedule daily harvesting of all repositories defined in OAIHarvest admin:
$ oaiharvest -s 24h
Schedule daily harvesting of repository 'arxiv', defined in OAIHarvest admin:
$ oaiharvest -r arxiv -s 24h
Harvest in 10 minutes from 'pubmed' repository records added/modified
between 2005-05-05 and 2005-05-10:
$ oaiharvest -r pubmed -d 2005-05-05:2005-05-10 -t 10m
""",
              help_specific_usage='Manual single-shot harvesting mode:\n'
              ' -o, --output specify output file\n'
              ' -v, --verb OAI verb to be executed\n'
              ' -m, --method http method (default POST)\n'
              ' -p, --metadataPrefix metadata format\n'
              ' -i, --identifier OAI identifier\n'
              ' -s, --set OAI set(s). Whitespace-separated list\n'
              ' -r, --resuptionToken Resume previous harvest\n'
              ' -f, --from from date (datestamp)\n'
              ' -u, --until until date (datestamp)\n'
              ' -c, --certificate path to public certificate (in case of certificate-based harvesting)\n'
              ' -k, --key path to private key (in case of certificate-based harvesting)\n'
              ' -l, --user username (in case of password-protected harvesting)\n'
              ' -w, --password password (in case of password-protected harvesting)\n'
              'Automatic periodical harvesting mode:\n'
              ' -r, --repository="repo A"[,"repo B"] \t which repositories to harvest (default=all)\n'
              ' -d, --dates=yyyy-mm-dd:yyyy-mm-dd \t reharvest given dates only\n',
              version=__revision__,
              specific_params=("r:d:", ["repository=", "dates=", ]),
              task_submit_elaborate_specific_parameter_fnc=
              task_submit_elaborate_specific_parameter,
              task_run_fnc=task_run_core)
def task_submit_elaborate_specific_parameter(key, value, opts, args):
    """Elaborate specific cli parameters for oaiharvest.

    Returns True when the key was consumed, False otherwise.
    """
    if key in ("-r", "--repository"):
        task_set_option('repository', get_repository_names(value))
    elif key in ("-d", "--dates"):
        task_set_option('dates', get_dates(value))
        # get_dates() maps any malformed input to None, so a non-None value
        # that stored None means the user passed an invalid date range.
        if value is not None and task_get_option("dates") is None:
            raise StandardError, "Date format not valid."
    else:
        # Unknown key: let the generic option handling deal with it.
        return False
    return True
### okay, here we go: script entry point.
if __name__ == '__main__':
    main()
| gpl-2.0 |
fdvarela/odoo8 | addons/report_webkit/report_helper.py | 381 | 3507 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2010 Camptocamp SA (http://www.camptocamp.com)
# All Right Reserved
#
# Author : Nicolas Bessi (Camptocamp)
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
import openerp
class WebKitHelper(object):
    """Collection of helpers usable from webkit report templates."""

    def __init__(self, cursor, uid, report_id, context):
        """Keep the ORM handles needed by the helper methods."""
        self.cursor = cursor
        self.uid = uid
        self.pool = openerp.registry(self.cursor.dbname)
        self.report_id = report_id
        self.context = context

    def embed_image(self, type, img, width=0, height=0):
        """Return an HTML <img> tag embedding *img* as a base64 data URI.

        Zero width/height means the corresponding attribute is omitted
        (a single space is emitted in its place).
        """
        width_attr = 'width="%spx"' % (width) if width else ' '
        height_attr = 'height="%spx"' % (height) if height else ' '
        return '<img %s %s src="data:image/%s;base64,%s" />' % (
            width_attr, height_attr, type, str(img))

    def get_logo_by_name(self, name):
        """Return (image data, image type) for the header image *name*,
        or an empty unicode string when no such image exists."""
        header_obj = self.pool.get('ir.header_img')
        found_ids = header_obj.search(
            self.cursor,
            self.uid,
            [('name', '=', name)]
        )
        if not found_ids:
            return u''
        if isinstance(found_ids, list):
            # search() returns a list of ids; keep only the first match.
            found_ids = found_ids[0]
        head = header_obj.browse(self.cursor, self.uid, found_ids)
        return (head.img, head.type)

    def embed_logo_by_name(self, name, width=0, height=0):
        """Return an HTML-embedded logo looked up by its name."""
        img, type = self.get_logo_by_name(name)
        return self.embed_image(type, img, width, height)

    def embed_company_logo(self, width=0, height=0):
        """Return the current user's company web logo as an embedded PNG."""
        cr, uid, context = self.cursor, self.uid, self.context
        my_user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        logo = my_user.company_id.logo_web
        return self.embed_image("png", logo, width, height)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Lujeni/ansible | test/units/modules/network/f5/test_bigip_snmp_trap.py | 38 | 6302 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_snmp_trap import V2Parameters
from library.modules.bigip_snmp_trap import V1Parameters
from library.modules.bigip_snmp_trap import ModuleManager
from library.modules.bigip_snmp_trap import V2Manager
from library.modules.bigip_snmp_trap import V1Manager
from library.modules.bigip_snmp_trap import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.compat.mock import DEFAULT
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_snmp_trap import V2Parameters
from ansible.modules.network.f5.bigip_snmp_trap import V1Parameters
from ansible.modules.network.f5.bigip_snmp_trap import ModuleManager
from ansible.modules.network.f5.bigip_snmp_trap import V2Manager
from ansible.modules.network.f5.bigip_snmp_trap import V1Manager
from ansible.modules.network.f5.bigip_snmp_trap import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.compat.mock import DEFAULT
from units.modules.utils import set_module_args
# Fixture files live next to this module; parsed results are memoized here.
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}


def load_fixture(name):
    """Load a fixture file, JSON-decoding it when possible, with caching.

    Files that are not valid JSON are returned as their raw text.
    """
    path = os.path.join(fixture_path, name)
    if path in fixture_data:
        return fixture_data[path]
    with open(path) as f:
        raw = f.read()
    try:
        parsed = json.loads(raw)
    except Exception:
        # Not JSON: keep the raw text (some fixtures are plain payloads).
        parsed = raw
    fixture_data[path] = parsed
    return parsed
class TestParameters(unittest.TestCase):
    """Checks that module- and API-style arguments map onto parameter objects."""

    def test_module_networked_parameters(self):
        params = V2Parameters(params=dict(
            name='foo',
            snmp_version='1',
            community='public',
            destination='10.10.10.10',
            port=1000,
            network='other',
        ))
        assert params.name == 'foo'
        assert params.snmp_version == '1'
        assert params.community == 'public'
        assert params.destination == '10.10.10.10'
        assert params.port == 1000
        assert params.network == 'other'

    def test_module_non_networked_parameters(self):
        params = V1Parameters(params=dict(
            name='foo',
            snmp_version='1',
            community='public',
            destination='10.10.10.10',
            port=1000,
            network='other',
        ))
        assert params.name == 'foo'
        assert params.snmp_version == '1'
        assert params.community == 'public'
        assert params.destination == '10.10.10.10'
        assert params.port == 1000
        # V1 (non-networked) parameters drop the network argument entirely.
        assert params.network is None

    def test_api_parameters(self):
        params = V2Parameters(params=dict(
            name='foo',
            community='public',
            host='10.10.10.10',
            network='other',
            version=1,
            port=1000,
        ))
        assert params.name == 'foo'
        assert params.snmp_version == '1'
        assert params.community == 'public'
        assert params.destination == '10.10.10.10'
        assert params.port == 1000
        assert params.network == 'other'
class TestManager(unittest.TestCase):
    """Exercises ModuleManager.exec_module() end-to-end with mocked device I/O."""

    def setUp(self):
        self.spec = ArgumentSpec()

    def test_create_trap(self, *args):
        # Networked path: version checks are forced so the V2 manager runs.
        set_module_args(dict(
            name='foo',
            snmp_version='1',
            community='public',
            destination='10.10.10.10',
            port=1000,
            network='other',
            provider=dict(
                server='localhost',
                password='password',
                user='admin'
            )
        ))
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        # Override methods to force specific logic in the module to happen
        m0 = ModuleManager(module=module)
        m0.is_version_without_network = Mock(return_value=False)
        m0.is_version_with_default_network = Mock(return_value=True)
        # Patch device I/O: trap does not exist yet, creation succeeds.
        patches = dict(
            create_on_device=DEFAULT,
            exists=DEFAULT
        )
        with patch.multiple(V2Manager, **patches) as mo:
            mo['create_on_device'].side_effect = Mock(return_value=True)
            mo['exists'].side_effect = Mock(return_value=False)
            results = m0.exec_module()
        assert results['changed'] is True
        assert results['port'] == 1000
        assert results['snmp_version'] == '1'

    def test_create_trap_non_network(self, *args):
        # Non-networked path: no `network` arg, version check routes to V1.
        set_module_args(dict(
            name='foo',
            snmp_version='1',
            community='public',
            destination='10.10.10.10',
            port=1000,
            provider=dict(
                server='localhost',
                password='password',
                user='admin'
            )
        ))
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        # Override methods to force specific logic in the module to happen
        m0 = ModuleManager(module=module)
        m0.is_version_without_network = Mock(return_value=True)
        # Patch device I/O: trap does not exist yet, creation succeeds.
        patches = dict(
            create_on_device=DEFAULT,
            exists=DEFAULT
        )
        with patch.multiple(V1Manager, **patches) as mo:
            mo['create_on_device'].side_effect = Mock(return_value=True)
            mo['exists'].side_effect = Mock(return_value=False)
            results = m0.exec_module()
        assert results['changed'] is True
        assert results['port'] == 1000
        assert results['snmp_version'] == '1'
| gpl-3.0 |
tensorflow/tensorflow | tensorflow/python/keras/saving/save_weights_test.py | 6 | 25642 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#,============================================================================
"""Tests for model saving in the HDF5 format."""
import os
import shutil
import uuid
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras import combinations
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import optimizer_v1
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.saving import hdf5_format
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import training as training_module
from tensorflow.python.training.tracking import util as trackable
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class TestWeightSavingAndLoading(test.TestCase, parameterized.TestCase):
def _save_model_dir(self, dirname='saved_model'):
    """Return a path under a fresh temp dir that is removed after the test."""
    base_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, base_dir, ignore_errors=True)
    return os.path.join(base_dir, dirname)
@keras_parameterized.run_with_all_weight_formats
def test_weight_loading(self):
    """Round-trips model weights via get/set_weights and save/load_weights."""
    saved_model_dir = self._save_model_dir()
    save_format = testing_utils.get_save_format()
    with self.cached_session():
        a = keras.layers.Input(shape=(2,))
        x = keras.layers.Dense(3)(a)
        b = keras.layers.Dense(1)(x)
        model = keras.models.Model(a, b)
        x = np.random.random((3, 2))
        ref_y = model.predict(x)
        weights = model.get_weights()
        # In-memory round trip must reproduce the reference predictions.
        model.set_weights(weights)
        y = model.predict(x)
        self.assertAllClose(ref_y, y)
        # Mismatched weight lists (wrong length, wrong order) are rejected.
        with self.assertRaises(ValueError):
            model.set_weights(weights[1:])
        with self.assertRaises(ValueError):
            model.set_weights(weights[::-1])
        # On-disk round trip (format chosen by the parameterized decorator).
        model.save_weights(saved_model_dir, save_format=save_format)
        model.load_weights(saved_model_dir)
        y = model.predict(x)
        self.assertAllClose(ref_y, y)
def test_weight_preprocessing(self):
input_dim = 3
output_dim = 3
size = 2
cases = [
[
(keras.layers.Bidirectional(keras.layers.SimpleRNN(2))),
[np.random.random((2, 1)), np.random.random((2, 1))],
(None, 3, 2),
],
[
(keras.layers.TimeDistributed(keras.layers.Dense(1))),
[np.random.random((2, 1)), np.random.random((1,))],
(None, 3, 2),
],
[
(keras.layers.Conv1D(output_dim, size, use_bias=False)),
[np.random.random((output_dim, input_dim, size, 1))],
(None, 4, input_dim),
],
[
(keras.layers.Conv2D(output_dim, size,
use_bias=False, data_format='channels_first')),
[np.random.random((output_dim, input_dim, size, size))],
(None, input_dim, 4, 4),
],
[
(keras.layers.Conv2DTranspose(output_dim, size,
use_bias=False,
data_format='channels_first')),
[np.random.random((output_dim, input_dim, size, size))],
(None, input_dim, 4, 4),
],
[
(keras.layers.Conv2DTranspose(output_dim, size,
use_bias=False,
data_format='channels_last')),
[np.random.random((size, size, input_dim, output_dim))],
(None, 4, 4, input_dim),
],
[
(keras.layers.Conv3D(output_dim, size,
use_bias=False, data_format='channels_first')),
[np.random.random((output_dim, input_dim, size, size, size))],
(None, input_dim, 4, 4, 4),
],
[
(keras.layers.GRUV1(output_dim)),
[np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,))],
(None, 4, input_dim),
],
[
(keras.layers.LSTMV1(output_dim)),
[np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,))],
(None, 4, input_dim),
],
]
for layer, weights, input_shape in cases:
layer.build(input_shape)
_ = hdf5_format.preprocess_weights_for_loading(
layer, weights, original_keras_version='1')
model = keras.models.Sequential([keras.layers.Dense(2, input_dim=2)])
_ = hdf5_format.preprocess_weights_for_loading(
model, model.weights, original_keras_version='1')
x = keras.Input((2,))
y = keras.layers.Dense(2)(x)
model = keras.models.Model(x, y)
_ = hdf5_format.preprocess_weights_for_loading(
model, model.weights, original_keras_version='1')
@parameterized.named_parameters(
('gru', keras.layers.GRU, {
'units': 2,
'input_shape': (3, 5)
}),
('gru_with_reset_after', keras.layers.GRU, {
'units': 2,
'input_shape': (3, 5),
'reset_after': True
}),
('lstm', keras.layers.LSTM, {
'units': 2,
'input_shape': (3, 5)
}),
('cudnngru', keras.layers.CuDNNGRU, {
'units': 2,
'input_shape': (3, 5)
}),
('cudnnlstm', keras.layers.CuDNNLSTM, {
'units': 2,
'input_shape': (3, 5)
}))
def test_preprocess_weights_for_loading_rnn_should_be_idempotent(
self, layer_class, layer_args):
with self.cached_session():
layer = layer_class(**layer_args)
layer.build(input_shape=layer_args.get('input_shape'))
weights1 = layer.get_weights()
weights2 = hdf5_format.preprocess_weights_for_loading(
layer, weights1)
_ = [
self.assertAllClose(x, y, rtol=1e-05)
for (x, y) in zip(weights1, weights2)
]
def test_sequential_weight_loading(self):
if h5py is None:
return
h5_path = self._save_model_dir('test.h5')
num_hidden = 5
input_dim = 3
batch_size = 5
num_classes = 2
with self.cached_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))
model.add(keras.layers.Dense(num_classes))
x = np.random.random((batch_size, input_dim))
ref_y = model.predict(x)
model.save_weights(h5_path)
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))
model.add(keras.layers.Dense(num_classes))
model.load_weights(h5_path)
y = model.predict(x)
self.assertAllClose(y, ref_y)
@keras_parameterized.run_with_all_saved_model_formats(
exclude_formats=['tf_no_traces'])
def test_nested_model_weight_loading(self):
save_format = testing_utils.get_save_format()
saved_model_dir = self._save_model_dir()
batch_size = 5
shape = (None, None, 3)
with self.cached_session():
def gen_model():
def seq_model():
model = keras.models.Sequential([
keras.layers.Conv2D(3, 1, input_shape=shape),
keras.layers.BatchNormalization()])
return model
x = inner_inputs = keras.layers.Input((None, None, 3))
x = seq_model()(x)
x = seq_model()(x)
inner_model = keras.models.Model(inner_inputs, x)
inputs = keras.layers.Input(shape)
return keras.models.Model(inputs, inner_model(inputs))
model = gen_model()
x = np.random.random((batch_size, 1, 1, 3))
ref_y = model.predict(x)
model.save_weights(saved_model_dir, save_format=save_format)
model = gen_model()
model.load_weights(saved_model_dir)
y = model.predict(x)
self.assertAllClose(y, ref_y)
def test_sequential_weight_loading_group_name_with_incorrect_length(self):
if h5py is None:
return
h5_path = self._save_model_dir('test.h5')
num_hidden = 5
input_dim = 3
num_classes = 2
with self.cached_session():
ref_model = keras.models.Sequential()
ref_model.add(keras.layers.Dense(num_hidden, input_dim=input_dim,
name='d1'))
ref_model.add(keras.layers.Dense(num_classes, name='d2'))
ref_model.compile(loss=keras.losses.MSE,
optimizer='rmsprop',
metrics=[keras.metrics.categorical_accuracy])
f_ref_model = h5py.File(h5_path, 'w')
hdf5_format.save_weights_to_hdf5_group(f_ref_model, ref_model.layers)
f_model = h5py.File(h5_path, 'r')
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden, use_bias=False,
input_dim=input_dim, name='d1'))
model.add(keras.layers.Dense(num_classes, name='d2'))
model.compile(loss=keras.losses.MSE,
optimizer='rmsprop',
metrics=[keras.metrics.categorical_accuracy])
with self.assertRaisesRegex(
ValueError, r'Layer #0 \(named \"d1\"\) expects 1 '
r'weight\(s\), but the saved weights have 2 '
r'element\(s\)\.'):
hdf5_format.load_weights_from_hdf5_group_by_name(f_model, model.layers)
hdf5_format.load_weights_from_hdf5_group_by_name(
f_model, model.layers, skip_mismatch=True)
self.assertAllClose(keras.backend.get_value(ref_model.layers[1].kernel),
keras.backend.get_value(model.layers[1].kernel))
def test_sequential_weight_loading_group_name_with_incorrect_shape(self):
if h5py is None:
return
h5_path = self._save_model_dir('test.h5')
num_hidden = 5
input_dim = 3
num_classes = 2
with ops.Graph().as_default(), self.cached_session():
ref_model = keras.models.Sequential()
ref_model.add(keras.layers.Dense(num_hidden, input_dim=input_dim,
name='d1'))
ref_model.add(keras.layers.Dense(num_classes, name='d2'))
ref_model.compile(loss=keras.losses.MSE,
optimizer=optimizer_v1.RMSprop(lr=0.0001),
metrics=[keras.metrics.categorical_accuracy])
f_ref_model = h5py.File(h5_path, 'w')
keras.backend.set_value(ref_model.layers[1].bias, [3.5] * num_classes)
hdf5_format.save_weights_to_hdf5_group(f_ref_model, ref_model.layers)
f_model = h5py.File(h5_path, 'r')
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden + 5, input_dim=input_dim,
name='d1'))
model.add(keras.layers.Dense(num_classes, name='d2'))
model.compile(loss=keras.losses.MSE,
optimizer=optimizer_v1.RMSprop(lr=0.0001),
metrics=[keras.metrics.categorical_accuracy])
with self.assertRaisesRegex(
ValueError, r'Layer #0 \(named "d1"\), weight '
r'<tf\.Variable \'d1_1\/kernel:0\' '
r'shape=\(3, 10\) dtype=float32> has '
r'shape \(3, 10\), but the saved weight has '
r'shape \(3, 5\)\.'):
hdf5_format.load_weights_from_hdf5_group_by_name(f_model, model.layers)
hdf5_format.load_weights_from_hdf5_group_by_name(
f_model, model.layers, skip_mismatch=True)
self.assertAllClose([3.5] * num_classes,
keras.backend.get_value(model.layers[1].bias))
@keras_parameterized.run_with_all_saved_model_formats(
exclude_formats=['tf_no_traces'])
@keras_parameterized.run_with_all_model_types
def test_load_weights_from_saved_model(self):
save_path = self._save_model_dir()
save_format = testing_utils.get_save_format()
if save_format == 'h5' and testing_utils.get_model_type() == 'subclass':
# TODO(b/173646281): HDF5 format currently does not allow saving
# subclassed models.
return
with self.cached_session():
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
data = np.random.random((1, 3))
labels = np.random.random((1, 4))
model.compile(loss='mse', optimizer='rmsprop')
model.fit(data, labels)
model.save(save_path, save_format=save_format)
new_model = testing_utils.get_small_mlp(1, 4, input_dim=3)
if testing_utils.get_model_type() == 'subclass':
# Call on test data to build the model.
new_model.predict(data)
new_model.load_weights(save_path)
self.assertAllClose(model.weights, new_model.weights)
class SubclassedModel(training.Model):
  """Minimal two-Dense subclassed model used by the weight-saving tests.

  Attribute names (x_layer, b_layer) are part of the checkpoint structure and
  must not be renamed.
  """

  def __init__(self):
    super(SubclassedModel, self).__init__()
    self.x_layer = keras.layers.Dense(3)
    self.b_layer = keras.layers.Dense(1)

  def call(self, a):
    hidden = self.x_layer(a)
    return self.b_layer(hidden)
class TestWeightSavingAndLoadingTFFormat(test.TestCase, parameterized.TestCase):
  """Tests specific to the TensorFlow-checkpoint weight format."""

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_tensorflow_format_overwrite(self):
    """save_weights(overwrite=False) prompts instead of silently clobbering."""
    with self.cached_session() as session:
      model = SubclassedModel()
      temp_dir = self.get_temp_dir()
      prefix = os.path.join(temp_dir, 'ckpt')
      x = constant_op.constant(np.random.random((3, 2)), dtype=dtypes.float32)
      executing_eagerly = context.executing_eagerly()
      model(x)  # pylint: disable=not-callable
      if not executing_eagerly:
        session.run([v.initializer for v in model.variables])
      model.save_weights(prefix, save_format='tensorflow')
      model.save_weights(prefix, save_format='tensorflow', overwrite=True)
      # EOFError: reading the overwrite prompt from stdin fails under test.
      with self.assertRaises(EOFError):
        # Indirectly tests that the user is prompted
        model.save_weights(prefix, save_format='tensorflow', overwrite=False)

  def test_no_default_session(self):
    """Training and weight save/load work with no default session installed."""
    with ops.Graph().as_default():
      self.assertFalse(ops.get_default_session())
      data = np.random.random((1000, 32)).astype(np.float32)
      labels = np.random.random((1000, 10)).astype(np.float32)
      model = keras.models.Sequential([
          keras.layers.Dense(10, activation='softmax'),
          keras.layers.Dense(10, activation='softmax')])
      model.compile(optimizer=training_module.RMSPropOptimizer(0.001),
                    loss='categorical_crossentropy',
                    metrics=['accuracy'])
      model.fit(data, labels)
      fname = os.path.join(self.get_temp_dir(), 'weights', 'ckpt')
      model.save_weights(fname)
      model.load_weights(fname)

  def test_no_graph_pollution(self):
    """Repeated save/load must not add new ops to the graph."""
    with ops.get_default_graph().as_default():
      graph = ops.Graph()
      with graph.as_default(), self.session(graph) as session:
        model = SubclassedModel()
        temp_dir = self.get_temp_dir()
        prefix = os.path.join(temp_dir, 'ckpt')
        x = constant_op.constant(np.random.random((3, 2)), dtype=dtypes.float32)
        model(x)  # pylint: disable=not-callable
        session.run([v.initializer for v in model.variables])
        # The first save/load may create ops; subsequent calls must reuse them.
        model.save_weights(prefix, save_format='tensorflow')
        op_count = len(graph.get_operations())
        model.save_weights(prefix, save_format='tensorflow')
        self.assertLen(graph.get_operations(), op_count)
        model.load_weights(prefix)
        op_count = len(graph.get_operations())
        model.load_weights(prefix)
        self.assertLen(graph.get_operations(), op_count)

  def _weight_loading_test_template(self, make_model_fn):
    """Shared save/restore scenario for both graph and subclassed models.

    Saves after one training step, scrambles the variables, then checks that
    load_weights restores them in-place, on restore-on-create, and through
    incremental restore before compile.
    """
    with self.cached_session():
      model = make_model_fn()
      model.compile(
          loss='mse',
          optimizer=training_module.RMSPropOptimizer(0.1),
          metrics=['acc', keras.metrics.CategoricalAccuracy()])
      temp_dir = self.get_temp_dir()
      prefix = os.path.join(temp_dir, 'ckpt')
      train_x = np.random.random((3, 2))
      train_y = np.random.random((3,))
      x = constant_op.constant(train_x, dtype=dtypes.float32)
      model.train_on_batch(train_x, train_y)
      model.save_weights(prefix, save_format='tf')
      ref_y_before_train = model.predict(train_x)
      model.train_on_batch(train_x, train_y)
      ref_y_after_train = model.predict(train_x)
      # Scramble every variable so a successful restore is unambiguous.
      for v in model.variables:
        self.evaluate(
            v.assign(random_ops.random_normal(shape=array_ops.shape(v))))
      self.addCleanup(shutil.rmtree, temp_dir)
      model.load_weights(prefix)
      self.assertAllClose(ref_y_before_train, self.evaluate(model(x)))
      # Test restore-on-create if this is a subclassed Model (graph Networks
      # will have already created their variables).
      load_model = make_model_fn()
      load_model.load_weights(prefix)
      self.assertAllClose(
          ref_y_before_train,
          self.evaluate(load_model(x)))
      load_model = make_model_fn()
      load_model.load_weights(prefix)
      # We need to run some of the restore ops for predict(), but not all
      # variables have been created yet (optimizer slot variables). Tests
      # incremental restore.
      load_model.predict(train_x)
      load_model.compile(
          loss='mse',
          optimizer=training_module.RMSPropOptimizer(0.1),
          metrics=['acc', keras.metrics.CategoricalAccuracy()])
      load_model.train_on_batch(train_x, train_y)
      self.assertAllClose(ref_y_after_train, self.evaluate(load_model(x)))

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_weight_loading_graph_model(self):
    """Runs the template with a functional (graph) model."""
    def _make_graph_model():
      a = keras.layers.Input(shape=(2,))
      x = keras.layers.Dense(3)(a)
      b = keras.layers.Dense(1)(x)
      return keras.models.Model(a, b)

    self._weight_loading_test_template(_make_graph_model)

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_weight_loading_subclassed_model(self):
    """Runs the template with a subclassed model."""
    self._weight_loading_test_template(SubclassedModel)

  def _new_layer_weight_loading_test_template(
      self, first_model_fn, second_model_fn):
    """Checks checkpoint compatibility when a model gains extra layers.

    Saves first_model_fn's weights, loads them into second_model_fn (which has
    additional layers), re-saves, and verifies the second checkpoint still
    loads back into the original architecture.
    """
    with self.cached_session() as session:
      model = first_model_fn()
      temp_dir = self.get_temp_dir()
      prefix = os.path.join(temp_dir, 'ckpt')
      x = constant_op.constant(np.random.random((3, 2)), dtype=dtypes.float32)
      executing_eagerly = context.executing_eagerly()
      ref_y_tensor = model(x)
      if not executing_eagerly:
        session.run([v.initializer for v in model.variables])
      ref_y = self.evaluate(ref_y_tensor)
      model.save_weights(prefix)
      self.assertEqual(
          prefix,
          checkpoint_management.latest_checkpoint(temp_dir))
      # Scramble so a later successful restore is unambiguous.
      for v in model.variables:
        self.evaluate(
            v.assign(random_ops.random_normal(shape=array_ops.shape(v))))
      self.addCleanup(shutil.rmtree, temp_dir)
      second_model = second_model_fn()
      status = second_model.load_weights(prefix)
      second_model(x)
      status.run_restore_ops()
      second_model.save_weights(prefix)
      # Check that the second model's checkpoint loads into the original model
      status = model.load_weights(prefix)
      status.run_restore_ops(session)
      y = self.evaluate(model(x))
      self.assertAllClose(ref_y, y)

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_weight_loading_graph_model_added_layer(self):
    """Extra weighted layer in the restoring graph model is tolerated."""
    def _save_graph_model():
      a = keras.layers.Input(shape=(2,))
      x = keras.layers.Dense(3, name='first')(a)
      b = keras.layers.Dense(1, name='second')(x)
      return keras.models.Model(a, b)

    def _restore_graph_model():
      a = keras.layers.Input(shape=(2,))
      x = keras.layers.Dense(3, name='first')(a)
      y = keras.layers.Dense(1, name='second')(x)
      b = keras.layers.Dense(3, name='secondjr')(y)
      return keras.models.Model(a, b)

    self._new_layer_weight_loading_test_template(
        _save_graph_model, _restore_graph_model)

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_weight_loading_graph_model_added_no_weight_layer(self):
    """Extra weight-less layer (Dropout) in the restoring model is tolerated."""
    def _save_graph_model():
      a = keras.layers.Input(shape=(2,))
      x = keras.layers.Dense(3, name='first')(a)
      b = keras.layers.Dense(1, name='second')(x)
      return keras.models.Model(a, b)

    def _restore_graph_model():
      a = keras.layers.Input(shape=(2,))
      x = keras.layers.Dense(3, name='first')(a)
      b = keras.layers.Dense(1, name='second')(x)
      y = keras.layers.Dropout(rate=0.1)(b)
      return keras.models.Model(a, y)

    self._new_layer_weight_loading_test_template(
        _save_graph_model, _restore_graph_model)

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_weight_loading_subclassed_model_added_layer(self):
    """Extra layer in the restoring subclassed model is tolerated."""
    class SubclassedModelRestore(training.Model):

      def __init__(self):
        super(SubclassedModelRestore, self).__init__()
        self.x_layer = keras.layers.Dense(3)
        self.y_layer = keras.layers.Dense(3)
        self.b_layer = keras.layers.Dense(1)

      def call(self, a):
        return self.b_layer(self.y_layer(self.x_layer(a)))

    self._new_layer_weight_loading_test_template(
        SubclassedModel, SubclassedModelRestore)

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_incompatible_checkpoint(self):
    """Loading a checkpoint with no matching objects raises AssertionError."""
    save_path = trackable.Checkpoint().save(
        os.path.join(self.get_temp_dir(), 'ckpt'))
    m = DummySubclassModel()
    with self.assertRaisesRegex(AssertionError, 'Nothing to load'):
      m.load_weights(save_path)
    m.dense = keras.layers.Dense(2)
    m.dense(constant_op.constant([[1.]]))
    with self.assertRaisesRegex(AssertionError,
                                'Nothing except the root object matched'):
      m.load_weights(save_path)

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_directory_passed(self):
    """A trailing-slash prefix (directory-style) round-trips weights."""
    with self.cached_session():
      m = DummySubclassModel()
      v = m.add_weight(name='v', shape=[])
      self.evaluate(v.assign(42.))
      prefix = os.path.join(self.get_temp_dir(), str(uuid.uuid4()), 'ckpt/')
      m.save_weights(prefix)
      self.evaluate(v.assign(2.))
      m.load_weights(prefix)
      self.assertEqual(42., self.evaluate(v))

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_relative_path(self):
    """Relative checkpoint prefixes (bare, nested, trailing slash) all work."""
    with self.cached_session():
      m = DummySubclassModel()
      v = m.add_weight(name='v', shape=[])
      os.chdir(self.get_temp_dir())
      prefix = 'ackpt'
      self.evaluate(v.assign(42.))
      m.save_weights(prefix)
      self.assertTrue(file_io.file_exists_v2('ackpt.index'))
      self.evaluate(v.assign(1.))
      m.load_weights(prefix)
      self.assertEqual(42., self.evaluate(v))
      prefix = 'subdir/ackpt'
      self.evaluate(v.assign(43.))
      m.save_weights(prefix)
      self.assertTrue(file_io.file_exists_v2('subdir/ackpt.index'))
      self.evaluate(v.assign(2.))
      m.load_weights(prefix)
      self.assertEqual(43., self.evaluate(v))
      prefix = 'ackpt/'
      self.evaluate(v.assign(44.))
      m.save_weights(prefix)
      self.assertTrue(file_io.file_exists_v2('ackpt/.index'))
      self.evaluate(v.assign(3.))
      m.load_weights(prefix)
      self.assertEqual(44., self.evaluate(v))

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
  def test_nonexistent_prefix_directory(self):
    """save_weights creates missing parent directories of the prefix."""
    with self.cached_session():
      m = DummySubclassModel()
      v = m.add_weight(name='v', shape=[])
      self.evaluate(v.assign(42.))
      prefix = os.path.join(self.get_temp_dir(), str(uuid.uuid4()), 'bckpt')
      m.save_weights(prefix)
      self.evaluate(v.assign(2.))
      m.load_weights(prefix)
      self.assertEqual(42., self.evaluate(v))
class DummySubclassModel(training.Model):
  """Empty subclassed model; used to exercise checkpoint edge cases."""
  pass
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
Sorsly/subtle | google-cloud-sdk/lib/surface/compute/instance_groups/managed/rolling_action/recreate.py | 4 | 2496 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for recreating instances of managed instance group."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute.instance_groups import flags as instance_groups_flags
from googlecloudsdk.command_lib.compute.instance_groups.managed import flags as instance_groups_managed_flags
from googlecloudsdk.command_lib.compute.instance_groups.managed import rolling_action
from googlecloudsdk.command_lib.compute.managed_instance_groups import update_instances_utils
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class StartUpdate(base.Command):
  """Starts recreation of instances in a managed instance group."""

  @staticmethod
  def Args(parser):
    """Registers the flags for the recreate rolling action."""
    instance_groups_managed_flags.AddMaxSurgeArg(parser)
    instance_groups_managed_flags.AddMaxUnavailableArg(parser)
    instance_groups_managed_flags.AddMinReadyArg(parser)
    instance_groups_flags.MULTISCOPE_INSTANCE_GROUP_MANAGER_ARG.AddArgument(
        parser)

  def Run(self, args):
    """Issues the update request that triggers instance recreation."""
    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    client = holder.client
    resources = holder.resources
    # No fields need to be cleared for the REPLACE rolling action.
    cleared_fields = []
    with client.apitools_client.IncludeFields(cleared_fields):
      # REPLACE recreates instances rather than restarting them in place.
      minimal_action = (client.messages.InstanceGroupManagerUpdatePolicy.
                        MinimalActionValueValuesEnum.REPLACE)
      max_surge = update_instances_utils.ParseFixedOrPercent(
          '--max-surge', 'max-surge', args.max_surge, client.messages)
      return client.MakeRequests([
          rolling_action.CreateRequest(args, cleared_fields, client, resources,
                                       minimal_action, max_surge)
      ])
StartUpdate.detailed_help = {
'brief':
'Recreates instances in a managed instance group',
'DESCRIPTION':
"""\
*{command}* recreates instances in a managed instance group."""
}
| mit |
sebadiaz/rethinkdb | external/v8_3.30.33.16/build/gyp/pylib/gyp/input.py | 457 | 112827 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from compiler.ast import Const
from compiler.ast import Dict
from compiler.ast import Discard
from compiler.ast import List
from compiler.ast import Module
from compiler.ast import Node
from compiler.ast import Stmt
import compiler
import copy
import gyp.common
import multiprocessing
import optparse
import os.path
import re
import shlex
import signal
import subprocess
import sys
import threading
import time
import traceback
from gyp.common import GypError
# A list of types that are treated as linkable.
linkable_types = ['executable', 'shared_library', 'loadable_module']

# A list of sections that contain links to other targets.
dependency_sections = ['dependencies', 'export_dependent_settings']

# base_path_sections is a list of sections defined by GYP that contain
# pathnames.  The generators can provide more keys, the two lists are merged
# into path_sections, but you should call IsPathSection instead of using either
# list directly.
base_path_sections = [
  'destination',
  'files',
  'include_dirs',
  'inputs',
  'libraries',
  'outputs',
  'sources',
]
# Populated later by merging base_path_sections with generator-provided keys.
path_sections = []

# Trailing modifier characters that may follow a path-section name (see
# IsPathSection); they are stripped before the name is examined.
is_path_section_charset = set('=+?!')
# Any section ending in _dir(s), _file(s) or _path(s) is path-like by name.
is_path_section_match_re = re.compile('_(dir|file|path)s?$')
def IsPathSection(section):
  """Returns a truthy value if |section| is one whose contents are paths."""
  # If section ends in one of these characters, it's applied to a section
  # without the trailing characters.  '/' is notably absent from this list,
  # because there's no way for a regular expression to be treated as a path.
  trimmed = section
  while trimmed[-1:] in is_path_section_charset:
    trimmed = trimmed[:-1]
  if trimmed in path_sections:
    return True
  return is_path_section_match_re.search(trimmed)
# base_non_configuration_keys is a list of key names that belong in the target
# itself and should not be propagated into its configurations.  It is merged
# with a list that can come from the generator to
# create non_configuration_keys.
base_non_configuration_keys = [
  # Sections that must exist inside targets and not configurations.
  'actions',
  'configurations',
  'copies',
  'default_configuration',
  'dependencies',
  'dependencies_original',
  'libraries',
  'postbuilds',
  'product_dir',
  'product_extension',
  'product_name',
  'product_prefix',
  'rules',
  'run_as',
  'sources',
  'standalone_static_library',
  'suppress_wildcard',
  'target_name',
  'toolset',
  'toolsets',
  'type',

  # Sections that can be found inside targets or configurations, but that
  # should not be propagated from targets into their configurations.
  'variables',
]
# Populated later by merging base_non_configuration_keys with generator keys.
non_configuration_keys = []

# Keys that do not belong inside a configuration dictionary.
invalid_configuration_keys = [
  'actions',
  'all_dependent_settings',
  'configurations',
  'dependencies',
  'direct_dependent_settings',
  'libraries',
  'link_settings',
  'sources',
  'standalone_static_library',
  'target_name',
  'type',
]

# Controls whether or not the generator supports multiple toolsets.
multiple_toolsets = False

# Paths for converting filelist paths to output paths: {
#   toplevel,
#   qualified_output_dir,
# }
generator_filelist_paths = None
def GetIncludedBuildFiles(build_file_path, aux_data, included=None):
  """Return a list of all build files included into build_file_path.

  The returned list will contain build_file_path as well as all other files
  that it included, either directly or indirectly.  Note that the list may
  contain files that were included into a conditional section that evaluated
  to false and was not merged into build_file_path's dict.

  aux_data is a dict containing a key for each build file or included build
  file.  Those keys provide access to dicts whose "included" keys contain
  lists of all other files included by the build file.

  included should be left at its default None value by external callers.  It
  is used for recursion.

  The returned list will not contain any duplicate entries.  Each build file
  in the list will be relative to the current directory.
  """
  # Use 'is None' (identity) rather than '== None' for the recursion sentinel.
  if included is None:
    included = []

  # 'included' doubles as the visited set, so cycles and duplicates stop here.
  if build_file_path in included:
    return included

  included.append(build_file_path)

  for included_build_file in aux_data[build_file_path].get('included', []):
    GetIncludedBuildFiles(included_build_file, aux_data, included)

  return included
def CheckedEval(file_contents):
  """Return the eval of a gyp file.

  The gyp file is restricted to dictionaries and lists only, and
  repeated keys are not allowed.

  Note that this is slower than eval() is.
  """
  # NOTE: relies on the Python-2-only 'compiler' module.  The asserts below
  # unwrap the Module -> Stmt -> Discard wrappers that 'compiler.parse' puts
  # around the file's single dict/list expression before handing the bare
  # expression node to CheckNode for validation.
  ast = compiler.parse(file_contents)
  assert isinstance(ast, Module)
  c1 = ast.getChildren()
  assert c1[0] is None
  assert isinstance(c1[1], Stmt)
  c2 = c1[1].getChildren()
  assert isinstance(c2[0], Discard)
  c3 = c2[0].getChildren()
  assert len(c3) == 1
  return CheckNode(c3[0], [])
def CheckNode(node, keypath):
  """Recursively converts a 'compiler' AST node into plain dicts/lists/values.

  Only Dict, List and Const nodes are permitted, and repeated dictionary keys
  are rejected with GypError.  |keypath| is the list of keys/indices leading
  to |node|, used for error reporting.
  """
  if isinstance(node, Dict):
    c = node.getChildren()
    # Renamed from 'dict' so the builtin type is not shadowed.
    result = {}
    # Children alternate key, value, key, value, ...
    for n in range(0, len(c), 2):
      assert isinstance(c[n], Const)
      key = c[n].getChildren()[0]
      if key in result:
        raise GypError("Key '" + key + "' repeated at level " +
                       repr(len(keypath) + 1) + " with key path '" +
                       '.'.join(keypath) + "'")
      kp = list(keypath)  # Make a copy of the list for descending this node.
      kp.append(key)
      result[key] = CheckNode(c[n + 1], kp)
    return result
  elif isinstance(node, List):
    c = node.getChildren()
    children = []
    for index, child in enumerate(c):
      kp = list(keypath)  # Copy list.
      kp.append(repr(index))
      children.append(CheckNode(child, kp))
    return children
  elif isinstance(node, Const):
    return node.getChildren()[0]
  else:
    # Parenthesized raise is valid in both Python 2 and 3; the original used
    # the Python-2-only 'raise TypeError, msg' statement form.
    raise TypeError("Unknown AST node at key path '" + '.'.join(keypath) +
                    "': " + repr(node))
def LoadOneBuildFile(build_file_path, data, aux_data, variables, includes,
                     is_target, check):
  """Loads one build file, merges its includes, and caches it in |data|.

  Returns the file's dict (possibly straight from the cache).  |aux_data|
  gets a per-file entry that will record which files were included.  When
  |check| is true the file is parsed with CheckedEval (dicts/lists only, no
  repeated keys) instead of a plain eval.
  """
  # Serve from cache if this build file was already loaded.
  if build_file_path in data:
    return data[build_file_path]

  if os.path.exists(build_file_path):
    build_file_contents = open(build_file_path).read()
  else:
    raise GypError("%s not found (cwd: %s)" % (build_file_path, os.getcwd()))

  build_file_data = None
  try:
    if check:
      build_file_data = CheckedEval(build_file_contents)
    else:
      # Evaluate with no builtins so the file behaves as (mostly) inert data.
      build_file_data = eval(build_file_contents, {'__builtins__': None},
                             None)
  except SyntaxError, e:
    e.filename = build_file_path
    raise
  except Exception, e:
    gyp.common.ExceptionAppend(e, 'while reading ' + build_file_path)
    raise

  if not isinstance(build_file_data, dict):
    raise GypError("%s does not evaluate to a dictionary." % build_file_path)

  data[build_file_path] = build_file_data
  aux_data[build_file_path] = {}

  # Scan for includes and merge them in.
  if ('skip_includes' not in build_file_data or
      not build_file_data['skip_includes']):
    try:
      # Only target files receive the caller-supplied default |includes|;
      # files that are themselves includes do not.
      if is_target:
        LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
                                      aux_data, variables, includes, check)
      else:
        LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
                                      aux_data, variables, None, check)
    except Exception, e:
      gyp.common.ExceptionAppend(e,
                                 'while reading includes of ' + build_file_path)
      raise

  return build_file_data
def LoadBuildFileIncludesIntoDict(subdict, subdict_path, data, aux_data,
                                  variables, includes, check):
  """Merges the caller-supplied |includes| plus |subdict|'s own 'includes'
  list into |subdict|, recording each inclusion in |aux_data|.  Recurses into
  nested dicts and lists so includes anywhere in the structure are honored.
  """
  includes_list = []
  if includes != None:
    includes_list.extend(includes)
  if 'includes' in subdict:
    for include in subdict['includes']:
      # "include" is specified relative to subdict_path, so compute the real
      # path to include by appending the provided "include" to the directory
      # in which subdict_path resides.
      relative_include = \
          os.path.normpath(os.path.join(os.path.dirname(subdict_path), include))
      includes_list.append(relative_include)
    # Unhook the includes list, it's no longer needed.
    del subdict['includes']

  # Merge in the included files.
  for include in includes_list:
    if not 'included' in aux_data[subdict_path]:
      aux_data[subdict_path]['included'] = []
    aux_data[subdict_path]['included'].append(include)

    gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Included File: '%s'", include)

    # Included files are loaded with is_target=False so they don't receive
    # default includes of their own; merge order is the includes_list order.
    MergeDicts(subdict,
               LoadOneBuildFile(include, data, aux_data, variables, None,
                                False, check),
               subdict_path, include)

  # Recurse into subdictionaries.
  for k, v in subdict.iteritems():
    if v.__class__ == dict:
      LoadBuildFileIncludesIntoDict(v, subdict_path, data, aux_data, variables,
                                    None, check)
    elif v.__class__ == list:
      LoadBuildFileIncludesIntoList(v, subdict_path, data, aux_data, variables,
                                    check)
# This recurses into lists so that it can look for dicts.
def LoadBuildFileIncludesIntoList(sublist, sublist_path, data, aux_data,
                                  variables, check):
  """Walks |sublist|, processing includes in any dicts found at any depth."""
  for entry in sublist:
    entry_class = entry.__class__
    if entry_class == dict:
      LoadBuildFileIncludesIntoDict(entry, sublist_path, data, aux_data,
                                    variables, None, check)
    elif entry_class == list:
      LoadBuildFileIncludesIntoList(entry, sublist_path, data, aux_data,
                                    variables, check)
# Processes toolsets in all the targets. This recurses into condition entries
# since they can contain toolsets as well.
def ProcessToolsetsInDict(data):
  """Expands each target's 'toolsets' list into per-toolset target copies.

  Mutates |data| in place.  When the generator does not support multiple
  toolsets (module-level |multiple_toolsets| is false) every target is forced
  to the single 'target' toolset.
  """
  if 'targets' in data:
    target_list = data['targets']
    new_target_list = []
    for target in target_list:
      # If this target already has an explicit 'toolset', and no 'toolsets'
      # list, don't modify it further.
      if 'toolset' in target and 'toolsets' not in target:
        new_target_list.append(target)
        continue
      if multiple_toolsets:
        toolsets = target.get('toolsets', ['target'])
      else:
        toolsets = ['target']
      # Make sure this 'toolsets' definition is only processed once.
      if 'toolsets' in target:
        del target['toolsets']
      if len(toolsets) > 0:
        # Optimization: only do copies if more than one toolset is specified.
        for build in toolsets[1:]:
          new_target = copy.deepcopy(target)
          new_target['toolset'] = build
          new_target_list.append(new_target)
        # The original dict (not a copy) keeps the first toolset.
        target['toolset'] = toolsets[0]
        new_target_list.append(target)
    data['targets'] = new_target_list
  if 'conditions' in data:
    # Conditions are [expr, then_dict(, else_dict)] lists; process every
    # branch dict, since each may define its own targets.
    for condition in data['conditions']:
      if isinstance(condition, list):
        for condition_dict in condition[1:]:
          ProcessToolsetsInDict(condition_dict)
# TODO(mark): I don't love this name. It just means that it's going to load
# a build file that contains targets and is expected to provide a targets dict
# that contains the targets...
def LoadTargetBuildFile(build_file_path, data, aux_data, variables, includes,
                        depth, check, load_dependencies):
  """Loads one target build file and prepares its targets for processing.

  Predefines the DEPTH variable (when depth is given), loads the file via
  LoadOneBuildFile, records the contributing included files, expands
  toolsets, applies "pre"/"early" variable and condition processing, merges
  target_defaults into each target, and collects each target's resolved
  dependency names.

  Returns False if build_file_path was already loaded.  If
  load_dependencies is true, recursively loads the dependencies and returns
  nothing; otherwise returns (build_file_path, dependencies).
  """
  # If depth is set, predefine the DEPTH variable to be a relative path from
  # this build file's directory to the directory identified by depth.
  if depth:
    # TODO(dglazkov) The backslash/forward-slash replacement at the end is a
    # temporary measure. This should really be addressed by keeping all paths
    # in POSIX until actual project generation.
    d = gyp.common.RelativePath(depth, os.path.dirname(build_file_path))
    if d == '':
      variables['DEPTH'] = '.'
    else:
      variables['DEPTH'] = d.replace('\\', '/')
  if build_file_path in data['target_build_files']:
    # Already loaded.
    return False
  data['target_build_files'].add(build_file_path)
  gyp.DebugOutput(gyp.DEBUG_INCLUDES,
                  "Loading Target Build File '%s'", build_file_path)
  build_file_data = LoadOneBuildFile(build_file_path, data, aux_data, variables,
                                     includes, True, check)
  # Store DEPTH for later use in generators.
  build_file_data['_DEPTH'] = depth
  # Set up the included_files key indicating which .gyp files contributed to
  # this target dict.
  if 'included_files' in build_file_data:
    raise GypError(build_file_path + ' must not contain included_files key')
  included = GetIncludedBuildFiles(build_file_path, aux_data)
  build_file_data['included_files'] = []
  for included_file in included:
    # included_file is relative to the current directory, but it needs to
    # be made relative to build_file_path's directory.
    included_relative = \
        gyp.common.RelativePath(included_file,
                                os.path.dirname(build_file_path))
    build_file_data['included_files'].append(included_relative)
  # Do a first round of toolsets expansion so that conditions can be defined
  # per toolset.
  ProcessToolsetsInDict(build_file_data)
  # Apply "pre"/"early" variable expansions and condition evaluations.
  ProcessVariablesAndConditionsInDict(
      build_file_data, PHASE_EARLY, variables, build_file_path)
  # Since some toolsets might have been defined conditionally, perform
  # a second round of toolsets expansion now.
  ProcessToolsetsInDict(build_file_data)
  # Look at each project's target_defaults dict, and merge settings into
  # targets.
  if 'target_defaults' in build_file_data:
    if 'targets' not in build_file_data:
      raise GypError("Unable to find targets in build file %s" %
                     build_file_path)
    index = 0
    while index < len(build_file_data['targets']):
      # This procedure needs to give the impression that target_defaults is
      # used as defaults, and the individual targets inherit from that.
      # The individual targets need to be merged into the defaults. Make
      # a deep copy of the defaults for each target, merge the target dict
      # as found in the input file into that copy, and then hook up the
      # copy with the target-specific data merged into it as the replacement
      # target dict.
      old_target_dict = build_file_data['targets'][index]
      new_target_dict = copy.deepcopy(build_file_data['target_defaults'])
      MergeDicts(new_target_dict, old_target_dict,
                 build_file_path, build_file_path)
      build_file_data['targets'][index] = new_target_dict
      index += 1
    # No longer needed.
    del build_file_data['target_defaults']
  # Look for dependencies. This means that dependency resolution occurs
  # after "pre" conditionals and variable expansion, but before "post" -
  # in other words, you can't put a "dependencies" section inside a "post"
  # conditional within a target.
  dependencies = []
  if 'targets' in build_file_data:
    for target_dict in build_file_data['targets']:
      if 'dependencies' not in target_dict:
        continue
      for dependency in target_dict['dependencies']:
        dependencies.append(
            gyp.common.ResolveTarget(build_file_path, dependency, None)[0])
  if load_dependencies:
    for dependency in dependencies:
      try:
        LoadTargetBuildFile(dependency, data, aux_data, variables,
                            includes, depth, check, load_dependencies)
      except Exception, e:
        gyp.common.ExceptionAppend(
          e, 'while loading dependencies of %s' % build_file_path)
        raise
  else:
    return (build_file_path, dependencies)
def CallLoadTargetBuildFile(global_flags,
                            build_file_path, data,
                            aux_data, variables,
                            includes, depth, check,
                            generator_input_info):
  """Wrapper around LoadTargetBuildFile for parallel processing.

  This wrapper is used when LoadTargetBuildFile is executed in
  a worker process.  Returns (build_file_path, data_out, aux_data_out,
  dependencies) on success, or None on any error (the error itself is
  written to stderr).
  """
  try:
    # Ignore SIGINT in the worker; KeyboardInterrupt is dealt with by the
    # parent process (see LoadTargetBuildFilesParallel).
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    # Apply globals so that the worker process behaves the same.
    for key, value in global_flags.iteritems():
      globals()[key] = value
    # Save the keys so we can return data that changed.
    data_keys = set(data)
    aux_data_keys = set(aux_data)
    SetGeneratorGlobals(generator_input_info)
    result = LoadTargetBuildFile(build_file_path, data,
                                 aux_data, variables,
                                 includes, depth, check, False)
    if not result:
      return result
    (build_file_path, dependencies) = result
    # Send back only the entries added by this load; the keys snapshotted
    # above were already present in the parent.
    data_out = {}
    for key in data:
      if key == 'target_build_files':
        continue
      if key not in data_keys:
        data_out[key] = data[key]
    aux_data_out = {}
    for key in aux_data:
      if key not in aux_data_keys:
        aux_data_out[key] = aux_data[key]
    # This gets serialized and sent back to the main process via a pipe.
    # It's handled in LoadTargetBuildFileCallback.
    return (build_file_path,
            data_out,
            aux_data_out,
            dependencies)
  except GypError, e:
    sys.stderr.write("gyp: %s\n" % e)
    return None
  except Exception, e:
    print >>sys.stderr, 'Exception:', e
    print >>sys.stderr, traceback.format_exc()
    return None
class ParallelProcessingError(Exception):
  """Raised for errors encountered while processing input files in parallel."""
  pass
class ParallelState(object):
  """Class to keep track of state when processing input files in parallel.

  If build files are loaded in parallel, use this to keep track of
  state during farming out and processing parallel jobs. It's stored
  in a global so that the callback function can have access to it.
  """
  def __init__(self):
    # The multiprocessing pool.
    self.pool = None
    # The condition variable used to protect this object and notify
    # the main loop when there might be more data to process.
    self.condition = None
    # The "data" dict that was passed to LoadTargetBuildFileParallel
    self.data = None
    # The "aux_data" dict that was passed to LoadTargetBuildFileParallel
    self.aux_data = None
    # The number of parallel calls outstanding; decremented when a response
    # was received.
    self.pending = 0
    # The set of all build files that have been scheduled, so we don't
    # schedule the same one twice.
    self.scheduled = set()
    # A list of dependency build file paths that haven't been scheduled yet.
    self.dependencies = []
    # Flag to indicate if there was an error in a child process.
    self.error = False
  def LoadTargetBuildFileCallback(self, result):
    """Handle the results of running LoadTargetBuildFile in another process.

    result is either None (the worker failed) or the
    (build_file_path, data_out, aux_data_out, dependencies) tuple produced
    by CallLoadTargetBuildFile.  All state is mutated under self.condition,
    which is also used to wake the scheduling loop in
    LoadTargetBuildFilesParallel.
    """
    self.condition.acquire()
    if not result:
      self.error = True
      self.condition.notify()
      self.condition.release()
      return
    (build_file_path0, data0, aux_data0, dependencies0) = result
    self.data['target_build_files'].add(build_file_path0)
    # Merge the worker's newly-added entries back into the parent's dicts.
    for key in data0:
      self.data[key] = data0[key]
    for key in aux_data0:
      self.aux_data[key] = aux_data0[key]
    # Queue any dependencies that haven't been scheduled yet.
    for new_dependency in dependencies0:
      if new_dependency not in self.scheduled:
        self.scheduled.add(new_dependency)
        self.dependencies.append(new_dependency)
    self.pending -= 1
    self.condition.notify()
    self.condition.release()
def LoadTargetBuildFilesParallel(build_files, data, aux_data,
                                 variables, includes, depth, check,
                                 generator_input_info):
  """Load the given build files, farming the work out to a process pool.

  Scheduling state lives in a ParallelState; results and newly discovered
  dependencies are merged back in by ParallelState.LoadTargetBuildFileCallback,
  which runs on the pool's result thread.  Exits the process with status 1
  if any worker reported an error.
  """
  parallel_state = ParallelState()
  parallel_state.condition = threading.Condition()
  # Make copies of the build_files argument that we can modify while working.
  parallel_state.dependencies = list(build_files)
  parallel_state.scheduled = set(build_files)
  parallel_state.pending = 0
  parallel_state.data = data
  parallel_state.aux_data = aux_data
  try:
    parallel_state.condition.acquire()
    # Loop until everything scheduled has been processed; the callback
    # notifies the condition each time a worker finishes.
    while parallel_state.dependencies or parallel_state.pending:
      if parallel_state.error:
        break
      if not parallel_state.dependencies:
        parallel_state.condition.wait()
        continue
      dependency = parallel_state.dependencies.pop()
      parallel_state.pending += 1
      # Workers only need to know which files are already loaded, not the
      # loaded contents; each worker returns just what it adds.
      data_in = {}
      data_in['target_build_files'] = data['target_build_files']
      aux_data_in = {}
      global_flags = {
        'path_sections': globals()['path_sections'],
        'non_configuration_keys': globals()['non_configuration_keys'],
        'multiple_toolsets': globals()['multiple_toolsets']}
      if not parallel_state.pool:
        # NOTE(review): worker count is hard-coded to 8.
        parallel_state.pool = multiprocessing.Pool(8)
      parallel_state.pool.apply_async(
          CallLoadTargetBuildFile,
          args = (global_flags, dependency,
                  data_in, aux_data_in,
                  variables, includes, depth, check, generator_input_info),
          callback = parallel_state.LoadTargetBuildFileCallback)
  except KeyboardInterrupt, e:
    parallel_state.pool.terminate()
    raise e
  parallel_state.condition.release()
  parallel_state.pool.close()
  parallel_state.pool.join()
  parallel_state.pool = None
  if parallel_state.error:
    sys.exit(1)
# Opening brackets, and a map from each closing bracket to its opener.
LBRACKETS = set('{[(')
BRACKETS = {'}': '{', ']': '[', ')': '('}
def FindEnclosingBracketGroup(input_str):
  """Return the (start, end) span of the first balanced bracket group.

  The span runs from the first opening bracket in input_str through its
  matching closing bracket, end-exclusive.  For example, for
  "<(foo <(bar)) blah" the result is (1, 13) — everything except the
  leading "<" and the trailing " blah".  Returns (-1, -1) when brackets
  are absent, mismatched, or unbalanced.
  """
  open_stack = []
  group_start = -1
  for pos, ch in enumerate(input_str):
    if ch in LBRACKETS:
      if group_start == -1:
        group_start = pos
      open_stack.append(ch)
    elif ch in BRACKETS:
      # A closer with nothing open, or the wrong opener on top, is an error.
      if not open_stack or open_stack.pop() != BRACKETS[ch]:
        return (-1, -1)
      if not open_stack:
        return (group_start, pos + 1)
  return (-1, -1)
canonical_int_re = re.compile('(0|-?[1-9][0-9]*)$')
def IsStrCanonicalInt(string):
  """Returns True if |string| is in its canonical integer form.

  The canonical form is such that str(int(string)) == string.  Non-string
  inputs and non-canonical spellings (e.g. '042', '-0', '') return False.
  """
  # BUG FIX: previously returned the raw re match object (or False), despite
  # being documented to return True; now always returns a real bool.
  return isinstance(string, str) and canonical_int_re.match(string) is not None
# This matches things like "<(asdf)", "<!(cmd)", "<!@(cmd)", "<|(list)",
# "<!interpreter(arguments)", "<([list])", and even "<([)" and "<(<())".
# In the last case, the inner "<()" is captured in match['content'].
# NOTE: raw strings are required; the patterns contain \(, \|, \s, \[ and \]
# which are invalid escape sequences in plain string literals.
early_variable_re = re.compile(
    r'(?P<replace>(?P<type><(?:(?:!?@?)|\|)?)'
    r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
    r'\((?P<is_array>\s*\[?)'
    r'(?P<content>.*?)(\]?)\))')
# This matches the same as early_variable_re, but with '>' instead of '<'.
late_variable_re = re.compile(
    r'(?P<replace>(?P<type>>(?:(?:!?@?)|\|)?)'
    r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
    r'\((?P<is_array>\s*\[?)'
    r'(?P<content>.*?)(\]?)\))')
# This matches the same as early_variable_re, but with '^' instead of '<'.
latelate_variable_re = re.compile(
    r'(?P<replace>(?P<type>[\^](?:(?:!?@?)|\|)?)'
    r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
    r'\((?P<is_array>\s*\[?)'
    r'(?P<content>.*?)(\]?)\))')
# Global cache of results from running commands so they don't have to be run
# more than once.
cached_command_results = {}
def FixupPlatformCommand(cmd):
  """Adjust a shell command for the host platform.

  On Windows, rewrites a leading 'cat ' into the cmd.exe equivalent
  'type '.  cmd may be a command string or an argv-style list (only the
  first element is rewritten).  Everything else is returned unchanged.
  """
  if sys.platform == 'win32':
    # isinstance instead of type(...) == list (idiomatic type check).
    if isinstance(cmd, list):
      cmd = [re.sub('^cat ', 'type ', cmd[0])] + cmd[1:]
    else:
      cmd = re.sub('^cat ', 'type ', cmd)
  return cmd
# Expansion phases (see ExpandVariables / ProcessConditionsInDict):
PHASE_EARLY = 0     # '<' expansions; 'conditions' sections.
PHASE_LATE = 1      # '>' expansions; 'target_conditions' sections.
PHASE_LATELATE = 2  # '^' expansions; no conditions processed.
def ExpandVariables(input, phase, variables, build_file):
  """Expands GYP variable and command references in input.

  phase selects the recognized syntax: PHASE_EARLY '<', PHASE_LATE '>',
  PHASE_LATELATE '^'.  variables maps variable names to values; build_file
  is the path of the file being processed, used for relative paths and
  error messages.  Returns the expanded value: a string, an int (strings
  in canonical integer form are converted), or a list for '@' list-context
  expansions.
  """
  # Look for the pattern that gets expanded into variables
  if phase == PHASE_EARLY:
    variable_re = early_variable_re
    expansion_symbol = '<'
  elif phase == PHASE_LATE:
    variable_re = late_variable_re
    expansion_symbol = '>'
  elif phase == PHASE_LATELATE:
    variable_re = latelate_variable_re
    expansion_symbol = '^'
  else:
    assert False
  input_str = str(input)
  if IsStrCanonicalInt(input_str):
    return int(input_str)
  # Do a quick scan to determine if an expensive regex search is warranted.
  if expansion_symbol not in input_str:
    return input_str
  # Get the entire list of matches as a list of MatchObject instances.
  # (using findall here would return strings instead of MatchObjects).
  matches = list(variable_re.finditer(input_str))
  if not matches:
    return input_str
  output = input_str
  # Reverse the list of matches so that replacements are done right-to-left.
  # That ensures that earlier replacements won't mess up the string in a
  # way that causes later calls to find the earlier substituted text instead
  # of what's intended for replacement.
  matches.reverse()
  for match_group in matches:
    match = match_group.groupdict()
    gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Matches: %r", match)
    # match['replace'] is the substring to look for, match['type']
    # is the character code for the replacement type (< > <! >! <| >| <@
    # >@ <!@ >!@), match['is_array'] contains a '[' for command
    # arrays, and match['content'] is the name of the variable (< >)
    # or command to run (<! >!). match['command_string'] is an optional
    # command string. Currently, only 'pymod_do_main' is supported.
    # run_command is true if a ! variant is used.
    run_command = '!' in match['type']
    command_string = match['command_string']
    # file_list is true if a | variant is used.
    file_list = '|' in match['type']
    # Capture these now so we can adjust them later.
    replace_start = match_group.start('replace')
    replace_end = match_group.end('replace')
    # Find the ending paren, and re-evaluate the contained string.
    (c_start, c_end) = FindEnclosingBracketGroup(input_str[replace_start:])
    # Adjust the replacement range to match the entire command
    # found by FindEnclosingBracketGroup (since the variable_re
    # probably doesn't match the entire command if it contained
    # nested variables).
    replace_end = replace_start + c_end
    # Find the "real" replacement, matching the appropriate closing
    # paren, and adjust the replacement start and end.
    replacement = input_str[replace_start:replace_end]
    # Figure out what the contents of the variable parens are.
    contents_start = replace_start + c_start + 1
    contents_end = replace_end - 1
    contents = input_str[contents_start:contents_end]
    # Do filter substitution now for <|().
    # Admittedly, this is different than the evaluation order in other
    # contexts. However, since filtration has no chance to run on <|(),
    # this seems like the only obvious way to give them access to filters.
    if file_list:
      processed_variables = copy.deepcopy(variables)
      ProcessListFiltersInDict(contents, processed_variables)
      # Recurse to expand variables in the contents
      contents = ExpandVariables(contents, phase,
                                 processed_variables, build_file)
    else:
      # Recurse to expand variables in the contents
      contents = ExpandVariables(contents, phase, variables, build_file)
    # Strip off leading/trailing whitespace so that variable matches are
    # simpler below (and because they are rarely needed).
    contents = contents.strip()
    # expand_to_list is true if an @ variant is used. In that case,
    # the expansion should result in a list. Note that the caller
    # is to be expecting a list in return, and not all callers do
    # because not all are working in list context. Also, for list
    # expansions, there can be no other text besides the variable
    # expansion in the input string.
    expand_to_list = '@' in match['type'] and input_str == replacement
    if run_command or file_list:
      # Find the build file's directory, so commands can be run or file lists
      # generated relative to it.
      build_file_dir = os.path.dirname(build_file)
      if build_file_dir == '' and not file_list:
        # If build_file is just a leaf filename indicating a file in the
        # current directory, build_file_dir might be an empty string. Set
        # it to None to signal to subprocess.Popen that it should run the
        # command in the current directory.
        build_file_dir = None
      # Support <|(listfile.txt ...) which generates a file
      # containing items from a gyp list, generated at gyp time.
      # This works around actions/rules which have more inputs than will
      # fit on the command line.
      if file_list:
        if type(contents) == list:
          contents_list = contents
        else:
          contents_list = contents.split(' ')
        replacement = contents_list[0]
        if os.path.isabs(replacement):
          raise GypError('| cannot handle absolute paths, got "%s"' % replacement)
        if not generator_filelist_paths:
          path = os.path.join(build_file_dir, replacement)
        else:
          if os.path.isabs(build_file_dir):
            toplevel = generator_filelist_paths['toplevel']
            rel_build_file_dir = gyp.common.RelativePath(build_file_dir, toplevel)
          else:
            rel_build_file_dir = build_file_dir
          qualified_out_dir = generator_filelist_paths['qualified_out_dir']
          path = os.path.join(qualified_out_dir, rel_build_file_dir, replacement)
          gyp.common.EnsureDirExists(path)
        replacement = gyp.common.RelativePath(path, build_file_dir)
        f = gyp.common.WriteOnDiff(path)
        for i in contents_list[1:]:
          f.write('%s\n' % i)
        f.close()
      elif run_command:
        use_shell = True
        if match['is_array']:
          contents = eval(contents)
          use_shell = False
        # Check for a cached value to avoid executing commands, or generating
        # file lists more than once.
        # TODO(http://code.google.com/p/gyp/issues/detail?id=112): It is
        # possible that the command being invoked depends on the current
        # directory. For that case the syntax needs to be extended so that the
        # directory is also used in cache_key (it becomes a tuple).
        # TODO(http://code.google.com/p/gyp/issues/detail?id=111): In theory,
        # someone could author a set of GYP files where each time the command
        # is invoked it produces different output by design. When the need
        # arises, the syntax should be extended to support no caching off a
        # command's output so it is run every time.
        cache_key = str(contents)
        cached_value = cached_command_results.get(cache_key, None)
        if cached_value is None:
          gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                          "Executing command '%s' in directory '%s'",
                          contents, build_file_dir)
          replacement = ''
          if command_string == 'pymod_do_main':
            # <!pymod_do_main(modulename param eters) loads |modulename| as a
            # python module and then calls that module's DoMain() function,
            # passing ["param", "eters"] as a single list argument. For modules
            # that don't load quickly, this can be faster than
            # <!(python modulename param eters). Do this in |build_file_dir|.
            oldwd = os.getcwd()  # Python doesn't like os.open('.'): no fchdir.
            if build_file_dir:  # build_file_dir may be None (see above).
              os.chdir(build_file_dir)
            try:
              parsed_contents = shlex.split(contents)
              try:
                py_module = __import__(parsed_contents[0])
              except ImportError as e:
                raise GypError("Error importing pymod_do_main"
                               "module (%s): %s" % (parsed_contents[0], e))
              replacement = str(py_module.DoMain(parsed_contents[1:])).rstrip()
            finally:
              os.chdir(oldwd)
            assert replacement != None
          elif command_string:
            raise GypError("Unknown command string '%s' in '%s'." %
                           (command_string, contents))
          else:
            # Fix up command with platform specific workarounds.
            contents = FixupPlatformCommand(contents)
            p = subprocess.Popen(contents, shell=use_shell,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 stdin=subprocess.PIPE,
                                 cwd=build_file_dir)
            p_stdout, p_stderr = p.communicate('')
            if p.wait() != 0 or p_stderr:
              sys.stderr.write(p_stderr)
              # Simulate check_call behavior, since check_call only exists
              # in python 2.5 and later.
              raise GypError("Call to '%s' returned exit status %d." %
                             (contents, p.returncode))
            replacement = p_stdout.rstrip()
          cached_command_results[cache_key] = replacement
        else:
          gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                          "Had cache value for command '%s' in directory '%s'",
                          contents,build_file_dir)
          replacement = cached_value
    else:
      if not contents in variables:
        if contents[-1] in ['!', '/']:
          # In order to allow cross-compiles (nacl) to happen more naturally,
          # we will allow references to >(sources/) etc. to resolve to
          # and empty list if undefined. This allows actions to:
          # 'action!': [
          #   '>@(_sources!)',
          # ],
          # 'action/': [
          #   '>@(_sources/)',
          # ],
          replacement = []
        else:
          raise GypError('Undefined variable ' + contents +
                         ' in ' + build_file)
      else:
        replacement = variables[contents]
      if isinstance(replacement, list):
        for item in replacement:
          if (not contents[-1] == '/' and
              not isinstance(item, str) and not isinstance(item, int)):
            raise GypError('Variable ' + contents +
                           ' must expand to a string or list of strings; ' +
                           'list contains a ' +
                           item.__class__.__name__)
        # Run through the list and handle variable expansions in it. Since
        # the list is guaranteed not to contain dicts, this won't do anything
        # with conditions sections.
        ProcessVariablesAndConditionsInList(replacement, phase, variables,
                                            build_file)
      elif not isinstance(replacement, str) and \
           not isinstance(replacement, int):
        raise GypError('Variable ' + contents +
                       ' must expand to a string or list of strings; ' +
                       'found a ' + replacement.__class__.__name__)
    if expand_to_list:
      # Expanding in list context. It's guaranteed that there's only one
      # replacement to do in |input_str| and that it's this replacement. See
      # above.
      if isinstance(replacement, list):
        # If it's already a list, make a copy.
        output = replacement[:]
      else:
        # Split it the same way sh would split arguments.
        output = shlex.split(str(replacement))
    else:
      # Expanding in string context.
      encoded_replacement = ''
      if isinstance(replacement, list):
        # When expanding a list into string context, turn the list items
        # into a string in a way that will work with a subprocess call.
        #
        # TODO(mark): This isn't completely correct. This should
        # call a generator-provided function that observes the
        # proper list-to-argument quoting rules on a specific
        # platform instead of just calling the POSIX encoding
        # routine.
        encoded_replacement = gyp.common.EncodePOSIXShellList(replacement)
      else:
        encoded_replacement = replacement
      output = output[:replace_start] + str(encoded_replacement) + \
               output[replace_end:]
    # Prepare for the next match iteration.
    input_str = output
    # Look for more matches now that we've replaced some, to deal with
    # expanding local variables (variables defined in the same
    # variables block as this one).
  gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Found output %r, recursing.", output)
  if isinstance(output, list):
    if output and isinstance(output[0], list):
      # Leave output alone if it's a list of lists.
      # We don't want such lists to be stringified.
      pass
    else:
      new_output = []
      for item in output:
        new_output.append(
            ExpandVariables(item, phase, variables, build_file))
      output = new_output
  else:
    output = ExpandVariables(output, phase, variables, build_file)
  # Convert all strings that are canonically-represented integers into integers.
  if isinstance(output, list):
    for index in xrange(0, len(output)):
      if IsStrCanonicalInt(output[index]):
        output[index] = int(output[index])
  elif IsStrCanonicalInt(output):
    output = int(output)
  return output
def ProcessConditionsInDict(the_dict, phase, variables, build_file):
  """Evaluates a 'conditions' or 'target_conditions' section in the_dict.

  Which key is processed depends on phase:
    early -> conditions
    late -> target_conditions
    latelate -> no conditions

  Each item in a conditions list consists of cond_expr, a string expression
  evaluated as the condition, and true_dict, a dict that will be merged into
  the_dict if cond_expr evaluates to true. Optionally, a third item,
  false_dict, may be present. false_dict is merged into the_dict if
  cond_expr evaluates to false.

  Any dict merged into the_dict will be recursively processed for nested
  conditionals and other expansions, also according to phase, immediately
  prior to being merged.
  """
  if phase == PHASE_EARLY:
    conditions_key = 'conditions'
  elif phase == PHASE_LATE:
    conditions_key = 'target_conditions'
  elif phase == PHASE_LATELATE:
    return
  else:
    assert False
  if not conditions_key in the_dict:
    return
  conditions_list = the_dict[conditions_key]
  # Unhook the conditions list, it's no longer needed.
  del the_dict[conditions_key]
  for condition in conditions_list:
    if not isinstance(condition, list):
      raise GypError(conditions_key + ' must be a list')
    if len(condition) != 2 and len(condition) != 3:
      # It's possible that condition[0] won't work in which case this
      # attempt will raise its own IndexError. That's probably fine.
      raise GypError(conditions_key + ' ' + condition[0] +
                     ' must be length 2 or 3, not ' + str(len(condition)))
    [cond_expr, true_dict] = condition[0:2]
    false_dict = None
    if len(condition) == 3:
      false_dict = condition[2]
    # Do expansions on the condition itself. Since the condition can
    # naturally contain variable references without needing to resort to GYP
    # expansion syntax, this is of dubious value for variables, but someone
    # might want to use a command expansion directly inside a condition.
    cond_expr_expanded = ExpandVariables(cond_expr, phase, variables,
                                         build_file)
    if not isinstance(cond_expr_expanded, str) and \
       not isinstance(cond_expr_expanded, int):
      # BUG FIX: this message previously referenced an undefined name
      # ('expanded'), turning this error path into a NameError.
      raise ValueError(
          'Variable expansion in this context permits str and int ' +
          'only, found ' + cond_expr_expanded.__class__.__name__)
    try:
      # Evaluate with builtins disabled; only names from |variables| are
      # visible to the condition expression.
      ast_code = compile(cond_expr_expanded, '<string>', 'eval')
      if eval(ast_code, {'__builtins__': None}, variables):
        merge_dict = true_dict
      else:
        merge_dict = false_dict
    except SyntaxError as e:
      syntax_error = SyntaxError('%s while evaluating condition \'%s\' in %s '
                                 'at character %d.' %
                                 (str(e.args[0]), e.text, build_file, e.offset),
                                 e.filename, e.lineno, e.offset, e.text)
      raise syntax_error
    except NameError as e:
      gyp.common.ExceptionAppend(e, 'while evaluating condition \'%s\' in %s' %
                                 (cond_expr_expanded, build_file))
      raise GypError(e)
    if merge_dict is not None:
      # Expand variables and nested conditionals in the merge_dict before
      # merging it.
      ProcessVariablesAndConditionsInDict(merge_dict, phase,
                                          variables, build_file)
      MergeDicts(the_dict, merge_dict, build_file, build_file)
def LoadAutomaticVariablesFromDict(variables, the_dict):
  """Creates automatic variables from the_dict's plain values.

  Any key in the_dict whose value is a string, int, or list becomes an
  automatic variable in |variables|, named '_' + key.
  """
  # .items() instead of the Python-2-only .iteritems(); the produced pairs
  # are identical, and this also runs under Python 3.
  for key, value in the_dict.items():
    if isinstance(value, (str, int, list)):
      variables['_' + key] = value
def LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key):
  """Loads entries of the_dict's "variables" dict into |variables|.

  Any keys in the_dict's "variables" dict, if it has one, become variables.
  The variable name is the key name in the "variables" dict. Variables that
  end with the % character are set only if they are unset in the variables
  dict. the_dict_key is the name of the key that accesses the_dict in
  the_dict's parent dict. If the_dict's parent is not a dict (it could be
  a list or it could be parentless because it is a root dict), the_dict_key
  will be None.
  """
  # .items() instead of Python-2-only .iteritems() (same pairs, py3-safe).
  for key, value in the_dict.get('variables', {}).items():
    if not isinstance(value, (str, int, list)):
      continue
    if key.endswith('%'):
      variable_name = key[:-1]
      if variable_name in variables:
        # If the variable is already set, don't set it.
        continue
      # BUG FIX: was 'the_dict_key is "variables"' — identity comparison on a
      # string literal, which only worked via CPython string interning.
      if the_dict_key == 'variables' and variable_name in the_dict:
        # If the variable is set without a % in the_dict, and the_dict is a
        # variables dict (making |variables| a variables sub-dict of a
        # variables dict), use the_dict's definition.
        value = the_dict[variable_name]
    else:
      variable_name = key
    variables[variable_name] = value
def ProcessVariablesAndConditionsInDict(the_dict, phase, variables_in,
                                        build_file, the_dict_key=None):
  """Handle all variable and command expansion and conditional evaluation.

  This function is the public entry point for all variable expansions and
  conditional evaluations. The variables_in dictionary will not be modified
  by this function.

  the_dict is modified in place. phase is PHASE_EARLY, PHASE_LATE, or
  PHASE_LATELATE. build_file is the path of the file being processed, used
  for error messages. the_dict_key is the key under which the_dict lives in
  its parent dict, or None; it is forwarded to
  LoadVariablesFromVariablesDict for '%'-suffixed variable handling.
  """
  # Make a copy of the variables_in dict that can be modified during the
  # loading of automatics and the loading of the variables dict.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)
  if 'variables' in the_dict:
    # Make sure all the local variables are added to the variables
    # list before we process them so that you can reference one
    # variable from another. They will be fully expanded by recursion
    # in ExpandVariables.
    for key, value in the_dict['variables'].iteritems():
      variables[key] = value
    # Handle the associated variables dict first, so that any variable
    # references within can be resolved prior to using them as variables.
    # Pass a copy of the variables dict to avoid having it be tainted.
    # Otherwise, it would have extra automatics added for everything that
    # should just be an ordinary variable in this scope.
    ProcessVariablesAndConditionsInDict(the_dict['variables'], phase,
                                        variables, build_file, 'variables')
  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
  for key, value in the_dict.iteritems():
    # Skip "variables", which was already processed if present.
    if key != 'variables' and isinstance(value, str):
      expanded = ExpandVariables(value, phase, variables, build_file)
      if not isinstance(expanded, str) and not isinstance(expanded, int):
        raise ValueError, \
              'Variable expansion in this context permits str and int ' + \
              'only, found ' + expanded.__class__.__name__ + ' for ' + key
      the_dict[key] = expanded
  # Variable expansion may have resulted in changes to automatics. Reload.
  # TODO(mark): Optimization: only reload if no changes were made.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)
  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
  # Process conditions in this dict. This is done after variable expansion
  # so that conditions may take advantage of expanded variables. For example,
  # if the_dict contains:
  #   {'type':       '<(library_type)',
  #    'conditions': [['_type=="static_library"', { ... }]]},
  # _type, as used in the condition, will only be set to the value of
  # library_type if variable expansion is performed before condition
  # processing. However, condition processing should occur prior to recursion
  # so that variables (both automatic and "variables" dict type) may be
  # adjusted by conditions sections, merged into the_dict, and have the
  # intended impact on contained dicts.
  #
  # This arrangement means that a "conditions" section containing a "variables"
  # section will only have those variables effective in subdicts, not in
  # the_dict. The workaround is to put a "conditions" section within a
  # "variables" section. For example:
  #   {'conditions': [['os=="mac"', {'variables': {'define': 'IS_MAC'}}]],
  #    'defines':    ['<(define)'],
  #    'my_subdict': {'defines': ['<(define)']}},
  # will not result in "IS_MAC" being appended to the "defines" list in the
  # current scope but would result in it being appended to the "defines" list
  # within "my_subdict". By comparison:
  #   {'variables': {'conditions': [['os=="mac"', {'define': 'IS_MAC'}]]},
  #    'defines':    ['<(define)'],
  #    'my_subdict': {'defines': ['<(define)']}},
  # will append "IS_MAC" to both "defines" lists.
  # Evaluate conditions sections, allowing variable expansions within them
  # as well as nested conditionals. This will process a 'conditions' or
  # 'target_conditions' section, perform appropriate merging and recursive
  # conditional and variable processing, and then remove the conditions section
  # from the_dict if it is present.
  ProcessConditionsInDict(the_dict, phase, variables, build_file)
  # Conditional processing may have resulted in changes to automatics or the
  # variables dict. Reload.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)
  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
  # Recurse into child dicts, or process child lists which may result in
  # further recursion into descendant dicts.
  for key, value in the_dict.iteritems():
    # Skip "variables" and string values, which were already processed if
    # present.
    if key == 'variables' or isinstance(value, str):
      continue
    if isinstance(value, dict):
      # Pass a copy of the variables dict so that subdicts can't influence
      # parents.
      ProcessVariablesAndConditionsInDict(value, phase, variables,
                                          build_file, key)
    elif isinstance(value, list):
      # The list itself can't influence the variables dict, and
      # ProcessVariablesAndConditionsInList will make copies of the variables
      # dict if it needs to pass it to something that can influence it. No
      # copy is necessary here.
      ProcessVariablesAndConditionsInList(value, phase, variables,
                                          build_file)
    elif not isinstance(value, int):
      raise TypeError, 'Unknown type ' + value.__class__.__name__ + \
                       ' for ' + key
def ProcessVariablesAndConditionsInList(the_list, phase, variables,
                                        build_file):
  """Expands variables and processes conditions in every item of the_list.

  dict and list items are processed recursively; str items are expanded with
  ExpandVariables, and if an expansion produces a list it is spliced into
  the_list in place of the original item. int items are left alone; any other
  item type is an error.
  """
  # Iterate using an index so that new values can be assigned into the_list.
  index = 0
  while index < len(the_list):
    item = the_list[index]
    if isinstance(item, dict):
      # Make a copy of the variables dict so that it won't influence anything
      # outside of its own scope.
      ProcessVariablesAndConditionsInDict(item, phase, variables, build_file)
    elif isinstance(item, list):
      ProcessVariablesAndConditionsInList(item, phase, variables, build_file)
    elif isinstance(item, str):
      expanded = ExpandVariables(item, phase, variables, build_file)
      if isinstance(expanded, str) or isinstance(expanded, int):
        the_list[index] = expanded
      elif isinstance(expanded, list):
        the_list[index:index+1] = expanded
        index += len(expanded)

        # index now identifies the next item to examine. Continue right now
        # without falling into the index increment below.
        continue
      else:
        # str(index) is required here: concatenating the int index directly
        # would raise a TypeError that masks the intended ValueError.
        raise ValueError(
            'Variable expansion in this context permits strings and ' +
            'lists only, found ' + expanded.__class__.__name__ + ' at ' +
            str(index))
    elif not isinstance(item, int):
      # Same str(index) fix as above: the original code concatenated the int
      # index onto a str, which itself raised an uninformative TypeError.
      raise TypeError('Unknown type ' + item.__class__.__name__ +
                      ' at index ' + str(index))
    index = index + 1
def BuildTargetsDict(data):
  """Builds a dict mapping fully-qualified target names to their target dicts.

  |data| maps build file pathnames (relative to the current directory) to
  loaded build file contents. For every build file listed in
  data['target_build_files'], each target dict found under that file's
  "targets" key is entered into the returned dict, keyed by the
  fully-qualified target name built from the build file pathname, the
  target's "target_name", and its "toolset". Raises GypError on a duplicate
  fully-qualified name.
  """
  targets = {}
  for build_file in data['target_build_files']:
    build_file_targets = data[build_file].get('targets', [])
    for target_dict in build_file_targets:
      qualified_name = gyp.common.QualifiedTarget(build_file,
                                                  target_dict['target_name'],
                                                  target_dict['toolset'])
      if qualified_name in targets:
        raise GypError('Duplicate target definitions for ' + qualified_name)
      targets[qualified_name] = target_dict

  return targets
def QualifyDependencies(targets):
  """Make dependency links fully-qualified relative to the current directory.

  |targets| is a dict mapping fully-qualified target names to their target
  dicts. For each target in this dict, keys known to contain dependency
  links are examined, and any dependencies referenced will be rewritten
  so that they are fully-qualified and relative to the current directory.
  All rewritten dependencies are suitable for use as keys to |targets| or a
  similar dict.
  """
  # Each dependency section can appear plain, with a '!' suffix (exclusion
  # list), or with a '/' suffix (regex-pattern list); names in all three
  # variants must be qualified.
  all_dependency_sections = [dep + op
                             for dep in dependency_sections
                             for op in ('', '!', '/')]

  for target, target_dict in targets.iteritems():
    target_build_file = gyp.common.BuildFile(target)
    toolset = target_dict['toolset']
    for dependency_key in all_dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      for index in xrange(0, len(dependencies)):
        dep_file, dep_target, dep_toolset = gyp.common.ResolveTarget(
            target_build_file, dependencies[index], toolset)
        if not multiple_toolsets:
          # Ignore toolset specification in the dependency if it is specified.
          dep_toolset = toolset
        dependency = gyp.common.QualifiedTarget(dep_file,
                                                dep_target,
                                                dep_toolset)
        # Rewrite the dependency in place with its fully-qualified form.
        dependencies[index] = dependency

        # Make sure anything appearing in a list other than "dependencies" also
        # appears in the "dependencies" list.
        if dependency_key != 'dependencies' and \
           dependency not in target_dict['dependencies']:
          raise GypError('Found ' + dependency + ' in ' + dependency_key +
                         ' of ' + target + ', but not in dependencies')
def ExpandWildcardDependencies(targets, data):
  """Expands dependencies specified as build_file:*.

  For each target in |targets|, examines sections containing links to other
  targets. If any such section contains a link of the form build_file:*, it
  is taken as a wildcard link, and is expanded to list each target in
  build_file. The |data| dict provides access to build file dicts.

  Any target that does not wish to be included by wildcard can provide an
  optional "suppress_wildcard" key in its target dict. When present and
  true, a wildcard dependency link will not include such targets.

  All dependency names, including the keys to |targets| and the values in each
  dependency list, must be qualified when this function is called.
  """
  for target, target_dict in targets.iteritems():
    toolset = target_dict['toolset']
    target_build_file = gyp.common.BuildFile(target)
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])

      # Loop this way instead of "for dependency in" or "for index in xrange"
      # because the dependencies list will be modified within the loop body.
      index = 0
      while index < len(dependencies):
        (dependency_build_file, dependency_target, dependency_toolset) = \
            gyp.common.ParseQualifiedTarget(dependencies[index])
        if dependency_target != '*' and dependency_toolset != '*':
          # Not a wildcard. Keep it moving.
          index = index + 1
          continue

        if dependency_build_file == target_build_file:
          # It's an error for a target to depend on all other targets in
          # the same file, because a target cannot depend on itself.
          raise GypError('Found wildcard in ' + dependency_key + ' of ' +
                         target + ' referring to same build file')

        # Take the wildcard out and adjust the index so that the next
        # dependency in the list will be processed the next time through the
        # loop.
        del dependencies[index]
        index = index - 1

        # Loop through the targets in the other build file, adding them to
        # this target's list of dependencies in place of the removed
        # wildcard.
        dependency_target_dicts = data[dependency_build_file]['targets']
        for dependency_target_dict in dependency_target_dicts:
          # "suppress_wildcard" lets a target opt out of wildcard expansion.
          if int(dependency_target_dict.get('suppress_wildcard', False)):
            continue
          dependency_target_name = dependency_target_dict['target_name']
          # The wildcard may be in the target part, the toolset part, or both;
          # skip candidates that don't match whichever part is explicit.
          if (dependency_target != '*' and
              dependency_target != dependency_target_name):
            continue
          dependency_target_toolset = dependency_target_dict['toolset']
          if (dependency_toolset != '*' and
              dependency_toolset != dependency_target_toolset):
            continue
          dependency = gyp.common.QualifiedTarget(dependency_build_file,
                                                  dependency_target_name,
                                                  dependency_target_toolset)
          # Insert after the position of the removed wildcard (and after any
          # previously inserted expansion), preserving list order.
          index = index + 1
          dependencies.insert(index, dependency)

        index = index + 1
def Unify(l):
  """Removes duplicate elements from l, keeping the first element."""
  seen = set()
  unique = []
  for element in l:
    if element not in seen:
      seen.add(element)
      unique.append(element)
  return unique
def RemoveDuplicateDependencies(targets):
  """Makes sure every dependency appears only once in all targets's dependency
  lists."""
  # The target name itself is not needed here, only each target's dict.
  for target_dict in targets.itervalues():
    for dependency_key in dependency_sections:
      deps = target_dict.get(dependency_key)
      if deps:
        # Unify preserves order and keeps the first occurrence of each entry.
        target_dict[dependency_key] = Unify(deps)
def Filter(l, item):
  """Returns a copy of l with every element equal to item removed.

  The original implementation routed each kept element through
  dict.setdefault, which added nothing beyond a plain filter (it only
  canonicalized equal-but-distinct objects to the first one seen, a side
  effect no caller relies on); a list comprehension is the direct equivalent.
  """
  return [e for e in l if e != item]
def RemoveSelfDependencies(targets):
  """Remove self dependencies from targets that have the prune_self_dependency
  variable set."""
  for target_name, target_dict in targets.iteritems():
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      if dependencies:
        for t in dependencies:
          if t == target_name:
            # Only prune the self-dependency when the target opts in by
            # setting prune_self_dependency in its "variables" dict.
            if targets[t].get('variables', {}).get('prune_self_dependency', 0):
              target_dict[dependency_key] = Filter(dependencies, target_name)
class DependencyGraphNode(object):
  """A node in a dependency graph of targets (or of .gyp files).

  Attributes:
    ref: A reference to an object that this DependencyGraphNode represents.
    dependencies: List of DependencyGraphNodes on which this one depends.
    dependents: List of DependencyGraphNodes that depend on this one.
  """

  class CircularException(GypError):
    """Raised when a cycle is detected in the dependency graph."""
    pass

  def __init__(self, ref):
    self.ref = ref
    self.dependencies = []
    self.dependents = []

  def __repr__(self):
    return '<DependencyGraphNode: %r>' % self.ref

  def FlattenToList(self):
    """Topologically sorts the graph rooted at this node.

    Returns a list of refs ordered so that every node's ref appears after
    the refs of all of its dependencies and before the refs of all of its
    dependents. Nodes trapped in a cycle are never emitted, so a result
    shorter than the full graph indicates a cycle.
    """
    # flat_list is the sorted list of dependencies - actually, the list items
    # are the "ref" attributes of DependencyGraphNodes. Every target will
    # appear in flat_list after all of its dependencies, and before all of its
    # dependents.
    flat_list = []

    # flat_set mirrors the contents of flat_list. It exists solely to make
    # the membership tests below O(1) instead of O(len(flat_list)); the
    # original list-membership test made flattening quadratic in graph size.
    # Refs are target-name/build-file strings (or None for the root), so they
    # are always hashable.
    flat_set = set()

    # in_degree_zeros is the list of DependencyGraphNodes that have no
    # dependencies not in flat_list. Initially, it is a copy of the children
    # of this node, because when the graph was built, nodes with no
    # dependencies were made implicit dependents of the root node.
    in_degree_zeros = set(self.dependents[:])

    while in_degree_zeros:
      # Nodes in in_degree_zeros have no dependencies not in flat_list, so they
      # can be appended to flat_list. Take these nodes out of in_degree_zeros
      # as work progresses, so that the next node to process from the list can
      # always be accessed at a consistent position.
      node = in_degree_zeros.pop()
      flat_list.append(node.ref)
      flat_set.add(node.ref)

      # Look at dependents of the node just added to flat_list. Some of them
      # may now belong in in_degree_zeros.
      for node_dependent in node.dependents:
        is_in_degree_zero = True
        for node_dependent_dependency in node_dependent.dependencies:
          if node_dependent_dependency.ref not in flat_set:
            # The dependent has one or more dependencies not in flat_list.
            # There will be more chances to add it to flat_list when examining
            # it again as a dependent of those other dependencies, provided
            # that there are no cycles.
            is_in_degree_zero = False
            break

        if is_in_degree_zero:
          # All of the dependent's dependencies are already in flat_list. Add
          # it to in_degree_zeros where it will be processed in a future
          # iteration of the outer loop.
          in_degree_zeros.add(node_dependent)

    return flat_list

  def FindCycles(self, path=None):
    """
    Returns a list of cycles in the graph, where each cycle is its own list.
    """
    if path is None:
      path = [self]

    results = []
    for node in self.dependents:
      if node in path:
        # Reached a node already on the current path: record the cycle as
        # the slice of the path from that node back to itself.
        cycle = [node]
        for part in path:
          cycle.append(part)
          if part == node:
            break
        results.append(tuple(cycle))
      else:
        results.extend(node.FindCycles([node] + path))

    # Tuples are used above so that duplicate cycles can be removed here.
    return list(set(results))

  def DirectDependencies(self, dependencies=None):
    """Returns a list of just direct dependencies."""
    if dependencies is None:
      dependencies = []

    for dependency in self.dependencies:
      # Check for None, corresponding to the root node.
      if dependency.ref is not None and dependency.ref not in dependencies:
        dependencies.append(dependency.ref)

    return dependencies

  def _AddImportedDependencies(self, targets, dependencies=None):
    """Given a list of direct dependencies, adds indirect dependencies that
    other dependencies have declared to export their settings.

    This method does not operate on self. Rather, it operates on the list
    of dependencies in the |dependencies| argument. For each dependency in
    that list, if any declares that it exports the settings of one of its
    own dependencies, those dependencies whose settings are "passed through"
    are added to the list. As new items are added to the list, they too will
    be processed, so it is possible to import settings through multiple levels
    of dependencies.

    This method is not terribly useful on its own, it depends on being
    "primed" with a list of direct dependencies such as one provided by
    DirectDependencies. DirectAndImportedDependencies is intended to be the
    public entry point.
    """
    if dependencies is None:
      dependencies = []

    index = 0
    while index < len(dependencies):
      dependency = dependencies[index]
      dependency_dict = targets[dependency]
      # Add any dependencies whose settings should be imported to the list
      # if not already present. Newly-added items will be checked for
      # their own imports when the list iteration reaches them.
      # Rather than simply appending new items, insert them after the
      # dependency that exported them. This is done to more closely match
      # the depth-first method used by DeepDependencies.
      add_index = 1
      for imported_dependency in \
          dependency_dict.get('export_dependent_settings', []):
        if imported_dependency not in dependencies:
          dependencies.insert(index + add_index, imported_dependency)
          add_index = add_index + 1
      index = index + 1

    return dependencies

  def DirectAndImportedDependencies(self, targets, dependencies=None):
    """Returns a list of a target's direct dependencies and all indirect
    dependencies that a dependency has advertised settings should be exported
    through the dependency for.
    """
    dependencies = self.DirectDependencies(dependencies)
    return self._AddImportedDependencies(targets, dependencies)

  def DeepDependencies(self, dependencies=None):
    """Returns a list of all of a target's dependencies, recursively."""
    if dependencies is None:
      dependencies = []

    for dependency in self.dependencies:
      # Check for None, corresponding to the root node.
      if dependency.ref is not None and dependency.ref not in dependencies:
        dependencies.append(dependency.ref)
        dependency.DeepDependencies(dependencies)

    return dependencies

  def _LinkDependenciesInternal(self, targets, include_shared_libraries,
                                dependencies=None, initial=True):
    """Returns a list of dependency targets that are linked into this target.

    This function has a split personality, depending on the setting of
    |initial|. Outside callers should always leave |initial| at its default
    setting.

    When adding a target to the list of dependencies, this function will
    recurse into itself with |initial| set to False, to collect dependencies
    that are linked into the linkable target for which the list is being built.

    If |include_shared_libraries| is False, the resulting dependencies will not
    include shared_library targets that are linked into this target.
    """
    if dependencies is None:
      dependencies = []

    # Check for None, corresponding to the root node.
    if self.ref is None:
      return dependencies

    # It's kind of sucky that |targets| has to be passed into this function,
    # but that's presently the easiest way to access the target dicts so that
    # this function can find target types.

    if 'target_name' not in targets[self.ref]:
      raise GypError("Missing 'target_name' field in target.")

    if 'type' not in targets[self.ref]:
      raise GypError("Missing 'type' field in target %s" %
                     targets[self.ref]['target_name'])

    target_type = targets[self.ref]['type']

    is_linkable = target_type in linkable_types

    if initial and not is_linkable:
      # If this is the first target being examined and it's not linkable,
      # return an empty list of link dependencies, because the link
      # dependencies are intended to apply to the target itself (initial is
      # True) and this target won't be linked.
      return dependencies

    # Don't traverse 'none' targets if explicitly excluded.
    if (target_type == 'none' and
        not targets[self.ref].get('dependencies_traverse', True)):
      if self.ref not in dependencies:
        dependencies.append(self.ref)
      return dependencies

    # Executables and loadable modules are already fully and finally linked.
    # Nothing else can be a link dependency of them, there can only be
    # dependencies in the sense that a dependent target might run an
    # executable or load the loadable_module.
    if not initial and target_type in ('executable', 'loadable_module'):
      return dependencies

    # Shared libraries are already fully linked. They should only be included
    # in |dependencies| when adjusting static library dependencies (in order to
    # link against the shared_library's import lib), but should not be included
    # in |dependencies| when propagating link_settings.
    # The |include_shared_libraries| flag controls which of these two cases we
    # are handling.
    if (not initial and target_type == 'shared_library' and
        not include_shared_libraries):
      return dependencies

    # The target is linkable, add it to the list of link dependencies.
    if self.ref not in dependencies:
      dependencies.append(self.ref)
      if initial or not is_linkable:
        # If this is a subsequent target and it's linkable, don't look any
        # further for linkable dependencies, as they'll already be linked into
        # this target linkable. Always look at dependencies of the initial
        # target, and always look at dependencies of non-linkables.
        for dependency in self.dependencies:
          dependency._LinkDependenciesInternal(targets,
                                               include_shared_libraries,
                                               dependencies, False)

    return dependencies

  def DependenciesForLinkSettings(self, targets):
    """
    Returns a list of dependency targets whose link_settings should be merged
    into this target.
    """

    # TODO(sbaig) Currently, chrome depends on the bug that shared libraries'
    # link_settings are propagated. So for now, we will allow it, unless the
    # 'allow_sharedlib_linksettings_propagation' flag is explicitly set to
    # False. Once chrome is fixed, we can remove this flag.
    include_shared_libraries = \
        targets[self.ref].get('allow_sharedlib_linksettings_propagation', True)
    return self._LinkDependenciesInternal(targets, include_shared_libraries)

  def DependenciesToLinkAgainst(self, targets):
    """
    Returns a list of dependency targets that are linked into this target.
    """
    return self._LinkDependenciesInternal(targets, True)
def BuildDependencyList(targets):
  """Builds a dependency graph for |targets| and topologically sorts it.

  Returns a two-element list: a dict mapping each fully-qualified target name
  to its DependencyGraphNode, and a flat list of target names ordered so that
  every target appears after all of its dependencies. Raises
  DependencyGraphNode.CircularException if a dependency cycle is detected.
  """
  # Create a DependencyGraphNode for each target. Put it into a dict for easy
  # access.
  dependency_nodes = {}
  for target, spec in targets.iteritems():
    if target not in dependency_nodes:
      dependency_nodes[target] = DependencyGraphNode(target)

  # Set up the dependency links. Targets that have no dependencies are treated
  # as dependent on root_node.
  root_node = DependencyGraphNode(None)
  for target, spec in targets.iteritems():
    target_node = dependency_nodes[target]
    target_build_file = gyp.common.BuildFile(target)
    dependencies = spec.get('dependencies')
    if not dependencies:
      target_node.dependencies = [root_node]
      root_node.dependents.append(target_node)
    else:
      for dependency in dependencies:
        dependency_node = dependency_nodes.get(dependency)
        if not dependency_node:
          raise GypError("Dependency '%s' not found while "
                         "trying to load target %s" % (dependency, target))
        target_node.dependencies.append(dependency_node)
        dependency_node.dependents.append(target_node)

  flat_list = root_node.FlattenToList()

  # If there's anything left unvisited, there must be a circular dependency
  # (cycle). If you need to figure out what's wrong, look for elements of
  # targets that are not in flat_list.
  if len(flat_list) != len(targets):
    raise DependencyGraphNode.CircularException(
        'Some targets not reachable, cycle in dependency graph detected: ' +
        ' '.join(set(flat_list) ^ set(targets)))

  return [dependency_nodes, flat_list]
def VerifyNoGYPFileCircularDependencies(targets):
  """Verifies that the .gyp files containing |targets| form an acyclic graph.

  Builds a file-level dependency graph from the targets' "dependencies"
  lists and raises DependencyGraphNode.CircularException when a cycle among
  .gyp files is detected. A .gyp file is allowed to refer back to itself.
  """
  # Create a DependencyGraphNode for each gyp file containing a target. Put
  # it into a dict for easy access.
  dependency_nodes = {}
  for target in targets.iterkeys():
    build_file = gyp.common.BuildFile(target)
    if not build_file in dependency_nodes:
      dependency_nodes[build_file] = DependencyGraphNode(build_file)

  # Set up the dependency links.
  for target, spec in targets.iteritems():
    build_file = gyp.common.BuildFile(target)
    build_file_node = dependency_nodes[build_file]
    target_dependencies = spec.get('dependencies', [])
    for dependency in target_dependencies:
      try:
        dependency_build_file = gyp.common.BuildFile(dependency)
      except GypError as e:
        gyp.common.ExceptionAppend(
            e, 'while computing dependencies of .gyp file %s' % build_file)
        raise

      if dependency_build_file == build_file:
        # A .gyp file is allowed to refer back to itself.
        continue
      dependency_node = dependency_nodes.get(dependency_build_file)
      if not dependency_node:
        # Fixed error-message typo: was misspelled "Dependancy".
        raise GypError("Dependency '%s' not found" % dependency_build_file)
      if dependency_node not in build_file_node.dependencies:
        build_file_node.dependencies.append(dependency_node)
        dependency_node.dependents.append(build_file_node)

  # Files that have no dependencies are treated as dependent on root_node.
  root_node = DependencyGraphNode(None)
  for build_file_node in dependency_nodes.itervalues():
    if len(build_file_node.dependencies) == 0:
      build_file_node.dependencies.append(root_node)
      root_node.dependents.append(build_file_node)

  flat_list = root_node.FlattenToList()

  # If there's anything left unvisited, there must be a circular dependency
  # (cycle). Report each cycle with the common path prefix trimmed for
  # readability. (The previous version also built an unused bad_files list
  # here; that dead computation has been removed.)
  if len(flat_list) != len(dependency_nodes):
    common_path_prefix = os.path.commonprefix(dependency_nodes)
    cycles = []
    for cycle in root_node.FindCycles():
      simplified_paths = []
      for node in cycle:
        assert(node.ref.startswith(common_path_prefix))
        simplified_paths.append(node.ref[len(common_path_prefix):])
      cycles.append('Cycle: %s' % ' -> '.join(simplified_paths))
    raise DependencyGraphNode.CircularException(
        'Cycles in .gyp file dependency graph detected:\n' + '\n'.join(cycles))
def DoDependentSettings(key, flat_list, targets, dependency_nodes):
  """Merges each target's |key| settings dict into the targets that depend
  on it, walking targets in |flat_list| (dependency) order.
  """
  # key should be one of all_dependent_settings, direct_dependent_settings,
  # or link_settings.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)

    # The choice of key determines how far the dependency graph is walked.
    if key == 'all_dependent_settings':
      dependencies = dependency_nodes[target].DeepDependencies()
    elif key == 'direct_dependent_settings':
      dependencies = \
          dependency_nodes[target].DirectAndImportedDependencies(targets)
    elif key == 'link_settings':
      dependencies = \
          dependency_nodes[target].DependenciesForLinkSettings(targets)
    else:
      raise GypError("DoDependentSettings doesn't know how to determine "
                      'dependencies for ' + key)

    for dependency in dependencies:
      dependency_dict = targets[dependency]
      # Only merge from dependencies that actually define this settings dict.
      if not key in dependency_dict:
        continue
      dependency_build_file = gyp.common.BuildFile(dependency)
      MergeDicts(target_dict, dependency_dict[key],
                 build_file, dependency_build_file)
def AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
                                    sort_dependencies):
  """Normalizes "dependencies" lists with respect to static libraries."""
  # Recompute target "dependencies" properties. For each static library
  # target, remove "dependencies" entries referring to other static libraries,
  # unless the dependency has the "hard_dependency" attribute set. For each
  # linkable target, add a "dependencies" entry referring to all of the
  # target's computed list of link dependencies (including static libraries)
  # if no such entry is already present.
  for target in flat_list:
    target_dict = targets[target]
    target_type = target_dict['type']

    if target_type == 'static_library':
      if not 'dependencies' in target_dict:
        continue

      # Keep a copy of the unmodified list for generators that need it.
      target_dict['dependencies_original'] = target_dict.get(
          'dependencies', [])[:]

      # A static library should not depend on another static library unless
      # the dependency relationship is "hard," which should only be done when
      # a dependent relies on some side effect other than just the build
      # product, like a rule or action output. Further, if a target has a
      # non-hard dependency, but that dependency exports a hard dependency,
      # the non-hard dependency can safely be removed, but the exported hard
      # dependency must be added to the target to keep the same dependency
      # ordering.
      dependencies = \
          dependency_nodes[target].DirectAndImportedDependencies(targets)
      index = 0
      while index < len(dependencies):
        dependency = dependencies[index]
        dependency_dict = targets[dependency]

        # Remove every non-hard static library dependency and remove every
        # non-static library dependency that isn't a direct dependency.
        if (dependency_dict['type'] == 'static_library' and \
            not dependency_dict.get('hard_dependency', False)) or \
           (dependency_dict['type'] != 'static_library' and \
            not dependency in target_dict['dependencies']):
          # Take the dependency out of the list, and don't increment index
          # because the next dependency to analyze will shift into the index
          # formerly occupied by the one being removed.
          del dependencies[index]
        else:
          index = index + 1

      # Update the dependencies. If the dependencies list is empty, it's not
      # needed, so unhook it.
      if len(dependencies) > 0:
        target_dict['dependencies'] = dependencies
      else:
        del target_dict['dependencies']

    elif target_type in linkable_types:
      # Get a list of dependency targets that should be linked into this
      # target. Add them to the dependencies list if they're not already
      # present.

      link_dependencies = \
          dependency_nodes[target].DependenciesToLinkAgainst(targets)
      for dependency in link_dependencies:
        if dependency == target:
          continue
        if not 'dependencies' in target_dict:
          target_dict['dependencies'] = []
        if not dependency in target_dict['dependencies']:
          target_dict['dependencies'].append(dependency)
      # Sort the dependencies list in the order from dependents to dependencies.
      # e.g. If A and B depend on C and C depends on D, sort them in A, B, C, D.
      # Note: flat_list is already sorted in the order from dependencies to
      # dependents.
      if sort_dependencies and 'dependencies' in target_dict:
        target_dict['dependencies'] = [dep for dep in reversed(flat_list)
                                       if dep in target_dict['dependencies']]
# Initialize this here to speed up MakePathRelative.
exception_re = re.compile(r'''["']?[-/$<>^]''')
def MakePathRelative(to_file, fro_file, item):
# If item is a relative path, it's relative to the build file dict that it's
# coming from. Fix it up to make it relative to the build file dict that
# it's going into.
# Exception: any |item| that begins with these special characters is
# returned without modification.
# / Used when a path is already absolute (shortcut optimization;
# such paths would be returned as absolute anyway)
# $ Used for build environment variables
# - Used for some build environment flags (such as -lapr-1 in a
# "libraries" section)
# < Used for our own variable and command expansions (see ExpandVariables)
# > Used for our own variable and command expansions (see ExpandVariables)
# ^ Used for our own variable and command expansions (see ExpandVariables)
#
# "/' Used when a value is quoted. If these are present, then we
# check the second character instead.
#
if to_file == fro_file or exception_re.match(item):
return item
else:
# TODO(dglazkov) The backslash/forward-slash replacement at the end is a
# temporary measure. This should really be addressed by keeping all paths
# in POSIX until actual project generation.
ret = os.path.normpath(os.path.join(
gyp.common.RelativePath(os.path.dirname(fro_file),
os.path.dirname(to_file)),
item)).replace('\\', '/')
if item[-1] == '/':
ret += '/'
return ret
def MergeLists(to, fro, to_file, fro_file, is_paths=False, append=True):
  """Merges list |fro| into list |to|, in place.

  If |is_paths| is true, relative-path items are rebased from |fro_file|'s
  directory to |to_file|'s directory as they are merged. If |append| is
  true, merged items are appended to |to|; otherwise they are prepended,
  keeping their order from |fro|. "Singleton" items (every str or int except
  strings beginning with '-') appear at most once in |to|.
  """
  # Python documentation recommends objects which do not support hash
  # set this value to None. Python library objects follow this rule.
  is_hashable = lambda val: val.__hash__

  # If x is hashable, returns whether x is in s. Else returns whether x is in l.
  def is_in_set_or_list(x, s, l):
    if is_hashable(x):
      return x in s
    return x in l

  prepend_index = 0

  # Make membership testing of hashables in |to| (in particular, strings)
  # faster.
  hashable_to_set = set(x for x in to if is_hashable(x))
  for item in fro:
    singleton = False
    if isinstance(item, str) or isinstance(item, int):
      # The cheap and easy case.
      if is_paths:
        to_item = MakePathRelative(to_file, fro_file, item)
      else:
        to_item = item

      if not isinstance(item, str) or not item.startswith('-'):
        # Any string that doesn't begin with a "-" is a singleton - it can
        # only appear once in a list, to be enforced by the list merge append
        # or prepend.
        singleton = True
    elif isinstance(item, dict):
      # Make a copy of the dictionary, continuing to look for paths to fix.
      # The other intelligent aspects of merge processing won't apply because
      # item is being merged into an empty dict.
      to_item = {}
      MergeDicts(to_item, item, to_file, fro_file)
    elif isinstance(item, list):
      # Recurse, making a copy of the list. If the list contains any
      # descendant dicts, path fixing will occur. Note that here, custom
      # values for is_paths and append are dropped; those are only to be
      # applied to |to| and |fro|, not sublists of |fro|. append shouldn't
      # matter anyway because the new |to_item| list is empty.
      to_item = []
      MergeLists(to_item, item, to_file, fro_file)
    else:
      raise TypeError, \
          'Attempt to merge list item of unsupported type ' + \
          item.__class__.__name__

    if append:
      # If appending a singleton that's already in the list, don't append.
      # This ensures that the earliest occurrence of the item will stay put.
      if not singleton or not is_in_set_or_list(to_item, hashable_to_set, to):
        to.append(to_item)
        if is_hashable(to_item):
          hashable_to_set.add(to_item)
    else:
      # If prepending a singleton that's already in the list, remove the
      # existing instance and proceed with the prepend. This ensures that the
      # item appears at the earliest possible position in the list.
      while singleton and to_item in to:
        to.remove(to_item)

      # Don't just insert everything at index 0. That would prepend the new
      # items to the list in reverse order, which would be an unwelcome
      # surprise.
      to.insert(prepend_index, to_item)
      if is_hashable(to_item):
        hashable_to_set.add(to_item)
      prepend_index = prepend_index + 1
def MergeDicts(to, fro, to_file, fro_file):
  """Merges dict |fro| into dict |to|, in place.

  |to_file| and |fro_file| identify the build files that |to| and |fro| came
  from; they are used to rebase relative paths in path sections. List-valued
  keys may carry a one-character merge-policy suffix ('=' replace, '+'
  prepend, '?' set only if not already present, no suffix appends); see the
  table in the list branch below.
  """
  # I wanted to name the parameter "from" but it's a Python keyword...
  for k, v in fro.iteritems():
    # It would be nice to do "if not k in to: to[k] = v" but that wouldn't give
    # copy semantics. Something else may want to merge from the |fro| dict
    # later, and having the same dict ref pointed to twice in the tree isn't
    # what anyone wants considering that the dicts may subsequently be
    # modified.
    if k in to:
      bad_merge = False
      if isinstance(v, str) or isinstance(v, int):
        if not (isinstance(to[k], str) or isinstance(to[k], int)):
          bad_merge = True
      elif v.__class__ != to[k].__class__:
        bad_merge = True

      if bad_merge:
        raise TypeError, \
            'Attempt to merge dict value of type ' + v.__class__.__name__ + \
            ' into incompatible type ' + to[k].__class__.__name__ + \
            ' for key ' + k
    if isinstance(v, str) or isinstance(v, int):
      # Overwrite the existing value, if any. Cheap and easy.
      is_path = IsPathSection(k)
      if is_path:
        to[k] = MakePathRelative(to_file, fro_file, v)
      else:
        to[k] = v
    elif isinstance(v, dict):
      # Recurse, guaranteeing copies will be made of objects that require it.
      if not k in to:
        to[k] = {}
      MergeDicts(to[k], v, to_file, fro_file)
    elif isinstance(v, list):
      # Lists in dicts can be merged with different policies, depending on
      # how the key in the "from" dict (k, the from-key) is written.
      #
      # If the from-key has ...the to-list will have this action
      # this character appended:... applied when receiving the from-list:
      # = replace
      # + prepend
      # ? set, only if to-list does not yet exist
      # (none) append
      #
      # This logic is list-specific, but since it relies on the associated
      # dict key, it's checked in this dict-oriented function.
      ext = k[-1]
      append = True
      if ext == '=':
        list_base = k[:-1]
        lists_incompatible = [list_base, list_base + '?']
        to[list_base] = []
      elif ext == '+':
        list_base = k[:-1]
        lists_incompatible = [list_base + '=', list_base + '?']
        append = False
      elif ext == '?':
        list_base = k[:-1]
        lists_incompatible = [list_base, list_base + '=', list_base + '+']
      else:
        list_base = k
        lists_incompatible = [list_base + '=', list_base + '?']

      # Some combinations of merge policies appearing together are meaningless.
      # It's stupid to replace and append simultaneously, for example. Append
      # and prepend are the only policies that can coexist.
      for list_incompatible in lists_incompatible:
        if list_incompatible in fro:
          raise GypError('Incompatible list policies ' + k + ' and ' +
                         list_incompatible)

      if list_base in to:
        if ext == '?':
          # If the key ends in "?", the list will only be merged if it doesn't
          # already exist.
          continue
        if not isinstance(to[list_base], list):
          # This may not have been checked above if merging in a list with an
          # extension character.
          raise TypeError, \
              'Attempt to merge dict value of type ' + v.__class__.__name__ + \
              ' into incompatible type ' + to[list_base].__class__.__name__ + \
              ' for key ' + list_base + '(' + k + ')'
      else:
        to[list_base] = []

      # Call MergeLists, which will make copies of objects that require it.
      # MergeLists can recurse back into MergeDicts, although this will be
      # to make copies of dicts (with paths fixed), there will be no
      # subsequent dict "merging" once entering a list because lists are
      # always replaced, appended to, or prepended to.
      is_paths = IsPathSection(list_base)
      MergeLists(to[list_base], v, to_file, fro_file, is_paths, append)
    else:
      raise TypeError, \
          'Attempt to merge dict value of unsupported type ' + \
          v.__class__.__name__ + ' for key ' + k
def MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, configuration, visited):
  """Recursively merge |configuration| and its 'inherit_from' ancestors
  from |target_dict| into |new_configuration_dict|.

  |visited| is the chain of configuration names currently being processed;
  it prevents double-merging and infinite recursion on inheritance cycles.
  """
  # A configuration already on the visited chain has been (or is being)
  # handled; merging it again would duplicate settings or loop forever.
  if configuration in visited:
    return

  config = target_dict['configurations'][configuration]

  # Merge ancestors first so that this configuration's own settings win.
  for parent in config.get('inherit_from', []):
    MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, parent, visited + [configuration])

  # Fold this configuration's own settings on top of the inherited ones.
  MergeDicts(new_configuration_dict, config, build_file, build_file)

  # 'abstract' is an inheritance-only marker; it must not survive into the
  # final merged configuration.
  new_configuration_dict.pop('abstract', None)
def SetUpConfigurations(target, target_dict):
  """Expand the 'configurations' section of |target_dict| in place.

  Each concrete (non-abstract) configuration inherits the target-level
  settings plus its 'inherit_from' chain.  Afterwards, everything that
  belongs in a configuration has been moved out of the target scope into
  each configuration dict, and abstract configurations are removed.

  Arguments:
    target: fully-qualified target name, used to locate its build file.
    target_dict: dict, the target's spec; modified in place.

  Raises:
    GypError: if a configuration contains a key from
        invalid_configuration_keys.
  """
  # key_suffixes is a list of key suffixes that might appear on key names.
  # These suffixes are handled in conditional evaluations (for =, +, and ?)
  # and rules/exclude processing (for ! and /).  Keys with these suffixes
  # should be treated the same as keys without.
  key_suffixes = ['=', '+', '?', '!', '/']

  build_file = gyp.common.BuildFile(target)

  # Provide a single configuration by default if none exists.
  # TODO(mark): Signal an error if default_configurations exists but
  # configurations does not.
  if not 'configurations' in target_dict:
    target_dict['configurations'] = {'Default': {}}
  if not 'default_configuration' in target_dict:
    # Direct dict iteration instead of the Python 2-only iterkeys().
    concrete = [i for i in target_dict['configurations']
                if not target_dict['configurations'][i].get('abstract')]
    target_dict['default_configuration'] = sorted(concrete)[0]

  # Iterate over a snapshot of the keys: configuration values are replaced
  # inside this loop and deleted in the next one, and mutating a dict while
  # iterating a live view is an error on Python 3.  (Python 2's .keys()
  # already returned a list copy.)
  for configuration in list(target_dict['configurations'].keys()):
    old_configuration_dict = target_dict['configurations'][configuration]
    # Skip abstract configurations (saves work only).
    if old_configuration_dict.get('abstract'):
      continue
    # Configurations inherit (most) settings from the enclosing target scope.
    # Get the inheritance relationship right by making a copy of the target
    # dict.
    new_configuration_dict = copy.deepcopy(target_dict)

    # Take out the bits that don't belong in a "configurations" section.
    # Since configuration setup is done before conditional, exclude, and rules
    # processing, be careful with handling of the suffix characters used in
    # those phases.
    delete_keys = []
    for key in new_configuration_dict:
      key_ext = key[-1:]
      if key_ext in key_suffixes:
        key_base = key[:-1]
      else:
        key_base = key
      if key_base in non_configuration_keys:
        delete_keys.append(key)
    for key in delete_keys:
      del new_configuration_dict[key]

    # Merge in configuration (with all its parents first).
    MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, configuration, [])

    # Put the new result back into the target dict as a configuration.
    target_dict['configurations'][configuration] = new_configuration_dict

  # Now drop all the abstract ones; snapshot the keys because entries are
  # deleted during iteration.
  for configuration in list(target_dict['configurations'].keys()):
    old_configuration_dict = target_dict['configurations'][configuration]
    if old_configuration_dict.get('abstract'):
      del target_dict['configurations'][configuration]

  # Now that all of the target's configurations have been built, go through
  # the target dict's keys and remove everything that's been moved into a
  # "configurations" section.
  delete_keys = []
  for key in target_dict:
    key_ext = key[-1:]
    if key_ext in key_suffixes:
      key_base = key[:-1]
    else:
      key_base = key
    if not key_base in non_configuration_keys:
      delete_keys.append(key)
  for key in delete_keys:
    del target_dict[key]

  # Check the configurations to see if they contain invalid keys.
  for configuration in target_dict['configurations']:
    configuration_dict = target_dict['configurations'][configuration]
    for key in configuration_dict:
      if key in invalid_configuration_keys:
        raise GypError('%s not allowed in the %s configuration, found in '
                       'target %s' % (key, configuration, target))
def ProcessListFiltersInDict(name, the_dict):
  """Process regular expression and exclusion-based filters on lists.

  An exclusion list is in a dict key named with a trailing "!", like
  "sources!".  Every item in such a list is removed from the associated
  main list, which in this example, would be "sources".  Removed items are
  placed into a "sources_excluded" list in the dict.

  Regular expression (regex) filters are contained in dict keys named with a
  trailing "/", such as "sources/" to operate on the "sources" list.  Regex
  filters in a dict take the form:
    'sources/': [ ['exclude', '_(linux|mac|win)\\.cc$'],
                  ['include', '_mac\\.cc$'] ],
  The first filter says to exclude all files ending in _linux.cc, _mac.cc, and
  _win.cc.  The second filter then includes all files ending in _mac.cc that
  are now or were once in the "sources" list.  Items matching an "exclude"
  filter are subject to the same processing as would occur if they were listed
  by name in an exclusion list (ending in "!").  Items matching an "include"
  filter are brought back into the main list if previously excluded by an
  exclusion list or exclusion regex filter.  Subsequent matching "exclude"
  patterns can still cause items to be excluded after matching an "include".
  """
  # Look through the dictionary for any lists whose keys end in "!" or "/".
  # These are lists that will be treated as exclude lists and regular
  # expression-based exclude/include lists.  Collect the lists that are
  # needed first, looking for the lists that they operate on, and assemble
  # then into |lists|.  This is done in a separate loop up front, because
  # the _included and _excluded keys need to be added to the_dict, and that
  # can't be done while iterating through it.
  lists = []
  del_lists = []
  # items() instead of the Python 2-only iteritems(); nothing is added to or
  # removed from the_dict inside this loop (deletions are deferred via
  # del_lists), so iterating the live view is safe on Python 3.
  for key, value in the_dict.items():
    operation = key[-1]
    if operation != '!' and operation != '/':
      continue
    if not isinstance(value, list):
      # raise as a call, not the Python 2-only statement form.
      raise ValueError(name + ' key ' + key + ' must be list, not ' +
                       value.__class__.__name__)
    list_key = key[:-1]
    if list_key not in the_dict:
      # This happens when there's a list like "sources!" but no corresponding
      # "sources" list.  Since there's nothing for it to operate on, queue up
      # the "sources!" list for deletion now.
      del_lists.append(key)
      continue
    if not isinstance(the_dict[list_key], list):
      value = the_dict[list_key]
      raise ValueError(name + ' key ' + list_key +
                       ' must be list, not ' +
                       value.__class__.__name__ + ' when applying ' +
                       {'!': 'exclusion', '/': 'regex'}[operation])
    if not list_key in lists:
      lists.append(list_key)

  # Delete the lists that are known to be unneeded at this point.
  for del_list in del_lists:
    del the_dict[del_list]

  for list_key in lists:
    the_list = the_dict[list_key]

    # Initialize the list_actions list, which is parallel to the_list.  Each
    # item in list_actions identifies whether the corresponding item in
    # the_list should be excluded, unconditionally preserved (included), or
    # whether no exclusion or inclusion has been applied.  Items for which
    # no exclusion or inclusion has been applied (yet) have value -1, items
    # excluded have value 0, and items included have value 1.  Includes and
    # excludes override previous actions.  All items in list_actions are
    # initialized to -1 because no excludes or includes have been processed
    # yet.
    list_actions = [-1] * len(the_list)

    exclude_key = list_key + '!'
    if exclude_key in the_dict:
      for exclude_item in the_dict[exclude_key]:
        for index in range(0, len(the_list)):
          if exclude_item == the_list[index]:
            # This item matches the exclude_item, so set its action to 0
            # (exclude).
            list_actions[index] = 0
      # The "whatever!" list is no longer needed, dump it.
      del the_dict[exclude_key]

    regex_key = list_key + '/'
    if regex_key in the_dict:
      for regex_item in the_dict[regex_key]:
        [action, pattern] = regex_item
        pattern_re = re.compile(pattern)

        if action == 'exclude':
          # This item matches an exclude regex, so set its value to 0 (exclude).
          action_value = 0
        elif action == 'include':
          # This item matches an include regex, so set its value to 1 (include).
          action_value = 1
        else:
          # This is an action that doesn't make any sense.
          raise ValueError('Unrecognized action ' + action + ' in ' + name +
                           ' key ' + regex_key)

        for index in range(0, len(the_list)):
          list_item = the_list[index]
          if list_actions[index] == action_value:
            # Even if the regex matches, nothing will change so continue (regex
            # searches are expensive).
            continue
          if pattern_re.search(list_item):
            # Regular expression match.
            list_actions[index] = action_value

      # The "whatever/" list is no longer needed, dump it.
      del the_dict[regex_key]

    # Add excluded items to the excluded list.
    #
    # Note that exclude_key ("sources!") is different from excluded_key
    # ("sources_excluded").  The exclude_key list is input and it was already
    # processed and deleted; the excluded_key list is output and it's about
    # to be created.
    excluded_key = list_key + '_excluded'
    if excluded_key in the_dict:
      raise GypError(name + ' key ' + excluded_key +
                     ' must not be present prior '
                     ' to applying exclusion/regex filters for ' + list_key)

    excluded_list = []

    # Go backwards through the list_actions list so that as items are deleted,
    # the indices of items that haven't been seen yet don't shift.  That means
    # that things need to be prepended to excluded_list to maintain them in the
    # same order that they existed in the_list.
    for index in range(len(list_actions) - 1, -1, -1):
      if list_actions[index] == 0:
        # Dump anything with action 0 (exclude).  Keep anything with action 1
        # (include) or -1 (no include or exclude seen for the item).
        excluded_list.insert(0, the_list[index])
        del the_list[index]

    # If anything was excluded, put the excluded list into the_dict at
    # excluded_key.
    if len(excluded_list) > 0:
      the_dict[excluded_key] = excluded_list

  # Now recurse into subdicts and lists that may contain dicts.  Recursion
  # mutates nested containers, not the_dict's own key set, so iterating the
  # live items() view is safe.
  for key, value in the_dict.items():
    if isinstance(value, dict):
      ProcessListFiltersInDict(key, value)
    elif isinstance(value, list):
      ProcessListFiltersInList(key, value)
def ProcessListFiltersInList(name, the_list):
  """Recurse into |the_list|, applying list filters to any dicts inside.

  Lists themselves carry no filter keys; this simply walks nested
  containers so every embedded dict receives ProcessListFiltersInDict
  treatment under the same |name|.
  """
  for entry in the_list:
    if isinstance(entry, dict):
      ProcessListFiltersInDict(name, entry)
      continue
    if isinstance(entry, list):
      ProcessListFiltersInList(name, entry)
def ValidateTargetType(target, target_dict):
  """Ensures the 'type' field on the target is one of the known types.

  Arguments:
    target: string, name of target.
    target_dict: dict, target spec.

  Raises an exception on error.
  """
  VALID_TARGET_TYPES = ('executable', 'loadable_module',
                        'static_library', 'shared_library',
                        'none')
  target_type = target_dict.get('type', None)
  if target_type not in VALID_TARGET_TYPES:
    raise GypError("Target %s has an invalid target type '%s'. "
                   "Must be one of %s." %
                   (target, target_type, '/'.join(VALID_TARGET_TYPES)))
  # standalone_static_library only makes sense for static libraries.
  wants_standalone = target_dict.get('standalone_static_library', 0)
  if wants_standalone and target_type != 'static_library':
    raise GypError('Target %s has type %s but standalone_static_library flag is'
                   ' only valid for static_library type.' % (target,
                                                             target_type))
def ValidateSourcesInTarget(target, target_dict, build_file):
  """Checks library targets for compiled sources with duplicate basenames.

  Some build systems (e.g. MSVC08) derive object-file names from source
  basenames, so two compiled sources sharing a basename collide.  Only
  static_library and shared_library targets are checked.

  Raises:
    GypError: if duplicate basenames are found (after printing the list).
  """
  # TODO: Check if MSVC allows this for loadable_module targets.
  if target_dict.get('type', None) not in ('static_library', 'shared_library'):
    return
  sources = target_dict.get('sources', [])
  basenames = {}
  for source in sources:
    name, ext = os.path.splitext(source)
    is_compiled_file = ext in [
        '.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
    # Headers and other non-compiled files can't collide as objects.
    if not is_compiled_file:
      continue
    basename = os.path.basename(name)  # Don't include extension.
    basenames.setdefault(basename, []).append(source)

  error = ''
  # items() instead of the Python 2-only iteritems() so this also runs on
  # Python 3.
  for basename, files in basenames.items():
    if len(files) > 1:
      error += ' %s: %s\n' % (basename, ' '.join(files))

  if error:
    print('static library %s has several files with the same basename:\n' %
          target + error + 'Some build systems, e.g. MSVC08, '
          'cannot handle that.')
    raise GypError('Duplicate basenames in sources section, see list above')
def ValidateRulesInTarget(target, target_dict, extra_sources_for_rules):
  """Ensures that the rules sections in target_dict are valid and consistent,
  and determines which sources they apply to.

  Arguments:
    target: string, name of target.
    target_dict: dict, target spec containing "rules" and "sources" lists.
    extra_sources_for_rules: a list of keys to scan for rule matches in
        addition to 'sources'.
  """
  # Track each rule name and extension seen so far to detect conflicts.
  rule_names = {}
  rule_extensions = {}
  for rule in target_dict.get('rules', []):
    rule_name = rule['rule_name']
    if rule_name in rule_names:
      raise GypError('rule %s exists in duplicate, target %s' %
                     (rule_name, target))
    rule_names[rule_name] = rule

    rule_extension = rule['extension']
    # Normalize away a leading dot so '.idl' and 'idl' compare equal.
    if rule_extension.startswith('.'):
      rule_extension = rule_extension[1:]
    if rule_extension in rule_extensions:
      raise GypError(('extension %s associated with multiple rules, ' +
                      'target %s rules %s and %s') %
                     (rule_extension, target,
                      rule_extensions[rule_extension]['rule_name'],
                      rule_name))
    rule_extensions[rule_extension] = rule

    # rule_sources is an output of this function; the input must not try to
    # supply it itself.
    if 'rule_sources' in rule:
      raise GypError(
          'rule_sources must not exist in input, target %s rule %s' %
          (target, rule_name))

    # Collect every source (from 'sources' plus any generator-specified
    # extra keys) whose extension matches this rule's.
    matching_sources = []
    for source_key in ['sources'] + list(extra_sources_for_rules):
      for source in target_dict.get(source_key, []):
        (source_root, source_extension) = os.path.splitext(source)
        if source_extension.startswith('.'):
          source_extension = source_extension[1:]
        if source_extension == rule_extension:
          matching_sources.append(source)

    if matching_sources:
      rule['rule_sources'] = matching_sources
def ValidateRunAsInTarget(target, target_dict, build_file):
  """Validates the optional 'run_as' section of a target.

  'run_as' must be a dict with a non-empty list-valued 'action' and, when
  present, a string 'working_directory' and a dict 'environment'.  Does
  nothing when 'run_as' is absent; raises GypError on any violation.
  """
  target_name = target_dict.get('target_name')
  spec = target_dict.get('run_as')
  if not spec:
    # Nothing to validate.
    return
  if not isinstance(spec, dict):
    raise GypError("The 'run_as' in target %s from file %s should be a "
                   "dictionary." % (target_name, build_file))
  cmd = spec.get('action')
  if not cmd:
    raise GypError("The 'run_as' in target %s from file %s must have an "
                   "'action' section." % (target_name, build_file))
  if not isinstance(cmd, list):
    raise GypError("The 'action' for 'run_as' in target %s from file %s "
                   "must be a list." % (target_name, build_file))
  cwd = spec.get('working_directory')
  if cwd and not isinstance(cwd, str):
    raise GypError("The 'working_directory' for 'run_as' in target %s "
                   "in file %s should be a string." %
                   (target_name, build_file))
  env = spec.get('environment')
  if env and not isinstance(env, dict):
    raise GypError("The 'environment' for 'run_as' in target %s "
                   "in file %s should be a dictionary." %
                   (target_name, build_file))
def ValidateActionsInTarget(target, target_dict, build_file):
  """Validates the inputs to the actions in a target.

  Every action must carry an 'action_name', an 'inputs' key (possibly an
  empty list), and — when an 'action' command is given — a non-empty first
  element.  Raises GypError on the first violation found.
  """
  target_name = target_dict.get('target_name')
  for action in target_dict.get('actions', []):
    if not action.get('action_name'):
      raise GypError("Anonymous action in target %s. "
                     "An action must have an 'action_name' field." %
                     target_name)
    # 'inputs' must be present, even if empty; absence is a spec error.
    if action.get('inputs', None) is None:
      raise GypError('Action in target %s has no inputs.' % target_name)
    command = action.get('action')
    if command and not command[0]:
      raise GypError("Empty action as command in target %s." % target_name)
def TurnIntIntoStrInDict(the_dict):
  """Given dict the_dict, recursively converts all integers into strings.

  Both values and keys are converted; nested dicts and lists are handled
  recursively.  the_dict is modified in place.
  """
  # Iterate over a snapshot (list) of the items: integer keys are re-inserted
  # below as strings and the old keys deleted, and mutating a dict's key set
  # while iterating a live view is an error on Python 3.  (Python 2's items()
  # already returned a list.)  A snapshot also avoids revisiting reinserted
  # keys and their associated values.
  for k, v in list(the_dict.items()):
    if isinstance(v, int):
      v = str(v)
      the_dict[k] = v
    elif isinstance(v, dict):
      TurnIntIntoStrInDict(v)
    elif isinstance(v, list):
      TurnIntIntoStrInList(v)

    if isinstance(k, int):
      # Re-insert the (possibly converted) value under the stringified key
      # and drop the old integer key.
      the_dict[str(k)] = v
      del the_dict[k]
def TurnIntIntoStrInList(the_list):
  """Given list the_list, recursively converts all integers into strings.

  the_list is modified in place; nested dicts and lists are handled
  recursively.
  """
  # enumerate() instead of the Python 2-only xrange() indexing so this also
  # runs on Python 3; the index is still needed for in-place replacement.
  for index, item in enumerate(the_list):
    if isinstance(item, int):
      the_list[index] = str(item)
    elif isinstance(item, dict):
      TurnIntIntoStrInDict(item)
    elif isinstance(item, list):
      TurnIntIntoStrInList(item)
def PruneUnwantedTargets(targets, flat_list, dependency_nodes, root_targets,
                         data):
  """Return only the targets that are deep dependencies of |root_targets|."""
  # Resolve each requested root target name to its fully-qualified form(s).
  qualified_roots = []
  for root in root_targets:
    root = root.strip()
    matches = gyp.common.FindQualifiedTargets(root, flat_list)
    if not matches:
      raise GypError("Could not find target %s" % root)
    qualified_roots.extend(matches)

  # Keep each root plus everything it transitively depends on.
  wanted_targets = {}
  for qualified in qualified_roots:
    wanted_targets[qualified] = targets[qualified]
    for dep in dependency_nodes[qualified].DeepDependencies():
      wanted_targets[dep] = targets[dep]

  # Filter flat_list rather than rebuilding it, preserving its ordering.
  wanted_flat_list = [t for t in flat_list if t in wanted_targets]

  # Prune unwanted targets from each build_file's data dict.
  for build_file in data['target_build_files']:
    if not 'targets' in data[build_file]:
      continue
    data[build_file]['targets'] = [
        t for t in data[build_file]['targets']
        if gyp.common.QualifiedTarget(
            build_file, t['target_name'], t['toolset']) in wanted_targets]

  return wanted_targets, wanted_flat_list
def VerifyNoCollidingTargets(targets):
  """Verify that no two targets in the same directory share the same name.

  Arguments:
    targets: A list of targets in the form 'path/to/file.gyp:target_name'.
  """
  # Map 'subdirectory:target_name' to the .gyp file that first claimed it.
  used = {}
  for target in targets:
    # Separate out 'path/to/file.gyp, 'target_name' from
    # 'path/to/file.gyp:target_name'.
    path, name = target.rsplit(':', 1)
    # Separate out 'path/to', 'file.gyp' from 'path/to/file.gyp'.  The local
    # is called gyp_file (not gyp) to avoid shadowing the gyp module.
    subdir, gyp_file = os.path.split(path)
    # Use '.' for the current directory '', so that the error messages make
    # more sense.
    subdir = subdir or '.'
    # Prepare a key like 'path/to:target_name'.
    key = '%s:%s' % (subdir, name)
    if key in used:
      # Complain if this target is already used.
      raise GypError('Duplicate target name "%s" in directory "%s" used both '
                     'in "%s" and "%s".' % (name, subdir, gyp_file, used[key]))
    used[key] = gyp_file
def SetGeneratorGlobals(generator_input_info):
  """Populate module-level configuration from |generator_input_info|.

  Seeds path_sections and non_configuration_keys with the built-in defaults
  and extends them with the generator-specific entries, then records the
  generator's toolset and filelist settings in module globals.
  """
  # Start each list from a fresh copy of the defaults so repeated calls
  # (e.g. for different generators) don't accumulate entries.
  global path_sections
  path_sections = list(base_path_sections)
  path_sections.extend(generator_input_info['path_sections'])

  global non_configuration_keys
  non_configuration_keys = list(base_non_configuration_keys)
  non_configuration_keys.extend(generator_input_info['non_configuration_keys'])

  global multiple_toolsets
  multiple_toolsets = generator_input_info[
      'generator_supports_multiple_toolsets']

  global generator_filelist_paths
  generator_filelist_paths = generator_input_info['generator_filelist_paths']
def Load(build_files, variables, includes, depth, generator_input_info, check,
         circular_check, parallel, root_targets):
  """Load, merge, filter, and validate all build files.

  Arguments:
    build_files: list of entry-point .gyp file paths.
    variables: dict of variables for expansion.
    includes: list of .gypi files to merge into every build file.
    depth: the --depth directory.
    generator_input_info: dict of generator capabilities and settings.
    check: bool, whether to syntax-check the build files.
    circular_check: bool, whether to reject cross-.gyp circular dependencies.
    parallel: bool, whether to load build files in parallel.
    root_targets: optional list of target names to restrict the build to.

  Returns:
    [flat_list, targets, data]: the dependency-ordered list of qualified
    target names, the dict mapping those names to target dicts, and the
    per-build-file data dict.
  """
  SetGeneratorGlobals(generator_input_info)
  # A generator can have other lists (in addition to sources) be processed
  # for rules.
  extra_sources_for_rules = generator_input_info['extra_sources_for_rules']

  # Load build files.  This loads every target-containing build file into
  # the |data| dictionary such that the keys to |data| are build file names,
  # and the values are the entire build file contents after "early" or "pre"
  # processing has been done and includes have been resolved.
  # NOTE: data contains both "target" files (.gyp) and "includes" (.gypi), as
  # well as meta-data (e.g. 'included_files' key).  'target_build_files' keeps
  # track of the keys corresponding to "target" files.
  data = {'target_build_files': set()}
  aux_data = {}
  # Normalize paths everywhere.  This is important because paths will be
  # used as keys to the data dict and for references between input files.
  build_files = set(map(os.path.normpath, build_files))
  if parallel:
    LoadTargetBuildFilesParallel(build_files, data, aux_data,
                                 variables, includes, depth, check,
                                 generator_input_info)
  else:
    for build_file in build_files:
      try:
        LoadTargetBuildFile(build_file, data, aux_data,
                            variables, includes, depth, check, True)
      # 'except ... as' (valid since Python 2.6) instead of the Python 2-only
      # 'except Exception, e' statement, which is a syntax error on Python 3.
      except Exception as e:
        gyp.common.ExceptionAppend(e, 'while trying to load %s' % build_file)
        raise

  # Build a dict to access each target's subdict by qualified name.
  targets = BuildTargetsDict(data)

  # Fully qualify all dependency links.
  QualifyDependencies(targets)

  # Remove self-dependencies from targets that have 'prune_self_dependencies'
  # set to 1.
  RemoveSelfDependencies(targets)

  # Expand dependencies specified as build_file:*.
  ExpandWildcardDependencies(targets, data)

  # Apply exclude (!) and regex (/) list filters only for dependency_sections.
  # items() instead of the Python 2-only iteritems(); the loop mutates each
  # target_dict's contents but never targets' own key set, so iterating the
  # live view is safe on Python 3.
  for target_name, target_dict in targets.items():
    tmp_dict = {}
    for key_base in dependency_sections:
      for op in ('', '!', '/'):
        key = key_base + op
        if key in target_dict:
          tmp_dict[key] = target_dict[key]
          del target_dict[key]
    ProcessListFiltersInDict(target_name, tmp_dict)
    # Write the results back to |target_dict|.
    for key in tmp_dict:
      target_dict[key] = tmp_dict[key]

  # Make sure every dependency appears at most once.
  RemoveDuplicateDependencies(targets)

  if circular_check:
    # Make sure that any targets in a.gyp don't contain dependencies in other
    # .gyp files that further depend on a.gyp.
    VerifyNoGYPFileCircularDependencies(targets)

  [dependency_nodes, flat_list] = BuildDependencyList(targets)

  if root_targets:
    # Remove, from |targets| and |flat_list|, the targets that are not deep
    # dependencies of the targets specified in |root_targets|.
    targets, flat_list = PruneUnwantedTargets(
        targets, flat_list, dependency_nodes, root_targets, data)

  # Check that no two targets in the same directory have the same name.
  VerifyNoCollidingTargets(flat_list)

  # Handle dependent settings of various types.
  for settings_type in ['all_dependent_settings',
                        'direct_dependent_settings',
                        'link_settings']:
    DoDependentSettings(settings_type, flat_list, targets, dependency_nodes)

    # Take out the dependent settings now that they've been published to all
    # of the targets that require them.
    for target in flat_list:
      if settings_type in targets[target]:
        del targets[target][settings_type]

  # Make sure static libraries don't declare dependencies on other static
  # libraries, but that linkables depend on all unlinked static libraries
  # that they need so that their link steps will be correct.
  gii = generator_input_info
  if gii['generator_wants_static_library_dependencies_adjusted']:
    AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
                                    gii['generator_wants_sorted_dependencies'])

  # Apply "post"/"late"/"target" variable expansions and condition evaluations.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ProcessVariablesAndConditionsInDict(
        target_dict, PHASE_LATE, variables, build_file)

  # Move everything that can go into a "configurations" section into one.
  for target in flat_list:
    target_dict = targets[target]
    SetUpConfigurations(target, target_dict)

  # Apply exclude (!) and regex (/) list filters.
  for target in flat_list:
    target_dict = targets[target]
    ProcessListFiltersInDict(target, target_dict)

  # Apply "latelate" variable expansions and condition evaluations.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ProcessVariablesAndConditionsInDict(
        target_dict, PHASE_LATELATE, variables, build_file)

  # Make sure that the rules make sense, and build up rule_sources lists as
  # needed.  Not all generators will need to use the rule_sources lists, but
  # some may, and it seems best to build the list in a common spot.
  # Also validate actions and run_as elements in targets.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ValidateTargetType(target, target_dict)
    # TODO(thakis): Get vpx_scale/arm/scalesystemdependent.c to be renamed to
    # scalesystemdependent_arm_additions.c or similar.
    if 'arm' not in variables.get('target_arch', ''):
      ValidateSourcesInTarget(target, target_dict, build_file)
    ValidateRulesInTarget(target, target_dict, extra_sources_for_rules)
    ValidateRunAsInTarget(target, target_dict, build_file)
    ValidateActionsInTarget(target, target_dict, build_file)

  # Generators might not expect ints.  Turn them into strs.
  TurnIntIntoStrInDict(data)

  # TODO(mark): Return |data| for now because the generator needs a list of
  # build files that came in.  In the future, maybe it should just accept
  # a list, and not the whole data dict.
  return [flat_list, targets, data]
| agpl-3.0 |
eaplatanios/tensorflow | tensorflow/python/kernel_tests/denormal_test.py | 59 | 2408 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for denormal handling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import platform
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class DenormalTest(test.TestCase):
  # Verifies that TensorFlow kernels flush denormal (subnormal) floats to
  # zero on supported platforms, and that the CPU flags enabling that
  # behavior don't leak back into ordinary Python/numpy execution.

  def testPythonHasDenormals(self):
    """Non-tf numpy code should treat denormals correctly."""
    # If denormals were flushed here, tiny / 16 would round to 0 and the
    # round-trip would not reproduce tiny.
    for dtype in np.float32, np.float64:
      tiny = np.finfo(dtype).tiny
      self.assertEqual(tiny, tiny / 16 * 16)

  def _flushDenormalsTest(self, use_gpu, dtypes):
    # Shared body for the CPU/GPU tests below: multiplying the smallest
    # normal value by 0.1 yields a denormal, which TF is expected to flush
    # to exactly zero for each dtype in |dtypes|.
    if platform.machine() == "ppc64le" or platform.machine() == "s390x":
      # Disabled denormal_test on power/s390x platform
      # Check relevant discussion - https://github.com/tensorflow/tensorflow/issues/11902
      return
    with self.test_session(use_gpu=use_gpu):
      array_ops.identity(7).eval()
      for dtype in dtypes:
        tiny = np.finfo(dtype).tiny
        # Small shape to test main thread, large shape to test thread pool
        for shape in (), (1 << 20,):
          flush = 0.1 * constant_op.constant(tiny, shape=shape)
          self.assertAllEqual(flush.eval(), np.zeros(shape))
      # Make sure the flags don't leak out
      self.testPythonHasDenormals()

  def testFlushDenormalsCPU(self):
    # On CPUs, the processor flags flush for both single and double precision.
    self._flushDenormalsTest(use_gpu=False, dtypes=(np.float32, np.float64))

  def testFlushDenormalsGPU(self):
    # On GPUs, only single precision can flush to zero.
    self._flushDenormalsTest(use_gpu=True, dtypes=(np.float32,))
# Standard TensorFlow test entry point: run the test cases above.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
xuanyuanking/spark | python/pyspark/pandas/usage_logging/usage_logger.py | 14 | 4949 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The reference implementation of usage logger using the Python standard logging library.
"""
from inspect import Signature
import logging
from typing import Any, Optional
def get_logger() -> Any:
    """Plug-in entry point: build and return the usage logger instance."""
    # The plug-in contract only requires a callable named get_logger that
    # returns an object providing the log_* methods.
    return PandasOnSparkUsageLogger()
def _format_signature(signature):
return (
"({})".format(", ".join([p.name for p in signature.parameters.values()]))
if signature is not None
else ""
)
class PandasOnSparkUsageLogger(object):
    """
    The reference implementation of usage logger.

    The usage logger needs to provide the following methods:

    - log_success(self, class_name, name, duration, signature=None)
    - log_failure(self, class_name, name, ex, duration, signature=None)
    - log_missing(self, class_name, name, is_deprecated=False, signature=None)
    """

    def __init__(self):
        # All events are funneled through a single dedicated logger so users
        # can configure its level/handlers independently of the rest.
        self.logger = logging.getLogger("pyspark.pandas.usage_logger")

    def log_success(
        self, class_name: str, name: str, duration: float, signature: Optional[Signature] = None
    ) -> None:
        """
        Log that a function or property call finished successfully.

        :param class_name: the target class name
        :param name: the target function or property name
        :param duration: the duration to finish the function or property call
        :param signature: the signature if the target is a function, else None
        """
        # Skip the (comparatively expensive) formatting when INFO records
        # would be dropped anyway.
        if not self.logger.isEnabledFor(logging.INFO):
            return
        template = (
            "A {function} `{class_name}.{name}{signature}` was successfully finished "
            "after {duration:.3f} ms."
        )
        self.logger.info(
            template.format(
                class_name=class_name,
                name=name,
                signature=_format_signature(signature),
                duration=duration * 1000,
                function="function" if signature is not None else "property",
            )
        )

    def log_failure(
        self,
        class_name: str,
        name: str,
        ex: Exception,
        duration: float,
        signature: Optional[Signature] = None,
    ) -> None:
        """
        Log that a function or property call failed.

        :param class_name: the target class name
        :param name: the target function or property name
        :param ex: the exception causing the failure
        :param duration: the duration until the function or property call fails
        :param signature: the signature if the target is a function, else None
        """
        if not self.logger.isEnabledFor(logging.WARNING):
            return
        template = (
            "A {function} `{class_name}.{name}{signature}` was failed "
            "after {duration:.3f} ms: {msg}"
        )
        self.logger.warning(
            template.format(
                class_name=class_name,
                name=name,
                signature=_format_signature(signature),
                msg=str(ex),
                duration=duration * 1000,
                function="function" if signature is not None else "property",
            )
        )

    def log_missing(
        self,
        class_name: str,
        name: str,
        is_deprecated: bool = False,
        signature: Optional[Signature] = None,
    ) -> None:
        """
        Log that a missing or deprecated function or property was called.

        :param class_name: the target class name
        :param name: the target function or property name
        :param is_deprecated: True if the function or property is marked as deprecated
        :param signature: the original function signature if the target is a function, else None
        """
        if not self.logger.isEnabledFor(logging.INFO):
            return
        template = "A {deprecated} {function} `{class_name}.{name}{signature}` was called."
        self.logger.info(
            template.format(
                class_name=class_name,
                name=name,
                signature=_format_signature(signature),
                function="function" if signature is not None else "property",
                deprecated="deprecated" if is_deprecated else "missing",
            )
        )
| apache-2.0 |
walteryang47/ovirt-engine | packaging/setup/plugins/ovirt-engine-setup/ovirt-engine/config/aaainternal.py | 8 | 6990 | #
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2013-2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""aaainternal plugin."""
import uuid
import gettext
import os
from otopi import constants as otopicons
from otopi import filetransaction, plugin, util
from ovirt_engine_setup import constants as osetupcons
from ovirt_engine_setup.engine import constants as oenginecons
from ovirt_engine_setup.engine_common import constants as oengcommcons
def _(m):
return gettext.dgettext(message=m, domain='ovirt-engine-setup')
@util.export
class Plugin(plugin.PluginBase):
    """aaainternal plugin."""
    # Authz type token recorded in the setup environment for the builtin
    # internal AAA provider.
    MY_TYPE = 'builtin-internal'
    def __init__(self, context):
        super(Plugin, self).__init__(context=context)
    @plugin.event(
        stage=plugin.Stages.STAGE_VALIDATION,
        priority=plugin.Stages.PRIORITY_LAST,
        # Run only when the engine is enabled and no other AAA plugin has
        # claimed the admin user's authz type yet (PRIORITY_LAST makes this
        # the fallback).
        condition=lambda self: (
            self.environment[
                oenginecons.CoreEnv.ENABLE
            ] and self.environment[
                oenginecons.ConfigEnv.ADMIN_USER_AUTHZ_TYPE
            ] is None
        ),
    )
    def _validation(self):
        # Claim the admin user for the builtin-internal provider and make
        # sure the admin user has a unique id.
        self.environment[
            oenginecons.ConfigEnv.ADMIN_USER_AUTHZ_TYPE
        ] = self.MY_TYPE
        if self.environment[oenginecons.ConfigEnv.ADMIN_USER_ID] is None:
            self.environment[
                oenginecons.ConfigEnv.ADMIN_USER_ID
            ] = str(uuid.uuid4())
    @plugin.event(
        stage=plugin.Stages.STAGE_MISC,
        condition=lambda self: (
            self.environment[
                oenginecons.CoreEnv.ENABLE
            ] and self.environment[
                oenginecons.ConfigEnv.ADMIN_USER_AUTHZ_TYPE
            ] == self.MY_TYPE and
            self.environment[
                oenginecons.ConfigEnv.ADMIN_PASSWORD
            ] is not None
        ),
    )
    def _misc(self):
        # Encrypt the admin password with the engine crypto tool; the
        # password is handed over via the process environment ('env:pass')
        # so it never appears on the command line.
        rc, stdout, stderr = self.execute(
            (
                oenginecons.FileLocations.OVIRT_ENGINE_CRYPTO_TOOL,
                'pbe-encode',
                '--password=env:pass',
            ),
            envAppend={
                'OVIRT_ENGINE_JAVA_HOME_FORCE': '1',
                'OVIRT_ENGINE_JAVA_HOME': self.environment[
                    oengcommcons.ConfigEnv.JAVA_HOME
                ],
                'OVIRT_JBOSS_HOME': self.environment[
                    oengcommcons.ConfigEnv.JBOSS_HOME
                ],
                'pass': self.environment[
                    oenginecons.ConfigEnv.ADMIN_PASSWORD
                ],
            },
        )
        # First stdout line is the password-based-encryption blob.
        pbe = stdout[0]
        # Profile name is everything after the last '@' of the admin user
        # (e.g. 'internal' for 'admin@internal').
        profile = self.environment[
            oenginecons.ConfigEnv.ADMIN_USER
        ].rsplit('@', 1)[1]
        # Write the authn extension configuration for the profile.
        self.environment[otopicons.CoreEnv.MAIN_TRANSACTION].append(
            filetransaction.FileTransaction(
                name=(
                    os.path.join(
                        oenginecons.FileLocations.OVIRT_ENGINE_EXTENSIONS_DIR,
                        '%s-authn.properties' % profile
                    )
                ),
                mode=0o600,
                owner=self.environment[osetupcons.SystemEnv.USER_ENGINE],
                enforcePermissions=True,
                content=(
                    'ovirt.engine.extension.name = internal-authn\n'
                    'ovirt.engine.extension.bindings.method = jbossmodule\n'
                    'ovirt.engine.extension.binding.jbossmodule.module = '
                    'org.ovirt.engine.extensions.builtin\n'
                    'ovirt.engine.extension.binding.jbossmodule.class = '
                    'org.ovirt.engine.extensions.aaa.builtin.internal.'
                    'InternalAuthn\n'
                    'ovirt.engine.extension.provides = '
                    'org.ovirt.engine.api.extensions.aaa.Authn\n'
                    'ovirt.engine.aaa.authn.profile.name = {profile}\n'
                    'ovirt.engine.aaa.authn.authz.plugin = {authzName}\n'
                    'config.authn.user.name = {admin_user}\n'
                    'config.authn.user.password = {pbe}\n'
                ).format(
                    profile=profile,
                    authzName=self.environment[
                        oenginecons.ConfigEnv.ADMIN_USER_AUTHZ_NAME
                    ],
                    admin_user=self.environment[
                        oenginecons.ConfigEnv.ADMIN_USER
                    ].rsplit('@', 1)[0],
                    pbe=pbe,
                ),
                modifiedList=self.environment[
                    otopicons.CoreEnv.MODIFIED_FILES
                ],
            )
        )
        # Write the matching authz extension configuration.
        self.environment[otopicons.CoreEnv.MAIN_TRANSACTION].append(
            filetransaction.FileTransaction(
                name=(
                    os.path.join(
                        oenginecons.FileLocations.OVIRT_ENGINE_EXTENSIONS_DIR,
                        '%s-authz.properties' % profile
                    )
                ),
                mode=0o600,
                owner=self.environment[osetupcons.SystemEnv.USER_ENGINE],
                enforcePermissions=True,
                content=(
                    'ovirt.engine.extension.name = {authzName}\n'
                    'ovirt.engine.extension.bindings.method = jbossmodule\n'
                    'ovirt.engine.extension.binding.jbossmodule.module = '
                    'org.ovirt.engine.extensions.builtin\n'
                    'ovirt.engine.extension.binding.jbossmodule.class = '
                    'org.ovirt.engine.extensions.aaa.builtin.internal.'
                    'InternalAuthz\n'
                    'ovirt.engine.extension.provides = '
                    'org.ovirt.engine.api.extensions.aaa.Authz\n'
                    'config.authz.user.name = {admin_user}\n'
                    'config.authz.user.id = {admin_id}\n'
                ).format(
                    authzName=self.environment[
                        oenginecons.ConfigEnv.ADMIN_USER_AUTHZ_NAME
                    ],
                    admin_user=self.environment[
                        oenginecons.ConfigEnv.ADMIN_USER
                    ].rsplit('@', 1)[0],
                    admin_id=self.environment[
                        oenginecons.ConfigEnv.ADMIN_USER_ID
                    ],
                ),
                modifiedList=self.environment[
                    otopicons.CoreEnv.MODIFIED_FILES
                ],
            )
        )
# vim: expandtab tabstop=4 shiftwidth=4
| apache-2.0 |
jordigh/mercurial-crew | tests/test-batching.py | 7 | 5517 | # test-batching.py - tests for transparent command batching
#
# Copyright 2011 Peter Arrenbrecht <peter@arrenbrecht.ch>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from mercurial.wireproto import localbatch, remotebatch, batchable, future
# equivalent of repo.repository
class thing(object):
    """Base interface shared by client and server (stands in for
    repo.repository in the real code)."""
    def hello(self):
        """Plain, non-proxied method available on both sides."""
        greeting = "Ready."
        return greeting
# equivalent of localrepo.localrepository
class localthing(thing):
    """In-process implementation (stands in for localrepo.localrepository)."""
    def foo(self, one, two=None):
        # Falsy `one` short-circuits to a fixed reply.
        if not one:
            return "Nope"
        return "%s and %s" % (one, two,)
    def bar(self, b, a):
        return "%s und %s" % (b, a,)
    def greet(self, name=None):
        return "Hello, %s" % name
    def batch(self):
        '''Support for local batching.'''
        return localbatch(self)
# usage of "thing" interface
def use(it):
    # Driver that exercises any object implementing the "thing" interface;
    # `it` may be the local implementation or a remote proxy.
    # Direct call to base method shared between client and server.
    print it.hello()
    # Direct calls to proxied methods. They cause individual roundtrips.
    print it.foo("Un", two="Deux")
    print it.bar("Eins", "Zwei")
    # Batched call to a couple of (possibly proxied) methods.
    batch = it.batch()
    # The calls return futures to eventually hold results.
    foo = batch.foo(one="One", two="Two")
    foo2 = batch.foo(None)
    bar = batch.bar("Eins", "Zwei")
    # We can call non-batchable proxy methods, but they break the current batch
    # request and cause additional roundtrips.
    greet = batch.greet(name="John Smith")
    # We can also add local methods into the mix, but they break the batch too.
    hello = batch.hello()
    bar2 = batch.bar(b="Uno", a="Due")
    # Only now are all the calls executed in sequence, with as few roundtrips
    # as possible.
    batch.submit()
    # After the call to submit, the futures actually contain values.
    print foo.value
    print foo2.value
    print bar.value
    print greet.value
    print hello.value
    print bar2.value
# local usage
# Drive the full interface against the in-process implementation; every
# call here is a plain method call (no remoting involved).
mylocal = localthing()
print
print "== Local"
use(mylocal)
# demo remoting; mimics what wireproto and HTTP/SSH do
# shared
def escapearg(plain):
    """Escape ':', ',', ';' and '=' so a value can travel safely inside a
    batch request (':' is doubled first so escape markers stay unambiguous)."""
    escaped = plain.replace(':', '::')
    for raw, marked in ((',', ':,'), (';', ':;'), ('=', ':=')):
        escaped = escaped.replace(raw, marked)
    return escaped
def unescapearg(escaped):
    """Inverse of escapearg(): restore '=', ';', ',' and ':' from their
    escaped forms ('::' is undone last, mirroring the escape order)."""
    plain = escaped
    for marked, raw in ((':=', '='), (':;', ';'), (':,', ','), ('::', ':')):
        plain = plain.replace(marked, raw)
    return plain
# server side
# equivalent of wireproto's global functions
class server(object):
    # Server side of the toy wire protocol; delegates the real work to a
    # localthing instance and (un)mangles values at the boundary.
    def __init__(self, local):
        self.local = local
    def _call(self, name, args):
        # Dispatch helper: "k=v" pairs -> keyword call on self.
        args = dict(arg.split('=', 1) for arg in args)
        return getattr(self, name)(**args)
    def perform(self, req):
        # Handle one request of the form "name?arg1=v1&arg2=v2".
        print "REQ:", req
        name, args = req.split('?', 1)
        args = args.split('&')
        vals = dict(arg.split('=', 1) for arg in args)
        res = getattr(self, name)(**vals)
        print " ->", res
        return res
    def batch(self, cmds):
        # Execute a batched request "name1:a=v,b=v;name2:..." and return the
        # escaped results joined by ';' (one entry per command).
        res = []
        for pair in cmds.split(';'):
            name, args = pair.split(':', 1)
            vals = {}
            for a in args.split(','):
                if a:
                    n, v = a.split('=')
                    vals[n] = unescapearg(v)
            res.append(escapearg(getattr(self, name)(**vals)))
        return ';'.join(res)
    # Wire-level commands: unmangle the incoming values, call the local
    # implementation, mangle the reply.
    def foo(self, one, two):
        return mangle(self.local.foo(unmangle(one), unmangle(two)))
    def bar(self, b, a):
        return mangle(self.local.bar(unmangle(b), unmangle(a)))
    def greet(self, name):
        return mangle(self.local.greet(unmangle(name)))
# Server instance delegating to the local implementation created above.
myserver = server(mylocal)
# local side
# equivalent of wireproto.encode/decodelist, that is, type-specific marshalling
# here we just transform the strings a bit to check we're properly en-/decoding
def mangle(s):
    """Toy wire encoding: shift every character up by one code point."""
    shifted = [chr(ord(ch) + 1) for ch in s]
    return ''.join(shifted)
def unmangle(s):
    """Inverse of mangle(): shift every character down by one code point."""
    shifted = [chr(ord(ch) - 1) for ch in s]
    return ''.join(shifted)
# equivalent of wireproto.wirerepository and something like http's wire format
class remotething(thing):
    # Client-side proxy: speaks the toy wire format to a server instance
    # (equivalent of wireproto.wirerepository over HTTP/SSH).
    def __init__(self, server):
        self.server = server
    def _submitone(self, name, args):
        # Single command -> one roundtrip: "name?arg1=v1&arg2=v2".
        req = name + '?' + '&'.join(['%s=%s' % (n, v) for n, v in args])
        return self.server.perform(req)
    def _submitbatch(self, cmds):
        # Many commands -> one roundtrip: "name1:a=v,b=v;name2:...", with
        # values escaped so ';', ',' and '=' survive the framing.
        req = []
        for name, args in cmds:
            args = ','.join(n + '=' + escapearg(v) for n, v in args)
            req.append(name + ':' + args)
        req = ';'.join(req)
        res = self._submitone('batch', [('cmds', req,)])
        return res.split(';')
    def batch(self):
        return remotebatch(self)
    @batchable
    def foo(self, one, two=None):
        # Batchable generator protocol: first yield either a (result, None)
        # pair to answer locally, or (encoded args, future); the final yield
        # produces the decoded result once the future holds the reply.
        if not one:
            yield "Nope", None
        encargs = [('one', mangle(one),), ('two', mangle(two),)]
        encresref = future()
        yield encargs, encresref
        yield unmangle(encresref.value)
    @batchable
    def bar(self, b, a):
        encresref = future()
        yield [('b', mangle(b),), ('a', mangle(a),)], encresref
        yield unmangle(encresref.value)
    # greet is coded directly. It therefore does not support batching. If it
    # does appear in a batch, the batch is split around greet, and the call to
    # greet is done in its own roundtrip.
    def greet(self, name=None):
        return unmangle(self._submitone('greet', [('name', mangle(name),)]))
# demo remote usage
# Same driver as the local run above, but every proxied call now travels
# through the toy wire protocol (mangle/escape, one request per roundtrip).
myproxy = remotething(myserver)
print
print "== Remote"
use(myproxy)
| gpl-2.0 |
alshedivat/tensorflow | tensorflow/contrib/kernel_methods/python/losses_test.py | 23 | 11041 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for third_party.tensorflow.contrib.kernel_methods.python.losses."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.kernel_methods.python import losses
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class SparseMulticlassHingeLossTest(test.TestCase):
  """Shape/dtype validation and numerical tests for
  losses.sparse_multiclass_hinge_loss."""
  def testInvalidLogitsShape(self):
    """An error is raised when logits have invalid shape."""
    with self.cached_session():
      logits = constant_op.constant([-1.0, 2.1], shape=(2,))
      labels = constant_op.constant([0, 1])
      with self.assertRaises(ValueError):
        _ = losses.sparse_multiclass_hinge_loss(labels, logits)
  def testInvalidLabelsShape(self):
    """An error is raised when labels have invalid shape."""
    with self.cached_session():
      logits = constant_op.constant([-1.0, 2.1], shape=(2, 1))
      labels = constant_op.constant([1, 0], shape=(1, 1, 2))
      with self.assertRaises(ValueError):
        _ = losses.sparse_multiclass_hinge_loss(labels, logits)
  def testInvalidWeightsShape(self):
    """An error is raised when weights have invalid shape."""
    with self.cached_session():
      logits = constant_op.constant([-1.0, 2.1], shape=(2, 1))
      labels = constant_op.constant([1, 0], shape=(2,))
      weights = constant_op.constant([1.5, 0.2], shape=(2, 1, 1))
      with self.assertRaises(ValueError):
        _ = losses.sparse_multiclass_hinge_loss(labels, logits, weights)
  def testInvalidLabelsDtype(self):
    """An error is raised when labels have invalid dtype."""
    with self.cached_session():
      logits = constant_op.constant([-1.0, 2.1], shape=(2, 1))
      labels = constant_op.constant([1, 0], dtype=dtypes.float32)
      with self.assertRaises(ValueError):
        _ = losses.sparse_multiclass_hinge_loss(labels, logits)
  def testNoneWeightRaisesValueError(self):
    """An error is raised when weights are None."""
    with self.cached_session():
      logits = constant_op.constant([-1.0, 2.1], shape=(2, 1))
      labels = constant_op.constant([1, 0])
      with self.assertRaises(ValueError):
        _ = losses.sparse_multiclass_hinge_loss(labels, logits, weights=None)
  def testInconsistentLabelsAndWeightsShapesSameRank(self):
    """Error raised when weights and labels have same ranks, different sizes."""
    with self.cached_session():
      logits = constant_op.constant([-1.0, 2.1, 4.1], shape=(3, 1))
      labels = constant_op.constant([1, 0, 2], shape=(3, 1))
      weights = constant_op.constant([1.1, 2.0], shape=(2, 1))
      with self.assertRaises(ValueError):
        _ = losses.sparse_multiclass_hinge_loss(labels, logits, weights)
  def testInconsistentLabelsAndWeightsShapesDifferentRank(self):
    """Error raised when weights and labels have different ranks and sizes."""
    with self.cached_session():
      logits = constant_op.constant([-1.0, 2.1], shape=(2, 1))
      labels = constant_op.constant([1, 0], shape=(2, 1))
      weights = constant_op.constant([1.1, 2.0, 2.8], shape=(3,))
      with self.assertRaises(ValueError):
        _ = losses.sparse_multiclass_hinge_loss(labels, logits, weights)
  def testOutOfRangeLabels(self):
    """An error is raised when labels are not in [0, num_classes)."""
    with self.cached_session():
      logits = constant_op.constant([[1.2, -1.4, -1.0], [1.4, 1.8, 4.0],
                                     [0.5, 1.8, -1.0]])
      labels = constant_op.constant([1, 0, 4])
      loss = losses.sparse_multiclass_hinge_loss(labels, logits)
      # The error surfaces at eval time, not graph-construction time.
      with self.assertRaises(errors.InvalidArgumentError):
        loss.eval()
  def testZeroLossInt32Labels(self):
    """Loss is 0 if true class logits sufficiently higher than other classes."""
    with self.cached_session():
      logits = constant_op.constant([[1.2, -1.4, -1.0], [1.4, 1.8, 4.0],
                                     [0.5, 1.8, -1.0]])
      labels = constant_op.constant([0, 2, 1], dtype=dtypes.int32)
      loss = losses.sparse_multiclass_hinge_loss(labels, logits)
      self.assertAlmostEqual(loss.eval(), 0.0, 3)
  def testZeroLossInt64Labels(self):
    """Loss is 0 if true class logits sufficiently higher than other classes."""
    with self.cached_session():
      logits = constant_op.constant([[2.1, -0.4, -1.0], [1.4, 2.8, 4.0],
                                     [-0.5, 0.8, -1.0]])
      labels = constant_op.constant([0, 2, 1], dtype=dtypes.int64)
      loss = losses.sparse_multiclass_hinge_loss(labels, logits)
      self.assertAlmostEqual(loss.eval(), 0.0, 3)
  def testUnknownShape(self):
    """Result matches `testZeroLossInt32Labels` for partially-unknown shapes."""
    logits_np = np.array([[1.2, -1.4, -1.0], [1.4, 1.8, 4.0], [0.5, 1.8, -1.0]])
    labels_np = np.array([0, 2, 1], dtype=np.int32)
    logits_shapes = [
        [3, 3],  # batch_size, num_classes
        [None, 3],
        [3, None],
        [None, None]
    ]
    for batch_size, num_classes in logits_shapes:
      with self.cached_session():
        logits = array_ops.placeholder(
            dtypes.float32, shape=(batch_size, num_classes))
        labels = array_ops.placeholder(dtypes.int32, shape=(batch_size,))
        loss = losses.sparse_multiclass_hinge_loss(labels, logits)
        result = loss.eval(feed_dict={logits: logits_np, labels: labels_np})
        self.assertAlmostEqual(result, 0.0, 3)
  def testCorrectPredictionsSomeClassesInsideMargin(self):
    """Loss is > 0 even if true class logits are higher than other classes."""
    with self.cached_session():
      logits = constant_op.constant([[1.2, -1.4, 0.8], [1.4, 1.8, 4.0],
                                     [1.5, 1.8, -1.0]])
      labels = constant_op.constant([0, 2, 1])
      loss = losses.sparse_multiclass_hinge_loss(labels, logits)
      # The first and third samples incur some loss (0.6 and 0.7 respectively).
      self.assertAlmostEqual(loss.eval(), 0.4333, 3)
  def testIncorrectPredictions(self):
    """Loss is >0 when an incorrect class has higher logits than true class."""
    with self.cached_session():
      logits = constant_op.constant([[2.6, 0.4, 0.8], [1.4, 0.8, -1.0],
                                     [0.5, -1.8, 2.0]])
      labels = constant_op.constant([1, 0, 2])
      loss = losses.sparse_multiclass_hinge_loss(labels, logits)
      # The first example incurs a high loss (3.2) since the logits of an
      # incorrect class (0) are higher than the logits of the ground truth. The
      # second example also incurs a (smaller) loss (0.4).
      self.assertAlmostEqual(loss.eval(), 1.2, 3)
  def testIncorrectPredictionsColumnLabels(self):
    """Same as above but labels is a rank-2 tensor."""
    with self.cached_session():
      logits = constant_op.constant([[1.6, -0.4, 0.8], [1.5, 0.8, -1.0],
                                     [0.2, -1.8, 4.0]])
      labels = constant_op.constant([1, 0, 2], shape=(3, 1))
      loss = losses.sparse_multiclass_hinge_loss(labels, logits)
      # The first example incurs a high loss (3.0) since the logits of an
      # incorrect class (0) are higher than the logits of the ground truth. The
      # second example also incurs a (smaller) loss (0.3).
      self.assertAlmostEqual(loss.eval(), 1.1, 3)
  def testIncorrectPredictionsZeroWeights(self):
    """Loss is 0 when all weights are missing even if predictions are wrong."""
    with self.cached_session():
      logits = constant_op.constant([[1.6, -0.4, 0.8], [1.5, 0.8, -1.0],
                                     [0.2, -1.8, 4.0]])
      labels = constant_op.constant([1, 0, 2], shape=(3, 1))
      weights = constant_op.constant([0.0, 0.0, 0.0], shape=(3, 1))
      loss = losses.sparse_multiclass_hinge_loss(labels, logits, weights)
      # No overall loss since all weights are 0.
      self.assertAlmostEqual(loss.eval(), 0.0, 3)
  def testNonZeroLossWithPythonScalarWeights(self):
    """Weighted loss is correctly computed when weights is a python scalar."""
    with self.cached_session():
      logits = constant_op.constant([[1.6, -0.4, 0.8], [1.5, 0.8, -1.0],
                                     [0.2, -1.8, 4.0]])
      labels = constant_op.constant([1, 0, 2], shape=(3, 1))
      weights = 10.0
      loss = losses.sparse_multiclass_hinge_loss(labels, logits, weights)
      self.assertAlmostEqual(loss.eval(), 11.0, 3)
  def testNonZeroLossWithScalarTensorWeights(self):
    """Weighted loss is correctly computed when weights is a rank-0 tensor."""
    with self.cached_session():
      logits = constant_op.constant([[1.6, -0.4, 0.8], [1.5, 0.8, -1.0],
                                     [0.2, -1.8, 4.0]])
      labels = constant_op.constant([1, 0, 2], shape=(3, 1))
      weights = constant_op.constant(5.0)
      loss = losses.sparse_multiclass_hinge_loss(labels, logits, weights)
      self.assertAlmostEqual(loss.eval(), 5.5, 3)
  def testNonZeroLossWith1DTensorWeightsColumnLabels(self):
    """Weighted loss is correctly computed when weights is a rank-1 tensor."""
    with self.cached_session():
      logits = constant_op.constant([[1.6, -0.4, 0.8], [1.5, 0.8, -1.0],
                                     [0.2, -1.8, 4.0]])
      labels = constant_op.constant([1, 0, 2], shape=(3, 1))
      weights = constant_op.constant([1.0, 0.5, 2.0], shape=(3,))
      loss = losses.sparse_multiclass_hinge_loss(labels, logits, weights)
      # The overall loss is 1/3 *(3.0*1.0 + 0.5*0.3+ 2.0*0.0) = 1.05
      self.assertAlmostEqual(loss.eval(), 1.05, 3)
  def testNonZeroLossWith2DTensorWeights1DLabelsSomeWeightsMissing(self):
    """Weighted loss is correctly computed when weights is a rank-2 tensor."""
    with self.cached_session():
      logits = constant_op.constant([[1.6, -0.4, 0.8], [1.5, 0.8, -1.0],
                                     [0.2, -1.8, 4.0], [1.6, 1.8, -4.0]])
      labels = constant_op.constant([1, 0, 2, 1])
      weights = constant_op.constant([[1.0], [0.0], [2.0], [4.0]])
      loss = losses.sparse_multiclass_hinge_loss(labels, logits, weights)
      # The overall loss is 1/3 *(3.0*1.0 + 0.0*0.3+ 2.0*0.0 + 4.0*0.8) = 6.2/3.
      self.assertAlmostEqual(loss.eval(), 2.06666, 3)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
moondrop-entertainment/django-nonrel-drawp | tests/regressiontests/views/tests/generic/object_list.py | 51 | 1416 | from django.test import TestCase
class ObjectListTest(TestCase):
    """Pagination behaviour of the object_list generic view."""
    fixtures = ['testdata.json']
    def check_pagination(self, url, expected_status_code, object_count=None):
        # Helper: GET `url`, assert the HTTP status and, when `object_count`
        # is given, assert the page is paginated with exactly that many
        # objects. Returns the response for further inspection.
        response = self.client.get(url)
        self.assertEqual(response.status_code, expected_status_code)
        if object_count:
            self.assertEqual(response.context['is_paginated'], True)
            self.assertEqual(len(response.context['page_obj'].object_list),
                             object_count)
        return response
    def test_finds_pages(self):
        # Check page count doesn't start at 0.
        self.check_pagination('/views/object_list/page0/', 404)
        # Check basic pages.
        self.check_pagination('/views/object_list/page/', 200, 2)
        self.check_pagination('/views/object_list/page1/', 200, 2)
        self.check_pagination('/views/object_list/page2/', 200, 1)
        self.check_pagination('/views/object_list/page3/', 404)
        # Check the special "last" page.
        self.check_pagination('/views/object_list/pagelast/', 200, 1)
        self.check_pagination('/views/object_list/pagenotlast/', 404)
    def test_no_paginate_by(self):
        # Ensure that the view isn't paginated by default.
        url = '/views/object_list_no_paginate_by/page1/'
        response = self.check_pagination(url, 200)
        self.assertEqual(response.context['is_paginated'], False)
| bsd-3-clause |
DDTChen/CookieVLC | vlc/contrib/android/libxml2/python/tests/reader6.py | 35 | 2673 | #!/usr/bin/python -u
#
# this tests Relax NG schema validation with the XmlTextReader interface
#
import sys
import libxml2
# Py2/Py3 compatibility: pick a StringIO implementation for feeding
# in-memory documents into libxml2 input buffers. Catch only ImportError
# so unrelated failures (including KeyboardInterrupt/SystemExit) are not
# silently swallowed by a bare `except:`.
try:
    import StringIO
    str_io = StringIO.StringIO
except ImportError:
    import io
    str_io = io.StringIO
# Relax NG schema: one or more (label, optional opt, byte-typed item) groups.
schema="""<element name="foo" xmlns="http://relaxng.org/ns/structure/1.0"
         datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
  <oneOrMore>
    <element name="label">
      <text/>
    </element>
    <optional>
      <element name="opt">
        <empty/>
      </element>
    </optional>
    <element name="item">
      <data type="byte"/>
    </element>
  </oneOrMore>
</element>
"""
# Memory debug specific
libxml2.debugMemory(1)
#
# Parse the Relax NG Schemas
#
rngp = libxml2.relaxNGNewMemParserCtxt(schema, len(schema))
rngs = rngp.relaxNGParse()
del rngp
#
# Parse and validate the correct document
#
docstr="""<foo>
<label>some text</label>
<item>100</item>
</foo>"""
f = str_io(docstr)
input = libxml2.inputBuffer(f)
reader = input.newTextReader("correct")
reader.RelaxNGSetSchema(rngs)
# Stream through the whole document; Read() returns 1 while nodes remain,
# 0 at end of input and -1 on error.
ret = reader.Read()
while ret == 1:
    ret = reader.Read()
if ret != 0:
    print("Error parsing the document")
    sys.exit(1)
if reader.IsValid() != 1:
    print("Document failed to validate")
    sys.exit(1)
#
# Parse and validate the incorrect document (1000 does not fit in a byte)
#
docstr="""<foo>
<label>some text</label>
<item>1000</item>
</foo>"""
err=""
# RNG errors are not as good as before, TODO
#expect="""RNG validity error: file error line 3 element text
#Type byte doesn't allow value '1000'
#RNG validity error: file error line 3 element text
#Error validating datatype byte
#RNG validity error: file error line 3 element text
#Element item failed to validate content
#"""
expect="""Type byte doesn't allow value '1000'
Error validating datatype byte
Element item failed to validate content
"""
def callback(ctx, str):
    # Error handler: accumulate every reported message into the global
    # `err` buffer so it can be compared against `expect` below.
    global err
    err = err + "%s" % (str)
libxml2.registerErrorHandler(callback, "")
f = str_io(docstr)
input = libxml2.inputBuffer(f)
reader = input.newTextReader("error")
reader.RelaxNGSetSchema(rngs)
ret = reader.Read()
while ret == 1:
    ret = reader.Read()
if ret != 0:
    print("Error parsing the document")
    sys.exit(1)
# This time the reader must flag the document as invalid.
if reader.IsValid() != 0:
    print("Document failed to detect the validation error")
    sys.exit(1)
if err != expect:
    print("Did not get the expected error message:")
    print(err)
    sys.exit(1)
#
# cleanup
#
del f
del input
del reader
del rngs
libxml2.relaxNGCleanupTypes()
# Memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
    print("OK")
else:
    print("Memory leak %d bytes" % (libxml2.debugMemory(1)))
    libxml2.dumpMemory()
| gpl-2.0 |
taaviteska/django | tests/template_tests/syntax_tests/test_exceptions.py | 513 | 2099 | from django.template import TemplateDoesNotExist, TemplateSyntaxError
from django.test import SimpleTestCase
from ..utils import setup
from .test_extends import inheritance_templates
class ExceptionsTests(SimpleTestCase):
    """Template-engine error cases: missing templates and syntax errors
    around the {% extends %} / {% block %} machinery."""
    @setup({'exception01': "{% extends 'nonexistent' %}"})
    def test_exception01(self):
        """
        Raise exception for invalid template name
        """
        with self.assertRaises(TemplateDoesNotExist):
            self.engine.render_to_string('exception01')
    @setup({'exception02': '{% extends nonexistent %}'})
    def test_exception02(self):
        """
        Raise exception for invalid variable template name
        """
        # With string_if_invalid set, the unknown variable resolves to a
        # string, so the failure is a missing template rather than a
        # syntax error.
        if self.engine.string_if_invalid:
            with self.assertRaises(TemplateDoesNotExist):
                self.engine.render_to_string('exception02')
        else:
            with self.assertRaises(TemplateSyntaxError):
                self.engine.render_to_string('exception02')
    @setup(
        {'exception03': "{% extends 'inheritance01' %}"
                        "{% block first %}2{% endblock %}{% extends 'inheritance16' %}"},
        inheritance_templates,
    )
    def test_exception03(self):
        """
        Raise exception for extra {% extends %} tags
        """
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('exception03')
    @setup(
        {'exception04': "{% extends 'inheritance17' %}{% block first %}{% echo 400 %}5678{% endblock %}"},
        inheritance_templates,
    )
    def test_exception04(self):
        """
        Raise exception for custom tags used in child with {% load %} tag in parent, not in child
        """
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('exception04')
    @setup({'exception05': '{% block first %}{{ block.super }}{% endblock %}'})
    def test_exception05(self):
        """
        Raise exception for block.super used in base template
        """
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('exception05')
| bsd-3-clause |
nicklhy/mxnet | example/rcnn/rcnn/core/tester.py | 25 | 10193 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import cPickle
import os
import time
import mxnet as mx
import numpy as np
from module import MutableModule
from rcnn.logger import logger
from rcnn.config import config
from rcnn.io import image
from rcnn.processing.bbox_transform import bbox_pred, clip_boxes
from rcnn.processing.nms import py_nms_wrapper, cpu_nms_wrapper, gpu_nms_wrapper
class Predictor(object):
    """Inference helper: binds a symbol into a MutableModule (not for
    training), loads the given parameters, and exposes a single predict()
    call returning outputs keyed by output name."""
    def __init__(self, symbol, data_names, label_names,
                 context=mx.cpu(), max_data_shapes=None,
                 provide_data=None, provide_label=None,
                 arg_params=None, aux_params=None):
        module = MutableModule(symbol, data_names, label_names,
                               context=context, max_data_shapes=max_data_shapes)
        module.bind(provide_data, provide_label, for_training=False)
        module.init_params(arg_params=arg_params, aux_params=aux_params)
        self._mod = module
    def predict(self, data_batch):
        """Run a forward pass and return {output_name: output_ndarray}."""
        self._mod.forward(data_batch)
        names = self._mod.output_names
        outputs = self._mod.get_outputs()
        return dict(zip(names, outputs))
def im_proposal(predictor, data_batch, data_names, scale):
    """Run the RPN for one batch and return its proposals.

    :param predictor: Predictor wrapping the RPN network
    :param data_batch: one input batch
    :param data_names: names matching the entries of data_batch.data
    :param scale: image scale factor applied at load time
    :return: (scores, boxes in original-image coordinates, data dict)
    """
    data_dict = dict(zip(data_names, data_batch.data))
    output = predictor.predict(data_batch)
    # Column 0 of rois_output is the batch index; keep only the box
    # coordinates, then undo the load-time scaling.
    rois = output['rois_output'].asnumpy()
    boxes = rois[:, 1:] / scale
    scores = output['rois_score'].asnumpy()
    return scores, boxes, data_dict
def generate_proposals(predictor, test_data, imdb, vis=False, thresh=0.):
    """
    Generate detections results using RPN.
    :param predictor: Predictor
    :param test_data: data iterator, must be non-shuffled
    :param imdb: image database
    :param vis: controls visualization
    :param thresh: thresh for valid detections
    :return: list of detected boxes
    """
    # Shuffled data would break the image-index <-> box-list correspondence.
    assert vis or not test_data.shuffle
    data_names = [k[0] for k in test_data.provide_data]
    i = 0
    t = time.time()
    imdb_boxes = list()
    original_boxes = list()
    for im_info, data_batch in test_data:
        t1 = time.time() - t
        t = time.time()
        scale = im_info[0, 2]
        scores, boxes, data_dict = im_proposal(predictor, data_batch, data_names, scale)
        t2 = time.time() - t
        t = time.time()
        # assemble proposals as [x1, y1, x2, y2, score] rows
        dets = np.hstack((boxes, scores))
        original_boxes.append(dets)
        # filter proposals by score threshold
        keep = np.where(dets[:, 4:] > thresh)[0]
        dets = dets[keep, :]
        imdb_boxes.append(dets)
        if vis:
            vis_all_detection(data_dict['data'].asnumpy(), [dets], ['obj'], scale)
        logger.info('generating %d/%d ' % (i + 1, imdb.num_images) +
                    'proposal %d ' % (dets.shape[0]) +
                    'data %.4fs net %.4fs' % (t1, t2))
        i += 1
    assert len(imdb_boxes) == imdb.num_images, 'calculations not complete'
    # save results: thresholded proposals always, unfiltered ones only when
    # a threshold was actually applied
    rpn_folder = os.path.join(imdb.root_path, 'rpn_data')
    if not os.path.exists(rpn_folder):
        os.mkdir(rpn_folder)
    rpn_file = os.path.join(rpn_folder, imdb.name + '_rpn.pkl')
    with open(rpn_file, 'wb') as f:
        cPickle.dump(imdb_boxes, f, cPickle.HIGHEST_PROTOCOL)
    if thresh > 0:
        full_rpn_file = os.path.join(rpn_folder, imdb.name + '_full_rpn.pkl')
        with open(full_rpn_file, 'wb') as f:
            cPickle.dump(original_boxes, f, cPickle.HIGHEST_PROTOCOL)
    logger.info('wrote rpn proposals to %s' % rpn_file)
    return imdb_boxes
def im_detect(predictor, data_batch, data_names, scale):
    """Run Fast(er) R-CNN detection for one batch.

    :param predictor: Predictor wrapping the detection network
    :param data_batch: one input batch
    :param data_names: names matching the entries of data_batch.data
    :param scale: image scale factor applied at load time
    :return: (class scores, boxes in original-image coordinates, data dict)
    """
    output = predictor.predict(data_batch)
    data_dict = dict(zip(data_names, data_batch.data))
    # ROIs come from the in-network RPN when enabled, otherwise from
    # precomputed proposals supplied in the input; either way drop the
    # leading batch-index column.
    if config.TEST.HAS_RPN:
        rois = output['rois_output'].asnumpy()[:, 1:]
    else:
        rois = data_dict['rois'].asnumpy().reshape((-1, 5))[:, 1:]
    scores = output['cls_prob_reshape_output'].asnumpy()[0]
    bbox_deltas = output['bbox_pred_reshape_output'].asnumpy()[0]
    # Apply regression deltas, clip to the (scaled) image boundary, then
    # map back to original-image coordinates since training used scaled
    # images and rois.
    boxes = bbox_pred(rois, bbox_deltas)
    boxes = clip_boxes(boxes, data_dict['data'].shape[-2:])
    boxes = boxes / scale
    return scores, boxes, data_dict
def pred_eval(predictor, test_data, imdb, vis=False, thresh=1e-3):
    """
    wrapper for calculating offline validation for faster data analysis
    in this example, all threshold are set by hand
    :param predictor: Predictor
    :param test_data: data iterator, must be non-shuffle
    :param imdb: image database
    :param vis: controls visualization
    :param thresh: valid detection threshold
    :return:
    """
    assert vis or not test_data.shuffle
    data_names = [k[0] for k in test_data.provide_data]
    nms = py_nms_wrapper(config.TEST.NMS)
    # limit detections to max_per_image over all classes (-1 disables the cap)
    max_per_image = -1
    num_images = imdb.num_images
    # all detections are collected into:
    #    all_boxes[cls][image] = N x 5 array of detections in
    #    (x1, y1, x2, y2, score)
    all_boxes = [[[] for _ in xrange(num_images)]
                 for _ in xrange(imdb.num_classes)]
    i = 0
    t = time.time()
    for im_info, data_batch in test_data:
        t1 = time.time() - t
        t = time.time()
        scale = im_info[0, 2]
        scores, boxes, data_dict = im_detect(predictor, data_batch, data_names, scale)
        t2 = time.time() - t
        t = time.time()
        # per-class thresholding + NMS (class 0 is background, skipped)
        for j in range(1, imdb.num_classes):
            indexes = np.where(scores[:, j] > thresh)[0]
            cls_scores = scores[indexes, j, np.newaxis]
            cls_boxes = boxes[indexes, j * 4:(j + 1) * 4]
            cls_dets = np.hstack((cls_boxes, cls_scores))
            keep = nms(cls_dets)
            all_boxes[j][i] = cls_dets[keep, :]
        if max_per_image > 0:
            # keep only the max_per_image highest-scoring detections
            # across all classes for this image
            image_scores = np.hstack([all_boxes[j][i][:, -1]
                                      for j in range(1, imdb.num_classes)])
            if len(image_scores) > max_per_image:
                image_thresh = np.sort(image_scores)[-max_per_image]
                for j in range(1, imdb.num_classes):
                    keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
                    all_boxes[j][i] = all_boxes[j][i][keep, :]
        if vis:
            boxes_this_image = [[]] + [all_boxes[j][i] for j in range(1, imdb.num_classes)]
            vis_all_detection(data_dict['data'].asnumpy(), boxes_this_image, imdb.classes, scale)
        t3 = time.time() - t
        t = time.time()
        # NOTE(review): logs a 0-based index ('testing 0/N'), unlike
        # generate_proposals which logs i + 1.
        logger.info('testing %d/%d data %.4fs net %.4fs post %.4fs' % (i, imdb.num_images, t1, t2, t3))
        i += 1
    det_file = os.path.join(imdb.cache_path, imdb.name + '_detections.pkl')
    with open(det_file, 'wb') as f:
        cPickle.dump(all_boxes, f, protocol=cPickle.HIGHEST_PROTOCOL)
    imdb.evaluate_detections(all_boxes)
def vis_all_detection(im_array, detections, class_names, scale):
    """
    visualize all detections in one image
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :return:
    """
    # Imported lazily so headless runs without matplotlib only pay the
    # cost when visualization is actually requested.
    import matplotlib.pyplot as plt
    import random
    # Undo the mean subtraction / layout transform done at load time.
    im = image.transform_inverse(im_array, config.PIXEL_MEANS)
    plt.imshow(im)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.random(), random.random(), random.random())  # generate a random color per class
        dets = detections[j]
        for det in dets:
            # boxes are in original-image coordinates; rescale for display
            bbox = det[:4] * scale
            score = det[-1]
            rect = plt.Rectangle((bbox[0], bbox[1]),
                                 bbox[2] - bbox[0],
                                 bbox[3] - bbox[1], fill=False,
                                 edgecolor=color, linewidth=3.5)
            plt.gca().add_patch(rect)
            plt.gca().text(bbox[0], bbox[1] - 2,
                           '{:s} {:.3f}'.format(name, score),
                           bbox=dict(facecolor=color, alpha=0.5), fontsize=12, color='white')
    plt.show()
def draw_all_detection(im_array, detections, class_names, scale):
    """
    visualize all detections in one image
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :return: the image (numpy array, BGR) with detections drawn on it
    """
    import cv2
    import random
    color_white = (255, 255, 255)
    im = image.transform_inverse(im_array, config.PIXEL_MEANS)
    # change to bgr
    # NOTE(review): cv2.cv is the legacy OpenCV 2.x API; on OpenCV >= 3 this
    # constant is cv2.COLOR_RGB2BGR -- confirm the cv2 version in use.
    im = cv2.cvtColor(im, cv2.cv.CV_RGB2BGR)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        # generate a random color; randint bounds are inclusive, so the valid
        # byte range is 0..255 (0..256 could yield an out-of-range 256)
        color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
        dets = detections[j]
        for det in dets:
            bbox = det[:4] * scale
            score = det[-1]
            # list comprehension instead of map(): identical on Python 2 and
            # still indexable on Python 3
            bbox = [int(v) for v in bbox]
            cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=color, thickness=2)
            cv2.putText(im, '%s %.3f' % (class_names[j], score), (bbox[0], bbox[1] + 10),
                        color=color_white, fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5)
    return im
| apache-2.0 |
SerpentCS/odoo | addons/resource/faces/resource.py | 433 | 25890 | #@+leo-ver=4
#@+node:@file resource.py
#@@language python
#@<< Copyright >>
#@+node:<< Copyright >>
############################################################################
# Copyright (C) 2005, 2006, 2007, 2008 by Reithinger GmbH
# mreithinger@web.de
#
# This file is part of faces.
#
# faces is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# faces is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
############################################################################
#@-node:<< Copyright >>
#@nl
#@<< Imports >>
#@+node:<< Imports >>
import pcalendar
import datetime
import utils
import string
import bisect
import plocale
#@-node:<< Imports >>
#@nl
_is_source = True
_to_datetime = pcalendar.to_datetime
_ = plocale.get_gettext()
#@+others
#@+node:_isattrib
#@+doc
#@nonl
# is used to find snapshot attributes
#@-doc
#@@code
def _isattrib(obj, a):
return a[0] != "_" \
and not callable(getattr(obj, a)) \
and not a.endswith("_members") \
and a not in ("name")
#@-node:_isattrib
#@+node:class ResourceCalendar
class ResourceCalendar(object):
    """
    The resource calendar saves the load time of a resource.
    It is a sequence of time intervals of loads. An example of
    such a sequence is:
    [ (datetime.min, 0),
    (2006/1/1, 1.0),
    (2006/1/10, 0.5),
    (2006/1/15, 0) ]
    That means the resource:
    is free till january the first 2006
    is fully booked from january the first to january 10th
    is half booked from january 10th to january 15th
    is free since january 15th
    """
    #@ @+others
    #@+node:__init__
    def __init__(self, src=None):
        # copy-construct from another calendar, or start empty
        # (a single change point: load 0 since the beginning of time)
        if src:
            self.bookings = list(src.bookings)
        else:
            self.bookings = [ (datetime.datetime.min, 0) ]
    #@-node:__init__
    #@+node:__str__
    def __str__(self):
        return str(self.bookings)
    #@-node:__str__
    #@+node:__repr__
    def __repr__(self):
        return "<ResourceCalendar %s>" % (str(self))
    #@-node:__repr__
    #@+node:add_load
    def add_load(self, start, end, load):
        """Add *load* to the half-open interval [start, end).

        ``bookings`` is kept as a sorted list of (date, load) change
        points; a negative *load* removes a previous booking.
        """
        start = _to_datetime(start)
        end = _to_datetime(end)
        bookings = self.bookings
        # the load will be converted in an integer to avoid
        # rounding problems
        load = int(load * 10000)
        start_item = (start, 0)
        start_pos = bisect.bisect_left(bookings, start_item)
        left_load = 0
        left_load = bookings[start_pos - 1][1]
        # insert or merge the change point at the interval start
        if start_pos < len(bookings) and bookings[start_pos][0] == start:
            prev_load = bookings[start_pos][1]
            if prev_load + load == left_load:
                del bookings[start_pos]
            else:
                bookings[start_pos] = (start, prev_load + load)
                start_pos += 1
        else:
            bookings.insert(start_pos, (start, load + left_load))
            start_pos += 1
        # raise the load of every change point strictly inside the interval
        item = (datetime.datetime.min, 0)
        for i in range(start_pos, len(bookings)):
            end_pos = i
            item = bookings[i]
            if item[0] >= end: break
            bookings[i] = (item[0], item[1] + load)
        else:
            end_pos = len(bookings)
        # insert or merge the change point at the interval end
        left_load = bookings[end_pos - 1][1]
        if item[0] == end:
            if item[1] == left_load:
                del bookings[end_pos]
        else:
            bookings.insert(end_pos, (end, left_load - load))
    #@-node:add_load
    #@+node:end_of_booking_interval
    def end_of_booking_interval(self, date):
        """Return (next_change_date, load) for the interval containing *date*."""
        date = _to_datetime(date)
        bookings = self.bookings
        date_item = (date, 999999)
        date_pos = bisect.bisect_left(bookings, date_item) - 1
        next_date = datetime.datetime.max
        load = 0
        try:
            # NOTE(review): book_item is unused; the bare except silently
            # keeps the (datetime.max, 0) defaults past the last change point
            book_item = bookings[date_pos]
            load = bookings[date_pos][1] / 10000.0
            next_date = bookings[date_pos + 1][0]
        except:
            pass
        return next_date, load
    #@-node:end_of_booking_interval
    #@+node:find_free_time
    def find_free_time(self, start, length, load, max_load):
        """Find the first date >= *start* where *length* of work at *load*
        fits without the total exceeding *max_load* anywhere."""
        bookings = self.bookings
        if isinstance(start, datetime.datetime):
            adjust_date = _to_datetime
        else:
            adjust_date = start.calendar.EndDate
        start = _to_datetime(start)
        load = int(load * 10000)
        max_load = int(max_load * 10000)
        lb = len(bookings)
        def next_possible(index):
            # scan forward for a change point whose load leaves enough room
            while index < lb:
                sd, lo = bookings[index]
                if lo + load <= max_load:
                    break
                index += 1
            sd = adjust_date(max(start, sd))
            ed = sd + length
            end = _to_datetime(ed)
            # verify the whole candidate interval [sd, end) stays below max_load
            index += 1
            while index < lb:
                date, lo = bookings[index]
                if date >= end:
                    #I found a good start date
                    return None, sd
                if lo + load > max_load:
                    return index + 1, None
                index += 1
            return None, sd
        start_item = (start, 1000000)
        i = bisect.bisect_left(bookings, start_item) - 1
        next_start = None
        while not next_start and i < lb:
            i, next_start = next_possible(i)
        assert(next_start is not None)
        return next_start
    #@-node:find_free_time
    #@+node:get_bookings
    def get_bookings(self, start, end):
        """Return (start_pos, end_pos, bookings): the slice of change
        points overlapping [start, end)."""
        start = _to_datetime(start)
        end = _to_datetime(end)
        bookings = self.bookings
        start_item = (start, 0)
        start_pos = bisect.bisect_left(bookings, start_item)
        if start_pos >= len(bookings) or bookings[start_pos][0] > start:
            start_pos -= 1
        end_item = (end, 0)
        end_pos = bisect.bisect_left(bookings, end_item)
        return start_pos, end_pos, bookings
    #@-node:get_bookings
    #@+node:get_load
    def get_load(self, date):
        """Return the load at *date* as a float (1.0 == fully booked)."""
        date = _to_datetime(date)
        bookings = self.bookings
        item = (date, 100000)
        pos = bisect.bisect_left(bookings, item) - 1
        return bookings[pos][1] / 10000.0
    #@-node:get_load
    #@-others
#@-node:class ResourceCalendar
#@+node:class _ResourceBase
class _ResourceBase(object):
    # Empty marker base class: _MetaResource.__add_resource registers a new
    # resource class in its base's "<name>_members" list only when the base
    # derives from _ResourceBase.
    pass
#@-node:class _ResourceBase
#@+node:class _MetaResource
class _MetaResource(type):
    """Metaclass of all resource classes.

    Creating a resource class registers it with its base class, sets up
    a per-class calendar/task registry, applies the class vacation and
    renders the class documentation from ``doc_template``.
    """
    doc_template = """
    A resource class. The resources default attributes can
    be changed when the class ist instanciated, i.e.
    %(name)s(max_load=2.0)
    @var max_load:
    Specify the maximal allowed load sum of all simultaneously
    allocated tasks of a resource. A ME{max_load} of 1.0 (default)
    means the resource may be fully allocated. A ME{max_load} of 1.3
    means the resource may be allocated with 30%% overtime.
    @var title:
    Specifies an alternative more descriptive name for the task.
    @var efficiency:
    The efficiency of a resource can be used for two purposes. First
    you can use it as a crude way to model a team. A team of 5 people
    should have an efficiency of 5.0. Keep in mind that you cannot
    track the member of the team individually if you use this
    feature. The other use is to model performance variations between
    your resources.
    @var vacation:
    Specifies the vacation of the resource. This attribute is
    specified as a list of date literals or date literal intervals.
    Be aware that the end of an interval is excluded, i.e. it is
    the first working date.
    """
    #@ @+others
    #@+node:__init__
    def __init__(self, name, bases, dict_):
        super(_MetaResource, self).__init__(name, bases, dict_)
        self.name = name
        self.title = dict_.get("title", name)
        # one calendar per scenario; key None is the scenario-independent base
        self._calendar = { None: ResourceCalendar() }
        self._tasks = { }
        self.__set_vacation()
        self.__add_resource(bases[0])
        self.__doc__ = dict_.get("__doc__", self.doc_template) % locals()
    #@-node:__init__
    #@+node:__or__
    def __or__(self, other):
        # delegate to an instance so that `ClassA | ClassB` works
        return self().__or__(other)
    #@-node:__or__
    #@+node:__and__
    def __and__(self, other):
        # delegate to an instance so that `ClassA & ClassB` works
        return self().__and__(other)
    #@-node:__and__
    #@+node:__cmp__
    def __cmp__(self, other):
        # Python 2 ordering: resource classes compare by name
        return cmp(self.name, getattr(other, "name", None))
    #@-node:__cmp__
    #@+node:__repr__
    def __repr__(self):
        return "<Resource %s>" % self.name
    #@-node:__repr__
    #@+node:__str__
    def __str__(self):
        return repr(self)
    #@-node:__str__
    #@+node:__set_vacation
    def __set_vacation(self):
        # the class attribute may be a single date literal or a list of
        # literals and (start, end) intervals
        vacation = self.vacation
        if isinstance(vacation, (tuple, list)):
            for v in vacation:
                if isinstance(v, (tuple, list)):
                    self.add_vacation(v[0], v[1])
                else:
                    self.add_vacation(v)
        else:
            self.add_vacation(vacation)
    #@-node:__set_vacation
    #@+node:__add_resource
    def __add_resource(self, base):
        # register the new class in its base's "<base>_members" list
        if issubclass(base, _ResourceBase):
            members = getattr(base, base.__name__ + "_members", [])
            members.append(self)
            setattr(base, base.__name__ + "_members", members)
    #@-node:__add_resource
    #@+node:get_members
    def get_members(self):
        return getattr(self, self.__name__ + "_members", [])
    #@-node:get_members
    #@+node:add_vacation
    def add_vacation(self, start, end=None):
        """Block the resource completely from *start* to *end*
        (the rest of the start day when *end* is not given)."""
        start_date = _to_datetime(start)
        if not end:
            end_date = start_date.replace(hour=23, minute=59)
        else:
            end_date = _to_datetime(end)
        for cal in self._calendar.itervalues():
            cal.add_load(start_date, end_date, 1)
        # record the vacation as a pseudo booking under the "" task id
        tp = Booking()
        tp.start = start_date
        tp.end = end_date
        tp.book_start = start_date
        tp.book_end = end_date
        tp.work_time = end_date - start_date
        tp.load = 1.0
        tp.name = tp.title = _("(vacation)")
        tp._id = ""
        self._tasks.setdefault("", []).append(tp)
    #@-node:add_vacation
    #@+node:calendar
    def calendar(self, scenario):
        """Return (and lazily create from the base) the calendar of *scenario*."""
        try:
            return self._calendar[scenario]
        except KeyError:
            cal = self._calendar[scenario] = ResourceCalendar(self._calendar[None])
            return cal
    #@-node:calendar
    #@-others
#@-node:class _MetaResource
#@+node:make_team
def make_team(resource):
    """Recursively combine all members of *resource* into one "and" team.

    A resource without members is returned unchanged; otherwise the
    members' teams are joined with the ``&`` operator.
    """
    members = resource.get_members()
    if not members:
        return resource
    team = make_team(members[0])
    for member in members[1:]:
        team = team & make_team(member)
    return team
#@-node:make_team
#@+node:class Booking
class Booking(object):
    """
    A booking unit for a task.

    A Booking either wraps a task (created by Resource.book_task) or
    stands alone (vacations). Attribute lookups that fail on the booking
    itself are delegated to the wrapped task.
    """
    #@ << declarations >>
    #@+node:<< declarations >>
    # class-level defaults; instances overwrite them as needed
    book_start = datetime.datetime.min
    book_end = datetime.datetime.max
    actual = False
    _id = ""
    #@-node:<< declarations >>
    #@nl
    #@ @+others
    #@+node:__init__
    def __init__(self, task=None):
        # the wrapped task (may be None); see __getattr__
        self.__task = task
    #@-node:__init__
    #@+node:__cmp__
    def __cmp__(self, other):
        # Python 2 ordering: bookings compare by their task identity
        return cmp(self._id, other._id)
    #@-node:__cmp__
    #@+node:path
    def path(self):
        # replace everything before the first "." of the identity with "root"
        first_dot = self._id.find(".")
        return "root" + self._id[first_dot:]
    path = property(path)
    #@nonl
    #@-node:path
    #@+node:_idendity_
    def _idendity_(self):
        return self._id
    #@-node:_idendity_
    #@+node:__getattr__
    def __getattr__(self, name):
        # delegate unknown attributes to the wrapped task, if any
        if self.__task:
            return getattr(self.__task, name)
        raise AttributeError("'%s' is not a valid attribute" % (name))
    #@-node:__getattr__
    #@-others
#@-node:class Booking
#@+node:class ResourceList
class ResourceList(list):
    #@ @+others
    #@+node:__init__
    def __init__(self, *args):
        """A plain list of resources, optionally seeded with initial items."""
        for item in args:
            self.append(item)
    #@-node:__init__
    #@-others
#@-node:class ResourceList
#@+node:class Resource
class Resource(_ResourceBase):
    # Base class of all concrete resources. Most state (calendars, the
    # _tasks registry) lives on the *class*, managed by _MetaResource;
    # instances mainly carry per-use attribute overrides and boolean
    # combinations of resources via _subresource.
    #@ << declarations >>
    #@+node:<< declarations >>
    __metaclass__ = _MetaResource
    __attrib_completions__ = {\
        "max_load": 'max_load = ',
        "title": 'title = "|"',
        "efficiency": 'efficiency = ',
        "vacation": 'vacation = [("|2002-02-01", "2002-02-05")]' }
    __type_image__ = "resource16"
    max_load = None # the maximum sum load for all task
    vacation = ()
    efficiency = 1.0
    #@-node:<< declarations >>
    #@nl
    #@ @+others
    #@+node:__init__
    def __init__(self, **kwargs):
        # keyword arguments override the class defaults on this instance
        # (Python 2: iteritems)
        for k, v in kwargs.iteritems():
            setattr(self, k, v)
    #@-node:__init__
    #@+node:_idendity_
    def _idendity_(cls):
        """Return the unique identity string of this resource class."""
        return "resource:" + cls.__name__
    _idendity_ = classmethod(_idendity_)
    #@-node:_idendity_
    #@+node:__repr__
    def __repr__(self):
        return "<Resource %s>" % self.__class__.__name__
    #@-node:__repr__
    #@+node:__str__
    def __str__(self):
        return repr(self)
    #@-node:__str__
    #@+node:__call__
    def __call__(self):
        # classes and instances are used interchangeably: calling an
        # instance yields itself (calling the class yields an instance)
        return self
    #@-node:__call__
    #@+node:__hash__
    def __hash__(self):
        return hash(self.__class__)
    #@-node:__hash__
    #@+node:__cmp__
    def __cmp__(self, other):
        # Python 2 ordering: resources compare by name
        return cmp(self.name, other.name)
    #@-node:__cmp__
    #@+node:__or__
    def __or__(self, other):
        """Build an alternative ("or") combination of two resources."""
        if type(other) is _MetaResource:
            other = other()
        result = Resource()
        result._subresource = _OrResourceGroup(self, other)
        return result
    #@-node:__or__
    #@+node:__and__
    def __and__(self, other):
        """Build a team ("and") combination of two resources."""
        if type(other) is _MetaResource:
            other = other()
        result = Resource()
        result._subresource = _AndResourceGroup(self, other)
        return result
    #@-node:__and__
    #@+node:_permutation_count
    def _permutation_count(self):
        # number of distinct allocation alternatives of this resource
        if hasattr(self, "_subresource"):
            return self._subresource._permutation_count()
        return 1
    #@-node:_permutation_count
    #@+node:_get_resources
    def _get_resources(self, state):
        # resolve permutation index *state* to a concrete ResourceList
        if hasattr(self, "_subresource"):
            result = self._subresource._get_resources(state)
            if self.name != "Resource":
                result.name = self.name
            if self.title != "Resource":
                result.title = self.title
            return result
        result = ResourceList(self)
        return result
    #@-node:_get_resources
    #@+node:all_members
    def all_members(self):
        if hasattr(self, "_subresource"):
            return self._subresource.all_members()
        return [ self.__class__ ]
    #@-node:all_members
    #@+node:unbook_tasks_of_project
    def unbook_tasks_of_project(cls, project_id, scenario):
        """Remove all bookings of tasks belonging to *project_id*."""
        try:
            task_list = cls._tasks[scenario]
        except KeyError:
            return
        add_load = cls.calendar(scenario).add_load
        # .items() (not iteritems) because entries are deleted while iterating
        for task_id, bookings in task_list.items():
            if task_id.startswith(project_id):
                for item in bookings:
                    add_load(item.book_start, item.book_end, -item.load)
                del task_list[task_id]
        if not task_list:
            del cls._tasks[scenario]
    unbook_tasks_of_project = classmethod(unbook_tasks_of_project)
    #@-node:unbook_tasks_of_project
    #@+node:unbook_task
    def unbook_task(cls, task):
        """Remove all bookings of a single task and release its load."""
        identdity = task._idendity_()
        scenario = task.scenario
        try:
            task_list = cls._tasks[scenario]
            bookings = task_list[identdity]
        except KeyError:
            return
        add_load = cls.calendar(scenario).add_load
        for b in bookings:
            add_load(b.book_start, b.book_end, -b.load)
        del task_list[identdity]
        if not task_list:
            del cls._tasks[scenario]
    unbook_task = classmethod(unbook_task)
    #@-node:unbook_task
    #@+node:correct_bookings
    def correct_bookings(cls, task):
        #correct the booking data with the actual task data
        try:
            tasks = cls._tasks[task.scenario][task._idendity_()]
        except KeyError:
            return
        for t in tasks:
            t.start = task.start.to_datetime()
            t.end = task.end.to_datetime()
    correct_bookings = classmethod(correct_bookings)
    #@-node:correct_bookings
    #@+node:book_task
    def book_task(cls, task, start, end, load, work_time, actual):
        """Record a booking of *task* on this resource and add its load
        to the scenario calendar."""
        if not work_time: return
        start = _to_datetime(start)
        end = _to_datetime(end)
        identdity = task._idendity_()
        task_list = cls._tasks.setdefault(task.scenario, {})
        bookings = task_list.setdefault(identdity, [])
        add_load = cls.calendar(task.scenario).add_load
        tb = Booking(task)
        tb.book_start = start
        tb.book_end = end
        tb._id = identdity
        tb.load = load
        tb.start = _to_datetime(task.start)
        tb.end = _to_datetime(task.end)
        tb.title = task.title
        tb.name = task.name
        tb.work_time = int(work_time)
        tb.actual = actual
        bookings.append(tb)
        result = add_load(start, end, load)
        return result
    book_task = classmethod(book_task)
    #@-node:book_task
    #@+node:length_of
    def length_of(cls, task):
        cal = task.root.calendar
        bookings = cls.get_bookings(task)
        return sum(map(lambda b: task._to_delta(b.work_time).round(), bookings))
    length_of = classmethod(length_of)
    #@-node:length_of
    #@+node:done_of
    def done_of(self, task):
        """Return the work of *task* already done before "now"."""
        cal = task.root.calendar
        now = cal.now
        bookings = self.get_bookings(task)
        # efficiency only applies to effort-driven tasks (Python 2: has_key)
        if task.__dict__.has_key("effort"):
            efficiency = self.efficiency * task.efficiency
        else:
            efficiency = 1
        def book_done(booking):
            if booking.book_start >= now:
                return 0
            factor = 1
            if booking.book_end > now:
                # booking straddles "now": count only the elapsed fraction
                start = task._to_start(booking.book_start)
                end = task._to_end(booking.book_end)
                cnow = task._to_start(now)
                factor = float(cnow - start) / ((end - start) or 1)
            return factor * booking.work_time * efficiency
        return task._to_delta(sum(map(book_done, bookings)))
    #@-node:done_of
    #@+node:todo_of
    def todo_of(self, task):
        """Return the work of *task* still to be done after "now"."""
        cal = task.root.calendar
        now = cal.now
        bookings = self.get_bookings(task)
        if task.__dict__.has_key("effort"):
            efficiency = self.efficiency * task.efficiency
        else:
            efficiency = 1
        def book_todo(booking):
            if booking.book_end <= now:
                return 0
            factor = 1
            if booking.book_start < now:
                # booking straddles "now": count only the remaining fraction
                start = task._to_start(booking.book_start)
                end = task._to_end(booking.book_end)
                cnow = task._to_start(now)
                factor = float(end - cnow) / ((end - start) or 1)
            return factor * booking.work_time * efficiency
        return task._to_delta(sum(map(book_todo, bookings)))
    #@-node:todo_of
    #@+node:get_bookings
    def get_bookings(cls, task):
        return cls._tasks.get(task.scenario, {}).get(task._idendity_(), ())
    get_bookings = classmethod(get_bookings)
    #@-node:get_bookings
    #@+node:get_bookings_at
    def get_bookings_at(cls, start, end, scenario):
        """Return all bookings (incl. vacations) overlapping [start, end)."""
        result = []
        try:
            items = cls._tasks[scenario].iteritems()
        except KeyError:
            return ()
        for task_id, bookings in items:
            result += [ booking for booking in bookings
                        if booking.book_start < end
                        and booking.book_end > start ]
        # vacations are stored under the "" key
        vacations = cls._tasks.get("", ())
        result += [ booking for booking in vacations
                    if booking.book_start < end
                    and booking.book_end > start ]
        return result
    get_bookings_at = classmethod(get_bookings_at)
    #@-node:get_bookings_at
    #@+node:find_free_time
    def find_free_time(cls, start, length, load, max_load, scenario):
        return cls.calendar(scenario).find_free_time(start, length, load, max_load)
    find_free_time = classmethod(find_free_time)
    #@-node:find_free_time
    #@+node:get_load
    def get_load(cls, date, scenario):
        return cls.calendar(scenario).get_load(date)
    get_load = classmethod(get_load)
    #@-node:get_load
    #@+node:end_of_booking_interval
    def end_of_booking_interval(cls, date, task):
        return cls.calendar(task.scenario).end_of_booking_interval(date)
    end_of_booking_interval = classmethod(end_of_booking_interval)
    #@-node:end_of_booking_interval
    #@+node:snapshot
    def snapshot(self):
        """Serialize the non-default attributes as a constructor call string."""
        # local project import, kept here to avoid an import cycle
        from task import _as_string
        def isattrib(a):
            if a == "max_load" and self.max_load is None: return False
            if a in ("name", "title", "vacation"): return False
            return _isattrib(self, a)
        attribs = filter(isattrib, dir(self))
        attribs = map(lambda a: "%s=%s" % (a, _as_string(getattr(self, a))),
                      attribs)
        return self.name + "(%s)" % ", ".join(attribs)
    #@-node:snapshot
    #@-others
#@-node:class Resource
#@+node:class _ResourceGroup
class _ResourceGroup(object):
#@ @+others
#@+node:__init__
def __init__(self, *args):
self.resources = []
for a in args:
self.__append(a)
#@-node:__init__
#@+node:all_members
def all_members(self):
group = reduce(lambda a, b: a + b.all_members(),
self.resources, [])
group = map(lambda r: (r, True), group)
group = dict(group)
group = group.keys()
return group
#@-node:all_members
#@+node:_permutation_count
def _permutation_count(self):
abstract
#@-node:_permutation_count
#@+node:_refactor
def _refactor(self, arg):
pass
#@-node:_refactor
#@+node:__append
def __append(self, arg):
if isinstance(arg, self.__class__):
self.resources += arg.resources
for r in arg.resources:
self._refactor(r)
return
elif isinstance(arg, Resource):
subresources = getattr(arg, "_subresource", None)
if subresources:
self.__append(subresources)
return
else:
self.resources.append(arg)
else:
assert(isinstance(arg, _ResourceGroup))
self.resources.append(arg)
self._refactor(arg)
#@-node:__append
#@+node:__str__
def __str__(self):
op = lower(self.__class__.__name__[0:-13])
return "(" + \
string.join([str(r) for r in self.resources],
" " + op + " ") + \
")"
#@-node:__str__
#@-others
#@-node:class _ResourceGroup
#@+node:class _OrResourceGroup
class _OrResourceGroup(_ResourceGroup):
    """Alternative resources: exactly one member is selected per permutation."""
    #@ @+others
    #@+node:_get_resources
    def _get_resources(self, state):
        # each alternative owns a contiguous range of permutation indices
        # of size _permutation_count(); find the range containing *state*
        remaining = state
        for alternative in self.resources:
            count = alternative._permutation_count()
            if remaining < count:
                return alternative._get_resources(remaining)
            remaining -= count
        assert(0)
    #@-node:_get_resources
    #@+node:_permutation_count
    def _permutation_count(self):
        return sum(alternative._permutation_count()
                   for alternative in self.resources)
    #@-node:_permutation_count
    #@-others
#@-node:class _OrResourceGroup
#@+node:class _AndResourceGroup
class _AndResourceGroup(_ResourceGroup):
    """Team ("and") combination: all members are allocated together.

    A permutation index is decoded mixed-radix via ``factors``.
    """
    #@ @+others
    #@+node:__init__
    def __init__(self, *args):
        # factors[i] is the product of the permutation counts of the
        # resources after position i - 1; factors[0] is the total count
        self.factors = [ 1 ]
        _ResourceGroup.__init__(self, *args)
    #@-node:__init__
    #@+node:_refactor
    def _refactor(self, arg):
        # called by _ResourceGroup.__append for every added resource:
        # scale the existing factors and open a new slot for the newcomer
        count = arg._permutation_count()
        self.factors = [ count * f for f in self.factors ]
        self.factors.append(1)
    #@-node:_refactor
    #@+node:_permutation_count
    #print "AndResourceGroup", count, arg, self.factors
    def _permutation_count(self):
        return self.factors[0]
    #@-node:_permutation_count
    #@+node:_get_resources
    def _get_resources(self, state):
        """delivers None when there are duplicate resources"""
        result = []
        for i in range(1, len(self.factors)):
            f = self.factors[i]
            # Python 2 integer division: extract the sub-state (mixed-radix
            # digit) for resource i - 1
            substate = state / f
            state %= f
            result.append(self.resources[i - 1]._get_resources(substate))
        result = ResourceList(*list(utils.flatten(result)))
        # a team must not contain the same resource twice (Python 2: has_key)
        dupl_test = { }
        for r in result:
            if dupl_test.has_key(r):
                return None
            else:
                dupl_test[r] = 1
        return result
    #@-node:_get_resources
    #@+node:_has_duplicates
    def _has_duplicates(self, state):
        """Return True when permutation *state* allocates a resource twice."""
        resources = self._get_resources(state)
        tmp = { }
        for r in resources:
            if tmp.has_key(r):
                return True
            tmp[r] = 1
        return False
    #@-node:_has_duplicates
    #@-others
#@-node:class _AndResourceGroup
#@-others
#@-node:@file resource.py
#@-leo
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ducseb/domoticz | scripts/python/googlepubsub.py | 34 | 1993 | import httplib2
import argparse
import base64
import csv
import datetime
import random
import sys
import time
import base64
import domoticz
# This script connects Domoticz with Google Cloud PubSub API
# To make it works :
# 1 - Install the Google API Client Python Library : pip install --upgrade google-api-python-client (or see https://cloud.google.com/pubsub/libraries)
# 2 - Activate the Google Cloud PubSub API in the Google Cloud Console
# 3 - Generate a Service account key in the Google Cloud Console and download the private key in JSON format
# 4 - Transfer the json key into the domoticz install dir
# 5 - Set the ENV variable GOOGLE_APPLICATION_CREDENTIALS to the path of your Service account key
# 6 - Create a topic in the "PubSub" Google Cloud Console
# 7 - Modify the variable PUBSUB_TOPICNAME below with the topic name
# Required library to make pubsub google api working fine
from apiclient import discovery
from oauth2client import client as oauth2client
# Required privileges
PUBSUB_SCOPES = ['https://www.googleapis.com/auth/pubsub']
# Topic name defined in the Google Cloud PubSub Console
PUBSUB_TOPICNAME = 'projects/affable-enigma-136123/topics/domoticz'
def create_pubsub_client(http=None):
    """Build an authenticated Google Cloud Pub/Sub API client.

    Uses the Application Default Credentials (see the
    GOOGLE_APPLICATION_CREDENTIALS notes at the top of this file) and
    scopes them to the Pub/Sub API when the credential type requires it.

    :param http: optional pre-configured httplib2.Http instance
    :return: API client service object for the 'pubsub' v1 API
    """
    credentials = oauth2client.GoogleCredentials.get_application_default()
    if credentials.create_scoped_required():
        credentials = credentials.create_scoped(PUBSUB_SCOPES)
    if not http:
        http = httplib2.Http()
    credentials.authorize(http)
    return discovery.build('pubsub', 'v1', http=http)
def publish_message(client,topicname,message):
    """Publish a single *message* on the Pub/Sub topic *topicname*.

    The REST API transports payloads base64-encoded.
    NOTE(review): base64.b64encode expects bytes on Python 3; this script
    appears to target Python 2 -- confirm before porting.

    :param client: client created by create_pubsub_client()
    :param topicname: full topic path 'projects/<project>/topics/<topic>'
    :param message: raw message payload
    """
    message1 = base64.b64encode(message)
    body = {
        'messages': [
            {'data': message1}
        ]
    }
    # response is currently discarded; errors surface as exceptions
    resp = client.projects().topics().publish(topic=topicname, body=body).execute()
def main(argv):
    """Entry point: publish the first command line argument as a message.

    Bug fix: the original passed the undefined global ``data`` to
    publish_message, raising NameError on every run; the payload now
    comes from the command line (empty message when no argument is given).

    :param argv: sys.argv-style argument list
    """
    client = create_pubsub_client()
    message = argv[1] if len(argv) > 1 else ''
    publish_message(client, PUBSUB_TOPICNAME, message)
if __name__ == '__main__':
    main(sys.argv)
| gpl-3.0 |
maestrano/openerp | openerp/tools/image.py | 20 | 8412 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-today OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import io
import StringIO
from PIL import Image
from PIL import ImageOps
from random import random
# ----------------------------------------
# Image resizing
# ----------------------------------------
def image_resize_image(base64_source, size=(1024, 1024), encoding='base64', filetype='PNG', avoid_if_small=False):
    """ Function to resize an image. The image will be resized to the given
    size, while keeping the aspect ratios, and holes in the image will be
    filled with transparent background. The image will not be stretched if
    smaller than the expected size.
    Steps of the resizing:
    - Compute width and height if not specified.
    - if avoid_if_small: if both image sizes are smaller than the requested
    sizes, the original image is returned. This is used to avoid adding
    transparent content around images that we do not want to alter but
    just resize if too big. This is used for example when storing images
    in the 'image' field: we keep the original image, resized to a maximal
    size, without adding transparent content around it if smaller.
    - create a thumbnail of the source image through using the thumbnail
    function. Aspect ratios are preserved when using it. Note that if the
    source image is smaller than the expected size, it will not be
    extended, but filled to match the size.
    - create a transparent background that will hold the final image.
    - paste the thumbnail on the transparent background and center it.
    :param base64_source: base64-encoded version of the source
    image; if False, returns False
    :param size: 2-tuple(width, height). A None value for any of width or
    height mean an automatically computed value based respectivelly
    on height or width of the source image.
    :param encoding: the output encoding
    :param filetype: the output filetype
    :param avoid_if_small: do not resize if image height and width
    are smaller than the expected size.
    """
    if not base64_source:
        return False
    if size == (None, None):
        return base64_source
    image_stream = io.BytesIO(base64_source.decode(encoding))
    image = Image.open(image_stream)
    asked_width, asked_height = size
    # compute a missing dimension from the source aspect ratio
    if asked_width is None:
        asked_width = int(image.size[0] * (float(asked_height) / image.size[1]))
    if asked_height is None:
        asked_height = int(image.size[1] * (float(asked_width) / image.size[0]))
    size = asked_width, asked_height
    # check image size: do not create a thumbnail if avoiding smaller images
    if avoid_if_small and image.size[0] <= size[0] and image.size[1] <= size[1]:
        return base64_source
    # Fix: use `!=` instead of the Python-2-only `<>` operator (identical
    # semantics on Python 2, and forward-compatible)
    if image.size != size:
        # If you need faster thumbnails you may use use Image.NEAREST
        image = ImageOps.fit(image, size, Image.ANTIALIAS)
    if image.mode not in ["1", "L", "P", "RGB", "RGBA"]:
        image = image.convert("RGB")
    background_stream = StringIO.StringIO()
    image.save(background_stream, filetype)
    return background_stream.getvalue().encode(encoding)
def image_resize_image_big(base64_source, size=(1204, 1204), encoding='base64', filetype='PNG', avoid_if_small=True):
    """ Wrapper on image_resize_image, to resize images larger than the standard
    'big' image size: 1024x1024px.
    NOTE(review): the default ``size`` is (1204, 1204) although the standard
    documented here is 1024x1024 -- this looks like a digit-swap typo, but
    changing it would alter stored image sizes; confirm before fixing.
    :param size, encoding, filetype, avoid_if_small: refer to image_resize_image
    """
    return image_resize_image(base64_source, size, encoding, filetype, avoid_if_small)
def image_resize_image_medium(base64_source, size=(128, 128), encoding='base64', filetype='PNG', avoid_if_small=False):
    """ Wrapper on image_resize_image, to resize to the standard 'medium'
    image size: 128x128px (the previous text said 180x180, which did not
    match the actual default).
    :param size, encoding, filetype, avoid_if_small: refer to image_resize_image
    """
    return image_resize_image(base64_source, size, encoding, filetype, avoid_if_small)
def image_resize_image_small(base64_source, size=(64, 64), encoding='base64', filetype='PNG', avoid_if_small=False):
    """ Wrapper on image_resize_image, to resize to the standard 'small' image
    size: 64x64px (the previous text said 50x50, which did not match the
    actual default).
    :param size, encoding, filetype, avoid_if_small: refer to image_resize_image
    """
    return image_resize_image(base64_source, size, encoding, filetype, avoid_if_small)
# ----------------------------------------
# Colors
# ---------------------------------------
def image_colorize(original, randomize=True, color=(255, 255, 255)):
    """ Fill the transparent background of an image with a color.

    :param original: binary content of the original image file
    :param randomize: pick a random background color instead of *color*
    :param color: background color used when *randomize* is False
    :return: binary PNG content of the colorized image
    """
    source = Image.open(io.BytesIO(original))
    canvas = Image.new('RGB', source.size)
    # choose the background color: each random channel lies in [32, 224)
    background = color
    if randomize:
        background = (int(random() * 192 + 32),
                      int(random() * 192 + 32),
                      int(random() * 192 + 32))
    canvas.paste(background)
    # paste the original on top, using the image itself as the paste mask
    canvas.paste(source, mask=source)
    out = StringIO.StringIO()
    canvas.save(out, 'PNG')
    return out.getvalue()
# ----------------------------------------
# Misc image tools
# ---------------------------------------
def image_get_resized_images(base64_source, return_big=False, return_medium=True, return_small=True,
                             big_name='image', medium_name='image_medium', small_name='image_small',
                             avoid_resize_big=True, avoid_resize_medium=False, avoid_resize_small=False):
    """ Standard tool function returning the requested resized versions of a
    source image, keyed for direct use in functional image fields.

    The defaults fit the getters of functional image fields (e.g. res.users
    or res.partner): only image_medium and image_small are computed.

    :param base64_source: base64-encoded source image; if False, every
    returned value will be False
    :param return_{..}: if set, compute and return the matching resizing
    :param {..}_name: dictionary key for each resized image;
    'image', 'image_medium' and 'image_small' by default.
    :param avoid_resize_[..]: see the avoid_if_small parameter of
    image_resize_image
    :return: dictionary of the computed resizings
    """
    requested = (
        (return_big, big_name, image_resize_image_big, avoid_resize_big),
        (return_medium, medium_name, image_resize_image_medium, avoid_resize_medium),
        (return_small, small_name, image_resize_image_small, avoid_resize_small),
    )
    return_dict = dict()
    for wanted, field_name, resizer, avoid in requested:
        if wanted:
            return_dict[field_name] = resizer(base64_source, avoid_if_small=avoid)
    return return_dict
if __name__=="__main__":
    # Manual test: resize SRC.png to 128x100 and write the result to DEST.png.
    import sys
    assert len(sys.argv)==3, 'Usage to Test: image.py SRC.png DEST.png'
    # Python 2 only: `file` is the built-in alias for open(), and the
    # 'base64' str codec does not exist on Python 3 strings.
    img = file(sys.argv[1],'rb').read().encode('base64')
    new = image_resize_image(img, (128,100))
    file(sys.argv[2], 'wb').write(new.decode('base64'))
| agpl-3.0 |
proxysh/Safejumper-for-Mac | buildlinux/env64/lib/python2.7/site-packages/cryptography/hazmat/bindings/openssl/_conditional.py | 13 | 9173 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
# This is a temporary copy of all the CONDITIONAL_NAMES from _cffi_src so
# we can loop over them and delete them at runtime. It will be removed when
# cffi supports #if in cdef
# Maps each Cryptography_HAS_* feature flag to the OpenSSL names that only
# exist when that feature was compiled in; consumers remove the names whose
# flag is false at runtime.  Quoting style and trailing commas normalized
# for consistency (nine entries used single quotes; the values are
# identical strings either way).
CONDITIONAL_NAMES = {
    "Cryptography_HAS_CMAC": [
        "CMAC_CTX_new",
        "CMAC_Init",
        "CMAC_Update",
        "CMAC_Final",
        "CMAC_CTX_copy",
        "CMAC_CTX_free",
    ],
    "Cryptography_HAS_CMS": [
        "BIO_new_CMS",
        "i2d_CMS_bio_stream",
        "PEM_write_bio_CMS_stream",
        "CMS_final",
        "CMS_sign",
        "CMS_verify",
        "CMS_encrypt",
        "CMS_decrypt",
        "CMS_add1_signer",
        "CMS_TEXT",
        "CMS_NOCERTS",
        "CMS_NO_CONTENT_VERIFY",
        "CMS_NO_ATTR_VERIFY",
        "CMS_NOSIGS",
        "CMS_NOINTERN",
        "CMS_NO_SIGNER_CERT_VERIFY",
        "CMS_NOVERIFY",
        "CMS_DETACHED",
        "CMS_BINARY",
        "CMS_NOATTR",
        "CMS_NOSMIMECAP",
        "CMS_NOOLDMIMETYPE",
        "CMS_CRLFEOL",
        "CMS_STREAM",
        "CMS_NOCRL",
        "CMS_PARTIAL",
        "CMS_REUSE_DIGEST",
        "CMS_USE_KEYID",
        "CMS_DEBUG_DECRYPT",
    ],
    "Cryptography_HAS_EC": [
        "OPENSSL_EC_NAMED_CURVE",
        "EC_GROUP_new",
        "EC_GROUP_free",
        "EC_GROUP_clear_free",
        "EC_GROUP_new_curve_GFp",
        "EC_GROUP_new_by_curve_name",
        "EC_GROUP_set_curve_GFp",
        "EC_GROUP_get_curve_GFp",
        "EC_GROUP_method_of",
        "EC_GROUP_get0_generator",
        "EC_GROUP_get_curve_name",
        "EC_GROUP_get_degree",
        "EC_GROUP_set_asn1_flag",
        "EC_GROUP_set_point_conversion_form",
        "EC_KEY_new",
        "EC_KEY_free",
        "EC_get_builtin_curves",
        "EC_KEY_new_by_curve_name",
        "EC_KEY_copy",
        "EC_KEY_dup",
        "EC_KEY_up_ref",
        "EC_KEY_set_group",
        "EC_KEY_get0_private_key",
        "EC_KEY_set_private_key",
        "EC_KEY_set_public_key",
        "EC_KEY_get_enc_flags",
        "EC_KEY_set_enc_flags",
        "EC_KEY_set_conv_form",
        "EC_KEY_set_asn1_flag",
        "EC_KEY_precompute_mult",
        "EC_KEY_generate_key",
        "EC_KEY_check_key",
        "EC_POINT_new",
        "EC_POINT_free",
        "EC_POINT_clear_free",
        "EC_POINT_copy",
        "EC_POINT_dup",
        "EC_POINT_method_of",
        "EC_POINT_set_to_infinity",
        "EC_POINT_set_Jprojective_coordinates_GFp",
        "EC_POINT_get_Jprojective_coordinates_GFp",
        "EC_POINT_set_affine_coordinates_GFp",
        "EC_POINT_get_affine_coordinates_GFp",
        "EC_POINT_set_compressed_coordinates_GFp",
        "EC_POINT_point2oct",
        "EC_POINT_oct2point",
        "EC_POINT_point2bn",
        "EC_POINT_bn2point",
        "EC_POINT_point2hex",
        "EC_POINT_hex2point",
        "EC_POINT_add",
        "EC_POINT_dbl",
        "EC_POINT_invert",
        "EC_POINT_is_at_infinity",
        "EC_POINT_is_on_curve",
        "EC_POINT_cmp",
        "EC_POINT_make_affine",
        "EC_POINTs_make_affine",
        "EC_POINTs_mul",
        "EC_POINT_mul",
        "EC_GROUP_precompute_mult",
        "EC_GROUP_have_precompute_mult",
        "EC_GFp_simple_method",
        "EC_GFp_mont_method",
        "EC_GFp_nist_method",
        "EC_METHOD_get_field_type",
        "EVP_PKEY_assign_EC_KEY",
        "EVP_PKEY_get1_EC_KEY",
        "EVP_PKEY_set1_EC_KEY",
        "PEM_write_bio_ECPrivateKey",
        "i2d_EC_PUBKEY",
        "d2i_EC_PUBKEY",
        "d2i_EC_PUBKEY_bio",
        "i2d_EC_PUBKEY_bio",
        "d2i_ECPrivateKey",
        "d2i_ECPrivateKey_bio",
        "i2d_ECPrivateKey",
        "i2d_ECPrivateKey_bio",
        "i2o_ECPublicKey",
        "o2i_ECPublicKey",
        "SSL_CTX_set_tmp_ecdh",
        "POINT_CONVERSION_COMPRESSED",
        "POINT_CONVERSION_UNCOMPRESSED",
        "POINT_CONVERSION_HYBRID",
    ],
    "Cryptography_HAS_EC_1_0_1": [
        "EC_KEY_get_flags",
        "EC_KEY_set_flags",
        "EC_KEY_clear_flags",
        "EC_KEY_set_public_key_affine_coordinates",
    ],
    "Cryptography_HAS_EC2M": [
        "EC_GF2m_simple_method",
        "EC_POINT_set_affine_coordinates_GF2m",
        "EC_POINT_get_affine_coordinates_GF2m",
        "EC_POINT_set_compressed_coordinates_GF2m",
        "EC_GROUP_set_curve_GF2m",
        "EC_GROUP_get_curve_GF2m",
        "EC_GROUP_new_curve_GF2m",
    ],
    "Cryptography_HAS_EC_1_0_2": [
        "EC_curve_nid2nist",
    ],
    "Cryptography_HAS_ECDH": [
        "ECDH_compute_key",
    ],
    "Cryptography_HAS_SET_ECDH_AUTO": [
        "SSL_CTX_set_ecdh_auto",
    ],
    "Cryptography_HAS_ECDSA": [
        "ECDSA_SIG_new",
        "ECDSA_SIG_free",
        "i2d_ECDSA_SIG",
        "d2i_ECDSA_SIG",
        "ECDSA_do_sign",
        "ECDSA_do_sign_ex",
        "ECDSA_do_verify",
        "ECDSA_sign_setup",
        "ECDSA_sign",
        "ECDSA_sign_ex",
        "ECDSA_verify",
        "ECDSA_size",
    ],
    "Cryptography_HAS_ENGINE_CRYPTODEV": [
        "ENGINE_load_cryptodev",
    ],
    "Cryptography_HAS_EC_CODES": [
        "EC_R_UNKNOWN_GROUP",
        "EC_F_EC_GROUP_NEW_BY_CURVE_NAME",
    ],
    "Cryptography_HAS_RSA_R_PKCS_DECODING_ERROR": [
        "RSA_R_PKCS_DECODING_ERROR",
    ],
    "Cryptography_HAS_GCM": [
        "EVP_CTRL_GCM_GET_TAG",
        "EVP_CTRL_GCM_SET_TAG",
        "EVP_CTRL_GCM_SET_IVLEN",
    ],
    "Cryptography_HAS_EGD": [
        "RAND_egd",
        "RAND_egd_bytes",
        "RAND_query_egd_bytes",
    ],
    "Cryptography_HAS_MGF1_MD": [
        "EVP_PKEY_CTX_set_rsa_mgf1_md",
    ],
    "Cryptography_HAS_RSA_OAEP_MD": [
        "EVP_PKEY_CTX_set_rsa_oaep_md",
    ],
    "Cryptography_HAS_TLSv1_1": [
        "SSL_OP_NO_TLSv1_1",
        "TLSv1_1_method",
        "TLSv1_1_server_method",
        "TLSv1_1_client_method",
    ],
    "Cryptography_HAS_TLSv1_2": [
        "SSL_OP_NO_TLSv1_2",
        "TLSv1_2_method",
        "TLSv1_2_server_method",
        "TLSv1_2_client_method",
    ],
    "Cryptography_HAS_SSL3_METHOD": [
        "SSLv3_method",
        "SSLv3_client_method",
        "SSLv3_server_method",
    ],
    "Cryptography_HAS_RELEASE_BUFFERS": [
        "SSL_MODE_RELEASE_BUFFERS",
    ],
    "Cryptography_HAS_OP_NO_COMPRESSION": [
        "SSL_OP_NO_COMPRESSION",
    ],
    "Cryptography_HAS_SSL_OP_MSIE_SSLV2_RSA_PADDING": [
        "SSL_OP_MSIE_SSLV2_RSA_PADDING",
    ],
    "Cryptography_HAS_SSL_OP_NO_TICKET": [
        "SSL_OP_NO_TICKET",
    ],
    "Cryptography_HAS_NETBSD_D1_METH": [
        "DTLSv1_method",
    ],
    "Cryptography_HAS_NEXTPROTONEG": [
        "SSL_CTX_set_next_protos_advertised_cb",
        "SSL_CTX_set_next_proto_select_cb",
        "SSL_select_next_proto",
        "SSL_get0_next_proto_negotiated",
    ],
    "Cryptography_HAS_ALPN": [
        "SSL_CTX_set_alpn_protos",
        "SSL_set_alpn_protos",
        "SSL_CTX_set_alpn_select_cb",
        "SSL_get0_alpn_selected",
    ],
    "Cryptography_HAS_COMPRESSION": [
        "SSL_get_current_compression",
        "SSL_get_current_expansion",
        "SSL_COMP_get_name",
    ],
    "Cryptography_HAS_GET_SERVER_TMP_KEY": [
        "SSL_get_server_tmp_key",
    ],
    "Cryptography_HAS_102_VERIFICATION_ERROR_CODES": [
        "X509_V_ERR_SUITE_B_INVALID_VERSION",
        "X509_V_ERR_SUITE_B_INVALID_ALGORITHM",
        "X509_V_ERR_SUITE_B_INVALID_CURVE",
        "X509_V_ERR_SUITE_B_INVALID_SIGNATURE_ALGORITHM",
        "X509_V_ERR_SUITE_B_LOS_NOT_ALLOWED",
        "X509_V_ERR_SUITE_B_CANNOT_SIGN_P_384_WITH_P_256",
        "X509_V_ERR_HOSTNAME_MISMATCH",
        "X509_V_ERR_EMAIL_MISMATCH",
        "X509_V_ERR_IP_ADDRESS_MISMATCH",
    ],
    "Cryptography_HAS_102_VERIFICATION_PARAMS": [
        "X509_V_FLAG_SUITEB_128_LOS_ONLY",
        "X509_V_FLAG_SUITEB_192_LOS",
        "X509_V_FLAG_SUITEB_128_LOS",
        "X509_VERIFY_PARAM_set1_host",
        "X509_VERIFY_PARAM_set1_email",
        "X509_VERIFY_PARAM_set1_ip",
        "X509_VERIFY_PARAM_set1_ip_asc",
        "X509_VERIFY_PARAM_set_hostflags",
    ],
    "Cryptography_HAS_X509_V_FLAG_TRUSTED_FIRST": [
        "X509_V_FLAG_TRUSTED_FIRST",
    ],
    "Cryptography_HAS_X509_V_FLAG_PARTIAL_CHAIN": [
        "X509_V_FLAG_PARTIAL_CHAIN",
    ],
    "Cryptography_HAS_SET_CERT_CB": [
        "SSL_CTX_set_cert_cb",
        "SSL_set_cert_cb",
    ],
    "Cryptography_HAS_AES_CTR128_ENCRYPT": [
        "AES_ctr128_encrypt",
    ],
    "Cryptography_HAS_SSL_ST": [
        "SSL_ST_BEFORE",
        "SSL_ST_OK",
        "SSL_ST_INIT",
        "SSL_ST_RENEGOTIATE",
    ],
    "Cryptography_HAS_TLS_ST": [
        "TLS_ST_BEFORE",
        "TLS_ST_OK",
    ],
    "Cryptography_HAS_LOCKING_CALLBACKS": [
        "CRYPTO_LOCK",
        "CRYPTO_UNLOCK",
        "CRYPTO_READ",
        "CRYPTO_LOCK_SSL",
        "CRYPTO_lock",
    ],
    "Cryptography_HAS_SCRYPT": [
        "EVP_PBE_scrypt",
    ],
    "Cryptography_HAS_NPN_NEGOTIATED": [
        "OPENSSL_NPN_NEGOTIATED",
    ],
}
| gpl-2.0 |
rraval/rdio-gmusic | rdio_gmusic.py | 1 | 5816 | from __future__ import unicode_literals
# FIXME: monkey patch the track adder to not throw an exception on missing keys
# to test, try adding Tp3e52picsj55ozzmjtxwb5xpwa
import gmusicapi.protocol
@staticmethod
def build_track_add(store_track_info):
    """Build the mutate-call payload for adding a store track.

    Replacement for gmusicapi's builder that tolerates store tracks with
    missing keys: unwanted keys are dropped only if present, and every
    field the API requires is backfilled with a default.
    """
    import copy
    # Keys the API rejects on an add request.
    strip_keys = ('kind', 'trackAvailableForPurchase',
                  'albumAvailableForPurchase', 'albumArtRef',
                  'artistId')
    # Required fields and their fallback values.
    defaults = {
        'playCount': 0,
        'rating': '0',
        'genre': '',
        'lastModifiedTimestamp': '0',
        'deleted': False,
        'beatsPerMinute': -1,
        'composer': '',
        'creationTimestamp': '-1',
        'totalDiscCount': 0,
    }
    payload = copy.deepcopy(store_track_info)
    for unwanted in strip_keys:
        payload.pop(unwanted, None)
    for field, fallback in defaults.items():
        payload.setdefault(field, fallback)
    # TODO unsure about this
    payload['trackType'] = 8
    return {'create': payload}
# Install the lenient builder on the real client class (monkey patch).
gmusicapi.protocol.mobileclient.BatchMutateTracks.build_track_add = build_track_add
# non fugly code starts here
import itertools
import json
import os
import urllib
from collections import defaultdict
import click
import oauth2
from gmusicapi.clients import Mobileclient
# number of tracks to request from rdio at a time
RDIO_CHUNK_SIZE = 250
# oauth client identification
# NOTE(review): credentials are hard-coded in source; consider moving them
# to configuration or environment variables if this secret still matters.
RDIO_KEY = '3nq3y49bhhbwexffd5fym3mz'
RDIO_SECRET = 'PdVBSRsXQv'
class Rdio(object):
    """Minimal OAuth-signed client for the Rdio web service API."""

    def __init__(self, key, secret):
        self.client = oauth2.Client(oauth2.Consumer(key, secret))

    def request(self, req):
        """POST *req* to the API endpoint and return the decoded 'result'."""
        raw = self.client.request(
            'http://api.rdio.com/1/', 'POST', urllib.urlencode(req))
        body = raw[1]
        return json.loads(body)['result']

    def genTracks(self, user):
        """Yield every track in *user*'s collection, fetched page by page."""
        account = self.request({
            'method': 'findUser',
            'vanityName': user,
        })
        fetched = 0
        while True:
            page = self.request({
                'method': 'getTracksInCollection',
                'user': account['key'],
                'count': RDIO_CHUNK_SIZE,
                'start': fetched,
            })
            if not page:
                return
            # no yield from :(
            for track in page:
                yield track
            fetched += len(page)
class GMusic(object):
    """Thin wrapper around gmusicapi's Mobileclient."""

    def __init__(self, user, password):
        self.client = Mobileclient()
        self.client.login(user, password, Mobileclient.FROM_MAC_ADDRESS)

    def genTracks(self):
        """Yield every song currently in the user's library."""
        for batch in self.client.get_all_songs(incremental=True):
            for song in batch:
                yield song

    def findTrack(self, rdio_track, keys=('artist', 'album', 'name',)):
        """Search All Access for rdio_track; on a miss, retry with a looser
        query by dropping the leading key.  Returns None when all queries
        come up empty.
        """
        if not keys:
            return None
        query = ' '.join(rdio_track[k] for k in keys)
        hits = self.client.search_all_access(query)['song_hits']
        if not hits:
            return self.findTrack(rdio_track, keys[1:])
        # FIXME: is the best match always first?
        return hits[0]['track']

    def addTrack(self, google_track):
        self.client.add_aa_track(google_track['nid'])
class ChangeTracker(object):
    """Accumulates tracks grouped by album for a final summary report."""

    def __init__(self, tag):
        self.tag = tag
        self.items = defaultdict(list)
        self.item_count = 0

    def add(self, rdio_track, google_track):
        """Record one rdio track and, when matched, its Google counterpart."""
        self.item_count += 1
        entry = {'rdio': rdio_track['name']}
        if google_track:
            entry['google'] = google_track['title']
        album_key = '{} [{}]'.format(rdio_track['album'], rdio_track['artist'])
        self.items[album_key].append(entry)

    def summary(self, total):
        """Return report lines: a percentage header, then an album tree."""
        out = ['']
        out.append('----- {} {} ({:.2f}%) -----'.format(
            self.item_count,
            self.tag,
            (float(self.item_count) / total) * 100,
        ))
        out.append('')
        for album_key, entries in self.items.iteritems():
            out.append('+ {}'.format(album_key))
            for entry in entries:
                out.append('|-> {}'.format(entry['rdio']))
                if 'google' in entry:
                    out.append('| +=> {}'.format(entry['google']))
            out.append('|')
        out.append('')
        return out
@click.command()
@click.argument('rdio_user')
@click.argument('google_user')
def main(rdio_user, google_user):
    """Copy RDIO_USER's collection into GOOGLE_USER's Google Music library.

    Prompts for an app-specific Google password, indexes the existing
    Google Music library, then walks the Rdio collection adding every
    matched track not already present, and finally pages a report of
    not-found / added / skipped tracks.
    """
    click.echo('Create an App Specific Password to use with your Google Account')
    click.echo('See https://support.google.com/accounts/answer/185833')
    click.echo()
    google_password = click.prompt('{} Password'.format(google_user), hide_input=True)
    click.echo()
    rdio = Rdio(RDIO_KEY, RDIO_SECRET)
    gmusic = GMusic(google_user, google_password)
    # Index the current library by store id ('nid') so duplicates are skipped.
    existing_tracks = {}
    with click.progressbar(
        gmusic.genTracks(),
        label='Existing Google Music',
        show_pos=True
    ) as bar:
        for track in bar:
            # Uploaded (non-store) tracks carry no 'nid' and cannot collide.
            if 'nid' in track:
                existing_tracks[track['nid']] = track
    skip = ChangeTracker('Skipped')
    added = ChangeTracker('Added')
    notfound = ChangeTracker('Not Found')
    with click.progressbar(
        rdio.genTracks(rdio_user),
        label='Rdio -> Google Music',
        show_pos=True
    ) as bar:
        for track in bar:
            match = gmusic.findTrack(track)
            if match is None:
                notfound.add(track, None)
            elif match['nid'] in existing_tracks:
                skip.add(track, match)
            else:
                gmusic.addTrack(match)
                added.add(track, match)
    total = sum((added.item_count, skip.item_count, notfound.item_count))
    click.echo_via_pager('\n'.join(itertools.chain(
        notfound.summary(total),
        added.summary(total),
        skip.summary(total),
    )))
if __name__ == '__main__':
    main()
| mit |
KellyChan/Python | javascript/backbone/backbone-templates/backbone-fileupload/venvs/lib/python2.7/site-packages/django/contrib/gis/gdal/base.py | 398 | 1143 | from ctypes import c_void_p
from types import NoneType
from django.contrib.gis.gdal.error import GDALException
class GDALBase(object):
    """
    Base object for GDAL objects that has a pointer access property
    that controls access to the underlying C pointer.
    """
    # NULL until a subclass assigns a real C pointer.
    _ptr = None
    # Default allowed pointer type.
    ptr_type = c_void_p

    def _get_ptr(self):
        # Never hand out a NULL pointer -- passing NULL to GDAL routines
        # is very bad, so fail loudly instead.
        if not self._ptr:
            raise GDALException('GDAL %s pointer no longer valid.' % self.__class__.__name__)
        return self._ptr

    def _set_ptr(self, ptr):
        # Accept a raw integer (wrapped into ptr_type), an already-typed
        # pointer, or None (NULL); anything else is rejected.
        if isinstance(ptr, (int, long)):
            self._ptr = self.ptr_type(ptr)
        elif isinstance(ptr, (self.ptr_type, NoneType)):
            self._ptr = ptr
        else:
            raise TypeError('Incompatible pointer type')

    ptr = property(_get_ptr, _set_ptr)
| mit |
ekalosak/boto | boto/cloudfront/logging.py | 219 | 1557 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class LoggingInfo(object):
    """Parsed <Logging> element of a CloudFront distribution config."""

    # SAX-style tags mapped onto the attribute names we store them under.
    _TAG_TO_ATTR = {'Bucket': 'bucket', 'Prefix': 'prefix'}

    def __init__(self, bucket='', prefix=''):
        self.bucket = bucket
        self.prefix = prefix

    def startElement(self, name, attrs, connection):
        # No nested elements to delegate to.
        return None

    def endElement(self, name, value, connection):
        # Known tags land on their canonical attribute; unknown tags are
        # kept verbatim as attributes so no data is dropped.
        setattr(self, self._TAG_TO_ATTR.get(name, name), value)
| mit |
Raghbinho/MTT | polls/forms.py | 1 | 2289 | from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.forms import UserCreationForm
# import re
from django import forms
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
class LoginForm(AuthenticationForm):
    """Bootstrap-styled login form; 'Matricule' is the login identifier."""
    # Both widgets carry the 'form-control' class so they render with the
    # site's Bootstrap theme.
    username = forms.CharField(label="Matricule", max_length=30,
        widget=forms.TextInput(attrs={'class': 'form-control', 'name': 'username'}))
    password = forms.CharField(label="Password", max_length=30,
        widget=forms.PasswordInput(attrs={'class': 'form-control', 'name': 'password'}))
class RegistrationForm(UserCreationForm):
    """Signup form: matricule (username), email and double password entry.

    All widgets carry Bootstrap's 'form-control' class so the form renders
    consistently with LoginForm.  The duplicate 'required'/'max_length'
    keys that appeared twice in the password widgets' attrs dicts have been
    removed; the duplicated values were identical, so rendering is
    unchanged.
    """
    username = forms.RegexField(
        regex=r'^\w+$',
        widget=forms.TextInput(attrs={
            'required': 'True', 'max_length': '30',
            'class': 'form-control', 'name': 'username'}),
        label=_("Matricule"),
        error_messages={
            'invalid': _("This value must contain only letters, numbers and underscores.")})
    email = forms.EmailField(
        widget=forms.TextInput(attrs={
            'required': 'True', 'max_length': '30',
            'class': 'form-control', 'name': 'email'}),
        label=_("Email address"))
    # NOTE(review): both password widgets pass 'name': 'matricule' in attrs,
    # which looks copy/pasted from the username field -- confirm the rendered
    # <input name=...> is really intended before changing it.
    password1 = forms.CharField(
        widget=forms.PasswordInput(attrs={
            'required': 'True', 'max_length': '30',
            'class': 'form-control', 'name': 'matricule',
            'render_value': 'False'}),
        label=_("Password"))
    password2 = forms.CharField(
        widget=forms.PasswordInput(attrs={
            'required': 'True', 'max_length': '30',
            'class': 'form-control', 'name': 'matricule',
            'render_value': 'False'}),
        label=_("Password (again)"))
    # Parked validators kept from the original; enabling them would add a
    # case-insensitive username uniqueness check and a password match check.
    # def clean_username(self):
    #     try:
    #         user = User.objects.get(username__iexact=self.cleaned_data['username'])
    #     except User.DoesNotExist:
    #         return self.cleaned_data['username']
    #     raise forms.ValidationError(_("The username already exists. Please try another one."))
    #
    # def clean(self):
    #     if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:
    #         if self.cleaned_data['password1'] != self.cleaned_data['password2']:
    #             raise forms.ValidationError(_("The two password fields did not match."))
    #     return self.cleaned_data
| gpl-2.0 |
kularny/GeniSys.Kernel-old | toolchain/arm-cortex-a9/share/gdb/python/gdb/command/explore.py | 126 | 26824 | # GDB 'explore' command.
# Copyright (C) 2012-2014 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Implementation of the GDB 'explore' command using the GDB Python API."""
import gdb
import sys
# Compatibility shim: the interactive prompts below call raw_input(), which
# only exists on Python 2.
if sys.version_info[0] > 2:
    # Python 3 renamed raw_input to input
    raw_input = input
class Explorer(object):
    """Internal class which invokes other explorers.

    Dispatches a gdb.Value or gdb.Type to the explorer class registered
    for its type code in type_code_to_explorer_map.
    """
    # This map is filled by the Explorer.init_env() function
    type_code_to_explorer_map = { }
    # Type codes handled by ScalarExplorer (see is_scalar_type()).
    _SCALAR_TYPE_LIST = (
        gdb.TYPE_CODE_CHAR,
        gdb.TYPE_CODE_INT,
        gdb.TYPE_CODE_BOOL,
        gdb.TYPE_CODE_FLT,
        gdb.TYPE_CODE_VOID,
        gdb.TYPE_CODE_ENUM,
    )
    @staticmethod
    def guard_expr(expr):
        """Parenthesize 'expr' unless it is a plain identifier (only
        letters, digits and underscores) or already starts with '(' and
        ends with ')'.

        NOTE(review): an expression like "(a) + (b)" starts with '(' and
        ends with ')' and is therefore NOT guarded -- presumably fine for
        the expressions this command builds, but confirm before reusing.
        """
        length = len(expr)
        guard = False
        if expr[0] == '(' and expr[length-1] == ')':
            pass
        else:
            i = 0
            while i < length:
                c = expr[i]
                if (c == '_' or ('a' <= c and c <= 'z') or
                    ('A' <= c and c <= 'Z') or ('0' <= c and c <= '9')):
                    pass
                else:
                    guard = True
                    break
                i += 1
        if guard:
            return "(" + expr + ")"
        else:
            return expr
    @staticmethod
    def explore_expr(expr, value, is_child):
        """Main function to explore an expression value.

        Arguments:
            expr: The expression string that is being explored.
            value: The gdb.Value value of the expression.
            is_child: Boolean value to indicate if the expression is a child.
                      An expression is a child if it is derived from the main
                      expression entered by the user.  For example, if the
                      user entered an expression which evaluates to a struct,
                      then when exploring the fields of the struct, is_child
                      is set to True internally.

        Returns:
            No return value.
        """
        type_code = value.type.code
        if type_code in Explorer.type_code_to_explorer_map:
            explorer_class = Explorer.type_code_to_explorer_map[type_code]
            # Explorers return True to request re-exploration of the same
            # value (e.g. to pick another array index).
            while explorer_class.explore_expr(expr, value, is_child):
                pass
        else:
            print ("Explorer for type '%s' not yet available.\n" %
                   str(value.type))
    @staticmethod
    def explore_type(name, datatype, is_child):
        """Main function to explore a data type.

        Arguments:
            name: The string representing the path to the data type being
                  explored.
            datatype: The gdb.Type value of the data type being explored.
            is_child: Boolean value to indicate if the name is a child.
                      A name is a child if it is derived from the main name
                      entered by the user.  For example, if the user entered
                      the name of a struct type, then when exploring the
                      fields of the struct, is_child is set to True
                      internally.

        Returns:
            No return value.
        """
        type_code = datatype.code
        if type_code in Explorer.type_code_to_explorer_map:
            explorer_class = Explorer.type_code_to_explorer_map[type_code]
            # True from an explorer means "explore this type again".
            while explorer_class.explore_type(name, datatype, is_child):
                pass
        else:
            print ("Explorer for type '%s' not yet available.\n" %
                   str(datatype))
    @staticmethod
    def init_env():
        """Initializes the Explorer environment.
        This function should be invoked before starting any exploration. If
        invoked before an exploration, it need not be invoked for subsequent
        explorations.
        """
        Explorer.type_code_to_explorer_map = {
            gdb.TYPE_CODE_CHAR : ScalarExplorer,
            gdb.TYPE_CODE_INT : ScalarExplorer,
            gdb.TYPE_CODE_BOOL : ScalarExplorer,
            gdb.TYPE_CODE_FLT : ScalarExplorer,
            gdb.TYPE_CODE_VOID : ScalarExplorer,
            gdb.TYPE_CODE_ENUM : ScalarExplorer,
            gdb.TYPE_CODE_STRUCT : CompoundExplorer,
            gdb.TYPE_CODE_UNION : CompoundExplorer,
            gdb.TYPE_CODE_PTR : PointerExplorer,
            gdb.TYPE_CODE_REF : ReferenceExplorer,
            gdb.TYPE_CODE_TYPEDEF : TypedefExplorer,
            gdb.TYPE_CODE_ARRAY : ArrayExplorer
        }
    @staticmethod
    def is_scalar_type(type):
        """Checks whether a type is a scalar type, i.e. its type code is one
        of _SCALAR_TYPE_LIST (char/int/bool/float/void/enum).

        Arguments:
            type: The type to be checked.

        Returns:
            'True' if 'type' is a scalar type. 'False' otherwise.
        """
        return type.code in Explorer._SCALAR_TYPE_LIST
    @staticmethod
    def return_to_parent_value():
        """A utility function which prints that the current exploration session
        is returning to the parent value. Useful when exploring values.
        """
        print ("\nReturning to parent value...\n")
    @staticmethod
    def return_to_parent_value_prompt():
        """A utility function which prompts the user to press the 'enter' key
        so that the exploration session can shift back to the parent value.
        Useful when exploring values.
        """
        raw_input("\nPress enter to return to parent value: ")
    @staticmethod
    def return_to_enclosing_type():
        """A utility function which prints that the current exploration session
        is returning to the enclosing type. Useful when exploring types.
        """
        print ("\nReturning to enclosing type...\n")
    @staticmethod
    def return_to_enclosing_type_prompt():
        """A utility function which prompts the user to press the 'enter' key
        so that the exploration session can shift back to the enclosing type.
        Useful when exploring types.
        """
        raw_input("\nPress enter to return to enclosing type: ")
class ScalarExplorer(object):
    """Internal class used to explore scalar values."""
    @staticmethod
    def explore_expr(expr, value, is_child):
        """Print a scalar value, then return to the parent value (if this is
        a child exploration).  Always returns False: scalars have nothing
        further to descend into.
        See Explorer.explore_expr and Explorer.is_scalar_type for more
        information.
        """
        # Single print; output is byte-identical to two separate lines.
        print ("'%s' is a scalar value of type '%s'.\n%s = %s"
               % (expr, value.type, expr, str(value)))
        if is_child:
            Explorer.return_to_parent_value_prompt()
            Explorer.return_to_parent_value()
        return False
    @staticmethod
    def explore_type(name, datatype, is_child):
        """Describe a scalar or enumerated type, then return to the
        enclosing type (if this is a child exploration).
        See Explorer.explore_type and Explorer.is_scalar_type for more
        information.
        """
        if datatype.code == gdb.TYPE_CODE_ENUM:
            desc = "an enumerated"
        else:
            desc = "a scalar"
        if is_child:
            print ("%s is of %s type '%s'." % (name, desc, str(datatype)))
        else:
            print ("'%s' is %s type." % (name, desc))
        if is_child:
            Explorer.return_to_enclosing_type_prompt()
            Explorer.return_to_enclosing_type()
        return False
class PointerExplorer(object):
    """Internal class used to explore pointer values."""
    @staticmethod
    def explore_expr(expr, value, is_child):
        """Function to explore pointer values.

        Interactively offers two views of the pointer: dereference it as a
        single value, or index it as an array.  Always returns False (no
        re-exploration of the pointer itself is requested).
        See Explorer.explore_expr for more information.
        """
        print ("'%s' is a pointer to a value of type '%s'" %
               (expr, str(value.type.target())))
        option = raw_input("Continue exploring it as a pointer to a single "
                           "value [y/n]: ")
        if option == "y":
            deref_value = None
            try:
                deref_value = value.dereference()
                # Force a read now so a bad pointer is caught here rather
                # than deeper inside the child exploration.
                str(deref_value)
            except gdb.MemoryError:
                print ("'%s' a pointer pointing to an invalid memory "
                       "location." % expr)
                if is_child:
                    Explorer.return_to_parent_value_prompt()
                return False
            Explorer.explore_expr("*%s" % Explorer.guard_expr(expr),
                                  deref_value, is_child)
            return False
        option = raw_input("Continue exploring it as a pointer to an "
                           "array [y/n]: ")
        if option == "y":
            # Keep prompting for indices until the user enters a non-integer.
            while True:
                index = 0
                try:
                    index = int(raw_input("Enter the index of the element you "
                                          "want to explore in '%s': " % expr))
                except ValueError:
                    break
                element_expr = "%s[%d]" % (Explorer.guard_expr(expr), index)
                element = value[index]
                try:
                    # Validate the element is readable before descending.
                    str(element)
                except gdb.MemoryError:
                    print ("Cannot read value at index %d." % index)
                    continue
                Explorer.explore_expr(element_expr, element, True)
            return False
        if is_child:
            Explorer.return_to_parent_value()
        return False
    @staticmethod
    def explore_type(name, datatype, is_child):
        """Function to explore pointer types: describes the pointer and then
        explores the pointee type.
        See Explorer.explore_type for more information.
        """
        target_type = datatype.target()
        print ("\n%s is a pointer to a value of type '%s'." %
               (name, str(target_type)))
        Explorer.explore_type("the pointee type of %s" % name,
                              target_type,
                              is_child)
        return False
class ReferenceExplorer(object):
    """Internal class used to explore reference (TYPE_CODE_REF) values."""
    @staticmethod
    def explore_expr(expr, value, is_child):
        """Transparently explore the value the reference refers to.
        See Explorer.explore_expr for more information.
        """
        Explorer.explore_expr(expr, value.referenced_value(), is_child)
        return False
    @staticmethod
    def explore_type(name, datatype, is_child):
        """Transparently explore the referenced type.
        See Explorer.explore_type for more information.
        """
        Explorer.explore_type(name, datatype.target(), is_child)
        return False
class ArrayExplorer(object):
    """Internal class used to explore arrays."""
    @staticmethod
    def explore_expr(expr, value, is_child):
        """Function to explore array values.

        Prompts for an element index and explores that element as a child.
        Returns True to be re-invoked (so another index can be chosen);
        returns False only when the user enters a non-integer.
        See Explorer.explore_expr for more information.
        """
        target_type = value.type.target()
        print ("'%s' is an array of '%s'." % (expr, str(target_type)))
        index = 0
        try:
            index = int(raw_input("Enter the index of the element you want to "
                                  "explore in '%s': " % expr))
        except ValueError:
            # Non-integer input ends the exploration of this array.
            if is_child:
                Explorer.return_to_parent_value()
            return False
        element = None
        try:
            element = value[index]
            # Force a read so an out-of-bounds/bad element is caught here.
            str(element)
        except gdb.MemoryError:
            print ("Cannot read value at index %d." % index)
            raw_input("Press enter to continue... ")
            return True
        Explorer.explore_expr("%s[%d]" % (Explorer.guard_expr(expr), index),
                              element, True)
        return True
    @staticmethod
    def explore_type(name, datatype, is_child):
        """Function to explore array types: describes the array and explores
        its element type.
        See Explorer.explore_type for more information.
        """
        target_type = datatype.target()
        print ("%s is an array of '%s'." % (name, str(target_type)))
        Explorer.explore_type("the array element of %s" % name, target_type,
                              is_child)
        return False
class CompoundExplorer(object):
    """Internal class used to explore struct, classes and unions."""
    @staticmethod
    def _print_fields(print_list):
        """Internal function which prints the fields of a struct/class/union.
        Field names are right-aligned to the longest name so the '=' signs
        line up.
        """
        max_field_name_length = 0
        for pair in print_list:
            if max_field_name_length < len(pair[0]):
                max_field_name_length = len(pair[0])
        for pair in print_list:
            print (" %*s = %s" % (max_field_name_length, pair[0], pair[1]))
    @staticmethod
    def _get_real_field_count(fields):
        # Count only programmer-declared fields; compiler-generated
        # (artificial) members such as vtable pointers are skipped.
        real_field_count = 0;
        for field in fields:
            if not field.artificial:
                real_field_count = real_field_count + 1
        return real_field_count
    @staticmethod
    def explore_expr(expr, value, is_child):
        """Function to explore structs/classes and union values.

        Prints a numbered menu of non-artificial fields; scalar fields of a
        struct/class are printed inline, everything else becomes a menu
        choice.  Returns True when a field was chosen (to show the menu
        again afterwards), False otherwise.
        See Explorer.explore_expr for more information.
        """
        datatype = value.type
        type_code = datatype.code
        fields = datatype.fields()
        if type_code == gdb.TYPE_CODE_STRUCT:
            type_desc = "struct/class"
        else:
            type_desc = "union"
        if CompoundExplorer._get_real_field_count(fields) == 0:
            print ("The value of '%s' is a %s of type '%s' with no fields." %
                   (expr, type_desc, str(value.type)))
            if is_child:
                Explorer.return_to_parent_value_prompt()
            return False
        print ("The value of '%s' is a %s of type '%s' with the following "
               "fields:\n" % (expr, type_desc, str(value.type)))
        has_explorable_fields = False
        choice_to_compound_field_map = { }
        current_choice = 0
        print_list = [ ]
        for field in fields:
            if field.artificial:
                continue
            field_full_name = Explorer.guard_expr(expr) + "." + field.name
            if field.is_base_class:
                # A base class sub-object is reached by casting, not by
                # member access.
                field_value = value.cast(field.type)
            else:
                field_value = value[field.name]
            literal_value = ""
            if type_code == gdb.TYPE_CODE_UNION:
                # Union members are never printed inline: reading an
                # inactive member could be misleading.
                literal_value = ("<Enter %d to explore this field of type "
                                 "'%s'>" % (current_choice, str(field.type)))
                has_explorable_fields = True
            else:
                if Explorer.is_scalar_type(field.type):
                    literal_value = ("%s .. (Value of type '%s')" %
                                     (str(field_value), str(field.type)))
                else:
                    if field.is_base_class:
                        field_desc = "base class"
                    else:
                        field_desc = "field"
                    literal_value = ("<Enter %d to explore this %s of type "
                                     "'%s'>" %
                                     (current_choice, field_desc,
                                      str(field.type)))
                    has_explorable_fields = True
            choice_to_compound_field_map[str(current_choice)] = (
                field_full_name, field_value)
            current_choice = current_choice + 1
            print_list.append((field.name, literal_value))
        CompoundExplorer._print_fields(print_list)
        print ("")
        if has_explorable_fields:
            choice = raw_input("Enter the field number of choice: ")
            if choice in choice_to_compound_field_map:
                Explorer.explore_expr(choice_to_compound_field_map[choice][0],
                                      choice_to_compound_field_map[choice][1],
                                      True)
                return True
            else:
                if is_child:
                    Explorer.return_to_parent_value()
        else:
            if is_child:
                Explorer.return_to_parent_value_prompt()
        return False
    @staticmethod
    def explore_type(name, datatype, is_child):
        """Function to explore struct/class and union types.

        Prints a numbered menu of non-artificial fields and descends into
        the chosen field's type.  Returns True when a field was chosen,
        False otherwise.
        See Explorer.explore_type for more information.
        """
        type_code = datatype.code
        type_desc = ""
        if type_code == gdb.TYPE_CODE_STRUCT:
            type_desc = "struct/class"
        else:
            type_desc = "union"
        fields = datatype.fields()
        if CompoundExplorer._get_real_field_count(fields) == 0:
            if is_child:
                print ("%s is a %s of type '%s' with no fields." %
                       (name, type_desc, str(datatype)))
                Explorer.return_to_enclosing_type_prompt()
            else:
                print ("'%s' is a %s with no fields." % (name, type_desc))
            return False
        if is_child:
            print ("%s is a %s of type '%s' "
                   "with the following fields:\n" %
                   (name, type_desc, str(datatype)))
        else:
            print ("'%s' is a %s with the following "
                   "fields:\n" %
                   (name, type_desc))
        # NOTE(review): has_explorable_fields is assigned but never read in
        # this method (unlike explore_expr above).
        has_explorable_fields = False
        current_choice = 0
        choice_to_compound_field_map = { }
        print_list = [ ]
        for field in fields:
            if field.artificial:
                continue
            if field.is_base_class:
                field_desc = "base class"
            else:
                field_desc = "field"
            rhs = ("<Enter %d to explore this %s of type '%s'>" %
                   (current_choice, field_desc, str(field.type)))
            print_list.append((field.name, rhs))
            choice_to_compound_field_map[str(current_choice)] = (
                field.name, field.type, field_desc)
            current_choice = current_choice + 1
        CompoundExplorer._print_fields(print_list)
        print ("")
        if len(choice_to_compound_field_map) > 0:
            choice = raw_input("Enter the field number of choice: ")
            if choice in choice_to_compound_field_map:
                if is_child:
                    new_name = ("%s '%s' of %s" %
                                (choice_to_compound_field_map[choice][2],
                                 choice_to_compound_field_map[choice][0],
                                 name))
                else:
                    new_name = ("%s '%s' of '%s'" %
                                (choice_to_compound_field_map[choice][2],
                                 choice_to_compound_field_map[choice][0],
                                 name))
                Explorer.explore_type(new_name,
                    choice_to_compound_field_map[choice][1], True)
                return True
            else:
                if is_child:
                    Explorer.return_to_enclosing_type()
        else:
            if is_child:
                Explorer.return_to_enclosing_type_prompt()
        return False
class TypedefExplorer(object):
    """Internal class used to explore values whose type is a typedef."""
    @staticmethod
    def explore_expr(expr, value, is_child):
        """Resolve the typedef and re-explore the value as its actual type.
        See Explorer.explore_expr for more information.
        """
        real_type = value.type.strip_typedefs()
        print ("The value of '%s' is of type '%s' "
               "which is a typedef of type '%s'" %
               (expr, str(value.type), str(real_type)))
        Explorer.explore_expr(expr, value.cast(real_type), is_child)
        return False
    @staticmethod
    def explore_type(name, datatype, is_child):
        """Resolve the typedef and re-explore the underlying type.
        See Explorer.explore_type for more information.
        """
        real_type = datatype.strip_typedefs()
        if is_child:
            fmt = "The type of %s is a typedef of type '%s'."
        else:
            fmt = "The type '%s' is a typedef of type '%s'."
        print (fmt % (name, str(real_type)))
        Explorer.explore_type(name, real_type, is_child)
        return False
class ExploreUtils(object):
    """Internal class which provides utilities for the main command classes."""
    @staticmethod
    def check_args(name, arg_str):
        """Utility to check if adequate number of arguments are passed to an
        explore command.

        Arguments:
            name: The name of the explore command.
            arg_str: The argument string passed to the explore command.

        Returns:
            True if adequate arguments are passed.

        Raises:
            gdb.GdbError if adequate arguments are not passed.
        """
        # The original had an unreachable 'return False' after this raise
        # (and a docstring promising a False return that could never
        # happen); the dead code has been removed -- callers only ever see
        # True or the exception.
        if len(arg_str) < 1:
            raise gdb.GdbError("ERROR: '%s' requires an argument."
                               % name)
        return True
    @staticmethod
    def get_type_from_str(type_str):
        """A utility function to deduce the gdb.Type value from a string
        representing the type.

        Arguments:
            type_str: The type string from which the gdb.Type value should be
                      deduced.

        Returns:
            The deduced gdb.Type value if possible, None otherwise.
        """
        try:
            # Assume the current language to be C/C++ and make a try.
            return gdb.parse_and_eval("(%s *)0" % type_str).type.target()
        except RuntimeError:
            # If assumption of current language to be C/C++ was wrong, then
            # lookup the type using the API.
            try:
                return gdb.lookup_type(type_str)
            except RuntimeError:
                return None
    @staticmethod
    def get_value_from_str(value_str):
        """A utility function to deduce the gdb.Value value from a string
        representing the value.

        Arguments:
            value_str: The value string from which the gdb.Value value should
                       be deduced.

        Returns:
            The deduced gdb.Value value if possible, None otherwise.
        """
        try:
            return gdb.parse_and_eval(value_str)
        except RuntimeError:
            return None
class ExploreCommand(gdb.Command):
    """Explore a value or a type valid in the current context.
    Usage:
        explore ARG
        - ARG is either a valid expression or a type name.
        - At any stage of exploration, hit the return key (instead of a
        choice, if any) to return to the enclosing type or value.
    """

    def __init__(self):
        # Register as a prefix command so "explore value"/"explore type"
        # can hang off it.
        super(ExploreCommand, self).__init__(name = "explore",
                                             command_class = gdb.COMMAND_DATA,
                                             prefix = True)

    def invoke(self, arg_str, from_tty):
        # Reject empty argument strings up front.
        if not ExploreUtils.check_args("explore", arg_str):
            return

        # Prefer treating the argument as an expression yielding a value.
        value = ExploreUtils.get_value_from_str(arg_str)
        if value is not None:
            Explorer.explore_expr(arg_str, value, False)
            return

        # Fall back to interpreting the argument as a type name.
        datatype = ExploreUtils.get_type_from_str(arg_str)
        if datatype is not None:
            Explorer.explore_type(arg_str, datatype, False)
            return

        # Neither interpretation worked; report the failure to the user.
        raise gdb.GdbError(
            ("'%s' neither evaluates to a value nor is a type "
             "in the current context." %
             arg_str))
class ExploreValueCommand(gdb.Command):
    """Explore value of an expression valid in the current context.
    Usage:
        explore value ARG
        - ARG is a valid expression.
        - At any stage of exploration, hit the return key (instead of a
        choice, if any) to return to the enclosing value.
    """

    def __init__(self):
        super(ExploreValueCommand, self).__init__(
            name = "explore value", command_class = gdb.COMMAND_DATA)

    def invoke(self, arg_str, from_tty):
        """Entry point called by GDB for 'explore value ARG'."""
        if ExploreUtils.check_args("explore value", arg_str) == False:
            return
        value = ExploreUtils.get_value_from_str(arg_str)
        if value is None:
            # BUG FIX: the original had an unreachable bare 'return' after
            # this raise; the raise alone terminates the command.
            raise gdb.GdbError(
                (" '%s' does not evaluate to a value in the current "
                 "context." %
                 arg_str))
        Explorer.explore_expr(arg_str, value, False)
class ExploreTypeCommand(gdb.Command):
    """Explore a type or the type of an expression valid in the current
    context.
    Usage:
        explore type ARG
        - ARG is a valid expression or a type name.
        - At any stage of exploration, hit the return key (instead of a
        choice, if any) to return to the enclosing type.
    """

    def __init__(self):
        # Registers this class as the handler of the "explore type"
        # sub-command inside GDB.
        super(ExploreTypeCommand, self).__init__(
            name = "explore type", command_class = gdb.COMMAND_DATA)

    def invoke(self, arg_str, from_tty):
        # Entry point called by GDB when the user runs "explore type ARG".
        if ExploreUtils.check_args("explore type", arg_str) == False:
            return
        # First try to interpret the argument as a type name.
        datatype = ExploreUtils.get_type_from_str(arg_str)
        if datatype is not None:
            Explorer.explore_type(arg_str, datatype, False)
            return
        # Otherwise evaluate it as an expression and explore the type of
        # the resulting value.
        value = ExploreUtils.get_value_from_str(arg_str)
        if value is not None:
            print ("'%s' is of type '%s'." % (arg_str, str(value.type)))
            Explorer.explore_type(str(value.type), value.type, False)
            return
        raise gdb.GdbError(("'%s' is not a type or value in the current "
                            "context." % arg_str))
# Module initialization: set up the explorer environment and register the
# "explore", "explore value" and "explore type" commands with GDB when this
# script is sourced.
Explorer.init_env()
ExploreCommand()
ExploreValueCommand()
ExploreTypeCommand()
| gpl-2.0 |
PingPesto/BitList | bitlist/models/song.py | 1 | 1181 | from .user import User
import datetime
from pyramid_mongoengine import MongoEngine
import random
db = MongoEngine()
class Song(db.Document):
    """MongoDB document describing one song in the playlist.

    The ``rand`` field holds a unique random float that is populated in
    :meth:`clean` before saving; :meth:`get_random` uses it to select a
    (roughly) uniformly random document with a single range query.
    """
    title = db.StringField(required=True)
    url = db.StringField(required=True)
    artist= db.StringField()
    original_url = db.StringField()
    album = db.StringField()
    album_art = db.StringField()
    # Submitting user; nulled out (not cascaded) if the user is deleted.
    addedby = db.ReferenceField(User, reverse_delete_rule=db.NULLIFY)
    # Random sort key used by get_random(); must be unique per document.
    rand = db.FloatField(unique=True)
    tags = db.ListField()
    created_on = db.DateTimeField(default=datetime.datetime.now)
    updated_on = db.DateTimeField(default=datetime.datetime.now)

    @classmethod
    def get_by_id(cls, id):
        """Return the song with the given primary key, or abort with 404."""
        song = cls.objects.get_or_404(id=id)
        return song

    @classmethod
    def get_by_url(cls, url):
        """Return the song with the given URL, or abort with 404."""
        song = cls.objects.get_or_404(url=url)
        return song

    @classmethod
    def get_random(cls):
        """Return a random song, or None when no document qualifies.

        Draws a fresh random pivot and returns the first song whose
        ``rand`` field is >= that pivot.
        """
        random.seed()
        rand = random.random()
        songs = cls.objects(rand__gte=rand).limit(1)
        return songs.first()

    def clean(self):
        # Called by mongoengine before save; lazily assign the random
        # sort key the first time the document is persisted.
        if not self.rand:
            self.rand = generate_random()
def generate_random():
    """Return a fresh pseudo-random float in the interval [0.0, 1.0)."""
    # Re-seed from the system source before drawing the value.
    random.seed()
    value = random.random()
    return value
| gpl-3.0 |
ack/docket | docket/commands.py | 2 | 5256 | import os
import sys
import urlparse
import subprocess
import shutil
import contextlib
from os import makedirs
from os.path import exists, join, abspath, basename, dirname, islink
import dockerfile
import util
import platforms
__all__ = ['merge', 'generate']
RELDIR = "library"
ESPB_SCRIPT = "https://github.com/ack/espb/zipball/master"
SUPERVISED = 'RUN echo "[program:{0}]\\ncommand={1}\\n" > /etc/supervisor/conf.d/{2}.conf'
def merge(*refs, **kw):
    """pull/parse multiple dockerfiles, outputting to STDOUT """
    refs = list(refs)
    # resolve any remote references
    files = expand(*refs)
    # parse the dockerfiles
    parsed = parse(files)
    # ensure we can proceed
    errors = validate(parsed)
    if errors:
        for err in errors:
            print >> sys.stderr, err
        sys.exit(10)
    # NOTE(review): 'workspace' is computed but never used in this function.
    workspace = join(os.getcwd(), RELDIR)
    # All parsed files share one ancestor (validated above); use the first.
    initial = parsed[0].parent
    print "########## docket intro"
    print "FROM {0}".format(initial)
    print
    # echo out the concatenated commands
    for df in parsed:
        for line in df.lines:
            print line
    if not kw.get('unsupervised'):
        # Emit supervisord wiring so every merged service gets a program
        # entry and the image starts supervisord as its CMD.
        print "\n\n########## docket outro"
        print "RUN mkdir -p /etc/supervisor/conf.d /var/log/supervisor"
        print "RUN touch /etc/supervisor/supervisord.conf"
        for df in parsed:
            startup = df.command
            if not startup:
                continue
            print SUPERVISED.format(df.name, startup.replace("\"", "\\\""), df.name).replace("'", "\'")
        if kw.get('ssh'):
            print SUPERVISED.format('ssh', '/usr/sbin/sshd -D', 'ssh')
        print 'CMD ["supervisord", "-n", "-c", "/etc/supervisor/supervisord.conf"]'
def generate(name, parent, **kw):
    """
    create a docket Dockerfile
    """
    path = join('library', name)
    if exists(path):
        error("dir/file at path {0} exists!".format(path))
    # NOTE(review): 'override' is assigned but never used.
    override = None
    try:
        # Prefer a supervisord.conf in the current directory; fall back to
        # the one shipped next to this module.
        # NOTE(review): open() here only probes for existence and leaks the
        # file handle; os.path.isfile would be cleaner.
        confpath = abspath("supervisord.conf")
        open(confpath)
    except IOError:
        confpath = abspath(join(dirname(__file__), "supervisord.conf"))
    os.makedirs(path)
    with util.chdir(path):
        shutil.copyfile(confpath, "supervisord.conf")
        with open("Dockerfile", 'w') as f:
            # this one (in practice) should be ignored
            print >> f, "FROM {0}".format(parent)
            # install supervisor and/or pip according to platform
            for dep in platforms.dependencies(parent):
                print >> f, "RUN {0}".format(dep)
            print >> f, "RUN mkdir -p /etc/supervisor"
            print >> f, "ADD supervisord.conf /etc/supervisor/supervisord.conf"
            print >> f, "ENV ETCD http://172.17.42.1:4001"
            if kw.get('inject'):
                print >> f, "# injected service pooling script"
                print >> f, "RUN which espb || pip install {0}".format(ESPB_SCRIPT)
                print >> f, 'CMD ["/usr/local/bin/espb", "register", "{0}"]'.format(name)
    print join(path, "Dockerfile")
def expand(*refs):
    """
    convert refs from { github-ref / directory / uri }
    to { local-directory }
    """
    resolved = []
    for reference in refs:
        # "." means the current working directory; make it absolute first.
        if reference == '.':
            reference = abspath(reference)
        candidate = join(RELDIR, reference)
        # Anything not already materialized under RELDIR must be fetched.
        if exists(candidate):
            resolved.append(candidate)
        else:
            resolved.append(resolve(reference))
    return resolved
def parse(files):
    """Parse each Dockerfile path into a dockerfile.Dockerfile object."""
    def _load(path):
        # One parsed Dockerfile per input path, in input order.
        df = dockerfile.Dockerfile(path)
        df.parse()
        return df
    return [_load(path) for path in files]
def validate(parsed):
    """Return a list of error strings; empty when merging may proceed."""
    parents = ancestors(parsed)
    # Merging only makes sense when every Dockerfile shares one ancestor.
    if len(parents) > 1:
        return ["Multiple ancestors detected: {0}".format(",".join(parents))]
    return []
def ancestors(parsed):
    """Return the set of distinct parent images across all Dockerfiles."""
    return {df.parent for df in parsed}
def github(uri, outdir):
    """Clone *uri* (a GitHub shorthand or full git URL) into *outdir*."""
    if 'github.com' in uri:
        url = uri
    elif 'git@' in uri or 'git://' in uri:
        url = uri
    else:
        # Bare "user/repo" shorthand; expand to an HTTPS GitHub URL.
        url = "https://github.com/" + uri
    # Run git clone, forwarding its output to our stderr.
    out, err = subprocess.Popen(["git", "clone", url, outdir],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE).communicate()
    print >> sys.stderr, out
    print >> sys.stderr, err
def resolve(ref):
    """Materialize *ref* as a local directory and return its path.

    *ref* may be an existing local path, a raw URL to a Dockerfile, or a
    git/GitHub reference to clone under RELDIR.
    """
    if exists(ref):
        # local directory already on disk
        return ref
    elif 'http' in ref and 'github.com' not in ref:
        # curl down a Dockerfile
        # BUG FIX: urllib was never imported; import it locally here.
        import urllib
        dir = basename(urlparse.urlparse(ref).path)
        target = join(RELDIR, dir)
        try: os.makedirs(target)
        except: pass
        # BUG FIX: the file must be opened for *writing*, and the HTTP
        # response must be read into a string before being written out
        # (the original wrote the response object to a read-only handle).
        with open(join(target, "Dockerfile"), 'w') as out:
            out.write(urllib.urlopen(ref).read())
        return target
    elif ref.lower().startswith("http://") or \
         ref.lower().startswith("https://") or \
         ref.lower().startswith("git") or \
         '/' in ref:
        # Treat anything URL-ish or "user/repo"-shaped as a git reference.
        dir = urlparse.urlparse(ref).path
        if dir.startswith('/'): dir = dir[1:]
        target = join(RELDIR, dir)
        if not exists(target):
            try: makedirs(target)
            except: pass
            github(ref, target)
        return target
    else:
        raise Exception("unknown path type: {0}".format(ref))
def error(m, exit_code=1):
    """Write message *m* to stderr and terminate with *exit_code*."""
    sys.stderr.write("%s\n" % (m,))
    sys.exit(exit_code)
| mit |
idovear/odoo | addons/edi/controllers/main.py | 374 | 1097 | import werkzeug.urls
import openerp
import openerp.addons.web.controllers.main as webmain
class EDI(openerp.http.Controller):
    """HTTP endpoints for importing EDI documents from a remote URL."""

    @openerp.http.route('/edi/import_url', type='http', auth='none')
    def import_url(self, url):
        """Serve the web client bootstrapped to start an EDI import of *url*."""
        # http://hostname:8069/edi/import_url?url=URIEncodedURL
        req = openerp.http.request
        # `url` may contain a full URL with a valid query string, we basically want to watch out for XML brackets and double-quotes
        safe_url = werkzeug.url_quote_plus(url,':/?&;=')
        values = dict(init='s.edi.edi_import("%s");' % safe_url)
        return req.render('web.webclient_bootstrap', values)

    @openerp.http.route('/edi/import_edi_url', type='json', auth='none')
    def import_edi_url(self, url):
        """JSON endpoint: import the EDI document at *url* via the edi proxy.

        Returns an action dict when the import yields one record, else True.
        """
        req = openerp.http.request
        result = req.session.proxy('edi').import_edi_url(req.session._db, req.session._uid, req.session._password, url)
        if len(result) == 1:
            return {"action": webmain.clean_action(req, result[0][2])}
        return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
RandyLowery/erpnext | erpnext/demo/user/fixed_asset.py | 11 | 1951 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils.make_random import get_random
from erpnext.accounts.doctype.asset.asset import make_purchase_invoice, make_sales_invoice
from erpnext.accounts.doctype.asset.depreciation import post_depreciation_entries, scrap_asset
def work():
    """Demo driver: purchase, depreciate, scrap and sell fixed assets."""
    frappe.set_user(frappe.db.get_global('demo_accounts_user'))
    asset_list = make_asset_purchase_entry()
    if not asset_list:
        # fixed_asset.work() already run
        return
    # post depreciation entries as on today
    post_depreciation_entries()
    # scrap a random asset
    frappe.db.set_value("Company", "Wind Power LLC", "disposal_account", "Gain/Loss on Asset Disposal - WPL")
    asset = get_random_asset()
    scrap_asset(asset.name)
    # Sell a random asset
    sell_an_asset()
def make_asset_purchase_entry():
    """Create and submit a purchase invoice for each asset that lacks one.

    Returns the list of assets processed (empty when nothing was pending).
    """
    asset_list = frappe.get_all("Asset", filters={"purchase_invoice": ["in", ("", None)]},
        fields=["name", "item_code", "gross_purchase_amount", "company", "purchase_date"])
    # make purchase invoice
    for asset in asset_list:
        pi = make_purchase_invoice(asset.name, asset.item_code, asset.gross_purchase_amount,
            asset.company, asset.purchase_date)
        # Attach a random demo supplier before submitting.
        pi.supplier = get_random("Supplier")
        pi.save()
        pi.submit()
    return asset_list
def sell_an_asset():
    """Create and submit a sales invoice for one randomly chosen asset."""
    asset = get_random_asset()
    si = make_sales_invoice(asset.name, asset.item_code, "Wind Power LLC")
    si.customer = get_random("Customer")
    # Sell at 80% of the depreciated value when available, otherwise at
    # 90% of the gross purchase amount.
    si.get("items")[0].rate = asset.value_after_depreciation * 0.8 \
        if asset.value_after_depreciation else asset.gross_purchase_amount * 0.9
    si.save()
    si.submit()
def get_random_asset():
    """Return one random submitted asset that is not scrapped or sold."""
    return frappe.db.sql(""" select name, item_code, value_after_depreciation, gross_purchase_amount
        from `tabAsset`
        where docstatus=1 and status not in ("Scrapped", "Sold") order by rand() limit 1""", as_dict=1)[0]
| gpl-3.0 |
2014cdag16/2014cdag16 | wsgi/static/Brython2.1.0-20140419-113919/Lib/atexit.py | 743 | 1049 | """allow programmer to define multiple exit functions to be executedupon normal program termination.
Two public functions, register and unregister, are defined.
"""
class __loader__(object):
    # Brython stub: placeholder loader object for module metadata.
    pass
def _clear(*args,**kw):
    """_clear() -> None
    Clear the list of previously registered exit functions."""
    # Brython stub: accepts any arguments and does nothing.
    pass
def _run_exitfuncs(*args,**kw):
    """_run_exitfuncs() -> None
    Run all registered exit functions."""
    # Brython stub: accepts any arguments and does nothing.
    pass
def register(*args,**kw):
    """register(func, *args, **kwargs) -> func
    Register a function to be executed upon normal program termination
    func - function to be called at exit
    args - optional arguments to pass to func
    kwargs - optional keyword arguments to pass to func
    func is returned to facilitate usage as a decorator."""
    # BUG FIX: the stub returned None, which broke decorator usage
    # (@atexit.register would rebind the decorated name to None) and
    # contradicted the docstring.  Registration itself remains a no-op
    # in this stub; only the pass-through return is added.
    if args:
        return args[0]
def unregister(*args,**kw):
    """unregister(func) -> None
    Unregister a exit function which was previously registered using
    atexit.register
    func - function to be unregistered"""
    # Brython stub: accepts any arguments and does nothing.
    pass
| gpl-2.0 |
kose-y/pylearn2 | pylearn2/scripts/icml_2013_wrepl/multimodal/make_submission.py | 44 | 1840 | from __future__ import print_function
import numpy as np
import sys
from theano.compat.six.moves import xrange
from theano import function
from theano import tensor as T
from pylearn2.utils import serial
from pylearn2.utils.string_utils import preprocess
def usage():
    """
    Run
    python make_submission.py <model> <test set>
    where <test set> is public_test or private_test
    (private_test will be released 72 hours before the end of the contest)
    """
    # BUG FIX: the original body consisted solely of the docstring, so
    # calling usage() printed nothing even though the caller invokes it
    # precisely to show the usage text.  Print the docstring instead.
    print(usage.__doc__)
# Validate command line: expects a model path and a test-set name.
if len(sys.argv) != 3:
    usage()
    print("(You used the wrong number of arguments)")
    quit(-1)
_, model_path, test_set = sys.argv
model = serial.load(model_path)
# Load BOVW features
features_dir = preprocess('${PYLEARN2_DATA_PATH}/icml_2013_multimodal/'+test_set+'_layer_2_features')
vectors = []
for i in xrange(500):
    vectors.append(serial.load(features_dir + '/' + str(i) + '.npy'))
# Stack the 500 per-example feature files into one design matrix.
features = np.concatenate(vectors, axis=0)
del vectors
# Load BOW targets
f = open('wordlist.txt')
wordlist = f.readlines()
f.close()
options_dir = preprocess('${PYLEARN2_DATA_PATH}/icml_2013_multimodal/'+test_set+'_options')
def load_options(option):
    """Build a 500 x 4000 binary bag-of-words matrix for answer *option*.

    Row i has a 1 in column j when word j of the global wordlist appears
    in example i's ``.option_<option>.desc`` file.
    """
    rval = np.zeros((500, 4000), dtype='float32')
    for i in xrange(500):
        f = open(options_dir + '/' + str(i) + '.option_' + str(option) + '.desc')
        l = f.readlines()
        f.close()
        for w in l:
            # NOTE(review): matching is line-by-line against wordlist
            # entries, newlines included — assumes both files use the
            # same one-word-per-line format.
            if w in wordlist:
                rval[i, wordlist.index(w)] = 1
    return rval
option_0, option_1 = [load_options(0), load_options(1)]
# Compile a Theano function that is True when option 1 has lower KL
# divergence to the model's prediction than option 0.
X = T.matrix()
Y0 = T.matrix()
Y1 = T.matrix()
Y_hat = model.fprop(X)
cost_0 = model.layers[-1].kl(Y=Y0, Y_hat=Y_hat)
cost_1 = model.layers[-1].kl(Y=Y1, Y_hat=Y_hat)
f = function([X, Y0, Y1], cost_1 < cost_0)
prediction = f(features, option_0, option_1)
# Write one 0/1 prediction per line for the 500 test examples.
f = open('submission.csv', 'w')
for i in xrange(500):
    f.write(str(prediction[i])+'\n')
f.close()
skoslowski/gnuradio | gnuradio-runtime/examples/volk_benchmark/volk_plot.py | 4 | 6303 | #!/usr/bin/env python
from __future__ import division
from __future__ import unicode_literals
import sys, math
import argparse
from volk_test_funcs import (create_connection, list_tables, get_results,
helper, timeit, format_results)
# Matplotlib is an optional third-party dependency; bail out with a clear
# message instead of a bare ImportError traceback when it is missing.
try:
    import matplotlib
    import matplotlib.pyplot as plt
except ImportError:
    sys.stderr.write("Could not import Matplotlib (http://matplotlib.sourceforge.net/)\n")
    sys.exit(1)
def main():
    """Parse CLI options, read Volk benchmark tables from SQLite and render
    a grouped bar chart comparing kernel timings across tables."""
    desc='Plot Volk performance results from a SQLite database. ' + \
        'Run one of the volk tests first (e.g, volk_math.py)'
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('-D', '--database', type=str,
                        default='volk_results.db',
                        help='Database file to read data from [default: %(default)s]')
    parser.add_argument('-E', '--errorbars',
                        action='store_true', default=False,
                        help='Show error bars (1 standard dev.)')
    parser.add_argument('-P', '--plot', type=str,
                        choices=['mean', 'min', 'max'],
                        default='mean',
                        help='Set the type of plot to produce [default: %(default)s]')
    parser.add_argument('-%', '--percent', type=str,
                        default=None, metavar="table",
                        help='Show percent difference to the given type [default: %(default)s]')
    args = parser.parse_args()
    # Set up global plotting properties
    matplotlib.rcParams['figure.subplot.bottom'] = 0.2
    matplotlib.rcParams['figure.subplot.top'] = 0.95
    matplotlib.rcParams['figure.subplot.right'] = 0.98
    matplotlib.rcParams['ytick.labelsize'] = 16
    matplotlib.rcParams['xtick.labelsize'] = 16
    matplotlib.rcParams['legend.fontsize'] = 18
    # Get list of tables to compare
    conn = create_connection(args.database)
    tables = list_tables(conn)
    M = len(tables)
    # Colors to distinguish each table in the bar graph
    # More than 5 tables will wrap around to the start.
    colors = ['b', 'r', 'g', 'm', 'k']
    # Set up figure for plotting
    f0 = plt.figure(0, facecolor='w', figsize=(14,10))
    s0 = f0.add_subplot(1,1,1)
    # Create a register of names that exist in all tables
    tmp_regs = []
    for table in tables:
        # Get results from the next table
        res = get_results(conn, table[0])
        tmp_regs.append(list())
        for r in res:
            # Collect each kernel name once per table.
            try:
                tmp_regs[-1].index(r['kernel'])
            except ValueError:
                tmp_regs[-1].append(r['kernel'])
    # Get only those names that are common in all tables
    name_reg = tmp_regs[0]
    for t in tmp_regs[1:]:
        name_reg = list(set(name_reg) & set(t))
    name_reg.sort()
    # Pull the data out for each table into a dictionary
    # we can ref the table by it's name and the data associated
    # with a given kernel in name_reg by it's name.
    # This ensures there is no sorting issue with the data in the
    # dictionary, so the kernels are plotted against each other.
    table_data = dict()
    for i,table in enumerate(tables):
        # Get results from the next table
        res = get_results(conn, table[0])
        data = dict()
        for r in res:
            data[r['kernel']] = r
        table_data[table[0]] = data
    if args.percent is not None:
        # Gather the baseline series the other tables are compared against.
        for i,t in enumerate(table_data):
            if args.percent == t:
                norm_data = []
                for name in name_reg:
                    if(args.plot == 'max'):
                        norm_data.append(table_data[t][name]['max'])
                    elif(args.plot == 'min'):
                        norm_data.append(table_data[t][name]['min'])
                    elif(args.plot == 'mean'):
                        norm_data.append(table_data[t][name]['avg'])
    # Plot the results
    x0 = list(range(len(name_reg)))
    i = 0
    for t in (table_data):
        ydata = []
        stds = []
        for name in name_reg:
            stds.append(math.sqrt(table_data[t][name]['var']))
            if(args.plot == 'max'):
                ydata.append(table_data[t][name]['max'])
            elif(args.plot == 'min'):
                ydata.append(table_data[t][name]['min'])
            elif(args.plot == 'mean'):
                ydata.append(table_data[t][name]['avg'])
        if args.percent is not None:
            # Convert timings to percent improvement over the baseline.
            ydata = [-100*(y-n)/y for y,n in zip(ydata,norm_data)]
            if(args.percent != t):
                # makes x values for this data set placement
                # width of bars depends on number of comparisons
                wdth = 0.80 / (M-1)
                x1 = [x + i*wdth for x in x0]
                i += 1
                s0.bar(x1, ydata, width=wdth,
                       color=colors[(i-1)%M], label=t,
                       edgecolor='k', linewidth=2)
        else:
            # makes x values for this data set placement
            # width of bars depends on number of comparisons
            wdth = 0.80 / M
            x1 = [x + i*wdth for x in x0]
            i += 1
            if(args.errorbars is False):
                s0.bar(x1, ydata, width=wdth,
                       color=colors[(i-1)%M], label=t,
                       edgecolor='k', linewidth=2)
            else:
                # NOTE(review): this branch indexes colors[i%M] while the
                # others use colors[(i-1)%M] — likely unintended, so the
                # errorbar plot is shifted one color; confirm before fixing.
                s0.bar(x1, ydata, width=wdth,
                       yerr=stds,
                       color=colors[i%M], label=t,
                       edgecolor='k', linewidth=2,
                       error_kw={"ecolor": 'k', "capsize":5,
                                 "linewidth":2})
    # 'res' still holds the last table's rows; all rows share 'nitems'.
    nitems = res[0]['nitems']
    if args.percent is None:
        s0.set_ylabel("Processing time (sec) [{0:G} items]".format(nitems),
                      fontsize=22, fontweight='bold',
                      horizontalalignment='center')
    else:
        s0.set_ylabel("% Improvement over {0} [{1:G} items]".format(
                      args.percent, nitems),
                      fontsize=22, fontweight='bold')
    s0.legend()
    s0.set_xticks(x0)
    s0.set_xticklabels(name_reg)
    for label in s0.xaxis.get_ticklabels():
        label.set_rotation(45)
        label.set_fontsize(16)
    plt.show()

if __name__ == "__main__":
    main()
0xffea/drizzle | tests/qp_tests/randgen_basic/selectStabilityValidator_test.py | 4 | 1491 | #! /usr/bin/env python
# -*- mode: python; indent-tabs-mode: nil; -*-
# vim:expandtab:shiftwidth=2:tabstop=2:smarttab:
#
# Copyright (C) 2011 Patrick Crews
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from lib.util.mysqlBaseTestCase import mysqlBaseTestCase
# Globals populated by the dbqp test harness before the test case runs:
# one server with default options plus manager/executor handles.
server_requirements = [[]]
servers = []
server_manager = None
test_executor = None
class basicTest(mysqlBaseTestCase):
    """Runs the randgen SelectStability validator against server 0."""

    def test_selectStabilityValidator1(self):
        # 750 subquery-materialization queries across 3 threads; any
        # non-zero exit from gentest.pl fails the test with its output.
        test_cmd = "./gentest.pl --gendata=conf/drizzle/drizzle.zz --grammar=conf/drizzle/subquery_materialization_drizzle.yy --validators=SelectStability --queries=750 --threads=3"
        retcode, output = self.execute_randgen(test_cmd, test_executor, servers[0])
        self.assertEqual(retcode, 0, msg = output)

    def tearDown(self):
        # Restore server state so subsequent tests start clean.
        server_manager.reset_servers(test_executor.name)
| gpl-2.0 |
lukasfenix/namebench | nb_third_party/dns/node.py | 215 | 5914 | # Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS nodes. A node is a set of rdatasets."""
import StringIO
import dns.rdataset
import dns.rdatatype
import dns.renderer
class Node(object):
    """A DNS node.
    A node is a set of rdatasets
    @ivar rdatasets: the node's rdatasets
    @type rdatasets: list of dns.rdataset.Rdataset objects"""

    __slots__ = ['rdatasets']

    def __init__(self):
        """Initialize a DNS node.
        """
        self.rdatasets = [];

    def to_text(self, name, **kw):
        """Convert a node to text format.
        Each rdataset at the node is printed. Any keyword arguments
        to this method are passed on to the rdataset's to_text() method.
        @param name: the owner name of the rdatasets
        @type name: dns.name.Name object
        @rtype: string
        """
        # Python 2 idiom: accumulate via print >> into a StringIO buffer;
        # the final [:-1] strips the trailing newline.
        s = StringIO.StringIO()
        for rds in self.rdatasets:
            print >> s, rds.to_text(name, **kw)
        return s.getvalue()[:-1]

    def __repr__(self):
        return '<DNS node ' + str(id(self)) + '>'

    def __eq__(self, other):
        """Two nodes are equal if they have the same rdatasets.
        @rtype: bool
        """
        #
        # This is inefficient. Good thing we don't need to do it much.
        #
        for rd in self.rdatasets:
            if rd not in other.rdatasets:
                return False
        for rd in other.rdatasets:
            if rd not in self.rdatasets:
                return False
        return True

    def __ne__(self, other):
        return not self.__eq__(other)

    def __len__(self):
        return len(self.rdatasets)

    def __iter__(self):
        return iter(self.rdatasets)

    def find_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE,
                      create=False):
        """Find an rdataset matching the specified properties in the
        current node.
        @param rdclass: The class of the rdataset
        @type rdclass: int
        @param rdtype: The type of the rdataset
        @type rdtype: int
        @param covers: The covered type. Usually this value is
        dns.rdatatype.NONE, but if the rdtype is dns.rdatatype.SIG or
        dns.rdatatype.RRSIG, then the covers value will be the rdata
        type the SIG/RRSIG covers. The library treats the SIG and RRSIG
        types as if they were a family of
        types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA). This makes RRSIGs much
        easier to work with than if RRSIGs covering different rdata
        types were aggregated into a single RRSIG rdataset.
        @type covers: int
        @param create: If True, create the rdataset if it is not found.
        @type create: bool
        @raises KeyError: An rdataset of the desired type and class does
        not exist and I{create} is not True.
        @rtype: dns.rdataset.Rdataset object
        """
        # Linear scan; nodes typically hold few rdatasets.
        for rds in self.rdatasets:
            if rds.match(rdclass, rdtype, covers):
                return rds
        if not create:
            raise KeyError
        rds = dns.rdataset.Rdataset(rdclass, rdtype)
        self.rdatasets.append(rds)
        return rds

    def get_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE,
                     create=False):
        """Get an rdataset matching the specified properties in the
        current node.
        None is returned if an rdataset of the specified type and
        class does not exist and I{create} is not True.
        @param rdclass: The class of the rdataset
        @type rdclass: int
        @param rdtype: The type of the rdataset
        @type rdtype: int
        @param covers: The covered type.
        @type covers: int
        @param create: If True, create the rdataset if it is not found.
        @type create: bool
        @rtype: dns.rdataset.Rdataset object or None
        """
        # Non-raising wrapper around find_rdataset().
        try:
            rds = self.find_rdataset(rdclass, rdtype, covers, create)
        except KeyError:
            rds = None
        return rds

    def delete_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE):
        """Delete the rdataset matching the specified properties in the
        current node.
        If a matching rdataset does not exist, it is not an error.
        @param rdclass: The class of the rdataset
        @type rdclass: int
        @param rdtype: The type of the rdataset
        @type rdtype: int
        @param covers: The covered type.
        @type covers: int
        """
        rds = self.get_rdataset(rdclass, rdtype, covers)
        if not rds is None:
            self.rdatasets.remove(rds)

    def replace_rdataset(self, replacement):
        """Replace an rdataset.
        It is not an error if there is no rdataset matching I{replacement}.
        Ownership of the I{replacement} object is transferred to the node;
        in other words, this method does not store a copy of I{replacement}
        at the node, it stores I{replacement} itself.
        """
        self.delete_rdataset(replacement.rdclass, replacement.rdtype,
                             replacement.covers)
        self.rdatasets.append(replacement)
| apache-2.0 |
abhitopia/tensorflow | tensorflow/contrib/keras/api/keras/datasets/reuters/__init__.py | 57 | 1065 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reuters newswire topic classification dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.python.keras.datasets.reuters import get_word_index
from tensorflow.contrib.keras.python.keras.datasets.reuters import load_data
# Remove the __future__ aliases so they are not re-exported as part of
# this module's public namespace.
del absolute_import
del division
del print_function
| apache-2.0 |
uwafsl/MissionPlanner | Lib/site-packages/numpy/distutils/lib2def.py | 99 | 3419 | import re
import sys
import os
import subprocess
__doc__ = """This module generates a DEF file from the symbols in
an MSVC-compiled DLL import library. It correctly discriminates between
data and functions. The data is collected from the output of the program
nm(1).
Usage:
python lib2def.py [libname.lib] [output.def]
or
python lib2def.py [libname.lib] > output.def
libname.lib defaults to python<py_ver>.lib and output.def defaults to stdout
Author: Robert Kern <kernr@mail.ncifcrf.gov>
Last Update: April 30, 1999
"""
__version__ = '0.1a'
py_ver = "%d%d" % tuple(sys.version_info[:2])
DEFAULT_NM = 'nm -Cs'
DEF_HEADER = """LIBRARY python%s.dll
;CODE PRELOAD MOVEABLE DISCARDABLE
;DATA PRELOAD SINGLE
EXPORTS
""" % py_ver
# the header of the DEF file
FUNC_RE = re.compile(r"^(.*) in python%s\.dll" % py_ver, re.MULTILINE)
DATA_RE = re.compile(r"^_imp__(.*) in python%s\.dll" % py_ver, re.MULTILINE)
def parse_cmd():
    """Parses the command-line arguments.
    libfile, deffile = parse_cmd()"""
    # Accept the .lib and .def arguments in either order.
    if len(sys.argv) == 3:
        if sys.argv[1][-4:] == '.lib' and sys.argv[2][-4:] == '.def':
            libfile, deffile = sys.argv[1:]
        elif sys.argv[1][-4:] == '.def' and sys.argv[2][-4:] == '.lib':
            deffile, libfile = sys.argv[1:]
        else:
            # NOTE(review): this branch warns but never assigns libfile or
            # deffile, so the return below raises UnboundLocalError.
            print "I'm assuming that your first argument is the library"
            print "and the second is the DEF file."
    elif len(sys.argv) == 2:
        if sys.argv[1][-4:] == '.def':
            deffile = sys.argv[1]
            libfile = 'python%s.lib' % py_ver
        elif sys.argv[1][-4:] == '.lib':
            deffile = None
            libfile = sys.argv[1]
        # NOTE(review): a single argument that is neither .def nor .lib
        # also leaves both variables unbound.
    else:
        # No arguments: default to the current interpreter's import lib,
        # writing the DEF to stdout (deffile None is handled by the caller).
        libfile = 'python%s.lib' % py_ver
        deffile = None
    return libfile, deffile
def getnm(nm_cmd = ['nm', '-Cs', 'python%s.lib' % py_ver]):
    """Returns the output of nm_cmd via a pipe.

    nm_output = getnam(nm_cmd = 'nm -Cs py_lib')

    nm_cmd may be given either as a single command string or as a list
    of arguments; both forms are executed through the shell.
    """
    # BUG FIX: passing a *list* together with shell=True is wrong on
    # POSIX -- only the first element is run as the command and the rest
    # become arguments of the shell itself.  Join list commands into one
    # string so both the list default and string callers behave.
    if not isinstance(nm_cmd, str):
        nm_cmd = ' '.join(nm_cmd)
    f = subprocess.Popen(nm_cmd, shell=True, stdout=subprocess.PIPE)
    nm_output = f.stdout.read()
    f.stdout.close()
    f.wait()  # reap the child so no zombie process is left behind
    return nm_output
def parse_nm(nm_output):
    """Returns a tuple of lists: dlist for the list of data
    symbols and flist for the list of function symbols.

    dlist, flist = parse_nm(nm_output)"""
    data_syms = DATA_RE.findall(nm_output)
    func_syms = FUNC_RE.findall(nm_output)

    def _is_python_api(sym):
        # Only exported Python API symbols make it into the DEF file.
        return sym[:2] == 'Py' or sym[:3] == '_Py'

    # Functions: import symbols that also appear as plain symbols.
    flist = [sym for sym in data_syms
             if sym in func_syms and (_is_python_api(sym) or sym[:4] == 'init')]
    # Data: remaining Python API import symbols.
    dlist = [sym for sym in data_syms
             if sym not in flist and _is_python_api(sym)]

    dlist.sort()
    flist.sort()
    return dlist, flist
def output_def(dlist, flist, header, file = sys.stdout):
    """Outputs the final DEF file to a file defaulting to stdout.

    output_def(dlist, flist, header, file = sys.stdout)"""
    chunks = [header]
    # Data symbols first, each flagged DATA ...
    chunks.extend('\t%s DATA\n' % sym for sym in dlist)
    chunks.append('\n')  # blank line
    # ... then the function symbols.
    chunks.extend('\t%s\n' % sym for sym in flist)
    file.write(''.join(chunks))
if __name__ == '__main__':
    libfile, deffile = parse_cmd()
    # No DEF file name means write the result to stdout.
    if deffile is None:
        deffile = sys.stdout
    else:
        deffile = open(deffile, 'w')
    # DEFAULT_NM is a single 'nm -Cs' string; getnm runs it via the shell.
    nm_cmd = [str(DEFAULT_NM), str(libfile)]
    nm_output = getnm(nm_cmd)
    dlist, flist = parse_nm(nm_output)
    output_def(dlist, flist, DEF_HEADER, deffile)
| gpl-3.0 |
tchernomax/ansible | test/runner/lib/sanity/__init__.py | 4 | 10906 | """Execute Ansible sanity tests."""
from __future__ import absolute_import, print_function
import abc
import glob
import json
import os
import re
import sys
from lib.util import (
ApplicationError,
SubprocessError,
display,
run_command,
import_plugins,
load_plugins,
parse_to_list_of_dict,
ABC,
is_binary_file,
read_lines_without_comments,
)
from lib.ansible_util import (
ansible_environment,
)
from lib.target import (
walk_external_targets,
walk_internal_targets,
walk_sanity_targets,
)
from lib.executor import (
get_changes_filter,
AllTargetsSkipped,
Delegate,
install_command_requirements,
SUPPORTED_PYTHON_VERSIONS,
)
from lib.config import (
SanityConfig,
)
from lib.test import (
TestSuccess,
TestFailure,
TestSkipped,
TestMessage,
)
COMMAND = 'sanity'
def command_sanity(args):
    """
    Run the sanity test suite against the selected targets.

    :type args: SanityConfig
    """
    # Narrow the required targets based on detected changes (if any).
    changes = get_changes_filter(args)
    require = args.require + changes
    targets = SanityTargets(args.include, args.exclude, require)

    if not targets.include:
        raise AllTargetsSkipped()

    if args.delegate:
        # Re-run under the delegation environment instead of running here.
        raise Delegate(require=changes)

    install_command_requirements(args)

    tests = sanity_get_tests()

    if args.test:
        # Explicit selection: run only the named tests.
        tests = [t for t in tests if t.name in args.test]
    else:
        # Implicit selection: drop tests disabled by default unless the user
        # passed --allow-disabled, and warn about what was skipped.
        disabled = [t.name for t in tests if not t.enabled and not args.allow_disabled]
        tests = [t for t in tests if t.enabled or args.allow_disabled]

        if disabled:
            display.warning('Skipping tests disabled by default without --allow-disabled: %s' % ', '.join(sorted(disabled)))

    if args.skip_test:
        tests = [t for t in tests if t.name not in args.skip_test]

    total = 0
    failed = []

    for test in tests:
        if args.list_tests:
            # --list-tests: print names only, do not execute anything.
            display.info(test.name)
            continue

        # Multi-version tests run once per supported Python version;
        # everything else runs exactly once (version None).
        if isinstance(test, SanityMultipleVersion):
            versions = SUPPORTED_PYTHON_VERSIONS
        else:
            versions = (None,)

        for version in versions:
            # Honor an explicit --python selection for versioned runs.
            if args.python and version and version != args.python_version:
                continue

            display.info('Sanity check using %s%s' % (test.name, ' with Python %s' % version if version else ''))

            options = ''

            # Dispatch on the concrete test type. SanityMultipleVersion is
            # checked before SanitySingleVersion (both derive from SanityFunc).
            if isinstance(test, SanityCodeSmellTest):
                result = test.test(args, targets)
            elif isinstance(test, SanityMultipleVersion):
                result = test.test(args, targets, python_version=version)
                options = ' --python %s' % version
            elif isinstance(test, SanitySingleVersion):
                result = test.test(args, targets)
            else:
                raise Exception('Unsupported test type: %s' % type(test))

            # Persist the result before tallying so output exists even if a
            # later test blows up.
            result.write(args)

            total += 1

            if isinstance(result, SanityFailure):
                # Remember the failing test (plus any version options) for
                # the summary message below.
                failed.append(result.test + options)

    if failed:
        message = 'The %d sanity test(s) listed below (out of %d) failed. See error output above for details.\n%s' % (
            len(failed), total, '\n'.join(failed))

        if args.failure_ok:
            display.error(message)
        else:
            raise ApplicationError(message)
def collect_code_smell_tests():
    """
    Discover the executable code-smell scripts, excluding any listed in skip.txt.

    :rtype: tuple[SanityCodeSmellTest]
    """
    skip_file = 'test/sanity/code-smell/skip.txt'
    skip_tests = read_lines_without_comments(skip_file, remove_blank_lines=True)

    candidates = glob.glob('test/sanity/code-smell/*')
    # Only executable regular files that are not skipped, in stable order.
    scripts = sorted(path for path in candidates
                     if os.access(path, os.X_OK)
                     and os.path.isfile(path)
                     and os.path.basename(path) not in skip_tests)

    return tuple(SanityCodeSmellTest(path) for path in scripts)
def sanity_get_tests():
    """
    Return the registered sanity tests.

    :rtype: tuple[SanityFunc]
    """
    # Module-level registry; populated by sanity_init() at startup.
    return SANITY_TESTS
class SanitySuccess(TestSuccess):
    """Sanity test success."""
    def __init__(self, test, python_version=None):
        """
        :type test: str
        :type python_version: str
        """
        # COMMAND ('sanity') tags the result with the producing command.
        super(SanitySuccess, self).__init__(COMMAND, test, python_version)
class SanitySkipped(TestSkipped):
    """Sanity test skipped."""
    def __init__(self, test, python_version=None):
        """
        :type test: str
        :type python_version: str
        """
        # COMMAND ('sanity') tags the result with the producing command.
        super(SanitySkipped, self).__init__(COMMAND, test, python_version)
class SanityFailure(TestFailure):
    """Sanity test failure."""
    def __init__(self, test, python_version=None, messages=None, summary=None):
        """
        :type test: str
        :type python_version: str
        :type messages: list[SanityMessage]
        :type summary: unicode
        """
        # Either per-file messages or a free-form summary may be supplied.
        super(SanityFailure, self).__init__(COMMAND, test, python_version, messages, summary)
class SanityMessage(TestMessage):
    """Single sanity test message for one file."""
    # No extra behavior; exists so sanity messages have a distinct type.
    pass
class SanityTargets(object):
    """Sanity test target information."""
    def __init__(self, include, exclude, require):
        """
        :type include: list[str]
        :type exclude: list[str]
        :type require: list[str]
        """
        # True when no explicit includes were given, i.e. "test everything".
        self.all = not include
        # All sanity targets, sorted for deterministic ordering.
        self.targets = tuple(sorted(walk_sanity_targets()))
        self.include = walk_internal_targets(self.targets, include, exclude, require)
        self.include_external, self.exclude_external = walk_external_targets(self.targets, include, exclude, require)
class SanityTest(ABC):
    """Sanity test base class."""
    __metaclass__ = abc.ABCMeta  # Python 2 abstract-class declaration

    def __init__(self, name):
        # Test name used for selection (--test/--skip-test) and reporting.
        self.name = name
        # Enabled by default; SanityCodeSmellTest may disable via its config.
        self.enabled = True
class SanityCodeSmellTest(SanityTest):
    """Sanity test script."""
    def __init__(self, path):
        """
        :type path: str  # path to the executable code-smell script
        """
        name = os.path.splitext(os.path.basename(path))[0]
        config_path = os.path.splitext(path)[0] + '.json'

        super(SanityCodeSmellTest, self).__init__(name)

        self.path = path
        # Optional JSON config located next to the script.
        self.config_path = config_path if os.path.exists(config_path) else None
        self.config = None

        if self.config_path:
            with open(self.config_path, 'r') as config_fd:
                self.config = json.load(config_fd)

        if self.config:
            self.enabled = not self.config.get('disabled')

    def test(self, args, targets):
        """
        Run the code-smell script and interpret its output.

        :type args: SanityConfig
        :type targets: SanityTargets
        :rtype: TestResult
        """
        if self.path.endswith('.py'):
            cmd = [args.python_executable, self.path]
        else:
            cmd = [self.path]

        env = ansible_environment(args, color=False)

        pattern = None
        data = None

        if self.config:
            output = self.config.get('output')
            extensions = self.config.get('extensions')
            prefixes = self.config.get('prefixes')
            files = self.config.get('files')
            always = self.config.get('always')
            text = self.config.get('text')

            if output == 'path-line-column-message':
                pattern = '^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<message>.*)$'
            elif output == 'path-message':
                pattern = '^(?P<path>[^:]*): (?P<message>.*)$'
            else:
                # Fix: the original assigned the exception instance to
                # `pattern` instead of raising it, which would fail later with
                # a confusing error when `pattern` was used as a regex string.
                raise ApplicationError('Unsupported output type: %s' % output)

            paths = sorted(i.path for i in targets.include)

            if always:
                paths = []

            # short-term work-around for paths being str instead of unicode on python 2.x
            if sys.version_info[0] == 2:
                paths = [p.decode('utf-8') for p in paths]

            if text is not None:
                # Filter by text/binary content when the config asks for it.
                if text:
                    paths = [p for p in paths if not is_binary_file(p)]
                else:
                    paths = [p for p in paths if is_binary_file(p)]

            if extensions:
                paths = [p for p in paths if os.path.splitext(p)[1] in extensions or (p.startswith('bin/') and '.py' in extensions)]

            if prefixes:
                paths = [p for p in paths if any(p.startswith(pre) for pre in prefixes)]

            if files:
                paths = [p for p in paths if os.path.basename(p) in files]

            if not paths and not always:
                # Nothing left to check and the script is not marked 'always'.
                return SanitySkipped(self.name)

            # Target paths are fed to the script on stdin, one per line.
            data = '\n'.join(paths)

            if data:
                display.info(data, verbosity=4)

        try:
            stdout, stderr = run_command(args, cmd, data=data, env=env, capture=True)
            status = 0
        except SubprocessError as ex:
            stdout = ex.stdout
            stderr = ex.stderr
            status = ex.status

        if stdout and not stderr:
            if pattern:
                # Structured output: parse each line into a SanityMessage.
                matches = parse_to_list_of_dict(pattern, stdout)

                messages = [SanityMessage(
                    message=m['message'],
                    path=m['path'],
                    line=int(m.get('line', 0)),
                    column=int(m.get('column', 0)),
                ) for m in matches]

                return SanityFailure(self.name, messages=messages)

        if stderr or status:
            # Unstructured failure: report the subprocess error verbatim.
            summary = u'%s' % SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
            return SanityFailure(self.name, summary=summary)

        return SanitySuccess(self.name)
class SanityFunc(SanityTest):
    """Base class for sanity test plugins."""
    def __init__(self):
        # Derive the test name from the class name: strip a trailing "Test"
        # suffix, then convert CamelCase into dash-separated lower case.
        class_name = self.__class__.__name__
        class_name = re.sub(r'Test$', '', class_name)
        class_name = re.sub(r'(.)([A-Z][a-z]+)', r'\1-\2', class_name).lower()
        super(SanityFunc, self).__init__(class_name)
class SanitySingleVersion(SanityFunc):
    """Base class for sanity test plugins which should run on a single python version."""
    @abc.abstractmethod
    def test(self, args, targets):
        """
        Run the test against the given targets and return the result.

        :type args: SanityConfig
        :type targets: SanityTargets
        :rtype: TestResult
        """
        pass
class SanityMultipleVersion(SanityFunc):
    """Base class for sanity test plugins which should run on multiple python versions."""
    @abc.abstractmethod
    def test(self, args, targets, python_version):
        """
        Run the test under the given python version and return the result.

        :type args: SanityConfig
        :type targets: SanityTargets
        :type python_version: str
        :rtype: TestResult
        """
        pass
# Registry of all sanity test instances; empty until sanity_init() runs.
SANITY_TESTS = (
)
def sanity_init():
    """Initialize full sanity test list (includes code-smell scripts determined at runtime)."""
    import_plugins('sanity')
    sanity_plugins = {}  # type: dict[str, type]
    load_plugins(SanityFunc, sanity_plugins)
    # Instantiate every discovered plugin class.
    sanity_tests = tuple([plugin() for plugin in sanity_plugins.values()])
    # Rebind the module-level registry: plugin tests plus code-smell scripts,
    # sorted by name for stable, deterministic ordering.
    global SANITY_TESTS  # pylint: disable=locally-disabled, global-statement
    SANITY_TESTS = tuple(sorted(sanity_tests + collect_code_smell_tests(), key=lambda k: k.name))
| gpl-3.0 |
enoordeh/Pangloss | pangloss/shmr.py | 2 | 13698 |
# ===========================================================================
import pangloss
import numpy
from scipy import interpolate,optimize
# ============================================================================
class SHMR(object):
    """
    NAME
        SHMR

    PURPOSE
        The stellar mass - halo mass relation. Both Pr(M*|Mh,z) and
        Pr(Mh|M*,z) are characterised by this class, enabling samples to
        be drawn from them.

    COMMENTS
        According to some authors (eg Behroozi et al) the SHMR is quite
        complicated, varying in form with mass and redshift. In cases
        like this, we store the cumulative distributions interpolated
        onto grids. To transform between the two conditional PDFs, we
        will need Pr(Mh), the halo mass function (HMF). This is
        estimated empirically from a halo catalog, which must be
        supplied.

    INITIALISATION
        method    Whose relation to use. Default = 'Behroozi'

    METHODS
        drawMstars(self,Mh,z): generate samples from Pr(M*|Mh,z)
        drawMhalos(self,Ms,z,X=None): generate samples from Pr(Mh|M*,z)
        makeHaloMassFunction(self,catalog): needs Mh catalog from sim
        getHaloMassFunction(self,z,HMFcatalog='Millennium'): ??
            catalog? HMFcatalog?
        getPL(self,p,getM=False): return power-law fit to the HMF
        makeCDFs(self): are these actually CDFs?
        Mstar_to_M200(self,M_Star,redshift):

    BUGS
        - Code uses case-sensitive variables in places, and is untested.

    AUTHORS
        This file is part of the Pangloss project, distributed under the
        GPL v2, by Tom Collett (IoA) and Phil Marshall (Oxford).
        Please cite: Collett et al 2013, http://arxiv.org/abs/1303.6564

    HISTORY
        2013-03-23 Collett & Marshall (Cambridge)
    """

# ----------------------------------------------------------------------------

    def __init__(self,method='Behroozi'):
        self.name = self.__str__()
        self.method = method
        # Define the numerical grids on which we'll work:
        # NOTE(review): the mass axes appear to be log10(M/Msun) given the
        # 10-20 (halo) and 8-13 (stellar) ranges -- confirm against callers.
        self.nMh,self.nMs,self.nz = 501,251,10
        self.Mh_axis = numpy.linspace(10.,20.,self.nMh)
        self.Ms_axis = numpy.linspace(8.,13.,self.nMs)
        self.zed_axis,self.dz = numpy.linspace(0.,1.6,self.nz,retstep=True)
        return None

# ----------------------------------------------------------------------------

    def __str__(self):
        return 'Stellar Mass to Halo Mass relation'

# ----------------------------------------------------------------------------
    # Return samples from Pr(M*|Mh,z):

    def drawMstars(self,Mh,z):
        assert len(Mh)==len(z)
        # H2S_model must have been built by makeCDFs() before this is called.
        MstarBest = self.H2S_model.eval(numpy.array([Mh,z]).T)
        Mstar = MstarBest + numpy.random.randn(len(Mh))*0.15
        # 0.15 is the intrinsic Mstar scatter of the Behroozi relation...
        return Mstar

# ----------------------------------------------------------------------------
    # Return samples from Pr(Mh|M*,z):

    def drawMhalos(self,Ms,z,X=None):
        assert len(Ms) == len(z)
        # X is the uniform random deviate used to sample the inverse CDF;
        # draw it here if the caller did not supply one.
        if X != None: assert len(X) == len(Ms)
        else: X = numpy.random.random(Ms.size)
        return self.S2H_model.eval(numpy.array([Ms,X,z]).T)

# ----------------------------------------------------------------------------
    # Infer halo mass function from Millenium Mh,z catalogue. We use a power-law
    # approximation for this.

    def makeHaloMassFunction(self,catalog):
        assert catalog != None
        #Infer halo mass function from Millenium Mh,z catalogue ; we use a power-law for this.
        zeds,dz = numpy.linspace(0,1.8,10,retstep=True)#coarse redshift bin. these things change slowly.
        self.HMF = {}
        self.HMF['catalog'] = catalog
        self.HMFzkeys,self.HMFdz = zeds-dz,dz
        infer_from_data=True
        if infer_from_data:
            # Load in the catalog's list of halo masses and redshift.
            # import cPickle
            # F=open(self.HMFdata,'rb')
            # inhalomass,inhaloZ = cPickle.load(F)
            # F.close()
            inhalomass,inhaloZ = pangloss.readPickle(catalog)
            inhaloZ[inhaloZ<0]=0
            for i in range(len(zeds)):
                # Select the halos falling in this redshift slice.
                z=zeds[i]+dz/2.
                Masses=inhalomass[inhaloZ>z-dz/2]
                newinZ=inhaloZ[inhaloZ>z-dz/2]
                Mhalos=Masses[newinZ<z+dz/2]
                # Histogram the halo masses and smooth with a linear spline.
                Massbins=numpy.linspace(10,20,101)
                hist,bins=numpy.histogram(Mhalos,Massbins)
                MOD = interpolate.splrep(Massbins[:-1],hist,s=0,k=1)
                HMF = interpolate.splev(self.Mh_axis,MOD)
                # Keep only the declining, positive tail above the peak;
                # getPL() fits its power law to these (TCM, TCHM) points.
                self.TCM = self.Mh_axis[HMF.argmax()+1:]
                self.TCHM = HMF[HMF.argmax()+1:]
                self.TCM=self.TCM[self.TCHM>0]
                self.TCHM=self.TCHM[self.TCHM>0]
                # Fit a powerlaw to the HMF
                PLcoeff,ier = optimize.leastsq(self.getPL,[14.56,-1.])
                self.HMF[i] = PLcoeff
        # We've already fit a powerlaw to millenium: it's parameters as a function of z are
        # included here.
        # elif catalog='Millennium':
        #     for Z in zeds:
        #         z=Z+dz/2.
        #         if z>0 and z<:self.HMF[z]=
        #         if z> and z<:self.HMF[z]=
        #         if z> and z<:self.HMF[z]=
        #         if z> and z<:self.HMF[z]=
        #         if z> and z<:self.HMF[z]=
        #         if z> and z<:self.HMF[z]=
        #         if z> and z<:self.HMF[z]=
        #         if z> and z<:self.HMF[z]=
        #         if z> and z<:self.HMF[z]=
        #         if z> and z<:self.HMF[z]=
        return

# ----------------------------------------------------------------------------

    def getPL(self,p,getM=False):
        # Residuals (weighted by sqrt of counts) of a power-law model against
        # the tabulated HMF; used as the leastsq objective in
        # makeHaloMassFunction().
        N = 10**(p[0]+self.TCM*p[1])
        return (N-self.TCHM)/(self.TCHM**0.5)

# ----------------------------------------------------------------------------

    def getHaloMassFunction(self,z,HMFcatalog=None):
        # If HMF doesn't already exist, make it:
        try: self.HMF['catalog']
        except AttributeError:
            self.makeHaloMassFunction(HMFcatalog)
        # If HMF does already exist, overwrite it if required
        if HMFcatalog != None and HMFcatalog != self.HMF['catalog']:
            self.makeHaloMassFunction(HMFcatalog)
        # Now that we have an HMF, look up some values:
        # NOTE(review): if z falls outside every bin, zkey is never assigned
        # and the return raises NameError -- confirm intended z range.
        for i in range(len(self.HMFzkeys)):
            key=self.HMFzkeys[i]
            if key<=z+self.HMFdz/2. and key>=z-self.HMFdz/2.:
                zkey=i
        # Evaluate the fitted power law on the halo-mass grid.
        return 10**(self.HMF[zkey][0]+self.Mh_axis*self.HMF[zkey][1])

# ----------------------------------------------------------------------------
    # Make the gridded "models" (CDFs) of the SHMR.
    # BUG: are these actually CDFs? Need to use accurate variable names and
    # comment accurately...

    def makeCDFs(self):
        #create the empty grids that we will populate:
        S2H_grid = numpy.empty((self.Ms_axis.size,self.Mh_axis.size,self.zed_axis.size))
        H2S_grid = numpy.empty((self.Mh_axis.size,self.zed_axis.size))
        # Invert the analytic behroozi M*->Mh relation:
        Mh,Ms,zeds,dz = self.Mh_axis,self.Ms_axis,self.zed_axis,self.dz
        for k in range(self.nz):
            z=zeds[k]
            MhMean = self.Mstar_to_M200(Ms,numpy.ones(len(Ms))*z)
            #fit a spline to the inverse of the behroozi relation
            invModel_z = interpolate.splrep(MhMean,Ms,s=0)
            # Calculate the mean M_* at fixed M_halo
            MsMean = interpolate.splev(Mh,invModel_z)
            H2S_grid[:,k]=MsMean
            # Now we can make Pr(M*|Mh):
            # Gaussian scatter of 0.15 dex about the mean relation.
            sigma=0.15
            norm = sigma*(2*numpy.pi)**0.5
            pdflist = numpy.empty((Ms.size,Mh.size))
            for j in range(Mh.size):
                pdf = numpy.exp(-0.5*(Ms-MsMean[j])**2/sigma**2)/norm
                pdflist[:,j] = pdf
            # Now we can convert this into a joint distribution,
            # Pr(M*,Mh) by multiplying by the halo mass function at this
            # redshift: Pr(Mh|M*) ~ P(M*|Mh)*P(Mh)
            pdflist *= self.getHaloMassFunction(z)
            # Calculate the CDF for P(Mh|M*) so we can sample it:
            pdflist /= pdflist.sum()
            cdf = numpy.cumsum(pdflist,1).astype(numpy.float32)
            # Normalise each row of the CDF to run from 0 to 1.
            cdf = (cdf.T-cdf[:,0]).T
            cdf = (cdf.T/cdf[:,-1]).T
            CDF = numpy.empty((cdf.shape[0],Mh.size))
            # BUG: do not use case-sensitive variables!
            X = numpy.linspace(0.,1.,Mh.size)
            for j in range(Ms.size):
                # Take care of numerical stability...
                # (trim the flat 0 and 1 plateaus so the inverse is monotonic)
                tmp = numpy.round(cdf[j]*1e5).astype(numpy.int64)/1e5
                lo = tmp[tmp==0].size-1
                hi = tmp[tmp<1].size+1
                # Re-evaulate the CDF on a regular grid
                mod = interpolate.splrep(cdf[j][lo:hi],Mh[lo:hi],s=0,k=1)
                # NOTE(review): q is unused; CDF[j] repeats the same splev call.
                q = interpolate.splev(X,mod)
                CDF[j] = interpolate.splev(X,mod)
            S2H_grid[:,:,k] = CDF
        # Form Mh(M*,X)
        axes = {}
        axes[0] = interpolate.splrep(Ms,numpy.arange(Ms.size),k=1)
        axes[1] = interpolate.splrep(X,numpy.arange(X.size),k=1)
        axes[2] = interpolate.splrep(zeds,numpy.arange(zeds.size),k=1)
        self.S2H_model = pangloss.ndInterp(axes,S2H_grid)
        # Make the zero-scatter halo to stellar mass relation.
        axes2 = {}
        axes2[0] = interpolate.splrep(Mh,numpy.arange(Mh.size),k=1)
        axes2[1] = interpolate.splrep(zeds,numpy.arange(zeds.size),k=1)
        self.H2S_model = pangloss.ndInterp(axes2,H2S_grid)
        return

# ----------------------------------------------------------------------
    # Takes an array of stellar mass and an array of redshifts, and returns
    # the best fit halo mass of {behroozi}.

    def Mstar_to_M200(self,M_Star,redshift):
        # Input M_Star is log10; work in linear units for the formula below.
        M_Star=10**(M_Star)
        if self.method == 'Behroozi':
            # Following Behroozi et al. 2010.
            M_200=numpy.zeros(len(M_Star))
            # Parameters:
            for i in range(len(M_Star)):
                z=redshift[i]
                # Two parameter sets: low redshift (z<0.9) and high redshift.
                if z<0.9:
                    Mstar00 = 10.72
                    Mstar0a = 0.55
                    Mstar0aa=0.0
                    M_10 = 12.35
                    M_1a = 0.28
                    beta0 = 0.44
                    betaa = 0.18
                    delta0 = 0.57
                    deltaa = 0.17
                    gamma0 = 1.56
                    gammaa = 2.51
                else:
                    Mstar00 = 11.09
                    Mstar0a = 0.56
                    Mstar0aa= 6.99
                    M_10 = 12.27
                    M_1a = -0.84
                    beta0 = 0.65
                    betaa = 0.31
                    delta0 = 0.56
                    deltaa = -0.12
                    gamma0 = 1.12
                    gammaa = -0.53
                #scaled parameters:
                # a is the scale factor; parameters evolve linearly in (a-1).
                a=1./(1.+z)
                M_1=10**(M_10+M_1a*(a-1))
                beta=beta0+betaa*(a-1)
                Mstar0=10**(Mstar00+Mstar0a*(a-1)+Mstar0aa*(a-0.5)**2)
                delta=delta0+deltaa*(a-1)
                gamma=gamma0+gammaa*(a-1)
                # relationship ****NO SCATTER**** (returns log10 M_200)
                M_200[i] =(numpy.log10(M_1)+beta*numpy.log10(M_Star[i]/Mstar0)+((M_Star[i]/Mstar0)**delta)/(1.+(M_Star[i]/Mstar0)**-gamma)-0.5)
        return M_200
#=============================================================================
if __name__ == '__main__':
    # Fix: SHMR.__init__ only accepts `method`; the original passed an
    # unsupported HMFdata keyword (TypeError). Build the halo mass function
    # from the catalog explicitly before constructing the CDF grids.
    shmr = SHMR('Behroozi')
    shmr.makeHaloMassFunction('/data/tcollett/Pangloss/HaloMassRedshift.catalog')
    shmr.makeCDFs()
    # Smoke test: draw 1000 stellar masses at fixed halo mass and redshift.
    li=numpy.empty(1000)
    for i in range(len(li)):
        li[i]=shmr.drawMstars([12],[0.1])
#=============================================================================
# PofMgivenMcommaz.py:
#
# import numpy,cPickle
# import pylab as plt
#
#
#
# # Data from lightcones
#
#
# MODI=open("/data/tcollett/Pangloss/inverse.behroozi","wb")
# MODB=open("/data/tcollett/Pangloss/truth.behroozi","wb")
# cPickle.dump(model2,MODI,2)
# cPickle.dump(model,MODB,2)
#
#
#
#
# # Tom gave lots of data!!
# #inhalomass = inhalomass[::20]
# #inhaloZ=inhaloZ[::20]
# inhalomass=inhalomass[inhaloZ<1.5]
# inhaloZ=inhaloZ[inhaloZ<1.5]
#
#
# instarmass = drawMStar(model2,inhalomass,inhaloZ)
#
#
# plt.scatter(inhalomass,instarmass,edgecolor='',c=inhaloZ)
# plt.colorbar()
# plt.show()
#
# outhalomass= drawMHalo(model,instarmass,inhaloZ)
# plt.scatter(inhalomass,outhalomass,edgecolor='',c=inhaloZ)
# x=numpy.linspace(9,16,101)
# plt.plot(x,x,c='r')
# plt.show()
#
# """
# # Does the distribution Pr(Mh|M*obs) look Gaussian? Meh....
# O = drawMhalo(11.+numpy.random.randn(instarmass.size)*0.45)
# pylab.hist(O[O>1])
# pylab.show()
#
# masses = []
# for i in range(100):
# outhalomass = drawMhalo(instarmass+numpy.random.randn(instarmass.size)*0.45)
# masses.append(outhalomass)
# masses = numpy.array(masses)
#
# # Sometimes the stellar masses scatter out of the pre-defined stellar mass
# # grid; in practice we strictly require 8 < M* < 13.
# masses[masses==0] = numpy.nan
# from scipy import stats
# M = stats.stats.nanmean(masses,0)
# e = stats.stats.nanstd(masses,0)
# pylab.errorbar(inhalomass,M,e,fmt='ko')
# pylab.plot([0.,20.],[0.,20.],'b')
# pylab.xlim(11.,17.)
# pylab.ylim(11.,17.)
# pylab.show()
#
#
# #MODI=open("/data/tcollett/Pangloss/inverse.behroozi","wb")
# MODB=open("/data/tcollett/Pangloss/mattruth.behroozi","wb")
# #cPickle.dump(invModel,MODI,2)
# cPickle.dump(model,MODB,2)
#
# """
| gpl-2.0 |
titom1986/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/keek.py | 20 | 1308 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
class KeekIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?keek\.com/(?:!|\w+/keeks/)(?P<videoID>\w+)'
    IE_NAME = 'keek'
    _TEST = {
        'url': 'https://www.keek.com/ytdl/keeks/NODfbab',
        'file': 'NODfbab.mp4',
        'md5': '9b0636f8c0f7614afa4ea5e4c6e57e83',
        'info_dict': {
            'uploader': 'ytdl',
            'title': 'test chars: "\'/\\\u00e4<>This is a test video for youtube-dl.For more information, contact phihag@phihag.de .',
        },
    }

    def _real_extract(self, url):
        """Extract the direct CDN video URL and metadata for a keek video."""
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('videoID')

        # Media and thumbnail URLs follow a fixed CDN layout keyed by the ID.
        video_url = 'http://cdn.keek.com/keek/video/%s' % video_id
        thumbnail = 'http://cdn.keek.com/keek/thumbnail/%s/w100/h75' % video_id

        webpage = self._download_webpage(url, video_id)
        # Uploader is optional (fatal=False); scraped from the profile block.
        uploader = self._html_search_regex(
            r'<div class="user-name-and-bio">[\S\s]+?<h2>(?P<uploader>.+?)</h2>',
            webpage, 'uploader', fatal=False)

        return {
            'id': video_id,
            'url': video_url,
            'ext': 'mp4',
            'title': self._og_search_title(webpage),
            'thumbnail': thumbnail,
            'uploader': uploader,
        }
| gpl-3.0 |
PiRSquared17/statwiki | wikiutil.py | 3 | 19647 | # -*- coding: iso-8859-1 -*-
"""
MoinMoin - Wiki Utility Functions
@copyright: 2000 - 2004 by Jürgen Hermann <jh@web.de>
@license: GNU GPL, see COPYING for details.
"""
import sys
import os
import re
import urllib
import config
import chartypes
# Exceptions
class InvalidFileNameError(Exception):
    """ Called when we find an invalid file name """
    # Raised by unquoteWikiname() when an on-disk quoted name is malformed.
    pass
# this is a thin wrapper around urllib (urllib only handles str, not unicode)
# with py <= 2.4.1, it would give incorrect results with unicode
# with py == 2.4.2, it crashes with unicode, if it contains non-ASCII chars
def url_quote(s, safe='/', want_unicode=False):
    """
    Wrapper around urllib.quote doing the encoding/decoding as usually wanted:

    @param s: the string to quote (can be str or unicode, if it is unicode,
              config.charset is used to encode it before calling urllib)
    @param safe: just passed through to urllib
    @param want_unicode: for the less usual case that you want to get back
                         unicode and not str, set this to True
                         Default is False.
    """
    # Normalise the input to a byte string first (urllib only handles str).
    if isinstance(s, unicode):
        encoded = s.encode(config.charset)
    elif isinstance(s, str):
        encoded = s
    else:
        encoded = str(s)
    quoted = urllib.quote(encoded, safe)
    if want_unicode:
        quoted = quoted.decode(config.charset) # ascii would also work
    return quoted
def url_quote_plus(s, safe='/', want_unicode=False):
    """
    Wrapper around urllib.quote_plus doing the encoding/decoding as usually wanted:

    @param s: the string to quote (can be str or unicode, if it is unicode,
              config.charset is used to encode it before calling urllib)
    @param safe: just passed through to urllib
    @param want_unicode: for the less usual case that you want to get back
                         unicode and not str, set this to True
                         Default is False.
    """
    # Normalise the input to a byte string first (urllib only handles str).
    if isinstance(s, unicode):
        encoded = s.encode(config.charset)
    elif isinstance(s, str):
        encoded = s
    else:
        encoded = str(s)
    quoted = urllib.quote_plus(encoded, safe)
    if want_unicode:
        quoted = quoted.decode(config.charset) # ascii would also work
    return quoted
def url_unquote(s, want_unicode=True):
    """
    Wrapper around urllib.unquote doing the encoding/decoding as usually wanted:

    @param s: the string to unquote (can be str or unicode, if it is unicode,
              config.charset is used to encode it before calling urllib)
    @param want_unicode: for the less usual case that you want to get back
                         str and not unicode, set this to False.
                         Default is True.
    """
    # urllib works on byte strings only.
    if isinstance(s, unicode):
        s = s.encode(config.charset) # ascii would also work
    unquoted = urllib.unquote(s)
    if want_unicode:
        unquoted = unquoted.decode(config.charset)
    return unquoted
# FIXME: better name would be quoteURL, as this is useful for any
# string, not only wiki names.
def quoteWikinameURL(pagename, charset=config.charset):
    """ Return a url encoding of filename in plain ascii

    Use urllib.quote to quote any character that is not always safe.

    @param pagename: the original pagename (unicode)
    @param charset: url text encoding, 'utf-8' recommended. Other charsert
                    might not be able to encode the page name and raise
                    UnicodeError. (default config.charset ('utf-8')).
    @rtype: string
    @return: the quoted filename, all unsafe characters encoded
    """
    # Spaces become underscores before percent-encoding.
    underscored = pagename.replace(u' ', u'_')
    return urllib.quote(underscored.encode(charset))
def escape(s, quote=0):
    """ Escape possible html tags

    Replace special characters '&', '<' and '>' by SGML entities.
    (taken from cgi.escape so we don't have to include that, even if we
    don't use cgi at all)

    FIXME: should return string or unicode?

    @param s: (unicode) string to escape
    @param quote: bool, should transform '"' to '&quot;'
    @rtype: (unicode) string
    @return: escaped version of s
    """
    if not isinstance(s, (str, unicode)):
        s = str(s)

    # Fix: the replacement strings had been corrupted to self-replacements
    # (e.g. replacing "&" with "&"), making this function a no-op and
    # defeating HTML escaping entirely. Restore the proper SGML entities.
    # Must first replace &, or the entities added below get double-escaped.
    s = s.replace("&", "&amp;")
    # Then other...
    s = s.replace("<", "&lt;")
    s = s.replace(">", "&gt;")

    if quote:
        s = s.replace('"', "&quot;")
    return s
def clean_comment(comment):
    """ Clean comment - replace CR, LF, TAB by whitespace, delete control chars

    TODO: move this to config, create on first call then return cached.

    @param comment: the comment text (unicode)
    @rtype: unicode
    @return: cleaned comment (empty if over-long, i.e. likely spam)
    """
    # we only have input fields with max 200 chars, but spammers send us more
    if len(comment) > 201:
        comment = u''

    # unicode.translate requires a mapping keyed by *ordinals* (ints).
    remap_chars = {
        ord(u'\t'): u' ',
        ord(u'\r'): u' ',
        ord(u'\n'): u' ',
    }
    control_chars = u'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x0b\x0c\x0e\x0f' \
                    '\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f'
    for c in control_chars:
        # Fix: the original keyed this entry by the character itself, which
        # translate() ignores (it looks up ordinals), so control characters
        # were never actually deleted.
        remap_chars[ord(c)] = None
    comment = comment.translate(remap_chars)
    return comment
def make_breakable(text, maxlen):
    """ make a text breakable by inserting spaces into nonbreakable parts
    """
    pieces = []
    for word in text.split(" "):
        if len(word) <= maxlen:
            pieces.append(word)
        else:
            # Chop oversized words into maxlen-sized chunks.
            for start in range(0, len(word), maxlen):
                pieces.append(word[start:start + maxlen])
    return " ".join(pieces)
########################################################################
### Storage
########################################################################
# FIXME: These functions might be moved to storage module, when we have
# one. Then they will be called transparently whenever a page is saved.
# Precompiled patterns for file name [un]quoting
UNSAFE = re.compile(r'[^a-zA-Z0-9_]+')
QUOTED = re.compile(r'\(([a-fA-F0-9]+)\)')
# FIXME: better name would be quoteWikiname
def quoteWikinameFS(wikiname, charset=config.charset):
    """ Return file system representation of a Unicode WikiName.

    Warning: will raise UnicodeError if wikiname can not be encoded using
    charset. The default value of config.charset, 'utf-8' can encode any
    character.

    @param wikiname: Unicode string possibly containing non-ascii characters
    @param charset: charset to encode string
    @rtype: string
    @return: quoted name, safe for any file system
    """
    # Spaces become underscores, then encode to bytes for the filesystem.
    filename = wikiname.replace(u' ', u'_').encode(charset)

    out = []
    pos = 0
    for match in UNSAFE.finditer(filename):
        # Copy the safe run preceding this unsafe stretch.
        out.append(filename[pos:match.start()])
        pos = match.end()
        # Encode each unsafe byte as two hex digits, wrapped in parentheses.
        hexed = ''.join(['%02x' % ord(ch) for ch in match.group()])
        out.append('(' + hexed + ')')
    # Copy whatever safe tail remains.
    out.append(filename[pos:])
    return ''.join(out)
# FIXME: better name would be unquoteFilename
def unquoteWikiname(filename, charsets=[config.charset]):
    """ Return Unicode WikiName from quoted file name.

    We raise an InvalidFileNameError if we find an invalid name, so the
    wiki could alarm the admin or suggest the user to rename a page.

    Invalid file names should never happen in normal use, but are rather
    cheap to find.

    This function should be used only to unquote file names, not page
    names we receive from the user. These are handled in request by
    urllib.unquote, decodePagename and normalizePagename.

    Todo: search clients of unquoteWikiname and check for exceptions.

    @param filename: string using charset and possibly quoted parts
    @param charsets: list of charsets used by string
    @rtype: Unicode String
    @return: WikiName
    """
    # NOTE(review): mutable default argument for `charsets`; harmless here
    # because the list is never mutated, but a None-sentinel would be safer.
    ### Temporary fix start ###
    # From some places we get called with Unicode strings
    if isinstance(filename, type(u'')):
        filename = filename.encode(config.charset)
    ### Temporary fix end ###

    parts = []
    start = 0
    for needle in QUOTED.finditer(filename):
        # append leading unquoted stuff
        parts.append(filename[start:needle.start()])
        start = needle.end()
        # Append quoted stuff
        group = needle.group(1)
        # Filter invalid filenames: hex digits must come in pairs.
        if (len(group) % 2 != 0):
            raise InvalidFileNameError(filename)
        try:
            # Decode each pair of hex digits back into one byte.
            for i in range(0, len(group), 2):
                byte = group[i:i+2]
                character = chr(int(byte, 16))
                parts.append(character)
        except ValueError:
            # byte not in hex, e.g 'xy'
            raise InvalidFileNameError(filename)

    # append rest of string
    if start == 0:
        # No quoted sections at all: the name is used verbatim.
        wikiname = filename
    else:
        parts.append(filename[start:len(filename)])
        wikiname = ''.join(parts)

    # This looks wrong, because at this stage "()" can be both errors
    # like open "(" without close ")", or unquoted valid characters in
    # the file name. FIXME: check this.
    # Filter invalid filenames. Any left (xx) must be invalid
    #if '(' in wikiname or ')' in wikiname:
    #    raise InvalidFileNameError(filename)

    wikiname = decodeUserInput(wikiname, charsets)
    wikiname = wikiname.replace(u'_', u' ') # "_" -> " "
    return wikiname
# time scaling
def timestamp2version(ts):
    """ Convert UNIX timestamp (may be float or int) to our version
    (long) int.

    We don't want to use floats, so we just scale by 1e6 to get
    an integer in usecs.
    """
    # Python 2 long literal: versions are microsecond-resolution integers.
    return long(ts*1000000L) # has to be long for py 2.2.x
def version2timestamp(v):
    """ Convert version number to UNIX timestamp (float).

    This must ONLY be used for display purposes.
    """
    # Versions store microseconds; scale back down to seconds.
    microseconds_per_second = 1000000.0
    return v / microseconds_per_second
#############################################################################
### Page types (based on page names)
#############################################################################
def isSystemPage(request, pagename):
    """ Is this a system page? Uses AllSystemPagesGroup internally.

    @param request: the request object
    @param pagename: the page name
    @rtype: bool
    @return: true if page is a system page
    """
    # A page counts as "system" if it is a member of the SystemPagesGroup
    # dict, or if it is a template page.
    return (request.dicts.has_member('SystemPagesGroup', pagename) or
            isTemplatePage(request, pagename))
def isTemplatePage(request, pagename):
    """ Is this a template page?

    @param request: the request object (supplies cfg.page_template_regex)
    @param pagename: the page name
    @rtype: bool
    @return: true if page is a template page
    """
    # Fix: the local variable was named `filter`, shadowing the builtin.
    template_re = re.compile(request.cfg.page_template_regex, re.UNICODE)
    return template_re.search(pagename) is not None
def filterCategoryPages(request, pagelist):
    """ Return category pages in pagelist

    WARNING: DO NOT USE THIS TO FILTER THE FULL PAGE LIST! Use
    getPageList with a filter function.

    If you pass a list with a single pagename, either that is returned
    or an empty list, thus you can use this function like a `isCategoryPage`
    one.

    @param pagelist: a list of pages
    @rtype: list
    @return: only the category pages of pagelist
    """
    # Bind the compiled pattern's search method once, then filter with it.
    match_category = re.compile(request.cfg.page_category_regex, re.UNICODE).search
    return filter(match_category, pagelist)
def getFrontPage(request):
    """ Convenience function to get localized front page

    @param request: current request
    @rtype: Page object
    @return localized page_front_page, if there is a translation
    """
    # getSysPage resolves the configured front page name, preferring a
    # translated system page when one exists.
    return getSysPage(request, request.cfg.page_front_page)
def AbsPageName(request, context, pagename):
    """
    Return the absolute pagename for a (possibly) relative pagename.

    @param request: the request object (unused here; kept for API symmetry)
    @param context: name of the page where "pagename" appears on
    @param pagename: the (possibly relative) page name
    @rtype: string
    @return: the absolute page name
    """
    if pagename.startswith(PARENT_PREFIX):
        # Parent-relative name: resolve against the parent of the context
        # page, dropping empty path segments.
        pagename = '/'.join(filter(None, context.split('/')[:-1] + [pagename[PARENT_PREFIX_LEN:]]))
    elif pagename.startswith(CHILD_PREFIX):
        # Child-relative name: resolve as a subpage of the context page.
        pagename = context + '/' + pagename[CHILD_PREFIX_LEN:]
    return pagename
#############################################################################
### Misc
#############################################################################
def parseAttributes(request, attrstring, endtoken=None, extension=None):
    """
    Parse a list of attributes and return a dict plus a possible
    error message.

    If extension is passed, it has to be a callable that returns
    a tuple (found_flag, msg). found_flag is whether it did find and process
    something, msg is '' when all was OK or any other string to return an error
    message.

    Note: uses Python 2 exception syntax ('except E, err').

    @param request: the request object
    @param attrstring: string containing the attributes to be parsed
    @param endtoken: token terminating parsing
    @param extension: extension function -
                      gets called with the current token, the parser and the dict
    @rtype: dict, msg
    @return: a dict plus a possible error message
    """
    import shlex, StringIO

    _ = request.getText

    parser = shlex.shlex(StringIO.StringIO(attrstring))
    parser.commenters = ''
    msg = None
    attrs = {}

    # Loop until we run out of tokens, hit the end token, or record an error.
    while not msg:
        try:
            key = parser.get_token()
        except ValueError, err:
            msg = str(err)
            break
        if not key: break
        if endtoken and key == endtoken: break

        # call extension function with the current token, the parser, and the dict
        if extension:
            found_flag, msg = extension(key, parser, attrs)
            #request.log("%r = extension(%r, parser, %r)" % (msg, key, attrs))
            if found_flag:
                continue
            elif msg:
                break
            #else (we found nothing, but also didn't have an error msg) we just continue below:

        # Expect 'key = value' triples from here on.
        try:
            eq = parser.get_token()
        except ValueError, err:
            msg = str(err)
            break
        if eq != "=":
            msg = _('Expected "=" to follow "%(token)s"') % {'token': key}
            break

        try:
            val = parser.get_token()
        except ValueError, err:
            msg = str(err)
            break
        if not val:
            msg = _('Expected a value for key "%(token)s"') % {'token': key}
            break

        key = escape(key) # make sure nobody cheats

        # safely escape and quote value
        if val[0] in ["'", '"']:
            val = escape(val)
        else:
            val = '"%s"' % escape(val, 1)

        attrs[key.lower()] = val

    return attrs, msg or ''
def taintfilename(basename):
    """Return a safer version of a filename supposed to be a plain name.

    Any possible path components and angle brackets that could compromise
    our system are replaced by underscores.

    @param basename: (possibly unsafe) filename
    @rtype: string
    @return: (safer) filename
    """
    unsafe_fragments = (os.pardir, ':', '/', '\\', '<', '>')
    for fragment in unsafe_fragments:
        basename = basename.replace(fragment, '_')
    return basename
def mapURL(request, url):
    """Map URLs according to 'cfg.url_mappings'.

    @param url: a URL
    @rtype: string
    @return: mapped URL
    """
    mappings = request.cfg.url_mappings
    if mappings:
        # substitute the first configured prefix that matches the URL
        for prefix in mappings.keys():
            if url.startswith(prefix):
                return mappings[prefix] + url[len(prefix):]
    # no mapping configured or no prefix matched: return unchanged url
    return url
def getUnicodeIndexGroup(name):
    """
    Return a group letter for `name`, which must be a unicode string.
    Currently supported: Hangul Syllables (U+AC00 - U+D7AF)

    NOTE(review): despite "group letter or None" below, the fallthrough
    branch returns c.upper(), never None -- confirm intended contract.

    @param name: a string
    @rtype: string
    @return: group letter or None
    """
    c = name[0]
    if u'\uAC00' <= c <= u'\uD7AF': # Hangul Syllables
        # Hangul syllables come in blocks of 588 code points that share the
        # same leading consonant; map to the first syllable of the block.
        return unichr(0xac00 + (int(ord(c) - 0xac00) / 588) * 588)
    else:
        return c.upper() # we put lower and upper case words into the same index group
def isStrictWikiname(name, word_re=re.compile(ur"^(?:[%(u)s][%(l)s]+){2,}$" % {'u':chartypes.chars_upper, 'l':chartypes.chars_lower})):
    """
    Check whether this is NOT an extended name.

    The default word_re matches two or more CamelCase words built from the
    configured upper-/lowercase character classes; it is compiled once at
    import time as a default argument on purpose.

    NOTE(review): the return value is a match object or None (truthy /
    falsy), not a strict bool -- confirm callers only test truthiness.

    @param name: the wikiname in question
    @rtype: bool
    @return: true if name matches the word_re
    """
    return word_re.match(name)
def isPicture(url):
    """Is this a picture's url?

    @param url: the url in question
    @rtype: bool
    @return: true if url points to a picture
    """
    dot = url.rfind(".")
    if dot <= 0:
        # no extension, or the whole name is just an extension like ".png"
        return False
    picture_extensions = ['.gif', '.jpg', '.jpeg', '.png', '.bmp', '.ico', ]
    return url[dot:].lower() in picture_extensions
def link_tag(params, text=None, formatter=None, on=None, **kw):
    """ Create a link.

    TODO: cleanup css_class

    NOTE(review): 'request' is documented below but is not a parameter of
    this function; the body references a name 'request' that must come from
    an enclosing/global scope -- confirm where it is bound.

    @param request: the request object
    @param params: parameter string appended to the URL after the scriptname/
    @param text: text / inner part of the <a>...</a> link - does NOT get
                 escaped, so you can give HTML here and it will be used verbatim
    @param formatter: the formatter object to use
    @param on: opening/closing tag only
    @keyword attrs: additional attrs (HTMLified string) (removed in 1.5.3)
    @rtype: string
    @return: formatted link tag
    """
    if kw.has_key('css_class'):
        css_class = kw['css_class']
        del kw['css_class'] # one time is enough
    else:
        css_class = None
    id = kw.get('id', None)
    name = kw.get('name', None)
    if text is None:
        text = params # default
    if formatter:
        # delegate markup generation to the formatter object
        url = "%s/%s" % (request.getScriptname(), params)
        if on != None:
            return formatter.url(on, url, css_class, **kw)
        return (formatter.url(1, url, css_class, **kw) +
                formatter.rawHTML(text) +
                formatter.url(0))
    # no formatter: build the <a> tag by hand
    if on != None and not on:
        return '</a>'
    attrs = ''
    if css_class:
        attrs += ' class="%s"' % css_class
    if id:
        attrs += ' id="%s"' % id
    if name:
        attrs += ' name="%s"' % name
    result = '<a%s href="%s/%s">' % (attrs, request.getScriptname(), params)
    if on:
        return result
    else:
        return "%s%s</a>" % (result, text)
def inputFile2pageName(filename):
    # Strip the mandatory '.wiki' suffix from a source file name.
    assert filename.endswith('.wiki')
    return filename[:-len('.wiki')]

def pageName2inputFile(pagename):
    # Append the '.wiki' suffix to obtain the source file name.
    assert not pagename.endswith('.wiki')
    return '%s.wiki' % pagename

def pageName2outputFile(pagename):
    # The configured index page is rendered as 'index.html'.
    assert not pagename.endswith('.wiki')
    if pagename == config.general.indexpagename:
        pagename = 'index'
    return '%s.html' % pagename

def inputFile2outputFile(filename):
    # Chain the conversions: source file name -> page name -> rendered name.
    return pageName2outputFile(inputFile2pageName(filename))
def assertFileNameCase(filename):
    # On Windows the filesystem is case-insensitive; assert that the given
    # name is spelled with the exact case it has on disk.  A no-op elsewhere.
    if sys.platform.startswith('win'):
        path, name = os.path.split(filename)
        if path:
            assert name in os.listdir(path), '%s: file does not exist in %s' % (name, path)
        else:
            assert name in os.listdir('.'), '%s: file does not exist' % name
def fixFileNameCase(filename):
    # Return `filename` spelled with the case it actually has on disk
    # (Windows only); on other platforms, or when no case-insensitive match
    # is found, return the name unchanged.
    if sys.platform.startswith('win'):
        path, name = os.path.split(filename)
        if path:
            lst = os.listdir(path)
        else:
            lst = os.listdir('.')
        lstlow = [x.lower() for x in lst]
        try:
            return os.path.join(path, lst[lstlow.index(name.lower())])
        except ValueError:
            # no entry matches even case-insensitively; fall through
            pass
    return filename
# vim:set sw=4 et:
| gpl-2.0 |
MrNuggles/HeyBoet-Telegram-Bot | temboo/Library/Xively/Devices/ReadDevice.py | 5 | 3434 | # -*- coding: utf-8 -*-
###############################################################################
#
# ReadDevice
# Returns a JSON representation of the device with the provided serial number.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ReadDevice(Choreography):
    """Choreo returning a JSON representation of a Xively device, looked up by serial number."""
    def __init__(self, temboo_session):
        """
        Create a new instance of the ReadDevice Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(ReadDevice, self).__init__(temboo_session, '/Library/Xively/Devices/ReadDevice')
    def new_input_set(self):
        # Factory for the input container used to parameterize this Choreo.
        return ReadDeviceInputSet()
    def _make_result_set(self, result, path):
        # Wrap raw execution results in the typed result set below.
        return ReadDeviceResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        # Track an in-flight execution of this Choreo.
        return ReadDeviceChoreographyExecution(session, exec_id, path)
class ReadDeviceInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the ReadDevice
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.

    All three inputs below are required.
    """
    def set_APIKey(self, value):
        """
        Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Xively.)
        """
        super(ReadDeviceInputSet, self)._set_input('APIKey', value)
    def set_ProductID(self, value):
        """
        Set the value of the ProductID input for this Choreo. ((required, string) The product ID of the device you would like to read.)
        """
        super(ReadDeviceInputSet, self)._set_input('ProductID', value)
    def set_SerialNumber(self, value):
        """
        Set the value of the SerialNumber input for this Choreo. ((required, string) The serial number for the device you would like to read.)
        """
        super(ReadDeviceInputSet, self)._set_input('SerialNumber', value)
class ReadDeviceResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the ReadDevice Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    def getJSONFromString(self, str):
        # Parse a JSON string into Python objects.
        return json.loads(str)
    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Xively.)
        """
        return self._output.get('Response', None)
class ReadDeviceChoreographyExecution(ChoreographyExecution):
    """ChoreographyExecution variant that yields ReadDevice-typed result sets."""
    def _make_result_set(self, response, path):
        return ReadDeviceResultSet(response, path)
| gpl-3.0 |
vfaronov/mitmproxy-httpolice | mitmproxy_httpolice.py | 1 | 9159 | import collections
import email.utils
from http import HTTPStatus
import io
import typing
import httpolice
from mitmproxy import ctx
import mitmproxy.flow
import mitmproxy.http
import mitmproxy.net.http
import mitmproxy.types
__version__ = '0.10.0.dev1'
class MitmproxyHTTPolice:
    """mitmproxy addon: check flows with HTTPolice, mark/log offenders,
    produce reports on demand, and serve the last in-memory report over
    HTTP at the magic path ``/+httpolice/``.
    """

    def __init__(self):
        # Most recent report produced with path '-' (bytes), or None.
        self.last_report = None

    def load(self, loader):
        """Register this addon's options with mitmproxy."""
        loader.add_option(
            name='httpolice_silence',
            # ``typing.Sequence[int]`` would be better, but is not supported.
            typespec=typing.Sequence[str],
            default=[],
            help='Silence these HTTPolice notice IDs.',
        )
        loader.add_option(
            name='httpolice_mark',
            # Could make this a ``typing.Optional[str]``, but
            # that doesn't work well with the interactive editors,
            # so make "disable" an explicit choice.
            typespec=str,
            choices=[''] + [sev.name for sev in httpolice.Severity],
            default='',
            help=
                'Mark flows where HTTPolice found notices of this severity '
                'or higher (empty to disable).'
        )

    def request(self, flow):
        # Intercept the magic path and answer it ourselves.
        if flow.request.path == '/+httpolice/':
            flow.response = self.serve_report()

    def response(self, flow):
        # A flow is complete once we have the response: check it and
        # annotate, mark, and log the flow accordingly.
        exch = flow_to_exchange(flow)
        attach_report(exch, flow)
        mark_exchange(exch, flow)
        log_exchange(exch, flow)

    # NOTE(review): these decorators rely on ``mitmproxy.command`` being
    # reachable as an attribute of the imported mitmproxy package (it is not
    # imported explicitly at the top of this file) -- confirm.
    @mitmproxy.command.command('httpolice.report.html')
    def html_report(self,
                    flows: typing.Sequence[mitmproxy.flow.Flow],
                    path: mitmproxy.types.Path) -> None:
        """Produce an HTTPolice report (HTML) on flows."""
        self.report(flows, httpolice.html_report, path)

    @mitmproxy.command.command('httpolice.report.text')
    def text_report(self,
                    flows: typing.Sequence[mitmproxy.flow.Flow],
                    path: mitmproxy.types.Path) -> None:
        """Produce an HTTPolice report (text) on flows."""
        self.report(flows, httpolice.text_report, path)

    def report(self, flows, report_func, path):
        """Write a report on `flows` to `path` ('-' keeps it in memory)."""
        exchanges = (flow_to_exchange(flow) for flow in flows)
        if path == '-':
            buf = io.BytesIO()
            report_func(exchanges, buf)
            self.last_report = buf.getvalue()
            ctx.log.alert(
                f'HTTPolice: saved report on {len(flows)} flows in memory')
        else:
            with open(path, 'wb') as f:
                report_func(exchanges, f)
            ctx.log.alert(
                f'HTTPolice: wrote report on {len(flows)} flows to {path}')

    def serve_report(self):
        """Build the HTTP response served at /+httpolice/."""
        if self.last_report is None:
            status_code = HTTPStatus.NOT_FOUND.value
            content = (
                f'<!DOCTYPE html><p>No report has been <a href="'
                f'https://mitmproxy-httpolice.readthedocs.io/en/{__version__}/'
                f'walkthrough.html#inmemory">produced</a> yet.</p>'
            ).encode('utf-8')
        else:
            status_code = HTTPStatus.OK.value
            content = self.last_report
        headers = {
            'Date': email.utils.formatdate(usegmt=True),
            # HTML reports start with '<!DOCTYPE ...'; text reports don't.
            'Content-Type': (
                'text/html; charset=utf-8' if content.startswith(b'<!')
                else 'text/plain; charset=utf-8'
            ),
            'Cache-Control': 'no-store',
        }
        return mitmproxy.http.HTTPResponse.wrap(
            mitmproxy.net.http.Response.make(status_code, content, headers),
        )
def flow_to_exchange(flow):
    """Convert a mitmproxy flow into a checked ``httpolice.Exchange``."""
    request = construct_request(flow)
    response = construct_response(flow)
    exchange = httpolice.Exchange(request, [response] if response else [])
    silenced_ids = [int(notice_id)
                    for notice_id in ctx.options.httpolice_silence]
    exchange.silence(silenced_ids)
    httpolice.check_exchange(exchange)
    return exchange
def construct_request(flow):
    """Build an ``httpolice.Request`` from a mitmproxy flow."""
    version, headers, body = extract_message_basics(flow.request)
    scheme = decode(flow.request.scheme)
    method = decode(flow.request.method)
    # Authority-form and absolute-form requests in the tunnel
    # are simply rejected as errors by mitmproxy, closing the connection.
    target = decode(flow.request.path)
    if version == 'HTTP/2':
        pseudo = httpolice.helpers.pop_pseudo_headers(headers)
        authority = pseudo.get(':authority')
        host_present = any(name.lower() == 'host' for (name, _) in headers)
        if authority and not host_present and target.startswith('/'):
            # Reconstruct HTTP/2's equivalent of
            # the "absolute form" of request target (RFC 7540 Section 8.1.2.3).
            target = f'{scheme}://{decode(authority)}{target}'
    return httpolice.Request(scheme, method, target, version, headers, body)
def construct_response(flow):
    """Build an ``httpolice.Response`` from a mitmproxy flow, or None."""
    if flow.response is None:
        return None
    version, headers, body = extract_message_basics(flow.response)
    if version == 'HTTP/2':
        # HTTPolice wants responses without the ':status' pseudo-header.
        httpolice.helpers.pop_pseudo_headers(headers)
    status = flow.response.status_code
    reason = decode(flow.response.reason)
    return httpolice.Response(version, status, reason, headers, body)
def extract_message_basics(msg):
    """Return (version, headers, body) shared by requests and responses."""
    version = decode(msg.http_version)
    if version == 'HTTP/2.0':
        version = 'HTTP/2'   # the spelling HTTPolice expects
    headers = [(decode(name), value) for (name, value) in msg.headers.fields]
    return version, headers, msg.raw_content
def attach_report(exch, flow):
    """Store HTTPolice's notices for this exchange in ``flow.metadata``,
    one entry for the request and one for the response, so they show up
    in mitmproxy's flow details view.
    """
    buf = io.BytesIO()
    httpolice.text_report([exch], buf)
    report = buf.getvalue().decode('utf-8')
    for_request, for_response = parse_report(report)
    for title, lines in [('HTTPolice: request', for_request),
                         ('HTTPolice: response', for_response)]:
        if lines:
            text = u'\n'.join(lines) + u'\n'
            try:
                # If this script is being run on a flow previously loaded
                # from file, `flow.metadata` might already contain our keys
                # in the wrong order. Reinsert them instead of just updating.
                flow.metadata.pop(title, None)
                flow.metadata[title] = ReprString(text)
            except Exception:
                # `flow.metadata` is not public API,
                # so could theoretically fail.
                pass
def parse_report(report):
    """Split HTTPolice's text report into request and response notice lines.

    This may sound stupid: why not just make HTTPolice return them
    in a structured form? But that would have to be a public API
    (mitmproxy-HTTPolice doesn't use any private APIs from HTTPolice), and
    adding public APIs to HTTPolice without a clear picture of how they will
    be used is undesirable. Until HTTPolice grows some sort of "JSON report",
    we just parse its semi-structured text -- a great Unix tradition.
    """
    sections = {'request': [], 'response': []}
    current = sections['request']
    for line in report.splitlines():
        if line.startswith('------------ request:'):
            current = sections['request']
        elif line.startswith('------------ response:'):
            current = sections['response']
        else:
            current.append(line)
    return sections['request'], sections['response']
def mark_exchange(exch, flow):
    """Mark the flow when HTTPolice reported a notice at or above the
    severity configured in the ``httpolice_mark`` option."""
    if ctx.options.httpolice_mark:
        threshold = httpolice.Severity[ctx.options.httpolice_mark]
        messages = [exch.request] + exch.responses
        offending = any(notice.severity >= threshold
                        for msg in messages
                        for notice in msg.notices)
        if offending:
            flow.marked = True
def log_exchange(exch, flow):
    """Log a one-line summary of HTTPolice's findings for this flow,
    using the warn level when at least one error-severity notice exists.
    """
    # Produce lines like "1 errors, 2 comments" without hardcoding severities.
    severities = collections.Counter(notice.severity
                                     for msg in [exch.request] + exch.responses
                                     for notice in msg.notices)
    # debug-severity notices are deliberately excluded from the summary
    pieces = [f'{n} {severity.name}s'
              for (severity, n) in sorted(severities.items(), reverse=True)
              if severity > httpolice.Severity.debug]
    if pieces:
        log_func = (ctx.log.warn
                    if max(severities) >= httpolice.Severity.error
                    else ctx.log.info)
        log_func('HTTPolice: {0} in: {1} {2} ← {3}'.format(
            ', '.join(pieces),
            flow.request.method, ellipsize(flow.request.path),
            flow.response.status_code,
        ))
def decode(s):
    """Decode bytes as Latin-1; pass any other value through unchanged."""
    return s.decode('iso-8859-1') if isinstance(s, bytes) else s
def ellipsize(s, max_length=40):
    """Shorten `s` to at most `max_length` characters, ending in '...'."""
    ellipsis = '...'
    if len(s) > max_length:
        return s[:max_length - len(ellipsis)] + ellipsis
    return s
class ReprString(str):
    """A str subclass whose repr() is the raw text itself.

    Currently mitmproxy displays ``repr()`` in details view, not ``str()``.
    See also https://discourse.mitmproxy.org/t/extending-the-ui/359/5
    """

    __slots__ = []

    def __repr__(self):
        # Show the text verbatim instead of a quoted literal.
        return str(self)
# mitmproxy addon entry point: a single instance of the checker.
addons = [MitmproxyHTTPolice()]
if __name__ == '__main__':
    # Print the path to this script,
    # for substitution into the mitmproxy command.
    print(__file__)
| mit |
pombredanne/metamorphosys-desktop | metamorphosys/META/externals/HCDDES/tests/Scheduler/xmlrunner.py | 3 | 14071 | # http://redsolo.blogspot.com/2007/11/hudson-embraces-python.html
# http://appfusedjango.googlecode.com/svn/trunk/tests/xmlrunner.py
"""
XML Test Runner for PyUnit
"""
# Written by Sebastian Rittau <srittau@jroger.in-berlin.de> and placed in
# the Public Domain. With contributions by Paolo Borelli.
__revision__ = "$Id: /private/python/stdlib/xmlrunner.py 16654 2007-11-12T12:46:35.368945Z srittau $"
import os.path
import re
import sys
import time
import traceback
import unittest
from StringIO import StringIO
from xml.sax.saxutils import escape
from StringIO import StringIO
def strip_ml_tags(in_text):
    """Description: Removes all HTML/XML-like tags from the input text.

    Inputs: in_text --> string of text
    Outputs: text string without the tags

    # doctest unit testing framework

    >>> test_text = "Keep this Text <remove><me /> KEEP </remove> 123"
    >>> strip_ml_tags(test_text)
    'Keep this Text  KEEP  123'
    """
    # A single regex pass replaces the original O(n^2) list-popping loop.
    # '<[^>]*>' spans from each '<' up to the next '>', which reproduces the
    # old behaviour (a '<' nested inside a tag is swallowed with the tag).
    # Unlike the old code, a dangling '<' with no closing '>' no longer
    # raises IndexError; the text is simply left untouched.
    return re.sub(r'<[^>]*>', '', in_text)
class _TestInfo(object):
    """Information about a particular test.

    Used by _XMLTestResult.
    """
    def __init__(self, test, time):
        # test.id() is "package.module.Class.method"; split off the method.
        (self._class, self._method) = test.id().rsplit(".", 1)
        self._time = time
        self._error = None    # (type, value, traceback) if the test errored
        self._failure = None  # (type, value, traceback) if the test failed
    @staticmethod
    def create_success(test, time):
        """Create a _TestInfo instance for a successful test."""
        return _TestInfo(test, time)
    @staticmethod
    def create_failure(test, time, failure):
        """Create a _TestInfo instance for a failed test."""
        info = _TestInfo(test, time)
        info._failure = failure
        return info
    @staticmethod
    def create_error(test, time, error):
        """Create a _TestInfo instance for an erroneous test."""
        info = _TestInfo(test, time)
        info._error = error
        return info
    def print_report(self, stream):
        """Print information about this test case in XML format to the
        supplied stream.
        """
        stream.write('  <testcase classname="%(class)s" name="%(method)s" time="%(time).4f">' % \
            {
                "class": self._class,
                "method": self._method,
                "time": self._time,
            })
        if self._failure != None:
            self._print_error(stream, 'failure', self._failure)
        if self._error != None:
            self._print_error(stream, 'error', self._error)
        stream.write('</testcase>\n')
    def _print_error(self, stream, tagname, error):
        """Print information from a failure or error to the supplied stream."""
        text = escape(str(error[1]))
        stream.write('\n')
        stream.write('    <%s type="%s">%s\n' \
            % (tagname, str(error[0]).strip("<>"), strip_ml_tags(text)))
        # embed the (escaped) traceback inside the failure/error element
        tb_stream = StringIO()
        traceback.print_tb(error[2], None, tb_stream)
        stream.write(escape(tb_stream.getvalue()))
        stream.write('    </%s>\n' % tagname)
        stream.write('  ')
class _XMLTestResult(unittest.TestResult):
    """A test result class that stores result as XML.

    Used by XMLTestRunner.
    """
    def __init__(self, classname):
        unittest.TestResult.__init__(self)
        self._test_name = classname
        self._start_time = None
        self._tests = []      # _TestInfo for every finished test
        self._error = None    # error captured for the test in progress
        self._failure = None  # failure captured for the test in progress
    def startTest(self, test):
        unittest.TestResult.startTest(self, test)
        self._error = None
        self._failure = None
        self._start_time = time.time()
    def stopTest(self, test):
        # Convert the outcome of the finished test into a _TestInfo record.
        time_taken = time.time() - self._start_time
        unittest.TestResult.stopTest(self, test)
        if self._error:
            info = _TestInfo.create_error(test, time_taken, self._error)
        elif self._failure:
            info = _TestInfo.create_failure(test, time_taken, self._failure)
        else:
            info = _TestInfo.create_success(test, time_taken)
        self._tests.append(info)
    def addError(self, test, err):
        unittest.TestResult.addError(self, test, err)
        self._error = err
    def addFailure(self, test, err):
        unittest.TestResult.addFailure(self, test, err)
        self._failure = err
    def print_report(self, stream, time_taken, out, err):
        """Prints the XML report to the supplied stream.

        The time the tests took to perform as well as the captured standard
        output and standard error streams must be passed in.
        """
        stream.write('<testsuite errors="%(e)d" failures="%(f)d" ' % \
            { "e": len(self.errors), "f": len(self.failures) })
        stream.write('name="%(n)s" tests="%(t)d" time="%(time).3f">\n' % \
            {
                "n": self._test_name,
                "t": self.testsRun,
                "time": time_taken,
            })
        for info in self._tests:
            info.print_report(stream)
        stream.write('  <system-out><![CDATA[%s]]></system-out>\n' % out)
        stream.write('  <system-err><![CDATA[%s]]></system-err>\n' % err)
        stream.write('</testsuite>\n')
class XMLTestRunner(object):
    """A test runner that stores results in XML format compatible with JUnit.

    XMLTestRunner(stream=None) -> XML test runner

    The XML file is written to the supplied stream. If stream is None, the
    results are stored in a file called TEST-<module>.<class>.xml in the
    current working directory (if not overridden with the path property),
    where <module> and <class> are the module and class name of the test class.
    """
    def __init__(self, stream=None):
        self._stream = stream
        self._path = "."
    def run(self, test):
        """Run the given test case or test suite."""
        class_ = test.__class__
        classname = class_.__module__ + "." + class_.__name__
        if self._stream == None:
            filename = "TEST-%s.xml" % classname
            # NOTE(review): file() is the Python 2 builtin; this module
            # would need open() if ever ported to Python 3.
            stream = file(os.path.join(self._path, filename), "w")
            stream.write('<?xml version="1.0" encoding="utf-8"?>\n')
        else:
            stream = self._stream
        result = _XMLTestResult(classname)
        start_time = time.time()
        # TODO: Python 2.5: Use the with statement
        # Capture stdout/stderr during the run so they can be embedded
        # in the XML report; always restore them afterwards.
        old_stdout = sys.stdout
        old_stderr = sys.stderr
        sys.stdout = StringIO()
        sys.stderr = StringIO()
        try:
            test(result)
            try:
                out_s = sys.stdout.getvalue()
            except AttributeError:
                # a test replaced sys.stdout with a non-StringIO object
                out_s = ""
            try:
                err_s = sys.stderr.getvalue()
            except AttributeError:
                err_s = ""
        finally:
            sys.stdout = old_stdout
            sys.stderr = old_stderr
        time_taken = time.time() - start_time
        result.print_report(stream, time_taken, out_s, err_s)
        if self._stream == None:
            stream.close()
        return result
    def _set_path(self, path):
        self._path = path
    path = property(lambda self: self._path, _set_path, None,
            """The path where the XML files are stored.
            This property is ignored when the XML file is written to a file
            stream.""")
class XMLTestRunnerTest(unittest.TestCase):
    """Self-tests: run XMLTestRunner over tiny suites and compare the XML."""
    def setUp(self):
        self._stream = StringIO()
    def _try_test_run(self, test_class, expected):
        """Run the test suite against the supplied test class and compare the
        XML result against the expected XML string. Fail if the expected
        string doesn't match the actual string. All time attribute in the
        expected string should have the value "0.000". All error and failure
        messages are reduced to "Foobar".
        """
        runner = XMLTestRunner(self._stream)
        runner.run(unittest.makeSuite(test_class))
        got = self._stream.getvalue()
        # Replace all time="X.YYY" attributes by time="0.000" to enable a
        # simple string comparison.
        got = re.sub(r'time="\d+\.\d+"', 'time="0.000"', got)
        # Likewise, replace all failure and error messages by a simple "Foobar"
        # string.
        got = re.sub(r'(?s)<failure (.*?)>.*?</failure>', r'<failure \1>Foobar</failure>', got)
        got = re.sub(r'(?s)<error (.*?)>.*?</error>', r'<error \1>Foobar</error>', got)
        self.assertEqual(expected, got)
    def test_no_tests(self):
        """Regression test: Check whether a test run without any tests
        matches a previous run.
        """
        class TestTest(unittest.TestCase):
            pass
        self._try_test_run(TestTest, """<testsuite errors="0" failures="0" name="unittest.TestSuite" tests="0" time="0.000">
  <system-out><![CDATA[]]></system-out>
  <system-err><![CDATA[]]></system-err>
</testsuite>
""")
    def test_success(self):
        """Regression test: Check whether a test run with a successful test
        matches a previous run.
        """
        class TestTest(unittest.TestCase):
            def test_foo(self):
                pass
        self._try_test_run(TestTest, """<testsuite errors="0" failures="0" name="unittest.TestSuite" tests="1" time="0.000">
  <testcase classname="__main__.TestTest" name="test_foo" time="0.000"></testcase>
  <system-out><![CDATA[]]></system-out>
  <system-err><![CDATA[]]></system-err>
</testsuite>
""")
    def test_failure(self):
        """Regression test: Check whether a test run with a failing test
        matches a previous run.
        """
        class TestTest(unittest.TestCase):
            def test_foo(self):
                self.assert_(False)
        self._try_test_run(TestTest, """<testsuite errors="0" failures="1" name="unittest.TestSuite" tests="1" time="0.000">
  <testcase classname="__main__.TestTest" name="test_foo" time="0.000">
    <failure type="exceptions.AssertionError">Foobar</failure>
  </testcase>
  <system-out><![CDATA[]]></system-out>
  <system-err><![CDATA[]]></system-err>
</testsuite>
""")
    def test_error(self):
        """Regression test: Check whether a test run with a erroneous test
        matches a previous run.
        """
        class TestTest(unittest.TestCase):
            def test_foo(self):
                raise IndexError()
        self._try_test_run(TestTest, """<testsuite errors="1" failures="0" name="unittest.TestSuite" tests="1" time="0.000">
  <testcase classname="__main__.TestTest" name="test_foo" time="0.000">
    <error type="exceptions.IndexError">Foobar</error>
  </testcase>
  <system-out><![CDATA[]]></system-out>
  <system-err><![CDATA[]]></system-err>
</testsuite>
""")
    def test_stdout_capture(self):
        """Regression test: Check whether a test run with output to stdout
        matches a previous run.
        """
        class TestTest(unittest.TestCase):
            def test_foo(self):
                print "Test"
        self._try_test_run(TestTest, """<testsuite errors="0" failures="0" name="unittest.TestSuite" tests="1" time="0.000">
  <testcase classname="__main__.TestTest" name="test_foo" time="0.000"></testcase>
  <system-out><![CDATA[Test
]]></system-out>
  <system-err><![CDATA[]]></system-err>
</testsuite>
""")
    def test_stderr_capture(self):
        """Regression test: Check whether a test run with output to stderr
        matches a previous run.
        """
        class TestTest(unittest.TestCase):
            def test_foo(self):
                print >>sys.stderr, "Test"
        self._try_test_run(TestTest, """<testsuite errors="0" failures="0" name="unittest.TestSuite" tests="1" time="0.000">
  <testcase classname="__main__.TestTest" name="test_foo" time="0.000"></testcase>
  <system-out><![CDATA[]]></system-out>
  <system-err><![CDATA[Test
]]></system-err>
</testsuite>
""")
    class NullStream(object):
        """A file-like object that discards everything written to it."""
        def write(self, buffer):
            pass
    def test_unittests_changing_stdout(self):
        """Check whether the XMLTestRunner recovers gracefully from unit tests
        that change stdout, but don't change it back properly.
        """
        class TestTest(unittest.TestCase):
            def test_foo(self):
                sys.stdout = XMLTestRunnerTest.NullStream()
        runner = XMLTestRunner(self._stream)
        runner.run(unittest.makeSuite(TestTest))
    def test_unittests_changing_stderr(self):
        """Check whether the XMLTestRunner recovers gracefully from unit tests
        that change stderr, but don't change it back properly.
        """
        class TestTest(unittest.TestCase):
            def test_foo(self):
                sys.stderr = XMLTestRunnerTest.NullStream()
        runner = XMLTestRunner(self._stream)
        runner.run(unittest.makeSuite(TestTest))
class XMLTestProgram(unittest.TestProgram):
    """unittest.main() drop-in that defaults to the XMLTestRunner."""
    def runTests(self):
        if self.testRunner is None:
            self.testRunner = XMLTestRunner()
        unittest.TestProgram.runTests(self)
# Behave like unittest.main(), but emit JUnit-style XML reports.
main = XMLTestProgram
if __name__ == "__main__":
    main(module=None)
| mit |
CarlSorensen/lilypond-standards | scripts/etf2ly.py | 5 | 33744 | #!@TARGET_PYTHON@
# This file is part of LilyPond, the GNU music typesetter.
#
# LilyPond is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LilyPond is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with LilyPond. If not, see <http://www.gnu.org/licenses/>.
# info mostly taken from looking at files. See also
# http://lilypond.org/wiki/?EnigmaTransportFormat
# This supports
#
# * notes
# * rests
# * ties
# * slurs
# * lyrics
# * articulation
# * grace notes
# * tuplets
#
# todo:
# * slur/stem directions
# * voices (2nd half of frame?)
# * more intelligent lyrics
# * beams (better use autobeam?)
# * more robust: try entertainer.etf (freenote)
# * dynamics
# * empty measures (eg. twopt03.etf from freenote)
#
import __main__
import getopt
import sys
import re
import os
program_name = sys.argv[0]
authors = ('Jan Nieuwenhuizen <janneke@gnu.org>',
'Han-Wen Nienhuys <hanwen@xs4all.nl>')
version = '@TOPLEVEL_VERSION@'
if version == '@' + 'TOPLEVEL_VERSION' + '@':
version = '(unknown version)' # uGUHGUHGHGUGH
"""
@relocate-preamble@
"""
################################################################
import lilylib as ly
_ = ly._
# Mapping from Finale clef numbers to LilyPond clef names.
finale_clefs= ['treble', 'alto', 'tenor', 'bass', 'percussion', 'treble_8', 'bass_8', 'baritone']
def lily_clef (fin):
    # Unknown clef numbers fall back to treble, with a warning on stderr.
    try:
        return finale_clefs[fin]
    except IndexError:
        sys.stderr.write ('\nHuh? Found clef number %d\n' % fin)
        return 'treble'
def gulp_file(f):
    """Return the entire contents of the file named `f`.

    The file handle is closed deterministically instead of relying on
    garbage collection (the original leaked the open handle).
    """
    with open (f) as handle:
        return handle.read ()
# notename 0 == central C
# semitone offsets of the C-major scale degrees (index 7 = octave).
distances = [0, 2, 4, 5, 7, 9, 11, 12]
def semitones (name, acc):
    """Return the pitch of scale degree `name` plus alteration `acc`
    in semitones above central C.

    Floor division keeps the octave arithmetic correct for negative
    notenames and also works unchanged under Python 3 (plain '/' on ints
    would yield a float there).
    """
    return (name // 7) * 12 + distances[name % 7] + acc
# represent pitches as (notename, alteration), relative to C-major scale
def transpose(orig, delta):
    # Transpose pitch `orig` by the interval `delta`; both are
    # (notename, alteration) pairs.
    oname, oacc = orig
    dname, dacc = delta
    nname = oname + dname
    # Correct the alteration so the resulting interval in semitones
    # matches `delta` exactly.
    correction = (semitones (nname, oacc)
                  - semitones (oname, oacc)
                  - semitones (dname, dacc))
    return (nname, oacc - correction)
def interpret_finale_key_sig (finale_id):
    """
    find the transposition of C-major scale that belongs here.

    we are not going to insert the correct major/minor, we only want to
    have the correct number of accidentals
    """
    p = (0,0)
    # high byte: 0 = major bank, 1 = minor bank
    bank_number = finale_id >> 8
    # low byte: number of sharps (0..6) or, as 256 - n, number of flats
    accidental_bits = finale_id & 0xff
    if 0 <= accidental_bits < 7:
        # sharps: transpose up a fifth per accidental
        while accidental_bits > 0:
            p = transpose (p, (4,0)) # a fifth up
            accidental_bits = accidental_bits - 1
    elif 248 < accidental_bits <= 255:
        # flats: transpose up a fourth per accidental
        while accidental_bits < 256:
            p = transpose (p, (3,0))
            accidental_bits = accidental_bits + 1
    if bank_number == 1:
        # minor scale
        p = transpose (p, (5, 0))
    p = (p[0] % 7, p[1])
    # NOTE(review): KeySignature is defined elsewhere in this file
    # (not visible in this excerpt).
    return KeySignature (p, bank_number)
# should cache this.
def find_scale (keysig):
    """Return the scale (list of (notename, alteration) pairs) implied by
    `keysig`, i.e. the C-major scale transposed by the signature's pitch.

    For the minor bank (sig_type == 1), the scale is built on the relative
    major so only the number of accidentals is affected.
    """
    # List comprehensions instead of map/lambda: identical result on
    # Python 2 and still a (reusable) list under Python 3, where map()
    # would return a one-shot iterator.
    transposition = keysig.pitch
    if keysig.sig_type == 1:
        transposition = transpose(transposition, (2, -1))
        transposition = (transposition[0] % 7, transposition[1])
        ascale = [(x, 0) for x in range(-2, 5)]
        return [transpose(x, transposition) for x in ascale]
    cscale = [(x, 0) for x in range(0, 7)]
    return [transpose(x, transposition) for x in cscale]
def EDU_to_duration (edu):
    # Decompose a Finale EDU value (4096 EDU = whole note) into a
    # (duration log, dot count) pair.
    log = 1
    d = 4096
    while d > edu:
        d = d >> 1
        log = log << 1
    remainder = edu - d
    # One dot adds half the base duration, two dots add three quarters.
    if remainder == d // 2:
        dots = 1
    elif remainder == (d * 3) // 4:
        dots = 2
    else:
        dots = 0
    return (log, dots)
def rational_to_lily_skip (rat):
    """Format the rational duration `rat` = (n, d) as a LilyPond skip.

    Factors of two in the denominator become the basic note length
    (e.g. (3, 4) -> 's4*3'); remaining factors stay as a '/d' suffix.

    Changes from the original: the Python-2-only '<>' operator is
    replaced by '!=', and the local previously named 'str' (shadowing
    the builtin) is renamed.
    """
    (n, d) = rat
    basedur = 1
    while d and d % 2 == 0:
        basedur = basedur << 1
        d = d >> 1
    skip = 's%d' % basedur
    if n != 1:
        skip = skip + '*%d' % n
    if d != 1:
        skip = skip + '/%d' % d
    return skip
def gcd (a, b):
    """Greatest common divisor, by Euclid's algorithm.

    Unlike the original loop, this also returns b for gcd(0, b)
    (the old code returned 0 in that case).  Callers in this file
    guard against a zero numerator, so the fix is safe.
    """
    while b:
        a, b = b, a % b
    return a
def rat_simplify (r):
    # Reduce the rational r = (n, d) to lowest terms with d > 0;
    # zero is always represented as (0, 1).
    (n, d) = r
    if d < 0:
        n, d = -n, -d
    if n == 0:
        return (0, 1)
    g = gcd (n, d)
    return (n / g, d / g)
def rat_multiply (a, b):
    # (x/y) * (p/q), reduced to lowest terms.
    num = a[0] * b[0]
    den = a[1] * b[1]
    return rat_simplify ((num, den))
def rat_add (a, b):
    # x/y + p/q = (x*q + p*y) / (y*q), reduced to lowest terms.
    num = a[0] * b[1] + b[0] * a[1]
    den = a[1] * b[1]
    return rat_simplify ((num, den))
def rat_neg (a):
    # Negate the rational (p, q) -> (-p, q).
    return (-a[0], a[1])
def rat_subtract (a, b):
    # a - b == a + (-b).
    return rat_add (a, rat_neg (b))
def lily_notename (tuple2):
    # Convert a (notename, alteration) pair into a LilyPond note name,
    # e.g. (0, 0) -> 'c' and (5, -1) -> 'aes'.
    (n, a) = tuple2
    base = chr ((n + 2) % 7 + ord ('a'))
    suffixes = {-2: 'eses', -1: 'es', 0: '', 1: 'is', 2: 'isis'}
    return base + suffixes[a]
class Tuplet:
    """A tuplet spanning a run of chords, built from Finale tuplet records."""
    def __init__ (self, number):
        self.start_note = number  # index of the first chord of the tuplet
        self.finale = []          # raw Finale tuplet parameter tuples
    def append_finale (self, fin):
        self.finale.append (fin)
    def factor (self):
        # Ratio of notated duration to sounding duration, as a rational.
        n = self.finale[0][2]*self.finale[0][3]
        d = self.finale[0][0]*self.finale[0][1]
        return rat_simplify( (n, d))
    def dump_start (self):
        return '\\times %d/%d { ' % self.factor ()
    def dump_end (self):
        return ' }'
    def calculate (self, chords):
        # Walk the chord list from start_note, consuming the tuplet's total
        # EDU budget; prefix the first chord and suffix the last one.
        edu_left = self.finale[0][0] * self.finale[0][1]
        startch = chords[self.start_note]
        c = startch
        while c and edu_left:
            c.tuplet = self
            if c == startch:
                c.chord_prefix = self.dump_start () + c.chord_prefix
            if not c.grace:
                # grace notes don't consume tuplet time
                edu_left = edu_left - c.EDU_duration ()
            if edu_left == 0:
                c.chord_suffix = c.chord_suffix+ self.dump_end ()
            c = c.next
        if edu_left:
            sys.stderr.write ("\nHuh? Tuplet starting at entry %d was too short." % self.start_note)
class Slur:
    """A slur between two note entries, built from a raw Finale 'Sx' record."""
    def __init__ (self, number, params):
        self.number = number
        self.finale = params            # raw parameter list
    def append_entry (self, finale_e):
        self.finale.append (finale_e)
    def calculate (self, chords):
        """Attach '-(' / '-)' markup to the chords at the slur end points.

        The start/end entry numbers sit at fixed offsets in the raw
        Finale parameter list."""
        startnote = self.finale[5]
        endnote = self.finale[3*6 + 2]
        try:
            cs = chords[startnote]
            ce = chords[endnote]
            if not cs or not ce:
                raise IndexError
            cs.note_suffix = '-(' + cs.note_suffix
            ce.note_suffix = ce.note_suffix + '-)'
        except IndexError:
            sys.stderr.write ("""\nHuh? Slur no %d between (%d,%d), with %d notes""" % (self.number, startnote, endnote, len (chords)))
class Global_measure:
    """Per-measure information shared by all staves: time signature,
    key signature, repeat marks and forced line breaks."""
    def __init__ (self, number):
        self.timesig = ''
        self.number = number
        self.key_signature = None
        self.scale = None               # filled in by set_key_sig ()
        self.force_break = 0
        self.repeats = []
        self.finale = []
    def __str__ (self):
        return `self.finale `
    def set_timesig (self, finale):
        """Decode Finale's (beats, EDU beat length) into a (num, den) pair."""
        (beats, fdur) = finale
        (log, dots) = EDU_to_duration (fdur)
        if dots == 1:
            # dotted beat unit, e.g. 2 x 4. becomes 6/8
            beats = beats * 3
            log = log * 2
            dots = 0
        if dots <> 0:
            sys.stderr.write ("\nHuh? Beat duration has dots? (EDU Duration = %d)" % fdur)
        self.timesig = (beats, log)
    def length (self):
        # The (beats, log) pair doubles as the measure length rational.
        return self.timesig
    def set_key_sig (self, finale):
        k = interpret_finale_key_sig (finale)
        self.key_signature = k
        self.scale = find_scale (k)
    def set_flags (self,flag1, flag2):
        # flag1 isn't all that interesting.
        if flag2 & 0x8000:
            self.force_break = 1
        if flag2 & 0x0008:
            self.repeats.append ('start')
        if flag2 & 0x0004:
            self.repeats.append ('stop')
        if flag2 & 0x0002:
            if flag2 & 0x0004:
                # NOTE(review): 'bracket' only fires together with the
                # 'stop' bit -- confirm this nesting is intentional.
                self.repeats.append ('bracket')
# Map Finale articulation glyph numbers (see Articulation_def.finale_glyph)
# to the corresponding LilyPond script markup.
articulation_dict ={
    94: '^',
    109: '\\prall',
    84: '\\turn',
    62: '\\mordent',
    85: '\\fermata',
    46: '.',
    # 3: '>',
    # 18: '\arpeggio' ,
}
class Articulation_def:
    """Definition record NUMBER for a Finale articulation glyph."""
    def __init__ (self, n, a, b):
        self.number = n
        # only the low byte of the first parameter is the glyph code
        self.finale_glyph = a & 0xff
    def dump (self):
        """Return the LilyPond script for this glyph, or None if unknown."""
        glyph = self.finale_glyph
        if glyph in articulation_dict:
            return articulation_dict[glyph]
        sys.stderr.write ("\nUnknown articulation no. %d" % glyph)
        sys.stderr.write ("\nPlease add an entry to articulation_dict in the Python source")
        return None
class Articulation:
    """An articulation instance attached to note entry NOTENUMBER."""
    def __init__ (self, a, b, finale):
        self.notenumber = b
        self.definition = finale[0]     # index into articulation_defs
    def calculate (self, chords, defs):
        """Resolve the definition and glue its markup onto the chord."""
        chord = chords[self.notenumber]
        mark = defs[self.definition].dump ()
        if mark is None:
            mark = '"art"'
            sys.stderr.write ("\nThis happened on note %d" % self.notenumber)
        chord.note_suffix = '-' + mark
class Syllable:
    """One lyric syllable, attached to chord entry CHORDNUM of a verse."""
    def __init__ (self, a, b, finale):
        self.chordnum = b
        self.verse = finale[0]
        self.syllable = finale[1]
    def calculate (self, chords, lyrics):
        # resolve the stored chord number into the chord object
        self.chord = chords[self.chordnum]
class Verse:
    """One verse of lyrics; splits raw Finale text into LilyPond syllables."""
    def __init__ (self, number, body):
        self.body = body
        self.number = number
        self.split_syllables ()
    def split_syllables (self):
        """Split BODY on hyphens/spaces; hyphens become ' -- ' separators.

        self.syllables is 1-based: entry 0 is a None placeholder."""
        ss = re.split ('(-| +)', self.body)
        sep = 0
        syls = [None]
        for s in ss:
            if sep:
                # separator token: runs of spaces vanish, '-' becomes ' -- '
                septor = re.sub (" +", "", s)
                septor = re.sub ("-", " -- ", septor)
                syls[-1] = syls[-1] + septor
            else:
                syls.append (s)
            sep = not sep
        self.syllables = syls
    def dump (self):
        """Return the verse as a LilyPond \\lyricmode block."""
        str = ''
        line = ''
        for s in self.syllables[1:]:
            line = line + ' ' + s
            if len (line) > 72:
                str = str + ' ' * 4 + line + '\n'
                line = ''
        # Flush the final partial line: previously any text shorter than
        # the 72-column wrap threshold was silently dropped.
        if line:
            str = str + ' ' * 4 + line + '\n'
        str = """\nverse%s = \\lyricmode {\n %s }\n""" % (encodeint (self.number - 1) ,str)
        return str
class KeySignature:
    """A key signature: a tonic pitch plus a mode flag (0 = major, 1 = minor)."""
    def __init__(self, pitch, sig_type = 0):
        self.pitch = pitch
        self.sig_type = sig_type
    def signature_type (self):
        # Only modes 0 and 1 are known; anything else is rendered as major.
        return "\\minor" if self.sig_type == 1 else "\\major"
    def equal (self, other):
        """Return 1 when OTHER denotes the same key, else 0."""
        same = other and other.pitch == self.pitch and other.sig_type == self.sig_type
        return 1 if same else 0
class Measure:
    """One measure of one staff: links the staff to its Global_measure and
    holds the (up to 4) layer frame numbers/objects."""
    def __init__(self, no):
        self.number = no
        self.frames = [0] * 4
        self.flags = 0
        self.clef = 0
        self.finale = []                # raw 'GF' record
        self.global_measure = None
        self.staff = None
        # NOTE(review): this attribute shadows the valid () method below on
        # every instance; callers read m.valid as a flag, so the method is
        # effectively dead code.
        self.valid = 1
    def valid (self):
        return self.valid
    def calculate (self):
        """Unpack the raw Finale record into clef/flags/frame numbers.

        Short records carry only (frame, clef); full records are
        (clef, flags, frame1..frame4)."""
        fs = []
        if len (self.finale) < 2:
            fs = self.finale[0]
            self.clef = fs[1]
            self.frames = [fs[0]]
        else:
            fs = self.finale
            self.clef = fs[0]
            self.flags = fs[1]
            self.frames = fs[2:]
class Frame:
    """A frame: the chord sequence of one layer within one measure."""
    def __init__ (self, finale):
        self.measure = None
        self.finale = finale
        (number, start, end ) = finale
        self.number = number
        self.start = start              # first entry (chord) number
        self.end = end                  # last entry number
        self.chords = []
    def set_measure (self, m):
        self.measure = m
    def calculate (self):
        # do grace notes.
        # Wrap every maximal run of grace chords in \grace { ... } markup.
        lastch = None
        in_grace = 0
        for c in self.chords:
            if c.grace and (lastch == None or (not lastch.grace)):
                c.chord_prefix = r'\grace {' + c.chord_prefix
                in_grace = 1
            elif not c.grace and lastch and lastch.grace:
                lastch.chord_suffix = lastch.chord_suffix + ' } '
                in_grace = 0
            lastch = c
        # close a grace run that ends with the frame
        if lastch and in_grace:
            lastch.chord_suffix += '}'
    def dump (self):
        """Render the frame's chords, wrapping at ~72 columns, padding
        leftover time with a lily skip and ending with a bar check."""
        str = '%% FR(%d)\n' % self.number
        left = self.measure.global_measure.length ()
        ln = ''
        for c in self.chords:
            add = c.ly_string () + ' '
            if len (ln) + len(add) > 72:
                str = str + ln + '\n'
                ln = ''
            ln = ln + add
            left = rat_subtract (left, c.length ())
        str = str + ln
        if left[0] < 0:
            sys.stderr.write ("""\nHuh? Going backwards in frame no %d, start/end (%d,%d)""" % (self.number, self.start, self.end))
            left = (0,1)
        if left[0]:
            str = str + rational_to_lily_skip (left)
        str = str + ' |\n'
        return str
def encodeint (i):
    """Encode the small integer I as an uppercase letter: 0 -> 'A', 1 -> 'B', ..."""
    return chr (ord ('A') + i)
class Staff:
    """One staff: its measures plus the LilyPond rendering of its layers
    and of the per-staff global (key/time/clef/repeat) context."""
    def __init__ (self, number):
        self.number = number
        self.measures = []              # 1-based, None-padded
    def get_measure (self, no):
        # Grow the measure list on demand; create the Measure lazily.
        fill_list_to (self.measures, no)
        if self.measures[no] == None:
            m = Measure (no)
            self.measures [no] =m
            m.staff = self
        return self.measures[no]
    def staffid (self):
        return 'staff' + encodeint (self.number - 1)
    def layerid (self, l):
        return self.staffid() + 'layer%s' % chr (l -1 + ord ('A'))
    def dump_time_key_sigs (self):
        """Emit the staff's \\key/\\time/\\clef/repeat stream, inserting
        lily skips to cover the measures between changes."""
        k = ''
        last_key = None
        last_time = None
        last_clef = None
        gap = (0,1)                     # accumulated time since last change
        for m in self.measures[1:]:
            if not m or not m.valid:
                continue # ugh.
            g = m.global_measure
            e = ''
            if g:
                if g.key_signature and not g.key_signature.equal(last_key):
                    pitch= g.key_signature.pitch
                    e = e + "\\key %s %s " % (lily_notename (pitch),
                             g.key_signature.signature_type())
                    last_key = g.key_signature
                if last_time <> g.timesig :
                    e = e + "\\time %d/%d " % g.timesig
                    last_time = g.timesig
                if 'start' in g.repeats:
                    e = e + ' \\bar ".|:" '
                # we don't attempt voltas since they fail easily.
                if 0 : # and g.repeat_bar == '|:' or g.repeat_bar == ':|:' or g.bracket:
                    strs = []
                    if g.repeat_bar == '|:' or g.repeat_bar == ':|:' or g.bracket == 'end':
                        strs.append ('#f')
                    if g.bracket == 'start':
                        strs.append ('"0."')
                    str = ' '.join (['(volta %s)' % x for x in strs])
                    e = e + ' \\set Score.repeatCommands = #\'(%s) ' % str
                if g.force_break:
                    e = e + ' \\break '
            if last_clef <> m.clef :
                e = e + '\\clef "%s"' % lily_clef (m.clef)
                last_clef = m.clef
            if e:
                # flush the accumulated gap before the new settings
                if gap <> (0,1):
                    k = k +' ' + rational_to_lily_skip (gap) + '\n'
                gap = (0,1)
                k = k + e
            if g:
                gap = rat_add (gap, g.length ())
                if 'stop' in g.repeats:
                    k = k + ' \\bar ":|." '
        k = '%sglobal = { %s }\n\n ' % (self.staffid (), k)
        return k
    def dump (self):
        """Render the staff: one identifier per non-empty layer plus the
        global stream, combined into a \\context Staff expression."""
        str = ''
        layerids = []
        for x in range (1,5): # 4 layers.
            laystr = ''
            last_frame = None
            first_frame = None
            gap = (0,1)
            for m in self.measures[1:]:
                if not m or not m.valid:
                    sys.stderr.write ("Skipping non-existant or invalid measure\n")
                    continue
                fr = None
                try:
                    fr = m.frames[x]
                except IndexError:
                    sys.stderr.write ("Skipping nonexistent frame %d\n" % x)
                    laystr = laystr + "%% non existent frame %d (skipped)\n" % x
                if fr:
                    first_frame = fr
                    # close the pending skip-gap before real content
                    if gap <> (0,1):
                        laystr = laystr +'} %s {\n ' % rational_to_lily_skip (gap)
                        gap = (0,1)
                    laystr = laystr + fr.dump ()
                else:
                    if m.global_measure :
                        gap = rat_add (gap, m.global_measure.length ())
                    else:
                        sys.stderr.write ( \
                            "No global measure for staff %d measure %d\n"
                            % (self.number, m.number))
            if first_frame:
                l = self.layerid (x)
                laystr = '%s = { { %s } }\n\n' % (l, laystr)
                str = str + laystr
                layerids.append (l)
        str = str + self.dump_time_key_sigs ()
        stafdef = '\\%sglobal' % self.staffid ()
        for i in layerids:
            stafdef = stafdef + ' \\' + i
        str = str + '%s = \\context Staff = %s <<\n %s\n >>\n' % \
            (self.staffid (), self.staffid (), stafdef)
        return str
def ziplist (l):
    """Group L into consecutive pairs: [a,b,c,d] -> [(a,b),(c,d)].

    A trailing unpaired element is dropped."""
    pairs = []
    for i in range (0, len (l) - 1, 2):
        pairs.append ((l[i], l[i + 1]))
    return pairs
class Chord:
    """One Finale note entry: simultaneous pitches plus the LilyPond
    markup fragments accumulated around it by tuplets, slurs, etc."""
    def __init__ (self, number, contents):
        self.pitches = []
        self.frame = None
        self.finale = contents[:7]            # raw header (prev, next, dur, pos, flag, ...)
        self.notelist = ziplist (contents[7:])  # (pitch byte, note flag) pairs
        self.duration = None
        self.next = None                      # threading set by unthread_entries
        self.prev = None
        self.number = number
        # markup glued around the note / whole chord during calculation:
        self.note_prefix= ''
        self.note_suffix = ''
        self.chord_suffix = ''
        self.chord_prefix = ''
        self.tuplet = None
        self.grace = 0
    def measure (self):
        if not self.frame:
            return None
        return self.frame.measure
    def length (self):
        """Sounding length as a rational, including dots and tuplet scaling."""
        if self.grace:
            return (0,1)
        l = (1, self.duration[0])
        d = 1 << self.duration[1]
        # dot factor: 2 - 1/2^dots
        dotfact = rat_subtract ((2,1), (1,d))
        mylen = rat_multiply (dotfact, l)
        if self.tuplet:
            mylen = rat_multiply (mylen, self.tuplet.factor())
        return mylen
    def EDU_duration (self):
        return self.finale[2]
    def set_duration (self):
        self.duration = EDU_to_duration(self.EDU_duration ())
    def calculate (self):
        self.find_realpitch ()
        self.set_duration ()
        flag = self.finale[4]
        if Chord.GRACE_MASK & flag:
            self.grace = 1
    def find_realpitch (self):
        """Resolve raw pitch bytes against the measure's scale into
        (note step, alteration) pairs; also collects tie starts."""
        meas = self.measure ()
        tiestart = 0
        if not meas or not meas.global_measure :
            sys.stderr.write ('note %d not in measure\n' % self.number)
        elif not meas.global_measure.scale:
            sys.stderr.write ('note %d: no scale in this measure.' % self.number)
        else:
            for p in self.notelist:
                (pitch, flag) = p
                # low nibble: accidental offset; values above 8 are negative
                nib1 = pitch & 0x0f
                if nib1 > 8:
                    nib1 = -(nib1 - 8)
                # remaining bits: scale step
                rest = pitch / 16
                scale = meas.global_measure.scale
                (sn, sa) =scale[rest % 7]
                sn = sn + (rest - (rest%7)) + 7
                acc = sa + nib1
                self.pitches.append ((sn, acc))
                tiestart = tiestart or (flag & Chord.TIE_START_MASK)
        if tiestart :
            self.chord_suffix = self.chord_suffix + ' ~ '
    # NOTE(review): REST_MASK and TIE_START_MASK share the same bit --
    # confirm against the ETF entry-flag layout before relying on either.
    REST_MASK = 0x40000000L
    TIE_START_MASK = 0x40000000L
    GRACE_MASK = 0x00800000L
    def ly_string (self):
        """Render this entry as LilyPond: pitches (or a rest), duration,
        then the accumulated prefixes/suffixes."""
        s = ''
        rest = ''
        # NOTE(review): rest is emitted when the REST bit is NOT set; the
        # polarity looks inverted but is kept as-is.
        if not (self.finale[4] & Chord.REST_MASK):
            rest = 'r'
        for p in self.pitches:
            (n,a) = p
            # split absolute step into octave and in-octave note name
            o = n/ 7
            n = n % 7
            nn = lily_notename ((n,a))
            if o < 0:
                nn = nn + (',' * -o)
            elif o > 0:
                nn = nn + ('\'' * o)
            if s:
                s = s + ' '
            if rest:
                nn = rest
            s = s + nn
        if not self.pitches:
            s = 'r'
        if len (self.pitches) > 1:
            s = '<%s>' % s
        s = s + '%d%s' % (self.duration[0], '.'* self.duration[1])
        s = self.note_prefix + s + self.note_suffix
        s = self.chord_prefix + s + self.chord_suffix
        return s
def fill_list_to (lst, no):
    """
    Pad LST with None entries so that index NO is valid; return LST.

    The parameter was renamed from `list` (which shadowed the builtin);
    every call site in this file passes it positionally.  The original
    while-loop always completed in one pass, so a plain conditional
    extend is equivalent.
    """
    if len (lst) <= no:
        lst.extend ([None] * (no + 1 - len (lst)))
    return lst
def read_finale_value (str):
    """
    Pry off one value from STR. The value may be $hex, decimal, or "string".
    Return: (value, rest-of-STR)
    """
    # skip leading whitespace
    while str and str[0] in ' \t\n':
        str = str[1:]
    if not str:
        return (None, str)
    if str[0] == '$':
        # hexadecimal, e.g. $1A
        str = str[1:]
        hexdigits = ''
        while str and str[0] in '0123456789ABCDEF':
            hexdigits = hexdigits + str[0]
            str = str[1:]
        # int() handles arbitrary magnitude; the old long() was Python-2-only.
        return (int (hexdigits, 16), str)
    elif str[0] == '"':
        # quoted string.  NOTE(review): the closing quote is left on the
        # remainder -- presumably callers tolerate this; confirm before
        # changing.
        str = str[1:]
        s = ''
        while str and str[0] != '"':
            s = s + str[0]
            str = str[1:]
        return (s, str)
    elif str[0] in '-0123456789':
        # (possibly negative) decimal
        dec = ''
        while str and str[0] in '-0123456789':
            dec = dec + str[0]
            str = str[1:]
        return (int (dec), str)
    else:
        sys.stderr.write ("cannot convert `%s'\n" % str)
        return (None, str)
def parse_etf_file (fn, tag_dict):
    """ Read FN, putting ETF info into
    a giant dictionary. The keys of TAG_DICT indicate which tags
    to put into the dict.

    Returns {tag: {indices-tuple: [parsed values]}}."""
    sys.stderr.write ('parsing ... ' )
    f = open (fn)
    # normalize line endings, then split on the '^' record leaders
    gulp = re.sub ('[\n\r]+', '\n', f.read ())
    f.close ()                  # the handle was previously leaked
    ls = gulp.split ('\n^')

    etf_file_dict = {}
    for k in tag_dict:
        etf_file_dict[k] = {}

    last_tag = None
    last_numbers = None

    for l in ls:
        m = re.match ('^([a-zA-Z0-9&]+)\(([^)]+)\)', l)
        if m and m.group (1) in tag_dict:
            tag = m.group (1)
            indices = tuple ([int (s) for s in m.group (2).split (',')])
            content = l[m.end (2)+1:]

            tdict = etf_file_dict[tag]
            if indices not in tdict:
                tdict[indices] = []

            parsed = []
            if tag == 'verse' or tag == 'block':
                # free-text payload terminated by ^end
                m2 = re.match ('(.*)\^end', content)
                if m2:
                    parsed = [m2.group (1)]
            else:
                while content:
                    (v, content) = read_finale_value (content)
                    if v is not None:
                        parsed.append (v)

            tdict [indices].extend (parsed)

            last_indices = indices
            last_tag = tag
            continue

    # let's not do this: this really confuses when eE happens to be before a ^text.
    # if last_tag and last_indices:
    #     etf_file_dict[last_tag][last_indices].append (l)

    sys.stderr.write ('\n')
    return etf_file_dict
class Etf_file:
    """The whole parsed ETF file: builds the object graph (staves,
    measures, frames, chords, ...) from the raw tag dictionary and
    renders it as LilyPond."""
    def __init__ (self, name):
        # All lists are 1-based: index 0 is a None placeholder.
        self.measures = [None]          # Global_measure per measure number
        self.chords = [None]
        self.frames = [None]
        self.tuplets = [None]
        self.staffs = [None]
        self.slurs = [None]
        self.articulations = [None]
        self.syllables = [None]
        self.verses = [None]
        self.articulation_defs = [None]

        ## do it
        self.parse (name)

    def get_global_measure (self, no):
        # lazily create Global_measure NO
        fill_list_to (self.measures, no)
        if self.measures[no] == None:
            self.measures [no] = Global_measure (no)
        return self.measures[no]

    def get_staff(self,staffno):
        # lazily create Staff STAFFNO
        fill_list_to (self.staffs, staffno)
        if self.staffs[staffno] == None:
            self.staffs[staffno] = Staff (staffno)
        return self.staffs[staffno]

    # The try_XX methods below are per-record-tag handlers, dispatched
    # from routine_dict by parse ().

    # staff-spec
    def try_IS (self, indices, contents):
        pass
    def try_BC (self, indices, contents):
        # beat chart: decoded but currently unused
        bn = indices[0]
        where = contents[0] / 1024.0
    def try_TP(self, indices, contents):
        # tuplet record; consecutive TPs with the same start note
        # belong to the same tuplet
        (nil, num) = indices

        if self.tuplets[-1] == None or num <> self.tuplets[-1].start_note:
            self.tuplets.append (Tuplet (num))

        self.tuplets[-1].append_finale (contents)
    def try_IM (self, indices, contents):
        # articulation instance
        (a,b) = indices
        fin = contents
        self.articulations.append (Articulation (a,b,fin))
    def try_verse (self, indices, contents):
        # strip embedded ^cmd(...) / ^cmd control sequences from the text
        a = indices[0]
        body = contents[0]
        body = re.sub (r"""\^[a-z]+\([^)]+\)""", "", body)
        body = re.sub ("\^[a-z]+", "", body)
        self.verses.append (Verse (a, body))
    def try_ve (self,indices, contents):
        # lyric syllable
        (a,b) = indices
        self.syllables.append (Syllable (a,b,contents))
    def try_eE (self,indices, contents):
        # note entry (chord)
        no = indices[0]
        (prev, next, dur, pos, entryflag, extended, follow) = contents[:7]

        fill_list_to (self.chords, no)
        self.chords[no] =Chord (no, contents)
    def try_Sx(self,indices, contents):
        # slur
        slurno = indices[0]
        fill_list_to (self.slurs, slurno)
        self.slurs[slurno] = Slur(slurno, contents)
    def try_IX (self, indices, contents):
        # articulation definition
        n = indices[0]
        a = contents[0]
        b = contents[1]

        ix= None
        try:
            ix = self.articulation_defs[n]
        except IndexError:
            ix = Articulation_def (n,a,b)
            self.articulation_defs.append (Articulation_def (n, a, b))
    def try_GF(self, indices, contents):
        # per-staff measure spec
        (staffno,measno) = indices

        st = self.get_staff (staffno)
        meas = st.get_measure (measno)
        meas.finale = contents

    def try_FR(self, indices, contents):
        # frame: (first entry, last entry)
        frameno = indices [0]

        startnote = contents[0]
        endnote = contents[1]

        fill_list_to (self.frames, frameno)

        self.frames[frameno] = Frame ((frameno, startnote, endnote))

    def try_MS (self, indices, contents):
        # global measure spec: key, time signature, flags
        measno = indices[0]
        keynum = contents[1]
        meas =self. get_global_measure (measno)

        meas.set_key_sig (keynum)

        beats = contents[2]
        beatlen = contents[3]
        meas.set_timesig ((beats, beatlen))

        meas_flag1 = contents[4]
        meas_flag2 = contents[5]

        meas.set_flags (meas_flag1, meas_flag2);

    # dispatch table: record tag -> handler method
    routine_dict = {
        'MS': try_MS,
        'FR': try_FR,
        'GF': try_GF,
        'IX': try_IX,
        'Sx' : try_Sx,
        'eE' : try_eE,
        'verse' : try_verse,
        've' : try_ve,
        'IM' : try_IM,
        'TP' : try_TP,
        'BC' : try_BC,
        'IS' : try_IS,
    }

    def parse (self, etf_dict):
        """Dispatch every recorded tag to its handler in sorted index
        order, then wire up the object graph and compute the markup."""
        sys.stderr.write ('reconstructing ...')
        sys.stderr.flush ()

        for (tag,routine) in Etf_file.routine_dict.items ():
            ks = etf_dict[tag].keys ()
            ks.sort ()
            for k in ks:
                routine (self, k, etf_dict[tag][k])

        sys.stderr.write ('processing ...')
        sys.stderr.flush ()

        self.unthread_entries ()

        for st in self.staffs[1:]:
            if not st:
                continue
            mno = 1
            for m in st.measures[1:]:
                if not m:
                    continue

                m.calculate()
                try:
                    m.global_measure = self.measures[mno]
                except IndexError:
                    sys.stderr.write ("Non-existent global measure %d" % mno)
                    continue

                # resolve frame numbers into Frame objects (1-based list)
                frame_obj_list = [None]
                for frno in m.frames:
                    try:
                        fr = self.frames[frno]
                        frame_obj_list.append (fr)
                    except IndexError:
                        sys.stderr.write ("\nNon-existent frame %d" % frno)

                m.frames = frame_obj_list
                for fr in frame_obj_list[1:]:
                    if not fr:
                        continue

                    fr.set_measure (m)

                    fr.chords = self.get_thread (fr.start, fr.end)
                    for c in fr.chords:
                        c.frame = fr
                mno = mno + 1

        for c in self.chords[1:]:
            if c:
                c.calculate()

        for f in self.frames[1:]:
            if f:
                f.calculate ()

        for t in self.tuplets[1:]:
            t.calculate (self.chords)

        for s in self.slurs[1:]:
            if s:
                s.calculate (self.chords)

        for s in self.articulations[1:]:
            s.calculate (self.chords, self.articulation_defs)

    def get_thread (self, startno, endno):
        """Collect the chord chain from entry STARTNO through ENDNO by
        following the .next links."""
        thread = []

        c = None
        try:
            c = self.chords[startno]
        except IndexError:
            sys.stderr.write ("Huh? Frame has invalid bounds (%d,%d)\n" % (startno, endno))
            return []

        while c and c.number <> endno:
            d = c # hack to avoid problem with scripts/build/grand-replace.py
            thread.append (d)
            c = c.next

        if c:
            d = c # hack to avoid problem with scripts/build/grand-replace.py
            thread.append (d)

        return thread

    def dump (self):
        """Render the complete LilyPond output: staves, verses, and the
        final score expression."""
        str = ''
        staffs = []
        for s in self.staffs[1:]:
            if s:
                str = str + '\n\n' + s.dump ()
                staffs.append ('\\' + s.staffid ())

        # should use \addlyrics ?

        for v in self.verses[1:]:
            str = str + v.dump()

        if len (self.verses) > 1:
            sys.stderr.write ("\nLyrics found; edit to use \\addlyrics to couple to a staff\n")

        if staffs:
            str += '\\version "2.3.25"\n'
            str = str + '<<\n %s\n>> } ' % ' '.join (staffs)

        return str

    def __str__ (self):
        # NOTE(review): self.entries is never assigned anywhere in this
        # class -- calling __str__ raises AttributeError; confirm intent.
        return 'ETF FILE %s %s' % (self.measures, self.entries)

    def unthread_entries (self):
        # Resolve each chord's raw prev/next entry numbers into object links.
        for e in self.chords[1:]:
            if not e:
                continue

            e.prev = self.chords[e.finale[0]]
            e.next = self.chords[e.finale[1]]
def identify():
    """Print the program name/version banner on stderr."""
    banner = "%s from LilyPond %s\n" % (program_name, version)
    sys.stderr.write (banner)
def warranty ():
    """Print the banner plus copyright and warranty notice on stdout."""
    identify ()
    sys.stdout.write ('''
%s
%s
%s
%s
''' % ( _ ('Copyright (c) %s by') % '2001--2015',
'\n '.join (authors),
_ ('Distributed under terms of the GNU General Public License.'),
_ ('It comes with NO WARRANTY.')))
def get_option_parser ():
    """Build the option parser for the etf2ly command line
    (-h/--help, --version, -o/--output, -w/--warranty)."""
    p = ly.get_option_parser (usage=_ ("%s [OPTION]... ETF-FILE") % 'etf2ly',
                 description=_ ("""Enigma Transport Format is a format used by Coda Music Technology's
Finale product. etf2ly converts a subset of ETF to a ready-to-use LilyPond file.
"""),
                 add_help_option=False)
    p.add_option("-h", "--help",
                 action="help",
                 help=_ ("show this help and exit"))
    p.version = "etf2ly (LilyPond) @TOPLEVEL_VERSION@"
    p.add_option("--version",
                 action="version",
                 help=_ ("show version number and exit"))
    p.add_option ('-o', '--output', help=_ ("write output to FILE"),
                  metavar=_("FILE"),
                  action='store')
    # A stray trailing comma here used to turn this statement into a
    # throwaway one-element tuple; removed.
    p.add_option ('-w', '--warranty', help=_ ("show warranty and copyright"),
                  action='store_true')
    p.add_option_group ('',
                        description=(
            _ ('Report bugs via %s')
            % 'http://post.gmane.org/post.php'
            '?group=gmane.comp.gnu.lilypond.bugs') + '\n')
    return p
def do_options ():
    """Parse the command line; handle -w (warranty) by printing and exiting."""
    parser = get_option_parser ()
    (options, args) = parser.parse_args ()
    if options.warranty:
        warranty ()
        sys.exit (0)
    return (options, args)
# -- script entry: convert each ETF file named on the command line.
(options, files) = do_options()
identify()

out_filename = options.output

e = None
for f in files:
    if f == '-':
        f = ''

    sys.stderr.write ('Processing `%s\'\n' % f)

    dict = parse_etf_file (f, Etf_file.routine_dict)
    e = Etf_file(dict)
    if not out_filename:
        out_filename = os.path.basename (re.sub ('(?i).etf$', '.ly', f))

    # don't clobber the input if the suffix substitution changed nothing
    if out_filename == f:
        out_filename = os.path.basename (f + '.ly')

    sys.stderr.write ('Writing `%s\'' % out_filename)

    # NOTE(review): out_filename is not reset per input file, so with
    # several inputs each conversion after the first overwrites the same
    # output -- confirm whether multi-file invocation is supported.
    # (This assignment also rebinds the `ly' helper-module name, which is
    # presumably no longer needed at this point.)
    ly = e.dump()

    fo = open (out_filename, 'w')
    fo.write ('%% lily was here -- automatically converted by etf2ly from %s\n' % f)
    fo.write(ly)
    fo.close ()
| gpl-3.0 |
apache/tashi | src/zoni/hardware/f10s50switch.py | 1 | 12581 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# $Id: dellswitch.py 1034085 2010-11-11 19:56:30Z rgass $
#
# switch configuration manager for force10 s50
import os
import sys
import pexpect
import datetime
import time
import thread
import string
import getpass
import socket
import tempfile
import logging
#import zoni
from zoni.data.resourcequerysql import *
from zoni.hardware.hwswitchinterface import HwSwitchInterface
from zoni.data.resourcequerysql import ResourceQuerySql
from zoni.agents.dhcpdns import DhcpDns
''' Using pexpect to control switches because couldn't get snmp to work
'''
class HwF10S50Switch(HwSwitchInterface):
def __init__(self, config, host=None):
self.config = config
self.host = host
self.verbose = False
self.log = logging.getLogger(os.path.basename(__file__))
def setVerbose(self, verbose):
self.verbose = verbose
def __login(self):
switchIp = "ssh " + self.host['hw_userid'] + "@" + self.host['hw_name']
child = pexpect.spawn(switchIp)
# Be Verbose and print everything
if self.verbose:
child.logfile = sys.stdout
opt = child.expect(['Name:', 'assword:', 'Are you sure.*', pexpect.EOF, pexpect.TIMEOUT])
#XXX Doesn't seem to do what I want:(
child.setecho(False)
# Send a yes to register authenticity of host for ssh
if opt == 2:
child.sendline("yes")
opt = child.expect(['Name:', 'assword:', 'Are you sure.*', pexpect.EOF, pexpect.TIMEOUT])
if opt == 0:
child.sendline(self.host['hw_userid'])
i = child.expect(['assword:', 'Connection', pexpect.EOF, pexpect.TIMEOUT])
child.sendline(self.host['hw_password'])
i=child.expect(['console','#', 'Name:', pexpect.EOF, pexpect.TIMEOUT])
if i == 2:
mesg = "ERROR: Login to %s failed\n" % (self.host['hw_name'])
self.log.error(mesg)
exit(1)
if opt == 1:
# the S50 doesn't prompt for username
child.sendline(self.host['hw_password'])
i=child.expect(['console','>', 'Name:', pexpect.EOF, pexpect.TIMEOUT])
# on the S50, need to send enable, just send to all
child.sendline('enable')
i=child.expect(['assword:', pexpect.EOF, pexpect.TIMEOUT])
child.sendline(self.host['hw_password'])
i=child.expect(['#', pexpect.EOF, pexpect.TIMEOUT])
return child
def __getPrsLabel(self):
dadate = datetime.datetime.now().strftime("%Y%m%d-%H%M-%S")
return "PRS_" + dadate
def enableHostPort(self):
child = self.__login()
child.sendline('config')
cmd = "interface g 0/" + str(self.host['hw_port'])
child.sendline(cmd)
cmd = "no shutdown"
child.sendline(cmd)
child.sendline('exit')
child.terminate()
def disableHostPort(self):
child = self.__login()
child.sendline('config')
cmd = "interface g 0/" + str(self.host['hw_port'])
child.sendline(cmd)
cmd = "shutdown"
child.sendline(cmd)
child.sendline('exit')
child.terminate()
def removeVlan(self, num):
# Check for important vlans
cmd = "no interface vlan " + num
child = self.__login()
child.sendline('config')
child.sendline(cmd)
child.sendline('exit')
child.terminate()
def addVlanToTrunk(self, vlan):
mesg = "Adding Vlan %s to trunk on switch" % (vlan)
self.log.info(mesg)
child = self.__login()
child.sendline('config')
cmd = "interface port-channel 1"
child.sendline(cmd)
child.expect(["conf-if", pexpect.EOF])
child.sendline("switchport")
child.sendline("exit")
child.sendline("interface vlan " + vlan")
child.expect(["conf-if", pexpect.EOF])
cmd = "tagged port-channel 1"
child.sendline(cmd)
child.sendline('exit')
def createVlansThread(self, vlan, switch,host):
mesg = "Creating vlan %s on switch %s" % (str(vlan),str(switch))
print "host is ", host
self.log(mesg)
print "create"
self.createVlan(vlan)
print "cend"
self.addVlanToTrunk(vlan);
thread.exit()
def createVlans(self, vlan, switchlist, query):
for switch in switchlist:
#print "working on switch ", switch
#self.host = query.getSwitchInfo(switch)
#thread.start_new_thread(self.createVlansThread, (vlan, switch, self.host))
mesg = "Creating vlan %s on switch %s" % (str(vlan), str(switch))
self.log.info(mesg)
self.host = query.getSwitchInfo(switch)
self.createVlan(vlan)
self.addVlanToTrunk(vlan);
def removeVlans(self, vlan, switchlist, query):
for switch in switchlist:
mesg = "Deleting vlan %s on switch %s" % (str(vlan),str(switch))
self.log.info(mesg)
self.host = query.getSwitchInfo(switch)
self.removeVlan(vlan)
def createVlan(self, val):
vlanname = False
if ":" in val:
num = int(val.split(":")[0])
vlanname = val.split(":")[1]
else:
vlanname = self.__getPrsLabel()
num = int(val)
#if type(num) != int:
#mesg = "ERROR: Vlan must be a number (0-4095)\n"
#sys.stderr.write(mesg)
#exit(1)
if num > 4095 or num < 0:
mesg = "Vlan out of range. Must be < %s" % (self.config['vlan_max'])
self.log.error(mesg)
exit(1)
child = self.__login()
child.sendline('config')
child.expect(["config",pexpect.EOF, pexpect.TIMEOUT])
child.sendline('interface vlan %d' % num)
child.expect(["conf-if",pexpect.EOF, pexpect.TIMEOUT])
child.sendline("shutdown")
child.sendline("no ip address")
if vlanname:
child.sendline("name " + vlanname)
child.sendline('exit')
child.expect(["config",pexpect.EOF, pexpect.TIMEOUT])
# Raw Switch commands. DEBUG ONLY!, Doesn't work!
def sendSwitchCommand(self, cmds):
if len(cmds) > 0:
child = self.__login()
child.logfile = sys.stdout
for cmd in cmds.split(";"):
child.sendline(cmd)
try:
i=child.expect(['console','#', 'Name:', pexpect.EOF, pexpect.TIMEOUT], timeout=2)
i=child.expect(['console','#', 'Name:', pexpect.EOF, pexpect.TIMEOUT], timeout=2)
except EOF:
print "EOF", i
#child.sendline()
except TIMEOUT:
print "TIMEOUT", i
#child.interact(escape_character='\x1d', input_filter=None, output_filter=None)
child.terminate()
#print "before", child.before
#print "after", child.after
def addNodeToVlan(self, vlan):
mesg = "NOOP Adding Node to vlan %s" % (str(vlan))
self.log.info(mesg)
def removeNodeFromVlan(self, vlan):
mesg = "NOOP Removing Node from vlan %s" % (str(vlan))
self.log.info(mesg)
def setNativeVlan(self, vlan):
child = self.__login()
child.logfile = sys.stdout
child.sendline('config')
cmd = "interface vlan " + vlan)
child.sendline(cmd)
i=child.expect(['conf-if', pexpect.EOF, pexpect.TIMEOUT])
if i > 0:
self.log.error("setNativeVlan %s failed" % (cmd))
cmd = "untagged g 0/" + str(self.host['hw_port'])
child.sendline(cmd)
child.expect(['config-if', pexpect.EOF, pexpect.TIMEOUT])
child.sendline('exit')
child.terminate()
# Restore Native Vlan. In Dell's case, this is vlan 1
def restoreNativeVlan(self):
pass
# Setup the switch for node allocation
def allocateNode(self):
pass
# Remove all vlans from the interface
def removeAllVlans(self):
pass
def showInterfaceConfig(self):
child = self.__login()
print "\n------------------------------------"
print "SWITCH - " + self.host['hw_name'] + "/" + str(self.host['hw_port'])
print "NODE - " + self.host['location']
print "------------------------------------\n"
child.logfile = sys.stdout
cmd = "show interfaces g 0/" + str(self.host['hw_port'])
child.sendline(cmd)
i = child.expect(['#', pexpect.EOF, pexpect.TIMEOUT])
child.terminate()
def interactiveSwitchConfig(self):
switchIp = "ssh " + self.host['hw_name']
child = pexpect.spawn(switchIp)
child.setecho(False)
#child.expect('Name:')
child.sendline(self.host['hw_userid'])
#i=child.expect(['test','password:','Password:', pexpect.EOF, pexpect.TIMEOUT])
#child.logfile = sys.stdout
child.sendline(self.host['hw_password'])
child.interact(escape_character='\x1d', input_filter=None, output_filter=None)
def registerToZoni(self, user, password, host):
host = string.strip(str(host))
# Get hostname of the switch
if len(host.split(".")) == 4:
ip = host
try:
host = string.strip(socket.gethostbyaddr(ip)[0].split(".")[0])
except Exception, e:
mesg = "Host (%s) not registered in DNS, %s" % (host,str(e))
self.log.warning(mesg)
else:
# Maybe a hostname was entered...
try:
ip = socket.gethostbyname(host)
except Exception, e:
mesg = "Host (%s) not registered in DNS, %s" % (host, str(e))
self.log.error(mesg)
mesg = "Unable to resolve hostname"
self.log.critical(mesg)
exit()
switchIp = "ssh " + user + "@" + ip
child = pexpect.spawn(switchIp)
opt = child.expect(['Name:', 'assword:', 'Are you sure.*', pexpect.EOF, pexpect.TIMEOUT])
#XXX Doesn't seem to do what I want:(
child.setecho(False)
# Send a yes to register authenticity of host for ssh
if opt == 2:
child.sendline("yes")
opt = child.expect(['Name:', 'assword:', 'Are you sure.*', pexpect.EOF, pexpect.TIMEOUT])
if opt == 0:
child.sendline(user)
i = child.expect(['assword:', 'Connection', pexpect.EOF, pexpect.TIMEOUT])
child.sendline(password)
i=child.expect(['console',host, 'Name:', pexpect.EOF, pexpect.TIMEOUT])
if i == 2:
mesg = "Login to switch %s failed" % (host)
self.log.error(mesg)
exit(1)
if opt == 1:
child.sendline(password)
i=child.expect(['console',host, 'Name:', pexpect.EOF, pexpect.TIMEOUT])
# on the 6448 dell, need to send enable, just send to all
child.sendline('enable')
i=child.expect(['#', pexpect.EOF, pexpect.TIMEOUT])
fout = tempfile.TemporaryFile()
child.logfile = fout
cmd = "show system"
child.sendline(cmd)
val = host + "#"
i = child.expect([val, '\n\r\n\r', pexpect.EOF, pexpect.TIMEOUT])
cmd = "show version"
child.sendline(cmd)
i = child.expect([val, '\n\r\n\r', pexpect.EOF, pexpect.TIMEOUT])
fout.seek(0)
a={}
for i in fout.readlines():
if "System Location:" in i:
datime = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.localtime())
val = "Registered by Zoni on : " + datime
a['hw_notes'] = val + "; " + string.strip(i.split(':', 1)[1])
if "System MAC" in i:
a['hw_mac'] = string.strip(i.split(':', 1)[1])
if "SW version" in i:
a['hw_version_sw'] = string.strip(i.split(' ')[1].split()[0])
if "HW version" in i:
a['hw_version_fw'] = string.strip(i.split(' ')[1].split()[0])
a['hw_type'] = "switch"
a['hw_make'] = "F10S50"
a['hw_name'] = host
a['hw_ipaddr'] = ip
a['hw_userid'] = user
a['hw_password'] = password
child.sendline('exit')
child.sendline('exit')
child.terminate()
# Try to get more info via snmp
from pysnmp.entity.rfc3413.oneliner import cmdgen
from pysnmp.proto import rfc1902
user = "public"
oid = eval("1,3,6,1,4,1,674,10895,3000,1,2,100,1,0")
errorIndication, errorStatus, errorIndex, varBinds = cmdgen.CommandGenerator().getCmd( \
cmdgen.CommunityData('my-agent', user, 0), \
cmdgen.UdpTransportTarget((host, 161)), oid)
a['hw_model'] = str(varBinds[0][1])
oid = eval("1,3,6,1,4,1,674,10895,3000,1,2,100,3,0")
errorIndication, errorStatus, errorIndex, varBinds = cmdgen.CommandGenerator().getCmd( \
cmdgen.CommunityData('my-agent', user, 0), \
cmdgen.UdpTransportTarget((host, 161)), oid)
a['hw_make'] = str(varBinds[0][1])
# Register in dns
if self.config['dnsEnabled']:
try:
mesg = "Adding %s(%s) to dns" % (host, ip)
self.log.info(mesg)
DhcpDns(self.config, verbose=self.verbose).addDns(host, ip)
mesg = "Adding %s(%s) to dhcp" % (host, ip)
self.log.info(mesg)
DhcpDns(self.config, verbose=self.verbose).addDhcp(host, ip, a['hw_mac'])
except:
mesg = "Adding %s(%s) %s to dhcp/dns failed" % (host, ip, a['hw_mac'])
self.log.error(mesg)
# Add to db
# Register to DB
query = ResourceQuerySql(self.config, self.verbose)
query.registerHardware(a)
| apache-2.0 |
orangeduck/PyAutoC | Python27/Lib/multiprocessing/process.py | 45 | 9361 | #
# Module providing the `Process` class which emulates `threading.Thread`
#
# multiprocessing/process.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__all__ = ['Process', 'current_process', 'active_children']
#
# Imports
#
import os
import sys
import signal
import itertools
#
#
#
try:
    ORIGINAL_DIR = os.path.abspath(os.getcwd())
except OSError:
    # getcwd() can fail (e.g. the starting directory was deleted); in that
    # case we simply don't know the original directory.
    ORIGINAL_DIR = None
#
# Public functions
#
def current_process():
    '''
    Return process object representing the current process
    '''
    # _current_process is a module-level global; it is rebound inside the
    # child by Process._bootstrap().
    return _current_process
def active_children():
    '''
    Return list of process objects corresponding to live child processes
    '''
    # Reap already-finished children first so the snapshot below only
    # contains processes that are still alive.
    _cleanup()
    return list(_current_process._children)
#
#
#
def _cleanup():
    """Drop references to child processes whose OS process has finished."""
    finished = [child for child in _current_process._children
                if child._popen.poll() is not None]
    for child in finished:
        _current_process._children.discard(child)
#
# The `Process` class
#
class Process(object):
    '''
    Process objects represent activity that is run in a separate process

    The class is analogous to `threading.Thread`
    '''
    # Hook used to launch the child; platform-specific code (or tests) may
    # override it.  When None, the default Popen from .forking is used.
    _Popen = None

    def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
        # NOTE: kwargs={} is a shared mutable default, but it is only read
        # and copied via dict(kwargs) below, so the shared dict is never
        # mutated.
        assert group is None, 'group argument must be None for now'
        count = _current_process._counter.next()
        # Identity = parent's identity plus this child's sequence number,
        # e.g. (2, 1) for the first child of the parent's second child.
        self._identity = _current_process._identity + (count,)
        self._authkey = _current_process._authkey
        self._daemonic = _current_process._daemonic
        self._tempdir = _current_process._tempdir
        self._parent_pid = os.getpid()
        self._popen = None
        self._target = target
        self._args = tuple(args)
        self._kwargs = dict(kwargs)
        # Default name encodes the identity tuple, e.g. 'Process-2:1'.
        self._name = name or type(self).__name__ + '-' + \
                     ':'.join(str(i) for i in self._identity)

    def run(self):
        '''
        Method to be run in sub-process; can be overridden in sub-class
        '''
        if self._target:
            self._target(*self._args, **self._kwargs)

    def start(self):
        '''
        Start child process
        '''
        assert self._popen is None, 'cannot start a process twice'
        assert self._parent_pid == os.getpid(), \
               'can only start a process object created by current process'
        assert not _current_process._daemonic, \
               'daemonic processes are not allowed to have children'
        # Reap finished children before adding a new one.
        _cleanup()
        if self._Popen is not None:
            Popen = self._Popen
        else:
            from .forking import Popen
        self._popen = Popen(self)
        _current_process._children.add(self)

    def terminate(self):
        '''
        Terminate process; sends SIGTERM signal or uses TerminateProcess()
        '''
        self._popen.terminate()

    def join(self, timeout=None):
        '''
        Wait until child process terminates
        '''
        assert self._parent_pid == os.getpid(), 'can only join a child process'
        assert self._popen is not None, 'can only join a started process'
        res = self._popen.wait(timeout)
        if res is not None:
            # Child has exited (res is its exit code): stop tracking it.
            _current_process._children.discard(self)

    def is_alive(self):
        '''
        Return whether process is alive
        '''
        if self is _current_process:
            return True
        assert self._parent_pid == os.getpid(), 'can only test a child process'
        if self._popen is None:
            # Never started.
            return False
        self._popen.poll()
        return self._popen.returncode is None

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, name):
        assert isinstance(name, basestring), 'name must be a string'
        self._name = name

    @property
    def daemon(self):
        '''
        Return whether process is a daemon
        '''
        return self._daemonic

    @daemon.setter
    def daemon(self, daemonic):
        '''
        Set whether process is a daemon
        '''
        # The daemon flag is inherited by the child at start(); changing it
        # afterwards would have no effect, hence the assertion.
        assert self._popen is None, 'process has already started'
        self._daemonic = daemonic

    @property
    def authkey(self):
        return self._authkey

    @authkey.setter
    def authkey(self, authkey):
        '''
        Set authorization key of process
        '''
        self._authkey = AuthenticationString(authkey)

    @property
    def exitcode(self):
        '''
        Return exit code of process or `None` if it has yet to stop
        '''
        if self._popen is None:
            # Not started: self._popen *is* None, the documented result.
            return self._popen
        return self._popen.poll()

    @property
    def ident(self):
        '''
        Return identifier (PID) of process or `None` if it has yet to start
        '''
        if self is _current_process:
            return os.getpid()
        else:
            # self._popen is None until start(), so this yields None then.
            return self._popen and self._popen.pid

    pid = ident

    def __repr__(self):
        if self is _current_process:
            status = 'started'
        elif self._parent_pid != os.getpid():
            # Not our child (e.g. object inherited across a fork); we
            # cannot poll it from here.
            status = 'unknown'
        elif self._popen is None:
            status = 'initial'
        else:
            if self._popen.poll() is not None:
                status = self.exitcode
            else:
                status = 'started'

        if type(status) is int:
            if status == 0:
                status = 'stopped'
            else:
                # Negative exit codes mean death by signal; show its name.
                status = 'stopped[%s]' % _exitcode_to_name.get(status, status)

        return '<%s(%s, %s%s)>' % (type(self).__name__, self._name,
                                   status, self._daemonic and ' daemon' or '')

    ##

    def _bootstrap(self):
        # Entry point executed inside the child process: reset per-process
        # globals, run self.run(), and translate the outcome into an exit
        # code returned to the launcher.
        from . import util
        global _current_process

        try:
            self._children = set()
            self._counter = itertools.count(1)
            try:
                sys.stdin.close()
                sys.stdin = open(os.devnull)
            except (OSError, ValueError):
                pass
            # From now on this process is "the current process".
            _current_process = self
            # Finalizers registered in the parent must not run in the child.
            util._finalizer_registry.clear()
            util._run_after_forkers()
            util.info('child process calling self.run()')
            try:
                self.run()
                exitcode = 0
            finally:
                util._exit_function()
        except SystemExit, e:
            if not e.args:
                exitcode = 1
            elif type(e.args[0]) is int:
                exitcode = e.args[0]
            else:
                sys.stderr.write(e.args[0] + '\n')
                sys.stderr.flush()
                exitcode = 1
        except:
            # Any other exception: report the traceback and exit non-zero.
            exitcode = 1
            import traceback
            sys.stderr.write('Process %s:\n' % self.name)
            sys.stderr.flush()
            traceback.print_exc()

        util.info('process exiting with exitcode %d' % exitcode)
        return exitcode
#
# We subclass bytes to avoid accidental transmission of auth keys over network
#
class AuthenticationString(bytes):
    """Byte string holding a process authentication key.

    Refuses ordinary pickling so the key cannot accidentally be sent over
    a network; it may only be pickled while spawning a child process.
    """

    def __reduce__(self):
        from .forking import Popen
        if Popen.thread_is_spawning():
            # Safe: we are serializing state for a child process we spawn.
            return AuthenticationString, (bytes(self),)
        raise TypeError(
            'Pickling an AuthenticationString object is '
            'disallowed for security reasons'
        )
#
# Create object representing the main process
#
class _MainProcess(Process):
    """Process object for the process the interpreter started in."""

    def __init__(self):
        # The main process has no parent and an empty identity tuple; all
        # assignments below are independent of one another.
        self._identity = ()
        self._name = 'MainProcess'
        self._parent_pid = None
        self._daemonic = False
        self._popen = None
        self._counter = itertools.count(1)
        self._children = set()
        self._tempdir = None
        # Random key used to authenticate connections between processes.
        self._authkey = AuthenticationString(os.urandom(32))
# Instantiate the singleton for this (the main) process, then drop the
# class so no further instances can be created.
_current_process = _MainProcess()
del _MainProcess

#
# Give names to some return codes
#

# Map -SIGxxx exit codes to signal names, e.g. -11 -> 'SIGSEGV', used by
# Process.__repr__ to describe children killed by a signal.
_exitcode_to_name = {}
for name, signum in signal.__dict__.items():
    if name.startswith('SIG') and '_' not in name:
        _exitcode_to_name[-signum] = name
| bsd-2-clause |
mancoast/CPythonPyc_test | fail/312_regrtest.py | 1 | 40995 | #! /usr/bin/env python
"""Regression test.
This will find all modules whose name is "test_*" in the test
directory, and run them. Various command line options provide
additional facilities.
Command line options:
-v: verbose -- run tests in verbose mode with output to stdout
-w: verbose2 -- re-run failed tests in verbose mode
-d: debug -- print traceback for failed tests
-q: quiet -- don't print anything except if a test fails
-x: exclude -- arguments are tests to *exclude*
-s: single -- run only a single test (see below)
-S: slow -- print the slowest 10 tests
-r: random -- randomize test execution order
-f: fromfile -- read names of tests to run from a file (see below)
-l: findleaks -- if GC is available detect tests that leak memory
-u: use -- specify which special resource intensive tests to run
-h: help -- print this text and exit
-t: threshold -- call gc.set_threshold(N)
-T: coverage -- turn on code coverage using the trace module
-D: coverdir -- Directory where coverage files are put
-N: nocoverdir -- Put coverage files alongside modules
-L: runleaks -- run the leaks(1) command just before exit
-R: huntrleaks -- search for reference leaks (needs debug build, v. slow)
-M: memlimit -- run very large memory-consuming tests
-n: nowindows -- suppress error message boxes on Windows
If non-option arguments are present, they are names for tests to run,
unless -x is given, in which case they are names for tests not to run.
If no test names are given, all tests are run.
-v is incompatible with -g and does not compare test output files.
-r randomizes test execution order. You can use --randseed=int to provide a
int seed value for the randomizer; this is useful for reproducing troublesome
test orders.
-T turns on code coverage tracing with the trace module.
-D specifies the directory where coverage files are put.
-N Put coverage files alongside modules.
-s means to run only a single test and exit. This is useful when
doing memory analysis on the Python interpreter (which tend to consume
too many resources to run the full regression test non-stop). The
file /tmp/pynexttest is read to find the next test to run. If this
file is missing, the first test_*.py file in testdir or on the command
line is used. (actually tempfile.gettempdir() is used instead of
/tmp).
-S is used to continue running tests after an aborted run. It will
maintain the order a standard run (ie, this assumes -r is not used).
This is useful after the tests have prematurely stopped for some external
reason and you want to start running from where you left off rather
than starting from the beginning.
-f reads the names of tests from the file given as f's argument, one
or more test names per line. Whitespace is ignored. Blank lines and
lines beginning with '#' are ignored. This is especially useful for
whittling down failures involving interactions among tests.
-L causes the leaks(1) command to be run just before exit if it exists.
leaks(1) is available on Mac OS X and presumably on some other
FreeBSD-derived systems.
-R runs each test several times and examines sys.gettotalrefcount() to
see if the test appears to be leaking references. The argument should
be of the form stab:run:fname where 'stab' is the number of times the
test is run to let gettotalrefcount settle down, 'run' is the number
of times further it is run and 'fname' is the name of the file the
reports are written to. These parameters all have defaults (5, 4 and
"reflog.txt" respectively), and the minimal invocation is '-R :'.
-M runs tests that require an exorbitant amount of memory. These tests
typically try to ascertain containers keep working when containing more than
2 billion objects, which only works on 64-bit systems. There are also some
tests that try to exhaust the address space of the process, which only makes
sense on 32-bit systems with at least 2Gb of memory. The passed-in memlimit,
which is a string in the form of '2.5Gb', determines howmuch memory the
tests will limit themselves to (but they may go slightly over.) The number
shouldn't be more memory than the machine has (including swap memory). You
should also keep in mind that swap memory is generally much, much slower
than RAM, and setting memlimit to all available RAM or higher will heavily
tax the machine. On the other hand, it is no use running these tests with a
limit of less than 2.5Gb, and many require more than 20Gb. Tests that expect
to use more than memlimit memory will be skipped. The big-memory tests
generally run very, very long.
-u is used to specify which special resource intensive tests to run,
such as those requiring large file support or network connectivity.
The argument is a comma-separated list of words indicating the
resources to test. Currently only the following are defined:
all - Enable all special resources.
audio - Tests that use the audio device. (There are known
cases of broken audio drivers that can crash Python or
even the Linux kernel.)
curses - Tests that use curses and will modify the terminal's
state and output modes.
largefile - It is okay to run some test that may create huge
files. These tests can take a long time and may
consume >2GB of disk space temporarily.
network - It is okay to run tests that use external network
resource, e.g. testing SSL support for sockets.
decimal - Test the decimal module against a large suite that
verifies compliance with standards.
compiler - Allow test_tokenize to verify round-trip lexing on
every file in the test library.
subprocess Run all tests for the subprocess module.
urlfetch - It is okay to download files required on testing.
gui - Run tests that require a running GUI.
To enable all resources except one, use '-uall,-<resource>'. For
example, to run all the tests except for the gui tests, give the
option '-uall,-gui'.
"""
import getopt
import os
import random
import re
import io
import sys
import time
import traceback
import warnings
import unittest
from inspect import isabstract
# I see no other way to suppress these warnings;
# putting them in test_grammar.py has no effect:
warnings.filterwarnings("ignore", "hex/oct constants", FutureWarning,
                        ".*test.test_grammar$")

if sys.maxsize > 0x7fffffff:
    # Also suppress them in <string>, because for 64-bit platforms,
    # that's where test_grammar.py hides them.
    warnings.filterwarnings("ignore", "hex/oct constants", FutureWarning,
                            "<string>")

# Ignore ImportWarnings that only occur in the source tree,
# (because of modules with the same name as source-directories in Modules/)
for mod in ("ctypes", "gzip", "zipfile", "tarfile", "encodings.zlib_codec",
            "test.test_zipimport", "test.test_zlib", "test.test_zipfile",
            "test.test_codecs", "test.string_tests"):
    warnings.filterwarnings(module=".*%s$" % (mod,),
                            action="ignore", category=ImportWarning)

# MacOSX (a.k.a. Darwin) has a default stack size that is too small
# for deeply recursive regular expressions. We see this as crashes in
# the Python test suite when running test_re.py and test_sre.py. The
# fix is to set the stack limit to 2048.
# This approach may also be useful for other Unixy platforms that
# suffer from small default stack limits.
if sys.platform == 'darwin':
    try:
        import resource
    except ImportError:
        # No resource module on this build; leave the limit alone.
        pass
    else:
        soft, hard = resource.getrlimit(resource.RLIMIT_STACK)
        # Raise the soft limit to 2 MiB, but never above the hard limit.
        newsoft = min(hard, max(soft, 1024*2048))
        resource.setrlimit(resource.RLIMIT_STACK, (newsoft, hard))

from test import support

# Special resource-intensive test categories; each must be requested
# explicitly with -u/--use before the corresponding tests will run.
RESOURCE_NAMES = ('audio', 'curses', 'largefile', 'network',
                  'decimal', 'compiler', 'subprocess', 'urlfetch', 'gui')
def usage(msg):
    """Write *msg* and a usage hint to stderr, then exit with status 2."""
    sys.stderr.write(str(msg) + "\n")
    sys.stderr.write("Use --help for usage\n")
    sys.exit(2)
def main(tests=None, testdir=None, verbose=0, quiet=False, generate=False,
         exclude=False, single=False, randomize=False, fromfile=None,
         findleaks=False, use_resources=None, trace=False, coverdir='coverage',
         runleaks=False, huntrleaks=False, verbose2=False, print_slow=False,
         random_seed=None):
    """Execute a test suite.

    This also parses command-line options and modifies its behavior
    accordingly.

    tests -- a list of strings containing test names (optional)
    testdir -- the directory in which to look for tests (optional)

    Users other than the Python test suite will certainly want to
    specify testdir; if it's omitted, the directory containing the
    Python test suite is searched for.

    If the tests argument is omitted, the tests listed on the
    command-line will be used.  If that's empty, too, then all *.py
    files beginning with test_ will be used.

    The other default arguments (verbose, quiet, generate, exclude,
    single, randomize, findleaks, use_resources, trace, coverdir,
    print_slow, and random_seed) allow programmers calling main()
    directly to set the values that would normally be set by flags
    on the command line.
    """

    support.record_original_stdout(sys.stdout)
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hvgqxsSrf:lu:t:TD:NLR:wM:n',
                                   ['help', 'verbose', 'quiet', 'exclude',
                                    'single', 'slow', 'random', 'fromfile',
                                    'findleaks', 'use=', 'threshold=', 'trace',
                                    'coverdir=', 'nocoverdir', 'runleaks',
                                    'huntrleaks=', 'verbose2', 'memlimit=',
                                    'debug', 'start=', 'nowindows',
                                    'randseed=',
                                    ])
    except getopt.error as msg:
        usage(msg)

    # Defaults
    if random_seed is None:
        random_seed = random.randrange(10000000)
    if use_resources is None:
        use_resources = []
    debug = False
    start = None
    for o, a in opts:
        if o in ('-h', '--help'):
            print(__doc__)
            return
        elif o in ('-v', '--verbose'):
            verbose += 1
        elif o in ('-w', '--verbose2'):
            verbose2 = True
        elif o in ('-d', '--debug'):
            debug = True
        elif o in ('-q', '--quiet'):
            quiet = True;
            verbose = 0
        elif o in ('-x', '--exclude'):
            exclude = True
        # NOTE(review): '-S' is consumed by this branch, so the
        # ('-S', '--slow') branch below is only reachable via '--slow',
        # even though the module docstring advertises -S for both.
        elif o in ('-S', '--start'):
            start = a
        elif o in ('-s', '--single'):
            single = True
        elif o in ('-S', '--slow'):
            print_slow = True
        elif o in ('-r', '--randomize'):
            randomize = True
        elif o == '--randseed':
            random_seed = int(a)
        elif o in ('-f', '--fromfile'):
            fromfile = a
        elif o in ('-l', '--findleaks'):
            findleaks = True
        elif o in ('-L', '--runleaks'):
            runleaks = True
        elif o in ('-t', '--threshold'):
            import gc
            gc.set_threshold(int(a))
        elif o in ('-T', '--coverage'):
            trace = True
        elif o in ('-D', '--coverdir'):
            coverdir = os.path.join(os.getcwd(), a)
        elif o in ('-N', '--nocoverdir'):
            coverdir = None
        elif o in ('-R', '--huntrleaks'):
            # -R stab:run:fname, with defaults 5, 4 and "reflog.txt".
            huntrleaks = a.split(':')
            if len(huntrleaks) not in (2, 3):
                print(a, huntrleaks)
                usage('-R takes 2 or 3 colon-separated arguments')
            if not huntrleaks[0]:
                huntrleaks[0] = 5
            else:
                huntrleaks[0] = int(huntrleaks[0])
            if not huntrleaks[1]:
                huntrleaks[1] = 4
            else:
                huntrleaks[1] = int(huntrleaks[1])
            if len(huntrleaks) == 2 or not huntrleaks[2]:
                huntrleaks[2:] = ["reflog.txt"]
            # Avoid false positives due to the character cache in
            # stringobject.c filling slowly with random data
            warm_char_cache()
        elif o in ('-M', '--memlimit'):
            support.set_memlimit(a)
        elif o in ('-u', '--use'):
            u = [x.lower() for x in a.split(',')]
            for r in u:
                if r == 'all':
                    use_resources[:] = RESOURCE_NAMES
                    continue
                remove = False
                if r[0] == '-':
                    # '-<resource>' removes an already-enabled resource.
                    remove = True
                    r = r[1:]
                if r not in RESOURCE_NAMES:
                    usage('Invalid -u/--use option: ' + a)
                if remove:
                    if r in use_resources:
                        use_resources.remove(r)
                elif r not in use_resources:
                    use_resources.append(r)
        elif o in ('-n', '--nowindows'):
            # Suppress Windows error-report dialogs so failures don't hang
            # unattended buildbot runs.
            import msvcrt
            msvcrt.SetErrorMode(msvcrt.SEM_FAILCRITICALERRORS|
                    msvcrt.SEM_NOALIGNMENTFAULTEXCEPT|
                    msvcrt.SEM_NOGPFAULTERRORBOX|
                    msvcrt.SEM_NOOPENFILEERRORBOX)
            try:
                msvcrt.CrtSetReportMode
            except AttributeError:
                # release build
                pass
            else:
                for m in [msvcrt.CRT_WARN, msvcrt.CRT_ERROR, msvcrt.CRT_ASSERT]:
                    msvcrt.CrtSetReportMode(m, msvcrt.CRTDBG_MODE_FILE)
                    msvcrt.CrtSetReportFile(m, msvcrt.CRTDBG_FILE_STDERR)
        else:
            print(("No handler for option {}. Please report this as a bug "
                   "at http://bugs.python.org.").format(o), file=sys.stderr)
            sys.exit(1)
    # NOTE(review): no command-line option ever sets 'generate'; it only
    # matters when main() is called directly with generate=True.
    if generate and verbose:
        usage("-g and -v don't go together!")
    if single and fromfile:
        usage("-s and -f don't go together!")

    good = []
    bad = []
    skipped = []
    resource_denieds = []

    if findleaks:
        try:
            import gc
        except ImportError:
            print('No GC available, disabling findleaks.')
            findleaks = False
        else:
            # Uncomment the line below to report garbage that is not
            # freeable by reference counting alone. By default only
            # garbage that is not collectable by the GC is reported.
            #gc.set_debug(gc.DEBUG_SAVEALL)
            found_garbage = []

    if single:
        # -s: the test to run is recorded in a state file between runs.
        from tempfile import gettempdir
        filename = os.path.join(gettempdir(), 'pynexttest')
        try:
            fp = open(filename, 'r')
            next = fp.read().strip()
            tests = [next]
            fp.close()
        except IOError:
            pass

    if fromfile:
        tests = []
        fp = open(fromfile)
        for line in fp:
            guts = line.split() # assuming no test has whitespace in its name
            if guts and not guts[0].startswith('#'):
                tests.extend(guts)
        fp.close()

    # Strip .py extensions.
    if args:
        args = list(map(removepy, args))
    if tests:
        tests = list(map(removepy, tests))

    stdtests = STDTESTS[:]
    nottests = NOTTESTS.copy()
    if exclude:
        for arg in args:
            if arg in stdtests:
                stdtests.remove(arg)
            nottests.add(arg)
        args = []
    tests = tests or args or findtests(testdir, stdtests, nottests)
    if single:
        tests = tests[:1]
    # Remove all the tests that precede start if it's set.
    if start:
        try:
            del tests[:tests.index(start)]
        except ValueError:
            print("Couldn't find starting test (%s), using all tests" % start)
    if randomize:
        random.seed(random_seed)
        print("Using random seed", random_seed)
        random.shuffle(tests)
    if trace:
        import trace, tempfile
        tracer = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix,
                                         tempfile.gettempdir()],
                             trace=False, count=True)
    test_times = []
    support.verbose = verbose      # Tell tests to be moderately quiet
    support.use_resources = use_resources
    save_modules = sys.modules.keys()
    for test in tests:
        if not quiet:
            print(test)
            sys.stdout.flush()
        if trace:
            # If we're tracing code coverage, then we don't exit with status
            # if on a false return value from main.
            tracer.runctx('runtest(test, generate, verbose, quiet,'
                          ' test_times, testdir)',
                          globals=globals(), locals=vars())
        else:
            try:
                ok = runtest(test, generate, verbose, quiet, test_times,
                             testdir, huntrleaks)
            except KeyboardInterrupt:
                # print a newline separate from the ^C
                print()
                break
            except:
                raise
            if ok > 0:
                good.append(test)
            elif ok == 0:
                bad.append(test)
            else:
                skipped.append(test)
                if ok == -2:
                    resource_denieds.append(test)
        if findleaks:
            gc.collect()
            if gc.garbage:
                print("Warning: test created", len(gc.garbage), end=' ')
                print("uncollectable object(s).")
                # move the uncollectable objects somewhere so we don't see
                # them again
                found_garbage.extend(gc.garbage)
                del gc.garbage[:]
        # Unload the newly imported modules (best effort finalization)
        # NOTE(review): unload() deletes from sys.modules while iterating
        # its keys() view; on Python 3 this can raise RuntimeError — should
        # iterate over list(sys.modules.keys()).
        for module in sys.modules.keys():
            if module not in save_modules and module.startswith("test."):
                support.unload(module)

    # The lists won't be sorted if running with -r
    good.sort()
    bad.sort()
    skipped.sort()

    if good and not quiet:
        if not bad and not skipped and len(good) > 1:
            print("All", end=' ')
        print(count(len(good), "test"), "OK.")
        if verbose:
            print("CAUTION: stdout isn't compared in verbose mode:")
            print("a test that passes in verbose mode may fail without it.")
    if print_slow:
        test_times.sort(reverse=True)
        print("10 slowest tests:")
        # NOTE(review): the loop variable shadows the 'time' module, which
        # is harmless here but fragile if code were added below.
        for time, test in test_times[:10]:
            print("%s: %.1fs" % (test, time))
    if bad:
        print(count(len(bad), "test"), "failed:")
        printlist(bad)
    if skipped and not quiet:
        print(count(len(skipped), "test"), "skipped:")
        printlist(skipped)

        e = _ExpectedSkips()
        plat = sys.platform
        if e.isvalid():
            surprise = set(skipped) - e.getexpected() - set(resource_denieds)
            if surprise:
                print(count(len(surprise), "skip"), \
                      "unexpected on", plat + ":")
                printlist(surprise)
            else:
                print("Those skips are all expected on", plat + ".")
        else:
            print("Ask someone to teach regrtest.py about which tests are")
            print("expected to get skipped on", plat + ".")

    if verbose2 and bad:
        print("Re-running failed tests in verbose mode")
        for test in bad:
            print("Re-running test %r in verbose mode" % test)
            sys.stdout.flush()
            try:
                support.verbose = True
                ok = runtest(test, generate, True, quiet, test_times, testdir,
                             huntrleaks, debug)
            except KeyboardInterrupt:
                # print a newline separate from the ^C
                print()
                break
            except:
                raise

    if single:
        # Record the next test to run (or remove the state file when done).
        alltests = findtests(testdir, stdtests, nottests)
        for i in range(len(alltests)):
            if tests[0] == alltests[i]:
                if i == len(alltests) - 1:
                    os.unlink(filename)
                else:
                    fp = open(filename, 'w')
                    fp.write(alltests[i+1] + '\n')
                    fp.close()
                break
        else:
            os.unlink(filename)

    if trace:
        r = tracer.results()
        r.write_results(show_missing=True, summary=True, coverdir=coverdir)

    if runleaks:
        os.system("leaks %d" % os.getpid())

    # Exit status 1 if any test failed, 0 otherwise.
    sys.exit(len(bad) > 0)
# Tests always run first, in this fixed order, because later tests rely on
# the basic machinery they exercise.
STDTESTS = [
    'test_grammar',
    'test_opcodes',
    'test_dict',
    'test_builtin',
    'test_exceptions',
    'test_types',
    'test_unittest',
    'test_doctest',
    'test_doctest2',
]

# Files matching test_*.py that are not themselves stand-alone tests.
NOTTESTS = {
    'test_future1',
    'test_future2',
}
def findtests(testdir=None, stdtests=STDTESTS, nottests=NOTTESTS):
    """Return a list of all applicable test modules."""
    # Fall back to the directory this script lives in.
    testdir = testdir or findtestdir()
    discovered = sorted(
        fn[:-3]
        for fn in os.listdir(testdir)
        if fn.startswith("test_") and fn.endswith(".py")
        and fn[:-3] not in stdtests and fn[:-3] not in nottests
    )
    # Standard tests come first, in their fixed order.
    return stdtests + discovered
def runtest(test, generate, verbose, quiet, test_times,
            testdir=None, huntrleaks=False, debug=False):
    """Run a single test.

    test -- the name of the test
    verbose -- if true, print more messages
    quiet -- if true, don't print 'skipped' messages (probably redundant)
    test_times -- a list of (time, test_name) pairs
    testdir -- test directory
    huntrleaks -- run multiple times to test for leaks; requires a debug
                  build; a triple corresponding to -R's three arguments
    debug -- if true, print tracebacks for failed tests regardless of
             verbose setting

    Return:
        -2  test skipped because resource denied
        -1  test skipped for some other reason
         0  test failed
         1  test passed
    """
    try:
        # Bug fix: 'debug' was previously not forwarded, so runtest_inner
        # always saw its default debug=False and the -d/--debug flag had
        # no effect on traceback printing for crashed tests.
        return runtest_inner(test, generate, verbose, quiet, test_times,
                             testdir, huntrleaks, debug)
    finally:
        # Always clean up anything the test left behind, pass or fail.
        cleanup_test_droppings(test, verbose)
def runtest_inner(test, generate, verbose, quiet, test_times,
                  testdir=None, huntrleaks=False, debug=False):
    """Import and run one test module; see runtest() for the return codes.

    NOTE(review): 'generate' is accepted for signature compatibility with
    runtest() but never used here.
    """
    support.unload(test)
    if not testdir:
        testdir = findtestdir()
    if verbose:
        cfp = None
    else:
        # Capture the test's stdout so it can be compared to the expected
        # output (just the test name) below.
        cfp = io.StringIO()
    refleak = False  # True if the test leaked references.
    try:
        save_stdout = sys.stdout
        # Save various things that tests may mess up so we can restore
        # them afterward.
        save_environ = dict(os.environ)
        save_argv = sys.argv[:]
        try:
            if cfp:
                sys.stdout = cfp
                print(test)              # Output file starts with test name
            if test.startswith('test.'):
                abstest = test
            else:
                # Always import it from the test package
                abstest = 'test.' + test
            start_time = time.time()
            the_package = __import__(abstest, globals(), locals(), [])
            the_module = getattr(the_package, test)
            # Old tests run to completion simply as a side-effect of
            # being imported. For tests based on unittest or doctest,
            # explicitly invoke their test_main() function (if it exists).
            indirect_test = getattr(the_module, "test_main", None)
            if indirect_test is not None:
                indirect_test()
            if huntrleaks:
                refleak = dash_R(the_module, test, indirect_test, huntrleaks)
            test_time = time.time() - start_time
            test_times.append((test_time, test))
        finally:
            sys.stdout = save_stdout
            # Restore what we saved if needed, but also complain if the test
            # changed it so that the test may eventually get fixed.
            if not os.environ == save_environ:
                if not quiet:
                    print("Warning: os.environ was modified by", test)
                os.environ.clear()
                os.environ.update(save_environ)
            if not sys.argv == save_argv:
                if not quiet:
                    print("Warning: argv was modified by", test)
                sys.argv[:] = save_argv
    except support.ResourceDenied as msg:
        if not quiet:
            print(test, "skipped --", msg)
            sys.stdout.flush()
        return -2
    except unittest.SkipTest as msg:
        if not quiet:
            print(test, "skipped --", msg)
            sys.stdout.flush()
        return -1
    except KeyboardInterrupt:
        # Let the caller (main's test loop) handle ^C.
        raise
    except support.TestFailed as msg:
        print("test", test, "failed --", msg)
        sys.stdout.flush()
        return 0
    except:
        # NOTE(review): these names shadow the 'type' builtin locally.
        type, value = sys.exc_info()[:2]
        print("test", test, "crashed --", str(type) + ":", value)
        sys.stdout.flush()
        if verbose or debug:
            traceback.print_exc(file=sys.stdout)
            sys.stdout.flush()
        return 0
    else:
        if refleak:
            return 0
        if not cfp:
            # Verbose mode: output was not captured, so nothing to compare.
            return 1
        output = cfp.getvalue()
        expected = test + "\n"
        if output == expected or huntrleaks:
            return 1
        print("test", test, "produced unexpected output:")
        sys.stdout.flush()
        reportdiff(expected, output)
        sys.stdout.flush()
        return 0
def cleanup_test_droppings(testname, verbose):
    """Remove files and directories a test commonly leaves behind.

    Tests shouldn't leave anything, but a failing test often does; on
    Windows an open file can't even be deleted by name, so the best we can
    do there is report which test left it.
    """
    import shutil
    import stat

    for leftover in (support.TESTFN, "db_home"):
        if not os.path.exists(leftover):
            continue
        if os.path.isdir(leftover):
            kind, nuker = "directory", shutil.rmtree
        elif os.path.isfile(leftover):
            kind, nuker = "file", os.unlink
        else:
            raise SystemError("os.path says %r exists but is neither "
                              "directory nor file" % leftover)

        if verbose:
            print("%r left behind %s %r" % (testname, kind, leftover))
        try:
            # if we have chmod, fix possible permissions problems
            # that might prevent cleanup
            if (hasattr(os, 'chmod')):
                os.chmod(leftover, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
            nuker(leftover)
        except Exception as msg:
            print(("%r left behind %s %r and it couldn't be "
                   "removed: %s" % (testname, kind, leftover, msg)), file=sys.stderr)
def dash_R(the_module, test, indirect_test, huntrleaks):
    """Run a test multiple times, looking for reference leaks.

    Returns:
        False if the test didn't leak references; True if we detected refleaks.
    """
    # This code is hackish and inelegant, but it seems to do the job.
    import copyreg, _abcoll

    if not hasattr(sys, 'gettotalrefcount'):
        raise Exception("Tracking reference leaks requires a debug build "
                        "of Python")

    # Save current values for dash_R_cleanup() to restore.
    fs = warnings.filters[:]
    ps = copyreg.dispatch_table.copy()
    pic = sys.path_importer_cache.copy()
    abcs = {}
    # Snapshot every ABC's registry so test-time registrations can be undone.
    for abc in [getattr(_abcoll, a) for a in _abcoll.__all__]:
        if not isabstract(abc):
            continue
        for obj in abc.__subclasses__() + [abc]:
            abcs[obj] = obj._abc_registry.copy()

    if indirect_test:
        def run_the_test():
            indirect_test()
    else:
        # Import-style test: re-import runs it as a side effect.
        def run_the_test():
            del sys.modules[the_module.__name__]
            exec('import ' + the_module.__name__)

    deltas = []
    nwarmup, ntracked, fname = huntrleaks
    repcount = nwarmup + ntracked
    print("beginning", repcount, "repetitions", file=sys.stderr)
    print(("1234567890"*(repcount//10 + 1))[:repcount], file=sys.stderr)
    dash_R_cleanup(fs, ps, pic, abcs)
    for i in range(repcount):
        rc = sys.gettotalrefcount()
        run_the_test()
        sys.stderr.write('.')
        sys.stderr.flush()
        dash_R_cleanup(fs, ps, pic, abcs)
        # Only count runs after the warm-up phase; the "- 2" offsets refs
        # presumably created by the measurement itself (TODO confirm).
        if i >= nwarmup:
            deltas.append(sys.gettotalrefcount() - rc - 2)
    print(file=sys.stderr)
    if any(deltas):
        msg = '%s leaked %s references, sum=%s' % (test, deltas, sum(deltas))
        print(msg, file=sys.stderr)
        # Also append the report to the -R log file.
        refrep = open(fname, "a")
        print(msg, file=refrep)
        refrep.close()
        return True
    return False
def dash_R_cleanup(fs, ps, pic, abcs):
    """Restore interpreter state saved by dash_R() and purge stdlib caches.

    fs -- saved warnings.filters contents
    ps -- saved copyreg.dispatch_table contents
    pic -- saved sys.path_importer_cache contents
    abcs -- saved ABC registries, keyed by class
    """
    import gc, copyreg
    import _strptime, linecache
    import urllib.parse, urllib.request, mimetypes, doctest
    import struct, filecmp, _abcoll
    from distutils.dir_util import _path_created
    from weakref import WeakSet

    # Clear the warnings registry, so they can be displayed again
    for mod in sys.modules.values():
        if hasattr(mod, '__warningregistry__'):
            del mod.__warningregistry__

    # Restore some original values.
    warnings.filters[:] = fs
    copyreg.dispatch_table.clear()
    copyreg.dispatch_table.update(ps)
    sys.path_importer_cache.clear()
    sys.path_importer_cache.update(pic)

    # clear type cache
    sys._clear_type_cache()

    # Clear ABC registries, restoring previously saved ABC registries.
    for abc in [getattr(_abcoll, a) for a in _abcoll.__all__]:
        if not isabstract(abc):
            continue
        for obj in abc.__subclasses__() + [abc]:
            obj._abc_registry = abcs.get(obj, WeakSet()).copy()
            obj._abc_cache.clear()
            obj._abc_negative_cache.clear()

    # Clear assorted module caches.
    _path_created.clear()
    re.purge()
    _strptime._regex_cache.clear()
    urllib.parse.clear_cache()
    urllib.request.urlcleanup()
    linecache.clearcache()
    mimetypes._default_mime_types()
    filecmp._cache.clear()
    struct._clearcache()
    doctest.master = None

    # Collect cyclic trash.
    gc.collect()
def warm_char_cache():
    """Touch every 1-byte slice of a 256-byte string.

    Pre-populates the interpreter's character cache so -R refleak runs
    aren't skewed by the cache filling lazily during the test.
    """
    all_bytes = bytes(range(256))
    for start in range(256):
        all_bytes[start:start + 1]
def reportdiff(expected, output):
    """Print a line-oriented diff of expected vs. actual test output."""
    import difflib

    print("*" * 70)
    exp_lines = expected.splitlines(1)
    got_lines = output.splitlines(1)

    def pair(lo, hi):
        # lo:hi are 0-based slice indices; convert to 1-based line indices.
        lo += 1
        return "line " + str(lo) if lo >= hi else "lines %d-%d" % (lo, hi)

    matcher = difflib.SequenceMatcher(a=exp_lines, b=got_lines)
    for op, a0, a1, b0, b1 in matcher.get_opcodes():
        if op == 'equal':
            continue
        if op == 'delete':
            print("***", pair(a0, a1), "of expected output missing:")
            for line in exp_lines[a0:a1]:
                print("-", line, end='')
        elif op == 'replace':
            print("*** mismatch between", pair(a0, a1), "of expected",
                  "output and", pair(b0, b1), "of actual output:")
            for line in difflib.ndiff(exp_lines[a0:a1], got_lines[b0:b1]):
                print(line, end='')
        elif op == 'insert':
            print("***", pair(b0, b1), "of actual output doesn't appear",
                  "in expected output after line", str(a1)+":")
            for line in got_lines[b0:b1]:
                print("+", line, end='')
        else:
            print("get_opcodes() returned bad tuple?!?!", (op, a0, a1, b0, b1))
    print("*" * 70)
def findtestdir():
    """Return the directory containing this script (or module)."""
    # When run as a script, locate via argv[0]; when imported, via __file__.
    script = sys.argv[0] if __name__ == '__main__' else __file__
    return os.path.dirname(script) or os.curdir
def removepy(name):
    """Strip a trailing '.py' extension from *name*, if present."""
    return name[:-3] if name.endswith(".py") else name
def count(n, word):
    """Return '<n> <word>' with a naive plural 's' when n != 1."""
    suffix = "" if n == 1 else "s"
    return "%d %s%s" % (n, word, suffix)
def printlist(x, width=70, indent=4):
    """Print the elements of iterable x to stdout.

    Optional arg width (default 70) is the maximum line length.
    Optional arg indent (default 4) is the number of blanks with which to
    begin each line.
    """
    from textwrap import fill
    pad = ' ' * indent
    text = ' '.join(str(item) for item in x)
    print(fill(text, width, initial_indent=pad, subsequent_indent=pad))
# Map sys.platform to a string containing the basenames of tests
# expected to be skipped on that platform.
#
# Special cases:
# test_pep277
# The _ExpectedSkips constructor adds this to the set of expected
# skips if not os.path.supports_unicode_filenames.
# test_timeout
# Controlled by test_timeout.skip_expected. Requires the network
# resource and a socket module.
#
# Tests that are expected to be skipped everywhere except on one platform
# are also handled separately.
_expectations = {
'win32':
"""
test__locale
test_crypt
test_curses
test_dbm
test_fcntl
test_fork1
test_epoll
test_dbm_gnu
test_dbm_ndbm
test_grp
test_ioctl
test_largefile
test_kqueue
test_openpty
test_ossaudiodev
test_pipes
test_poll
test_posix
test_pty
test_pwd
test_resource
test_signal
test_syslog
test_threadsignals
test_wait3
test_wait4
""",
'linux2':
"""
test_curses
test_largefile
test_kqueue
test_ossaudiodev
""",
'mac':
"""
test_atexit
test_bz2
test_crypt
test_curses
test_dbm
test_fcntl
test_fork1
test_epoll
test_grp
test_ioctl
test_largefile
test_locale
test_kqueue
test_mmap
test_openpty
test_ossaudiodev
test_poll
test_popen
test_posix
test_pty
test_pwd
test_resource
test_signal
test_sundry
test_tarfile
""",
'unixware7':
"""
test_epoll
test_largefile
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_sax
test_sundry
""",
'openunix8':
"""
test_epoll
test_largefile
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_sax
test_sundry
""",
'sco_sv3':
"""
test_asynchat
test_fork1
test_epoll
test_gettext
test_largefile
test_locale
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_queue
test_sax
test_sundry
test_thread
test_threaded_import
test_threadedtempfile
test_threading
""",
'darwin':
"""
test__locale
test_curses
test_epoll
test_dbm_gnu
test_largefile
test_locale
test_minidom
test_ossaudiodev
test_poll
""",
'sunos5':
"""
test_curses
test_dbm
test_epoll
test_kqueue
test_dbm_gnu
test_gzip
test_openpty
test_zipfile
test_zlib
""",
'hp-ux11':
"""
test_curses
test_epoll
test_dbm_gnu
test_gzip
test_largefile
test_locale
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_sax
test_zipfile
test_zlib
""",
'atheos':
"""
test_curses
test_dbm_gnu
test_epoll
test_largefile
test_locale
test_kqueue
test_mhlib
test_mmap
test_poll
test_resource
""",
'cygwin':
"""
test_curses
test_dbm
test_epoll
test_ioctl
test_kqueue
test_largefile
test_locale
test_ossaudiodev
test_socketserver
""",
'os2emx':
"""
test_audioop
test_curses
test_epoll
test_kqueue
test_largefile
test_mmap
test_openpty
test_ossaudiodev
test_pty
test_resource
test_signal
""",
'freebsd4':
"""
test_epoll
test_dbm_gnu
test_locale
test_ossaudiodev
test_pep277
test_pty
test_socketserver
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_timeout
test_urllibnet
test_multiprocessing
""",
'aix5':
"""
test_bz2
test_epoll
test_dbm_gnu
test_gzip
test_kqueue
test_ossaudiodev
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_zipimport
test_zlib
""",
'openbsd3':
"""
test_ctypes
test_epoll
test_dbm_gnu
test_locale
test_normalization
test_ossaudiodev
test_pep277
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_multiprocessing
""",
'netbsd3':
"""
test_ctypes
test_curses
test_epoll
test_dbm_gnu
test_locale
test_ossaudiodev
test_pep277
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_multiprocessing
""",
}
_expectations['freebsd5'] = _expectations['freebsd4']
_expectations['freebsd6'] = _expectations['freebsd4']
_expectations['freebsd7'] = _expectations['freebsd4']
_expectations['freebsd8'] = _expectations['freebsd4']
class _ExpectedSkips:
    # Computes the set of tests expected to be skipped on the current
    # platform.  Use isvalid() before trusting getexpected().
    def __init__(self):
        import os.path
        from test import test_timeout
        # self.valid -- True only when _expectations knows this platform.
        self.valid = False
        if sys.platform in _expectations:
            s = _expectations[sys.platform]
            self.expected = set(s.split())
            # These are broken tests, for now skipped on every platform.
            # XXX Fix these!
            self.expected.add('test_nis')
            # expected to be skipped on every platform, even Linux
            if not os.path.supports_unicode_filenames:
                self.expected.add('test_pep277')
            # doctest, profile and cProfile tests fail when the codec for the
            # fs encoding isn't built in because PyUnicode_Decode() adds two
            # calls into Python.
            encs = ("utf-8", "latin-1", "ascii", "mbcs", "utf-16", "utf-32")
            if sys.getfilesystemencoding().lower() not in encs:
                self.expected.add('test_profile')
                self.expected.add('test_cProfile')
                self.expected.add('test_doctest')
            if test_timeout.skip_expected:
                self.expected.add('test_timeout')
            if sys.platform != "win32":
                # test_sqlite is only reliable on Windows where the library
                # is distributed with Python
                WIN_ONLY = ["test_unicode_file", "test_winreg",
                            "test_winsound", "test_startfile",
                            "test_sqlite"]
                for skip in WIN_ONLY:
                    self.expected.add(skip)
            # NOTE(review): test_nis is already added unconditionally above,
            # so this branch is currently redundant -- confirm before removing.
            if sys.platform != 'sunos5':
                self.expected.add('test_nis')
            self.valid = True
    def isvalid(self):
        "Return true iff _ExpectedSkips knows about the current platform."
        return self.valid
    def getexpected(self):
        """Return set of test names we expect to skip on current platform.
        self.isvalid() must be true.
        """
        assert self.isvalid()
        return self.expected
if __name__ == '__main__':
    # Remove regrtest.py's own directory from the module search path. This
    # prevents relative imports from working, and relative imports will screw
    # up the testing framework. E.g. if both test.support and
    # support are imported, they will not contain the same globals, and
    # much of the testing framework relies on the globals in the
    # test.support module.
    mydir = os.path.abspath(os.path.normpath(os.path.dirname(sys.argv[0])))
    i = len(sys.path)
    # Iterate from the end so deleting an entry does not shift the
    # indices of entries not yet visited.
    while i >= 0:
        i -= 1
        if os.path.abspath(os.path.normpath(sys.path[i])) == mydir:
            del sys.path[i]
    main()
| gpl-3.0 |
thanhacun/odoo | addons/report_intrastat/__openerp__.py | 261 | 1805 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP/Odoo addon manifest: declares metadata, dependencies and the data
# files loaded when the module is installed or updated.
{
    'name': 'Intrastat Reporting',
    'version': '1.0',
    'category': 'Accounting & Finance',
    'description': """
A module that adds intrastat reports.
=====================================
This module gives the details of the goods traded between the countries of
European Union.""",
    'author': 'OpenERP SA',
    'website': 'http://www.openerp.com',
    # Modules that must be installed before this one.
    'depends': ['base', 'product', 'stock', 'sale', 'purchase'],
    # XML/CSV data files loaded at install/update time (order matters).
    'data': [
        'security/ir.model.access.csv',
        'report_intrastat_view.xml',
        'intrastat_report.xml',
        'report_intrastat_data.xml',
        'views/report_intrastatinvoice.xml'
    ],
    'demo': [],
    'test': ['test/report_intrastat_report.yml'],
    'installable': True,
    # Not installed automatically when its dependencies are present.
    'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
aroth-arsoft/arsoft-web-kpasswd | arsoft/web/templatetags/static_url.py | 3 | 1072 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# kate: space-indent on; indent-width 4; mixedindent off; indent-mode python;
from django import template
from django.urls import get_script_prefix
from django.conf import settings
register = template.Library()
class StaticURLNode(template.Node):
    """Template node rendering the site's static URL, optionally followed by
    a fixed suffix supplied in the template tag.

    The URL is computed once, at template-parse time.
    """

    def __init__(self, url):
        # Prefer settings.STATIC_URL; fall back to the script prefix when the
        # setting is missing *or* explicitly None.  (The original code only
        # handled the missing case and would build a None-based URL when
        # STATIC_URL was set to None, Django's default.)
        self._url = getattr(settings, 'STATIC_URL', None)
        if self._url is None:
            self._url = get_script_prefix()
        if url:
            self._url = self._url + url

    def render(self, context):
        # Rendering is a plain lookup of the precomputed URL.
        return self._url
def do_static_url(parser, token):
    """Compile the ``static_url`` template tag.

    Usage: ``{% static_url %}`` or ``{% static_url "suffix/path" %}``.
    The optional suffix argument must be quoted.
    """
    # split_contents() knows not to split quoted strings.
    pieces = token.split_contents()
    tag_name = pieces[0] if pieces else None
    url = pieces[1] if len(pieces) >= 2 else None
    if url:
        is_quoted = url[0] == url[-1] and url[0] in ('"', "'")
        if not is_quoted:
            raise template.TemplateSyntaxError("%r tag's argument should be in quotes" % tag_name)
        url = url[1:-1]
    return StaticURLNode(url)

register.tag('static_url', do_static_url)
| gpl-3.0 |
kmonsoor/pyglet | contrib/scene2d/scene2d/drawable.py | 29 | 6741 | from pyglet.gl import *
class DrawEnv(object):
    '''Base class for drawing-environment hooks.

    Subclasses may provide either or both of a "before" and an "after"
    method; callers probe for them with hasattr() before invoking.
    '''
    pass
class DrawBlended(DrawEnv):
    '''Sets up texture env for an alpha-blended draw.
    '''
    def before(self):
        # Save enable/blend state so after() can restore it via glPopAttrib.
        glPushAttrib(GL_ENABLE_BIT | GL_COLOR_BUFFER_BIT)
        # XXX this belongs in a "DrawTextureBlended" or something
        glEnable(GL_TEXTURE_2D)
        glEnable(GL_BLEND)
        # Standard non-premultiplied alpha blending.
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
    def after(self):
        glPopAttrib()
# Shared singleton instance for the common alpha-blended case.
DRAW_BLENDED = DrawBlended()
class Drawable(object):
    """Base class for drawable objects.

    Maintains a list of Effects and lazily caches the DrawStyle computed
    from them; the cache is invalidated whenever an effect is added or
    removed.
    """
    def __init__(self):
        self.effects = []
        self._style = None          # cached DrawStyle, built on demand

    def add_effect(self, effect):
        self.effects.append(effect)
        self._style = None          # invalidate cached style

    def remove_effect(self, effect):
        self.effects.remove(effect)
        self._style = None          # invalidate cached style

    def get_drawstyle(self):
        # Bug fix: the original did ``raise NotImplemented('...')``.
        # NotImplemented is a sentinel value, not an exception type, so that
        # line actually raised TypeError; NotImplementedError is intended.
        raise NotImplementedError('implement on subclass')

    def get_style(self):
        '''Return the DrawStyle for this Drawable.

        This method should return None if nothing should be drawn.
        '''
        if self._style is None:
            self._style = self.get_drawstyle()
            for effect in self.effects:
                self._style = effect.apply(self._style)
        return self._style

    def draw(self):
        '''Convenience method.

        Don't use this if you have a lot of drawables and care about
        performance. Collect up your drawables in a list and pass that to
        draw_many().
        '''
        style = self.get_style()
        if style is not None: style.draw()
class Effect(object):
    """A transformation applied to a Drawable's DrawStyle."""
    def apply(self, style):
        '''Modify some aspect of the style. If style.is_copy is False then
        .copy() it. We don't do that automatically because there's a chance
        this method is a NOOP.
        '''
        # Bug fix: was ``raise NotImplemented()`` -- NotImplemented is not
        # callable, so the original raised TypeError, masking the real error.
        raise NotImplementedError()
class TintEffect(Effect):
    '''Tint a Drawable's colour.

    For each RGBA component:
        resultant color = drawable.color * tint.color
    '''
    def __init__(self, tint):
        self.tint = tint

    def apply(self, style):
        tinted = style.copy()
        tinted.color = tuple(tinted.color[i] * self.tint[i] for i in range(4))
        return tinted
class ScaleEffect(Effect):
    '''Apply an (sx, sy) scale to the Drawable.
    '''
    def __init__(self, sx, sy):
        self.sx = sx
        self.sy = sy

    def apply(self, style):
        scaled = style.copy()
        scaled.sx = self.sx
        scaled.sy = self.sy
        return scaled
class RotateEffect(Effect):
    '''Apply a rotation (about the Z axis, in degrees) to the Drawable.
    '''
    def __init__(self, angle):
        self.angle = angle

    def apply(self, style):
        rotated = style.copy()
        rotated.angle = self.angle
        return rotated
class DrawStyle(object):
    '''A bundle of GL state describing how to draw something: colour,
    texture (+uvs), position/scale/rotation transform, and either a
    display list or a callback to emit the geometry.

    Notes:
    draw_func(<DrawStyle instance>)

    This is Python 2 code: ordering relies on __cmp__/cmp, which no longer
    exist in Python 3.
    '''
    def __init__(self, color=None, texture=None, x=0, y=0, sx=1, sy=1,
            angle=0, width=None, height=None, uvs=None, draw_list=None,
            draw_env=None, draw_func=None):
        self.color = color
        self.x, self.y = x, y
        self.sx, self.sy = sx, sy
        self.angle = angle
        self.width, self.height = width, height
        self.texture = texture
        # texture and uvs must be supplied together, in either direction.
        if texture is not None and uvs is None:
            raise ValueError('texture and uvs must both be supplied')
        self.uvs = uvs
        if uvs is not None and texture is None:
            raise ValueError('texture and uvs must both be supplied')
        self.draw_list = draw_list
        self.draw_env = draw_env
        self.draw_func = draw_func
        # Set True by copy() so Effects know they may mutate this instance.
        self.is_copy = False
    def copy(self):
        # NOTE(review): sx, sy and angle are not propagated to the copy, so
        # they reset to defaults -- looks unintentional; confirm.
        s = DrawStyle(color=self.color, texture=self.texture, x=self.x,
            y=self.y, width=self.width, height=self.height,
            uvs=self.uvs, draw_list=self.draw_list, draw_env=self.draw_env,
            draw_func=self.draw_func)
        s.is_copy = True
        return s
    def draw(self):
        # set up
        if self.color is not None:
            glColor4f(*self.color)
        if self.texture is not None:
            glBindTexture(GL_TEXTURE_2D, self.texture.id)
        if hasattr(self.draw_env, 'before'):
            self.draw_env.before()
        # NOTE(review): the chained comparison means (sx != sy and sy != 1),
        # probably intended to be (sx, sy) != (1, 1) -- confirm.
        transform = self.x or self.y or self.sx != self.sy != 1 or self.angle
        if transform:
            glPushMatrix()
            if self.x or self.y:
                glTranslatef(self.x, self.y, 0)
            if self.sx or self.sy:
                glScalef(self.sx, self.sy, 1)
            if self.angle:
                # Rotate about the centre of the (width, height) rectangle.
                cx, cy = self.width/2, self.height/2
                glTranslatef(cx, cy, 0)
                glRotatef(self.angle, 0, 0, 1)
                glTranslatef(-cx, -cy, 0)
        if self.draw_func is not None:
            self.draw_func(self)
        if self.draw_list is not None:
            glCallList(self.draw_list)
        if hasattr(self.draw_env, 'after'):
            self.draw_env.after()
        if transform:
            glPopMatrix()
    def __cmp__(self, other):
        # Orders styles so draw_many() can batch identical GL state
        # (Python 2 only; cmp() was removed in Python 3).
        return (
            cmp(self.color, other.color) or
            cmp(self.texture.id, other.texture.id) or
            cmp(self.draw_env, other.draw_env) or
            cmp(self.draw_func, other.draw_func) or
            cmp(self.draw_list, other.draw_list)
        )
def draw_many(drawables):
    """Draw a list of Drawables, batching GL state changes.

    Colour, texture binding and draw-environment setup are only re-issued
    when they differ from the previous style.  Python 2 code (filter()
    returning a list).
    """
    styles = filter(None, [d.get_style() for d in drawables])
    # NOTE(review): this sorts `drawables`, but the loop below iterates
    # `styles`, which was built *before* the sort -- the batching order is
    # therefore unaffected; probably `styles.sort()` was intended. Confirm.
    drawables.sort()
    old_color = None
    old_texture = None
    old_env = None
    for d in styles:
        if d.color != old_color:
            glColor4f(*d.color)
            old_color = d.color
        # NOTE(review): `old_texture` stores d.texture.id while the
        # comparison uses the texture object itself, so this test is
        # effectively always true -- confirm intent.
        if d.texture != old_texture:
            if d.texture is not None:
                glBindTexture(GL_TEXTURE_2D, d.texture.id)
                old_texture = d.texture.id
        if d.draw_env != old_env:
            # Tear down the previous environment before entering the new one.
            if old_env is not None and hasattr(old_env, 'after'):
                old_env.after()
            if hasattr(d.draw_env, 'before'):
                d.draw_env.before()
            old_env = d.draw_env
        transform = d.x or d.y or d.sx != d.sy != 1 or d.angle
        if transform:
            glPushMatrix()
            if d.x or d.y:
                glTranslatef(d.x, d.y, 0)
            if d.sx != 1 or d.sy != 1:
                glScalef(d.sx, d.sy, 1)
            if d.angle:
                # Rotate about the centre of the style's rectangle.
                cx, cy = d.width/2, d.height/2
                glTranslatef(cx, cy, 0)
                glRotatef(d.angle, 0, 0, 1)
                glTranslatef(-cx, -cy, 0)
        if d.draw_list is not None:
            glCallList(d.draw_list)
        if d.draw_func is not None:
            d.draw_func(d)
        if transform:
            glPopMatrix()
    # Close out the last environment, if any.
    if old_env is not None and hasattr(old_env, 'after'):
        old_env.after()
| bsd-3-clause |
jkilpatr/browbeat | rally/rally-plugins/glance-create-boot-delete/glance_create_boot_delete.py | 1 | 1908 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rally.task import scenario
from rally.plugins.openstack.scenarios.nova import utils as nova_utils
from rally.plugins.openstack.scenarios.neutron import utils as neutron_utils
from rally.plugins.openstack.scenarios.glance import utils as glance_utils
from rally.task import types
from rally.task import validation
class BrowbeatPlugin(neutron_utils.NeutronScenario,
                     glance_utils.GlanceScenario,
                     nova_utils.NovaScenario,
                     scenario.Scenario):
    # Rally scenario combining Glance, Neutron and Nova helpers.
    @types.convert(flavor={"type": "nova_flavor"})
    @validation.flavor_exists("flavor")
    @validation.required_openstack(users=True)
    @scenario.configure(context={"cleanup": ["nova", "neutron", "glance"]})
    def glance_create_boot_delete(self, container_format, image_location, disk_format, flavor,
                                  network_create_args=None, subnet_create_args=None, **kwargs):
        """Create a Glance image, boot a server from it on a fresh
        network/subnet, then delete the server and the image.

        :param container_format: image container format (e.g. "bare")
        :param image_location: URL or path of the image to upload
        :param disk_format: image disk format (e.g. "qcow2")
        :param flavor: Nova flavor used to boot the server
        :param network_create_args: optional dict for network creation
        :param subnet_create_args: optional dict for subnet creation
        """
        image = self._create_image(container_format, image_location, disk_format, **kwargs)
        image_id = image.id
        net = self._create_network(network_create_args or {})
        self._create_subnet(net, subnet_create_args or {})
        # Boot the server attached to the network created above.
        kwargs['nics'] = [{'net-id': net['network']['id']}]
        server = self._boot_server(image_id, flavor, **kwargs)
        self._delete_server(server)
        self._delete_image(image)
| apache-2.0 |
PaoloC68/django-registration | registration/forms.py | 1 | 6363 | """
Forms and validation code for user registration.
Note that all of these forms assume Django's bundle default ``User``
model; since it's not possible for a form to anticipate in advance the
needs of custom user models, you will need to write your own forms if
you're using a custom model.
"""
try:
from django.contrib.auth import get_user_model
User = get_user_model()
except ImportError:
from django.contrib.auth.models import User
from django import forms
from django.utils.translation import ugettext_lazy as _
class RegistrationForm(forms.Form):
    """
    Form for registering a new user account.

    Validates that the requested username is not already in use, and
    requires the password to be entered twice to catch typos.

    Subclasses should feel free to add any additional validation they
    need, but should avoid defining a ``save()`` method -- the actual
    saving of collected user data is delegated to the active
    registration backend.
    """
    required_css_class = 'required'
    username = forms.RegexField(regex=r'^[\w.@+-]+$',
                                max_length=30,
                                label=_("Username"),
                                error_messages={'invalid': _("This value may contain only letters, numbers and @/./+/-/_ characters.")})
    email = forms.EmailField(label=_("E-mail"))
    password1 = forms.CharField(widget=forms.PasswordInput,
                                label=_("Password"))
    password2 = forms.CharField(widget=forms.PasswordInput,
                                label=_("Password (again)"))
    def clean_username(self):
        """
        Validate that the username is alphanumeric and is not already
        in use.
        """
        # Case-insensitive lookup so "Bob" and "bob" cannot coexist.
        existing = User.objects.filter(username__iexact=self.cleaned_data['username'])
        if existing.exists():
            raise forms.ValidationError(_("A user with that username already exists."))
        else:
            return self.cleaned_data['username']
    def clean(self):
        """
        Verify that the values entered into the two password fields
        match. Note that an error here will end up in
        ``non_field_errors()`` because it doesn't apply to a single
        field.
        """
        # Both fields must have passed their own validation before the
        # cross-field comparison is meaningful.
        if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:
            if self.cleaned_data['password1'] != self.cleaned_data['password2']:
                raise forms.ValidationError(_("The two password fields didn't match."))
        return self.cleaned_data
class RegistrationFormTermsOfService(RegistrationForm):
    """
    Subclass of ``RegistrationForm`` which adds a required checkbox
    for agreeing to a site's Terms of Service.
    """
    # BooleanField with required=True (the default) forces the box checked.
    tos = forms.BooleanField(widget=forms.CheckboxInput,
                             label=_(u'I have read and agree to the Terms of Service'),
                             error_messages={'required': _("You must agree to the terms to register")})
class RegistrationFormUniqueEmail(RegistrationForm):
    """
    Subclass of ``RegistrationForm`` which enforces uniqueness of
    email addresses.
    """
    def clean_email(self):
        """
        Validate that the supplied email address is unique for the
        site.
        """
        # Use .exists() so the check issues a cheap EXISTS query instead of
        # evaluating the whole queryset (and to match clean_username's style).
        if User.objects.filter(email__iexact=self.cleaned_data['email']).exists():
            raise forms.ValidationError(_("This email address is already in use. Please supply a different email address."))
        return self.cleaned_data['email']
class RegistrationFormNoFreeEmail(RegistrationForm):
    """
    Subclass of ``RegistrationForm`` which disallows registration with
    email addresses from popular free webmail services; moderately
    useful for preventing automated spam registrations.

    To change the list of banned domains, subclass this form and
    override the attribute ``bad_domains``.
    """
    bad_domains = ['aim.com', 'aol.com', 'email.com', 'gmail.com',
                   'googlemail.com', 'hotmail.com', 'hushmail.com',
                   'msn.com', 'mail.ru', 'mailinator.com', 'live.com',
                   'yahoo.com']
    def clean_email(self):
        """
        Check the supplied email address against a list of known free
        webmail domains.
        """
        # Bug fix: domains are case-insensitive, so lowercase before
        # comparing against the (lowercase) blacklist; previously
        # "user@GMAIL.COM" was accepted.
        email_domain = self.cleaned_data['email'].split('@')[1].lower()
        if email_domain in self.bad_domains:
            raise forms.ValidationError(_("Registration using free email addresses is prohibited. Please supply a different email address."))
        return self.cleaned_data['email']
class EmailRegistrationForm(forms.Form):
    """
    Form for registering a new user account identified by email address
    (no username field).

    Validates that the requested email is not already in use, and
    requires the password to be entered twice to catch typos.

    Subclasses should feel free to add any additional validation they
    need, but should avoid defining a ``save()`` method -- the actual
    saving of collected user data is delegated to the active
    registration backend.
    """
    required_css_class = 'required'
    email = forms.EmailField(label=_("E-mail"))
    password1 = forms.CharField(widget=forms.PasswordInput,
                                label=_("Password"))
    password2 = forms.CharField(widget=forms.PasswordInput,
                                label=_("Password (again)"))
    def clean_email(self):
        """
        Validate that the supplied email address is not already in use.
        """
        # Case-insensitive lookup, mirroring RegistrationForm.clean_username.
        existing = User.objects.filter(email__iexact=self.cleaned_data['email'])
        if existing.exists():
            raise forms.ValidationError(_("A user with that email already exists."))
        else:
            return self.cleaned_data['email']
    def clean(self):
        """
        Verify that the values entered into the two password fields
        match. Note that an error here will end up in
        ``non_field_errors()`` because it doesn't apply to a single
        field.
        """
        if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:
            if self.cleaned_data['password1'] != self.cleaned_data['password2']:
                raise forms.ValidationError(_("The two password fields didn't match."))
        return self.cleaned_data
| bsd-3-clause |
AntonioMtn/NZBMegaSearch | requests/compat.py | 289 | 2433 | # -*- coding: utf-8 -*-
"""
pythoncompat
"""
from .packages import charade as chardet
import sys
# -------
# Pythons
# -------
# Syntax sugar.
_ver = sys.version_info
#: Python 2.x?
is_py2 = (_ver[0] == 2)
#: Python 3.x?
is_py3 = (_ver[0] == 3)
#: Python 3.0.x
is_py30 = (is_py3 and _ver[1] == 0)
#: Python 3.1.x
is_py31 = (is_py3 and _ver[1] == 1)
#: Python 3.2.x
is_py32 = (is_py3 and _ver[1] == 2)
#: Python 3.3.x
is_py33 = (is_py3 and _ver[1] == 3)
#: Python 3.4.x
is_py34 = (is_py3 and _ver[1] == 4)
#: Python 2.7.x
is_py27 = (is_py2 and _ver[1] == 7)
#: Python 2.6.x
is_py26 = (is_py2 and _ver[1] == 6)
#: Python 2.5.x
is_py25 = (is_py2 and _ver[1] == 5)
#: Python 2.4.x
is_py24 = (is_py2 and _ver[1] == 4)  # I'm assuming this is not by choice.
# ---------
# Platforms
# ---------
# Syntax sugar.
_ver = sys.version.lower()
is_pypy = ('pypy' in _ver)
is_jython = ('jython' in _ver)
is_ironpython = ('iron' in _ver)
# Assume CPython, if nothing else.
is_cpython = not any((is_pypy, is_jython, is_ironpython))
# Windows-based system.
is_windows = 'win32' in str(sys.platform).lower()
# Standard Linux 2+ system.
is_linux = ('linux' in str(sys.platform).lower())
is_osx = ('darwin' in str(sys.platform).lower())
is_hpux = ('hpux' in str(sys.platform).lower())  # Complete guess.
# Bug fix: was "'solar==' in ...", which can never match any platform string.
# Solaris reports sys.platform as 'sunos5'.
is_solaris = ('sunos' in str(sys.platform).lower())
try:
import simplejson as json
except ImportError:
import json
# ---------
# Specifics
# ---------
if is_py2:
from urllib import quote, unquote, quote_plus, unquote_plus, urlencode
from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
from urllib2 import parse_http_list
import cookielib
from Cookie import Morsel
from StringIO import StringIO
from .packages.urllib3.packages.ordered_dict import OrderedDict
builtin_str = str
bytes = str
str = unicode
basestring = basestring
numeric_types = (int, long, float)
elif is_py3:
from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
from urllib.request import parse_http_list
from http import cookiejar as cookielib
from http.cookies import Morsel
from io import StringIO
from collections import OrderedDict
builtin_str = str
str = str
bytes = bytes
basestring = (str, bytes)
numeric_types = (int, float)
| gpl-2.0 |
SolusOS-discontinued/pisi | pisi/operations/remove.py | 2 | 4530 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2005 - 2007, TUBITAK/UEKAE
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
#
import sys
import gettext
__trans = gettext.translation('pisi', fallback=True)
_ = __trans.ugettext
import pisi
import pisi.context as ctx
import pisi.atomicoperations as atomicoperations
import pisi.pgraph as pgraph
import pisi.util as util
import pisi.ui as ui
import pisi.db
def remove(A, ignore_dep = False, ignore_safety = False):
    """Remove set A of packages from the system (A is a list of package names).

    Honors the system.base safety switch (unless ignore_safety), resolves
    reverse dependencies into a removal order (unless ignore_dep), asks the
    user for confirmation when extra packages must go, and returns False
    when nothing was removed.
    """
    componentdb = pisi.db.componentdb.ComponentDB()
    installdb = pisi.db.installdb.InstallDB()
    A = [str(x) for x in A]
    # filter packages that are not installed
    A_0 = A = set(A)
    # Safety switch: refuse to remove members of the system.base component.
    if not ctx.get_option('ignore_safety') and not ctx.config.values.general.ignore_safety and not ignore_safety:
        if componentdb.has_component('system.base'):
            systembase = set(componentdb.get_union_component('system.base').packages)
            refused = A.intersection(systembase)
            if refused:
                raise pisi.Error(_("Safety switch prevents the removal of "
                                   "following packages:\n") +
                                 util.format_by_columns(sorted(refused)))
            A = A - systembase
        else:
            ctx.ui.warning(_("Safety switch: The component system.base cannot be found."))
    # Keep only the packages that are actually installed.
    Ap = []
    for x in A:
        if installdb.has_package(x):
            Ap.append(x)
        else:
            ctx.ui.info(_('Package %s does not exist. Cannot remove.') % x)
    A = set(Ap)
    if len(A)==0:
        ctx.ui.info(_('No packages to remove.'))
        return False
    # Compute the removal order; with ignore_dep the request order is used.
    if not ctx.config.get_option('ignore_dependency') and not ignore_dep:
        G_f, order = plan_remove(A)
    else:
        G_f = None
        order = A
    ctx.ui.info(_("""The following list of packages will be removed
in the respective order to satisfy dependencies:
""") + util.strlist(order))
    # Extra packages were pulled in by reverse deps -- confirm with the user.
    if len(order) > len(A_0):
        if not ctx.ui.confirm(_('Do you want to continue?')):
            ctx.ui.warning(_('Package removal declined'))
            return False
    if ctx.get_option('dry_run'):
        return
    ctx.ui.notify(ui.packagestogo, order = order)
    for x in order:
        if installdb.has_package(x):
            atomicoperations.remove_single(x)
        else:
            ctx.ui.info(_('Package %s is not installed. Cannot remove.') % x)
def plan_remove(A):
    """Build the reverse-dependency closure of package set A and return
    (graph, topologically-sorted removal order)."""
    # try to construct a pisi graph of packages to
    # install / reinstall
    installdb = pisi.db.installdb.InstallDB()
    G_f = pgraph.PGraph(installdb) # construct G_f
    # find the (install closure) graph of G_f by package
    # set A using packagedb
    for x in A:
        G_f.add_package(x)
    # Breadth-first expansion: each pass adds installed reverse dependencies
    # whose dependency would become unsatisfied by this removal.
    B = A
    while len(B) > 0:
        Bp = set()
        for x in B:
            rev_deps = installdb.get_rev_deps(x)
            for (rev_dep, depinfo) in rev_deps:
                # we don't deal with uninstalled rev deps
                # and unsatisfied dependencies (this is important, too)
                # satisfied_by_any_installed_other_than is for AnyDependency
                if installdb.has_package(rev_dep) and depinfo.satisfied_by_installed() and not depinfo.satisfied_by_any_installed_other_than(x):
                    if not rev_dep in G_f.vertices():
                        Bp.add(rev_dep)
                    G_f.add_plain_dep(rev_dep, x)
        B = Bp
    if ctx.config.get_option('debug'):
        G_f.write_graphviz(sys.stdout)
    order = G_f.topological_sort()
    return G_f, order
def remove_conflicting_packages(conflicts):
    """Force-remove conflicting packages, bypassing dependency resolution
    and the system.base safety switch."""
    result = remove(conflicts, ignore_dep=True, ignore_safety=True)
    if result:
        raise Exception(_("Conflicts remain"))
def remove_obsoleted_packages():
    """Remove every installed package that the repository marks obsolete."""
    installdb = pisi.db.installdb.InstallDB()
    packagedb = pisi.db.packagedb.PackageDB()
    # Python 2 semantics: filter() returns a list here. NOTE(review): under
    # Python 3 this would be a lazy (always-truthy) iterator -- confirm the
    # file stays on Python 2 (it uses ugettext above).
    obsoletes = filter(installdb.has_package, packagedb.get_obsoletes())
    if obsoletes:
        if remove(obsoletes, ignore_dep=True, ignore_safety=True):
            raise Exception(_("Obsoleted packages remaining"))
def remove_replaced_packages(replaced):
    """Force-remove packages superseded by replacements, bypassing
    dependency resolution and the system.base safety switch."""
    result = remove(replaced, ignore_dep=True, ignore_safety=True)
    if result:
        raise Exception(_("Replaced package remains"))
| gpl-2.0 |
kennethgillen/ansible | test/units/parsing/test_mod_args.py | 66 | 4764 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.parsing.mod_args import ModuleArgsParser
from ansible.errors import AnsibleParserError
from ansible.compat.tests import unittest
class TestModArgsDwim(unittest.TestCase):
    # Exercises ModuleArgsParser's handling of the many ways a task can
    # specify its module and arguments: k=v shorthand, complex-args dicts,
    # action:/local_action:, and illegal combinations of these.
    # TODO: add tests that construct ModuleArgsParser with a task reference
    # TODO: verify the AnsibleError raised on failure knows the task
    # and the task knows the line numbers
    def setUp(self):
        pass
    def _debug(self, mod, args, to):
        # Debugging aid: echo the parser's three return values.
        print("RETURNED module = {0}".format(mod))
        print("         args = {0}".format(args))
        print("         to = {0}".format(to))
    def tearDown(self):
        pass
    def test_basic_shell(self):
        # shell tasks are normalized to the command module with _uses_shell.
        m = ModuleArgsParser(dict(shell='echo hi'))
        mod, args, to = m.parse()
        self._debug(mod, args, to)
        self.assertEqual(mod, 'command')
        self.assertEqual(args, dict(
            _raw_params = 'echo hi',
            _uses_shell = True,
        ))
        self.assertIsNone(to)
    def test_basic_command(self):
        m = ModuleArgsParser(dict(command='echo hi'))
        mod, args, to = m.parse()
        self._debug(mod, args, to)
        self.assertEqual(mod, 'command')
        self.assertEqual(args, dict(
            _raw_params = 'echo hi',
        ))
        self.assertIsNone(to)
    def test_shell_with_modifiers(self):
        # creates=/removes= modifiers are lifted out of the raw params.
        m = ModuleArgsParser(dict(shell='/bin/foo creates=/tmp/baz removes=/tmp/bleep'))
        mod, args, to = m.parse()
        self._debug(mod, args, to)
        self.assertEqual(mod, 'command')
        self.assertEqual(args, dict(
            creates = '/tmp/baz',
            removes = '/tmp/bleep',
            _raw_params = '/bin/foo',
            _uses_shell = True,
        ))
        self.assertIsNone(to)
    def test_normal_usage(self):
        # "module: k=v" shorthand form.
        m = ModuleArgsParser(dict(copy='src=a dest=b'))
        mod, args, to = m.parse()
        self._debug(mod, args, to)
        self.assertEqual(mod, 'copy')
        self.assertEqual(args, dict(src='a', dest='b'))
        self.assertIsNone(to)
    def test_complex_args(self):
        # "module: {k: v}" complex-args form.
        m = ModuleArgsParser(dict(copy=dict(src='a', dest='b')))
        mod, args, to = m.parse()
        self._debug(mod, args, to)
        self.assertEqual(mod, 'copy')
        self.assertEqual(args, dict(src='a', dest='b'))
        self.assertIsNone(to)
    def test_action_with_complex(self):
        m = ModuleArgsParser(dict(action=dict(module='copy', src='a', dest='b')))
        mod, args, to = m.parse()
        self._debug(mod, args, to)
        self.assertEqual(mod, 'copy')
        self.assertEqual(args, dict(src='a', dest='b'))
        self.assertIsNone(to)
    def test_action_with_complex_and_complex_args(self):
        m = ModuleArgsParser(dict(action=dict(module='copy', args=dict(src='a', dest='b'))))
        mod, args, to = m.parse()
        self._debug(mod, args, to)
        self.assertEqual(mod, 'copy')
        self.assertEqual(args, dict(src='a', dest='b'))
        self.assertIsNone(to)
    def test_local_action_string(self):
        # local_action implies delegation to localhost.
        m = ModuleArgsParser(dict(local_action='copy src=a dest=b'))
        mod, args, delegate_to = m.parse()
        self._debug(mod, args, delegate_to)
        self.assertEqual(mod, 'copy')
        self.assertEqual(args, dict(src='a', dest='b'))
        # NOTE(review): assertIs checks object identity against a string
        # literal; this relies on CPython string interning -- assertEqual
        # would express the intent more robustly. Confirm.
        self.assertIs(delegate_to, 'localhost')
    def test_multiple_actions(self):
        # Specifying the module more than once must be rejected.
        m = ModuleArgsParser(dict(action='shell echo hi', local_action='shell echo hi'))
        self.assertRaises(AnsibleParserError, m.parse)
        m = ModuleArgsParser(dict(action='shell echo hi', shell='echo hi'))
        self.assertRaises(AnsibleParserError, m.parse)
        m = ModuleArgsParser(dict(local_action='shell echo hi', shell='echo hi'))
        self.assertRaises(AnsibleParserError, m.parse)
        m = ModuleArgsParser(dict(ping='data=hi', shell='echo hi'))
        self.assertRaises(AnsibleParserError, m.parse)
| gpl-3.0 |
eLBati/stock-logistics-workflow | __unported__/product_serial/wizard/__init__.py | 9 | 1057 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Product serial module for OpenERP
# Copyright (C) 2013 Akretion (http://www.akretion.com)
# @author Alexis de Lattre <alexis.delattre@akretion.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import prodlot_wizard
| agpl-3.0 |
jmunsch/vr_crash_course | opensphericalcamera/osc/bubl.py | 2 | 7986 | """
*********************
*********************
*********************
This code has not been tested with a BublCam.
The BublOscClient.js client is the only documentation I can find for the
custom commands.
*********************
*********************
*********************
"""
"""
Extensions to the Open Spherical Camera API specific to the Bubl Cam.
Documentation / Examples here:
https://bubltechnology.github.io/ScarletTests/
https://github.com/BublTechnology/osc-client/blob/master/lib/BublOscClient.js
Open Spherical Camera API proposed here:
https://developers.google.com/streetview/open-spherical-camera/reference
The library is an evolution of:
https://github.com/codetricity/theta-s-api-tests/blob/master/thetapylib.py
Usage:
At the top of your Python script, use
from osc.bubl import Bublcam
After you import the library, you can use the commands like this:
bublcam = Bublcam()
bublcam.state()
bublcam.info()
# Capture image
response = bublcam.takePicture()
# Wait for the stitching to finish
bublcam.waitForProcessing(response['id'])
# Copy image to computer
bublcam.getLatestImage()
# Capture video
response = bublcam.captureVideo()
bublcam.stop(response['id'])
# Get the port and end point for live streaming
(bublStreamPort, bublStreamEndPoint) = bublcam.stream()
# For rtsp video streaming, open the following URI
# rtsp streaming not implemented here.
rtspUri = "rtsp://%s:%s/%s" % (bubl._ip, bublStreamPort, bublStreamEndPoint)
# Turn the camera off in 30 seconds
bublcam.shutdown(30)
"""
import json
import requests
import timeit
import osc
__author__ = 'Haarm-Pieter Duiker'
__copyright__ = 'Copyright (C) 2016 - Duiker Research Corp'
__license__ = ''
__maintainer__ = 'Haarm-Pieter Duiker'
__email__ = 'support@duikerresearch.org'
__status__ = 'Production'
__major_version__ = '1'
__minor_version__ = '0'
__change_version__ = '0'
__version__ = '.'.join((__major_version__,
__minor_version__,
__change_version__))
__all__ = ['Bublcam']
#
# Bubl cam
#
class Bublcam(osc.OpenSphericalCamera):
def __init__(self, ip_base="192.168.0.100", httpPort=80):
osc.OpenSphericalCamera.__init__(self, ip_base, httpPort)
def updateFirmware(self, firmwareFilename):
"""
_bublUpdate
Update the camera firmware
Reference:
https://github.com/BublTechnology/osc-client/blob/master/lib/BublOscClient.js#L25
"""
url = self._request("_bublUpdate")
with open(firmwareFilename, 'rb') as handle:
body = handle.read()
try:
req = requests.post(url, data=body,
headers={'Content-Type': 'application/octet-stream'})
except Exception, e:
self._httpError(e)
return None
if req.status_code == 200:
response = req.json()
else:
self._oscError(req)
response = None
return response
def bublGetImage(self, fileUri):
"""
_bublGetImage
Transfer the file from the camera to computer and save the
binary data to local storage. This works, but is clunky.
There are easier ways to do this.
Not currently applying the equivalent of Javascript's encodeURIComponent
to the fileUri
Reference:
https://github.com/BublTechnology/osc-client/blob/master/lib/BublOscClient.js#L31
"""
acquired = False
if fileUri:
url = self._request("_bublGetImage/%s" % fileUri)
fileName = fileUri.split("/")[1]
try:
response = requests.get(url, stream=True)
except Exception, e:
self._httpError(e)
return acquired
if response.status_code == 200:
with open(fileName, 'wb') as handle:
for block in response.iter_content(1024):
handle.write(block)
acquired = True
else:
self._oscError(req)
return acquired
def stop(self, commandId):
"""
_bublStop
Reference:
https://github.com/BublTechnology/osc-client/blob/master/lib/BublOscClient.js#L37
"""
url = self._request("commands/_bublStop")
body = json.dumps({
"id": commandId
})
try:
req = requests.post(url, data=body)
except Exception, e:
self._httpError(e)
return None
if req.status_code == 200:
response = req.json()
else:
self._oscError(req)
response = None
return response
def poll(self, commandId, fingerprint, waitTimeout):
"""
_bublPoll
Reference:
https://github.com/BublTechnology/osc-client/blob/master/lib/BublOscClient.js#L43
"""
url = self._request("commands/_bublPoll")
body = json.dumps({
"id": commandId,
"fingerprint" : fingerprint,
"waitTimeout" : waitTimeout
})
try:
req = requests.post(url, data=body)
except Exception, e:
self._httpError(e)
return None
if req.status_code == 200:
response = req.json()
else:
self._oscError(req)
response = None
return response
def captureVideo(self):
"""
_bublCaptureVideo
Reference:
https://github.com/BublTechnology/osc-client/blob/master/lib/BublOscClient.js#L49
"""
url = self._request("commands/execute")
body = json.dumps({"name": "camera._bublCaptureVideo",
"parameters": {
"sessionId": self.sid
}
})
try:
req = requests.post(url, data=body)
except Exception, e:
self._httpError(e)
return None
if req.status_code == 200:
response = req.json()
else:
self._oscError(req)
response = None
return response
def shutdown(self, shutdownDelay):
"""
_bublShutdown
Reference:
https://github.com/BublTechnology/osc-client/blob/master/lib/BublOscClient.js#L64
"""
url = self._request("commands/execute")
body = json.dumps({"name": "camera._bublShutdown",
"parameters": {
"sessionId": self.sid,
"shutdownDelay" : shutdownDelay
}
})
try:
req = requests.post(url, data=body)
except Exception, e:
self._httpError(e)
return None
if req.status_code == 200:
response = req.json()
else:
self._oscError(req)
response = None
return response
def stream(self):
"""
_bublStream
Return the port and end point to use for rtsp video streaming
Reference:
https://github.com/BublTechnology/osc-client/blob/master/lib/BublOscClient.js#L59
"""
acquired = False
url = self._request("commands/execute")
body = json.dumps({"name": "camera._bublStream",
"parameters": {
"sessionId": self.sid
}})
try:
response = requests.post(url, data=body, stream=True)
except Exception, e:
self._httpError(e)
return acquired
if response.status_code == 200:
response = req.json()
bublStreamPort = response['_bublStreamPort']
bublStreamEndPoint = response['_bublStreamEndPoint']
else:
bublStreamPort = None
bublStreamEndPoint = None
self._oscError(response)
return (bublStreamPort, bublStreamEndPoint)
# Bublcam
| gpl-3.0 |
Sweetgrassbuffalo/ReactionSweeGrass-v2 | .meteor/local/dev_bundle/python/Lib/lib2to3/fixes/fix_unicode.py | 177 | 1269 | r"""Fixer for unicode.
* Changes unicode to str and unichr to chr.
* If "...\u..." is not unicode literal change it into "...\\u...".
* Change u"..." into "...".
"""
from ..pgen2 import token
from .. import fixer_base
_mapping = {u"unichr" : u"chr", u"unicode" : u"str"}
class FixUnicode(fixer_base.BaseFix):
    """Fixer that rewrites Python 2 unicode constructs (see module docstring):

    * renames the names ``unicode`` -> ``str`` and ``unichr`` -> ``chr``
      (via the module-level ``_mapping``);
    * drops a leading ``u``/``U`` prefix from string literals;
    * doubles bare ``\\u`` / ``\\U`` escapes in literals that are not
      unicode literals, so the text keeps its meaning once the prefix rules
      change.
    """
    BM_compatible = True
    PATTERN = "STRING | 'unicode' | 'unichr'"

    def start_tree(self, tree, filename):
        super(FixUnicode, self).start_tree(tree, filename)
        # If the module has 'from __future__ import unicode_literals', plain
        # literals are already unicode and the \u re-escaping below must be
        # skipped.
        self.unicode_literals = 'unicode_literals' in tree.future_features

    def transform(self, node, results):
        if node.type == token.NAME:
            # A bare 'unicode'/'unichr' name: clone the leaf and substitute
            # the Python 3 spelling from _mapping.
            new = node.clone()
            new.value = _mapping[node.value]
            return new
        elif node.type == token.STRING:
            val = node.value
            if not self.unicode_literals and val[0] in u'\'"' and u'\\' in val:
                # Literal with no prefix (first char is a quote) containing
                # backslashes: split at double-backslash pairs so already
                # escaped sequences stay untouched, then double any \u or \U
                # inside each piece, and rejoin with the original pairs.
                val = ur'\\'.join([
                    v.replace(u'\\u', ur'\\u').replace(u'\\U', ur'\\U')
                    for v in val.split(ur'\\')
                ])
            if val[0] in u'uU':
                # Strip the unicode prefix itself.
                val = val[1:]
            if val == node.value:
                # Nothing changed -- reuse the original node.
                return node
            new = node.clone()
            new.value = val
            return new
| gpl-3.0 |
2014c2g14/2014c2 | w2/static/Brython2.0.0-20140209-164925/Lib/_markupbase.py | 891 | 14598 | """Shared support for scanning document type declarations in HTML and XHTML.
This module is used as a foundation for the html.parser module. It has no
documented public API and should not be used directly.
"""
import re
_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9]*\s*').match
_declstringlit_match = re.compile(r'(\'[^\']*\'|"[^"]*")\s*').match
_commentclose = re.compile(r'--\s*>')
_markedsectionclose = re.compile(r']\s*]\s*>')
# An analysis of the MS-Word extensions is available at
# http://www.planetpublish.com/xmlarena/xap/Thursday/WordtoXML.pdf
_msmarkedsectionclose = re.compile(r']\s*>')
del re
class ParserBase:
    """Parser base class which provides some common support methods used
    by the SGML/HTML and XHTML parsers."""

    def __init__(self):
        # This class is only useful as a base/mixin for a concrete parser.
        if self.__class__ is ParserBase:
            raise RuntimeError(
                "_markupbase.ParserBase must be subclassed")

    def error(self, message):
        """Report a parse error.  Subclass responsibility; expected to raise."""
        raise NotImplementedError(
            "subclasses of ParserBase must override error()")

    def reset(self):
        # (Re)initialize the (line, column) bookkeeping used by
        # getpos()/updatepos(); subclasses extend this.
        self.lineno = 1
        self.offset = 0

    def getpos(self):
        """Return current line number and offset."""
        return self.lineno, self.offset

    # Internal -- update line number and offset.  This should be
    # called for each piece of data exactly once, in order -- in other
    # words the concatenation of all the input strings to this
    # function should be exactly the entire input.
    def updatepos(self, i, j):
        """Advance position tracking over self.rawdata[i:j]; return j."""
        if i >= j:
            return j
        rawdata = self.rawdata
        nlines = rawdata.count("\n", i, j)
        if nlines:
            self.lineno = self.lineno + nlines
            pos = rawdata.rindex("\n", i, j) # Should not fail
            self.offset = j-(pos+1)
        else:
            self.offset = self.offset + j-i
        return j

    # Extra characters tolerated inside a declaration; parse_declaration
    # narrows this to '' while scanning a DOCTYPE.
    _decl_otherchars = ''

    # Internal -- parse declaration (for use by subclasses).
    # Returns the index just past the declaration, or -1 if the buffer ends
    # before the declaration is complete.
    def parse_declaration(self, i):
        # This is some sort of declaration; in "HTML as
        # deployed," this should only be the document type
        # declaration ("<!DOCTYPE html...>").
        # ISO 8879:1986, however, has more complex
        # declaration syntax for elements in <!...>, including:
        # --comment--
        # [marked section]
        # name in the following list: ENTITY, DOCTYPE, ELEMENT,
        # ATTLIST, NOTATION, SHORTREF, USEMAP,
        # LINKTYPE, LINK, IDLINK, USELINK, SYSTEM
        rawdata = self.rawdata
        j = i + 2
        assert rawdata[i:j] == "<!", "unexpected call to parse_declaration"
        if rawdata[j:j+1] == ">":
            # the empty comment <!>
            return j + 1
        if rawdata[j:j+1] in ("-", ""):
            # Start of comment followed by buffer boundary,
            # or just a buffer boundary.
            return -1
        # A simple, practical version could look like: ((name|stringlit) S*) + '>'
        n = len(rawdata)
        if rawdata[j:j+2] == '--': #comment
            # Locate --.*-- as the body of the comment
            return self.parse_comment(i)
        elif rawdata[j] == '[': #marked section
            # Locate [statusWord [...arbitrary SGML...]] as the body of the marked section
            # Where statusWord is one of TEMP, CDATA, IGNORE, INCLUDE, RCDATA
            # Note that this is extended by Microsoft Office "Save as Web" function
            # to include [if...] and [endif].
            return self.parse_marked_section(i)
        else: #all other declaration elements
            decltype, j = self._scan_name(j, i)
        if j < 0:
            return j
        if decltype == "doctype":
            self._decl_otherchars = ''
        while j < n:
            c = rawdata[j]
            if c == ">":
                # end of declaration syntax
                data = rawdata[i+2:j]
                if decltype == "doctype":
                    self.handle_decl(data)
                else:
                    # According to the HTML5 specs sections "8.2.4.44 Bogus
                    # comment state" and "8.2.4.45 Markup declaration open
                    # state", a comment token should be emitted.
                    # Calling unknown_decl provides more flexibility though.
                    self.unknown_decl(data)
                return j + 1
            if c in "\"'":
                m = _declstringlit_match(rawdata, j)
                if not m:
                    return -1 # incomplete
                j = m.end()
            elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
                name, j = self._scan_name(j, i)
            elif c in self._decl_otherchars:
                j = j + 1
            elif c == "[":
                # this could be handled in a separate doctype parser
                if decltype == "doctype":
                    j = self._parse_doctype_subset(j + 1, i)
                elif decltype in {"attlist", "linktype", "link", "element"}:
                    # must tolerate []'d groups in a content model in an element declaration
                    # also in data attribute specifications of attlist declaration
                    # also link type declaration subsets in linktype declarations
                    # also link attribute specification lists in link declarations
                    self.error("unsupported '[' char in %s declaration" % decltype)
                else:
                    self.error("unexpected '[' char in declaration")
            else:
                self.error(
                    "unexpected %r char in declaration" % rawdata[j])
            if j < 0:
                return j
        return -1 # incomplete

    # Internal -- parse a marked section
    # Override this to handle MS-word extension syntax <![if word]>content<![endif]>
    def parse_marked_section(self, i, report=1):
        rawdata= self.rawdata
        assert rawdata[i:i+3] == '<![', "unexpected call to parse_marked_section()"
        sectName, j = self._scan_name( i+3, i )
        if j < 0:
            return j
        if sectName in {"temp", "cdata", "ignore", "include", "rcdata"}:
            # look for standard ]]> ending
            match= _markedsectionclose.search(rawdata, i+3)
        elif sectName in {"if", "else", "endif"}:
            # look for MS Office ]> ending
            match= _msmarkedsectionclose.search(rawdata, i+3)
        else:
            self.error('unknown status keyword %r in marked section' % rawdata[i+3:j])
        if not match:
            # section close not found in the buffer; incomplete
            return -1
        if report:
            j = match.start(0)
            self.unknown_decl(rawdata[i+3: j])
        return match.end(0)

    # Internal -- parse comment, return length or -1 if not terminated
    def parse_comment(self, i, report=1):
        rawdata = self.rawdata
        if rawdata[i:i+4] != '<!--':
            self.error('unexpected call to parse_comment()')
        match = _commentclose.search(rawdata, i+4)
        if not match:
            # closing --> not found in the buffer; incomplete
            return -1
        if report:
            j = match.start(0)
            self.handle_comment(rawdata[i+4: j])
        return match.end(0)

    # Internal -- scan past the internal subset in a <!DOCTYPE declaration,
    # returning the index just past any whitespace following the trailing ']'.
    def _parse_doctype_subset(self, i, declstartpos):
        rawdata = self.rawdata
        n = len(rawdata)
        j = i
        while j < n:
            c = rawdata[j]
            if c == "<":
                s = rawdata[j:j+2]
                if s == "<":
                    # end of buffer; incomplete
                    return -1
                if s != "<!":
                    self.updatepos(declstartpos, j + 1)
                    self.error("unexpected char in internal subset (in %r)" % s)
                if (j + 2) == n:
                    # end of buffer; incomplete
                    return -1
                if (j + 4) > n:
                    # end of buffer; incomplete
                    return -1
                if rawdata[j:j+4] == "<!--":
                    j = self.parse_comment(j, report=0)
                    if j < 0:
                        return j
                    continue
                name, j = self._scan_name(j + 2, declstartpos)
                if j == -1:
                    return -1
                if name not in {"attlist", "element", "entity", "notation"}:
                    self.updatepos(declstartpos, j + 2)
                    self.error(
                        "unknown declaration %r in internal subset" % name)
                # handle the individual names
                meth = getattr(self, "_parse_doctype_" + name)
                j = meth(j, declstartpos)
                if j < 0:
                    return j
            elif c == "%":
                # parameter entity reference
                if (j + 1) == n:
                    # end of buffer; incomplete
                    return -1
                s, j = self._scan_name(j + 1, declstartpos)
                if j < 0:
                    return j
                if rawdata[j] == ";":
                    j = j + 1
            elif c == "]":
                j = j + 1
                while j < n and rawdata[j].isspace():
                    j = j + 1
                if j < n:
                    if rawdata[j] == ">":
                        return j
                    self.updatepos(declstartpos, j)
                    self.error("unexpected char after internal subset")
                else:
                    return -1
            elif c.isspace():
                j = j + 1
            else:
                self.updatepos(declstartpos, j)
                self.error("unexpected char %r in internal subset" % c)
        # end of buffer reached
        return -1

    # Internal -- scan past <!ELEMENT declarations
    def _parse_doctype_element(self, i, declstartpos):
        name, j = self._scan_name(i, declstartpos)
        if j == -1:
            return -1
        # style content model; just skip until '>'
        rawdata = self.rawdata
        if '>' in rawdata[j:]:
            return rawdata.find(">", j) + 1
        return -1

    # Internal -- scan past <!ATTLIST declarations
    def _parse_doctype_attlist(self, i, declstartpos):
        rawdata = self.rawdata
        name, j = self._scan_name(i, declstartpos)
        c = rawdata[j:j+1]
        if c == "":
            # end of buffer; incomplete
            return -1
        if c == ">":
            return j + 1
        while 1:
            # scan a series of attribute descriptions; simplified:
            #   name type [value] [#constraint]
            name, j = self._scan_name(j, declstartpos)
            if j < 0:
                return j
            c = rawdata[j:j+1]
            if c == "":
                return -1
            if c == "(":
                # an enumerated type; look for ')'
                if ")" in rawdata[j:]:
                    j = rawdata.find(")", j) + 1
                else:
                    return -1
                while rawdata[j:j+1].isspace():
                    j = j + 1
                if not rawdata[j:]:
                    # end of buffer, incomplete
                    return -1
            else:
                name, j = self._scan_name(j, declstartpos)
            c = rawdata[j:j+1]
            if not c:
                return -1
            if c in "'\"":
                m = _declstringlit_match(rawdata, j)
                if m:
                    j = m.end()
                else:
                    return -1
                c = rawdata[j:j+1]
                if not c:
                    return -1
            if c == "#":
                if rawdata[j:] == "#":
                    # end of buffer
                    return -1
                name, j = self._scan_name(j + 1, declstartpos)
                if j < 0:
                    return j
                c = rawdata[j:j+1]
                if not c:
                    return -1
            if c == '>':
                # all done
                return j + 1

    # Internal -- scan past <!NOTATION declarations
    def _parse_doctype_notation(self, i, declstartpos):
        name, j = self._scan_name(i, declstartpos)
        if j < 0:
            return j
        rawdata = self.rawdata
        while 1:
            c = rawdata[j:j+1]
            if not c:
                # end of buffer; incomplete
                return -1
            if c == '>':
                return j + 1
            if c in "'\"":
                m = _declstringlit_match(rawdata, j)
                if not m:
                    return -1
                j = m.end()
            else:
                name, j = self._scan_name(j, declstartpos)
                if j < 0:
                    return j

    # Internal -- scan past <!ENTITY declarations
    def _parse_doctype_entity(self, i, declstartpos):
        rawdata = self.rawdata
        if rawdata[i:i+1] == "%":
            # parameter entity declaration; skip whitespace after '%'
            j = i + 1
            while 1:
                c = rawdata[j:j+1]
                if not c:
                    return -1
                if c.isspace():
                    j = j + 1
                else:
                    break
        else:
            j = i
        name, j = self._scan_name(j, declstartpos)
        if j < 0:
            return j
        while 1:
            c = self.rawdata[j:j+1]
            if not c:
                return -1
            if c in "'\"":
                m = _declstringlit_match(rawdata, j)
                if m:
                    j = m.end()
                else:
                    return -1 # incomplete
            elif c == ">":
                return j + 1
            else:
                name, j = self._scan_name(j, declstartpos)
                if j < 0:
                    return j

    # Internal -- scan a name token and the new position and the token, or
    # return -1 if we've reached the end of the buffer.
    def _scan_name(self, i, declstartpos):
        rawdata = self.rawdata
        n = len(rawdata)
        if i == n:
            return None, -1
        m = _declname_match(rawdata, i)
        if m:
            s = m.group()
            name = s.strip()
            if (i + len(s)) == n:
                return None, -1  # end of buffer
            return name.lower(), m.end()
        else:
            self.updatepos(declstartpos, i)
            self.error("expected name token at %r"
                       % rawdata[declstartpos:declstartpos+20])

    # To be overridden -- handlers for unknown objects
    def unknown_decl(self, data):
        pass
| gpl-2.0 |
graalvm/mx | select_jdk.py | 1 | 9345 | #!/usr/bin/env python
# ----------------------------------------------------------------------------------------------------
#
# Copyright (c) 2018, 2018, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# ----------------------------------------------------------------------------------------------------
from __future__ import print_function
import os, tempfile
from argparse import ArgumentParser, REMAINDER
from os.path import exists, expanduser, join, isdir, isfile, realpath, dirname, abspath
# Temporary imports and (re)definitions while porting mx from Python 2 to Python 3
import sys
if sys.version_info[0] < 3:
def input(prompt=None): # pylint: disable=redefined-builtin
return raw_input(prompt) # pylint: disable=undefined-variable
from StringIO import StringIO
else:
from io import StringIO
def is_valid_jdk(jdk):
    """
    Heuristically checks whether `jdk` is a usable JDK installation.

    :return: True if ``jdk/bin/java`` (or ``java.exe``) exists and is executable
    """
    candidate = join(jdk, 'bin', 'java')
    if not exists(candidate):
        # Fall back to the Windows executable name.
        candidate = candidate + '.exe'
    return isfile(candidate) and os.access(candidate, os.X_OK)
def find_system_jdks():
    """
    Scans well-known installation directories for JDKs.

    :return: a set of absolute, real paths of valid JDK directories
    """
    candidate_roots = (
        '/Library/Java/JavaVirtualMachines',
        '/usr/lib/jvm',
        '/usr/java',
        '/usr/jdk/instances',
        r'C:\Program Files\Java',
        join(expanduser('~'), '.mx', 'jdks'),  # default --to value for `mx fetch-jdk` command
    )
    found = set()
    for root in candidate_roots:
        if not isdir(root):
            continue
        for entry in os.listdir(root):
            home = join(root, entry)
            # On macOS the JDK home is nested under Contents/Home.
            mac_home = join(home, 'Contents', 'Home')
            if isdir(mac_home):
                home = mac_home
            if is_valid_jdk(home):
                found.add(realpath(home))
    return found
def get_suite_env_file(suite_dir='.'):
    """
    Returns the absolute path of the ``mx.<suite>/env`` file for the suite in
    `suite_dir`, or None if `suite_dir` does not look like an mx suite
    (i.e. contains no ``mx.*`` directory with a ``suite.py``).
    """
    for entry in os.listdir(suite_dir):
        if not entry.startswith('mx.'):
            continue
        if exists(join(suite_dir, entry, 'suite.py')):
            return abspath(join(suite_dir, entry, 'env'))
    return None
def get_setvar_format(shell):
    """Returns the %-style template for setting an env var in `shell`."""
    templates = {
        'csh': 'setenv %s %s',
        'fish': 'set -x %s %s',
    }
    # Bourne-style syntax is the default for any other shell.
    return templates.get(shell, 'export %s=%s')
def get_PATH_sep(shell):
    """Returns the separator `shell` uses between PATH entries."""
    return ' ' if shell == 'fish' else os.pathsep
def get_shell_commands(args, jdk, extra_jdks):
    """
    Builds the shell commands that export JAVA_HOME, EXTRA_JAVA_HOMES and an
    updated PATH for the selected JDK(s).

    :param args: parsed command line arguments; ``args.shell`` selects the syntax
    :param jdk: primary JDK directory (becomes JAVA_HOME)
    :param extra_jdks: list of secondary JDK directories (become EXTRA_JAVA_HOMES)
    :return: the commands as a single, stripped, newline-separated string
    """
    setvar_format = get_setvar_format(args.shell)
    shell_commands = StringIO()
    print(setvar_format % ('JAVA_HOME', jdk), file=shell_commands)
    if extra_jdks:
        print(setvar_format % ('EXTRA_JAVA_HOMES', os.pathsep.join(extra_jdks)), file=shell_commands)
    # Bug fix: PATH can be unset (e.g. in a stripped-down CI environment), in
    # which case os.environ.get('PATH') is None and .split() would raise
    # AttributeError. Default to the empty string instead.
    path = os.environ.get('PATH', '').split(os.pathsep)
    if path:
        jdk_bin = join(jdk, 'bin')
        old_java_home = os.environ.get('JAVA_HOME')
        replace = join(old_java_home, 'bin') if old_java_home else None
        if replace in path:
            # Swap the old JAVA_HOME/bin entry in place to preserve ordering.
            path = [e if e != replace else jdk_bin for e in path]
        else:
            path = [jdk_bin] + path
        print(setvar_format % ('PATH', get_PATH_sep(args.shell).join(path)), file=shell_commands)
    return shell_commands.getvalue().strip()
def apply_selection(args, jdk, extra_jdks):
    """
    Reports and persists the selected JDK(s): prints the selection, then
    either writes eval-able commands to ``args.shell_file``, appends the
    values to the suite env file under ``args.suite_path``, or prints the
    commands for manual application.
    """
    print('JAVA_HOME=' + jdk)
    if extra_jdks:
        print('EXTRA_JAVA_HOMES=' + os.pathsep.join(extra_jdks))

    if args.shell_file:
        # Write commands for the caller's shell wrapper function to source.
        with open(args.shell_file, 'w') as fp:
            print(get_shell_commands(args, jdk, extra_jdks), file=fp)
        return

    env = get_suite_env_file(args.suite_path)
    if env is None:
        # Not in a suite: the user has to apply the settings themselves.
        print()
        print('To apply the above environment variable settings, eval the following in your shell:')
        print()
        print(get_shell_commands(args, jdk, extra_jdks))
        return

    with open(env, 'a') as fp:
        print('JAVA_HOME=' + jdk, file=fp)
        if extra_jdks:
            print('EXTRA_JAVA_HOMES=' + os.pathsep.join(extra_jdks), file=fp)
    print('Updated', env)
if __name__ == '__main__':
    parser = ArgumentParser(prog='select_jdk', usage='%(prog)s [options] [<primary jdk> [<secondary jdk>...]]' + """
Selects values for the JAVA_HOME, EXTRA_JAVA_HOMES and PATH environment variables based on
the explicitly supplied JDKs or on system JDKs plus previously selected JDKs (cached in ~/.mx/jdk_cache).
If the -s/--shell-source option is given, settings appropriate for the current shell are written to
the given file such that it can be eval'ed in the shell to apply the settings. For example, in ~/.config/fish/config.fish:
if test -x (dirname (which mx))/select_jdk.py
    function select_jdk
        set tmp_file (mktemp)
        eval (dirname (which mx))/select_jdk.py -s $tmp_file $argv
        source $tmp_file
        rm $tmp_file
    end
end
or in ~/.bashrc:
if [ -x $(dirname $(which mx))/select_jdk.py ]; then
    function select_jdk {
        TMP_FILE=select_jdk.$$
        eval $(dirname $(which mx))/select_jdk.py -s $TMP_FILE "$@"
        source $TMP_FILE
        rm $TMP_FILE
    }
fi
In the absence of -s, if the current directory looks like a suite, the mx.<suite>/env file is
created/updated with the selected values for JAVA_HOME and EXTRA_JAVA_HOMES.
Otherwise, the settings are printed such that they can applied manually.
""")
    shell_or_env = parser.add_mutually_exclusive_group()
    shell_or_env.add_argument('-s', '--shell-file', action='store', help='write shell commands for setting env vars to <path>', metavar='<path>')
    shell_or_env.add_argument('-p', '--suite-path', help='directory of suite whose env file is to be updated', metavar='<path>')
    parser.add_argument('--shell', action='store', help='shell syntax to use for commands', metavar='<format>', choices=['sh', 'fish', 'csh'])
    parser.add_argument('jdks', nargs=REMAINDER, metavar='<primary jdk> [<secondary jdk>...]')
    args = parser.parse_args()

    if args.shell is None:
        # Bug fix: SHELL may be unset (e.g. in CI or cron environments), in
        # which case os.environ.get('SHELL') is None and .endswith would
        # raise AttributeError; fall back to Bourne syntax.
        shell = os.environ.get('SHELL') or ''
        if shell.endswith('fish'):
            args.shell = 'fish'
        elif shell.endswith('csh'):
            args.shell = 'csh'
        else:
            args.shell = 'sh'

    jdk_cache_path = join(expanduser('~'), '.mx', 'jdk_cache')
    if len(args.jdks) != 0:
        # Explicit JDKs given: validate, remember them in the cache, apply.
        invalid_jdks = [a for a in args.jdks if not is_valid_jdk(a)]
        if invalid_jdks:
            raise SystemExit('Following JDKs appear to be invalid (java executable not found):\n' + '\n'.join(invalid_jdks))
        if not exists(dirname(jdk_cache_path)):
            os.makedirs(dirname(jdk_cache_path))
        with open(jdk_cache_path, 'a') as fp:
            for jdk in args.jdks:
                print(abspath(jdk), file=fp)
        apply_selection(args, abspath(args.jdks[0]), [abspath(a) for a in args.jdks[1:]])
    else:
        # Interactive mode: offer system JDKs plus previously cached ones.
        jdks = find_system_jdks()
        if exists(jdk_cache_path):
            with open(jdk_cache_path) as fp:
                jdks.update((line.strip() for line in fp.readlines() if is_valid_jdk(line.strip())))

        sorted_jdks = sorted(jdks)
        print("Current JDK Settings:")
        for name in ['JAVA_HOME', 'EXTRA_JAVA_HOMES']:
            jdk = os.environ.get(name, None)
            if jdk:
                if jdk in sorted_jdks:
                    jdk = '{} [{}]'.format(jdk, sorted_jdks.index(jdk))
                print('{}={}'.format(name, jdk))
        choices = list(enumerate(sorted_jdks))
        if choices:
            # Rewrite the cache atomically via a temp file in the same dir.
            # Bug fix: mkstemp returns an *open* file descriptor as well as a
            # path; the original discarded the descriptor, leaking it. Close
            # it before re-opening the path.
            fd, tmp_cache_path = tempfile.mkstemp(dir=dirname(jdk_cache_path))
            os.close(fd)
            with open(tmp_cache_path, 'w') as fp:
                for index, jdk in choices:
                    print('[{}] {}'.format(index, jdk))
                    print(jdk, file=fp)
            os.rename(tmp_cache_path, jdk_cache_path)

            choices = {str(index):jdk for index, jdk in choices}
            jdks = [choices[n] for n in input('Select JDK(s) (separate multiple choices by whitespace)> ').split() if n in choices]
            if jdks:
                apply_selection(args, jdks[0], jdks[1:])
| gpl-2.0 |
pombreda/swarming | appengine/components/components/third_party/gviz/gviz_api.py | 9 | 45686 | #!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts Python data into data for Google Visualization API clients.
This library can be used to create a google.visualization.DataTable usable by
visualizations built on the Google Visualization API. Output formats are raw
JSON, JSON response, JavaScript, CSV, and HTML table.
See http://code.google.com/apis/visualization/ for documentation on the
Google Visualization API.
"""
__author__ = "Amit Weinstein, Misha Seltzer, Jacob Baskin"
import cgi
import cStringIO
import csv
import datetime
try:
import json
except ImportError:
import simplejson as json
import types
class DataTableException(Exception):
  """The general exception object thrown by DataTable.

  Raised when supplied data or a table description does not match the
  expected format (see the DataTable class documentation).
  """
  pass
class DataTableJSONEncoder(json.JSONEncoder):
  """JSON encoder that handles date/time/datetime objects correctly.

  date/datetime values are emitted as Google Visualization "Date(...)"
  strings (note: the month is 0-based, matching JavaScript's Date
  constructor) and time values as [hour, minute, second] lists.
  """

  def __init__(self):
    # Compact separators and raw (non-ASCII-escaped) output keep the
    # serialized table as small as possible.
    json.JSONEncoder.__init__(self,
                              separators=(",", ":"),
                              ensure_ascii=False)

  def default(self, o):
    if isinstance(o, datetime.datetime):
      if o.microsecond == 0:
        # If the time doesn't have ms-resolution, leave it out to keep
        # things smaller.
        return "Date(%d,%d,%d,%d,%d,%d)" % (
            o.year, o.month - 1, o.day, o.hour, o.minute, o.second)
      else:
        # Use floor division so the millisecond field stays an integer on
        # both Python 2 and Python 3 ('/' yields a float under Python 3).
        return "Date(%d,%d,%d,%d,%d,%d,%d)" % (
            o.year, o.month - 1, o.day, o.hour, o.minute, o.second,
            o.microsecond // 1000)
    elif isinstance(o, datetime.date):
      return "Date(%d,%d,%d)" % (o.year, o.month - 1, o.day)
    elif isinstance(o, datetime.time):
      return [o.hour, o.minute, o.second]
    else:
      return super(DataTableJSONEncoder, self).default(o)
class DataTable(object):
"""Wraps the data to convert to a Google Visualization API DataTable.
Create this object, populate it with data, then call one of the ToJS...
methods to return a string representation of the data in the format described.
You can clear all data from the object to reuse it, but you cannot clear
individual cells, rows, or columns. You also cannot modify the table schema
specified in the class constructor.
You can add new data one or more rows at a time. All data added to an
instantiated DataTable must conform to the schema passed in to __init__().
You can reorder the columns in the output table, and also specify row sorting
order by column. The default column order is according to the original
table_description parameter. Default row sort order is ascending, by column
1 values. For a dictionary, we sort the keys for order.
The data and the table_description are closely tied, as described here:
The table schema is defined in the class constructor's table_description
parameter. The user defines each column using a tuple of
(id[, type[, label[, custom_properties]]]). The default value for type is
string, label is the same as ID if not specified, and custom properties is
an empty dictionary if not specified.
table_description is a dictionary or list, containing one or more column
descriptor tuples, nested dictionaries, and lists. Each dictionary key, list
element, or dictionary element must eventually be defined as
a column description tuple. Here's an example of a dictionary where the key
is a tuple, and the value is a list of two tuples:
{('a', 'number'): [('b', 'number'), ('c', 'string')]}
This flexibility in data entry enables you to build and manipulate your data
in a Python structure that makes sense for your program.
Add data to the table using the same nested design as the table's
table_description, replacing column descriptor tuples with cell data, and
each row is an element in the top level collection. This will be a bit
clearer after you look at the following examples showing the
table_description, matching data, and the resulting table:
Columns as list of tuples [col1, col2, col3]
table_description: [('a', 'number'), ('b', 'string')]
AppendData( [[1, 'z'], [2, 'w'], [4, 'o'], [5, 'k']] )
Table:
a b <--- these are column ids/labels
1 z
2 w
4 o
5 k
Dictionary of columns, where key is a column, and value is a list of
columns {col1: [col2, col3]}
table_description: {('a', 'number'): [('b', 'number'), ('c', 'string')]}
AppendData( data: {1: [2, 'z'], 3: [4, 'w']}
Table:
a b c
1 2 z
3 4 w
Dictionary where key is a column, and the value is itself a dictionary of
columns {col1: {col2, col3}}
table_description: {('a', 'number'): {'b': 'number', 'c': 'string'}}
AppendData( data: {1: {'b': 2, 'c': 'z'}, 3: {'b': 4, 'c': 'w'}}
Table:
a b c
1 2 z
3 4 w
"""
  def __init__(self, table_description, data=None, custom_properties=None):
    """Initialize the data table from a table schema and (optionally) data.

    See the class documentation for more information on table schema and data
    values.

    Args:
      table_description: A table schema, following one of the formats described
                         in TableDescriptionParser(). Schemas describe the
                         column names, data types, and labels. See
                         TableDescriptionParser() for acceptable formats.
      data: Optional. If given, fills the table with the given data. The data
            structure must be consistent with schema in table_description. See
            the class documentation for more information on acceptable data. You
            can add data later by calling AppendData().
      custom_properties: Optional. A dictionary from string to string that
                         goes into the table's custom properties. This can be
                         later changed by changing self.custom_properties.

    Raises:
      DataTableException: Raised if the data and the description did not match,
                          or did not use the supported formats.
    """
    # Parse/validate the schema once up front (raises DataTableException on
    # a malformed description, per the class docs).
    self.__columns = self.TableDescriptionParser(table_description)
    self.__data = []
    self.custom_properties = {}
    if custom_properties is not None:
      self.custom_properties = custom_properties
    if data:
      # Populate the table with the initial rows.
      self.LoadData(data)
@staticmethod
def CoerceValue(value, value_type):
  """Coerces a single value into the type expected for its column.

  Internal helper method.

  Args:
    value: The value which should be converted
    value_type: One of "string", "number", "boolean", "date", "datetime" or
                "timeofday".

  Returns:
    An item of the Python type appropriate to the given value_type. Strings
    are also converted to Unicode using UTF-8 encoding if necessary.
    If a tuple is given, it should be in one of the following forms:
      - (value, formatted value)
      - (value, formatted value, custom properties)
    where the formatted value is a string, and custom properties is a
    dictionary of the custom properties for this cell.
    To specify custom properties without specifying formatted value, one can
    pass None as the formatted value.
    One can also have a null-valued cell with formatted value and/or custom
    properties by specifying None for the value.
    This method ignores the custom properties except for checking that it is a
    dictionary. The custom properties are handled in the ToJSon and ToJSCode
    methods.
    The real type of the given value is not strictly checked. For example,
    any type can be used for string - as we simply take its str( ) and for
    boolean value we just check "if value".

  Examples:
    CoerceValue(None, "string") returns None
    CoerceValue((5, "5$"), "number") returns (5, "5$")
    CoerceValue(100, "string") returns "100"
    CoerceValue(0, "boolean") returns False

  Raises:
    DataTableException: The value and type did not match in a not-recoverable
        way, for example given value 'abc' for type 'number'.
  """
  if isinstance(value, tuple):
    # In case of a tuple, we run the same function on the value itself and
    # add the formatted value.
    if (len(value) not in [2, 3] or
        (len(value) == 3 and not isinstance(value[2], dict))):
      raise DataTableException("Wrong format for value and formatting - %s." %
                               str(value))
    # Formatted value (element 1) must be a string or None.
    if not isinstance(value[1], types.StringTypes + (types.NoneType,)):
      raise DataTableException("Formatted value is not string, given %s." %
                               type(value[1]))
    # Recurse on the bare value, then re-attach formatted value / properties.
    js_value = DataTable.CoerceValue(value[0], value_type)
    return (js_value,) + value[1:]

  t_value = type(value)
  if value is None:
    # None is a valid "null" cell for every column type.
    return value
  if value_type == "boolean":
    return bool(value)

  elif value_type == "number":
    if isinstance(value, (int, long, float)):
      return value
    raise DataTableException("Wrong type %s when expected number" % t_value)

  elif value_type == "string":
    if isinstance(value, unicode):
      return value
    else:
      # Anything else is stringified and decoded so output is always unicode.
      return str(value).decode("utf-8")

  elif value_type == "date":
    # NOTE: datetime must be tested before date (datetime subclasses date);
    # a datetime is truncated to its date part.
    if isinstance(value, datetime.datetime):
      return datetime.date(value.year, value.month, value.day)
    elif isinstance(value, datetime.date):
      return value
    else:
      raise DataTableException("Wrong type %s when expected date" % t_value)

  elif value_type == "timeofday":
    # A datetime is reduced to its time-of-day part.
    if isinstance(value, datetime.datetime):
      return datetime.time(value.hour, value.minute, value.second)
    elif isinstance(value, datetime.time):
      return value
    else:
      raise DataTableException("Wrong type %s when expected time" % t_value)

  elif value_type == "datetime":
    if isinstance(value, datetime.datetime):
      return value
    else:
      raise DataTableException("Wrong type %s when expected datetime" %
                               t_value)
  # If we got here, it means the given value_type was not one of the
  # supported types.
  raise DataTableException("Unsupported type %s" % value_type)
@staticmethod
def EscapeForJSCode(encoder, value):
if value is None:
return "null"
elif isinstance(value, datetime.datetime):
if value.microsecond == 0:
# If it's not ms-resolution, leave that out to save space.
return "new Date(%d,%d,%d,%d,%d,%d)" % (value.year,
value.month - 1, # To match JS
value.day,
value.hour,
value.minute,
value.second)
else:
return "new Date(%d,%d,%d,%d,%d,%d,%d)" % (value.year,
value.month - 1, # match JS
value.day,
value.hour,
value.minute,
value.second,
value.microsecond / 1000)
elif isinstance(value, datetime.date):
return "new Date(%d,%d,%d)" % (value.year, value.month - 1, value.day)
else:
return encoder.encode(value)
@staticmethod
def ToString(value):
if value is None:
return "(empty)"
elif isinstance(value, (datetime.datetime,
datetime.date,
datetime.time)):
return str(value)
elif isinstance(value, unicode):
return value
elif isinstance(value, bool):
return str(value).lower()
else:
return str(value).decode("utf-8")
@staticmethod
def ColumnTypeParser(description):
  """Parses a single column description. Internal helper method.

  Args:
    description: a column description in the possible formats:
     'id'
     ('id',)
     ('id', 'type')
     ('id', 'type', 'label')
     ('id', 'type', 'label', {'custom_prop1': 'custom_val1'})

  Returns:
    Dictionary with the following keys: id, label, type, and
    custom_properties where:
      - If label not given, it equals the id.
      - If type not given, string is used by default.
      - If custom properties are not given, an empty dictionary is used by
        default.

  Raises:
    DataTableException: The column description did not match the RE, or
        unsupported type was passed.
  """
  if not description:
    raise DataTableException("Description error: empty description given")

  if not isinstance(description, (types.StringTypes, tuple)):
    raise DataTableException("Description error: expected either string or "
                             "tuple, got %s." % type(description))

  if isinstance(description, types.StringTypes):
    # A bare string is shorthand for a 1-tuple: just the column id.
    description = (description,)

  # According to the tuple's length, we fill the keys
  # We verify everything is of type string
  for elem in description[:3]:
    if not isinstance(elem, types.StringTypes):
      raise DataTableException("Description error: expected tuple of "
                               "strings, current element of type %s." %
                               type(elem))
  # Defaults: label mirrors id, type is "string", no custom properties.
  desc_dict = {"id": description[0],
               "label": description[0],
               "type": "string",
               "custom_properties": {}}
  if len(description) > 1:
    desc_dict["type"] = description[1].lower()
    if len(description) > 2:
      desc_dict["label"] = description[2]
      if len(description) > 3:
        if not isinstance(description[3], dict):
          raise DataTableException("Description error: expected custom "
                                   "properties of type dict, current element "
                                   "of type %s." % type(description[3]))
        desc_dict["custom_properties"] = description[3]
        if len(description) > 4:
          raise DataTableException("Description error: tuple of length > 4")
  if desc_dict["type"] not in ["string", "number", "boolean",
                               "date", "datetime", "timeofday"]:
    raise DataTableException(
        "Description error: unsupported type '%s'" % desc_dict["type"])
  return desc_dict
@staticmethod
def TableDescriptionParser(table_description, depth=0):
  """Parses the table_description object for internal use.

  Parses the user-submitted table description into an internal format used
  by the Python DataTable class. Returns the flat list of parsed columns.

  Args:
    table_description: A description of the table which should comply
                       with one of the formats described below.
    depth: Optional. The depth of the first level in the current description.
           Used by recursive calls to this function.

  Returns:
    List of columns, where each column represented by a dictionary with the
    keys: id, label, type, depth, container which means the following:
    - id: the id of the column
    - name: The name of the column
    - type: The datatype of the elements in this column. Allowed types are
            described in ColumnTypeParser().
    - depth: The depth of this column in the table description
    - container: 'dict', 'iter' or 'scalar' for parsing the format easily.
    - custom_properties: The custom properties for this column.
    The returned description is flattened regardless of how it was given.

  Raises:
    DataTableException: Error in a column description or in the description
                        structure.

  Examples:
    A column description can be of the following forms:
     'id'
     ('id',)
     ('id', 'type')
     ('id', 'type', 'label')
     ('id', 'type', 'label', {'custom_prop1': 'custom_val1'})
     or as a dictionary:
     'id': 'type'
     'id': ('type',)
     'id': ('type', 'label')
     'id': ('type', 'label', {'custom_prop1': 'custom_val1'})
    If the type is not specified, we treat it as string.
    If no specific label is given, the label is simply the id.
    If no custom properties are given, we use an empty dictionary.

    input: [('a', 'date'), ('b', 'timeofday', 'b', {'foo': 'bar'})]
    output: [{'id': 'a', 'label': 'a', 'type': 'date',
              'depth': 0, 'container': 'iter', 'custom_properties': {}},
             {'id': 'b', 'label': 'b', 'type': 'timeofday',
              'depth': 0, 'container': 'iter',
              'custom_properties': {'foo': 'bar'}}]

    input: {'a': [('b', 'number'), ('c', 'string', 'column c')]}
    output: [{'id': 'a', 'label': 'a', 'type': 'string',
              'depth': 0, 'container': 'dict', 'custom_properties': {}},
             {'id': 'b', 'label': 'b', 'type': 'number',
              'depth': 1, 'container': 'iter', 'custom_properties': {}},
             {'id': 'c', 'label': 'column c', 'type': 'string',
              'depth': 1, 'container': 'iter', 'custom_properties': {}}]

    input:  {('a', 'number', 'column a'): { 'b': 'number', 'c': 'string'}}
    output: [{'id': 'a', 'label': 'column a', 'type': 'number',
              'depth': 0, 'container': 'dict', 'custom_properties': {}},
             {'id': 'b', 'label': 'b', 'type': 'number',
              'depth': 1, 'container': 'dict', 'custom_properties': {}},
             {'id': 'c', 'label': 'c', 'type': 'string',
              'depth': 1, 'container': 'dict', 'custom_properties': {}}]

    input: { ('w', 'string', 'word'): ('c', 'number', 'count') }
    output: [{'id': 'w', 'label': 'word', 'type': 'string',
              'depth': 0, 'container': 'dict', 'custom_properties': {}},
             {'id': 'c', 'label': 'count', 'type': 'number',
              'depth': 1, 'container': 'scalar', 'custom_properties': {}}]

    input: {'a': ('number', 'column a'), 'b': ('string', 'column b')}
    output: [{'id': 'a', 'label': 'column a', 'type': 'number', 'depth': 0,
              'container': 'dict', 'custom_properties': {}},
             {'id': 'b', 'label': 'column b', 'type': 'string', 'depth': 0,
              'container': 'dict', 'custom_properties': {}}

    NOTE: there might be ambiguity in the case of a dictionary representation
    of a single column. For example, the following description can be parsed
    in 2 different ways: {'a': ('b', 'c')} can be thought of a single column
    with the id 'a', of type 'b' and the label 'c', or as 2 columns: one named
    'a', and the other named 'b' of type 'c'. We choose the first option by
    default, and in case the second option is the right one, it is possible to
    make the key into a tuple (i.e. {('a',): ('b', 'c')}) or add more info
    into the tuple, thus making it look like this: {'a': ('b', 'c', 'b', {})}
    -- second 'b' is the label, and {} is the custom properties field.
  """
  # For the recursion step, we check for a scalar object (string or tuple)
  if isinstance(table_description, (types.StringTypes, tuple)):
    parsed_col = DataTable.ColumnTypeParser(table_description)
    parsed_col["depth"] = depth
    parsed_col["container"] = "scalar"
    return [parsed_col]

  # Since it is not scalar, table_description must be iterable.
  if not hasattr(table_description, "__iter__"):
    raise DataTableException("Expected an iterable object, got %s" %
                             type(table_description))
  if not isinstance(table_description, dict):
    # We expect a non-dictionary iterable item.
    columns = []
    for desc in table_description:
      parsed_col = DataTable.ColumnTypeParser(desc)
      parsed_col["depth"] = depth
      parsed_col["container"] = "iter"
      columns.append(parsed_col)
    if not columns:
      raise DataTableException("Description iterable objects should not"
                               " be empty.")
    return columns

  # The other case is a dictionary
  if not table_description:
    raise DataTableException("Empty dictionaries are not allowed inside"
                             " description")

  # To differentiate between the two cases of more levels below or this is
  # the most inner dictionary, we consider the number of keys (more then one
  # key is indication for most inner dictionary) and the type of the key and
  # value in case of only 1 key (if the type of key is string and the type of
  # the value is a tuple of 0-3 items, we assume this is the most inner
  # dictionary).
  # NOTE: this way of differentiating might create ambiguity. See docs.
  if (len(table_description) != 1 or
      (isinstance(table_description.keys()[0], types.StringTypes) and
       isinstance(table_description.values()[0], tuple) and
       len(table_description.values()[0]) < 4)):
    # This is the most inner dictionary. Parsing types.
    columns = []
    # We sort the items, equivalent to sort the keys since they are unique
    for key, value in sorted(table_description.items()):
      # We parse the column type as (key, type) or (key, type, label) using
      # ColumnTypeParser.
      if isinstance(value, tuple):
        parsed_col = DataTable.ColumnTypeParser((key,) + value)
      else:
        parsed_col = DataTable.ColumnTypeParser((key, value))
      parsed_col["depth"] = depth
      parsed_col["container"] = "dict"
      columns.append(parsed_col)
    return columns

  # This is an outer dictionary, must have at most one key.
  # Recurse on the single value with depth + 1 and prepend this column.
  parsed_col = DataTable.ColumnTypeParser(table_description.keys()[0])
  parsed_col["depth"] = depth
  parsed_col["container"] = "dict"
  return ([parsed_col] +
          DataTable.TableDescriptionParser(table_description.values()[0],
                                           depth=depth + 1))
@property
def columns(self):
  """Returns the parsed table description (list of column dictionaries)."""
  return self.__columns
def NumberOfRows(self):
  """Returns the number of rows in the current data stored in the table."""
  # Each entry of __data is one (row_values, custom_properties) pair.
  return len(self.__data)
def SetRowsCustomProperties(self, rows, custom_properties):
  """Attaches the given custom properties to one or more rows.

  Accepts either a single row index or an iterable of row indices, and
  assigns the same custom properties dictionary to every listed row
  (the dictionary is shared, not copied).

  Args:
    rows: The row, or rows, to set the custom properties for.
    custom_properties: A string to string dictionary of custom properties to
                       set for all rows.
  """
  if not hasattr(rows, "__iter__"):
    # A single row index was given; treat it as a one-element collection.
    rows = [rows]
  for row_index in rows:
    # Rows are stored as (values, custom_properties) pairs; keep the
    # values and swap in the new properties.
    row_values = self.__data[row_index][0]
    self.__data[row_index] = (row_values, custom_properties)
def LoadData(self, data, custom_properties=None):
  """Loads new rows to the data table, clearing existing rows.

  May also set the custom_properties for the added rows. The given custom
  properties dictionary specifies the dictionary that will be used for *all*
  given rows.

  Args:
    data: The rows that the table will contain.
    custom_properties: A dictionary of string to string to set as the custom
                       properties for all rows.
  """
  # Rebind (rather than clear in place) so previously handed-out references
  # to the old row list are unaffected, then delegate to AppendData.
  self.__data = []
  self.AppendData(data, custom_properties)
def AppendData(self, data, custom_properties=None):
  """Appends new rows to the table without clearing existing ones.

  The data must be consistent with the table schema passed to __init__();
  see CoerceValue() for acceptable cell value forms and the class
  documentation for examples of schema and data values.

  Args:
    data: The row(s) to add to the table, conforming to the table
          description format.
    custom_properties: A dictionary of string to string, representing the
                       custom properties to add to all the rows.

  Raises:
    DataTableException: The data structure does not match the description.
  """
  # For a deep (multi-level) schema, _InnerAppendData walks every level
  # itself; for a flat schema (maximal depth 0), "data" is just an
  # iterable of independent rows.
  if self.__columns[-1]["depth"]:
    self._InnerAppendData(({}, custom_properties), data, 0)
  else:
    for single_row in data:
      self._InnerAppendData(({}, custom_properties), single_row, 0)
def _InnerAppendData(self, prev_col_values, data, col_index):
  """Inner function to assist LoadData.

  Recursively walks the (possibly nested) data structure, accumulating
  column values, and appends completed rows to self.__data.

  Args:
    prev_col_values: A (values_dict, custom_properties) pair holding the
        column values collected from the outer levels so far.
    data: The remaining data for the columns from col_index onwards.
    col_index: Index into self.__columns of the next column to fill.

  Raises:
    DataTableException: The data structure does not match the description.
  """
  # We first check that col_index has not exceeded the columns size
  if col_index >= len(self.__columns):
    raise DataTableException("The data does not match description, too deep")

  # Dealing with the scalar case, the data is the last value.
  if self.__columns[col_index]["container"] == "scalar":
    prev_col_values[0][self.__columns[col_index]["id"]] = data
    self.__data.append(prev_col_values)
    return

  if self.__columns[col_index]["container"] == "iter":
    if not hasattr(data, "__iter__") or isinstance(data, dict):
      raise DataTableException("Expected iterable object, got %s" %
                               type(data))
    # We only need to insert the rest of the columns
    # If there are less items than expected, we only add what there is.
    for value in data:
      if col_index >= len(self.__columns):
        raise DataTableException("Too many elements given in data")
      prev_col_values[0][self.__columns[col_index]["id"]] = value
      col_index += 1
    self.__data.append(prev_col_values)
    return

  # We know the current level is a dictionary, we verify the type.
  if not isinstance(data, dict):
    raise DataTableException("Expected dictionary at current level, got %s" %
                             type(data))
  # We check if this is the last level
  if self.__columns[col_index]["depth"] == self.__columns[-1]["depth"]:
    # We need to add the keys in the dictionary as they are
    for col in self.__columns[col_index:]:
      if col["id"] in data:
        prev_col_values[0][col["id"]] = data[col["id"]]
    self.__data.append(prev_col_values)
    return

  # We have a dictionary in an inner depth level.
  if not data.keys():
    # In case this is an empty dictionary, we add a record with the columns
    # filled only until this point.
    self.__data.append(prev_col_values)
  else:
    # Each key becomes this column's value; recurse per key with a copy of
    # the accumulated values so sibling keys do not clobber each other.
    for key in sorted(data):
      col_values = dict(prev_col_values[0])
      col_values[self.__columns[col_index]["id"]] = key
      self._InnerAppendData((col_values, prev_col_values[1]),
                            data[key], col_index + 1)
def _PreparedData(self, order_by=()):
  """Prepares the data for enumeration - sorting it by order_by.

  Args:
    order_by: Optional. Specifies the name of the column(s) to sort by, and
              (optionally) which direction to sort in. Default sort direction
              is asc. Following formats are accepted:
              "string_col_name"  -- For a single key in default (asc) order.
              ("string_col_name", "asc|desc") -- For a single key.
              [("col_1","asc|desc"), ("col_2","asc|desc")] -- For more than
                  one column, an array of tuples of (col_name, "asc|desc").

  Returns:
    The data sorted by the keys given.

  Raises:
    DataTableException: Sort direction not in 'asc' or 'desc'
  """
  if not order_by:
    # Nothing to sort by; return the rows in insertion order.
    return self.__data

  proper_sort_keys = []
  # A bare column name, or a single ("col", "asc|desc") pair, is wrapped
  # so the loop below can treat order_by uniformly as a sequence of keys.
  if isinstance(order_by, types.StringTypes) or (
      isinstance(order_by, tuple) and len(order_by) == 2 and
      order_by[1].lower() in ["asc", "desc"]):
    order_by = (order_by,)
  for key in order_by:
    if isinstance(key, types.StringTypes):
      proper_sort_keys.append((key, 1))
    elif (isinstance(key, (list, tuple)) and len(key) == 2 and
          key[1].lower() in ("asc", "desc")):
      # +1 multiplier for ascending, -1 for descending.
      proper_sort_keys.append((key[0], key[1].lower() == "asc" and 1 or -1))
    else:
      raise DataTableException("Expected tuple with second value: "
                               "'asc' or 'desc'")

  def SortCmpFunc(row1, row2):
    """cmp function for sorted. Compares by keys and 'asc'/'desc' keywords."""
    for key, asc_mult in proper_sort_keys:
      cmp_result = asc_mult * cmp(row1[0].get(key), row2[0].get(key))
      if cmp_result:
        return cmp_result
    return 0

  return sorted(self.__data, cmp=SortCmpFunc)
def ToJSCode(self, name, columns_order=None, order_by=()):
  """Writes the data table as a JS code string.

  This method writes a string of JS code that can be run to
  generate a DataTable with the specified data. Typically used for debugging
  only.

  Args:
    name: The name of the table. The name would be used as the DataTable's
          variable name in the created JS code.
    columns_order: Optional. Specifies the order of columns in the
                   output table. Specify a list of all column IDs in the order
                   in which you want the table created.
                   Note that you must list all column IDs in this parameter,
                   if you use it.
    order_by: Optional. Specifies the name of the column(s) to sort by.
              Passed as is to _PreparedData.

  Returns:
    A string of JS code that, when run, generates a DataTable with the given
    name and the data stored in the DataTable object.
    Example result:
      "var tab1 = new google.visualization.DataTable();
       tab1.addColumn("string", "a", "a");
       tab1.addColumn("number", "b", "b");
       tab1.addColumn("boolean", "c", "c");
       tab1.addRows(10);
       tab1.setCell(0, 0, "a");
       tab1.setCell(0, 1, 1, null, {"foo": "bar"});
       tab1.setCell(0, 2, true);
       ...
       tab1.setCell(9, 0, "c");
       tab1.setCell(9, 1, 3, "3$");
       tab1.setCell(9, 2, false);"

  Raises:
    DataTableException: The data does not match the type.
  """
  encoder = DataTableJSONEncoder()

  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  # Map from column id to its parsed description, for fast lookup below.
  col_dict = dict([(col["id"], col) for col in self.__columns])

  # We first create the table with the given name
  jscode = "var %s = new google.visualization.DataTable();\n" % name
  if self.custom_properties:
    jscode += "%s.setTableProperties(%s);\n" % (
        name, encoder.encode(self.custom_properties))

  # We add the columns to the table
  for i, col in enumerate(columns_order):
    jscode += "%s.addColumn(%s, %s, %s);\n" % (
        name,
        encoder.encode(col_dict[col]["type"]),
        encoder.encode(col_dict[col]["label"]),
        encoder.encode(col_dict[col]["id"]))
    if col_dict[col]["custom_properties"]:
      jscode += "%s.setColumnProperties(%d, %s);\n" % (
          name, i, encoder.encode(col_dict[col]["custom_properties"]))
  jscode += "%s.addRows(%d);\n" % (name, len(self.__data))

  # We now go over the data and add each row
  for (i, (row, cp)) in enumerate(self._PreparedData(order_by)):
    # We add all the elements of this row by their order
    for (j, col) in enumerate(columns_order):
      if col not in row or row[col] is None:
        # Missing / null cells are simply left unset in the JS table.
        continue
      value = self.CoerceValue(row[col], col_dict[col]["type"])
      if isinstance(value, tuple):
        cell_cp = ""
        if len(value) == 3:
          cell_cp = ", %s" % encoder.encode(row[col][2])
        # We have a formatted value or custom property as well
        jscode += ("%s.setCell(%d, %d, %s, %s%s);\n" %
                   (name, i, j,
                    self.EscapeForJSCode(encoder, value[0]),
                    self.EscapeForJSCode(encoder, value[1]), cell_cp))
      else:
        jscode += "%s.setCell(%d, %d, %s);\n" % (
            name, i, j, self.EscapeForJSCode(encoder, value))
    if cp:
      jscode += "%s.setRowProperties(%d, %s);\n" % (
          name, i, encoder.encode(cp))
  return jscode
def ToHtml(self, columns_order=None, order_by=()):
  """Writes the data table as an HTML table code string.

  Args:
    columns_order: Optional. Specifies the order of columns in the
                   output table. Specify a list of all column IDs in the order
                   in which you want the table created.
                   Note that you must list all column IDs in this parameter,
                   if you use it.
    order_by: Optional. Specifies the name of the column(s) to sort by.
              Passed as is to _PreparedData.

  Returns:
    An HTML table code string.
    Example result (the result is without the newlines):
     <html><body><table border="1">
      <thead><tr><th>a</th><th>b</th><th>c</th></tr></thead>
      <tbody>
       <tr><td>1</td><td>"z"</td><td>2</td></tr>
       <tr><td>"3$"</td><td>"w"</td><td></td></tr>
      </tbody>
     </table></body></html>

  Raises:
    DataTableException: The data does not match the type.
  """
  table_template = "<html><body><table border=\"1\">%s</table></body></html>"
  columns_template = "<thead><tr>%s</tr></thead>"
  rows_template = "<tbody>%s</tbody>"
  row_template = "<tr>%s</tr>"
  header_cell_template = "<th>%s</th>"
  cell_template = "<td>%s</td>"

  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  # Build the header row from the column labels (HTML-escaped).
  columns_list = []
  for col in columns_order:
    columns_list.append(header_cell_template %
                        cgi.escape(col_dict[col]["label"]))
  columns_html = columns_template % "".join(columns_list)

  rows_list = []
  # We now go over the data and add each row
  for row, unused_cp in self._PreparedData(order_by):
    cells_list = []
    # We add all the elements of this row by their order
    for col in columns_order:
      # For empty string we want empty quotes ("").
      value = ""
      if col in row and row[col] is not None:
        value = self.CoerceValue(row[col], col_dict[col]["type"])
      if isinstance(value, tuple):
        # We have a formatted value and we're going to use it
        cells_list.append(cell_template % cgi.escape(self.ToString(value[1])))
      else:
        cells_list.append(cell_template % cgi.escape(self.ToString(value)))
    rows_list.append(row_template % "".join(cells_list))
  rows_html = rows_template % "".join(rows_list)

  return table_template % (columns_html + rows_html)
def ToCsv(self, columns_order=None, order_by=(), separator=","):
  """Writes the data table as a CSV string.

  Output is encoded in UTF-8 because the Python "csv" module can't handle
  Unicode properly according to its documentation.

  Args:
    columns_order: Optional. Specifies the order of columns in the
                   output table. Specify a list of all column IDs in the order
                   in which you want the table created.
                   Note that you must list all column IDs in this parameter,
                   if you use it.
    order_by: Optional. Specifies the name of the column(s) to sort by.
              Passed as is to _PreparedData.
    separator: Optional. The separator to use between the values.

  Returns:
    A CSV string representing the table.
    Example result:
     'a','b','c'
     1,'z',2
     3,'w',''

  Raises:
    DataTableException: The data does not match the type.
  """
  # The csv module writes into this in-memory buffer.
  csv_buffer = cStringIO.StringIO()
  writer = csv.writer(csv_buffer, delimiter=separator)

  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  # Header row: the column labels, UTF-8 encoded.
  writer.writerow([col_dict[col]["label"].encode("utf-8")
                   for col in columns_order])

  # We now go over the data and add each row
  for row, unused_cp in self._PreparedData(order_by):
    cells_list = []
    # We add all the elements of this row by their order
    for col in columns_order:
      value = ""
      if col in row and row[col] is not None:
        value = self.CoerceValue(row[col], col_dict[col]["type"])
      if isinstance(value, tuple):
        # We have a formatted value. Using it only for date/time types.
        if col_dict[col]["type"] in ["date", "datetime", "timeofday"]:
          cells_list.append(self.ToString(value[1]).encode("utf-8"))
        else:
          cells_list.append(self.ToString(value[0]).encode("utf-8"))
      else:
        cells_list.append(self.ToString(value).encode("utf-8"))
    writer.writerow(cells_list)
  return csv_buffer.getvalue()
def ToTsvExcel(self, columns_order=None, order_by=()):
  """Returns the table as tab-separated text readable by MS Excel.

  Args:
    columns_order: Delegated to ToCsv.
    order_by: Delegated to ToCsv.

  Returns:
    A tab-separated little endian UTF16 file representing the table.
  """
  csv_output = self.ToCsv(columns_order, order_by, separator="\t")
  # ToCsv emits UTF-8; Excel expects little-endian UTF-16, so transcode.
  return csv_output.decode("utf-8").encode("UTF-16LE")
def _ToJSonObj(self, columns_order=None, order_by=()):
  """Returns an object suitable to be converted to JSON.

  Args:
    columns_order: Optional. A list of all column IDs in the order in which
                   you want them created in the output table. If specified,
                   all column IDs must be present.
    order_by: Optional. Specifies the name of the column(s) to sort by.
              Passed as is to _PreparedData().

  Returns:
    A dictionary object for use by ToJSon or ToJSonResponse.
  """
  if columns_order is None:
    columns_order = [col["id"] for col in self.__columns]
  col_dict = dict([(col["id"], col) for col in self.__columns])

  # Creating the column JSON objects
  col_objs = []
  for col_id in columns_order:
    col_obj = {"id": col_dict[col_id]["id"],
               "label": col_dict[col_id]["label"],
               "type": col_dict[col_id]["type"]}
    if col_dict[col_id]["custom_properties"]:
      # "p" is the wire-format key for custom properties.
      col_obj["p"] = col_dict[col_id]["custom_properties"]
    col_objs.append(col_obj)

  # Creating the rows jsons
  row_objs = []
  for row, cp in self._PreparedData(order_by):
    cell_objs = []
    for col in columns_order:
      value = self.CoerceValue(row.get(col, None), col_dict[col]["type"])
      if value is None:
        cell_obj = None
      elif isinstance(value, tuple):
        # "v" = value, "f" = formatted value, "p" = cell custom properties.
        cell_obj = {"v": value[0]}
        if len(value) > 1 and value[1] is not None:
          cell_obj["f"] = value[1]
        if len(value) == 3:
          cell_obj["p"] = value[2]
      else:
        cell_obj = {"v": value}
      cell_objs.append(cell_obj)
    row_obj = {"c": cell_objs}
    if cp:
      row_obj["p"] = cp
    row_objs.append(row_obj)

  json_obj = {"cols": col_objs, "rows": row_objs}
  if self.custom_properties:
    json_obj["p"] = self.custom_properties

  return json_obj
def ToJSon(self, columns_order=None, order_by=()):
  """Returns a JSON string usable by a JS DataTable constructor.

  The returned string can be passed directly into a
  google.visualization.DataTable constructor on the page that hosts the
  visualization, e.g.:

    google.setOnLoadCallback(drawTable);
    function drawTable() {
      var data = new google.visualization.DataTable(_my_JSon_string, 0.6);
      myTable.draw(data);
    }

  Args:
    columns_order: Optional. A list of all column IDs in the order in which
                   they should appear in the output table; if given, every
                   column ID must be listed.
    order_by: Optional. Column name(s) to sort by, passed as is to
              _PreparedData().

  Returns:
    A UTF-8 encoded JSON constructor string describing the columns, rows
    and custom properties of this table.

  Raises:
    DataTableException: The data does not match the type.
  """
  json_obj = self._ToJSonObj(columns_order, order_by)
  return DataTableJSONEncoder().encode(json_obj).encode("utf-8")
def ToJSonResponse(self, columns_order=None, order_by=(), req_id=0,
                   response_handler="google.visualization.Query.setResponse"):
  """Writes a table as a JSON response that can be returned as-is to a client.

  Produces the wire-format reply to a Google Visualization API query:
  a call to the given response handler whose single argument carries the
  protocol version, the request id, a status and the table itself. The
  string can be processed by the calling page to deliver a data table to
  a visualization hosted on a different page.

  Args:
    columns_order: Optional. Passed straight to self._ToJSonObj().
    order_by: Optional. Passed straight to self._ToJSonObj().
    req_id: Optional. The response id, as retrieved by the request.
    response_handler: Optional. The response handler, as retrieved by the
                      request.

  Returns:
    A JSON response string to be received by the JS visualization Query
    object, e.g. (newlines added for readability):

     google.visualization.Query.setResponse({
        'version':'0.6', 'reqId':'0', 'status':'OK',
        'table': {cols: [...], rows: [...]}});
  """
  table_obj = self._ToJSonObj(columns_order, order_by)
  response_obj = {
      "version": "0.6",
      "reqId": str(req_id),
      "status": "ok",
      "table": table_obj,
  }
  json_response = DataTableJSONEncoder().encode(response_obj)
  return "%s(%s);" % (response_handler, json_response.encode("utf-8"))
def ToResponse(self, columns_order=None, order_by=(), tqx=""):
  """Writes the right response according to the request string passed in tqx.

  Parses the tqx request string (the format defined in the Google
  Visualization data-source protocol) and dispatches on its "out"
  parameter: ToJSonResponse() for "json" (the default), ToCsv() for "csv",
  ToHtml() for "html" and ToTsvExcel() for "tsv-excel", forwarding the
  remaining relevant request keys.

  Args:
    columns_order: Optional. Passed as is to the relevant response function.
    order_by: Optional. Passed as is to the relevant response function.
    tqx: Optional. The request string as received by HTTP GET, in the
         format "key1:value1;key2:value2...". Every key has a default, so
         an empty string results in a default ToJSonResponse() call.

  Returns:
    A response string, as returned by the relevant response function.

  Raises:
    DataTableException: One of the parameters passed in tqx is not supported.
  """
  tqx_dict = {}
  if tqx:
    tqx_dict = dict(opt.split(":") for opt in tqx.split(";"))
  # Only protocol version 0.6 is supported.
  if tqx_dict.get("version", "0.6") != "0.6":
    raise DataTableException(
        "Version (%s) passed by request is not supported."
        % tqx_dict["version"])

  out = tqx_dict.get("out", "json")
  if out == "json":
    response_handler = tqx_dict.get("responseHandler",
                                    "google.visualization.Query.setResponse")
    return self.ToJSonResponse(columns_order, order_by,
                               req_id=tqx_dict.get("reqId", 0),
                               response_handler=response_handler)
  if out == "html":
    return self.ToHtml(columns_order, order_by)
  if out == "csv":
    return self.ToCsv(columns_order, order_by)
  if out == "tsv-excel":
    return self.ToTsvExcel(columns_order, order_by)
  raise DataTableException(
      "'out' parameter: '%s' is not supported" % out)
| apache-2.0 |
ufal/neuralmonkey | neuralmonkey/encoders/imagenet_encoder.py | 1 | 9426 | """Pre-trained ImageNet networks."""
import sys
from typing import Any, Callable, Dict, NamedTuple, Optional, Tuple
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as tf_slim
# pylint: disable=unused-import
# Workaround of missing slim's import
# see https://github.com/tensorflow/tensorflow/issues/6064
import tensorflow.contrib.slim.nets
# pylint: enable=unused-import
from typeguard import check_argument_types
from neuralmonkey.dataset import Dataset
from neuralmonkey.decorators import tensor
from neuralmonkey.model.feedable import FeedDict
from neuralmonkey.model.model_part import ModelPart
from neuralmonkey.model.parameterized import InitializerSpecs
from neuralmonkey.model.stateful import SpatialStatefulWithOutput
class ImageNetSpec(NamedTuple(
"ImageNetSpec",
[("scope", Callable),
("image_size", Tuple[int, int]),
("apply_net", Callable)])):
"""Specification of the Imagenet encoder.
Do not use this object directly, instead, use one of the ``get_*``functions
in this module.
Attributes:
scope: The variable scope of the network to use.
image_size: A tuple of two integers giving the image width and height
in pixels.
apply_net: The function that receives an image and applies the network.
"""
# pylint: disable=import-error
def get_alexnet() -> ImageNetSpec:
    """Build the specification of the AlexNet v2 encoder.

    Returns:
        An ``ImageNetSpec`` with the AlexNet v2 arg scope, the expected
        224x224 input resolution, and a function applying the network in
        inference mode.
    """
    # The slim module is nets/alexnet.py (which defines alexnet_v2).
    # Importing "nets.alexnet_v2" raises ImportError, and it would not
    # define the "nets.alexnet" attribute referenced below either.
    import nets.alexnet
    return ImageNetSpec(
        scope=nets.alexnet.alexnet_v2_arg_scope,
        image_size=(224, 224),
        apply_net=lambda image: nets.alexnet.alexnet_v2(
            image, is_training=False))
def get_vgg_by_type(vgg_type: str) -> Callable[[], ImageNetSpec]:
    """Return a zero-argument factory for the given VGG variant.

    Args:
        vgg_type: Either "vgg16" or "vgg19".
    """
    def get_vgg() -> ImageNetSpec:
        import nets.vgg
        known_nets = {"vgg16": nets.vgg.vgg_16, "vgg19": nets.vgg.vgg_19}
        if vgg_type not in known_nets:
            raise ValueError(
                "Unknown type of VGG net: {}".format(vgg_type))
        net_fn = known_nets[vgg_type]
        return ImageNetSpec(
            scope=nets.vgg.vgg_arg_scope,
            image_size=(224, 224),
            apply_net=lambda image: net_fn(
                image, is_training=False, dropout_keep_prob=1.0))
    return get_vgg
def get_resnet_by_type(resnet_type: str) -> Callable[[], ImageNetSpec]:
    """Return a zero-argument factory for the given ResNet v2 variant.

    Args:
        resnet_type: One of "resnet_50", "resnet_101" or "resnet_152".
    """
    def get_resnet() -> ImageNetSpec:
        import nets.resnet_v2
        variants = {
            "resnet_50": nets.resnet_v2.resnet_v2_50,
            "resnet_101": nets.resnet_v2.resnet_v2_101,
            "resnet_152": nets.resnet_v2.resnet_v2_152,
        }
        if resnet_type not in variants:
            raise ValueError(
                "Unknown type of ResNet: {}".format(resnet_type))
        net_fn = variants[resnet_type]
        # NOTE(review): 229 looks like a typo for the usual 299, but it is
        # kept exactly as in the original -- confirm before changing.
        return ImageNetSpec(
            scope=nets.resnet_v2.resnet_arg_scope,
            image_size=(229, 229),
            apply_net=lambda image: net_fn(
                image, is_training=False, global_pool=False))
    return get_resnet
# pylint: enable=import-error
# Mapping from the TF-Slim network identifier to a zero-argument factory
# returning the corresponding ImageNetSpec.  The keys are the values
# accepted as the ``network_type`` argument of ``ImageNet``.
SUPPORTED_NETWORKS = {
    "alexnet_v2": get_alexnet,
    "vgg_16": get_vgg_by_type("vgg16"),
    "vgg_19": get_vgg_by_type("vgg19"),
    "resnet_v2_50": get_resnet_by_type("resnet_50"),
    "resnet_v2_101": get_resnet_by_type("resnet_101"),
    "resnet_v2_152": get_resnet_by_type("resnet_152")
}
class ImageNet(ModelPart, SpatialStatefulWithOutput):
    """Pre-trained ImageNet network.

    We use the ImageNet networks as they are in the tensorflow/models
    repository (https://github.com/tensorflow/models). In order to use them,
    you need to clone the repository and configure the ImageNet object such
    that it has a full path to "research/slim" in the repository. Visit
    https://github.com/tensorflow/models/tree/master/research/slim for
    information about checkpoints of the pre-trained models.
    """
    # pylint: disable=too-many-arguments
    def __init__(self,
                 name: str,
                 data_id: str,
                 network_type: str,
                 slim_models_path: str,
                 load_checkpoint: str = None,
                 spatial_layer: str = None,
                 encoded_layer: str = None,
                 initializers: InitializerSpecs = None) -> None:
        """Initialize pre-trained ImageNet network.

        Args:
            name: Name of the model part (the ImageNet network, will be in its
                scope, independently on `name`).
            data_id: Id of series with images (list of 3D numpy arrays)
            network_type: Identifier of ImageNet network from TFSlim.
            slim_models_path: Path to Slim models in the tensorflow/models
                repository (the "research/slim" directory).
            spatial_layer: String identifier of the convolutional map
                (model's endpoint). Check
                TFSlim documentation for end point specifications.
            encoded_layer: String id of the network layer that will be used as
                input of a decoder. `None` means averaging the convolutional
                maps.
            load_checkpoint: Checkpoint file from which the pre-trained network
                is loaded.
        """
        check_argument_types()
        ModelPart.__init__(self, name, load_checkpoint=load_checkpoint,
                           initializers=initializers, save_checkpoint=None)
        # Make the cloned slim "nets" package importable for the get_*
        # factories referenced from SUPPORTED_NETWORKS.
        sys.path.insert(0, slim_models_path)
        self.data_id = data_id
        self.network_type = network_type
        self.spatial_layer = spatial_layer
        self.encoded_layer = encoded_layer
        if self.network_type not in SUPPORTED_NETWORKS:
            raise ValueError(
                "Network '{}' is not among the supported ones ({})".format(
                    self.network_type, ", ".join(SUPPORTED_NETWORKS.keys())))
        # Calling the factory imports the slim module and builds the spec.
        self.net_specification = SUPPORTED_NETWORKS[self.network_type]()
        self.height, self.width = self.net_specification.image_size
    @property
    def input_types(self) -> Dict[str, tf.DType]:
        # Images are fed as float32 arrays.
        return {self.data_id: tf.float32}
    @property
    def input_shapes(self) -> Dict[str, tf.TensorShape]:
        # Batch of RGB images at the network's native resolution.
        return {
            self.data_id: tf.TensorShape([None, self.height, self.width, 3])}
    @tensor
    def input_image(self) -> tf.Tensor:
        return self.dataset[self.data_id]
    @tensor
    def end_points(self) -> Any:
        # Apply the slim network under its arg scope; end_points maps
        # endpoint names to the corresponding tensors.
        with tf_slim.arg_scope(self.net_specification.scope()):
            _, end_points = self.net_specification.apply_net(self.input_image)
        if (self.spatial_layer is not None
                and self.spatial_layer not in end_points):
            raise ValueError(
                "Network '{}' does not contain endpoint '{}'".format(
                    self.network_type, self.spatial_layer))
        if self.spatial_layer is not None:
            net_output = end_points[self.spatial_layer]
            # A convolutional map must be rank 4: (batch, height, width,
            # channels).
            if len(net_output.get_shape()) != 4:
                raise ValueError(
                    "Endpoint '{}' for network '{}' cannot be a convolutional "
                    " map, its dimensionality is: {}.".format(
                        self.spatial_layer, self.network_type,
                        ", ".join(
                            [str(d.value) for d in net_output.get_shape()])))
        if (self.encoded_layer is not None
                and self.encoded_layer not in end_points):
            raise ValueError(
                "Network '{}' does not contain endpoint '{}'.".format(
                    self.network_type, self.encoded_layer))
        return end_points
    @tensor
    def spatial_states(self) -> Optional[tf.Tensor]:
        # Convolutional map of the configured endpoint, with gradients
        # stopped -- the pre-trained network is kept frozen.
        if self.spatial_layer is None:
            return None
        # pylint: disable=unsubscriptable-object
        net_output = self.end_points[self.spatial_layer]
        # pylint: enable=unsubscriptable-object
        net_output = tf.stop_gradient(net_output)
        return net_output
    @tensor
    def spatial_mask(self) -> tf.Tensor:
        # All-ones mask over the spatial map (no padding in images).
        if self.spatial_layer is None:
            return None
        mask = tf.ones(tf.shape(self.spatial_states)[:3])
        # pylint: disable=no-member
        mask.set_shape(self.spatial_states.get_shape()[:3])
        # pylint: enable=no-member
        return mask
    @tensor
    def output(self) -> tf.Tensor:
        # Either mean-pool the spatial map, or squeeze the configured
        # encoded endpoint (expected shape (batch, 1, 1, features)).
        if self.encoded_layer is None:
            return tf.reduce_mean(self.spatial_states, [1, 2])
        # pylint: disable=unsubscriptable-object
        encoded = tf.squeeze(self.end_points[self.encoded_layer], [1, 2])
        # pylint: enable=unsubscriptable-object
        encoded = tf.stop_gradient(encoded)
        return encoded
    def _init_saver(self) -> None:
        # The saver must cover both this model part's variables and the
        # slim network's variables (which live in their own scope).
        if not self._saver:
            with tf.variable_scope(self.name, reuse=True):
                local_variables = tf.get_collection(
                    tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name)
                slim_variables = tf.get_collection(
                    tf.GraphKeys.GLOBAL_VARIABLES, scope=self.network_type)
                self._saver = tf.train.Saver(
                    var_list=local_variables + slim_variables)
    def feed_dict(self, dataset: Dataset, train: bool = False) -> FeedDict:
        fd = ModelPart.feed_dict(self, dataset, train)
        images = np.array(dataset.get_series(self.data_id))
        # Images must already be resized to the network's resolution.
        assert images.shape[1:] == (self.height, self.width, 3)
        fd[self.input_image] = images
        return fd
| bsd-3-clause |
jayceyxc/hue | desktop/core/ext-py/thriftpy-0.3.9/thriftpy/parser/__init__.py | 39 | 2151 | # -*- coding: utf-8 -*-
"""
thriftpy.parser
~~~~~~~~~~~~~~~
Thrift parser using ply
"""
from __future__ import absolute_import
import os
import sys
from .parser import parse, parse_fp
def load(path, module_name=None, include_dirs=None, include_dir=None):
    """Parse a thrift file and expose it as a module.

    The module loaded (and the objects inside it) may only be pickled when
    ``module_name`` was provided, because pickling requires the module to
    be registered in ``sys.modules``.

    Note: ``include_dir`` will be deprecated in the future; use
    ``include_dirs`` instead. If ``include_dir`` was provided (not None),
    it will be appended to ``include_dirs``.
    """
    thrift = parse(path, module_name, include_dirs=include_dirs,
                   include_dir=include_dir)
    # Only register the module when a (non-empty) name was supplied.
    if module_name:
        sys.modules[module_name] = thrift
    return thrift
def load_fp(source, module_name):
    """Parse a thrift file-like object and expose it as a module."""
    module = parse_fp(source, module_name)
    sys.modules[module_name] = module
    return module
def _import_module(import_name):
if '.' in import_name:
module, obj = import_name.rsplit('.', 1)
return getattr(__import__(module, None, None, [obj]), obj)
else:
return __import__(import_name)
def load_module(fullname):
    """Load a thrift file as the module ``fullname``.

    ``fullname`` must end with ``'_thrift'``; the loader strips that suffix,
    appends ``'.thrift'`` and uses the result as the filename of the real
    thrift file (resolved relative to the parent package, if there is one).
    """
    if not fullname.endswith("_thrift"):
        raise ImportError(
            "ThriftPy can only load module with '_thrift' suffix")
    if fullname in sys.modules:
        return sys.modules[fullname]
    parent, _, leaf = fullname.rpartition('.')
    if parent:
        # Resolve the thrift file next to the parent package's source file.
        parent_module = _import_module(parent)
        parent_dir = os.path.dirname(os.path.abspath(parent_module.__file__))
        stem = os.path.join(parent_dir, leaf)
    else:
        stem = fullname
    # Drop the 7-character "_thrift" suffix and add the real extension.
    thrift_file = "{0}.thrift".format(stem[:-7])
    module = load(thrift_file, module_name=fullname)
    sys.modules[fullname] = module
    return module
| apache-2.0 |
mrkipling/maraschino | maraschino/noneditable.py | 6 | 2463 | # -*- coding: utf-8 -*-
"""Util funtions for XBMC server settings."""
from maraschino import logger
from maraschino.database import db_session
from maraschino.tools import using_auth, get_setting_value
from maraschino.models import Module, Setting, XbmcServer
def server_settings():
    """Return connection settings for the active XBMC server instance.

    Falls back to the first configured server when the active one cannot be
    loaded, and returns a dict of ``None`` values when no server is
    configured at all.
    """
    # query all configured XBMC servers from the db
    servers = XbmcServer.query.order_by(XbmcServer.position)
    if servers.count() == 0:
        # No server configured yet: return an empty settings skeleton.
        # 'mac_address' is included so callers see a consistent key set.
        return {
            'hostname': None,
            'port': None,
            'username': None,
            'password': None,
            'mac_address': None,
        }
    active_server = get_setting_value('active_server')
    # if active server is not defined, persist the first server as active
    if not active_server:
        active_server = Setting('active_server', servers.first().id)
        db_session.add(active_server)
        db_session.commit()
        # NOTE(review): after this branch active_server is a Setting row,
        # not an id -- servers.get() below may fall into the except branch
        # on the first call; verify against the Setting model.
    try:
        server = servers.get(active_server)
    except Exception:
        # Narrowed from a bare except; lookup stays best-effort by design.
        logger.log('Could not retrieve active server, falling back on first entry', 'WARNING')
        server = servers.first()
    return {
        'hostname': server.hostname,
        'port': server.port,
        'username': server.username,
        'password': server.password,
        'mac_address': server.mac_address,
    }
def server_username_password():
    """Return "username[:password]@" credentials for the active XBMC server.

    Returns an empty string when no username is configured; the password
    part is omitted when only a username is set.
    """
    server = server_settings()
    # `is None` instead of `!= None`: identity comparison is the correct
    # idiom for the missing-value sentinel.
    if server['username'] is None:
        return ''
    credentials = server['username']
    if server['password'] is not None:
        credentials += ':' + server['password']
    return credentials + '@'
def server_address():
    """Get server address with username, password, hostname and port.

    The format is as following: http://username:password@hostname:port
    """
    server = server_settings()
    if not server['hostname'] and not server['port']:
        return None
    credentials = server_username_password()
    return 'http://%s%s:%s' % (credentials, server['hostname'], server['port'])
def server_api_address():
    """Get the JSON-RPC endpoint URL of the active XBMC server."""
    address = server_address()
    if address is None:
        return None
    return '%s/jsonrpc' % address
def safe_server_address():
    """Return the server URL, embedding credentials only when auth is on."""
    if using_auth():
        return server_address()
    server = server_settings()
    hostname, port = server['hostname'], server['port']
    if hostname or port:
        return 'http://%s:%s' % (hostname, port)
    return None
| mit |
yoava333/servo | tests/wpt/web-platform-tests/tools/pytest/_pytest/genscript.py | 191 | 4129 | """ (deprecated) generate a single-file self-contained version of pytest """
import os
import sys
import pkgutil
import py
import _pytest
def find_toplevel(name):
    """Locate package directory or module file ``name`` on sys.path."""
    for entry in sys.path:
        base = py.path.local(entry)
        candidate_dir = base.join(name)
        if candidate_dir.check(dir=1):
            return candidate_dir
        candidate_mod = base.join("%s.py" % name)
        if candidate_mod.check(file=1):
            return candidate_mod
    raise LookupError(name)
def pkgname(toplevel, rootpath, path):
    """Dotted module name of ``path`` relative to ``rootpath``."""
    relative_parts = path.parts()[len(rootpath.parts()):]
    segments = [toplevel] + [part.purebasename for part in relative_parts]
    return '.'.join(segments)
def pkg_to_mapping(name):
    """Map the dotted module names under ``name`` to their source code."""
    toplevel = find_toplevel(name)
    name2src = {}
    if toplevel.check(file=1):
        # a single module: key by its bare name
        name2src[toplevel.purebasename] = toplevel.read()
    else:
        # a package: walk every .py file below it
        for pyfile in toplevel.visit('*.py'):
            name2src[pkgname(name, toplevel, pyfile)] = pyfile.read()
    # with wheels the py source code might not be installed and the
    # resulting genscript would be useless, so just bail out.
    assert name2src, "no source code found for %r at %r" %(name, toplevel)
    return name2src
def compress_mapping(mapping):
    """Serialize ``mapping`` to an ASCII string (pickle -> zlib -> base64)."""
    import base64, pickle, zlib
    data = pickle.dumps(mapping, 2)
    data = zlib.compress(data, 9)
    # base64.encodestring was deprecated in Python 3.1 and removed in 3.9;
    # use its replacement encodebytes, falling back for Python 2.
    encode = getattr(base64, 'encodebytes', None) or base64.encodestring
    data = encode(data)
    data = data.decode('ascii')
    return data
def compress_packages(names):
    """Combine the source mappings of all ``names`` and compress them."""
    combined = {}
    for pkg in names:
        combined.update(pkg_to_mapping(pkg))
    return compress_mapping(combined)
def generate_script(entry, packages):
    """Render the standalone-script template with ``entry`` and sources."""
    sources = compress_packages(packages)
    template = py.path.local(__file__).dirpath().join('standalonetemplate.py')
    script = template.read()
    script = script.replace('@SOURCES@', sources)
    return script.replace('@ENTRY@', entry)
def pytest_addoption(parser):
    """Register the deprecated --genscript command line option."""
    group = parser.getgroup("debugconfig")
    group.addoption(
        "--genscript", action="store", default=None,
        dest="genscript", metavar="path",
        help="create standalone pytest script at given target path.")
def pytest_cmdline_main(config):
    """Generate the standalone script when --genscript was requested."""
    import _pytest.config
    target = config.getvalue("genscript")
    if not target:
        return None
    tw = _pytest.config.create_terminal_writer(config)
    tw.line("WARNING: usage of genscript is deprecated.",
            red=True)
    deps = ['py', '_pytest', 'pytest']  # pluggy is vendored
    if sys.version_info < (2, 7):
        deps.append("argparse")
        tw.line("generated script will run on python2.6-python3.3++")
    else:
        tw.line("WARNING: generated script will not run on python2.6 "
                "due to 'argparse' dependency. Use python2.6 "
                "to generate a python2.6 compatible script", red=True)
    script = generate_script(
        'import pytest; raise SystemExit(pytest.cmdline.main())',
        deps,
    )
    target_path = py.path.local(target)
    target_path.write(script)
    tw.line("generated pytest standalone script: %s" % target_path,
            bold=True)
    return 0
def pytest_namespace():
    """Expose freeze_includes in the pytest namespace."""
    return dict(freeze_includes=freeze_includes)
def freeze_includes():
    """
    Returns a list of module names used by py.test that should be
    included by cx_freeze.
    """
    modules = list(_iter_all_modules(py))
    modules.extend(_iter_all_modules(_pytest))
    return modules
def _iter_all_modules(package, prefix=''):
"""
Iterates over the names of all modules that can be found in the given
package, recursively.
Example:
_iter_all_modules(_pytest) ->
['_pytest.assertion.newinterpret',
'_pytest.capture',
'_pytest.core',
...
]
"""
if type(package) is not str:
path, prefix = package.__path__[0], package.__name__ + '.'
else:
path = package
for _, name, is_package in pkgutil.iter_modules([path]):
if is_package:
for m in _iter_all_modules(os.path.join(path, name), prefix=name + '.'):
yield prefix + m
else:
yield prefix + name
| mpl-2.0 |
hehongliang/tensorflow | tensorflow/python/keras/losses_test.py | 1 | 9841 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras loss functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import numpy as np
from tensorflow.python import keras
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
# Canonical list of the Keras loss *functions* (not the class-based losses)
# exercised by the generic shape tests below.
ALL_LOSSES = [keras.losses.mean_squared_error,
              keras.losses.mean_absolute_error,
              keras.losses.mean_absolute_percentage_error,
              keras.losses.mean_squared_logarithmic_error,
              keras.losses.squared_hinge,
              keras.losses.hinge,
              keras.losses.categorical_crossentropy,
              keras.losses.binary_crossentropy,
              keras.losses.kullback_leibler_divergence,
              keras.losses.poisson,
              keras.losses.cosine_proximity,
              keras.losses.logcosh,
              keras.losses.categorical_hinge]
class _MSEMAELoss(object):
"""Loss function with internal state, for testing serialization code."""
def __init__(self, mse_fraction):
self.mse_fraction = mse_fraction
def __call__(self, y_true, y_pred):
return (self.mse_fraction * keras.losses.mse(y_true, y_pred) +
(1 - self.mse_fraction) * keras.losses.mae(y_true, y_pred))
def get_config(self):
return {'mse_fraction': self.mse_fraction}
class KerasLossesTest(test.TestCase):
  """Tests for the functional (non-class) Keras losses."""
  # Every loss applied to rank-3 inputs must reduce the last axis only.
  def test_objective_shapes_3d(self):
    with self.cached_session():
      y_a = keras.backend.variable(np.random.random((5, 6, 7)))
      y_b = keras.backend.variable(np.random.random((5, 6, 7)))
      for obj in ALL_LOSSES:
        objective_output = obj(y_a, y_b)
        self.assertListEqual(objective_output.get_shape().as_list(), [5, 6])
  # Same contract for rank-2 inputs: output drops the last axis.
  def test_objective_shapes_2d(self):
    with self.cached_session():
      y_a = keras.backend.variable(np.random.random((6, 7)))
      y_b = keras.backend.variable(np.random.random((6, 7)))
      for obj in ALL_LOSSES:
        objective_output = obj(y_a, y_b)
        self.assertListEqual(objective_output.get_shape().as_list(), [6,])
  # Sparse CCE takes integer labels (no one-hot axis on y_true).
  def test_cce_one_hot(self):
    with self.cached_session():
      y_a = keras.backend.variable(np.random.randint(0, 7, (5, 6)))
      y_b = keras.backend.variable(np.random.random((5, 6, 7)))
      objective_output = keras.losses.sparse_categorical_crossentropy(y_a, y_b)
      assert keras.backend.eval(objective_output).shape == (5, 6)
      y_a = keras.backend.variable(np.random.randint(0, 7, (6,)))
      y_b = keras.backend.variable(np.random.random((6, 7)))
      objective_output = keras.losses.sparse_categorical_crossentropy(y_a, y_b)
      assert keras.backend.eval(objective_output).shape == (6,)
  # serialize/deserialize round-trips a built-in loss function.
  def test_serialization(self):
    fn = keras.losses.get('mse')
    config = keras.losses.serialize(fn)
    new_fn = keras.losses.deserialize(config)
    self.assertEqual(fn, new_fn)
  # Hand-computed expected value for the categorical hinge loss.
  def test_categorical_hinge(self):
    y_pred = keras.backend.variable(np.array([[0.3, 0.2, 0.1],
                                              [0.1, 0.2, 0.7]]))
    y_true = keras.backend.variable(np.array([[0, 1, 0], [1, 0, 0]]))
    expected_loss = ((0.3 - 0.2 + 1) + (0.7 - 0.1 + 1)) / 2.0
    loss = keras.backend.eval(keras.losses.categorical_hinge(y_true, y_pred))
    self.assertAllClose(expected_loss, np.mean(loss))
  # A stateful loss object survives serialize/deserialize with its config.
  def test_serializing_loss_class(self):
    orig_loss_class = _MSEMAELoss(0.3)
    with keras.utils.custom_object_scope({'_MSEMAELoss': _MSEMAELoss}):
      serialized = keras.losses.serialize(orig_loss_class)
    with keras.utils.custom_object_scope({'_MSEMAELoss': _MSEMAELoss}):
      deserialized = keras.losses.deserialize(serialized)
    assert isinstance(deserialized, _MSEMAELoss)
    assert deserialized.mse_fraction == 0.3
  # A model compiled with a custom loss object can be saved and reloaded
  # (skipped when h5py is unavailable -- saving requires HDF5).
  def test_serializing_model_with_loss_class(self):
    tmpdir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, tmpdir)
    model_filename = os.path.join(tmpdir, 'custom_loss.h5')
    with self.cached_session():
      with keras.utils.custom_object_scope({'_MSEMAELoss': _MSEMAELoss}):
        loss = _MSEMAELoss(0.3)
        inputs = keras.layers.Input((2,))
        outputs = keras.layers.Dense(1, name='model_output')(inputs)
        model = keras.models.Model(inputs, outputs)
        model.compile(optimizer='sgd', loss={'model_output': loss})
        model.fit(np.random.rand(256, 2), np.random.rand(256, 1))
      if h5py is None:
        return
      model.save(model_filename)
      with keras.utils.custom_object_scope({'_MSEMAELoss': _MSEMAELoss}):
        loaded_model = keras.models.load_model(model_filename)
        loaded_model.predict(np.random.rand(128, 2))
@test_util.run_all_in_graph_and_eager_modes
class MeanSquaredErrorTest(test.TestCase):
  """Tests for the class-based MeanSquaredError loss (v2 reductions)."""
  # Constructor arguments are stored as given.
  def test_config(self):
    mse_obj = keras.losses.MeanSquaredError(
        reduction=keras.losses.ReductionV2.SUM, name='mse_1')
    self.assertEqual(mse_obj.name, 'mse_1')
    self.assertEqual(mse_obj.reduction, keras.losses.ReductionV2.SUM)
  # Identical predictions and targets give zero loss.
  def test_all_correct_unweighted(self):
    mse_obj = keras.losses.MeanSquaredError()
    y_true = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
    loss = mse_obj(y_true, y_true)
    self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
  # Expected values below are hand-computed means of squared errors.
  def test_unweighted(self):
    mse_obj = keras.losses.MeanSquaredError()
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=(2, 3),
                                  dtype=dtypes.float32)
    loss = mse_obj(y_true, y_pred)
    self.assertAlmostEqual(self.evaluate(loss), 49.5, 3)
  # A scalar sample_weight scales the unweighted loss uniformly.
  def test_scalar_weighted(self):
    mse_obj = keras.losses.MeanSquaredError()
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=(2, 3),
                                  dtype=dtypes.float32)
    loss = mse_obj(y_true, y_pred, sample_weight=2.3)
    self.assertAlmostEqual(self.evaluate(loss), 113.85, 3)
  # Per-sample weights broadcast over the last axis.
  def test_sample_weighted(self):
    mse_obj = keras.losses.MeanSquaredError()
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=(2, 3),
                                  dtype=dtypes.float32)
    sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
    loss = mse_obj(y_true, y_pred, sample_weight=sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), 767.8 / 6, 3)
  # Per-timestep weights match the (batch, timestep) layout.
  def test_timestep_weighted(self):
    mse_obj = keras.losses.MeanSquaredError()
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=(2, 3, 1),
                                  dtype=dtypes.float32)
    sample_weight = constant_op.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
    loss = mse_obj(y_true, y_pred, sample_weight=sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), 587 / 6, 3)
  # A zero weight must zero out the loss entirely.
  def test_zero_weighted(self):
    mse_obj = keras.losses.MeanSquaredError()
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=(2, 3),
                                  dtype=dtypes.float32)
    loss = mse_obj(y_true, y_pred, sample_weight=0)
    self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
  # Incompatible sample_weight shapes raise a ValueError.
  def test_invalid_sample_weight(self):
    mse_obj = keras.losses.MeanSquaredError()
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3, 1))
    sample_weight = constant_op.constant([3, 6, 5, 0], shape=(2, 2))
    with self.assertRaisesRegexp(
        ValueError, r'Shapes \(2, 2\) and \(2, 3\) are incompatible'):
      mse_obj(y_true, y_pred, sample_weight=sample_weight)
  # Reduction NONE keeps one loss value per sample.
  def test_no_reduction(self):
    mse_obj = keras.losses.MeanSquaredError(
        reduction=keras.losses.ReductionV2.NONE)
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=(2, 3),
                                  dtype=dtypes.float32)
    loss = mse_obj(y_true, y_pred, sample_weight=2.3)
    loss = self.evaluate(loss)
    self.assertArrayNear(loss, [84.3333, 143.3666], 1e-3)
  # Reduction SUM adds the per-sample losses together.
  def test_sum_reduction(self):
    mse_obj = keras.losses.MeanSquaredError(
        reduction=keras.losses.ReductionV2.SUM)
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=(2, 3),
                                  dtype=dtypes.float32)
    loss = mse_obj(y_true, y_pred, sample_weight=2.3)
    self.assertAlmostEqual(self.evaluate(loss), 227.69998, 3)
# Allow running this test file directly with `python losses_test.py`.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
hainm/statsmodels | statsmodels/sandbox/panel/panel_short.py | 25 | 8268 | # -*- coding: utf-8 -*-
"""Panel data analysis for short T and large N
Created on Sat Dec 17 19:32:00 2011
Author: Josef Perktold
License: BSD-3
starting from scratch before looking at references again
just a stub to get the basic structure for group handling
target outsource as much as possible for reuse
Notes
-----
this is the basic version using a loop over individuals which will be more
widely applicable. Depending on the special cases, there will be faster
implementations possible (sparse, kroneker, ...)
the only two group specific methods or get_within_cov and whiten
"""
import numpy as np
from statsmodels.regression.linear_model import OLS, GLS
from statsmodels.tools.grouputils import Group, GroupSorted
#not used
class Unit(object):
    """Container for one panel individual's endog and exog data.

    Note: currently unused in this module (see the preceding comment).
    """
    def __init__(self, endog, exog):
        # `self` was missing from the signature, which made
        # Unit(endog, exog) raise a TypeError and left `self` undefined.
        self.endog = endog
        self.exog = exog
def sum_outer_product_loop(x, group_iter):
    '''Sum of outer products dot(x_i, x_i.T) over individuals.

    Plain-loop version.  `group_iter` is a callable yielding the index
    arrays of the individual groups.
    '''
    total = 0
    for indices in group_iter():
        segment = x[indices]
        total = total + np.outer(segment, segment)
    return total
def sum_outer_product_balanced(x, n_groups):
    '''Sum of outer products dot(x_i, x_i.T) over individuals.

    Vectorized reshape-dot version for balanced panels; for 1-d `x` only.
    Each of the `n_groups` columns of the Fortran-order reshape is one
    individual's series, so a single matrix product accumulates all the
    outer products; the result is (nobs_i, nobs_i).
    '''
    by_group = x.reshape(-1, n_groups, order='F')
    return by_group.dot(by_group.T)
#x.reshape(n_groups, nobs_i, k_vars) #, order='F')
#... ? this is getting 3-dimensional dot, tensordot?
#needs (n_groups, k_vars, k_vars) array with sum over groups
#NOT
#I only need this for x is 1d, i.e. residual
def whiten_individuals_loop(x, transform, group_iter):
    '''Apply a linear transform to each individual's block of `x`.

    Loop version.  `group_iter` is a callable yielding the index arrays of
    the individual groups; the transformed blocks are concatenated back
    into one array.  (Pre-allocating would require knowing the transformed
    dimension up front.)
    '''
    transformed = [np.dot(transform, x[indices]) for indices in group_iter()]
    return np.concatenate(transformed)
class ShortPanelGLS2(object):
    '''Short Panel with general intertemporal within correlation

    assumes data is stacked by individuals, panel is balanced and
    within correlation structure is identical across individuals.

    It looks like this can just inherit GLS and overwrite whiten
    (that is what ShortPanelGLS below does).
    '''
    def __init__(self, endog, exog, group):
        self.endog = endog
        self.exog = exog
        self.group = GroupSorted(group)
        self.n_groups = self.group.n_groups
        #self.nobs_group =   #list for unbalanced?
    def fit_ols(self):
        # Pooled OLS, used as the starting estimate for the two-step fit.
        self.res_pooled = OLS(self.endog, self.exog).fit()
        return self.res_pooled #return or not
    def get_within_cov(self, resid):
        # Within-individual covariance of the residuals (uncentered).
        #central moment or not?
        mom = sum_outer_product_loop(resid, self.group.group_iter)
        return mom / self.n_groups #df correction ?
    def whiten_groups(self, x, cholsigmainv_i):
        # Apply the Cholesky whitening transform to each individual's block.
        #from scipy import sparse #use sparse
        wx = whiten_individuals_loop(x, cholsigmainv_i, self.group.group_iter)
        return wx
    def fit(self):
        # Two-step estimator: pooled OLS residuals give the within
        # covariance, then OLS on the whitened data is the GLS estimate.
        res_pooled = self.fit_ols() #get starting estimate
        sigma_i = self.get_within_cov(res_pooled.resid)
        self.cholsigmainv_i = np.linalg.cholesky(np.linalg.pinv(sigma_i)).T
        wendog = self.whiten_groups(self.endog, self.cholsigmainv_i)
        wexog = self.whiten_groups(self.exog, self.cholsigmainv_i)
        #print wendog.shape, wexog.shape
        self.res1 = OLS(wendog, wexog).fit()
        return self.res1
class ShortPanelGLS(GLS):
    '''Short Panel with general intertemporal within correlation

    assumes data is stacked by individuals, panel is balanced and
    within correlation structure is identical across individuals.

    Inherits GLS and overrides `whiten` with the group-wise transform.
    '''
    def __init__(self, endog, exog, group, sigma_i=None):
        """
        Parameters
        ----------
        endog : array_like
            dependent variable, stacked by individuals
        exog : array_like
            explanatory variables, stacked by individuals
        group : array_like
            group (individual) labels; the panel must be balanced
        sigma_i : ndarray, optional
            within-individual covariance matrix; identity if not given
        """
        self.group = GroupSorted(group)
        self.n_groups = self.group.n_groups
        #self.nobs_group =   #list for unbalanced?
        # Integer division: a balanced panel is assumed (and required for
        # the full within covariance), so len(endog) is an exact multiple
        # of n_groups.  True division would produce a float here on py3.
        nobs_i = len(endog) // self.n_groups
        #balanced only for now,
        #which is a requirement anyway in this case (full cov)
        #needs to change for parameterized sigma_i
        if sigma_i is None:
            sigma_i = np.eye(int(nobs_i))
        self.cholsigmainv_i = np.linalg.cholesky(np.linalg.pinv(sigma_i)).T
        #super is taking care of endog, exog and sigma
        # Name the class explicitly: ``super(self.__class__, self)`` calls
        # this __init__ again (infinite recursion) as soon as this class
        # is subclassed.
        super(ShortPanelGLS, self).__init__(endog, exog, sigma=None)
    def get_within_cov(self, resid):
        """Within-individual covariance of the residuals (uncentered)."""
        #central moment or not?
        mom = sum_outer_product_loop(resid, self.group.group_iter)
        return mom / self.n_groups   #df correction ?
    def whiten_groups(self, x, cholsigmainv_i):
        """Apply `cholsigmainv_i` to each individual's block of `x`."""
        #from scipy import sparse #use sparse
        wx = whiten_individuals_loop(x, cholsigmainv_i, self.group.group_iter)
        return wx
    def _fit_ols(self):
        """Pooled OLS; used as starting estimate in the old explicit version."""
        self.res_pooled = OLS(self.endog, self.exog).fit()
        return self.res_pooled  #return or not
    def _fit_old(self):
        """Old explicit two-step version, kept for reference."""
        res_pooled = self._fit_ols()  #get starting estimate
        sigma_i = self.get_within_cov(res_pooled.resid)
        self.cholsigmainv_i = np.linalg.cholesky(np.linalg.pinv(sigma_i)).T
        wendog = self.whiten_groups(self.endog, self.cholsigmainv_i)
        wexog = self.whiten_groups(self.exog, self.cholsigmainv_i)
        self.res1 = OLS(wendog, wexog).fit()
        return self.res1
    def whiten(self, x):
        """Whiten `x` by groups; GLS applies this to endog and exog."""
        wx = whiten_individuals_loop(x, self.cholsigmainv_i,
                                     self.group.group_iter)
        return wx
    #copied from GLSHet and adjusted (boiler plate?)
    def fit_iterative(self, maxiter=3):
        """
        Perform an iterative two-step procedure to estimate the GLS model.

        Parameters
        ----------
        maxiter : integer, optional
            the number of iterations

        Notes
        -----
        maxiter=1: returns the estimated based on given weights
        maxiter=2: performs a second estimation with the updated weights,
                   this is 2-step estimation
        maxiter>2: iteratively estimate and update the weights

        TODO: possible extension stop iteration if change in parameter
            estimates is smaller than x_tol

        Repeated calls to fit_iterative, will do one redundant pinv_wexog
        calculation. Calling fit_iterative(maxiter) once does not do any
        redundant recalculations (whitening or calculating pinv_wexog).
        """
        #Note: in contrast to GLSHet, we don't have an auxiliary regression here
        #      might be needed if there is more structure in cov_i
        #because we only have the loop we are not attaching the ols_pooled
        #initial estimate anymore compared to original version
        if maxiter < 1:
            raise ValueError('maxiter needs to be at least 1')
        import collections
        self.history = collections.defaultdict(list)  #not really necessary
        for i in range(maxiter):
            #pinv_wexog is cached, delete it to force recalculation
            if hasattr(self, 'pinv_wexog'):
                del self.pinv_wexog
            #fit with current cov, GLS, i.e. OLS on whitened endog, exog
            results = self.fit()
            self.history['self_params'].append(results.params)
            if not i == maxiter-1:  #skip for last iteration, could break instead
                self.results_old = results  #store previous results for debugging
                #get cov from residuals of previous regression
                sigma_i = self.get_within_cov(results.resid)
                self.cholsigmainv_i = np.linalg.cholesky(
                    np.linalg.pinv(sigma_i)).T
                #calculate new whitened endog and exog
                self.initialize()
        #note results is the wrapper, results._results is the results instance
        #results._results.results_residual_regression = res_resid
        return results
# Module is meant to be imported as a library; nothing runs as a script.
if __name__ == '__main__':
    pass
| bsd-3-clause |
GladeRom/android_external_chromium_org | third_party/cython/src/Cython/Tests/TestCodeWriter.py | 132 | 2316 | from Cython.TestUtils import CythonTest
class TestCodeWriter(CythonTest):
    # CythonTest uses the CodeWriter heavily, so do some checking by
    # roundtripping Cython code through the test framework.

    # Note that this test is dependent upon the normal Cython parser
    # to generate the input trees to the CodeWriter. This saves *a lot*
    # of time; better to spend that time writing other tests than perfecting
    # this one...

    # Whitespace is very significant in this process:
    #  - always newline on new block (!)
    #  - indent 4 spaces
    #  - 1 space around every operator

    # NOTE(review): the Cython snippets embedded in the u"..." literals below
    # appear to have lost their internal indentation in this copy of the
    # file; the roundtrip compares exact whitespace, so verify the snippet
    # bodies against the upstream source.

    def t(self, codestr):
        # Parse codestr with the normal Cython parser and assert that the
        # CodeWriter serialises the resulting tree back to codestr verbatim.
        self.assertCode(codestr, self.fragment(codestr).root)

    def test_print(self):
        self.t(u"""
print x, y
print x + y ** 2
print x, y, z,
""")

    def test_if(self):
        self.t(u"if x:\n pass")

    def test_ifelifelse(self):
        self.t(u"""
if x:
pass
elif y:
pass
elif z + 34 ** 34 - 2:
pass
else:
pass
""")

    def test_def(self):
        self.t(u"""
def f(x, y, z):
pass
def f(x = 34, y = 54, z):
pass
""")

    def test_longness_and_signedness(self):
        self.t(u"def f(unsigned long long long long long int y):\n pass")

    def test_signed_short(self):
        self.t(u"def f(signed short int y):\n pass")

    def test_typed_args(self):
        self.t(u"def f(int x, unsigned long int y):\n pass")

    def test_cdef_var(self):
        self.t(u"""
cdef int hello
cdef int hello = 4, x = 3, y, z
""")

    def test_for_loop(self):
        self.t(u"""
for x, y, z in f(g(h(34) * 2) + 23):
print x, y, z
else:
print 43
""")

    def test_inplace_assignment(self):
        self.t(u"x += 43")

    def test_attribute(self):
        self.t(u"a.x")
# Allow running this test module directly.
if __name__ == "__main__":
    import unittest
    unittest.main()
| bsd-3-clause |
vincxu/kalok | pyopencl/nbody/part2.py | 1 | 3760 | #from OpenGL.GL import GL_ARRAY_BUFFER, GL_DYNAMIC_DRAW, glFlush
from OpenGL.GL import *
from OpenGL.GLU import *
import pyopencl as cl
import sys
import numpy
import timing
timings = timing.Timing()
class Part2(object):
    """OpenCL n-body step driver that shares OpenGL VBOs with OpenCL.

    Runs the ``part2`` kernel over ``num`` particles with timestep ``dt``,
    updating particle positions in-place in the shared GL buffer.

    NOTE(review): Python 2 module (``xrange`` in execute). The work-group
    size is hard-coded to 128, which presumably must divide ``num`` evenly
    -- confirm against the kernel source.
    """

    def __init__(self, num, dt, *args, **kwargs):
        """
        :param num: number of particles
        :param dt: simulation timestep (stored as numpy.float32)
        """
        self.clinit()
        self.loadProgram("part2.cl")
        self.num = num
        self.dt = numpy.float32(dt)
        self.timings = timings

    def loadData(self, pos_vbo, col_vbo, vel):
        """Wrap the GL position/color VBOs as CL buffers, upload velocities.

        :param pos_vbo: OpenGL VBO holding float4 positions
        :param col_vbo: OpenGL VBO holding float4 colors
        :param vel: numpy array of initial velocities (copied to the device)
        """
        import pyopencl as cl
        mf = cl.mem_flags
        self.pos_vbo = pos_vbo
        self.col_vbo = col_vbo
        self.pos = pos_vbo.data
        self.col = col_vbo.data
        self.vel = vel

        # Setup vertex buffer objects and share them with OpenCL as GLBuffers
        self.pos_vbo.bind()
        # For some PyOpenGL versions there is no single .buffer attribute
        # but an array of buffers:
        # https://github.com/enjalot/adventures_in_opencl/commit/61bfd373478767249fe8a3aa77e7e36b22d453c4
        try:
            self.pos_cl = cl.GLBuffer(self.ctx, mf.READ_WRITE, int(self.pos_vbo.buffer))
            self.col_cl = cl.GLBuffer(self.ctx, mf.READ_WRITE, int(self.col_vbo.buffer))
        except AttributeError:
            self.pos_cl = cl.GLBuffer(self.ctx, mf.READ_WRITE, int(self.pos_vbo.buffers[0]))
            self.col_cl = cl.GLBuffer(self.ctx, mf.READ_WRITE, int(self.col_vbo.buffers[0]))
        self.col_vbo.bind()

        # Pure OpenCL arrays: velocities plus per-work-group scratch space
        # (128 work items * sizeof(float32)).
        self.vel_cl = cl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=vel)
        self.local_pos = cl.LocalMemory(128 * numpy.dtype('float32').itemsize)
        self.queue.finish()

        # Set up the list of GL objects to share with OpenCL.
        self.gl_objects = [self.pos_cl]

    @timings
    def execute(self, sub_intervals):
        """Advance the simulation by ``sub_intervals`` kernel launches."""
        # Take ownership of the shared GL buffers before the kernel runs.
        cl.enqueue_acquire_gl_objects(self.queue, self.gl_objects)

        global_size = (self.num,)
        local_size = (128,)

        kernelargs = (self.pos_cl,
                      self.vel_cl,
                      self.local_pos,
                      self.dt)

        for i in xrange(0, sub_intervals):
            self.program.part2(self.queue, global_size, local_size, *(kernelargs))

        # Hand the buffers back to OpenGL and wait for completion.
        cl.enqueue_release_gl_objects(self.queue, self.gl_objects)
        self.queue.finish()

    def clinit(self):
        """Create a GL-sharing OpenCL context and a command queue."""
        plats = cl.get_platforms()
        from pyopencl.tools import get_gl_sharing_context_properties
        import sys
        if sys.platform == "darwin":
            # On OS X the CGL share group alone identifies the devices.
            self.ctx = cl.Context(properties=get_gl_sharing_context_properties(),
                                  devices=[])
        else:
            self.ctx = cl.Context(properties=[
                (cl.context_properties.PLATFORM, plats[0])]
                + get_gl_sharing_context_properties(), devices=None)
        self.queue = cl.CommandQueue(self.ctx)

    def loadProgram(self, filename):
        """Read the OpenCL kernel source from *filename* and build it.

        Fixed: the file handle is now closed deterministically via a
        context manager (the original opened the file and never closed it).
        """
        with open(filename, 'r') as src:
            fstr = src.read()
        # create the program
        self.program = cl.Program(self.ctx, fstr).build()

    def render(self):
        """Draw all particles as blended point sprites from the shared VBOs."""
        glEnable(GL_POINT_SMOOTH)
        glPointSize(2)
        glEnable(GL_BLEND)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)

        # Set up the VBOs.
        self.col_vbo.bind()
        glColorPointer(4, GL_FLOAT, 0, self.col_vbo)
        self.pos_vbo.bind()
        glVertexPointer(4, GL_FLOAT, 0, self.pos_vbo)
        glEnableClientState(GL_VERTEX_ARRAY)
        glEnableClientState(GL_COLOR_ARRAY)

        # Draw the VBOs.
        glDrawArrays(GL_POINTS, 0, self.num)

        glDisableClientState(GL_COLOR_ARRAY)
        glDisableClientState(GL_VERTEX_ARRAY)
        glDisable(GL_BLEND)
| gpl-3.0 |
zdary/intellij-community | plugins/hg4idea/testData/bin/hgext/inotify/client.py | 89 | 5950 | # client.py - inotify status client
#
# Copyright 2006, 2007, 2008 Bryan O'Sullivan <bos@serpentine.com>
# Copyright 2007, 2008 Brendan Cully <brendan@kublai.com>
# Copyright 2009 Nicolas Dumazet <nicdumz@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from mercurial.i18n import _
import common, server
import errno, os, socket, struct
class QueryFailed(Exception):
    """Raised when a query to the inotify server could not be completed."""
def start_server(function):
    """
    Decorator.
    Tries to call function, if it fails, try to (re)start inotify server.
    Raise QueryFailed if something went wrong
    """
    # NOTE: Python 2 "except X, e" syntax throughout -- this module predates
    # Python 3 support.
    def decorated_function(self, *args):
        try:
            return function(self, *args)
        except (OSError, socket.error), err:
            autostart = self.ui.configbool('inotify', 'autostart', True)

            if err.args[0] == errno.ECONNREFUSED:
                # Stale socket left behind by a dead server: remove it so
                # a fresh server can bind the same path.
                self.ui.warn(_('inotify-client: found dead inotify server '
                               'socket; removing it\n'))
                os.unlink(os.path.join(self.root, '.hg', 'inotify.sock'))
            if err.args[0] in (errno.ECONNREFUSED, errno.ENOENT) and autostart:
                try:
                    try:
                        server.start(self.ui, self.dirstate, self.root,
                                     dict(daemon=True, daemon_pipefds=''))
                    except server.AlreadyStartedException, inst:
                        # another process may have started its own
                        # inotify server while this one was starting.
                        self.ui.debug(str(inst))
                except Exception, inst:
                    self.ui.warn(_('inotify-client: could not start inotify '
                                   'server: %s\n') % inst)
                else:
                    # Server (re)started successfully: retry the query once.
                    try:
                        return function(self, *args)
                    except socket.error, err:
                        self.ui.warn(_('inotify-client: could not talk to new '
                                       'inotify server: %s\n') % err.args[-1])
            elif err.args[0] in (errno.ECONNREFUSED, errno.ENOENT):
                # silently ignore normal errors if autostart is False
                self.ui.debug('(inotify server not running)\n')
            else:
                self.ui.warn(_('inotify-client: failed to contact inotify '
                               'server: %s\n') % err.args[-1])

            self.ui.traceback()
            # All recovery paths fall through to here: signal the caller.
            raise QueryFailed('inotify query failed')

    return decorated_function
class client(object):
    """Client side of the inotify status protocol.

    Talks to the per-repository inotify server over a unix domain socket
    at .hg/inotify.sock.  Python 2 code ("except X, e" syntax, filter/map
    returning lists).
    """

    def __init__(self, ui, repo):
        self.ui = ui
        self.dirstate = repo.dirstate
        self.root = repo.root
        self.sock = socket.socket(socket.AF_UNIX)

    def _connect(self):
        # Connect to the repository's socket; if the path exceeds the
        # AF_UNIX limit the server leaves a symlink instead, so chase it.
        sockpath = os.path.join(self.root, '.hg', 'inotify.sock')
        try:
            self.sock.connect(sockpath)
        except socket.error, err:
            if err.args[0] == "AF_UNIX path too long":
                sockpath = os.readlink(sockpath)
                self.sock.connect(sockpath)
            else:
                raise

    def _send(self, type, data):
        """Sends protocol version number, and the data"""
        self.sock.sendall(chr(common.version) + type + data)
        # Half-close the socket: tells the server the request is complete.
        self.sock.shutdown(socket.SHUT_WR)

    def _receive(self, type):
        """
        Read data, check version number, extract headers,
        and returns a tuple (data descriptor, header)

        Raises QueryFailed on error
        """
        cs = common.recvcs(self.sock)
        try:
            version = ord(cs.read(1))
        except TypeError:
            # empty answer, assume the server crashed
            self.ui.warn(_('inotify-client: received empty answer from inotify '
                           'server'))
            raise QueryFailed('server crashed')
        if version != common.version:
            self.ui.warn(_('(inotify: received response from incompatible '
                           'server version %d)\n') % version)
            raise QueryFailed('incompatible server version')

        # The response echoes the 4-byte query type; anything else means
        # the streams are out of sync.
        readtype = cs.read(4)
        if readtype != type:
            self.ui.warn(_('(inotify: received \'%s\' response when expecting'
                           ' \'%s\')\n') % (readtype, type))
            raise QueryFailed('wrong response type')

        # Per-type fixed-size header, unpacked with the struct format
        # registered in the shared `common` module.
        hdrfmt = common.resphdrfmts[type]
        hdrsize = common.resphdrsizes[type]
        try:
            resphdr = struct.unpack(hdrfmt, cs.read(hdrsize))
        except struct.error:
            raise QueryFailed('unable to retrieve query response headers')

        return cs, resphdr

    def query(self, type, req):
        """Connect, send one request, and return (data stream, headers)."""
        self._connect()
        self._send(type, req)
        return self._receive(type)

    @start_server
    def statusquery(self, names, match, ignored, clean, unknown=True):
        """Ask the server for file status; returns per-state name tuples."""

        def genquery():
            # Request body: the queried names, then a final field of
            # single-letter status codes the caller is interested in.
            for n in names:
                yield n
            states = 'almrx!'
            # Querying ignored files is not supported by the server.
            if ignored:
                raise ValueError('this is insanity')
            if clean:
                states += 'c'
            if unknown:
                states += '?'
            yield states

        req = '\0'.join(genquery())

        cs, resphdr = self.query('STAT', req)

        def readnames(nbytes):
            # Each header entry gives the byte length of one NUL-separated
            # name list; filter through the caller's match function.
            if nbytes:
                names = cs.read(nbytes)
                if names:
                    return filter(match, names.split('\0'))
            return []
        results = tuple(map(readnames, resphdr[:-1]))

        if names:
            # Trailing header entry: visited directories, fed back to the
            # matcher for its bookkeeping.
            nbytes = resphdr[-1]
            vdirs = cs.read(nbytes)
            if vdirs:
                for vdir in vdirs.split('\0'):
                    match.dir(vdir)

        return results

    @start_server
    def debugquery(self):
        """Return the raw list of paths the server is watching."""
        cs, resphdr = self.query('DBUG', '')

        nbytes = resphdr[0]
        names = cs.read(nbytes)
        return names.split('\0')
| apache-2.0 |
g-k/servo | tests/wpt/css-tests/tools/wptserve/wptserve/pipes.py | 180 | 13830 | from cgi import escape
import gzip as gzip_module
import re
import time
import types
import uuid
from cStringIO import StringIO
def resolve_content(response):
    """Materialise the full response body as a single string.

    Joins every chunk yielded by ``response.iter_content()``; a unicode
    body is encoded with the response's declared encoding first.
    (Python 2 code: relies on the builtin ``unicode``.)
    """
    body = "".join(response.iter_content())
    if type(body) == unicode:
        body = body.encode(response.encoding)
    return body
class Pipeline(object):
    """An ordered chain of response-transforming pipe functions.

    ``pipes`` maps pipe names to callables; the ``@pipe`` decorator in this
    module registers entries here.  A pipe string looks like
    ``name(arg1,arg2)|name2(...)``.
    """

    pipes = {}

    def __init__(self, pipe_string):
        self.pipe_functions = self.parse(pipe_string)

    def parse(self, pipe_string):
        """Turn a pipe string into a list of (callable, [string args])."""
        parsed = []
        for token in PipeTokenizer().tokenize(pipe_string):
            if not token:
                break
            kind, value = token
            if kind == "function":
                parsed.append((self.pipes[value], []))
            elif kind == "argument":
                parsed[-1][1].append(value)
        return parsed

    def __call__(self, request, response):
        """Apply each pipe in turn, threading the response through."""
        for fn, fn_args in self.pipe_functions:
            response = fn(request, response, *fn_args)
        return response
class PipeTokenizer(object):
    """Hand-rolled scanner for pipe strings like ``trickle(d1:100)|status(404)``.

    ``tokenize`` is a generator yielding ("function", name) and
    ("argument", value) tuples; it terminates with ``None``.
    """

    def __init__(self):
        # This whole class can likely be replaced by some regexps
        self.state = None

    def tokenize(self, string):
        self.string = string
        self.state = self.func_name_state
        self._index = 0
        while self.state:
            yield self.state()
        yield None

    def get_char(self):
        """Consume and return the next character, or None at end of input."""
        if self._index >= len(self.string):
            return None
        ch = self.string[self._index]
        self._index += 1
        return ch

    def func_name_state(self):
        name = ""
        while True:
            ch = self.get_char()
            if ch is None:
                self.state = None
                return ("function", name) if name else None
            if ch == "(":
                self.state = self.argument_state
                return ("function", name)
            if ch == "|":
                # Separator between pipes; an empty name is simply skipped.
                if name:
                    return ("function", name)
            else:
                name += ch

    def argument_state(self):
        value = ""
        while True:
            ch = self.get_char()
            if ch is None:
                self.state = None
                return ("argument", value)
            if ch == "\\":
                value += self.get_escape()
                if value is None:
                    # This should perhaps be an error instead
                    return ("argument", value)
            elif ch == ",":
                return ("argument", value)
            elif ch == ")":
                self.state = self.func_name_state
                return ("argument", value)
            else:
                value += ch

    def get_escape(self):
        """Translate backslash escapes; unknown escapes map to themselves."""
        ch = self.get_char()
        escapes = {"n": "\n",
                   "r": "\r",
                   "t": "\t"}
        return escapes.get(ch, ch)
class pipe(object):
    """Decorator registering a pipe function on :class:`Pipeline`.

    Positional arguments are converters applied to the string arguments
    parsed from the pipe expression; wrap a converter in :class:`opt` to
    mark the corresponding argument optional.  Optional converters must
    come after all required ones.
    """

    def __init__(self, *arg_converters):
        self.arg_converters = arg_converters
        self.max_args = len(self.arg_converters)
        self.min_args = 0
        seen_optional = False
        for conv in self.arg_converters:
            if isinstance(conv, opt):
                seen_optional = True
            elif seen_optional:
                raise ValueError("Non-optional argument cannot follow optional argument")
            else:
                self.min_args += 1

    def __call__(self, f):
        def inner(request, response, *args):
            if not (self.min_args <= len(args) <= self.max_args):
                raise ValueError("Expected between %d and %d args, got %d" %
                                 (self.min_args, self.max_args, len(args)))
            converted = tuple(conv(raw) for conv, raw in
                              zip(self.arg_converters, args))
            return f(request, response, *converted)
        Pipeline.pipes[f.__name__] = inner
        # We actually want the undecorated function in the main namespace
        return f
class opt(object):
    """Marker wrapper: the converter it wraps handles an *optional*
    pipe argument.  Calling the wrapper delegates to the converter."""

    def __init__(self, f):
        self.f = f

    def __call__(self, arg):
        return self.f(arg)
def nullable(func):
    """Wrap *func* so that the literal string "null" (any case) maps to
    None instead of being converted."""
    def convert(arg):
        if arg.lower() == "null":
            return None
        return func(arg)
    return convert
def boolean(arg):
    """Convert the strings true/1 and false/0 (case-insensitive) to a bool.

    :param arg: string to convert
    :raises ValueError: if *arg* is not one of the recognised spellings.
        (The original raised a bare ValueError with no message; the lowered
        string was also computed twice.)
    """
    lowered = arg.lower()
    if lowered in ("true", "1"):
        return True
    if lowered in ("false", "0"):
        return False
    raise ValueError("Unrecognised boolean value %r" % arg)
@pipe(int)
def status(request, response, code):
    """Alter the status code.

    :param code: Status code to use for the response."""
    # The converter on @pipe(int) guarantees `code` is already an int.
    response.status = code
    return response
@pipe(str, str, opt(boolean))
def header(request, response, name, value, append=False):
    """Set a HTTP header.

    Replaces any existing HTTP header of the same name unless
    append is set, in which case the header is appended without
    replacement.

    :param name: Name of the header to set.
    :param value: Value to use for the header.
    :param append: True if existing headers should not be replaced
    """
    if append:
        response.headers.append(name, value)
    else:
        response.headers.set(name, value)
    return response
@pipe(str)
def trickle(request, response, delays):
    """Send the response in parts, with time delays.

    :param delays: A string of delays and amounts, in bytes, of the
                   response to send. Each component is separated by
                   a colon. Amounts in bytes are plain integers, whilst
                   delays are floats prefixed with a single d e.g.

                     d1:100:d2

                   Would cause a 1 second delay, would then send 100 bytes
                   of the file, and then cause a 2 second delay, before sending
                   the remainder of the file.

                   If the last token is of the form rN, instead of sending the
                   remainder of the file, the previous N instructions will be
                   repeated until the whole file has been sent e.g.

                     d1:100:d2:r2

                   Causes a delay of 1s, then 100 bytes to be sent, then a 2s delay
                   and then a further 100 bytes followed by a two second delay
                   until the response has been fully sent.
    """
    def parse_delays():
        # Parse "d1:100:d2:r2" into [(type, value), ...] where type is
        # "delay" (seconds, float), "bytes" (count, int) or "repeat".
        parts = delays.split(":")
        rv = []
        for item in parts:
            if item.startswith("d"):
                item_type = "delay"
                item = item[1:]
                value = float(item)
            elif item.startswith("r"):
                item_type = "repeat"
                value = int(item[1:])
                # A repeat must replay whole (delay, bytes) pairs.
                if not value % 2 == 0:
                    raise ValueError
            else:
                item_type = "bytes"
                value = int(item)
            if len(rv) and rv[-1][0] == item_type:
                # NOTE(review): rv holds tuples, so this in-place merge of
                # adjacent same-type segments would raise TypeError if it
                # ever executed -- appears to be a latent bug; confirm
                # against upstream wptserve.
                rv[-1][1] += value
            else:
                rv.append((item_type, value))
        return rv

    delays = parse_delays()
    if not delays:
        return response
    content = resolve_content(response)
    modified_content = []
    # Boxed int so the nested add_content closure can mutate the offset.
    offset = [0]

    def sleep(seconds):
        # Return a callable "chunk": the response writer invokes it (causing
        # the delay) and it contributes no bytes ("").
        def inner():
            time.sleep(seconds)
            return ""
        return inner

    def add_content(delays, repeat=False):
        for i, (item_type, value) in enumerate(delays):
            if item_type == "bytes":
                modified_content.append(content[offset[0]:offset[0] + value])
                offset[0] += value
            elif item_type == "delay":
                modified_content.append(sleep(value))
            elif item_type == "repeat":
                # Must be the last instruction; replay the previous `value`
                # instructions until the content is exhausted.
                assert i == len(delays) - 1
                while offset[0] < len(content):
                    add_content(delays[-(value + 1):-1], True)

        if not repeat and offset[0] < len(content):
            # Flush whatever content remains after the scripted segments.
            modified_content.append(content[offset[0]:])

    add_content(delays)

    response.content = modified_content
    return response
@pipe(nullable(int), opt(nullable(int)))
def slice(request, response, start, end=None):
    """Send a byte range of the response body

    :param start: The starting offset. Follows python semantics including
                  negative numbers.

    :param end: The ending offset, again with python semantics and None
                (spelled "null" in a query string) to indicate the end of
                the file.
    """
    body = resolve_content(response)
    response.content = body[start:end]
    return response
class ReplacementTokenizer(object):
    # Tokenizes the inside of a {{...}} substitution into
    # ("var"|"ident"|"index", value) pairs using re.Scanner.
    # NOTE: re.Scanner invokes these callbacks as plain functions with
    # (scanner, matched_text) -- they are deliberately not bound methods.

    def ident(scanner, token):
        # A field name, e.g. "host" or "uuid()".
        return ("ident", token)

    def index(scanner, token):
        # A bracketed index, e.g. "[www]" or "[0]"; numeric indices become
        # ints, everything else a unicode string (Python 2 `unicode`).
        token = token[1:-1]
        try:
            token = int(token)
        except ValueError:
            token = unicode(token, "utf8")
        return ("index", token)

    def var(scanner, token):
        # A variable-assignment prefix, e.g. "$id:" (trailing ":" stripped).
        token = token[:-1]
        return ("var", token)

    def tokenize(self, string):
        # Returns the list of matched tokens; any unmatched trailing text
        # (scan()[1]) is silently discarded.
        return self.scanner.scan(string)[0]

    scanner = re.Scanner([(r"\$\w+:", var),
                          (r"\$?\w+(?:\(\))?", ident),
                          (r"\[[^\]]*\]", index)])
class FirstWrapper(object):
    """Dict-like view over multi-valued request parameters.

    Indexing returns the *first* value recorded for a key, or the empty
    string when the key is absent.
    """

    def __init__(self, params):
        self.params = params

    def __getitem__(self, key):
        try:
            return self.params.first(key)
        except KeyError:
            return ""
@pipe()
def sub(request, response):
    """Substitute environment information about the server and request into the script.

    The format is a very limited template language. Substitutions are
    enclosed by {{ and }}. There are several available substitutions:

    host
      A simple string value and represents the primary host from which the
      tests are being run.
    domains
      A dictionary of available domains indexed by subdomain name.
    ports
      A dictionary of lists of ports indexed by protocol.
    location
      A dictionary of parts of the request URL. Valid keys are
      'server, 'scheme', 'host', 'hostname', 'port', 'path' and 'query'.
      'server' is scheme://host:port, 'host' is hostname:port, and query
      includes the leading '?', but other delimiters are omitted.
    headers
      A dictionary of HTTP headers in the request.
    GET
      A dictionary of query parameters supplied with the request.
    uuid()
      A pseudo-random UUID suitable for usage with stash

    So for example in a setup running on localhost with a www
    subdomain and a http server on ports 80 and 81::

      {{host}} => localhost
      {{domains[www]}} => www.localhost
      {{ports[http][1]}} => 81

    It is also possible to assign a value to a variable name, which must start with
    the $ character, using the ":" syntax e.g.

      {{$id:uuid()}}

    Later substitutions in the same file may then refer to the variable
    by name e.g.

      {{$id}}
    """
    content = resolve_content(response)
    # Delegate to the module-level template() helper, which performs the
    # actual {{...}} expansion.
    new_content = template(request, content)
    response.content = new_content
    return response
def template(request, content):
    """Expand {{...}} substitutions in *content* using request/server data.

    Supports field lookup, [index] accessors and "$name:" variable
    assignment.  Python 2 code: uses ``unicode`` and ``types.StringTypes``.
    """
    # TODO: There basically isn't any error handling here
    tokenizer = ReplacementTokenizer()

    # Values bound via the "$name:" syntax, shared across all replacements
    # in this content.
    variables = {}

    def config_replacement(match):
        content, = match.groups()   # intentionally shadows the outer `content`

        tokens = tokenizer.tokenize(content)

        if tokens[0][0] == "var":
            # "$name:" prefix -- remember the computed value under `name`.
            variable = tokens[0][1]
            tokens = tokens[1:]
        else:
            variable = None

        assert tokens[0][0] == "ident" and all(item[0] == "index" for item in tokens[1:]), tokens

        field = tokens[0][1]

        if field in variables:
            value = variables[field]
        elif field == "headers":
            value = request.headers
        elif field == "GET":
            value = FirstWrapper(request.GET)
        elif field in request.server.config:
            value = request.server.config[tokens[0][1]]
        elif field == "location":
            value = {"server": "%s://%s:%s" % (request.url_parts.scheme,
                                               request.url_parts.hostname,
                                               request.url_parts.port),
                     "scheme": request.url_parts.scheme,
                     "host": "%s:%s" % (request.url_parts.hostname,
                                        request.url_parts.port),
                     "hostname": request.url_parts.hostname,
                     "port": request.url_parts.port,
                     "path": request.url_parts.path,
                     "query": "?%s" % request.url_parts.query}
        elif field == "uuid()":
            value = str(uuid.uuid4())
        else:
            raise Exception("Undefined template variable %s" % field)

        # Apply any [index] accessors, e.g. {{domains[www]}} or {{ports[http][1]}}.
        for item in tokens[1:]:
            value = value[item[1]]

        assert isinstance(value, (int,) + types.StringTypes), tokens

        if variable is not None:
            variables[variable] = value

        # Should possibly support escaping for other contexts e.g. script
        # TODO: read the encoding of the response
        return escape(unicode(value)).encode("utf-8")

    template_regexp = re.compile(r"{{([^}]*)}}")
    new_content, count = template_regexp.subn(config_replacement, content)

    return new_content
@pipe()
def gzip(request, response):
    """This pipe gzip-encodes response data.

    It sets (or overwrites) these HTTP headers:
    Content-Encoding is set to gzip
    Content-Length is set to the length of the compressed content
    """
    # NOTE: this function shadows the builtin `gzip` module name, which is
    # why the module is imported above as `gzip_module`.
    content = resolve_content(response)
    response.headers.set("Content-Encoding", "gzip")

    out = StringIO()
    with gzip_module.GzipFile(fileobj=out, mode="w") as f:
        f.write(content)
    response.content = out.getvalue()

    response.headers.set("Content-Length", len(response.content))

    return response
| mpl-2.0 |
anryko/ansible | lib/ansible/modules/network/cloudengine/ce_aaa_server.py | 13 | 70732 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: ce_aaa_server
version_added: "2.4"
short_description: Manages AAA server global configuration on HUAWEI CloudEngine switches.
description:
- Manages AAA server global configuration on HUAWEI CloudEngine switches.
author:
- wangdezhuang (@QijunPan)
notes:
- This module requires the netconf system service be enabled on the remote device being managed.
- Recommended connection is C(netconf).
- This module also works with C(local) connections for legacy playbooks.
options:
state:
description:
- Specify desired state of the resource.
type: str
choices: [ absent, present ]
default: present
authen_scheme_name:
description:
- Name of an authentication scheme.
The value is a string of 1 to 32 characters.
type: str
first_authen_mode:
description:
- Preferred authentication mode.
type: str
choices: ['invalid', 'local', 'hwtacacs', 'radius', 'none']
default: local
author_scheme_name:
description:
- Name of an authorization scheme.
The value is a string of 1 to 32 characters.
type: str
first_author_mode:
description:
- Preferred authorization mode.
type: str
choices: ['invalid', 'local', 'hwtacacs', 'if-authenticated', 'none']
default: local
acct_scheme_name:
description:
- Accounting scheme name.
The value is a string of 1 to 32 characters.
type: str
accounting_mode:
description:
- Accounting Mode.
type: str
choices: ['invalid', 'hwtacacs', 'radius', 'none']
default: none
domain_name:
description:
- Name of a domain.
The value is a string of 1 to 64 characters.
type: str
radius_server_group:
description:
- RADIUS server group's name.
The value is a string of 1 to 32 case-insensitive characters.
type: str
hwtacas_template:
description:
- Name of a HWTACACS template.
The value is a string of 1 to 32 case-insensitive characters.
type: str
local_user_group:
description:
- Name of the user group where the user belongs. The user inherits all the rights of the user group.
The value is a string of 1 to 32 characters.
type: str
'''
EXAMPLES = r'''
- name: AAA server test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Radius authentication Server Basic settings"
ce_aaa_server:
state: present
authen_scheme_name: test1
first_authen_mode: radius
radius_server_group: test2
provider: "{{ cli }}"
- name: "Undo radius authentication Server Basic settings"
ce_aaa_server:
state: absent
authen_scheme_name: test1
first_authen_mode: radius
radius_server_group: test2
provider: "{{ cli }}"
- name: "Hwtacacs accounting Server Basic settings"
ce_aaa_server:
state: present
acct_scheme_name: test1
accounting_mode: hwtacacs
hwtacas_template: test2
provider: "{{ cli }}"
- name: "Undo hwtacacs accounting Server Basic settings"
ce_aaa_server:
state: absent
acct_scheme_name: test1
accounting_mode: hwtacacs
hwtacas_template: test2
provider: "{{ cli }}"
'''
RETURN = '''
changed:
description: check to see if a change was made on the device
returned: always
type: bool
sample: true
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"accounting_mode": "hwtacacs", "acct_scheme_name": "test1",
"hwtacas_template": "test2", "state": "present"}
existing:
description: k/v pairs of existing aaa server
returned: always
type: dict
sample: {"accounting scheme": [["hwtacacs"], ["default"]],
"hwtacacs template": ["huawei"]}
end_state:
description: k/v pairs of aaa params after module execution
returned: always
type: dict
sample: {"accounting scheme": [["hwtacacs", "test1"]],
"hwtacacs template": ["huawei", "test2"]}
updates:
description: command sent to the device
returned: always
type: list
sample: ["accounting-scheme test1",
"accounting-mode hwtacacs",
"hwtacacs server template test2",
"hwtacacs enable"]
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec
# Outcome markers -- presumably used by the module logic further down this
# file (not visible in this chunk); confirm before changing.
SUCCESS = """success"""
FAILED = """failed"""

# Characters rejected when validating user-supplied scheme/domain/group names.
INVALID_SCHEME_CHAR = [' ', '/', '\\', ':', '*', '?', '"', '|', '<', '>']
INVALID_DOMAIN_CHAR = [' ', '*', '?', '"', '\'']
INVALID_GROUP_CHAR = ['/', '\\', ':', '*', '?', '"', '|', '<', '>']
# get authentication scheme
CE_GET_AUTHENTICATION_SCHEME = """
<filter type="subtree">
<aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<authenticationSchemes>
<authenticationScheme>
<authenSchemeName></authenSchemeName>
<firstAuthenMode></firstAuthenMode>
<secondAuthenMode></secondAuthenMode>
</authenticationScheme>
</authenticationSchemes>
</aaa>
</filter>
"""
# merge authentication scheme
CE_MERGE_AUTHENTICATION_SCHEME = """
<config>
<aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<authenticationSchemes>
<authenticationScheme operation="merge">
<authenSchemeName>%s</authenSchemeName>
<firstAuthenMode>%s</firstAuthenMode>
<secondAuthenMode>invalid</secondAuthenMode>
</authenticationScheme>
</authenticationSchemes>
</aaa>
</config>
"""
# create authentication scheme
CE_CREATE_AUTHENTICATION_SCHEME = """
<config>
<aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<authenticationSchemes>
<authenticationScheme operation="create">
<authenSchemeName>%s</authenSchemeName>
<firstAuthenMode>%s</firstAuthenMode>
<secondAuthenMode>invalid</secondAuthenMode>
</authenticationScheme>
</authenticationSchemes>
</aaa>
</config>
"""
# delete authentication scheme
CE_DELETE_AUTHENTICATION_SCHEME = """
<config>
<aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<authenticationSchemes>
<authenticationScheme operation="delete">
<authenSchemeName>%s</authenSchemeName>
<firstAuthenMode>%s</firstAuthenMode>
<secondAuthenMode>invalid</secondAuthenMode>
</authenticationScheme>
</authenticationSchemes>
</aaa>
</config>
"""
# get authorization scheme
CE_GET_AUTHORIZATION_SCHEME = """
<filter type="subtree">
<aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<authorizationSchemes>
<authorizationScheme>
<authorSchemeName></authorSchemeName>
<firstAuthorMode></firstAuthorMode>
<secondAuthorMode></secondAuthorMode>
</authorizationScheme>
</authorizationSchemes>
</aaa>
</filter>
"""
# merge authorization scheme
CE_MERGE_AUTHORIZATION_SCHEME = """
<config>
<aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<authorizationSchemes>
<authorizationScheme operation="merge">
<authorSchemeName>%s</authorSchemeName>
<firstAuthorMode>%s</firstAuthorMode>
<secondAuthorMode>invalid</secondAuthorMode>
</authorizationScheme>
</authorizationSchemes>
</aaa>
</config>
"""
# create authorization scheme
CE_CREATE_AUTHORIZATION_SCHEME = """
<config>
<aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<authorizationSchemes>
<authorizationScheme operation="create">
<authorSchemeName>%s</authorSchemeName>
<firstAuthorMode>%s</firstAuthorMode>
<secondAuthorMode>invalid</secondAuthorMode>
</authorizationScheme>
</authorizationSchemes>
</aaa>
</config>
"""
# delete authorization scheme
CE_DELETE_AUTHORIZATION_SCHEME = """
<config>
<aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<authorizationSchemes>
<authorizationScheme operation="delete">
<authorSchemeName>%s</authorSchemeName>
<firstAuthorMode>%s</firstAuthorMode>
<secondAuthorMode>invalid</secondAuthorMode>
</authorizationScheme>
</authorizationSchemes>
</aaa>
</config>
"""
# get accounting scheme
CE_GET_ACCOUNTING_SCHEME = """
<filter type="subtree">
<aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<accountingSchemes>
<accountingScheme>
<acctSchemeName></acctSchemeName>
<accountingMode></accountingMode>
</accountingScheme>
</accountingSchemes>
</aaa>
</filter>
"""
# merge accounting scheme
CE_MERGE_ACCOUNTING_SCHEME = """
<config>
<aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<accountingSchemes>
<accountingScheme operation="merge">
<acctSchemeName>%s</acctSchemeName>
<accountingMode>%s</accountingMode>
</accountingScheme>
</accountingSchemes>
</aaa>
</config>
"""
# create accounting scheme
CE_CREATE_ACCOUNTING_SCHEME = """
<config>
<aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<accountingSchemes>
<accountingScheme operation="create">
<acctSchemeName>%s</acctSchemeName>
<accountingMode>%s</accountingMode>
</accountingScheme>
</accountingSchemes>
</aaa>
</config>
"""
# delete accounting scheme
CE_DELETE_ACCOUNTING_SCHEME = """
<config>
<aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<accountingSchemes>
<accountingScheme operation="delete">
<acctSchemeName>%s</acctSchemeName>
<accountingMode>%s</accountingMode>
</accountingScheme>
</accountingSchemes>
</aaa>
</config>
"""
# get authentication domain
CE_GET_AUTHENTICATION_DOMAIN = """
<filter type="subtree">
<aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<domains>
<domain>
<domainName></domainName>
<authenSchemeName></authenSchemeName>
</domain>
</domains>
</aaa>
</filter>
"""
# merge authentication domain
CE_MERGE_AUTHENTICATION_DOMAIN = """
<config>
<aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<domains>
<domain operation="merge">
<domainName>%s</domainName>
<authenSchemeName>%s</authenSchemeName>
</domain>
</domains>
</aaa>
</config>
"""
# create authentication domain
CE_CREATE_AUTHENTICATION_DOMAIN = """
<config>
<aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<domains>
<domain operation="create">
<domainName>%s</domainName>
<authenSchemeName>%s</authenSchemeName>
</domain>
</domains>
</aaa>
</config>
"""
# delete authentication domain
CE_DELETE_AUTHENTICATION_DOMAIN = """
<config>
<aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<domains>
<domain operation="delete">
<domainName>%s</domainName>
<authenSchemeName>%s</authenSchemeName>
</domain>
</domains>
</aaa>
</config>
"""
# get authorization domain
CE_GET_AUTHORIZATION_DOMAIN = """
<filter type="subtree">
<aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<domains>
<domain>
<domainName></domainName>
<authorSchemeName></authorSchemeName>
</domain>
</domains>
</aaa>
</filter>
"""
# NETCONF edit-config payload: bind an authorization scheme to a domain
# (operation="merge"); %s placeholders = (domain name, authorization scheme name)
CE_MERGE_AUTHORIZATION_DOMAIN = """
<config>
<aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<domains>
<domain operation="merge">
<domainName>%s</domainName>
<authorSchemeName>%s</authorSchemeName>
</domain>
</domains>
</aaa>
</config>
"""
# NETCONF edit-config payload: create a domain/authorization-scheme binding;
# %s placeholders = (domain name, authorization scheme name)
CE_CREATE_AUTHORIZATION_DOMAIN = """
<config>
<aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<domains>
<domain operation="create">
<domainName>%s</domainName>
<authorSchemeName>%s</authorSchemeName>
</domain>
</domains>
</aaa>
</config>
"""
# NETCONF edit-config payload: delete a domain/authorization-scheme binding;
# %s placeholders = (domain name, authorization scheme name)
CE_DELETE_AUTHORIZATION_DOMAIN = """
<config>
<aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<domains>
<domain operation="delete">
<domainName>%s</domainName>
<authorSchemeName>%s</authorSchemeName>
</domain>
</domains>
</aaa>
</config>
"""
# NETCONF get filter: list every domain together with its accounting scheme
CE_GET_ACCOUNTING_DOMAIN = """
<filter type="subtree">
<aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<domains>
<domain>
<domainName></domainName>
<acctSchemeName></acctSchemeName>
</domain>
</domains>
</aaa>
</filter>
"""
# NETCONF edit-config payload: bind an accounting scheme to a domain (merge);
# %s placeholders = (domain name, accounting scheme name)
CE_MERGE_ACCOUNTING_DOMAIN = """
<config>
<aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<domains>
<domain operation="merge">
<domainName>%s</domainName>
<acctSchemeName>%s</acctSchemeName>
</domain>
</domains>
</aaa>
</config>
"""
# NETCONF edit-config payload: create a domain/accounting-scheme binding;
# %s placeholders = (domain name, accounting scheme name)
CE_CREATE_ACCOUNTING_DOMAIN = """
<config>
<aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<domains>
<domain operation="create">
<domainName>%s</domainName>
<acctSchemeName>%s</acctSchemeName>
</domain>
</domains>
</aaa>
</config>
"""
# NETCONF edit-config payload: delete a domain/accounting-scheme binding;
# %s placeholders = (domain name, accounting scheme name)
CE_DELETE_ACCOUNTING_DOMAIN = """
<config>
<aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<domains>
<domain operation="delete">
<domainName>%s</domainName>
<acctSchemeName>%s</acctSchemeName>
</domain>
</domains>
</aaa>
</config>
"""
# NETCONF get filter: list RADIUS server templates (group name + retransmission settings)
CE_GET_RADIUS_TEMPLATE = """
<filter type="subtree">
<radius xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<rdsTemplates>
<rdsTemplate>
<groupName></groupName>
<retransmissionCount></retransmissionCount>
<retransmissionInterval></retransmissionInterval>
</rdsTemplate>
</rdsTemplates>
</radius>
</filter>
"""
# NETCONF edit-config payload: merge a RADIUS server template; %s = group name.
# retransmission count=3 / interval=5 are fixed here (the device defaults).
CE_MERGE_RADIUS_TEMPLATE = """
<config>
<radius xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<rdsTemplates>
<rdsTemplate operation="merge">
<groupName>%s</groupName>
<retransmissionCount>3</retransmissionCount>
<retransmissionInterval>5</retransmissionInterval>
</rdsTemplate>
</rdsTemplates>
</radius>
</config>
"""
# NETCONF edit-config payload: create a RADIUS server template; %s = group name
CE_CREATE_RADIUS_TEMPLATE = """
<config>
<radius xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<rdsTemplates>
<rdsTemplate operation="create">
<groupName>%s</groupName>
<retransmissionCount>3</retransmissionCount>
<retransmissionInterval>5</retransmissionInterval>
</rdsTemplate>
</rdsTemplates>
</radius>
</config>
"""
# NETCONF edit-config payload: delete a RADIUS server template; %s = group name
CE_DELETE_RADIUS_TEMPLATE = """
<config>
<radius xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<rdsTemplates>
<rdsTemplate operation="delete">
<groupName>%s</groupName>
<retransmissionCount>3</retransmissionCount>
<retransmissionInterval>5</retransmissionInterval>
</rdsTemplate>
</rdsTemplates>
</radius>
</config>
"""
# NETCONF get filter: list HWTACACS server templates
CE_GET_HWTACACS_TEMPLATE = """
<filter type="subtree">
<hwtacacs xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<hwTacTempCfgs>
<hwTacTempCfg>
<templateName></templateName>
<isDomainInclude></isDomainInclude>
<responseTimeout></responseTimeout>
</hwTacTempCfg>
</hwTacTempCfgs>
</hwtacacs>
</filter>
"""
# NETCONF edit-config payload: merge an HWTACACS server template;
# %s = template name (domain-include=true, response timeout=5 fixed here)
CE_MERGE_HWTACACS_TEMPLATE = """
<config>
<hwtacacs xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<hwTacTempCfgs>
<hwTacTempCfg operation="merge">
<templateName>%s</templateName>
<isDomainInclude>true</isDomainInclude>
<responseTimeout>5</responseTimeout>
</hwTacTempCfg>
</hwTacTempCfgs>
</hwtacacs>
</config>
"""
# NETCONF edit-config payload: create an HWTACACS server template; %s = template name
CE_CREATE_HWTACACS_TEMPLATE = """
<config>
<hwtacacs xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<hwTacTempCfgs>
<hwTacTempCfg operation="create">
<templateName>%s</templateName>
<isDomainInclude>true</isDomainInclude>
<responseTimeout>5</responseTimeout>
</hwTacTempCfg>
</hwTacTempCfgs>
</hwtacacs>
</config>
"""
# NETCONF edit-config payload: delete an HWTACACS server template; %s = template name
CE_DELETE_HWTACACS_TEMPLATE = """
<config>
<hwtacacs xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<hwTacTempCfgs>
<hwTacTempCfg operation="delete">
<templateName>%s</templateName>
</hwTacTempCfg>
</hwTacTempCfgs>
</hwtacacs>
</config>
"""
# NETCONF get filter: query global RADIUS client state
CE_GET_RADIUS_CLIENT = """
<filter type="subtree">
<radius xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<rdsClient>
<isEnable></isEnable>
<coaEnable></coaEnable>
<authClientIdentifier></authClientIdentifier>
</rdsClient>
</radius>
</filter>
"""
# NETCONF edit-config payload: enable/disable the RADIUS client;
# %s = "true" or "false"
CE_MERGE_RADIUS_CLIENT = """
<config>
<radius xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<rdsClient operation="merge">
<isEnable>%s</isEnable>
</rdsClient>
</radius>
</config>
"""
# NETCONF get filter: query global HWTACACS state
CE_GET_HWTACACS_GLOBAL_CFG = """
<filter type="subtree">
<hwtacacs xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<hwTacGlobalCfg>
<isEnable></isEnable>
<totalTemplateNo></totalTemplateNo>
<totalSrvNo></totalSrvNo>
</hwTacGlobalCfg>
</hwtacacs>
</filter>
"""
# NETCONF edit-config payload: enable/disable HWTACACS globally;
# %s = "true" or "false"
CE_MERGE_HWTACACS_GLOBAL_CFG = """
<config>
<hwtacacs xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<hwTacGlobalCfg operation="merge">
<isEnable>%s</isEnable>
</hwTacGlobalCfg>
</hwtacacs>
</config>
"""
# NETCONF get filter: list local user groups
CE_GET_LOCAL_USER_GROUP = """
<filter type="subtree">
<aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<userGroups>
<userGroup>
<userGroupName></userGroupName>
</userGroup>
</userGroups>
</aaa>
</filter>
"""
# NETCONF edit-config payload: merge a local user group; %s = group name
CE_MERGE_LOCAL_USER_GROUP = """
<config>
<aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<userGroups>
<userGroup operation="merge">
<userGroupName>%s</userGroupName>
</userGroup>
</userGroups>
</aaa>
</config>
"""
# NETCONF edit-config payload: delete a local user group; %s = group name
CE_DELETE_LOCAL_USER_GROUP = """
<config>
<aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<userGroups>
<userGroup operation="delete">
<userGroupName>%s</userGroupName>
</userGroup>
</userGroups>
</aaa>
</config>
"""
class AaaServer(object):
    """Manages AAA configuration on a CloudEngine device over NETCONF.

    Each ``get_*`` method sends a subtree filter (a ``CE_GET_*`` template)
    and scrapes the reply with a regex, returning a list of matched tuples
    (empty list when nothing is configured).  Each ``merge_*``/``create_*``/
    ``delete_*`` method fills a ``CE_*`` edit-config template, pushes it via
    NETCONF, fails the Ansible module if the device does not answer ``<ok/>``,
    and returns the list of equivalent CLI commands for change reporting.
    """

    def netconf_get_config(self, **kwargs):
        """ Get configure by netconf """
        module = kwargs["module"]
        conf_str = kwargs["conf_str"]
        # get_nc_config is a shared CloudEngine helper (imported elsewhere
        # in this file); returns the reply as an XML string.
        xml_str = get_nc_config(module, conf_str)
        return xml_str

    def netconf_set_config(self, **kwargs):
        """ Set configure by netconf """
        module = kwargs["module"]
        conf_str = kwargs["conf_str"]
        # set_nc_config is a shared CloudEngine helper (imported elsewhere
        # in this file); the reply contains "<ok/>" on success.
        recv_xml = set_nc_config(module, conf_str)
        return recv_xml

    def get_authentication_scheme(self, **kwargs):
        """ Get scheme of authentication """
        module = kwargs["module"]
        conf_str = CE_GET_AUTHENTICATION_SCHEME
        xml_str = self.netconf_get_config(module=module, conf_str=conf_str)
        result = list()
        # "<data/>" marks an empty NETCONF reply: nothing configured.
        if "<data/>" in xml_str:
            return result
        else:
            # Each match: (scheme name, first mode, second mode).
            re_find = re.findall(
                r'.*<authenSchemeName>(.*)</authenSchemeName>.*\s*'
                r'<firstAuthenMode>(.*)</firstAuthenMode>.*\s*'
                r'<secondAuthenMode>(.*)</secondAuthenMode>.*\s*', xml_str)
            if re_find:
                return re_find
            else:
                return result

    def get_authentication_domain(self, **kwargs):
        """ Get domain of authentication """
        module = kwargs["module"]
        conf_str = CE_GET_AUTHENTICATION_DOMAIN
        xml_str = self.netconf_get_config(module=module, conf_str=conf_str)
        result = list()
        if "<data/>" in xml_str:
            return result
        else:
            # Each match: (domain name, authentication scheme name).
            re_find = re.findall(
                r'.*<domainName>(.*)</domainName>.*\s*'
                r'<authenSchemeName>(.*)</authenSchemeName>.*', xml_str)
            if re_find:
                return re_find
            else:
                return result

    def merge_authentication_scheme(self, **kwargs):
        """ Merge scheme of authentication """
        authen_scheme_name = kwargs["authen_scheme_name"]
        first_authen_mode = kwargs["first_authen_mode"]
        module = kwargs["module"]
        conf_str = CE_MERGE_AUTHENTICATION_SCHEME % (
            authen_scheme_name, first_authen_mode)
        xml = self.netconf_set_config(module=module, conf_str=conf_str)
        if "<ok/>" not in xml:
            module.fail_json(msg='Error: Merge authentication scheme failed.')
        # Equivalent CLI commands, returned for change reporting.
        cmds = []
        cmd = "authentication-scheme %s" % authen_scheme_name
        cmds.append(cmd)
        cmd = "authentication-mode %s" % first_authen_mode
        cmds.append(cmd)
        return cmds

    def merge_authentication_domain(self, **kwargs):
        """ Merge domain of authentication """
        domain_name = kwargs["domain_name"]
        authen_scheme_name = kwargs["authen_scheme_name"]
        module = kwargs["module"]
        conf_str = CE_MERGE_AUTHENTICATION_DOMAIN % (
            domain_name, authen_scheme_name)
        xml = self.netconf_set_config(module=module, conf_str=conf_str)
        if "<ok/>" not in xml:
            module.fail_json(msg='Error: Merge authentication domain failed.')
        cmds = []
        cmd = "domain %s" % domain_name
        cmds.append(cmd)
        cmd = "authentication-scheme %s" % authen_scheme_name
        cmds.append(cmd)
        return cmds

    def create_authentication_scheme(self, **kwargs):
        """ Create scheme of authentication """
        authen_scheme_name = kwargs["authen_scheme_name"]
        first_authen_mode = kwargs["first_authen_mode"]
        module = kwargs["module"]
        conf_str = CE_CREATE_AUTHENTICATION_SCHEME % (
            authen_scheme_name, first_authen_mode)
        xml = self.netconf_set_config(module=module, conf_str=conf_str)
        if "<ok/>" not in xml:
            module.fail_json(msg='Error: Create authentication scheme failed.')
        cmds = []
        cmd = "authentication-scheme %s" % authen_scheme_name
        cmds.append(cmd)
        cmd = "authentication-mode %s" % first_authen_mode
        cmds.append(cmd)
        return cmds

    def create_authentication_domain(self, **kwargs):
        """ Create domain of authentication """
        domain_name = kwargs["domain_name"]
        authen_scheme_name = kwargs["authen_scheme_name"]
        module = kwargs["module"]
        conf_str = CE_CREATE_AUTHENTICATION_DOMAIN % (
            domain_name, authen_scheme_name)
        xml = self.netconf_set_config(module=module, conf_str=conf_str)
        if "<ok/>" not in xml:
            module.fail_json(msg='Error: Create authentication domain failed.')
        cmds = []
        cmd = "domain %s" % domain_name
        cmds.append(cmd)
        cmd = "authentication-scheme %s" % authen_scheme_name
        cmds.append(cmd)
        return cmds

    def delete_authentication_scheme(self, **kwargs):
        """ Delete scheme of authentication """
        authen_scheme_name = kwargs["authen_scheme_name"]
        first_authen_mode = kwargs["first_authen_mode"]
        module = kwargs["module"]
        # "default" is the device's built-in scheme and cannot be deleted;
        # SUCCESS is a module-level constant defined elsewhere in this file.
        if authen_scheme_name == "default":
            return SUCCESS
        conf_str = CE_DELETE_AUTHENTICATION_SCHEME % (
            authen_scheme_name, first_authen_mode)
        xml = self.netconf_set_config(module=module, conf_str=conf_str)
        if "<ok/>" not in xml:
            module.fail_json(msg='Error: Delete authentication scheme failed.')
        cmds = []
        cmd = "undo authentication-scheme %s" % authen_scheme_name
        cmds.append(cmd)
        cmd = "authentication-mode none"
        cmds.append(cmd)
        return cmds

    def delete_authentication_domain(self, **kwargs):
        """ Delete domain of authentication """
        domain_name = kwargs["domain_name"]
        authen_scheme_name = kwargs["authen_scheme_name"]
        module = kwargs["module"]
        # The built-in "default" domain cannot be deleted.
        if domain_name == "default":
            return SUCCESS
        conf_str = CE_DELETE_AUTHENTICATION_DOMAIN % (
            domain_name, authen_scheme_name)
        xml = self.netconf_set_config(module=module, conf_str=conf_str)
        if "<ok/>" not in xml:
            module.fail_json(msg='Error: Delete authentication domain failed.')
        cmds = []
        cmd = "undo authentication-scheme"
        cmds.append(cmd)
        cmd = "undo domain %s" % domain_name
        cmds.append(cmd)
        return cmds

    def get_authorization_scheme(self, **kwargs):
        """ Get scheme of authorization """
        module = kwargs["module"]
        conf_str = CE_GET_AUTHORIZATION_SCHEME
        xml_str = self.netconf_get_config(module=module, conf_str=conf_str)
        result = list()
        if "<data/>" in xml_str:
            return result
        else:
            # Each match: (scheme name, first mode, second mode).
            re_find = re.findall(
                r'.*<authorSchemeName>(.*)</authorSchemeName>.*\s*'
                r'<firstAuthorMode>(.*)</firstAuthorMode>.*\s*'
                r'<secondAuthorMode>(.*)</secondAuthorMode>.*\s*', xml_str)
            if re_find:
                return re_find
            else:
                return result

    def get_authorization_domain(self, **kwargs):
        """ Get domain of authorization """
        module = kwargs["module"]
        conf_str = CE_GET_AUTHORIZATION_DOMAIN
        xml_str = self.netconf_get_config(module=module, conf_str=conf_str)
        result = list()
        if "<data/>" in xml_str:
            return result
        else:
            # Each match: (domain name, authorization scheme name).
            re_find = re.findall(
                r'.*<domainName>(.*)</domainName>.*\s*'
                r'<authorSchemeName>(.*)</authorSchemeName>.*', xml_str)
            if re_find:
                return re_find
            else:
                return result

    def merge_authorization_scheme(self, **kwargs):
        """ Merge scheme of authorization """
        author_scheme_name = kwargs["author_scheme_name"]
        first_author_mode = kwargs["first_author_mode"]
        module = kwargs["module"]
        conf_str = CE_MERGE_AUTHORIZATION_SCHEME % (
            author_scheme_name, first_author_mode)
        xml = self.netconf_set_config(module=module, conf_str=conf_str)
        if "<ok/>" not in xml:
            module.fail_json(msg='Error: Merge authorization scheme failed.')
        cmds = []
        cmd = "authorization-scheme %s" % author_scheme_name
        cmds.append(cmd)
        cmd = "authorization-mode %s" % first_author_mode
        cmds.append(cmd)
        return cmds

    def merge_authorization_domain(self, **kwargs):
        """ Merge domain of authorization """
        domain_name = kwargs["domain_name"]
        author_scheme_name = kwargs["author_scheme_name"]
        module = kwargs["module"]
        conf_str = CE_MERGE_AUTHORIZATION_DOMAIN % (
            domain_name, author_scheme_name)
        xml = self.netconf_set_config(module=module, conf_str=conf_str)
        if "<ok/>" not in xml:
            module.fail_json(msg='Error: Merge authorization domain failed.')
        cmds = []
        cmd = "domain %s" % domain_name
        cmds.append(cmd)
        cmd = "authorization-scheme %s" % author_scheme_name
        cmds.append(cmd)
        return cmds

    def create_authorization_scheme(self, **kwargs):
        """ Create scheme of authorization """
        author_scheme_name = kwargs["author_scheme_name"]
        first_author_mode = kwargs["first_author_mode"]
        module = kwargs["module"]
        conf_str = CE_CREATE_AUTHORIZATION_SCHEME % (
            author_scheme_name, first_author_mode)
        xml = self.netconf_set_config(module=module, conf_str=conf_str)
        if "<ok/>" not in xml:
            module.fail_json(msg='Error: Create authorization scheme failed.')
        cmds = []
        cmd = "authorization-scheme %s" % author_scheme_name
        cmds.append(cmd)
        cmd = "authorization-mode %s" % first_author_mode
        cmds.append(cmd)
        return cmds

    def create_authorization_domain(self, **kwargs):
        """ Create domain of authorization """
        domain_name = kwargs["domain_name"]
        author_scheme_name = kwargs["author_scheme_name"]
        module = kwargs["module"]
        conf_str = CE_CREATE_AUTHORIZATION_DOMAIN % (
            domain_name, author_scheme_name)
        xml = self.netconf_set_config(module=module, conf_str=conf_str)
        if "<ok/>" not in xml:
            module.fail_json(msg='Error: Create authorization domain failed.')
        cmds = []
        cmd = "domain %s" % domain_name
        cmds.append(cmd)
        cmd = "authorization-scheme %s" % author_scheme_name
        cmds.append(cmd)
        return cmds

    def delete_authorization_scheme(self, **kwargs):
        """ Delete scheme of authorization """
        author_scheme_name = kwargs["author_scheme_name"]
        first_author_mode = kwargs["first_author_mode"]
        module = kwargs["module"]
        # Built-in "default" scheme cannot be deleted.
        if author_scheme_name == "default":
            return SUCCESS
        conf_str = CE_DELETE_AUTHORIZATION_SCHEME % (
            author_scheme_name, first_author_mode)
        xml = self.netconf_set_config(module=module, conf_str=conf_str)
        if "<ok/>" not in xml:
            module.fail_json(msg='Error: Delete authorization scheme failed.')
        cmds = []
        cmd = "undo authorization-scheme %s" % author_scheme_name
        cmds.append(cmd)
        cmd = "authorization-mode none"
        cmds.append(cmd)
        return cmds

    def delete_authorization_domain(self, **kwargs):
        """ Delete domain of authorization """
        domain_name = kwargs["domain_name"]
        author_scheme_name = kwargs["author_scheme_name"]
        module = kwargs["module"]
        if domain_name == "default":
            return SUCCESS
        conf_str = CE_DELETE_AUTHORIZATION_DOMAIN % (
            domain_name, author_scheme_name)
        xml = self.netconf_set_config(module=module, conf_str=conf_str)
        if "<ok/>" not in xml:
            module.fail_json(msg='Error: Delete authorization domain failed.')
        cmds = []
        cmd = "undo authorization-scheme"
        cmds.append(cmd)
        cmd = "undo domain %s" % domain_name
        cmds.append(cmd)
        return cmds

    def get_accounting_scheme(self, **kwargs):
        """ Get scheme of accounting """
        module = kwargs["module"]
        conf_str = CE_GET_ACCOUNTING_SCHEME
        xml_str = self.netconf_get_config(module=module, conf_str=conf_str)
        result = list()
        if "<data/>" in xml_str:
            return result
        else:
            # Each match: (accounting scheme name, accounting mode).
            re_find = re.findall(r'.*<acctSchemeName>(.*)</acctSchemeName>\s*<accountingMode>(.*)</accountingMode>', xml_str)
            if re_find:
                return re_find
            else:
                return result

    def get_accounting_domain(self, **kwargs):
        """ Get domain of accounting """
        module = kwargs["module"]
        conf_str = CE_GET_ACCOUNTING_DOMAIN
        xml_str = self.netconf_get_config(module=module, conf_str=conf_str)
        result = list()
        if "<data/>" in xml_str:
            return result
        else:
            # Each match: (domain name, accounting scheme name).
            re_find = re.findall(
                r'.*<domainName>(.*)</domainName>.*\s*'
                r'<acctSchemeName>(.*)</acctSchemeName>.*', xml_str)
            if re_find:
                return re_find
            else:
                return result

    def merge_accounting_scheme(self, **kwargs):
        """ Merge scheme of accounting """
        acct_scheme_name = kwargs["acct_scheme_name"]
        accounting_mode = kwargs["accounting_mode"]
        module = kwargs["module"]
        conf_str = CE_MERGE_ACCOUNTING_SCHEME % (
            acct_scheme_name, accounting_mode)
        xml = self.netconf_set_config(module=module, conf_str=conf_str)
        if "<ok/>" not in xml:
            module.fail_json(msg='Error: Merge accounting scheme failed.')
        cmds = []
        cmd = "accounting-scheme %s" % acct_scheme_name
        cmds.append(cmd)
        cmd = "accounting-mode %s" % accounting_mode
        cmds.append(cmd)
        return cmds

    def merge_accounting_domain(self, **kwargs):
        """ Merge domain of accounting """
        domain_name = kwargs["domain_name"]
        acct_scheme_name = kwargs["acct_scheme_name"]
        module = kwargs["module"]
        conf_str = CE_MERGE_ACCOUNTING_DOMAIN % (domain_name, acct_scheme_name)
        xml = self.netconf_set_config(module=module, conf_str=conf_str)
        if "<ok/>" not in xml:
            module.fail_json(msg='Error: Merge accounting domain failed.')
        cmds = []
        cmd = "domain %s" % domain_name
        cmds.append(cmd)
        cmd = "accounting-scheme %s" % acct_scheme_name
        cmds.append(cmd)
        return cmds

    def create_accounting_scheme(self, **kwargs):
        """ Create scheme of accounting """
        acct_scheme_name = kwargs["acct_scheme_name"]
        accounting_mode = kwargs["accounting_mode"]
        module = kwargs["module"]
        conf_str = CE_CREATE_ACCOUNTING_SCHEME % (
            acct_scheme_name, accounting_mode)
        xml = self.netconf_set_config(module=module, conf_str=conf_str)
        if "<ok/>" not in xml:
            module.fail_json(msg='Error: Create accounting scheme failed.')
        cmds = []
        cmd = "accounting-scheme %s" % acct_scheme_name
        cmds.append(cmd)
        cmd = "accounting-mode %s" % accounting_mode
        cmds.append(cmd)
        return cmds

    def create_accounting_domain(self, **kwargs):
        """ Create domain of accounting """
        domain_name = kwargs["domain_name"]
        acct_scheme_name = kwargs["acct_scheme_name"]
        module = kwargs["module"]
        conf_str = CE_CREATE_ACCOUNTING_DOMAIN % (
            domain_name, acct_scheme_name)
        xml = self.netconf_set_config(module=module, conf_str=conf_str)
        if "<ok/>" not in xml:
            module.fail_json(msg='Error: Create accounting domain failed.')
        cmds = []
        cmd = "domain %s" % domain_name
        cmds.append(cmd)
        cmd = "accounting-scheme %s" % acct_scheme_name
        cmds.append(cmd)
        return cmds

    def delete_accounting_scheme(self, **kwargs):
        """ Delete scheme of accounting """
        acct_scheme_name = kwargs["acct_scheme_name"]
        accounting_mode = kwargs["accounting_mode"]
        module = kwargs["module"]
        # Built-in "default" scheme cannot be deleted.
        if acct_scheme_name == "default":
            return SUCCESS
        conf_str = CE_DELETE_ACCOUNTING_SCHEME % (
            acct_scheme_name, accounting_mode)
        xml = self.netconf_set_config(module=module, conf_str=conf_str)
        if "<ok/>" not in xml:
            module.fail_json(msg='Error: Delete accounting scheme failed.')
        cmds = []
        cmd = "undo accounting-scheme %s" % acct_scheme_name
        cmds.append(cmd)
        cmd = "accounting-mode none"
        cmds.append(cmd)
        return cmds

    def delete_accounting_domain(self, **kwargs):
        """ Delete domain of accounting """
        domain_name = kwargs["domain_name"]
        acct_scheme_name = kwargs["acct_scheme_name"]
        module = kwargs["module"]
        if domain_name == "default":
            return SUCCESS
        conf_str = CE_DELETE_ACCOUNTING_DOMAIN % (
            domain_name, acct_scheme_name)
        xml = self.netconf_set_config(module=module, conf_str=conf_str)
        if "<ok/>" not in xml:
            module.fail_json(msg='Error: Delete accounting domain failed.')
        # NOTE(review): command order here ("undo domain" before "undo
        # accounting-scheme") differs from the authentication/authorization
        # variants — presumably intentional; confirm against device CLI.
        cmds = []
        cmd = "undo domain %s" % domain_name
        cmds.append(cmd)
        cmd = "undo accounting-scheme"
        cmds.append(cmd)
        return cmds

    def get_radius_template(self, **kwargs):
        """ Get radius template """
        module = kwargs["module"]
        conf_str = CE_GET_RADIUS_TEMPLATE
        xml_str = self.netconf_get_config(module=module, conf_str=conf_str)
        result = list()
        if "<data/>" in xml_str:
            return result
        else:
            # Each match: RADIUS server group name.
            re_find = re.findall(
                r'.*<groupName>(.*)</groupName>.*', xml_str)
            if re_find:
                return re_find
            else:
                return result

    def merge_radius_template(self, **kwargs):
        """ Merge radius template """
        radius_server_group = kwargs["radius_server_group"]
        module = kwargs["module"]
        conf_str = CE_MERGE_RADIUS_TEMPLATE % radius_server_group
        xml = self.netconf_set_config(module=module, conf_str=conf_str)
        if "<ok/>" not in xml:
            module.fail_json(msg='Error: Merge radius template failed.')
        cmds = []
        cmd = "radius server group %s" % radius_server_group
        cmds.append(cmd)
        return cmds

    def create_radius_template(self, **kwargs):
        """ Create radius template """
        radius_server_group = kwargs["radius_server_group"]
        module = kwargs["module"]
        conf_str = CE_CREATE_RADIUS_TEMPLATE % radius_server_group
        xml = self.netconf_set_config(module=module, conf_str=conf_str)
        if "<ok/>" not in xml:
            module.fail_json(msg='Error: Create radius template failed.')
        cmds = []
        cmd = "radius server group %s" % radius_server_group
        cmds.append(cmd)
        return cmds

    def delete_radius_template(self, **kwargs):
        """ Delete radius template """
        radius_server_group = kwargs["radius_server_group"]
        module = kwargs["module"]
        conf_str = CE_DELETE_RADIUS_TEMPLATE % radius_server_group
        xml = self.netconf_set_config(module=module, conf_str=conf_str)
        if "<ok/>" not in xml:
            module.fail_json(msg='Error: Delete radius template failed.')
        cmds = []
        cmd = "undo radius server group %s" % radius_server_group
        cmds.append(cmd)
        return cmds

    def get_radius_client(self, **kwargs):
        """ Get radius client """
        module = kwargs["module"]
        conf_str = CE_GET_RADIUS_CLIENT
        xml_str = self.netconf_get_config(module=module, conf_str=conf_str)
        result = list()
        if "<data/>" in xml_str:
            return result
        else:
            # Each match: the "true"/"false" enable state.
            re_find = re.findall(
                r'.*<isEnable>(.*)</isEnable>.*', xml_str)
            if re_find:
                return re_find
            else:
                return result

    def merge_radius_client(self, **kwargs):
        """ Merge radius client """
        enable = kwargs["isEnable"]
        module = kwargs["module"]
        conf_str = CE_MERGE_RADIUS_CLIENT % enable
        xml = self.netconf_set_config(module=module, conf_str=conf_str)
        if "<ok/>" not in xml:
            module.fail_json(msg='Error: Merge radius client failed.')
        cmds = []
        # enable is the string "true"/"false", not a bool.
        if enable == "true":
            cmd = "radius enable"
        else:
            cmd = "undo radius enable"
        cmds.append(cmd)
        return cmds

    def get_hwtacacs_template(self, **kwargs):
        """ Get hwtacacs template """
        module = kwargs["module"]
        conf_str = CE_GET_HWTACACS_TEMPLATE
        xml_str = self.netconf_get_config(module=module, conf_str=conf_str)
        result = list()
        if "<data/>" in xml_str:
            return result
        else:
            # Each match: HWTACACS template name.
            re_find = re.findall(
                r'.*<templateName>(.*)</templateName>.*', xml_str)
            if re_find:
                return re_find
            else:
                return result

    def merge_hwtacacs_template(self, **kwargs):
        """ Merge hwtacacs template """
        hwtacas_template = kwargs["hwtacas_template"]
        module = kwargs["module"]
        conf_str = CE_MERGE_HWTACACS_TEMPLATE % hwtacas_template
        xml = self.netconf_set_config(module=module, conf_str=conf_str)
        if "<ok/>" not in xml:
            module.fail_json(msg='Error: Merge hwtacacs template failed.')
        cmds = []
        cmd = "hwtacacs server template %s" % hwtacas_template
        cmds.append(cmd)
        return cmds

    def create_hwtacacs_template(self, **kwargs):
        """ Create hwtacacs template """
        hwtacas_template = kwargs["hwtacas_template"]
        module = kwargs["module"]
        conf_str = CE_CREATE_HWTACACS_TEMPLATE % hwtacas_template
        xml = self.netconf_set_config(module=module, conf_str=conf_str)
        if "<ok/>" not in xml:
            module.fail_json(msg='Error: Create hwtacacs template failed.')
        cmds = []
        cmd = "hwtacacs server template %s" % hwtacas_template
        cmds.append(cmd)
        return cmds

    def delete_hwtacacs_template(self, **kwargs):
        """ Delete hwtacacs template """
        hwtacas_template = kwargs["hwtacas_template"]
        module = kwargs["module"]
        conf_str = CE_DELETE_HWTACACS_TEMPLATE % hwtacas_template
        xml = self.netconf_set_config(module=module, conf_str=conf_str)
        if "<ok/>" not in xml:
            module.fail_json(msg='Error: Delete hwtacacs template failed.')
        cmds = []
        cmd = "undo hwtacacs server template %s" % hwtacas_template
        cmds.append(cmd)
        return cmds

    def get_hwtacacs_global_cfg(self, **kwargs):
        """ Get hwtacacs global configure """
        module = kwargs["module"]
        conf_str = CE_GET_HWTACACS_GLOBAL_CFG
        xml_str = self.netconf_get_config(module=module, conf_str=conf_str)
        result = list()
        if "<data/>" in xml_str:
            return result
        else:
            # Each match: the "true"/"false" enable state.
            re_find = re.findall(
                r'.*<isEnable>(.*)</isEnable>.*', xml_str)
            if re_find:
                return re_find
            else:
                return result

    def merge_hwtacacs_global_cfg(self, **kwargs):
        """ Merge hwtacacs global configure """
        enable = kwargs["isEnable"]
        module = kwargs["module"]
        conf_str = CE_MERGE_HWTACACS_GLOBAL_CFG % enable
        xml = self.netconf_set_config(module=module, conf_str=conf_str)
        if "<ok/>" not in xml:
            module.fail_json(msg='Error: Merge hwtacacs global config failed.')
        cmds = []
        # enable is the string "true"/"false", not a bool.
        if enable == "true":
            cmd = "hwtacacs enable"
        else:
            cmd = "undo hwtacacs enable"
        cmds.append(cmd)
        return cmds

    def get_local_user_group(self, **kwargs):
        """ Get local user group """
        module = kwargs["module"]
        conf_str = CE_GET_LOCAL_USER_GROUP
        xml_str = self.netconf_get_config(module=module, conf_str=conf_str)
        result = list()
        if "<data/>" in xml_str:
            return result
        else:
            # Each match: local user group name.
            re_find = re.findall(
                r'.*<userGroupName>(.*)</userGroupName>.*', xml_str)
            if re_find:
                return re_find
            else:
                return result

    def merge_local_user_group(self, **kwargs):
        """ Merge local user group """
        local_user_group = kwargs["local_user_group"]
        module = kwargs["module"]
        conf_str = CE_MERGE_LOCAL_USER_GROUP % local_user_group
        xml = self.netconf_set_config(module=module, conf_str=conf_str)
        if "<ok/>" not in xml:
            module.fail_json(msg='Error: Merge local user group failed.')
        cmds = []
        cmd = "user-group %s" % local_user_group
        cmds.append(cmd)
        return cmds

    def delete_local_user_group(self, **kwargs):
        """ Delete local user group """
        local_user_group = kwargs["local_user_group"]
        module = kwargs["module"]
        conf_str = CE_DELETE_LOCAL_USER_GROUP % local_user_group
        xml = self.netconf_set_config(module=module, conf_str=conf_str)
        if "<ok/>" not in xml:
            module.fail_json(msg='Error: Delete local user group failed.')
        cmds = []
        cmd = "undo user-group %s" % local_user_group
        cmds.append(cmd)
        return cmds
def check_name(**kwargs):
    """Fail the Ansible module if *name* contains any forbidden character.

    Expects keyword arguments ``module``, ``name`` and ``invalid_char``
    (an iterable of forbidden characters).  ``module.fail_json`` is invoked
    once per forbidden character found in the name.
    """
    module = kwargs["module"]
    name = kwargs["name"]
    invalid_char = kwargs["invalid_char"]
    # Preserve the original order: report each offending character as found.
    for bad_char in (ch for ch in invalid_char if ch in name):
        module.fail_json(
            msg='Error: invalid char %s is in the name %s.' % (bad_char, name))
def check_module_argument(**kwargs):
    """Validate the AAA module parameters before any device interaction.

    Enforces the CloudEngine name-length limits (32 characters for scheme,
    server-group and user-group names; 64 for domain names), rejects names
    containing forbidden characters via ``check_name``, and refuses the
    reserved domain names "-" and "--".  Calls ``module.fail_json`` on the
    first violation.
    """
    module = kwargs["module"]

    authen_scheme_name = module.params['authen_scheme_name']
    author_scheme_name = module.params['author_scheme_name']
    acct_scheme_name = module.params['acct_scheme_name']
    domain_name = module.params['domain_name']
    radius_server_group = module.params['radius_server_group']
    hwtacas_template = module.params['hwtacas_template']
    local_user_group = module.params['local_user_group']

    # Fix: error messages previously read "is large than" (grammar).
    if authen_scheme_name:
        if len(authen_scheme_name) > 32:
            module.fail_json(
                msg='Error: authen_scheme_name %s '
                    'is larger than 32.' % authen_scheme_name)
        check_name(module=module, name=authen_scheme_name,
                   invalid_char=INVALID_SCHEME_CHAR)

    if author_scheme_name:
        if len(author_scheme_name) > 32:
            module.fail_json(
                msg='Error: author_scheme_name %s '
                    'is larger than 32.' % author_scheme_name)
        check_name(module=module, name=author_scheme_name,
                   invalid_char=INVALID_SCHEME_CHAR)

    if acct_scheme_name:
        if len(acct_scheme_name) > 32:
            module.fail_json(
                msg='Error: acct_scheme_name %s '
                    'is larger than 32.' % acct_scheme_name)
        check_name(module=module, name=acct_scheme_name,
                   invalid_char=INVALID_SCHEME_CHAR)

    if domain_name:
        if len(domain_name) > 64:
            module.fail_json(
                msg='Error: domain_name %s '
                    'is larger than 64.' % domain_name)
        check_name(module=module, name=domain_name,
                   invalid_char=INVALID_DOMAIN_CHAR)
        # "-" and "--" are reserved domain names on the device.
        # Fix: added the 'Error:' prefix for consistency with every other
        # failure message in this function.
        if domain_name == "-" or domain_name == "--":
            module.fail_json(msg='Error: domain_name %s '
                                 'is invalid.' % domain_name)

    if radius_server_group and len(radius_server_group) > 32:
        module.fail_json(msg='Error: radius_server_group %s '
                             'is larger than 32.' % radius_server_group)

    if hwtacas_template and len(hwtacas_template) > 32:
        module.fail_json(
            msg='Error: hwtacas_template %s '
                'is larger than 32.' % hwtacas_template)

    if local_user_group:
        if len(local_user_group) > 32:
            module.fail_json(
                msg='Error: local_user_group %s '
                    'is larger than 32.' % local_user_group)
        check_name(module=module, name=local_user_group,
                   invalid_char=INVALID_GROUP_CHAR)
def main():
    """ Module main

    Entry point for the CloudEngine AAA-server Ansible module: reads the
    task parameters, queries the device's current AAA configuration via
    AaaServer, applies create/merge/delete operations so the device
    matches the requested `state`, and reports proposed/existing/end
    state plus the CLI updates through module.exit_json().
    """
    # Task parameters: which AAA scheme/domain/server-group names to
    # manage and which authentication/authorization/accounting modes to
    # bind to them.
    argument_spec = dict(
        state=dict(choices=['present', 'absent'], default='present'),
        authen_scheme_name=dict(type='str'),
        first_authen_mode=dict(default='local', choices=['invalid', 'local', 'hwtacacs', 'radius', 'none']),
        author_scheme_name=dict(type='str'),
        first_author_mode=dict(default='local', choices=['invalid', 'local', 'hwtacacs', 'if-authenticated', 'none']),
        acct_scheme_name=dict(type='str'),
        accounting_mode=dict(default='none', choices=['invalid', 'hwtacacs', 'radius', 'none']),
        domain_name=dict(type='str'),
        radius_server_group=dict(type='str'),
        hwtacas_template=dict(type='str'),
        local_user_group=dict(type='str')
    )
    argument_spec.update(ce_argument_spec)
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    # Validates name lengths/characters; fails the module on bad input.
    check_module_argument(module=module)
    # Result bookkeeping returned via exit_json() at the end.
    changed = False
    proposed = dict()
    existing = dict()
    end_state = dict()
    updates = []
    state = module.params['state']
    authen_scheme_name = module.params['authen_scheme_name']
    first_authen_mode = module.params['first_authen_mode']
    author_scheme_name = module.params['author_scheme_name']
    first_author_mode = module.params['first_author_mode']
    acct_scheme_name = module.params['acct_scheme_name']
    accounting_mode = module.params['accounting_mode']
    domain_name = module.params['domain_name']
    radius_server_group = module.params['radius_server_group']
    hwtacas_template = module.params['hwtacas_template']
    local_user_group = module.params['local_user_group']
    ce_aaa_server = AaaServer()
    # NOTE(review): a freshly constructed object is always truthy, so this
    # guard only fires if AaaServer() itself returns a falsy value --
    # confirm against the AaaServer definition.
    if not ce_aaa_server:
        module.fail_json(msg='Error: init module failed.')
    # get proposed: echo back only the parameters the caller supplied.
    proposed["state"] = state
    if authen_scheme_name:
        proposed["authen_scheme_name"] = authen_scheme_name
    if first_authen_mode:
        proposed["first_authen_mode"] = first_authen_mode
    if author_scheme_name:
        proposed["author_scheme_name"] = author_scheme_name
    if first_author_mode:
        proposed["first_author_mode"] = first_author_mode
    if acct_scheme_name:
        proposed["acct_scheme_name"] = acct_scheme_name
    if accounting_mode:
        proposed["accounting_mode"] = accounting_mode
    if domain_name:
        proposed["domain_name"] = domain_name
    if radius_server_group:
        proposed["radius_server_group"] = radius_server_group
    if hwtacas_template:
        proposed["hwtacas_template"] = hwtacas_template
    if local_user_group:
        proposed["local_user_group"] = local_user_group
    # authentication: create/merge the scheme (and optionally bind it to a
    # domain) for state=present; delete scheme or domain for state=absent.
    if authen_scheme_name:
        scheme_exist = ce_aaa_server.get_authentication_scheme(module=module)
        scheme_new = (authen_scheme_name.lower(), first_authen_mode.lower(), "invalid")
        existing["authentication scheme"] = scheme_exist
        if state == "present":
            # present authentication scheme
            if len(scheme_exist) == 0:
                cmd = ce_aaa_server.create_authentication_scheme(
                    module=module,
                    authen_scheme_name=authen_scheme_name,
                    first_authen_mode=first_authen_mode)
                updates.append(cmd)
                changed = True
            elif scheme_new not in scheme_exist:
                cmd = ce_aaa_server.merge_authentication_scheme(
                    module=module,
                    authen_scheme_name=authen_scheme_name,
                    first_authen_mode=first_authen_mode)
                updates.append(cmd)
                changed = True
            # present authentication domain
            if domain_name:
                domain_exist = ce_aaa_server.get_authentication_domain(
                    module=module)
                domain_new = (domain_name.lower(), authen_scheme_name.lower())
                if len(domain_exist) == 0:
                    cmd = ce_aaa_server.create_authentication_domain(
                        module=module,
                        domain_name=domain_name,
                        authen_scheme_name=authen_scheme_name)
                    updates.append(cmd)
                    changed = True
                elif domain_new not in domain_exist:
                    cmd = ce_aaa_server.merge_authentication_domain(
                        module=module,
                        domain_name=domain_name,
                        authen_scheme_name=authen_scheme_name)
                    updates.append(cmd)
                    changed = True
        else:
            # absent authentication scheme (only when no domain was given;
            # with a domain, only the domain binding is removed).
            if not domain_name:
                if len(scheme_exist) == 0:
                    pass
                elif scheme_new not in scheme_exist:
                    pass
                else:
                    cmd = ce_aaa_server.delete_authentication_scheme(
                        module=module,
                        authen_scheme_name=authen_scheme_name,
                        first_authen_mode=first_authen_mode)
                    updates.append(cmd)
                    changed = True
            # absent authentication domain
            else:
                domain_exist = ce_aaa_server.get_authentication_domain(
                    module=module)
                domain_new = (domain_name.lower(), authen_scheme_name.lower())
                if len(domain_exist) == 0:
                    pass
                elif domain_new not in domain_exist:
                    pass
                else:
                    cmd = ce_aaa_server.delete_authentication_domain(
                        module=module,
                        domain_name=domain_name,
                        authen_scheme_name=authen_scheme_name)
                    updates.append(cmd)
                    changed = True
        # Re-read the device so end_state reflects what was applied.
        scheme_end = ce_aaa_server.get_authentication_scheme(module=module)
        end_state["authentication scheme"] = scheme_end
    # authorization: same create/merge/delete pattern as authentication.
    if author_scheme_name:
        scheme_exist = ce_aaa_server.get_authorization_scheme(module=module)
        scheme_new = (author_scheme_name.lower(), first_author_mode.lower(), "invalid")
        existing["authorization scheme"] = scheme_exist
        if state == "present":
            # present authorization scheme
            if len(scheme_exist) == 0:
                cmd = ce_aaa_server.create_authorization_scheme(
                    module=module,
                    author_scheme_name=author_scheme_name,
                    first_author_mode=first_author_mode)
                updates.append(cmd)
                changed = True
            elif scheme_new not in scheme_exist:
                cmd = ce_aaa_server.merge_authorization_scheme(
                    module=module,
                    author_scheme_name=author_scheme_name,
                    first_author_mode=first_author_mode)
                updates.append(cmd)
                changed = True
            # present authorization domain
            if domain_name:
                domain_exist = ce_aaa_server.get_authorization_domain(
                    module=module)
                domain_new = (domain_name.lower(), author_scheme_name.lower())
                if len(domain_exist) == 0:
                    cmd = ce_aaa_server.create_authorization_domain(
                        module=module,
                        domain_name=domain_name,
                        author_scheme_name=author_scheme_name)
                    updates.append(cmd)
                    changed = True
                elif domain_new not in domain_exist:
                    cmd = ce_aaa_server.merge_authorization_domain(
                        module=module,
                        domain_name=domain_name,
                        author_scheme_name=author_scheme_name)
                    updates.append(cmd)
                    changed = True
        else:
            # absent authorization scheme
            if not domain_name:
                if len(scheme_exist) == 0:
                    pass
                elif scheme_new not in scheme_exist:
                    pass
                else:
                    cmd = ce_aaa_server.delete_authorization_scheme(
                        module=module,
                        author_scheme_name=author_scheme_name,
                        first_author_mode=first_author_mode)
                    updates.append(cmd)
                    changed = True
            # absent authorization domain
            else:
                domain_exist = ce_aaa_server.get_authorization_domain(
                    module=module)
                domain_new = (domain_name.lower(), author_scheme_name.lower())
                if len(domain_exist) == 0:
                    pass
                elif domain_new not in domain_exist:
                    pass
                else:
                    cmd = ce_aaa_server.delete_authorization_domain(
                        module=module,
                        domain_name=domain_name,
                        author_scheme_name=author_scheme_name)
                    updates.append(cmd)
                    changed = True
        scheme_end = ce_aaa_server.get_authorization_scheme(module=module)
        end_state["authorization scheme"] = scheme_end
    # accounting: same pattern again (note: scheme tuple has no third field).
    if acct_scheme_name:
        scheme_exist = ce_aaa_server.get_accounting_scheme(module=module)
        scheme_new = (acct_scheme_name.lower(), accounting_mode.lower())
        existing["accounting scheme"] = scheme_exist
        if state == "present":
            # present accounting scheme
            if len(scheme_exist) == 0:
                cmd = ce_aaa_server.create_accounting_scheme(
                    module=module,
                    acct_scheme_name=acct_scheme_name,
                    accounting_mode=accounting_mode)
                updates.append(cmd)
                changed = True
            elif scheme_new not in scheme_exist:
                cmd = ce_aaa_server.merge_accounting_scheme(
                    module=module,
                    acct_scheme_name=acct_scheme_name,
                    accounting_mode=accounting_mode)
                updates.append(cmd)
                changed = True
            # present accounting domain
            if domain_name:
                domain_exist = ce_aaa_server.get_accounting_domain(
                    module=module)
                domain_new = (domain_name.lower(), acct_scheme_name.lower())
                if len(domain_exist) == 0:
                    cmd = ce_aaa_server.create_accounting_domain(
                        module=module,
                        domain_name=domain_name,
                        acct_scheme_name=acct_scheme_name)
                    updates.append(cmd)
                    changed = True
                elif domain_new not in domain_exist:
                    cmd = ce_aaa_server.merge_accounting_domain(
                        module=module,
                        domain_name=domain_name,
                        acct_scheme_name=acct_scheme_name)
                    updates.append(cmd)
                    changed = True
        else:
            # absent accounting scheme
            if not domain_name:
                if len(scheme_exist) == 0:
                    pass
                elif scheme_new not in scheme_exist:
                    pass
                else:
                    cmd = ce_aaa_server.delete_accounting_scheme(
                        module=module,
                        acct_scheme_name=acct_scheme_name,
                        accounting_mode=accounting_mode)
                    updates.append(cmd)
                    changed = True
            # absent accounting domain
            else:
                domain_exist = ce_aaa_server.get_accounting_domain(
                    module=module)
                domain_new = (domain_name.lower(), acct_scheme_name.lower())
                if len(domain_exist) == 0:
                    pass
                elif domain_new not in domain_exist:
                    pass
                else:
                    cmd = ce_aaa_server.delete_accounting_domain(
                        module=module,
                        domain_name=domain_name,
                        acct_scheme_name=acct_scheme_name)
                    updates.append(cmd)
                    changed = True
        scheme_end = ce_aaa_server.get_accounting_scheme(module=module)
        end_state["accounting scheme"] = scheme_end
    # radius group name: required (and managed) whenever any scheme above
    # selected the "radius" mode.
    if (authen_scheme_name and first_authen_mode.lower() == "radius") \
            or (acct_scheme_name and accounting_mode.lower() == "radius"):
        if not radius_server_group:
            module.fail_json(msg='please input radius_server_group when use radius.')
        rds_template_exist = ce_aaa_server.get_radius_template(module=module)
        rds_template_new = (radius_server_group)
        rds_enable_exist = ce_aaa_server.get_radius_client(module=module)
        existing["radius template"] = rds_template_exist
        existing["radius enable"] = rds_enable_exist
        if state == "present":
            # present radius group name
            if len(rds_template_exist) == 0:
                cmd = ce_aaa_server.create_radius_template(
                    module=module, radius_server_group=radius_server_group)
                updates.append(cmd)
                changed = True
            elif rds_template_new not in rds_template_exist:
                cmd = ce_aaa_server.merge_radius_template(
                    module=module, radius_server_group=radius_server_group)
                updates.append(cmd)
                changed = True
            # Make sure the radius client is globally enabled.
            rds_enable_new = ("true")
            if rds_enable_new not in rds_enable_exist:
                cmd = ce_aaa_server.merge_radius_client(
                    module=module, isEnable="true")
                updates.append(cmd)
                changed = True
        else:
            # absent radius group name
            if len(rds_template_exist) == 0:
                pass
            elif rds_template_new not in rds_template_exist:
                pass
            else:
                cmd = ce_aaa_server.delete_radius_template(
                    module=module, radius_server_group=radius_server_group)
                updates.append(cmd)
                changed = True
            rds_enable_new = ("false")
            if rds_enable_new not in rds_enable_exist:
                cmd = ce_aaa_server.merge_radius_client(
                    module=module, isEnable="false")
                updates.append(cmd)
                changed = True
            else:
                pass
        rds_template_end = ce_aaa_server.get_radius_template(module=module)
        end_state["radius template"] = rds_template_end
        rds_enable_end = ce_aaa_server.get_radius_client(module=module)
        end_state["radius enable"] = rds_enable_end
    tmp_scheme = author_scheme_name
    # hwtacas template: required (and managed) whenever any scheme above
    # selected the "hwtacacs" mode.
    if (authen_scheme_name and first_authen_mode.lower() == "hwtacacs") \
            or (tmp_scheme and first_author_mode.lower() == "hwtacacs") \
            or (acct_scheme_name and accounting_mode.lower() == "hwtacacs"):
        if not hwtacas_template:
            module.fail_json(
                msg='please input hwtacas_template when use hwtacas.')
        hwtacacs_exist = ce_aaa_server.get_hwtacacs_template(module=module)
        hwtacacs_new = (hwtacas_template)
        hwtacacs_enbale_exist = ce_aaa_server.get_hwtacacs_global_cfg(
            module=module)
        existing["hwtacacs template"] = hwtacacs_exist
        existing["hwtacacs enable"] = hwtacacs_enbale_exist
        if state == "present":
            # present hwtacas template
            if len(hwtacacs_exist) == 0:
                cmd = ce_aaa_server.create_hwtacacs_template(
                    module=module, hwtacas_template=hwtacas_template)
                updates.append(cmd)
                changed = True
            elif hwtacacs_new not in hwtacacs_exist:
                cmd = ce_aaa_server.merge_hwtacacs_template(
                    module=module, hwtacas_template=hwtacas_template)
                updates.append(cmd)
                changed = True
            hwtacacs_enbale_new = ("true")
            if hwtacacs_enbale_new not in hwtacacs_enbale_exist:
                cmd = ce_aaa_server.merge_hwtacacs_global_cfg(
                    module=module, isEnable="true")
                updates.append(cmd)
                changed = True
        else:
            # absent hwtacas template
            if len(hwtacacs_exist) == 0:
                pass
            elif hwtacacs_new not in hwtacacs_exist:
                pass
            else:
                cmd = ce_aaa_server.delete_hwtacacs_template(
                    module=module, hwtacas_template=hwtacas_template)
                updates.append(cmd)
                changed = True
            hwtacacs_enbale_new = ("false")
            if hwtacacs_enbale_new not in hwtacacs_enbale_exist:
                cmd = ce_aaa_server.merge_hwtacacs_global_cfg(
                    module=module, isEnable="false")
                updates.append(cmd)
                changed = True
            else:
                pass
        hwtacacs_end = ce_aaa_server.get_hwtacacs_template(module=module)
        end_state["hwtacacs template"] = hwtacacs_end
        hwtacacs_enable_end = ce_aaa_server.get_hwtacacs_global_cfg(
            module=module)
        end_state["hwtacacs enable"] = hwtacacs_enable_end
    # local user group: merge on present, delete on absent.
    if local_user_group:
        user_group_exist = ce_aaa_server.get_local_user_group(module=module)
        user_group_new = (local_user_group)
        existing["local user group"] = user_group_exist
        if state == "present":
            # present local user group
            if len(user_group_exist) == 0:
                cmd = ce_aaa_server.merge_local_user_group(
                    module=module, local_user_group=local_user_group)
                updates.append(cmd)
                changed = True
            elif user_group_new not in user_group_exist:
                cmd = ce_aaa_server.merge_local_user_group(
                    module=module, local_user_group=local_user_group)
                updates.append(cmd)
                changed = True
        else:
            # absent local user group
            if len(user_group_exist) == 0:
                pass
            elif user_group_new not in user_group_exist:
                pass
            else:
                cmd = ce_aaa_server.delete_local_user_group(
                    module=module, local_user_group=local_user_group)
                updates.append(cmd)
                changed = True
        user_group_end = ce_aaa_server.get_local_user_group(module=module)
        end_state["local user group"] = user_group_end
    # Standard Ansible result payload.
    results = dict()
    results['proposed'] = proposed
    results['existing'] = existing
    results['changed'] = changed
    results['end_state'] = end_state
    results['updates'] = updates
    module.exit_json(**results)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
HPPTECH/hpp_IOSTressTest | Resources/ssh/pexpect-3.2/tests/test_missing_command.py | 2 | 1382 | #!/usr/bin/env python
'''
PEXPECT LICENSE
This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt
Copyright (c) 2012, Noah Spurrier <noah@noah.org>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
import pexpect
import unittest
import PexpectTestCase
class MissingCommandTestCase (PexpectTestCase.PexpectTestCase):
    def testMissingCommand(self):
        """spawn() of a command that does not exist must raise.

        Uses unittest's assertRaises instead of the manual
        try/except/else/self.fail() dance: it fails the test when no
        exception is raised, and reports any other failure with a
        useful traceback.
        """
        self.assertRaises(Exception, pexpect.spawn, 'ZXQYQZX')
if __name__ == '__main__':
    # Discovers and runs the test case above, then exits the process.
    unittest.main()
# Suite handle for external runners that import this module instead of
# executing it directly (built only on import; unittest.main() exits first
# when run as a script).
suite = unittest.makeSuite(MissingCommandTestCase,'test')
| mit |
eblade/telegram | bin/telegram_client.py | 1 | 1818 | from __future__ import print_function
import argparse
import time
import yaml
import json
import os
import requests
# Command-line interface: recipient and message are positional; the config
# file and repeat count are optional.
parser = argparse.ArgumentParser('telegram-client')
parser.add_argument('--config-file', '-f', default='~/.telegram/client.yaml', help='config file location (default ~/.telegram/client.yaml)')
parser.add_argument('to', help='whom to send to')
parser.add_argument('message', help='message to send')
parser.add_argument('--iterate', '-i', default=-1, type=int, help='send the message X times, and append the order number')
args = parser.parse_args()

# Load the client configuration (domain/username/password).
# - `with` guarantees the file handle is closed (the previous bare
#   open() call leaked it).
# - safe_load parses plain YAML without instantiating arbitrary Python
#   objects, which yaml.load() permits and which is unsafe even for a
#   local config file.
with open(os.path.expanduser(args.config_file), 'r') as config_fp:
    config = yaml.safe_load(config_fp)

domain = config.get('domain')
username = config.get('username')
password = config.get('password')
target = args.to
# One session so the auth cookie/state is reused by every send().
session = requests.Session()
def auth(password):
    """Log in to the telegram server; terminate the process on failure.

    Posts the module-level username plus the given password as JSON to
    the /auth endpoint, keeping any session state for later send() calls.
    """
    payload = json.dumps({'username': username, 'password': password})
    response = session.post(
        'http://%s:8080/auth' % domain,
        data=payload,
        headers={'Content-Type': 'application/json'},
    )
    if response.status_code != 200:
        print('Authentication failure')
        exit(-1)
def send(message):
    """POST one message to the /send endpoint and dump the response.

    Sender, recipient and signature travel in custom X-Telegram-* headers;
    the message text is the raw request body.
    """
    telegram_headers = {
        'X-Telegram-From': '%s@%s' % (username, domain),
        'X-Telegram-To': args.to,
        'X-Telegram-Sign': 'abcd',
    }
    response = session.post(
        'http://%s:8080/send' % domain,
        data=message,
        headers=telegram_headers,
    )
    print("Status code: %i" % response.status_code)
    print(response.headers)
    print(response.text)
# Prompt only when the config file did not supply a password.  getpass
# works on both Python 2 and 3 (raw_input does not exist on Python 3,
# which broke this script despite its __future__ import) and does not
# echo the password to the terminal.
if not password:
    import getpass
    password = getpass.getpass("Password for %s@%s:" % (username, domain))
auth(password)

if args.iterate == -1:
    send(args.message)
else:
    # Send N numbered copies: "message (1)", "message (2)", ...
    for x in range(args.iterate):
        send('%s (%i)' % (args.message, x+1))
| mit |
astropy/astropy | astropy/units/format/cds_lextab.py | 8 | 1455 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file was automatically generated from ply. To re-generate this file,
# remove it from this folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/units
#
# You can then commit the changes to this file.
# cds_lextab.py. This file automatically created by PLY (version 3.11). Don't edit!
_tabversion = '3.10'
_lextokens = set(('CLOSE_BRACKET', 'CLOSE_PAREN', 'DIMENSIONLESS', 'DIVISION', 'OPEN_BRACKET', 'OPEN_PAREN', 'PRODUCT', 'SIGN', 'UFLOAT', 'UINT', 'UNIT', 'X'))
_lexreflags = 32
_lexliterals = ''
_lexstateinfo = {'INITIAL': 'inclusive'}
_lexstatere = {'INITIAL': [('(?P<t_UFLOAT>((\\d+\\.?\\d+)|(\\.\\d+))([eE][+-]?\\d+)?)|(?P<t_UINT>\\d+)|(?P<t_SIGN>[+-](?=\\d))|(?P<t_X>[x×])|(?P<t_UNIT>\\%|°|\\\\h|((?!\\d)\\w)+)|(?P<t_DIMENSIONLESS>---|-)|(?P<t_PRODUCT>\\.)|(?P<t_OPEN_PAREN>\\()|(?P<t_CLOSE_PAREN>\\))|(?P<t_OPEN_BRACKET>\\[)|(?P<t_CLOSE_BRACKET>\\])|(?P<t_DIVISION>/)', [None, ('t_UFLOAT', 'UFLOAT'), None, None, None, None, ('t_UINT', 'UINT'), ('t_SIGN', 'SIGN'), ('t_X', 'X'), ('t_UNIT', 'UNIT'), None, ('t_DIMENSIONLESS', 'DIMENSIONLESS'), (None, 'PRODUCT'), (None, 'OPEN_PAREN'), (None, 'CLOSE_PAREN'), (None, 'OPEN_BRACKET'), (None, 'CLOSE_BRACKET'), (None, 'DIVISION')])]}
_lexstateignore = {'INITIAL': ''}
_lexstateerrorf = {'INITIAL': 't_error'}
_lexstateeoff = {}
| bsd-3-clause |
allen-fdes/python_demo | venv/Lib/site-packages/pip/commands/install.py | 50 | 14063 | from __future__ import absolute_import
import logging
import operator
import os
import tempfile
import shutil
import warnings
try:
import wheel
except ImportError:
wheel = None
from pip.req import RequirementSet
from pip.basecommand import RequirementCommand
from pip.locations import virtualenv_no_global, distutils_scheme
from pip.index import PackageFinder
from pip.exceptions import (
InstallationError, CommandError, PreviousBuildDirError,
)
from pip import cmdoptions
from pip.utils import ensure_dir
from pip.utils.build import BuildDirectory
from pip.utils.deprecation import RemovedInPip8Warning
from pip.wheel import WheelCache, WheelBuilder
logger = logging.getLogger(__name__)
class InstallCommand(RequirementCommand):
    """
    Install packages from:
    - PyPI (and other indexes) using requirement specifiers.
    - VCS project urls.
    - Local project directories.
    - Local or remote source archives.
    pip also supports installing from "requirements files", which provide
    an easy way to specify a whole environment to be installed.
    """
    name = 'install'
    usage = """
      %prog [options] <requirement specifier> [package-index-options] ...
      %prog [options] -r <requirements file> [package-index-options] ...
      %prog [options] [-e] <vcs project url> ...
      %prog [options] [-e] <local project path> ...
      %prog [options] <archive url/path> ..."""
    summary = 'Install packages.'
    def __init__(self, *args, **kw):
        # Register all `pip install` command-line options on top of the
        # requirement/index options inherited from RequirementCommand.
        super(InstallCommand, self).__init__(*args, **kw)
        cmd_opts = self.cmd_opts
        cmd_opts.add_option(cmdoptions.constraints())
        cmd_opts.add_option(cmdoptions.editable())
        cmd_opts.add_option(cmdoptions.requirements())
        cmd_opts.add_option(cmdoptions.build_dir())
        cmd_opts.add_option(
            '-t', '--target',
            dest='target_dir',
            metavar='dir',
            default=None,
            help='Install packages into <dir>. '
                 'By default this will not replace existing files/folders in '
                 '<dir>. Use --upgrade to replace existing packages in <dir> '
                 'with new versions.'
        )
        cmd_opts.add_option(
            '-d', '--download', '--download-dir', '--download-directory',
            dest='download_dir',
            metavar='dir',
            default=None,
            help=("Download packages into <dir> instead of installing them, "
                  "regardless of what's already installed."),
        )
        cmd_opts.add_option(cmdoptions.download_cache())
        cmd_opts.add_option(cmdoptions.src())
        cmd_opts.add_option(
            '-U', '--upgrade',
            dest='upgrade',
            action='store_true',
            help='Upgrade all specified packages to the newest available '
                 'version. This process is recursive regardless of whether '
                 'a dependency is already satisfied.'
        )
        cmd_opts.add_option(
            '--force-reinstall',
            dest='force_reinstall',
            action='store_true',
            help='When upgrading, reinstall all packages even if they are '
                 'already up-to-date.')
        cmd_opts.add_option(
            '-I', '--ignore-installed',
            dest='ignore_installed',
            action='store_true',
            help='Ignore the installed packages (reinstalling instead).')
        cmd_opts.add_option(cmdoptions.no_deps())
        cmd_opts.add_option(cmdoptions.install_options())
        cmd_opts.add_option(cmdoptions.global_options())
        cmd_opts.add_option(
            '--user',
            dest='use_user_site',
            action='store_true',
            help="Install to the Python user install directory for your "
                 "platform. Typically ~/.local/, or %APPDATA%\Python on "
                 "Windows. (See the Python documentation for site.USER_BASE "
                 "for full details.)")
        cmd_opts.add_option(
            '--egg',
            dest='as_egg',
            action='store_true',
            help="Install packages as eggs, not 'flat', like pip normally "
                 "does. This option is not about installing *from* eggs. "
                 "(WARNING: Because this option overrides pip's normal install"
                 " logic, requirements files may not behave as expected.)")
        cmd_opts.add_option(
            '--root',
            dest='root_path',
            metavar='dir',
            default=None,
            help="Install everything relative to this alternate root "
                 "directory.")
        cmd_opts.add_option(
            "--compile",
            action="store_true",
            dest="compile",
            default=True,
            help="Compile py files to pyc",
        )
        cmd_opts.add_option(
            "--no-compile",
            action="store_false",
            dest="compile",
            help="Do not compile py files to pyc",
        )
        cmd_opts.add_option(cmdoptions.use_wheel())
        cmd_opts.add_option(cmdoptions.no_use_wheel())
        cmd_opts.add_option(cmdoptions.no_binary())
        cmd_opts.add_option(cmdoptions.only_binary())
        cmd_opts.add_option(
            '--pre',
            action='store_true',
            default=False,
            help="Include pre-release and development versions. By default, "
                 "pip only finds stable versions.")
        cmd_opts.add_option(cmdoptions.no_clean())
        # Index options come first so they render above the command options
        # in --help output.
        index_opts = cmdoptions.make_option_group(
            cmdoptions.index_group,
            self.parser,
        )
        self.parser.insert_option_group(0, index_opts)
        self.parser.insert_option_group(0, cmd_opts)
    def _build_package_finder(self, options, index_urls, session):
        """
        Create a package finder appropriate to this install command.
        This method is meant to be overridden by subclasses, not
        called directly.
        """
        return PackageFinder(
            find_links=options.find_links,
            format_control=options.format_control,
            index_urls=index_urls,
            allow_external=options.allow_external,
            allow_unverified=options.allow_unverified,
            allow_all_external=options.allow_all_external,
            trusted_hosts=options.trusted_hosts,
            allow_all_prereleases=options.pre,
            process_dependency_links=options.process_dependency_links,
            session=session,
        )
    def run(self, options, args):
        # Execute the install: normalize options, resolve requirements,
        # optionally build wheels, install (or just download), then report.
        cmdoptions.resolve_wheel_no_use_binary(options)
        cmdoptions.check_install_build_global(options)
        # A pure download never consults the installed environment.
        if options.download_dir:
            options.ignore_installed = True
        if options.build_dir:
            options.build_dir = os.path.abspath(options.build_dir)
        options.src_dir = os.path.abspath(options.src_dir)
        install_options = options.install_options or []
        if options.use_user_site:
            # --user is meaningless (and dangerous) inside a virtualenv
            # that hides the global site-packages.
            if virtualenv_no_global():
                raise InstallationError(
                    "Can not perform a '--user' install. User site-packages "
                    "are not visible in this virtualenv."
                )
            install_options.append('--user')
            install_options.append('--prefix=')
        temp_target_dir = None
        if options.target_dir:
            # --target installs into a scratch dir first, then the results
            # are moved into the real target at the end of run().
            options.ignore_installed = True
            temp_target_dir = tempfile.mkdtemp()
            options.target_dir = os.path.abspath(options.target_dir)
            if (os.path.exists(options.target_dir) and not
                    os.path.isdir(options.target_dir)):
                raise CommandError(
                    "Target path exists but is not a directory, will not "
                    "continue."
                )
            install_options.append('--home=' + temp_target_dir)
        global_options = options.global_options or []
        index_urls = [options.index_url] + options.extra_index_urls
        if options.no_index:
            logger.info('Ignoring indexes: %s', ','.join(index_urls))
            index_urls = []
        if options.download_cache:
            warnings.warn(
                "--download-cache has been deprecated and will be removed in "
                "the future. Pip now automatically uses and configures its "
                "cache.",
                RemovedInPip8Warning,
            )
        with self._build_session(options) as session:
            finder = self._build_package_finder(options, index_urls, session)
            build_delete = (not (options.no_clean or options.build_dir))
            wheel_cache = WheelCache(options.cache_dir, options.format_control)
            with BuildDirectory(options.build_dir,
                                delete=build_delete) as build_dir:
                # Collect everything to install from args/-r files/etc.
                requirement_set = RequirementSet(
                    build_dir=build_dir,
                    src_dir=options.src_dir,
                    download_dir=options.download_dir,
                    upgrade=options.upgrade,
                    as_egg=options.as_egg,
                    ignore_installed=options.ignore_installed,
                    ignore_dependencies=options.ignore_dependencies,
                    force_reinstall=options.force_reinstall,
                    use_user_site=options.use_user_site,
                    target_dir=temp_target_dir,
                    session=session,
                    pycompile=options.compile,
                    isolated=options.isolated_mode,
                    wheel_cache=wheel_cache,
                )
                self.populate_requirement_set(
                    requirement_set, args, options, finder, session, self.name,
                    wheel_cache
                )
                if not requirement_set.has_requirements:
                    return
                try:
                    if (options.download_dir or not wheel or not
                            options.cache_dir):
                        # on -d don't do complex things like building
                        # wheels, and don't try to build wheels when wheel is
                        # not installed.
                        requirement_set.prepare_files(finder)
                    else:
                        # build wheels before install.
                        wb = WheelBuilder(
                            requirement_set,
                            finder,
                            build_options=[],
                            global_options=[],
                        )
                        # Ignore the result: a failed wheel will be
                        # installed from the sdist/vcs whatever.
                        wb.build(autobuilding=True)
                    if not options.download_dir:
                        requirement_set.install(
                            install_options,
                            global_options,
                            root=options.root_path,
                        )
                        # Summarize what actually got installed, appending
                        # the version when it is known.
                        reqs = sorted(
                            requirement_set.successfully_installed,
                            key=operator.attrgetter('name'))
                        items = []
                        for req in reqs:
                            item = req.name
                            try:
                                if hasattr(req, 'installed_version'):
                                    if req.installed_version:
                                        item += '-' + req.installed_version
                            except Exception:
                                pass
                            items.append(item)
                        installed = ' '.join(items)
                        if installed:
                            logger.info('Successfully installed %s', installed)
                    else:
                        downloaded = ' '.join([
                            req.name
                            for req in requirement_set.successfully_downloaded
                        ])
                        if downloaded:
                            logger.info(
                                'Successfully downloaded %s', downloaded
                            )
                except PreviousBuildDirError:
                    # Keep the partial build dir around so the user can
                    # inspect/resume it.
                    options.no_clean = True
                    raise
                finally:
                    # Clean up
                    if not options.no_clean:
                        requirement_set.cleanup_files()
        if options.target_dir:
            # Move the scratch --home install into the requested --target
            # directory, honoring --upgrade and refusing to replace links.
            ensure_dir(options.target_dir)
            lib_dir = distutils_scheme('', home=temp_target_dir)['purelib']
            for item in os.listdir(lib_dir):
                target_item_dir = os.path.join(options.target_dir, item)
                if os.path.exists(target_item_dir):
                    if not options.upgrade:
                        logger.warning(
                            'Target directory %s already exists. Specify '
                            '--upgrade to force replacement.',
                            target_item_dir
                        )
                        continue
                    if os.path.islink(target_item_dir):
                        logger.warning(
                            'Target directory %s already exists and is '
                            'a link. Pip will not automatically replace '
                            'links, please remove if replacement is '
                            'desired.',
                            target_item_dir
                        )
                        continue
                    if os.path.isdir(target_item_dir):
                        shutil.rmtree(target_item_dir)
                    else:
                        os.remove(target_item_dir)
                shutil.move(
                    os.path.join(lib_dir, item),
                    target_item_dir
                )
            shutil.rmtree(temp_target_dir)
        return requirement_set
| mit |
Hellowlol/plexpy | lib/cherrypy/lib/httputil.py | 55 | 18204 | """HTTP library functions.
This module contains functions for building an HTTP application
framework: any one, not just one whose name starts with "Ch". ;) If you
reference any modules from some popular framework inside *this* module,
FuManChu will personally hang you up by your thumbs and submit you
to a public caning.
"""
from binascii import b2a_base64
from cherrypy._cpcompat import BaseHTTPRequestHandler, HTTPDate, ntob, ntou
from cherrypy._cpcompat import basestring, bytestr, iteritems, nativestr
from cherrypy._cpcompat import reversed, sorted, unicodestr, unquote_qs
# Status code -> (reason phrase, detail text) map, seeded from the stdlib
# BaseHTTPRequestHandler table and then patched with friendlier 500/503
# descriptions.
response_codes = BaseHTTPRequestHandler.responses.copy()
# From https://bitbucket.org/cherrypy/cherrypy/issue/361
response_codes[500] = ('Internal Server Error',
                       'The server encountered an unexpected condition '
                       'which prevented it from fulfilling the request.')
response_codes[503] = ('Service Unavailable',
                       'The server is currently unable to handle the '
                       'request due to a temporary overloading or '
                       'maintenance of the server.')
import re
import urllib
def urljoin(*atoms):
    """Join the given path ``atoms`` into a single URL string.

    Blank atoms are skipped and runs of slashes are collapsed, so a
    SCRIPT_NAME and PATH_INFO recombine into the original URL even when
    either piece is empty.  An entirely empty result becomes "/".
    """
    joined = "/".join(filter(None, atoms))
    while "//" in joined:
        joined = joined.replace("//", "/")
    # Special-case the empty result: the root URL is "/".
    return joined or "/"
def urljoin_bytes(*atoms):
    """Join the given path ``atoms`` into a single bytes URL.

    Bytes twin of urljoin(): blank atoms are skipped, runs of slashes
    collapse to one, and an empty result becomes b"/".
    """
    slash = ntob("/")
    double_slash = ntob("//")
    joined = slash.join(filter(None, atoms))
    while double_slash in joined:
        joined = joined.replace(double_slash, slash)
    # Special-case the empty result: the root URL.
    return joined or slash
def protocol_from_http(protocol_str):
    """Return an (int, int) version tuple from an 'HTTP/x.y' string.

    Reads the single-digit major/minor characters at fixed offsets 5
    and 7 of the protocol string.
    """
    major = protocol_str[5]
    minor = protocol_str[7]
    return int(major), int(minor)
def get_ranges(headervalue, content_length):
    """Return a list of (start, stop) slice indices from a Range header.

    Each pair is suitable for Python slicing: "Range: bytes=3-6" against
    a resource of length >= 7 yields [(3, 7)].

    Returns None when the header is missing or should be ignored as
    syntactically invalid (per RFC 2616 sec 14.16 the request is then
    served as a normal 200), and an empty list when every byte-range is
    unsatisfiable (the caller should answer 416).
    """
    if not headervalue:
        return None
    ranges = []
    unit, byteranges = headervalue.split("=", 1)
    for spec in byteranges.split(","):
        first, last = (piece.strip() for piece in spec.split("-", 1))
        if first:
            # "first-last" or "first-": explicit start position.
            first = int(first)
            last = int(last) if last else content_length - 1
            if first >= content_length:
                # RFC 2616 sec 14.16: a byte-range-spec whose first-byte-pos
                # is beyond the resource is simply unsatisfiable; skip it so
                # an all-skipped header yields [] (-> 416).
                continue
            if last < first:
                # Syntactically invalid spec: ignore the whole header.
                return None
            ranges.append((first, last + 1))
        else:
            # "-N": suffix range, the last N bytes.
            if not last:
                # Bare "-" is invalid; ignore the whole header.
                return None
            suffix = int(last)
            if suffix > content_length:
                # RFC 2616 sec 14.35.1: a suffix longer than the entity
                # selects the entire entity-body.
                ranges.append((0, content_length))
            else:
                ranges.append((content_length - suffix, content_length))
    return ranges
class HeaderElement(object):
    """A single element (plus its parameters) from an HTTP header value."""

    def __init__(self, value, params=None):
        self.value = value
        self.params = {} if params is None else params

    def __cmp__(self, other):
        # Python 2 ordering; compares on the bare value only.
        return cmp(self.value, other.value)

    def __lt__(self, other):
        return self.value < other.value

    def __str__(self):
        # Render as "value;k1=v1;k2=v2".
        suffix = "".join(";%s=%s" % pair for pair in iteritems(self.params))
        return str("%s%s" % (self.value, suffix))

    def __bytes__(self):
        return ntob(self.__str__())

    def __unicode__(self):
        return ntou(self.__str__())

    def parse(elementstr):
        """Transform 'token;key=val' to ('token', {'key': 'val'})."""
        # The leading atom is the value; it may itself look like
        # "token=token", which is deliberately left unsplit here.
        pieces = [p.strip() for p in elementstr.split(";") if p.strip()]
        initial_value = pieces.pop(0).strip() if pieces else ''
        params = {}
        for piece in pieces:
            halves = [p.strip() for p in piece.split("=", 1) if p.strip()]
            key = halves.pop(0)
            params[key] = halves[0] if halves else ""
        return initial_value, params
    parse = staticmethod(parse)

    def from_str(cls, elementstr):
        """Construct an instance from a string of the form 'token;key=val'."""
        value, params = cls.parse(elementstr)
        return cls(value, params)
    from_str = classmethod(from_str)
# Matches the "; q =" delimiter (spaces optional) that separates a
# media-range from its quality value in Accept* headers.
q_separator = re.compile(r'; *q *=')
class AcceptElement(HeaderElement):
    """One element (with parameters) of an Accept* header's element list.

    Instances are comparable and sortable by preference: the
    more-preferred element compares as "less than" the less-preferred
    one, so sorting a list of AcceptElement objects puts the most
    preferred value first. (Backwards, but too late to change now.)
    """
    def from_str(cls, elementstr):
        # The first "q" parameter (if any) separates the initial
        # media-range parameter(s) from the trailing accept-params.
        qvalue = None
        pieces = q_separator.split(elementstr, 1)
        media_range = pieces.pop(0).strip()
        if pieces:
            # An Accept qvalue may itself carry extensions; the other
            # headers cannot, but parsing them uniformly is simpler.
            qvalue = HeaderElement.from_str(pieces[0].strip())
        media_type, params = cls.parse(media_range)
        if qvalue is not None:
            params["q"] = qvalue
        return cls(media_type, params)
    from_str = classmethod(from_str)

    def qvalue(self):
        val = self.params.get("q", "1")
        if isinstance(val, HeaderElement):
            # The parsed "q" is a HeaderElement; unwrap its raw value.
            val = val.value
        return float(val)
    qvalue = property(qvalue, doc="The qvalue, or priority, of this value.")

    def __cmp__(self, other):
        # Python 2 ordering hook: qvalue first, then string form.
        order = cmp(self.qvalue, other.qvalue)
        if order == 0:
            order = cmp(str(self), str(other))
        return order

    def __lt__(self, other):
        # Python 3 ordering hook, mirroring __cmp__.
        if self.qvalue != other.qvalue:
            return self.qvalue < other.qvalue
        return str(self) < str(other)
RE_HEADER_SPLIT = re.compile(',(?=(?:[^"]*"[^"]*")*[^"]*$)')
def header_elements(fieldname, fieldvalue):
    """Parse a comma-separated header string into a sorted HeaderElement list.
    """
    if not fieldvalue:
        return []
    # Accept* and TE headers carry qvalues, so they need the quality-aware
    # AcceptElement parser; every other header uses plain HeaderElement.
    if fieldname.startswith("Accept") or fieldname == 'TE':
        element_cls = AcceptElement
    else:
        element_cls = HeaderElement
    parsed = [element_cls.from_str(chunk)
              for chunk in RE_HEADER_SPLIT.split(fieldvalue)]
    return list(reversed(sorted(parsed)))
def decode_TEXT(value):
    r"""Decode :rfc:`2047` TEXT (e.g. "=?utf-8?q?f=C3=BCr?=" -> "f\xfcr")."""
    try:
        from email.header import decode_header  # Python 3
    except ImportError:
        from email.Header import decode_header  # Python 2
    decoded = ""
    for atom, charset in decode_header(value):
        # Encoded words come back as bytes tagged with their charset;
        # unencoded atoms have charset None and are appended as-is.
        decoded += atom.decode(charset) if charset is not None else atom
    return decoded
def valid_status(status):
    """Return a legal HTTP (Code, Reason-phrase, Message) triple.

    The status arg must be an int, or a str that begins with an int.
    When status is an int, or a str with no reason-phrase supplied,
    a default reason-phrase is provided.
    """
    if not status:
        status = 200
    parts = str(status).split(" ", 1)
    if len(parts) == 2:
        code, reason = parts
        reason = reason.strip()
    else:
        # No reason supplied.
        code, reason = parts[0], None
    try:
        code = int(code)
    except ValueError:
        raise ValueError("Illegal response status from server "
                         "(%s is non-numeric)." % repr(code))
    if not 100 <= code <= 599:
        raise ValueError("Illegal response status from server "
                         "(%s is out of range)." % repr(code))
    if code in response_codes:
        default_reason, message = response_codes[code]
    else:
        # Unknown but still legal code: no canned reason or message.
        default_reason, message = "", ""
    if reason is None:
        reason = default_reason
    return code, reason, message
# NOTE: the parse_qs functions that follow are modified version of those
# in the python3.0 source - we need to pass through an encoding to the unquote
# method, but the default parse_qs function doesn't allow us to. These do.
def _parse_qs(qs, keep_blank_values=0, strict_parsing=0, encoding='utf-8'):
"""Parse a query given as a string argument.
Arguments:
qs: URL-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
URL encoded queries should be treated as blank strings. A
true value indicates that blanks should be retained as blank
strings. The default false value indicates that blank values
are to be ignored and treated as if they were not included.
strict_parsing: flag indicating what to do with parsing errors. If
false (the default), errors are silently ignored. If true,
errors raise a ValueError exception.
Returns a dict, as G-d intended.
"""
pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
d = {}
for name_value in pairs:
if not name_value and not strict_parsing:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
if strict_parsing:
raise ValueError("bad query field: %r" % (name_value,))
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if len(nv[1]) or keep_blank_values:
name = unquote_qs(nv[0], encoding)
value = unquote_qs(nv[1], encoding)
if name in d:
if not isinstance(d[name], list):
d[name] = [d[name]]
d[name].append(value)
else:
d[name] = value
return d
# Server-side image maps submit their query string as bare "x,y" coords.
image_map_pattern = re.compile(r"[0-9]+,[0-9]+")


def parse_query_string(query_string, keep_blank_values=True, encoding='utf-8'):
    """Build a params dictionary from a query_string.

    Duplicate key/value pairs in the provided query_string will be
    returned as {'key': [val1, val2, ...]}. Single key/values will
    be returned as strings: {'key': 'value'}.
    """
    if image_map_pattern.match(query_string):
        # Server-side image map: map the coords to 'x' and 'y'
        # (like CGI::Request does).
        x, y = query_string.split(",")[:2]
        return {'x': int(x), 'y': int(y)}
    return _parse_qs(query_string, keep_blank_values, encoding=encoding)
class CaseInsensitiveDict(dict):
    """A case-insensitive dict subclass.

    Each key is changed on entry to str(key).title(), so lookups,
    assignment, deletion and membership tests all ignore case.
    """

    # Sentinel distinguishing "no default supplied" from an explicit None.
    _MISSING = object()

    def __getitem__(self, key):
        return dict.__getitem__(self, str(key).title())

    def __setitem__(self, key, value):
        dict.__setitem__(self, str(key).title(), value)

    def __delitem__(self, key):
        dict.__delitem__(self, str(key).title())

    def __contains__(self, key):
        return dict.__contains__(self, str(key).title())

    def get(self, key, default=None):
        return dict.get(self, str(key).title(), default)

    if hasattr({}, 'has_key'):
        # Python 2 only: mirror dict.has_key with case folding.
        def has_key(self, key):
            return str(key).title() in self

    def update(self, E):
        """Title-case every key of mapping E while merging it in."""
        for k in E.keys():
            self[str(k).title()] = E[k]

    def fromkeys(cls, seq, value=None):
        """Build a new instance with title-cased keys from seq."""
        newdict = cls()
        for k in seq:
            newdict[str(k).title()] = value
        return newdict
    fromkeys = classmethod(fromkeys)

    def setdefault(self, key, x=None):
        key = str(key).title()
        try:
            return self[key]
        except KeyError:
            self[key] = x
            return x

    def pop(self, key, default=_MISSING):
        # Fix: ``default`` used to be a required positional, so
        # d.pop('missing') raised TypeError instead of KeyError. Make it
        # optional (sentinel-guarded) to match dict.pop's contract.
        if default is CaseInsensitiveDict._MISSING:
            return dict.pop(self, str(key).title())
        return dict.pop(self, str(key).title(), default)
# TEXT = <any OCTET except CTLs, but including LWS>
#
# A CRLF is allowed in the definition of TEXT only as part of a header
# field continuation. It is expected that the folding LWS will be
# replaced with a single SP before interpretation of the TEXT value."
#
# Build the arguments later passed to str/bytes.translate() to strip
# control characters (0-31 and 127) from outgoing header names/values.
# The shapes differ because the two translate() APIs differ.
if nativestr == bytestr:
    # Python 2 branch (note the xrange use): str.translate(table,
    # deletechars) wants two str arguments.
    header_translate_table = ''.join([chr(i) for i in xrange(256)])
    header_translate_deletechars = ''.join(
        [chr(i) for i in xrange(32)]) + chr(127)
else:
    # Python 3: bytes.translate(None, deletechars) accepts None as the
    # identity table plus a bytes object of characters to delete.
    header_translate_table = None
    header_translate_deletechars = bytes(range(32)) + bytes([127])
class HeaderMap(CaseInsensitiveDict):
    """A dict subclass for HTTP request and response headers.
    Each key is changed on entry to str(key).title(). This allows headers
    to be case-insensitive and avoid duplicates.
    Values are header values (decoded according to :rfc:`2047` if necessary).
    """
    # HTTP protocol version consulted when deciding whether RFC 2047
    # encoding of otherwise non-encodable header text is permitted.
    protocol = (1, 1)
    # Charsets tried, in order, when encoding header text for the wire.
    encodings = ["ISO-8859-1"]
    # Someday, when http-bis is done, this will probably get dropped
    # since few servers, clients, or intermediaries do it. But until then,
    # we're going to obey the spec as is.
    # "Words of *TEXT MAY contain characters from character sets other than
    # ISO-8859-1 only when encoded according to the rules of RFC 2047."
    use_rfc_2047 = True

    def elements(self, key):
        """Return a sorted list of HeaderElements for the given header."""
        key = str(key).title()
        value = self.get(key)
        return header_elements(key, value)

    def values(self, key):
        """Return a sorted list of HeaderElement.value for the given header."""
        return [e.value for e in self.elements(key)]

    def output(self):
        """Transform self into a list of (name, value) tuples."""
        return list(self.encode_header_items(self.items()))

    def encode_header_items(cls, header_items):
        """
        Prepare the sequence of name, value tuples into a form suitable for
        transmitting on the wire for HTTP.

        Unicode names/values are encoded via cls.encode(); non-string
        values are stringified first; control characters are stripped.
        """
        for k, v in header_items:
            # unicodestr/basestring are version-compat aliases defined at
            # module level (not visible in this chunk).
            if isinstance(k, unicodestr):
                k = cls.encode(k)
            if not isinstance(v, basestring):
                v = str(v)
            if isinstance(v, unicodestr):
                v = cls.encode(v)
            # See header_translate_* constants above.
            # Replace only if you really know what you're doing.
            k = k.translate(header_translate_table,
                            header_translate_deletechars)
            v = v.translate(header_translate_table,
                            header_translate_deletechars)
            yield (k, v)
    encode_header_items = classmethod(encode_header_items)

    def encode(cls, v):
        """Return the given header name or value, encoded for HTTP output."""
        # Try each permitted charset in order; first success wins.
        for enc in cls.encodings:
            try:
                return v.encode(enc)
            except UnicodeEncodeError:
                continue
        if cls.protocol == (1, 1) and cls.use_rfc_2047:
            # Encode RFC-2047 TEXT
            # (e.g. u"\u8200" -> "=?utf-8?b?6IiA?=").
            # We do our own here instead of using the email module
            # because we never want to fold lines--folding has
            # been deprecated by the HTTP working group.
            v = b2a_base64(v.encode('utf-8'))
            return (ntob('=?utf-8?b?') + v.strip(ntob('\n')) + ntob('?='))
        raise ValueError("Could not encode header part %r using "
                         "any of the encodings %r." %
                         (v, cls.encodings))
    encode = classmethod(encode)
class Host(object):
    """An internet address.

    name
        Should be the client's host name. If not available (because no
        DNS lookup is performed), the IP address should be used instead.
    """

    # Class-level fallbacks, overridden per instance in __init__.
    ip = "0.0.0.0"
    port = 80
    name = "unknown.tld"

    def __init__(self, ip, port, name=None):
        self.ip = ip
        self.port = port
        # Fall back to the bare IP when no host name was resolved.
        self.name = ip if name is None else name

    def __repr__(self):
        return "httputil.Host(%r, %r, %r)" % (self.ip, self.port, self.name)
| gpl-3.0 |
evernym/plenum | plenum/test/demotion_promotion/test_demote_from_10_to_4_nodes.py | 2 | 2659 | import logging
import pytest
from plenum.test.helper import checkViewNoForNodes, waitForViewChange, sdk_send_random_and_check
from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
from plenum.test.node_request.helper import sdk_ensure_pool_functional
from plenum.test.pool_transactions.helper import demote_node, promote_node
from plenum.test.test_node import ensureElectionsDone
from plenum.test.view_change.helper import restart_node
# Quiet the indy SDK logger; its output is noise for this test module.
logging.getLogger("indy").setLevel(logging.CRITICAL)
# Start the pool with 10 nodes so it can be shrunk down to 4.
nodeCount = 10
# NOTE(review): presumably log messages the test framework tolerates
# without failing the run — confirm against the conftest machinery.
whitelist = ['Node is not a validator']
@pytest.fixture(scope="module")
def tconf(tconf):
    """Module-scoped override forcing 3PC batches of size 1; the original
    batch size is restored when the module's tests finish."""
    saved_batch_size = tconf.Max3PCBatchSize
    tconf.Max3PCBatchSize = 1
    yield tconf
    tconf.Max3PCBatchSize = saved_batch_size
def test_demote_promote_restart_after_promotion_from_10_to_4_nodes(txnPoolNodeSet,
                                                                   looper,
                                                                   sdk_pool_handle,
                                                                   sdk_wallet_steward,
                                                                   tdir,
                                                                   tconf,
                                                                   allPluginsPath):
    """
    Demote nodes one at a time from 10 down to 4, then promote one back
    and restart it. While shrinking from 10 to 4 nodes the pool's f
    value should change twice along the way.
    """
    def demote_another_one(rest_pool):
        # Demote the last node of the remaining pool and wait for the
        # resulting view change and re-election before returning the
        # shrunken pool.
        demoted_node = rest_pool[-1]
        rest_pool = [n for n in rest_pool if n != demoted_node]
        starting_view_no = checkViewNoForNodes(rest_pool)
        demote_node(looper, sdk_wallet_steward, sdk_pool_handle, demoted_node)
        waitForViewChange(looper, rest_pool, expectedViewNo=starting_view_no + 1)
        ensureElectionsDone(looper, rest_pool, customTimeout=60)
        ensure_all_nodes_have_same_data(looper, rest_pool)
        return rest_pool

    rest_nodes = txnPoolNodeSet
    # The node demoted last; it is promoted back and restarted below.
    etalon_node = txnPoolNodeSet[-1]
    while len(rest_nodes) > 4:
        rest_nodes = demote_another_one(rest_nodes)
        # The pool must still order requests after each demotion.
        sdk_send_random_and_check(looper, rest_nodes, sdk_pool_handle, sdk_wallet_steward, 5)
    starting_view_no = checkViewNoForNodes(rest_nodes)
    promote_node(looper, sdk_wallet_steward, sdk_pool_handle, etalon_node)
    waitForViewChange(looper, rest_nodes, expectedViewNo=starting_view_no + 1)
    ensure_all_nodes_have_same_data(looper, rest_nodes)
    rest_nodes.append(etalon_node)
    # Restart the freshly promoted node and check the pool still works.
    restart_node(looper, rest_nodes, etalon_node, tconf, tdir, allPluginsPath)
    ensureElectionsDone(looper, rest_nodes)
    sdk_ensure_pool_functional(looper, rest_nodes, sdk_wallet_steward, sdk_pool_handle)
| apache-2.0 |
thakkarparth007/mailer | deltamail/envelopes_mod/conn.py | 8 | 4226 | # -*- coding: utf-8 -*-
# Copyright (c) 2013 Tomasz Wójcik <tomek@bthlabs.pl>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
"""
envelopes.conn
==============
This module contains SMTP connection wrapper.
"""
import smtplib
import socket
# Re-export socket.timeout so callers can catch SMTP timeouts without
# importing socket themselves.
TimeoutException = socket.timeout
__all__ = ['SMTP', 'GMailSMTP', 'SendGridSMTP', 'MailcatcherSMTP',
           'TimeoutException']
class SMTP(object):
    """Wrapper around :py:class:`smtplib.SMTP` class.

    Connection parameters are stored up front; the socket itself is
    opened lazily by _connect() (called from send()).
    """

    def __init__(self, host=None, port=25, login=None, password=None,
                 tls=False, timeout=None):
        self._conn = None
        self._host = host
        self._port = port
        self._login = login
        self._password = password
        self._tls = tls
        self._timeout = timeout

    @property
    def is_connected(self):
        """Returns *True* if the SMTP connection is initialized and
        connected. Otherwise returns *False*"""
        try:
            self._conn.noop()
        except (AttributeError, smtplib.SMTPServerDisconnected):
            # AttributeError: never connected (self._conn is None);
            # SMTPServerDisconnected: the server dropped the session.
            return False
        else:
            return True

    def _connect(self, replace_current=False):
        """Open the SMTP session if there is none yet.

        With replace_current=True, any existing connection is quit (best
        effort) and a fresh one is established in its place.
        """
        if self._conn is None or replace_current:
            try:
                self._conn.quit()
            except (AttributeError, smtplib.SMTPServerDisconnected):
                pass
            if self._timeout:
                self._conn = smtplib.SMTP(self._host, self._port,
                                          timeout=self._timeout)
            else:
                self._conn = smtplib.SMTP(self._host, self._port)
            if self._tls:
                self._conn.starttls()
            if self._login:
                self._conn.login(self._login, self._password or '')

    def send(self, envelope):
        """Sends an *envelope*; returns smtplib's sendmail() result."""
        if not self.is_connected:
            # Fix: pass replace_current=True so a stale (non-None but
            # disconnected) connection is replaced. Previously _connect()
            # was a no-op in that case and sendmail() then failed on the
            # dead connection.
            self._connect(replace_current=True)
        msg = envelope.to_mime_message()
        to_addrs = [envelope._addrs_to_header([addr])
                    for addr in envelope._to + envelope._cc + envelope._bcc]
        return self._conn.sendmail(msg['From'], to_addrs, msg.as_string())
class GMailSMTP(SMTP):
    """:py:class:`SMTP` subclass preconfigured for the GMail SMTP service."""

    GMAIL_SMTP_HOST = 'smtp.googlemail.com'
    GMAIL_SMTP_TLS = True

    def __init__(self, login=None, password=None):
        super(GMailSMTP, self).__init__(self.GMAIL_SMTP_HOST,
                                        tls=self.GMAIL_SMTP_TLS,
                                        login=login, password=password)
class SendGridSMTP(SMTP):
    """:py:class:`SMTP` subclass preconfigured for the SendGrid service."""

    SENDGRID_SMTP_HOST = 'smtp.sendgrid.net'
    SENDGRID_SMTP_PORT = 587
    SENDGRID_SMTP_TLS = False

    def __init__(self, login=None, password=None):
        super(SendGridSMTP, self).__init__(self.SENDGRID_SMTP_HOST,
                                           port=self.SENDGRID_SMTP_PORT,
                                           tls=self.SENDGRID_SMTP_TLS,
                                           login=login, password=password)
class MailcatcherSMTP(SMTP):
    """:py:class:`SMTP` subclass preconfigured for a local Mailcatcher
    server."""

    MAILCATCHER_SMTP_HOST = 'localhost'

    def __init__(self, port=1025):
        super(MailcatcherSMTP, self).__init__(self.MAILCATCHER_SMTP_HOST,
                                              port=port)
| mit |
detiber/ansible | lib/ansible/modules/cloud/google/gce_instance_template.py | 42 | 17737 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Module maturity metadata consumed by Ansible tooling (e.g. ansible-doc).
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gce_instance_template
version_added: "2.3"
short_description: create or destroy instance templates of Compute Engine of GCP.
description:
- Creates or destroy Google instance templates
of Compute Engine of Google Cloud Platform.
options:
state:
description:
- The desired state for the instance template.
default: "present"
choices: ["present", "absent"]
name:
description:
- The name of the GCE instance template.
required: true
default: null
size:
description:
- The desired machine type for the instance template.
default: "f1-micro"
source:
description:
- A source disk to attach to the instance.
Cannot specify both I(image) and I(source).
default: null
image:
description:
- The image to use to create the instance.
Cannot specify both I(image) and I(source).
default: null
image_family:
description:
- The image family to use to create the instance.
If I(image) has been used I(image_family) is ignored.
Cannot specify both I(image) and I(source).
default: null
disk_type:
description:
- Specify a C(pd-standard) disk or C(pd-ssd)
for an SSD disk.
default: pd-standard
disk_auto_delete:
description:
- Indicate that the boot disk should be
deleted when the Node is deleted.
default: true
network:
description:
- The network to associate with the instance.
default: "default"
subnetwork:
description:
- The Subnetwork resource name for this instance.
default: null
can_ip_forward:
description:
- Set to True to allow instance to
send/receive non-matching src/dst packets.
default: false
external_ip:
description:
- The external IP address to use.
If C(ephemeral), a new non-static address will be
used. If C(None), then no external address will
be used. To use an existing static IP address
specify address name.
default: "ephemeral"
service_account_email:
description:
- service account email
default: null
service_account_permissions:
description:
- service account permissions (see
U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
--scopes section for detailed information)
default: null
choices: [
"bigquery", "cloud-platform", "compute-ro", "compute-rw",
"useraccounts-ro", "useraccounts-rw", "datastore", "logging-write",
"monitoring", "sql-admin", "storage-full", "storage-ro",
"storage-rw", "taskqueue", "userinfo-email"
]
automatic_restart:
description:
- Defines whether the instance should be
automatically restarted when it is
terminated by Compute Engine.
default: null
preemptible:
description:
- Defines whether the instance is preemptible.
default: null
tags:
description:
- a comma-separated list of tags to associate with the instance
default: null
metadata:
description:
- a hash/dictionary of custom data for the instance;
'{"key":"value", ...}'
default: null
description:
description:
- description of instance template
default: null
disks:
description:
- a list of persistent disks to attach to the instance; a string value
gives the name of the disk; alternatively, a dictionary value can
define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry
will be the boot disk (which must be READ_WRITE).
default: null
nic_gce_struct:
description:
- Support passing in the GCE-specific
formatted networkInterfaces[] structure.
default: null
project_id:
description:
- your GCE project ID
default: null
pem_file:
description:
- path to the pem file associated with the service account email
This option is deprecated. Use 'credentials_file'.
default: null
credentials_file:
description:
- path to the JSON file associated with the service account email
default: null
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials,
>= 0.20.0 if using preemptible option"
notes:
- JSON credentials strongly preferred.
author: "Gwenael Pellen (@GwenaelPellenArkeup) <gwenael.pellen@arkeup.com>"
'''
EXAMPLES = '''
# Usage
- name: create instance template named foo
gce_instance_template:
name: foo
size: n1-standard-1
image_family: ubuntu-1604-lts
state: present
project_id: "your-project-name"
credentials_file: "/path/to/your-key.json"
service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
# Example Playbook
- name: Compute Engine Instance Template Examples
hosts: localhost
vars:
service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
credentials_file: "/path/to/your-key.json"
project_id: "your-project-name"
tasks:
- name: create instance template
gce_instance_template:
name: my-test-instance-template
size: n1-standard-1
image_family: ubuntu-1604-lts
state: present
project_id: "{{ project_id }}"
credentials_file: "{{ credentials_file }}"
service_account_email: "{{ service_account_email }}"
- name: delete instance template
gce_instance_template:
name: my-test-instance-template
size: n1-standard-1
image_family: ubuntu-1604-lts
state: absent
project_id: "{{ project_id }}"
credentials_file: "{{ credentials_file }}"
service_account_email: "{{ service_account_email }}"
'''
RETURN = '''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.gce import gce_connect
try:
import libcloud
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceInUseError, ResourceNotFoundError
from libcloud.compute.drivers.gce import GCEAddress
_ = Provider.GCE
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
try:
from ast import literal_eval
HAS_PYTHON26 = True
except ImportError:
HAS_PYTHON26 = False
def get_info(inst):
    """Return a plain dict describing an instance template.

    inst: libcloud instance-template object exposing .name and .extra.
    """
    return {
        'name': inst.name,
        'extra': inst.extra,
    }
def create_instance_template(module, gce):
    """Create a GCE instance template if one with the given name is absent.

    module : AnsibleModule object
    gce: authenticated GCE libcloud driver

    Returns:
        (changed, json_data, name): whether a template was created, the
        template's info dict (see get_info), and its name.
    """
    # get info from module
    name = module.params.get('name')
    size = module.params.get('size')
    source = module.params.get('source')
    image = module.params.get('image')
    image_family = module.params.get('image_family')
    disk_type = module.params.get('disk_type')
    disk_auto_delete = module.params.get('disk_auto_delete')
    network = module.params.get('network')
    subnetwork = module.params.get('subnetwork')
    can_ip_forward = module.params.get('can_ip_forward')
    external_ip = module.params.get('external_ip')
    service_account_email = module.params.get('service_account_email')
    service_account_permissions = module.params.get(
        'service_account_permissions')
    # NOTE(review): 'on_host_maintenance' is not declared in main()'s
    # argument_spec, so this is always None; 'disks' is read below but
    # never used (disks_gce_struct stays None). Confirm whether both
    # should be wired up.
    on_host_maintenance = module.params.get('on_host_maintenance')
    automatic_restart = module.params.get('automatic_restart')
    preemptible = module.params.get('preemptible')
    tags = module.params.get('tags')
    metadata = module.params.get('metadata')
    description = module.params.get('description')
    disks = module.params.get('disks')
    changed = False
    # args of ex_create_instancetemplate
    # Defaults mirroring libcloud's ex_create_instancetemplate signature;
    # overridden below from the module parameters.
    gce_args = dict(
        name="instance",
        size="f1-micro",
        source=None,
        image=None,
        disk_type='pd-standard',
        disk_auto_delete=True,
        network='default',
        subnetwork=None,
        can_ip_forward=None,
        external_ip='ephemeral',
        service_accounts=None,
        on_host_maintenance=None,
        automatic_restart=None,
        preemptible=None,
        tags=None,
        metadata=None,
        description=None,
        disks_gce_struct=None,
        nic_gce_struct=None
    )
    gce_args['name'] = name
    gce_args['size'] = size
    if source is not None:
        gce_args['source'] = source
    # Image precedence: explicit image > image_family > "debian-8" fallback.
    if image:
        gce_args['image'] = image
    else:
        if image_family:
            image = gce.ex_get_image_from_family(image_family)
            gce_args['image'] = image
        else:
            gce_args['image'] = "debian-8"
    gce_args['disk_type'] = disk_type
    gce_args['disk_auto_delete'] = disk_auto_delete
    # Resolve the network name to a libcloud network object.
    gce_network = gce.ex_get_network(network)
    gce_args['network'] = gce_network
    if subnetwork is not None:
        gce_args['subnetwork'] = subnetwork
    if can_ip_forward is not None:
        gce_args['can_ip_forward'] = can_ip_forward
    # 'ephemeral' and 'none' are keywords; anything else is treated as a
    # static address to look up (falling back to the raw value on error).
    if external_ip == "ephemeral":
        instance_external_ip = external_ip
    elif external_ip == "none":
        instance_external_ip = None
    else:
        try:
            instance_external_ip = gce.ex_get_address(external_ip)
        except GoogleBaseError as err:
            # external_ip is name ?
            instance_external_ip = external_ip
    gce_args['external_ip'] = instance_external_ip
    # Validate requested service-account scopes against the driver's map.
    ex_sa_perms = []
    bad_perms = []
    if service_account_permissions:
        for perm in service_account_permissions:
            if perm not in gce.SA_SCOPES_MAP:
                bad_perms.append(perm)
        if len(bad_perms) > 0:
            module.fail_json(msg='bad permissions: %s' % str(bad_perms))
        ex_sa_perms.append({'email': "default"})
        ex_sa_perms[0]['scopes'] = service_account_permissions
    gce_args['service_accounts'] = ex_sa_perms
    if on_host_maintenance is not None:
        gce_args['on_host_maintenance'] = on_host_maintenance
    if automatic_restart is not None:
        gce_args['automatic_restart'] = automatic_restart
    if preemptible is not None:
        gce_args['preemptible'] = preemptible
    if tags is not None:
        gce_args['tags'] = tags
    # Try to convert the user's metadata value into the format expected
    # by GCE. First try to ensure user has proper quoting of a
    # dictionary-like syntax using 'literal_eval', then convert the python
    # dict into a python list of 'key' / 'value' dicts. Should end up
    # with:
    # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...]
    if metadata:
        if isinstance(metadata, dict):
            md = metadata
        else:
            try:
                md = literal_eval(str(metadata))
                if not isinstance(md, dict):
                    raise ValueError('metadata must be a dict')
            except ValueError as e:
                module.fail_json(msg='bad metadata: %s' % str(e))
            except SyntaxError as e:
                module.fail_json(msg='bad metadata syntax')
        # Older libcloud versions want the {'items': [...]} wire format.
        if hasattr(libcloud, '__version__') and libcloud.__version__ < '0.15':
            items = []
            for k, v in md.items():
                items.append({"key": k, "value": v})
            metadata = {'items': items}
        else:
            metadata = md
    gce_args['metadata'] = metadata
    if description is not None:
        gce_args['description'] = description
    # Idempotence: only create when the template does not already exist.
    instance = None
    try:
        instance = gce.ex_get_instancetemplate(name)
    except ResourceNotFoundError:
        try:
            instance = gce.ex_create_instancetemplate(**gce_args)
            changed = True
        except GoogleBaseError as err:
            # NOTE(review): 'instance' is still None here, so the message
            # reads "create instance None" — confirm it should use 'name'.
            module.fail_json(
                msg='Unexpected error attempting to create instance {}, error: {}'
                .format(
                    instance,
                    err.value
                )
            )
    if instance:
        json_data = get_info(instance)
    else:
        module.fail_json(msg="no instance template!")
    return (changed, json_data, name)
def delete_instance_template(module, gce):
    """Delete the named instance template if it exists.

    module : AnsibleModule object
    gce: authenticated GCE libcloud driver

    Returns:
        (changed, json_data, name) for the named template.
    """
    name = module.params.get('name')
    changed = False
    json_data = {}
    found = False
    template = None
    try:
        template = gce.ex_get_instancetemplate(name)
        found = True
    except GoogleBaseError as err:
        # Lookup failed: report the template as missing.
        json_data = dict(msg='instance template not exists')
    if found:
        if template.destroy():
            changed = True
        else:
            module.fail_json(
                msg='instance template destroy failed'
            )
        json_data = {}
    return (changed, json_data, name)
def module_controller(module, gce):
''' Control module state parameter.
module : AnsibleModule object
gce: authenticated GCE libcloud driver
Returns:
nothing
Exit:
AnsibleModule object exit with json data.
'''
json_output = dict()
state = module.params.get("state")
if state == "present":
(changed, output, name) = create_instance_template(module, gce)
json_output['changed'] = changed
json_output['msg'] = output
elif state == "absent":
(changed, output, name) = delete_instance_template(module, gce)
json_output['changed'] = changed
json_output['msg'] = output
module.exit_json(**json_output)
def check_if_system_state_would_be_changed(module, gce):
    """Predict (for check mode) whether applying the module would change state.

    module : AnsibleModule object
    gce: authenticated GCE libcloud driver

    Returns:
        (changed, output): whether a change would occur, plus a
        human-readable message describing it.
    """
    changed = False
    current_state = "absent"
    state = module.params.get("state")
    name = module.params.get("name")
    try:
        # NOTE(review): a failed lookup of a merely-absent template may
        # also raise a GoogleBaseError subclass here, which would make
        # check mode fail instead of reporting "will be created" — confirm.
        gce.ex_get_instancetemplate(name)
        current_state = "present"
    except GoogleBaseError:
        module.fail_json(msg='GCE get instancetemplate problem')
    if current_state != state:
        changed = True
    if current_state == "absent":
        if changed:
            output = 'instance template {} will be created'.format(name)
        else:
            output = 'nothing to do for instance template {} '.format(name)
    if current_state == "present":
        if changed:
            # Fix: message previously read "will be detroyed".
            output = 'instance template {} will be destroyed'.format(name)
        else:
            output = 'nothing to do for instance template {} '.format(name)
    return (changed, output)
def main():
    """Module entry point: parse arguments, connect to GCE and dispatch.

    In check mode only predicts the change; otherwise delegates to
    module_controller(), which exits the module itself.
    """
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(choices=['present', 'absent'], default='present'),
            # Fix: the argument-spec key is 'required', not 'require';
            # the misspelling meant 'name' was never actually enforced.
            name=dict(required=True, aliases=['base_name']),
            size=dict(default='f1-micro'),
            source=dict(),
            image=dict(),
            image_family=dict(default='debian-8'),
            disk_type=dict(choices=['pd-standard', 'pd-ssd'], default='pd-standard', type='str'),
            disk_auto_delete=dict(type='bool', default=True),
            network=dict(default='default'),
            subnetwork=dict(),
            can_ip_forward=dict(type='bool', default=False),
            external_ip=dict(default='ephemeral'),
            service_account_email=dict(),
            service_account_permissions=dict(type='list'),
            automatic_restart=dict(type='bool', default=None),
            preemptible=dict(type='bool', default=None),
            tags=dict(type='list'),
            metadata=dict(),
            description=dict(),
            disks=dict(type='list'),
            nic_gce_struct=dict(type='list'),
            project_id=dict(),
            pem_file=dict(type='path'),
            credentials_file=dict(type='path'),
        ),
        mutually_exclusive=[['source', 'image']],
        required_one_of=[['image', 'image_family']],
        supports_check_mode=True
    )
    if not HAS_PYTHON26:
        module.fail_json(
            msg="GCE module requires python's 'ast' module, python v2.6+")
    if not HAS_LIBCLOUD:
        module.fail_json(
            msg='libcloud with GCE support (0.17.0+) required for this module')
    try:
        gce = gce_connect(module)
    except GoogleBaseError:
        # Fix: corrected the misspelled 'Connexion' in the error message.
        module.fail_json(msg='GCE connection failed')
    if module.check_mode:
        # Check mode: report what would happen without touching GCE state.
        (changed, output) = check_if_system_state_would_be_changed(module, gce)
        module.exit_json(
            changed=changed,
            msg=output
        )
    else:
        module_controller(module, gce)


if __name__ == '__main__':
    main()
| gpl-3.0 |
r-icarus/openstack_microserver | openstack_dashboard/dashboards/admin/volumes/tests.py | 12 | 4070 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse # noqa
from django import http
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard.api import keystone
from openstack_dashboard.test import helpers as test
class VolumeTests(test.BaseAdminViewTests):
    """Mox-based view tests for the admin Volumes panel (index plus
    volume-type create/delete actions)."""

    @test.create_stubs({api.nova: ('server_list',),
                        cinder: ('volume_list',
                                 'volume_type_list',),
                        keystone: ('tenant_list',)})
    def test_index(self):
        """Index view lists all tenants' volumes."""
        cinder.volume_list(IsA(http.HttpRequest), search_opts={
            'all_tenants': True}).AndReturn(self.volumes.list())
        api.nova.server_list(IsA(http.HttpRequest), search_opts={
            'all_tenants': True}) \
            .AndReturn([self.servers.list(), False])
        cinder.volume_type_list(IsA(http.HttpRequest)).\
            AndReturn(self.volume_types.list())
        keystone.tenant_list(IsA(http.HttpRequest)) \
            .AndReturn([self.tenants.list(), False])
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:admin:volumes:index'))
        self.assertTemplateUsed(res, 'admin/volumes/index.html')
        volumes = res.context['volumes_table'].data
        self.assertItemsEqual(volumes, self.volumes.list())

    @test.create_stubs({cinder: ('volume_type_create',)})
    def test_create_volume_type(self):
        """Posting the create-type form calls cinder and redirects back."""
        formData = {'name': 'volume type 1'}
        cinder.volume_type_create(IsA(http.HttpRequest),
                                  formData['name']).\
            AndReturn(self.volume_types.first())
        self.mox.ReplayAll()
        res = self.client.post(reverse('horizon:admin:volumes:create_type'),
                               formData)
        redirect = reverse('horizon:admin:volumes:index')
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, redirect)

    @test.create_stubs({api.nova: ('server_list',),
                        cinder: ('volume_list',
                                 'volume_type_list',
                                 'volume_type_delete',),
                        keystone: ('tenant_list',)})
    def test_delete_volume_type(self):
        """The table delete action calls cinder.volume_type_delete."""
        volume_type = self.volume_types.first()
        formData = {'action': 'volume_types__delete__%s' % volume_type.id}
        cinder.volume_list(IsA(http.HttpRequest), search_opts={
            'all_tenants': True}).AndReturn(self.volumes.list())
        api.nova.server_list(IsA(http.HttpRequest), search_opts={
            'all_tenants': True}) \
            .AndReturn([self.servers.list(), False])
        cinder.volume_type_list(IsA(http.HttpRequest)).\
            AndReturn(self.volume_types.list())
        cinder.volume_type_delete(IsA(http.HttpRequest),
                                  str(volume_type.id))
        keystone.tenant_list(IsA(http.HttpRequest)) \
            .AndReturn([self.tenants.list(), False])
        self.mox.ReplayAll()
        res = self.client.post(reverse('horizon:admin:volumes:index'),
                               formData)
        redirect = reverse('horizon:admin:volumes:index')
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, redirect)
| apache-2.0 |
xlqian/navitia | source/sql/alembic/versions/14e13cf7a042_manage_multi_codes.py | 5 | 2592 | """Manage multi codes
Revision ID: 14e13cf7a042
Revises: 12660cd87568
Create Date: 2015-05-07 15:31:55.271785
"""
# revision identifiers, used by Alembic.
revision = '14e13cf7a042'
down_revision = '12660cd87568'
from alembic import op
import sqlalchemy as sa
# Tables whose per-row ``external_code`` column is migrated into the generic
# ``object_code`` table.  ``type_name`` is the matching ``object_type.name``
# row in the navitia schema.  A list literal replaces the previous sequence
# of six append() calls.
map_merge = [
    {"table_name": "network", "type_name": "Network"},
    {"table_name": "line", "type_name": "Line"},
    {"table_name": "route", "type_name": "Route"},
    {"table_name": "stop_area", "type_name": "StopArea"},
    {"table_name": "stop_point", "type_name": "StopPoint"},
    {"table_name": "vehicle_journey", "type_name": "VehicleJourney"},
]
def upgrade():
    # Create the generic code table: one row per (object, key) pair, keyed by
    # the object's type so ids from different tables cannot collide.
    op.create_table(
        'object_code',
        sa.Column('object_type_id', sa.BIGINT(), nullable=False),
        sa.Column('object_id', sa.BIGINT(), nullable=False),
        sa.Column('key', sa.TEXT(), nullable=False),
        sa.Column('value', sa.TEXT(), nullable=False),
        sa.ForeignKeyConstraint(['object_type_id'], [u'navitia.object_type.id'], name=u'object_type_id_fkey'),
        schema='navitia',
    )
    # calendar.external_code is dropped outright (not migrated): calendar is
    # not part of map_merge, so its codes are not copied over.
    op.drop_column("calendar", 'external_code', schema='navitia')
    for table in map_merge:
        # Copy every non-null, non-empty external_code into object_code under
        # the 'external_code' key, then drop the legacy per-table column.
        query = (
            "INSERT INTO navitia.object_code (object_id, object_type_id, key, value) "
            "SELECT nt.id, ot.id, 'external_code', nt.external_code from navitia.{table_name} nt, navitia.object_type ot "
            "where ot.name = '{type_name}' "
            "and nt.external_code is not null "
            "and nt.external_code <> '' ".format(table_name=table["table_name"], type_name=table["type_name"])
        )
        op.execute(query)
        op.drop_column(table["table_name"], 'external_code', schema='navitia')
def downgrade():
    # Recreate calendar.external_code empty: its values were dropped (not
    # migrated) in upgrade(), so nothing can be restored for it.
    op.add_column("calendar", sa.Column('external_code', sa.TEXT(), nullable=True), schema='navitia')
    for table in map_merge:
        # Re-add the per-table column, then copy the 'external_code' entries
        # back out of object_code before the table itself is removed.
        op.add_column(
            table["table_name"], sa.Column('external_code', sa.TEXT(), nullable=True), schema='navitia'
        )
        query = (
            "update navitia.{table} nt set external_code=aa.value "
            "from "
            "(select oc.value, oc.object_id from navitia.object_code oc, navitia.object_type ot "
            "where oc.key='external_code' "
            "and ot.id=oc.object_type_id "
            "and ot.name='{type_name}')aa "
            "where nt.id=aa.object_id ".format(table=table["table_name"], type_name=table["type_name"])
        )
        op.execute(query)
    op.drop_table('object_code', schema='navitia')
| agpl-3.0 |
JalexChang/appinventor-sources | appinventor/lib/blockly/src/i18n/status.py | 91 | 8965 | #!/usr/bin/python
# Gives the translation status of the specified apps and languages.
#
# Copyright 2013 Google Inc.
# https://blockly.googlecode.com/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Produce a table showing the translation status of each app by language.
@author Ellen Spertus (ellen.spertus@gmail.com)
"""
import argparse
import os
from common import read_json_file
# Bogus language name representing all messages defined.
TOTAL = 'qqq'
# List of key prefixes, which are app names, except for 'Apps', which
# has common messages. It is included here for convenience.
APPS = ['Apps', 'Code', 'Graph', 'Maze', 'Plane', 'Puzzle', 'Turtle']
def get_prefix(s):
  """Gets the portion of a string before the first period.

  Args:
    s: A string.

  Returns:
    Everything up to (but not including) the first period, or the whole
    string if it contains no period.
  """
  prefix, _, _ = s.partition('.')
  return prefix
def get_prefix_count(prefix, arr):
  """Counts how many strings in the array start with the prefix.

  Args:
    prefix: The prefix string.
    arr: An array of strings.

  Returns:
    The number of strings in arr starting with prefix.
  """
  # Count lazily instead of materializing the matching sublist.
  return sum(1 for elt in arr if elt.startswith(prefix))
def output_as_html(messages, apps, verbose):
  """Outputs the given prefix counts and percentages as HTML.

  Specifically, a sortable HTML table is produced, where the app names
  are column headers, and one language is output per row.  Entries
  are color-coded based on the percent completeness.

  Args:
    messages: A dictionary of dictionaries, where the outer keys are language
        codes used by translatewiki (generally, ISO 639 language codes) or
        the string TOTAL, used to indicate the total set of messages.  The
        inner dictionary makes message keys to values in that language.
    apps: Apps to consider.
    verbose: Whether to list missing keys.
  """
  def generate_language_url(lang):
    # Anchor into translatewiki's supported-languages page.
    return 'https://translatewiki.net/wiki/Special:SupportedLanguages#' + lang

  def generate_number_as_percent(num, total, tag):
    # Floor division keeps the historical whole-number percentage under
    # both Python 2 and Python 3.
    percent = num * 100 // total
    if percent == 100:
      color = 'green'
    elif percent >= 90:
      color = 'orange'
    elif percent >= 60:
      color = 'black'
    else:
      color = 'gray'
    s = '<font color={0}>{1} ({2}%)</font>'.format(color, num, percent)
    if verbose and percent < 100:
      # Link to the per-language detail <div> emitted below.  The anchor
      # tag was previously left unclosed, producing invalid HTML.
      return '<a href="#{0}">{1}</a>'.format(tag, s)
    else:
      return s

  print('<head><title>Blockly app translation status</title></head><body>')
  print("<SCRIPT LANGUAGE='JavaScript1.2' SRC='https://neil.fraser.name/"
        "software/tablesort/tablesort-min.js'></SCRIPT>")
  print('<table cellspacing=5><thead><tr>')
  print('<th class=nocase>Language</th><th class=num>' +
        '</th><th class=num>'.join(apps) + '</th></tr></thead><tbody>')
  for lang in messages:
    if lang != TOTAL:
      print('<tr><td><a href="{1}">{0}</a></td>'.format(
          lang, generate_language_url(lang)))
      for app in apps:
        # Converted from Python 2 print statements for consistency with
        # the print() calls used throughout the rest of this function.
        print('<td>')
        print(generate_number_as_percent(
            get_prefix_count(app, messages[lang]),
            get_prefix_count(app, messages[TOTAL]),
            (lang + app)))
        print('</td>')
      print('</tr>')
  print('</tbody><tfoot><tr><td>ALL</td><td>')
  # Bug fix: the totals row previously counted prefixes in the literal
  # string TOTAL ('qqq') rather than in messages[TOTAL], so it always
  # printed 0.  output_as_text and output_as_csv use messages[TOTAL].
  print('</td><td>'.join(
      [str(get_prefix_count(app, messages[TOTAL])) for app in apps]))
  print('</td></tr></tfoot></table>')
  if verbose:
    # Emit one detail <div> per incomplete (language, app) pair, listing
    # the untranslated keys; its id matches the anchor generated above.
    for lang in messages:
      if lang != TOTAL:
        for app in apps:
          if (get_prefix_count(app, messages[lang]) <
              get_prefix_count(app, messages[TOTAL])):
            print('<div id={0}{1}><strong>{1} (<a href="{2}">{0}</a>)'.
                  format(lang, app, generate_language_url(lang)))
            print('</strong> missing: ')
            print(', '.join(
                [key for key in messages[TOTAL] if
                 key.startswith(app) and key not in messages[lang]]))
            print('<br><br></div>')
  print('</body>')
def output_as_text(messages, apps, verbose):
  """Outputs the given prefix counts and percentages as text.

  Args:
    messages: A dictionary of dictionaries, where the outer keys are language
        codes used by translatewiki (generally, ISO 639 language codes) or
        the string TOTAL, used to indicate the total set of messages.  The
        inner dictionary makes message keys to values in that language.
    apps: Apps to consider.
    verbose: Whether to list missing keys.
  """
  def generate_number_as_percent(num, total):
    # NOTE(review): relies on Python 2 integer division for a whole-number
    # percentage; under Python 3 this would format a float — confirm the
    # intended runtime before porting.
    return '{0} ({1}%)'.format(num, num * 100 / total)

  # Column width sized to the widest possible cell, e.g. '999 (100%)'.
  MAX_WIDTH = len('999 (100%)') + 1
  FIELD_STRING = '{0: <' + str(MAX_WIDTH) + '}'
  # Header row followed by a dashed separator under every column.
  print(FIELD_STRING.format('Language') + ''.join(
      [FIELD_STRING.format(app) for app in apps]))
  print(('-' * (MAX_WIDTH - 1) + ' ') * (len(apps) + 1))
  # One row per language: count and percent-complete for each app.
  for lang in messages:
    if lang != TOTAL:
      print(FIELD_STRING.format(lang) +
            ''.join([FIELD_STRING.format(generate_number_as_percent(
                get_prefix_count(app, messages[lang]),
                get_prefix_count(app, messages[TOTAL])))
                     for app in apps]))
  # Totals row: the full number of messages defined per app.
  print(FIELD_STRING.format(TOTAL) +
        ''.join(
            [FIELD_STRING.format(get_prefix_count(app, messages[TOTAL]))
             for app in apps]))
  if verbose:
    # List every untranslated key for each (language, app) pair.
    for lang in messages:
      if lang != TOTAL:
        for app in apps:
          missing = [key for key in messages[TOTAL]
                     if key.startswith(app) and key not in messages[lang]]
          print('{0} {1}: Missing: {2}'.format(
              app.upper(), lang, (', '.join(missing) if missing else 'none')))
def output_as_csv(messages, apps):
  """Outputs the given prefix counts and percentages as CSV.

  Each app occupies two CSV columns: the translated-message count and the
  completeness ratio (total row leaves the second column blank).

  Args:
    messages: A dictionary of dictionaries, where the outer keys are language
        codes used by translatewiki (generally, ISO 639 language codes) or
        the string TOTAL, used to indicate the total set of messages.  The
        inner dictionary makes message keys to values in that language.
    apps: Apps to consider.
  """
  # Header row.  ', ,' inserts the blank second column for each app.
  print('Language, ' + ', ,'.join(apps))
  # Total row.
  # Put at top, rather than bottom, so it can be frozen.
  print('TOTAL, ' + ', '.join(
      [str(get_prefix_count(app, messages[TOTAL])) + ', '
       for app in apps]))
  # One line per language: count, then count/total as a float ratio.
  for lang in messages:
    if lang != TOTAL:
      print(lang + ', ' + ', '.join(
          [str(get_prefix_count(app, messages[lang]))
           + ', '
           + str((get_prefix_count(app, messages[lang]) * 1.0 /
                  get_prefix_count(app, messages[TOTAL])))
           for app in apps]))
def main():
  """Processes input files and outputs results in specified format.

  Parses command-line flags, loads the key file and every per-language
  JSON file, then dispatches to the text/html/csv formatter.
  """
  # Argument parsing.
  parser = argparse.ArgumentParser(
      description='Display translation status by app and language.')
  parser.add_argument('--key_file', default='json' + os.path.sep + 'keys.json',
                      help='file with complete list of keys.')
  parser.add_argument('--output', default='text',
                      choices=['text', 'html', 'csv'],
                      help='output format')
  parser.add_argument('--verbose', action='store_true', default=False,
                      help='whether to indicate which messages were translated '
                      '(only used in text and html output modes)')
  parser.add_argument('--app', default=None, choices=APPS,
                      help='if set, only consider the specified app (prefix).')
  parser.add_argument('lang_files', nargs='+',
                      help='names of JSON files to examine')
  args = parser.parse_args()
  apps = [args.app] if args.app else APPS

  # Read in JSON files.  messages[TOTAL] holds the full key list; every
  # other entry maps a language code to its translated messages.
  messages = {}  # A dictionary of dictionaries.
  messages[TOTAL] = read_json_file(args.key_file)
  for lang_file in args.lang_files:
    prefix = get_prefix(os.path.split(lang_file)[1])
    # Skip non-language files ('qqq' holds descriptions, 'keys' the key list).
    if prefix not in ['qqq', 'keys']:
      messages[prefix] = read_json_file(lang_file)

  # Output results in the requested format.
  if args.output == 'text':
    output_as_text(messages, apps, args.verbose)
  elif args.output == 'html':
    output_as_html(messages, apps, args.verbose)
  elif args.output == 'csv':
    output_as_csv(messages, apps)
  else:
    # Unreachable: argparse restricts --output to the three choices above.
    print('No output?!')
| apache-2.0 |
erudit/eruditorg | eruditorg/erudit/cache/client.py | 1 | 1334 | from django.conf import settings
from django.core.cache.backends.base import DEFAULT_TIMEOUT
from django_redis import get_redis_connection
from django_redis.client import DefaultClient
from redis_key_tagging.client import RedisKeyTagging
class EruditCacheClient(DefaultClient):
    """django-redis cache client that tags keys with Erudit document ids.

    When the underlying connection is a ``RedisKeyTagging`` client, cache
    keys can be tagged with the document ids (``pids``) they depend on, so
    that every key related to a document can later be invalidated at once.
    """

    def set(
        self,
        key,
        value,
        timeout=DEFAULT_TIMEOUT,
        version=None,
        client=None,
        nx=False,
        xx=False,
        pids=None,
    ):
        # ``pids`` is the Erudit-specific extension: document ids used as
        # invalidation tags for this key.  All other parameters mirror
        # django-redis's DefaultClient.set().
        if pids is None:
            pids = []
        redis = get_redis_connection()
        # Django's DEFAULT_TIMEOUT is an object() to force backends to set their own default timeout
        # values, so let's set our own default timeout here.
        if timeout == DEFAULT_TIMEOUT:
            timeout = settings.SHORT_TTL
        # Pass tags to our cache client only if we are using RedisKeyTagging, if we have a positive
        # or a `None` timeout, and if we have pids.
        if isinstance(redis, RedisKeyTagging) and (timeout is None or timeout > 0) and pids:
            return redis.set(
                self.make_key(key, version=version),
                self.encode(value),
                ex=timeout,
                tags=pids or [],
            )
        else:
            # Fall back to the stock django-redis behaviour (no tagging).
            return super().set(key, value, timeout, version, client, nx, xx)
| gpl-3.0 |
towerjoo/DjangoNotes | Django-1.5.1/tests/regressiontests/model_inheritance_regress/models.py | 60 | 4929 | from __future__ import unicode_literals
import datetime
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# --- Multi-table inheritance chain: Place -> Restaurant -> ItalianRestaurant.

@python_2_unicode_compatible
class Place(models.Model):
    name = models.CharField(max_length=50)
    address = models.CharField(max_length=80)

    class Meta:
        ordering = ('name',)

    def __str__(self):
        return "%s the place" % self.name


@python_2_unicode_compatible
class Restaurant(Place):
    serves_hot_dogs = models.BooleanField()
    serves_pizza = models.BooleanField()

    def __str__(self):
        return "%s the restaurant" % self.name


@python_2_unicode_compatible
class ItalianRestaurant(Restaurant):
    serves_gnocchi = models.BooleanField()

    def __str__(self):
        return "%s the italian restaurant" % self.name


@python_2_unicode_compatible
class ParkingLot(Place):
    # An explicit link to the parent (we can control the attribute name).
    parent = models.OneToOneField(Place, primary_key=True, parent_link=True)
    capacity = models.IntegerField()

    def __str__(self):
        return "%s the parking lot" % self.name


class ParkingLot2(Place):
    # In lieu of any other connector, an existing OneToOneField will be
    # promoted to the primary key.
    parent = models.OneToOneField(Place)


class ParkingLot3(Place):
    # The parent_link connector need not be the pk on the model.
    primary_key = models.AutoField(primary_key=True)
    parent = models.OneToOneField(Place, parent_link=True)


class Supplier(models.Model):
    restaurant = models.ForeignKey(Restaurant)


class Wholesaler(Supplier):
    retailer = models.ForeignKey(Supplier,related_name='wholesale_supplier')


# --- Inheritance with a callable default on the parent.

class Parent(models.Model):
    created = models.DateTimeField(default=datetime.datetime.now)


class Child(Parent):
    name = models.CharField(max_length=10)


# --- Self-referential FK on the parent of an inheriting child.

class SelfRefParent(models.Model):
    parent_data = models.IntegerField()
    self_data = models.ForeignKey('self', null=True)


class SelfRefChild(SelfRefParent):
    child_data = models.IntegerField()


@python_2_unicode_compatible
class Article(models.Model):
    headline = models.CharField(max_length=100)
    pub_date = models.DateTimeField()

    class Meta:
        ordering = ('-pub_date', 'headline')

    def __str__(self):
        return self.headline


class ArticleWithAuthor(Article):
    author = models.CharField(max_length=100)


# --- ManyToMany declared on a concrete base, inherited by a child.

class M2MBase(models.Model):
    articles = models.ManyToManyField(Article)


class M2MChild(M2MBase):
    name = models.CharField(max_length=50)


# --- Abstract class inserted between two concrete models.

class Evaluation(Article):
    quality = models.IntegerField()

    class Meta:
        abstract = True


class QualityControl(Evaluation):
    assignee = models.CharField(max_length=50)


@python_2_unicode_compatible
class BaseM(models.Model):
    base_name = models.CharField(max_length=100)

    def __str__(self):
        return self.base_name


@python_2_unicode_compatible
class DerivedM(BaseM):
    # Child supplies its own explicit primary key.
    customPK = models.IntegerField(primary_key=True)
    derived_name = models.CharField(max_length=100)

    def __str__(self):
        return "PK = %d, base_name = %s, derived_name = %s" \
                % (self.customPK, self.base_name, self.derived_name)


# --- Meta inheritance through a chain of abstract classes.

class AuditBase(models.Model):
    planned_date = models.DateField()

    class Meta:
        abstract = True
        verbose_name_plural = 'Audits'


class CertificationAudit(AuditBase):
    class Meta(AuditBase.Meta):
        abstract = True


class InternalCertificationAudit(CertificationAudit):
    auditing_dept = models.CharField(max_length=20)


# Check that abstract classes don't get m2m tables autocreated.
@python_2_unicode_compatible
class Person(models.Model):
    name = models.CharField(max_length=100)

    class Meta:
        ordering = ('name',)

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class AbstractEvent(models.Model):
    name = models.CharField(max_length=100)
    # %(class)s makes each concrete subclass get its own reverse accessor.
    attendees = models.ManyToManyField(Person, related_name="%(class)s_set")

    class Meta:
        abstract = True
        ordering = ('name',)

    def __str__(self):
        return self.name


class BirthdayParty(AbstractEvent):
    pass


class BachelorParty(AbstractEvent):
    pass


class MessyBachelorParty(BachelorParty):
    pass


# Check concrete -> abstract -> concrete inheritance
class SearchableLocation(models.Model):
    keywords = models.CharField(max_length=256)


class Station(SearchableLocation):
    name = models.CharField(max_length=128)

    class Meta:
        abstract = True


class BusStation(Station):
    bus_routes = models.CommaSeparatedIntegerField(max_length=128)
    inbound = models.BooleanField()


class TrainStation(Station):
    zone = models.IntegerField()


class User(models.Model):
    username = models.CharField(max_length=30, unique=True)


class Profile(User):
    # Child defines its own pk; the implicit parent link remains separate.
    profile_id = models.AutoField(primary_key=True)
    extra = models.CharField(max_length=30, blank=True)
| mit |
ilhamadun/har | tests/controller/test_usercontroller.py | 1 | 1059 | import pytest
from flask import url_for
from har import app, db
from har.model import user
@pytest.fixture
def setup():
    # Put the app in test mode and give each test a fresh database schema.
    app.config['TESTING'] = True
    app.config['SECRET_KEY'] = 'secretkey'
    db.create_all()
    yield
    db.drop_all()


@pytest.fixture
def create_user():
    # Create one known user ('email'/'password') and hand its id to the test.
    user_id = user.create_user('email', 'password')
    yield user_id


def test_login(setup, create_user):
    """A valid email/password pair logs the user in and records last_login."""
    test_app = app.test_client()
    response = test_app.post('/login', data={
        'email': 'email',
        'password': 'password'
    })

    test_user = user.get_user_by_id(create_user)

    # Successful login redirects (302) and marks the user authenticated.
    assert response.status_code == 302
    assert test_user.is_authenticated
    assert test_user.last_login


def test_login_failed(setup, create_user):
    """Invalid credentials also redirect (302) rather than render an error."""
    test_app = app.test_client()
    response = test_app.post('/login', data={
        'email': 'asd',
        'password': 'asd'
    })

    assert response.status_code == 302


def test_unauthorized_access(setup):
    """Anonymous access to a protected page redirects to the login view."""
    test_app = app.test_client()
    response = test_app.get('log')
    assert response.status_code == 302
| mit |
sebadiaz/rethinkdb | test/common/test_exceptions.py | 37 | 1071 | #!/usr/bin/env python
'''Collection of the shared exceptions used in testing'''
class TestingFrameworkException(Exception):
    """Base class for errors raised by the testing framework.

    Subclasses override ``_message`` to customize the summary line; an
    optional ``detail`` string and ``debugInfo`` payload (string or
    file-like object) may be attached at construction time.
    """

    _message = 'A generic testing framework error occured'
    detail = None
    debugInfo = None

    def __init__(self, detail=None, debugInfo=None):
        if detail is not None:
            self.detail = str(detail)
        if debugInfo is None:
            return
        # A file-like payload is rewound and read eagerly so the text
        # survives even if the underlying stream is closed later.
        if hasattr(debugInfo, 'read'):
            debugInfo.seek(0)
            self.debugInfo = debugInfo.read()
        else:
            self.debugInfo = debugInfo

    def __str__(self):
        summary = self.message()
        return summary if self.detail is None else "%s: %s" % (summary, self.detail)

    def message(self):
        """Return the summary line for this exception class."""
        return self._message


class NotBuiltException(TestingFrameworkException):
    """Raised when an item that was expected to be built was not."""

    _message = 'An item was not built'
sgerhart/ansible | lib/ansible/modules/network/voss/voss_command.py | 63 | 7891 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: voss_command
version_added: "2.7"
author: "Lindsay Hill (@LindsayHill)"
short_description: Run commands on remote devices running Extreme VOSS
description:
- Sends arbitrary commands to an Extreme VSP device running VOSS, and
returns the results read from the device. This module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
- This module does not support running commands in configuration mode.
Please use M(voss_config) to configure VOSS devices.
notes:
- Tested against VOSS 7.0.0
options:
commands:
description:
- List of commands to send to the remote VOSS device. The
resulting output from the command is returned. If the
I(wait_for) argument is provided, the module is not returned
until the condition is satisfied or the number of retries has
expired. If a command sent to the device requires answering a
prompt, it is possible to pass a dict containing I(command),
I(answer) and I(prompt). Common answers are 'y' or "\\r"
(carriage return, must be double quotes). See examples.
required: true
wait_for:
description:
- List of conditions to evaluate against the output of the
command. The task will wait for each condition to be true
before moving forward. If the conditional is not true
within the configured number of retries, the task fails.
See examples.
match:
description:
- The I(match) argument is used in conjunction with the
I(wait_for) argument to specify the match policy. Valid
values are C(all) or C(any). If the value is set to C(all)
then all conditionals in the wait_for must be satisfied. If
the value is set to C(any) then only one of the values must be
satisfied.
default: all
choices: ['any', 'all']
retries:
description:
- Specifies the number of retries a command should by tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the
I(wait_for) conditions.
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
conditions, the interval indicates how long to wait before
trying the command again.
default: 1
"""
EXAMPLES = r"""
tasks:
- name: run show sys software on remote devices
voss_command:
commands: show sys software
- name: run show sys software and check to see if output contains VOSS
voss_command:
commands: show sys software
wait_for: result[0] contains VOSS
- name: run multiple commands on remote nodes
voss_command:
commands:
- show sys software
- show interfaces vlan
- name: run multiple commands and evaluate the output
voss_command:
commands:
- show sys software
- show interfaces vlan
wait_for:
- result[0] contains Version
- result[1] contains Basic
- name: run command that requires answering a prompt
voss_command:
commands:
- command: 'reset'
prompt: 'Are you sure you want to reset the switch? (y/n)'
answer: 'y'
"""
RETURN = """
stdout:
description: The set of responses from the commands
returned: always apart from low level errors (such as action plugin)
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always apart from low level errors (such as action plugin)
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: The list of conditionals that have failed
returned: failed
type: list
sample: ['...', '...']
"""
import re
import time
from ansible.module_utils.network.voss.voss import run_commands
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import ComplexList
from ansible.module_utils.network.common.parsing import Conditional
from ansible.module_utils.six import string_types
def to_lines(stdout):
    """Yield each response, splitting string responses into lists of lines."""
    for response in stdout:
        if isinstance(response, string_types):
            yield str(response).split('\n')
        else:
            yield response
def parse_commands(module, warnings):
    """Normalize the ``commands`` parameter and vet it for check mode.

    Each entry may be a plain string or a dict with ``command``, ``prompt``
    and ``answer`` keys.  In check mode, config-mode commands abort the run
    and non-``show`` commands are removed with a warning.

    Args:
        module: the AnsibleModule instance.
        warnings: list to which check-mode warnings are appended in place.

    Returns:
        The (possibly filtered) list of command dicts.
    """
    command = ComplexList(dict(
        command=dict(key=True),
        prompt=dict(),
        answer=dict()
    ), module)
    commands = command(module.params['commands'])
    # Iterate over a copy so items can be removed from ``commands`` safely.
    for item in list(commands):
        # Matches 'conf...' commands; group(1) is the word after the conf
        # keyword (e.g. 'confirm'), or None if absent.
        configure_type = re.match(r'conf(?:\w*)(?:\s+(\w+))?', item['command'])
        if module.check_mode:
            if configure_type and configure_type.group(1) not in ('confirm', 'replace', 'revert', 'network'):
                module.fail_json(
                    msg='voss_command does not support running config mode '
                        'commands. Please use voss_config instead'
                )
            if not item['command'].startswith('show'):
                # Non-show commands could change device state: skip them in
                # check mode rather than failing the whole task.
                warnings.append(
                    'only show commands are supported when using check mode, not '
                    'executing `%s`' % item['command']
                )
                commands.remove(item)
    return commands
def main():
    """Main entry point for module execution.

    Runs the requested commands, retrying until every ``wait_for``
    conditional is satisfied (or, with match=any, until one is), then
    exits with the collected output or fails with the unmet conditions.
    """
    argument_spec = dict(
        commands=dict(type='list', required=True),
        wait_for=dict(type='list'),
        match=dict(default='all', choices=['all', 'any']),
        retries=dict(default=10, type='int'),
        interval=dict(default=1, type='int')
    )

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    result = {'changed': False}

    warnings = list()
    commands = parse_commands(module, warnings)
    result['warnings'] = warnings

    wait_for = module.params['wait_for'] or list()
    conditionals = [Conditional(c) for c in wait_for]

    retries = module.params['retries']
    interval = module.params['interval']
    match = module.params['match']

    # NOTE(review): if retries is 0, the loop never runs and ``responses``
    # below would be unbound — confirm retries is always >= 1 upstream.
    while retries > 0:
        responses = run_commands(module, commands)

        # Check each remaining conditional against this round of output;
        # satisfied ones are dropped so only failures are retried.
        for item in list(conditionals):
            if item(responses):
                if match == 'any':
                    # One satisfied condition is enough in 'any' mode.
                    conditionals = list()
                    break
                conditionals.remove(item)

        if not conditionals:
            break

        time.sleep(interval)
        retries -= 1

    if conditionals:
        # Some conditions never matched within the retry budget.
        failed_conditions = [item.raw for item in conditionals]
        msg = 'One or more conditional statements have not been satisfied'
        module.fail_json(msg=msg, failed_conditions=failed_conditions)

    result.update({
        'changed': False,
        'stdout': responses,
        'stdout_lines': list(to_lines(responses))
    })

    module.exit_json(**result)
| mit |
simplyguru-dot/ansible | lib/ansible/plugins/callback/oneline.py | 144 | 3487 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.callback import CallbackBase
from ansible import constants as C
class CallbackModule(CallbackBase):

    '''
    This is the default callback interface, which simply prints messages
    to stdout when new callback events are received.

    Each task result is condensed onto a single line (newlines in command
    output are escaped as literal "\\n") so runs remain grep-friendly.
    '''

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'
    CALLBACK_NAME = 'oneline'

    def _command_generic_msg(self, hostname, result, caption):
        # Render a raw command result (modules in MODULE_NO_JSON) as
        # "host | CAPTION | rc=N | (stdout) ... [(stderr) ...]".
        stdout = result.get('stdout','').replace('\n', '\\n')
        if 'stderr' in result and result['stderr']:
            stderr = result.get('stderr','').replace('\n', '\\n')
            return "%s | %s | rc=%s | (stdout) %s (stderr) %s" % (hostname, caption, result.get('rc',0), stdout, stderr)
        else:
            return "%s | %s | rc=%s | (stdout) %s" % (hostname, caption, result.get('rc',0), stdout)

    def v2_runner_on_failed(self, result, ignore_errors=False):
        if 'exception' in result._result:
            if self._display.verbosity < 3:
                # extract just the actual error message from the exception text
                error = result._result['exception'].strip().split('\n')[-1]
                msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error
            else:
                msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception'].replace('\n','')

            if result._task.action in C.MODULE_NO_JSON:
                self._display.display(self._command_generic_msg(result._host.get_name(), result._result,'FAILED'), color='red')
            else:
                self._display.display(msg, color='red')

            # finally, remove the exception from the result so it's not shown every time
            del result._result['exception']

        self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')), color='red')

    def v2_runner_on_ok(self, result):
        # Raw-output modules get the generic "rc=..." line; everything else
        # gets its result dict dumped inline as JSON.
        if result._task.action in C.MODULE_NO_JSON:
            self._display.display(self._command_generic_msg(result._host.get_name(), result._result,'SUCCESS'), color='green')
        else:
            self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')), color='green')

    def v2_runner_on_unreachable(self, result):
        self._display.display("%s | UNREACHABLE!" % result._host.get_name(), color='yellow')

    def v2_runner_on_skipped(self, result):
        self._display.display("%s | SKIPPED" % (result._host.get_name()), color='cyan')
| gpl-3.0 |
yelongyu/chihu | app/__init__.py | 1 | 1052 | # -*- coding: utf-8 -*-
from flask import Flask
from flask.ext.bootstrap import Bootstrap
from flask.ext.mail import Mail
from flask.ext.moment import Moment
from flask.ext.sqlalchemy import SQLAlchemy
from config import config
from flask.ext.login import LoginManager
from flask.ext.pagedown import PageDown
# Flask extension singletons, created unbound and attached to the app in
# create_app() (application-factory pattern).
bootstrap = Bootstrap()
mail = Mail()
moment = Moment()
db = SQLAlchemy()
pagedown = PageDown()

# Login management
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
def create_app(config_name):
    """Application factory: build and configure a Flask app.

    Args:
        config_name: key into the ``config`` dict selecting which
            configuration class to load.

    Returns:
        The fully initialised Flask application instance.
    """
    app = Flask(__name__)
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)

    # Bind each extension singleton to this application instance.
    bootstrap.init_app(app)
    mail.init_app(app)
    moment.init_app(app)
    db.init_app(app)
    login_manager.init_app(app)
    pagedown.init_app(app)

    # Blueprints are imported here (not at module level) to avoid circular
    # imports, since the blueprints import from this package.
    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)

    from .auth import auth as auth_blueprint
    app.register_blueprint(auth_blueprint, url_prefix='/auth')

    return app
| gpl-3.0 |
llonchj/sentry | src/sentry/web/frontend/organization_api_keys.py | 23 | 2234 | from __future__ import absolute_import
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from operator import or_
from sentry.models import (
ApiKey, AuditLogEntry, AuditLogEntryEvent, OrganizationMemberType
)
from sentry.web.frontend.base import OrganizationView
DEFAULT_SCOPES = [
'project:read',
'event:read',
'team:read',
'org:read',
'member:read',
]
class OrganizationApiKeysView(OrganizationView):
    """Admin view for listing, creating and revoking an organization's API keys.

    POST with op=newkey creates a key with the default scope set; POST with
    op=removekey deletes the key named by ``kid``.  Both actions write an
    audit-log entry.  GET (or any other POST) renders the key list.
    """

    required_access = OrganizationMemberType.ADMIN

    def handle(self, request, organization):
        if request.POST.get('op') == 'newkey':
            # New keys start with the default read-only scope set.
            key = ApiKey.objects.create(
                organization=organization,
                scopes=reduce(or_, [getattr(ApiKey.scopes, s) for s in DEFAULT_SCOPES])
            )

            AuditLogEntry.objects.create(
                organization=organization,
                actor=request.user,
                ip_address=request.META['REMOTE_ADDR'],
                target_object=key.id,
                event=AuditLogEntryEvent.APIKEY_ADD,
                data=key.get_audit_log_data(),
            )

            redirect_uri = reverse('sentry-organization-api-key-settings', args=[
                organization.slug, key.id,
            ])

            return HttpResponseRedirect(redirect_uri)

        elif request.POST.get('op') == 'removekey':
            key = ApiKey.objects.get(
                id=request.POST.get('kid'),
                organization=organization,
            )

            # Snapshot everything the audit entry needs *before* deleting:
            # Model.delete() sets the instance pk to None on modern Django,
            # which previously left target_object empty in the audit log.
            audit_data = key.get_audit_log_data()
            key_id = key.id

            key.delete()

            AuditLogEntry.objects.create(
                organization=organization,
                actor=request.user,
                ip_address=request.META['REMOTE_ADDR'],
                target_object=key_id,
                event=AuditLogEntryEvent.APIKEY_REMOVE,
                data=audit_data,
            )

            return HttpResponseRedirect(request.path)

        # Default: render the organization's keys sorted by label.
        key_list = sorted(ApiKey.objects.filter(
            organization=organization,
        ), key=lambda x: x.label)

        context = {
            'key_list': key_list,
        }

        return self.respond('sentry/organization-api-keys.html', context)
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.