repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
hip-odoo/odoo | addons/mrp/tests/test_stock.py | 26 | 5056 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import common
from odoo.exceptions import except_orm
class TestWarehouse(common.TestMrpCommon):

    def test_manufacturing_route(self):
        """Toggle ``manufacture_to_resupply`` on the warehouse and verify the
        manufacture procurement rule, its route and the manufacturing picking
        type are removed/archived and then recreated/re-activated."""
        warehouse_1_stock_manager = self.warehouse_1.sudo(self.user_stock_manager)
        manu_rule = self.env['procurement.rule'].search([
            ('action', '=', 'manufacture'),
            ('warehouse_id', '=', self.warehouse_1.id)])
        self.assertEqual(self.warehouse_1.manufacture_pull_id, manu_rule)
        manu_route = manu_rule.route_id
        self.assertIn(manu_route, warehouse_1_stock_manager._get_all_routes())
        # Disable resupply by manufacturing: the pull rule must disappear and
        # the manufacturing operation type must be archived.
        warehouse_1_stock_manager.write({
            'manufacture_to_resupply': False
        })
        self.assertFalse(self.warehouse_1.manufacture_pull_id)
        self.assertFalse(self.warehouse_1.manu_type_id.active)
        self.assertNotIn(manu_route, warehouse_1_stock_manager._get_all_routes())
        # Re-enable it: a fresh rule record is created, so search again.
        warehouse_1_stock_manager.write({
            'manufacture_to_resupply': True
        })
        manu_rule = self.env['procurement.rule'].search([
            ('action', '=', 'manufacture'),
            ('warehouse_id', '=', self.warehouse_1.id)])
        self.assertEqual(self.warehouse_1.manufacture_pull_id, manu_rule)
        self.assertTrue(self.warehouse_1.manu_type_id.active)
        self.assertIn(manu_route, warehouse_1_stock_manager._get_all_routes())

    def test_manufacturing_scrap(self):
        """
        Testing to do a scrap of consumed material.
        """
        # Update demo products: both consumed products become lot-tracked.
        (self.product_4 | self.product_2).write({
            'tracking': 'lot',
        })
        # Update Bill Of Material to remove product with phantom bom.
        self.bom_3.bom_line_ids.filtered(lambda x: x.product_id == self.product_5).unlink()
        # Create Inventory Adjustment For Stick and Stone Tools with lot.
        lot_product_4 = self.env['stock.production.lot'].create({
            'name': '0000000000001',
            'product_id': self.product_4.id,
        })
        lot_product_2 = self.env['stock.production.lot'].create({
            'name': '0000000000002',
            'product_id': self.product_2.id,
        })
        stock_inv_product_4 = self.env['stock.inventory'].create({
            'name': 'Stock Inventory for Stick',
            'filter': 'product',
            'product_id': self.product_4.id,
            'line_ids': [
                (0, 0, {'product_id': self.product_4.id, 'product_uom_id': self.product_4.uom_id.id, 'product_qty': 8, 'prod_lot_id': lot_product_4.id, 'location_id': self.ref('stock.stock_location_14')}),
            ]})
        stock_inv_product_2 = self.env['stock.inventory'].create({
            'name': 'Stock Inventory for Stone Tools',
            'filter': 'product',
            'product_id': self.product_2.id,
            'line_ids': [
                (0, 0, {'product_id': self.product_2.id, 'product_uom_id': self.product_2.uom_id.id, 'product_qty': 12, 'prod_lot_id': lot_product_2.id, 'location_id': self.ref('stock.stock_location_14')})
            ]})
        (stock_inv_product_4 | stock_inv_product_2).prepare_inventory()
        (stock_inv_product_4 | stock_inv_product_2).action_done()
        # Create Manufacturing order.
        production_3 = self.env['mrp.production'].create({
            'name': 'MO-Test003',
            'product_id': self.product_6.id,
            'product_qty': 12,
            'bom_id': self.bom_3.id,
            'product_uom_id': self.product_6.uom_id.id,
        })
        production_3.action_assign()
        # Check Manufacturing order's availability.
        self.assertEqual(production_3.availability, 'assigned', "Production order's availability should be Available.")
        # BUGFIX: the original assignment ended with a stray trailing comma,
        # which made location_id a 1-tuple instead of an integer id; also
        # replaced the fragile `cond and a or b` idiom with an explicit branch.
        if production_3.move_raw_ids.filtered(lambda x: x.state not in ('done', 'cancel')):
            location_id = production_3.location_src_id.id
        else:
            location_id = production_3.location_dest_id.id
        # Scrap Product Wood without lot to check assert raise ?.
        with self.assertRaises(except_orm):
            self.env['stock.scrap'].with_context(active_model='mrp.production', active_id=production_3.id).create({'product_id': self.product_2.id, 'scrap_qty': 1.0, 'product_uom_id': self.product_2.uom_id.id, 'location_id': location_id, 'production_id': production_3.id})
        # Scrap Product Wood with lot.
        self.env['stock.scrap'].with_context(active_model='mrp.production', active_id=production_3.id).create({'product_id': self.product_2.id, 'scrap_qty': 1.0, 'product_uom_id': self.product_2.uom_id.id, 'location_id': location_id, 'lot_id': lot_product_2.id, 'production_id': production_3.id})
        # Check scrap move is created for this production order.
        # TODO: should check with scrap objects link in between
        # scrap_move = production_3.move_raw_ids.filtered(lambda x: x.product_id == self.product_2 and x.scrapped)
        # self.assertTrue(scrap_move, "There are no any scrap move created for production order.")
| agpl-3.0 |
nachandr/cfme_tests | cfme/scripting/conf.py | 2 | 3831 | #!/usr/bin/env python3
"""Script to encrypt config files.
Usage:
scripts/encrypt_conf.py confname1 confname2 ... confnameN
scripts/encrypt_conf.py credentials
"""
import io
import click
import yaycl_crypt
from cfme.scripting import link_config
from cfme.utils import conf
@click.group(help='Functions affecting configuration files')
def main():
    """Entry point: click group gathering all conf-related subcommands."""
    pass


# Expose the link_config command group as ``conf link``.
main.add_command(link_config.main, name='link')
@main.command(help='Tests a yaml file')
@click.argument('conf_name', default='credentials')
def test(conf_name):
    """Sanity-check a yaml conf file by loading it and counting top-level keys.

    A surprisingly low count usually means a YAML syntax error truncated the
    parse.
    """
    # Idiom fix: use the getattr builtin rather than calling __getattr__
    # directly on the conf module.
    creds = getattr(conf, conf_name)
    print("{} keys found, if this value seems low, there may be a YAML error".format(len(creds)))
@main.command('show-credential', help='Shows the value of a credential key')
@click.argument('cred-or-provider-key')
@click.option('--only-credentials', is_flag=True, help='Only search credentials, (not providers)')
def show_credential(cred_or_provider_key, only_credentials):
    """Print credentials for either a provider key or a plain credential key.

    Provider keys dump the credentials of every endpoint; credential keys
    dump that one entry.
    """
    providers = conf.cfme_data.get('management_systems', {})
    if cred_or_provider_key in providers and not only_credentials:
        endpoints_data = providers[cred_or_provider_key].get('endpoints', {})
        for endpoint_name in endpoints_data:
            print(endpoint_name)
            credential_key = endpoints_data[endpoint_name].get('credentials')
            if not credential_key:
                print(" No credentials defined for this endpoint.")
                continue
            for field, value in conf.credentials[credential_key].items():
                print(f" {field}: {value}")
    elif cred_or_provider_key in conf.credentials:
        for field, value in conf.credentials[cred_or_provider_key].items():
            print(f"{field}: {value}")
    else:
        print("Key couldn't be found in providers or credentials YAMLS")
@main.command('show-provider', help='Shows the configuration of a provider')
@click.argument('provider-key')
def show_provider(provider_key):
    """Function to show provider data"""
    # NOTE(review): printing BytesIO.getvalue() shows a bytes repr (b'...')
    # under Python 3 — presumably .dump() writes bytes; confirm, otherwise
    # io.StringIO would print cleanly.
    output = io.BytesIO()
    data = conf.cfme_data
    if provider_key in data.get('management_systems', {}):
        # .dump() serializes the provider's config mapping into `output`.
        data['management_systems'][provider_key].dump(output)
        print(output.getvalue())
    else:
        print("Key couldn't be found in provider data")
@main.command(help='Encrypts a yaml file')
@click.argument('conf_name', default='credentials')
@click.option('--delete', default=False, is_flag=True,
              help='If supplied delete the unencrypted config of the same name.')
def encrypt(conf_name, delete):
    """Encrypt the named conf file via yaycl_crypt, optionally removing the
    plaintext original."""
    target = conf_name.strip()
    yaycl_crypt.encrypt_yaml(conf, target, delete=delete)
    print(f'{target} conf encrypted')
    if delete:
        return
    # Plaintext still on disk: it shadows the encrypted copy on load.
    print('WARNING: unencrypted file left which will override encrypted')
@main.command(help='Decrypts a yaml file')
@click.argument('conf_name', default='credentials')
@click.option('--delete', default=False, is_flag=True,
              help='If supplied delete the encrypted config of the same name.')
@click.option('--skip/--no-skip', default=True,
              help='If supplied raise exception if decrypted file already exists')
def decrypt(conf_name, delete, skip):
    """Decrypt the named conf file, optionally skipping when a plaintext copy
    already exists."""
    target = conf_name.strip()
    try:
        yaycl_crypt.decrypt_yaml(conf, target, delete=delete)
    except yaycl_crypt.YayclCryptError as err:
        # yaycl_crypt signals "plaintext already exists" via the word
        # 'overwrite' in the message; honour --skip for that case only.
        if not (skip and 'overwrite' in str(err)):
            raise
        print(f'SKIPPED {target} conf decrypt, decrypted file already exists')
        return
    print(f'{target} conf decrypted')
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
| gpl-2.0 |
nikhila05/django-seo | rollyourown/seo/backends.py | 7 | 13498 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.db import models
from django.contrib.sites.models import Site
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.template import Template, Context
from django.utils.datastructures import SortedDict
from rollyourown.seo.utils import resolve_to_name, NotSet, Literal
# Attribute names a metadata definition may not use: they collide with the
# fields and methods defined on the generated models below.
RESERVED_FIELD_NAMES = ('_metadata', '_path', '_content_type', '_object_id',
                        '_content_object', '_view', '_site', 'objects',
                        '_resolve_value', '_set_context', 'id', 'pk' )

# Maps backend name -> backend class; populated automatically by
# MetadataBackend's metaclass as each subclass is defined.
backend_registry = SortedDict()
class MetadataBaseModel(models.Model):
    """Abstract base for all generated metadata models.

    Resolves metadata element values with this precedence: an editable,
    explicitly-set instance field; then the element's ``populate_from``
    (callable, Literal, or the name of another element); then an attribute
    of the metadata class itself.
    """

    class Meta:
        abstract = True

    def __init__(self, *args, **kwargs):
        super(MetadataBaseModel, self).__init__(*args, **kwargs)
        # Provide access to a class instance
        # TODO Rename to __metadata
        self._metadata = self.__class__._metadata()

    # TODO Rename to __resolve_value?
    def _resolve_value(self, name):
        """ Returns an appropriate value for the given name. """
        name = str(name)
        if name in self._metadata._meta.elements:
            element = self._metadata._meta.elements[name]

            # Look in instances for an explicit value
            if element.editable:
                value = getattr(self, name)
                if value:
                    return value

            # Otherwise, return an appropriate default value (populate_from)
            populate_from = element.populate_from
            if callable(populate_from):
                return populate_from(self, **self._populate_from_kwargs())
            elif isinstance(populate_from, Literal):
                return populate_from.value
            elif populate_from is not NotSet:
                # populate_from names another element: resolve recursively.
                return self._resolve_value(populate_from)

        # If this is not an element, look for an attribute on metadata
        try:
            value = getattr(self._metadata, name)
        except AttributeError:
            pass
        else:
            if callable(value):
                # Python 2 bound-method check: im_self is set when the
                # callable is already bound to the metadata instance.
                if getattr(value, 'im_self', None):
                    return value(self)
                else:
                    return value(self._metadata, self)
            return value

    def _populate_from_kwargs(self):
        # Subclasses supply extra kwargs for callable populate_from hooks.
        return {}
class BaseManager(models.Manager):
    """Manager shared by generated metadata models; filters by site and
    language."""

    def on_current_site(self, site=None):
        # Accept a Site instance, a domain string, or fall back to the
        # configured SITE_ID.
        if isinstance(site, Site):
            site_id = site.id
        elif site is not None:
            site_id = site and Site.objects.get(domain=site).id
        else:
            site_id = settings.SITE_ID
        # Exclude entries for other sites; a NULL _site means "all sites".
        where = ['_site_id IS NULL OR _site_id=%s']
        return self.get_query_set().extra(where=where, params=[site_id])

    def for_site_and_language(self, site=None, language=None):
        queryset = self.on_current_site(site)
        if language:
            queryset = queryset.filter(_language=language)
        return queryset
# Following is part of an incomplete move to define backends, which will:
# - contain the business logic of backends to a short, succinct module
# - allow individual backends to be turned on and off
# - allow new backends to be added by end developers
#
# A Backend:
# - defines an abstract base class for storing the information required to associate metadata with its target (ie a view, a path, a model instance etc)
# - defines a method for retrieving an instance
#
# This is not particularly easy.
# - unique_together fields need to be defined in the same django model, as some django versions don't enforce the uniqueness when it spans subclasses
# - most backends use the path to find a matching instance. The model backend however ideally needs a content_type (found from a model instance backend, which used the path)
# - catering for all the possible options (use_sites, use_languages), needs to be done succiently, and at compile time
#
# This means that:
# - all fields that share uniqueness (backend fields, _site, _language) need to be defined in the same model
# - as backends should have full control over the model, therefore every backend needs to define the compulsory fields themselves (eg _site and _language).
# There is no way to add future compulsory fields to all backends without editing each backend individually.
# This is probably going to have to be a limitataion we need to live with.
class MetadataBackend(object):
    """Abstract base for metadata backends.

    Every subclass is auto-registered in ``backend_registry`` by the
    (Python 2 style) ``__metaclass__`` below.  Subclasses define how
    metadata records are matched to their target (path, view, model, ...).
    """
    name = None             # registry key for this backend
    verbose_name = None     # human-readable label
    unique_together = None  # backend-specific uniqueness sets

    class __metaclass__(type):
        def __new__(cls, name, bases, attrs):
            new_class = type.__new__(cls, name, bases, attrs)
            # Register every class built with this metaclass (the abstract
            # base itself lands under the key None).
            backend_registry[new_class.name] = new_class
            return new_class

    def get_unique_together(self, options):
        # Extend each backend uniqueness set with _site/_language when the
        # corresponding option is enabled, so uniqueness spans those axes.
        ut = []
        for ut_set in self.unique_together:
            ut_set = [a for a in ut_set]
            if options.use_sites:
                ut_set.append('_site')
            if options.use_i18n:
                ut_set.append('_language')
            ut.append(tuple(ut_set))
        return tuple(ut)

    def get_manager(self, options):
        # Build a manager class whose get_instances delegates to this
        # backend's get_instances with a pre-filtered queryset.
        _get_instances = self.get_instances
        class _Manager(BaseManager):
            def get_instances(self, path, site=None, language=None, context=None):
                queryset = self.for_site_and_language(site, language)
                return _get_instances(queryset, path, context)
            if not options.use_sites:
                # Without site support, skip the site filter entirely.
                def for_site_and_language(self, site=None, language=None):
                    queryset = self.get_query_set()
                    if language:
                        queryset = queryset.filter(_language=language)
                    return queryset
        return _Manager

    @staticmethod
    def validate(options):
        """ Validates the application of this backend to a given metadata
        """
class PathBackend(MetadataBackend):
    """Backend attaching metadata to a literal URL path."""
    name = "path"
    verbose_name = "Path"
    unique_together = (("_path",),)

    def get_instances(self, queryset, path, context):
        # Exact path match.
        return queryset.filter(_path=path)

    def get_model(self, options):
        class PathMetadataBase(MetadataBaseModel):
            # _path alone is only unique when neither sites nor i18n take
            # part in the uniqueness constraint (see Meta below).
            _path = models.CharField(_('path'), max_length=255, unique=not (options.use_sites or options.use_i18n))
            if options.use_sites:
                _site = models.ForeignKey(Site, null=True, blank=True, verbose_name=_("site"))
            if options.use_i18n:
                _language = models.CharField(_("language"), max_length=5, null=True, blank=True, db_index=True, choices=settings.LANGUAGES)
            objects = self.get_manager(options)()

            def __unicode__(self):
                return self._path

            def _populate_from_kwargs(self):
                return {'path': self._path}

            class Meta:
                abstract = True
                unique_together = self.get_unique_together(options)

        return PathMetadataBase
class ViewBackend(MetadataBackend):
    """Backend attaching metadata to a named view."""
    name = "view"
    verbose_name = "View"
    unique_together = (("_view",),)

    def get_instances(self, queryset, path, context):
        # Resolve the request path back to a view name; unmatched paths
        # fall back to the empty string.
        view_name = ""
        if path is not None:
            view_name = resolve_to_name(path)
        return queryset.filter(_view=view_name or "")

    def get_model(self, options):
        class ViewMetadataBase(MetadataBaseModel):
            _view = models.CharField(_('view'), max_length=255, unique=not (options.use_sites or options.use_i18n), default="", blank=True)
            if options.use_sites:
                _site = models.ForeignKey(Site, null=True, blank=True, verbose_name=_("site"))
            if options.use_i18n:
                _language = models.CharField(_("language"), max_length=5, null=True, blank=True, db_index=True, choices=settings.LANGUAGES)
            objects = self.get_manager(options)()

            def _process_context(self, context):
                """ Use the context when rendering any substitutions. """
                if 'view_context' in context:
                    self.__context = context['view_context']

            def _populate_from_kwargs(self):
                return {'view_name': self._view}

            def _resolve_value(self, name):
                value = super(ViewMetadataBase, self)._resolve_value(name)
                try:
                    return _resolve(value, context=self.__context)
                except AttributeError:
                    # __context was never captured (_process_context not
                    # called yet): return the unrendered value.
                    return value

            def __unicode__(self):
                return self._view

            class Meta:
                abstract = True
                unique_together = self.get_unique_together(options)

        return ViewMetadataBase
class ModelInstanceBackend(MetadataBackend):
    """Backend attaching metadata to a single model instance via the
    contenttypes framework."""
    name = "modelinstance"
    verbose_name = "Model Instance"
    unique_together = (("_path",), ("_content_type", "_object_id"))

    def get_instances(self, queryset, path, context):
        # Instances are matched by the path cached at save() time.
        return queryset.filter(_path=path)

    def get_model(self, options):
        class ModelInstanceMetadataBase(MetadataBaseModel):
            _path = models.CharField(_('path'), max_length=255, editable=False, unique=not (options.use_sites or options.use_i18n))
            _content_type = models.ForeignKey(ContentType, editable=False)
            _object_id = models.PositiveIntegerField(editable=False)
            _content_object = generic.GenericForeignKey('_content_type', '_object_id')
            if options.use_sites:
                _site = models.ForeignKey(Site, null=True, blank=True, verbose_name=_("site"))
            if options.use_i18n:
                _language = models.CharField(_("language"), max_length=5, null=True, blank=True, db_index=True, choices=settings.LANGUAGES)
            objects = self.get_manager(options)()

            def __unicode__(self):
                return self._path

            class Meta:
                unique_together = self.get_unique_together(options)
                abstract = True

            def _process_context(self, context):
                # Share the target's content type and this record with other
                # backends (ModelBackend reads both keys from this context).
                context['content_type'] = self._content_type
                context['model_instance'] = self

            def _populate_from_kwargs(self):
                return {'model_instance': self._content_object}

            def save(self, *args, **kwargs):
                # Cache the target object's URL so lookups by path are a
                # single indexed query; objects without get_absolute_url
                # keep their previous _path.
                try:
                    path_func = self._content_object.get_absolute_url
                except AttributeError:
                    pass
                else:
                    self._path = path_func()
                super(ModelInstanceMetadataBase, self).save(*args, **kwargs)

        return ModelInstanceMetadataBase
class ModelBackend(MetadataBackend):
    """Backend attaching metadata to every instance of a model (one record
    per content type)."""
    name = "model"
    verbose_name = "Model"
    unique_together = (("_content_type",),)

    def get_instances(self, queryset, path, context):
        # Relies on the content_type placed in the context by
        # ModelInstanceBackend; implicitly returns None when it is missing.
        if context and 'content_type' in context:
            return queryset.filter(_content_type=context['content_type'])

    def get_model(self, options):
        class ModelMetadataBase(MetadataBaseModel):
            _content_type = models.ForeignKey(ContentType)
            if options.use_sites:
                _site = models.ForeignKey(Site, null=True, blank=True, verbose_name=_("site"))
            if options.use_i18n:
                _language = models.CharField(_("language"), max_length=5, null=True, blank=True, db_index=True, choices=settings.LANGUAGES)
            objects = self.get_manager(options)()

            def __unicode__(self):
                return unicode(self._content_type)

            def _process_context(self, context):
                """ Use the given model instance as context for rendering
                    any substitutions.
                """
                if 'model_instance' in context:
                    self.__instance = context['model_instance']

            def _populate_from_kwargs(self):
                return {'content_type': self._content_type}

            def _resolve_value(self, name):
                value = super(ModelMetadataBase, self)._resolve_value(name)
                try:
                    return _resolve(value, self.__instance._content_object)
                except AttributeError:
                    # __instance was never captured: return the raw value.
                    return value

            class Meta:
                abstract = True
                unique_together = self.get_unique_together(options)

        return ModelMetadataBase

    @staticmethod
    def validate(options):
        """ Validates the application of this backend to a given metadata
        """
        # 'modelinstance' must be installed and listed first: it provides
        # the content_type that get_instances above depends on.
        try:
            if options.backends.index('modelinstance') > options.backends.index('model'):
                raise Exception("Metadata backend 'modelinstance' must come before 'model' backend")
        except ValueError:
            raise Exception("Metadata backend 'modelinstance' must be installed in order to use 'model' backend")
def _resolve(value, model_instance=None, context=None):
    """ Resolves any template references in the given value.

    Plain values (non-strings, or strings without '{') pass through
    untouched; otherwise the value is rendered as a Django template, with
    the model instance exposed under its module name when provided.
    """
    if not (isinstance(value, basestring) and "{" in value):
        return value
    context = Context() if context is None else context
    if model_instance is not None:
        context[model_instance._meta.module_name] = model_instance
    return Template(value).render(context)
| bsd-3-clause |
baibaichen/eagle | eagle-external/eagle-ambari/lib/EAGLE/package/scripts/service_check.py | 21 | 1521 | #!/usr/bin/python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from resource_management import *
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions.validate import call_and_match_output
from resource_management.libraries.functions.format import format
from resource_management.core.logger import Logger
class EagleServiceCheck(Script):
    """Ambari service-check script for Eagle: passes when the recorded
    service pid belongs to a live process."""

    def service_check(self,env):
        Logger.info("Checking eagle service")
        import params  # generated by Ambari at deploy time
        env.set_params(params)
        # Succeeds when the pid file exists AND the pid it holds is alive;
        # retried up to 5 times with a 3-second pause between attempts.
        check_eagle_service_cmd=format("ls {eagle_service_pid_file} >/dev/null 2>&1 && ps -p `cat {eagle_service_pid_file}` >/dev/null 2>&1")
        Execute(check_eagle_service_cmd,logoutput=True,try_sleep=3, tries=5)


if __name__ == "__main__":
    EagleServiceCheck().execute()
| apache-2.0 |
andrewyoung1991/abjad | abjad/tools/sequencetools/remove_elements.py | 2 | 2888 | # -*- encoding: utf-8 -*-
def remove_elements(
    sequence,
    indices=None,
    period=None,
    ):
    '''Removes `sequence` elements at `indices`.

    Negative indices count from the end of `sequence`, and indices whose
    absolute value exceeds ``len(sequence)`` are ignored.  When `period`
    is given, the normalized indices are taken modulo `period`, so the
    removal pattern repeats along the sequence.  When `indices` is none,
    every element is removed.

    ..  container:: example

        ::

            >>> sequencetools.remove_elements(range(15))
            []

    ..  container:: example

        ::

            >>> sequencetools.remove_elements(
            ...     range(15),
            ...     indices=[2, 3],
            ...     period=4,
            ...     )
            [0, 1, 4, 5, 8, 9, 12, 13]

    Returns elements in the order they appear in `sequence`.

    Returns list.
    '''
    total = len(sequence)
    cycle = period or total
    if indices is None:
        indices = range(total)
    # Normalize: drop out-of-range indices, map negatives onto positive
    # positions, then reduce modulo the repetition period.
    removal_slots = set()
    for index in indices:
        if abs(index) > total:
            continue
        if index < 0:
            index += total
        removal_slots.add(index % cycle)
    # Keep every element whose periodic position was not marked.
    return [
        element
        for position, element in enumerate(sequence)
        if position % cycle not in removal_slots
        ]
jeshoward/RRTPath | vendor/googletest/googletest/scripts/gen_gtest_pred_impl.py | 2538 | 21986 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""gen_gtest_pred_impl.py v0.1
Generates the implementation of Google Test predicate assertions and
accompanying tests.
Usage:
gen_gtest_pred_impl.py MAX_ARITY
where MAX_ARITY is a positive integer.
The command generates the implementation of up-to MAX_ARITY-ary
predicate assertions, and writes it to file gtest_pred_impl.h in the
directory where the script is. It also generates the accompanying
unit test in file gtest_pred_impl_unittest.cc.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'

import os
import sys
import time

# Where this script is.
SCRIPT_DIR = os.path.dirname(sys.argv[0])

# Where to store the generated header (relative to this script).
HEADER = os.path.join(SCRIPT_DIR, '../include/gtest/gtest_pred_impl.h')

# Where to store the generated unit test (relative to this script).
UNIT_TEST = os.path.join(SCRIPT_DIR, '../test/gtest_pred_impl_unittest.cc')
def HeaderPreamble(n):
  """Returns the preamble for the header file.

  The preamble is the license banner plus the GTEST_ASSERT_ base macro,
  with today's date, the generating command and the maximum arity
  interpolated via %-formatting.

  Args:
    n: the maximum arity of the predicate macros to be generated.
  """

  # A map that defines the values used in the preamble template.
  DEFS = {
    'today' : time.strftime('%m/%d/%Y'),
    'year' : time.strftime('%Y'),
    'command' : '%s %s' % (os.path.basename(sys.argv[0]), n),
    'n' : n
    }

  # Render the boilerplate; every %(name)s below is filled from DEFS.
  return (
"""// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'. DO NOT EDIT BY HAND!
//
// Implements a family of generic predicate assertion macros.
#ifndef GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
#define GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
// Makes sure this header is not included before gtest.h.
#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
# error Do not include gtest_pred_impl.h directly. Include gtest.h instead.
#endif // GTEST_INCLUDE_GTEST_GTEST_H_
// This header implements a family of generic predicate assertion
// macros:
//
// ASSERT_PRED_FORMAT1(pred_format, v1)
// ASSERT_PRED_FORMAT2(pred_format, v1, v2)
// ...
//
// where pred_format is a function or functor that takes n (in the
// case of ASSERT_PRED_FORMATn) values and their source expression
// text, and returns a testing::AssertionResult. See the definition
// of ASSERT_EQ in gtest.h for an example.
//
// If you don't care about formatting, you can use the more
// restrictive version:
//
// ASSERT_PRED1(pred, v1)
// ASSERT_PRED2(pred, v1, v2)
// ...
//
// where pred is an n-ary function or functor that returns bool,
// and the values v1, v2, ..., must support the << operator for
// streaming to std::ostream.
//
// We also define the EXPECT_* variations.
//
// For now we only support predicates whose arity is at most %(n)s.
// Please email googletestframework@googlegroups.com if you need
// support for higher arities.
// GTEST_ASSERT_ is the basic statement to which all of the assertions
// in this file reduce. Don't use this in your code.
#define GTEST_ASSERT_(expression, on_failure) \\
GTEST_AMBIGUOUS_ELSE_BLOCKER_ \\
if (const ::testing::AssertionResult gtest_ar = (expression)) \\
; \\
else \\
on_failure(gtest_ar.failure_message())
""" % DEFS)
def Arity(n):
  """Returns the English name of the given arity.

  Negative arities have no name and yield None; arities above three use
  the generic 'N-ary' form.
  """
  SPECIAL_NAMES = ['nullary', 'unary', 'binary', 'ternary']
  if n < 0:
    return None
  if n < len(SPECIAL_NAMES):
    return SPECIAL_NAMES[n]
  return '%s-ary' % n
def Title(word):
  """Returns *word* with only its first character upper-cased.

  Unlike str.title(), the rest of the word is left untouched, so
  Title('4-ary') stays '4-ary' rather than becoming '4-Ary'.
  """
  first, rest = word[0], word[1:]
  return first.upper() + rest
def OneTo(n):
  """Returns the list [1, 2, 3, ..., n].

  NOTE: under Python 2 (this script's target) range() returns a real list;
  callers only iterate the result, so a py3 range object would also work.
  """
  return range(1, n + 1)
def Iter(n, format, sep=''):
  """Given a positive integer n, a format string that contains 0 or
  more '%s' format specs, and optionally a separator string, returns
  the join of n strings, each formatted with the format string on an
  iterator ranged from 1 to n.

  Example:

  Iter(3, 'v%s', sep=', ') returns 'v1, v2, v3'.
  """
  # Every '%s' spec in the format receives the same index, so count them
  # once and repeat the index that many times per rendering.
  placeholder_count = format.count('%s')
  rendered = [format % (placeholder_count * (index,))
              for index in range(1, n + 1)]
  return sep.join(rendered)
def ImplementationForArity(n):
  """Returns the implementation of n-ary predicate assertions.

  Emits the AssertPredNHelper template function plus the GTEST_PRED*_ and
  {EXPECT,ASSERT}_PRED* macro definitions for arity n, by concatenating
  fixed template fragments with per-argument fragments produced by Iter().
  """

  # A map the defines the values used in the implementation template.
  DEFS = {
    'n' : str(n),
    'vs' : Iter(n, 'v%s', sep=', '),
    'vts' : Iter(n, '#v%s', sep=', '),
    'arity' : Arity(n),
    'Arity' : Title(Arity(n))
    }

  # Helper function signature: one template parameter per argument.
  impl = """
// Helper function for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use
// this in your code.
template <typename Pred""" % DEFS

  impl += Iter(n, """,
typename T%s""")

  impl += """>
AssertionResult AssertPred%(n)sHelper(const char* pred_text""" % DEFS

  impl += Iter(n, """,
const char* e%s""")

  impl += """,
Pred pred"""

  impl += Iter(n, """,
const T%s& v%s""")

  # Helper body: success fast-path, then a failure message naming each
  # argument expression and its value.
  impl += """) {
if (pred(%(vs)s)) return AssertionSuccess();
""" % DEFS

  impl += ' return AssertionFailure() << pred_text << "("'

  impl += Iter(n, """
<< e%s""", sep=' << ", "')

  impl += ' << ") evaluates to false, where"'

  impl += Iter(n, """
<< "\\n" << e%s << " evaluates to " << v%s""")

  # Macro layer: *_PRED_FORMATn delegates to the user formatter,
  # *_PREDn to the generated helper above.
  impl += """;
}
// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
// Don't use this in your code.
#define GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, on_failure)\\
GTEST_ASSERT_(pred_format(%(vts)s, %(vs)s), \\
on_failure)
// Internal macro for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use
// this in your code.
#define GTEST_PRED%(n)s_(pred, %(vs)s, on_failure)\\
GTEST_ASSERT_(::testing::AssertPred%(n)sHelper(#pred""" % DEFS

  impl += Iter(n, """, \\
#v%s""")

  impl += """, \\
pred"""

  impl += Iter(n, """, \\
v%s""")

  impl += """), on_failure)
// %(Arity)s predicate assertion macros.
#define EXPECT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED%(n)s(pred, %(vs)s) \\
GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED%(n)s(pred, %(vs)s) \\
GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_FATAL_FAILURE_)
""" % DEFS

  return impl
def HeaderPostamble():
  """Returns the postamble for the header file (the closing include guard)."""

  return """
#endif // GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
"""
def GenerateFile(path, content):
  """Given a file path and a content string, overwrites it with the
  given content.

  NOTE: Python 2 code ('print' statements, 'file' builtin); the trailing
  comma on the print suppresses the extra newline.
  """

  print 'Updating file %s . . .' % path
  f = file(path, 'w+')
  print >>f, content,
  f.close()
  print 'File %s has been updated.' % path
def GenerateHeader(n):
  """Regenerates the predicate-assertion header file for arities 1..n.

  Writes HEADER, built from the preamble, one implementation section per
  arity, and the postamble.
  """
  sections = [HeaderPreamble(n)]
  sections.extend(ImplementationForArity(i) for i in OneTo(n))
  sections.append(HeaderPostamble())
  GenerateFile(HEADER, ''.join(sections))
def UnitTestPreamble():
  """Returns the preamble for the unit test file.

  The preamble embeds today's date and the generating command line, and
  defines the user-defined C++ type `Bool` that the generated tests use to
  exercise predicates over non-built-in argument types.
  """
  # A map that defines the values used in the preamble template.
  # 'command' reads sys.argv[1]; callers must validate argv first (see _Main).
  DEFS = {
    'today' : time.strftime('%m/%d/%Y'),
    'year' : time.strftime('%Y'),
    'command' : '%s %s' % (os.path.basename(sys.argv[0]), sys.argv[1]),
  }
  return (
"""// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'.  DO NOT EDIT BY HAND!
// Regression test for gtest_pred_impl.h
//
// This file is generated by a script and quite long.  If you intend to
// learn how Google Test works by reading its unit tests, read
// gtest_unittest.cc instead.
//
// This is intended as a regression test for the Google Test predicate
// assertions.  We compile it as part of the gtest_unittest target
// only to keep the implementation tidy and compact, as it is quite
// involved to set up the stage for testing Google Test using Google
// Test itself.
//
// Currently, gtest_unittest takes ~11 seconds to run in the testing
// daemon.  In the future, if it grows too large and needs much more
// time to finish, we should consider separating this file into a
// stand-alone regression test.
#include <iostream>
#include "gtest/gtest.h"
#include "gtest/gtest-spi.h"
// A user-defined data type.
struct Bool {
  explicit Bool(int val) : value(val != 0) {}
  bool operator>(int n) const { return value > Bool(n).value; }
  Bool operator+(const Bool& rhs) const { return Bool(value + rhs.value); }
  bool operator==(const Bool& rhs) const { return value == rhs.value; }
  bool value;
};
// Enables Bool to be used in assertions.
std::ostream& operator<<(std::ostream& os, const Bool& x) {
  return os << (x.value ? "true" : "false");
}
""" % DEFS)
def TestsForArity(n):
  """Returns the tests for n-ary predicate assertions.

  Emits, as one C++ source string: sample predicate functions/functors and
  predicate-formatters of arity n, a shared test fixture, and one TEST_F per
  combination of the five GenTest flags below.
  """
  # A map that defines the values used in the template for the tests.
  DEFS = {
    'n' : n,
    'es' : Iter(n, 'e%s', sep=', '),
    'vs' : Iter(n, 'v%s', sep=', '),
    'vts' : Iter(n, '#v%s', sep=', '),
    'tvs' : Iter(n, 'T%s v%s', sep=', '),
    'int_vs' : Iter(n, 'int v%s', sep=', '),
    'Bool_vs' : Iter(n, 'Bool v%s', sep=', '),
    'types' : Iter(n, 'typename T%s', sep=', '),
    'v_sum' : Iter(n, 'v%s', sep=' + '),
    'arity' : Arity(n),
    'Arity' : Title(Arity(n)),
  }
  tests = (
"""// Sample functions/functors for testing %(arity)s predicate assertions.
// A %(arity)s predicate function.
template <%(types)s>
bool PredFunction%(n)s(%(tvs)s) {
  return %(v_sum)s > 0;
}
// The following two functions are needed to circumvent a bug in
// gcc 2.95.3, which sometimes has problem with the above template
// function.
bool PredFunction%(n)sInt(%(int_vs)s) {
  return %(v_sum)s > 0;
}
bool PredFunction%(n)sBool(%(Bool_vs)s) {
  return %(v_sum)s > 0;
}
""" % DEFS)
  tests += """
// A %(arity)s predicate functor.
struct PredFunctor%(n)s {
  template <%(types)s>
  bool operator()(""" % DEFS
  tests += Iter(n, 'const T%s& v%s', sep=""",
                  """)
  tests += """) {
    return %(v_sum)s > 0;
  }
};
""" % DEFS
  tests += """
// A %(arity)s predicate-formatter function.
template <%(types)s>
testing::AssertionResult PredFormatFunction%(n)s(""" % DEFS
  tests += Iter(n, 'const char* e%s', sep=""",
                                             """)
  tests += Iter(n, """,
                                             const T%s& v%s""")
  tests += """) {
  if (PredFunction%(n)s(%(vs)s))
    return testing::AssertionSuccess();
  return testing::AssertionFailure()
      << """ % DEFS
  tests += Iter(n, 'e%s', sep=' << " + " << ')
  tests += """
      << " is expected to be positive, but evaluates to "
      << %(v_sum)s << ".";
}
""" % DEFS
  tests += """
// A %(arity)s predicate-formatter functor.
struct PredFormatFunctor%(n)s {
  template <%(types)s>
  testing::AssertionResult operator()(""" % DEFS
  tests += Iter(n, 'const char* e%s', sep=""",
                                      """)
  tests += Iter(n, """,
                                      const T%s& v%s""")
  tests += """) const {
    return PredFormatFunction%(n)s(%(es)s, %(vs)s);
  }
};
""" % DEFS
  tests += """
// Tests for {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
class Predicate%(n)sTest : public testing::Test {
 protected:
  virtual void SetUp() {
    expected_to_finish_ = true;
    finished_ = false;""" % DEFS
  tests += """
    """ + Iter(n, 'n%s_ = ') + """0;
  }
"""
  tests += """
  virtual void TearDown() {
    // Verifies that each of the predicate's arguments was evaluated
    // exactly once."""
  tests += ''.join(["""
    EXPECT_EQ(1, n%s_) <<
        "The predicate assertion didn't evaluate argument %s "
        "exactly once.";""" % (i, i + 1) for i in OneTo(n)])
  tests += """
    // Verifies that the control flow in the test function is expected.
    if (expected_to_finish_ && !finished_) {
      FAIL() << "The predicate assertion unexpactedly aborted the test.";
    } else if (!expected_to_finish_ && finished_) {
      FAIL() << "The failed predicate assertion didn't abort the test "
                "as expected.";
    }
  }
  // true iff the test function is expected to run to finish.
  static bool expected_to_finish_;
  // true iff the test function did run to finish.
  static bool finished_;
""" % DEFS
  tests += Iter(n, """
  static int n%s_;""")
  tests += """
};
bool Predicate%(n)sTest::expected_to_finish_;
bool Predicate%(n)sTest::finished_;
""" % DEFS
  tests += Iter(n, """int Predicate%%(n)sTest::n%s_;
""") % DEFS
  tests += """
typedef Predicate%(n)sTest EXPECT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest EXPECT_PRED%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED%(n)sTest;
""" % DEFS
  def GenTest(use_format, use_assert, expect_failure,
              use_functor, use_user_type):
    """Returns the test for a predicate assertion macro.
    Args:
      use_format:     true iff the assertion is a *_PRED_FORMAT*.
      use_assert:     true iff the assertion is a ASSERT_*.
      expect_failure: true iff the assertion is expected to fail.
      use_functor:    true iff the first argument of the assertion is
                      a functor (as opposed to a function)
      use_user_type:  true iff the predicate functor/function takes
                      argument(s) of a user-defined type.
    Example:
      GenTest(1, 0, 0, 1, 0) returns a test that tests the behavior
      of a successful EXPECT_PRED_FORMATn() that takes a functor
      whose arguments have built-in types."""
    if use_assert:
      assrt = 'ASSERT'  # 'assert' is reserved, so we cannot use
                        # that identifier here.
    else:
      assrt = 'EXPECT'
    assertion = assrt + '_PRED'
    if use_format:
      pred_format = 'PredFormat'
      assertion += '_FORMAT'
    else:
      pred_format = 'Pred'
    assertion += '%(n)s' % DEFS
    if use_functor:
      pred_format_type = 'functor'
      pred_format += 'Functor%(n)s()'
    else:
      pred_format_type = 'function'
      pred_format += 'Function%(n)s'
      # Only the plain predicate *functions* come in Int/Bool-specialized
      # flavors (the gcc 2.95.3 workaround above); formatters do not.
      if not use_format:
        if use_user_type:
          pred_format += 'Bool'
        else:
          pred_format += 'Int'
    test_name = pred_format_type.title()
    if use_user_type:
      arg_type = 'user-defined type (Bool)'
      test_name += 'OnUserType'
      # Post-increment in the failure case so the argument expression's side
      # effect is still counted even though the assertion aborts.
      if expect_failure:
        arg = 'Bool(n%s_++)'
      else:
        arg = 'Bool(++n%s_)'
    else:
      arg_type = 'built-in type (int)'
      test_name += 'OnBuiltInType'
      if expect_failure:
        arg = 'n%s_++'
      else:
        arg = '++n%s_'
    if expect_failure:
      successful_or_failed = 'failed'
      expected_or_not = 'expected.'
      test_name += 'Failure'
    else:
      successful_or_failed = 'successful'
      expected_or_not = 'UNEXPECTED!'
      test_name += 'Success'
    # A map that defines the values used in the test template.
    defs = DEFS.copy()
    defs.update({
      'assert' : assrt,
      'assertion' : assertion,
      'test_name' : test_name,
      'pf_type' : pred_format_type,
      'pf' : pred_format,
      'arg_type' : arg_type,
      'arg' : arg,
      'successful' : successful_or_failed,
      'expected' : expected_or_not,
    })
    test = """
// Tests a %(successful)s %(assertion)s where the
// predicate-formatter is a %(pf_type)s on a %(arg_type)s.
TEST_F(%(assertion)sTest, %(test_name)s) {""" % defs
    indent = (len(assertion) + 3)*' '
    extra_indent = ''
    if expect_failure:
      extra_indent = '  '
      if use_assert:
        test += """
  expected_to_finish_ = false;
  EXPECT_FATAL_FAILURE({  // NOLINT"""
      else:
        test += """
  EXPECT_NONFATAL_FAILURE({  // NOLINT"""
    test += '\n' + extra_indent + """  %(assertion)s(%(pf)s""" % defs
    test = test % defs
    test += Iter(n, ',\n' + indent + extra_indent + '%(arg)s' % defs)
    test += ');\n' + extra_indent + '  finished_ = true;\n'
    if expect_failure:
      test += '  }, "");\n'
    test += '}\n'
    return test
  # Generates tests for all 2**5 = 32 combinations of the five flags.
  tests += ''.join([GenTest(use_format, use_assert, expect_failure,
                            use_functor, use_user_type)
                    for use_format in [0, 1]
                    for use_assert in [0, 1]
                    for expect_failure in [0, 1]
                    for use_functor in [0, 1]
                    for use_user_type in [0, 1]
                    ])
  return tests
def UnitTestPostamble():
  """Return the trailer appended to the generated test file (currently empty)."""
  postamble = ''
  return postamble
def GenerateUnitTest(n):
  """Regenerates the unit-test file covering predicate assertions up to arity n.

  Writes UNIT_TEST, built from the preamble, one test section per arity, and
  the postamble.
  """
  parts = [UnitTestPreamble()]
  parts.extend(TestsForArity(i) for i in OneTo(n))
  parts.append(UnitTestPostamble())
  GenerateFile(UNIT_TEST, ''.join(parts))
def _Main():
  """The entry point of the script.

  Expects exactly one command-line argument (the maximum predicate arity n);
  regenerates both the header file and its unit test for arities 1..n, or
  prints usage and exits with status 1.
  """
  if len(sys.argv) != 2:
    print __doc__
    print 'Author: ' + __author__
    sys.exit(1)
  n = int(sys.argv[1])
  GenerateHeader(n)
  GenerateUnitTest(n)
# Run only when executed as a script; importing the module has no side effects.
if __name__ == '__main__':
  _Main()
| gpl-3.0 |
dkodnik/Ant | addons/mrp/wizard/mrp_workcenter_load.py | 56 | 2244 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class mrp_workcenter_load(osv.osv_memory):
    """Transient wizard collecting the options for the Work Center Load report."""
    _name = 'mrp.workcenter.load'
    _description = 'Work Center Load'
    _columns = {
        'time_unit': fields.selection([('day', 'Day by day'),('week', 'Per week'),('month', 'Per month')],'Type of period', required=True),
        'measure_unit': fields.selection([('hours', 'Amount in hours'),('cycles', 'Amount in cycles')],'Amount measuring unit', required=True),
    }

    def print_report(self, cr, uid, ids, context=None):
        """Build the report action for the Work Center Load report.

        :param cr: database cursor
        :param uid: id of the user currently logged in
        :param ids: ids of the wizard records holding the chosen options
        :param context: standard context dictionary (may be None)
        :return: ``ir.actions.report.xml`` action dictionary
        """
        if context is None:
            context = {}
        # The report receives the work centers selected in the client
        # ('active_ids') plus the wizard's own option values as 'form'.
        values = self.read(cr, uid, ids, ['time_unit','measure_unit'])
        form = values and values[0] or {}
        datas = {'ids' : context.get('active_ids',[])}
        datas['form'] = form
        return {
            'type' : 'ir.actions.report.xml',
            'report_name':'mrp.workcenter.load',
            'datas' : datas,
        }
mrp_workcenter_load()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Soya93/Extract-Refactoring | python/helpers/pydev/_pydevd_bundle/pydevd_comm.py | 2 | 57034 | ''' pydevd - a debugging daemon
This is the daemon you launch for python remote debugging.
Protocol:
each command has a format:
id\tsequence-num\ttext
id: protocol command number
sequence-num: each request has a sequence number. Sequence numbers
originating at the debugger are odd, sequence numbers originating
at the daemon are even. Every response uses the same sequence number
as the request.
payload: it is protocol dependent. When response is a complex structure, it
is returned as XML. Each attribute value is urlencoded, and then the whole
payload is urlencoded again to prevent stray characters corrupting protocol/xml encodings
Commands:
NUMBER NAME FROM* ARGUMENTS RESPONSE NOTE
100 series: program execution
101 RUN JAVA - -
102 LIST_THREADS JAVA RETURN with XML listing of all threads
103 THREAD_CREATE PYDB - XML with thread information
104 THREAD_KILL JAVA id (or * to exit) kills the thread
                               PYDB      id                             notifies JAVA that thread was killed
105 THREAD_SUSPEND JAVA XML of the stack, suspends the thread
reason for suspension
PYDB id notifies JAVA that thread was suspended
106 CMD_THREAD_RUN JAVA id resume the thread
PYDB id \t reason notifies JAVA that thread was resumed
107 STEP_INTO JAVA thread_id
108 STEP_OVER JAVA thread_id
109 STEP_RETURN JAVA thread_id
110 GET_VARIABLE JAVA thread_id \t frame_id \t GET_VARIABLE with XML of var content
FRAME|GLOBAL \t attributes*
111 SET_BREAK JAVA file/line of the breakpoint
112 REMOVE_BREAK JAVA file/line of the return
113 CMD_EVALUATE_EXPRESSION JAVA expression result of evaluating the expression
114 CMD_GET_FRAME JAVA request for frame contents
115 CMD_EXEC_EXPRESSION JAVA
116 CMD_WRITE_TO_CONSOLE PYDB
117 CMD_CHANGE_VARIABLE
118 CMD_RUN_TO_LINE
119 CMD_RELOAD_CODE
120 CMD_GET_COMPLETIONS JAVA
500 series diagnostics/ok
501 VERSION either Version string (1.0) Currently just used at startup
502 RETURN either Depends on caller -
900 series: errors
901 ERROR either - This is reserved for unexpected errors.
* JAVA - remote debugger, the java end
* PYDB - pydevd, the python end
'''
from _pydev_bundle.pydev_imports import _queue
from _pydev_imps._pydev_saved_modules import time
from _pydev_imps._pydev_saved_modules import thread
from _pydev_imps._pydev_saved_modules import threading
from _pydev_imps._pydev_saved_modules import socket
from socket import socket, AF_INET, SOCK_STREAM, SHUT_RD, SHUT_WR
from _pydevd_bundle.pydevd_constants import * #@UnusedWildImport
try:
from urllib import quote_plus, unquote, unquote_plus
except:
from urllib.parse import quote_plus, unquote, unquote_plus #@Reimport @UnresolvedImport
import pydevconsole
from _pydevd_bundle import pydevd_vars
from _pydevd_bundle import pydevd_tracing
from _pydevd_bundle import pydevd_vm_type
import pydevd_file_utils
import traceback
from _pydevd_bundle.pydevd_utils import quote_smart as quote, compare_object_attrs, cmp_to_key, to_string
from _pydev_bundle import pydev_log
from _pydev_bundle import _pydev_completer
from _pydevd_bundle.pydevd_tracing import get_exception_traceback_str
from _pydevd_bundle import pydevd_console
from _pydev_bundle.pydev_monkey import disable_trace_thread_modules, enable_trace_thread_modules
# Command ids of the wire protocol between the IDE frontend ("JAVA" side)
# and this backend ("PYDB" side).  The id is the first tab-separated field
# of every message; see the module docstring for the full message format.
CMD_RUN = 101
CMD_LIST_THREADS = 102
CMD_THREAD_CREATE = 103
CMD_THREAD_KILL = 104
CMD_THREAD_SUSPEND = 105
CMD_THREAD_RUN = 106
CMD_STEP_INTO = 107
CMD_STEP_OVER = 108
CMD_STEP_RETURN = 109
CMD_GET_VARIABLE = 110
CMD_SET_BREAK = 111
CMD_REMOVE_BREAK = 112
CMD_EVALUATE_EXPRESSION = 113
CMD_GET_FRAME = 114
CMD_EXEC_EXPRESSION = 115
CMD_WRITE_TO_CONSOLE = 116
CMD_CHANGE_VARIABLE = 117
CMD_RUN_TO_LINE = 118
CMD_RELOAD_CODE = 119
CMD_GET_COMPLETIONS = 120
# Note: renumbered (conflicted on merge)
CMD_CONSOLE_EXEC = 121
CMD_ADD_EXCEPTION_BREAK = 122
CMD_REMOVE_EXCEPTION_BREAK = 123
CMD_LOAD_SOURCE = 124
CMD_ADD_DJANGO_EXCEPTION_BREAK = 125
CMD_REMOVE_DJANGO_EXCEPTION_BREAK = 126
CMD_SET_NEXT_STATEMENT = 127
CMD_SMART_STEP_INTO = 128
CMD_EXIT = 129
CMD_SIGNATURE_CALL_TRACE = 130
CMD_SET_PY_EXCEPTION = 131
CMD_GET_FILE_CONTENTS = 132
CMD_SET_PROPERTY_TRACE = 133
# Pydev debug console commands
CMD_EVALUATE_CONSOLE_EXPRESSION = 134
CMD_RUN_CUSTOM_OPERATION = 135
CMD_GET_BREAKPOINT_EXCEPTION = 136
CMD_STEP_CAUGHT_EXCEPTION = 137
CMD_SEND_CURR_EXCEPTION_TRACE = 138
CMD_SEND_CURR_EXCEPTION_TRACE_PROCEEDED = 139
CMD_IGNORE_THROWN_EXCEPTION_AT = 140
CMD_ENABLE_DONT_TRACE = 141
CMD_SHOW_CONSOLE = 142
CMD_GET_ARRAY = 143
CMD_STEP_INTO_MY_CODE = 144
CMD_GET_CONCURRENCY_EVENT = 145
# 500 series: diagnostics/ok.  900 series: errors.
CMD_VERSION = 501
CMD_RETURN = 502
CMD_ERROR = 901
# Human-readable names for the protocol command ids above; used only for
# logging/tracing.  Keys are the string form of the numeric command id.
ID_TO_MEANING = {
    '101': 'CMD_RUN',
    '102': 'CMD_LIST_THREADS',
    '103': 'CMD_THREAD_CREATE',
    '104': 'CMD_THREAD_KILL',
    '105': 'CMD_THREAD_SUSPEND',
    '106': 'CMD_THREAD_RUN',
    '107': 'CMD_STEP_INTO',
    '108': 'CMD_STEP_OVER',
    '109': 'CMD_STEP_RETURN',
    '110': 'CMD_GET_VARIABLE',
    '111': 'CMD_SET_BREAK',
    '112': 'CMD_REMOVE_BREAK',
    '113': 'CMD_EVALUATE_EXPRESSION',
    '114': 'CMD_GET_FRAME',
    '115': 'CMD_EXEC_EXPRESSION',
    '116': 'CMD_WRITE_TO_CONSOLE',
    '117': 'CMD_CHANGE_VARIABLE',
    '118': 'CMD_RUN_TO_LINE',
    '119': 'CMD_RELOAD_CODE',
    '120': 'CMD_GET_COMPLETIONS',
    '121': 'CMD_CONSOLE_EXEC',
    '122': 'CMD_ADD_EXCEPTION_BREAK',
    '123': 'CMD_REMOVE_EXCEPTION_BREAK',
    '124': 'CMD_LOAD_SOURCE',
    '125': 'CMD_ADD_DJANGO_EXCEPTION_BREAK',
    '126': 'CMD_REMOVE_DJANGO_EXCEPTION_BREAK',
    '127': 'CMD_SET_NEXT_STATEMENT',
    '128': 'CMD_SMART_STEP_INTO',
    '129': 'CMD_EXIT',
    '130': 'CMD_SIGNATURE_CALL_TRACE',
    '131': 'CMD_SET_PY_EXCEPTION',
    '132': 'CMD_GET_FILE_CONTENTS',
    '133': 'CMD_SET_PROPERTY_TRACE',
    '134': 'CMD_EVALUATE_CONSOLE_EXPRESSION',
    '135': 'CMD_RUN_CUSTOM_OPERATION',
    '136': 'CMD_GET_BREAKPOINT_EXCEPTION',
    '137': 'CMD_STEP_CAUGHT_EXCEPTION',
    '138': 'CMD_SEND_CURR_EXCEPTION_TRACE',
    '139': 'CMD_SEND_CURR_EXCEPTION_TRACE_PROCEEDED',
    '140': 'CMD_IGNORE_THROWN_EXCEPTION_AT',
    '141': 'CMD_ENABLE_DONT_TRACE',
    '142': 'CMD_SHOW_CONSOLE',  # was missing: id 142 used to be logged as '???'
    '143': 'CMD_GET_ARRAY',
    '144': 'CMD_STEP_INTO_MY_CODE',
    '145': 'CMD_GET_CONCURRENCY_EVENT',
    '501': 'CMD_VERSION',
    '502': 'CMD_RETURN',
    '901': 'CMD_ERROR',
    }
MAX_IO_MSG_SIZE = 1000 #if the io is too big, we'll not send all (could make the debugger too non-responsive)
#this number can be changed if there's need to do so
# Placeholder token substituted with the real build number by the build process.
VERSION_STRING = "@@BUILD_NUMBER@@"
from _pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding
# Encoding the OS uses for file names; used to re-encode byte-string paths to
# utf-8 before sending them over the wire (see make_thread_suspend_str).
file_system_encoding = getfilesystemencoding()
#--------------------------------------------------------------------------------------------------- UTILITIES
#=======================================================================================================================
# pydevd_log
#=======================================================================================================================
def pydevd_log(level, *args):
    """Write a trace message to stderr when `level` is within the configured verbosity.

    levels are:
        0 most serious warnings/errors
        1 warnings/significant events
        2 informational trace
    """
    if level > DebugInfoHolder.DEBUG_TRACE_LEVEL:
        return
    # The console of the debugged program may already be gone while we are
    # still tracing, so any failure to write is deliberately ignored.
    try:
        sys.stderr.write('%s\n' % (args,))
    except:
        pass
#=======================================================================================================================
# GlobalDebuggerHolder
#=======================================================================================================================
class GlobalDebuggerHolder:
    '''
        Holder for the global debugger.
    '''
    # The single process-wide PyDB instance; None until set_global_debugger().
    global_dbg = None  # Note: don't rename (the name is used in our attach to process)
#=======================================================================================================================
# get_global_debugger
#=======================================================================================================================
def get_global_debugger():
    """Return the process-wide debugger instance (or None if none installed)."""
    return GlobalDebuggerHolder.global_dbg
GetGlobalDebugger = get_global_debugger # Backward-compatibility
#=======================================================================================================================
# set_global_debugger
#=======================================================================================================================
def set_global_debugger(dbg):
    """Install `dbg` as the process-wide debugger instance."""
    GlobalDebuggerHolder.global_dbg = dbg
#------------------------------------------------------------------- ACTUAL COMM
#=======================================================================================================================
# PyDBDaemonThread
#=======================================================================================================================
class PyDBDaemonThread(threading.Thread):
    """Base class for the debugger's own service threads.

    These threads are daemonic (they never keep the interpreter alive) and,
    by default, disable tracing on themselves so the debugger does not end up
    debugging its own machinery.  Subclasses implement `_on_run`.
    """
    # Registry of live pydevd daemon threads (thread -> 1), so the debugger
    # can tell its own threads apart from the program's threads.
    created_pydb_daemon_threads = {}
    def __init__(self):
        threading.Thread.__init__(self)
        self.setDaemon(True)
        # Set by do_kill_pydev_thread() to ask the loop in _on_run to stop.
        self.killReceived = False
        self.dontTraceMe = True
        self.is_pydev_daemon_thread = True
    def run(self):
        # Register/unregister this thread in the daemon-thread registry for
        # the duration of its life, and shield callers from any exception.
        created_pydb_daemon = self.created_pydb_daemon_threads
        created_pydb_daemon[self] = 1
        try:
            try:
                if IS_JYTHON:
                    import org.python.core as PyCore #@UnresolvedImport
                    ss = PyCore.PySystemState()
                    # Note: Py.setSystemState() affects only the current thread.
                    PyCore.Py.setSystemState(ss)
                self._on_run()
            except:
                # sys/traceback may already be None during interpreter shutdown.
                if sys is not None and traceback is not None:
                    traceback.print_exc()
        finally:
            del created_pydb_daemon[self]
    def _on_run(self):
        """Template method: subclasses put their service loop here."""
        raise NotImplementedError('Should be reimplemented by: %s' % self.__class__)
    def do_kill_pydev_thread(self):
        #that was not working very well because jython gave some socket errors
        self.killReceived = True
    def _stop_trace(self):
        # Turn tracing off for this thread so it is not debugged itself.
        if self.dontTraceMe:
            disable_tracing = True
            if pydevd_vm_type.get_vm_type() == pydevd_vm_type.PydevdVmType.JYTHON and sys.hexversion <= 0x020201f0:
                # don't run untraced threads if we're in jython 2.2.1 or lower
                # jython bug: if we start a thread and another thread changes the tracing facility
                # it affects other threads (it's not set only for the thread but globally)
                # Bug: http://sourceforge.net/tracker/index.php?func=detail&aid=1870039&group_id=12867&atid=112867
                disable_tracing = False
            if disable_tracing:
                pydevd_tracing.SetTrace(None) # no debugging on this thread
#=======================================================================================================================
# ReaderThread
#=======================================================================================================================
class ReaderThread(PyDBDaemonThread):
    """Reader thread: receives protocol messages from the IDE over `sock` and
    dispatches each complete command in an infinite loop.

    Messages are '\\n'-terminated, tab-separated: id, sequence number, payload
    (see the module docstring).
    """
    def __init__(self, sock):
        PyDBDaemonThread.__init__(self)
        self.sock = sock
        self.setName("pydevd.Reader")
        # Imported lazily to avoid an import cycle at module load time.
        from _pydevd_bundle.pydevd_process_net_command import process_net_command
        self.process_net_command = process_net_command
        self.global_debugger_holder = GlobalDebuggerHolder
    def do_kill_pydev_thread(self):
        #We must close the socket so that it doesn't stay halted there.
        self.killReceived = True
        try:
            self.sock.shutdown(SHUT_RD) #shutdown the socket for read
        except:
            #just ignore that
            pass
    def _on_run(self):
        # Main receive loop: accumulate bytes, split on '\n', dispatch commands.
        self._stop_trace()
        read_buffer = ""
        try:
            while not self.killReceived:
                try:
                    r = self.sock.recv(1024)
                except:
                    if not self.killReceived:
                        traceback.print_exc()
                        self.handle_except()
                    return #Finished communication.
                #Note: the java backend is always expected to pass utf-8 encoded strings. We now work with unicode
                #internally and thus, we may need to convert to the actual encoding where needed (i.e.: filenames
                #on python 2 may need to be converted to the filesystem encoding).
                if hasattr(r, 'decode'):
                    r = r.decode('utf-8')
                read_buffer += r
                if DebugInfoHolder.DEBUG_RECORD_SOCKET_READS:
                    sys.stderr.write('debugger: received >>%s<<\n' % (read_buffer,))
                    sys.stderr.flush()
                # Empty recv result means the peer closed the connection.
                if len(read_buffer) == 0:
                    self.handle_except()
                    break
                while read_buffer.find('\n') != -1:
                    command, read_buffer = read_buffer.split('\n', 1)
                    # maxsplit=2 keeps any tabs inside the payload intact.
                    args = command.split('\t', 2)
                    try:
                        cmd_id = int(args[0])
                        pydev_log.debug('Received command: %s %s\n' % (ID_TO_MEANING.get(str(cmd_id), '???'), command,))
                        self.process_command(cmd_id, int(args[1]), args[2])
                    except:
                        # A malformed command must not kill the reader loop.
                        traceback.print_exc()
                        sys.stderr.write("Can't process net command: %s\n" % command)
                        sys.stderr.flush()
        except:
            traceback.print_exc()
            self.handle_except()
    def handle_except(self):
        # Communication is broken: tear down the debugging session.
        self.global_debugger_holder.global_dbg.finish_debugging_session()
    def process_command(self, cmd_id, seq, text):
        self.process_net_command(self.global_debugger_holder.global_dbg, cmd_id, seq, text)
#----------------------------------------------------------------------------------- SOCKET UTILITIES - WRITER
#=======================================================================================================================
# WriterThread
#=======================================================================================================================
class WriterThread(PyDBDaemonThread):
    """Writer thread: drains the command queue and writes each NetCommand's
    pre-encoded bytes to the IDE socket in an infinite loop.
    """
    def __init__(self, sock):
        PyDBDaemonThread.__init__(self)
        self.sock = sock
        self.setName("pydevd.Writer")
        self.cmdQueue = _queue.Queue()
        # CPython can write at full speed; other VMs get a small pause per
        # message to avoid hogging the interpreter.
        if pydevd_vm_type.get_vm_type() == 'python':
            self.timeout = 0
        else:
            self.timeout = 0.1
    def add_command(self, cmd):
        """ cmd is NetCommand """
        if not self.killReceived: #we don't take new data after everybody die
            self.cmdQueue.put(cmd)
    def _on_run(self):
        """ just loop and write responses """
        self._stop_trace()
        get_has_timeout = sys.hexversion >= 0x02030000 # 2.3 onwards have it.
        try:
            while True:
                try:
                    try:
                        if get_has_timeout:
                            # Block up to 0.1s so killReceived is polled often.
                            cmd = self.cmdQueue.get(1, 0.1)
                        else:
                            time.sleep(.01)
                            cmd = self.cmdQueue.get(0)
                    except _queue.Empty:
                        if self.killReceived:
                            try:
                                self.sock.shutdown(SHUT_WR)
                                self.sock.close()
                            except:
                                pass
                            return #break if queue is empty and killReceived
                        else:
                            continue
                except:
                    #pydevd_log(0, 'Finishing debug communication...(1)')
                    #when liberating the thread here, we could have errors because we were shutting down
                    #but the thread was still not liberated
                    return
                out = cmd.outgoing
                if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 1:
                    out_message = 'sending cmd --> '
                    out_message += "%20s" % ID_TO_MEANING.get(out[:3], 'UNKNOWN')
                    out_message += ' '
                    out_message += unquote(unquote(out)).replace('\n', ' ')
                    try:
                        sys.stderr.write('%s\n' % (out_message,))
                    except:
                        pass
                if IS_PY3K:
                    out = bytearray(out, 'utf-8')
                self.sock.send(out) #TODO: this does not guarantee that all message are sent (and jython does not have a send all)
                if cmd.id == CMD_EXIT:
                    break
                # 'time' can become None while the interpreter is shutting down.
                if time is None:
                    break #interpreter shutdown
                time.sleep(self.timeout)
        except Exception:
            GlobalDebuggerHolder.global_dbg.finish_debugging_session()
            if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 0:
                traceback.print_exc()
    def empty(self):
        """Return True if no commands are waiting to be sent."""
        return self.cmdQueue.empty()
#--------------------------------------------------- CREATING THE SOCKET THREADS
#=======================================================================================================================
# start_server
#=======================================================================================================================
def start_server(port):
    """Bind to `port` on all interfaces, block until the remote debugger
    frontend connects, and return the connected socket.

    :param port: TCP port to listen on.
    :return: the socket connected to the debugger frontend.
    """
    s = socket(AF_INET, SOCK_STREAM)
    try:
        s.bind(('', port))
        s.listen(1)
        new_sock, _addr = s.accept()
    finally:
        # Only a single frontend ever connects: close the listening socket so
        # the port is released immediately instead of leaking until GC.
        s.close()
    return new_sock
#=======================================================================================================================
# start_client
#=======================================================================================================================
def start_client(host, port):
    """Connect to the debugger frontend at host:port, retrying for ~20s.

    On success returns the connected socket; on failure reports the last
    connection error to stderr and terminates the process via sys.exit(1).

    :param host: address where the frontend is listening.
    :param port: TCP port of the frontend.
    :return: the connected socket (never returns on failure).
    """
    pydevd_log(1, "Connecting to ", host, ":", str(port))
    s = socket(AF_INET, SOCK_STREAM)
    MAX_TRIES = 100
    last_error = None
    for _i in range(MAX_TRIES):
        try:
            s.connect((host, port))
        except Exception:
            # 'except Exception' (not a bare except) so Ctrl-C / SystemExit
            # still abort the retry loop; the frontend may simply not be
            # accepting connections yet, so wait a bit and retry.
            last_error = sys.exc_info()[1]
            time.sleep(0.2)
        else:
            pydevd_log(1, "Connected.")
            return s
    sys.stderr.write("Could not connect to %s: %s\n" % (host, port))
    # Report the actual failure instead of calling traceback.print_exc()
    # outside any handler (which printed nothing useful on Python 3).
    if last_error is not None:
        sys.stderr.write("Last connection error: %s\n" % (last_error,))
    sys.stderr.flush()
    sys.exit(1) #TODO: is it safe?
#------------------------------------------------------------------------------------ MANY COMMUNICATION STUFF
#=======================================================================================================================
# NetCommand
#=======================================================================================================================
class NetCommand:
    """ Commands received/sent over the network.

    Command can represent command received from the debugger,
    or one to be sent by daemon.
    """
    next_seq = 0 # sequence numbers

    def __init__(self, id, seq, text):
        """ smart handling of parameters
        if sequence is 0, new sequence will be generated
        if text has carriage returns they'll be replaced"""
        self.id = id
        if seq == 0:
            # Daemon-originated sequence numbers are even (see module docstring);
            # bump by two and use the fresh value.
            NetCommand.next_seq += 2
            seq = NetCommand.next_seq
        self.seq = seq
        self.text = text
        # Pre-build the url-quoted wire form: id \t seq \t payload \n
        self.outgoing = '%s\t%s\t%s\n' % (id, seq, quote(to_string(text), '/<>_=" \t'))
#=======================================================================================================================
# NetCommandFactory
#=======================================================================================================================
class NetCommandFactory:
def _thread_to_xml(self, thread):
""" thread information as XML """
name = pydevd_vars.make_valid_xml_value(thread.getName())
cmdText = '<thread name="%s" id="%s" />' % (quote(name), get_thread_id(thread))
return cmdText
def make_error_message(self, seq, text):
cmd = NetCommand(CMD_ERROR, seq, text)
if DebugInfoHolder.DEBUG_TRACE_LEVEL > 2:
sys.stderr.write("Error: %s" % (text,))
return cmd
def make_thread_created_message(self, thread):
cmdText = "<xml>" + self._thread_to_xml(thread) + "</xml>"
return NetCommand(CMD_THREAD_CREATE, 0, cmdText)
def make_custom_frame_created_message(self, frameId, frameDescription):
frameDescription = pydevd_vars.make_valid_xml_value(frameDescription)
cmdText = '<xml><thread name="%s" id="%s"/></xml>' % (frameDescription, frameId)
return NetCommand(CMD_THREAD_CREATE, 0, cmdText)
def make_list_threads_message(self, seq):
""" returns thread listing as XML """
try:
t = threading.enumerate()
cmd_text = ["<xml>"]
append = cmd_text.append
for i in t:
if t.isAlive():
append(self._thread_to_xml(i))
append("</xml>")
return NetCommand(CMD_RETURN, seq, ''.join(cmd_text))
except:
return self.make_error_message(seq, get_exception_traceback_str())
    def make_variable_changed_message(self, seq, payload):
        """CMD_RETURN ack telling the frontend a variable change was applied."""
        # notify debugger that value was changed successfully
        return NetCommand(CMD_RETURN, seq, payload)
def make_io_message(self, v, ctx, dbg=None):
'''
@param v: the message to pass to the debug server
@param ctx: 1 for stdio 2 for stderr
@param dbg: If not none, add to the writer
'''
try:
if len(v) > MAX_IO_MSG_SIZE:
v = v[0:MAX_IO_MSG_SIZE]
v += '...'
v = pydevd_vars.make_valid_xml_value(quote(v, '/>_= \t'))
net = NetCommand(str(CMD_WRITE_TO_CONSOLE), 0, '<xml><io s="%s" ctx="%s"/></xml>' % (v, ctx))
except:
net = self.make_error_message(0, get_exception_traceback_str())
if dbg:
dbg.writer.add_command(net)
return net
    def make_version_message(self, seq):
        """CMD_VERSION reply carrying this backend's version string."""
        try:
            return NetCommand(CMD_VERSION, seq, VERSION_STRING)
        except:
            return self.make_error_message(seq, get_exception_traceback_str())
    def make_thread_killed_message(self, id):
        """CMD_THREAD_KILL notification that the thread with the given id died."""
        try:
            return NetCommand(CMD_THREAD_KILL, 0, str(id))
        except:
            return self.make_error_message(0, get_exception_traceback_str())
    def make_thread_suspend_str(self, thread_id, frame, stop_reason, message):
        """Build the XML payload describing a suspended thread and its stack:

        <xml>
            <thread id="id" stop_reason="reason">
                    <frame id="id" name="functionName " file="file" line="line">
                    <var variable stuffff....
                </frame>
            </thread>

        Variables are not included per-frame here; they are fetched
        on-demand by the client (see the commented-out frame_vars_to_xml
        call below).
        """
        cmd_text_list = ["<xml>"]
        append = cmd_text_list.append
        make_valid_xml_value = pydevd_vars.make_valid_xml_value
        if message:
            message = make_valid_xml_value(message)
        append('<thread id="%s" stop_reason="%s" message="%s">' % (thread_id, stop_reason, message))
        curr_frame = frame
        try:
            # Walk from the suspension frame to the outermost caller.
            while curr_frame:
                #print cmdText
                my_id = id(curr_frame)
                #print "id is ", my_id
                if curr_frame.f_code is None:
                    break #Iron Python sometimes does not have it!
                my_name = curr_frame.f_code.co_name #method name (if in method) or ? if global
                if my_name is None:
                    break #Iron Python sometimes does not have it!
                #print "name is ", my_name
                abs_path_real_path_and_base = pydevd_file_utils.get_abs_path_real_path_and_base_from_frame(curr_frame)
                myFile = pydevd_file_utils.norm_file_to_client(abs_path_real_path_and_base[0])
                if file_system_encoding.lower() != "utf-8" and hasattr(myFile, "decode"):
                    # myFile is a byte string encoded using the file system encoding
                    # convert it to utf8
                    myFile = myFile.decode(file_system_encoding).encode("utf-8")
                #print "file is ", myFile
                #myFile = inspect.getsourcefile(curr_frame) or inspect.getfile(frame)
                myLine = str(curr_frame.f_lineno)
                #print "line is ", myLine
                #the variables are all gotten 'on-demand'
                #variables = pydevd_vars.frame_vars_to_xml(curr_frame.f_locals)
                variables = ''
                append('<frame id="%s" name="%s" ' % (my_id , make_valid_xml_value(my_name)))
                append('file="%s" line="%s">' % (quote(myFile, '/>_= \t'), myLine))
                append(variables)
                append("</frame>")
                curr_frame = curr_frame.f_back
        except :
            # Best-effort: a failure while walking the stack still yields a
            # well-formed (possibly partial) XML document.
            traceback.print_exc()
        append("</thread></xml>")
        return ''.join(cmd_text_list)
def make_thread_suspend_message(self, thread_id, frame, stop_reason, message):
try:
return NetCommand(CMD_THREAD_SUSPEND, 0, self.make_thread_suspend_str(thread_id, frame, stop_reason, message))
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_thread_run_message(self, id, reason):
try:
return NetCommand(CMD_THREAD_RUN, 0, str(id) + "\t" + str(reason))
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_get_variable_message(self, seq, payload):
try:
return NetCommand(CMD_GET_VARIABLE, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_get_array_message(self, seq, payload):
try:
return NetCommand(CMD_GET_ARRAY, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_get_frame_message(self, seq, payload):
try:
return NetCommand(CMD_GET_FRAME, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_evaluate_expression_message(self, seq, payload):
try:
return NetCommand(CMD_EVALUATE_EXPRESSION, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_get_completions_message(self, seq, payload):
try:
return NetCommand(CMD_GET_COMPLETIONS, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_get_file_contents(self, seq, payload):
try:
return NetCommand(CMD_GET_FILE_CONTENTS, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_send_breakpoint_exception_message(self, seq, payload):
try:
return NetCommand(CMD_GET_BREAKPOINT_EXCEPTION, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_send_curr_exception_trace_message(self, seq, thread_id, curr_frame_id, exc_type, exc_desc, trace_obj):
try:
while trace_obj.tb_next is not None:
trace_obj = trace_obj.tb_next
exc_type = pydevd_vars.make_valid_xml_value(str(exc_type)).replace('\t', ' ') or 'exception: type unknown'
exc_desc = pydevd_vars.make_valid_xml_value(str(exc_desc)).replace('\t', ' ') or 'exception: no description'
payload = str(curr_frame_id) + '\t' + exc_type + "\t" + exc_desc + "\t" + \
self.make_thread_suspend_str(thread_id, trace_obj.tb_frame, CMD_SEND_CURR_EXCEPTION_TRACE, '')
return NetCommand(CMD_SEND_CURR_EXCEPTION_TRACE, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_send_curr_exception_trace_proceeded_message(self, seq, thread_id):
try:
return NetCommand(CMD_SEND_CURR_EXCEPTION_TRACE_PROCEEDED, 0, str(thread_id))
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_send_console_message(self, seq, payload):
try:
return NetCommand(CMD_EVALUATE_CONSOLE_EXPRESSION, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_custom_operation_message(self, seq, payload):
try:
return NetCommand(CMD_RUN_CUSTOM_OPERATION, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
    def make_load_source_message(self, seq, source, dbg=None):
        """Build a CMD_LOAD_SOURCE reply carrying *source*.

        If *dbg* is given, the command is also queued on its writer.
        """
        try:
            net = NetCommand(CMD_LOAD_SOURCE, seq, '%s' % source)
        except:
            net = self.make_error_message(0, get_exception_traceback_str())
        if dbg:
            dbg.writer.add_command(net)
        return net
def make_show_console_message(self, thread_id, frame):
try:
return NetCommand(CMD_SHOW_CONSOLE, 0, self.make_thread_suspend_str(thread_id, frame, CMD_SHOW_CONSOLE, ''))
except:
return self.make_error_message(0, get_exception_traceback_str())
    def make_exit_message(self):
        """Build the CMD_EXIT command (empty payload)."""
        try:
            net = NetCommand(CMD_EXIT, 0, '')
        except:
            net = self.make_error_message(0, get_exception_traceback_str())
        return net
# Internal command kind identifiers (not referenced in this chunk;
# presumably used by queue consumers elsewhere — verify before removing).
INTERNAL_TERMINATE_THREAD = 1
INTERNAL_SUSPEND_THREAD = 2
#=======================================================================================================================
# InternalThreadCommand
#=======================================================================================================================
class InternalThreadCommand:
    """Base class for commands generated/executed by the debugger itself.

    Some commands must run on a specific thread, so instances are posted
    to PyDB.cmdQueue and only picked up by the thread allowed to execute
    them (see can_be_executed_by).
    """
    def can_be_executed_by(self, thread_id):
        '''By default, it must be in the same thread to be executed
        '''
        if self.thread_id == thread_id:
            return True
        return self.thread_id.endswith('|' + thread_id)
    def do_it(self, dbg):
        raise NotImplementedError("you have to override do_it")
class ReloadCodeCommand(InternalThreadCommand):
    # Reloads a module in-place via pydevd_reload.xreload, on the thread
    # identified by thread_id ('*' lets any thread run it).
    def __init__(self, module_name, thread_id):
        self.thread_id = thread_id
        self.module_name = module_name
        # Guard state: the command may be queued for several threads when
        # thread_id is '*', but must execute at most once.
        self.executed = False
        self.lock = thread.allocate_lock()
    def can_be_executed_by(self, thread_id):
        if self.thread_id == '*':
            return True #Any thread can execute it!
        return InternalThreadCommand.can_be_executed_by(self, thread_id)
    def do_it(self, dbg):
        # Atomically claim execution; later invocations return immediately.
        self.lock.acquire()
        try:
            if self.executed:
                return
            self.executed = True
        finally:
            self.lock.release()
        module_name = self.module_name
        if not dict_contains(sys.modules, module_name):
            # Fall back to the last dotted component, in case the module was
            # imported under its short name.
            if '.' in module_name:
                new_module_name = module_name.split('.')[-1]
                if dict_contains(sys.modules, new_module_name):
                    module_name = new_module_name
        if not dict_contains(sys.modules, module_name):
            sys.stderr.write('pydev debugger: Unable to find module to reload: "' + module_name + '".\n')
            # Too much info...
            # sys.stderr.write('pydev debugger: This usually means you are trying to reload the __main__ module (which cannot be reloaded).\n')
        else:
            sys.stderr.write('pydev debugger: Start reloading module: "' + module_name + '" ... \n')
            from _pydevd_bundle import pydevd_reload
            if pydevd_reload.xreload(sys.modules[module_name]):
                sys.stderr.write('pydev debugger: reload finished\n')
            else:
                sys.stderr.write('pydev debugger: reload finished without applying any change\n')
#=======================================================================================================================
# InternalTerminateThread
#=======================================================================================================================
class InternalTerminateThread(InternalThreadCommand):
    """Notifies the client that a thread is being killed."""
    def __init__(self, thread_id):
        self.thread_id = thread_id
    def do_it(self, dbg):
        pydevd_log(1, "killing ", str(self.thread_id))
        kill_cmd = dbg.cmd_factory.make_thread_killed_message(self.thread_id)
        dbg.writer.add_command(kill_cmd)
#=======================================================================================================================
# InternalRunThread
#=======================================================================================================================
class InternalRunThread(InternalThreadCommand):
    """Resumes a suspended thread, clearing any pending step command."""
    def __init__(self, thread_id):
        self.thread_id = thread_id
    def do_it(self, dbg):
        target = pydevd_find_thread_by_id(self.thread_id)
        if target:
            info = target.additional_info
            info.pydev_step_cmd = -1
            info.pydev_step_stop = None
            info.pydev_state = STATE_RUN
#=======================================================================================================================
# InternalStepThread
#=======================================================================================================================
class InternalStepThread(InternalThreadCommand):
    """Schedules a step command (identified by cmd_id) on the target thread."""
    def __init__(self, thread_id, cmd_id):
        self.thread_id = thread_id
        self.cmd_id = cmd_id
    def do_it(self, dbg):
        target = pydevd_find_thread_by_id(self.thread_id)
        if target:
            info = target.additional_info
            info.pydev_step_cmd = self.cmd_id
            info.pydev_state = STATE_RUN
#=======================================================================================================================
# InternalSetNextStatementThread
#=======================================================================================================================
class InternalSetNextStatementThread(InternalThreadCommand):
    """Makes a suspended thread jump to the given line when it resumes."""
    def __init__(self, thread_id, cmd_id, line, func_name):
        self.thread_id = thread_id
        self.cmd_id = cmd_id
        self.line = line
        if IS_PY2:
            if isinstance(func_name, unicode):
                # On cython with python 2.X it requires an str, not unicode (but on python 3.3 it should be a str, not bytes).
                func_name = func_name.encode('utf-8')
        self.func_name = func_name
    def do_it(self, dbg):
        # Record the jump target on the thread's additional_info; presumably
        # it is consumed by the trace function when the thread resumes —
        # verify against the frame-tracing code.
        t = pydevd_find_thread_by_id(self.thread_id)
        if t:
            t.additional_info.pydev_step_cmd = self.cmd_id
            t.additional_info.pydev_next_line = int(self.line)
            t.additional_info.pydev_func_name = self.func_name
            t.additional_info.pydev_state = STATE_RUN
#=======================================================================================================================
# InternalGetVariable
#=======================================================================================================================
class InternalGetVariable(InternalThreadCommand):
    """ gets the value of a variable """
    def __init__(self, seq, thread_id, frame_id, scope, attrs):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.scope = scope
        self.attributes = attrs
    def do_it(self, dbg):
        """ Converts request into python variable """
        try:
            xml = "<xml>"
            valDict = pydevd_vars.resolve_compound_variable(self.thread_id, self.frame_id, self.scope, self.attributes)
            if valDict is None:
                valDict = {}
            # Sort attribute names portably: CPython 2 lists accept a cmp
            # function directly; Python 3 needs cmp_to_key; old Jython only
            # supports sorted() with a cmp argument.
            keys = valDict.keys()
            if hasattr(keys, 'sort'):
                keys.sort(compare_object_attrs) #Python 3.0 does not have it
            else:
                if IS_PY3K:
                    keys = sorted(keys, key=cmp_to_key(compare_object_attrs)) #Jython 2.1 does not have it (and all must be compared as strings).
                else:
                    keys = sorted(keys, cmp=compare_object_attrs) #Jython 2.1 does not have it (and all must be compared as strings).
            for k in keys:
                xml += pydevd_vars.var_to_xml(valDict[k], to_string(k))
            xml += "</xml>"
            cmd = dbg.cmd_factory.make_get_variable_message(self.sequence, xml)
            dbg.writer.add_command(cmd)
        except Exception:
            cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error resolving variables " + get_exception_traceback_str())
            dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalGetArray
#=======================================================================================================================
class InternalGetArray(InternalThreadCommand):
    """Renders a slice of an array-like variable as XML (rows/cols window)."""
    def __init__(self, seq, roffset, coffset, rows, cols, format, thread_id, frame_id, scope, attrs):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.scope = scope
        # The variable name is the last tab-separated component of attrs.
        self.name = attrs.split("\t")[-1]
        self.attrs = attrs
        self.roffset = int(roffset)
        self.coffset = int(coffset)
        self.rows = int(rows)
        self.cols = int(cols)
        self.format = format
    def do_it(self, dbg):
        try:
            frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
            var = pydevd_vars.eval_in_context(self.name, frame.f_globals, frame.f_locals)
            xml = "<xml>"
            var, metaxml, rows, cols, format = pydevd_vars.array_to_meta_xml(var, self.name, self.format)
            xml += metaxml
            self.format = '%' + format
            # rows/cols of -1 mean "use the array's own dimensions".
            if self.rows == -1 and self.cols == -1:
                self.rows = rows
                self.cols = cols
            xml += pydevd_vars.array_to_xml(var, self.roffset, self.coffset, self.rows, self.cols, self.format)
            xml += "</xml>"
            cmd = dbg.cmd_factory.make_get_array_message(self.sequence, xml)
            dbg.writer.add_command(cmd)
        except:
            cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error resolving array: " + get_exception_traceback_str())
            dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalChangeVariable
#=======================================================================================================================
class InternalChangeVariable(InternalThreadCommand):
    """Evaluates an expression and assigns the result to a variable attribute."""
    def __init__(self, seq, thread_id, frame_id, scope, attr, expression):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.scope = scope
        self.attr = attr
        self.expression = expression
    def do_it(self, dbg):
        """ Converts request into python variable """
        try:
            result = pydevd_vars.change_attr_expression(self.thread_id, self.frame_id, self.attr, self.expression, dbg)
            xml = "<xml>%s</xml>" % pydevd_vars.var_to_xml(result, "")
            dbg.writer.add_command(dbg.cmd_factory.make_variable_changed_message(self.sequence, xml))
        except Exception:
            err = dbg.cmd_factory.make_error_message(self.sequence, "Error changing variable attr:%s expression:%s traceback:%s" % (self.attr, self.expression, get_exception_traceback_str()))
            dbg.writer.add_command(err)
#=======================================================================================================================
# InternalGetFrame
#=======================================================================================================================
class InternalGetFrame(InternalThreadCommand):
    """Fetches the local variables of a single frame as XML."""
    def __init__(self, seq, thread_id, frame_id):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
    def do_it(self, dbg):
        """ Converts request into python variable """
        try:
            frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
            if frame is not None:
                xml = "<xml>" + pydevd_vars.frame_vars_to_xml(frame.f_locals) + "</xml>"
                del frame  # drop the frame reference as soon as possible
                dbg.writer.add_command(dbg.cmd_factory.make_get_frame_message(self.sequence, xml))
            else:
                #don't print this error: frame not found: means that the client is not synchronized (but that's ok)
                dbg.writer.add_command(dbg.cmd_factory.make_error_message(self.sequence, "Frame not found: %s from thread: %s" % (self.frame_id, self.thread_id)))
        except:
            dbg.writer.add_command(dbg.cmd_factory.make_error_message(self.sequence, "Error resolving frame: %s from thread: %s" % (self.frame_id, self.thread_id)))
#=======================================================================================================================
# InternalEvaluateExpression
#=======================================================================================================================
class InternalEvaluateExpression(InternalThreadCommand):
    """Evaluates (or executes, if doExec) an expression in a frame and
    sends back the result as XML."""
    def __init__(self, seq, thread_id, frame_id, expression, doExec, doTrim):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.expression = expression
        self.doExec = doExec
        self.doTrim = doTrim
    def do_it(self, dbg):
        """ Converts request into python variable """
        try:
            result = pydevd_vars.evaluate_expression(self.thread_id, self.frame_id, self.expression, self.doExec)
            xml = "<xml>%s</xml>" % pydevd_vars.var_to_xml(result, self.expression, self.doTrim)
            dbg.writer.add_command(dbg.cmd_factory.make_evaluate_expression_message(self.sequence, xml))
        except:
            exc = get_exception_traceback_str()
            sys.stderr.write('%s\n' % (exc,))
            dbg.writer.add_command(dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating expression " + exc))
#=======================================================================================================================
# InternalGetCompletions
#=======================================================================================================================
class InternalGetCompletions(InternalThreadCommand):
    """ Gets the completions in a given scope """
    def __init__(self, seq, thread_id, frame_id, act_tok):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.act_tok = act_tok
    def do_it(self, dbg):
        """Generate completions for act_tok in the frame and send them back.

        Cleanup: removed the vestigial `remove_path` bookkeeping — it was
        always None, so its finally-block sys.path cleanup could never run.
        """
        try:
            frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
            if frame is not None:
                msg = _pydev_completer.generate_completions_as_xml(frame, self.act_tok)
                cmd = dbg.cmd_factory.make_get_completions_message(self.sequence, msg)
                dbg.writer.add_command(cmd)
            else:
                cmd = dbg.cmd_factory.make_error_message(self.sequence, "InternalGetCompletions: Frame not found: %s from thread: %s" % (self.frame_id, self.thread_id))
                dbg.writer.add_command(cmd)
        except:
            exc = get_exception_traceback_str()
            sys.stderr.write('%s\n' % (exc,))
            cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating expression " + exc)
            dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalGetBreakpointException
#=======================================================================================================================
class InternalGetBreakpointException(InternalThreadCommand):
    """ Send details of exception raised while evaluating conditional breakpoint """
    def __init__(self, thread_id, exc_type, stacktrace):
        self.sequence = 0
        self.thread_id = thread_id
        # stacktrace: iterable of (filename, line, methodname, methodobj).
        self.stacktrace = stacktrace
        self.exc_type = exc_type
    def do_it(self, dbg):
        try:
            callstack = "<xml>"
            makeValid = pydevd_vars.make_valid_xml_value
            for filename, line, methodname, methodobj in self.stacktrace:
                if file_system_encoding.lower() != "utf-8" and hasattr(filename, "decode"):
                    # filename is a byte string encoded using the file system encoding
                    # convert it to utf8
                    filename = filename.decode(file_system_encoding).encode("utf-8")
                callstack += '<frame thread_id = "%s" file="%s" line="%s" name="%s" obj="%s" />' \
                             % (self.thread_id, makeValid(filename), line, makeValid(methodname), makeValid(methodobj))
            callstack += "</xml>"
            # Payload is the exception type and the XML callstack, tab-separated.
            cmd = dbg.cmd_factory.make_send_breakpoint_exception_message(self.sequence, self.exc_type + "\t" + callstack)
            dbg.writer.add_command(cmd)
        except:
            exc = get_exception_traceback_str()
            sys.stderr.write('%s\n' % (exc,))
            cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error Sending Exception: " + exc)
            dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalSendCurrExceptionTrace
#=======================================================================================================================
class InternalSendCurrExceptionTrace(InternalThreadCommand):
    """ Send details of the exception that was caught and where we've broken in.
    """
    def __init__(self, thread_id, arg, curr_frame_id):
        '''
        :param arg: exception type, description, traceback object
        '''
        self.sequence = 0
        self.thread_id = thread_id
        self.curr_frame_id = curr_frame_id
        self.arg = arg
    def do_it(self, dbg):
        try:
            cmd = dbg.cmd_factory.make_send_curr_exception_trace_message(self.sequence, self.thread_id, self.curr_frame_id, *self.arg)
            # Drop the (type, desc, traceback) tuple promptly — presumably to
            # avoid keeping the traceback/frames alive; verify before changing.
            del self.arg
            dbg.writer.add_command(cmd)
        except:
            exc = get_exception_traceback_str()
            sys.stderr.write('%s\n' % (exc,))
            cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error Sending Current Exception Trace: " + exc)
            dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalSendCurrExceptionTraceProceeded
#=======================================================================================================================
class InternalSendCurrExceptionTraceProceeded(InternalThreadCommand):
    """Notifies the client that the 'broken on exception' state was left."""
    def __init__(self, thread_id):
        self.sequence = 0
        self.thread_id = thread_id
    def do_it(self, dbg):
        try:
            cmd = dbg.cmd_factory.make_send_curr_exception_trace_proceeded_message(self.sequence, self.thread_id)
            dbg.writer.add_command(cmd)
        except:
            exc = get_exception_traceback_str()
            sys.stderr.write('%s\n' % (exc,))
            dbg.writer.add_command(dbg.cmd_factory.make_error_message(self.sequence, "Error Sending Current Exception Trace Proceeded: " + exc))
#=======================================================================================================================
# InternalEvaluateConsoleExpression
#=======================================================================================================================
class InternalEvaluateConsoleExpression(InternalThreadCommand):
    """ Execute the given command in the debug console """
    def __init__(self, seq, thread_id, frame_id, line, buffer_output=True):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.line = line
        self.buffer_output = buffer_output
    def do_it(self, dbg):
        """ Create an XML for console output, error and more (true/false)
        <xml>
            <output message=output_message></output>
            <error message=error_message></error>
            <more>true/false</more>
        </xml>

        The single add_command at the end ships whichever command the
        try/except produced (success, frame-not-found, or error).
        """
        try:
            frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
            if frame is not None:
                console_message = pydevd_console.execute_console_command(
                    frame, self.thread_id, self.frame_id, self.line, self.buffer_output)
                cmd = dbg.cmd_factory.make_send_console_message(self.sequence, console_message.to_xml())
            else:
                # Frame is gone: report the problem as a console error message.
                from _pydevd_bundle.pydevd_console import ConsoleMessage
                console_message = ConsoleMessage()
                console_message.add_console_message(
                    pydevd_console.CONSOLE_ERROR,
                    "Select the valid frame in the debug view (thread: %s, frame: %s invalid)" % (self.thread_id, self.frame_id),
                )
                cmd = dbg.cmd_factory.make_error_message(self.sequence, console_message.to_xml())
        except:
            exc = get_exception_traceback_str()
            cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating expression " + exc)
        dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalRunCustomOperation
#=======================================================================================================================
class InternalRunCustomOperation(InternalThreadCommand):
    """ Run a custom command on an expression
    """
    def __init__(self, seq, thread_id, frame_id, scope, attrs, style, encoded_code_or_file, fnname):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.scope = scope
        self.attrs = attrs
        self.style = style
        # The code (or file reference) arrives URL-encoded on the wire.
        self.code_or_file = unquote_plus(encoded_code_or_file)
        self.fnname = fnname
    def do_it(self, dbg):
        try:
            result = pydevd_vars.custom_operation(self.thread_id, self.frame_id, self.scope, self.attrs,
                                                  self.style, self.code_or_file, self.fnname)
            reply = dbg.cmd_factory.make_custom_operation_message(self.sequence, quote_plus(result))
            dbg.writer.add_command(reply)
        except:
            exc = get_exception_traceback_str()
            reply = dbg.cmd_factory.make_error_message(self.sequence, "Error in running custom operation" + exc)
            dbg.writer.add_command(reply)
#=======================================================================================================================
# InternalConsoleGetCompletions
#=======================================================================================================================
class InternalConsoleGetCompletions(InternalThreadCommand):
    """ Fetch the completions in the debug console
    """
    def __init__(self, seq, thread_id, frame_id, act_tok):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.act_tok = act_tok
    def do_it(self, dbg):
        """Get completions and write them back to the client."""
        try:
            frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
            xml = pydevd_console.get_completions(frame, self.act_tok)
            dbg.writer.add_command(dbg.cmd_factory.make_send_console_message(self.sequence, xml))
        except:
            exc = get_exception_traceback_str()
            dbg.writer.add_command(dbg.cmd_factory.make_error_message(self.sequence, "Error in fetching completions" + exc))
#=======================================================================================================================
# InternalConsoleExec
#=======================================================================================================================
class InternalConsoleExec(InternalThreadCommand):
    """Executes an expression in the interactive-console context of a frame."""
    def __init__(self, seq, thread_id, frame_id, expression):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.expression = expression
    def do_it(self, dbg):
        """ Converts request into python variable """
        try:
            try:
                #don't trace new threads created by console command
                disable_trace_thread_modules()
                result = pydevconsole.console_exec(self.thread_id, self.frame_id, self.expression)
                xml = "<xml>"
                xml += pydevd_vars.var_to_xml(result, "")
                xml += "</xml>"
                cmd = dbg.cmd_factory.make_evaluate_expression_message(self.sequence, xml)
                dbg.writer.add_command(cmd)
            except:
                exc = get_exception_traceback_str()
                sys.stderr.write('%s\n' % (exc,))
                cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating console expression " + exc)
                dbg.writer.add_command(cmd)
        finally:
            # Always restore thread-module tracing and flush the standard
            # streams, even when the command itself failed.
            enable_trace_thread_modules()
            sys.stderr.flush()
            sys.stdout.flush()
#=======================================================================================================================
# pydevd_find_thread_by_id
#=======================================================================================================================
def pydevd_find_thread_by_id(thread_id):
    """Return the threading.Thread whose pydevd id matches *thread_id*, or None."""
    try:
        # there was a deadlock here when I did not remove the tracing function when thread was dead
        all_threads = threading.enumerate()
        for candidate in all_threads:
            tid = get_thread_id(candidate)
            if thread_id == tid or thread_id.endswith('|' + tid):
                return candidate
        sys.stderr.write("Could not find thread %s\n" % thread_id)
        sys.stderr.write("Available: %s\n" % [get_thread_id(t) for t in all_threads])
        sys.stderr.flush()
    except:
        traceback.print_exc()
    return None
# license: apache-2.0
"""
Movie module (imdb package).
This module provides the Movie class, used to store information about
a given movie.
Copyright 2004-2010 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from copy import deepcopy
from imdb import linguistics
from imdb.utils import analyze_title, build_title, canonicalTitle, \
flatten, _Container, cmpMovies
class Movie(_Container):
"""A Movie.
Every information about a movie can be accessed as:
movieObject['information']
to get a list of the kind of information stored in a
Movie object, use the keys() method; some useful aliases
are defined (as "casting" for the "casting director" key); see
the keys_alias dictionary.
"""
# The default sets of information retrieved.
default_info = ('main', 'plot')
# Aliases for some not-so-intuitive keys.
keys_alias = {
'tv schedule': 'airing',
'user rating': 'rating',
'plot summary': 'plot',
'plot summaries': 'plot',
'directed by': 'director',
'created by': 'creator',
'writing credits': 'writer',
'produced by': 'producer',
'original music by': 'original music',
'non-original music by': 'non-original music',
'music': 'original music',
'cinematography by': 'cinematographer',
'cinematography': 'cinematographer',
'film editing by': 'editor',
'film editing': 'editor',
'editing': 'editor',
'actors': 'cast',
'actresses': 'cast',
'casting by': 'casting director',
'casting': 'casting director',
'art direction by': 'art direction',
'set decoration by': 'set decoration',
'costume design by': 'costume designer',
'costume design': 'costume designer',
'makeup department': 'make up',
'makeup': 'make up',
'make-up': 'make up',
'production management': 'production manager',
'production company': 'production companies',
'second unit director or assistant director':
'assistant director',
'second unit director': 'assistant director',
'sound department': 'sound crew',
'costume and wardrobe department': 'costume department',
'special effects by': 'special effects',
'visual effects by': 'visual effects',
'special effects company': 'special effects companies',
'stunts': 'stunt performer',
'other crew': 'miscellaneous crew',
'misc crew': 'miscellaneous crew',
'miscellaneouscrew': 'miscellaneous crew',
'crewmembers': 'miscellaneous crew',
'crew members': 'miscellaneous crew',
'other companies': 'miscellaneous companies',
'misc companies': 'miscellaneous companies',
'miscellaneous company': 'miscellaneous companies',
'misc company': 'miscellaneous companies',
'other company': 'miscellaneous companies',
'aka': 'akas',
'also known as': 'akas',
'country': 'countries',
'production country': 'countries',
'production countries': 'countries',
'genre': 'genres',
'runtime': 'runtimes',
'lang': 'languages',
'color': 'color info',
'cover': 'cover url',
'full-size cover': 'full-size cover url',
'seasons': 'number of seasons',
'language': 'languages',
'certificate': 'certificates',
'certifications': 'certificates',
'certification': 'certificates',
'miscellaneous links': 'misc links',
'miscellaneous': 'misc links',
'soundclips': 'sound clips',
'videoclips': 'video clips',
'photographs': 'photo sites',
'distributor': 'distributors',
'distribution': 'distributors',
'distribution companies': 'distributors',
'distribution company': 'distributors',
'guest': 'guests',
'guest appearances': 'guests',
'tv guests': 'guests',
'notable tv guest appearances': 'guests',
'episodes cast': 'guests',
'episodes number': 'number of episodes',
'amazon review': 'amazon reviews',
'merchandising': 'merchandising links',
'merchandise': 'merchandising links',
'sales': 'merchandising links',
'faq': 'faqs',
'parental guide': 'parents guide',
'frequently asked questions': 'faqs'}
keys_tomodify_list = ('plot', 'trivia', 'alternate versions', 'goofs',
'quotes', 'dvd', 'laserdisc', 'news', 'soundtrack',
'crazy credits', 'business', 'supplements',
'video review', 'faqs')
cmpFunct = cmpMovies
    def _init(self, **kwds):
        """Initialize a Movie object.
        *movieID* -- the unique identifier for the movie.
        *title* -- the title of the Movie, if not in the data dictionary.
        *myTitle* -- your personal title for the movie.
        *myID* -- your personal identifier for the movie.
        *data* -- a dictionary used to initialize the object.
        *currentRole* -- a Character instance representing the current role
                         or duty of a person in this movie, or a Person
                         object representing the actor/actress who played
                         a given character in a Movie. If a string is
                         passed, an object is automatically build.
        *roleID* -- if available, the characterID/personID of the currentRole
                    object.
        *roleIsPerson* -- when False (default) the currentRole is assumed
                          to be a Character object, otherwise a Person.
        *notes* -- notes for the person referred in the currentRole
                   attribute; e.g.: '(voice)'.
        *accessSystem* -- a string representing the data access system used.
        *titlesRefs* -- a dictionary with references to movies.
        *namesRefs* -- a dictionary with references to persons.
        *charactersRefs* -- a dictionary with references to characters.
        *modFunct* -- function called returning text fields.
        """
        # An explicitly passed title is honored only when the data
        # dictionary does not already carry one.
        title = kwds.get('title')
        if title and not self.data.has_key('title'):
            self.set_title(title)
        self.movieID = kwds.get('movieID', None)
        self.myTitle = kwds.get('myTitle', u'')
def _reset(self):
"""Reset the Movie object."""
self.movieID = None
self.myTitle = u''
def set_title(self, title):
"""Set the title of the movie."""
# XXX: convert title to unicode, if it's a plain string?
d_title = analyze_title(title)
self.data.update(d_title)
def _additional_keys(self):
"""Valid keys to append to the data.keys() list."""
addkeys = []
if self.data.has_key('title'):
addkeys += ['canonical title', 'long imdb title',
'long imdb canonical title',
'smart canonical title',
'smart long imdb canonical title']
if self.data.has_key('episode of'):
addkeys += ['long imdb episode title', 'series title',
'canonical series title', 'episode title',
'canonical episode title',
'smart canonical series title',
'smart canonical episode title']
if self.data.has_key('cover url'):
addkeys += ['full-size cover url']
return addkeys
    def guessLanguage(self):
        """Guess the language of the title of this movie; returns None
        if there are no hints."""
        # Prefer the first listed language; otherwise fall back to the
        # default language of the first listed country.
        lang = self.get('languages')
        if lang:
            lang = lang[0]
        else:
            country = self.get('countries')
            if country:
                lang = linguistics.COUNTRY_LANG.get(country[0])
        return lang
    def smartCanonicalTitle(self, title=None, lang=None):
        """Return the canonical title, guessing its language.
        The title can be forced with the 'title' argument (internally
        used) and the language can be forced with the 'lang' argument,
        otherwise it's auto-detected."""
        if title is None:
            title = self.data.get('title', u'')
        if lang is None:
            lang = self.guessLanguage()
        return canonicalTitle(title, lang=lang)
    def _getitem(self, key):
        """Handle special (synthesized) keys."""
        # Episode-specific synthesized keys.
        if self.data.has_key('episode of'):
            if key == 'long imdb episode title':
                return build_title(self.data)
            elif key == 'series title':
                return self.data['episode of']['title']
            elif key == 'canonical series title':
                ser_title = self.data['episode of']['title']
                return canonicalTitle(ser_title)
            elif key == 'smart canonical series title':
                ser_title = self.data['episode of']['title']
                return self.smartCanonicalTitle(ser_title)
            elif key == 'episode title':
                return self.data.get('title', u'')
            elif key == 'canonical episode title':
                return canonicalTitle(self.data.get('title', u''))
            elif key == 'smart canonical episode title':
                return self.smartCanonicalTitle(self.data.get('title', u''))
        # Title variants synthesized from the plain 'title'.
        if self.data.has_key('title'):
            if key == 'title':
                return self.data['title']
            elif key == 'long imdb title':
                return build_title(self.data)
            elif key == 'canonical title':
                return canonicalTitle(self.data['title'])
            elif key == 'smart canonical title':
                return self.smartCanonicalTitle(self.data['title'])
            elif key == 'long imdb canonical title':
                return build_title(self.data, canonical=1)
            elif key == 'smart long imdb canonical title':
                return build_title(self.data, canonical=1,
                                    lang=self.guessLanguage())
        # Derive the full-size cover URL from the thumbnail URL.
        if key == 'full-size cover url' and self.data.has_key('cover url'):
            return self._re_fullsizeURL.sub('', self.data.get('cover url', ''))
        # Not one of the special keys.
        return None
    def getID(self):
        """Return the movieID (None when not set)."""
        return self.movieID
    def __nonzero__(self):
        """The Movie is "false" if the self.data does not contain a title."""
        # XXX: check the title and the movieID?
        # Python 2 truth-value protocol (hook for bool()).
        if self.data.has_key('title'): return 1
        return 0
    def isSameTitle(self, other):
        """Return true if this and the compared object have the same
        long imdb title and/or movieID.
        """
        # XXX: obsolete?
        if not isinstance(other, self.__class__): return 0
        # First try a title match, then fall back to comparing movieIDs
        # (only meaningful within the same access system).
        if self.data.has_key('title') and \
                other.data.has_key('title') and \
                build_title(self.data, canonical=0) == \
                build_title(other.data, canonical=0):
            return 1
        if self.accessSystem == other.accessSystem and \
                self.movieID is not None and self.movieID == other.movieID:
            return 1
        return 0
    isSameMovie = isSameTitle # XXX: just for backward compatibility.
    def __contains__(self, item):
        """Return true if the given Person object is listed in this Movie,
        or if the given Character is represented in this Movie."""
        from Person import Person
        from Character import Character
        from Company import Company
        if isinstance(item, Person):
            for p in flatten(self.data, yieldDictKeys=1, scalar=Person,
                            toDescend=(list, dict, tuple, Movie)):
                if item.isSame(p):
                    return 1
        elif isinstance(item, Character):
            # Characters are matched against the currentRole of the
            # Person objects found in the data.
            for p in flatten(self.data, yieldDictKeys=1, scalar=Person,
                            toDescend=(list, dict, tuple, Movie)):
                if item.isSame(p.currentRole):
                    return 1
        elif isinstance(item, Company):
            for c in flatten(self.data, yieldDictKeys=1, scalar=Company,
                            toDescend=(list, dict, tuple, Movie)):
                if item.isSame(c):
                    return 1
        return 0
    def __deepcopy__(self, memo):
        """Return a deep copy of a Movie instance."""
        m = Movie(title=u'', movieID=self.movieID, myTitle=self.myTitle,
                    myID=self.myID, data=deepcopy(self.data, memo),
                    currentRole=deepcopy(self.currentRole, memo),
                    roleIsPerson=self._roleIsPerson,
                    notes=self.notes, accessSystem=self.accessSystem,
                    titlesRefs=deepcopy(self.titlesRefs, memo),
                    namesRefs=deepcopy(self.namesRefs, memo),
                    charactersRefs=deepcopy(self.charactersRefs, memo))
        # current_info and modFunct are not handled by the constructor
        # call above, so re-attach them explicitly.
        m.current_info = list(self.current_info)
        m.set_mod_funct(self.modFunct)
        return m
    def __repr__(self):
        """String representation of a Movie object."""
        # XXX: add also currentRole and notes, if present?
        # Prefer the episode-style title for TV episodes.
        if self.has_key('long imdb episode title'):
            title = self.get('long imdb episode title')
        else:
            title = self.get('long imdb title')
        r = '<Movie id:%s[%s] title:_%s_>' % (self.movieID, self.accessSystem,
                                              title)
        # Python 2: repr() must return a byte string.
        if isinstance(r, unicode): r = r.encode('utf_8', 'replace')
        return r
    def __str__(self):
        """Simply print the short title."""
        # Python 2: __str__ must return bytes, hence the encode.
        return self.get('title', u'').encode('utf_8', 'replace')
    def __unicode__(self):
        """Simply print the short title."""
        return self.get('title', u'')
    def summary(self):
        """Return a string with a pretty-printed summary for the movie."""
        if not self: return u''
        def _nameAndRole(personList, joiner=u', '):
            """Build a pretty string with name and role."""
            nl = []
            for person in personList:
                n = person.get('name', u'')
                if person.currentRole: n += u' (%s)' % person.currentRole
                nl.append(n)
            return joiner.join(nl)
        s = u'Movie\n=====\nTitle: %s\n' % \
                    self.get('long imdb canonical title', u'')
        genres = self.get('genres')
        if genres: s += u'Genres: %s.\n' % u', '.join(genres)
        director = self.get('director')
        if director:
            s += u'Director: %s.\n' % _nameAndRole(director)
        writer = self.get('writer')
        if writer:
            s += u'Writer: %s.\n' % _nameAndRole(writer)
        cast = self.get('cast')
        if cast:
            # Only the top-billed five cast members are shown.
            cast = cast[:5]
            s += u'Cast: %s.\n' % _nameAndRole(cast)
        runtime = self.get('runtimes')
        if runtime:
            s += u'Runtime: %s.\n' % u', '.join(runtime)
        countries = self.get('countries')
        if countries:
            s += u'Country: %s.\n' % u', '.join(countries)
        lang = self.get('languages')
        if lang:
            s += u'Language: %s.\n' % u', '.join(lang)
        rating = self.get('rating')
        if rating:
            s += u'Rating: %s' % rating
            nr_votes = self.get('votes')
            if nr_votes:
                s += u' (%s votes)' % nr_votes
            s += u'.\n'
        plot = self.get('plot')
        if not plot:
            plot = self.get('plot summary')
            if plot:
                plot = [plot]
        if plot:
            plot = plot[0]
            i = plot.find('::')
            if i != -1:
                # Keep only the text before '::' (presumably the
                # trailing author attribution -- verify).
                plot = plot[:i]
            s += u'Plot: %s' % plot
        return s
| gpl-3.0 |
redhat-cip/horizon | horizon/utils/functions.py | 19 | 4828 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import decimal
import math
import re
from oslo_utils import units
import six
from django.conf import settings
from django.contrib.auth import logout # noqa
from django import http
from django.utils.encoding import force_text
from django.utils.functional import lazy # noqa
from django.utils import translation
def _lazy_join(separator, strings):
    # Force each item to text at join time, so lazily-translated
    # strings are evaluated only when the result is rendered.
    return separator.join([force_text(s)
                           for s in strings])
# Lazily-evaluated variant of the join above, yielding a text promise.
lazy_join = lazy(_lazy_join, six.text_type)
def bytes_to_gigabytes(bytes):
    # Converts the number of bytes to the next highest number of Gigabytes
    # (rounds up; e.g. 5000000 bytes -> 1).
    # NOTE(review): the parameter shadows the ``bytes`` builtin; kept as-is
    # for backward compatibility with keyword callers.
    return int(math.ceil(float(bytes) / units.Gi))
def add_logout_reason(request, response, reason):
    # Store the translated string in the cookie
    lang = translation.get_language_from_request(request)
    with translation.override(lang):
        reason = six.text_type(reason)
        if six.PY2:
            reason = reason.encode('utf-8')
    # Short-lived cookie: the login page reads it right after the redirect.
    response.set_cookie('logout_reason', reason, max_age=10)
def logout_with_message(request, msg, redirect=True):
    """Send HttpResponseRedirect to LOGOUT_URL.
    `msg` is a message displayed on the login page after the logout, to explain
    the logout reason.
    """
    logout(request)
    if redirect:
        # Preserve the current path so login can send the user back here.
        response = http.HttpResponseRedirect(
            '%s?next=%s' % (settings.LOGOUT_URL, request.path))
    else:
        response = http.HttpResponseRedirect(settings.LOGOUT_URL)
    add_logout_reason(request, response, msg)
    return response
def get_page_size(request, default=20):
    """Return the preferred table page size for this user.

    Precedence: session value, then cookie value, then the
    ``API_RESULT_PAGE_SIZE`` setting, then *default*.  A non-numeric
    stored value resets the session entry to ``int(default)``.
    """
    configured = getattr(settings, 'API_RESULT_PAGE_SIZE', default)
    candidate = request.session.get(
        'horizon_pagesize',
        request.COOKIES.get('horizon_pagesize', configured))
    try:
        return int(candidate)
    except ValueError:
        page_size = request.session['horizon_pagesize'] = int(default)
        return page_size
def get_log_length(request, default=35):
    """Return the preferred console-log length for this user.

    Precedence: session value, then cookie value, then the
    ``INSTANCE_LOG_LENGTH`` setting, then *default*.  A non-numeric
    stored value resets the session entry to ``int(default)``.
    """
    configured = getattr(settings, 'INSTANCE_LOG_LENGTH', default)
    candidate = request.session.get(
        'instance_log_length',
        request.COOKIES.get('instance_log_length', configured))
    try:
        return int(candidate)
    except ValueError:
        log_length = request.session['instance_log_length'] = int(default)
        return log_length
def natural_sort(attr):
    """Return a sort-key callable that orders embedded integers
    numerically (so 'a2' sorts before 'a10')."""
    def _key(obj):
        text = getattr(obj, attr, obj)
        chunks = re.split(r'(\d+)', text)
        return [int(chunk) if chunk.isdigit() else chunk
                for chunk in chunks]
    return _key
def get_keys(tuple_of_tuples):
    """Processes a tuple of 2-element tuples and returns a tuple containing
    first component of each tuple.
    """
    # A generator expression avoids building a throwaway intermediate
    # list inside the tuple() call (flake8-comprehensions C409).
    return tuple(t[0] for t in tuple_of_tuples)
def value_for_key(tuple_of_tuples, key):
    """Processes a tuple of 2-element tuples and returns the value
    corresponding to the given key. If no value is found, the key is returned.
    """
    for t in tuple_of_tuples:
        if t[0] == key:
            return t[1]
    # No pair matched: fall back to the key itself (replaces the
    # original, easy-to-misread for/else construct).
    return key
def next_key(tuple_of_tuples, key):
    """Return the key that immediately follows *key* in a tuple of
    2-element tuples, or None when *key* is last or absent.
    """
    keys = [t[0] for t in tuple_of_tuples]
    try:
        position = keys.index(key)
    except ValueError:
        return None
    if position + 1 < len(keys):
        return keys[position + 1]
    return None
def previous_key(tuple_of_tuples, key):
    """Processes a tuple of 2-element tuples and returns the key which comes
    before the given key, or None when the key is first or absent.
    """
    for i, t in enumerate(tuple_of_tuples):
        if t[0] == key:
            if i == 0:
                # BUG FIX: ``tuple_of_tuples[-1]`` does not raise
                # IndexError -- it silently wraps around and returned the
                # *last* key when the given key was the first one.
                return None
            return tuple_of_tuples[i - 1][0]
    return None
def format_value(value):
    """Return *value* as an int when it is integral, otherwise as a
    float rounded to one decimal place.
    """
    as_decimal = decimal.Decimal(str(value))
    if as_decimal == int(as_decimal):
        return int(as_decimal)
    # An explicit float() cast is required on Python 3, where
    # round(Decimal, n) returns a Decimal.
    return float(round(as_decimal, 1))
| apache-2.0 |
ajaxsys/dict-admin | docutils/io.py | 2 | 14324 | # $Id: io.py 7073 2011-07-07 06:49:19Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
I/O classes provide a uniform API for low-level input and output. Subclasses
will exist for a variety of input/output mechanisms.
"""
__docformat__ = 'reStructuredText'
import sys
import re
import codecs
from docutils import TransformSpec
from docutils._compat import b
from docutils.error_reporting import locale_encoding, ErrorString, ErrorOutput
class Input(TransformSpec):
    """
    Abstract base class for input wrappers.
    """
    component_type = 'input'
    default_source_path = None
    def __init__(self, source=None, source_path=None, encoding=None,
                 error_handler='strict'):
        self.encoding = encoding
        """Text encoding for the input source."""
        self.error_handler = error_handler
        """Text decoding error handler."""
        self.source = source
        """The source of input data."""
        self.source_path = source_path
        """A text reference to the source."""
        if not source_path:
            self.source_path = self.default_source_path
        self.successful_encoding = None
        """The encoding that successfully decoded the source data."""
    def __repr__(self):
        return '%s: source=%r, source_path=%r' % (self.__class__, self.source,
                                                  self.source_path)
    def read(self):
        # Subclasses must implement the actual data retrieval.
        raise NotImplementedError
    def decode(self, data):
        """
        Decode a string, `data`, heuristically.
        Raise UnicodeError if unsuccessful.
        The client application should call ``locale.setlocale`` at the
        beginning of processing::
            locale.setlocale(locale.LC_ALL, '')
        """
        if self.encoding and self.encoding.lower() == 'unicode':
            assert isinstance(data, unicode), (
                'input encoding is "unicode" '
                'but input is not a unicode object')
        if isinstance(data, unicode):
            # Accept unicode even if self.encoding != 'unicode'.
            return data
        if self.encoding:
            # We believe the user/application when the encoding is
            # explicitly given.
            encodings = [self.encoding]
        else:
            data_encoding = self.determine_encoding_from_data(data)
            if data_encoding:
                # If the data declares its encoding (explicitly or via a BOM),
                # we believe it.
                encodings = [data_encoding]
            else:
                # Apply heuristics only if no encoding is explicitly given and
                # no BOM found. Start with UTF-8, because that only matches
                # data that *IS* UTF-8:
                encodings = [enc for enc in ('utf-8',
                                             locale_encoding, # can be None
                                             'latin-1') # fallback encoding
                             if enc]
        # First candidate that decodes cleanly wins.
        for enc in encodings:
            try:
                decoded = unicode(data, enc, self.error_handler)
                self.successful_encoding = enc
                # Return decoded, removing BOMs.
                return decoded.replace(u'\ufeff', u'')
            except (UnicodeError, LookupError), err:
                error = err # in Python 3, the <exception instance> is
                            # local to the except clause
        raise UnicodeError(
            'Unable to decode input data. Tried the following encodings: '
            '%s.\n(%s)' % (', '.join([repr(enc) for enc in encodings]),
                           ErrorString(error)))
    coding_slug = re.compile(b("coding[:=]\s*([-\w.]+)"))
    """Encoding declaration pattern."""
    byte_order_marks = ((codecs.BOM_UTF8, 'utf-8'), # actually 'utf-8-sig'
                        (codecs.BOM_UTF16_BE, 'utf-16-be'),
                        (codecs.BOM_UTF16_LE, 'utf-16-le'),)
    """Sequence of (start_bytes, encoding) tuples for encoding detection.
    The first bytes of input data are checked against the start_bytes strings.
    A match indicates the given encoding."""
    def determine_encoding_from_data(self, data):
        """
        Try to determine the encoding of `data` by looking *in* `data`.
        Check for a byte order mark (BOM) or an encoding declaration.
        """
        # check for a byte order mark:
        for start_bytes, encoding in self.byte_order_marks:
            if data.startswith(start_bytes):
                return encoding
        # check for an encoding declaration pattern in first 2 lines of file:
        for line in data.splitlines()[:2]:
            match = self.coding_slug.search(line)
            if match:
                return match.group(1).decode('ascii')
        return None
class Output(TransformSpec):
    """
    Abstract base class for output wrappers.
    """
    component_type = 'output'
    default_destination_path = None
    def __init__(self, destination=None, destination_path=None,
                 encoding=None, error_handler='strict'):
        self.encoding = encoding
        """Text encoding for the output destination."""
        self.error_handler = error_handler or 'strict'
        """Text encoding error handler."""
        self.destination = destination
        """The destination for output data."""
        self.destination_path = destination_path
        """A text reference to the destination."""
        if not destination_path:
            self.destination_path = self.default_destination_path
    def __repr__(self):
        return ('%s: destination=%r, destination_path=%r'
                % (self.__class__, self.destination, self.destination_path))
    def write(self, data):
        """`data` is a Unicode string, to be encoded by `self.encode`."""
        raise NotImplementedError
    def encode(self, data):
        # The special encoding 'unicode' means "pass text through
        # unencoded"; the data must already be a unicode string.
        if self.encoding and self.encoding.lower() == 'unicode':
            assert isinstance(data, unicode), (
                'the encoding given is "unicode" but the output is not '
                'a Unicode string')
            return data
        if not isinstance(data, unicode):
            # Non-unicode (e.g. binary) output.
            return data
        else:
            return data.encode(self.encoding, self.error_handler)
class FileInput(Input):
    """
    Input for single, simple file-like objects.
    """
    def __init__(self, source=None, source_path=None,
                 encoding=None, error_handler='strict',
                 autoclose=True, handle_io_errors=True, mode='rU'):
        """
        :Parameters:
            - `source`: either a file-like object (which is read directly), or
              `None` (which implies `sys.stdin` if no `source_path` given).
            - `source_path`: a path to a file, which is opened and then read.
            - `encoding`: the expected text encoding of the input file.
            - `error_handler`: the encoding error handler to use.
            - `autoclose`: close automatically after read (except when
              `sys.stdin` is the source).
            - `handle_io_errors`: summarize I/O errors here, and exit?
            - `mode`: how the file is to be opened (see standard function
              `open`). The default 'rU' provides universal newline support
              for text files.
        """
        Input.__init__(self, source, source_path, encoding, error_handler)
        self.autoclose = autoclose
        self.handle_io_errors = handle_io_errors
        self._stderr = ErrorOutput()
        if source is None:
            if source_path:
                # Specify encoding in Python 3
                if sys.version_info >= (3,0):
                    kwargs = {'encoding': self.encoding,
                              'errors': self.error_handler}
                else:
                    kwargs = {}
                try:
                    self.source = open(source_path, mode, **kwargs)
                except IOError, error:
                    if not handle_io_errors:
                        raise
                    # Report the error and abort instead of propagating.
                    print >>self._stderr, ErrorString(error)
                    print >>self._stderr, (u'Unable to open source'
                        u" file for reading ('%s'). Exiting." % source_path)
                    sys.exit(1)
            else:
                self.source = sys.stdin
        if not source_path:
            # Best effort: derive a path from the file object's name.
            try:
                self.source_path = self.source.name
            except AttributeError:
                pass
    def read(self):
        """
        Read and decode a single file and return the data (Unicode string).
        """
        try:
            data = self.source.read()
        finally:
            if self.autoclose:
                self.close()
        return self.decode(data)
    def readlines(self):
        """
        Return lines of a single file as list of Unicode strings.
        """
        try:
            lines = self.source.readlines()
        finally:
            if self.autoclose:
                self.close()
        return [self.decode(line) for line in lines]
    def close(self):
        # Never close the process-wide stdin object.
        if self.source is not sys.stdin:
            self.source.close()
class FileOutput(Output):
    """
    Output for single, simple file-like objects.
    """
    def __init__(self, destination=None, destination_path=None,
                 encoding=None, error_handler='strict', autoclose=True,
                 handle_io_errors=True):
        """
        :Parameters:
            - `destination`: either a file-like object (which is written
              directly) or `None` (which implies `sys.stdout` if no
              `destination_path` given).
            - `destination_path`: a path to a file, which is opened and then
              written.
            - `autoclose`: close automatically after write (except when
              `sys.stdout` or `sys.stderr` is the destination).
        """
        Output.__init__(self, destination, destination_path,
                        encoding, error_handler)
        self.opened = True
        self.autoclose = autoclose
        self.handle_io_errors = handle_io_errors
        self._stderr = ErrorOutput()
        if destination is None:
            if destination_path:
                # Defer opening the file until the first write().
                self.opened = False
            else:
                self.destination = sys.stdout
        if not destination_path:
            # Best effort: derive a path from the file object's name.
            try:
                self.destination_path = self.destination.name
            except AttributeError:
                pass
    def open(self):
        # Specify encoding in Python 3.
        # (Do not use binary mode ('wb') as this prevents the
        # conversion of newlines to the system specific default.)
        if sys.version_info >= (3,0):
            kwargs = {'encoding': self.encoding,
                      'errors': self.error_handler}
        else:
            kwargs = {}
        try:
            self.destination = open(self.destination_path, 'w', **kwargs)
        except IOError, error:
            if not self.handle_io_errors:
                raise
            print >>self._stderr, ErrorString(error)
            print >>self._stderr, (u'Unable to open destination file'
                u" for writing ('%s'). Exiting." % self.destination_path)
            sys.exit(1)
        self.opened = True
    def write(self, data):
        """Encode `data`, write it to a single file, and return it.
        In Python 3, a (unicode) string is returned.
        """
        if sys.version_info >= (3,0):
            output = data # in py3k, write expects a (Unicode) string
        else:
            output = self.encode(data)
        if not self.opened:
            self.open()
        try:
            self.destination.write(output)
        finally:
            if self.autoclose:
                self.close()
        return output
    def close(self):
        # Never close the process-wide stdout/stderr objects.
        if self.destination not in (sys.stdout, sys.stderr):
            self.destination.close()
            self.opened = False
class BinaryFileOutput(FileOutput):
    """
    A version of docutils.io.FileOutput which writes to a binary file.
    """
    def open(self):
        # Binary mode: no encoding keyword and no newline translation.
        try:
            self.destination = open(self.destination_path, 'wb')
        except IOError, error:
            if not self.handle_io_errors:
                raise
            print >>self._stderr, ErrorString(error)
            print >>self._stderr, (u'Unable to open destination file'
                u" for writing ('%s'). Exiting." % self.destination_path)
            sys.exit(1)
        self.opened = True
class StringInput(Input):
    """
    Direct string input.
    """
    default_source_path = '<string>'
    def read(self):
        """Decode and return the source string."""
        return self.decode(self.source)
class StringOutput(Output):
    """
    Direct string output.
    """
    default_destination_path = '<string>'
    def write(self, data):
        """Encode `data`, store it in `self.destination`, and return it."""
        self.destination = self.encode(data)
        return self.destination
class NullInput(Input):
    """
    Degenerate input: read nothing.
    """
    default_source_path = 'null input'
    def read(self):
        """Return a null string."""
        return u''
class NullOutput(Output):
    """
    Degenerate output: write nothing.
    """
    default_destination_path = 'null output'
    def write(self, data):
        """Do nothing ([don't even] send data to the bit bucket)."""
        pass
class DocTreeInput(Input):
    """
    Adapter for document tree input.
    The document tree must be passed in the ``source`` parameter.
    """
    default_source_path = 'doctree input'
    def read(self):
        """Return the document tree."""
        return self.source
| bsd-3-clause |
timedcy/python-goose | goose/utils/__init__.py | 10 | 4267 | # -*- coding: utf-8 -*-
"""\
This is a python port of "Goose" orignialy licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by Xavier Grangier for Recrutae
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
import hashlib
import re
import os
import goose
import codecs
import urlparse
class BuildURL(object):
    """Resolve the scheme and hostname of a (possibly relative) URL,
    falling back to the final (post-redirect) URL when available."""
    def __init__(self, url, finalurl=None):
        self.url = url
        self.finalurl = finalurl
    def getHostname(self, o):
        """Return the hostname of parse result *o*, falling back to the
        hostname of ``self.finalurl``; None when neither is set."""
        if o.hostname:
            # BUG FIX: was ``o.hotname`` -- an AttributeError on every hit.
            return o.hostname
        elif self.finalurl:
            # BUG FIX: ``urlparse`` is the imported *module*; calling it
            # raised "'module' object is not callable".
            oo = urlparse.urlparse(self.finalurl)
            if oo.hostname:
                return oo.hostname
        return None
    def getScheme(self, o):
        """Return the scheme of parse result *o*, falling back to
        ``self.finalurl``'s scheme, then to 'http'."""
        if o.scheme:
            return o.scheme
        elif self.finalurl:
            oo = urlparse.urlparse(self.finalurl)
            if oo.scheme:
                return oo.scheme
        return 'http'
    def getUrl(self):
        """\
        """
        # NOTE(review): incomplete upstream -- scheme/hostname are
        # computed but never assembled; the method returns None.
        url_obj = urlparse.urlparse(self.url)
        scheme = self.getScheme(url_obj)
        hostname = self.getHostname(url_obj)
class FileHelper(object):
    @classmethod
    def loadResourceFile(self, filename):
        """Read and return the UTF-8 content of *filename*.

        A relative *filename* is resolved against the package's
        ``resources`` directory; an absolute path is used verbatim.
        Raises IOError with the resolved path when the file cannot
        be opened.
        """
        # BUG FIX: was ``os.path.isabs('filename')`` -- a string
        # literal, which is always relative, so an absolute *filename*
        # was wrongly joined under the resources directory.
        if not os.path.isabs(filename):
            dirpath = os.path.dirname(goose.__file__)
            path = os.path.join(dirpath, 'resources', filename)
        else:
            path = filename
        try:
            f = codecs.open(path, 'r', 'utf-8')
            content = f.read()
            f.close()
            return content
        except IOError:
            raise IOError("Couldn't open file %s" % path)
class ParsingCandidate(object):
    """Value object pairing a URL with the hash identifying one fetch."""
    def __init__(self, urlString, link_hash):
        # ``url`` and ``urlString`` intentionally alias the same string.
        self.url = urlString
        self.urlString = urlString
        self.link_hash = link_hash
class RawHelper(object):
    @classmethod
    def get_parsing_candidate(self, url, raw_html):
        # Encode text to bytes before hashing; the md5 of the page
        # content plus a timestamp uniquely identifies this fetch.
        if isinstance(raw_html, unicode):
            raw_html = raw_html.encode('utf-8')
        link_hash = '%s.%s' % (hashlib.md5(raw_html).hexdigest(), time.time())
        return ParsingCandidate(url, link_hash)
class URLHelper(object):
    @classmethod
    def get_parsing_candidate(self, url_to_crawl):
        # replace shebang is urls (AJAX-crawlable '#!' fragments become
        # the '_escaped_fragment_' query form).
        final_url = url_to_crawl.replace('#!', '?_escaped_fragment_=') \
            if '#!' in url_to_crawl else url_to_crawl
        # md5 of the URL plus a timestamp identifies this crawl.
        link_hash = '%s.%s' % (hashlib.md5(final_url).hexdigest(), time.time())
        return ParsingCandidate(final_url, link_hash)
class StringSplitter(object):
    """Wrap a compiled regular expression used to split strings."""
    def __init__(self, pattern):
        self.pattern = re.compile(pattern)
    def split(self, string):
        """Return the pieces of *string*; falsy input yields an empty list."""
        return self.pattern.split(string) if string else []
class StringReplacement(object):
    """A single literal substring replacement."""
    def __init__(self, pattern, replaceWith):
        self.pattern = pattern
        self.replaceWith = replaceWith
    def replaceAll(self, string):
        """Apply the replacement; falsy input yields an empty string."""
        if not string:
            return u''
        return string.replace(self.pattern, self.replaceWith)
class ReplaceSequence(object):
    """An ordered chain of StringReplacement operations."""
    def __init__(self):
        self.replacements = []
    def create(self, firstPattern, replaceWith=None):
        """Append a replacement (deletion by default) and return self
        to allow chaining."""
        self.replacements.append(
            StringReplacement(firstPattern, replaceWith or u''))
        return self
    def append(self, pattern, replaceWith=None):
        """Alias for create()."""
        return self.create(pattern, replaceWith)
    def replaceAll(self, string):
        """Run every replacement over *string*; falsy input yields u''."""
        if not string:
            return u''
        current = string
        for replacement in self.replacements:
            current = replacement.replaceAll(current)
        return current
| apache-2.0 |
glenn-edgar/local_controller_3 | __backup__/flask_web/werkzeug-master/werkzeug/testsuite/debug.py | 74 | 7476 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.debug
~~~~~~~~~~~~~~~~~~~~~~~~
Tests some debug utilities.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import unittest
import sys
import re
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.debug.repr import debug_repr, DebugReprGenerator, \
dump, helper
from werkzeug.debug.console import HTMLStringO
class DebugReprTestCase(WerkzeugTestCase):
    """Exercises the HTML markup produced by werkzeug.debug.repr."""
    def test_basic_repr(self):
        assert debug_repr([]) == u'[]'
        assert debug_repr([1, 2]) == \
            u'[<span class="number">1</span>, <span class="number">2</span>]'
        assert debug_repr([1, 'test']) == \
            u'[<span class="number">1</span>, <span class="string">\'test\'</span>]'
        assert debug_repr([None]) == \
            u'[<span class="object">None</span>]'
    def test_sequence_repr(self):
        # Elements past the eighth are wrapped in an "extended" span.
        assert debug_repr(list(range(20))) == (
            u'[<span class="number">0</span>, <span class="number">1</span>, '
            u'<span class="number">2</span>, <span class="number">3</span>, '
            u'<span class="number">4</span>, <span class="number">5</span>, '
            u'<span class="number">6</span>, <span class="number">7</span>, '
            u'<span class="extended"><span class="number">8</span>, '
            u'<span class="number">9</span>, <span class="number">10</span>, '
            u'<span class="number">11</span>, <span class="number">12</span>, '
            u'<span class="number">13</span>, <span class="number">14</span>, '
            u'<span class="number">15</span>, <span class="number">16</span>, '
            u'<span class="number">17</span>, <span class="number">18</span>, '
            u'<span class="number">19</span></span>]'
        )
    def test_mapping_repr(self):
        assert debug_repr({}) == u'{}'
        assert debug_repr({'foo': 42}) == \
            u'{<span class="pair"><span class="key"><span class="string">\'foo\''\
            u'</span></span>: <span class="value"><span class="number">42' \
            u'</span></span></span>}'
        assert debug_repr(dict(zip(range(10), [None] * 10))) == \
            u'{<span class="pair"><span class="key"><span class="number">0</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">1</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">2</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">3</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="extended"><span class="pair"><span class="key"><span class="number">4</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">5</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">6</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">7</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">8</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">9</span></span>: <span class="value"><span class="object">None</span></span></span></span>}'
        # Tuples are rendered with parentheses.
        assert debug_repr((1, 'zwei', u'drei')) ==\
            u'(<span class="number">1</span>, <span class="string">\'' \
            u'zwei\'</span>, <span class="string">u\'drei\'</span>)'
    def test_custom_repr(self):
        class Foo(object):
            def __repr__(self):
                return '<Foo 42>'
        assert debug_repr(Foo()) == '<span class="object"><Foo 42></span>'
    def test_list_subclass_repr(self):
        class MyList(list):
            pass
        assert debug_repr(MyList([1, 2])) == \
            u'<span class="module">werkzeug.testsuite.debug.</span>MyList([' \
            u'<span class="number">1</span>, <span class="number">2</span>])'
    def test_regex_repr(self):
        assert debug_repr(re.compile(r'foo\d')) == \
            u're.compile(<span class="string regex">r\'foo\\d\'</span>)'
        # Python 2 only: ur'' literals are a syntax error on Python 3.
        assert debug_repr(re.compile(ur'foo\d')) == \
            u're.compile(<span class="string regex">ur\'foo\\d\'</span>)'
    def test_set_repr(self):
        assert debug_repr(frozenset('x')) == \
            u'frozenset([<span class="string">\'x\'</span>])'
        assert debug_repr(set('x')) == \
            u'set([<span class="string">\'x\'</span>])'
    def test_recursive_repr(self):
        # Self-referencing containers must render as [...] instead of
        # recursing forever.
        a = [1]
        a.append(a)
        assert debug_repr(a) == u'[<span class="number">1</span>, [...]]'
    def test_broken_repr(self):
        class Foo(object):
            def __repr__(self):
                1/0
        assert debug_repr(Foo()) == \
            u'<span class="brokenrepr"><broken repr (ZeroDivisionError: ' \
            u'integer division or modulo by zero)></span>'
class DebugHelpersTestCase(WerkzeugTestCase):
    """Exercises the dump()/helper() console utilities."""
    def test_object_dumping(self):
        class Foo(object):
            x = 42
            y = 23
            def __init__(self):
                self.z = 15
        drg = DebugReprGenerator()
        out = drg.dump_object(Foo())
        assert re.search('Details for werkzeug.testsuite.debug.Foo object at', out)
        assert re.search('<th>x.*<span class="number">42</span>(?s)', out)
        assert re.search('<th>y.*<span class="number">23</span>(?s)', out)
        assert re.search('<th>z.*<span class="number">15</span>(?s)', out)
        # Dicts with string-only keys get a "Contents of" table...
        out = drg.dump_object({'x': 42, 'y': 23})
        assert re.search('Contents of', out)
        assert re.search('<th>x.*<span class="number">42</span>(?s)', out)
        assert re.search('<th>y.*<span class="number">23</span>(?s)', out)
        # ...but mixed key types do not.
        out = drg.dump_object({'x': 42, 'y': 23, 23: 11})
        assert not re.search('Contents of', out)
        out = drg.dump_locals({'x': 42, 'y': 23})
        assert re.search('Local variables in frame', out)
        assert re.search('<th>x.*<span class="number">42</span>(?s)', out)
        assert re.search('<th>y.*<span class="number">23</span>(?s)', out)
    def test_debug_dump(self):
        # dump() writes HTML to stdout; capture it via HTMLStringO.
        old = sys.stdout
        sys.stdout = HTMLStringO()
        try:
            dump([1, 2, 3])
            x = sys.stdout.reset()
            dump()
            y = sys.stdout.reset()
        finally:
            sys.stdout = old
        assert 'Details for list object at' in x
        assert '<span class="number">1</span>' in x
        assert 'Local variables in frame' in y
        assert '<th>x' in y
        assert '<th>old' in y
    def test_debug_help(self):
        old = sys.stdout
        sys.stdout = HTMLStringO()
        try:
            helper([1, 2, 3])
            x = sys.stdout.reset()
        finally:
            sys.stdout = old
        assert 'Help on list object' in x
        assert '__delitem__' in x
def suite():
    """Collect every debug test case into one unittest suite."""
    tests = unittest.TestSuite()
    for case in (DebugReprTestCase, DebugHelpersTestCase):
        tests.addTest(unittest.makeSuite(case))
    return tests
| mit |
dgarros/ansible | lib/ansible/modules/network/avi/avi_systemconfiguration.py | 43 | 6203 | #!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Standard Ansible module metadata: maturity level and support owner.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_systemconfiguration
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of SystemConfiguration Avi RESTful Object
description:
- This module is used to configure SystemConfiguration object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
admin_auth_configuration:
description:
- Adminauthconfiguration settings for systemconfiguration.
dns_configuration:
description:
- Dnsconfiguration settings for systemconfiguration.
dns_virtualservice_refs:
description:
- Dns virtualservices hosting fqdn records for applications across avi vantage.
- If no virtualservices are provided, avi vantage will provide dns services for configured applications.
- Switching back to avi vantage from dns virtualservices is not allowed.
- It is a reference to an object of type virtualservice.
docker_mode:
description:
- Boolean flag to set docker_mode.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
email_configuration:
description:
- Emailconfiguration settings for systemconfiguration.
global_tenant_config:
description:
- Tenantconfiguration settings for systemconfiguration.
linux_configuration:
description:
- Linuxconfiguration settings for systemconfiguration.
mgmt_ip_access_control:
description:
- Configure ip access control for controller to restrict open access.
ntp_configuration:
description:
- Ntpconfiguration settings for systemconfiguration.
portal_configuration:
description:
- Portalconfiguration settings for systemconfiguration.
proxy_configuration:
description:
- Proxyconfiguration settings for systemconfiguration.
snmp_configuration:
description:
- Snmpconfiguration settings for systemconfiguration.
ssh_ciphers:
description:
- Allowed ciphers list for ssh to the management interface on the controller and service engines.
- If this is not specified, all the default ciphers are allowed.
- Ssh -q cipher provides the list of default ciphers supported.
ssh_hmacs:
description:
- Allowed hmac list for ssh to the management interface on the controller and service engines.
- If this is not specified, all the default hmacs are allowed.
- Ssh -q mac provides the list of default hmacs supported.
tech_support_uploader_configuration:
description:
- Techsupportuploaderconfiguration settings for systemconfiguration.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create SystemConfiguration object
avi_systemconfiguration:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_systemconfiguration
"""
RETURN = '''
obj:
description: SystemConfiguration (api/systemconfiguration) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
    """Build the SystemConfiguration argument spec and drive the Avi API.

    Every option mirrors a field of the Avi ``systemconfiguration`` REST
    object; the shared ``avi_ansible_api`` helper performs the actual
    create/update/delete call against the controller.
    """
    argument_specs = dict(
        state=dict(default='present',
                   choices=['absent', 'present']),
        admin_auth_configuration=dict(type='dict',),
        dns_configuration=dict(type='dict',),
        dns_virtualservice_refs=dict(type='list',),
        docker_mode=dict(type='bool',),
        email_configuration=dict(type='dict',),
        global_tenant_config=dict(type='dict',),
        linux_configuration=dict(type='dict',),
        mgmt_ip_access_control=dict(type='dict',),
        ntp_configuration=dict(type='dict',),
        portal_configuration=dict(type='dict',),
        proxy_configuration=dict(type='dict',),
        snmp_configuration=dict(type='dict',),
        ssh_ciphers=dict(type='list',),
        ssh_hmacs=dict(type='list',),
        tech_support_uploader_configuration=dict(type='dict',),
        url=dict(type='str',),
        uuid=dict(type='str',),
    )
    # Add the common controller/credential options shared by all Avi modules.
    argument_specs.update(avi_common_argument_spec())
    module = AnsibleModule(
        argument_spec=argument_specs, supports_check_mode=True)
    if not HAS_AVI:
        # The import of the SDK helpers failed at module load time.
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    return avi_ansible_api(module, 'systemconfiguration',
                           set([]))


if __name__ == '__main__':
    main()
| gpl-3.0 |
kofron/bigcouch | couchjs/scons/scons-local-2.0.1/SCons/Tool/hplink.py | 61 | 2390 | """SCons.Tool.hplink
Tool-specific initialization for the HP linker.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/hplink.py 5134 2010/08/16 23:02:40 bdeegan"
import os
import os.path
import SCons.Util
import link
# Search /opt for the HP aCC compiler/linker front end; remember the first
# hit in ccLinker (None when no installation is found).
ccLinker = None
try:
    _opt_entries = os.listdir('/opt')
except (IOError, OSError):
    # /opt missing (IOError) or unreadable (OSError) simply means no
    # candidates to scan.
    _opt_entries = []
for _entry in _opt_entries:
    _candidate = os.path.join('/opt', _entry, 'bin', 'aCC')
    if os.path.exists(_candidate):
        ccLinker = _candidate
        break
def generate(env):
    """
    Add Builders and construction variables for Visual Age linker to
    an Environment.
    """
    # Reuse the generic link tool setup, then override for HP-UX aCC.
    link.generate(env)

    # -Wl,+s lets shared libraries be located via SHLIB_PATH at run time;
    # +vnocompatwarnings silences HP-UX compatibility warnings.
    env['LINKFLAGS'] = SCons.Util.CLVar('-Wl,+s -Wl,+vnocompatwarnings')
    # aCC builds a shared library with -b.
    env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -b')
    # HP-UX shared libraries traditionally use the .sl suffix.
    env['SHLIBSUFFIX'] = '.sl'
def exists(env):
    # The tool is only usable when the module-level scan found an aCC
    # front end under /opt (ccLinker is None otherwise).
    return ccLinker
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
vincent-noel/SigNetSim | signetsim/views/simulate/TimeSeriesSimulationView.py | 2 | 9881 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2017 Vincent Noel (vincent.noel@butantan.gov.br)
#
# This file is part of libSigNetSim.
#
# libSigNetSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# libSigNetSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with libSigNetSim. If not, see <http://www.gnu.org/licenses/>.
""" TimeSeriesSimulationView.py
This file ...
"""
from django.views.generic import TemplateView
from signetsim.views.HasWorkingModel import HasWorkingModel
from .SedmlWriter import SedmlWriter
from .TimeSeriesSimulationForm import TimeSeriesSimulationForm
from signetsim.models import Experiment, Condition, Treatment, SEDMLSimulation, new_sedml_filename
from signetsim.managers.data import buildExperiment
from signetsim.managers.models import copyModelHierarchy
from signetsim.settings.Settings import Settings
from libsignetsim import TimeseriesSimulation, LibSigNetSimException
from django.conf import settings
from django.core.files import File
from django.shortcuts import redirect
from os.path import join, dirname
from os import remove
class TimeSeriesSimulationView(TemplateView, HasWorkingModel, SedmlWriter):
    """Django view running time series simulations of the working model.

    GET renders the simulation form. POST either runs a simulation
    ("simulate_model") or stores the configuration as a SED-ML document
    ("save_simulation").

    BUGFIX: form reaction indices refer to ``self.listOfReactions``;
    the original code indexed ``self.listOfVariables`` (which excludes
    reactions, see loadVariables) in the unit lookup and when saving,
    picking the wrong entities.
    """

    template_name = 'simulate/timeseries.html'

    def __init__(self, **kwargs):
        TemplateView.__init__(self, **kwargs)
        HasWorkingModel.__init__(self)
        SedmlWriter.__init__(self)

        self.form = TimeSeriesSimulationForm(self)
        # Selectable model entities; indices carried by the form refer to
        # positions in these two lists.
        self.listOfVariables = None
        self.listOfReactions = None
        self.experiments = None
        self.nbConditions = None
        self.experimentName = None
        self.observations = None
        # Plotting state filled in by read_timeseries().
        self.simResults = None
        self.t_unit = None
        self.y_unit = None
        self.y_max = 0
        self.experiment = None

    def get_context_data(self, **kwargs):
        """Expose form choices, experiment data and simulation results."""
        kwargs = HasWorkingModel.get_context_data(self, **kwargs)

        kwargs['species'] = self.listOfVariables
        kwargs['reactions'] = self.listOfReactions
        kwargs['experiments'] = self.experiments
        if self.experiment is not None:
            kwargs['experiment_name'] = self.experiment.name
            kwargs['experiment_observations'] = [condition.listOfExperimentalData for condition in self.experiment.listOfConditions.values()]
        kwargs['ids_species_selected'] = self.form.selectedSpeciesIds
        kwargs['ids_reactions_selected'] = self.form.selectedReactionsIds
        kwargs['sim_results'] = self.simResults
        kwargs['t_unit'] = self.t_unit
        kwargs['y_unit'] = self.y_unit
        kwargs['y_max'] = self.y_max
        kwargs['colors'] = Settings.default_colors
        kwargs['form'] = self.form
        return kwargs

    def get(self, request, *args, **kwargs):
        self.load(request, *args, **kwargs)
        return TemplateView.get(self, request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        self.load(request, *args, **kwargs)
        if "action" in request.POST:
            if HasWorkingModel.isChooseModel(self, request):
                # Model changed: reload lists against the new model.
                self.load(request, *args, **kwargs)
            elif request.POST['action'] == "simulate_model":
                self.simulateModel(request)
            elif request.POST['action'] == "save_simulation":
                self.saveSimulation(request)
                # NOTE(review): the redirect return value is discarded, so
                # the page is re-rendered instead of redirecting — kept
                # as-is to preserve existing behavior.
                redirect('list_of_simulations')
        return TemplateView.get(self, request, *args, **kwargs)

    def load(self, request, *args, **kwargs):
        """Load the working model and refresh the selectable lists."""
        HasWorkingModel.load(self, request, *args, **kwargs)
        if self.isModelLoaded():
            self.loadVariables()
            self.loadReactions()
            self.loadExperiments()

    def read_timeseries(self, results):
        """Convert raw solver output into per-condition plot data.

        Fills simResults (list of (times, {label: values}, condition_name)),
        y_max, and the time/value axis units.
        """
        self.simResults = []
        for i, result in enumerate(results):
            (t_t, t_y) = result
            y_filtered = {}
            if self.form.selectedSpeciesIds is not None:
                for var in self.form.selectedSpeciesIds:
                    t_sbml_id = str(self.listOfVariables[var].symbol.getSymbol())
                    t_name = self.listOfVariables[var].getNameOrSbmlId()
                    if self.form.showObservations == True:
                        # Distinguish simulated curves from observations.
                        t_name += " (model)"
                    y_filtered.update({t_name: t_y[t_sbml_id]})

            if self.form.selectedReactionsIds is not None:
                for var in self.form.selectedReactionsIds:
                    t_sbml_id = str(self.listOfReactions[var].symbol.getSymbol())
                    t_name = self.listOfReactions[var].getNameOrSbmlId()
                    y_filtered.update({t_name: t_y[t_sbml_id]})

            if self.experiment is not None:
                self.simResults.append((t_t, y_filtered, self.experiment.listOfConditions[i].name))
            else:
                self.simResults.append((t_t, y_filtered, ""))

        # Axis maximum: largest plotted value plus 10% headroom.
        tmax = 0
        for _times, y_values, _label in self.simResults:
            for _key, value in y_values.items():
                for t_value in value:
                    tmax = max(tmax, t_value)
        self.y_max = tmax * 1.1

        if self.getModelInstance().timeUnits is not None:
            self.t_unit = self.getModelInstance().timeUnits.getName()

        if (self.form.selectedSpeciesIds is not None
                and len(self.form.selectedSpeciesIds) > 0
                and self.listOfVariables[self.form.selectedSpeciesIds[0]].getUnits() is not None):
            self.y_unit = self.listOfVariables[self.form.selectedSpeciesIds[0]].getUnits().getNameOrSbmlId()

        # BUGFIX: reaction indices index listOfReactions, not listOfVariables.
        if (self.form.selectedReactionsIds is not None
                and len(self.form.selectedReactionsIds) > 0
                and self.listOfReactions[self.form.selectedReactionsIds[0]].getUnits() is not None):
            self.y_unit = self.listOfReactions[self.form.selectedReactionsIds[0]].getUnits().getNameOrSbmlId()

    def simulateModel(self, request):
        """Run the time series simulation, respecting the CPU time quota."""
        self.form.read(request)
        if not self.form.hasErrors():
            if self.hasCPUTimeQuota(request):
                self.experiment = None
                if self.form.experimentId is not None:
                    t_experiment = Experiment.objects.get(id=self.experiments[self.form.experimentId].id)
                    self.experiment = buildExperiment(t_experiment)
                try:
                    t_simulation = TimeseriesSimulation(
                        list_of_models=[self.getModelInstance()],
                        experiment=self.experiment,
                        time_min=self.form.timeMin,
                        time_max=self.form.timeMax,
                        time_ech=self.form.timeEch)
                    t_simulation.run(timeout=self.getCPUTimeQuota(request))
                    results = t_simulation.getRawData()
                    # Charge the actual simulation time to the user's quota.
                    self.addCPUTime(request, t_simulation.getSimulationDuration())
                    self.read_timeseries(results)
                except LibSigNetSimException as e:
                    self.form.addError(e.message)
            else:
                self.form.addError("You exceeded your allowed computation time. Please contact the administrator")

    def _selectedVariables(self):
        """Return the model entities selected in the form, species first.

        BUGFIX: reaction indices are resolved against listOfReactions
        (listOfVariables excludes reactions).
        """
        variables = []
        if self.form.selectedSpeciesIds is not None:
            for id_var in self.form.selectedSpeciesIds:
                variables.append(self.listOfVariables[id_var])
        if self.form.selectedReactionsIds is not None:
            for id_var in self.form.selectedReactionsIds:
                variables.append(self.listOfReactions[id_var])
        return variables

    def saveSimulation(self, request):
        """Persist the configured simulation as a SED-ML document."""
        self.form.read(request)
        if not self.form.hasErrors():
            self.createDocument()
            timecourse = self.createUniformTimecourse(self.form.timeMin, self.form.timeMax, self.form.timeEch)

            if self.form.saveModelSnapshot:
                # Freeze the current model state alongside the simulation.
                sbml_model = join(dirname(self.model_filename), copyModelHierarchy(self.model_filename))
            else:
                sbml_model = self.model_filename

            if self.form.experimentId is not None:
                # One curve per experimental condition, with the condition's
                # treatments applied as model modifications.
                self.loadExperiments()
                t_experiment_id = self.experiments[self.form.experimentId].id
                experiment = Experiment.objects.get(id=t_experiment_id)
                conditions = Condition.objects.filter(experiment=experiment)
                for i, condition in enumerate(conditions):
                    modifications = []
                    input_data = Treatment.objects.filter(condition=condition)
                    for data in input_data:
                        var = None
                        if self.getModelInstance().listOfSpecies.containsName(data.species):
                            var = self.getModelInstance().listOfSpecies.getByName(data.species)
                        elif self.getModelInstance().listOfParameters.containsName(data.species):
                            var = self.getModelInstance().listOfParameters.getByName(data.species)
                        elif self.getModelInstance().listOfCompartments.containsName(data.species):
                            var = self.getModelInstance().listOfCompartments.getByName(data.species)
                        if var is not None:
                            modifications.append((var, data.value))

                    model = self.addModel(sbml_model, modifications)
                    self.addTimeseriesCurve(timecourse, model, condition.name, self._selectedVariables())
            else:
                model = self.addModel(sbml_model)
                self.addTimeseriesCurve(
                    timecourse, model,
                    ("Simulation" if self.form.simulationName is None else self.form.simulationName),
                    self._selectedVariables()
                )

            simulation_filename = join(settings.MEDIA_ROOT, new_sedml_filename())
            # Touch the file so the Django File wrapper below can open it
            # (BUGFIX: close the handle instead of leaking it).
            open(simulation_filename, "a").close()
            new_simulation = SEDMLSimulation(
                project=self.project,
                name=("Simulation" if self.form.simulationName is None else self.form.simulationName),
                sedml_file=File(open(simulation_filename, "rb")),
                sbml_file=sbml_model)
            new_simulation.save()
            filename = join(settings.MEDIA_ROOT, str(new_simulation.sedml_file))
            # The upload was copied into MEDIA_ROOT; drop the temp file and
            # write the actual SED-ML content at its final location.
            remove(simulation_filename)
            self.saveSedml(filename)

    def loadExperiments(self):
        """Fetch the project's experiments for the form drop-down."""
        self.experiments = Experiment.objects.filter(project=self.project)

    def loadVariables(self):
        """Non-constant species/parameters/compartments (reactions excluded)."""
        self.listOfVariables = [obj for obj in self.getModelInstance().listOfVariables if not obj.constant and (obj.isSpecies() or obj.isParameter() or obj.isCompartment())]

    def loadReactions(self):
        """All reaction variables of the working model."""
        self.listOfReactions = [obj for obj in self.getModelInstance().listOfVariables if obj.isReaction()]
| agpl-3.0 |
blinkbox/shaka-player | third_party/gjslint/python-gflags-2.0/gflags2man.py | 407 | 18864 | #!/usr/bin/env python
# Copyright (c) 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""gflags2man runs a Google flags base program and generates a man page.
Run the program, parse the output, and then format that into a man
page.
Usage:
gflags2man <program> [program] ...
"""
# TODO(csilvers): work with windows paths (\) as well as unix (/)
# This may seem a bit of an end run, but it: doesn't bloat flags, can
# support python/java/C++, supports older executables, and can be
# extended to other document formats.
# Inspired by help2man.
import os
import re
import sys
import stat
import time
import gflags
_VERSION = '0.1'
def _GetDefaultDestDir():
home = os.environ.get('HOME', '')
homeman = os.path.join(home, 'man', 'man1')
if home and os.path.exists(homeman):
return homeman
else:
return os.environ.get('TMPDIR', '/tmp')
# Command line flags for this tool (parsed by gflags in main()).
FLAGS = gflags.FLAGS
gflags.DEFINE_string('dest_dir', _GetDefaultDestDir(),
                     'Directory to write resulting manpage to.'
                     ' Specify \'-\' for stdout')
gflags.DEFINE_string('help_flag', '--help',
                     'Option to pass to target program in to get help')
gflags.DEFINE_integer('v', 0, 'verbosity level to use for output')

_MIN_VALID_USAGE_MSG = 9  # if fewer lines than this, help is suspect
class Logging:
    """A super-simple logging class"""
    # NOTE(review): Python 2 print-statement syntax throughout; this module
    # is not importable on Python 3 as written.
    def error(self, msg): print >>sys.stderr, "ERROR: ", msg
    def warn(self, msg): print >>sys.stderr, "WARNING: ", msg
    def info(self, msg): print msg
    def debug(self, msg): self.vlog(1, msg)
    def vlog(self, level, msg):
        # Emit only when the --v flag meets the requested verbosity.
        if FLAGS.v >= level: print msg

# Module-wide logger instance used by all classes below.
logging = Logging()
class App:
    """Minimal stand-in for the Google apputils App object."""
    def usage(self, shorthelp=0):
        # Print the module docstring followed by the flag help to stderr.
        print >>sys.stderr, __doc__
        print >>sys.stderr, "flags:"
        print >>sys.stderr, str(FLAGS)
    def run(self):
        main(sys.argv)

# Singleton entry point object; see the __main__ guard at file end.
app = App()
def GetRealPath(filename):
    """Resolve an executable name to an absolute path, searching $PATH.

    Args:
      filename: An executable filename (string).

    Returns:
      Absolute version of filename, or None if it could not be found
      locally, absolutely, or in PATH.
    """
    if os.path.isabs(filename):
        # Already absolute: trust the caller.
        return filename
    if filename.startswith('./') or filename.startswith('../'):
        # Explicitly relative: resolve against the current directory.
        return os.path.abspath(filename)
    for search_dir in os.getenv('PATH', '').split(':'):
        candidate = os.path.join(search_dir, filename)
        if os.path.exists(candidate):
            if os.path.isabs(search_dir):
                return candidate
            return os.path.abspath(candidate)
    if os.path.exists(filename):
        return os.path.abspath(filename)
    return None  # could not determine
class Flag(object):
    """Everything we know about a single command line flag."""

    def __init__(self, flag_desc, help):
        """Record the flag's forms and help text.

        Args:
          flag_desc: The command line forms this could take. (string)
          help: The help text (string)
        """
        self.desc = flag_desc    # the command line forms
        self.help = help         # the help text
        # Filled in later by the parsers, when present in the help output.
        self.default = ''        # default value
        self.tips = ''           # parsing/syntax tips
class ProgramInfo(object):
    """All the information gleaned from running a program with --help."""

    # Match a module block start, for python scripts --help
    # "goopy.logging:"
    module_py_re = re.compile(r'(\S.+):$')
    # match the start of a flag listing
    # "  -v,--verbosity:  Logging verbosity"
    flag_py_re = re.compile(r'\s+(-\S+):\s+(.*)$')
    # "    (default: '0')"
    flag_default_py_re = re.compile(r'\s+\(default:\s+\'(.*)\'\)$')
    # "    (an integer)"
    flag_tips_py_re = re.compile(r'\s+\((.*)\)$')

    # Match a module block start, for c++ programs --help
    # "google/base/commandlineflags":
    module_c_re = re.compile(r'\s+Flags from (\S.+):$')
    # match the start of a flag listing
    # "  -v,--verbosity:  Logging verbosity"
    flag_c_re = re.compile(r'\s+(-\S+)\s+(.*)$')

    # Match a module block start, for java programs --help
    # "com.google.common.flags"
    module_java_re = re.compile(r'\s+Flags for (\S.+):$')
    # match the start of a flag listing
    # "  -v,--verbosity:  Logging verbosity"
    flag_java_re = re.compile(r'\s+(-\S+)\s+(.*)$')

    def __init__(self, executable):
        """Create object with executable.

        Args:
          executable  Program to execute (string)
        """
        self.long_name = executable
        self.name = os.path.basename(executable)  # name
        # Get name without extension (PAR files)
        (self.short_name, self.ext) = os.path.splitext(self.name)
        self.executable = GetRealPath(executable)  # name of the program
        self.output = []        # output from the program.  List of lines.
        self.desc = []          # top level description.  List of lines
        self.modules = {}       # { section_name(string), [ flags ] }
        self.module_list = []   # list of module names in their original order
        self.date = time.localtime(time.time())   # default date info

    def Run(self):
        """Run it and collect output.

        Returns:
          1 (true)    If everything went well.
          0 (false)   If there were problems.
        """
        if not self.executable:
            logging.error('Could not locate "%s"' % self.long_name)
            return 0

        finfo = os.stat(self.executable)
        self.date = time.localtime(finfo[stat.ST_MTIME])

        logging.info('Running: %s %s </dev/null 2>&1'
                     % (self.executable, FLAGS.help_flag))
        # --help output is often routed to stderr, so we combine with stdout.
        # Re-direct stdin to /dev/null to encourage programs that
        # don't understand --help to exit.
        # NOTE(review): os.popen4 is Python 2 only (removed in Python 3;
        # subprocess would be the replacement).
        (child_stdin, child_stdout_and_stderr) = os.popen4(
            [self.executable, FLAGS.help_flag])
        child_stdin.close()  # '</dev/null'
        self.output = child_stdout_and_stderr.readlines()
        child_stdout_and_stderr.close()
        if len(self.output) < _MIN_VALID_USAGE_MSG:
            logging.error('Error: "%s %s" returned only %d lines: %s'
                          % (self.name, FLAGS.help_flag,
                             len(self.output), self.output))
            return 0
        return 1

    def Parse(self):
        """Parse program output."""
        # First split off the description header, detecting the flag
        # syntax flavor, then dispatch to the matching flag parser.
        (start_line, lang) = self.ParseDesc()
        if start_line < 0:
            return
        if 'python' == lang:
            self.ParsePythonFlags(start_line)
        elif 'c' == lang:
            self.ParseCFlags(start_line)
        elif 'java' == lang:
            self.ParseJavaFlags(start_line)

    def ParseDesc(self, start_line=0):
        """Parse the initial description.

        This could be Python or C++.

        Returns:
          (start_line, lang_type)
            start_line  Line to start parsing flags on (int)
            lang_type   Either 'python' or 'c'
          (-1, '')  if the flags start could not be found
        """
        exec_mod_start = self.executable + ':'

        after_blank = 0
        start_line = 0  # ignore the passed-in arg for now (?)
        for start_line in range(start_line, len(self.output)):  # collect top description
            line = self.output[start_line].rstrip()
            # Python flags start with 'flags:\n'
            if ('flags:' == line
                    and len(self.output) > start_line+1
                    and '' == self.output[start_line+1].rstrip()):
                start_line += 2
                logging.debug('Flags start (python): %s' % line)
                return (start_line, 'python')
            # SWIG flags just have the module name followed by colon.
            if exec_mod_start == line:
                logging.debug('Flags start (swig): %s' % line)
                return (start_line, 'python')
            # C++ flags begin after a blank line and with a constant string
            if after_blank and line.startswith(' Flags from '):
                logging.debug('Flags start (c): %s' % line)
                return (start_line, 'c')
            # java flags begin with a constant string
            if line == 'where flags are':
                logging.debug('Flags start (java): %s' % line)
                start_line += 2  # skip "Standard flags:"
                return (start_line, 'java')
            logging.debug('Desc: %s' % line)
            self.desc.append(line)
            after_blank = (line == '')
        else:
            # for/else: the loop ran out without returning -> no flags found.
            logging.warn('Never found the start of the flags section for "%s"!'
                         % self.long_name)
            return (-1, '')

    def ParsePythonFlags(self, start_line=0):
        """Parse python/swig style flags."""
        modname = None  # name of current module
        modlist = []
        flag = None
        for line_num in range(start_line, len(self.output)):  # collect flags
            line = self.output[line_num].rstrip()
            if not line:  # blank
                continue

            mobj = self.module_py_re.match(line)
            if mobj:  # start of a new module
                modname = mobj.group(1)
                logging.debug('Module: %s' % line)
                if flag:
                    modlist.append(flag)
                self.module_list.append(modname)
                self.modules.setdefault(modname, [])
                modlist = self.modules[modname]
                flag = None
                continue

            mobj = self.flag_py_re.match(line)
            if mobj:  # start of a new flag
                if flag:
                    modlist.append(flag)
                logging.debug('Flag: %s' % line)
                flag = Flag(mobj.group(1), mobj.group(2))
                continue

            if not flag:  # continuation of a flag
                logging.error('Flag info, but no current flag "%s"' % line)
            mobj = self.flag_default_py_re.match(line)
            if mobj:  # (default: '...')
                flag.default = mobj.group(1)
                logging.debug('Fdef: %s' % line)
                continue
            mobj = self.flag_tips_py_re.match(line)
            if mobj:  # (tips)
                flag.tips = mobj.group(1)
                logging.debug('Ftip: %s' % line)
                continue
            if flag and flag.help:
                flag.help += line  # multiflags tack on an extra line
            else:
                logging.info('Extra: %s' % line)
        if flag:  # flush the last flag seen
            modlist.append(flag)

    def ParseCFlags(self, start_line=0):
        """Parse C style flags."""
        modname = None  # name of current module
        modlist = []
        flag = None
        for line_num in range(start_line, len(self.output)):  # collect flags
            line = self.output[line_num].rstrip()
            if not line:  # blank lines terminate flags
                if flag:  # save last flag
                    modlist.append(flag)
                    flag = None
                continue

            mobj = self.module_c_re.match(line)
            if mobj:  # start of a new module
                modname = mobj.group(1)
                logging.debug('Module: %s' % line)
                if flag:
                    modlist.append(flag)
                self.module_list.append(modname)
                self.modules.setdefault(modname, [])
                modlist = self.modules[modname]
                flag = None
                continue

            mobj = self.flag_c_re.match(line)
            if mobj:  # start of a new flag
                if flag:  # save last flag
                    modlist.append(flag)
                logging.debug('Flag: %s' % line)
                flag = Flag(mobj.group(1), mobj.group(2))
                continue

            # append to flag help. type and default are part of the main text
            if flag:
                flag.help += ' ' + line.strip()
            else:
                logging.info('Extra: %s' % line)
        if flag:  # flush the last flag seen
            modlist.append(flag)

    def ParseJavaFlags(self, start_line=0):
        """Parse Java style flags (com.google.common.flags)."""
        # The java flags prints starts with a "Standard flags" "module"
        # that doesn't follow the standard module syntax.
        modname = 'Standard flags'  # name of current module
        self.module_list.append(modname)
        self.modules.setdefault(modname, [])
        modlist = self.modules[modname]
        flag = None

        for line_num in range(start_line, len(self.output)):  # collect flags
            line = self.output[line_num].rstrip()
            logging.vlog(2, 'Line: "%s"' % line)
            if not line:  # blank lines terminate module
                if flag:  # save last flag
                    modlist.append(flag)
                    flag = None
                continue

            mobj = self.module_java_re.match(line)
            if mobj:  # start of a new module
                modname = mobj.group(1)
                logging.debug('Module: %s' % line)
                if flag:
                    modlist.append(flag)
                self.module_list.append(modname)
                self.modules.setdefault(modname, [])
                modlist = self.modules[modname]
                flag = None
                continue

            mobj = self.flag_java_re.match(line)
            if mobj:  # start of a new flag
                if flag:  # save last flag
                    modlist.append(flag)
                logging.debug('Flag: %s' % line)
                flag = Flag(mobj.group(1), mobj.group(2))
                continue

            # append to flag help. type and default are part of the main text
            if flag:
                flag.help += ' ' + line.strip()
            else:
                logging.info('Extra: %s' % line)
        if flag:  # flush the last flag seen
            modlist.append(flag)

    def Filter(self):
        """Filter parsed data to create derived fields."""
        if not self.desc:
            self.short_desc = ''
            return

        for i in range(len(self.desc)):  # replace full path with name
            if self.desc[i].find(self.executable) >= 0:
                self.desc[i] = self.desc[i].replace(self.executable, self.name)

        self.short_desc = self.desc[0]
        word_list = self.short_desc.split(' ')
        all_names = [ self.name, self.short_name, ]
        # Since the short_desc is always listed right after the name,
        # trim it from the short_desc
        while word_list and (word_list[0] in all_names
                             or word_list[0].lower() in all_names):
            del word_list[0]
            self.short_desc = ''  # signal need to reconstruct
        if not self.short_desc and word_list:
            self.short_desc = ' '.join(word_list)
class GenerateDoc(object):
    """Abstract base class for rendering a ProgramInfo into a document."""

    def __init__(self, proginfo, directory='.'):
        """Remember the program information and the output directory.

        Args:
          proginfo   A ProgramInfo object
          directory  Directory to write output into
        """
        self.info = proginfo
        self.dirname = directory

    def Output(self):
        """Emit the whole document: open it, then header, body, footer."""
        self.Open()
        self.Header()
        self.Body()
        self.Footer()

    # The four hooks below must be provided by concrete subclasses.
    def Open(self):
        raise NotImplementedError

    def Header(self):
        raise NotImplementedError

    def Body(self):
        raise NotImplementedError

    def Footer(self):
        raise NotImplementedError
class GenerateMan(GenerateDoc):
    """Output a man page."""

    def __init__(self, proginfo, directory='.'):
        """Create base object.

        Args:
          proginfo   A ProgramInfo object
          directory  Directory to write output into
        """
        GenerateDoc.__init__(self, proginfo, directory)

    def Open(self):
        # '-' means stdout; otherwise write "<name>.1" into the directory.
        if self.dirname == '-':
            logging.info('Writing to stdout')
            self.fp = sys.stdout
        else:
            self.file_path = '%s.1' % os.path.join(self.dirname, self.info.name)
            logging.info('Writing: %s' % self.file_path)
            self.fp = open(self.file_path, 'w')

    def Header(self):
        # .TH title line plus NAME and SYNOPSIS sections (roff man macros).
        self.fp.write(
            '.\\" DO NOT MODIFY THIS FILE! It was generated by gflags2man %s\n'
            % _VERSION)
        self.fp.write(
            '.TH %s "1" "%s" "%s" "User Commands"\n'
            % (self.info.name, time.strftime('%x', self.info.date), self.info.name))
        self.fp.write(
            '.SH NAME\n%s \\- %s\n' % (self.info.name, self.info.short_desc))
        self.fp.write(
            '.SH SYNOPSIS\n.B %s\n[\\fIFLAGS\\fR]...\n' % self.info.name)

    def Body(self):
        self.fp.write(
            '.SH DESCRIPTION\n.\\" Add any additional description here\n.PP\n')
        for ln in self.info.desc:
            self.fp.write('%s\n' % ln)
        self.fp.write(
            '.SH OPTIONS\n')
        # This shows flags in the original order
        for modname in self.info.module_list:
            # Show the program name rather than its full resolved path.
            if modname.find(self.info.executable) >= 0:
                mod = modname.replace(self.info.executable, self.info.name)
            else:
                mod = modname
            self.fp.write('\n.P\n.I %s\n' % mod)
            for flag in self.info.modules[modname]:
                help_string = flag.help
                if flag.default or flag.tips:
                    help_string += '\n.br\n'
                if flag.default:
                    help_string += ' (default: \'%s\')' % flag.default
                if flag.tips:
                    help_string += ' (%s)' % flag.tips
                self.fp.write(
                    '.TP\n%s\n%s\n' % (flag.desc, help_string))

    def Footer(self):
        # \(co is the roff copyright-sign escape.
        self.fp.write(
            '.SH COPYRIGHT\nCopyright \(co %s Google.\n'
            % time.strftime('%Y', self.info.date))
        self.fp.write('Gflags2man created this page from "%s %s" output.\n'
                      % (self.info.name, FLAGS.help_flag))
        self.fp.write('\nGflags2man was written by Dan Christian. '
                      ' Note that the date on this'
                      ' page is the modification date of %s.\n' % self.info.name)
def main(argv):
  """Generate a man page for every program named on the command line."""
  argv = FLAGS(argv)  # also handles --help
  if len(argv) <= 1:
    app.usage(shorthelp=1)
    return 1

  for program in argv[1:]:
    info = ProgramInfo(program)
    if not info.Run():
      continue
    info.Parse()
    info.Filter()
    GenerateMan(info, FLAGS.dest_dir).Output()
  return 0
if __name__ == '__main__':
  # app.run() parses flags and dispatches to main(argv).
  app.run()
| apache-2.0 |
annarev/tensorflow | tensorflow/python/training/experimental/mixed_precision_test.py | 8 | 7435 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import tf2
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import config
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.training import gradient_descent as gradient_descent_v1
from tensorflow.python.training.experimental import loss_scale_optimizer as loss_scale_optimizer_v1
from tensorflow.python.training.experimental import mixed_precision
from tensorflow.python.training.experimental import mixed_precision_global_state
# Pick the rewrite entry point matching the running TF major version. Per the
# error messages asserted below, the TF2 symbol accepts only Keras optimizers
# while the TF1 variant also accepts tf.train.Optimizer instances.
if tf2.enabled():
  enable_mixed_precision_graph_rewrite = (
      mixed_precision.enable_mixed_precision_graph_rewrite)
else:
  enable_mixed_precision_graph_rewrite = (
      mixed_precision.enable_mixed_precision_graph_rewrite_v1)
class MixedPrecisionTest(test.TestCase, parameterized.TestCase):
  """Tests for enable/disable_mixed_precision_graph_rewrite (TF1 and TF2)."""

  # Environment variable read by the grappler auto-mixed-precision pass.
  IGNORE_PERF_VAR = 'TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_IGNORE_PERFORMANCE'

  def setUp(self):
    super(MixedPrecisionTest, self).setUp()
    # Enable the tests to be run on pre-Volta GPUs by telling the grappler pass
    # to ignore performance and always transform the graph.
    self._original_ignore_perf_value = os.getenv(self.IGNORE_PERF_VAR)
    os.environ[self.IGNORE_PERF_VAR] = '1'

  def tearDown(self):
    # Set the IGNORE_PERF_VAR variable back to its original value.
    if self._original_ignore_perf_value is not None:
      os.environ[self.IGNORE_PERF_VAR] = self._original_ignore_perf_value
    else:
      del os.environ[self.IGNORE_PERF_VAR]
    # Undo any rewrite a test enabled so state does not leak between tests.
    mixed_precision.disable_mixed_precision_graph_rewrite()
    super(MixedPrecisionTest, self).tearDown()

  @test_util.run_in_graph_and_eager_modes
  def test_wrap_optimizer(self):
    # Wrapping must return a loss-scale optimizer carrying the given scale.
    opt = gradient_descent_v1.GradientDescentOptimizer(1.0)
    opt = enable_mixed_precision_graph_rewrite(opt, 123.)
    self.assertIsInstance(
        opt, loss_scale_optimizer_v1.MixedPrecisionLossScaleOptimizer)
    self.assertEqual(self.evaluate(opt._loss_scale()), 123.)

  @test_util.run_in_graph_and_eager_modes
  def test_optimizer_errors(self):
    # A non-optimizer argument must be rejected with a version-specific
    # message, and must not enable the grappler pass as a side effect.
    opt = 1
    if tf2.enabled():
      expected_regex = ('"opt" must be an instance of a '
                        'tf.keras.optimizers.Optimizer, but got')
    else:
      expected_regex = ('"opt" must be an instance of a tf.train.Optimizer or '
                        'a tf.keras.optimizers.Optimizer, but got')
    with self.assertRaisesRegex(ValueError, expected_regex):
      enable_mixed_precision_graph_rewrite(opt)
    self.assertFalse(config.get_optimizer_experimental_options()
                     .get('auto_mixed_precision', False))
    # Double-wrapping an already loss-scaled optimizer is also an error.
    opt = gradient_descent_v1.GradientDescentOptimizer(1.0)
    opt = loss_scale_optimizer_v1.MixedPrecisionLossScaleOptimizer(opt,
                                                                   'dynamic')
    with self.assertRaisesRegex(
        ValueError, '"opt" must not already be an instance of a '
                    'MixedPrecisionLossScaleOptimizer.'):
      enable_mixed_precision_graph_rewrite(opt)
    self.assertFalse(config.get_optimizer_experimental_options()
                     .get('auto_mixed_precision', False))

  @test_util.run_gpu_only
  @test_util.run_in_graph_and_eager_modes
  @test_util.disable_tfrt('Grappler rewrite doesn\'t apply to tfrt.')
  def test_grappler_pass_enabled(self):
    opt = gradient_descent_v1.GradientDescentOptimizer(1.0)
    enable_mixed_precision_graph_rewrite(opt, 123.)
    var = variables.Variable([[1.0]])

    # 2**10 squared overflows float16 (max ~65504) but not float32, so an
    # Inf result proves the rewrite cast the matmul to float16.
    def overflow_in_float16():
      out = var * 2 ** 10
      out = math_ops.matmul(out, out)
      return array_ops.reshape(out, ())

    if context.executing_eagerly():
      f = def_function.function(overflow_in_float16)
      self.assertEqual(f().numpy(), float('Inf'))
      # Outside a def_function.function, the grappler pass will not be applied.
      self.assertAlmostEqual(overflow_in_float16().numpy(), 2 ** 20)
      # Test disabling mixed precision.
      mixed_precision.disable_mixed_precision_graph_rewrite()
      self.assertEqual(f().numpy(), 2 ** 20)
    else:
      with session.Session() as sess:
        out = overflow_in_float16()
        sess.run(var.initializer)
        self.assertEqual(sess.run(out), float('Inf'))
      # Test Session will enable the auto_mixed_precision grappler pass in a
      # ConfigProto passed by the user
      with session.Session(config=config_pb2.ConfigProto()) as sess:
        out = overflow_in_float16()
        sess.run(var.initializer)
        self.assertEqual(sess.run(out), float('Inf'))
      # Test disabling mixed precision.
      mixed_precision.disable_mixed_precision_graph_rewrite()
      with session.Session() as sess:
        out = overflow_in_float16()
        sess.run(var.initializer)
        self.assertAlmostEqual(sess.run(out), 2 ** 20)

  @test.mock.patch.object(tf_logging, 'warn')
  def test_warn_if_session_already_exists(self, mock_warn):
    # Set this to False, so Sessions created in previous tests do not trigger
    # the warning.
    mixed_precision_global_state.non_mixed_precision_session_created = False
    with session.Session():
      enable_mixed_precision_graph_rewrite(
          gradient_descent_v1.GradientDescentOptimizer(1.0))
      mock_warn.assert_any_call(
          'You already have existing Sessions that do not use mixed precision. '
          'enable_mixed_precision_graph_rewrite() will not affect these '
          'Sessions.')

  @test.mock.patch.object(tf_logging, 'warn')
  def test_do_not_warn_if_session_does_not_already_exist(self, mock_warn):
    # Set this to False, so Sessions created in previous tests do not trigger
    # the warning.
    mixed_precision_global_state.non_mixed_precision_session_created = False
    enable_mixed_precision_graph_rewrite(
        gradient_descent_v1.GradientDescentOptimizer(1.0))
    with session.Session():
      # Make sure the "You already have existing Sessions" warning was not
      # issued, since the Session was only created after
      # enable_mixed_precision_graph_rewrite.
      for call_arg in mock_warn.call_args_list:
        msg = call_arg[0][0]
        self.assertNotIn('You already have existing Sessions that do not use '
                         'mixed precision', msg)
if __name__ == '__main__':
  # Run this file's test cases under the TF test runner.
  test.main()
| apache-2.0 |
badloop/SickRage | lib/hachoir_parser/archive/ace.py | 95 | 9944 | """
ACE parser
From wotsit.org and the SDK header (bitflags)
Partial study of a new block type (5) I've called "new_recovery", as its
syntax is very close to the former one (of type 2).
Status: only file and header blocks can be read completely.
Author: Christophe Gisquet <christophe.gisquet@free.fr>
Creation date: 19 january 2006
"""
from hachoir_parser import Parser
from hachoir_core.field import (StaticFieldSet, FieldSet,
Bit, Bits, NullBits, RawBytes, Enum,
UInt8, UInt16, UInt32,
PascalString8, PascalString16, String,
TimeDateMSDOS32)
from hachoir_core.text_handler import textHandler, filesizeHandler, hexadecimal
from hachoir_core.endian import LITTLE_ENDIAN
from hachoir_parser.common.msdos import MSDOSFileAttr32
# Magic string found at byte offset 7 of every ACE archive.
MAGIC = "**ACE**"

# Host-OS codes stored in the archive header; only MS-DOS and Win32 use
# the MS-DOS file-attribute layout (see fileHeader below).
OS_MSDOS = 0
OS_WIN32 = 2
HOST_OS = {
    0: "MS-DOS",
    1: "OS/2",
    2: "Win32",
    3: "Unix",
    4: "MAC-OS",
    5: "Win NT",
    6: "Primos",
    7: "APPLE GS",
    8: "ATARI",
    9: "VAX VMS",
    10: "AMIGA",
    11: "NEXT",
}

# Compression algorithm identifiers (file header "compression_type").
COMPRESSION_TYPE = {
    0: "Store",
    1: "Lempel-Ziv 77",
    2: "ACE v2.0",
}

# Compression effort levels (file header "compression_mode").
COMPRESSION_MODE = {
    0: "fastest",
    1: "fast",
    2: "normal",
    3: "good",
    4: "best",
}
# TODO: Computing the CRC16 would also prove useful
#def markerValidate(self):
# return not self["extend"].value and self["signature"].value == MAGIC and \
# self["host_os"].value<12
class MarkerFlags(StaticFieldSet):
    # 16-bit flag word of the archive (marker) header, least-significant
    # bit first.
    format = (
        (Bit, "extend", "Whether the header is extended"),
        (Bit, "has_comment", "Whether the archive has a comment"),
        (NullBits, "unused", 7, "Reserved bits"),
        (Bit, "sfx", "SFX"),
        (Bit, "limited_dict", "Junior SFX with 256K dictionary"),
        (Bit, "multi_volume", "Part of a set of ACE archives"),
        (Bit, "has_av_string", "This header holds an AV-string"),
        (Bit, "recovery_record", "Recovery record preset"),
        (Bit, "locked", "Archive is locked"),
        (Bit, "solid", "Archive uses solid compression")
    )
def markerFlags(self):
    # Flag parser for block type 0 (archive marker header).
    yield MarkerFlags(self, "flags", "Marker flags")
def markerHeader(self):
    # Header parser for block type 0: fixed fields, then optional
    # AV-string and compressed comment depending on the marker flags.
    yield String(self, "signature", 7, "Signature")
    yield UInt8(self, "ver_extract", "Version needed to extract archive")
    yield UInt8(self, "ver_created", "Version used to create archive")
    yield Enum(UInt8(self, "host_os", "OS where the files were compressed"), HOST_OS)
    yield UInt8(self, "vol_num", "Volume number")
    yield TimeDateMSDOS32(self, "time", "Date and time (MS DOS format)")
    yield Bits(self, "reserved", 64, "Reserved size for future extensions")
    flags = self["flags"]
    if flags["has_av_string"].value:
        yield PascalString8(self, "av_string", "AV String")
    if flags["has_comment"].value:
        size = filesizeHandler(UInt16(self, "comment_size", "Comment size"))
        yield size
        if size.value > 0:
            yield RawBytes(self, "compressed_comment", size.value, \
                "Compressed comment")
class FileFlags(StaticFieldSet):
    # 16-bit flag word of a file-entry header, least-significant bit first.
    format = (
        (Bit, "extend", "Whether the header is extended"),
        (Bit, "has_comment", "Presence of file comment"),
        (Bits, "unused", 10, "Unused bit flags"),
        (Bit, "encrypted", "File encrypted with password"),
        (Bit, "previous", "File continued from previous volume"),
        (Bit, "next", "File continues on the next volume"),
        (Bit, "solid", "File compressed using previously archived files")
    )
def fileFlags(self):
    # Flag parser for block type 1 (file entry).
    yield FileFlags(self, "flags", "File flags")
def fileHeader(self):
    """Header parser for block type 1 (file entry).

    Yields the fixed fields (sizes, timestamp, attributes, CRC and
    compression info), then the filename and the optional compressed
    comment announced by the flag word.
    """
    yield filesizeHandler(UInt32(self, "compressed_size", "Size of the compressed file"))
    yield filesizeHandler(UInt32(self, "uncompressed_size", "Uncompressed file size"))
    yield TimeDateMSDOS32(self, "ftime", "Date and time (MS DOS format)")
    # Only MS-DOS and Win32 hosts use the MS-DOS attribute bit layout;
    # other hosts get a raw hexadecimal dump.
    if self["/header/host_os"].value in (OS_MSDOS, OS_WIN32):
        yield MSDOSFileAttr32(self, "file_attr", "File attributes")
    else:
        yield textHandler(UInt32(self, "file_attr", "File attributes"), hexadecimal)
    # Fixed: the description had an unbalanced closing parenthesis.
    yield textHandler(UInt32(self, "file_crc32", "CRC32 checksum over the compressed file"), hexadecimal)
    yield Enum(UInt8(self, "compression_type", "Type of compression"), COMPRESSION_TYPE)
    yield Enum(UInt8(self, "compression_mode", "Quality of compression"), COMPRESSION_MODE)
    yield textHandler(UInt16(self, "parameters", "Compression parameters"), hexadecimal)
    yield textHandler(UInt16(self, "reserved", "Reserved data"), hexadecimal)
    # Filename
    yield PascalString16(self, "filename", "Filename")
    # Comment
    if self["flags/has_comment"].value:
        yield filesizeHandler(UInt16(self, "comment_size", "Size of the compressed comment"))
        if self["comment_size"].value > 0:
            yield RawBytes(self, "comment_data", self["comment_size"].value, "Comment data")
def fileBody(self):
    # Body parser for a file entry: the compressed payload, if any.
    size = self["compressed_size"].value
    if size > 0:
        yield RawBytes(self, "compressed_data", size, "Compressed data")
def fileDesc(self):
    # Human-readable description for a file entry block.
    return "File entry: %s (%s)" % (self["filename"].value, self["compressed_size"].display)
def recoveryHeader(self):
    # Header parser for block type 2 (recovery record).
    yield filesizeHandler(UInt32(self, "rec_blk_size", "Size of recovery data"))
    # NOTE(review): '.size' is the field's size in bits; every other header
    # parser in this file stores '.value' into body_size (see parseHeader,
    # newRecoveryHeader). This looks like it should be
    # self["rec_blk_size"].value -- TODO confirm against real archives.
    self.body_size = self["rec_blk_size"].size
    yield String(self, "signature", 7, "Signature, normally '**ACE**'")
    yield textHandler(UInt32(self, "relative_start",
        "Relative start (to this block) of the data this block is mode of"),
        hexadecimal)
    yield UInt32(self, "num_blocks", "Number of blocks the data is split into")
    yield UInt32(self, "size_blocks", "Size of these blocks")
    yield UInt16(self, "crc16_blocks", "CRC16 over recovery data")
    # num_blocks blocks of size_blocks bytes follow.
    # The ultimate data is the xor data of all those blocks.
    size = self["size_blocks"].value
    for index in xrange(self["num_blocks"].value):
        yield RawBytes(self, "data[]", size, "Recovery block %i" % index)
    yield RawBytes(self, "xor_data", size, "The XOR value of the above data blocks")
def recoveryDesc(self):
    # NOTE(review): body_size is a plain attribute, not a parsed field, so
    # self["body_size"] is unlikely to resolve here; additionally %u is
    # given a '.display' string. Probably intended:
    # "Recovery block, size=%s" % self.body_size -- TODO confirm.
    return "Recovery block, size=%u" % self["body_size"].display
def newRecoveryHeader(self):
    """
    Header parser for block type 5 ("new recovery").

    This header is described nowhere; the layout below was inferred by
    the original author from its similarity to the type-2 recovery block.
    """
    if self["flags/extend"].value:
        yield filesizeHandler(UInt32(self, "body_size", "Size of the unknown body following"))
        self.body_size = self["body_size"].value
    yield textHandler(UInt32(self, "unknown[]", "Unknown field, probably 0"),
        hexadecimal)
    yield String(self, "signature", 7, "Signature, normally '**ACE**'")
    yield textHandler(UInt32(self, "relative_start",
        "Offset (=crc16's) of this block in the file"), hexadecimal)
    yield textHandler(UInt32(self, "unknown[]",
        "Unknown field, probably 0"), hexadecimal)
class BaseFlags(StaticFieldSet):
    # Minimal 16-bit flag word used for block types without a dedicated
    # flag layout: only the "extend" bit is meaningful.
    format = (
        (Bit, "extend", "Whether the header is extended"),
        (NullBits, "unused", 15, "Unused bit flags")
    )
def parseFlags(self):
    # Generic fallback flag parser for unhandled block types.
    yield BaseFlags(self, "flags", "Unknown flags")
def parseHeader(self):
    # Generic fallback header parser: an extended header carries an
    # explicit body size that the body parser consumes.
    if self["flags/extend"].value:
        yield filesizeHandler(UInt32(self, "body_size", "Size of the unknown body following"))
        self.body_size = self["body_size"].value
def parseBody(self):
    # Generic fallback body parser: dump body_size bytes unparsed.
    if self.body_size > 0:
        yield RawBytes(self, "body_data", self.body_size, "Body data, unhandled")
class Block(FieldSet):
    """Generic ACE block: CRC16, header size, type byte, then
    type-specific flags, header fields and body.

    TAG_INFO maps a block type to (field name, description or description
    callback, flag parser, header parser, body parser); missing parsers
    fall back to the generic module-level parse* functions.
    """
    TAG_INFO = {
        0: ("header", "Archiver header", markerFlags, markerHeader, None),
        1: ("file[]", fileDesc, fileFlags, fileHeader, fileBody),
        2: ("recovery[]", recoveryDesc, recoveryHeader, None, None),
        5: ("new_recovery[]", None, None, newRecoveryHeader, None)
    }

    def __init__(self, parent, name, description=None):
        FieldSet.__init__(self, parent, name, description)
        self.body_size = 0
        self.desc_func = None
        # Fix: pre-initialize the parser hooks so that unknown block types
        # do not hit an AttributeError in the fallback tests below.
        self.parseFlags = None
        self.parseHeader = None
        self.parseBody = None
        type = self["block_type"].value
        if type in self.TAG_INFO:
            self._name, desc, self.parseFlags, self.parseHeader, self.parseBody = self.TAG_INFO[type]
            if desc:
                if isinstance(desc, str):
                    self._description = desc
                else:
                    # Description is computed lazily by a callback.
                    self.desc_func = desc
        else:
            # Fixed log typo: message read "unknown block block".
            self.warning("Processing as unknown block of type %u" % type)
        # Fall back to the generic parsers for any hook left unset.
        if not self.parseFlags:
            self.parseFlags = parseFlags
        if not self.parseHeader:
            self.parseHeader = parseHeader
        if not self.parseBody:
            self.parseBody = parseBody

    def createFields(self):
        yield textHandler(UInt16(self, "crc16", "Archive CRC16 (from byte 4 on)"), hexadecimal)
        yield filesizeHandler(UInt16(self, "head_size", "Block size (from byte 4 on)"))
        yield UInt8(self, "block_type", "Block type")
        # Flags
        for flag in self.parseFlags(self):
            yield flag
        # Rest of the header
        for field in self.parseHeader(self):
            yield field
        # head_size counts from byte 4 onwards, so add back the 2+2 bytes
        # of crc16/head_size when computing leftover header bytes.
        size = self["head_size"].value - (self.current_size//8) + (2+2)
        if size > 0:
            yield RawBytes(self, "extra_data", size, "Extra header data, unhandled")
        # Body in itself
        for field in self.parseBody(self):
            yield field

    def createDescription(self):
        if self.desc_func:
            return self.desc_func(self)
        else:
            # Fix: the field is named "block_type"; self["type"] does not
            # exist and would raise a missing-field error here.
            return "Block: %s" % self["block_type"].display
class AceFile(Parser):
    """Top-level ACE archive parser: a little-endian sequence of Blocks."""
    endian = LITTLE_ENDIAN
    PARSER_TAGS = {
        "id": "ace",
        "category": "archive",
        "file_ext": ("ace",),
        "mime": (u"application/x-ace-compressed",),
        "min_size": 50*8,
        "description": "ACE archive"
    }

    def validate(self):
        # The "**ACE**" magic sits at byte offset 7 (bit offset 7*8),
        # after the marker block's crc16/head_size/type/flags fields.
        if self.stream.readBytes(7*8, len(MAGIC)) != MAGIC:
            return "Invalid magic"
        return True

    def createFields(self):
        while not self.eof:
            yield Block(self, "block[]")
| gpl-3.0 |
vamst/COSMOS2 | examples/ex2.py | 1 | 3562 | import os
import subprocess as sp
from cosmos.api import Cosmos, Dependency, draw_stage_graph, draw_task_graph, \
pygraphviz_available, default_get_submit_args
from functools import partial
from tools import echo, cat, word_count
def recipe(workflow):
    """Build an echo -> cat -> word_count -> summary task DAG on *workflow*."""
    # Create two Tasks that echo "hello" and "world" respectively (source nodes of the dag).
    echo_tasks = [workflow.add_task(func=echo,
                                    params=dict(word=word, out_txt='%s.txt' % word),
                                    uid=word)
                  for word in ['hello', 'world']]

    # Split each echo into two dependent Tasks (a one2many relationship).
    word_count_tasks = []
    for echo_task in echo_tasks:
        word = echo_task.params['word']
        for n in [1, 2]:
            cat_task = workflow.add_task(
                func=cat,
                params=dict(in_txts=[echo_task.params['out_txt']],
                            out_txt='%s/%s/cat.txt' % (word, n)),
                parents=[echo_task],
                uid='%s_%s' % (word, n))

            # Count the words in the previous stage. An example of a simple one2one relationship
            # For each task in StageA, there is a single dependent task in StageB.
            word_count_task = workflow.add_task(
                func=word_count,
                # Dependency instances allow you to specify an input and parent simultaneously
                params=dict(in_txts=[Dependency(cat_task, 'out_txt')],
                            out_txt='%s/%s/wc.txt' % (word, n),
                            chars=True),
                # parents=[cat_task], <-- not necessary!
                uid='%s_%s' % (word, n), )
            word_count_tasks.append(word_count_task)

    # Cat the contents of all word_counts into one file. Only one node is being created who's
    # parents are all of the WordCounts (a many2one relationship, aka a reduce operation).
    summarize_task = workflow.add_task(
        func=cat,
        params=dict(in_txts=[Dependency(t, 'out_txt') for t in word_count_tasks],
                    out_txt='summary.txt'),
        parents=word_count_tasks,
        stage_name='Summary_Analysis',
        uid='')  # It's the only Task in this Stage, so doesn't need a specific uid
if __name__ == '__main__':
    import argparse

    # Choose the distributed resource manager and optional queue.
    p = argparse.ArgumentParser()
    p.add_argument('-drm', default='local', help='', choices=('local', 'drmaa:ge', 'ge'))
    p.add_argument('-q', '--queue', help='Submit to this queue of the DRM supports it')
    args = p.parse_args()

    cosmos = Cosmos('sqlite:///%s/sqlite.db' % os.path.dirname(os.path.abspath(__file__)),
                    # example of how to change arguments if you're NOT using default_drm='local'
                    get_submit_args=partial(default_get_submit_args, parallel_env='smp'),
                    default_drm=args.drm,
                    default_queue=args.queue)
    cosmos.initdb()

    # Run everything inside a dedicated output directory.
    sp.check_call('mkdir -p analysis_output/ex2', shell=True)
    os.chdir('analysis_output/ex2')

    workflow = cosmos.start('Example2', restart=True, skip_confirm=True)
    recipe(workflow)
    workflow.make_output_dirs()
    workflow.run(max_attempts=1, max_cores=10)

    if pygraphviz_available:
        # These images can also be seen on the fly in the web-interface
        draw_stage_graph(workflow.stage_graph(), '/tmp/ex1_task_graph.png', format='png')
        draw_task_graph(workflow.task_graph(), '/tmp/ex1_stage_graph.png', format='png')
    else:
        print 'Pygraphviz is not available :('
| gpl-3.0 |
oleksa-pavlenko/gae-django-project-template | django/templatetags/static.py | 227 | 4084 | from django import template
from django.template.base import Node
from django.utils.encoding import iri_to_uri
from django.utils.six.moves.urllib.parse import urljoin
register = template.Library()
class PrefixNode(template.Node):
    """Template node that renders a settings-derived URL prefix
    (STATIC_URL or MEDIA_URL), optionally storing it in the context."""

    def __repr__(self):
        return "<PrefixNode for %r>" % self.name

    def __init__(self, varname=None, name=None):
        if name is None:
            raise template.TemplateSyntaxError(
                "Prefix nodes must be given a name to return.")
        self.varname = varname
        self.name = name

    @classmethod
    def handle_token(cls, parser, token, name):
        """
        Class method to parse prefix node and return a Node.
        """
        # token.split_contents() isn't useful here because tags using this method don't accept variable as arguments
        tokens = token.contents.split()
        if len(tokens) > 1 and tokens[1] != 'as':
            raise template.TemplateSyntaxError(
                "First argument in '%s' must be 'as'" % tokens[0])
        if len(tokens) > 1:
            # NOTE(review): assumes a variable name follows 'as';
            # "{% tag as %}" with nothing after would raise IndexError here.
            varname = tokens[2]
        else:
            varname = None
        return cls(varname, name)

    @classmethod
    def handle_simple(cls, name):
        # Degrade to an empty prefix when settings are unavailable.
        try:
            from django.conf import settings
        except ImportError:
            prefix = ''
        else:
            prefix = iri_to_uri(getattr(settings, name, ''))
        return prefix

    def render(self, context):
        # Either output the prefix directly, or stash it under varname
        # and output nothing.
        prefix = self.handle_simple(self.name)
        if self.varname is None:
            return prefix
        context[self.varname] = prefix
        return ''
@register.tag
def get_static_prefix(parser, token):
    """
    Populates a template variable with the static prefix,
    ``settings.STATIC_URL``.

    Usage::

        {% get_static_prefix [as varname] %}

    Examples::

        {% get_static_prefix %}
        {% get_static_prefix as static_prefix %}
    """
    return PrefixNode.handle_token(parser, token, "STATIC_URL")
@register.tag
def get_media_prefix(parser, token):
    """
    Populates a template variable with the media prefix,
    ``settings.MEDIA_URL``.

    Usage::

        {% get_media_prefix [as varname] %}

    Examples::

        {% get_media_prefix %}
        {% get_media_prefix as media_prefix %}
    """
    return PrefixNode.handle_token(parser, token, "MEDIA_URL")
class StaticNode(Node):
    """Template node that joins a path onto STATIC_URL, optionally
    storing the result in the context instead of rendering it."""

    def __init__(self, varname=None, path=None):
        if path is None:
            raise template.TemplateSyntaxError(
                "Static template nodes must be given a path to return.")
        self.path = path
        self.varname = varname

    def url(self, context):
        # self.path is a compiled FilterExpression; resolve it first.
        path = self.path.resolve(context)
        return self.handle_simple(path)

    def render(self, context):
        url = self.url(context)
        if self.varname is None:
            return url
        context[self.varname] = url
        return ''

    @classmethod
    def handle_simple(cls, path):
        # Join against the STATIC_URL prefix from settings.
        return urljoin(PrefixNode.handle_simple("STATIC_URL"), path)

    @classmethod
    def handle_token(cls, parser, token):
        """
        Class method to parse prefix node and return a Node.
        """
        bits = token.split_contents()

        if len(bits) < 2:
            raise template.TemplateSyntaxError(
                "'%s' takes at least one argument (path to file)" % bits[0])

        path = parser.compile_filter(bits[1])

        # "{% static path as varname %}" -> bits[-2] == 'as', name at bits[3].
        if len(bits) >= 2 and bits[-2] == 'as':
            varname = bits[3]
        else:
            varname = None

        return cls(varname, path)
@register.tag('static')
def do_static(parser, token):
    """
    Joins the given path with the STATIC_URL setting.

    Usage::

        {% static path [as varname] %}

    Examples::

        {% static "myapp/css/base.css" %}
        {% static variable_with_path %}
        {% static "myapp/css/base.css" as admin_base_css %}
        {% static variable_with_path as varname %}
    """
    return StaticNode.handle_token(parser, token)
def static(path):
    # Python-level helper mirroring the {% static %} tag for use in code.
    return StaticNode.handle_simple(path)
| mit |
longmen21/edx-platform | scripts/run_watch_data.py | 182 | 1672 | #! /usr/bin/env python
# This script requires that you have watchdog installed. You can install
# watchdog via 'pip install watchdog'
import sys
import time
import logging
import os
from subprocess import Popen
from signal import SIGTERM
from watchdog.observers import Observer
from watchdog.events import LoggingEventHandler, FileSystemEventHandler
# To watch more (or more specific) directories, change WATCH_DIRS to include the
# directories you want to watch. Note that this is recursive. If you want to
# watch fewer or more extensions, you can change EXTENSIONS. To watch all
# extensions, add "*" to EXTENSIONS.
WATCH_DIRS = ["../data", "common/lib/xmodule/xmodule/js", "common/lib/xmodule/xmodule/css"]
EXTENSIONS = ["*", "xml", "js", "css", "coffee", "scss", "html"]

# Normalize entries to absolute, canonical paths for the watchdog observers.
WATCH_DIRS = [os.path.abspath(os.path.normpath(dir)) for dir in WATCH_DIRS]
class DjangoEventHandler(FileSystemEventHandler):
    """On any change to a watched file, touches lms/__init__.py so the
    Django dev server's autoreloader restarts."""

    def __init__(self, process):
        super(DjangoEventHandler, self).__init__()
        # Handle to the server subprocess (kept for reference).
        self.process = process

    def on_any_event(self, event):
        for extension in EXTENSIONS:
            if event.src_path.endswith(extension) or extension == "*":
                print "%s changed: restarting server." % event.src_path
                # Touching a file Django watches triggers its reloader.
                os.system("touch lms/__init__.py")
                break
if __name__ == "__main__":
    # Start the LMS server and watch all configured directories;
    # Ctrl-C stops the observer cleanly.
    event_handler = DjangoEventHandler(Popen(['paver', 'lms']))

    observer = Observer()
    for dir in WATCH_DIRS:
        observer.schedule(event_handler, dir, recursive=True)
    observer.start()

    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
| agpl-3.0 |
arenadata/ambari | ambari-server/src/main/resources/stacks/ADH/1.4/services/ATLAS/package/scripts/metadata.py | 1 | 10415 | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import hashlib
from resource_management import Package
from resource_management import StackFeature
from resource_management.core.resources.system import Directory, File, Execute
from resource_management.core.source import StaticFile, InlineTemplate, Template
from resource_management.core.exceptions import Fail
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.decorator import retry
from resource_management.libraries.functions import solr_cloud_util
from resource_management.libraries.functions.stack_features import check_stack_feature, get_stack_feature_version
from resource_management.libraries.resources.properties_file import PropertiesFile
from resource_management.libraries.resources.template_config import TemplateConfig
from resource_management.libraries.resources.xml_config import XmlConfig
from resource_management.libraries.functions.is_empty import is_empty
from resource_management.libraries.resources.modify_properties_file import ModifyPropertiesFile
def metadata(type='server'):
  """Lay down Atlas configuration files, directories and helper scripts.

  type: 'server' additionally creates the server-only directories, the
  expanded WAR, log4j/env configs, Solr collections/znodes and the
  HBase/Kafka bootstrap scripts; any other value configures only the
  pieces shared with clients.
  """
  import params

  # Needed by both Server and Client
  Directory(params.conf_dir,
            mode=0755,
            cd_access='a',
            owner=params.metadata_user,
            group=params.user_group,
            create_parents = True
  )

  if type == "server":
    Directory([params.pid_dir],
              mode=0755,
              cd_access='a',
              owner=params.metadata_user,
              group=params.user_group,
              create_parents = True
    )
    Directory(format('{conf_dir}/solr'),
              mode=0755,
              cd_access='a',
              owner=params.metadata_user,
              group=params.user_group,
              create_parents = True,
              recursive_ownership=True
    )
    Directory(params.log_dir,
              mode=0755,
              cd_access='a',
              owner=params.metadata_user,
              group=params.user_group,
              create_parents = True
    )
    Directory(params.data_dir,
              mode=0644,
              cd_access='a',
              owner=params.metadata_user,
              group=params.user_group,
              create_parents = True
    )
    Directory(params.expanded_war_dir,
              mode=0644,
              cd_access='a',
              owner=params.metadata_user,
              group=params.user_group,
              create_parents = True
    )
    # Copy the packaged WAR into the directory the server expands/serves.
    File(format("{expanded_war_dir}/atlas.war"),
         content = StaticFile(format('{metadata_home}/server/webapp/atlas.war'))
    )
    File(format("{conf_dir}/atlas-log4j.xml"),
         mode=0644,
         owner=params.metadata_user,
         group=params.user_group,
         content=InlineTemplate(params.metadata_log4j_content)
    )
    File(format("{conf_dir}/atlas-env.sh"),
         owner=params.metadata_user,
         group=params.user_group,
         mode=0755,
         content=InlineTemplate(params.metadata_env_content)
    )

    # Seed the admin user with a SHA-256 password hash when both
    # credentials are configured.
    if not is_empty(params.atlas_admin_username) and not is_empty(params.atlas_admin_password):
      psswd_output = hashlib.sha256(params.atlas_admin_password).hexdigest()
      ModifyPropertiesFile(format("{conf_dir}/users-credentials.properties"),
                           properties = {format('{atlas_admin_username}') : format('ROLE_ADMIN::{psswd_output}')},
                           owner = params.metadata_user
      )

    # Fix ownership/permissions of credential files that may pre-exist.
    files_to_chown = [format("{conf_dir}/policy-store.txt"), format("{conf_dir}/users-credentials.properties")]
    for file in files_to_chown:
      if os.path.exists(file):
        Execute(('chown', format('{metadata_user}:{user_group}'), file),
                sudo=True
        )
        Execute(('chmod', '644', file),
                sudo=True
        )

    if params.metadata_solrconfig_content:
      File(format("{conf_dir}/solr/solrconfig.xml"),
           mode=0644,
           owner=params.metadata_user,
           group=params.user_group,
           content=InlineTemplate(params.metadata_solrconfig_content)
      )

  # Needed by both Server and Client
  PropertiesFile(format('{conf_dir}/{conf_file}'),
                 properties = params.application_properties,
                 mode=0644,
                 owner=params.metadata_user,
                 group=params.user_group
  )

  if params.security_enabled:
    TemplateConfig(format(params.atlas_jaas_file),
                   owner=params.metadata_user)

  if type == 'server' and params.search_backend_solr and params.has_infra_solr:
    solr_cloud_util.setup_solr_client(params.config)
    check_znode()
    jaasFile=params.atlas_jaas_file if params.security_enabled else None
    upload_conf_set('atlas_configs', jaasFile)

    if params.security_enabled: # update permissions before creating the collections
      solr_cloud_util.add_solr_roles(params.config,
                                     roles = [params.infra_solr_role_atlas, params.infra_solr_role_ranger_audit, params.infra_solr_role_dev],
                                     new_service_principals = [params.atlas_jaas_principal])

    # The three Atlas search indexes.
    create_collection('vertex_index', 'atlas_configs', jaasFile)
    create_collection('edge_index', 'atlas_configs', jaasFile)
    create_collection('fulltext_index', 'atlas_configs', jaasFile)

    if params.security_enabled:
      secure_znode(format('{infra_solr_znode}/configs/atlas_configs'), jaasFile)
      secure_znode(format('{infra_solr_znode}/collections/vertex_index'), jaasFile)
      secure_znode(format('{infra_solr_znode}/collections/edge_index'), jaasFile)
      secure_znode(format('{infra_solr_znode}/collections/fulltext_index'), jaasFile)
      # Kerberized cluster: install the secure HBase bootstrap script.
      File(params.atlas_hbase_secure_setup,
           group=params.user_group,
           owner=params.hbase_user,
           content=Template("atlas_hbase_secure_setup.rb.j2")
      )
    else:
      File(params.atlas_hbase_setup,
           group=params.user_group,
           owner=params.hbase_user,
           content=Template("atlas_hbase_setup.rb.j2")
      )

  # NOTE(review): computed but never used below -- TODO confirm intent.
  is_atlas_upgrade_support = check_stack_feature(StackFeature.ATLAS_UPGRADE_SUPPORT, get_stack_feature_version(params.config))

  if params.security_enabled:
    File(params.atlas_kafka_setup,
         group=params.user_group,
         owner=params.kafka_user,
         content=Template("atlas_kafka_acl.sh.j2"))

    # files required only in case if kafka broker is not present on the host as configured component
    if not params.host_with_kafka:
      File(format("{kafka_conf_dir}/kafka-env.sh"),
           owner=params.kafka_user,
           content=InlineTemplate(params.kafka_env_sh_template))

      File(format("{kafka_conf_dir}/kafka_jaas.conf"),
           group=params.user_group,
           owner=params.kafka_user,
           content=Template("kafka_jaas.conf.j2"))

  # On NameNode-HA clusters Atlas needs its own hdfs-site.xml copy;
  # otherwise remove any stale one.
  if params.stack_supports_atlas_hdfs_site_on_namenode_ha and len(params.namenode_host) > 1:
    XmlConfig("hdfs-site.xml",
              conf_dir=params.conf_dir,
              configurations=params.config['configurations']['hdfs-site'],
              configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
              owner=params.metadata_user,
              group=params.user_group,
              mode=0644
    )
  else:
    File(format('{conf_dir}/hdfs-site.xml'), action="delete")

  '''
  Atlas requires hadoop core-site.xml to resolve users/groups synced in HadoopUGI for
  authentication and authorization process. Earlier the core-site.xml was available in
  Hbase conf directory which is a part of Atlas class-path, from stack 2.6 onwards,
  core-site.xml is no more available in Hbase conf directory. Hence need to create
  core-site.xml in Atlas conf directory.
  '''
  Directory(format('{metadata_home}/'),
            owner = params.metadata_user,
            group = params.user_group,
            recursive_ownership = True,
  )
def upload_conf_set(config_set, jaasFile):
import params
solr_cloud_util.upload_configuration_to_zk(
zookeeper_quorum=params.zookeeper_quorum,
solr_znode=params.infra_solr_znode,
config_set_dir=format("{conf_dir}/solr"),
config_set=config_set,
tmp_dir=params.tmp_dir,
java64_home=params.java64_home,
solrconfig_content=InlineTemplate(params.metadata_solrconfig_content),
jaas_file=jaasFile,
retry=30, interval=5)
def create_collection(collection, config_set, jaasFile):
  """Create the given Solr collection from a previously uploaded config set."""
  import params

  collection_args = {
      'zookeeper_quorum': params.zookeeper_quorum,
      'solr_znode': params.infra_solr_znode,
      'collection': collection,
      'config_set': config_set,
      'java64_home': params.java64_home,
      'jaas_file': jaasFile,
      'shards': params.atlas_solr_shards,
      'replication_factor': params.infra_solr_replication_factor,
  }
  solr_cloud_util.create_collection(**collection_args)
def secure_znode(znode, jaasFile):
  """Restrict the given znode to the Atlas JAAS principal via SASL ACLs."""
  import params

  solr_cloud_util.secure_znode(
      config=params.config,
      zookeeper_quorum=params.zookeeper_quorum,
      solr_znode=znode,
      jaas_file=jaasFile,
      java64_home=params.java64_home,
      sasl_users=[params.atlas_jaas_principal])
@retry(times=10, sleep_time=5, err_class=Fail)
def check_znode():
  """Verify the Infra Solr znode exists, retrying up to 10 times (5s apart)."""
  import params

  znode_check_args = dict(
      zookeeper_quorum=params.zookeeper_quorum,
      solr_znode=params.infra_solr_znode,
      java64_home=params.java64_home)
  solr_cloud_util.check_znode(**znode_check_args)
| apache-2.0 |
lukeiwanski/tensorflow | tensorflow/contrib/timeseries/python/timeseries/state_management.py | 67 | 11936 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes for wrapping a model to operate on different data shapes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from tensorflow.contrib.timeseries.python.timeseries import feature_keys
from tensorflow.contrib.timeseries.python.timeseries import math_utils
from tensorflow.contrib.timeseries.python.timeseries.model import ModelOutputs
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import nest
class PassthroughStateManager(object):
  """A minimal wrapper for models which do not need state management."""

  def __init__(self):
    self._graph_initialized = False
    self._input_statistics = None

  def initialize_graph(self, model, input_statistics=None):
    """Adds required operations to the graph."""
    del model  # unused
    self._input_statistics = input_statistics
    self._graph_initialized = True

  def define_loss(self, model, features, mode):
    """Compute the model's loss with no state handling at all.

    Args:
      model: The model (inheriting from TimeSeriesModel); its ``define_loss``
        is called directly.
      features: A dictionary with at least
        feature_keys.TrainEvalFeatures.TIMES (a [batch size x window size]
        Tensor of observation times) and
        feature_keys.TrainEvalFeatures.VALUES (a [batch size x window size x
        num features] Tensor of observed values).
      mode: The tf.estimator.ModeKeys mode to use (TRAIN or EVAL).
    Returns:
      A ModelOutputs object.
    Raises:
      ValueError: If a start state override is present in `features`.
    """
    state_override_requested = feature_keys.State.STATE_TUPLE in features
    if state_override_requested:
      raise ValueError(
          "Overriding start state is not supported for this model.")
    return model.define_loss(features, mode)
class _OverridableStateManager(PassthroughStateManager):
  """Base class for state managers which support overriding model state."""

  @abc.abstractmethod
  def _define_loss_with_saved_state(self, model, features, mode):
    pass

  def define_loss(self, model, features, mode):
    """Switches between explicit start state and managed state."""
    state_key = feature_keys.FilteringFeatures.STATE_TUPLE
    if state_key not in features:
      # No explicit start state; use managed state.
      return self._define_loss_with_saved_state(
          model=model, features=features, mode=mode)
    # Explicit start state has been provided, so we should use that.
    if mode == estimator_lib.ModeKeys.TRAIN:
      raise ValueError(
          "Overriding saved state for training is not supported (but a value "
          "for feature {} was specified).".format(state_key))
    start_state = features.pop(state_key)
    return model.get_batch_loss(
        features=features, mode=mode, state=start_state)
class FilteringOnlyStateManager(_OverridableStateManager):
  """State manager for models which use state only for filtering.

  Window-based models (ARModel) do not need state fed during training; they
  instead require a fixed window size. Rather than demanding a minimum window
  for filtering, such models carry the window inside their state, so saved
  state only matters when filtering — training simply delegates to the model.
  """

  def _define_loss_with_saved_state(self, model, features, mode):
    # Managed state is unnecessary here; defer directly to the model.
    return model.define_loss(features, mode)
class ChainingStateManager(_OverridableStateManager):
  """Maintains state across a batch for SequentialTimeSeriesModel subclasses.

  The batch dimension is treated as indexing sequential chunks of the same
  timeseries. End state from each chunk is fed as start state to the next chunk
  during the next timestep. This is an approximation to full-batch training for
  sequential models, but is typically much faster while still accurately
  recovering parameters. The speedup comes from reduced scheduling overhead of
  TensorFlow ops, since each operation can do much more work.
  """

  def __init__(self, state_saving_interval=20, checkpoint_state=False):
    """Initialize the state manager.

    Args:
      state_saving_interval: This state manager saves intermediate model state
          every `state_saving_interval` times. Larger values save memory, and
          checkpoint size if `checkpoint_state` is enabled, but models
          will need to impute across artificial gaps of up to this size
          (i.e. gaps not appearing in the original data). This imputation may
          affect training. Set state_saving_interval to 1 to avoid any
          artificial imputation.
      checkpoint_state: If True, saved intermediate model state will be
          written to checkpoints. Checkpoints will then scale with dataset
          size. If False, state will be freshly imputed from the beginning of a
          series each time the model is restored, which means it may take a few
          iterations for state to warm up.
    """
    super(ChainingStateManager, self).__init__()
    self._checkpoint_state = checkpoint_state
    self._state_saving_interval = state_saving_interval
    # Both are populated lazily by initialize_graph().
    self._start_state = None
    self._cached_states = None

  def initialize_graph(self, model, input_statistics=None):
    """Adds required operations to the graph."""
    super(ChainingStateManager, self).initialize_graph(
        model=model, input_statistics=input_statistics)
    self._start_state = model.get_start_state()
    # Hash table mapping chunk number -> saved model state; missing keys fall
    # back to the model's start state. empty_key=-1 is reserved by the lookup
    # structure and must never be a real chunk number.
    self._cached_states = math_utils.TupleOfTensorsLookup(
        key_dtype=dtypes.int64,
        default_values=self._start_state,
        empty_key=-1,
        name="cached_states",
        checkpoint=self._checkpoint_state)

  def _define_loss_with_saved_state(self, model, features, mode):
    """Feeds end state from one training iteration into the next.

    Args:
      model: The model to wrap. Compatible with children of TimeSeriesModel.
      features: Dictionary with Tensor values defining the data to be
        processed. The expected key/value pairs are at minimum:
          feature_keys.TrainEvalFeatures.TIMES: A [number of chunks x window
            size] Tensor with times for each observation, the result of
            chunking a single longer time series.
          feature_keys.TrainEvalFeatures.VALUES: A [number of chunks x window
            size x num features] Tensor with values for each observation,
            corresponding to times.
      mode: The tf.estimator.ModeKeys mode to use. For EVAL and INFER, no
        batching is performed, which may be slow. This is to avoid giving
        cached and almost certainly stale values.
    Returns:
      A ModelOutputs object.
    Raises:
      ValueError: If initialize_graph has not been called.
    """
    if not self._graph_initialized:
      raise ValueError("ChainingStateManager requires initialize_graph() to be "
                       "called before use.")
    (loss_op, end_state, batch_predictions) = self._update_cached_states(
        model=model,
        features=features,
        mode=mode)
    # Add a batch dimension so state can be used directly (e.g. for predictions)
    # without the user manually reshaping it.
    last_end_state_flat = [end_state_value[-1][None]
                           for end_state_value in nest.flatten(end_state)]
    batch_predictions["observed"] = features[
        feature_keys.TrainEvalFeatures.VALUES]
    return ModelOutputs(
        loss=loss_op,
        end_state=nest.pack_sequence_as(end_state, last_end_state_flat),
        predictions=batch_predictions,
        prediction_times=features[feature_keys.TrainEvalFeatures.TIMES])

  def _get_chunk_number(self, time):
    # Buckets timestamps into fixed-size chunks; one cache entry per chunk.
    return time // self._state_saving_interval

  def _get_cached_states(self, times):
    """Retrieve cached states for a batch of times."""
    read_chunk_numbers = self._get_chunk_number(times)
    looked_up_state = list(self._cached_states.lookup(
        math_ops.cast(read_chunk_numbers, dtypes.int64)))
    looked_up_state = tuple(looked_up_state)
    # We need to special-case the first chunk in a series to explicitly rely on
    # the model's starting state so that gradients flow back to it. Otherwise it
    # would affect only initialization, and would not be read from or updated
    # during training. Not doing this also isolates that part of the graph,
    # leading to errors on model reload if there are trainable variables
    # affecting a model's start state.
    if self._input_statistics is not None:
      start_time = self._input_statistics.start_time
    else:
      start_time = 0
    set_to_start_state = math_ops.equal(read_chunk_numbers,
                                        self._get_chunk_number(start_time))
    new_states = []
    # Element-wise select between the (replicated) start state and the cached
    # value, for every tensor in the state structure.
    for start_state_value, cache_variable in zip(
        nest.flatten(
            math_utils.replicate_state(self._start_state,
                                       array_ops.shape(times)[0])),
        nest.flatten(looked_up_state)):
      new_states.append(
          array_ops.where(set_to_start_state, start_state_value,
                          cache_variable))
    looked_up_state = nest.pack_sequence_as(looked_up_state, new_states)
    return looked_up_state

  def _update_cached_states(self, model, features, mode):
    """Read, process, and write chunks to the cache."""
    times = features[feature_keys.TrainEvalFeatures.TIMES]
    # Start state for each chunk comes from the cache keyed by the chunk's
    # first timestamp.
    looked_up_state = self._get_cached_states(times[:, 0])
    (model_loss, intermediate_states,
     batch_predictions) = model.per_step_batch_loss(
         features=features,
         mode=mode,
         state=looked_up_state)
    # We need to at least write to the bucket after the one we read from.
    min_chunk_numbers = self._get_chunk_number(times) + 1
    # We write to the bucket that would have been read had the window started at
    # the next sample (except for the last sample in the window, which gets
    # written to the next bucket). This assumes fixed missing times (i.e. if we
    # were presented with times [10, 50] we will never see times [30, 50]).
    #
    # TODO(allenl): Retrieve the highest time less than the current time rather
    # than relying on fixed bucketing.
    write_chunk_numbers = math_ops.maximum(
        self._get_chunk_number(array_ops.concat(
            [times[:, 1:], times[:, -1:] + 1], axis=1)),
        min_chunk_numbers)
    # Write once for every computed state; this may mean that we write multiple
    # times to the same cell, but later writes will take precedence.
    save_ops = [
        self._cached_states.insert(
            keys=write_chunk_numbers,
            values=intermediate_states)]
    end_state = nest.pack_sequence_as(
        intermediate_states,
        [state_element[:, -1]
         for state_element in nest.flatten(intermediate_states)])
    with ops.control_dependencies(save_ops):
      # Make sure end states get saved at each iteration
      loss_op = array_ops.identity(model_loss)
    return loss_op, end_state, batch_predictions
| apache-2.0 |
Changaco/oh-mainline | vendor/packages/django-tastypie/tastypie/serializers.py | 34 | 19714 | from __future__ import unicode_literals
import datetime
import re
import django
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
from django.utils.encoding import force_text, smart_bytes
from django.core.serializers import json as djangojson
from tastypie.bundle import Bundle
from tastypie.exceptions import BadRequest, UnsupportedFormat
from tastypie.utils import format_datetime, format_date, format_time, make_naive
try:
    import defusedxml.lxml as lxml
    from defusedxml.common import DefusedXmlException
    from defusedxml.lxml import parse as parse_xml
    from lxml.etree import Element, tostring, LxmlError, XMLParser
except ImportError:
    lxml = None

try:
    import yaml
    from django.core.serializers import pyyaml
except ImportError:
    yaml = None

try:
    import biplist
except ImportError:
    biplist = None

import json


# Matches an XML declaration (e.g. ``<?xml version="1.0"?>``) so it can be
# stripped before handing documents to lxml, which rejects declarations in
# unicode input. Raw string: ``'\?'`` in a plain string is an invalid escape
# sequence (DeprecationWarning since Python 3.6).
XML_ENCODING = re.compile(r'<\?xml.*?\?>', re.IGNORECASE)

# Ugh & blah.
# So doing a regular dump is generally fine, since Tastypie doesn't usually
# serialize advanced types. *HOWEVER*, it will dump out Python Unicode strings
# as a custom YAML tag, which of course ``yaml.safe_load`` can't handle.
if yaml is not None:
    from yaml.constructor import SafeConstructor
    from yaml.loader import Reader, Scanner, Parser, Composer, Resolver

    class TastypieConstructor(SafeConstructor):
        # SafeConstructor that additionally accepts the python/unicode tag
        # emitted for unicode strings by a plain ``yaml.dump``.
        def construct_yaml_unicode_dammit(self, node):
            value = self.construct_scalar(node)
            try:
                return value.encode('ascii')
            except UnicodeEncodeError:
                return value

    TastypieConstructor.add_constructor(u'tag:yaml.org,2002:python/unicode', TastypieConstructor.construct_yaml_unicode_dammit)

    class TastypieLoader(Reader, Scanner, Parser, Composer, TastypieConstructor, Resolver):
        # Safe YAML loader assembled from the standard components, but using
        # TastypieConstructor so python/unicode tags do not raise.
        def __init__(self, stream):
            Reader.__init__(self, stream)
            Scanner.__init__(self)
            Parser.__init__(self)
            Composer.__init__(self)
            TastypieConstructor.__init__(self)
            Resolver.__init__(self)
class Serializer(object):
    """
    A swappable class for serialization.

    This handles most types of data as well as the following output formats::

        * json
        * jsonp (Disabled by default)
        * xml
        * yaml
        * html
        * plist (see http://explorapp.com/biplist/)

    It was designed to make changing behavior easy, either by overriding the
    various format methods (i.e. ``to_json``), by changing the
    ``formats/content_types`` options or by altering the other hook methods.
    """
    # Formats enabled by default; note that ``jsonp`` is intentionally absent.
    formats = ['json', 'xml', 'yaml', 'html', 'plist']

    # Maps each short format name to the MIME type used for content negotiation.
    content_types = {'json': 'application/json',
                     'jsonp': 'text/javascript',
                     'xml': 'application/xml',
                     'yaml': 'text/yaml',
                     'html': 'text/html',
                     'plist': 'application/x-plist'}

    def __init__(self, formats=None, content_types=None, datetime_formatting=None):
        """
        Optionally override the class-level ``formats``, ``content_types`` and
        datetime formatting (``iso-8601`` by default, falling back to the
        ``TASTYPIE_DATETIME_FORMATTING`` setting).

        Raises ``ImproperlyConfigured`` if ``formats`` is not a list/tuple or
        if a listed format has no registered content type.
        """
        if datetime_formatting is not None:
            self.datetime_formatting = datetime_formatting
        else:
            self.datetime_formatting = getattr(settings, 'TASTYPIE_DATETIME_FORMATTING', 'iso-8601')
        self.supported_formats = []
        if content_types is not None:
            self.content_types = content_types
        if formats is not None:
            self.formats = formats
        if self.formats is Serializer.formats and hasattr(settings, 'TASTYPIE_DEFAULT_FORMATS'):
            # We want TASTYPIE_DEFAULT_FORMATS to override unmodified defaults
            # but not intentional changes on Serializer subclasses (identity
            # check: a subclass that redefines ``formats`` gets a new object).
            self.formats = settings.TASTYPIE_DEFAULT_FORMATS
        if not isinstance(self.formats, (list, tuple)):
            raise ImproperlyConfigured('Formats should be a list or tuple, not %r' % self.formats)
        for format in self.formats:
            try:
                self.supported_formats.append(self.content_types[format])
            except KeyError:
                raise ImproperlyConfigured("Content type for specified type '%s' not found. Please provide it at either the class level or via the arguments." % format)

    def get_mime_for_format(self, format):
        """
        Given a format, attempts to determine the correct MIME type.

        If not available on the current ``Serializer``, returns
        ``application/json`` by default.
        """
        try:
            return self.content_types[format]
        except KeyError:
            return 'application/json'

    def format_datetime(self, data):
        """
        A hook to control how datetimes are formatted.

        Can be overridden at the ``Serializer`` level (``datetime_formatting``)
        or globally (via ``settings.TASTYPIE_DATETIME_FORMATTING``).

        Default is ``iso-8601``, which looks like "2010-12-16T03:02:14".
        """
        data = make_naive(data)
        if self.datetime_formatting == 'rfc-2822':
            # Calls the module-level tastypie.utils.format_datetime helper,
            # not this method (no recursion).
            return format_datetime(data)
        if self.datetime_formatting == 'iso-8601-strict':
            # Remove microseconds to strictly adhere to iso-8601
            data = data - datetime.timedelta(microseconds = data.microsecond)
        return data.isoformat()

    def format_date(self, data):
        """
        A hook to control how dates are formatted.

        Can be overridden at the ``Serializer`` level (``datetime_formatting``)
        or globally (via ``settings.TASTYPIE_DATETIME_FORMATTING``).

        Default is ``iso-8601``, which looks like "2010-12-16".
        """
        if self.datetime_formatting == 'rfc-2822':
            # Module-level tastypie.utils helper, not this method.
            return format_date(data)
        return data.isoformat()

    def format_time(self, data):
        """
        A hook to control how times are formatted.

        Can be overridden at the ``Serializer`` level (``datetime_formatting``)
        or globally (via ``settings.TASTYPIE_DATETIME_FORMATTING``).

        Default is ``iso-8601``, which looks like "03:02:14".
        """
        if self.datetime_formatting == 'rfc-2822':
            # Module-level tastypie.utils helper, not this method.
            return format_time(data)
        if self.datetime_formatting == 'iso-8601-strict':
            # Remove microseconds to strictly adhere to iso-8601; times do not
            # support subtraction, so go through a dummy datetime.
            data = (datetime.datetime.combine(datetime.date(1,1,1),data) - datetime.timedelta(microseconds = data.microsecond)).time()
        return data.isoformat()

    def serialize(self, bundle, format='application/json', options=None):
        """
        Given some data and a format, calls the correct method to serialize
        the data and returns the result.

        ``format`` is a MIME type; it is resolved to a short format name whose
        ``to_<format>`` method exists on this serializer.
        """
        desired_format = None
        if options is None:
            options = {}
        for short_format, long_format in self.content_types.items():
            if format == long_format:
                if hasattr(self, "to_%s" % short_format):
                    desired_format = short_format
                    break
        if desired_format is None:
            raise UnsupportedFormat("The format indicated '%s' had no available serialization method. Please check your ``formats`` and ``content_types`` on your Serializer." % format)
        serialized = getattr(self, "to_%s" % desired_format)(bundle, options)
        return serialized

    def deserialize(self, content, format='application/json'):
        """
        Given some data and a format, calls the correct method to deserialize
        the data and returns the result.

        ``format`` may include content-type parameters (e.g. ``; charset=``);
        anything after the first ``;`` is ignored.
        """
        desired_format = None
        format = format.split(';')[0]
        for short_format, long_format in self.content_types.items():
            if format == long_format:
                if hasattr(self, "from_%s" % short_format):
                    desired_format = short_format
                    break
        if desired_format is None:
            raise UnsupportedFormat("The format indicated '%s' had no available deserialization method. Please check your ``formats`` and ``content_types`` on your Serializer." % format)
        if isinstance(content, six.binary_type):
            content = force_text(content)
        deserialized = getattr(self, "from_%s" % desired_format)(content)
        return deserialized

    def to_simple(self, data, options):
        """
        For a piece of data, attempts to recognize it and provide a simplified
        form of something complex.

        This brings complex Python data structures down to native types of the
        serialization format(s).
        """
        if isinstance(data, (list, tuple)):
            return [self.to_simple(item, options) for item in data]
        if isinstance(data, dict):
            return dict((key, self.to_simple(val, options)) for (key, val) in data.items())
        elif isinstance(data, Bundle):
            return dict((key, self.to_simple(val, options)) for (key, val) in data.data.items())
        elif hasattr(data, 'dehydrated_type'):
            # Tastypie field objects: to-one related fields simplify to either
            # the full related resource or just its value; to-many fields
            # simplify to a list of the same.
            if getattr(data, 'dehydrated_type', None) == 'related' and data.is_m2m == False:
                if data.full:
                    return self.to_simple(data.fk_resource, options)
                else:
                    return self.to_simple(data.value, options)
            elif getattr(data, 'dehydrated_type', None) == 'related' and data.is_m2m == True:
                if data.full:
                    return [self.to_simple(bundle, options) for bundle in data.m2m_bundles]
                else:
                    return [self.to_simple(val, options) for val in data.value]
            else:
                return self.to_simple(data.value, options)
        elif isinstance(data, datetime.datetime):
            return self.format_datetime(data)
        elif isinstance(data, datetime.date):
            return self.format_date(data)
        elif isinstance(data, datetime.time):
            return self.format_time(data)
        elif isinstance(data, bool):
            return data
        elif isinstance(data, (six.integer_types, float)):
            return data
        elif data is None:
            return None
        else:
            # Anything unrecognized falls back to its text representation.
            return force_text(data)

    def to_etree(self, data, options=None, name=None, depth=0):
        """
        Given some data, converts that data to an ``etree.Element`` suitable
        for use in the XML output.
        """
        if isinstance(data, (list, tuple)):
            # NOTE(review): this first assignment is immediately overwritten
            # by both branches below and appears to be dead code.
            element = Element(name or 'objects')
            if name:
                element = Element(name)
                element.set('type', 'list')
            else:
                element = Element('objects')
            for item in data:
                element.append(self.to_etree(item, options, depth=depth+1))
            element[:] = sorted(element, key=lambda x: x.tag)
        elif isinstance(data, dict):
            if depth == 0:
                # Top-level hashes become the document root.
                element = Element(name or 'response')
            else:
                element = Element(name or 'object')
                element.set('type', 'hash')
            for (key, value) in data.items():
                element.append(self.to_etree(value, options, name=key, depth=depth+1))
            element[:] = sorted(element, key=lambda x: x.tag)
        elif isinstance(data, Bundle):
            element = Element(name or 'object')
            for field_name, field_object in data.data.items():
                element.append(self.to_etree(field_object, options, name=field_name, depth=depth+1))
            element[:] = sorted(element, key=lambda x: x.tag)
        elif hasattr(data, 'dehydrated_type'):
            # Tastypie field objects; mirrors the related-field handling in
            # ``to_simple`` but builds elements instead.
            if getattr(data, 'dehydrated_type', None) == 'related' and data.is_m2m == False:
                if data.full:
                    return self.to_etree(data.fk_resource, options, name, depth+1)
                else:
                    return self.to_etree(data.value, options, name, depth+1)
            elif getattr(data, 'dehydrated_type', None) == 'related' and data.is_m2m == True:
                if data.full:
                    element = Element(name or 'objects')
                    for bundle in data.m2m_bundles:
                        element.append(self.to_etree(bundle, options, bundle.resource_name, depth+1))
                else:
                    element = Element(name or 'objects')
                    for value in data.value:
                        element.append(self.to_etree(value, options, name, depth=depth+1))
            else:
                return self.to_etree(data.value, options, name)
        else:
            # Scalar leaf: simplify it and annotate the element with a type
            # hint so ``from_etree`` can round-trip non-string values.
            element = Element(name or 'value')
            simple_data = self.to_simple(data, options)
            data_type = get_type_string(simple_data)
            if data_type != 'string':
                element.set('type', get_type_string(simple_data))
            if data_type != 'null':
                if isinstance(simple_data, six.text_type):
                    element.text = simple_data
                else:
                    element.text = force_text(simple_data)
        return element

    def from_etree(self, data):
        """
        Not the smartest deserializer on the planet. At the request level,
        it first tries to output the deserialized subelement called "object"
        or "objects" and falls back to deserializing based on hinted types in
        the XML element attribute "type".
        """
        if data.tag == 'request':
            # if "object" or "objects" exists, return deserialized forms.
            elements = data.getchildren()
            for element in elements:
                if element.tag in ('object', 'objects'):
                    return self.from_etree(element)
            return dict((element.tag, self.from_etree(element)) for element in elements)
        elif data.tag == 'object' or data.get('type') == 'hash':
            return dict((element.tag, self.from_etree(element)) for element in data.getchildren())
        elif data.tag == 'objects' or data.get('type') == 'list':
            return [self.from_etree(element) for element in data.getchildren()]
        else:
            # Leaf element: decode according to the "type" hint written by
            # ``to_etree`` (absent/"string" means plain text).
            type_string = data.get('type')
            if type_string in ('string', None):
                return data.text
            elif type_string == 'integer':
                return int(data.text)
            elif type_string == 'float':
                return float(data.text)
            elif type_string == 'boolean':
                if data.text == 'True':
                    return True
                else:
                    return False
            else:
                return None

    def to_json(self, data, options=None):
        """
        Given some Python data, produces JSON output.
        """
        options = options or {}
        data = self.to_simple(data, options)
        # DjangoJSONEncoder handles Decimal/UUID/etc.; sort_keys makes the
        # output deterministic.
        return djangojson.json.dumps(data, cls=djangojson.DjangoJSONEncoder, sort_keys=True, ensure_ascii=False)

    def from_json(self, content):
        """
        Given some JSON data, returns a Python dictionary of the decoded data.
        """
        try:
            return json.loads(content)
        except ValueError:
            raise BadRequest

    def to_jsonp(self, data, options=None):
        """
        Given some Python data, produces JSON output wrapped in the provided
        callback.

        Due to a difference between JSON and Javascript, the two
        newline characters U+2028 and U+2029 need to be escaped.
        See http://timelessrepo.com/json-isnt-a-javascript-subset for
        details.
        """
        options = options or {}
        # NOTE: the local name ``json`` shadows the module-level ``json``
        # import for the rest of this method.
        json = self.to_json(data, options)
        json = json.replace(u'\u2028', u'\\u2028').replace(u'\u2029', u'\\u2029')
        return u'%s(%s)' % (options['callback'], json)

    def to_xml(self, data, options=None):
        """
        Given some Python data, produces XML output.
        """
        options = options or {}
        if lxml is None:
            raise ImproperlyConfigured("Usage of the XML aspects requires lxml and defusedxml.")
        return tostring(self.to_etree(data, options), xml_declaration=True, encoding='utf-8')

    def from_xml(self, content, forbid_dtd=True, forbid_entities=True):
        """
        Given some XML data, returns a Python dictionary of the decoded data.

        By default XML entity declarations and DTDs will raise a BadRequest
        exception content but subclasses may choose to override this if
        necessary.
        """
        if lxml is None:
            raise ImproperlyConfigured("Usage of the XML aspects requires lxml and defusedxml.")
        try:
            # Stripping the encoding declaration. Because lxml.
            # See http://lxml.de/parsing.html, "Python unicode strings".
            content = XML_ENCODING.sub('', content)
            parsed = parse_xml(
                six.StringIO(content),
                forbid_dtd=forbid_dtd,
                forbid_entities=forbid_entities
            )
        except (LxmlError, DefusedXmlException):
            raise BadRequest()
        return self.from_etree(parsed.getroot())

    def to_yaml(self, data, options=None):
        """
        Given some Python data, produces YAML output.
        """
        options = options or {}
        if yaml is None:
            raise ImproperlyConfigured("Usage of the YAML aspects requires yaml.")
        return yaml.dump(self.to_simple(data, options))

    def from_yaml(self, content):
        """
        Given some YAML data, returns a Python dictionary of the decoded data.
        """
        if yaml is None:
            raise ImproperlyConfigured("Usage of the YAML aspects requires yaml.")
        # TastypieLoader is a safe loader; arbitrary python object tags are
        # not constructed.
        return yaml.load(content, Loader=TastypieLoader)

    def to_plist(self, data, options=None):
        """
        Given some Python data, produces binary plist output.
        """
        options = options or {}
        if biplist is None:
            raise ImproperlyConfigured("Usage of the plist aspects requires biplist.")
        return biplist.writePlistToString(self.to_simple(data, options))

    def from_plist(self, content):
        """
        Given some binary plist data, returns a Python dictionary of the decoded data.
        """
        if biplist is None:
            raise ImproperlyConfigured("Usage of the plist aspects requires biplist.")
        if isinstance(content, six.text_type):
            content = smart_bytes(content)
        return biplist.readPlistFromString(content)

    def to_html(self, data, options=None):
        """
        Reserved for future usage.

        The desire is to provide HTML output of a resource, making an API
        available to a browser. This is on the TODO list but not currently
        implemented.
        """
        options = options or {}
        return 'Sorry, not implemented yet. Please append "?format=json" to your URL.'

    def from_html(self, content):
        """
        Reserved for future usage.

        The desire is to handle form-based (maybe Javascript?) input, making an
        API available to a browser. This is on the TODO list but not currently
        implemented.
        """
        pass
def get_type_string(data):
    """
    Translates a Python data type into a string format.

    Returns ``None`` (implicitly) for unrecognized types. Note that exact
    type matching is used, so e.g. ``True`` maps to 'boolean', not 'integer'.
    """
    data_type = type(data)
    if data_type in six.integer_types:
        return 'integer'
    # Exact-type dispatch for the remaining simple scalar/container kinds.
    for candidate, label in ((float, 'float'),
                             (bool, 'boolean'),
                             (dict, 'hash')):
        if data_type == candidate:
            return label
    if data_type in (list, tuple):
        return 'list'
    if data is None:
        return 'null'
    if isinstance(data, six.string_types):
        return 'string'
| agpl-3.0 |
PatrickKennedy/Sybil | console/app/pygments/plugin.py | 27 | 1841 | # -*- coding: utf-8 -*-
"""
pygments.plugin
~~~~~~~~~~~~~~~
Pygments setuptools plugin interface. The methods defined
here also work if setuptools isn't installed but they just
return nothing.
lexer plugins::
[pygments.lexers]
yourlexer = yourmodule:YourLexer
formatter plugins::
[pygments.formatters]
yourformatter = yourformatter:YourFormatter
/.ext = yourformatter:YourFormatter
As you can see, you can define extensions for the formatter
with a leading slash.
syntax plugins::
[pygments.styles]
yourstyle = yourstyle:YourStyle
filter plugin::
[pygments.filter]
yourfilter = yourfilter:YourFilter
:copyright: 2006-2007 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
try:
import pkg_resources
except ImportError:
pkg_resources = None
LEXER_ENTRY_POINT = 'pygments.lexers'
FORMATTER_ENTRY_POINT = 'pygments.formatters'
STYLE_ENTRY_POINT = 'pygments.styles'
FILTER_ENTRY_POINT = 'pygments.filters'
def find_plugin_lexers():
    """Yield lexer classes registered under the ``pygments.lexers`` entry point."""
    if pkg_resources is not None:
        for ep in pkg_resources.iter_entry_points(LEXER_ENTRY_POINT):
            yield ep.load()
def find_plugin_formatters():
    """Yield (name, formatter class) pairs from the ``pygments.formatters`` entry point."""
    if pkg_resources is not None:
        for ep in pkg_resources.iter_entry_points(FORMATTER_ENTRY_POINT):
            yield ep.name, ep.load()
def find_plugin_styles():
    """Yield (name, style class) pairs from the ``pygments.styles`` entry point."""
    if pkg_resources is not None:
        for ep in pkg_resources.iter_entry_points(STYLE_ENTRY_POINT):
            yield ep.name, ep.load()
def find_plugin_filters():
    """Yield (name, filter class) pairs from the ``pygments.filters`` entry point."""
    if pkg_resources is not None:
        for ep in pkg_resources.iter_entry_points(FILTER_ENTRY_POINT):
            yield ep.name, ep.load()
| bsd-2-clause |
ishay2b/tensorflow | tensorflow/contrib/timeseries/python/timeseries/state_space_models/filtering_postprocessor_test.py | 67 | 3176 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for filtering postprocessors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import filtering_postprocessor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
class FilteringStepPostprocessorTest(test.TestCase):
  """Tests StateInterpolatingAnomalyDetector's filtering-step postprocessing."""

  def test_gaussian_alternative(self):
    # Exercise the detector in both float precisions.
    for float_dtype in [dtypes.float32, dtypes.float64]:
      detector = filtering_postprocessor.StateInterpolatingAnomalyDetector(
          anomaly_log_likelihood=(filtering_postprocessor
                                  .cauchy_alternative_to_gaussian),
          responsibility_scaling=10.)
      # State tuples: (mean-like tensor, scalar per batch element, ints).
      predicted_state = [
          constant_op.constant(
              [[40.], [20.]], dtype=float_dtype), constant_op.constant(
                  [3., 6.], dtype=float_dtype), constant_op.constant([-1, -2])
      ]
      filtered_state = [
          constant_op.constant(
              [[80.], [180.]], dtype=float_dtype), constant_op.constant(
                  [1., 2.], dtype=float_dtype), constant_op.constant([-1, -2])
      ]
      # Batch element 0: observation near the predicted mean (normal).
      # Batch element 1: log_likelihood of -40 marks it as anomalous.
      interpolated_state, updated_outputs = detector.process_filtering_step(
          current_times=constant_op.constant([1, 2]),
          current_values=constant_op.constant([[0.], [1.]], dtype=float_dtype),
          predicted_state=predicted_state,
          filtered_state=filtered_state,
          outputs={
              "mean":
                  constant_op.constant([[0.1], [10.]], dtype=float_dtype),
              "covariance":
                  constant_op.constant([[[1.0]], [[1.0]]], dtype=float_dtype),
              "log_likelihood":
                  constant_op.constant([-1., -40.], dtype=float_dtype)
          })
      # The first batch element is not anomalous, and so should use the inferred
      # state. The second is anomalous, and should use the predicted state.
      expected_state = [[[80.], [20.]],
                        [1., 6.],
                        [-1, -2]]
      with self.test_session():
        for interpolated, expected in zip(interpolated_state, expected_state):
          self.assertAllClose(expected, interpolated.eval())
        # Anomaly score: negative for the normal element, positive for the
        # anomalous one.
        self.assertGreater(0., updated_outputs["anomaly_score"][0].eval())
        self.assertLess(0., updated_outputs["anomaly_score"][1].eval())
# Run the test suite when executed directly as a script.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
cagatayyildiz/boun-sim | bcpm/visualize.py | 1 | 1414 | '''
This study is a Bogazici University - NETAS Nova V-Gate collaboration, funded by the TEYDEB project "Realization of Anomaly Detection and Prevention with Learning System Architectures, Quality Improvement, High Rate Service Availability and Rich Services in a VoIP Firewall Product" of the Scientific and Technological Research Council of Turkey (TUBITAK).
'''
import matplotlib.pyplot as plt
from matplotlib import gridspec
import numpy as np
import sys
from model import Data
def visualize_data(dirname, m, n):
    """Plot a saved change-point data set.

    :param dirname: directory containing the saved ``Data``
    :param m: number of leading rows shown together as a heat map (0 skips it)
    :param n: number of following rows plotted as individual line charts
    """
    data = Data.load(dirname)
    obs = data.v.transpose()
    num_steps = obs.shape[1]
    print(num_steps)
    if m > 0:
        # Heat map of the first m observation rows, with change points
        # (data.s) overlaid as red vertical lines.
        fig = plt.figure(figsize=(12, 4))
        axis = fig.gca()
        axis.pcolormesh(obs[0:m, :], cmap=plt.cm.Greys)
        axis.vlines(np.arange(0, num_steps), 0, data.s * m, colors='r', linestyles='-', linewidth=2)
        axis.legend(['change points'])
    if n > 0:
        # One stacked subplot per remaining row, sharing the time axis.
        fig = plt.figure(figsize=(12, 4))
        grid = gridspec.GridSpec(n, 1, height_ratios=np.ones(n))
        for row in range(n):
            axis = plt.subplot(grid[row])
            series = obs[m + row, :]
            top = np.max(series) * 1.1
            axis.plot(range(num_steps), series, 'b-')
            axis.vlines(np.arange(0, num_steps), 0, data.s * top, colors='r', linestyles='-', linewidth=2)
            axis.set_ylim([0, top])
    plt.show()
if __name__ == '__main__':
    visualize_data(sys.argv[1], int(sys.argv[2]), int(sys.argv[3]))
| gpl-2.0 |
dvarrazzo/Pyrseas | pyrseas/dbobject/table.py | 1 | 36138 | # -*- coding: utf-8 -*-
"""
pyrseas.dbobject.table
~~~~~~~~~~~~~~~~~~~~~~
This module defines six classes: DbClass derived from
DbSchemaObject, Sequence, Table and View derived from DbClass,
MaterializedView derived from View, and ClassDict derived from
DbObjectDict.
"""
import re
import os
import sys
from pyrseas.lib.pycompat import PY2
from pyrseas.dbobject import DbObjectDict, DbSchemaObject
from pyrseas.dbobject import quote_id, split_schema_obj
from pyrseas.dbobject import commentable, ownable, grantable
from pyrseas.dbobject.constraint import CheckConstraint, PrimaryKey
from pyrseas.dbobject.constraint import ForeignKey, UniqueConstraint
from pyrseas.dbobject.privileges import privileges_from_map, add_grant
MAX_BIGINT = 9223372036854775807  # 2**63 - 1: PostgreSQL's implicit sequence MAXVALUE
def seq_max_value(seq):
    """Render the MAXVALUE clause for a CREATE/ALTER SEQUENCE statement.

    Both None and PostgreSQL's default bigint ceiling mean "no limit".
    """
    limit = seq.max_value
    unlimited = limit is None or limit == MAX_BIGINT
    return " NO MAXVALUE" if unlimited else " MAXVALUE %d" % limit
def seq_min_value(seq):
    """Render the MINVALUE clause for a CREATE/ALTER SEQUENCE statement.

    Both None and the default floor of 1 mean "no limit".
    """
    floor = seq.min_value
    unlimited = floor is None or floor == 1
    return " NO MINVALUE" if unlimited else " MINVALUE %d" % floor
class DbClass(DbSchemaObject):
    """A table, sequence or view (common base for pg_class relations)"""
    # Relations are uniquely identified by (schema, relation name).
    keylist = ['schema', 'name']
    # Backing system catalog for every relation kind.
    catalog = 'pg_class'
class Sequence(DbClass):
"A sequence generator definition"
    @property
    def allprivs(self):
        # Privilege letters applicable to sequences: SELECT (r), UPDATE (w)
        # and USAGE (U) -- used when expanding ALL in GRANT/REVOKE maps.
        return 'rwU'
def get_attrs(self, dbconn):
"""Get the attributes for the sequence
:param dbconn: a DbConnection object
"""
data = dbconn.fetchone(
"""SELECT start_value, increment_by, max_value, min_value,
cache_value
FROM %s.%s""" % (quote_id(self.schema), quote_id(self.name)))
for key, val in list(data.items()):
setattr(self, key, val)
    def get_dependent_table(self, dbconn):
        """Get the table and column name that uses or owns the sequence

        Sets ``owner_table``/``owner_column`` when the sequence is OWNED BY
        a table column, or ``dependent_table`` when it is merely referenced
        from a column DEFAULT expression.

        :param dbconn: a DbConnection object
        """
        def split_table(obj, sch):
            # Strip schema qualification (plain or quoted) from a
            # regclass-rendered name, and remove surrounding double quotes.
            schema = sch or 'public'
            tbl = obj
            quoted = '"%s".' % schema
            if obj.startswith(schema + '.'):
                tbl = obj[len(schema) + 1:]
            elif obj.startswith(quoted):
                tbl = obj[len(quoted):]
            elif sch is None:
                raise ValueError("Invalid schema.table: %s" % obj)
            if tbl[0] == '"' and tbl[-1:] == '"':
                tbl = tbl[1:-1]
            return tbl
        # First look for an ownership (OWNED BY) link in pg_depend.
        data = dbconn.fetchone(
            """SELECT refobjid::regclass, refobjsubid
               FROM pg_depend
               WHERE objid = '%s'::regclass
               AND refclassid = 'pg_class'::regclass""" % self.qualname())
        if data:
            self.owner_table = split_table(data[0], self.schema)
            # NOTE(review): refobjsubid is a column *number* here; it is
            # translated to a column name later in ClassDict.link_refs.
            self.owner_column = data[1]
            return
        # Otherwise look for a table whose column DEFAULT uses the sequence.
        data = dbconn.fetchone(
            """SELECT adrelid::regclass
            FROM pg_attrdef a JOIN pg_depend ON (a.oid = objid)
            WHERE refobjid = '%s'::regclass
            AND classid = 'pg_attrdef'::regclass""" % self.qualname())
        if data:
            self.dependent_table = split_table(data[0], self.schema)
    def to_map(self, db, opts):
        """Convert a sequence definition to a YAML-suitable format

        :param db: database dictionaries
        :param opts: options to include/exclude tables, etc.
        :return: dictionary, or None if the sequence is excluded
        """
        # Skip the sequence when a table filter is active and neither the
        # sequence nor its owning table was requested, or when explicitly
        # excluded.
        if hasattr(opts, 'tables') and opts.tables and \
                (self.name not in opts.tables and
                 not hasattr(self, 'owner_table') or
                 self.owner_table not in opts.tables) or (
                hasattr(opts, 'excl_tables') and opts.excl_tables and
                self.name in opts.excl_tables):
            return None
        seq = self._base_map(db, opts.no_owner, opts.no_privs)
        seq.pop('dependent_table', None)
        for key, val in list(seq.items()):
            # Default limits are emitted as None so they round-trip cleanly.
            if key == 'max_value' and val == MAX_BIGINT:
                seq[key] = None
            elif key == 'min_value' and val == 1:
                seq[key] = None
            elif key == 'privileges':
                seq[key] = val
            else:
                if PY2:
                    # Under Python 2, keep machine-sized numbers as int and
                    # render anything wider as a string.
                    if isinstance(val, (int, long)) and val <= sys.maxsize:
                        seq[key] = int(val)
                    else:
                        seq[key] = str(val)
                else:
                    if isinstance(val, int):
                        seq[key] = int(val)
                    else:
                        seq[key] = str(val)
        return seq
    @commentable
    @grantable
    @ownable
    def create(self):
        """Return a SQL statement to CREATE the sequence

        :return: SQL statements

        The commentable/grantable/ownable decorators append COMMENT, GRANT
        and ALTER OWNER statements as needed.
        """
        return ["""CREATE SEQUENCE %s
    START WITH %d
    INCREMENT BY %d
    %s
    %s
    CACHE %d""" % (self.qualname(), self.start_value, self.increment_by,
           seq_max_value(self), seq_min_value(self), self.cache_value)]
def add_owner(self):
"""Return statement to ALTER the sequence to indicate its owner table
:return: SQL statement
"""
stmts = []
stmts.append("ALTER SEQUENCE %s OWNED BY %s.%s" % (
self.qualname(), self.qualname(self.owner_table),
quote_id(self.owner_column)))
return stmts
def alter(self, inseq, no_owner=False):
"""Generate SQL to transform an existing sequence
:param inseq: a YAML map defining the new sequence
:return: list of SQL statements
Compares the sequence to an input sequence and generates SQL
statements to transform it into the one represented by the
input.
"""
stmts = []
stmt = ""
if self.start_value != inseq.start_value:
stmt += " START WITH %d" % inseq.start_value
if self.increment_by != inseq.increment_by:
stmt += " INCREMENT BY %d" % inseq.increment_by
maxval = self.max_value
if maxval == MAX_BIGINT:
maxval = None
if maxval != inseq.max_value:
stmt += seq_max_value(inseq)
minval = self.min_value
if minval == 1:
minval = None
if minval != inseq.min_value:
stmt += seq_min_value(inseq)
if self.cache_value != inseq.cache_value:
stmt += " CACHE %d" % inseq.cache_value
if stmt:
stmts.append("ALTER SEQUENCE %s" % self.qualname() + stmt)
if hasattr(inseq, 'owner_column') and \
not hasattr(inseq, 'owner_table'):
raise ValueError("Sequence '%s' incomplete specification: "
"owner_column but no owner_table")
if hasattr(inseq, 'owner_table'):
if not hasattr(inseq, 'owner_column'):
raise ValueError("Sequence '%s' incomplete specification: "
"owner_table but no owner_column")
if not (hasattr(self, 'owner_table') and
hasattr(self, 'owner_column')):
stmts.append(inseq.add_owner())
stmts.append(super(Sequence, self).alter(inseq, no_owner=no_owner))
return stmts
def drop(self):
"""Generate SQL to drop the current sequence
:return: list of SQL statements
"""
stmts = []
if not hasattr(self, 'owner_table'):
stmts.append(super(Sequence, self).drop())
return stmts
class Table(DbClass):
"""A database table definition
A table is identified by its schema name and table name. It should
have a list of columns. It may have a primary_key, zero or more
foreign_keys, zero or more unique_constraints, and zero or more
indexes.
"""
    @property
    def allprivs(self):
        # Privilege letters for tables: INSERT (a), SELECT (r), UPDATE (w),
        # DELETE (d), TRUNCATE (D), REFERENCES (x), TRIGGER (t).
        return 'arwdDxt'
def column_names(self):
"""Return a list of column names in the table
:return: list
"""
return [c.name for c in self.columns]
    def to_map(self, db, dbschemas, opts):
        """Convert a table to a YAML-suitable format

        :param db: database dictionaries
        :param dbschemas: database dictionary of schemas
        :param opts: options to include/exclude tables, etc.
        :return: dictionary, or None if excluded or column-less
        """
        if hasattr(opts, 'excl_tables') and opts.excl_tables \
                and self.name in opts.excl_tables or \
                not hasattr(self, 'columns'):
            return None
        tbl = self._base_map(db, opts.no_owner, opts.no_privs)
        cols = []
        for column in self.columns:
            col = column.to_map(db, opts.no_privs)
            if col:
                cols.append(col)
        tbl['columns'] = cols
        # Each constraint kind is emitted under its own key, mapping
        # constraint name -> definition.
        if hasattr(self, 'check_constraints'):
            if 'check_constraints' not in tbl:
                tbl.update(check_constraints={})
            for k in list(self.check_constraints.values()):
                tbl['check_constraints'].update(
                    self.check_constraints[k.name].to_map(
                        db, self.column_names()))
        if hasattr(self, 'primary_key'):
            tbl['primary_key'] = self.primary_key.to_map(
                db, self.column_names())
        if hasattr(self, 'foreign_keys'):
            if 'foreign_keys' not in tbl:
                tbl['foreign_keys'] = {}
            for k in list(self.foreign_keys.values()):
                # The referenced table's column names are needed to render
                # the FK's referenced-column list.
                tbls = dbschemas[k.ref_schema].tables
                tbl['foreign_keys'].update(self.foreign_keys[k.name].to_map(
                    db, self.column_names(),
                    tbls[self.foreign_keys[k.name].ref_table]. column_names()))
        if hasattr(self, 'unique_constraints'):
            if 'unique_constraints' not in tbl:
                tbl.update(unique_constraints={})
            for k in list(self.unique_constraints.values()):
                tbl['unique_constraints'].update(
                    self.unique_constraints[k.name].to_map(
                        db, self.column_names()))
        if hasattr(self, 'indexes'):
            idxs = {}
            # Indexes backing constraints (e.g. the pkey's) are not dumped.
            for idx in self.indexes.values():
                if not getattr(idx, '_for_constraint', None):
                    idxs.update(idx.to_map(db))
            if idxs:
                # we may have only indexes not to dump, e.g. the pkey one
                tbl['indexes'] = idxs
            else:
                tbl.pop('indexes', None)
        if hasattr(self, 'inherits'):
            if 'inherits' not in tbl:
                tbl['inherits'] = self.inherits
        if hasattr(self, 'rules'):
            if 'rules' not in tbl:
                tbl['rules'] = {}
            for k in list(self.rules.values()):
                tbl['rules'].update(self.rules[k.name].to_map(db))
        if hasattr(self, 'triggers'):
            if 'triggers' not in tbl:
                tbl['triggers'] = {}
            for k in list(self.triggers.values()):
                tbl['triggers'].update(self.triggers[k.name].to_map(db))
        return tbl
    def create(self):
        """Return SQL statements to CREATE the table

        Emits CREATE TABLE followed, as needed, by ALTER OWNER, GRANTs,
        column privileges, COMMENTs, and ALTER SEQUENCE ... OWNED BY for
        sequences owned by this table's columns.

        :return: SQL statements
        """
        # TODO This was *maybe* in place to guard double creations caused by
        # the functions. Leaving it here, to be dropped once I'm reasonably
        # certain we get called only once, when expected.
        assert not hasattr(self, 'created')
        stmts = []
        cols = []
        colprivs = []
        for col in self.columns:
            # Inherited columns come from the parent table's definition.
            if not (hasattr(col, 'inherited') and col.inherited):
                cols.append(" " + col.add()[0])
            colprivs.append(col.add_privs())
        unlogged = ''
        if hasattr(self, 'unlogged') and self.unlogged:
            unlogged = 'UNLOGGED '
        inhclause = ''
        if hasattr(self, 'inherits'):
            inhclause = " INHERITS (%s)" % ", ".join(t for t in self.inherits)
        opts = ''
        if hasattr(self, 'options'):
            opts = " WITH (%s)" % ', '.join(self.options)
        tblspc = ''
        if hasattr(self, 'tablespace'):
            tblspc = " TABLESPACE %s" % self.tablespace
        stmts.append("CREATE %sTABLE %s (\n%s)%s%s%s" % (
            unlogged, self.qualname(), ",\n".join(cols), inhclause, opts,
            tblspc))
        if self.owner is not None:
            stmts.append(self.alter_owner())
        for priv in self.privileges:
            stmts.append(add_grant(self, priv))
        if colprivs:
            stmts.append(colprivs)
        if self.description is not None:
            stmts.append(self.comment())
        for col in self.columns:
            if col.description is not None:
                stmts.append(col.comment())
        if hasattr(self, '_owned_seqs'):
            for dep in self._owned_seqs:
                stmts.append(dep.add_owner())
        # Marker checked by the assert above to detect double creation.
        self.created = True
        return stmts
def drop(self):
"""Return a SQL DROP statement for the table
:return: SQL statement
"""
stmts = []
if not hasattr(self, 'dropped') or not self.dropped:
if hasattr(self, '_dependent_funcs'):
for fnc in self._dependent_funcs:
stmts.append(fnc.drop())
self.dropped = True
stmts.append("DROP TABLE %s" % self.identifier())
return stmts
def diff_options(self, newopts):
"""Compare options lists and generate SQL SET or RESET clause
:newopts: list of new options
:return: SQL SET / RESET clauses
Generate ([SET|RESET storage_parameter=value) clauses from two
lists in the form of 'key=value' strings.
"""
def to_dict(optlist):
return dict(opt.split('=', 1) for opt in optlist)
oldopts = {}
if hasattr(self, 'options'):
oldopts = to_dict(self.options)
newopts = to_dict(newopts)
setclauses = []
for key, val in list(newopts.items()):
if key not in oldopts:
setclauses.append("%s=%s" % (key, val))
elif val != oldopts[key]:
setclauses.append("%s=%s" % (key, val))
resetclauses = []
for key, val in list(oldopts.items()):
if key not in newopts:
resetclauses.append("%s" % key)
clauses = ''
if setclauses:
clauses = "SET (%s)" % ', '.join(setclauses)
if resetclauses:
clauses += ', '
if resetclauses:
clauses += "RESET (%s)" % ', '.join(resetclauses)
return clauses
    def alter(self, intable):
        """Generate SQL to transform an existing table

        :param intable: a YAML map defining the new table
        :return: list of SQL statements

        Compares the table to an input table and generates SQL
        statements to transform it into the one represented by the
        input.  Dropping of missing columns is handled separately by
        alter_drop_columns().
        """
        stmts = []
        if not hasattr(intable, 'columns'):
            raise KeyError("Table '%s' has no columns" % intable.name)
        colnames = [col.name for col in self.columns
                    if not hasattr(col, 'dropped')]
        dbcols = len(colnames)
        colprivs = []
        base = "ALTER %s %s\n " % (self.objtype, self.qualname())
        # check input columns
        for (num, incol) in enumerate(intable.columns):
            if hasattr(incol, 'oldname'):
                # Positional match: the column at this position must carry
                # the old name for a rename to be valid.
                assert(self.columns[num].name == incol.oldname)
                stmts.append(self.columns[num].rename(incol.name))
            # check existing columns
            if num < dbcols and self.columns[num].name == incol.name:
                (stmt, descr) = self.columns[num].alter(incol)
                if stmt:
                    stmts.append(base + stmt)
                colprivs.append(self.columns[num].diff_privileges(incol))
                if descr:
                    stmts.append(descr)
            # add new columns
            elif incol.name not in colnames and \
                    not hasattr(incol, 'inherited'):
                (stmt, descr) = incol.add()
                stmts.append(base + "ADD COLUMN %s" % stmt)
                colprivs.append(incol.add_privs())
                if descr:
                    stmts.append(descr)
        newopts = []
        if hasattr(intable, 'options'):
            newopts = intable.options
        diff_opts = self.diff_options(newopts)
        if diff_opts:
            stmts.append("ALTER %s %s %s" % (self.objtype, self.identifier(),
                                             diff_opts))
        if colprivs:
            stmts.append(colprivs)
        if hasattr(intable, 'tablespace'):
            if not hasattr(self, 'tablespace') \
                    or self.tablespace != intable.tablespace:
                stmts.append(base + "SET TABLESPACE %s"
                             % quote_id(intable.tablespace))
        elif hasattr(self, 'tablespace'):
            # Input dropped the tablespace: move back to the default.
            stmts.append(base + "SET TABLESPACE pg_default")
        stmts.append(super(Table, self).alter(intable))
        return stmts
def alter_drop_columns(self, intable):
"""Generate SQL to drop columns from an existing table
:param intable: a YAML map defining the new table
:return: list of SQL statements
Compares the table to an input table and generates SQL
statements to drop any columns missing from the one
represented by the input.
"""
if not hasattr(intable, 'columns'):
raise KeyError("Table '%s' has no columns" % intable.name)
stmts = []
incolnames = set(attr.name for attr in intable.columns)
for attr in self.columns:
if attr.name not in incolnames:
if not getattr(attr, 'inherited', False):
stmts.append(attr.drop())
return stmts
    def data_export(self, dbconn, dirpath):
        """Copy table data out to a file

        Rows are ordered by primary key when one exists, otherwise by all
        columns left to right, so repeated exports diff cleanly.

        :param dbconn: database connection to use
        :param dirpath: full path to the directory for the file to be created
        """
        filepath = os.path.join(dirpath, self.extern_filename('data'))
        if hasattr(self, 'primary_key'):
            # keycols holds 1-based column numbers; translate them to names.
            order_by = [self.columns[col - 1].name
                        for col in self.primary_key.keycols]
        else:
            order_by = ['%d' % (n + 1) for n in range(len(self.columns))]
        dbconn.sql_copy_to(
            "COPY (SELECT * FROM %s ORDER BY %s) TO STDOUT WITH CSV" % (
                self.qualname(), ', '.join(order_by)), filepath)
    def data_import(self, dirpath):
        """Generate SQL to import data into a table

        :param dirpath: full path for the directory for the file
        :return: list of SQL statements
        """
        filepath = os.path.join(dirpath, self.extern_filename('data'))
        stmts = []
        # Drop the referencing foreign key first so TRUNCATE can proceed;
        # it is re-added after the load.
        if hasattr(self, '_referred_by'):
            stmts.append("ALTER TABLE %s DROP CONSTRAINT %s" % (
                self._referred_by._table.qualname(), self._referred_by.name))
        stmts.append("TRUNCATE ONLY %s" % self.qualname())
        # NOTE(review): this entry is a tuple, not a string -- presumably the
        # statement executor joins the pieces into a psql \copy metacommand;
        # confirm against the caller before changing.
        stmts.append(("\\copy ", self.qualname(), " from '", filepath,
                      "' csv"))
        if hasattr(self, '_referred_by'):
            stmts.append(self._referred_by.add())
        return stmts
    def get_implied_deps(self, db):
        """Return objects this table implicitly depends on

        Covers the types of its columns, any sequence used in a column
        DEFAULT via nextval(), and parent tables it inherits from.

        :param db: the database dictionaries
        :return: set of objects
        """
        deps = super(Table, self).get_implied_deps(db)
        for col in self.columns:
            type = db.find_type(col.type)
            if type is not None:
                deps.add(type)
            # Check if the column depends on a sequence to avoid stating the
            # dependency explicitly.
            d = getattr(col, 'default', None)
            if d:
                m = re.match(r"nextval\('(.*)'::regclass\)", d)
                if m:
                    seq = db.tables.find(m.group(1), self.schema)
                    if seq:
                        deps.add(seq)
                        # Remember sequences OWNED BY our columns so create()
                        # can emit ALTER SEQUENCE ... OWNED BY afterwards.
                        if hasattr(seq, 'owner_table'):
                            if not hasattr(self, '_owned_seqs'):
                                self._owned_seqs = []
                            self._owned_seqs.append(seq)
        for pname in getattr(self, 'inherits', ()):
            parent = db.tables.find(pname, self.schema)
            assert parent is not None, "couldn't find parent table %s" % pname
            deps.add(parent)
        return deps
class View(DbClass):
    """A database view definition

    A view is identified by its schema name and view name.
    """
    # Views are granted privileges as if they were tables.
    privobjtype = "TABLE"

    @property
    def allprivs(self):
        # Same privilege letters as tables.
        return 'arwdDxt'

    def to_map(self, db, opts):
        """Convert a view to a YAML-suitable format

        :param db: database dictionaries
        :param opts: options to include/exclude tables, etc.
        :return: dictionary, or None if the view is excluded
        """
        if hasattr(opts, 'excl_tables') and opts.excl_tables \
                and self.name in opts.excl_tables:
            return None
        view = self._base_map(db, opts.no_owner, opts.no_privs)
        if 'dependent_funcs' in view:
            del view['dependent_funcs']
        if hasattr(self, 'triggers'):
            # BUG FIX: initialize the 'triggers' key when _base_map did not
            # provide it (Table.to_map already guards this way); previously
            # a missing key raised KeyError.
            if 'triggers' not in view:
                view['triggers'] = {}
            for key in list(self.triggers.values()):
                view['triggers'].update(self.triggers[key.name].to_map(db))
        return view

    @commentable
    @grantable
    @ownable
    def create(self, newdefn=None):
        """Return SQL statements to CREATE the view

        :param newdefn: view definition overriding self.definition
        :return: SQL statements
        """
        defn = newdefn or self.definition
        # Strip a trailing semicolon so the statement composes cleanly.
        if defn[-1:] == ';':
            defn = defn[:-1]
        return ["CREATE%s VIEW %s AS\n %s" % (
            newdefn and " OR REPLACE" or '', self.qualname(), defn)]

    def alter(self, inview):
        """Generate SQL to transform an existing view

        :param inview: a YAML map defining the new view
        :return: list of SQL statements

        A changed definition is handled with CREATE OR REPLACE VIEW.
        """
        stmts = []
        if self.definition != inview.definition:
            stmts.append(self.create(inview.definition))
        stmts.append(super(View, self).alter(inview))
        return stmts
class MaterializedView(View):
    """A materialized view definition

    A materialized view is identified by its schema name and view name.
    """
    @property
    def objtype(self):
        return "MATERIALIZED VIEW"

    def to_map(self, db, opts):
        """Convert a materialized view to a YAML-suitable format

        :param db: database dictionaries
        :param opts: options to include/exclude tables, etc.
        :return: dictionary, or None if excluded
        """
        if hasattr(opts, 'excl_tables') and opts.excl_tables \
                and self.name in opts.excl_tables:
            return None
        mvw = self._base_map(db, opts.no_owner, opts.no_privs)
        # Unlike plain views, materialized views can carry indexes.
        if hasattr(self, 'indexes'):
            if 'indexes' not in mvw:
                mvw['indexes'] = {}
            for k in list(self.indexes.values()):
                mvw['indexes'].update(self.indexes[k.name].to_map(db))
        return mvw

    @commentable
    @grantable
    @ownable
    def create(self, newdefn=None):
        """Return SQL statements to CREATE the materialized view

        :param newdefn: view definition overriding self.definition
        :return: SQL statements
        """
        defn = newdefn or self.definition
        if defn[-1:] == ';':
            defn = defn[:-1]
        return ["CREATE %s %s AS\n %s" % (
            self.objtype, self.qualname(), defn)]
QUERY_PRE91 = \
"""SELECT c.oid,
nspname AS schema, relname AS name, relkind AS kind,
reloptions AS options, spcname AS tablespace,
rolname AS owner, array_to_string(relacl, ',') AS privileges,
CASE WHEN relkind = 'v' THEN pg_get_viewdef(c.oid, TRUE)
ELSE '' END AS definition,
obj_description(c.oid, 'pg_class') AS description
FROM pg_class c
JOIN pg_roles r ON (r.oid = relowner)
JOIN pg_namespace ON (relnamespace = pg_namespace.oid)
LEFT JOIN pg_tablespace t ON (reltablespace = t.oid)
WHERE relkind in ('r', 'S', 'v')
AND (nspname != 'pg_catalog'
AND nspname != 'information_schema')
ORDER BY nspname, relname"""
QUERY_PRE93 = \
"""SELECT c.oid,
nspname AS schema, relname AS name, relkind AS kind,
reloptions AS options, relpersistence AS persistence,
spcname AS tablespace, rolname AS owner,
array_to_string(relacl, ',') AS privileges,
CASE WHEN relkind = 'v' THEN pg_get_viewdef(c.oid, TRUE)
ELSE '' END AS definition,
obj_description(c.oid, 'pg_class') AS description
FROM pg_class c
JOIN pg_roles r ON (r.oid = relowner)
JOIN pg_namespace ON (relnamespace = pg_namespace.oid)
LEFT JOIN pg_tablespace t ON (reltablespace = t.oid)
WHERE relkind in ('r', 'S', 'v')
AND relpersistence != 't'
AND (nspname != 'pg_catalog'
AND nspname != 'information_schema')
ORDER BY nspname, relname"""
OBJTYPES = ['table', 'sequence', 'view', 'materialized view']
class ClassDict(DbObjectDict):
    "The collection of tables and similar objects in a database"
    cls = DbClass
    # Default relation query (PostgreSQL 9.3+): also fetches materialized
    # views ('m') and their populated state, and skips temporary relations.
    query = \
        """SELECT c.oid,
           nspname AS schema, relname AS name, relkind AS kind,
           reloptions AS options, relpersistence AS persistence,
           spcname AS tablespace, rolname AS owner,
           array_to_string(relacl, ',') AS privileges,
           CASE WHEN relkind ~ '[vm]' THEN pg_get_viewdef(c.oid, TRUE)
                ELSE '' END AS definition,
           CASE WHEN relkind = 'm' THEN relispopulated
                ELSE FALSE END AS with_data,
           obj_description(c.oid, 'pg_class') AS description
           FROM pg_class c
                JOIN pg_roles r ON (r.oid = relowner)
                JOIN pg_namespace ON (relnamespace = pg_namespace.oid)
                LEFT JOIN pg_tablespace t ON (reltablespace = t.oid)
           WHERE relkind in ('r', 'S', 'v', 'm')
             AND relpersistence != 't'
             AND (nspname != 'pg_catalog'
                  AND nspname != 'information_schema')
           ORDER BY nspname, relname"""
    # Inheritance links, ordered by child and inheritance position.
    inhquery = \
        """SELECT inhrelid::regclass AS sub, inhparent::regclass AS parent,
                  inhseqno
           FROM pg_inherits
           ORDER BY 1, 3"""
    def _from_catalog(self):
        """Initialize the dictionary of tables by querying the catalogs"""
        # Older servers need reduced queries (see QUERY_PRE91/QUERY_PRE93).
        if self.dbconn.version < 90100:
            self.query = QUERY_PRE91
        elif self.dbconn.version < 90300:
            self.query = QUERY_PRE93
        for table in self.fetch():
            oid = table.oid
            sch, tbl = table.key()
            if hasattr(table, 'persistence'):
                # Translate relpersistence 'u' into a boolean attribute.
                if table.persistence == 'u':
                    table.unlogged = True
                del table.persistence
            kind = table.kind
            del table.kind
            # Instantiate the appropriate subclass based on relkind.
            if kind == 'r':
                self.by_oid[oid] = self[sch, tbl] = Table(**table.__dict__)
            elif kind == 'S':
                self.by_oid[oid] = self[sch, tbl] = inst \
                    = Sequence(**table.__dict__)
                inst.get_attrs(self.dbconn)
                inst.get_dependent_table(self.dbconn)
            elif kind == 'v':
                self.by_oid[oid] = self[sch, tbl] = View(**table.__dict__)
            elif kind == 'm':
                self.by_oid[oid] = self[sch, tbl] \
                    = MaterializedView(**table.__dict__)
        # Attach inheritance information to child tables.
        inhtbls = self.dbconn.fetchall(self.inhquery)
        self.dbconn.rollback()
        for (tbl, partbl, num) in inhtbls:
            (sch, tbl) = split_schema_obj(tbl)
            table = self[(sch, tbl)]
            if not hasattr(table, 'inherits'):
                table.inherits = []
            table.inherits.append(partbl)
def from_map(self, schema, inobjs, newdb):
"""Initalize the dictionary of tables by converting the input map
:param schema: schema owning the tables
:param inobjs: YAML map defining the schema objects
:param newdb: collection of dictionaries defining the database
"""
for k in inobjs:
inobj = inobjs[k]
objtype = None
for typ in OBJTYPES:
if k.startswith(typ):
objtype = typ
key = k[len(typ) + 1:]
if objtype is None:
raise KeyError("Unrecognized object type: %s" % k)
if objtype == 'table':
self[(schema.name, key)] = table = Table(
schema=schema.name, name=key)
intable = inobj
if not intable:
raise ValueError("Table '%s' has no specification" % k)
for attr in ['inherits', 'owner', 'tablespace', 'oldname',
'description', 'options', 'unlogged']:
if attr in intable:
setattr(table, attr, intable[attr])
try:
newdb.columns.from_map(table, intable['columns'])
except KeyError as exc:
exc.args = ("Table '%s' has no columns" % key, )
raise
newdb.constraints.from_map(table, intable, rtables=inobjs)
if 'indexes' in intable:
newdb.indexes.from_map(table, intable['indexes'])
if 'rules' in intable:
newdb.rules.from_map(table, intable['rules'])
if 'triggers' in intable:
newdb.triggers.from_map(table, intable['triggers'])
elif objtype == 'sequence':
self[(schema.name, key)] = seq = Sequence(
schema=schema.name, name=key)
inseq = inobj
if not inseq:
raise ValueError("Sequence '%s' has no specification" % k)
for attr, val in list(inseq.items()):
setattr(seq, attr, val)
elif objtype == 'view':
self[(schema.name, key)] = view = View(
schema=schema.name, name=key)
inview = inobj
if not inview:
raise ValueError("View '%s' has no specification" % k)
for attr, val in list(inview.items()):
setattr(view, attr, val)
if 'triggers' in inview:
newdb.triggers.from_map(view, inview['triggers'])
elif objtype == 'materialized view':
self[(schema.name, key)] = mview = MaterializedView(
schema=schema.name, name=key)
inmview = inobj
if not inmview:
raise ValueError("View '%s' has no specification" % k)
if 'indexes' in inmview:
newdb.indexes.from_map(mview, inmview['indexes'])
for attr, val in list(inmview.items()):
setattr(mview, attr, val)
else:
raise KeyError("Unrecognized object type: %s" % k)
obj = self[(schema.name, key)]
if 'privileges' in inobj:
if obj.owner is None:
raise ValueError("%s '%s' has privileges but no "
"owner information" %
obj.objtype.capital(), table.name)
obj.privileges = privileges_from_map(
inobj['privileges'], obj.allprivs, obj.owner)
if 'depends_on' in inobj:
obj.depends_on.extend(inobj['depends_on'])
def find(self, obj, schema=None):
"""Find a table given its name.
The name can contain array type modifiers such as '[]'
Return None if not found.
"""
sch, name = split_schema_obj(obj, schema)
name = name.rstrip('[]')
return self.get((sch, name))
    def link_refs(self, dbcolumns, dbconstrs, dbindexes, dbrules, dbtriggers):
        """Connect columns, constraints, etc. to their respective tables

        :param dbcolumns: dictionary of columns
        :param dbconstrs: dictionary of constraints
        :param dbindexes: dictionary of indexes
        :param dbrules: dictionary of rules
        :param dbtriggers: dictionary of triggers

        Links each list of table columns in `dbcolumns` to the
        corresponding table. Fills the `foreign_keys`,
        `unique_constraints`, `indexes` and `triggers` dictionaries
        for each table by traversing the `dbconstrs`, `dbindexes` and
        `dbtriggers` dictionaries, which are keyed by schema, table
        and constraint, index or trigger name.
        """
        # Attach column lists (and column back-references) to their tables.
        for (sch, tbl) in dbcolumns:
            if (sch, tbl) in self:
                assert isinstance(self[(sch, tbl)], Table)
                self[(sch, tbl)].columns = dbcolumns[(sch, tbl)]
                for col in dbcolumns[(sch, tbl)]:
                    col._table = self[(sch, tbl)]
        for (sch, tbl) in self:
            table = self[(sch, tbl)]
            if isinstance(table, Sequence) and hasattr(table, 'owner_table'):
                # owner_column arrives from the catalogs as a 1-based column
                # number; translate it into the column's name.
                if isinstance(table.owner_column, int):
                    table.owner_column = self[(sch, table.owner_table)]. \
                        column_names()[table.owner_column - 1]
            elif isinstance(table, Table) and hasattr(table, 'inherits'):
                for partbl in table.inherits:
                    (parsch, partbl) = split_schema_obj(partbl)
                    # NOTE: assert statements are stripped under python -O.
                    assert self[(parsch, partbl)]
                    parent = self[(parsch, partbl)]
                    if not hasattr(parent, '_descendants'):
                        parent._descendants = []
                    parent._descendants.append(table)
        # Distribute constraints by kind onto their tables.
        for (sch, tbl, cns) in dbconstrs:
            constr = dbconstrs[(sch, tbl, cns)]
            if hasattr(constr, 'target'):
                continue
            assert self[(sch, tbl)]
            constr._table = table = self[(sch, tbl)]
            if isinstance(constr, CheckConstraint):
                if not hasattr(table, 'check_constraints'):
                    table.check_constraints = {}
                table.check_constraints.update({cns: constr})
            elif isinstance(constr, PrimaryKey):
                table.primary_key = constr
            elif isinstance(constr, ForeignKey):
                if not hasattr(table, 'foreign_keys'):
                    table.foreign_keys = {}
                # link referenced and referrer
                constr.references = self[(constr.ref_schema, constr.ref_table)]
                # TODO: there can be more than one
                self[(constr.ref_schema, constr.ref_table)]._referred_by = \
                    constr
                table.foreign_keys.update({cns: constr})
            elif isinstance(constr, UniqueConstraint):
                if not hasattr(table, 'unique_constraints'):
                    table.unique_constraints = {}
                table.unique_constraints.update({cns: constr})
        def link_one(targdict, schema, tbl, objkey, objtype):
            # Add one index/rule/trigger to the named per-table dictionary,
            # creating the dictionary on first use.
            table = self[(schema, tbl)]
            if not hasattr(table, objtype):
                setattr(table, objtype, {})
            objdict = getattr(table, objtype)
            objdict.update({objkey: targdict[(schema, tbl, objkey)]})
        for (sch, tbl, idx) in dbindexes:
            link_one(dbindexes, sch, tbl, idx, 'indexes')
        for (sch, tbl, rul) in dbrules:
            link_one(dbrules, sch, tbl, rul, 'rules')
            dbrules[(sch, tbl, rul)]._table = self[(sch, tbl)]
        for (sch, tbl, trg) in dbtriggers:
            link_one(dbtriggers, sch, tbl, trg, 'triggers')
            dbtriggers[(sch, tbl, trg)]._table = self[(sch, tbl)]
| bsd-3-clause |
pyconca/2017-web | config/settings/common.py | 1 | 9927 | # -*- coding: utf-8 -*-
"""
Django settings for PyCon Canada 2017 project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import environ
ROOT_DIR = environ.Path(__file__) - 3
APPS_DIR = ROOT_DIR.path('pyconca2017')
env = environ.Env()
try:
env.read_env(ROOT_DIR.file('.env'))
except FileNotFoundError:
pass
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
'allauth.socialaccount.providers.github',
"bootstrapform",
"django_bleach",
"easy_thumbnails",
"django_extensions",
)
# Apps specific for this project go here.
LOCAL_APPS = (
'pyconca2017.users.apps.UsersConfig',
'pyconca2017.pycon_sponsors',
'pyconca2017.pycon_schedule',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE = (
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
# MIGRATIONS
# ------------------------------------------------------------------------------
# Use the project-local copy of the django.contrib.sites migrations.
MIGRATION_MODULES = {
    'sites': 'pyconca2017.contrib.sites.migrations'
}

# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', False)

# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
    str(APPS_DIR.path('fixtures')),
)

# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')

# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
    ("""PyCon Canada Organizers""", 'organizers@pycon.ca'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS

# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
    'default': env.db('DATABASE_URL', default='postgres:///pyconca2017'),
}
# Wrap each HTTP request in a database transaction.
DATABASES['default']['ATOMIC_REQUESTS'] = True

# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Montreal'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# Imported mid-file (hence the NOQA) so that the LANGUAGES entries below can
# be marked for translation without restructuring this settings module.
from django.utils.translation import ugettext_lazy as _  # NOQA
LANGUAGES = [
    ('en', _('English')),
    ('fr', _('French')),
]
LOCALE_PATHS = [
    str(APPS_DIR.path('locale')),
]
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True

# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
    {
        # See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
        'DIRS': [
            str(APPS_DIR.path('templates')),
        ],
        'OPTIONS': {
            # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
            'debug': DEBUG,
            # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
            # https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
            'loaders': [
                'django.template.loaders.filesystem.Loader',
                'django.template.loaders.app_directories.Loader',
            ],
            # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
                'pyconca2017.context_processors.conference_context',
                # Your stuff: custom template context processors go here
            ],
        },
    },
]
# See: http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap4'

# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
    str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)

# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'

# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'

# PASSWORD VALIDATION
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
# ------------------------------------------------------------------------------
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
ACCOUNT_ADAPTER = 'pyconca2017.users.adapters.AccountAdapter'
SOCIALACCOUNT_ADAPTER = 'pyconca2017.users.adapters.SocialAccountAdapter'
SOCIALACCOUNT_PROVIDERS = {
    'github': {
        'SCOPE': [
            'user',
            'repo',
            'read:org',
        ],
    }
}

# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'

# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'

# django-compressor
# ------------------------------------------------------------------------------
INSTALLED_APPS += ("compressor", )
STATICFILES_FINDERS += ("compressor.finders.CompressorFinder", )

# Location of root django.contrib.admin URL, use {% url 'admin:index' %}
ADMIN_URL = r'^admin/'

# Your common stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
PAPERCALL_TOKEN = env('PAPERCALL_TOKEN', default='')
| mit |
simonwydooghe/ansible | test/units/modules/network/check_point/test_cp_mgmt_service_dce_rpc.py | 19 | 4165 | # Ansible module to manage CheckPoint Firewall (c) 2019
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from units.modules.utils import set_module_args, exit_json, fail_json, AnsibleExitJson
from ansible.module_utils import basic
from ansible.modules.network.check_point import cp_mgmt_service_dce_rpc
# Canonical service object as the Check Point API returns it after creation.
OBJECT = {
    "name": "New_DCE-RPC_Service_1",
    "interface_uuid": "97aeb460-9aea-11d5-bd16-0090272ccb30",
    "keep_connections_open_after_policy_installation": False
}

# Module arguments used to create the object (mirrors OBJECT).
CREATE_PAYLOAD = {
    "name": "New_DCE-RPC_Service_1",
    "interface_uuid": "97aeb460-9aea-11d5-bd16-0090272ccb30",
    "keep_connections_open_after_policy_installation": False
}

# Module arguments used to update the object.
UPDATE_PAYLOAD = {
    "name": "New_DCE-RPC_Service_1",
    "color": "blue",
    "interface_uuid": "44aeb460-9aea-11d5-bd16-009027266b30"
}

# The update call is expected to echo the payload back unchanged.
OBJECT_AFTER_UPDATE = UPDATE_PAYLOAD

# Module arguments used to delete the object.
DELETE_PAYLOAD = {
    "name": "New_DCE-RPC_Service_1",
    "state": "absent"
}

# Dotted path of the api_call function that the tests patch out.
function_path = 'ansible.modules.network.check_point.cp_mgmt_service_dce_rpc.api_call'
# API object type; also the key under which the module returns the object.
api_call_object = 'service-dce-rpc'
class TestCheckpointServiceDceRpc(object):
    """Unit tests for the cp_mgmt_service_dce_rpc Ansible module.

    The underlying ``api_call`` helper is mocked out, so these tests only
    verify that the module surfaces the mocked changed/object results
    through its exit JSON.
    """

    module = cp_mgmt_service_dce_rpc

    @pytest.fixture(autouse=True)
    def module_mock(self, mocker):
        # Replace AnsibleModule's exit/fail handlers so the module raises
        # AnsibleExitJson instead of calling sys.exit().
        return mocker.patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json)

    @pytest.fixture
    def connection_mock(self, mocker):
        # Stub out the HTTP(S) connection to the Check Point management server.
        connection_class_mock = mocker.patch('ansible.module_utils.network.checkpoint.checkpoint.Connection')
        return connection_class_mock.return_value

    def test_create(self, mocker, connection_mock):
        # Creating a new object reports changed and returns the object.
        mock_function = mocker.patch(function_path)
        mock_function.return_value = {'changed': True, api_call_object: OBJECT}
        result = self._run_module(CREATE_PAYLOAD)
        assert result['changed']
        assert OBJECT.items() == result[api_call_object].items()

    def test_create_idempotent(self, mocker, connection_mock):
        # Re-creating an identical object reports no change.
        mock_function = mocker.patch(function_path)
        mock_function.return_value = {'changed': False, api_call_object: OBJECT}
        result = self._run_module(CREATE_PAYLOAD)
        assert not result['changed']

    def test_update(self, mocker, connection_mock):
        # Updating an existing object reports changed plus the updated object.
        mock_function = mocker.patch(function_path)
        mock_function.return_value = {'changed': True, api_call_object: OBJECT_AFTER_UPDATE}
        result = self._run_module(UPDATE_PAYLOAD)
        assert result['changed']
        assert OBJECT_AFTER_UPDATE.items() == result[api_call_object].items()

    def test_update_idempotent(self, mocker, connection_mock):
        # Re-applying the same update reports no change.
        mock_function = mocker.patch(function_path)
        mock_function.return_value = {'changed': False, api_call_object: OBJECT_AFTER_UPDATE}
        result = self._run_module(UPDATE_PAYLOAD)
        assert not result['changed']

    def test_delete(self, mocker, connection_mock):
        # Deleting an existing object reports changed.
        mock_function = mocker.patch(function_path)
        mock_function.return_value = {'changed': True}
        result = self._run_module(DELETE_PAYLOAD)
        assert result['changed']

    def test_delete_idempotent(self, mocker, connection_mock):
        # Deleting an already-absent object reports no change.
        mock_function = mocker.patch(function_path)
        mock_function.return_value = {'changed': False}
        result = self._run_module(DELETE_PAYLOAD)
        assert not result['changed']

    def _run_module(self, module_args):
        # Run the module's main() and return the JSON it would have exited with.
        set_module_args(module_args)
        with pytest.raises(AnsibleExitJson) as ex:
            self.module.main()
        return ex.value.args[0]
| gpl-3.0 |
JPFrancoia/scikit-learn | examples/ensemble/plot_forest_iris.py | 335 | 6271 | """
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and the sepal
length features only, on the second row using the petal length and sepal length
only, and on the third row using the petal width and the petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10 fold cross validation, we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but
the average score does not improve).
See the console's output for further details about each model.
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores as each tree is built independently of the others. AdaBoost's
samples are built sequentially and so do not use multiple cores.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt

from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
                              AdaBoostClassifier)
from sklearn.externals.six.moves import xrange
from sklearn.tree import DecisionTreeClassifier

# Parameters
n_classes = 3
n_estimators = 30
plot_colors = "ryb"
cmap = plt.cm.RdYlBu
plot_step = 0.02  # fine step width for decision surface contours
plot_step_coarser = 0.5  # step widths for coarse classifier guesses
RANDOM_SEED = 13  # fix the seed on each iteration

# Load data
iris = load_iris()

plot_idx = 1

# The four classifiers compared in each column of the figure.
models = [DecisionTreeClassifier(max_depth=None),
          RandomForestClassifier(n_estimators=n_estimators),
          ExtraTreesClassifier(n_estimators=n_estimators),
          AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
                             n_estimators=n_estimators)]

# One row of subplots per feature pair, one column per model.
for pair in ([0, 1], [0, 2], [2, 3]):
    for model in models:
        # We only take the two corresponding features
        X = iris.data[:, pair]
        y = iris.target

        # Shuffle
        idx = np.arange(X.shape[0])
        np.random.seed(RANDOM_SEED)
        np.random.shuffle(idx)
        X = X[idx]
        y = y[idx]

        # Standardize
        mean = X.mean(axis=0)
        std = X.std(axis=0)
        X = (X - mean) / std

        # Train
        # NOTE(review): the clone is immediately discarded — fit is called on
        # `model` itself on the next line, so `model` is mutated in place.
        clf = clone(model)
        clf = model.fit(X, y)

        # NOTE(review): score is computed on the training data, so it
        # overstates generalization — acceptable for this plotting demo.
        scores = clf.score(X, y)
        # Create a title for each column and the console by using str() and
        # slicing away useless parts of the string
        model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")]
        model_details = model_title
        if hasattr(model, "estimators_"):
            model_details += " with {} estimators".format(len(model.estimators_))
        print(model_details + " with features", pair, "has a score of", scores)

        plt.subplot(3, 4, plot_idx)
        if plot_idx <= len(models):
            # Add a title at the top of each column
            plt.title(model_title)

        # Now plot the decision boundary using a fine mesh as input to a
        # filled contour plot
        x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
        y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
        xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
                             np.arange(y_min, y_max, plot_step))

        # Plot either a single DecisionTreeClassifier or alpha blend the
        # decision surfaces of the ensemble of classifiers
        if isinstance(model, DecisionTreeClassifier):
            Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
            Z = Z.reshape(xx.shape)
            cs = plt.contourf(xx, yy, Z, cmap=cmap)
        else:
            # Choose alpha blend level with respect to the number of estimators
            # that are in use (noting that AdaBoost can use fewer estimators
            # than its maximum if it achieves a good enough fit early on)
            estimator_alpha = 1.0 / len(model.estimators_)
            for tree in model.estimators_:
                Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])
                Z = Z.reshape(xx.shape)
                cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)

        # Build a coarser grid to plot a set of ensemble classifications
        # to show how these are different to what we see in the decision
        # surfaces. These points are regularly space and do not have a black outline
        xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser),
                                             np.arange(y_min, y_max, plot_step_coarser))
        Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape)
        cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none")

        # Plot the training points, these are clustered together and have a
        # black outline
        for i, c in zip(xrange(n_classes), plot_colors):
            idx = np.where(y == i)
            plt.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i],
                        cmap=cmap)

        plot_idx += 1  # move on to the next plot in sequence

plt.suptitle("Classifiers on feature subsets of the Iris dataset")
plt.axis("tight")

plt.show()
| bsd-3-clause |
wang16/swtoolkit | site_scons/site_tools/code_signing.py | 21 | 5108 | #!/usr/bin/python2.4
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Code signing build tool.
This module sets up code signing.
It is used as follows:
env = Environment(tools = ["code_signing"])
To sign an EXE/DLL do:
env.SignedBinary('hello_signed.exe', 'hello.exe',
CERTIFICATE_FILE='bob.pfx',
CERTIFICATE_PASSWORD='123',
TIMESTAMP_SERVER='')
If no certificate file is specified, copying instead of signing will occur.
If an empty timestamp server string is specified, there will be no timestamp.
"""
import optparse
from SCons.compat._scons_optparse import OptionConflictError
import SCons.Script
def generate(env):
  # NOTE: SCons requires the use of this name, which fails gpylint.
  """SCons entry point for this tool.

  Registers the --certificate-name command-line option, fills in the
  signing-related construction variables with defaults, and attaches the
  SignedBinary builder to the environment.
  """
  try:
    SCons.Script.AddOption('--certificate-name',
                           dest='certificate_name',
                           help='select which certificate to use')
    SCons.Script.Help(
        ' --certificate-name <NAME> select which signing certificate to use')
  except (OptionConflictError, optparse.OptionConflictError):
    # This gets caught to prevent duplicate help being added for this option
    # for each build type.
    pass

  env.SetDefault(
      # Path to Microsoft signtool.exe
      SIGNTOOL='"$VC80_DIR/common7/tools/bin/signtool.exe"',
      # No certificate by default.
      CERTIFICATE_PATH='',
      # No certificate password by default.
      CERTIFICATE_PASSWORD='',
      # The default timestamp server.
      TIMESTAMP_SERVER='http://timestamp.verisign.com/scripts/timestamp.dll',
      # The default certificate store.
      CERTIFICATE_STORE='my',
      # Set the certificate name from the command line.
      CERTIFICATE_NAME=SCons.Script.GetOption('certificate_name'),
  )

  # Setup Builder for Signing
  env['BUILDERS']['SignedBinary'] = SCons.Script.Builder(
      generator=SignedBinaryGenerator,
      emitter=SignedBinaryEmitter)
def SignedBinaryEmitter(target, source, env):
  """Make the signed target additionally depend on the certificate file, if any."""
  certificate_path = env.subst('$CERTIFICATE_PATH')
  if certificate_path:
    source.append(certificate_path)
  return target, source
def SignedBinaryGenerator(source, target, env, for_signature):
  """A builder generator for code signing.

  Returns the list of actions to run: always a copy + chmod of the source
  to the target; plus a signtool invocation when a certificate path or a
  certificate name is configured.
  """
  source = source                  # Silence gpylint.
  target = target                  # Silence gpylint.
  for_signature = for_signature    # Silence gpylint.

  # Alway copy and make writable.
  # NOTE(review): 0755 is a Python 2 octal literal; under Python 3 this would
  # need to be 0o755.
  commands = [
      SCons.Script.Copy('$TARGET', '$SOURCE'),
      SCons.Script.Chmod('$TARGET', 0755),
  ]

  # Only do signing if there is a certificate file or certificate name.
  if env.subst('$CERTIFICATE_PATH') or env.subst('$CERTIFICATE_NAME'):
    # The command used to do signing (target added on below).
    signing_cmd = '$SIGNTOOL sign '
    # Add in certificate file if any.
    if env.subst('$CERTIFICATE_PATH'):
      signing_cmd += ' /f "$CERTIFICATE_PATH"'
    # Add certificate password if any.
    if env.subst('$CERTIFICATE_PASSWORD'):
      signing_cmd += ' /p "$CERTIFICATE_PASSWORD"'
    # Add certificate store if any.
    if env.subst('$CERTIFICATE_NAME'):
      # Sign using a named certificate from the certificate store.
      signing_cmd += ' /s "$CERTIFICATE_STORE" /n "$CERTIFICATE_NAME"'
    # Add timestamp server if any.
    if env.subst('$TIMESTAMP_SERVER'):
      signing_cmd += ' /t "$TIMESTAMP_SERVER"'
    # Add in target name
    signing_cmd += ' "$TARGET"'
    # Add the signing to the list of commands to perform.
    commands.append(signing_cmd)

  return commands
| bsd-3-clause |
dubourg/openturns | python/test/t_ZipfMandelbrot_std.py | 2 | 2964 | #! /usr/bin/env python
from __future__ import print_function
from openturns import *
TESTPREAMBLE()
RandomGenerator.SetSeed(0)

# NOTE(review): the bare except at the bottom deliberately catches everything
# so the test script reports the failure instead of crashing the test driver.
try:
    # Instantiate one distribution object
    distribution = ZipfMandelbrot(15, 1.2, 2.)
    print("Distribution ", repr(distribution))
    print("Distribution ", distribution)

    # Is this distribution elliptical ?
    print("Elliptical = ", distribution.isElliptical())

    # Is this distribution continuous ?
    print("Continuous = ", distribution.isContinuous())

    # Test for realization of distribution
    oneRealization = distribution.getRealization()
    print("oneRealization=", repr(oneRealization))

    # Test for getRange
    print("getRange=", repr(distribution.getRange()))

    # Test for sampling
    size = 1000
    oneSample = distribution.getSample(size)
    # NOTE(review): labelled "last" but prints element 1, not element
    # size - 1 — presumably a typo; kept as-is to preserve expected output.
    print("oneSample first=", repr(oneSample[0]), " last=", repr(oneSample[1]))
    print("mean=", repr(oneSample.computeMean()))
    print("covariance=", repr(oneSample.computeCovariance()))
    # Disabled chi-squared goodness-of-fit check for the generator:
    ## size = 100;
    # for i in range(2):
    ## msg = ''
    # if FittingTest::ChiSquared(distribution.getSample(size), distribution).getBinaryQualityMeasure():
    ## msg = "accepted"
    # else:
    ## msg = "rejected"
    # print "ChiSquare test for the generator, sample size=", size, " is", msg
    ## size *= 10;

    # Define a point
    point = NumericalPoint(distribution.getDimension(), 5.0)
    print("Point= ", repr(point))

    # Show PDF and CDF of point
    eps = 1e-5

    # PDF value
    PDF = distribution.computePDF(point)
    print("pdf =%.6f" % PDF)
    # by the finite difference technique from CDF
    print("pdf (FD)=%.6f" % (distribution.computeCDF(
        point + NumericalPoint(1, 0)) - distribution.computeCDF(point + NumericalPoint(1, -1))))

    # CDF value
    CDF = distribution.computeCDF(point)
    print("cdf=%.6f" % CDF)

    # quantile
    quantile = distribution.computeQuantile(0.95)
    print("quantile=", repr(quantile))
    print("cdf(quantile)=%.6f" % distribution.computeCDF(quantile))

    # Moments and parameters of the distribution.
    mean = distribution.getMean()
    print("mean=", repr(mean))
    standardDeviation = distribution.getStandardDeviation()
    print("standard deviation=", repr(standardDeviation))
    skewness = distribution.getSkewness()
    print("skewness=", repr(skewness))
    kurtosis = distribution.getKurtosis()
    print("kurtosis=", repr(kurtosis))
    covariance = distribution.getCovariance()
    print("covariance=", repr(covariance))
    parameters = distribution.getParametersCollection()
    print("parameters=", repr(parameters))
    for i in range(6):
        print("standard moment n=", i, " value=",
              distribution.getStandardMoment(i))
    print("Standard representative=", distribution.getStandardRepresentative())

except:
    import sys
    print("t_ZipfMandelbrot_std.py", sys.exc_info()[0], sys.exc_info()[1])
| gpl-3.0 |
hectord/lettuce | tests/functional/test_terrain.py | 18 | 2374 | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falcão <gabriel@nacaolivre.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import commands
from os.path import dirname, abspath, join, curdir
from nose.tools import assert_equals, with_setup
from tests.asserts import prepare_stdout
def test_imports_terrain_under_path_that_is_run():
    # Running lettuce from inside a feature directory must import that
    # directory's terrain.py, which sets world.works_fine.
    old_path = abspath(curdir)
    os.chdir(join(abspath(dirname(__file__)), 'simple_features', '1st_feature_dir'))

    status, output = commands.getstatusoutput('python -c "from lettuce import world;assert hasattr(world, \'works_fine\'); print \'it passed!\'"')
    assert_equals(status, 0)
    assert_equals(output, "it passed!")

    # Restore the working directory for subsequent tests.
    os.chdir(old_path)
@with_setup(prepare_stdout)
def test_after_each_all_is_executed_before_each_all():
    "terrain.before.each_all and terrain.after.each_all decorators"
    from lettuce import step
    from lettuce import Runner
    from lettuce.terrain import before, after, world

    # Record the order in which hooks and steps fire.
    world.all_steps = []

    @before.all
    def set_state_to_before():
        world.all_steps.append('before')

    @step('append 1 in world all steps')
    def append_1_in_world_all_steps(step):
        world.all_steps.append("1")

    @step('append 2 more')
    def append_2_more(step):
        world.all_steps.append("2")

    @step('append 3 in world all steps')
    def append_during_to_all_steps(step):
        world.all_steps.append("3")

    @after.all
    def set_state_to_after(total):
        world.all_steps.append('after')

    runner = Runner(join(abspath(dirname(__file__)), 'simple_features', '2nd_feature_dir'))
    runner.run()

    # before.all fires first, then the three steps in order, then after.all.
    assert_equals(
        world.all_steps,
        ['before', '1', '2', '3', 'after']
    )
| gpl-3.0 |
hsaputra/tensorflow | tensorflow/python/debug/wrappers/framework.py | 9 | 34269 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Framework of debug wrapper sessions.
A debug wrapper session is a wrapper around a TensorFlow Python Session.
The wrapper preserves the Session interface, most importantly the run() method,
while providing abilities to:
a) Intercept a run() call to a wrapped session and insert debug tensor watches
according to externally-specified debug URLs.
b) Release control to an external (i.e., non-Session) object before and after
the run() call, so that the external object can perform actions such as
launching a UI to let users inspect the intermediate tensors and partition
graphs from the run() call.
c) (To be implemented) Intercept a run() call and give control to DebugStepper
to let it perform stepping / continuing-to actions on the graph.
b) (To be implemented in a future CL) Enter an instruction loop to let an
external object (e.g., remote client) launch run() and cont() calls
remotely.
*** The lifetime of a debug wrapper session: ***
1) The wrapper session is created by calling the constructor with a
wrapped (normal) session as the argument:
wrapper = FooDebugWrapperSession(sess)
wherein FooDebugWrapperSession is a concrete subclass implementing the
abstract BaseDebugWrapperSession class below.
2) Near the end of the constructor call, the on_session_init() callback is
invoked, with a OnSessionInitRequest object as the argument. The object
carries the wrapped (normal) session object.
3) The callback handles the request and returns a OnSessionInitResponse
object with an action field, directing the wrapper session what to do next.
If the action field in the OnSessionInitResponse is PROCEED, the constructor
returns. Control is released back to the caller of the constructor, which can
invoke run() method of wrapper session with the same syntax as a non-wrapped
session, e.g.,:
wrapper.run(fetches, feed_dict=feeds, options=run_options)
Below, A1 - A2 is the lifetime of a wrapper run() call if the action is
PROCEED:
A1) Right at the start of each run() call, the on_run_start() callback is
invoked, with an OnRunStartRequest object carrying information such as
the fetches, the feed dict, the run options and run metadata used in
this run call, along with a count of how many run calls has occurred
on this wrapper session. The callback then returns an OnRunStartResponse
object, of which the action field directs what the wrapper session
actually will do of the run() call.
If the action is DEBUG_RUN, a debugged (tensor-watched) run will ensue,
with the debug URLs supplied in the debug_urls field of the response.
These can be file:// or grpc:// URLs, for example.
If the action is NON_DEBUG_RUN, a non-debug (normal) run will ensue.
If the action is INVOKE_STEPPER, no run() call will be issued to the
wrapped session. But instead, a DebugStepper (i.e., "continuation
debugger") will be used to perform stepping / continue-to actions on
the graph.
TODO(cais): The event loop for the DebugStepper will request additional
callbacks including on_cont_start() and on_cont_end(). Add those.
A2) Right before the run() returns, the on_run_end() callback is invoked,
with an OnRunEndRequest object as the argument, which carries information
including the actual action performed in the wrapper run() call and the
run_metadata from the run() call.
However, if the action field in OnSessionInitResponse is
REMOTE_INSTR_LOOP, the constructor will automatically invoke an instruction loop
that gives the control to a remote caller.
In the remote instruction loop, the following steps will happen:
B1) Callback on_instr_start() is invoked. The callback will return an
OnInstrStartResponse object with an action field which can order one of
the following actions:
i) a run() call with fetches, feeds and debug_urls specified.
ii) a DebugStepper cont() call with target specified.
iii) value overrides in the cached tensors from the DebugStepper.
iv) exit the instruction loop.
B2) The wrapper session carries out the action specified above.
B3) If still in the instruction loop, the wrapper session invokes the
on_instr_end() callback. After the on_instr_end() callback returns, jump
back to B1.
TODO(cais): Implemented the instruction loop in B1 - B3.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import re
import threading
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.debug.lib import stepper
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.training import monitored_session
# Helper function.
def _check_type(obj, expected_types):
"""Check if an object is of the expected type.
Args:
obj: The object being checked.
expected_types: (`type` or an iterable of `type`s) The expected `type`(s)
of obj.
Raises:
TypeError: If obj is not an instance of expected_type.
"""
if not isinstance(obj, expected_types):
raise TypeError("Expected type %s; got type %s" %
(expected_types, type(obj)))
class OnSessionInitRequest(object):
  """Request to an on-session-init callback.

  This callback is invoked during the __init__ call to a debug-wrapper session.
  """

  def __init__(self, sess):
    """Constructor.

    Args:
      sess: A tensorflow Session object.
    """

    # Reject anything that is not a (possibly monitored) Session up front.
    _check_type(sess, (session.BaseSession, monitored_session.MonitoredSession))
    # The wrapped (non-debug) session object, exposed to the callback.
    self.session = sess
class OnSessionInitAction(object):
  """Enum-like values for possible action to take on session init."""

  # Proceed, without special actions, in the wrapper session initialization.
  # What action the wrapper session performs next is determined by the caller
  # of the wrapper session. E.g., it can call run().
  PROCEED = "proceed"

  # Instead of letting the caller of the wrapper session determine what actions
  # the wrapper session will perform next, enter a loop to receive instructions
  # from a remote client.
  # For example, TensorBoard visual debugger can use this action so that it can
  # launch session.run() calls remotely.
  REMOTE_INSTR_LOOP = "remote_instr_loop"
class OnSessionInitResponse(object):
  """Response from an on-session-init callback."""

  def __init__(self, action):
    """Constructor.

    Args:
      action: (`OnSessionInitAction`) Debugger action to take on session init.
    """
    # The action values are plain strings (see OnSessionInitAction), so only
    # the type is validated here, not membership in the enum.
    _check_type(action, str)
    self.action = action
class OnRunStartRequest(object):
  """Request delivered to an on-run-start callback.

  Issued during a run() call of the debug-wrapper session, right after the
  run() call counter has been incremented.
  """

  def __init__(self, fetches, feed_dict, run_options, run_metadata,
               run_call_count, is_callable_runner=False):
    """Record the arguments of the run() call for the callback to inspect.

    Args:
      fetches: Fetch targets of the run() call.
      feed_dict: Feed dictionary of the run() call.
      run_options: RunOptions passed to the run() call.
      run_metadata: RunMetadata passed to the run() call.
        The above four arguments mirror the input arguments of the run()
        method of a non-wrapped TensorFlow session.
      run_call_count: 1-based count of run() calls, including this one.
      is_callable_runner: (bool) whether a runner returned by
        Session.make_callable is being run.
    """
    self.run_call_count = run_call_count
    self.is_callable_runner = is_callable_runner
    self.fetches = fetches
    self.feed_dict = feed_dict
    self.run_options = run_options
    self.run_metadata = run_metadata
class OnRunStartAction(object):
  """Enum-like values for possible action to take on start of a run() call."""

  # Run once with debug tensor-watching enabled.
  DEBUG_RUN = "debug_run"

  # Run once with the profiler enabled.
  PROFILE_RUN = "profile_run"

  # Run without any debug tensor-watching.
  NON_DEBUG_RUN = "non_debug_run"

  # Instead of running the fetches as a whole, invoke the debug stepper.
  # TODO(cais): Remove "to-be-implemented".
  INVOKE_STEPPER = "invoke_stepper"
class OnRunStartResponse(object):
  """Request from an on-run-start callback.

  The caller of the callback can use this response object to specify what
  action the debug-wrapper session actually takes on the run() call.
  """

  def __init__(self,
               action,
               debug_urls,
               debug_ops="DebugIdentity",
               node_name_regex_whitelist=None,
               op_type_regex_whitelist=None,
               tensor_dtype_regex_whitelist=None,
               tolerate_debug_op_creation_failures=False):
    """Constructor of `OnRunStartResponse`.

    Args:
      action: (`OnRunStartAction`) the action actually taken by the wrapped
        session for the run() call.
      debug_urls: (`list` of `str`) debug_urls used in watching the tensors
        during the run() call.
      debug_ops: (`str` or `list` of `str`) Debug op(s) to be used by the
        debugger.
      node_name_regex_whitelist: Regular-expression whitelist for node name.
      op_type_regex_whitelist: Regular-expression whitelist for op type.
      tensor_dtype_regex_whitelist: Regular-expression whitelist for tensor
        dtype.
      tolerate_debug_op_creation_failures: Whether debug op creation failures
        are to be tolerated.
    """
    # Validate the two required fields first; `action` before `debug_urls`,
    # matching the order in which callers expect TypeErrors to surface.
    _check_type(action, str)
    _check_type(debug_urls, list)

    self.action = action
    self.debug_urls = debug_urls
    self.debug_ops = debug_ops
    self.node_name_regex_whitelist = node_name_regex_whitelist
    self.op_type_regex_whitelist = op_type_regex_whitelist
    self.tensor_dtype_regex_whitelist = tensor_dtype_regex_whitelist
    self.tolerate_debug_op_creation_failures = (
        tolerate_debug_op_creation_failures)
class OnRunEndRequest(object):
  """Request to an on-run-end callback.

  The callback is invoked immediately before the wrapped run() call ends.
  """

  def __init__(self,
               performed_action,
               run_metadata=None,
               client_graph_def=None,
               tf_error=None):
    """Constructor for `OnRunEndRequest`.

    Args:
      performed_action: (`OnRunStartAction`) Actually-performed action by the
        debug-wrapper session.
      run_metadata: run_metadata output from the run() call (if any).
      client_graph_def: (GraphDef) GraphDef from the client side, i.e., from
        the python front end of TensorFlow. Can be obtained with
        session.graph.as_graph_def().
      tf_error: (errors.OpError subtypes) TensorFlow OpError that occurred
        during the run (if any).
    """
    _check_type(performed_action, str)
    self.performed_action = performed_action

    # run_metadata is optional; only type-check it when supplied.
    if run_metadata is not None:
      _check_type(run_metadata, config_pb2.RunMetadata)
    self.run_metadata = run_metadata
    self.client_graph_def = client_graph_def
    self.tf_error = tf_error
class OnRunEndResponse(object):
  """Response from an on-run-end callback."""

  def __init__(self):
    # Currently carries no payload; exists so the callback protocol has a
    # well-defined return type.
    pass
class BaseDebugWrapperSession(session.SessionInterface):
  """Base class of debug-wrapper session classes.

  Concrete classes that inherit from this class need to implement the abstract
  methods such as on_session_init, on_run_start and on_run_end.
  """

  # TODO(cais): Add on_cont_start and on_cont_end callbacks once the stepper
  # is available.

  def __init__(self, sess, thread_name_filter=None,
               pass_through_operrors=False):
    """Constructor of `BaseDebugWrapperSession`.

    Args:
      sess: An (unwrapped) TensorFlow session instance. It should be a subtype
        of `BaseSession` or `tf.MonitoredSession`.
      thread_name_filter: Regular-expression filter (whitelist) for name(s) of
        thread(s) on which the wrapper session will be active. This regular
        expression is used in a start-anchored fashion on the thread name,
        i.e., by applying the `match` method of the compiled pattern. The
        default `None` means that the wrapper session will be active on all
        threads. E.g., r"MainThread$", r"QueueRunnerThread.*".
      pass_through_operrors: If True, all captured OpErrors will be
        propagated. By default this captures all OpErrors.

    Raises:
      ValueError: On invalid `OnSessionInitAction` value.
      NotImplementedError: If a non-DirectSession sess object is received.
    """
    _check_type(sess, (session.BaseSession, monitored_session.MonitoredSession))

    # The session being wrapped.
    self._sess = sess
    # Pre-compile the thread-name whitelist; None disables thread filtering
    # (see _is_disabled_thread).
    self._thread_name_filter_pattern = (re.compile(thread_name_filter)
                                        if thread_name_filter else None)
    # TODO(cais/kstevens): Unittest this pass through feature.
    self._pass_through_operrors = pass_through_operrors

    # Keeps track of number of run calls that have been performed on this
    # debug-wrapper session. The count can be used for purposes such as
    # displaying the state of the Session in a UI and determining a run
    # number-dependent debug URL.
    self._run_call_count = 0

    # Invoke on-session-init callback (implemented by the concrete subclass).
    response = self.on_session_init(OnSessionInitRequest(self._sess))
    _check_type(response, OnSessionInitResponse)

    if response.action == OnSessionInitAction.PROCEED:
      pass
    elif response.action == OnSessionInitAction.REMOTE_INSTR_LOOP:
      # TODO(cais): Implement REMOTE_INSTR_LOOP
      raise NotImplementedError(
          "OnSessionInitAction REMOTE_INSTR_LOOP has not been "
          "implemented.")
    else:
      raise ValueError(
          "Invalid OnSessionInitAction value: %s" % response.action)

    # Created lazily on first use by __enter__ (see as_default()).
    self._default_session_context_manager = None
  @property
  def graph(self):
    # Delegate to the wrapped session so the wrapper is a drop-in
    # replacement for tf.Session.
    return self._sess.graph

  @property
  def graph_def(self):
    # Delegate to the wrapped session.
    return self._sess.graph_def

  @property
  def sess_str(self):
    # Delegate to the wrapped session.
    return self._sess.sess_str

  @property
  def session(self):
    # The underlying (unwrapped) session object.
    return self._sess
def run(self,
fetches,
feed_dict=None,
options=None,
run_metadata=None,
callable_runner=None,
callable_runner_args=None):
"""Wrapper around Session.run() that inserts tensor watch options.
Args:
fetches: Same as the `fetches` arg to regular `Session.run()`.
feed_dict: Same as the `feed_dict` arg to regular `Session.run()`.
options: Same as the `options` arg to regular `Session.run()`.
run_metadata: Same as the `run_metadata` arg to regular `Session.run()`.
callable_runner: A `callable` returned by `Session.make_callable()`.
If not `None`, `fetches` and `feed_dict` must both be `None`.
callable_runner_args: An optional list of arguments to `callable_runner`.
Returns:
Simply forwards the output of the wrapped `Session.run()` call.
Raises:
ValueError: On invalid `OnRunStartAction` value. Or if `callable_runner`
is not `None` and either or both of `fetches` and `feed_dict` is `None`.
"""
if not callable_runner:
self.increment_run_call_count()
else:
if fetches or feed_dict:
raise ValueError(
"callable_runner and fetches/feed_dict are mutually exclusive, but "
"are used simultaneously.")
if self._is_disabled_thread():
if callable_runner:
return callable_runner(*callable_runner_args)
else:
return self._sess.run(fetches,
feed_dict=feed_dict,
options=options,
run_metadata=run_metadata)
# Invoke on-run-start callback and obtain response.
run_start_resp = self.on_run_start(
OnRunStartRequest(fetches, feed_dict, options, run_metadata,
self._run_call_count,
is_callable_runner=bool(callable_runner)))
_check_type(run_start_resp, OnRunStartResponse)
if run_start_resp.action == OnRunStartAction.DEBUG_RUN:
# Decorate RunOption to fill in debugger tensor watch specifications.
decorated_run_options = options or config_pb2.RunOptions()
run_metadata = run_metadata or config_pb2.RunMetadata()
self._decorate_run_options_for_debug(
decorated_run_options,
run_start_resp.debug_urls,
debug_ops=run_start_resp.debug_ops,
node_name_regex_whitelist=run_start_resp.node_name_regex_whitelist,
op_type_regex_whitelist=run_start_resp.op_type_regex_whitelist,
tensor_dtype_regex_whitelist=(
run_start_resp.tensor_dtype_regex_whitelist),
tolerate_debug_op_creation_failures=(
run_start_resp.tolerate_debug_op_creation_failures))
# Invoke the run() method of the wrapped Session. Catch any TensorFlow
# runtime errors.
tf_error = None
try:
if callable_runner:
retvals = callable_runner(*callable_runner_args,
options=decorated_run_options,
run_metadata=run_metadata)
else:
retvals = self._sess.run(fetches,
feed_dict=feed_dict,
options=decorated_run_options,
run_metadata=run_metadata)
except errors.OpError as op_error:
if self._pass_through_operrors:
raise op_error
tf_error = op_error
retvals = op_error
run_end_req = OnRunEndRequest(
run_start_resp.action,
run_metadata=run_metadata,
client_graph_def=self._sess.graph.as_graph_def(),
tf_error=tf_error)
elif run_start_resp.action == OnRunStartAction.PROFILE_RUN:
decorated_run_options = options or config_pb2.RunOptions()
run_metadata = run_metadata or config_pb2.RunMetadata()
self._decorate_run_options_for_profile(decorated_run_options)
if callable_runner:
retvals = callable_runner(*callable_runner_args,
options=decorated_run_options,
run_metadata=run_metadata)
else:
retvals = self._sess.run(fetches,
feed_dict=feed_dict,
options=decorated_run_options,
run_metadata=run_metadata)
run_end_req = OnRunEndRequest(
run_start_resp.action,
run_metadata=run_metadata,
client_graph_def=self._sess.graph.as_graph_def())
elif (run_start_resp.action == OnRunStartAction.NON_DEBUG_RUN or
run_start_resp.action == OnRunStartAction.INVOKE_STEPPER):
if callable_runner:
raise NotImplementedError(
"Stepper mode is not implemented for callables created by "
"Session.make_callable().")
if run_start_resp.action == OnRunStartAction.INVOKE_STEPPER:
with stepper.NodeStepper(
self._sess, fetches, feed_dict) as node_stepper:
retvals = self.invoke_node_stepper(
node_stepper, restore_variable_values_on_exit=True)
# Invoke run() method of the wrapped session.
retvals = self._sess.run(
fetches,
feed_dict=feed_dict,
options=options,
run_metadata=run_metadata)
# Prepare arg for the on-run-end callback.
run_end_req = OnRunEndRequest(run_start_resp.action)
else:
raise ValueError(
"Invalid OnRunStartAction value: %s" % run_start_resp.action)
# Invoke on-run-end callback and obtain response.
run_end_resp = self.on_run_end(run_end_req)
_check_type(run_end_resp, OnRunEndResponse)
# Currently run_end_resp is only a placeholder. No action is taken on it.
return retvals
  def _is_disabled_thread(self):
    # The wrapper is disabled on this thread iff a thread-name filter is set
    # and the current thread's name does not match it (start-anchored match).
    thread_name = threading.current_thread().name or ""
    return (self._thread_name_filter_pattern and
            not self._thread_name_filter_pattern.match(thread_name))

  def run_step_fn(self, step_fn):
    # MonitoredSession compatibility: hand step_fn a StepContext whose run()
    # routes back through this wrapper.
    return step_fn(
        monitored_session.MonitoredSession.StepContext(self._sess, self.run))

  def partial_run_setup(self, fetches, feeds=None):
    """Sets up the feeds and fetches for partial runs in the session."""
    # Deliberately unsupported: partial runs bypass the debugger callbacks.
    raise NotImplementedError(
        "partial_run_setup is not implemented for debug-wrapper sessions.")

  def partial_run(self, handle, fetches, feed_dict=None):
    # Deliberately unsupported (see partial_run_setup).
    raise NotImplementedError(
        "partial_run is not implemented for debug-wrapper sessions.")

  def list_devices(self, *args, **kwargs):
    # Pure delegation to the wrapped session.
    return self._sess.list_devices(*args, **kwargs)

  def reset(self, *args, **kwargs):
    # Pure delegation to the wrapped session.
    return self._sess.reset(*args, **kwargs)

  def make_callable(self,
                    fetches,
                    feed_list=None,
                    accept_options=False):
    # Always request an options-accepting callable from the wrapped session
    # (accept_options=True) so the debugger can inject decorated RunOptions;
    # the caller-facing `accept_options` argument is intentionally unused.
    runner = self._sess.make_callable(
        fetches, feed_list=feed_list, accept_options=True)

    def wrapped_runner(*runner_args, **kwargs):
      # Route execution through self.run() so debugger callbacks still fire.
      return self.run(None,
                      feed_dict=None,
                      options=kwargs.get("options", None),
                      run_metadata=kwargs.get("run_metadata", None),
                      callable_runner=runner,
                      callable_runner_args=runner_args)

    return wrapped_runner

  @property
  def run_call_count(self):
    # Number of run() calls performed through this wrapper so far.
    return self._run_call_count

  def increment_run_call_count(self):
    # Called at the start of each non-callable run() (see run()).
    self._run_call_count += 1
  def _decorate_run_options_for_debug(
      self,
      run_options,
      debug_urls,
      debug_ops="DebugIdentity",
      node_name_regex_whitelist=None,
      op_type_regex_whitelist=None,
      tensor_dtype_regex_whitelist=None,
      tolerate_debug_op_creation_failures=False):
    """Modify a RunOptions object for debug tensor watching.

    Specifies request for outputting partition graphs. Adds
    debug_tensor_watch_opts with proper debug URLs.

    Args:
      run_options: (RunOptions) the modified RunOptions object.
      debug_urls: (list of str) debug URLs to be entered in run_options.
        debug_tensor_watch_opts.
      debug_ops: (str or list of str) debug op(s) to be used by the debugger.
      node_name_regex_whitelist: Regular-expression whitelist for node
        name.
      op_type_regex_whitelist: Regular-expression whitelist for op type.
      tensor_dtype_regex_whitelist: Regular-expression whitelist for tensor
        dtype.
      tolerate_debug_op_creation_failures: Whether debug op creation failures
        are to be tolerated.
    """
    run_options.output_partition_graphs = True
    # debug_utils.watch_graph mutates run_options in place; no return value.
    debug_utils.watch_graph(
        run_options,
        self._sess.graph,
        debug_urls=debug_urls,
        debug_ops=debug_ops,
        node_name_regex_whitelist=node_name_regex_whitelist,
        op_type_regex_whitelist=op_type_regex_whitelist,
        tensor_dtype_regex_whitelist=tensor_dtype_regex_whitelist,
        tolerate_debug_op_creation_failures=tolerate_debug_op_creation_failures)

  def _decorate_run_options_for_profile(self, run_options):
    """Modify a RunOptions object for profiling TensorFlow graph execution.

    Args:
      run_options: (RunOptions) the modified RunOptions object.
    """
    # Request the most detailed tracing level for the profiled run.
    run_options.trace_level = config_pb2.RunOptions.FULL_TRACE
  # --- Abstract callback interface to be implemented by concrete wrappers ---

  @abc.abstractmethod
  def on_session_init(self, request):
    """Callback invoked during construction of the debug-wrapper session.

    This is a blocking callback.
    The invocation happens right before the constructor ends.

    Args:
      request: (`OnSessionInitRequest`) callback request carrying information
        such as the session being wrapped.

    Returns:
      An instance of `OnSessionInitResponse`.
    """

  @abc.abstractmethod
  def on_run_start(self, request):
    """Callback invoked on run() calls to the debug-wrapper session.

    This is a blocking callback.
    The invocation happens after the wrapper's run() call is entered,
    after an increment of run call counter.

    Args:
      request: (`OnRunStartRequest`) callback request object carrying
        information about the run call such as the fetches, feed dict, run
        options, run metadata, and how many `run()` calls to this wrapper
        session have occurred.

    Returns:
      An instance of `OnRunStartResponse`, carrying information to
        1) direct the wrapper session to perform a specified action (e.g., run
          with or without debug tensor watching, invoking the stepper.)
        2) debug URLs used to watch the tensors.
    """

  @abc.abstractmethod
  def on_run_end(self, request):
    """Callback invoked on run() calls to the debug-wrapper session.

    This is a blocking callback.
    The invocation happens right before the wrapper exits its run() call.

    Args:
      request: (`OnRunEndRequest`) callback request object carrying information
        such as the actual action performed by the session wrapper for the
        run() call.

    Returns:
      An instance of `OnRunEndResponse`.
    """
  def as_default(self):
    # Make this wrapper the default session, mirroring tf.Session.as_default().
    return ops.default_session(self)

  def __enter__(self):
    # Lazily create and cache the default-session context manager so the
    # wrapper itself can be used in a `with` statement.
    if self._default_session_context_manager is None:
      self._default_session_context_manager = self.as_default()
    return self._default_session_context_manager.__enter__()

  def __exit__(self, exec_type, exec_value, exec_tb):
    self._default_session_context_manager.__exit__(
        exec_type, exec_value, exec_tb)

  def __del__(self):
    # Delegate finalization to the wrapped session, if it defines one.
    if hasattr(self._sess, "__del__"):
      self._sess.__del__()

  def close(self):
    self._sess.close()

  # TODO(cais): Add _node_name_regex_whitelist and
  # _node_op_type_regex_whitelist.

  @abc.abstractmethod
  def invoke_node_stepper(self,
                          node_stepper,
                          restore_variable_values_on_exit=True):
    """Callback invoked when the client intends to step through graph nodes.

    Args:
      node_stepper: (stepper.NodeStepper) An instance of NodeStepper to be used
        in this stepping session.
      restore_variable_values_on_exit: (bool) Whether any variables whose
        values have been altered during this node-stepper invocation should be
        restored to their old values when this invocation ends.

    Returns:
      The same return values as the `Session.run()` call on the same fetches
      as the NodeStepper.
    """

  def should_stop(self):
    # MonitoredSession compatibility: delegate if supported; otherwise fail
    # loudly rather than silently returning a default.
    if hasattr(self._sess, "should_stop"):
      return self._sess.should_stop()
    else:
      raise ValueError(
          "The wrapped session %r does not have a method called 'should_stop'. "
          "Do you intend to wrap a tf.MonitoredSession instead?" % self._sess)
class WatchOptions(object):
  """Type for return values of watch_fn."""

  def __init__(self,
               debug_ops=None,
               node_name_regex_whitelist=None,
               op_type_regex_whitelist=None,
               tensor_dtype_regex_whitelist=None,
               tolerate_debug_op_creation_failures=False):
    """Constructor of WatchOptions: Debug watch options.

    Used as return values of `watch_fn`s.

    Args:
      debug_ops: (`str` or `list of str`) Debug ops to be used.
      node_name_regex_whitelist: Regular-expression whitelist for node_name,
        e.g., `"(weight_[0-9]+|bias_.*)"`
      op_type_regex_whitelist: Regular-expression whitelist for the op type of
        nodes, e.g., `"(Variable|Add)"`.
        If both `node_name_regex_whitelist` and `op_type_regex_whitelist`
        are set, the two filters combine in a logical `AND` relation: a node
        is included if and only if it hits both whitelists.
      tensor_dtype_regex_whitelist: Regular-expression whitelist for Tensor
        data type, e.g., `"^int.*"`.
        This whitelist also combines in logical `AND` with the two whitelists
        above.
      tolerate_debug_op_creation_failures: (`bool`) whether debug op creation
        failures (e.g., due to dtype incompatibility) are to be tolerated by
        not throwing exceptions.
    """
    # Fall back to the identity debug op when none is specified.
    self.debug_ops = debug_ops if debug_ops else ["DebugIdentity"]
    self.node_name_regex_whitelist = node_name_regex_whitelist
    self.op_type_regex_whitelist = op_type_regex_whitelist
    self.tensor_dtype_regex_whitelist = tensor_dtype_regex_whitelist
    self.tolerate_debug_op_creation_failures = (
        tolerate_debug_op_creation_failures)

  def __repr__(self):
    return ("WatchOptions(debug_ops=%r, node_name_regex_whitelist=%r, "
            "op_type_regex_whitelist=%r, tensor_dtype_regex_whitelist=%r, "
            "tolerate_debug_op_creation_failures=%r)" % (
                self.debug_ops, self.node_name_regex_whitelist,
                self.op_type_regex_whitelist, self.tensor_dtype_regex_whitelist,
                self.tolerate_debug_op_creation_failures))
class NonInteractiveDebugWrapperSession(BaseDebugWrapperSession):
  """Base class for non-interactive (i.e., non-CLI) debug wrapper sessions."""

  def __init__(self, sess, watch_fn=None, thread_name_filter=None,
               pass_through_operrors=False):
    """Constructor of NonInteractiveDebugWrapperSession.

    Args:
      sess: The TensorFlow `Session` object being wrapped.
      watch_fn: (`Callable`) A Callable that maps the fetches and feeds of a
        debugged `Session.run()` call to `WatchOptions.`
        * Args:
          * `fetches`: the fetches to the `Session.run()` call.
          * `feeds`: the feeds to the `Session.run()` call.

        * Returns:
         (`tf_debug.WatchOptions`) An object containing debug options including
         the debug ops to use, the node names, op types and/or tensor data
         types to watch, etc. See the documentation of `tf_debug.WatchOptions`
         for more details.
      thread_name_filter: Regular-expression white list for threads on which
        the wrapper session will be active. See doc of
        `BaseDebugWrapperSession` for more details.
      pass_through_operrors: If true, all captured OpErrors will be
        propagated. By default this captures all OpErrors.

    Raises:
      TypeError: If a non-None `watch_fn` is specified and it is not callable.
    """
    BaseDebugWrapperSession.__init__(
        self, sess, thread_name_filter=thread_name_filter,
        pass_through_operrors=pass_through_operrors)

    # A None watch_fn means "use default WatchOptions for every run".
    if watch_fn is not None and not callable(watch_fn):
      raise TypeError("watch_fn is not callable")
    self._watch_fn = watch_fn

  def on_session_init(self, request):
    """See doc of BaseDebugWrapperSession.on_run_start."""
    return OnSessionInitResponse(OnSessionInitAction.PROCEED)

  @abc.abstractmethod
  def prepare_run_debug_urls(self, fetches, feed_dict):
    """Abstract method to be implemented by concrete subclasses.

    This method prepares the run-specific debug URL(s).

    Args:
      fetches: Same as the `fetches` argument to `Session.run()`
      feed_dict: Same as the `feed_dict` argument to `Session.run()`

    Returns:
      debug_urls: (`str` or `list` of `str`) Debug URLs to be used in
        this `Session.run()` call.
    """

  def on_run_start(self, request):
    """See doc of BaseDebugWrapperSession.on_run_start."""
    debug_urls, watch_opts = self._prepare_run_watch_config(
        request.fetches, request.feed_dict)

    # Non-interactive wrappers always perform a debug run.
    return OnRunStartResponse(
        OnRunStartAction.DEBUG_RUN,
        debug_urls,
        debug_ops=watch_opts.debug_ops,
        node_name_regex_whitelist=watch_opts.node_name_regex_whitelist,
        op_type_regex_whitelist=watch_opts.op_type_regex_whitelist,
        tensor_dtype_regex_whitelist=watch_opts.tensor_dtype_regex_whitelist,
        tolerate_debug_op_creation_failures=(
            watch_opts.tolerate_debug_op_creation_failures))

  def _prepare_run_watch_config(self, fetches, feed_dict):
    """Get the debug_urls, and node/op whitelists for the current run() call.

    Args:
      fetches: Same as the `fetches` argument to `Session.run()`.
      feed_dict: Same as the `feed_dict argument` to `Session.run()`.

    Returns:
      debug_urls: (str or list of str) Debug URLs for the current run() call.
        Currently, the list consists of only one URL that is a file:// URL.
      watch_options: (WatchOptions) The return value of a watch_fn, containing
        options including debug_ops, and whitelists.
    """
    debug_urls = self.prepare_run_debug_urls(fetches, feed_dict)
    if self._watch_fn is None:
      return debug_urls, WatchOptions()

    watch_options = self._watch_fn(fetches, feed_dict)
    if isinstance(watch_options, tuple):
      # Legacy watch_fns returned a plain tuple; adapt it to WatchOptions.
      watch_options = WatchOptions(*watch_options)
    return debug_urls, watch_options

  def on_run_end(self, request):
    """See doc of BaseDebugWrapperSession.on_run_end."""
    return OnRunEndResponse()

  def invoke_node_stepper(self,
                          node_stepper,
                          restore_variable_values_on_exit=True):
    """See doc of BaseDebugWrapperSession.invoke_node_stepper."""
    raise NotImplementedError(
        "NonInteractiveDebugWrapperSession does not support node-stepper mode.")
| apache-2.0 |
mtlchun/edx | common/lib/xmodule/xmodule/tests/xml/test_inheritance.py | 193 | 1689 | """
Test that inherited fields work correctly when parsing XML
"""
from nose.tools import assert_equals, assert_in # pylint: disable=no-name-in-module
from xmodule.tests.xml import XModuleXmlImportTest
from xmodule.tests.xml.factories import CourseFactory, SequenceFactory, ProblemFactory, XmlImportFactory
class TestInheritedFieldParsing(XModuleXmlImportTest):
    """
    Test that inherited fields work correctly when parsing XML
    """

    def test_null_string(self):
        # The literal string "null" in XML should be deserialized (via
        # 'deserialize_field') to the Python value None at every level of
        # the course hierarchy.
        course_xml = CourseFactory.build(days_early_for_beta="null")
        sequence_xml = SequenceFactory.build(parent=course_xml)
        ProblemFactory.build(parent=sequence_xml)

        course = self.process_xml(course_xml)
        assert_equals(None, course.days_early_for_beta)

        sequence_block = course.get_children()[0]
        assert_equals(None, sequence_block.days_early_for_beta)

        problem_block = sequence_block.get_children()[0]
        assert_equals(None, problem_block.days_early_for_beta)

    def test_video_attr(self):
        """
        Test that video's definition_from_xml handles unknown attrs w/o choking
        """
        # Fixes LMS-11491
        course_xml = CourseFactory.build()
        sequence_xml = SequenceFactory.build(parent=course_xml)
        video_xml = XmlImportFactory(
            parent=sequence_xml,
            tag='video',
            attribs={
                'parent_url': 'foo', 'garbage': 'asdlk',
                'download_video': 'true',
            }
        )
        video_block = self.process_xml(video_xml)
        # Unknown attributes should survive import in xml_attributes.
        assert_in('garbage', video_block.xml_attributes)
| agpl-3.0 |
thaim/ansible | lib/ansible/modules/network/fortios/fortios_web_proxy_forward_server_group.py | 14 | 12179 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
# Standard Ansible module metadata: community-supported module in preview
# status, using metadata format 1.1.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_web_proxy_forward_server_group
short_description: Configure a forward server group consisting or multiple forward servers. Supports failover and load balancing in Fortinet's FortiOS and
FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify web_proxy feature and forward_server_group category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
web_proxy_forward_server_group:
description:
- Configure a forward server group consisting or multiple forward servers. Supports failover and load balancing.
default: null
type: dict
suboptions:
affinity:
description:
- Enable/disable affinity, attaching a source-ip's traffic to the assigned forwarding server until the forward-server-affinity-timeout is
reached (under web-proxy global).
type: str
choices:
- enable
- disable
group_down_option:
description:
- "Action to take when all of the servers in the forward server group are down: block sessions until at least one server is back up or
pass sessions to their destination."
type: str
choices:
- block
- pass
ldb_method:
description:
- "Load balance method: weighted or least-session."
type: str
choices:
- weighted
- least-session
name:
description:
- Configure a forward server group consisting one or multiple forward servers. Supports failover and load balancing.
required: true
type: str
server_list:
description:
- Add web forward servers to a list to form a server group. Optionally assign weights to each server.
type: list
suboptions:
name:
description:
- Forward server name. Source web-proxy.forward-server.name.
required: true
type: str
weight:
description:
- Optionally assign a weight of the forwarding server for weighted load balancing (1 - 100)
type: int
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure a forward server group consisting or multiple forward servers. Supports failover and load balancing.
fortios_web_proxy_forward_server_group:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
web_proxy_forward_server_group:
affinity: "enable"
group_down_option: "block"
ldb_method: "weighted"
name: "default_name_6"
server_list:
-
name: "default_name_8 (source web-proxy.forward-server.name)"
weight: "9"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Authenticate against the FortiGate device (legacy fortiosapi mode).

    Args:
        data: Module parameter dict containing 'host', 'username',
            'password', 'https' and 'ssl_verify'.
        fos: FortiOSAPI instance on which to perform the login.
    """
    host = data['host']
    username = data['username']
    password = data['password']
    ssl_verify = data['ssl_verify']

    fos.debug('on')
    # HTTPS is the default transport; it is disabled only when the 'https'
    # parameter is present and falsy.
    fos.https('on' if data.get('https', True) else 'off')

    fos.login(host, username, password, verify=ssl_verify)
def filter_web_proxy_forward_server_group_data(json):
    """Return a copy of ``json`` restricted to this module's known options.

    Keys outside the option list and keys whose value is None are dropped.
    """
    option_list = ['affinity', 'group_down_option', 'ldb_method',
                   'name', 'server_list']

    return {option: json[option]
            for option in option_list
            if option in json and json[option] is not None}
def underscore_to_hyphen(data):
    """Recursively convert underscores to hyphens in dict keys.

    Ansible parameters use underscores while the FortiOS API expects
    hyphenated keys. Values (including strings) are left untouched; only
    dict keys are rewritten.

    Args:
        data: A dict, list, or scalar to convert.

    Returns:
        The converted structure (dicts are rebuilt; lists are updated in
        place; scalars are returned unchanged).
    """
    if isinstance(data, list):
        # Write the converted element back into the list. Merely rebinding
        # the loop variable (the previous implementation) discarded the
        # conversion for dict elements nested inside lists.
        for index, elem in enumerate(data):
            data[index] = underscore_to_hyphen(elem)
    elif isinstance(data, dict):
        new_data = {}
        for k, v in data.items():
            new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
        data = new_data

    return data
def web_proxy_forward_server_group(data, fos):
    """Create/update or delete the forward-server-group object on the device.

    Args:
        data: Module parameters including 'vdom', 'state' and the
            'web_proxy_forward_server_group' option dict.
        fos: FortiOSHandler/FortiOSAPI instance used to talk to the device.

    Returns:
        The raw response dict from the FortiOS API call.
    """
    vdom = data['vdom']
    state = data['state']
    raw_options = data['web_proxy_forward_server_group']
    # Strip unknown/None options and translate key names for the API.
    payload = underscore_to_hyphen(
        filter_web_proxy_forward_server_group_data(raw_options))

    if state == "present":
        return fos.set('web-proxy',
                       'forward-server-group',
                       data=payload,
                       vdom=vdom)
    elif state == "absent":
        return fos.delete('web-proxy',
                          'forward-server-group',
                          mkey=payload['name'],
                          vdom=vdom)
def is_successful_status(status):
    """Decide whether a FortiOS API response represents success.

    A DELETE that returns HTTP 404 is treated as success because the object
    is already absent (idempotent removal).
    """
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_web_proxy(data, fos):
    # Dispatch to the single resource handled by this module and normalize
    # the API response into (is_error, has_changed, raw_response).
    # NOTE(review): if data['web_proxy_forward_server_group'] is None/empty,
    # `resp` is never bound and the return below raises NameError. main()
    # appears to always supply this key -- confirm before relying on that.
    if data['web_proxy_forward_server_group']:
        resp = web_proxy_forward_server_group(data, fos)

    return not is_successful_status(resp), \
        resp['status'] == "success", \
        resp
def main():
    """Module entry point: parse arguments, connect, apply config, report."""
    # Ansible argument spec; option names use underscores and are converted
    # to FortiOS hyphenated names before transmission.
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "state": {"required": True, "type": "str",
                  "choices": ["present", "absent"]},
        "web_proxy_forward_server_group": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "affinity": {"required": False, "type": "str",
                             "choices": ["enable", "disable"]},
                "group_down_option": {"required": False, "type": "str",
                                      "choices": ["block", "pass"]},
                "ldb_method": {"required": False, "type": "str",
                               "choices": ["weighted", "least-session"]},
                "name": {"required": True, "type": "str"},
                "server_list": {"required": False, "type": "list",
                                "options": {
                                    "name": {"required": True, "type": "str"},
                                    "weight": {"required": False, "type": "int"}
                                }}
            }
        }
    }

    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)

    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
                  'username' in module.params and module.params['username'] is not None and \
                  'password' in module.params and module.params['password'] is not None

    if not legacy_mode:
        # HTTPAPI transport: requires a persistent connection socket
        # provided by the Ansible connection plugin.
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)

            is_error, has_changed, result = fortios_web_proxy(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        # Legacy transport: talk to the device directly via fortiosapi
        # (imported lazily so the module loads without it in HTTPAPI mode).
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")

        fos = FortiOSAPI()

        login(module.params, fos)
        is_error, has_changed, result = fortios_web_proxy(module.params, fos)
        fos.logout()

    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)
| mit |
pschmitt/home-assistant | tests/components/plum_lightpad/test_init.py | 4 | 3767 | """Tests for the Plum Lightpad config flow."""
from aiohttp import ContentTypeError
from requests.exceptions import HTTPError
from homeassistant.components.plum_lightpad.const import DOMAIN
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
from tests.async_mock import Mock, patch
from tests.common import MockConfigEntry
async def test_async_setup_no_domain_config(hass: HomeAssistant):
    """Test setup without configuration is noop."""
    assert await async_setup_component(hass, DOMAIN, {}) is True
    assert DOMAIN not in hass.data
async def test_async_setup_imports_from_config(hass: HomeAssistant):
    """Test that specifying config will setup an entry."""
    conf = {
        DOMAIN: {
            "username": "test-plum-username",
            "password": "test-plum-password",
        }
    }
    with patch(
        "homeassistant.components.plum_lightpad.utils.Plum.loadCloudData"
    ) as load_mock:
        with patch(
            "homeassistant.components.plum_lightpad.async_setup_entry",
            return_value=True,
        ) as setup_entry_mock:
            assert await async_setup_component(hass, DOMAIN, conf) is True
            await hass.async_block_till_done()

    assert len(load_mock.mock_calls) == 1
    assert len(setup_entry_mock.mock_calls) == 1
async def test_async_setup_entry_sets_up_light(hass: HomeAssistant):
    """Test that configuring entry sets up light domain."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data={"username": "test-plum-username", "password": "test-plum-password"},
    )
    entry.add_to_hass(hass)

    with patch(
        "homeassistant.components.plum_lightpad.utils.Plum.loadCloudData"
    ) as load_mock:
        with patch(
            "homeassistant.components.plum_lightpad.light.async_setup_entry"
        ) as light_setup_mock:
            assert await hass.config_entries.async_setup(entry.entry_id) is True
            await hass.async_block_till_done()

    assert len(load_mock.mock_calls) == 1
    assert len(light_setup_mock.mock_calls) == 1
async def test_async_setup_entry_handles_auth_error(hass: HomeAssistant):
    """Test that configuring entry handles Plum Cloud authentication error."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data={"username": "test-plum-username", "password": "test-plum-password"},
    )
    entry.add_to_hass(hass)

    # ContentTypeError is what aiohttp raises when the cloud rejects the
    # credentials; setup must fail without touching the light platform.
    with patch(
        "homeassistant.components.plum_lightpad.utils.Plum.loadCloudData",
        side_effect=ContentTypeError(Mock(), None),
    ):
        with patch(
            "homeassistant.components.plum_lightpad.light.async_setup_entry"
        ) as light_setup_mock:
            result = await hass.config_entries.async_setup(entry.entry_id)

    assert result is False
    assert len(light_setup_mock.mock_calls) == 0
async def test_async_setup_entry_handles_http_error(hass: HomeAssistant):
    """Test that configuring entry handles HTTP error."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data={"username": "test-plum-username", "password": "test-plum-password"},
    )
    entry.add_to_hass(hass)

    with patch(
        "homeassistant.components.plum_lightpad.utils.Plum.loadCloudData",
        side_effect=HTTPError,
    ):
        with patch(
            "homeassistant.components.plum_lightpad.light.async_setup_entry"
        ) as light_setup_mock:
            result = await hass.config_entries.async_setup(entry.entry_id)

    assert result is False
    assert len(light_setup_mock.mock_calls) == 0
| apache-2.0 |
ProfHoekstra/bluesky | bluesky/traffic/traffic.py | 1 | 33317 | """ BlueSky traffic implementation."""
from __future__ import print_function
try:
from collections.abc import Collection
except ImportError:
# In python <3.3 collections.abc doesn't exist
from collections import Collection
from math import *
from random import randint
import numpy as np
import bluesky as bs
from bluesky.core import Entity, timed_function
from bluesky.stack import refdata
from bluesky.tools import geo
from bluesky.tools.misc import latlon2txt
from bluesky.tools.aero import cas2tas, casormach2tas, fpm, kts, ft, g0, Rearth, nm, tas2cas,\
vatmos, vtas2cas, vtas2mach, vcasormach
from bluesky.traffic.asas import ConflictDetection, ConflictResolution
from .windsim import WindSim
from .conditional import Condition
from .trails import Trails
from .adsbmodel import ADSB
from .aporasas import APorASAS
from .autopilot import Autopilot
from .activewpdata import ActiveWaypoint
from .turbulence import Turbulence
from .trafficgroups import TrafficGroups
from .performance.perfbase import PerfBase
# Register settings defaults
bs.settings.set_variable_defaults(performance_model='openap', asas_dt=1.0)
# if bs.settings.performance_model == 'bada':
# try:
# print('Using BADA Performance model')
# from .performance.bada.perfbada import PerfBADA as Perf
# except Exception as err:# ImportError as err:
# print(err)
# print('Falling back to Open Aircraft Performance (OpenAP) model')
# bs.settings.performance_model = "openap"
# from .performance.openap import OpenAP as Perf
# elif bs.settings.performance_model == 'openap':
# print('Using Open Aircraft Performance (OpenAP) model')
# from .performance.openap import OpenAP as Perf
# else:
# print('Using BlueSky legacy performance model')
# from .performance.legacy.perfbs import PerfBS as Perf
class Traffic(Entity):
"""
Traffic class definition : Traffic data
Methods:
Traffic() : constructor
reset() : Reset traffic database w.r.t a/c data
create(acid,actype,aclat,aclon,achdg,acalt,acspd) : create aircraft
delete(acid) : delete an aircraft from traffic data
deletall() : delete all traffic
update(sim) : do a numerical integration step
id2idx(name) : return index in traffic database of given call sign
engchange(i,engtype) : change engine type of an aircraft
setnoise(A) : Add turbulence
Members: see create
Created by : Jacco M. Hoekstra
"""
def __init__(self):
super().__init__()
# Traffic is the toplevel trafficarrays object
self.setroot(self)
self.ntraf = 0
self.cond = Condition() # Conditional commands list
self.wind = WindSim()
self.turbulence = Turbulence()
self.translvl = 5000.*ft # [m] Default transition level
with self.settrafarrays():
# Aircraft Info
self.id = [] # identifier (string)
self.type = [] # aircaft type (string)
# Positions
self.lat = np.array([]) # latitude [deg]
self.lon = np.array([]) # longitude [deg]
self.distflown = np.array([]) # distance travelled [m]
self.alt = np.array([]) # altitude [m]
self.hdg = np.array([]) # traffic heading [deg]
self.trk = np.array([]) # track angle [deg]
# Velocities
self.tas = np.array([]) # true airspeed [m/s]
self.gs = np.array([]) # ground speed [m/s]
self.gsnorth = np.array([]) # ground speed [m/s]
self.gseast = np.array([]) # ground speed [m/s]
self.cas = np.array([]) # calibrated airspeed [m/s]
self.M = np.array([]) # mach number
self.vs = np.array([]) # vertical speed [m/s]
# Atmosphere
self.p = np.array([]) # air pressure [N/m2]
self.rho = np.array([]) # air density [kg/m3]
self.Temp = np.array([]) # air temperature [K]
self.dtemp = np.array([]) # delta t for non-ISA conditions
# Wind speeds
self.windnorth = np.array([]) # wind speed north component a/c pos [m/s]
self.windeast = np.array([]) # wind speed east component a/c pos [m/s]
# Traffic autopilot settings
self.selspd = np.array([]) # selected spd(CAS or Mach) [m/s or -]
self.aptas = np.array([]) # just for initializing
self.selalt = np.array([]) # selected alt[m]
self.selvs = np.array([]) # selected vertical speed [m/s]
# Whether to perform LNAV and VNAV
self.swlnav = np.array([], dtype=np.bool)
self.swvnav = np.array([], dtype=np.bool)
self.swvnavspd = np.array([], dtype=np.bool)
# Flight Models
self.cd = ConflictDetection()
self.cr = ConflictResolution()
self.ap = Autopilot()
self.aporasas = APorASAS()
self.adsb = ADSB()
self.trails = Trails()
self.actwp = ActiveWaypoint()
self.perf = PerfBase()
# Group Logic
self.groups = TrafficGroups()
# Traffic autopilot data
self.apvsdef = np.array([]) # [m/s]default vertical speed of autopilot
self.aphi = np.array([]) # [rad] bank angle setting of autopilot
self.ax = np.array([]) # [m/s2] absolute value of longitudinal accelleration
self.bank = np.array([]) # nominal bank angle, [radians]
self.swhdgsel = np.array([], dtype=np.bool) # determines whether aircraft is turning
# Traffic autothrottle settings
self.swats = np.array([], dtype=np.bool) # Switch indicating whether autothrottle system is on/off
self.thr = np.array([]) # Thottle seeting (0.0-1.0), negative = non-valid/auto
# Display information on label
self.label = [] # Text and bitmap of traffic label
# Miscallaneous
self.coslat = np.array([]) # Cosine of latitude for computations
self.eps = np.array([]) # Small nonzero numbers
self.work = np.array([]) # Work done throughout the flight
# Default bank angles per flight phase
self.bphase = np.deg2rad(np.array([15, 35, 35, 35, 15, 45]))
    def reset(self):
        ''' Clear all traffic data upon simulation reset.

        Order matters here: ntraf must be zeroed before the child resets
        run, and the dynamic arrays are cleared through super().reset().
        '''
        # Some child reset functions depend on a correct value of self.ntraf
        self.ntraf = 0

        # This ensures that the traffic arrays (which size is dynamic)
        # are all reset as well, so all lat,lon,sdp etc but also objects adsb
        super().reset()

        # reset performance model
        self.perf.reset()

        # Reset models
        self.wind.clear()

        # Build new modules for turbulence
        self.turbulence.reset()

        # Noise (turbulence, ADSB-transmission noise, ADSB-truncated effect)
        self.setnoise(False)

        # Reset transition level to default value
        self.translvl = 5000.*ft
def mcre(self, n, actype="B744", acalt=None, acspd=None, dest=None):
""" Create one or more random aircraft in a specified area """
area = bs.scr.getviewbounds()
# Generate random callsigns
idtmp = chr(randint(65, 90)) + chr(randint(65, 90)) + '{:>05}'
acid = [idtmp.format(i) for i in range(n)]
# Generate random positions
aclat = np.random.rand(n) * (area[1] - area[0]) + area[0]
aclon = np.random.rand(n) * (area[3] - area[2]) + area[2]
achdg = np.random.randint(1, 360, n)
acalt = acalt or np.random.randint(2000, 39000, n) * ft
acspd = acspd or np.random.randint(250, 450, n) * kts
self.cre(acid, actype, aclat, aclon, achdg, acalt, acspd)
# SAVEIC: save cre command when filled in
# Special provision in case SAVEIC is on: then save individual CRE commands
# Names of aircraft (acid) need to be recorded for saved future commands
# And positions need to be the same in case of *MCRE"
for i in range(n):
bs.stack.savecmd("CRE", " ".join(["CRE", acid[i], actype,
str(aclat[i]), str(aclon[i]),
str(int(round(achdg[i]))),
str(int(round(acalt[i]/ft))),
str(int(round(acspd[i]/kts)))]))
    def cre(self, acid, actype="B744", aclat=52., aclon=4., achdg=None, acalt=0, acspd=0):
        """ Create one or more aircraft.

        Scalar arguments are broadcast to all n aircraft; the new aircraft
        occupy the last n slots of every traffic array.
        """
        # Determine number of aircraft to create from array length of acid
        n = 1 if isinstance(acid, str) else len(acid)

        if isinstance(acid, str):
            # Check if not already exist
            if self.id.count(acid.upper()) > 0:
                return False, acid + " already exists."  # already exists do nothing
            acid = n * [acid]

        # Adjust the size of all traffic arrays
        super().create(n)
        self.ntraf += n

        if isinstance(actype, str):
            actype = n * [actype]

        if isinstance(aclat, (float, int)):
            aclat = np.array(n * [aclat])

        if isinstance(aclon, (float, int)):
            aclon = np.array(n * [aclon])

        # Limit longitude to [-180.0, 180.0]
        aclon[aclon > 180.0] -= 360.0
        aclon[aclon < -180.0] += 360.0

        # Default heading comes from the scenario reference data
        achdg = refdata.hdg if achdg is None else achdg

        # Aircraft Info
        self.id[-n:] = acid
        self.type[-n:] = actype

        # Positions
        self.lat[-n:] = aclat
        self.lon[-n:] = aclon
        self.alt[-n:] = acalt

        self.hdg[-n:] = achdg
        self.trk[-n:] = achdg

        # Velocities: acspd is interpreted as CAS or Mach depending on value
        self.tas[-n:], self.cas[-n:], self.M[-n:] = vcasormach(acspd, acalt)
        self.gs[-n:] = self.tas[-n:]
        hdgrad = np.radians(achdg)
        self.gsnorth[-n:] = self.tas[-n:] * np.cos(hdgrad)
        self.gseast[-n:] = self.tas[-n:] * np.sin(hdgrad)

        # Atmosphere
        self.p[-n:], self.rho[-n:], self.Temp[-n:] = vatmos(acalt)

        # Wind: only applied above 50 ft (i.e. when airborne)
        if self.wind.winddim > 0:
            applywind = self.alt[-n:] > 50.*ft
            self.windnorth[-n:], self.windeast[-n:] = self.wind.getdata(self.lat[-n:], self.lon[-n:], self.alt[-n:])
            self.gsnorth[-n:] = self.gsnorth[-n:] + self.windnorth[-n:]*applywind
            self.gseast[-n:] = self.gseast[-n:] + self.windeast[-n:]*applywind
            self.trk[-n:] = np.logical_not(applywind)*achdg + \
                applywind*np.degrees(np.arctan2(self.gseast[-n:], self.gsnorth[-n:]))
            self.gs[-n:] = np.sqrt(self.gsnorth[-n:]**2 + self.gseast[-n:]**2)
        else:
            self.windnorth[-n:] = 0.0
            self.windeast[-n:] = 0.0

        # Traffic performance data
        # (temporarily default values)
        self.apvsdef[-n:] = 1500. * fpm  # default vertical speed of autopilot
        self.aphi[-n:] = 0.              # bank angle output of autopilot (optional)
        self.ax[-n:] = kts               # absolute value of longitudinal accelleration
        self.bank[-n:] = np.radians(25.)

        # Traffic autopilot settings: select current speed/altitude
        self.selspd[-n:] = self.cas[-n:]
        self.aptas[-n:] = self.tas[-n:]
        self.selalt[-n:] = self.alt[-n:]

        # Display information on label
        self.label[-n:] = n*[['', '', '', 0]]

        # Miscallaneous
        self.coslat[-n:] = np.cos(np.radians(aclat))  # Cosine of latitude for flat-earth aproximations
        self.eps[-n:] = 0.01

        # Finally call create for child TrafficArrays. This only needs to be done
        # manually in Traffic.
        self.create_children(n)
    def creconfs(self, acid, actype, targetidx, dpsi, cpa, tlosh, dH=None, tlosv=None, spd=None):
        ''' Create an aircraft in conflict with target aircraft.

            Arguments:
            - acid: callsign of new aircraft
            - actype: aircraft type of new aircraft
            - targetidx: id (callsign) of target aircraft
            - dpsi: Conflict angle (angle between tracks of ownship and intruder) (deg)
            - cpa: Predicted distance at closest point of approach (NM)
            - tlosh: Horizontal time to loss of separation ((hh:mm:)sec)
            - dH: Vertical distance (ft)
            - tlosv: Vertical time to loss of separation
            - spd: Speed of new aircraft (CAS/Mach, kts/-)
        '''
        # Ownship (target) state used as the conflict reference
        latref = self.lat[targetidx]  # deg
        lonref = self.lon[targetidx]  # deg
        altref = self.alt[targetidx]  # m
        trkref = radians(self.trk[targetidx])
        gsref = self.gs[targetidx]    # m/s
        tasref = self.tas[targetidx]  # m/s
        vsref = self.vs[targetidx]    # m/s
        cpa = cpa * nm                # NM -> m
        pzr = bs.settings.asas_pzr * nm   # protected zone radius [m]
        pzh = bs.settings.asas_pzh * ft   # protected zone height [m]

        trk = trkref + radians(dpsi)

        if dH is None:
            # Co-altitude conflict
            acalt = altref
            acvs = 0.0
        else:
            # Vertical speed chosen so separation is lost after tlosv
            acalt = altref + dH
            tlosv = tlosh if tlosv is None else tlosv
            acvs = vsref - np.sign(dH) * (abs(dH) - pzh) / tlosv

        if spd:
            # CAS or Mach provided: convert to groundspeed, assuming that
            # wind at intruder position is similar to wind at ownship position
            # NOTE(review): inside 'if spd:' spd is truthy, so the 'tasref'
            # branch of this conditional is dead code — confirm intent.
            tas = tasref if spd is None else casormach2tas(spd, acalt)
            tasn, tase = tas * cos(trk), tas * sin(trk)
            wn, we = self.wind.getdata(latref, lonref, acalt)
            gsn, gse = tasn + wn, tase + we
        else:
            # Groundspeed is the same as ownship
            gsn, gse = gsref * cos(trk), gsref * sin(trk)

        # Horizontal relative velocity vector
        vreln, vrele = gsref * cos(trkref) - gsn, gsref * sin(trkref) - gse
        # Relative velocity magnitude
        vrel = sqrt(vreln * vreln + vrele * vrele)
        # Relative travel distance to closest point of approach
        drelcpa = tlosh * vrel + (0 if cpa > pzr else sqrt(pzr * pzr - cpa * cpa))
        # Initial intruder distance
        dist = sqrt(drelcpa * drelcpa + cpa * cpa)
        # Rotation matrix diagonal and cross elements for distance vector
        rd = drelcpa / dist
        rx = cpa / dist
        # Rotate relative velocity vector to obtain intruder bearing
        brn = degrees(atan2(-rx * vreln + rd * vrele,
                            rd * vreln + rx * vrele))

        # Calculate intruder lat/lon
        aclat, aclon = geo.kwikpos(latref, lonref, brn, dist / nm)

        # convert groundspeed to CAS, and track to heading using actual
        # intruder position
        wn, we = self.wind.getdata(aclat, aclon, acalt)
        tasn, tase = gsn - wn, gse - we
        acspd = tas2cas(sqrt(tasn * tasn + tase * tase), acalt)
        achdg = degrees(atan2(tase, tasn))

        # Create and, when necessary, set vertical speed
        self.cre(acid, actype, aclat, aclon, achdg, acalt, acspd)
        self.ap.selaltcmd(len(self.lat) - 1, altref, acvs)
        self.vs[-1] = acvs
def delete(self, idx):
"""Delete an aircraft"""
# If this is a multiple delete, sort first for list delete
# (which will use list in reverse order to avoid index confusion)
if isinstance(idx, Collection):
idx = np.sort(idx)
# Call the actual delete function
super().delete(idx)
# Update number of aircraft
self.ntraf = len(self.lat)
return True
    def update(self):
        ''' Perform one simulation timestep for all aircraft.

        The call order below is the simulation contract: atmosphere ->
        sensors -> guidance -> performance limits -> kinematics -> effects.
        '''
        # Update only if there is traffic ---------------------
        if self.ntraf == 0:
            return

        #---------- Atmosphere --------------------------------
        self.p, self.rho, self.Temp = vatmos(self.alt)

        #---------- ADSB Update -------------------------------
        self.adsb.update()

        #---------- Fly the Aircraft --------------------------
        self.ap.update()        # Autopilot logic
        self.update_asas()      # Airborne Separation Assurance
        self.aporasas.update()  # Decide to use autopilot or ASAS for commands

        #---------- Performance Update ------------------------
        self.perf.update()

        #---------- Limit commanded speeds based on performance ------------------------------
        self.aporasas.tas, self.aporasas.vs, self.aporasas.alt = \
            self.perf.limits(self.aporasas.tas, self.aporasas.vs,
                             self.aporasas.alt, self.ax)

        #---------- Kinematics --------------------------------
        self.update_airspeed()
        self.update_groundspeed()
        self.update_pos()

        #---------- Simulate Turbulence -----------------------
        self.turbulence.update()

        # Check whether new traffic state triggers conditional commands
        self.cond.update()

        #---------- Aftermath ---------------------------------
        self.trails.update()
    @timed_function(name='asas', dt=bs.settings.asas_dt, manual=True)
    def update_asas(self):
        ''' Periodic airborne separation assurance update (runs at asas_dt). '''
        # Conflict detection and resolution; ownship and intruder set are
        # both the full traffic object.
        self.cd.update(self, self)
        self.cr.update(self.cd, self, self)
    def update_airspeed(self):
        ''' Update TAS/CAS/Mach, heading and vertical speed for one timestep. '''
        # Compute horizontal acceleration; only accelerate when the speed
        # difference cannot be closed within a single timestep.
        delta_spd = self.aporasas.tas - self.tas
        ax = self.perf.acceleration()
        need_ax = np.abs(delta_spd) > np.abs(bs.sim.simdt * ax)
        self.ax = need_ax * np.sign(delta_spd) * ax
        # Update velocities
        self.tas = np.where(need_ax, self.tas + self.ax * bs.sim.simdt, self.aporasas.tas)
        self.cas = vtas2cas(self.tas, self.alt)
        self.M = vtas2mach(self.tas, self.alt)

        # Turning: rate follows the coordinated-turn relation
        # omega = g * tan(bank) / V, using aphi when set, else nominal bank
        turnrate = np.degrees(g0 * np.tan(np.where(self.aphi>self.eps,self.aphi,self.bank) \
                                          / np.maximum(self.tas, self.eps)))
        delhdg = (self.aporasas.hdg - self.hdg + 180) % 360 - 180  # [deg]
        self.swhdgsel = np.abs(delhdg) > np.abs(bs.sim.simdt * turnrate)

        # Update heading
        self.hdg = np.where(self.swhdgsel,
                            self.hdg + bs.sim.simdt * turnrate * np.sign(delhdg), self.aporasas.hdg) % 360.0

        # Update vertical speed: altitude-select logic with a capture band
        delta_alt = self.aporasas.alt - self.alt
        self.swaltsel = np.abs(delta_alt) > np.maximum(
            10 * ft, np.abs(2 * np.abs(bs.sim.simdt * self.vs)))
        target_vs = self.swaltsel * np.sign(delta_alt) * np.abs(self.aporasas.vs)
        delta_vs = target_vs - self.vs
        # print(delta_vs / fpm)
        need_az = np.abs(delta_vs) > 300 * fpm  # small threshold
        self.az = need_az * np.sign(delta_vs) * (300 * fpm)  # fixed vertical acc approx 1.6 m/s^2
        self.vs = np.where(need_az, self.vs+self.az*bs.sim.simdt, target_vs)
        self.vs = np.where(np.isfinite(self.vs), self.vs, 0)  # fix vs nan issue
    def update_groundspeed(self):
        ''' Derive ground speed and track from heading, airspeed and wind. '''
        # Compute ground speed and track from heading, airspeed and wind
        if self.wind.winddim == 0:  # no wind
            self.gsnorth = self.tas * np.cos(np.radians(self.hdg))
            self.gseast = self.tas * np.sin(np.radians(self.hdg))

            self.gs = self.tas
            self.trk = self.hdg
            self.windnorth[:], self.windeast[:] = 0.0, 0.0

        else:
            applywind = self.alt > 50.*ft  # Only apply wind when airborne

            vnwnd, vewnd = self.wind.getdata(self.lat, self.lon, self.alt)
            self.windnorth[:], self.windeast[:] = vnwnd, vewnd
            self.gsnorth = self.tas * np.cos(np.radians(self.hdg)) + self.windnorth*applywind
            self.gseast = self.tas * np.sin(np.radians(self.hdg)) + self.windeast*applywind

            # On the ground: GS == TAS and track == heading
            self.gs = np.logical_not(applywind)*self.tas + \
                applywind*np.sqrt(self.gsnorth**2 + self.gseast**2)

            self.trk = np.logical_not(applywind)*self.hdg + \
                applywind*np.degrees(np.arctan2(self.gseast, self.gsnorth)) % 360.

        # Accumulate work done (thrust times distance travelled this step)
        self.work += (self.perf.thrust * bs.sim.simdt * np.sqrt(self.gs * self.gs + self.vs * self.vs))
    def update_pos(self):
        ''' Integrate position (alt, lat, lon) over one timestep.

        Order matters: lat must be updated before coslat, which is needed
        for the longitude step (flat-earth correction).
        '''
        # Update position
        self.alt = np.where(self.swaltsel, np.round(self.alt + self.vs * bs.sim.simdt, 6), self.aporasas.alt)
        self.lat = self.lat + np.degrees(bs.sim.simdt * self.gsnorth / Rearth)
        self.coslat = np.cos(np.deg2rad(self.lat))
        self.lon = self.lon + np.degrees(bs.sim.simdt * self.gseast / self.coslat / Rearth)
        self.distflown += self.gs * bs.sim.simdt
def id2idx(self, acid):
"""Find index of aircraft id"""
if not isinstance(acid, str):
# id2idx is called for multiple id's
# Fast way of finding indices of all ACID's in a given list
tmp = dict((v, i) for i, v in enumerate(self.id))
return [tmp.get(acidi, -1) for acidi in acid]
else:
# Catch last created id (* or # symbol)
if acid in ('#', '*'):
return self.ntraf - 1
try:
return self.id.index(acid.upper())
except:
return -1
def setnoise(self, noise=None):
"""Noise (turbulence, ADBS-transmission noise, ADSB-truncated effect)"""
if noise is None:
return True, "Noise is currently " + ("on" if self.turbulence.active else "off")
self.turbulence.setnoise(noise)
self.adsb.setnoise(noise)
return True
def engchange(self, acid, engid):
"""Change of engines"""
self.perf.engchange(acid, engid)
return
def move(self, idx, lat, lon, alt=None, hdg=None, casmach=None, vspd=None):
self.lat[idx] = lat
self.lon[idx] = lon
if alt is not None:
self.alt[idx] = alt
self.selalt[idx] = alt
if hdg is not None:
self.hdg[idx] = hdg
self.ap.trk[idx] = hdg
if casmach is not None:
self.tas[idx], self.selspd[idx], _ = vcasormach(casmach, alt)
if vspd is not None:
self.vs[idx] = vspd
self.swvnav[idx] = False
    def nom(self, idx):
        """ Reset acceleration back to nominal (1 kt/s^2): NOM acid """
        # kts (one knot in m/s) doubles as the nominal 1 kt/s^2 longitudinal
        # acceleration magnitude used throughout this class.
        self.ax[idx] = kts  # [m/s2]
def poscommand(self, idxorwp):# Show info on aircraft(int) or waypoint or airport (str)
"""POS command: Show info or an aircraft, airport, waypoint or navaid"""
# Aircraft index
if type(idxorwp)==int and idxorwp >= 0:
idx = idxorwp
acid = self.id[idx]
actype = self.type[idx]
latlon = latlon2txt(self.lat[idx], self.lon[idx])
alt = round(self.alt[idx] / ft)
hdg = round(self.hdg[idx])
trk = round(self.trk[idx])
cas = round(self.cas[idx] / kts)
tas = round(self.tas[idx] / kts)
gs = round(self.gs[idx]/kts)
M = self.M[idx]
VS = round(self.vs[idx]/ft*60.)
route = self.ap.route[idx]
# Position report
lines = "Info on %s %s index = %d\n" %(acid, actype, idx) \
+ "Pos: "+latlon+ "\n" \
+ "Hdg: %03d Trk: %03d\n" %(hdg, trk) \
+ "Alt: %d ft V/S: %d fpm\n" %(alt,VS) \
+ "CAS/TAS/GS: %d/%d/%d kts M: %.3f\n"%(cas,tas,gs,M)
# FMS AP modes
if self.swlnav[idx] and route.nwp > 0 and route.iactwp >= 0:
if self.swvnav[idx]:
if self.swvnavspd[idx]:
lines = lines + "VNAV (incl.VNAVSPD), "
else:
lines = lines + "VNAV (NOT VNAVSPD), "
lines += "LNAV to " + route.wpname[route.iactwp] + "\n"
# Flight info: Destination and origin
if self.ap.orig[idx] != "" or self.ap.dest[idx] != "":
lines = lines + "Flying"
if self.ap.orig[idx] != "":
lines = lines + " from " + self.ap.orig[idx]
if self.ap.dest[idx] != "":
lines = lines + " to " + self.ap.dest[idx]
# Show a/c info and highlight route of aircraft in radar window
# and pan to a/c (to show route)
bs.scr.showroute(acid)
return True, lines
# Waypoint: airport, navaid or fix
else:
wp = idxorwp.upper()
# Reference position for finding nearest
reflat, reflon = bs.scr.getviewctr()
lines = "Info on "+wp+":\n"
# First try airports (most used and shorter, hence faster list)
iap = bs.navdb.getaptidx(wp)
if iap>=0:
aptypes = ["large","medium","small"]
lines = lines + bs.navdb.aptname[iap]+"\n" \
+ "is a "+ aptypes[max(-1,bs.navdb.aptype[iap]-1)] \
+" airport at:\n" \
+ latlon2txt(bs.navdb.aptlat[iap], \
bs.navdb.aptlon[iap]) + "\n" \
+ "Elevation: " \
+ str(int(round(bs.navdb.aptelev[iap]/ft))) \
+ " ft \n"
# Show country name
try:
ico = bs.navdb.cocode2.index(bs.navdb.aptco[iap].upper())
lines = lines + "in "+bs.navdb.coname[ico]+" ("+ \
bs.navdb.aptco[iap]+")"
except:
ico = -1
lines = lines + "Country code: "+bs.navdb.aptco[iap]
try:
runways = bs.navdb.rwythresholds[bs.navdb.aptid[iap]].keys()
if runways:
lines = lines + "\nRunways: " + ", ".join(runways)
except KeyError:
pass
# Not found as airport, try waypoints & navaids
else:
iwps = bs.navdb.getwpindices(wp,reflat,reflon)
if iwps[0]>=0:
typetxt = ""
desctxt = ""
lastdesc = "XXXXXXXX"
for i in iwps:
# One line type text
if typetxt == "":
typetxt = typetxt+bs.navdb.wptype[i]
else:
typetxt = typetxt+" and "+bs.navdb.wptype[i]
# Description: multi-line
samedesc = bs.navdb.wpdesc[i]==lastdesc
if desctxt == "":
desctxt = desctxt +bs.navdb.wpdesc[i]
lastdesc = bs.navdb.wpdesc[i]
elif not samedesc:
desctxt = desctxt +"\n"+bs.navdb.wpdesc[i]
lastdesc = bs.navdb.wpdesc[i]
# Navaid: frequency
if bs.navdb.wptype[i] in ["VOR","DME","TACAN"] and not samedesc:
desctxt = desctxt + " "+ str(bs.navdb.wpfreq[i])+" MHz"
elif bs.navdb.wptype[i]=="NDB" and not samedesc:
desctxt = desctxt+ " " + str(bs.navdb.wpfreq[i])+" kHz"
iwp = iwps[0]
# Basic info
lines = lines + wp +" is a "+ typetxt \
+ " at\n"\
+ latlon2txt(bs.navdb.wplat[iwp], \
bs.navdb.wplon[iwp])
# Navaids have description
if len(desctxt)>0:
lines = lines+ "\n" + desctxt
# VOR give variation
if bs.navdb.wptype[iwp]=="VOR":
lines = lines + "\nVariation: "+ \
str(bs.navdb.wpvar[iwp])+" deg"
# How many others?
nother = bs.navdb.wpid.count(wp)-len(iwps)
if nother>0:
verb = ["is ","are "][min(1,max(0,nother-1))]
lines = lines +"\nThere "+verb + str(nother) +\
" other waypoint(s) also named " + wp
# In which airways?
connect = bs.navdb.listconnections(wp, \
bs.navdb.wplat[iwp],
bs.navdb.wplon[iwp])
if len(connect)>0:
awset = set([])
for c in connect:
awset.add(c[0])
lines = lines+"\nAirways: "+"-".join(awset)
# Try airway id
else: # airway
awid = wp
airway = bs.navdb.listairway(awid)
if len(airway)>0:
lines = ""
for segment in airway:
lines = lines+"Airway "+ awid + ": " + \
" - ".join(segment)+"\n"
lines = lines[:-1] # cut off final newline
else:
return False,idxorwp+" not found as a/c, airport, navaid or waypoint"
# Show what we found on airport and navaid/waypoint
return True, lines
def airwaycmd(self, key):
''' Show conections of a waypoint or airway. '''
reflat, reflon = bs.scr.getviewctr()
if bs.navdb.awid.count(key) > 0:
return self.poscommand(key)
# Find connecting airway legs
wpid = key
iwp = bs.navdb.getwpidx(wpid,reflat,reflon)
if iwp < 0:
return False,key + " not found."
wplat = bs.navdb.wplat[iwp]
wplon = bs.navdb.wplon[iwp]
connect = bs.navdb.listconnections(key, wplat, wplon)
if connect:
lines = ""
for c in connect:
if len(c)>=2:
# Add airway, direction, waypoint
lines = lines+ c[0]+": to "+c[1]+"\n"
return True, lines[:-1] # exclude final newline
return False, f"No airway legs found for {key}"
def settrans(self, alt=-999.):
""" Set or show transition level"""
# in case a valid value is ginve set it
if alt > -900.:
if alt > 0.:
self.translvl = alt
return True
return False,"Transition level needs to be ft/FL and larger than zero"
# In case no value is given, show it
tlvl = int(round(self.translvl/ft))
return True, f"Transition level = {tlvl}/FL{int(round(tlvl/100.))}"
    def setbanklim(self, idx, bankangle=None):
        ''' Set bank limit for given aircraft, or report it when called
        without an angle. Command input is degrees; storage is radians. '''
        if bankangle:
            # NOTE(review): a bankangle of 0 is falsy and falls through to
            # the report branch instead of setting the limit — confirm that
            # this is intended.
            self.bank[idx] = np.radians(bankangle)  # [rad]
            return True
        return True, f"Banklimit of {self.id[idx]} is {int(np.degrees(self.bank[idx]))} deg"
def setthrottle(self,idx,throttle=""):
"""Set throttle to given value or AUTO, meaning autothrottle on (default)"""
if throttle:
if throttle in ('AUTO', 'OFF'): # throttle mode off, ATS on
self.swats[idx] = True # Autothrottle on
self.thr[idx] = -999. # Set to invalid
elif throttle == "IDLE":
self.swats[idx] = False
self.thr[idx] = 0.0
else:
# Check for percent unit
if throttle.count("%")==1:
throttle= throttle.replace("%","")
factor = 0.01
else:
factor = 1.0
# Remaining option is that it is a float, so try conversion
try:
x = factor*float(throttle)
except:
return False,"THR invalid argument "+throttle
# Check whether value makes sense
if x<0.0 or x>1.0:
return False, "THR invalid value " + throttle +". Needs to be [0.0 , 1.0]"
# Valid value, set throttle and disable autothrottle
self.swats[idx] = False
self.thr[idx] = x
return True
if self.swats[idx]:
return True,"ATS of "+self.id[idx]+" is ON"
return True, "ATS of " + self.id[idx] + " is OFF. THR is "+str(self.thr[idx])
| gpl-3.0 |
mnahm5/django-estore | Lib/site-packages/troposphere/sqs.py | 27 | 1054 | # Copyright (c) 2012-2013, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSObject, AWSProperty
from .validators import integer
try:
from awacs.aws import Policy
policytypes = (dict, Policy)
except ImportError:
policytypes = dict,
class RedrivePolicy(AWSProperty):
    """SQS RedrivePolicy property: routes messages that fail processing
    to a dead-letter queue after maxReceiveCount receives."""
    # (type, required) tuples; basestring implies this targets Python 2.
    props = {
        'deadLetterTargetArn': (basestring, False),
        'maxReceiveCount': (integer, False),
    }
class Queue(AWSObject):
    """AWS::SQS::Queue resource."""
    resource_type = "AWS::SQS::Queue"

    # Property name -> (expected type, required) for template validation.
    props = {
        'DelaySeconds': (integer, False),
        'MaximumMessageSize': (integer, False),
        'MessageRetentionPeriod': (integer, False),
        'QueueName': (basestring, False),
        'ReceiveMessageWaitTimeSeconds': (integer, False),
        'RedrivePolicy': (RedrivePolicy, False),
        'VisibilityTimeout': (integer, False),
    }
class QueuePolicy(AWSObject):
    """AWS::SQS::QueuePolicy resource: attaches an access policy document
    to one or more queues."""
    resource_type = "AWS::SQS::QueuePolicy"

    props = {
        # policytypes accepts a plain dict, or an awacs Policy when the
        # optional awacs package is importable (see module top).
        'PolicyDocument': (policytypes, False),
        'Queues': (list, True),
    }
| mit |
trdean/grEME | gr-qtgui/examples/pyqt_histogram_f.py | 7 | 6691 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
import sys
try:
from gnuradio import qtgui
from PyQt4 import QtGui, QtCore
import sip
except ImportError:
sys.stderr.write("Error: Program requires PyQt4 and gr-qtgui.\n")
sys.exit(1)
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
class dialog_box(QtGui.QWidget):
    """Top-level window that lays out the histogram display beside the controls."""

    def __init__(self, display, control):
        # display: the SIP-wrapped histogram sink widget; control: a control_box.
        QtGui.QWidget.__init__(self, None)
        self.setWindowTitle('PyQt Test GUI')

        self.boxlayout = QtGui.QBoxLayout(QtGui.QBoxLayout.LeftToRight, self)
        # Stretch factor 1 so the plot grows with the window; the control
        # panel keeps its natural size.
        self.boxlayout.addWidget(display, 1)
        self.boxlayout.addWidget(control)

        self.resize(800, 500)
class control_box(QtGui.QWidget):
    """Control panel driving the two signal sources and the histogram sink.

    Uses old-style PyQt4 SIGNAL/SLOT string connections. The signal sources
    are attached after construction via attach_signal1/attach_signal2.
    """

    def __init__(self, snk, parent=None):
        QtGui.QWidget.__init__(self, parent)
        self.setWindowTitle('Control Panel')
        self.snk = snk

        self.setToolTip('Control the signals')
        QtGui.QToolTip.setFont(QtGui.QFont('OldEnglish', 10))

        self.layout = QtGui.QFormLayout(self)

        # Control the first signal
        self.freq1Edit = QtGui.QLineEdit(self)
        self.freq1Edit.setMinimumWidth(100)
        self.layout.addRow("Sine Frequency:", self.freq1Edit)
        self.connect(self.freq1Edit, QtCore.SIGNAL("editingFinished()"),
                     self.freq1EditText)

        self.amp1Edit = QtGui.QLineEdit(self)
        self.amp1Edit.setMinimumWidth(100)
        self.layout.addRow("Sine Amplitude:", self.amp1Edit)
        self.connect(self.amp1Edit, QtCore.SIGNAL("editingFinished()"),
                     self.amp1EditText)

        # Control the second signal
        self.amp2Edit = QtGui.QLineEdit(self)
        self.amp2Edit.setMinimumWidth(100)
        self.layout.addRow("Noise Amplitude:", self.amp2Edit)
        self.connect(self.amp2Edit, QtCore.SIGNAL("editingFinished()"),
                     self.amp2EditText)

        # Control the histogram
        self.hist_npts = QtGui.QLineEdit(self)
        self.hist_npts.setMinimumWidth(100)
        self.hist_npts.setValidator(QtGui.QIntValidator(0, 8191))
        # Seed the edit boxes with the sink's current settings.
        self.hist_npts.setText("{0}".format(self.snk.nsamps()))
        self.layout.addRow("Number of Points:", self.hist_npts)
        self.connect(self.hist_npts, QtCore.SIGNAL("editingFinished()"),
                     self.set_nsamps)

        self.hist_bins = QtGui.QLineEdit(self)
        self.hist_bins.setMinimumWidth(100)
        self.hist_bins.setValidator(QtGui.QIntValidator(0, 1000))
        self.hist_bins.setText("{0}".format(self.snk.bins()))
        self.layout.addRow("Number of Bins:", self.hist_bins)
        self.connect(self.hist_bins, QtCore.SIGNAL("editingFinished()"),
                     self.set_bins)

        self.hist_auto = QtGui.QPushButton("scale", self)
        self.layout.addRow("Autoscale X:", self.hist_auto)
        self.connect(self.hist_auto, QtCore.SIGNAL("pressed()"),
                     self.autoscalex)

        self.quit = QtGui.QPushButton('Close', self)
        self.quit.setMinimumWidth(100)
        self.layout.addWidget(self.quit)
        self.connect(self.quit, QtCore.SIGNAL('clicked()'),
                     QtGui.qApp, QtCore.SLOT('quit()'))

    def attach_signal1(self, signal):
        """Bind the sine source and mirror its settings into the edit boxes."""
        self.signal1 = signal
        self.freq1Edit.setText(QtCore.QString("%1").arg(self.signal1.frequency()))
        self.amp1Edit.setText(QtCore.QString("%1").arg(self.signal1.amplitude()))

    def attach_signal2(self, signal):
        """Bind the noise source and mirror its amplitude into the edit box."""
        self.signal2 = signal
        self.amp2Edit.setText(QtCore.QString("%1").arg(self.signal2.amplitude()))

    def freq1EditText(self):
        # Push the edited frequency to the sine source; ignore non-numeric text.
        try:
            newfreq = float(self.freq1Edit.text())
            self.signal1.set_frequency(newfreq)
        except ValueError:
            print "Bad frequency value entered"

    def amp1EditText(self):
        try:
            newamp = float(self.amp1Edit.text())
            self.signal1.set_amplitude(newamp)
        except ValueError:
            print "Bad amplitude value entered"

    def amp2EditText(self):
        try:
            newamp = float(self.amp2Edit.text())
            self.signal2.set_amplitude(newamp)
        except ValueError:
            print "Bad amplitude value entered"

    def set_nsamps(self):
        # QString.toInt() returns (value, ok); only apply when parsing succeeded.
        res = self.hist_npts.text().toInt()
        if(res[1]):
            self.snk.set_nsamps(res[0])

    def set_bins(self):
        res = self.hist_bins.text().toInt()
        if(res[1]):
            self.snk.set_bins(res[0])

    def autoscalex(self):
        # One-shot autoscale of the histogram's x axis.
        self.snk.autoscalex()
class my_top_block(gr.top_block):
    """Flowgraph: sine + Gaussian noise -> throttle -> Qt histogram sink.

    Also builds the Qt application and the control/display windows.
    """

    def __init__(self):
        gr.top_block.__init__(self)

        Rs = 8000       # sample rate passed to the sine source
        f1 = 100        # sine frequency in Hz
        npts = 2048     # histogram points per update

        self.qapp = QtGui.QApplication(sys.argv)

        src1 = analog.sig_source_f(Rs, analog.GR_SIN_WAVE, f1, 0, 0)
        src2 = analog.noise_source_f(analog.GR_GAUSSIAN, 1)
        src = blocks.add_ff()
        # Throttle keeps this non-hardware flowgraph from spinning the CPU.
        thr = blocks.throttle(gr.sizeof_float, 100*npts)
        self.snk1 = qtgui.histogram_sink_f(npts, 200, -5, 5,
                                           "Histogram")

        self.connect(src1, (src,0))
        self.connect(src2, (src,1))
        self.connect(src, thr, self.snk1)

        self.ctrl_win = control_box(self.snk1)
        self.ctrl_win.attach_signal1(src1)
        self.ctrl_win.attach_signal2(src2)

        # Get the reference pointer to the SpectrumDisplayForm QWidget
        pyQt = self.snk1.pyqwidget()

        # Wrap the pointer as a PyQt SIP object
        # This can now be manipulated as a PyQt4.QtGui.QWidget
        pyWin = sip.wrapinstance(pyQt, QtGui.QWidget)

        #pyWin.show()
        self.main_box = dialog_box(pyWin, self.ctrl_win)
        self.main_box.show()
# Entry point: run the flowgraph until the Qt event loop exits, then stop it.
# (Removed a stray C-style trailing semicolon.)
if __name__ == "__main__":
    tb = my_top_block()
    tb.start()
    tb.qapp.exec_()
    tb.stop()
| gpl-3.0 |
mcsalgado/ansible | lib/ansible/parsing/yaml/objects.py | 169 | 2012 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from six import text_type
class AnsibleBaseYAMLObject(object):
    '''
    Mixin base for subclassing python built-in types so that the YAML
    parser can record where each value came from.

    Exposes an ``ansible_pos`` property holding a
    ``(source, line number, column number)`` triple.
    '''
    # Origin information, filled in during parsing.
    _data_source = None
    _line_number = 0
    _column_number = 0

    def _get_ansible_position(self):
        position = (self._data_source, self._line_number, self._column_number)
        return position

    def _set_ansible_position(self, obj):
        # Accept any 3-item iterable; anything else is a programming error.
        try:
            source, lineno, colno = obj
        except (TypeError, ValueError):
            raise AssertionError(
                'ansible_pos can only be set with a tuple/list '
                'of three values: source, line number, column number'
            )
        self._data_source = source
        self._line_number = lineno
        self._column_number = colno

    ansible_pos = property(_get_ansible_position, _set_ansible_position)
class AnsibleMapping(AnsibleBaseYAMLObject, dict):
    ''' sub class for dictionaries: a dict that carries its YAML position '''
    pass
class AnsibleUnicode(AnsibleBaseYAMLObject, text_type):
    ''' sub class for unicode objects (str on py3) carrying YAML position '''
    pass
class AnsibleSequence(AnsibleBaseYAMLObject, list):
    ''' sub class for lists: a list that carries its YAML position '''
    pass
| gpl-3.0 |
wilsonianb/nacl_contracts | src/trusted/validator_mips/dgen/dgen_dump.py | 6 | 1434 | #!/usr/bin/python
#
# Copyright 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
# Copyright 2012, Google Inc.
#
"""
Produces a table from the in-memory representation. Useful for storing the
optimized table for later use.
"""
import dgen_opt
def dump_tables(tables, out):
    """Dumps the given tables into a text file.

    Args:
      tables: list of Table objects to process.
      out: an output stream.

    Raises:
      Exception: if `tables` is empty.
    """
    # Idiomatic emptiness test instead of len(...) == 0.
    if not tables: raise Exception('No tables provided.')
    _generate_header(out)
    for t in tables:
        _generate_table(t, out)
def _generate_header(out):
    """Write the do-not-edit banner at the top of the generated file."""
    # TODO do we need a big ridiculous license banner in generated code?
    out.write('# DO NOT EDIT: GENERATED CODE\n')
def _generate_table(t, out):
    """Optimize one table's rows and write it to `out` in the text format.

    Emits a '-- name (citation)' header, a fixed-width column header line,
    then one line per row: the patterns, the action, and an optional arch tag.
    """
    # Collapse redundant rows before emitting; report the reduction on stdout.
    rows = dgen_opt.optimize_rows(t.rows)
    print ('Table %s: %d rows minimized to %d.'
           % (t.name, len(t.rows), len(rows)))
    out.write('\n')
    out.write('-- %s (%s)\n' % (t.name, t.citation))
    # All rows share the same pattern count; use the first row for the width.
    num_cols = len(rows[0].patterns)
    # '%- 31s' left-justifies in a 31-char field (space flag), matching the
    # 34-char data fields below once the 'pat' prefix is included.
    headers = ['pat%- 31s' % (str(n) + '(31:0)') for n in range(0, num_cols)]
    out.write(''.join(headers))
    out.write('\n')
    for row in rows:
        out.write(''.join(['%- 34s' % p for p in row.patterns]))
        out.write(row.action)
        if row.arch:
            out.write('(%s)' % row.arch)
        out.write('\n')
| bsd-3-clause |
midgardproject/midgard-core | tests/GIR/test_200_user.py | 1 | 2601 | # coding=utf-8
import sys
import struct
import unittest
from test_000_config import TestConfig
from test_020_connection import TestConnection
from gi.repository import Midgard
from gi.repository import GObject
class TestUser(unittest.TestCase):
    """Exercises the create/update/delete and login lifecycle of Midgard.User.

    Each test runs inside its own transaction: opened in setUp, committed in
    tearDown, against a connection from TestConnection.
    """
    mgd = None

    def setUp(self):
        # PEP 8: compare to None with `is`, not `==`.
        if self.mgd is None:
            self.mgd = TestConnection.openConnection()
        self.mgd.beginTransaction()

    def tearDown(self):
        self.mgd.commitTransaction()
        self.mgd.close()
        self.mgd = None

    def getNewUser(self):
        """Build (but do not persist) a plaintext-auth user named John."""
        user = Midgard.User(connection = self.mgd, login = "John", authtype = "Plaintext", active = True)
        return user

    def testNew(self):
        user = self.getNewUser()
        self.assertIsNot(user, None)
        self.assertIsInstance(user, Midgard.User)

    def testCreate(self):
        user = self.getNewUser()
        self.assertTrue(user.create())
        self.assertTrue(user.get_property("active"))
        # Try to create another user with the same login and authentication type
        new_user = self.getNewUser()
        self.assertFalse(new_user.create())
        # Cleanup
        user.delete()

    def testUpdate(self):
        user = self.getNewUser()
        self.assertTrue(user.create())
        user.set_property("active", False)
        self.assertTrue(user.update())
        self.assertFalse(user.get_property("active"))
        # Cleanup
        user.delete()

    def testDelete(self):
        user = self.getNewUser()
        self.assertTrue(user.create())
        self.assertTrue(user.delete())

    def testQuery(self):
        # Should be deprecated
        pass

    def testGetSetPerson(self):
        user = self.getNewUser()
        person = Midgard.Object.factory(self.mgd, "midgard_person", None)
        self.assertTrue(person.create())
        self.assertTrue(user.create())
        self.assertTrue(user.set_person(person))
        self.assertEqual(user.get_person(), person)
        # Cleanup
        self.assertTrue(user.delete())
        self.assertTrue(person.purge(False))

    def testLogin(self):
        user = self.getNewUser()
        # Login must fail until the user record has been persisted.
        self.assertFalse(user.log_in())
        self.assertTrue(user.create())
        self.assertTrue(user.log_in())
        self.assertTrue(user.log_in())
        # cleanup
        self.assertTrue(user.delete())

    def testLogout(self):
        user = self.getNewUser()
        self.assertFalse(user.log_out())
        self.assertTrue(user.create())
        self.assertTrue(user.log_in())
        self.assertTrue(user.log_out())
        # A second logout must fail: the session is already gone.
        self.assertFalse(user.log_out())
        # cleanup
        self.assertTrue(user.delete())

    def testInheritance(self):
        user = self.getNewUser()
        self.assertIsInstance(user, Midgard.DBObject)
# Allow running this test module directly, outside a test runner.
if __name__ == "__main__":
    unittest.main()
| lgpl-2.1 |
Stavitsky/nova | nova/tests/unit/objects/test_vcpu_model.py | 36 | 3702 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.compute import arch
from nova.compute import cpumodel
from nova import objects
from nova.tests.unit.objects import test_objects
# Shared fixtures: a minimal required CPU feature and a fully-populated CPU
# model, reused by the test cases below.
fake_cpu_model_feature = {
    'policy': cpumodel.POLICY_REQUIRE,
    'name': 'sse2',
}

fake_cpu_model_feature_obj = objects.VirtCPUFeature(
    **fake_cpu_model_feature)

fake_vcpumodel_dict = {
    'arch': arch.I686,
    'vendor': 'fake-vendor',
    'match': cpumodel.MATCH_EXACT,
    'topology': objects.VirtCPUTopology(sockets=1, cores=1, threads=1),
    'features': [fake_cpu_model_feature_obj],
    'mode': cpumodel.MODE_HOST_MODEL,
    'model': 'fake-model',
}
fake_vcpumodel = objects.VirtCPUModel(**fake_vcpumodel_dict)
class _TestVirtCPUFeatureObj(object):
    # Mixin with the VirtCPUFeature cases, shared by the concrete test
    # classes below (leading underscore keeps the runner from collecting it).
    def test_policy_limitation(self):
        # The 'policy' field only accepts the defined cpumodel policies.
        obj = objects.VirtCPUFeature()
        self.assertRaises(ValueError, setattr, obj, 'policy', 'foo')
class TestVirtCPUFeatureObj(test_objects._LocalTest,
                            _TestVirtCPUFeatureObj):
    # Runs the shared feature tests against the local (in-process) backend.
    pass
class TestRemoteVirtCPUFeatureObj(test_objects._LocalTest,
                                  _TestVirtCPUFeatureObj):
    # NOTE(review): despite the "Remote" name this mixes in _LocalTest, so it
    # just duplicates TestVirtCPUFeatureObj; test_objects._RemoteTest was
    # probably intended — confirm against the test_objects module.
    pass
class _TestVirtCPUModel(object):
    # Mixin with the VirtCPUModel cases, shared by the concrete test classes
    # below (leading underscore keeps the runner from collecting it directly).

    def test_create(self):
        model = objects.VirtCPUModel(**fake_vcpumodel_dict)
        self.assertEqual(fake_vcpumodel_dict['model'], model.model)
        # NOTE(review): indexes the VirtCPUTopology object with ['sockets'] —
        # presumably relies on the object's dict-compat access; confirm.
        self.assertEqual(fake_vcpumodel_dict['topology']['sockets'],
                         model.topology.sockets)
        feature = model.features[0]
        self.assertEqual(fake_cpu_model_feature['policy'],
                         feature.policy)

    def test_defaults(self):
        # A bare VirtCPUModel has every scalar field None and no features.
        model = objects.VirtCPUModel()
        self.assertIsNone(model.mode)
        self.assertIsNone(model.model)
        self.assertIsNone(model.vendor)
        self.assertIsNone(model.arch)
        self.assertIsNone(model.match)
        self.assertEqual([], model.features)
        self.assertIsNone(model.topology)

    def test_arch_field(self):
        # 'arch' is an enum-backed field: unknown values are rejected.
        model = objects.VirtCPUModel(**fake_vcpumodel_dict)
        self.assertRaises(ValueError, setattr, model, 'arch', 'foo')

    def test_serialize(self):
        # Round-trip through to_json/from_json must preserve every field.
        modelin = objects.VirtCPUModel(**fake_vcpumodel_dict)
        modelout = objects.VirtCPUModel.from_json(modelin.to_json())

        self.assertEqual(modelin.mode, modelout.mode)
        self.assertEqual(modelin.model, modelout.model)
        self.assertEqual(modelin.vendor, modelout.vendor)
        self.assertEqual(modelin.arch, modelout.arch)
        self.assertEqual(modelin.match, modelout.match)
        self.assertEqual(modelin.features[0].policy,
                         modelout.features[0].policy)
        self.assertEqual(modelin.features[0].name, modelout.features[0].name)
        self.assertEqual(modelin.topology.sockets, modelout.topology.sockets)
        self.assertEqual(modelin.topology.cores, modelout.topology.cores)
        self.assertEqual(modelin.topology.threads, modelout.topology.threads)
class TestVirtCPUModel(test_objects._LocalTest,
                       _TestVirtCPUModel):
    # Runs the shared model tests against the local (in-process) backend.
    pass
class TestRemoteVirtCPUModel(test_objects._LocalTest,
                             _TestVirtCPUModel):
    # NOTE(review): despite the "Remote" name this mixes in _LocalTest, so it
    # just duplicates TestVirtCPUModel; test_objects._RemoteTest was probably
    # intended — confirm against the test_objects module.
    pass
| apache-2.0 |
mlperf/inference_results_v0.7 | closed/QCT/code/rnnt/tensorrt/harness.py | 12 | 5140 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys
sys.path.insert(0, os.getcwd())
from code.common import logging, dict_get, run_command, args_to_string
from code.common import BENCHMARKS, SCENARIOS
from code.common.harness import BaseBenchmarkHarness, scenario_result_regex
import code.common.arguments as common_args
import pycuda
import pycuda.autoinit
class RNNTHarness(BaseBenchmarkHarness):
    """Benchmark harness wrapper for the RNN-T speech-recognition workload.

    Translates the generic benchmark argument dict into the command-line
    flag string understood by the native harness_rnnt executable.
    """

    # Engine and weight files that must all exist in engine_dir before a run.
    required_engine_files = [
        "encoder.plan",
        "decoder.plan",
        "fc1_a.plan",
        "fc1_b.plan",
        "igather.plan",
        "isel.plan",
        "joint_backend.plan",
        "joint_fc2_bias_ckpt.fp32.dat",
        "joint_fc2_weight_ckpt.fp32.dat",
        "joint_fc2_bias_ckpt.fp16.dat",
        "joint_fc2_weight_ckpt.fp16.dat",
    ]

    def __init__(self, args, name=""):
        super().__init__(args, name)
        # Flags forwarded to the executable on top of the standard LoadGen set.
        self.flag_builder_custom_args = common_args.LOADGEN_ARGS + ["gpu_batch_size", "use_graphs", "nopipelined_execution", "nobatch_sorting", "noenable_audio_processing",
                                                                    "nouse_copy_kernel", "num_warmups", "dali_batches_issue_ahead", "dali_pipeline_depth", "audio_batch_size",
                                                                    "audio_buffer_num_lines", "audio_fp16_input", "server_num_issue_query_threads", "devices", "max_seq_length"]

    def _get_harness_executable(self):
        """Path of the compiled RNN-T harness binary."""
        return "./build/bin/harness_rnnt"

    # Currently, RNNTHarness is using non-standard directory structure and filenames to store its engine files. Since
    # BaseBenchmarkHarness calls this function at the end of __init__, we have to put our custom directory structure setup
    # here instead of in __init__.
    def enumerate_engines(self):
        """Verify every required engine/weight file is present in engine_dir."""
        for fname in RNNTHarness.required_engine_files:
            self.check_file_exists(os.path.join(self.engine_dir, fname))

    def _build_custom_flags(self, flag_dict):
        """Map generic arg names onto harness_rnnt flag names and build the
        final argument string.

        Setting an entry to None drops it from the emitted flags; the
        'no*' knobs are inverted into their positive counterparts.
        """
        # Rename gpu_batch_size to batch_size
        batch_size = dict_get(self.args, "gpu_batch_size", default=None)
        flag_dict["batch_size"] = batch_size
        flag_dict["gpu_batch_size"] = None

        # Rename use_graphs to cuda_graph
        use_graphs = dict_get(self.args, "use_graphs", default=False)
        flag_dict["cuda_graph"] = use_graphs
        flag_dict["use_graphs"] = None

        # Rename max_seq_length to hp_max_seq_length
        max_seq_length = dict_get(self.args, "max_seq_length", default=None)
        flag_dict["hp_max_seq_length"] = max_seq_length
        flag_dict["max_seq_length"] = None

        # Handle more harness_rnnt knobs
        no_pipelined = dict_get(self.args, "nopipelined_execution", default=False)
        flag_dict["pipelined_execution"] = not no_pipelined
        flag_dict["nopipelined_execution"] = None

        # Handle more harness_rnnt knobs : disable batch sorting by sequence length
        no_sorting = dict_get(self.args, "nobatch_sorting", default=False)
        flag_dict["batch_sorting"] = not no_sorting
        flag_dict["nobatch_sorting"] = None

        # Handle yet another harness_rnnt knob: turning off DALI preprocessing for debug
        no_dali = dict_get(self.args, "noenable_audio_processing", default=False)
        flag_dict["enable_audio_processing"] = not no_dali
        flag_dict["noenable_audio_processing"] = None

        # Handle yet another harness_rnnt knob: disable DALI's scatter gather kernel
        no_copy_kernel = dict_get(self.args, "nouse_copy_kernel", default=False)
        flag_dict["use_copy_kernel"] = not no_copy_kernel
        flag_dict["nouse_copy_kernel"] = None

        # Rename gpu_inference_streams to streams_per_gpu
        num_inference = dict_get(self.args, "gpu_inference_streams", default=None)
        flag_dict["streams_per_gpu"] = num_inference
        flag_dict["gpu_inference_streams"] = None

        audio_fp16_input = dict_get(self.args, "audio_fp16_input", default=True)
        flag_dict["audio_fp16_input"] = audio_fp16_input

        start_from_device = dict_get(self.args, "start_from_device", default=False)
        flag_dict["start_from_device"] = start_from_device

        # Pick the serialized DALI pipeline matching the audio input precision.
        audio_input_suffix = "fp16" if audio_fp16_input else "fp32"
        flag_dict["audio_serialized_pipeline_file"] = "build/bin/dali" + "/dali_pipeline_gpu_" + audio_input_suffix + ".pth"

        argstr = args_to_string(flag_dict) + " --scenario {:} --model {:}".format(self.scenario, self.name)

        # Handle engine dir
        argstr += " --engine_dir={:}".format(self.engine_dir)

        return argstr
| apache-2.0 |
rahuldhote/scikit-learn | sklearn/neighbors/tests/test_dist_metrics.py | 230 | 5234 | import itertools
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy
from scipy.spatial.distance import cdist
from sklearn.neighbors.dist_metrics import DistanceMetric
from nose import SkipTest
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def cmp_version(version1, version2):
    """Three-way compare of two dotted version strings.

    Only the first two components (major, minor) are considered; returns
    -1, 0 or 1 in the style of the old ``cmp`` builtin.
    """
    major_minor_1 = tuple(int(part) for part in version1.split('.')[:2])
    major_minor_2 = tuple(int(part) for part in version2.split('.')[:2])
    if major_minor_1 < major_minor_2:
        return -1
    if major_minor_1 > major_minor_2:
        return 1
    return 0
class TestMetrics:
    """Nose-style generator tests comparing DistanceMetric against scipy's cdist.

    NOTE: the fixture values depend on the exact sequence of np.random draws
    after the single seed in __init__ — do not reorder the random calls.
    NOTE(review): the `zero_frac` parameter is never used.
    """

    def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
                 rseed=0, dtype=np.float64):
        np.random.seed(rseed)
        self.X1 = np.random.random((n1, d)).astype(dtype)
        self.X2 = np.random.random((n2, d)).astype(dtype)

        # make boolean arrays: ones and zeros
        self.X1_bool = self.X1.round(0)
        self.X2_bool = self.X2.round(0)

        V = np.random.random((d, d))
        VI = np.dot(V, V.T)  # symmetric positive semi-definite, for mahalanobis

        # metric name -> dict of parameter names, each mapped to a tuple of
        # values to sweep (empty dict = no parameters).
        self.metrics = {'euclidean': {},
                        'cityblock': {},
                        'minkowski': dict(p=(1, 1.5, 2, 3)),
                        'chebyshev': {},
                        'seuclidean': dict(V=(np.random.random(d),)),
                        'wminkowski': dict(p=(1, 1.5, 3),
                                           w=(np.random.random(d),)),
                        'mahalanobis': dict(VI=(VI,)),
                        'hamming': {},
                        'canberra': {},
                        'braycurtis': {}}

        # Metrics defined only on boolean input.
        self.bool_metrics = ['matching', 'jaccard', 'dice',
                             'kulsinski', 'rogerstanimoto', 'russellrao',
                             'sokalmichener', 'sokalsneath']

    def test_cdist(self):
        # Yield one check per (metric, parameter combination): X1 vs X2.
        for metric, argdict in self.metrics.items():
            keys = argdict.keys()
            for vals in itertools.product(*argdict.values()):
                kwargs = dict(zip(keys, vals))
                D_true = cdist(self.X1, self.X2, metric, **kwargs)
                yield self.check_cdist, metric, kwargs, D_true

        for metric in self.bool_metrics:
            D_true = cdist(self.X1_bool, self.X2_bool, metric)
            yield self.check_cdist_bool, metric, D_true

    def check_cdist(self, metric, kwargs, D_true):
        if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
            raise SkipTest("Canberra distance incorrect in scipy < 0.9")
        dm = DistanceMetric.get_metric(metric, **kwargs)
        D12 = dm.pairwise(self.X1, self.X2)
        assert_array_almost_equal(D12, D_true)

    def check_cdist_bool(self, metric, D_true):
        dm = DistanceMetric.get_metric(metric)
        D12 = dm.pairwise(self.X1_bool, self.X2_bool)
        assert_array_almost_equal(D12, D_true)

    def test_pdist(self):
        # Same sweep, but pairwise within X1 (scipy reference still via cdist).
        for metric, argdict in self.metrics.items():
            keys = argdict.keys()
            for vals in itertools.product(*argdict.values()):
                kwargs = dict(zip(keys, vals))
                D_true = cdist(self.X1, self.X1, metric, **kwargs)
                yield self.check_pdist, metric, kwargs, D_true

        for metric in self.bool_metrics:
            D_true = cdist(self.X1_bool, self.X1_bool, metric)
            yield self.check_pdist_bool, metric, D_true

    def check_pdist(self, metric, kwargs, D_true):
        if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
            raise SkipTest("Canberra distance incorrect in scipy < 0.9")
        dm = DistanceMetric.get_metric(metric, **kwargs)
        D12 = dm.pairwise(self.X1)
        assert_array_almost_equal(D12, D_true)

    def check_pdist_bool(self, metric, D_true):
        dm = DistanceMetric.get_metric(metric)
        D12 = dm.pairwise(self.X1_bool)
        assert_array_almost_equal(D12, D_true)
def test_haversine_metric():
    """Compare the haversine DistanceMetric against a direct numpy formula."""
    def haversine_slow(x1, x2):
        # Great-circle distance for points given as (lat, lon) in radians.
        return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2
                                     + np.cos(x1[0]) * np.cos(x2[0]) *
                                     np.sin(0.5 * (x1[1] - x2[1])) ** 2))

    X = np.random.random((10, 2))

    haversine = DistanceMetric.get_metric("haversine")

    D1 = haversine.pairwise(X)
    D2 = np.zeros_like(D1)
    for i, x1 in enumerate(X):
        for j, x2 in enumerate(X):
            D2[i, j] = haversine_slow(x1, x2)

    assert_array_almost_equal(D1, D2)

    # The "reduced" distance for haversine is sin^2(d/2).
    assert_array_almost_equal(haversine.dist_to_rdist(D1),
                              np.sin(0.5 * D2) ** 2)
def test_pyfunc_metric():
    """A python-callable metric (p=2 Minkowski) must match builtin euclidean,
    both before and after pickling."""
    X = np.random.random((10, 3))

    euclidean = DistanceMetric.get_metric("euclidean")
    pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)

    # Check if both callable metric and predefined metric initialized
    # DistanceMetric object is picklable
    euclidean_pkl = pickle.loads(pickle.dumps(euclidean))
    pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc))

    D1 = euclidean.pairwise(X)
    D2 = pyfunc.pairwise(X)

    D1_pkl = euclidean_pkl.pairwise(X)
    D2_pkl = pyfunc_pkl.pairwise(X)

    assert_array_almost_equal(D1, D2)
    assert_array_almost_equal(D1_pkl, D2_pkl)
| bsd-3-clause |
camptocamp/odoo | addons/sale_layout/models/sale_layout.py | 62 | 5080 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from itertools import groupby
def grouplines(self, ordered_lines, sortkey):
    """Return lines from a specified invoice or sale order grouped by category.

    `ordered_lines` must already be sorted by `sortkey` (groupby only merges
    consecutive equal keys). Each group dict holds 'category', 'lines', and —
    when the category opts in via its `subtotal` flag — a 'subtotal' amount.
    """
    grouped_lines = []
    for key, valuesiter in groupby(ordered_lines, sortkey):
        group = {}
        group['category'] = key
        # list(iterator) instead of a redundant pass-through genexp.
        group['lines'] = list(valuesiter)

        # key may be a category record or '' (no category); '' never matches.
        if 'subtotal' in key and key.subtotal is True:
            group['subtotal'] = sum(line.price_subtotal for line in group['lines'])
        grouped_lines.append(group)

    return grouped_lines
class SaleLayoutCategory(osv.Model):
    # Layout section used to group sale order / invoice lines on reports.
    _name = 'sale_layout.category'
    _order = 'sequence'

    _columns = {
        'name': fields.char('Name', required=True),
        'sequence': fields.integer('Sequence', required=True),
        # Rendering options applied after this section's lines on the report.
        'subtotal': fields.boolean('Add subtotal'),
        'separator': fields.boolean('Add separator'),
        'pagebreak': fields.boolean('Add pagebreak')
    }

    _defaults = {
        'subtotal': True,
        'separator': True,
        'pagebreak': False,
        'sequence': 10
    }
class AccountInvoice(osv.Model):
    _inherit = 'account.invoice'

    def sale_layout_lines(self, cr, uid, ids, invoice_id=None, context=None):
        """
        Returns invoice lines from a specified invoice ordered by
        sale_layout_category sequence. Used in sale_layout module.

        :Parameters:
            -'invoice_id' (int): specify the concerned invoice.
        """
        # Lines come back ordered per account.invoice.line's _order
        # (categ_sequence first), which is what groupby relies on.
        ordered_lines = self.browse(cr, uid, invoice_id, context=context).invoice_line
        # We chose to group first by category model and, if not present, by invoice name
        sortkey = lambda x: x.sale_layout_cat_id if x.sale_layout_cat_id else ''

        return grouplines(self, ordered_lines, sortkey)
class AccountInvoiceLine(osv.Model):
    _inherit = 'account.invoice.line'

    _columns = {
        # Layout section this line is rendered under.
        'sale_layout_cat_id': fields.many2one('sale_layout.category',
                                              string='Section'),
        'categ_sequence': fields.related('sale_layout_cat_id',
                                         'sequence', type='integer',
                                         string='Layout Sequence', store=True)
        # Store is intentionally set in order to keep the "historic" order.
    }
    # Sort by section first so report grouping sees consecutive categories.
    _order = 'invoice_id, categ_sequence, sequence, id'
class SaleOrder(osv.Model):
    _inherit = 'sale.order'

    def sale_layout_lines(self, cr, uid, ids, order_id=None, context=None):
        """
        Returns order lines from a specified sale ordered by
        sale_layout_category sequence. Used in sale_layout module.

        :Parameters:
            -'order_id' (int): specify the concerned sale order.
        """
        # Lines come back ordered per sale.order.line's _order
        # (categ_sequence first), which is what groupby relies on.
        ordered_lines = self.browse(cr, uid, order_id, context=context).order_line
        # Group by the category record, or by '' when the line has no section.
        sortkey = lambda x: x.sale_layout_cat_id if x.sale_layout_cat_id else ''

        return grouplines(self, ordered_lines, sortkey)
class SaleOrderLine(osv.Model):
    _inherit = 'sale.order.line'

    _columns = {
        # Layout section this line is rendered under.
        'sale_layout_cat_id': fields.many2one('sale_layout.category',
                                              string='Section'),
        'categ_sequence': fields.related('sale_layout_cat_id',
                                         'sequence', type='integer',
                                         string='Layout Sequence', store=True)
        # Store is intentionally set in order to keep the "historic" order.
    }
    # Sort by section first so report grouping sees consecutive categories.
    _order = 'order_id, categ_sequence, sequence, id'

    def _prepare_order_line_invoice_line(self, cr, uid, line, account_id=False, context=None):
        """Save the layout when converting to an invoice line."""
        invoice_vals = super(SaleOrderLine, self)._prepare_order_line_invoice_line(cr, uid, line, account_id=account_id, context=context)
        # Propagate the section and its sequence so the invoice keeps the
        # same visual grouping as the sale order.
        if line.sale_layout_cat_id:
            invoice_vals['sale_layout_cat_id'] = line.sale_layout_cat_id.id
        if line.categ_sequence:
            invoice_vals['categ_sequence'] = line.categ_sequence

        return invoice_vals
| agpl-3.0 |
cloudnautique/python-agent | cattle/plugins/docker/network/ipsec_tunnel.py | 1 | 1316 | import logging
from .util import has_service
from cattle.utils import get_or_create_map, get_or_create_list
from cattle.agent.handler import BaseHandler
log = logging.getLogger('docker')
class IpsecTunnelSetup(BaseHandler):
    """Exposes the IPsec UDP ports (500/4500) on network-agent containers.

    before_start mutates the container's create/start configs so the ISAKMP
    and NAT-T ports are published on the host.
    """

    def __init__(self):
        pass

    def before_start(self, instance, host, config, start_config):
        # Only applies to agent-backed instances that are (or default to)
        # the NetworkAgent system container.
        if instance.get('agentId') is None:
            network_agent = False
        elif instance.get('systemContainer') is None or \
                instance.get('systemContainer') == 'NetworkAgent':
            network_agent = True
        else:
            network_agent = False

        if not network_agent or \
                not has_service(instance, 'ipsecTunnelService'):
            return

        try:
            # NOTE(review): `id` shadows the builtin; host-specific host
            # ports for NAT-T and ISAKMP are looked up by host id.
            id = str(host.id)
            nat = instance.data.ipsec[id]['nat']
            isakmp = instance.data.ipsec[id]['isakmp']

            ports = get_or_create_list(config, 'ports')
            binding = get_or_create_map(start_config, 'port_bindings')

            # Expose container UDP 500/4500, bound to the per-host ports.
            ports.append((500, 'udp'))
            ports.append((4500, 'udp'))
            binding['500/udp'] = ('0.0.0.0', isakmp)
            binding['4500/udp'] = ('0.0.0.0', nat)
        except (KeyError, AttributeError):
            # Best-effort: instances without ipsec data are silently skipped.
            pass

    def after_start(self, instance, host, id):
        pass
| apache-2.0 |
WMD-group/SMACT | examples/Practical_tutorial/Site/surface_points.py | 1 | 13393 | #!/usr/bin/env python
# Defines the positions (on a 2D grid) of the positions of under-coordinated
# atoms at the surface of a material
################################################################################
# Copyright Keith T Butler (2015) #
# #
# This file is part of SMACT: builder.py is free software: you can #
# redistribute it and/or modify it under the terms of the GNU General Public #
# License as published by the Free Software Foundation, either version 3 of #
# the License, or (at your option) any later version. #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# You should have received a copy of the GNU General Public License along with #
# this program. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
'''
The general form of the surface_points file.
Each new species or class of materials is defined as a function, the function contains a dictionary of the known surfaces of that material or class.
The surfaces contain the 2D projection of the under-coordinated surface atoms, there may be several possible terminations, then there are all listed.
The function has a list called 'exists', this contains all surfaces for which surface points are available. If you add a new surface be sure to include it in this list too.
Each dictionary item contains a list of the surface cuts and must end with an empty tuple, note the ,() at the end of each item.
'''
def anatase(miller):
    """2D fractional coordinates of under-coordinated surface atoms of
    anatase TiO2 for the given Miller index string; [] if unknown.
    Some cuts have several possible terminations; each list ends with ().
    """
    exists = ['001','010','110','100','101']
    if miller in exists:
        surfaces = {}
        surfaces['110'] = ([0.,0.],[0.2,0],[0.3,0.5],[0.5,0.5],[0.7,0.5],[0.8,0.]),()
        surfaces['001'] = ([0.5,0.],[0.5,0.5]),()
        surfaces['010'] = ([0.5,0.],[0.,0.],[0.,0.2],[0.5,0.5],[0.5,0.75],[0.8,0.]) ,()
        surfaces['100'] = ([0.5,0.],[0.,0.],[0.,0.2],[0.5,0.5],[0.5,0.75],[0.8,0.]) ,()
        surfaces['101'] = ([0.,0.17],[0.5,0.12],[0.,0.37],[0.,0.62],[0.5,0.67],[0.5,0.87]),([0.,0.14],[0.,0.34],[0.5,0.68],[0.5,0.84]),([0.,0.15],[0.,0.36],[0.5,0.4],[0.,0.57],[0.5,0.65],[0.5,0.86],[0.,0.9]),()
        return surfaces[miller]
    else:
        print "No non-polar surface",miller,"is currently in the database, maybe you want to add it."
        return []
def TiO2a(miller):
    """Alias for :func:`anatase` (anatase TiO2).

    The surface table was a byte-for-byte duplicate of anatase's;
    delegating keeps the two names from drifting out of sync.
    """
    return anatase(miller)
def WO3(miller):
    """2D fractional coordinates of under-coordinated surface atoms of WO3
    for the given Miller index string; [] if unknown.
    The 100 cut lists two possible terminations; each list ends with ().
    """
    exists = ['100','110']
    if miller in exists:
        surfaces = {}
        surfaces['100'] = ([0.,0.25],[0,0.75],[0.5,0.25],[0.5,0.75]),([0.25,0.2],[0.5,0.25],[0.5,0.75],[0.7,0.7],[0.5,1.0],[1.0,1.0]),()
        surfaces['110'] = ([0.5,0.3],[0.75,0.3],[1.0,0.8],[0.75,0.80]),()
        return surfaces[miller]
    else:
        print "No non-polar surface",miller,"is currently in the database, maybe you want to add it."
        return []
def perovskite(miller):
    """2D fractional coordinates of under-coordinated surface atoms of the
    generic perovskite structure for the given Miller index; [] if unknown.
    The 100 cut lists two possible terminations; each list ends with ().
    """
    exists = ['100','110','112']
    if miller in exists:
        surfaces = {}
        surfaces['100'] = ([0,0],[0.5,0.5]),([0.5,0],[0,0.5],[0.5,0.5]),()
        surfaces['112'] = ([0.,0.],[0.5,0.],[0.5,0.5]),()
        surfaces['110'] = ([0.,0.],[0.,0.5],[0.75,0.5]),()
        return surfaces[miller]
    else:
        print "No non-polar surface",miller,"is currently in the database, maybe you want to add it."
        return []
def CH3NH3PbI3(miller):
    """Return the stored non-polar surface terminations of CH3NH3PbI3
    (perovskite structure) for ``miller``, or [] when unknown."""
    surfaces = {
        '100': (([0, 0], [0.5, 0.5]), ([0.5, 0], [0, 0.5], [0.5, 0.5]), ()),
        '112': (([0., 0.], [0.5, 0.], [0.5, 0.5]), ()),
        '110': (([0., 0.], [0., 0.5], [0.75, 0.5]), ()),
    }
    if miller in surfaces:
        return surfaces[miller]
    # print() call form works on Python 2 and 3 (original used a py2-only print statement).
    print("No non-polar surface %s is currently in the database, maybe you want to add it." % (miller,))
    return []
def SrTiO3(miller):
    """Return the stored non-polar surface terminations of SrTiO3
    (perovskite structure) for ``miller``, or [] when unknown."""
    surfaces = {
        '100': (([0, 0], [0.5, 0.5]), ([0.5, 0], [0, 0.5], [0.5, 0.5]), ()),
        '112': (([0., 0.], [0.5, 0.], [0.5, 0.5]), ()),
        '110': (([0., 0.], [0., 0.5], [0.75, 0.5]), ()),
    }
    if miller in surfaces:
        return surfaces[miller]
    # print() call form works on Python 2 and 3 (original used a py2-only print statement).
    print("No non-polar surface %s is currently in the database, maybe you want to add it." % (miller,))
    return []
def zincblende(miller):
    """Return the stored non-polar surface terminations of a zincblende
    structure for ``miller``, or [] when unknown."""
    surfaces = {
        '100': (([0.75, 0.25], [0., 0.]), ()),
        '110': (([0.25, 0.9], [0.25, 0.4], [0.5, 0.7], [0.5, 0.2]), ()),
    }
    if miller in surfaces:
        return surfaces[miller]
    # print() call form works on Python 2 and 3 (original used a py2-only print statement).
    print("No non-polar surface %s is currently in the database, maybe you want to add it." % (miller,))
    return []
def CuIz(miller):
    """Return the stored non-polar surface terminations of CuI (zincblende)
    for ``miller``, or [] when unknown.

    BUG FIX: the original gated lookups on ``exists = ['100','110']`` even
    though '001' and '011' were also defined, making those two entries
    unreachable. Keying directly off the dict makes all four reachable.
    """
    surfaces = {
        '100': (([0.75, 0.25], [0., 0.]), ()),
        '001': (([0.75, 0.25], [0., 0.]), ()),
        '110': (([0.25, 0.9], [0.25, 0.4], [0.5, 0.7], [0.5, 0.2]), ()),
        '011': (([0.25, 0.9], [0.25, 0.4], [0.5, 0.7], [0.5, 0.2]), ()),
    }
    if miller in surfaces:
        return surfaces[miller]
    # print() call form works on Python 2 and 3 (original used a py2-only print statement).
    print("No non-polar surface %s is currently in the database, maybe you want to add it." % (miller,))
    return []
def rocksalt(miller):
    """Return the stored non-polar surface terminations of a rocksalt
    structure for ``miller``, or [] when unknown."""
    surfaces = {
        '100': (([0., 0.], [0.5, 0.5]), ()),
        '001': (([0., 0.], [0.5, 0.5]), ()),
        '110': (([0., 0.], [0., 0.5], [0.5, 0.], [0.5, 0.5]), ()),
        '011': (([0., 0.], [0., 0.5], [0.5, 0.], [0.5, 0.5]), ()),
    }
    if miller in surfaces:
        return surfaces[miller]
    # print() call form works on Python 2 and 3 (original used a py2-only print statement).
    print("No non-polar surface %s is currently in the database, maybe you want to add it." % (miller,))
    return []
def ZnTe(miller):
    """Return the stored non-polar surface terminations of ZnTe for
    ``miller``, or [] when unknown."""
    surfaces = {
        '100': (([0., 0.], [0.5, 0.5]), ()),
        '001': (([0., 0.], [0.5, 0.5]), ()),
        '110': (([0., 0.], [0., 0.5], [0.5, 0.], [0.5, 0.5]), ()),
        '011': (([0., 0.], [0., 0.5], [0.5, 0.], [0.5, 0.5]), ()),
    }
    if miller in surfaces:
        return surfaces[miller]
    # print() call form works on Python 2 and 3 (original used a py2-only print statement).
    print("No non-polar surface %s is currently in the database, maybe you want to add it." % (miller,))
    return []
def bixybite(miller):
    """Return the stored non-polar surface terminations of a bixbyite
    structure for ``miller``, or [] when unknown.

    NOTE(review): the function name looks like a misspelling of 'bixbyite';
    kept as-is because callers elsewhere may use this spelling.
    """
    surfaces = {
        '100': (([0.2, 0.9], [0.6, 0.9], [0.9, 0.6], [0.4, 0.4], [0.9, 0.4], [0.7, 0.1]),
                ([0.2, 0.2], [0.2, 0.7], [0.7, 0.2], [0.7, 0.7], [0.0, 0.3], [0.3, 0.5], [0.8, 0.5], [0.5, 0.8]),
                ()),
    }
    if miller in surfaces:
        return surfaces[miller]
    # print() call form works on Python 2 and 3 (original used a py2-only print statement).
    print("No non-polar surface %s is currently in the database, maybe you want to add it." % (miller,))
    return []
def rutile(miller):
    """Return the stored non-polar surface terminations of a rutile
    structure for ``miller``, or [] when unknown."""
    surfaces = {
        '001': (([0.5, 0.5], [0.2, 0.8], [0.8, 0.2]), ()),
        '010': (([0.5, 0.5], [0.7, 0.0]), ()),
        '110': (([0.0, 0.9], [0.0, 0.45]), ()),
        '011': (([0.0, 0.7], [0.3, 0.9], [0.2, 0.4], [0.5, 0.2]), ()),
    }
    if miller in surfaces:
        return surfaces[miller]
    # print() call form works on Python 2 and 3 (original used a py2-only print statement).
    print("No non-polar surface %s is currently in the database, maybe you want to add it." % (miller,))
    return []
def MoO3(miller):
    """Return the stored non-polar surface terminations of MoO3 for
    ``miller``, or [] when unknown."""
    surfaces = {
        '001': (([0.9, 0.75], [0.7, 0.25], [0.6, 0.25], [0.4, 0.25]),
                ([0.9, 0.75], [0.7, 0.25], [0.6, 0.25], [0.6, 0.75], [0.4, 0.25], [0.4, 0.75], [0.3, 0.75], [0., 0.25]),
                ()),
        '101': (([0.25, 1.0], [0.75, 0.7], [0.75, 0.66], [0.25, 0.5], [0.75, 0.3], [0.25, 0.1]), ()),
    }
    if miller in surfaces:
        return surfaces[miller]
    # print() call form works on Python 2 and 3 (original used a py2-only print statement).
    print("No non-polar surface %s is currently in the database, maybe you want to add it." % (miller,))
    return []
def wurtzite(miller):
    """Return the stored non-polar surface terminations of a wurtzite
    structure for ``miller``, or [] when unknown."""
    surfaces = {
        '100': (([0, 0], [0, 0.37]), ()),
        '010': (([0, 0], [0, 0.37]), ()),
        '110': (([0, 0.8], [0.37, 0.8], [0.5, 0.17], [0.87, 0.17]), ()),
    }
    if miller in surfaces:
        return surfaces[miller]
    # print() call form works on Python 2 and 3 (original used a py2-only print statement).
    print("No non-polar surface %s is currently in the database, maybe you want to add it." % (miller,))
    return []
def GaN(miller):
    """Return the stored non-polar surface terminations of GaN (wurtzite
    structure) for ``miller``, or [] when unknown."""
    surfaces = {
        '100': (([0, 0], [0, 0.37]), ()),
        '010': (([0, 0], [0, 0.37]), ()),
        '110': (([0, 0.8], [0.37, 0.8], [0.5, 0.17], [0.87, 0.17]), ()),
    }
    if miller in surfaces:
        return surfaces[miller]
    # print() call form works on Python 2 and 3 (original used a py2-only print statement).
    print("No non-polar surface %s is currently in the database, maybe you want to add it." % (miller,))
    return []
def SiC(miller):
    """Return the stored non-polar surface terminations of SiC (wurtzite
    structure) for ``miller``, or [] when unknown."""
    surfaces = {
        '100': (([0, 0], [0, 0.37]), ()),
        '010': (([0, 0], [0, 0.37]), ()),
        '110': (([0, 0.8], [0.37, 0.8], [0.5, 0.17], [0.87, 0.17]), ()),
    }
    if miller in surfaces:
        return surfaces[miller]
    # print() call form works on Python 2 and 3 (original used a py2-only print statement).
    print("No non-polar surface %s is currently in the database, maybe you want to add it." % (miller,))
    return []
def Cu2O(miller):
    """Return the stored non-polar surface terminations of Cu2O for
    ``miller``, or [] when unknown."""
    surfaces = {
        '100': (([0., 0.],), ()),
        '110': (([0., 0.],), ()),
        '001': (([0., 0.],), ()),
        '011': (([0., 0.],), ()),
    }
    if miller in surfaces:
        return surfaces[miller]
    # print() call form works on Python 2 and 3 (original used a py2-only print statement).
    print("No non-polar surface %s is currently in the database, maybe you want to add it." % (miller,))
    return []
def In2S3(miller):
    """Return the stored non-polar surface terminations of In2S3 for
    ``miller``, or [] when unknown.

    BUG FIX: the '010' entry contained the malformed 4-element point
    ``[0.75,0.6,0.25,0.6]``; every other point in the database is an
    [x, y] pair, so it is split into ``[0.75, 0.6], [0.25, 0.6]``.
    """
    surfaces = {
        '001': (([0., 0.25], [0.5, 0.]),
                ([0., 0.], [0.5, 0.], [0.25, 0.75], [0.75, 0.75], [0., 0.5], [0.5, 0.]),
                ([0., 0.], [0.75, 0.25], [0., 0.5], [0.25, 0.75], [0.25, 0.25]),
                ()),
        # NOTE(review): [0.75, 0.92] appears twice at the end; one of the two
        # may have been meant as [0.25, 0.92] -- verify against the source data.
        '010': (([0.5, 0.], [0.75, 0.2], [0.25, 0.2], [0.75, 0.25], [0.75, 0.3], [0.25, 0.3],
                 [0., 0.45], [0.75, 0.5], [0.25, 0.5], [0.75, 0.6], [0.25, 0.6], [0.75, 0.66],
                 [0.25, 0.66], [0.5, 0.7], [0., 0.8], [0.25, 0.84], [0.75, 0.84], [0.75, 0.92],
                 [0.75, 0.92]), ()),
    }
    if miller in surfaces:
        return surfaces[miller]
    # print() call form works on Python 2 and 3 (original used a py2-only print statement).
    print("No non-polar surface %s is currently in the database, maybe you want to add it." % (miller,))
    return []
def MnTiO3(miller):
    """Return the stored non-polar surface terminations of MnTiO3 for
    ``miller``, or [] when unknown."""
    surfaces = {
        '010': (([0.6, 0.1], [0.9, 0.15], [0.2, 0.25], [0.9, 0.36], [0.52, 0.42],
                 [0.25, 0.58], [0.9, 0.64], [0.6, 0.75], [0.9, 0.85], [0.23, 0.9]), ()),
    }
    if miller in surfaces:
        return surfaces[miller]
    # print() call form works on Python 2 and 3 (original used a py2-only print statement).
    print("No non-polar surface %s is currently in the database, maybe you want to add it." % (miller,))
    return []
def ZnTiO3(miller):
    """Return the stored non-polar surface terminations of ZnTiO3 for
    ``miller``, or [] when unknown."""
    surfaces = {
        '011': (([0.45, 0.09], [0.30, 0.45], [0.82, 0.26], [0.97, 0.60], [0.73, 0.82]), ()),
    }
    if miller in surfaces:
        return surfaces[miller]
    # print() call form works on Python 2 and 3 (original used a py2-only print statement).
    print("No non-polar surface %s is currently in the database, maybe you want to add it." % (miller,))
    return []
def SnS2(miller):
    """Return the stored non-polar surface terminations of SnS2 for
    ``miller``, or [] when unknown."""
    surfaces = {
        '100': (([0.0, 0.0],), ([0.67, 0.33],), ()),
    }
    if miller in surfaces:
        return surfaces[miller]
    # print() call form works on Python 2 and 3 (original used a py2-only print statement).
    print("No non-polar surface %s is currently in the database, maybe you want to add it." % (miller,))
    return []
def Ce2O3(miller):
    """Return the stored non-polar surface terminations of Ce2O3 for
    ``miller``, or [] when unknown."""
    surfaces = {
        '101': (([0.69, 0.54],), ()),
    }
    if miller in surfaces:
        return surfaces[miller]
    # print() call form works on Python 2 and 3 (original used a py2-only print statement).
    print("No non-polar surface %s is currently in the database, maybe you want to add it." % (miller,))
    return []
def LiNbO3(miller):
    """Return the stored non-polar surface terminations of LiNbO3 for
    ``miller``, or [] when unknown."""
    surfaces = {
        '010': (([0.60, 0.90], [0.89, 0.83], [0.19, 0.73], [0.89, 0.61], [0.51, 0.57],
                 [0.21, 0.40], [0.89, 0.33], [0.58, 0.23], [0.89, 0.11], [0.28, 0.07]), ()),
    }
    if miller in surfaces:
        return surfaces[miller]
    # print() call form works on Python 2 and 3 (original used a py2-only print statement).
    print("No non-polar surface %s is currently in the database, maybe you want to add it." % (miller,))
    return []
def Ce2S3(miller):
    """Return the stored non-polar surface terminations of Ce2S3 for
    ``miller``, or [] when unknown."""
    surfaces = {
        '101': (([0.25, 0.99], [0.25, 0.80], [0.25, 0.64], [0.25, 0.47], [0.75, 0.30],
                 [0.25, 0.22], [0.75, 0.11]), ()),
        '110': (([0.93, 0.65], [0.89, 0.25], [0.78, 0.88], [0.70, 0.14], [0.72, 0.47],
                 [0.54, 0.56], [0.43, 0.36], [0.39, 0.76], [0.29, 0.46], [0.20, 0.87],
                 [0.04, 0.45]), ()),
        '100': (([0.75, 0.89], [0.75, 0.70], [0.25, 0.57], [0.75, 0.46], [0.75, 0.28],
                 [0.25, 0.20], [0.75, 0.07]), ()),
        '010': (([0.73, 0.04], [0.13, 0.07], [0.85, 0.22], [0.35, 0.28], [0.23, 0.46],
                 [0.63, 0.43], [0.99, 0.61], [0.36, 0.70], [0.86, 0.80], [0.49, 0.89]), ()),
    }
    if miller in surfaces:
        return surfaces[miller]
    # print() call form works on Python 2 and 3 (original used a py2-only print statement).
    print("No non-polar surface %s is currently in the database, maybe you want to add it." % (miller,))
    return []
| mit |
ArneBab/pypyjs | website/demo/home/rfk/repos/pypy/lib_pypy/ctypes_config_cache/resource.ctc.py | 13 | 1502 | """
'ctypes_configure' source for resource.py.
Run this to rebuild _resource_cache.py.
"""
from ctypes import sizeof
import dumpcache
from ctypes_configure.configure import (configure,
ExternalCompilationInfo, ConstantInteger, DefinedConstantInteger,
SimpleType)
# rlimit constants that every supported platform's <sys/resource.h> is
# expected to define; probed unconditionally below.
_CONSTANTS = (
    'RLIM_INFINITY',
    'RLIM_NLIMITS',
)
# rlimit/rusage constants that are platform-dependent; each is probed with
# DefinedConstantInteger below and dropped from the cache when undefined.
_OPTIONAL_CONSTANTS = (
    'RLIMIT_CPU',
    'RLIMIT_FSIZE',
    'RLIMIT_DATA',
    'RLIMIT_STACK',
    'RLIMIT_CORE',
    'RLIMIT_RSS',
    'RLIMIT_NPROC',
    'RLIMIT_NOFILE',
    'RLIMIT_OFILE',
    'RLIMIT_MEMLOCK',
    'RLIMIT_AS',
    'RLIMIT_LOCKS',
    'RLIMIT_SIGPENDING',
    'RLIMIT_MSGQUEUE',
    'RLIMIT_NICE',
    'RLIMIT_RTPRIO',
    'RLIMIT_VMEM',
    'RUSAGE_BOTH',
    'RUSAGE_SELF',
    'RUSAGE_CHILDREN',
)
# Setup our configure
class ResourceConfigure:
    """ctypes_configure description of <sys/resource.h>: declares the rlim_t
    type probe; one integer probe per constant is attached to this class by
    the loops that follow."""
    _compilation_info_ = ExternalCompilationInfo(includes=['sys/resource.h'])
    rlim_t = SimpleType('rlim_t')
# Attach one probe per constant name onto the configure class: mandatory
# constants must resolve, optional ones may come back as None.
for key in _CONSTANTS:
    setattr(ResourceConfigure, key, ConstantInteger(key))
for key in _OPTIONAL_CONSTANTS:
    setattr(ResourceConfigure, key, DefinedConstantInteger(key))
# Configure constants and types
config = configure(ResourceConfigure)
# Largest value representable in rlim_t, treating it as an unsigned integer
# of sizeof(rlim_t) bytes.
config['rlim_t_max'] = (1<<(sizeof(config['rlim_t']) * 8)) - 1
optional_constants = []
for key in _OPTIONAL_CONSTANTS:
    if config[key] is not None:
        optional_constants.append(key)
    else:
        # undefined on this platform: drop the None placeholder entirely
        del config[key]
config['ALL_CONSTANTS'] = _CONSTANTS + tuple(optional_constants)
# Persist the probed values as the generated _resource_cache module.
dumpcache.dumpcache2('resource', config)
| mit |
arista-eosplus/ansible | lib/ansible/modules/files/fetch.py | 27 | 4058 | # this is a virtual module that is entirely implemented server side
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: fetch
short_description: Fetches a file from remote nodes
description:
- This module works like M(copy), but in reverse. It is used for fetching
files from remote machines and storing them locally in a file tree,
organized by hostname.
version_added: "0.2"
options:
src:
description:
- The file on the remote system to fetch. This I(must) be a file, not a
directory. Recursive fetching may be supported in a later release.
required: true
default: null
aliases: []
dest:
description:
- A directory to save the file into. For example, if the I(dest)
directory is C(/backup) a I(src) file named C(/etc/profile) on host
C(host.example.com), would be saved into
C(/backup/host.example.com/etc/profile)
required: true
default: null
fail_on_missing:
version_added: "1.1"
description:
- When set to 'yes', the task will fail if the remote file cannot be
read for any reason. Prior to Ansible-2.4, setting this would only fail
if the source file was missing.
- The default was changed to "yes" in Ansible-2.4.
required: false
choices: [ "yes", "no" ]
default: "yes"
validate_checksum:
version_added: "1.4"
description:
- Verify that the source and destination checksums match after the files are fetched.
required: false
choices: [ "yes", "no" ]
default: "yes"
aliases: [ "validate_md5" ]
flat:
version_added: "1.2"
description:
- Allows you to override the default behavior of appending
hostname/path/to/file to the destination. If dest ends with '/', it
will use the basename of the source file, similar to the copy module.
Obviously this is only handy if the filenames are unique.
requirements: []
author:
- "Ansible Core Team"
- "Michael DeHaan"
notes:
- When running fetch with C(become), the M(slurp) module will also be
used to fetch the contents of the file for determining the remote
checksum. This effectively doubles the transfer size, and
depending on the file size can consume all available memory on the
remote or local hosts causing a C(MemoryError). Due to this it is
advisable to run this module without C(become) whenever possible.
- Prior to Ansible-2.4 this module would not fail if reading the remote
file was impossible unless fail_on_missing was set. In Ansible-2.4+,
playbook authors are encouraged to use fail_when or ignore_errors to
get this ability. They may also explicitly set fail_on_missing to False
to get the non-failing behaviour.
'''
EXAMPLES = '''
# Store file into /tmp/fetched/host.example.com/tmp/somefile
- fetch:
src: /tmp/somefile
dest: /tmp/fetched
# Specifying a path directly
- fetch:
src: /tmp/somefile
dest: /tmp/prefix-{{ inventory_hostname }}
flat: yes
# Specifying a destination path
- fetch:
src: /tmp/uniquefile
dest: /tmp/special/
flat: yes
# Storing in a path relative to the playbook
- fetch:
src: /tmp/uniquefile
dest: special/prefix-{{ inventory_hostname }}
flat: yes
'''
| gpl-3.0 |
streamlink/streamlink | src/streamlink/utils/url.py | 3 | 4825 | import re
from collections import OrderedDict
from urllib.parse import parse_qsl, quote_plus, urlencode, urljoin, urlparse, urlunparse
# Matches an explicit "<scheme>://" prefix; used instead of urlparse because
# urlparse's scheme handling differs between Python versions (<3.9 vs >=3.9).
_re_uri_implicit_scheme = re.compile(r"""^[a-z0-9][a-z0-9.+-]*://""", re.IGNORECASE)
def update_scheme(current: str, target: str) -> str:
    """
    Apply the scheme of ``current`` to ``target`` when ``target`` starts
    with ``//`` or carries no scheme of its own.

    :param current: URL to borrow the scheme from
    :param target: URL to fix up
    :return: ``target`` with a scheme guaranteed
    """
    parsed = urlparse(target)
    has_scheme_marker = _re_uri_implicit_scheme.search(target) is not None or target.startswith("//")
    if not has_scheme_marker or (not parsed.scheme and not parsed.netloc):
        # Covers "foo.bar:1234" (implicit scheme + netloc with port) as well
        # as plain "foo.bar/foo" (neither scheme nor netloc): graft scheme://
        return "{0}://{1}".format(urlparse(current).scheme, urlunparse(parsed))
    if not parsed.scheme and parsed.netloc:
        # "//foo.bar/foo": the double slash is already present, add scheme:
        return "{0}:{1}".format(urlparse(current).scheme, urlunparse(parsed))
    # target already has an explicit scheme: leave it untouched
    return target
def url_equal(first, second, ignore_scheme=False, ignore_netloc=False, ignore_path=False, ignore_params=False,
              ignore_query=False, ignore_fragment=False):
    """
    Compare two URLs component by component; individual components can be
    excluded from the comparison via the ``ignore_*`` flags.

    :param first: URL
    :param second: URL
    :param ignore_scheme: skip the scheme comparison
    :param ignore_netloc: skip the netloc comparison
    :param ignore_path: skip the path comparison
    :param ignore_params: skip the params comparison
    :param ignore_query: skip the query-string comparison
    :param ignore_fragment: skip the fragment comparison
    :return: True when all non-ignored components are equal
    """
    # <scheme>://<netloc>/<path>;<params>?<query>#<fragment>
    parsed_a = urlparse(first)
    parsed_b = urlparse(second)
    comparisons = (
        (ignore_scheme, parsed_a.scheme, parsed_b.scheme),
        (ignore_netloc, parsed_a.netloc, parsed_b.netloc),
        (ignore_path, parsed_a.path, parsed_b.path),
        (ignore_params, parsed_a.params, parsed_b.params),
        (ignore_query, parsed_a.query, parsed_b.query),
        (ignore_fragment, parsed_a.fragment, parsed_b.fragment),
    )
    return all(skipped or left == right for skipped, left, right in comparisons)
def url_concat(base, *parts, **kwargs):
    """
    Join extra path segments onto a URL, one ``urljoin`` per segment.

    :param base: the base URL
    :param parts: path segments to append in order
    :param allow_fragments: whether URL fragments are recognised (kwarg, default True)
    :return: the joined URL
    """
    allow_fragments = kwargs.get("allow_fragments", True)
    joined = base
    for segment in parts:
        # normalise slashes on both sides so every segment nests exactly once
        joined = urljoin(joined.rstrip("/") + "/", segment.strip("/"), allow_fragments)
    return joined
def update_qsd(url, qsd=None, remove=None, keep_blank_values=True, safe="", quote_via=quote_plus):
    """
    Update or remove keys from a query string in a URL

    :param url: URL to update
    :param qsd: dict of keys to update, a None value leaves it unchanged
    :param remove: list of keys to remove, or "*" to remove all
                   note: updated keys are never removed, even if unchanged
    :param keep_blank_values: whether params with blank values should be kept or not
    :param safe: string of reserved encoding characters, passed to the quote_via function
    :param quote_via: function which encodes query string keys and values. Default: urllib.parse.quote_plus
    :return: updated URL
    """
    qsd = qsd or {}
    remove = remove or []
    # parse current query string (blanks always kept here; filtered below)
    parsed = urlparse(url)
    current_qsd = OrderedDict(parse_qsl(parsed.query, keep_blank_values=True))
    # * removes all possible keys
    if remove == "*":
        remove = list(current_qsd.keys())
    # remove keys before updating, but leave updated keys untouched
    for key in remove:
        if key not in qsd:
            # BUG FIX: was `del current_qsd[key]`, which raised KeyError when
            # asked to remove a key the query string doesn't contain.
            current_qsd.pop(key, None)
    # and update the query string
    for key, value in qsd.items():
        if value is not None:
            current_qsd[key] = value
    for key, value in list(current_qsd.items()):  # use list() to create a view of the current_qsd
        if not value and not keep_blank_values and key not in qsd:
            del current_qsd[key]
    query = urlencode(query=current_qsd, safe=safe, quote_via=quote_via)
    return parsed._replace(query=query).geturl()
| bsd-2-clause |
CMartelLML/numpy | numpy/polynomial/tests/test_hermite_e.py | 123 | 17069 | """Tests for hermite_e module.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.polynomial.hermite_e as herme
from numpy.polynomial.polynomial import polyval
from numpy.testing import (
TestCase, assert_almost_equal, assert_raises,
assert_equal, assert_, run_module_suite)
# Power-basis (ordinary polynomial) coefficient expansions of the first ten
# probabilists' Hermite polynomials He_n, lowest degree first; used as the
# reference values for the herme2poly/poly2herme round-trip tests below.
He0 = np.array([1])
He1 = np.array([0, 1])
He2 = np.array([-1, 0, 1])
He3 = np.array([0, -3, 0, 1])
He4 = np.array([3, 0, -6, 0, 1])
He5 = np.array([0, 15, 0, -10, 0, 1])
He6 = np.array([-15, 0, 45, 0, -15, 0, 1])
He7 = np.array([0, -105, 0, 105, 0, -21, 0, 1])
He8 = np.array([105, 0, -420, 0, 210, 0, -28, 0, 1])
He9 = np.array([0, 945, 0, -1260, 0, 378, 0, -36, 0, 1])
Helist = [He0, He1, He2, He3, He4, He5, He6, He7, He8, He9]
def trim(x):
    """Drop trailing coefficients below the shared test tolerance (1e-6)."""
    return herme.hermetrim(x, tol=1e-6)
class TestConstants(TestCase):
    """Check the module-level constant arrays exported by hermite_e."""
    def test_hermedomain(self):
        assert_equal(herme.hermedomain, [-1, 1])
    def test_hermezero(self):
        assert_equal(herme.hermezero, [0])
    def test_hermeone(self):
        assert_equal(herme.hermeone, [1])
    def test_hermex(self):
        assert_equal(herme.hermex, [0, 1])
class TestArithmetic(TestCase):
    """Exercise hermeadd/hermesub/hermemulx/hermemul/hermediv, driving each
    with single-term series [0]*i + [1] and comparing by evaluation."""
    # sample points at which products are compared by evaluation
    x = np.linspace(-3, 3, 100)
    def test_hermeadd(self):
        for i in range(5):
            for j in range(5):
                msg = "At i=%d, j=%d" % (i, j)
                tgt = np.zeros(max(i, j) + 1)
                tgt[i] += 1
                tgt[j] += 1
                res = herme.hermeadd([0]*i + [1], [0]*j + [1])
                assert_equal(trim(res), trim(tgt), err_msg=msg)
    def test_hermesub(self):
        for i in range(5):
            for j in range(5):
                msg = "At i=%d, j=%d" % (i, j)
                tgt = np.zeros(max(i, j) + 1)
                tgt[i] += 1
                tgt[j] -= 1
                res = herme.hermesub([0]*i + [1], [0]*j + [1])
                assert_equal(trim(res), trim(tgt), err_msg=msg)
    def test_hermemulx(self):
        assert_equal(herme.hermemulx([0]), [0])
        assert_equal(herme.hermemulx([1]), [0, 1])
        for i in range(1, 5):
            ser = [0]*i + [1]
            # x*He_i = He_{i+1} + i*He_{i-1} (three-term recurrence)
            tgt = [0]*(i - 1) + [i, 0, 1]
            assert_equal(herme.hermemulx(ser), tgt)
    def test_hermemul(self):
        # check values of result
        for i in range(5):
            pol1 = [0]*i + [1]
            val1 = herme.hermeval(self.x, pol1)
            for j in range(5):
                msg = "At i=%d, j=%d" % (i, j)
                pol2 = [0]*j + [1]
                val2 = herme.hermeval(self.x, pol2)
                pol3 = herme.hermemul(pol1, pol2)
                val3 = herme.hermeval(self.x, pol3)
                assert_(len(pol3) == i + j + 1, msg)
                assert_almost_equal(val3, val1*val2, err_msg=msg)
    def test_hermediv(self):
        # (ci + cj) / ci must reconstruct via quo*ci + rem
        for i in range(5):
            for j in range(5):
                msg = "At i=%d, j=%d" % (i, j)
                ci = [0]*i + [1]
                cj = [0]*j + [1]
                tgt = herme.hermeadd(ci, cj)
                quo, rem = herme.hermediv(tgt, ci)
                res = herme.hermeadd(herme.hermemul(quo, ci), rem)
                assert_equal(trim(res), trim(tgt), err_msg=msg)
class TestEvaluation(TestCase):
    """Check hermeval and the 2d/3d point-wise and grid evaluators against
    direct power-basis evaluation on random points."""
    # coefficients of 1 + 2*x + 3*x**2
    c1d = np.array([4., 2., 3.])
    c2d = np.einsum('i,j->ij', c1d, c1d)
    c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
    # some random values in [-1, 1)
    x = np.random.random((3, 5))*2 - 1
    y = polyval(x, [1., 2., 3.])
    def test_hermeval(self):
        #check empty input
        assert_equal(herme.hermeval([], [1]).size, 0)
        #check normal input
        x = np.linspace(-1, 1)
        y = [polyval(x, c) for c in Helist]
        for i in range(10):
            msg = "At i=%d" % i
            tgt = y[i]
            res = herme.hermeval(x, [0]*i + [1])
            assert_almost_equal(res, tgt, err_msg=msg)
        #check that shape is preserved
        for i in range(3):
            dims = [2]*i
            x = np.zeros(dims)
            assert_equal(herme.hermeval(x, [1]).shape, dims)
            assert_equal(herme.hermeval(x, [1, 0]).shape, dims)
            assert_equal(herme.hermeval(x, [1, 0, 0]).shape, dims)
    def test_hermeval2d(self):
        x1, x2, x3 = self.x
        y1, y2, y3 = self.y
        #test exceptions
        assert_raises(ValueError, herme.hermeval2d, x1, x2[:2], self.c2d)
        #test values
        tgt = y1*y2
        res = herme.hermeval2d(x1, x2, self.c2d)
        assert_almost_equal(res, tgt)
        #test shape
        z = np.ones((2, 3))
        res = herme.hermeval2d(z, z, self.c2d)
        assert_(res.shape == (2, 3))
    def test_hermeval3d(self):
        x1, x2, x3 = self.x
        y1, y2, y3 = self.y
        #test exceptions
        assert_raises(ValueError, herme.hermeval3d, x1, x2, x3[:2], self.c3d)
        #test values
        tgt = y1*y2*y3
        res = herme.hermeval3d(x1, x2, x3, self.c3d)
        assert_almost_equal(res, tgt)
        #test shape
        z = np.ones((2, 3))
        res = herme.hermeval3d(z, z, z, self.c3d)
        assert_(res.shape == (2, 3))
    def test_hermegrid2d(self):
        x1, x2, x3 = self.x
        y1, y2, y3 = self.y
        #test values (grid evaluators take the outer product of the axes)
        tgt = np.einsum('i,j->ij', y1, y2)
        res = herme.hermegrid2d(x1, x2, self.c2d)
        assert_almost_equal(res, tgt)
        #test shape
        z = np.ones((2, 3))
        res = herme.hermegrid2d(z, z, self.c2d)
        assert_(res.shape == (2, 3)*2)
    def test_hermegrid3d(self):
        x1, x2, x3 = self.x
        y1, y2, y3 = self.y
        #test values
        tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
        res = herme.hermegrid3d(x1, x2, x3, self.c3d)
        assert_almost_equal(res, tgt)
        #test shape
        z = np.ones((2, 3))
        res = herme.hermegrid3d(z, z, z, self.c3d)
        assert_(res.shape == (2, 3)*3)
class TestIntegral(TestCase):
    """Check hermeint: argument validation, integration constants, lower
    bounds, scaling, repeated integration, and the axis keyword."""
    def test_hermeint(self):
        # check exceptions
        assert_raises(ValueError, herme.hermeint, [0], .5)
        assert_raises(ValueError, herme.hermeint, [0], -1)
        assert_raises(ValueError, herme.hermeint, [0], 1, [0, 0])
        # test integration of zero polynomial
        for i in range(2, 5):
            k = [0]*(i - 2) + [1]
            res = herme.hermeint([0], m=i, k=k)
            assert_almost_equal(res, [0, 1])
        # check single integration with integration constant
        for i in range(5):
            scl = i + 1
            pol = [0]*i + [1]
            tgt = [i] + [0]*i + [1/scl]
            hermepol = herme.poly2herme(pol)
            hermeint = herme.hermeint(hermepol, m=1, k=[i])
            res = herme.herme2poly(hermeint)
            assert_almost_equal(trim(res), trim(tgt))
        # check single integration with integration constant and lbnd
        for i in range(5):
            scl = i + 1
            pol = [0]*i + [1]
            hermepol = herme.poly2herme(pol)
            hermeint = herme.hermeint(hermepol, m=1, k=[i], lbnd=-1)
            assert_almost_equal(herme.hermeval(-1, hermeint), i)
        # check single integration with integration constant and scaling
        for i in range(5):
            scl = i + 1
            pol = [0]*i + [1]
            tgt = [i] + [0]*i + [2/scl]
            hermepol = herme.poly2herme(pol)
            hermeint = herme.hermeint(hermepol, m=1, k=[i], scl=2)
            res = herme.herme2poly(hermeint)
            assert_almost_equal(trim(res), trim(tgt))
        # check multiple integrations with default k
        for i in range(5):
            for j in range(2, 5):
                pol = [0]*i + [1]
                tgt = pol[:]
                for k in range(j):
                    tgt = herme.hermeint(tgt, m=1)
                res = herme.hermeint(pol, m=j)
                assert_almost_equal(trim(res), trim(tgt))
        # check multiple integrations with defined k
        for i in range(5):
            for j in range(2, 5):
                pol = [0]*i + [1]
                tgt = pol[:]
                for k in range(j):
                    tgt = herme.hermeint(tgt, m=1, k=[k])
                res = herme.hermeint(pol, m=j, k=list(range(j)))
                assert_almost_equal(trim(res), trim(tgt))
        # check multiple integrations with lbnd
        for i in range(5):
            for j in range(2, 5):
                pol = [0]*i + [1]
                tgt = pol[:]
                for k in range(j):
                    tgt = herme.hermeint(tgt, m=1, k=[k], lbnd=-1)
                res = herme.hermeint(pol, m=j, k=list(range(j)), lbnd=-1)
                assert_almost_equal(trim(res), trim(tgt))
        # check multiple integrations with scaling
        for i in range(5):
            for j in range(2, 5):
                pol = [0]*i + [1]
                tgt = pol[:]
                for k in range(j):
                    tgt = herme.hermeint(tgt, m=1, k=[k], scl=2)
                res = herme.hermeint(pol, m=j, k=list(range(j)), scl=2)
                assert_almost_equal(trim(res), trim(tgt))
    def test_hermeint_axis(self):
        # check that axis keyword works
        c2d = np.random.random((3, 4))
        tgt = np.vstack([herme.hermeint(c) for c in c2d.T]).T
        res = herme.hermeint(c2d, axis=0)
        assert_almost_equal(res, tgt)
        tgt = np.vstack([herme.hermeint(c) for c in c2d])
        res = herme.hermeint(c2d, axis=1)
        assert_almost_equal(res, tgt)
        tgt = np.vstack([herme.hermeint(c, k=3) for c in c2d])
        res = herme.hermeint(c2d, k=3, axis=1)
        assert_almost_equal(res, tgt)
class TestDerivative(TestCase):
    """Check hermeder: validation, inverse-of-integration, scaling, axis."""
    def test_hermeder(self):
        # check exceptions
        assert_raises(ValueError, herme.hermeder, [0], .5)
        assert_raises(ValueError, herme.hermeder, [0], -1)
        # check that zeroth derivative does nothing
        for i in range(5):
            tgt = [0]*i + [1]
            res = herme.hermeder(tgt, m=0)
            assert_equal(trim(res), trim(tgt))
        # check that derivation is the inverse of integration
        for i in range(5):
            for j in range(2, 5):
                tgt = [0]*i + [1]
                res = herme.hermeder(herme.hermeint(tgt, m=j), m=j)
                assert_almost_equal(trim(res), trim(tgt))
        # check derivation with scaling
        for i in range(5):
            for j in range(2, 5):
                tgt = [0]*i + [1]
                res = herme.hermeder(
                    herme.hermeint(tgt, m=j, scl=2), m=j, scl=.5)
                assert_almost_equal(trim(res), trim(tgt))
    def test_hermeder_axis(self):
        # check that axis keyword works
        c2d = np.random.random((3, 4))
        tgt = np.vstack([herme.hermeder(c) for c in c2d.T]).T
        res = herme.hermeder(c2d, axis=0)
        assert_almost_equal(res, tgt)
        tgt = np.vstack([herme.hermeder(c) for c in c2d])
        res = herme.hermeder(c2d, axis=1)
        assert_almost_equal(res, tgt)
class TestVander(TestCase):
    """Check the pseudo-Vandermonde builders hermevander{,2d,3d}: column i
    must evaluate the degree-i basis term, and van @ c.flat must agree with
    the corresponding hermeval evaluator."""
    # some random values in [-1, 1)
    x = np.random.random((3, 5))*2 - 1
    def test_hermevander(self):
        # check for 1d x
        x = np.arange(3)
        v = herme.hermevander(x, 3)
        assert_(v.shape == (3, 4))
        for i in range(4):
            coef = [0]*i + [1]
            assert_almost_equal(v[..., i], herme.hermeval(x, coef))
        # check for 2d x
        x = np.array([[1, 2], [3, 4], [5, 6]])
        v = herme.hermevander(x, 3)
        assert_(v.shape == (3, 2, 4))
        for i in range(4):
            coef = [0]*i + [1]
            assert_almost_equal(v[..., i], herme.hermeval(x, coef))
    def test_hermevander2d(self):
        # also tests hermeval2d for non-square coefficient array
        x1, x2, x3 = self.x
        c = np.random.random((2, 3))
        van = herme.hermevander2d(x1, x2, [1, 2])
        tgt = herme.hermeval2d(x1, x2, c)
        res = np.dot(van, c.flat)
        assert_almost_equal(res, tgt)
        # check shape
        van = herme.hermevander2d([x1], [x2], [1, 2])
        assert_(van.shape == (1, 5, 6))
    def test_hermevander3d(self):
        # also tests hermeval3d for non-square coefficient array
        x1, x2, x3 = self.x
        c = np.random.random((2, 3, 4))
        van = herme.hermevander3d(x1, x2, x3, [1, 2, 3])
        tgt = herme.hermeval3d(x1, x2, x3, c)
        res = np.dot(van, c.flat)
        assert_almost_equal(res, tgt)
        # check shape
        van = herme.hermevander3d([x1], [x2], [x3], [1, 2, 3])
        assert_(van.shape == (1, 5, 24))
class TestFitting(TestCase):
    """Check hermefit: argument validation, exact fits of a cubic, 2d RHS,
    weighting, and complex-x scaling."""
    def test_hermefit(self):
        def f(x):
            return x*(x - 1)*(x - 2)
        # Test exceptions
        assert_raises(ValueError, herme.hermefit, [1], [1], -1)
        assert_raises(TypeError, herme.hermefit, [[1]], [1], 0)
        assert_raises(TypeError, herme.hermefit, [], [1], 0)
        assert_raises(TypeError, herme.hermefit, [1], [[[1]]], 0)
        assert_raises(TypeError, herme.hermefit, [1, 2], [1], 0)
        assert_raises(TypeError, herme.hermefit, [1], [1, 2], 0)
        assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[[1]])
        assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[1, 1])
        # Test fit
        x = np.linspace(0, 2)
        y = f(x)
        # degree 3 fit of a cubic must be exact
        coef3 = herme.hermefit(x, y, 3)
        assert_equal(len(coef3), 4)
        assert_almost_equal(herme.hermeval(x, coef3), y)
        # degree 4 fit must also reproduce it exactly
        coef4 = herme.hermefit(x, y, 4)
        assert_equal(len(coef4), 5)
        assert_almost_equal(herme.hermeval(x, coef4), y)
        # 2d right-hand side fits each column independently
        coef2d = herme.hermefit(x, np.array([y, y]).T, 3)
        assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
        # test weighting
        w = np.zeros_like(x)
        yw = y.copy()
        w[1::2] = 1
        # NOTE(review): this zeroes `y` rather than `yw`, so the zero-weighted
        # points of the fitted data are never actually perturbed; later numpy
        # versions use `yw[0::2] = 0` here -- verify intent before relying on it.
        y[0::2] = 0
        wcoef3 = herme.hermefit(x, yw, 3, w=w)
        assert_almost_equal(wcoef3, coef3)
        # weighted fit with a 2d right-hand side
        wcoef2d = herme.hermefit(x, np.array([yw, yw]).T, 3, w=w)
        assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
        # test scaling with complex values x points whose square
        # is zero when summed.
        x = [1, 1j, -1, -1j]
        assert_almost_equal(herme.hermefit(x, x, 1), [0, 1])
class TestCompanion(TestCase):
    """Check hermecompanion: validation, matrix shape, and the linear root."""
    def test_raises(self):
        assert_raises(ValueError, herme.hermecompanion, [])
        assert_raises(ValueError, herme.hermecompanion, [1])
    def test_dimensions(self):
        # a degree-i series yields an i x i companion matrix
        for i in range(1, 5):
            coef = [0]*i + [1]
            assert_(herme.hermecompanion(coef).shape == (i, i))
    def test_linear_root(self):
        # companion of c0 + c1*He_1 has its single eigenvalue at -c0/c1
        assert_(herme.hermecompanion([1, 2])[0, 0] == -.5)
class TestGauss(TestCase):
    """Check 100-point Gauss-HermiteE quadrature nodes and weights."""
    def test_100(self):
        x, w = herme.hermegauss(100)
        # test orthogonality. Note that the results need to be normalized,
        # otherwise the huge values that can arise from fast growing
        # functions like Laguerre can be very confusing.
        v = herme.hermevander(x, 99)
        vv = np.dot(v.T * w, v)
        vd = 1/np.sqrt(vv.diagonal())
        vv = vd[:, None] * vv * vd
        assert_almost_equal(vv, np.eye(100))
        # check that the integral of 1 is correct
        tgt = np.sqrt(2*np.pi)
        assert_almost_equal(w.sum(), tgt)
class TestMisc(TestCase):
    """Remaining hermite_e helpers: roots, trimming, line, basis conversion
    round trips, and the weight function."""
    def test_hermefromroots(self):
        res = herme.hermefromroots([])
        assert_almost_equal(trim(res), [1])
        for i in range(1, 5):
            roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
            pol = herme.hermefromroots(roots)
            res = herme.hermeval(roots, pol)
            tgt = 0
            assert_(len(pol) == i + 1)
            # leading power-basis coefficient of a monic product is 1
            assert_almost_equal(herme.herme2poly(pol)[-1], 1)
            assert_almost_equal(res, tgt)
    def test_hermeroots(self):
        assert_almost_equal(herme.hermeroots([1]), [])
        assert_almost_equal(herme.hermeroots([1, 1]), [-1])
        for i in range(2, 5):
            tgt = np.linspace(-1, 1, i)
            res = herme.hermeroots(herme.hermefromroots(tgt))
            assert_almost_equal(trim(res), trim(tgt))
    def test_hermetrim(self):
        coef = [2, -1, 1, 0]
        # Test exceptions
        assert_raises(ValueError, herme.hermetrim, coef, -1)
        # Test results
        assert_equal(herme.hermetrim(coef), coef[:-1])
        assert_equal(herme.hermetrim(coef, 1), coef[:-3])
        assert_equal(herme.hermetrim(coef, 2), [0])
    def test_hermeline(self):
        assert_equal(herme.hermeline(3, 4), [3, 4])
    def test_herme2poly(self):
        for i in range(10):
            assert_almost_equal(herme.herme2poly([0]*i + [1]), Helist[i])
    def test_poly2herme(self):
        for i in range(10):
            assert_almost_equal(herme.poly2herme(Helist[i]), [0]*i + [1])
    def test_weight(self):
        # probabilists' weight exp(-x**2/2)
        x = np.linspace(-5, 5, 11)
        tgt = np.exp(-.5*x**2)
        res = herme.hermeweight(x)
        assert_almost_equal(res, tgt)
# Allow running this file directly via numpy's (legacy) nose-based runner.
if __name__ == "__main__":
    run_module_suite()
| bsd-3-clause |
beckdaniel/GPy | GPy/old_tests/mapping_tests.py | 15 | 2156 | # Copyright (c) 2012, 2013 GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import unittest
import numpy as np
import GPy
class MappingGradChecker(GPy.core.Model):
    """
    This class has everything we need to check the gradient of a mapping. It
    implements a simple likelihood which is the sum of the outputs of the
    mapping. The gradients are checked against the parameters of the mapping
    and the input.
    """
    def __init__(self, mapping, X, name):
        # BUG FIX: was `super(MappingChecker).__init__(self, name)` -- the
        # class name didn't exist (NameError) and super() was called without
        # the instance, so Model.__init__ never ran.
        super(MappingGradChecker, self).__init__(name)
        self.mapping = mapping
        self.add_parameter(self.mapping)
        self.X = GPy.core.Param('X', X)
        self.add_parameter(self.X)
        # BUG FIX: was `np.ones((self.X.shape[0]. self.mapping.output_dim))`
        # -- a '.' typo instead of ',', producing attribute access rather
        # than the intended (n, output_dim) shape tuple.
        self.dL_dY = np.ones((self.X.shape[0], self.mapping.output_dim))
    def log_likelihood(self):
        # BUG FIX: `X` was an undefined global here; the data lives on self.X.
        return np.sum(self.mapping.f(self.X))
    def parameters_changed(self):
        # push dL/dY back through the mapping for both X and its parameters
        self.X.gradient = self.mapping.gradients_X(self.dL_dY, self.X)
        self.mapping.update_gradients(self.dL_dY, self.X)
class MappingTests(unittest.TestCase):
    """Gradient checks for the Kernel, Linear and MLP mappings."""
    def test_kernelmapping(self):
        verbose = False
        mapping = GPy.mappings.Kernel(np.random.rand(10, 3), 2)
        # NOTE(review): this test reaches the checkers via GPy.core.mapping.*
        # while the two tests below use GPy.core.* directly -- confirm which
        # attribute path actually exists and make them consistent.
        self.assertTrue(GPy.core.mapping.Mapping_check_df_dtheta(mapping=mapping).checkgrad(verbose=verbose))
        self.assertTrue(GPy.core.mapping.Mapping_check_df_dX(mapping=mapping).checkgrad(verbose=verbose))
    def test_linearmapping(self):
        verbose = False
        mapping = GPy.mappings.Linear(3, 2)
        self.assertTrue(GPy.core.Mapping_check_df_dtheta(mapping=mapping).checkgrad(verbose=verbose))
        self.assertTrue(GPy.core.Mapping_check_df_dX(mapping=mapping).checkgrad(verbose=verbose))
    def test_mlpmapping(self):
        verbose = False
        mapping = GPy.mappings.MLP(input_dim=2, hidden_dim=[3, 4, 8, 2], output_dim=2)
        self.assertTrue(GPy.core.Mapping_check_df_dtheta(mapping=mapping).checkgrad(verbose=verbose))
        self.assertTrue(GPy.core.Mapping_check_df_dX(mapping=mapping).checkgrad(verbose=verbose))
if __name__ == "__main__":
    # Use the parenthesised print form: with a single argument it behaves
    # identically under Python 2 and Python 3, unlike the original
    # Python-2-only statement syntax.
    print("Running unit tests, please be (very) patient...")
    unittest.main()
| bsd-3-clause |
qvazzler/Flexget | tests/test_exec.py | 1 | 3067 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
import sys
class TestExec(object):
    """Exercise the ``exec`` plugin: template substitution of entry fields
    into shell command lines, including quoting/escaping behaviour."""

    # __tmp__ triggers the test framework's temp-dir substitution for the
    # '__tmp__' placeholder used inside the config below.
    __tmp__ = True
    config = """
        templates:
          global:
            set:
              temp_dir: '__tmp__'
            accept_all: yes
        tasks:
          replace_from_entry:
            mock:
              - {title: 'replace'}
              - {title: 'replace with spaces'}
            exec: """ + sys.executable + """ exec.py "{{temp_dir}}" "{{title}}"
          test_adv_format:
            mock:
              - {title: entry1, location: '/path/with spaces', quotefield: "with'quote"}
            exec:
              on_output:
                for_entries: """ + sys.executable + """ exec.py "{{temp_dir}}" "{{title}}" "{{location}}" "/the/final destinaton/" "a {{quotefield}}" "/a hybrid{{location}}"
          test_auto_escape:
            mock:
              - {title: entry2, quotes: single ' double", otherchars: '% a $a! ` *'}
            exec:
              auto_escape: yes
              on_output:
                for_entries: """ + sys.executable + """ exec.py "{{temp_dir}}" "{{title}}" "{{quotes}}" "/start/{{quotes}}" "{{otherchars}}"
    """

    def test_replace_from_entry(self, execute_task, tmpdir):
        # exec.py (the helper script) creates a file named after each entry
        # title; its existence proves the {{title}} substitution ran.
        task = execute_task('replace_from_entry')
        assert len(task.accepted) == 2, "not all entries were accepted"
        for entry in task.accepted:
            assert tmpdir.join(entry['title']).exists(), "exec.py did not create a file for %s" % entry['title']

    def test_adv_format(self, execute_task, tmpdir):
        # exec.py writes each substituted argument on its own line; compare
        # them one by one against the expected substitutions.
        task = execute_task('test_adv_format')
        for entry in task.accepted:
            with tmpdir.join(entry['title']).open('r') as infile:
                line = infile.readline().rstrip('\n')
                assert line == '/path/with spaces', '%s != /path/with spaces' % line
                line = infile.readline().rstrip('\n')
                assert line == '/the/final destinaton/', '%s != /the/final destinaton/' % line
                line = infile.readline().rstrip('\n')
                assert line == 'a with\'quote', '%s != a with\'quote' % line
                line = infile.readline().rstrip('\n')
                assert line == '/a hybrid/path/with spaces', '%s != /a hybrid/path/with spaces' % line

    # TODO: This doesn't work on linux.
    """
    def test_auto_escape(self, execute_task):
        task = execute_task('test_auto_escape')
        for entry in task.accepted:
            with open(os.path.join(self.__tmp__, entry['title']), 'r') as infile:
                line = infile.readline().rstrip('\n')
                assert line == 'single \' double\"', '%s != single \' double\"' % line
                line = infile.readline().rstrip('\n')
                assert line == '/start/single \' double\"', '%s != /start/single \' double\"' % line
                line = infile.readline().rstrip('\n')
                assert line == '% a $a! ` *', '%s != % a $a! ` *' % line
    """
| mit |
beni55/picochess | libs/requests/auth.py | 331 | 6123 | # -*- coding: utf-8 -*-
"""
requests.auth
~~~~~~~~~~~~~
This module contains the authentication handlers for Requests.
"""
import os
import re
import time
import hashlib
from base64 import b64encode
from .compat import urlparse, str
from .cookies import extract_cookies_to_jar
from .utils import parse_dict_header
CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
def _basic_auth_str(username, password):
"""Returns a Basic Auth string."""
return 'Basic ' + b64encode(('%s:%s' % (username, password)).encode('latin1')).strip().decode('latin1')
class AuthBase(object):
    """Base class that all auth implementations derive from."""

    def __call__(self, r):
        # Subclasses must attach their credentials to ``r`` and return it.
        raise NotImplementedError('Auth hooks must be callable.')
class HTTPBasicAuth(AuthBase):
    """Attaches HTTP Basic Authentication to the given Request object."""

    def __init__(self, username, password):
        self.username = username
        self.password = password

    def __call__(self, r):
        # Compute the header value once, then attach it to the request.
        auth_value = _basic_auth_str(self.username, self.password)
        r.headers['Authorization'] = auth_value
        return r
class HTTPProxyAuth(HTTPBasicAuth):
    """Attaches HTTP Proxy Authentication to a given Request object."""

    def __call__(self, r):
        # Same credentials as Basic auth, but in the proxy-specific header.
        proxy_value = _basic_auth_str(self.username, self.password)
        r.headers['Proxy-Authorization'] = proxy_value
        return r
class HTTPDigestAuth(AuthBase):
    """Attaches HTTP Digest Authentication to the given Request object."""
    def __init__(self, username, password):
        self.username = username
        self.password = password
        # State carried across requests so nonce counting works.
        self.last_nonce = ''
        self.nonce_count = 0
        self.chal = {}
        self.pos = None

    def build_digest_header(self, method, url):
        """Build the Digest ``Authorization`` header value for *method* and
        *url* from the server challenge stored in ``self.chal``.

        Returns None when the challenge's algorithm or qop is unsupported,
        in which case no header should be sent.
        """
        realm = self.chal['realm']
        nonce = self.chal['nonce']
        qop = self.chal.get('qop')
        algorithm = self.chal.get('algorithm')
        opaque = self.chal.get('opaque')

        if algorithm is None:
            _algorithm = 'MD5'
        else:
            _algorithm = algorithm.upper()
        # Bug fix: pre-initialize hash_utf8 so an unrecognized algorithm
        # reaches the explicit ``hash_utf8 is None`` guard below instead of
        # raising UnboundLocalError inside KD().
        hash_utf8 = None
        # lambdas assume digest modules are imported at the top level
        if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':
            def md5_utf8(x):
                if isinstance(x, str):
                    x = x.encode('utf-8')
                return hashlib.md5(x).hexdigest()
            hash_utf8 = md5_utf8
        elif _algorithm == 'SHA':
            def sha_utf8(x):
                if isinstance(x, str):
                    x = x.encode('utf-8')
                return hashlib.sha1(x).hexdigest()
            hash_utf8 = sha_utf8
        KD = lambda s, d: hash_utf8("%s:%s" % (s, d))

        if hash_utf8 is None:
            return None

        # XXX not implemented yet
        entdig = None
        p_parsed = urlparse(url)
        path = p_parsed.path
        if p_parsed.query:
            path += '?' + p_parsed.query

        A1 = '%s:%s:%s' % (self.username, realm, self.password)
        A2 = '%s:%s' % (method, path)

        HA1 = hash_utf8(A1)
        HA2 = hash_utf8(A2)

        # Reuse of a nonce bumps the nc counter; a fresh nonce resets it.
        if nonce == self.last_nonce:
            self.nonce_count += 1
        else:
            self.nonce_count = 1
        ncvalue = '%08x' % self.nonce_count
        # Client nonce: hash of counter, nonce, time and random bytes.
        s = str(self.nonce_count).encode('utf-8')
        s += nonce.encode('utf-8')
        s += time.ctime().encode('utf-8')
        s += os.urandom(8)

        cnonce = (hashlib.sha1(s).hexdigest()[:16])
        noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, HA2)
        if _algorithm == 'MD5-SESS':
            HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))

        if qop is None:
            respdig = KD(HA1, "%s:%s" % (nonce, HA2))
        elif qop == 'auth' or 'auth' in qop.split(','):
            respdig = KD(HA1, noncebit)
        else:
            # XXX handle auth-int.
            return None

        self.last_nonce = nonce

        # XXX should the partial digests be encoded too?
        base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
               'response="%s"' % (self.username, realm, nonce, path, respdig)
        if opaque:
            base += ', opaque="%s"' % opaque
        if algorithm:
            base += ', algorithm="%s"' % algorithm
        if entdig:
            base += ', digest="%s"' % entdig
        if qop:
            base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce)

        return 'Digest %s' % (base)

    def handle_401(self, r, **kwargs):
        """Takes the given response and tries digest-auth, if needed."""
        if self.pos is not None:
            # Rewind the file position indicator of the body to where
            # it was to resend the request.
            r.request.body.seek(self.pos)
        num_401_calls = getattr(self, 'num_401_calls', 1)
        s_auth = r.headers.get('www-authenticate', '')

        # Only retry once per request to avoid an auth-failure loop.
        if 'digest' in s_auth.lower() and num_401_calls < 2:
            setattr(self, 'num_401_calls', num_401_calls + 1)
            pat = re.compile(r'digest ', flags=re.IGNORECASE)
            self.chal = parse_dict_header(pat.sub('', s_auth, count=1))

            # Consume content and release the original connection
            # to allow our new request to reuse the same one.
            r.content
            r.raw.release_conn()
            prep = r.request.copy()
            extract_cookies_to_jar(prep._cookies, r.request, r.raw)
            prep.prepare_cookies(prep._cookies)

            prep.headers['Authorization'] = self.build_digest_header(
                prep.method, prep.url)
            _r = r.connection.send(prep, **kwargs)
            _r.history.append(r)
            _r.request = prep

            return _r

        setattr(self, 'num_401_calls', 1)
        return r

    def __call__(self, r):
        # If we have a saved nonce, skip the 401
        if self.last_nonce:
            r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
        try:
            # Remember the body position so it can be rewound on retry.
            self.pos = r.body.tell()
        except AttributeError:
            pass
        r.register_hook('response', self.handle_401)
        return r
| gpl-3.0 |
leiferikb/bitpop | build/third_party/sqlalchemy_0_7_1/sqlalchemy/orm/unitofwork.py | 8 | 21483 | # orm/unitofwork.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The internals for the unit of work system.
The session's flush() process passes objects to a contextual object
here, which assembles flush tasks based on mappers and their properties,
organizes them in order of dependency, and executes.
"""
from sqlalchemy import util, event
from sqlalchemy.util import topological
from sqlalchemy.orm import attributes, interfaces
from sqlalchemy.orm import util as mapperutil
session = util.importlater("sqlalchemy.orm", "session")
def track_cascade_events(descriptor, prop):
    """Establish event listeners on object attributes which handle
    cascade-on-set/append.
    """
    # Capture only the property key; the property itself is re-fetched per
    # event because the mapper configuration may change after registration.
    key = prop.key

    def append(state, item, initiator):
        # process "save_update" cascade rules for when
        # an instance is appended to the list of another instance
        sess = session._state_session(state)
        if sess:
            prop = state.manager.mapper._props[key]
            item_state = attributes.instance_state(item)
            if prop.cascade.save_update and \
                (prop.cascade_backrefs or key == initiator.key) and \
                not sess._contains_state(item_state):
                sess._save_or_update_state(item_state)
        # returning the item keeps the attribute event chain intact
        return item

    def remove(state, item, initiator):
        sess = session._state_session(state)
        if sess:
            prop = state.manager.mapper._props[key]
            # expunge pending orphans
            item_state = attributes.instance_state(item)
            if prop.cascade.delete_orphan and \
                item_state in sess._new and \
                prop.mapper._is_orphan(item_state):
                    sess.expunge(item)

    def set_(state, newvalue, oldvalue, initiator):
        # process "save_update" cascade rules for when an instance
        # is attached to another instance
        if oldvalue is newvalue:
            return newvalue

        sess = session._state_session(state)
        if sess:
            prop = state.manager.mapper._props[key]
            if newvalue is not None:
                newvalue_state = attributes.instance_state(newvalue)
                if prop.cascade.save_update and \
                    (prop.cascade_backrefs or key == initiator.key) and \
                    not sess._contains_state(newvalue_state):
                    sess._save_or_update_state(newvalue_state)

            if oldvalue is not None and prop.cascade.delete_orphan:
                oldvalue_state = attributes.instance_state(oldvalue)

                if oldvalue_state in sess._new and \
                    prop.mapper._is_orphan(oldvalue_state):
                    sess.expunge(oldvalue)
        return newvalue

    # raw=True passes InstanceState objects; retval=True lets the listeners
    # replace the value being set/appended.
    event.listen(descriptor, 'append', append, raw=True, retval=True)
    event.listen(descriptor, 'remove', remove, raw=True, retval=True)
    event.listen(descriptor, 'set', set_, raw=True, retval=True)
class UOWTransaction(object):
    """Tracks the full set of objects, mappers and dependency actions for
    one flush() of a Session, sorts them topologically, and executes them."""

    def __init__(self, session):
        self.session = session

        # dictionary used by external actors to
        # store arbitrary state information.
        self.attributes = {}

        # dictionary of mappers to sets of
        # DependencyProcessors, which are also
        # set to be part of the sorted flush actions,
        # which have that mapper as a parent.
        self.deps = util.defaultdict(set)

        # dictionary of mappers to sets of InstanceState
        # items pending for flush which have that mapper
        # as a parent.
        self.mappers = util.defaultdict(set)

        # a dictionary of Preprocess objects, which gather
        # additional states impacted by the flush
        # and determine if a flush action is needed
        self.presort_actions = {}

        # dictionary of PostSortRec objects, each
        # one issues work during the flush within
        # a certain ordering.
        self.postsort_actions = {}

        # a set of 2-tuples, each containing two
        # PostSortRec objects where the second
        # is dependent on the first being executed
        # first
        self.dependencies = set()

        # dictionary of InstanceState-> (isdelete, listonly)
        # tuples, indicating if this state is to be deleted
        # or insert/updated, or just refreshed
        self.states = {}

        # tracks InstanceStates which will be receiving
        # a "post update" call.  Keys are mappers,
        # values are a set of states and a set of the
        # columns which should be included in the update.
        self.post_update_states = util.defaultdict(lambda: (set(), set()))

    @property
    def has_work(self):
        # True when at least one state has been registered for flush.
        return bool(self.states)

    def is_deleted(self, state):
        """return true if the given state is marked as deleted
        within this uowtransaction."""

        return state in self.states and self.states[state][0]

    def memo(self, key, callable_):
        """Return a cached value for *key*, computing it via *callable_*
        on first access."""
        if key in self.attributes:
            return self.attributes[key]
        else:
            self.attributes[key] = ret = callable_()
            return ret

    def remove_state_actions(self, state):
        """remove pending actions for a state from the uowtransaction."""

        isdelete = self.states[state][0]

        # keep the delete flag but mark the state "listonly" (no SQL emitted)
        self.states[state] = (isdelete, True)

    def get_attribute_history(self, state, key,
                            passive=attributes.PASSIVE_NO_INITIALIZE):
        """facade to attributes.get_state_history(), including caching of results."""

        hashkey = ("history", state, key)

        # cache the objects, not the states; the strong reference here
        # prevents newly loaded objects from being dereferenced during the
        # flush process
        if hashkey in self.attributes:
            history, state_history, cached_passive = self.attributes[hashkey]
            # if the cached lookup was "passive" and now
            # we want non-passive, do a non-passive lookup and re-cache
            if cached_passive is not attributes.PASSIVE_OFF \
                and passive is attributes.PASSIVE_OFF:
                impl = state.manager[key].impl
                history = impl.get_history(state, state.dict,
                                    attributes.PASSIVE_OFF)
                if history and impl.uses_objects:
                    state_history = history.as_state()
                else:
                    state_history = history
                self.attributes[hashkey] = (history, state_history, passive)
        else:
            impl = state.manager[key].impl
            # TODO: store the history as (state, object) tuples
            # so we don't have to keep converting here
            history = impl.get_history(state, state.dict, passive)
            if history and impl.uses_objects:
                state_history = history.as_state()
            else:
                state_history = history
            self.attributes[hashkey] = (history, state_history, passive)

        return state_history

    def has_dep(self, processor):
        # True if a parent-derived Preprocess was registered for *processor*.
        return (processor, True) in self.presort_actions

    def register_preprocessor(self, processor, fromparent):
        """Register a Preprocess action for *processor*, keyed so the same
        processor/direction pair is only registered once."""
        key = (processor, fromparent)
        if key not in self.presort_actions:
            self.presort_actions[key] = Preprocess(processor, fromparent)

    def register_object(self, state, isdelete=False,
                            listonly=False, cancel_delete=False,
                            operation=None, prop=None):
        """Add *state* to the flush plan; returns False if the state does
        not belong to this session and was skipped."""
        if not self.session._contains_state(state):
            if not state.deleted and operation is not None:
                util.warn("Object of type %s not in session, %s operation "
                            "along '%s' will not proceed" %
                            (mapperutil.state_class_str(state), operation, prop))
            return False

        if state not in self.states:
            mapper = state.manager.mapper

            # first state seen for this mapper: let the mapper contribute
            # its per-mapper flush actions.
            if mapper not in self.mappers:
                mapper._per_mapper_flush_actions(self)

            self.mappers[mapper].add(state)
            self.states[state] = (isdelete, listonly)
        else:
            # an existing registration may be "upgraded" from listonly
            if not listonly and (isdelete or cancel_delete):
                self.states[state] = (isdelete, False)
        return True

    def issue_post_update(self, state, post_update_cols):
        """Queue *state* for a deferred UPDATE of *post_update_cols* after
        the main flush pass."""
        mapper = state.manager.mapper.base_mapper
        states, cols = self.post_update_states[mapper]
        states.add(state)
        cols.update(post_update_cols)

    @util.memoized_property
    def _mapper_for_dep(self):
        """return a dynamic mapping of (Mapper, DependencyProcessor) to
        True or False, indicating if the DependencyProcessor operates
        on objects of that Mapper.

        The result is stored in the dictionary persistently once
        calculated.

        """
        return util.PopulateDict(
                    lambda tup:tup[0]._props.get(tup[1].key) is tup[1].prop
                )

    def filter_states_for_dep(self, dep, states):
        """Filter the given list of InstanceStates to those relevant to the
        given DependencyProcessor.

        """
        mapper_for_dep = self._mapper_for_dep
        return [s for s in states if mapper_for_dep[(s.manager.mapper, dep)]]

    def states_for_mapper_hierarchy(self, mapper, isdelete, listonly):
        # yield every registered state of the base mapper's hierarchy whose
        # (isdelete, listonly) flags match exactly.
        checktup = (isdelete, listonly)
        for mapper in mapper.base_mapper.self_and_descendants:
            for state in self.mappers[mapper]:
                if self.states[state] == checktup:
                    yield state

    def _generate_actions(self):
        """Generate the full, unsorted collection of PostSortRecs as
        well as dependency pairs for this UOWTransaction.

        """
        # execute presort_actions, until all states
        # have been processed.   a presort_action might
        # add new states to the uow.
        while True:
            ret = False
            for action in list(self.presort_actions.values()):
                if action.execute(self):
                    ret = True
            if not ret:
                break

        # see if the graph of mapper dependencies has cycles.
        self.cycles = cycles = topological.find_cycles(
                                        self.dependencies,
                                        self.postsort_actions.values())

        if cycles:
            # if yes, break the per-mapper actions into
            # per-state actions
            convert = dict(
                (rec, set(rec.per_state_flush_actions(self)))
                for rec in cycles
            )

            # rewrite the existing dependencies to point to
            # the per-state actions for those per-mapper actions
            # that were broken up.
            for edge in list(self.dependencies):
                if None in edge or \
                    edge[0].disabled or edge[1].disabled or \
                    cycles.issuperset(edge):
                    self.dependencies.remove(edge)
                elif edge[0] in cycles:
                    self.dependencies.remove(edge)
                    for dep in convert[edge[0]]:
                        self.dependencies.add((dep, edge[1]))
                elif edge[1] in cycles:
                    self.dependencies.remove(edge)
                    for dep in convert[edge[1]]:
                        self.dependencies.add((edge[0], dep))

        return set([a for a in self.postsort_actions.values()
                    if not a.disabled
                    ]
                    ).difference(cycles)

    def execute(self):
        """Run every flush action in topologically sorted order."""
        postsort_actions = self._generate_actions()

        #sort = topological.sort(self.dependencies, postsort_actions)
        #print "--------------"
        #print self.dependencies
        #print list(sort)
        #print "COUNT OF POSTSORT ACTIONS", len(postsort_actions)

        # execute
        if self.cycles:
            # cycle case: process dependency-equivalent subsets, letting
            # each record aggregate its compatible siblings in the subset.
            for set_ in topological.sort_as_subsets(
                                            self.dependencies,
                                            postsort_actions):
                while set_:
                    n = set_.pop()
                    n.execute_aggregate(self, set_)
        else:
            for rec in topological.sort(
                                    self.dependencies,
                                    postsort_actions):
                rec.execute(self)

    def finalize_flush_changes(self):
        """mark processed objects as clean / deleted after a successful flush().

        this method is called within the flush() method after the
        execute() method has succeeded and the transaction has been committed.

        """
        # NOTE(review): iteritems() is Python-2-only; this module predates
        # Python 3 support.
        for state, (isdelete, listonly) in self.states.iteritems():
            if isdelete:
                self.session._remove_newly_deleted(state)
            else:
                # if listonly:
                #   debug... would like to see how many do this
                self.session._register_newly_persistent(state)
class IterateMappersMixin(object):
    """Shared helper: iterate the mappers a dependency processor applies to."""

    def _mappers(self, uow):
        dep = self.dependency_processor
        if not self.fromparent:
            # child-side: every mapper in the processor's own hierarchy
            return dep.mapper.self_and_descendants
        # parent-side: restrict to mappers this processor actually handles
        return iter(
            mapper for mapper in dep.parent.self_and_descendants
            if uow._mapper_for_dep[(mapper, dep)]
        )
class Preprocess(IterateMappersMixin):
    """Presort action: hand newly registered flush states to one
    DependencyProcessor and set up its flush actions when needed."""

    def __init__(self, dependency_processor, fromparent):
        self.dependency_processor = dependency_processor
        self.fromparent = fromparent
        # states already passed to presort_deletes()/presort_saves()
        self.processed = set()
        self.setup_flush_actions = False

    def execute(self, uow):
        """Process states not yet seen; return True if any work was done
        (the caller loops over all Preprocess objects until none report
        new work, since presorting may register additional states)."""
        delete_states = set()
        save_states = set()

        for mapper in self._mappers(uow):
            for state in uow.mappers[mapper].difference(self.processed):
                (isdelete, listonly) = uow.states[state]
                if not listonly:
                    if isdelete:
                        delete_states.add(state)
                    else:
                        save_states.add(state)

        if delete_states:
            self.dependency_processor.presort_deletes(uow, delete_states)
            self.processed.update(delete_states)
        if save_states:
            self.dependency_processor.presort_saves(uow, save_states)
            self.processed.update(save_states)

        if (delete_states or save_states):
            # lazily register the processor's flush actions the first time
            # it reports actual property changes
            if not self.setup_flush_actions and (
                    self.dependency_processor.\
                        prop_has_changes(uow, delete_states, True) or
                    self.dependency_processor.\
                        prop_has_changes(uow, save_states, False)
                ):
                self.dependency_processor.per_property_flush_actions(uow)
                self.setup_flush_actions = True
            return True
        else:
            return False
class PostSortRec(object):
    """A unit of flush work; instances are interned per-uowtransaction by
    their (class, constructor-args) key."""

    disabled = False

    def __new__(cls, uow, *args):
        # Intern records: the same (class, args) combination within one
        # uowtransaction always resolves to a single instance.
        key = (cls, ) + args
        existing = uow.postsort_actions.get(key)
        if existing is not None:
            return existing
        ret = object.__new__(cls)
        uow.postsort_actions[key] = ret
        return ret

    def execute_aggregate(self, uow, recs):
        # Default aggregation: no batching, just run this record alone.
        self.execute(uow)

    def __repr__(self):
        attr_summary = ",".join(str(value) for value in self.__dict__.values())
        return "%s(%s)" % (self.__class__.__name__, attr_summary)
class ProcessAll(IterateMappersMixin, PostSortRec):
    """Flush action: run one DependencyProcessor over every matching
    pending state (all saves, or all deletes)."""

    def __init__(self, uow, dependency_processor, delete, fromparent):
        self.dependency_processor = dependency_processor
        self.delete = delete
        self.fromparent = fromparent
        # record the processor under its parent's base mapper so the
        # per-state breakup (cycle handling) can find it
        uow.deps[dependency_processor.parent.base_mapper].add(dependency_processor)

    def execute(self, uow):
        states = self._elements(uow)
        if self.delete:
            self.dependency_processor.process_deletes(uow, states)
        else:
            self.dependency_processor.process_saves(uow, states)

    def per_state_flush_actions(self, uow):
        # this is handled by SaveUpdateAll and DeleteAll,
        # since a ProcessAll should unconditionally be pulled
        # into per-state if either the parent/child mappers
        # are part of a cycle
        return iter([])

    def __repr__(self):
        return "%s(%s, delete=%s)" % (
            self.__class__.__name__,
            self.dependency_processor,
            self.delete
        )

    def _elements(self, uow):
        # yield the non-listonly states whose delete flag matches this action
        for mapper in self._mappers(uow):
            for state in uow.mappers[mapper]:
                (isdelete, listonly) = uow.states[state]
                if isdelete == self.delete and not listonly:
                    yield state
class IssuePostUpdate(PostSortRec):
    """Flush action: emit the deferred "post update" statements queued for
    one base mapper."""

    def __init__(self, uow, mapper, isdelete):
        self.mapper = mapper
        self.isdelete = isdelete

    def execute(self, uow):
        pending, cols = uow.post_update_states[self.mapper]
        # only the states whose delete flag matches this record's phase
        matching = [state for state in pending
                    if uow.states[state][0] == self.isdelete]
        self.mapper._post_update(matching, uow, cols)
class SaveUpdateAll(PostSortRec):
    """Flush action: INSERT/UPDATE every pending non-delete state for an
    entire base-mapper hierarchy."""

    def __init__(self, uow, mapper):
        self.mapper = mapper
        # hierarchy-wide actions are only created for base mappers
        assert mapper is mapper.base_mapper

    def execute(self, uow):
        self.mapper._save_obj(
            uow.states_for_mapper_hierarchy(self.mapper, False, False),
            uow
        )

    def per_state_flush_actions(self, uow):
        # break this per-mapper action into per-state actions; used when
        # the mapper dependency graph contains cycles
        states = list(uow.states_for_mapper_hierarchy(self.mapper, False, False))
        for rec in self.mapper._per_state_flush_actions(
                                        uow,
                                        states,
                                        False):
            yield rec

        # also break up the dependency processors attached to this mapper
        for dep in uow.deps[self.mapper]:
            states_for_prop = uow.filter_states_for_dep(dep, states)
            dep.per_state_flush_actions(uow, states_for_prop, False)
class DeleteAll(PostSortRec):
    """Flush action: DELETE every pending delete-flagged state for an
    entire base-mapper hierarchy (mirror image of SaveUpdateAll)."""

    def __init__(self, uow, mapper):
        self.mapper = mapper
        # hierarchy-wide actions are only created for base mappers
        assert mapper is mapper.base_mapper

    def execute(self, uow):
        self.mapper._delete_obj(
            uow.states_for_mapper_hierarchy(self.mapper, True, False),
            uow
        )

    def per_state_flush_actions(self, uow):
        # break this per-mapper action into per-state actions; used when
        # the mapper dependency graph contains cycles
        states = list(uow.states_for_mapper_hierarchy(self.mapper, True, False))
        for rec in self.mapper._per_state_flush_actions(
                                        uow,
                                        states,
                                        True):
            yield rec

        for dep in uow.deps[self.mapper]:
            states_for_prop = uow.filter_states_for_dep(dep, states)
            dep.per_state_flush_actions(uow, states_for_prop, True)
class ProcessState(PostSortRec):
    """Per-state dependency-processor action, created when mapper cycles
    force per-mapper actions to be broken up."""

    def __init__(self, uow, dependency_processor, delete, state):
        self.dependency_processor = dependency_processor
        self.delete = delete
        self.state = state

    def execute_aggregate(self, uow, recs):
        # pull every compatible record out of ``recs`` and process all of
        # their states in one batch
        cls_ = self.__class__
        dependency_processor = self.dependency_processor
        delete = self.delete
        our_recs = [r for r in recs
                        if r.__class__ is cls_ and
                        r.dependency_processor is dependency_processor and
                        r.delete is delete]
        recs.difference_update(our_recs)
        states = [self.state] + [r.state for r in our_recs]
        if delete:
            dependency_processor.process_deletes(uow, states)
        else:
            dependency_processor.process_saves(uow, states)

    def __repr__(self):
        return "%s(%s, %s, delete=%s)" % (
            self.__class__.__name__,
            self.dependency_processor,
            mapperutil.state_str(self.state),
            self.delete
        )
class SaveUpdateState(PostSortRec):
    """Per-state INSERT/UPDATE action, created when mapper cycles force
    per-mapper actions to be broken up."""

    def __init__(self, uow, state, mapper):
        self.state = state
        self.mapper = mapper

    def execute_aggregate(self, uow, recs):
        # Pull every sibling record of the same class/mapper out of the
        # pending set so the mapper can save all of the states in one pass.
        mapper = self.mapper
        siblings = [rec for rec in recs
                    if rec.__class__ is self.__class__ and
                    rec.mapper is mapper]
        recs.difference_update(siblings)
        pending_states = [self.state] + [rec.state for rec in siblings]
        mapper._save_obj(pending_states, uow)

    def __repr__(self):
        return "%s(%s)" % (
            self.__class__.__name__,
            mapperutil.state_str(self.state)
        )
class DeleteState(PostSortRec):
    """Per-state DELETE action, created when mapper cycles force
    per-mapper actions to be broken up."""

    def __init__(self, uow, state, mapper):
        self.state = state
        self.mapper = mapper

    def execute_aggregate(self, uow, recs):
        # batch all compatible records into one _delete_obj() call
        cls_ = self.__class__
        mapper = self.mapper
        our_recs = [r for r in recs
                        if r.__class__ is cls_ and
                        r.mapper is mapper]
        recs.difference_update(our_recs)
        states = [self.state] + [r.state for r in our_recs]
        # only pass states still flagged as deletes; an aggregate sibling
        # may have been re-registered as a save in the meantime
        mapper._delete_obj(
                    [s for s in states if uow.states[s][0]],
                    uow)

    def __repr__(self):
        return "%s(%s)" % (
            self.__class__.__name__,
            mapperutil.state_str(self.state)
        )
| gpl-3.0 |
root-mirror/root | build/unix/makeversion.py | 15 | 1920 | # Script to update base/inc/RVersion.h.
# Called by main Makefile as soon as build/version_number has been updated.
#
# Author: Axel, 2020-03-06
import os, subprocess, re
from datetime import date, datetime

# Read the single-line version string, e.g. "6.32/04".
versionline = ""
with open("build/version_number", "r") as file:
    versionline = file.read().replace('\n', '')

# Bug fix: validate the match object *before* calling .groups().
# re.match() returns None for a non-matching string, so the original
# ``len(matches) != 3`` check could never fire -- an AttributeError on
# ``.groups()`` would mask it.
match = re.match(r'^(\d+)[.](\d+)/(\d+)$', versionline)
if match is None:
    raise RuntimeError("build/version_number: invalid syntax")

major, minor, patch = (int(part) for part in match.groups())

# Encode the version as 0xMMmmpp -- the same scheme as the generated
# ROOT_VERSION() macro below.
vers_code = (major << 16) + (minor << 8) + patch

datenow = date.today().strftime("%b %d %Y") # Sep 11 2019
timenow = datetime.now().strftime("%H:%M:%S") # 15:05:55

sourcecode = """#ifndef ROOT_RVersion
#define ROOT_RVersion
/* Version information automatically generated by installer. */
/*
 * These macros can be used in the following way:
 *
 * #if ROOT_VERSION_CODE >= ROOT_VERSION(6,32,4)
 * #include <newheader.h>
 * #else
 * #include <oldheader.h>
 * #endif
 *
 */
#define ROOT_RELEASE "{}"
#define ROOT_RELEASE_DATE "{}"
#define ROOT_RELEASE_TIME "{}"
#define ROOT_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
#define ROOT_VERSION_CODE ROOT_VERSION({},{},{}) /* {} */
#endif
""".format(versionline, datenow, timenow, major, minor, patch, vers_code)

with open('core/foundation/inc/RVersion.h', 'w') as file:
    file.write(sourcecode)

# Regenerate the core-team header alongside the version header.
subprocess.check_call("build/unix/coreteam.sh rootx/src/rootcoreteam.h", shell = True)

print("Committing changes.")
subprocess.check_call(['git', 'commit',
                       'core/foundation/inc/RVersion.h', 'rootx/src/rootcoreteam.h',
                       'build/version_number', 'documentation/doxygen/Doxyfile',
                       '-m', '"Update ROOT version files to v{}."'.format(versionline)])

print("""
New version is {}.
See https://root.cern/release-checklist for the next steps,
for instance tagging if this is a release.""".format(versionline))
| lgpl-2.1 |
pombredanne/django-moderation | tests/tests/unit/testforms.py | 2 | 3554 | from __future__ import unicode_literals
from django.db.models.fields.files import ImageFieldFile
from django.forms import CharField
from django.contrib.auth.models import User
from django.test.testcases import TestCase
from tests.models import UserProfile, ModelWithImage
from moderation.forms import BaseModeratedObjectForm
from tests.utils import setup_moderation, teardown_moderation
from moderation.utils import django_17
class FormsTestCase(TestCase):
    """Tests for BaseModeratedObjectForm: form initial data must reflect
    the pending (moderated) version of an object, not the approved one."""

    fixtures = ['test_users.json']

    def setUp(self):
        self.user = User.objects.get(username='moderator')

        # A concrete form subclass for UserProfile, with one extra non-model
        # field so initial-data merging can be verified.
        class ModeratedObjectForm(BaseModeratedObjectForm):
            extra = CharField(required=False)

            class Meta:
                model = UserProfile
                if django_17():
                    fields = '__all__'

        self.ModeratedObjectForm = ModeratedObjectForm
        self.moderation = setup_moderation([UserProfile, ModelWithImage])

    def tearDown(self):
        teardown_moderation()

    def test_create_form_class(self):
        form = self.ModeratedObjectForm()
        self.assertEqual(form._meta.model.__name__, 'UserProfile')

    def test_if_form_is_initialized_new_object(self):
        profile = UserProfile(description="New description",
                              url='http://test.com',
                              user=self.user)
        profile.save()
        form = self.ModeratedObjectForm(instance=profile)
        self.assertEqual(form.initial['description'], 'New description')

    def test_if_form_is_initialized_existing_object(self):
        profile = UserProfile(description="old description",
                              url='http://test.com',
                              user=self.user)
        profile.save()
        profile.moderated_object.approve(moderated_by=self.user)
        profile.description = "Changed description"
        profile.save()
        form = self.ModeratedObjectForm(instance=profile)
        profile = UserProfile.objects.get(id=1)
        # the database still holds the approved version...
        self.assertEqual(profile.description, "old description")
        # ...but the form must show the pending (moderated) change
        self.assertEqual(form.initial['description'], 'Changed description')

    def test_if_form_has_image_field_instance_of_image_field_file(self):
        object = ModelWithImage(image='my_image.jpg')
        object.save()
        object = ModelWithImage.unmoderated_objects.get(id=1)
        form = self.ModeratedObjectForm(instance=object)
        self.assertTrue(isinstance(form.initial['image'], ImageFieldFile),
                        'image in form.initial is instance of ImageField File')

    def test_form_when_obj_has_no_moderated_obj(self):
        # object saved while moderation was unregistered has no
        # moderated_object; the form must fall back to the instance data
        self.moderation.unregister(UserProfile)
        profile = UserProfile(description="old description",
                              url='http://test.com',
                              user=self.user)
        profile.save()
        self.moderation.register(UserProfile)
        form = self.ModeratedObjectForm(instance=profile)
        self.assertEqual(form.initial['description'], 'old description')

    def test_if_form_is_initialized_new_object_with_initial(self):
        profile = UserProfile(description="New description",
                              url='http://test.com',
                              user=self.user)
        profile.save()
        # explicit initial data must be merged with instance-derived initial
        form = self.ModeratedObjectForm(initial={'extra': 'value'},
                                        instance=profile)
        self.assertEqual(form.initial['description'], 'New description')
        self.assertEqual(form.initial['extra'], 'value')
| bsd-3-clause |
jpallas/beakerx | beakerx/beakerx/commands.py | 1 | 2777 | # Copyright 2018 TWO SIGMA OPEN SOURCE, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
import beakerx
from notebook import notebookapp as app
from .install import install, uninstall
from .bkr2ipynb import main
def install_subparser(subparser):
    """Register the ``install`` sub-command on *subparser* and return it."""
    cmd = subparser.add_parser('install', help='installs BeakerX extensions')
    cmd.set_defaults(func=install)
    cmd.add_argument(
        "--prefix",
        help="location of the environment to install into",
        default=sys.prefix)
    cmd.add_argument(
        "--lab",
        help="install lab extension",
        action='store_true')
    return subparser
def uninstall_subparser(subparser):
    """Register the ``uninstall`` sub-command on *subparser* and return it."""
    cmd = subparser.add_parser('uninstall', help='uninstalls BeakerX extensions')
    cmd.set_defaults(func=uninstall)
    cmd.add_argument(
        "--prefix",
        help="location of the environment to uninstall from",
        default=sys.prefix)
    return subparser
def bkr2ipynb_subparser(subparser):
    """Register the ``bkr2ipynb`` conversion sub-command and return *subparser*."""
    cmd = subparser.add_parser('bkr2ipynb', help='converts Beaker notebooks to ipynb format')
    cmd.set_defaults(func=main)
    cmd.add_argument(
        'notebooks', nargs='+',
        help="Beaker notebooks to be converted. Enter *.bkr in case you want to convert all notebooks at once.")
    return subparser
def run_jupyter(jupyter_commands):
    # Forward the remaining command-line arguments straight to the Jupyter
    # notebook application (``notebook.notebookapp``).
    app.launch_new_instance(jupyter_commands)
def init_parser():
    """Build the top-level BeakerX argument parser with all sub-commands."""
    root = argparse.ArgumentParser()
    root.add_argument('--version', action='version', version=beakerx.__version__)
    # default handler: forward everything to Jupyter when no sub-command given
    root.set_defaults(func=run_jupyter)

    commands = root.add_subparsers()
    install_subparser(commands)
    uninstall_subparser(commands)
    bkr2ipynb_subparser(commands)

    return root
def parse():
    # Entry point: dispatch either to a BeakerX sub-command or to Jupyter.
    parser = init_parser()
    # parse_known_args() splits recognised BeakerX options from the rest
    args, jupyter_commands = parser.parse_known_args()
    if args.func == run_jupyter:
        # no sub-command given: hand everything unparsed to Jupyter
        args.func(jupyter_commands)
    elif not jupyter_commands:
        args.func(args)
    else:
        # a sub-command plus unrecognised extras: let argparse report them
        parser.parse_args(jupyter_commands)
| apache-2.0 |
oudalab/fajita | pythonAPI/flask/lib/python3.5/site-packages/jinja2/optimizer.py | 222 | 1722 | # -*- coding: utf-8 -*-
"""
jinja2.optimizer
~~~~~~~~~~~~~~~~
The jinja optimizer is currently trying to constant fold a few expressions
and modify the AST in place so that it should be easier to evaluate it.
Because the AST does not contain all the scoping information and the
compiler has to find that out, we cannot do all the optimizations we
want. For example loop unrolling doesn't work because unrolled loops would
have a different scoping.
The solution would be a second syntax tree that has the scoping rules stored.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD.
"""
from jinja2 import nodes
from jinja2.visitor import NodeTransformer
def optimize(node, environment):
    """Constant fold the given AST in place and return it.

    The context hint can be used to perform an static optimization
    based on the context given.
    """
    return Optimizer(environment).visit(node)
class Optimizer(NodeTransformer):
    # AST transformer that replaces compile-time evaluable expression
    # nodes with equivalent ``nodes.Const`` nodes.
    def __init__(self, environment):
        self.environment = environment
    def fold(self, node, eval_ctx=None):
        """Do constant folding."""
        # Fold children first so composite expressions collapse bottom-up.
        node = self.generic_visit(node)
        try:
            return nodes.Const.from_untrusted(node.as_const(eval_ctx),
                                              lineno=node.lineno,
                                              environment=self.environment)
        except nodes.Impossible:
            # Not evaluable at compile time; keep the node unchanged.
            return node
    # Route every potentially-constant expression node type through
    # ``fold``, then drop the helper name from the class namespace so
    # only the ``visit_*`` aliases remain.
    visit_Add = visit_Sub = visit_Mul = visit_Div = visit_FloorDiv = \
        visit_Pow = visit_Mod = visit_And = visit_Or = visit_Pos = visit_Neg = \
        visit_Not = visit_Compare = visit_Getitem = visit_Getattr = visit_Call = \
        visit_Filter = visit_Test = visit_CondExpr = fold
    del fold
| mit |
cratuki/solent | scenarios/eng_40_simple_udp_sub.py | 2 | 5645 | # // license
# Copyright 2016, Free Software Foundation.
#
# This file is part of Solent.
#
# Solent is free software: you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# Solent is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# Solent. If not, see <http://www.gnu.org/licenses/>.
from solent import Engine
from solent import log
from solent import ns
from solent import RailLineFinder
from solent import SolentQuitException
# --------------------------------------------------------
# model
# --------------------------------------------------------
I_NEARCAST = '''
i message h
i field h
message init
field net_addr
field net_port
message exit
message received_from_network
field msg
'''
class RailBroadcastListener:
    """Subscribes to UDP broadcast traffic and surfaces complete lines.

    Bytes received from the subscription are run through a
    RailLineFinder; each completed line is reported to the owner via
    the cb_broadcast_listener_data callback supplied to ``zero``.
    """
    def __init__(self):
        self.rail_line_finder = RailLineFinder()
        self.cs_broadcast_listener_data = ns()
    def call_broadcast_listener_data(self, rail_h, msg):
        # Fill the reusable callback struct and invoke the owner callback.
        self.cs_broadcast_listener_data.rail_h = rail_h
        self.cs_broadcast_listener_data.msg = msg
        self.cb_broadcast_listener_data(
            cs_broadcast_listener_data=self.cs_broadcast_listener_data)
    def zero(self, rail_h, cb_broadcast_listener_data, engine, ip, port):
        """Initialise the rail and open the UDP subscription on ip:port."""
        self.rail_h = rail_h
        self.cb_broadcast_listener_data = cb_broadcast_listener_data
        self.engine = engine
        self.ip = ip
        self.port = port
        #
        self.rail_line_finder.zero(
            rail_h=self.rail_h, # we pass the rail_h value through
            cb_line_finder_event=self.cb_line_finder_event)
        #
        # (fix: this was a stray debug print; use the solent log
        # facility like the rest of this module)
        log('opening sub %s:%s'%(ip, port))
        self.engine.open_sub(
            addr=ip,
            port=port,
            cb_sub_start=self.cb_sub_start,
            cb_sub_stop=self.cb_sub_stop,
            cb_sub_recv=self.cb_sub_recv)
        #
    def cb_line_finder_event(self, cs_line_finder_event):
        rail_h = cs_line_finder_event.rail_h
        msg = cs_line_finder_event.msg
        #
        # Note that above we caught the rail_h value that we had previously
        # passed through.
        self.call_broadcast_listener_data(
            rail_h=rail_h,
            msg=msg)
    #
    def cb_sub_start(self, cs_sub_start):
        engine = cs_sub_start.engine
        sub_sid = cs_sub_start.sub_sid
        addr = cs_sub_start.addr
        port = cs_sub_start.port
        #
        log('sub %s started %s:%s'%(sub_sid, addr, port))
    def cb_sub_stop(self, cs_sub_stop):
        engine = cs_sub_stop.engine
        sub_sid = cs_sub_stop.sub_sid
        message = cs_sub_stop.message
        #
        log('sub stopped %s'%sub_sid)
        #
        # Drop any partially-accumulated line; the stream is gone.
        self.rail_line_finder.clear()
    def cb_sub_recv(self, cs_sub_recv):
        engine = cs_sub_recv.engine
        sub_sid = cs_sub_recv.sub_sid
        bb = cs_sub_recv.bb
        #
        log('sub recv (len %s)'%(len(bb)))
        #
        self.rail_line_finder.accept_bytes(
            bb=bb)
class CogBroadcastListener:
    # Cog that owns the broadcast-listener rail and forwards every
    # received line onto the nearcast.
    def __init__(self, cog_h, orb, engine):
        self.cog_h = cog_h
        self.orb = orb
        self.engine = engine
        #
        self.rail_broadcast_listener = RailBroadcastListener()
    def on_init(self, net_addr, net_port):
        # Nearcast 'init' handler: start listening on the given address.
        self.rail_broadcast_listener.zero(
            rail_h='broadcast_listener.only',
            cb_broadcast_listener_data=self.cb_broadcast_listener_data,
            engine=self.engine,
            ip=net_addr,
            port=net_port)
    #
    def cb_broadcast_listener_data(self, cs_broadcast_listener_data):
        rail_h = cs_broadcast_listener_data.rail_h
        msg = cs_broadcast_listener_data.msg
        #
        # NOTE(review): self.nearcast is not set in this class; it is
        # presumably injected by the orb when the cog is registered —
        # confirm against solent's orb implementation.
        self.nearcast.received_from_network(
            msg=msg)
class CogPrinter:
    """Cog that echoes every network-received message to the log."""
    def __init__(self, cog_h, orb, engine):
        self.engine = engine
        self.orb = orb
        self.cog_h = cog_h
    def on_received_from_network(self, msg):
        # Nearcast handler: just report the payload.
        log('! received [%s] :)'%(msg))
def init(engine, net_addr, net_port):
    """Wire the orb and its cogs, then nearcast the initial settings."""
    orb = engine.init_orb(i_nearcast=I_NEARCAST)
    for cog_class in (CogBroadcastListener, CogPrinter):
        orb.init_cog(cog_class)
    #
    orb.init_autobridge().nc_init(
        net_addr=net_addr,
        net_port=net_port)
# --------------------------------------------------------
# launch
# --------------------------------------------------------
# Passed to Engine as its mtu; presumably bounds the datagram size the
# engine will handle — confirm against solent's Engine.
MTU = 1350
# Broadcast address of the 127.0.0.0/8 loopback network.
NET_ADDR = '127.255.255.255'
NET_PORT = 3000
def main():
    """Run a UDP broadcast subscriber on NET_ADDR:NET_PORT until quit.

    Fix: the suggested qd_poll command previously hard-coded port 50000,
    which did not match NET_PORT, so following the printed instructions
    never reached this listener. Both commands now use the configured
    address and port.
    """
    print('''test this with
        echo "Hello" | socat - UDP-DATAGRAM:%s:%s,broadcast
    Or
        python3 -m solent.tools.qd_poll %s %s
    '''%(NET_ADDR, NET_PORT, NET_ADDR, NET_PORT))
    #
    engine = Engine(
        mtu=MTU)
    try:
        init(
            engine=engine,
            net_addr=NET_ADDR,
            net_port=NET_PORT)
        #
        # You can use this to print more info about the event loop. This would be
        # useful if you had a flailing event loop and could not work out what was
        # causing the activity.
        engine.debug_eloop_on()
        engine.event_loop()
    except KeyboardInterrupt:
        pass
    except SolentQuitException:
        pass
    finally:
        engine.close()
if __name__ == '__main__':
    main()
| lgpl-3.0 |
kwlzn/pex | tests/test_integration.py | 1 | 31413 | # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import functools
import os
import platform
import subprocess
import sys
from contextlib import contextmanager
from textwrap import dedent
import pytest
from twitter.common.contextutil import environment_as, temporary_dir
from pex.compatibility import WINDOWS
from pex.installer import EggInstaller
from pex.pex_bootstrapper import get_pex_info
from pex.testing import (
IS_PYPY,
NOT_CPYTHON27,
NOT_CPYTHON27_OR_OSX,
NOT_CPYTHON36,
NOT_CPYTHON36_OR_LINUX,
ensure_python_interpreter,
get_dep_dist_names_from_pex,
run_pex_command,
run_simple_pex,
run_simple_pex_test,
temporary_content
)
from pex.util import DistributionHelper, named_temporary_file
def test_pex_execute():
  """A trivial pex body executes and exits 0."""
  body = "print('Hello')"
  _, rc = run_simple_pex_test(body, coverage=True)
  assert rc == 0
def test_pex_raise():
  """A raising pex body is tolerated (exercises error paths for coverage)."""
  body = "raise Exception('This will improve coverage.')"
  run_simple_pex_test(body, coverage=True)
def test_pex_root():
  """--pex-root redirects the build working dir away from $HOME."""
  with temporary_dir() as tmp_home:
    with environment_as(HOME=tmp_home):
      with temporary_dir() as td:
        with temporary_dir() as output_dir:
          env = os.environ.copy()
          env['PEX_INTERPRETER'] = '1'
          output_path = os.path.join(output_dir, 'pex.pex')
          args = ['pex', '-o', output_path, '--not-zip-safe', '--pex-root={0}'.format(td)]
          results = run_pex_command(args=args, env=env)
          results.assert_success()
          assert ['pex.pex'] == os.listdir(output_dir), 'Expected built pex file.'
          assert [] == os.listdir(tmp_home), 'Expected empty temp home dir.'
          assert 'build' in os.listdir(td), 'Expected build directory in tmp pex root.'
def test_cache_disable():
  """--disable-cache still honors --pex-root and leaves $HOME untouched."""
  with temporary_dir() as tmp_home:
    with environment_as(HOME=tmp_home):
      with temporary_dir() as td:
        with temporary_dir() as output_dir:
          env = os.environ.copy()
          env['PEX_INTERPRETER'] = '1'
          output_path = os.path.join(output_dir, 'pex.pex')
          args = [
            'pex',
            '-o', output_path,
            '--not-zip-safe',
            '--disable-cache',
            '--pex-root={0}'.format(td),
          ]
          results = run_pex_command(args=args, env=env)
          results.assert_success()
          assert ['pex.pex'] == os.listdir(output_dir), 'Expected built pex file.'
          assert [] == os.listdir(tmp_home), 'Expected empty temp home dir.'
def test_pex_interpreter():
  """PEX_INTERPRETER=1 runs an arbitrary script under the pex python."""
  with named_temporary_file() as fp:
    fp.write(b"print('Hello world')")
    fp.flush()
    env = os.environ.copy()
    env['PEX_INTERPRETER'] = '1'
    so, rc = run_simple_pex_test("", args=(fp.name,), coverage=True, env=env)
    assert so == b'Hello world\n'
    assert rc == 0
def test_pex_repl_cli():
  """Tests the REPL in the context of the pex cli itself."""
  stdin_payload = b'import sys; sys.exit(3)'
  with temporary_dir() as output_dir:
    # Create a temporary pex containing just `requests` with no entrypoint.
    pex_path = os.path.join(output_dir, 'pex.pex')
    results = run_pex_command(['--disable-cache',
                               'wheel',
                               'requests',
                               './',
                               '-e', 'pex.bin.pex:main',
                               '-o', pex_path])
    results.assert_success()
    # Test that the REPL is functional.
    stdout, rc = run_simple_pex(pex_path, stdin=stdin_payload)
    assert rc == 3
    assert b'>>>' in stdout
def test_pex_repl_built():
  """Tests the REPL in the context of a built pex."""
  stdin_payload = b'import requests; import sys; sys.exit(3)'
  with temporary_dir() as output_dir:
    # Create a temporary pex containing just `requests` with no entrypoint.
    pex_path = os.path.join(output_dir, 'requests.pex')
    results = run_pex_command(['--disable-cache', 'requests', '-o', pex_path])
    results.assert_success()
    # Test that the REPL is functional.
    stdout, rc = run_simple_pex(pex_path, stdin=stdin_payload)
    assert rc == 3
    assert b'>>>' in stdout
@pytest.mark.skipif(WINDOWS, reason='No symlinks on windows')
def test_pex_python_symlink():
  # PEX_PYTHON pointing at a symlink to the current interpreter still works.
  with temporary_dir() as td:
    with environment_as(HOME=td):
      symlink_path = os.path.join(td, 'python-symlink')
      os.symlink(sys.executable, symlink_path)
      pexrc_path = os.path.join(td, '.pexrc')
      with open(pexrc_path, 'w') as pexrc:
        pexrc.write("PEX_PYTHON=%s" % symlink_path)
      body = "print('Hello')"
      _, rc = run_simple_pex_test(body, coverage=True)
      assert rc == 0
def test_entry_point_exit_code():
  # A console_script whose function returns a string: the string is
  # printed and the process exits 1 (setuptools entry point semantics).
  setup_py = dedent("""
      from setuptools import setup
      setup(
        name='my_app',
        version='0.0.0',
        zip_safe=True,
        packages=[''],
        entry_points={'console_scripts': ['my_app = my_app:do_something']},
      )
      """)
  error_msg = 'setuptools expects this to exit non-zero'
  my_app = dedent("""
      def do_something():
        return '%s'
      """ % error_msg)
  with temporary_content({'setup.py': setup_py, 'my_app.py': my_app}) as project_dir:
    installer = EggInstaller(project_dir)
    dist = DistributionHelper.distribution_from_path(installer.bdist())
    so, rc = run_simple_pex_test('', env={'PEX_SCRIPT': 'my_app'}, dists=[dist])
    assert so.decode('utf-8').strip() == error_msg
    assert rc == 1
# TODO: https://github.com/pantsbuild/pex/issues/479
@pytest.mark.skipif(NOT_CPYTHON36_OR_LINUX,
                    reason='inherits linux abi on linux w/ no backing packages')
def test_pex_multi_resolve():
  """Tests multi-interpreter + multi-platform resolution."""
  with temporary_dir() as output_dir:
    pex_path = os.path.join(output_dir, 'pex.pex')
    results = run_pex_command(['--disable-cache',
                               'lxml==3.8.0',
                               '--no-build',
                               '--platform=linux-x86_64',
                               '--platform=macosx-10.6-x86_64',
                               '--python=python2.7',
                               '--python=python3.6',
                               '-o', pex_path])
    results.assert_success()
    included_dists = get_dep_dist_names_from_pex(pex_path, 'lxml')
    # One wheel per (interpreter, platform) combination.
    assert len(included_dists) == 4
    for dist_substr in ('-cp27-', '-cp36-', '-manylinux1_x86_64', '-macosx_'):
      assert any(dist_substr in f for f in included_dists)
@pytest.mark.xfail(reason='See https://github.com/pantsbuild/pants/issues/4682')
def test_pex_re_exec_failure():
  """A $PEX_PATH-composed pex should still import its deps after re-exec.

  Fix: the embedded child script previously read ``in os.environ::``
  (double colon), which made the child fail with a SyntaxError rather
  than exercising the intended re-exec path.
  """
  with temporary_dir() as output_dir:
    # create 2 pex files for PEX_PATH
    pex1_path = os.path.join(output_dir, 'pex1.pex')
    res1 = run_pex_command(['--disable-cache', 'requests', '-o', pex1_path])
    res1.assert_success()
    pex2_path = os.path.join(output_dir, 'pex2.pex')
    res2 = run_pex_command(['--disable-cache', 'flask', '-o', pex2_path])
    res2.assert_success()
    pex_path = ':'.join(os.path.join(output_dir, name) for name in ('pex1.pex', 'pex2.pex'))
    # create test file test.py that attempts to import modules from pex1/pex2
    test_file_path = os.path.join(output_dir, 'test.py')
    with open(test_file_path, 'w') as fh:
      fh.write(dedent('''
        import requests
        import flask
        import sys
        import os
        import subprocess
        if 'RAN_ONCE' in os.environ:
          print('Hello world')
        else:
          env = os.environ.copy()
          env['RAN_ONCE'] = '1'
          subprocess.call([sys.executable] + sys.argv, env=env)
          sys.exit()
        '''))
    # set up env for pex build with PEX_PATH in the environment
    env = os.environ.copy()
    env['PEX_PATH'] = pex_path
    # build composite pex of pex1/pex2
    pex_out_path = os.path.join(output_dir, 'out.pex')
    run_pex_command(['--disable-cache',
                     'wheel',
                     '-o', pex_out_path])
    # run test.py with composite env
    stdout, rc = run_simple_pex(pex_out_path, [test_file_path], env=env)
    assert rc == 0
    assert stdout == b'Hello world\n'
def test_pex_path_arg():
  """--pex-path baked into the pex lets it import dists from other pexes."""
  with temporary_dir() as output_dir:
    # create 2 pex files for PEX_PATH
    pex1_path = os.path.join(output_dir, 'pex1.pex')
    res1 = run_pex_command(['--disable-cache', 'requests', '-o', pex1_path])
    res1.assert_success()
    pex2_path = os.path.join(output_dir, 'pex2.pex')
    res2 = run_pex_command(['--disable-cache', 'flask', '-o', pex2_path])
    res2.assert_success()
    pex_path = ':'.join(os.path.join(output_dir, name) for name in ('pex1.pex', 'pex2.pex'))
    # parameterize the pex arg for test.py
    pex_out_path = os.path.join(output_dir, 'out.pex')
    # create test file test.py that attempts to import modules from pex1/pex2
    test_file_path = os.path.join(output_dir, 'test.py')
    with open(test_file_path, 'w') as fh:
      fh.write(dedent('''
        import requests
        import flask
        import sys
        import os
        import subprocess
        if 'RAN_ONCE' in os.environ:
          print('Success!')
        else:
          env = os.environ.copy()
          env['RAN_ONCE'] = '1'
          subprocess.call([sys.executable] + ['%s'] + sys.argv, env=env)
          sys.exit()
        ''' % pex_out_path))
    # build out.pex composed from pex1/pex2
    run_pex_command(['--disable-cache',
                     '--pex-path={}'.format(pex_path),
                     'wheel',
                     '-o', pex_out_path])
    # run test.py with composite env
    stdout, rc = run_simple_pex(pex_out_path, [test_file_path])
    assert rc == 0
    assert stdout == b'Success!\n'
def test_pex_path_in_pex_info_and_env():
  """PEX-INFO pex_path and $PEX_PATH are merged at runtime."""
  with temporary_dir() as output_dir:
    # create 2 pex files for PEX-INFO pex_path
    pex1_path = os.path.join(output_dir, 'pex1.pex')
    res1 = run_pex_command(['--disable-cache', 'requests', '-o', pex1_path])
    res1.assert_success()
    pex2_path = os.path.join(output_dir, 'pex2.pex')
    res2 = run_pex_command(['--disable-cache', 'flask', '-o', pex2_path])
    res2.assert_success()
    pex_path = ':'.join(os.path.join(output_dir, name) for name in ('pex1.pex', 'pex2.pex'))
    # create a pex for environment PEX_PATH
    pex3_path = os.path.join(output_dir, 'pex3.pex')
    res3 = run_pex_command(['--disable-cache', 'wheel', '-o', pex3_path])
    res3.assert_success()
    env_pex_path = os.path.join(output_dir, 'pex3.pex')
    # parameterize the pex arg for test.py
    pex_out_path = os.path.join(output_dir, 'out.pex')
    # create test file test.py that attempts to import modules from pex1/pex2
    test_file_path = os.path.join(output_dir, 'test.py')
    with open(test_file_path, 'w') as fh:
      fh.write(dedent('''
        import requests
        import flask
        import wheel
        import sys
        import os
        import subprocess
        print('Success!')
        '''))
    # build out.pex composed from pex1/pex2
    run_pex_command(['--disable-cache',
                     '--pex-path={}'.format(pex_path),
                     '-o', pex_out_path])
    # load secondary PEX_PATH
    env = os.environ.copy()
    env['PEX_PATH'] = env_pex_path
    # run test.py with composite env
    stdout, rc = run_simple_pex(pex_out_path, [test_file_path], env=env)
    assert rc == 0
    assert stdout == b'Success!\n'
def test_interpreter_constraints_to_pex_info_py2():
  """CLI interpreter constraints are recorded in PEX-INFO (py2 flavor)."""
  with temporary_dir() as output_dir:
    # target python 2
    pex_out_path = os.path.join(output_dir, 'pex_py2.pex')
    res = run_pex_command(['--disable-cache',
                           '--interpreter-constraint=>=2.7',
                           '--interpreter-constraint=<3',
                           '-o', pex_out_path])
    res.assert_success()
    pex_info = get_pex_info(pex_out_path)
    assert set(['>=2.7', '<3']) == set(pex_info.interpreter_constraints)
# Fix: a boolean skipif condition requires reason=; pytest errors out
# on a bare `skipif(IS_PYPY)` at run time.
@pytest.mark.skipif(IS_PYPY, reason='interpreter management tests require CPython')
def test_interpreter_constraints_to_pex_info_py3():
  """CLI interpreter constraints are recorded in PEX-INFO (py3 flavor)."""
  py3_interpreter = ensure_python_interpreter('3.6.3')
  with environment_as(PATH=os.path.dirname(py3_interpreter)):
    with temporary_dir() as output_dir:
      # target python 3
      pex_out_path = os.path.join(output_dir, 'pex_py3.pex')
      res = run_pex_command(['--disable-cache',
                             '--interpreter-constraint=>3',
                             '-o', pex_out_path])
      res.assert_success()
      pex_info = get_pex_info(pex_out_path)
      assert ['>3'] == pex_info.interpreter_constraints
def test_interpreter_resolution_with_constraint_option():
  """Constraints both land in PEX-INFO and drive the interpreter choice."""
  with temporary_dir() as output_dir:
    pex_out_path = os.path.join(output_dir, 'pex1.pex')
    res = run_pex_command(['--disable-cache',
                           '--interpreter-constraint=>=2.7',
                           '--interpreter-constraint=<3',
                           '-o', pex_out_path])
    res.assert_success()
    pex_info = get_pex_info(pex_out_path)
    assert set(['>=2.7', '<3']) == set(pex_info.interpreter_constraints)
    # The pex must have been built with a python 2 interpreter.
    assert pex_info.build_properties['version'][0] < 3
# Fix: boolean skipif conditions require reason= under pytest.
@pytest.mark.skipif(IS_PYPY, reason='interpreter management tests require CPython')
def test_interpreter_resolution_with_pex_python_path():
  """PEX_PYTHON_PATH in a pexrc constrains interpreter selection on re-exec."""
  with temporary_dir() as td:
    pexrc_path = os.path.join(td, '.pexrc')
    with open(pexrc_path, 'w') as pexrc:
      # set pex python path
      pex_python_path = ':'.join([
        ensure_python_interpreter('2.7.10'),
        ensure_python_interpreter('3.6.3')
      ])
      pexrc.write("PEX_PYTHON_PATH=%s" % pex_python_path)
    # constraints to build pex cleanly; PPP + pex_bootstrapper.py
    # will use these constraints to override sys.executable on pex re-exec
    interpreter_constraint1 = '>3' if sys.version_info[0] == 3 else '<3'
    interpreter_constraint2 = '<3.8' if sys.version_info[0] == 3 else '>=2.7'
    pex_out_path = os.path.join(td, 'pex.pex')
    res = run_pex_command(['--disable-cache',
                           '--rcfile=%s' % pexrc_path,
                           '--interpreter-constraint=%s' % interpreter_constraint1,
                           '--interpreter-constraint=%s' % interpreter_constraint2,
                           '-o', pex_out_path])
    res.assert_success()
    stdin_payload = b'import sys; print(sys.executable); sys.exit(0)'
    stdout, rc = run_simple_pex(pex_out_path, stdin=stdin_payload)
    assert rc == 0
    if sys.version_info[0] == 3:
      assert str(pex_python_path.split(':')[1]).encode() in stdout
    else:
      assert str(pex_python_path.split(':')[0]).encode() in stdout
# Fix: boolean skipif conditions require reason= under pytest.
@pytest.mark.skipif(NOT_CPYTHON36, reason='requires CPython 3.6')
def test_interpreter_resolution_pex_python_path_precedence_over_pex_python():
  """PEX_PYTHON_PATH wins over PEX_PYTHON when both are set in a pexrc."""
  with temporary_dir() as td:
    pexrc_path = os.path.join(td, '.pexrc')
    with open(pexrc_path, 'w') as pexrc:
      # set both PPP and PP
      pex_python_path = ':'.join([
        ensure_python_interpreter('2.7.10'),
        ensure_python_interpreter('3.6.3')
      ])
      pexrc.write("PEX_PYTHON_PATH=%s\n" % pex_python_path)
      pex_python = '/path/to/some/python'
      pexrc.write("PEX_PYTHON=%s" % pex_python)
    pex_out_path = os.path.join(td, 'pex.pex')
    res = run_pex_command(['--disable-cache',
                           '--rcfile=%s' % pexrc_path,
                           '--interpreter-constraint=>3',
                           '--interpreter-constraint=<3.8',
                           '-o', pex_out_path])
    res.assert_success()
    stdin_payload = b'import sys; print(sys.executable); sys.exit(0)'
    stdout, rc = run_simple_pex(pex_out_path, stdin=stdin_payload)
    assert rc == 0
    # The py3 entry from PEX_PYTHON_PATH must win, not the bogus PEX_PYTHON.
    correct_interpreter_path = pex_python_path.split(':')[1].encode()
    assert correct_interpreter_path in stdout
def test_plain_pex_exec_no_ppp_no_pp_no_constraints():
  """Without PEX_PYTHON/PEX_PYTHON_PATH/constraints, sys.executable runs the pex."""
  with temporary_dir() as td:
    pex_out_path = os.path.join(td, 'pex.pex')
    res = run_pex_command(['--disable-cache',
                           '-o', pex_out_path])
    res.assert_success()
    stdin_payload = b'import sys; print(sys.executable); sys.exit(0)'
    stdout, rc = run_simple_pex(pex_out_path, stdin=stdin_payload)
    assert rc == 0
    assert str(sys.executable).encode() in stdout
# Fix: boolean skipif conditions require reason= under pytest.
@pytest.mark.skipif(IS_PYPY, reason='interpreter management tests require CPython')
def test_pex_exec_with_pex_python_path_only():
  """With only PEX_PYTHON_PATH set, the lowest-version interpreter is chosen."""
  with temporary_dir() as td:
    pexrc_path = os.path.join(td, '.pexrc')
    with open(pexrc_path, 'w') as pexrc:
      # set pex python path
      pex_python_path = ':'.join([
        ensure_python_interpreter('2.7.10'),
        ensure_python_interpreter('3.6.3')
      ])
      pexrc.write("PEX_PYTHON_PATH=%s" % pex_python_path)
    pex_out_path = os.path.join(td, 'pex.pex')
    res = run_pex_command(['--disable-cache',
                           '--rcfile=%s' % pexrc_path,
                           '-o', pex_out_path])
    res.assert_success()
    # test that pex bootstrapper selects lowest version interpreter
    # in pex python path (python2.7)
    stdin_payload = b'import sys; print(sys.executable); sys.exit(0)'
    stdout, rc = run_simple_pex(pex_out_path, stdin=stdin_payload)
    assert rc == 0
    assert str(pex_python_path.split(':')[0]).encode() in stdout
# Fix: boolean skipif conditions require reason= under pytest.
@pytest.mark.skipif(IS_PYPY, reason='interpreter management tests require CPython')
def test_pex_exec_with_pex_python_path_and_pex_python_but_no_constraints():
  """Without constraints, PEX_PYTHON_PATH still beats PEX_PYTHON."""
  with temporary_dir() as td:
    pexrc_path = os.path.join(td, '.pexrc')
    with open(pexrc_path, 'w') as pexrc:
      # set both PPP and PP
      pex_python_path = ':'.join([
        ensure_python_interpreter('2.7.10'),
        ensure_python_interpreter('3.6.3')
      ])
      pexrc.write("PEX_PYTHON_PATH=%s\n" % pex_python_path)
      pex_python = '/path/to/some/python'
      pexrc.write("PEX_PYTHON=%s" % pex_python)
    pex_out_path = os.path.join(td, 'pex.pex')
    res = run_pex_command(['--disable-cache',
                           '--rcfile=%s' % pexrc_path,
                           '-o', pex_out_path])
    res.assert_success()
    # test that pex bootstrapper selects lowest version interpreter
    # in pex python path (python2.7)
    stdin_payload = b'import sys; print(sys.executable); sys.exit(0)'
    stdout, rc = run_simple_pex(pex_out_path, stdin=stdin_payload)
    assert rc == 0
    assert str(pex_python_path.split(':')[0]).encode() in stdout
# Fix: boolean skipif conditions require reason= under pytest.
@pytest.mark.skipif(IS_PYPY, reason='interpreter management tests require CPython')
def test_pex_python():
  """PEX_PYTHON in a pexrc selects the interpreter, subject to constraints."""
  py2_path_interpreter = ensure_python_interpreter('2.7.10')
  py3_path_interpreter = ensure_python_interpreter('3.6.3')
  path = ':'.join([os.path.dirname(py2_path_interpreter), os.path.dirname(py3_path_interpreter)])
  with environment_as(PATH=path):
    with temporary_dir() as td:
      pexrc_path = os.path.join(td, '.pexrc')
      with open(pexrc_path, 'w') as pexrc:
        pex_python = ensure_python_interpreter('3.6.3')
        pexrc.write("PEX_PYTHON=%s" % pex_python)
      # test PEX_PYTHON with valid constraints
      pex_out_path = os.path.join(td, 'pex.pex')
      res = run_pex_command(['--disable-cache',
                             '--rcfile=%s' % pexrc_path,
                             '--interpreter-constraint=>3',
                             '--interpreter-constraint=<3.8',
                             '-o', pex_out_path])
      res.assert_success()
      stdin_payload = b'import sys; print(sys.executable); sys.exit(0)'
      stdout, rc = run_simple_pex(pex_out_path, stdin=stdin_payload)
      assert rc == 0
      correct_interpreter_path = pex_python.encode()
      assert correct_interpreter_path in stdout
      # test PEX_PYTHON with incompatible constraints
      pexrc_path = os.path.join(td, '.pexrc')
      with open(pexrc_path, 'w') as pexrc:
        pex_python = ensure_python_interpreter('2.7.10')
        pexrc.write("PEX_PYTHON=%s" % pex_python)
      pex_out_path = os.path.join(td, 'pex2.pex')
      res = run_pex_command(['--disable-cache',
                             '--rcfile=%s' % pexrc_path,
                             '--interpreter-constraint=>3',
                             '--interpreter-constraint=<3.8',
                             '-o', pex_out_path])
      res.assert_success()
      stdin_payload = b'import sys; print(sys.executable); sys.exit(0)'
      stdout, rc = run_simple_pex(pex_out_path, stdin=stdin_payload)
      assert rc == 1
      fail_str = 'not compatible with specified interpreter constraints'.encode()
      assert fail_str in stdout
      # test PEX_PYTHON with no constraints
      pex_out_path = os.path.join(td, 'pex3.pex')
      res = run_pex_command(['--disable-cache',
                             '--rcfile=%s' % pexrc_path,
                             '-o', pex_out_path])
      res.assert_success()
      stdin_payload = b'import sys; print(sys.executable); sys.exit(0)'
      stdout, rc = run_simple_pex(pex_out_path, stdin=stdin_payload)
      assert rc == 0
      correct_interpreter_path = pex_python.encode()
      assert correct_interpreter_path in stdout
# Fix: boolean skipif conditions require reason= under pytest.
@pytest.mark.skipif(IS_PYPY, reason='interpreter management tests require CPython')
def test_entry_point_targeting():
  """Test bugfix for https://github.com/pantsbuild/pex/issues/434"""
  with temporary_dir() as td:
    pexrc_path = os.path.join(td, '.pexrc')
    with open(pexrc_path, 'w') as pexrc:
      pex_python = ensure_python_interpreter('3.6.3')
      pexrc.write("PEX_PYTHON=%s" % pex_python)
    # test pex with entry point
    pex_out_path = os.path.join(td, 'pex.pex')
    res = run_pex_command(['--disable-cache',
                           'autopep8',
                           '-e', 'autopep8',
                           '-o', pex_out_path])
    res.assert_success()
    stdout, rc = run_simple_pex(pex_out_path)
    assert 'usage: autopep8'.encode() in stdout
# Fix: boolean skipif conditions require reason= under pytest.
@pytest.mark.skipif(IS_PYPY, reason='interpreter management tests require CPython')
def test_interpreter_selection_using_os_environ_for_bootstrap_reexec():
  """
  This is a test for verifying the proper function of the
  pex bootstrapper's interpreter selection logic and validate a corresponding
  bugfix. More details on the nature of the bug can be found at:
  https://github.com/pantsbuild/pex/pull/441
  """
  with temporary_dir() as td:
    pexrc_path = os.path.join(td, '.pexrc')
    # Select pexrc interpreter versions based on test environment.
    # The parent interpreter is the interpreter we expect the parent pex to
    # execute with. The child interpreter is the interpreter we expect the
    # child pex to execute with.
    if (sys.version_info[0], sys.version_info[1]) == (3, 6):
      child_pex_interpreter_version = '3.6.3'
    else:
      child_pex_interpreter_version = '2.7.10'
    # Write parent pex's pexrc.
    with open(pexrc_path, 'w') as pexrc:
      pexrc.write("PEX_PYTHON=%s" % sys.executable)
    test_setup_path = os.path.join(td, 'setup.py')
    with open(test_setup_path, 'w') as fh:
      fh.write(dedent('''
        from setuptools import setup
        setup(
          name='tester',
          version='1.0',
          description='tests',
          author='tester',
          author_email='test@test.com',
          packages=['testing']
        )
        '''))
    os.mkdir(os.path.join(td, 'testing'))
    test_init_path = os.path.join(td, 'testing/__init__.py')
    with open(test_init_path, 'w') as fh:
      # The tester() body below builds and runs a *child* pex whose own
      # pexrc points at child_pex_interpreter_version; the child's
      # sys.executable is printed so the parent run can assert on it.
      fh.write(dedent('''
        def tester():
          from pex.testing import (
            run_pex_command,
            run_simple_pex
          )
          import os
          import tempfile
          import shutil
          from textwrap import dedent
          td = tempfile.mkdtemp()
          try:
            pexrc_path = os.path.join(td, '.pexrc')
            with open(pexrc_path, 'w') as pexrc:
              pexrc.write("PEX_PYTHON={}")
            test_file_path = os.path.join(td, 'build_and_run_child_pex.py')
            with open(test_file_path, 'w') as fh:
              fh.write(dedent("""
                import sys
                print(sys.executable)
                """))
            pex_out_path = os.path.join(td, 'child.pex')
            res = run_pex_command(['--disable-cache',
              '-o', pex_out_path])
            stdin_payload = b'import sys; print(sys.executable); sys.exit(0)'
            stdout, rc = run_simple_pex(pex_out_path, stdin=stdin_payload)
            print(stdout)
          finally:
            shutil.rmtree(td)
        '''.format(ensure_python_interpreter(child_pex_interpreter_version))))
    pex_out_path = os.path.join(td, 'parent.pex')
    res = run_pex_command(['--disable-cache',
                           'pex',
                           '{}'.format(td),
                           '-e', 'testing:tester',
                           '-o', pex_out_path])
    res.assert_success()
    stdout, rc = run_simple_pex(pex_out_path)
    assert rc == 0
    # Ensure that child pex used the proper interpreter as specified by its pexrc.
    correct_interpreter_path = ensure_python_interpreter(child_pex_interpreter_version)
    correct_interpreter_path = correct_interpreter_path.encode()  # Py 2/3 compatibility
    assert correct_interpreter_path in stdout
def test_inherit_path_fallback():
  inherit_path("=fallback")
def test_inherit_path_backwards_compatibility():
  inherit_path("")
def test_inherit_path_prefer():
  inherit_path("=prefer")
def inherit_path(inherit_path):
  # Shared driver for the --inherit-path tests above. NOTE: the parameter
  # deliberately shadows the function name; the function never recurses.
  with temporary_dir() as output_dir:
    exe = os.path.join(output_dir, 'exe.py')
    body = "import sys ; print('\\n'.join(sys.path))"
    with open(exe, 'w') as f:
      f.write(body)
    pex_path = os.path.join(output_dir, 'pex.pex')
    results = run_pex_command([
      '--disable-cache',
      'msgpack_python',
      '--inherit-path{}'.format(inherit_path),
      '-o',
      pex_path,
    ])
    results.assert_success()
    env = os.environ.copy()
    env["PYTHONPATH"] = "/doesnotexist"
    stdout, rc = run_simple_pex(
      pex_path,
      args=(exe,),
      env=env,
    )
    assert rc == 0
    stdout_lines = stdout.decode().split('\n')
    requests_paths = tuple(i for i, l in enumerate(stdout_lines) if 'msgpack_python' in l)
    sys_paths = tuple(i for i, l in enumerate(stdout_lines) if 'doesnotexist' in l)
    assert len(requests_paths) == 1
    assert len(sys_paths) == 1
    if inherit_path == "=fallback":
      # fallback: pex dists appear before inherited PYTHONPATH entries
      assert requests_paths[0] < sys_paths[0]
    else:
      assert requests_paths[0] > sys_paths[0]
def test_pex_multi_resolve_2():
  """Tests multi-interpreter + multi-platform resolution using extended platform notation."""
  with temporary_dir() as output_dir:
    pex_path = os.path.join(output_dir, 'pex.pex')
    results = run_pex_command(['--disable-cache',
                               'lxml==3.8.0',
                               '--no-build',
                               '--platform=linux-x86_64-cp-36-m',
                               '--platform=linux-x86_64-cp-27-m',
                               '--platform=macosx-10.6-x86_64-cp-36-m',
                               '--platform=macosx-10.6-x86_64-cp-27-m',
                               '-o', pex_path])
    results.assert_success()
    included_dists = get_dep_dist_names_from_pex(pex_path, 'lxml')
    # One wheel per extended-platform spec above.
    assert len(included_dists) == 4
    for dist_substr in ('-cp27-', '-cp36-', '-manylinux1_x86_64', '-macosx_'):
      assert any(dist_substr in f for f in included_dists), (
        '{} was not found in wheel'.format(dist_substr)
      )
@contextmanager
def pex_manylinux_and_tag_selection_context():
  # Shared fixture: yields (test_resolve, ensure_failure) helpers that
  # build a pex for a single platform spec and inspect the resolved
  # wheel names.
  with temporary_dir() as output_dir:
    def do_resolve(req_name, req_version, platform, extra_flags=None):
      # Build test.pex for the requirement/platform; return (path, results).
      extra_flags = extra_flags or ''
      pex_path = os.path.join(output_dir, 'test.pex')
      results = run_pex_command(['--disable-cache',
                                 '--no-build',
                                 '%s==%s' % (req_name, req_version),
                                 '--platform=%s' % (platform),
                                 '-o', pex_path] + extra_flags.split())
      return pex_path, results
    def test_resolve(req_name, req_version, platform, substr, extra_flags=None):
      # Assert the resolve succeeds and a dist matching substr is included.
      pex_path, results = do_resolve(req_name, req_version, platform, extra_flags)
      results.assert_success()
      included_dists = get_dep_dist_names_from_pex(pex_path, req_name.replace('-', '_'))
      assert any(
        substr in d for d in included_dists
      ), 'couldnt find {} in {}'.format(substr, included_dists)
    def ensure_failure(req_name, req_version, platform, extra_flags):
      # Assert the resolve fails outright for this platform/flag combo.
      pex_path, results = do_resolve(req_name, req_version, platform, extra_flags)
      results.assert_failure()
    yield test_resolve, ensure_failure
# Fixes: boolean skipif conditions require reason= under pytest, and the
# cp-27-mu/x86_64 case was invoked twice (exact duplicate removed).
@pytest.mark.skipif(IS_PYPY, reason='manylinux tag tests require CPython')
def test_pex_manylinux_and_tag_selection_linux_msgpack():
  """Tests resolver manylinux support and tag targeting."""
  with pex_manylinux_and_tag_selection_context() as (test_resolve, ensure_failure):
    msgpack, msgpack_ver = 'msgpack-python', '0.4.7'
    test_msgpack = functools.partial(test_resolve, msgpack, msgpack_ver)
    # Exclude 3.3 and 3.6 because no 33/36 wheel exists on pypi.
    if (sys.version_info[0], sys.version_info[1]) not in [(3, 3), (3, 6)]:
      test_msgpack('linux-x86_64', 'manylinux1_x86_64.whl')
    test_msgpack('linux-x86_64-cp-27-m', 'msgpack_python-0.4.7-cp27-cp27m-manylinux1_x86_64.whl')
    test_msgpack('linux-x86_64-cp-27-mu', 'msgpack_python-0.4.7-cp27-cp27mu-manylinux1_x86_64.whl')
    test_msgpack('linux-i686-cp-27-m', 'msgpack_python-0.4.7-cp27-cp27m-manylinux1_i686.whl')
    test_msgpack('linux-i686-cp-27-mu', 'msgpack_python-0.4.7-cp27-cp27mu-manylinux1_i686.whl')
    test_msgpack('linux-x86_64-cp-34-m', 'msgpack_python-0.4.7-cp34-cp34m-manylinux1_x86_64.whl')
    test_msgpack('linux-x86_64-cp-35-m', 'msgpack_python-0.4.7-cp35-cp35m-manylinux1_x86_64.whl')
    # With manylinux disabled there is no compatible wheel at all.
    ensure_failure(msgpack, msgpack_ver, 'linux-x86_64', '--no-manylinux')
def test_pex_manylinux_and_tag_selection_lxml_osx():
  # OSX-targeted wheels resolve for both the cp27 and cp36 ABI tags.
  with pex_manylinux_and_tag_selection_context() as (test_resolve, ensure_failure):
    test_resolve('lxml', '3.8.0', 'macosx-10.6-x86_64-cp-27-m', 'lxml-3.8.0-cp27-cp27m-macosx')
    test_resolve('lxml', '3.8.0', 'macosx-10.6-x86_64-cp-36-m', 'lxml-3.8.0-cp36-cp36m-macosx')
# Fixes: boolean skipif requires reason=; '--platform=current' carried a
# no-op .format(platform) call; check_output returns bytes on Python 3,
# so the expected value must be a bytes literal.
@pytest.mark.skipif(NOT_CPYTHON27_OR_OSX, reason='requires CPython 2.7 or OSX')
def test_pex_manylinux_runtime():
  """Tests resolver manylinux support and runtime resolution (and --platform=current)."""
  test_stub = dedent(
      """
      import msgpack
      print(msgpack.unpackb(msgpack.packb([1, 2, 3])))
      """
  )
  with temporary_content({'tester.py': test_stub}) as output_dir:
    pex_path = os.path.join(output_dir, 'test.pex')
    tester_path = os.path.join(output_dir, 'tester.py')
    results = run_pex_command(['--disable-cache',
                               '--no-build',
                               'msgpack-python==0.4.7',
                               '--platform=current',
                               '-o', pex_path])
    results.assert_success()
    out = subprocess.check_output([pex_path, tester_path])
    assert out.strip() == b'[1, 2, 3]'
@pytest.mark.skipif(NOT_CPYTHON27)
def test_platform_specific_inline_egg_resolution():
with temporary_dir() as td:
pex_out_path = os.path.join(td, 'pex.pex')
res = run_pex_command(['--disable-cache',
'--no-wheel',
'MarkupSafe==1.0',
'-o', pex_out_path])
res.assert_success()
@pytest.mark.skipif(NOT_CPYTHON27)
def test_platform_specific_egg_resolution():
with temporary_dir() as td:
pex_out_path = os.path.join(td, 'pex.pex')
res = run_pex_command(['--disable-cache',
'--no-wheel',
'--no-build',
'--no-pypi',
'--platform=linux-x86_64',
'--find-links=tests/example_packages/',
'M2Crypto==0.22.3',
'-o', pex_out_path])
res.assert_success()
@pytest.mark.skipif(NOT_CPYTHON27)
def test_platform_specific_egg_resolution_matching():
    """netifaces 0.10.6 ships only win32 eggs, so resolution must fail here."""
    with temporary_dir() as work_dir:
        pex_out_path = os.path.join(work_dir, 'pex.pex')
        args = [
            '--disable-cache',
            '--no-wheel',
            '--no-build',
            'netifaces==0.10.6',  # Only provides win32 eggs.
            '-o', pex_out_path,
        ]
        result = run_pex_command(args)
        result.assert_failure()
| apache-2.0 |
sysalexis/kbengine | kbe/res/scripts/common/Lib/tkinter/commondialog.py | 153 | 1412 | # base class for tk common dialogues
#
# this module provides a base class for accessing the common
# dialogues available in Tk 4.2 and newer. use filedialog,
# colorchooser, and messagebox to access the individual
# dialogs.
#
# written by Fredrik Lundh, May 1997
#
from tkinter import *
class Dialog:
    """Base class for Tk common dialogs (file, color, message).

    Subclasses set ``command`` to the Tk command name (e.g. ``tk_chooseColor``)
    and may override ``_fixoptions``/``_fixresult`` to massage input options
    and the raw Tk result.
    """

    # Tk command string implementing the dialog; set by subclasses.
    command = None

    def __init__(self, master=None, **options):
        """Store master widget and dialog options; ``parent`` option may
        substitute for a missing master."""
        # FIXME: should this be placed on the module level instead?
        if TkVersion < 4.2:
            raise TclError("this module requires Tk 4.2 or newer")
        self.master = master
        self.options = options
        if not master and options.get('parent'):
            self.master = options['parent']

    def _fixoptions(self):
        pass # hook

    def _fixresult(self, widget, result):
        return result # hook

    def show(self, **options):
        """Run the dialog and return its (possibly post-processed) result."""
        # update instance options
        for k, v in options.items():
            self.options[k] = v
        self._fixoptions()
        # we need a dummy widget to properly process the options
        # (at least as long as we use Tkinter 1.63)
        w = Frame(self.master)
        try:
            s = w.tk.call(self.command, *w._options(self.options))
            s = self._fixresult(w, s)
        finally:
            try:
                # get rid of the widget
                w.destroy()
            except:
                # Best-effort cleanup; widget may already be gone.
                pass
        return s
| lgpl-3.0 |
AutorestCI/azure-sdk-for-python | azure-cognitiveservices-search-videosearch/azure/cognitiveservices/search/videosearch/models/trending_videos_subcategory.py | 2 | 1152 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TrendingVideosSubcategory(Model):
    """TrendingVideosSubcategory.

    Autogenerated msrest model: ``_validation`` and ``_attribute_map`` drive
    (de)serialization and must mirror the service wire format.

    :param title: Title of this trending-videos subcategory.
    :type title: str
    :param tiles: Tiles belonging to this subcategory.
    :type tiles:
     list[~azure.cognitiveservices.search.videosearch.models.TrendingVideosTile]
    """

    # Both fields are mandatory on the wire.
    _validation = {
        'title': {'required': True},
        'tiles': {'required': True},
    }

    # JSON key -> attribute/type mapping used by the msrest serializer.
    _attribute_map = {
        'title': {'key': 'title', 'type': 'str'},
        'tiles': {'key': 'tiles', 'type': '[TrendingVideosTile]'},
    }

    def __init__(self, title, tiles):
        super(TrendingVideosSubcategory, self).__init__()
        self.title = title
        self.tiles = tiles
| mit |
michalliu/chromium-depot_tools | third_party/boto/auth.py | 51 | 26228 | # Copyright 2010 Google Inc.
# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Handles authentication required to AWS and GS
"""
import base64
import boto
import boto.auth_handler
import boto.exception
import boto.plugin
import boto.utils
import hmac
import sys
import urllib
import time
import datetime
import copy
from email.utils import formatdate
from boto.auth_handler import AuthHandler
from boto.exception import BotoClientError
#
# the following is necessary because of the incompatibilities
# between Python 2.4, 2.5, and 2.6 as well as the fact that some
# people running 2.4 have installed hashlib as a separate module
# this fix was provided by boto user mccormix.
# see: http://code.google.com/p/boto/issues/detail?id=172
# for more details.
#
try:
    # Prefer hashlib (Python >= 2.5); fall back to the old `sha` module below.
    from hashlib import sha1 as sha
    from hashlib import sha256 as sha256
    if sys.version[:3] == "2.4":
        # we are using an hmac that expects a .new() method.
        class Faker:
            # Wraps a hashlib constructor to look like the legacy module
            # interface (module.new(...)) that Python 2.4's hmac expects.
            def __init__(self, which):
                self.which = which
                self.digest_size = self.which().digest_size

            def new(self, *args, **kwargs):
                return self.which(*args, **kwargs)

        sha = Faker(sha)
        sha256 = Faker(sha256)
except ImportError:
    # hashlib unavailable: use the standalone sha module; SHA-256 signing
    # (HmacSHA256, SigV4) is then unsupported (callers check `if sha256:`).
    import sha
    sha256 = None
class HmacKeys(object):
    """Key based Auth handler helper.

    Holds the credential provider and pre-built HMAC objects, and offers
    ``sign_string`` for the SHA1/SHA256 signing used by the concrete handlers.
    """

    def __init__(self, host, config, provider):
        # Refuse to participate in auth-handler selection without credentials.
        if provider.access_key is None or provider.secret_key is None:
            raise boto.auth_handler.NotReadyToAuthenticate()
        self.host = host
        self.update_provider(provider)

    def update_provider(self, provider):
        """(Re)build the cached HMAC templates from the provider's secret key."""
        self._provider = provider
        self._hmac = hmac.new(self._provider.secret_key, digestmod=sha)
        if sha256:
            self._hmac_256 = hmac.new(self._provider.secret_key,
                                      digestmod=sha256)
        else:
            # Python 2.4 without hashlib: SHA-256 unavailable.
            self._hmac_256 = None

    def algorithm(self):
        """Return the AWS algorithm name matching the best available digest."""
        if self._hmac_256:
            return 'HmacSHA256'
        else:
            return 'HmacSHA1'

    def _get_hmac(self):
        # Fresh HMAC each call; the cached ones are only used as capability
        # probes (see algorithm()).
        if self._hmac_256:
            digestmod = sha256
        else:
            digestmod = sha
        return hmac.new(self._provider.secret_key,
                        digestmod=digestmod)

    def sign_string(self, string_to_sign):
        """Return the base64-encoded HMAC of *string_to_sign* (trailing
        newline stripped)."""
        new_hmac = self._get_hmac()
        new_hmac.update(string_to_sign)
        return base64.encodestring(new_hmac.digest()).strip()

    def __getstate__(self):
        # HMAC objects are not picklable; drop them and rebuild on unpickle.
        pickled_dict = copy.copy(self.__dict__)
        del pickled_dict['_hmac']
        del pickled_dict['_hmac_256']
        return pickled_dict

    def __setstate__(self, dct):
        self.__dict__ = dct
        self.update_provider(self._provider)
class AnonAuthHandler(AuthHandler, HmacKeys):
    """
    Implements Anonymous requests.

    Deliberately skips HmacKeys.__init__ so no credentials are required, and
    add_auth leaves the request untouched.
    """

    capability = ['anon']

    def __init__(self, host, config, provider):
        AuthHandler.__init__(self, host, config, provider)

    def add_auth(self, http_request, **kwargs):
        # Anonymous: nothing to sign or attach.
        pass
class HmacAuthV1Handler(AuthHandler, HmacKeys):
    """ Implements the HMAC request signing used by S3 and GS."""

    capability = ['hmac-v1', 's3']

    def __init__(self, host, config, provider):
        AuthHandler.__init__(self, host, config, provider)
        HmacKeys.__init__(self, host, config, provider)
        # V1 signing uses SHA1 only; disable the SHA256 template so
        # algorithm()/_get_hmac() pick SHA1.
        self._hmac_256 = None

    def update_provider(self, provider):
        super(HmacAuthV1Handler, self).update_provider(provider)
        self._hmac_256 = None

    def add_auth(self, http_request, **kwargs):
        """Sign the request in place by setting the Authorization header."""
        headers = http_request.headers
        method = http_request.method
        auth_path = http_request.auth_path
        if 'Date' not in headers:
            headers['Date'] = formatdate(usegmt=True)

        if self._provider.security_token:
            key = self._provider.security_token_header
            headers[key] = self._provider.security_token
        # Canonical string per the S3 REST authentication scheme.
        string_to_sign = boto.utils.canonical_string(method, auth_path,
                                                     headers, None,
                                                     self._provider)
        boto.log.debug('StringToSign:\n%s' % string_to_sign)
        b64_hmac = self.sign_string(string_to_sign)
        auth_hdr = self._provider.auth_header
        headers['Authorization'] = ("%s %s:%s" %
                                    (auth_hdr,
                                     self._provider.access_key, b64_hmac))
class HmacAuthV2Handler(AuthHandler, HmacKeys):
    """
    Implements the simplified HMAC authorization used by CloudFront.

    Only the Date header value is signed.
    """

    capability = ['hmac-v2', 'cloudfront']

    def __init__(self, host, config, provider):
        AuthHandler.__init__(self, host, config, provider)
        HmacKeys.__init__(self, host, config, provider)
        # CloudFront signing uses SHA1 only.
        self._hmac_256 = None

    def update_provider(self, provider):
        super(HmacAuthV2Handler, self).update_provider(provider)
        self._hmac_256 = None

    def add_auth(self, http_request, **kwargs):
        """Sign the Date header and set the Authorization header in place."""
        headers = http_request.headers
        if 'Date' not in headers:
            headers['Date'] = formatdate(usegmt=True)

        if self._provider.security_token:
            key = self._provider.security_token_header
            headers[key] = self._provider.security_token

        b64_hmac = self.sign_string(headers['Date'])
        auth_hdr = self._provider.auth_header
        headers['Authorization'] = ("%s %s:%s" %
                                    (auth_hdr,
                                     self._provider.access_key, b64_hmac))
class HmacAuthV3Handler(AuthHandler, HmacKeys):
    """Implements the new Version 3 HMAC authorization used by Route53.

    Like V2 this signs only the Date header, but emits the AWS3-HTTPS
    X-Amzn-Authorization header format instead.
    """

    capability = ['hmac-v3', 'route53', 'ses']

    def __init__(self, host, config, provider):
        AuthHandler.__init__(self, host, config, provider)
        HmacKeys.__init__(self, host, config, provider)

    def add_auth(self, http_request, **kwargs):
        """Sign the Date header and set X-Amzn-Authorization in place."""
        headers = http_request.headers
        if 'Date' not in headers:
            headers['Date'] = formatdate(usegmt=True)

        if self._provider.security_token:
            key = self._provider.security_token_header
            headers[key] = self._provider.security_token

        b64_hmac = self.sign_string(headers['Date'])
        s = "AWS3-HTTPS AWSAccessKeyId=%s," % self._provider.access_key
        s += "Algorithm=%s,Signature=%s" % (self.algorithm(), b64_hmac)
        headers['X-Amzn-Authorization'] = s
class HmacAuthV3HTTPHandler(AuthHandler, HmacKeys):
    """
    Implements the new Version 3 HMAC authorization used by DynamoDB.

    Signs a canonical representation of the full request (method, path,
    x-amz headers and body) rather than just the Date header.
    """

    capability = ['hmac-v3-http']

    def __init__(self, host, config, provider):
        AuthHandler.__init__(self, host, config, provider)
        HmacKeys.__init__(self, host, config, provider)

    def headers_to_sign(self, http_request):
        """
        Select the headers from the request that need to be included
        in the StringToSign.
        """
        headers_to_sign = {}
        # Host is always signed; beyond that, only x-amz-* headers.
        headers_to_sign = {'Host': self.host}
        for name, value in http_request.headers.items():
            lname = name.lower()
            if lname.startswith('x-amz'):
                headers_to_sign[name] = value
        return headers_to_sign

    def canonical_headers(self, headers_to_sign):
        """
        Return the headers that need to be included in the StringToSign
        in their canonical form by converting all header keys to lower
        case, sorting them in alphabetical order and then joining
        them into a string, separated by newlines.
        """
        l = sorted(['%s:%s' % (n.lower().strip(),
                    headers_to_sign[n].strip()) for n in headers_to_sign])
        return '\n'.join(l)

    def string_to_sign(self, http_request):
        """
        Return the canonical StringToSign as well as a dict
        containing the original version of all headers that
        were included in the StringToSign.
        """
        headers_to_sign = self.headers_to_sign(http_request)
        canonical_headers = self.canonical_headers(headers_to_sign)
        string_to_sign = '\n'.join([http_request.method,
                                    http_request.auth_path,
                                    '',
                                    canonical_headers,
                                    '',
                                    http_request.body])
        return string_to_sign, headers_to_sign

    def add_auth(self, req, **kwargs):
        """
        Add AWS3 authentication to a request.

        :type req: :class`boto.connection.HTTPRequest`
        :param req: The HTTPRequest object.
        """
        # This could be a retry.  Make sure the previous
        # authorization header is removed first.
        if 'X-Amzn-Authorization' in req.headers:
            del req.headers['X-Amzn-Authorization']
        req.headers['X-Amz-Date'] = formatdate(usegmt=True)
        if self._provider.security_token:
            req.headers['X-Amz-Security-Token'] = self._provider.security_token
        string_to_sign, headers_to_sign = self.string_to_sign(req)
        boto.log.debug('StringToSign:\n%s' % string_to_sign)
        # AWS3: HMAC is computed over the SHA-256 digest of the string to sign.
        hash_value = sha256(string_to_sign).digest()
        b64_hmac = self.sign_string(hash_value)
        s = "AWS3 AWSAccessKeyId=%s," % self._provider.access_key
        s += "Algorithm=%s," % self.algorithm()
        s += "SignedHeaders=%s," % ';'.join(headers_to_sign)
        s += "Signature=%s" % b64_hmac
        req.headers['X-Amzn-Authorization'] = s
class HmacAuthV4Handler(AuthHandler, HmacKeys):
    """
    Implements the new Version 4 HMAC authorization.

    SigV4 derives a signing key from the secret key, date, region and
    service, then signs a hash of a canonical request representation.
    """

    capability = ['hmac-v4']

    def __init__(self, host, config, provider,
                 service_name=None, region_name=None):
        AuthHandler.__init__(self, host, config, provider)
        HmacKeys.__init__(self, host, config, provider)
        # You can set the service_name and region_name to override the
        # values which would otherwise come from the endpoint, e.g.
        # <service>.<region>.amazonaws.com.
        self.service_name = service_name
        self.region_name = region_name

    def _sign(self, key, msg, hex=False):
        # One step of the SigV4 key-derivation/signing chain; hex=True
        # returns the final hex-encoded signature.
        if hex:
            sig = hmac.new(key, msg.encode('utf-8'), sha256).hexdigest()
        else:
            sig = hmac.new(key, msg.encode('utf-8'), sha256).digest()
        return sig

    def headers_to_sign(self, http_request):
        """
        Select the headers from the request that need to be included
        in the StringToSign.
        """
        headers_to_sign = {}
        headers_to_sign = {'Host': self.host}
        for name, value in http_request.headers.items():
            lname = name.lower()
            if lname.startswith('x-amz'):
                headers_to_sign[name] = value
        return headers_to_sign

    def query_string(self, http_request):
        """Return the request params as a sorted, url-encoded query string."""
        parameter_names = sorted(http_request.params.keys())
        pairs = []
        for pname in parameter_names:
            pval = str(http_request.params[pname]).encode('utf-8')
            pairs.append(urllib.quote(pname, safe='') + '=' +
                         urllib.quote(pval, safe='-_~'))
        return '&'.join(pairs)

    def canonical_query_string(self, http_request):
        # POST requests pass parameters in through the
        # http_request.body field.
        if http_request.method == 'POST':
            return ""
        l = []
        for param in sorted(http_request.params):
            value = str(http_request.params[param])
            l.append('%s=%s' % (urllib.quote(param, safe='-_.~'),
                                urllib.quote(value, safe='-_.~')))
        return '&'.join(l)

    def canonical_headers(self, headers_to_sign):
        """
        Return the headers that need to be included in the StringToSign
        in their canonical form by converting all header keys to lower
        case, sorting them in alphabetical order and then joining
        them into a string, separated by newlines.
        """
        # Note: internal runs of whitespace in values are collapsed to a
        # single space, per the SigV4 canonicalization rules.
        l = sorted(['%s:%s' % (n.lower().strip(),
                    ' '.join(headers_to_sign[n].strip().split()))
                    for n in headers_to_sign])
        return '\n'.join(l)

    def signed_headers(self, headers_to_sign):
        """Return the sorted, semicolon-joined list of signed header names."""
        l = ['%s' % n.lower().strip() for n in headers_to_sign]
        l = sorted(l)
        return ';'.join(l)

    def canonical_uri(self, http_request):
        return http_request.auth_path

    def payload(self, http_request):
        """Return the hex SHA-256 digest of the request body."""
        body = http_request.body
        # If the body is a file like object, we can use
        # boto.utils.compute_hash, which will avoid reading
        # the entire body into memory.
        if hasattr(body, 'seek') and hasattr(body, 'read'):
            return boto.utils.compute_hash(body, hash_algorithm=sha256)[0]
        return sha256(http_request.body).hexdigest()

    def canonical_request(self, http_request):
        """Assemble the newline-joined canonical request string."""
        cr = [http_request.method.upper()]
        cr.append(self.canonical_uri(http_request))
        cr.append(self.canonical_query_string(http_request))
        headers_to_sign = self.headers_to_sign(http_request)
        cr.append(self.canonical_headers(headers_to_sign) + '\n')
        cr.append(self.signed_headers(headers_to_sign))
        cr.append(self.payload(http_request))
        return '\n'.join(cr)

    def scope(self, http_request):
        # access_key/date/region/service/aws4_request; relies on the
        # timestamp/region/service attrs set by credential_scope().
        scope = [self._provider.access_key]
        scope.append(http_request.timestamp)
        scope.append(http_request.region_name)
        scope.append(http_request.service_name)
        scope.append('aws4_request')
        return '/'.join(scope)

    def credential_scope(self, http_request):
        """Compute the date/region/service/aws4_request scope string.

        Side effect: stores timestamp, region_name and service_name on
        *http_request* for later use by scope() and signature().
        """
        scope = []
        http_request.timestamp = http_request.headers['X-Amz-Date'][0:8]
        scope.append(http_request.timestamp)
        # The service_name and region_name either come from:
        # * The service_name/region_name attrs or (if these values are None)
        # * parsed from the endpoint <service>.<region>.amazonaws.com.
        parts = http_request.host.split('.')
        if self.region_name is not None:
            region_name = self.region_name
        else:
            if len(parts) == 3:
                # A 3-part endpoint (<service>.amazonaws.com) implies the
                # default region.
                region_name = 'us-east-1'
            else:
                region_name = parts[1]
        if self.service_name is not None:
            service_name = self.service_name
        else:
            service_name = parts[0]

        http_request.service_name = service_name
        http_request.region_name = region_name

        scope.append(http_request.region_name)
        scope.append(http_request.service_name)
        scope.append('aws4_request')
        return '/'.join(scope)

    def string_to_sign(self, http_request, canonical_request):
        """
        Return the canonical StringToSign as well as a dict
        containing the original version of all headers that
        were included in the StringToSign.
        """
        sts = ['AWS4-HMAC-SHA256']
        sts.append(http_request.headers['X-Amz-Date'])
        sts.append(self.credential_scope(http_request))
        sts.append(sha256(canonical_request).hexdigest())
        return '\n'.join(sts)

    def signature(self, http_request, string_to_sign):
        """Derive the SigV4 signing key and return the hex signature."""
        key = self._provider.secret_key
        k_date = self._sign(('AWS4' + key).encode('utf-8'),
                            http_request.timestamp)
        k_region = self._sign(k_date, http_request.region_name)
        k_service = self._sign(k_region, http_request.service_name)
        k_signing = self._sign(k_service, 'aws4_request')
        return self._sign(k_signing, string_to_sign, hex=True)

    def add_auth(self, req, **kwargs):
        """
        Add AWS4 authentication to a request.

        :type req: :class`boto.connection.HTTPRequest`
        :param req: The HTTPRequest object.
        """
        # This could be a retry.  Make sure the previous
        # authorization header is removed first.
        if 'X-Amzn-Authorization' in req.headers:
            del req.headers['X-Amzn-Authorization']
        now = datetime.datetime.utcnow()
        req.headers['X-Amz-Date'] = now.strftime('%Y%m%dT%H%M%SZ')
        if self._provider.security_token:
            req.headers['X-Amz-Security-Token'] = self._provider.security_token
        qs = self.query_string(req)
        if qs and req.method == 'POST':
            # Stash request parameters into post body
            # before we generate the signature.
            req.body = qs
            req.headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
            req.headers['Content-Length'] = str(len(req.body))
        else:
            # Safe to modify req.path here since
            # the signature will use req.auth_path.
            # NOTE(review): this appends '?' even when qs is empty —
            # existing upstream behavior, left unchanged.
            req.path = req.path.split('?')[0]
            req.path = req.path + '?' + qs
        canonical_request = self.canonical_request(req)
        boto.log.debug('CanonicalRequest:\n%s' % canonical_request)
        string_to_sign = self.string_to_sign(req, canonical_request)
        boto.log.debug('StringToSign:\n%s' % string_to_sign)
        signature = self.signature(req, string_to_sign)
        boto.log.debug('Signature:\n%s' % signature)
        headers_to_sign = self.headers_to_sign(req)
        l = ['AWS4-HMAC-SHA256 Credential=%s' % self.scope(req)]
        l.append('SignedHeaders=%s' % self.signed_headers(headers_to_sign))
        l.append('Signature=%s' % signature)
        req.headers['Authorization'] = ','.join(l)
class QuerySignatureHelper(HmacKeys):
    """
    Helper for Query signature based Auth handler.

    Concrete sub class needs to implement the _calc_signature method.
    """

    def add_auth(self, http_request, **kwargs):
        """Sign the request parameters and place them in the body (POST)
        or the query string (other methods)."""
        headers = http_request.headers
        params = http_request.params
        params['AWSAccessKeyId'] = self._provider.access_key
        params['SignatureVersion'] = self.SignatureVersion
        params['Timestamp'] = boto.utils.get_ts()
        qs, signature = self._calc_signature(
            http_request.params, http_request.method,
            http_request.auth_path, http_request.host)
        boto.log.debug('query_string: %s Signature: %s' % (qs, signature))
        if http_request.method == 'POST':
            headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
            http_request.body = qs + '&Signature=' + urllib.quote_plus(signature)
            http_request.headers['Content-Length'] = str(len(http_request.body))
        else:
            http_request.body = ''
            # if this is a retried request, the qs from the previous try will
            # already be there, we need to get rid of that and rebuild it
            http_request.path = http_request.path.split('?')[0]
            http_request.path = (http_request.path + '?' + qs +
                                 '&Signature=' + urllib.quote_plus(signature))
class QuerySignatureV0AuthHandler(QuerySignatureHelper, AuthHandler):
    """Provides Signature V0 Signing"""

    SignatureVersion = 0
    capability = ['sign-v0']

    def _calc_signature(self, params, *args):
        """Return (query_string, base64 signature) for signature version 0.

        V0 signs only Action + Timestamp; parameters are emitted sorted
        case-insensitively (Python 2 cmp-based sort).
        """
        boto.log.debug('using _calc_signature_0')
        hmac = self._get_hmac()
        s = params['Action'] + params['Timestamp']
        hmac.update(s)
        keys = params.keys()
        keys.sort(cmp=lambda x, y: cmp(x.lower(), y.lower()))
        pairs = []
        for key in keys:
            val = boto.utils.get_utf8_value(params[key])
            pairs.append(key + '=' + urllib.quote(val))
        qs = '&'.join(pairs)
        return (qs, base64.b64encode(hmac.digest()))
class QuerySignatureV1AuthHandler(QuerySignatureHelper, AuthHandler):
    """
    Provides Query Signature V1 Authentication.
    """

    SignatureVersion = 1
    capability = ['sign-v1', 'mturk']

    def __init__(self, *args, **kw):
        QuerySignatureHelper.__init__(self, *args, **kw)
        AuthHandler.__init__(self, *args, **kw)
        # V1 signing uses SHA1 only.
        self._hmac_256 = None

    def _calc_signature(self, params, *args):
        """Return (query_string, base64 signature) for signature version 1.

        V1 feeds every key and value (case-insensitively sorted) into the
        HMAC, in order.
        """
        boto.log.debug('using _calc_signature_1')
        hmac = self._get_hmac()
        keys = params.keys()
        keys.sort(cmp=lambda x, y: cmp(x.lower(), y.lower()))
        pairs = []
        for key in keys:
            hmac.update(key)
            val = boto.utils.get_utf8_value(params[key])
            hmac.update(val)
            pairs.append(key + '=' + urllib.quote(val))
        qs = '&'.join(pairs)
        return (qs, base64.b64encode(hmac.digest()))
class QuerySignatureV2AuthHandler(QuerySignatureHelper, AuthHandler):
    """Provides Query Signature V2 Authentication."""

    SignatureVersion = 2
    # Fixed: 'ec2' was listed twice in this capability list.
    capability = ['sign-v2', 'ec2', 'emr', 'fps', 'ecs',
                  'sdb', 'iam', 'rds', 'sns', 'sqs', 'cloudformation']

    def _calc_signature(self, params, verb, path, server_name):
        """Return (query_string, base64 signature) for signature version 2.

        V2 signs "<verb>\\n<host>\\n<path>\\n<canonical query string>" with
        the best available HMAC algorithm (see HmacKeys.algorithm).
        """
        boto.log.debug('using _calc_signature_2')
        string_to_sign = '%s\n%s\n%s\n' % (verb, server_name.lower(), path)
        hmac = self._get_hmac()
        params['SignatureMethod'] = self.algorithm()
        if self._provider.security_token:
            params['SecurityToken'] = self._provider.security_token
        keys = sorted(params.keys())
        pairs = []
        for key in keys:
            val = boto.utils.get_utf8_value(params[key])
            pairs.append(urllib.quote(key, safe='') + '=' +
                         urllib.quote(val, safe='-_~'))
        qs = '&'.join(pairs)
        boto.log.debug('query string: %s' % qs)
        string_to_sign += qs
        boto.log.debug('string_to_sign: %s' % string_to_sign)
        hmac.update(string_to_sign)
        b64 = base64.b64encode(hmac.digest())
        boto.log.debug('len(b64)=%d' % len(b64))
        boto.log.debug('base64 encoded digest: %s' % b64)
        return (qs, b64)
class POSTPathQSV2AuthHandler(QuerySignatureV2AuthHandler, AuthHandler):
    """
    Query Signature V2 Authentication relocating signed query
    into the path and allowing POST requests with Content-Types.

    Unlike QuerySignatureHelper.add_auth, the signed query string always
    lives in the path (even for POST), leaving the body free for payloads.
    """

    capability = ['mws']

    def add_auth(self, req, **kwargs):
        """Sign the request parameters and append them to the request path."""
        req.params['AWSAccessKeyId'] = self._provider.access_key
        req.params['SignatureVersion'] = self.SignatureVersion
        req.params['Timestamp'] = boto.utils.get_ts()
        qs, signature = self._calc_signature(req.params, req.method,
                                             req.auth_path, req.host)
        boto.log.debug('query_string: %s Signature: %s' % (qs, signature))
        if req.method == 'POST':
            req.headers['Content-Length'] = str(len(req.body))
            req.headers['Content-Type'] = req.headers.get('Content-Type',
                                                          'text/plain')
        else:
            req.body = ''
        # if this is a retried req, the qs from the previous try will
        # already be there, we need to get rid of that and rebuild it
        req.path = req.path.split('?')[0]
        req.path = (req.path + '?' + qs +
                    '&Signature=' + urllib.quote_plus(signature))
def get_auth_handler(host, config, provider, requested_capability=None):
    """Finds an AuthHandler that is ready to authenticate.

    Lists through all the registered AuthHandlers to find one that is willing
    to handle for the requested capabilities, config and provider.

    :type host: string
    :param host: The name of the host

    :type config:
    :param config:

    :type provider:
    :param provider:

    Returns:
        An implementation of AuthHandler.

    Raises:
        boto.exception.NoAuthHandlerFound
    """
    # Fixed: dropped the unused `total_handlers` local and the pointless
    # `checked_handlers` alias from the original; behavior is unchanged.
    ready_handlers = []
    auth_handlers = boto.plugin.get_plugin(AuthHandler, requested_capability)
    for handler in auth_handlers:
        try:
            ready_handlers.append(handler(host, config, provider))
        except boto.auth_handler.NotReadyToAuthenticate:
            # Handler lacks credentials/config for this provider; skip it.
            pass

    if not ready_handlers:
        names = [handler.__name__ for handler in auth_handlers]
        raise boto.exception.NoAuthHandlerFound(
            'No handler was ready to authenticate. %d handlers were checked.'
            ' %s '
            'Check your credentials' % (len(names), str(names)))

    # We select the last ready auth handler that was loaded, to allow users to
    # customize how auth works in environments where there are shared boto
    # config files (e.g., /etc/boto.cfg and ~/.boto): The more general,
    # system-wide shared configs should be loaded first, and the user's
    # customizations loaded last. That way, for example, the system-wide
    # config might include a plugin_directory that includes a service account
    # auth plugin shared by all users of a Google Compute Engine instance
    # (allowing sharing of non-user data between various services), and the
    # user could override this with a .boto config that includes user-specific
    # credentials (for access to user data).
    return ready_handlers[-1]
| bsd-3-clause |
kaiw/meld | meld/vcview.py | 1 | 36042 | # Copyright (C) 2002-2006 Stephen Kennedy <stevek@gnome.org>
# Copyright (C) 2010-2013 Kai Willadsen <kai.willadsen@gmail.com>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.
from __future__ import print_function
import atexit
import tempfile
import shutil
import os
import stat
import sys
from gettext import gettext as _
import gtk
import pango
from . import melddoc
from . import misc
from . import paths
from . import recent
from . import tree
from . import vc
from .ui import emblemcellrenderer
from .ui import gnomeglade
from .ui import vcdialogs
from meld.vc import _null
def _commonprefix(files):
if len(files) != 1:
workdir = misc.commonprefix(files)
else:
workdir = os.path.dirname(files[0]) or "."
return workdir
def cleanup_temp():
    """atexit hook: remove the temp files/dirs registered in _temp_files and
    _temp_dirs, best-effort."""
    temp_location = tempfile.gettempdir()
    # The strings below will probably end up as debug log, and are deliberately
    # not marked for translation.
    for f in _temp_files:
        try:
            # Safety check: only ever delete absolute paths that live
            # directly inside the system temp directory.
            assert (os.path.exists(f) and os.path.isabs(f) and
                    os.path.dirname(f) == temp_location)
            # Windows throws permissions errors if we remove read-only files
            if os.name == "nt":
                os.chmod(f, stat.S_IWRITE)
            os.remove(f)
        except:
            # Best-effort cleanup: report and continue with the next entry.
            except_str = "{0[0]}: \"{0[1]}\"".format(sys.exc_info())
            print("File \"{0}\" not removed due to".format(f), except_str,
                  file=sys.stderr)
    for f in _temp_dirs:
        try:
            assert (os.path.exists(f) and os.path.isabs(f) and
                    os.path.dirname(f) == temp_location)
            shutil.rmtree(f, ignore_errors=1)
        except:
            except_str = "{0[0]}: \"{0[1]}\"".format(sys.exc_info())
            print("Directory \"{0}\" not removed due to".format(f), except_str,
                  file=sys.stderr)

# Registries of temporary paths created during this session; cleaned up at
# interpreter exit by cleanup_temp above.
_temp_dirs, _temp_files = [], []
atexit.register(cleanup_temp)
class ConsoleStream(object):
    """Writes tagged (command/output/error) text to a gtk.TextView console."""

    def __init__(self, textview):
        self.textview = textview
        buf = textview.get_buffer()
        # Style tags for the three message categories.
        self.command_tag = buf.create_tag("command")
        self.command_tag.props.weight = pango.WEIGHT_BOLD
        self.output_tag = buf.create_tag("output")
        self.error_tag = buf.create_tag("error")
        # FIXME: Need to add this to the gtkrc?
        self.error_tag.props.foreground = "#cc0000"
        # Right-gravity end mark used to keep the view scrolled to the tail.
        self.end_mark = buf.create_mark(None, buf.get_end_iter(),
                                        left_gravity=False)

    def command(self, message):
        self.write(message, self.command_tag)

    def output(self, message):
        self.write(message, self.output_tag)

    def error(self, message):
        self.write(message, self.error_tag)

    def write(self, message, tag):
        """Append *message* with *tag* and scroll the console to the end."""
        if not message:
            return
        buf = self.textview.get_buffer()
        buf.insert_with_tags(buf.get_end_iter(), message, tag)
        self.textview.scroll_mark_onscreen(self.end_mark)
# Extra VC model columns, numbered after the base tree.DiffTreeStore columns.
COL_LOCATION, COL_STATUS, COL_REVISION, COL_OPTIONS, COL_END = \
    list(range(tree.COL_END, tree.COL_END + 5))
class VcTreeStore(tree.DiffTreeStore):
    """Single-pane DiffTreeStore with five extra string columns (location,
    status, revision, options, end — see COL_* above)."""

    def __init__(self):
        tree.DiffTreeStore.__init__(self, 1, [str] * 5)
################################################################################
# filters
################################################################################
# Predicates used by VcView.state_actions to decide which tree entries a
# status filter shows.  Directories pass more liberally so that matching
# children stay reachable.  (Converted from name-assigned lambdas to defs,
# per PEP 8 E731; names and behavior are unchanged.)
def entry_modified(x):
    """Modified/new entries, or directories with any non-blank VC state."""
    return (x.state >= tree.STATE_NEW) or (x.isdir and (x.state > tree.STATE_NONE))


def entry_normal(x):
    """Entries in the unmodified, tracked state."""
    return x.state == tree.STATE_NORMAL


def entry_nonvc(x):
    """Unversioned entries, or directories beyond the ignored state."""
    return (x.state == tree.STATE_NONE) or (x.isdir and (x.state > tree.STATE_IGNORED))


def entry_ignored(x):
    """Ignored entries, and all directories."""
    return (x.state == tree.STATE_IGNORED) or x.isdir
################################################################################
#
# VcView
#
################################################################################
class VcView(melddoc.MeldDoc, gnomeglade.Component):
    """Version-control browser pane: lists working-copy state and drives
    VC actions (commit, update, push, add, remove, resolve, revert)."""

    # Map action names to VC commands and required arguments list
    action_vc_cmds_map = {
        "VcCommit": ("commit_command", ("",)),
        "VcUpdate": ("update_command", ()),
        "VcPush": ("push", (lambda *args, **kwargs: None, )),
        "VcAdd": ("add_command", ()),
        "VcResolved": ("resolved_command", ()),
        "VcRemove": ("remove_command", ()),
        "VcRevert": ("revert_command", ()),
    }

    # Map preference filter keys to (action name, tree-entry predicate);
    # "flatten" has no predicate — it toggles display mode instead.
    state_actions = {
        "flatten": ("VcFlatten", None),
        "modified": ("VcShowModified", entry_modified),
        "normal": ("VcShowNormal", entry_normal),
        "unknown": ("VcShowNonVC", entry_nonvc),
        "ignored": ("VcShowIgnored", entry_ignored),
    }
    def __init__(self, prefs):
        """Build the VC browser UI: actions, tree view and columns, state
        filters, console stream and the VC-system combo box."""
        melddoc.MeldDoc.__init__(self, prefs)
        gnomeglade.Component.__init__(self, paths.ui_dir("vcview.ui"),
                                      "vcview")

        # (name, stock-id, label, accelerator, tooltip, callback) tuples.
        actions = (
            ("VcCompare", gtk.STOCK_DIALOG_INFO, _("_Compare"), None,
                _("Compare selected files"),
                self.on_button_diff_clicked),
            ("VcCommit", "vc-commit-24", _("Co_mmit..."), None,
                _("Commit changes to version control"),
                self.on_button_commit_clicked),
            ("VcUpdate", "vc-update-24", _("_Update"), None,
                _("Update working copy from version control"),
                self.on_button_update_clicked),
            ("VcPush", "vc-push-24", _("_Push"), None,
                _("Push local changes to remote"),
                self.on_button_push_clicked),
            ("VcAdd", "vc-add-24", _("_Add"), None,
                _("Add to version control"),
                self.on_button_add_clicked),
            ("VcRemove", "vc-remove-24", _("_Remove"), None,
                _("Remove from version control"),
                self.on_button_remove_clicked),
            ("VcResolved", "vc-resolve-24", _("Mar_k as Resolved"), None,
                _("Mark as resolved in version control"),
                self.on_button_resolved_clicked),
            ("VcRevert", gtk.STOCK_REVERT_TO_SAVED, _("Re_vert"), None,
                _("Revert working copy to original state"),
                self.on_button_revert_clicked),
            ("VcDeleteLocally", gtk.STOCK_DELETE, None, None,
                _("Delete from working copy"),
                self.on_button_delete_clicked),
        )

        # Same shape as `actions`, plus a trailing initial-active flag.
        toggleactions = (
            ("VcFlatten", gtk.STOCK_GOTO_BOTTOM, _("_Flatten"), None,
                _("Flatten directories"),
                self.on_button_flatten_toggled, False),
            ("VcShowModified", "filter-modified-24", _("_Modified"), None,
                _("Show modified files"),
                self.on_filter_state_toggled, False),
            ("VcShowNormal", "filter-normal-24", _("_Normal"), None,
                _("Show normal files"),
                self.on_filter_state_toggled, False),
            ("VcShowNonVC", "filter-nonvc-24", _("Un_versioned"), None,
                _("Show unversioned files"),
                self.on_filter_state_toggled, False),
            ("VcShowIgnored", "filter-ignored-24", _("Ignored"), None,
                _("Show ignored files"),
                self.on_filter_state_toggled, False),
        )

        self.ui_file = paths.ui_dir("vcview-ui.xml")
        self.actiongroup = gtk.ActionGroup('VcviewActions')
        self.actiongroup.set_translation_domain("meld")
        self.actiongroup.add_actions(actions)
        self.actiongroup.add_toggle_actions(toggleactions)
        for action in ("VcCompare", "VcFlatten", "VcShowModified",
                       "VcShowNormal", "VcShowNonVC", "VcShowIgnored"):
            self.actiongroup.get_action(action).props.is_important = True
        for action in ("VcCommit", "VcUpdate", "VcPush", "VcAdd", "VcRemove",
                       "VcShowModified", "VcShowNormal", "VcShowNonVC",
                       "VcShowIgnored", "VcResolved"):
            button = self.actiongroup.get_action(action)
            button.props.icon_name = button.props.stock_id
        self.model = VcTreeStore()
        self.widget.connect("style-set", self.model.on_style_set)
        self.treeview.set_model(self.model)
        selection = self.treeview.get_selection()
        selection.set_mode(gtk.SELECTION_MULTIPLE)
        selection.connect("changed", self.on_treeview_selection_changed)
        self.treeview.set_headers_visible(1)
        self.treeview.set_search_equal_func(self.model.treeview_search_cb)
        self.current_path, self.prev_path, self.next_path = None, None, None
        self.column_name_map = {}
        # Name column: emblem icon renderer plus styled text renderer.
        column = gtk.TreeViewColumn(_("Name"))
        column.set_resizable(True)
        renicon = emblemcellrenderer.EmblemCellRenderer()
        rentext = gtk.CellRendererText()
        column.pack_start(renicon, expand=0)
        column.pack_start(rentext, expand=1)
        col_index = self.model.column_index
        column.set_attributes(renicon,
                              icon_name=col_index(tree.COL_ICON, 0),
                              icon_tint=col_index(tree.COL_TINT, 0))
        column.set_attributes(rentext,
                              text=col_index(tree.COL_TEXT, 0),
                              foreground_gdk=col_index(tree.COL_FG, 0),
                              style=col_index(tree.COL_STYLE, 0),
                              weight=col_index(tree.COL_WEIGHT, 0),
                              strikethrough=col_index(tree.COL_STRIKE, 0))
        column_index = self.treeview.append_column(column) - 1
        self.column_name_map[vc.DATA_NAME] = column_index

        def addCol(name, num, data_name=None):
            # Helper: append a plain markup text column; optionally record
            # its index under a vc.DATA_* name for update_visible_columns.
            column = gtk.TreeViewColumn(name)
            column.set_resizable(True)
            rentext = gtk.CellRendererText()
            column.pack_start(rentext, expand=0)
            column.set_attributes(rentext,
                                  markup=self.model.column_index(num, 0))
            column_index = self.treeview.append_column(column) - 1
            if data_name:
                self.column_name_map[data_name] = column_index
            return column

        self.treeview_column_location = addCol(_("Location"), COL_LOCATION)
        addCol(_("Status"), COL_STATUS, vc.DATA_STATE)
        addCol(_("Revision"), COL_REVISION, vc.DATA_REVISION)
        addCol(_("Options"), COL_OPTIONS, vc.DATA_OPTIONS)

        # Activate the status filters saved in preferences.
        self.state_filters = []
        for s in self.state_actions:
            if s in self.prefs.vc_status_filters:
                action_name = self.state_actions[s][0]
                self.state_filters.append(s)
                self.actiongroup.get_action(action_name).set_active(True)
        self.consolestream = ConsoleStream(self.consoleview)
        self.location = None
        # Location column is only meaningful in flattened mode.
        self.treeview_column_location.set_visible(self.actiongroup.get_action("VcFlatten").get_active())
        if not self.prefs.vc_console_visible:
            self.on_console_view_toggle(self.console_hide_box)
        self.vc = None
        self.valid_vc_actions = tuple()
        # VC ComboBox
        self.combobox_vcs = gtk.ComboBox()
        # `lock` guards against change-handler recursion while (re)populating.
        self.combobox_vcs.lock = True
        self.combobox_vcs.set_model(gtk.ListStore(str, object, bool))
        cell = gtk.CellRendererText()
        self.combobox_vcs.pack_start(cell, False)
        self.combobox_vcs.add_attribute(cell, 'text', 0)
        self.combobox_vcs.add_attribute(cell, 'sensitive', 2)
        self.combobox_vcs.lock = False
        self.hbox2.pack_end(self.combobox_vcs, expand=False)
        self.combobox_vcs.show()
        self.combobox_vcs.connect("changed", self.on_vc_change)
def on_container_switch_in_event(self, ui):
    """Chain up to MeldDoc, then queue a cursor-state recalculation."""
    melddoc.MeldDoc.on_container_switch_in_event(self, ui)
    self.scheduler.add_task(self.on_treeview_cursor_changed)
def update_visible_columns(self):
    """Show only the tree columns the active VC plugin supports."""
    supported = self.vc.VC_COLUMNS
    for data_id, col_idx in self.column_name_map.items():
        column = self.treeview.get_column(col_idx)
        column.set_visible(data_id in supported)
def update_actions_sensitivity(self):
    """Disable actions that use not implemented VC plugin methods"""
    # "VcDeleteLocally" needs no VC support, so it is always valid.
    valid_vc_actions = ["VcDeleteLocally"]
    for action_name, (meth_name, args) in self.action_vc_cmds_map.items():
        action = self.actiongroup.get_action(action_name)
        try:
            # Probe the plugin: unimplemented commands raise
            # NotImplementedError when invoked.
            getattr(self.vc, meth_name)(*args)
            action.props.sensitive = True
            valid_vc_actions.append(action_name)
        except NotImplementedError:
            action.props.sensitive = False
    self.valid_vc_actions = tuple(valid_vc_actions)
def choose_vc(self, vcs):
    """Display VC plugin(s) that can handle the location.

    Populates the VC combo box with one row per candidate plugin,
    marking unusable ones insensitive with an inline error message,
    and finally selects a sensible default.
    """
    self.combobox_vcs.lock = True
    self.combobox_vcs.get_model().clear()
    default_active = -1
    valid_vcs = []
    # Try to keep the same VC plugin active on refresh()
    for idx, avc in enumerate(vcs):
        # See if the necessary version control command exists.  If so,
        # make sure what we're diffing is a valid repository.  If either
        # check fails don't let the user select that version control
        # tool and display a basic error message in the drop-down menu.
        err_str = ""

        def vc_installed(cmd):
            # A plugin without a command (e.g. the null VC) is
            # always considered available.
            if not cmd:
                return True
            try:
                return not vc._vc.call(["which", cmd])
            except OSError:
                if os.name == 'nt':
                    return not vc._vc.call(["where", cmd])
                # BUG FIX: previously fell off the end and returned
                # None implicitly; report the command as unavailable.
                return False

        if not vc_installed(avc.CMD):
            # TRANSLATORS: this is an error message when a version control
            # application isn't installed or can't be found
            # BUG FIX: translate the format string first, then
            # substitute; translating the already-formatted string
            # can never match the gettext catalogue.
            err_str = _("%s not installed") % avc.CMD
        elif not avc.valid_repo():
            # TRANSLATORS: this is an error message when a version
            # controlled repository is invalid or corrupted
            err_str = _("Invalid repository")
        else:
            valid_vcs.append(idx)
            if (self.vc is not None and
                    self.vc.__class__ == avc.__class__):
                default_active = idx
        if err_str:
            self.combobox_vcs.get_model().append(
                [_("%s (%s)") % (avc.NAME, err_str), avc, False])
        else:
            name = avc.NAME or _("None")
            self.combobox_vcs.get_model().append([name, avc, True])

    if not valid_vcs:
        # If we didn't get any valid vcs then fallback to null
        null_vcs = _null.Vc(vcs[0].location)
        vcs.append(null_vcs)
        self.combobox_vcs.get_model().insert(
            0, [_("None"), null_vcs, True])
        default_active = 0

    if default_active == -1:
        default_active = min(valid_vcs) if valid_vcs else 0

    # If we only have the null VC, give a better error message.
    if (len(vcs) == 1 and not vcs[0].CMD) or (len(valid_vcs) == 0):
        tooltip = _("No valid version control system found in this folder")
    elif len(vcs) == 1:
        tooltip = _("Only one version control system found in this folder")
    else:
        tooltip = _("Choose which version control system to use")

    self.combobox_vcs.set_tooltip_text(tooltip)
    self.combobox_vcs.set_sensitive(len(vcs) > 1)
    self.combobox_vcs.lock = False
    self.combobox_vcs.set_active(default_active)
def on_vc_change(self, cb):
    """Switch to the VC plugin chosen in the combo box."""
    # The lock flag suppresses recursive callbacks while choose_vc()
    # repopulates the combo box model.
    if not cb.lock:
        self.vc = cb.get_model()[cb.get_active_iter()][1]
        self._set_location(self.vc.location)
        self.update_actions_sensitivity()
        self.update_visible_columns()
def set_location(self, location):
    """Detect the VC systems handling *location* and let the user pick one."""
    self.choose_vc(vc.get_vcs(os.path.abspath(location or ".")))
def _set_location(self, location):
    """Reset the tree to *location* and schedule a recursive scan."""
    self.location = location
    self.current_path = None
    self.model.clear()
    self.fileentry.set_filename(location)
    self.fileentry.prepend_history(location)
    it = self.model.add_entries(None, [location])
    self.treeview.grab_focus()
    self.treeview.get_selection().select_iter(it)
    self.model.set_path_state(it, 0, tree.STATE_NORMAL, isdir=1)
    self.recompute_label()
    self.scheduler.remove_all_tasks()
    # If the user is just diffing a file (ie not a directory), there's no
    # need to scan the rest of the repository
    if os.path.isdir(self.vc.location):
        root = self.model.get_iter_root()
        try:
            # Annotate the root row (e.g. with an unpushed-commit
            # summary); not every plugin implements this.
            col = self.model.column_index(COL_OPTIONS, 0)
            self.model.set_value(root, col,
                                 self.vc.get_commits_to_push_summary())
        except NotImplementedError:
            pass
        self.scheduler.add_task(self._search_recursively_iter(root))
        self.scheduler.add_task(self.on_treeview_selection_changed)
        self.scheduler.add_task(self.on_treeview_cursor_changed)
def get_comparison(self):
    """Return the (type, paths) descriptor used by the recent-files list."""
    return recent.TYPE_VC, [self.location]
def recompute_label(self):
    """Refresh the tab label and tooltip from the current location."""
    self.label_text = os.path.basename(self.location)
    # TRANSLATORS: This is the location of the directory the user is diffing
    self.tooltip_text = _("%s: %s") % (_("Location"), self.location)
    self.label_changed()
def _search_recursively_iter(self, iterstart):
    """Incrementally populate the tree below *iterstart*.

    Generator driven by the scheduler; yields progress strings while
    walking the VC inventory.
    """
    rootname = self.model.value_path(iterstart, 0)
    # Offset used to strip self.location plus one separator from paths.
    prefixlen = len(self.location) + 1
    todo = [(self.model.get_path(iterstart), rootname)]
    flattened = self.actiongroup.get_action("VcFlatten").get_active()
    active_action = lambda a: self.actiongroup.get_action(a).get_active()
    # State predicates of the currently enabled filter toggles.
    filters = [a[1] for a in self.state_actions.values() if
               active_action(a[0]) and a[1]]
    yield _("Scanning %s") % rootname
    self.vc.cache_inventory(rootname)
    while todo:
        # This needs to happen sorted and depth-first in order for our row
        # references to remain valid while we traverse.
        todo.sort()
        treepath, path = todo.pop(0)
        it = self.model.get_iter(treepath)
        yield _("Scanning %s") % path[prefixlen:]
        entries = self.vc.listdir(path)
        entries = [e for e in entries if any(f(e) for f in filters)]
        for e in entries:
            if e.isdir and flattened:
                # Flattened mode: queue subdirectories under the root
                # row instead of nesting them.
                todo.append(((0,), e.path))
                continue
            child = self.model.add_entries(it, [e.path])
            self._update_item_state(child, e, path[prefixlen:])
            if e.isdir:
                todo.append((self.model.get_path(child), e.path))
        if flattened:
            self.treeview.expand_row((0,), 0)
        else:
            if not entries:
                self.model.add_empty(it, _("(Empty)"))
            if any(e.state != tree.STATE_NORMAL for e in entries):
                # Auto-expand rows containing non-normal entries.
                self.treeview.expand_to_path(treepath)
def on_fileentry_activate(self, fileentry):
    """Jump to the path typed into the file entry."""
    self.set_location(fileentry.get_full_path())
def on_delete_event(self, appquit=0):
    """Cancel pending scans and allow the tab to close."""
    self.scheduler.remove_all_tasks()
    return gtk.RESPONSE_OK
def on_row_activated(self, treeview, path, tvc):
    """Toggle expansion for directory rows; open a diff for file rows."""
    it = self.model.get_iter(path)
    if self.model.iter_has_child(it):
        if self.treeview.row_expanded(path):
            self.treeview.collapse_row(path)
        else:
            self.treeview.expand_row(path, 0)
    else:
        path = self.model.value_path(it, 0)
        self.run_diff(path)
def run_diff(self, path):
    """Open a comparison for *path* against its repository version."""
    if os.path.isdir(path):
        self.emit("create-diff", [path], {})
        return

    if self.vc.get_entry(path).state == tree.STATE_CONFLICT and \
            hasattr(self.vc, 'get_path_for_conflict'):
        # We create new temp files for other, base and this, and
        # then set the output to the current file.
        conflicts = (tree.CONFLICT_OTHER, tree.CONFLICT_MERGED,
                     tree.CONFLICT_THIS)
        diffs = [self.vc.get_path_for_conflict(path, conflict=c)
                 for c in conflicts]
        temps = [p for p, is_temp in diffs if is_temp]
        diffs = [p for p, is_temp in diffs]
        kwargs = {
            'auto_merge': False,
            'merge_output': path,
        }
    else:
        comp_path = self.vc.get_path_for_repo_file(path)
        temps = [comp_path]
        diffs = [comp_path, path]
        kwargs = {}

    for temp_file in temps:
        # Repo snapshots are read-only; registered module-wide for
        # later cleanup.
        os.chmod(temp_file, 0o444)
        _temp_files.append(temp_file)

    self.emit("create-diff", diffs, kwargs)
def on_treeview_popup_menu(self, treeview):
    """Show the context menu for keyboard-driven menu requests."""
    time = gtk.get_current_event_time()
    self.popup_menu.popup(None, None, None, 0, time)
    return True
def on_button_press_event(self, treeview, event):
    """Show the context menu on right-click, adjusting the selection."""
    if event.button == 3:
        path = treeview.get_path_at_pos(int(event.x), int(event.y))
        if path is None:
            return False
        selection = treeview.get_selection()
        model, rows = selection.get_selected_rows()
        if path[0] not in rows:
            # Right-clicking outside the current selection selects
            # only the clicked row.
            selection.unselect_all()
            selection.select_path(path[0])
            treeview.set_cursor(path[0])
        self.popup_menu.popup(None, None, None, event.button, event.time)
        return True
    return False
def on_button_flatten_toggled(self, button):
    """Sync the Location column with flatten mode, then re-filter."""
    action = self.actiongroup.get_action("VcFlatten")
    self.treeview_column_location.set_visible(action.get_active())
    self.on_filter_state_toggled(button)
def on_filter_state_toggled(self, button):
    """Re-scan when the set of active state filters actually changes."""
    active_action = lambda a: self.actiongroup.get_action(a).get_active()
    active_filters = [a for a in self.state_actions if
                      active_action(self.state_actions[a][0])]
    if set(active_filters) == set(self.state_filters):
        # No effective change; avoid a needless refresh.
        return
    self.state_filters = active_filters
    self.prefs.vc_status_filters = active_filters
    self.refresh()
def on_treeview_selection_changed(self, selection=None):
    """Update action sensitivity to match the current selection."""

    def set_sensitive(action, sensitive):
        self.actiongroup.get_action(action).set_sensitive(sensitive)

    if selection is None:
        selection = self.treeview.get_selection()
    model, rows = selection.get_selected_rows()
    if hasattr(self.vc, 'update_actions_for_paths'):
        # The plugin decides per-path which actions make sense.
        paths = [self.model.value_path(model.get_iter(r), 0) for r in rows]
        states = [self.model.get_state(model.get_iter(r), 0) for r in rows]
        action_sensitivity = {
            "VcCompare": False,
            "VcCommit": False,
            "VcUpdate": False,
            "VcPush": False,
            "VcAdd": False,
            "VcResolved": False,
            "VcRemove": False,
            "VcRevert": False,
            # Never offer to delete the repository root itself.
            "VcDeleteLocally": bool(paths) and self.vc.root not in paths,
        }
        path_states = dict(zip(paths, states))
        self.vc.update_actions_for_paths(path_states, action_sensitivity)
        for action, sensitivity in action_sensitivity.items():
            set_sensitive(action, sensitivity)
    else:
        # Fallback: supported actions simply follow selection presence.
        have_selection = bool(rows)
        for action in self.valid_vc_actions:
            set_sensitive(action, have_selection)
def _get_selected_files(self):
    """Return the selected tree rows as filesystem paths.

    Empty/missing entries are dropped and a single trailing slash
    is stripped from directory paths.
    """
    model, rows = self.treeview.get_selection().get_selected_rows()
    sel = [self.model.value_path(self.model.get_iter(r), 0) for r in rows]
    # Remove empty entries and trailing slashes.  BUG FIX: the old
    # expression indexed x[-1] unconditionally, raising IndexError on
    # empty strings instead of filtering them out as the comment
    # promised.
    return [x[:-1] if x.endswith("/") else x for x in sel if x]
def _command_iter(self, command, files, refresh, working_dir=None):
    """Run 'command' on 'files'. Return a tuple of the directory the
    command was executed in and the output of the command.
    """
    msg = misc.shelljoin(command)
    yield "[%s] %s" % (self.label_text, msg.replace("\n", "\t"))

    def relpath(pbase, p):
        # Strip pbase (plus separator) from p; "." when p == pbase.
        kill = 0
        if len(pbase) and p.startswith(pbase):
            kill = len(pbase) + 1
        return p[kill:] or "."

    if working_dir:
        workdir = self.vc.get_working_directory(working_dir)
    elif len(files) == 1 and os.path.isdir(files[0]):
        workdir = self.vc.get_working_directory(files[0])
    else:
        workdir = self.vc.get_working_directory(_commonprefix(files))
    files = [relpath(workdir, f) for f in files]
    r = None
    self.consolestream.command(misc.shelljoin(command + files) + " (in %s)\n" % workdir)
    readiter = misc.read_pipe_iter(command + files, self.consolestream,
                                   workdir=workdir)
    try:
        # Keep pumping until the pipe iterator delivers the final,
        # non-None result.
        while r is None:
            r = next(readiter)
            self.consolestream.output(r)
            yield 1
    except IOError as e:
        misc.run_dialog("Error running command.\n'%s'\n\nThe error was:\n%s" % ( misc.shelljoin(command), e),
                        parent=self, messagetype=gtk.MESSAGE_ERROR)
    self.consolestream.output("\n")
    if refresh:
        self.refresh_partial(workdir)
    yield workdir, r
def _command(self, command, files, refresh=1, working_dir=None):
    """Run 'command' on 'files'.

    Scheduled asynchronously via _command_iter.
    """
    self.scheduler.add_task(self._command_iter(command, files, refresh,
                                               working_dir))
def _command_on_selected(self, command, refresh=1):
    """Run *command* on the current selection, if anything is selected."""
    selected = self._get_selected_files()
    if selected:
        self._command(command, selected, refresh)
def on_button_update_clicked(self, obj):
    """Update the selection, preferring the plugin's own handler."""
    try:
        self.vc.update(self._command, self._get_selected_files())
    except NotImplementedError:
        self._command_on_selected(self.vc.update_command())
def on_button_push_clicked(self, obj):
    """Open the push dialog."""
    vcdialogs.PushDialog(self).run()
def on_button_commit_clicked(self, obj):
    """Open the commit dialog."""
    vcdialogs.CommitDialog(self).run()
def on_button_add_clicked(self, obj):
    """Add the selection to version control."""
    # This is an evil hack to let CVS and SVN < 1.7 deal with the
    # requirement of adding folders from their immediate parent.
    if self.vc.NAME in ("CVS", "Subversion"):
        selected = self._get_selected_files()
        dirs = [s for s in selected if os.path.isdir(s)]
        files = [s for s in selected if os.path.isfile(s)]
        # Each directory has to be added from its parent directory.
        for path in dirs:
            self._command(self.vc.add_command(), [path],
                          working_dir=os.path.dirname(path))
        if files:
            self._command(self.vc.add_command(), files)
    else:
        self._command_on_selected(self.vc.add_command())
def on_button_remove_clicked(self, obj):
    """Remove the selection from version control, confirming folders."""
    selected = self._get_selected_files()
    if any(os.path.isdir(p) for p in selected):
        # TODO: Improve and reuse this dialog for the non-VC delete action
        dialog = gtk.MessageDialog(
            parent=self.widget.get_toplevel(),
            flags=gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
            type=gtk.MESSAGE_WARNING,
            message_format=_("Remove folder and all its files?"))
        dialog.format_secondary_text(
            _("This will remove all selected files and folders, and all "
              "files within any selected folders, from version control."))

        dialog.add_button(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL)
        dialog.add_button(_("_Remove"), gtk.RESPONSE_OK)
        response = dialog.run()
        dialog.destroy()
        if response != gtk.RESPONSE_OK:
            return

    try:
        self.vc.remove(self._command, self._get_selected_files())
    except NotImplementedError:
        self._command_on_selected(self.vc.remove_command())
def on_button_resolved_clicked(self, obj):
    """Mark the selection's conflicts as resolved."""
    try:
        self.vc.resolve(self._command, self._get_selected_files())
    except NotImplementedError:
        self._command_on_selected(self.vc.resolved_command())
def on_button_revert_clicked(self, obj):
    """Revert local changes in the selection."""
    try:
        self.vc.revert(self._command, self._get_selected_files())
    except NotImplementedError:
        self._command_on_selected(self.vc.revert_command())
def on_button_delete_clicked(self, obj):
    """Delete the selected files/folders from the filesystem."""
    files = self._get_selected_files()
    for name in files:
        try:
            if os.path.isfile(name):
                os.remove(name)
            elif os.path.isdir(name):
                # Directories require explicit confirmation before a
                # recursive removal.
                if misc.run_dialog(_("'%s' is a directory.\nRemove recursively?") % os.path.basename(name),
                                   parent = self,
                                   buttonstype=gtk.BUTTONS_OK_CANCEL) == gtk.RESPONSE_OK:
                    shutil.rmtree(name)
        except OSError as e:
            misc.run_dialog(_("Error removing %s\n\n%s.") % (name, e),
                            parent=self)
    workdir = _commonprefix(files)
    self.refresh_partial(workdir)
def on_button_diff_clicked(self, obj):
    """Launch a diff for every selected file."""
    for selected_path in self._get_selected_files():
        self.run_diff(selected_path)
def open_external(self):
    """Open the selected files with the system's default handlers."""
    self._open_files(self._get_selected_files())
def refresh(self):
    """Re-scan the whole tree from the root location."""
    self.set_location(self.model.value_path(self.model.get_iter_root(), 0))
def refresh_partial(self, where):
    """Re-scan only the subtree rooted at *where* when possible."""
    if not self.actiongroup.get_action("VcFlatten").get_active():
        it = self.find_iter_by_name(where)
        if it:
            # Replace the row with a fresh one so sibling rows keep
            # their state, then re-scan beneath it.
            newiter = self.model.insert_after(None, it)
            self.model.set_value(
                newiter, self.model.column_index(tree.COL_PATH, 0), where)
            self.model.set_path_state(newiter, 0, tree.STATE_NORMAL, True)
            self.model.remove(it)
            self.treeview.grab_focus()
            self.treeview.get_selection().select_iter(newiter)
            self.scheduler.add_task(self._search_recursively_iter(newiter))
            self.scheduler.add_task(self.on_treeview_selection_changed)
            self.scheduler.add_task(self.on_treeview_cursor_changed)
    else:
        # XXX fixme
        # Flattened mode has no subtree to replace; full refresh.
        self.refresh()
def _update_item_state(self, it, vcentry, location):
    """Copy state and metadata from *vcentry* into tree row *it*."""
    self.model.set_path_state(it, 0, vcentry.state, vcentry.isdir)

    def put(col, val):
        self.model.set_value(it, self.model.column_index(col, 0), val)

    put(COL_LOCATION, location)
    put(COL_STATUS, vcentry.get_status())
    put(COL_REVISION, vcentry.rev)
    put(COL_OPTIONS, vcentry.options)
def on_file_changed(self, filename):
    """Refresh the row for *filename* after an external modification."""
    it = self.find_iter_by_name(filename)
    if it:
        path = self.model.value_path(it, 0)
        self.vc.update_file_state(path)
        files = self.vc.lookup_files([], [(os.path.basename(path), path)])[1]
        for e in files:
            if e.path == path:
                # Length of the root path plus one separator character.
                prefixlen = 1 + len( self.model.value_path( self.model.get_iter_root(), 0 ) )
                self._update_item_state( it, e, e.parent[prefixlen:])
                return
def find_iter_by_name(self, name):
    """Return the tree iter whose path equals *name*, or None.

    Walks down from the root, descending into whichever child's path
    is a prefix of *name*.
    """
    it = self.model.get_iter_root()
    path = self.model.value_path(it, 0)
    while it:
        if name == path:
            return it
        elif name.startswith(path):
            child = self.model.iter_children( it )
            while child:
                path = self.model.value_path(child, 0)
                if name == path:
                    return child
                elif name.startswith(path):
                    # Descend into this child.
                    break
                else:
                    child = self.model.iter_next( child )
            it = child
        else:
            break
    return None
def on_console_view_toggle(self, box, event=None):
    """Show or hide the console pane depending on which box fired."""
    hiding = box == self.console_hide_box
    self.prefs.vc_console_visible = 0 if hiding else 1
    if hiding:
        self.console_hbox.hide()
        self.console_show_box.show()
    else:
        self.console_hbox.show()
        self.console_show_box.hide()
def on_consoleview_populate_popup(self, textview, menu):
    """Prepend a 'clear console' item to the console's context menu."""
    buf = textview.get_buffer()
    clear_cb = lambda *args: buf.delete(*buf.get_bounds())
    clear_action = gtk.ImageMenuItem(gtk.STOCK_CLEAR)
    clear_action.connect("activate", clear_cb)
    menu.insert(clear_action, 0)
    menu.insert(gtk.SeparatorMenuItem(), 1)
    menu.show_all()
def on_treeview_cursor_changed(self, *args):
    """Track the cursor and recompute prev/next-diff availability."""
    cursor_path, cursor_col = self.treeview.get_cursor()
    if not cursor_path:
        self.emit("next-diff-changed", False, False)
        self.current_path = cursor_path
        return

    # If invoked directly rather than through a callback, we always check
    if not args:
        skip = False
    else:
        try:
            old_cursor = self.model.get_iter(self.current_path)
        except (ValueError, TypeError):
            # An invalid path gives ValueError; None gives a TypeError
            skip = False
        else:
            # We can skip recalculation if the new cursor is between
            # the previous/next bounds, and we weren't on a changed row
            state = self.model.get_state(old_cursor, 0)
            if state not in (tree.STATE_NORMAL, tree.STATE_EMPTY):
                skip = False
            else:
                if self.prev_path is None and self.next_path is None:
                    skip = True
                elif self.prev_path is None:
                    skip = cursor_path < self.next_path
                elif self.next_path is None:
                    skip = self.prev_path < cursor_path
                else:
                    skip = self.prev_path < cursor_path < self.next_path

    if not skip:
        prev, next = self.model._find_next_prev_diff(cursor_path)
        self.prev_path, self.next_path = prev, next
        have_next_diffs = (prev is not None, next is not None)
        self.emit("next-diff-changed", *have_next_diffs)
    self.current_path = cursor_path
def next_diff(self, direction):
    """Move the cursor to the previous or next changed row."""
    if direction == gtk.gdk.SCROLL_UP:
        path = self.prev_path
    else:
        path = self.next_path
    if path:
        self.treeview.expand_to_path(path)
        self.treeview.set_cursor(path)
def on_refresh_activate(self, *extra):
    """Re-read the location currently shown in the file entry."""
    self.on_fileentry_activate(self.fileentry)
def on_find_activate(self, *extra):
    """Start the tree view's interactive search."""
    self.treeview.emit("start-interactive-search")
| gpl-2.0 |
WaldurChatbot/Waldur-Chatbot | common/offline_graphs/totalcosts_offline.py | 1 | 16907 | import json
import collections
import matplotlib
matplotlib.use('Agg') # requirement of matplotlib
import matplotlib.pyplot as plt
import numpy as np
from textwrap import wrap
myinput = """[
{
"url":"https://api.etais.ee/api/invoices/9e67980771a94de3bd0075fe84522b05/",
"uuid":"9e67980771a94de3bd0075fe84522b05",
"number":100151,
"customer":"https://api.etais.ee/api/customers/5991d0c109df4e8cab4f9dd660295517/",
"price":"87.7300000",
"tax":"0.0000000",
"total":"87.7300000",
"state":"pending",
"year":2018,
"month":1,
"issuer_details":{
"phone":{
"national_number":"5555555",
"country_code":"372"
},
"account":"123456789",
"country_code":"EE",
"address":"Lille 4-205",
"country":"Estonia",
"company":"OpenNode",
"postal":"80041",
"vat_code":"EE123456789",
"email":"info@opennodecloud.com",
"bank":"Estonian Bank"
},
"invoice_date":null,
"due_date":null,
"customer_details":null,
"openstack_items":[
{
"name":"WaldurChatbot (Small / Generic)",
"price":87.73,
"tax":"0.0000000",
"total":"87.7300000",
"unit_price":"2.8300000",
"unit":"day",
"start":"2017-12-01T00:00:00Z",
"end":"2017-12-31T23:59:59.999999Z",
"product_code":"",
"article_code":"",
"project_name":"Waldur Chatbot testbed",
"project_uuid":"88879e68a4c84f6ea0e05fb9bc59ea8f",
"scope_type":"OpenStack.Tenant",
"scope_uuid":"ed505f9ebd8c491b94c6f8dfc30b54b0",
"package":"https://api.etais.ee/api/openstack-packages/517047bdfefe418899c981663f1ea5f5/",
"tenant_name":"WaldurChatbot",
"tenant_uuid":"ed505f9ebd8c491b94c6f8dfc30b54b0",
"usage_days":31,
"template_name":"Generic",
"template_uuid":"a85daef727d344b3858541e4bc29a274",
"template_category":"Small"
}
],
"offering_items":[
],
"generic_items":[
]
},
{
"url":"https://api.etais.ee/api/invoices/9e67980771a94de3bd0075fe84522b05/",
"uuid":"9e67980771a94de3bd0075fe84522b05",
"number":100151,
"customer":"https://api.etais.ee/api/customers/5991d0c109df4e8cab4f9dd660295517/",
"price":"87.7300000",
"tax":"0.0000000",
"total":"87.7300000",
"state":"pending",
"year":2017,
"month":12,
"issuer_details":{
"phone":{
"national_number":"5555555",
"country_code":"372"
},
"account":"123456789",
"country_code":"EE",
"address":"Lille 4-205",
"country":"Estonia",
"company":"OpenNode",
"postal":"80041",
"vat_code":"EE123456789",
"email":"info@opennodecloud.com",
"bank":"Estonian Bank"
},
"invoice_date":null,
"due_date":null,
"customer_details":null,
"openstack_items":[
{
"name":"WaldurChatbot (Small / Generic)",
"price":87.73,
"tax":"0.0000000",
"total":"87.7300000",
"unit_price":"2.8300000",
"unit":"day",
"start":"2017-12-01T00:00:00Z",
"end":"2017-12-31T23:59:59.999999Z",
"product_code":"",
"article_code":"",
"project_name":"Waldur Chatbot testbed",
"project_uuid":"88879e68a4c84f6ea0e05fb9bc59ea8f",
"scope_type":"OpenStack.Tenant",
"scope_uuid":"ed505f9ebd8c491b94c6f8dfc30b54b0",
"package":"https://api.etais.ee/api/openstack-packages/517047bdfefe418899c981663f1ea5f5/",
"tenant_name":"WaldurChatbot",
"tenant_uuid":"ed505f9ebd8c491b94c6f8dfc30b54b0",
"usage_days":31,
"template_name":"Generic",
"template_uuid":"a85daef727d344b3858541e4bc29a274",
"template_category":"Small"
}
],
"offering_items":[
],
"generic_items":[
]
},
{
"url":"https://api.etais.ee/api/invoices/59fd12a0d3e34f829d6a0eefd2e5ee41/",
"uuid":"59fd12a0d3e34f829d6a0eefd2e5ee41",
"number":100156,
"customer":"https://api.etais.ee/api/customers/0d689685ab3444bbb592338e24613f03/",
"price":"87.7300000",
"tax":"0.0000000",
"total":"87.7300000",
"state":"pending",
"year":2017,
"month":12,
"issuer_details":{
"phone":{
"national_number":"5555555",
"country_code":"372"
},
"account":"123456789",
"country_code":"EE",
"address":"Lille 4-205",
"country":"Estonia",
"company":"OpenNode",
"postal":"80041",
"vat_code":"EE123456789",
"email":"info@opennodecloud.com",
"bank":"Estonian Bank"
},
"invoice_date":null,
"due_date":null,
"customer_details":null,
"openstack_items":[
{
"name":"Waldur Maie cloud (Small / Generic)",
"price":87.73,
"tax":"0.0000000",
"total":"87.7300000",
"unit_price":"2.8300000",
"unit":"day",
"start":"2017-12-01T00:00:00Z",
"end":"2017-12-31T23:59:59.999999Z",
"product_code":"",
"article_code":"",
"project_name":"W-M project",
"project_uuid":"26fc83e64ea0473fb9f57f0ae978b396",
"scope_type":"OpenStack.Tenant",
"scope_uuid":"1571bca1f6594ad3bede4d2c8d64755a",
"package":"https://api.etais.ee/api/openstack-packages/81e93543103b4cf8a5d3658e026e98f3/",
"tenant_name":"Waldur Maie cloud",
"tenant_uuid":"1571bca1f6594ad3bede4d2c8d64755a",
"usage_days":31,
"template_name":"Generic",
"template_uuid":"a85daef727d344b3858541e4bc29a274",
"template_category":"Small"
}
],
"offering_items":[
],
"generic_items":[
]
},
{
"url":"https://api.etais.ee/api/invoices/bb6f38e908e7493791c65b26e88e1619/",
"uuid":"bb6f38e908e7493791c65b26e88e1619",
"number":100121,
"customer":"https://api.etais.ee/api/customers/5991d0c109df4e8cab4f9dd660295517/",
"price":"84.9000000",
"tax":"0.0000000",
"total":"84.9000000",
"state":"created",
"year":2017,
"month":11,
"issuer_details":{
"phone":{
"national_number":"5555555",
"country_code":"372"
},
"account":"123456789",
"country_code":"EE",
"address":"Lille 4-205",
"country":"Estonia",
"company":"OpenNode",
"postal":"80041",
"vat_code":"EE123456789",
"email":"info@opennodecloud.com",
"bank":"Estonian Bank"
},
"invoice_date":"2017-12-01",
"due_date":"2017-12-31",
"customer_details":null,
"openstack_items":[
{
"name":"WaldurChatbot (Small / Generic)",
"price":84.9,
"tax":"0.0000000",
"total":"84.9000000",
"unit_price":"2.8300000",
"unit":"day",
"start":"2017-11-01T00:00:00Z",
"end":"2017-11-30T23:59:59.999999Z",
"product_code":"",
"article_code":"",
"project_name":"Waldur Chatbot testbed",
"project_uuid":"88879e68a4c84f6ea0e05fb9bc59ea8f",
"scope_type":"OpenStack.Tenant",
"scope_uuid":"ed505f9ebd8c491b94c6f8dfc30b54b0",
"package":"https://api.etais.ee/api/openstack-packages/517047bdfefe418899c981663f1ea5f5/",
"tenant_name":"WaldurChatbot",
"tenant_uuid":"ed505f9ebd8c491b94c6f8dfc30b54b0",
"usage_days":30,
"template_name":"Generic",
"template_uuid":"a85daef727d344b3858541e4bc29a274",
"template_category":"Small"
}
],
"offering_items":[
],
"generic_items":[
]
},
{
"url":"https://api.etais.ee/api/invoices/d13cdd4ef4d2478e8e0cf0961d20e6f2/",
"uuid":"d13cdd4ef4d2478e8e0cf0961d20e6f2",
"number":100129,
"customer":"https://api.etais.ee/api/customers/0d689685ab3444bbb592338e24613f03/",
"price":"53.7700000",
"tax":"0.0000000",
"total":"53.7700000",
"state":"created",
"year":2017,
"month":11,
"issuer_details":{
"phone":{
"national_number":"5555555",
"country_code":"372"
},
"account":"123456789",
"country_code":"EE",
"address":"Lille 4-205",
"country":"Estonia",
"company":"OpenNode",
"postal":"80041",
"vat_code":"EE123456789",
"email":"info@opennodecloud.com",
"bank":"Estonian Bank"
},
"invoice_date":"2017-12-01",
"due_date":"2017-12-31",
"customer_details":null,
"openstack_items":[
{
"name":"Waldur Maie cloud (Small / Generic)",
"price":53.77,
"tax":"0.0000000",
"total":"53.7700000",
"unit_price":"2.8300000",
"unit":"day",
"start":"2017-11-12T11:29:21.522230Z",
"end":"2017-11-30T23:59:59.999999Z",
"product_code":"",
"article_code":"",
"project_name":"W-M project",
"project_uuid":"26fc83e64ea0473fb9f57f0ae978b396",
"scope_type":"OpenStack.Tenant",
"scope_uuid":"1571bca1f6594ad3bede4d2c8d64755a",
"package":"https://api.etais.ee/api/openstack-packages/81e93543103b4cf8a5d3658e026e98f3/",
"tenant_name":"Waldur Maie cloud",
"tenant_uuid":"1571bca1f6594ad3bede4d2c8d64755a",
"usage_days":19,
"template_name":"Generic",
"template_uuid":"a85daef727d344b3858541e4bc29a274",
"template_category":"Small"
}
],
"offering_items":[
],
"generic_items":[
]
},
{
"url":"https://api.etais.ee/api/invoices/b094173f50a848e19d3362c84eabebc4/",
"uuid":"b094173f50a848e19d3362c84eabebc4",
"number":100096,
"customer":"https://api.etais.ee/api/customers/5991d0c109df4e8cab4f9dd660295517/",
"price":"87.7300000",
"tax":"0.0000000",
"total":"87.7300000",
"state":"created",
"year":2017,
"month":10,
"issuer_details":{
"phone":{
"national_number":"5555555",
"country_code":"372"
},
"account":"123456789",
"country_code":"EE",
"address":"Lille 4-205",
"country":"Estonia",
"company":"OpenNode",
"postal":"80041",
"vat_code":"EE123456789",
"email":"info@opennodecloud.com",
"bank":"Estonian Bank"
},
"invoice_date":"2017-11-01",
"due_date":"2017-12-01",
"customer_details":null,
"openstack_items":[
{
"name":"WaldurChatbot (Small / Generic)",
"price":87.73,
"tax":"0.0000000",
"total":"87.7300000",
"unit_price":"2.8300000",
"unit":"day",
"start":"2017-10-01T00:00:00Z",
"end":"2017-10-31T23:59:59.999999Z",
"product_code":"",
"article_code":"",
"project_name":"Waldur Chatbot testbed",
"project_uuid":"88879e68a4c84f6ea0e05fb9bc59ea8f",
"scope_type":"OpenStack.Tenant",
"scope_uuid":"ed505f9ebd8c491b94c6f8dfc30b54b0",
"package":"https://api.etais.ee/api/openstack-packages/517047bdfefe418899c981663f1ea5f5/",
"tenant_name":"WaldurChatbot",
"tenant_uuid":"ed505f9ebd8c491b94c6f8dfc30b54b0",
"usage_days":31,
"template_name":"Generic",
"template_uuid":"a85daef727d344b3858541e4bc29a274",
"template_category":"Small"
}
],
"offering_items":[
],
"generic_items":[
]
},
{
"url":"https://api.etais.ee/api/invoices/b636ee1236e0486994cdd1ffda4c7e1d/",
"uuid":"b636ee1236e0486994cdd1ffda4c7e1d",
"number":100076,
"customer":"https://api.etais.ee/api/customers/5991d0c109df4e8cab4f9dd660295517/",
"price":"11.3200000",
"tax":"0.0000000",
"total":"11.3200000",
"state":"created",
"year":2017,
"month":9,
"issuer_details":{
"phone":{
"national_number":"5555555",
"country_code":"372"
},
"account":"123456789",
"country_code":"EE",
"address":"Lille 4-205",
"country":"Estonia",
"company":"OpenNode",
"postal":"80041",
"vat_code":"EE123456789",
"email":"info@opennodecloud.com",
"bank":"Estonian Bank"
},
"invoice_date":"2017-10-01",
"due_date":"2017-10-31",
"customer_details":null,
"openstack_items":[
{
"name":"WaldurChatbot (Small / Generic)",
"price":11.32,
"tax":"0.0000000",
"total":"11.3200000",
"unit_price":"2.8300000",
"unit":"day",
"start":"2017-09-27T13:53:31.425080Z",
"end":"2017-09-30T23:59:59.999999Z",
"product_code":"",
"article_code":"",
"project_name":"Waldur Chatbot testbed",
"project_uuid":"88879e68a4c84f6ea0e05fb9bc59ea8f",
"scope_type":"OpenStack.Tenant",
"scope_uuid":"ed505f9ebd8c491b94c6f8dfc30b54b0",
"package":"https://api.etais.ee/api/openstack-packages/517047bdfefe418899c981663f1ea5f5/",
"tenant_name":"WaldurChatbot",
"tenant_uuid":"ed505f9ebd8c491b94c6f8dfc30b54b0",
"usage_days":4,
"template_name":"Generic",
"template_uuid":"a85daef727d344b3858541e4bc29a274",
"template_category":"Small"
}
],
"offering_items":[
],
"generic_items":[
]
}
]"""
# Parse the embedded sample payload: a JSON list of invoice dicts.
data = json.loads(myinput)

# Month number (1-12) -> three-letter English abbreviation.
num_to_monthdict = {
    1:'Jan',
    2:'Feb',
    3:'Mar',
    4:'Apr',
    5:'May',
    6:'Jun',
    7:'Jul',
    8:'Aug',
    9:'Sep',
    10:'Oct',
    11:'Nov',
    12:'Dec'
}
# Collect month labels (plotx) and invoice totals (ploty) for one
# customer, oldest invoice first.
plotx = []
ploty = []

uuid = '5991d0c109df4e8cab4f9dd660295517'
customer = 'https://api.etais.ee/api/customers/' + uuid + '/'
newlist = []

print(type(data))
print(type(data[0]))

# The payload lists invoices newest-first; walk it backwards to get
# chronological order.
for invoice in reversed(data):
    if invoice['customer'] == customer:
        newlist.append(invoice)
        plotx.append(num_to_monthdict[invoice['month']] + " " + str(invoice['year']))
        ploty.append(float(invoice['total']))

print("### " + str(len(newlist)))

'''
result = collections.OrderedDict()
for i in range(len(plotx)):
    result[plotx[i]] = float(ploty[i])
'''

print(plotx)
print(ploty)

N = len(ploty)        # number of bars
ind = np.arange(N)    # bar x positions
width = 0.35          # bar width
# Build the bar chart: one green bar per invoice total.
fig, ax = plt.subplots()
rects1 = ax.bar(ind, ploty, width, color='#75ad58')

ax.set_xlabel('Months')
ax.set_ylabel('Total costs')
ax.set_xticks(ind + width / 2)
ax.set_xticklabels(plotx)

# Wrap the (deliberately long placeholder) title at 60 characters.
title = ax.set_title("\n".join(wrap('Last ' + str(N) + 'month total costs but then everytime the title gets longer '
                                    'omg like wtf when does it stop OMG HELP well okay'
                                    'let me tell you a story all about how'
                                    'my life got turned upside down'
                                    'so id like to take a moment just sit right there', 60)))
def autolabel(rects, ax):
    """Annotate each bar in *rects* with its integer height."""
    bottom, top = ax.get_ylim()
    # Offset labels by 1% of the visible y-range so they sit just
    # above the bar top.
    offset = (top - bottom) * 0.01
    for rect in rects:
        bar_height = rect.get_height()
        x_center = rect.get_x() + rect.get_width() / 2.
        ax.text(x_center, bar_height + offset,
                '%d' % int(bar_height),
                ha='center', va='bottom')

autolabel(rects1, ax)
print()
counter = 1
# Recolour the N-th child of the axes blue -- presumably the last bar,
# i.e. the current-month estimate.  NOTE(review): relying on
# ax.get_children() ordering is fragile; confirm the bar patches come
# first among the children.
for child in ax.get_children():
    if counter == N:
        child.set_color('#2388d6')
        print("HERE:" + str(child))
    else:
        print(child)
    counter += 1

# Manual legend: green = issued invoice, blue = estimate.
real_invoice = matplotlib.patches.Patch(color='#75ad58', label='Invoice')
estimate_invoice = matplotlib.patches.Patch(color='#2388d6', label='Estimation')
plt.legend(handles=[real_invoice, estimate_invoice])

fig.tight_layout()
title.set_y(1.05)
fig.subplots_adjust(top=0.8)

#plt.show()
fig.savefig('foo.png')
| mit |
kobejean/tensorflow | tensorflow/contrib/optimizer_v2/gradient_descent.py | 19 | 2974 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""GradientDescent optimizer for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.optimizer_v2 import optimizer_v2
from tensorflow.python.framework import ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.training import training_ops
class GradientDescentOptimizer(optimizer_v2.OptimizerV2):
  """Optimizer that implements the gradient descent algorithm."""
  def __init__(self, learning_rate, use_locking=False, name="GradientDescent"):
    """Construct a new gradient descent optimizer.
    The learning rate arg below is a hyperparameter where a hyperparameter is
    defined as a scalar Tensor, a regular Python value or a callable (which
    will be evaluated when `apply_gradients` is called) returning a scalar
    Tensor or a Python value.
    Args:
      learning_rate: A float hyperparameter. The learning rate to use.
      use_locking: If True use locks for update operations.
      name: Optional name prefix for the operations created when applying
        gradients. Defaults to "GradientDescent".
    """
    super(GradientDescentOptimizer, self).__init__(use_locking, name)
    self._set_hyper("learning_rate", learning_rate)
  def _apply_dense(self, grad, var, state):
    # Dense update for ref variables via the fused apply_gradient_descent op.
    return training_ops.apply_gradient_descent(
        var,
        state.get_hyper("learning_rate", var.dtype.base_dtype),
        grad,
        use_locking=self._use_locking).op
  def _resource_apply_dense(self, grad, handle, state):
    # Dense update for resource (handle-based) variables.
    lr = state.get_hyper("learning_rate", grad.dtype.base_dtype)
    return training_ops.resource_apply_gradient_descent(
        handle.handle, lr, grad, use_locking=self._use_locking)
  def _resource_apply_sparse_duplicate_indices(
      self, grad, handle, indices, state):
    # Sparse update: scatter-add of -lr * grad; duplicate indices accumulate,
    # which is why no locking argument is needed here.
    lr = state.get_hyper("learning_rate", grad.dtype.base_dtype)
    return resource_variable_ops.resource_scatter_add(
        handle.handle, indices, -grad * lr)
  def _apply_sparse_duplicate_indices(self, grad, var, state):
    # Sparse update for ref variables: scatter-subtract lr-scaled slices.
    delta = ops.IndexedSlices(
        grad.values * state.get_hyper("learning_rate", var.dtype.base_dtype),
        grad.indices, grad.dense_shape)
    return var.scatter_sub(delta, use_locking=self._use_locking)
| apache-2.0 |
cshallue/models | research/neural_programmer/data_utils.py | 5 | 27733 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for constructing vocabulary, converting the examples to integer format and building the required masks for batch computation Author: aneelakantan (Arvind Neelakantan)
"""
from __future__ import print_function
import copy
import numbers
import numpy as np
import wiki_data
def return_index(a):
  """Return the index of the first element equal to 1.0, or None if absent."""
  for idx, value in enumerate(a):
    if value == 1.0:
      return idx
def construct_vocab(data, utility, add_word=False):
  """Build the word vocabulary from questions and column names.

  For every example, snapshots the original number/word columns and their
  names (deep copies) onto ``example.original_*``. Unless ``add_word`` is
  set or the example is marked bad, every non-numeric token in the question
  and in the word/number column names is registered in ``utility``'s vocab
  maps (``words``, ``word_ids``, ``reverse_word_ids``, ``word_count``).

  Fixes: the original used the Python-2-only ``dict.has_key`` and repeated
  the same counting loop three times.

  Args:
    data: iterable of example objects (see wiki_data).
    utility: config object carrying the mutable vocabulary maps.
    add_word: when True, only the original_* snapshots are taken.
  """
  for example in data:
    # Preserve the pre-processing columns/names; later passes mutate the
    # working copies in place.
    example.original_nc = copy.deepcopy(example.number_columns)
    example.original_wc = copy.deepcopy(example.word_columns)
    example.original_nc_names = copy.deepcopy(example.number_column_names)
    example.original_wc_names = copy.deepcopy(example.word_column_names)
    if add_word:
      continue
    if example.is_bad_example:
      continue
    _count_words(example.question, utility)
    for col_name in example.word_column_names:
      _count_words(col_name, utility)
    for col_name in example.number_column_names:
      _count_words(col_name, utility)


def _count_words(words, utility):
  """Register each non-numeric token in the vocab and bump its count."""
  for word in words:
    if isinstance(word, numbers.Number):
      continue
    if word not in utility.word_ids:
      utility.words.append(word)
      utility.word_count[word] = 1
      utility.word_ids[word] = len(utility.word_ids)
      utility.reverse_word_ids[utility.word_ids[word]] = word
    else:
      utility.word_count[word] += 1
def word_lookup(word, utility):
  """Return `word` if it is in the vocabulary, else the UNK token.

  Fixes: replaces the Python-2-only ``dict.has_key`` with the ``in``
  operator, which works on both Python 2 and 3.
  """
  return word if word in utility.word_ids else utility.unk_token
def convert_to_int_2d_and_pad(a, utility):
  """Convert a 2-D list of word lists to word-id rows of fixed length.

  Each row is truncated to FLAGS.max_entry_length, padded with the dummy
  token, and mapped through word_lookup (unknown words become UNK ids).

  NOTE(review): rows that are not longer than max_entry_length are padded
  *in place* via ``b.append`` -- callers that share these inner lists (e.g.
  the shallow column-name copies in complete_wiki_processing) observe that
  mutation; confirm before removing the side effect.
  """
  ans = []
  #print a
  for b in a:
    temp = []
    if (len(b) > utility.FLAGS.max_entry_length):
      b = b[0:utility.FLAGS.max_entry_length]
    for remaining in range(len(b), utility.FLAGS.max_entry_length):
      b.append(utility.dummy_token)
    assert len(b) == utility.FLAGS.max_entry_length
    for word in b:
      temp.append(utility.word_ids[word_lookup(word, utility)])
    ans.append(temp)
  #print ans
  return ans
def convert_to_bool_and_pad(a, utility):
  """Convert a numeric matrix to booleans and right-pad every row.

  Entries >= 1 become True, everything else False; each row is then padded
  with False up to FLAGS.max_elements. Returns a plain list of lists.
  """
  padded = []
  for row in a.tolist():
    flags = [value >= 1 for value in row]
    flags += [False] * (utility.FLAGS.max_elements - len(flags))
    padded.append(flags)
  return padded
# Module-level registry of table keys already emitted by
# complete_wiki_processing (table_key -> 1). Shared mutable global state.
seen_tables = {}
def partial_match(question, table, number):
  """Flag table cells that partially match any question token.

  With number=True a cell matches on equality; otherwise it matches when a
  question token occurs inside the cell (membership via ``in``).

  Returns:
    (matrix, match): a 0/1.0 matrix shaped like ``table`` and a dict mapping
    each first-dimension index with at least one hit to 1.0.
  """
  answer = [[0 for _ in row] for row in table]
  match = {}
  for i, row in enumerate(table):
    for j, cell in enumerate(row):
      for word in question:
        hit = (word == cell) if number else (word in cell)
        if hit:
          answer[i][j] = 1.0
          match[i] = 1.0
  return answer, match
def exact_match(question, table, number):
  #performs exact match operation
  """Flag table cells that exactly match the question.

  number=True: a cell matches when it equals some question token.
  number=False: each cell is a token sequence; it matches when it occurs as
  a contiguous sublist of the question, and every occurrence's
  (start, length) is recorded.

  Returns:
    (matrix, match, matched_indices): 0/1.0 matrix shaped like ``table``,
    dict of matched first-dimension indices -> 1.0, and the list of
    (start, length) spans matched in the question (word case only).
  """
  answer = [[0 for _ in row] for row in table]
  match = {}
  matched_indices = []
  for i, row in enumerate(table):
    for j, cell in enumerate(row):
      if number:
        if any(word == cell for word in question):
          match[i] = 1.0
          answer[i][j] = 1.0
      else:
        span = len(cell)
        for k in range(len(question)):
          if k + span <= len(question):
            if question[k:k + span] == cell:
              match[i] = 1.0
              answer[i][j] = 1.0
              matched_indices.append((k, span))
  return answer, match, matched_indices
def partial_column_match(question, table, number):
  """Per-column 1.0 flag when any question token occurs in the column name.

  ``number`` is accepted for call-site symmetry but unused.
  """
  flags = [0] * len(table)
  for i, name in enumerate(table):
    if any(word in name for word in question):
      flags[i] = 1.0
  return flags
def exact_column_match(question, table, number):
  #performs exact match on column names
  """Per-column 1.0 flag when the full column name occurs as a contiguous
  sublist of the question; every occurrence's (start, length) is recorded.

  ``number`` is accepted for call-site symmetry but unused.
  """
  flags = [0] * len(table)
  matched_indices = []
  for i, name in enumerate(table):
    span = len(name)
    for k in range(len(question)):
      if k + span <= len(question) and question[k:k + span] == name:
        flags[i] = 1.0
        matched_indices.append((k, span))
  return flags, matched_indices
def get_max_entry(a):
  """Return the most frequent entry of `a`, ignoring the "UNK, " sentinel.

  Returns -1.0 when `a` has no countable entries or when the winner occurs
  only once (i.e. nothing actually repeats).

  Fixes: replaces the Python-2-only ``dict.has_key`` with ``dict.get``.
  Ties resolve to the first-seen entry, matching the original stable sort.
  """
  counts = {}
  for w in a:
    if w != "UNK, ":
      counts[w] = counts.get(w, 0) + 1
  if not counts:
    return -1.0
  # max() returns the first maximal item, preserving insertion order on ties.
  key, val = max(counts.items(), key=lambda kv: kv[1])
  return key if val > 1 else -1.0
def list_join(a):
  """Join entries as "e1, e2, " -- note the trailing separator is kept,
  matching how group_by_max compares flattened word cells."""
  return "".join(str(w) + ", " for w in a)
def group_by_max(table, number):
  #computes the most frequently occurring entry in a column
  """For every first-dimension entry of `table`, mark with 1.0 the cells
  equal to the most frequent value (per get_max_entry); all others get 0.0.
  Word cells (number=False) are first flattened to strings via list_join.
  """
  answer = []
  for row in table:
    values = row if number else [list_join(cell) for cell in row]
    winner = get_max_entry(values)
    answer.append([1.0 if value == winner else 0.0 for value in values])
  return answer
def pick_one(a):
  """Return True if any row of `a` contains the value 1.0."""
  return any(1.0 in row for row in a)
def check_processed_cols(col, utility):
  """True when `col` has at least one entry that is neither the pad value
  nor the bad-number preprocessing sentinel."""
  return any(
      y != utility.FLAGS.pad_int and y != utility.FLAGS.bad_number_pre_process
      for y in col)
def complete_wiki_processing(data, utility, train=True):
  #convert to integers and padding
  """Final preprocessing pass: match, convert to ids, and pad each example.

  Mutates every non-bad example in place: computes exact/partial entry and
  column-name matches, group-by-max indicators, extracts up to two question
  numbers, converts the question and column names to word ids, and pads all
  per-column structures to FLAGS.max_elements / max_number_cols /
  max_word_cols. Also records each table in the module-global `seen_tables`.

  Args:
    data: list of example objects (see wiki_data) to process.
    utility: config object carrying FLAGS, vocab maps and special tokens.
    train: unused in this function; kept for call-site compatibility.

  Returns:
    The list of kept examples (bad examples and examples whose columns
    exceed FLAGS.max_elements are dropped).
  """
  processed_data = []
  num_bad_examples = 0
  for example in data:
    number_found = 0
    if (example.is_bad_example):
      num_bad_examples += 1
    if (not (example.is_bad_example)):
      example.string_question = example.question[:]
      #entry match
      example.processed_number_columns = example.processed_number_columns[:]
      example.processed_word_columns = example.processed_word_columns[:]
      example.word_exact_match, word_match, matched_indices = exact_match(
          example.string_question, example.original_wc, number=False)
      example.number_exact_match, number_match, _ = exact_match(
          example.string_question, example.original_nc, number=True)
      # Fall back to partial matching only when exact matching found nothing.
      if (not (pick_one(example.word_exact_match)) and not (
          pick_one(example.number_exact_match))):
        assert len(word_match) == 0
        assert len(number_match) == 0
        example.word_exact_match, word_match = partial_match(
            example.string_question, example.original_wc, number=False)
      #group by max
      example.word_group_by_max = group_by_max(example.original_wc, False)
      example.number_group_by_max = group_by_max(example.original_nc, True)
      #column name match
      example.word_column_exact_match, wcol_matched_indices = exact_column_match(
          example.string_question, example.original_wc_names, number=False)
      example.number_column_exact_match, ncol_matched_indices = exact_column_match(
          example.string_question, example.original_nc_names, number=False)
      if (not (1.0 in example.word_column_exact_match) and not (
          1.0 in example.number_column_exact_match)):
        example.word_column_exact_match = partial_column_match(
            example.string_question, example.original_wc_names, number=False)
        example.number_column_exact_match = partial_column_match(
            example.string_question, example.original_nc_names, number=False)
      # Append marker tokens so the model can see that a match occurred.
      if (len(word_match) > 0 or len(number_match) > 0):
        example.question.append(utility.entry_match_token)
      if (1.0 in example.word_column_exact_match or
          1.0 in example.number_column_exact_match):
        example.question.append(utility.column_match_token)
      example.string_question = example.question[:]
      example.number_lookup_matrix = np.transpose(
          example.number_lookup_matrix)[:]
      example.word_lookup_matrix = np.transpose(example.word_lookup_matrix)[:]
      # Shallow copies: inner lists are shared with the original attributes.
      example.columns = example.number_columns[:]
      example.word_columns = example.word_columns[:]
      example.len_total_cols = len(example.word_column_names) + len(
          example.number_column_names)
      example.column_names = example.number_column_names[:]
      example.word_column_names = example.word_column_names[:]
      example.string_column_names = example.number_column_names[:]
      example.string_word_column_names = example.word_column_names[:]
      example.sorted_number_index = []
      example.sorted_word_index = []
      example.column_mask = []
      example.word_column_mask = []
      example.processed_column_mask = []
      example.processed_word_column_mask = []
      example.word_column_entry_mask = []
      example.question_attention_mask = []
      example.question_number = example.question_number_1 = -1
      example.question_attention_mask = []
      example.ordinal_question = []
      example.ordinal_question_one = []
      new_question = []
      if (len(example.number_columns) > 0):
        example.len_col = len(example.number_columns[0])
      else:
        example.len_col = len(example.word_columns[0])
      # Replace exactly-matched question spans with UNK tokens.
      for (start, length) in matched_indices:
        for j in range(length):
          example.question[start + j] = utility.unk_token
      #print example.question
      # Pull out up to two numbers/dates from the question; the remaining
      # words form the new question, with ordinal indicator vectors marking
      # the word position just before each extracted number.
      for word in example.question:
        if (isinstance(word, numbers.Number) or wiki_data.is_date(word)):
          if (not (isinstance(word, numbers.Number)) and
              wiki_data.is_date(word)):
            word = word.replace("X", "").replace("-", "")
          number_found += 1
          if (number_found == 1):
            example.question_number = word
            if (len(example.ordinal_question) > 0):
              example.ordinal_question[len(example.ordinal_question) - 1] = 1.0
            else:
              example.ordinal_question.append(1.0)
          elif (number_found == 2):
            example.question_number_1 = word
            if (len(example.ordinal_question_one) > 0):
              example.ordinal_question_one[len(example.ordinal_question_one) -
                                           1] = 1.0
            else:
              example.ordinal_question_one.append(1.0)
        else:
          new_question.append(word)
          example.ordinal_question.append(0.0)
          example.ordinal_question_one.append(0.0)
      example.question = [
          utility.word_ids[word_lookup(w, utility)] for w in new_question
      ]
      example.question_attention_mask = [0.0] * len(example.question)
      #when the first question number occurs before a word
      example.ordinal_question = example.ordinal_question[0:len(
          example.question)]
      example.ordinal_question_one = example.ordinal_question_one[0:len(
          example.question)]
      #question-padding
      # Left-pad all question-aligned vectors to FLAGS.question_length.
      example.question = [utility.word_ids[utility.dummy_token]] * (
          utility.FLAGS.question_length - len(example.question)
      ) + example.question
      example.question_attention_mask = [-10000.0] * (
          utility.FLAGS.question_length - len(example.question_attention_mask)
      ) + example.question_attention_mask
      example.ordinal_question = [0.0] * (utility.FLAGS.question_length -
                                          len(example.ordinal_question)
                                         ) + example.ordinal_question
      example.ordinal_question_one = [0.0] * (utility.FLAGS.question_length -
                                              len(example.ordinal_question_one)
                                             ) + example.ordinal_question_one
      if (True):
        #number columns and related-padding
        num_cols = len(example.columns)
        start = 0
        for column in example.number_columns:
          if (check_processed_cols(example.processed_number_columns[start],
                                   utility)):
            example.processed_column_mask.append(0.0)
          sorted_index = sorted(
              range(len(example.processed_number_columns[start])),
              key=lambda k: example.processed_number_columns[start][k],
              reverse=True)
          sorted_index = sorted_index + [utility.FLAGS.pad_int] * (
              utility.FLAGS.max_elements - len(sorted_index))
          example.sorted_number_index.append(sorted_index)
          example.columns[start] = column + [utility.FLAGS.pad_int] * (
              utility.FLAGS.max_elements - len(column))
          example.processed_number_columns[start] += [utility.FLAGS.pad_int] * (
              utility.FLAGS.max_elements -
              len(example.processed_number_columns[start]))
          start += 1
          example.column_mask.append(0.0)
        # Pad out to max_number_cols with dummy columns and -1e8 masks.
        for remaining in range(num_cols, utility.FLAGS.max_number_cols):
          example.sorted_number_index.append([utility.FLAGS.pad_int] *
                                             (utility.FLAGS.max_elements))
          example.columns.append([utility.FLAGS.pad_int] *
                                 (utility.FLAGS.max_elements))
          example.processed_number_columns.append([utility.FLAGS.pad_int] *
                                                  (utility.FLAGS.max_elements))
          example.number_exact_match.append([0.0] *
                                            (utility.FLAGS.max_elements))
          example.number_group_by_max.append([0.0] *
                                             (utility.FLAGS.max_elements))
          example.column_mask.append(-100000000.0)
          example.processed_column_mask.append(-100000000.0)
          example.number_column_exact_match.append(0.0)
          example.column_names.append([utility.dummy_token])
        #word column and related-padding
        start = 0
        word_num_cols = len(example.word_columns)
        for column in example.word_columns:
          if (check_processed_cols(example.processed_word_columns[start],
                                   utility)):
            example.processed_word_column_mask.append(0.0)
          sorted_index = sorted(
              range(len(example.processed_word_columns[start])),
              key=lambda k: example.processed_word_columns[start][k],
              reverse=True)
          sorted_index = sorted_index + [utility.FLAGS.pad_int] * (
              utility.FLAGS.max_elements - len(sorted_index))
          example.sorted_word_index.append(sorted_index)
          column = convert_to_int_2d_and_pad(column, utility)
          example.word_columns[start] = column + [[
              utility.word_ids[utility.dummy_token]
          ] * utility.FLAGS.max_entry_length] * (utility.FLAGS.max_elements -
                                                 len(column))
          example.processed_word_columns[start] += [utility.FLAGS.pad_int] * (
              utility.FLAGS.max_elements -
              len(example.processed_word_columns[start]))
          example.word_column_entry_mask.append([0] * len(column) + [
              utility.word_ids[utility.dummy_token]
          ] * (utility.FLAGS.max_elements - len(column)))
          start += 1
          example.word_column_mask.append(0.0)
        # Pad out to max_word_cols with dummy columns and -1e8 masks.
        for remaining in range(word_num_cols, utility.FLAGS.max_word_cols):
          example.sorted_word_index.append([utility.FLAGS.pad_int] *
                                           (utility.FLAGS.max_elements))
          example.word_columns.append([[utility.word_ids[utility.dummy_token]] *
                                       utility.FLAGS.max_entry_length] *
                                      (utility.FLAGS.max_elements))
          example.word_column_entry_mask.append(
              [utility.word_ids[utility.dummy_token]] *
              (utility.FLAGS.max_elements))
          example.word_exact_match.append([0.0] * (utility.FLAGS.max_elements))
          example.word_group_by_max.append([0.0] * (utility.FLAGS.max_elements))
          example.processed_word_columns.append([utility.FLAGS.pad_int] *
                                                (utility.FLAGS.max_elements))
          example.word_column_mask.append(-100000000.0)
          example.processed_word_column_mask.append(-100000000.0)
          example.word_column_exact_match.append(0.0)
          example.word_column_names.append([utility.dummy_token] *
                                           utility.FLAGS.max_entry_length)
      # Record this table in the module-global registry.
      seen_tables[example.table_key] = 1
      #convert column and word column names to integers
      example.column_ids = convert_to_int_2d_and_pad(example.column_names,
                                                     utility)
      example.word_column_ids = convert_to_int_2d_and_pad(
          example.word_column_names, utility)
      # Right-pad per-cell match/group-by rows to max_elements.
      for i_em in range(len(example.number_exact_match)):
        example.number_exact_match[i_em] = example.number_exact_match[
            i_em] + [0.0] * (utility.FLAGS.max_elements -
                             len(example.number_exact_match[i_em]))
        example.number_group_by_max[i_em] = example.number_group_by_max[
            i_em] + [0.0] * (utility.FLAGS.max_elements -
                             len(example.number_group_by_max[i_em]))
      for i_em in range(len(example.word_exact_match)):
        example.word_exact_match[i_em] = example.word_exact_match[
            i_em] + [0.0] * (utility.FLAGS.max_elements -
                             len(example.word_exact_match[i_em]))
        example.word_group_by_max[i_em] = example.word_group_by_max[
            i_em] + [0.0] * (utility.FLAGS.max_elements -
                             len(example.word_group_by_max[i_em]))
      example.exact_match = example.number_exact_match + example.word_exact_match
      example.group_by_max = example.number_group_by_max + example.word_group_by_max
      example.exact_column_match = example.number_column_exact_match + example.word_column_exact_match
      #answer and related mask, padding
      if (example.is_lookup):
        example.answer = example.calc_answer
        example.number_print_answer = example.number_lookup_matrix.tolist()
        example.word_print_answer = example.word_lookup_matrix.tolist()
        for i_answer in range(len(example.number_print_answer)):
          example.number_print_answer[i_answer] = example.number_print_answer[
              i_answer] + [0.0] * (utility.FLAGS.max_elements -
                                   len(example.number_print_answer[i_answer]))
        for i_answer in range(len(example.word_print_answer)):
          example.word_print_answer[i_answer] = example.word_print_answer[
              i_answer] + [0.0] * (utility.FLAGS.max_elements -
                                   len(example.word_print_answer[i_answer]))
        example.number_lookup_matrix = convert_to_bool_and_pad(
            example.number_lookup_matrix, utility)
        example.word_lookup_matrix = convert_to_bool_and_pad(
            example.word_lookup_matrix, utility)
        for remaining in range(num_cols, utility.FLAGS.max_number_cols):
          example.number_lookup_matrix.append([False] *
                                              utility.FLAGS.max_elements)
          example.number_print_answer.append([0.0] * utility.FLAGS.max_elements)
        for remaining in range(word_num_cols, utility.FLAGS.max_word_cols):
          example.word_lookup_matrix.append([False] *
                                            utility.FLAGS.max_elements)
          example.word_print_answer.append([0.0] * utility.FLAGS.max_elements)
        example.print_answer = example.number_print_answer + example.word_print_answer
      else:
        example.answer = example.calc_answer
        example.print_answer = [[0.0] * (utility.FLAGS.max_elements)] * (
            utility.FLAGS.max_number_cols + utility.FLAGS.max_word_cols)
      #question_number masks
      if (example.question_number == -1):
        example.question_number_mask = np.zeros([utility.FLAGS.max_elements])
      else:
        example.question_number_mask = np.ones([utility.FLAGS.max_elements])
      if (example.question_number_1 == -1):
        example.question_number_one_mask = -10000.0
      else:
        example.question_number_one_mask = np.float64(0.0)
      # Drop examples whose columns are too long to fit the fixed shapes.
      if (example.len_col > utility.FLAGS.max_elements):
        continue
      processed_data.append(example)
  return processed_data
def add_special_words(utility):
  """Append the four special tokens to the vocabulary.

  Registers entry-match, column-match, dummy and UNK tokens (in that order,
  preserving the original id assignment) in ``words``, ``word_ids`` and
  ``reverse_word_ids``, and stores the ``*_token_id`` shortcuts.

  Fixes: the second diagnostic print was mislabeled "entry match token"
  (copy-paste bug); it now says "column match token".
  """
  def _register(token):
    # Register `token` and return its newly assigned id.
    utility.words.append(token)
    utility.word_ids[token] = len(utility.word_ids)
    utility.reverse_word_ids[utility.word_ids[token]] = token
    return utility.word_ids[token]

  utility.entry_match_token_id = _register(utility.entry_match_token)
  print("entry match token: ", utility.word_ids[
      utility.entry_match_token], utility.entry_match_token_id)
  utility.column_match_token_id = _register(utility.column_match_token)
  print("column match token: ", utility.word_ids[
      utility.column_match_token], utility.column_match_token_id)
  utility.dummy_token_id = _register(utility.dummy_token)
  _register(utility.unk_token)
def perform_word_cutoff(utility):
  """Remove words rarer than FLAGS.word_cutoff from the vocabulary.

  Special tokens (unk, dummy, entry-match, column-match) are always kept.

  Fixes: replaces Python-2-only ``dict.has_key`` with ``in``, and snapshots
  the keys with ``list(...)`` so popping while iterating does not raise a
  RuntimeError on Python 3.

  NOTE(review): as in the original, ``reverse_word_ids``/``word_count`` are
  left untouched and ids are not compacted -- confirm downstream tolerates
  the resulting gaps.
  """
  if utility.FLAGS.word_cutoff <= 0:
    return
  specials = (utility.unk_token, utility.dummy_token,
              utility.entry_match_token, utility.column_match_token)
  for word in list(utility.word_ids.keys()):
    if (word in utility.word_count and
        utility.word_count[word] < utility.FLAGS.word_cutoff and
        word not in specials):
      utility.word_ids.pop(word)
      utility.words.remove(word)
def word_dropout(question, utility):
  """Randomly replace question word ids with the UNK id (regularization).

  Padding (dummy) tokens are never replaced. Returns the question unchanged
  when word_dropout_prob is 0.

  NOTE(review): a word is replaced when random() > word_dropout_prob, so the
  effective replacement rate is (1 - word_dropout_prob). Under the usual
  reading of "dropout prob" this comparison looks inverted -- confirm the
  intended FLAGS semantics before changing it.
  """
  if (utility.FLAGS.word_dropout_prob > 0.0):
    new_question = []
    for i in range(len(question)):
      if (question[i] != utility.dummy_token_id and
          utility.random.random() > utility.FLAGS.word_dropout_prob):
        new_question.append(utility.word_ids[utility.unk_token])
      else:
        new_question.append(question[i])
    return new_question
  else:
    return question
def generate_feed_dict(data, curr, batch_size, gr, train=False, utility=None):
  #prepare feed dict dictionary
  """Build the feed dict for one batch starting at index `curr`.

  Maps each placeholder on the graph object `gr` to the matching attribute
  gathered from `batch_size` consecutive examples. In training mode the
  question tokens are passed through word_dropout first (needs `utility`).
  """
  batch = [data[curr + j] for j in range(batch_size)]

  def gather(attr):
    # Collect one attribute from every example in the batch, in order.
    return [getattr(ex, attr) for ex in batch]

  feed_dict = {}
  if train:
    feed_dict[gr.batch_question] = [
        word_dropout(ex.question, utility) for ex in batch
    ]
  else:
    feed_dict[gr.batch_question] = gather("question")
  feed_dict[gr.batch_question_attention_mask] = gather(
      "question_attention_mask")
  feed_dict[gr.batch_answer] = gather("answer")
  feed_dict[gr.batch_number_column] = gather("columns")
  feed_dict[gr.batch_processed_number_column] = gather(
      "processed_number_columns")
  feed_dict[gr.batch_processed_sorted_index_number_column] = gather(
      "sorted_number_index")
  feed_dict[gr.batch_processed_sorted_index_word_column] = gather(
      "sorted_word_index")
  # The two question numbers and the second number's mask are shipped as
  # (batch_size, 1) arrays; everything else stays a plain Python list.
  feed_dict[gr.batch_question_number] = np.array(
      gather("question_number")).reshape((batch_size, 1))
  feed_dict[gr.batch_question_number_one] = np.array(
      gather("question_number_1")).reshape((batch_size, 1))
  feed_dict[gr.batch_question_number_mask] = gather("question_number_mask")
  feed_dict[gr.batch_question_number_one_mask] = np.array(
      gather("question_number_one_mask")).reshape((batch_size, 1))
  feed_dict[gr.batch_print_answer] = gather("print_answer")
  feed_dict[gr.batch_exact_match] = gather("exact_match")
  feed_dict[gr.batch_group_by_max] = gather("group_by_max")
  feed_dict[gr.batch_column_exact_match] = gather("exact_column_match")
  feed_dict[gr.batch_ordinal_question] = gather("ordinal_question")
  feed_dict[gr.batch_ordinal_question_one] = gather("ordinal_question_one")
  feed_dict[gr.batch_number_column_mask] = gather("column_mask")
  feed_dict[gr.batch_number_column_names] = gather("column_ids")
  feed_dict[gr.batch_processed_word_column] = gather("processed_word_columns")
  feed_dict[gr.batch_word_column_mask] = gather("word_column_mask")
  feed_dict[gr.batch_word_column_names] = gather("word_column_ids")
  feed_dict[gr.batch_word_column_entry_mask] = gather("word_column_entry_mask")
  return feed_dict
| apache-2.0 |
sophacles/invoke | invoke/config.py | 1 | 19816 | import copy
import imp
import json
import os
from os.path import join, splitext, expanduser
from .vendor import six
if six.PY3:
from .vendor import yaml3 as yaml
else:
from .vendor import yaml2 as yaml
from .env import Environment
from .exceptions import UnknownFileType
from .util import debug
class DataProxy(object):
    """
    Helper class implementing nested dict+attr access for `.Config`.

    Wraps a plain dict in ``self.config`` and exposes it both as attributes
    (``obj.foo``) and as mapping keys (``obj['foo']``); nested dicts are
    re-wrapped on access so the same dual access works at every level.
    """
    # Attributes which get proxied through to inner etc.Config obj.
    # (Includes py2-only dict methods like has_key/iteritems; harmless on
    # py3 since getattr on the dict simply fails for them.)
    _proxies = tuple("""
        clear
        get
        has_key
        items
        iteritems
        iterkeys
        itervalues
        keys
        pop
        popitem
        setdefault
        update
        values
    """.split()) + tuple("__{0}__".format(x) for x in """
        cmp
        contains
        iter
        sizeof
    """.split())
    # Alt constructor used so we aren't getting in the way of Config's real
    # __init__().
    @classmethod
    def from_data(cls, data):
        # Wrap an existing dict without running subclass __init__ logic.
        obj = cls()
        obj.config = data
        return obj
    def __getattr__(self, key):
        # Attribute access falls back to config keys, then to proxied dict
        # methods, then raises a descriptive AttributeError.
        try:
            return self._get(key)
        except KeyError:
            # Proxy most special vars to config for dict protocol.
            if key in self._proxies:
                return getattr(self.config, key)
            # Otherwise, raise useful AttributeError to follow getattr proto.
            err = "No attribute or config key found for {0!r}".format(key)
            attrs = [x for x in dir(self.__class__) if not x.startswith('_')]
            err += "\n\nValid keys: {0!r}".format(list(self.config.keys()))
            err += "\n\nValid real attributes: {0!r}".format(attrs)
            raise AttributeError(err)
    def __hasattr__(self, key):
        # NOTE(review): Python defines no __hasattr__ protocol method, so the
        # interpreter never calls this -- see the __iter__ comment below.
        return key in self.config or key in self._proxies
    def __iter__(self):
        # For some reason Python is ignoring our __hasattr__ when determining
        # whether we support __iter__. BOO
        return iter(self.config)
    def __eq__(self, other):
        # Can't proxy __eq__ because the RHS will always be an obj of the
        # current class, not the proxied-to class, and that causes
        # NotImplemented.
        return self.config == other.config
    def __len__(self):
        # Can't proxy __len__ either apparently? ugh
        return len(self.config)
    def __setitem__(self, key, value):
        # ... or __setitem__? thanks for nothing Python >:(
        self.config[key] = value
    def __delitem__(self, key):
        # OK this is really getting annoying
        del self.config[key]
    def __getitem__(self, key):
        return self._get(key)
    def _get(self, key):
        # Core lookup: nested dicts are wrapped so attr/dict access recurses.
        value = self.config[key]
        if isinstance(value, dict):
            value = DataProxy.from_data(value)
        return value
    def __str__(self):
        return str(self.config)
    def __unicode__(self):
        # Python 2 only: `unicode` is not a builtin on Python 3.
        return unicode(self.config)
    def __repr__(self):
        return repr(self.config)
    def __contains__(self, key):
        return key in self.config
    # TODO: copy()?
class Config(DataProxy):
"""
Invoke's primary configuration handling class.
See :doc:`/concepts/configuration` for details on the configuration system
this class implements, including the :ref:`configuration hierarchy
<config-hierarchy>`. The rest of this class' documentation assumes
familiarity with that document.
**Access**
Configuration values may be accessed using dict syntax::
config['foo']
or attribute syntax::
config.foo
.. warning::
Any "real" attributes (methods, etc) on `.Config` take precedence over
settings values - so if you have top level settings named ``clone``,
``defaults``, etc, you *must* use dict syntax to access it.
Nesting works the same way - dict config values are turned into objects
which honor both the dictionary protocol and the attribute-access method::
config['foo']['bar']
config.foo.bar
**Non-data attributes & methods**
This class implements the entire dictionary protocol: methods such as
``keys``, ``values``, ``items``, ``pop`` and so forth should all function
as they do on regular dicts.
Individual configuration 'levels' and their source locations (if
applicable) may be accessed via attributes such as
`.project`/`.project_path` and so forth - see the documentation for
individual members below for details.
**Lifecycle**
On initialization, `.Config` will seek out and load various configuration
files from disk, then `.merge` the results with other in-memory sources
such as defaults and CLI overrides.
Typically, the `.load_collection` and `.load_shell_env` methods are called
after initialization - `.load_collection` prior to each task invocation
(because collection-level config data may change depending on the task) and
`.load_shell_env` as the final step (as it needs the rest of the config to
know which env vars are valid to load).
Once users are given a copy of the configuration (usually via their task's
`.Context` argument) all the above loading (& a final `.merge`) has been
performed and they are free to modify it as they would any other regular
dictionary.
.. warning::
Calling `.merge` after manually modifying `.Config` objects may
overwrite those manual changes, since it overwrites the core config
dict with data from per-source attributes like `.defaults` or `.user`.
"""
    def __init__(self, defaults=None, overrides=None, system_prefix=None,
                 user_prefix=None, project_home=None, env_prefix=None,
                 runtime_path=None):
        """
        Creates a new config object.
        :param dict defaults:
            A dict containing default (lowest level) config data. Default:
            ``{}``.
        :param dict overrides:
            A dict containing override-level config data. Default: ``{}``.
        :param str system_prefix:
            Path & partial filename for the global config file location. Should
            include everything but the dot & file extension.
            Default: ``/etc/invoke`` (e.g. ``/etc/invoke.yaml`` or
            ``/etc/invoke.json``).
        :param str user_prefix:
            Like ``system_prefix`` but for the per-user config file.
            Default: ``~/.invoke`` (e.g. ``~/.invoke.yaml``).
        :param str project_home:
            Optional directory path location of the currently loaded
            `.Collection` (as loaded by `.Loader`). When non-empty, will
            trigger seeking of per-project config files in this location +
            ``invoke.(yaml|json|py)``.
        :param str env_prefix:
            Environment variable seek prefix; optional, defaults to ``None``.
            When not ``None``, only environment variables beginning with this
            value will be loaded. If it is set, the keys will have the prefix
            stripped out before processing, so e.g. ``env_prefix='INVOKE_'``
            means users must set ``INVOKE_MYSETTING`` in the shell to affect
            the ``"mysetting"`` setting.
        :param str runtime_path:
            Optional file path to a runtime configuration file.
            Used to fill the penultimate slot in the config hierarchy. Should
            be a full file path to an existing file, not a directory path, or a
            prefix.
        .. note::
            Instantiation ends by calling `.load_files` and then `.merge`,
            so file-based config sources are sought & merged immediately.
        """
        # Config file suffixes to search, in preference order.
        self.file_suffixes = ('yaml', 'json', 'py')
        # Technically an implementation detail - do not expose in public API.
        # Stores merged configs and is accessed via DataProxy.
        self.config = {}
        #: Default configuration values, typically hardcoded in the
        #: CLI/execution machinery.
        self.defaults = {} if defaults is None else defaults
        #: Collection-driven config data, gathered from the collection tree
        #: containing the currently executing task.
        self.collection = {}
        #: Path prefix searched for the system config file.
        self.system_prefix = ('/etc/invoke' if system_prefix is None
                              else system_prefix)
        #: Path to loaded system config file, if any.
        self.system_path = None
        #: Whether the system config file has been loaded or not (or ``None``
        #: if no loading has been attempted yet.)
        self.system_found = None
        #: Data loaded from the system config file.
        self.system = {}
        #: Path prefix searched for per-user config files.
        self.user_prefix = '~/.invoke' if user_prefix is None else user_prefix
        #: Path to loaded user config file, if any.
        self.user_path = None
        #: Whether the user config file has been loaded or not (or ``None``
        #: if no loading has been attempted yet.)
        self.user_found = None
        #: Data loaded from the per-user config file.
        self.user = {}
        #: Parent directory of the current root tasks file, if applicable.
        self.project_home = project_home
        # And a normalized prefix version not really publicly exposed
        # (e.g. <project_home>/invoke, later combined with each file suffix).
        self.project_prefix = None
        if self.project_home is not None:
            self.project_prefix = join(project_home, 'invoke')
        #: Path to loaded per-project config file, if any.
        self.project_path = None
        #: Whether the project config file has been loaded or not (or ``None``
        #: if no loading has been attempted yet.)
        self.project_found = None
        #: Data loaded from the per-project config file.
        self.project = {}
        #: Environment variable name prefix
        # TODO: make this INVOKE_ and update tests to account?
        self.env_prefix = '' if env_prefix is None else env_prefix
        #: Config data loaded from the shell environment.
        self.env = {}
        #: Path to the user-specified runtime config file.
        self.runtime_path = runtime_path
        #: Data loaded from the runtime config file.
        self.runtime = {}
        #: Whether the runtime config file has been loaded or not (or ``None``
        #: if no loading has been attempted yet.)
        self.runtime_found = None
        #: Overrides - highest possible config level. Typically filled in from
        #: command-line flags.
        self.overrides = {} if overrides is None else overrides
        # Perform initial load & merge.
        self.load_files()
        self.merge()
def load_shell_env(self):
"""
Load values from the shell environment.
`.load_shell_env` is intended for execution late in a `.Config`
object's lifecycle, once all other sources have been merged. Loading
from the shell is not terrifically expensive, but must be done at a
specific point in time to ensure the "only known config keys are loaded
from the env" behavior works correctly.
See :ref:`env-vars` for details on this design decision and other info
re: how environment variables are scanned and loaded.
"""
# Force merge of existing data to ensure we have an up to date picture
debug("Running pre-merge for shell env loading...")
self.merge()
debug("Done with pre-merge.")
loader = Environment(config=self.config, prefix=self.env_prefix)
self.env = loader.load()
debug("Loaded shell environment, triggering final merge")
self.merge()
def load_collection(self, data):
"""
Update collection-driven config data.
`.load_collection` is intended for use by the core task execution
machinery, which is responsible for obtaining per-task
collection-driven data. See :ref:`collection-configuration` for
details.
.. note:: This method triggers `.merge` after it runs.
"""
self.collection = data
self.merge()
def clone(self):
"""
Return a copy of this configuration object.
The new object will be identical in terms of configured sources and any
loaded/merged data, but will be a distinct object with no shared
mutable state.
"""
new = Config()
for name in """
config
defaults
collection
system_prefix
system_path
system_found
system
user_prefix
user_path
user_found
user
project_home
project_prefix
project_path
project_found
project
env_prefix
env
runtime_path
runtime_found
runtime
overrides
""".split():
setattr(new, name, copy.deepcopy(getattr(self, name)))
return new
def load_files(self):
"""
Load any unloaded/un-searched-for config file sources.
Specifically, any file sources whose ``_found`` values are ``None``
will be sought and loaded if found; if their ``_found`` value is non
``None`` (e.g. ``True`` or ``False``) they will be skipped. Typically
this means this method is idempotent and becomes a no-op after the
first run.
Execution of this method does not imply merging; use `.merge` for that.
"""
self._load_file(prefix='system')
self._load_file(prefix='user')
self._load_file(prefix='project')
self._load_file(prefix='runtime', absolute=True)
    def _load_file(self, prefix, absolute=False):
        """
        Seek & load a single config file source into ``self.<prefix>``.
        :param str prefix:
            Source name ('system', 'user', 'project' or 'runtime'); used to
            derive the ``<prefix>_found`` / ``<prefix>_path`` /
            ``<prefix>_prefix`` attribute names consulted & updated here.
        :param bool absolute:
            When ``True``, ``self.<prefix>_path`` is treated as a complete
            file path; otherwise candidate paths are derived from
            ``self.<prefix>_prefix`` plus each suffix in
            ``self.file_suffixes``.
        """
        # Setup: attribute names for the found-flag, path, and data slots.
        found = "{0}_found".format(prefix)
        path = "{0}_path".format(prefix)
        data = prefix
        # Short-circuit if loading appears to have occurred already
        if getattr(self, found) is not None:
            return
        # Moar setup
        if absolute:
            absolute_path = getattr(self, path)
            # None -> expected absolute path but none set, short circuit
            if absolute_path is None:
                return
            paths = [absolute_path]
        else:
            path_prefix = getattr(self, "{0}_prefix".format(prefix))
            # Short circuit if loading seems unnecessary (eg for project config
            # files when not running out of a project)
            if path_prefix is None:
                return
            paths = [
                '.'.join((path_prefix, x))
                for x in self.file_suffixes
            ]
        # Poke 'em
        for filepath in paths:
            # Normalize
            filepath = expanduser(filepath)
            try:
                try:
                    # Dispatch on extension to _load_yaml/_load_json/_load_py;
                    # a missing loader method means an unsupported file type.
                    type_ = splitext(filepath)[1].lstrip('.')
                    loader = getattr(self, "_load_{0}".format(type_))
                except AttributeError as e:
                    msg = "Config files of type {0!r} (from file {1!r}) are not supported! Please use one of: {2!r}"
                    raise UnknownFileType(msg.format(
                        type_, filepath, self.file_suffixes))
                # Store data, the path it was found at, and fact that it was
                # found
                setattr(self, data, loader(filepath))
                setattr(self, path, filepath)
                setattr(self, found, True)
                # First suffix hit wins; skip lower-preference candidates.
                break
            # Typically means 'no such file', so just note & skip past.
            except IOError as e:
                # TODO: is there a better / x-platform way to detect this?
                if "No such file" in e.strerror:
                    err = "Didn't see any {0}, skipping."
                    debug(err.format(filepath))
                else:
                    raise
        # Still None -> no suffixed paths were found, record this fact
        if getattr(self, path) is None:
            setattr(self, found, False)
def merge(self):
"""
Merge all config sources, in order, to `.config`.
Does not imply loading of config files or environment variables; use
`.load_files` and/or `.load_shell_env` beforehand instead.
"""
debug("Merging config sources in order...")
debug("Defaults: {0!r}".format(self.defaults))
merge_dicts(self.config, self.defaults)
debug("Collection-driven: {0!r}".format(self.collection))
merge_dicts(self.config, self.collection)
self._merge_file('system', "System-wide")
self._merge_file('user', "Per-user")
self._merge_file('project', "Per-project")
debug("Environment variable config: {0!r}".format(self.env))
merge_dicts(self.config, self.env)
self._merge_file('runtime', "Runtime")
debug("Overrides: {0!r}".format(self.overrides))
merge_dicts(self.config, self.overrides)
def _merge_file(self, name, desc):
# Setup
desc += " config file" # yup
found = getattr(self, "{0}_found".format(name))
path = getattr(self, "{0}_path".format(name))
data = getattr(self, name)
# None -> no loading occurred yet
if found is None:
debug("{0} has not been loaded yet, skipping".format(desc))
# True -> hooray
elif found:
debug("{0} ({1}): {2!r}".format(desc, path, data))
merge_dicts(self.config, data)
# False -> did try, did not succeed
else:
# TODO: how to preserve what was tried for each case but only for
# the negative? Just a branch here based on 'name'?
debug("{0} not found, skipping".format(desc))
@property
def paths(self):
"""
An iterable of all successfully loaded config file paths.
No specific order.
"""
paths = []
for prefix in "system user project runtime".split():
value = getattr(self, "{0}_path".format(prefix))
if value is not None:
paths.append(value)
return paths
def _load_yaml(self, path):
with open(path) as fd:
return yaml.load(fd)
def _load_json(self, path):
with open(path) as fd:
return json.load(fd)
def _load_py(self, path):
data = {}
for key, value in six.iteritems(vars(imp.load_source('mod', path))):
if key.startswith('__'):
continue
data[key] = value
return data
def merge_dicts(base, updates):
    """
    Recursively merge dict ``updates`` into dict ``base``, mutating ``base``.

    * When both sides hold a dict for the same key, the dicts are merged
      recursively.
    * When exactly one side holds a dict (e.g. ``{'foo': 5}`` vs
      ``{'foo': {'bar': 5}}``) the values are irreconcilable and an
      exception is raised.
    * Otherwise the value from ``updates`` wins.
    """
    for key, incoming in updates.items():
        if key not in base:
            # Brand-new key: take the incoming value as-is.
            base[key] = incoming
            continue
        existing = base[key]
        incoming_is_dict = isinstance(incoming, dict)
        existing_is_dict = isinstance(existing, dict)
        if incoming_is_dict and existing_is_dict:
            merge_dicts(existing, incoming)
        elif incoming_is_dict or existing_is_dict:
            # dict on one side only -> cannot merge cleanly.
            raise _merge_error(existing, incoming)
        else:
            base[key] = incoming
def _merge_error(orig, new_):
    """Build the AmbiguousMergeError describing an irreconcilable pair."""
    message = "Can't cleanly merge {0} with {1}".format(
        _format_mismatch(orig), _format_mismatch(new_))
    return AmbiguousMergeError(message)
def _format_mismatch(x):
    """Render ``x`` as '<type> (<repr>)' for merge-error messages."""
    return "%s (%r)" % (type(x), x)
| bsd-2-clause |
gestrem/Arduino | arduino-core/src/processing/app/i18n/python/requests/packages/urllib3/_collections.py | 309 | 2903 | # urllib3/_collections.py
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from collections import MutableMapping
from threading import Lock
try: # Python 2.7+
from collections import OrderedDict
except ImportError:
from .packages.ordered_dict import OrderedDict
__all__ = ['RecentlyUsedContainer']
_Null = object()
class RecentlyUsedContainer(MutableMapping):
    """
    Provides a thread-safe dict-like container which maintains up to
    ``maxsize`` keys while throwing away the least-recently-used keys beyond
    ``maxsize``.
    :param maxsize:
        Maximum number of recent elements to retain.
    :param dispose_func:
        Optional callback: every time an item is evicted from the
        container, ``dispose_func(value)`` is called with the evicted value.
    """
    #: Backing mapping class; an ordered dict so eviction order == insertion
    #: order (oldest entries sit at the front).
    ContainerCls = OrderedDict
    def __init__(self, maxsize=10, dispose_func=None):
        self._maxsize = maxsize
        self.dispose_func = dispose_func
        self._container = self.ContainerCls()
        # Guards every access to _container; OrderedDict itself is not
        # thread-safe.
        self._lock = Lock()
    def __getitem__(self, key):
        # Re-insert the item, moving it to the end of the eviction line.
        with self._lock:
            item = self._container.pop(key)
            self._container[key] = item
            return item
    def __setitem__(self, key, value):
        evicted_value = _Null
        with self._lock:
            # Possibly evict the existing value of 'key'
            evicted_value = self._container.get(key, _Null)
            self._container[key] = value
            # If we didn't evict an existing value, we might have to evict the
            # least recently used item from the beginning of the container.
            if len(self._container) > self._maxsize:
                _key, evicted_value = self._container.popitem(last=False)
        # Dispose outside the lock so a slow/re-entrant callback cannot
        # block or deadlock other container users.
        if self.dispose_func and evicted_value is not _Null:
            self.dispose_func(evicted_value)
    def __delitem__(self, key):
        with self._lock:
            value = self._container.pop(key)
        if self.dispose_func:
            self.dispose_func(value)
    def __len__(self):
        with self._lock:
            return len(self._container)
    def __iter__(self):
        raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.')
    def clear(self):
        with self._lock:
            # Copy pointers to all values, then wipe the mapping
            # under Python 2, this copies the list of values twice :-|
            values = list(self._container.values())
            self._container.clear()
        if self.dispose_func:
            for value in values:
                self.dispose_func(value)
    def keys(self):
        with self._lock:
            return self._container.keys()
| lgpl-2.1 |
Filechaser/nzbToMedia | libs/subliminal/providers/addic7ed.py | 25 | 11049 | # -*- coding: utf-8 -*-
import logging
import re
from babelfish import Language, language_converters
from guessit import guessit
from requests import Session
from . import ParserBeautifulSoup, Provider
from .. import __short_version__
from ..cache import SHOW_EXPIRATION_TIME, region
from ..exceptions import AuthenticationError, ConfigurationError, DownloadLimitExceeded, TooManyRequests
from ..score import get_equivalent_release_groups
from ..subtitle import Subtitle, fix_line_ending, guess_matches
from ..utils import sanitize, sanitize_release_group
from ..video import Episode
logger = logging.getLogger(__name__)
language_converters.register('addic7ed = subliminal.converters.addic7ed:Addic7edConverter')
#: Series header parsing regex
series_year_re = re.compile(r'^(?P<series>[ \w\'.:(),&!?-]+?)(?: \((?P<year>\d{4})\))?$')
class Addic7edSubtitle(Subtitle):
    """Addic7ed Subtitle."""
    provider_name = 'addic7ed'
    def __init__(self, language, hearing_impaired, page_link, series, season, episode, title, year, version,
                 download_link):
        super(Addic7edSubtitle, self).__init__(language, hearing_impaired, page_link)
        self.series = series
        self.season = season
        self.episode = episode
        self.title = title
        self.year = year
        # Release/version string as scraped from the site; used below for
        # release-group/resolution/format matching.
        self.version = version
        self.download_link = download_link
    @property
    def id(self):
        # The download link is unique per subtitle and doubles as its id.
        return self.download_link
    def get_matches(self, video):
        """Return the set of attribute names on which this subtitle matches ``video``."""
        matches = set()
        # series
        if video.series and sanitize(self.series) == sanitize(video.series):
            matches.add('series')
        # season
        if video.season and self.season == video.season:
            matches.add('season')
        # episode
        if video.episode and self.episode == video.episode:
            matches.add('episode')
        # title
        if video.title and sanitize(self.title) == sanitize(video.title):
            matches.add('title')
        # year -- by operator precedence this parses as
        # (video.original_series and self.year is None) or
        # (video.year and video.year == self.year)
        if video.original_series and self.year is None or video.year and video.year == self.year:
            matches.add('year')
        # release_group
        if (video.release_group and self.version and
                any(r in sanitize_release_group(self.version)
                    for r in get_equivalent_release_groups(sanitize_release_group(video.release_group)))):
            matches.add('release_group')
        # resolution
        if video.resolution and self.version and video.resolution in self.version.lower():
            matches.add('resolution')
        # format
        if video.format and self.version and video.format.lower() in self.version.lower():
            matches.add('format')
        # other properties, via guessit parsing of the version string
        matches |= guess_matches(video, guessit(self.version), partial=True)
        return matches
class Addic7edProvider(Provider):
    """Addic7ed Provider.
    Scrapes www.addic7ed.com for TV episode subtitles; optionally logs in
    with a user account (``username`` and ``password`` must be supplied
    together).
    """
    languages = {Language('por', 'BR')} | {Language(l) for l in [
        'ara', 'aze', 'ben', 'bos', 'bul', 'cat', 'ces', 'dan', 'deu', 'ell', 'eng', 'eus', 'fas', 'fin', 'fra', 'glg',
        'heb', 'hrv', 'hun', 'hye', 'ind', 'ita', 'jpn', 'kor', 'mkd', 'msa', 'nld', 'nor', 'pol', 'por', 'ron', 'rus',
        'slk', 'slv', 'spa', 'sqi', 'srp', 'swe', 'tha', 'tur', 'ukr', 'vie', 'zho'
    ]}
    video_types = (Episode,)
    server_url = 'http://www.addic7ed.com/'
    def __init__(self, username=None, password=None):
        # Credentials are all-or-nothing; the condition parses as
        # (username set and password missing) or (username missing and
        # password set).
        if username is not None and password is None or username is None and password is not None:
            raise ConfigurationError('Username and password must be specified')
        self.username = username
        self.password = password
        self.logged_in = False
    def initialize(self):
        """Create the HTTP session and, when credentials were given, log in."""
        self.session = Session()
        self.session.headers['User-Agent'] = 'Subliminal/%s' % __short_version__
        # login
        if self.username is not None and self.password is not None:
            logger.info('Logging in')
            data = {'username': self.username, 'password': self.password, 'Submit': 'Log in'}
            r = self.session.post(self.server_url + 'dologin.php', data, allow_redirects=False, timeout=10)
            # A successful login redirects (302); anything else is treated
            # as bad credentials.
            if r.status_code != 302:
                raise AuthenticationError(self.username)
            logger.debug('Logged in')
            self.logged_in = True
    def terminate(self):
        """Log out of the site (when logged in) and close the session."""
        # logout
        if self.logged_in:
            logger.info('Logging out')
            r = self.session.get(self.server_url + 'logout.php', timeout=10)
            r.raise_for_status()
            logger.debug('Logged out')
            self.logged_in = False
        self.session.close()
    @region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME)
    def _get_show_ids(self):
        """Get the ``dict`` of show ids per series by querying the `shows.php` page.
        :return: show id per series, lower case and without quotes.
        :rtype: dict
        """
        # get the show page
        logger.info('Getting show ids')
        r = self.session.get(self.server_url + 'shows.php', timeout=10)
        r.raise_for_status()
        soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser'])
        # populate the show ids
        show_ids = {}
        for show in soup.select('td.version > h3 > a[href^="/show/"]'):
            # href is '/show/<id>' -> strip the 6-character '/show/' prefix
            show_ids[sanitize(show.text)] = int(show['href'][6:])
        logger.debug('Found %d show ids', len(show_ids))
        return show_ids
    @region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME)
    def _search_show_id(self, series, year=None):
        """Search the show id from the `series` and `year`.
        :param str series: series of the episode.
        :param year: year of the series, if any.
        :type year: int
        :return: the show id, if found.
        :rtype: int
        """
        # addic7ed doesn't support search with quotes
        series = series.replace('\'', ' ')
        # build the params
        series_year = '%s %d' % (series, year) if year is not None else series
        params = {'search': series_year, 'Submit': 'Search'}
        # make the search
        logger.info('Searching show ids with %r', params)
        r = self.session.get(self.server_url + 'search.php', params=params, timeout=10)
        r.raise_for_status()
        # 304 is not an error status, so raise_for_status() won't trip on
        # it; the site replies with it when throttling searches.
        if r.status_code == 304:
            raise TooManyRequests()
        soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser'])
        # get the suggestion
        suggestion = soup.select('span.titulo > a[href^="/show/"]')
        if not suggestion:
            logger.warning('Show id not found: no suggestion')
            return None
        if not sanitize(suggestion[0].i.text.replace('\'', ' ')) == sanitize(series_year):
            logger.warning('Show id not found: suggestion does not match')
            return None
        show_id = int(suggestion[0]['href'][6:])
        logger.debug('Found show id %d', show_id)
        return show_id
    def get_show_id(self, series, year=None, country_code=None):
        """Get the best matching show id for `series`, `year` and `country_code`.
        First search in the result of :meth:`_get_show_ids` and fallback on a search with :meth:`_search_show_id`.
        :param str series: series of the episode.
        :param year: year of the series, if any.
        :type year: int
        :param country_code: country code of the series, if any.
        :type country_code: str
        :return: the show id, if found.
        :rtype: int
        """
        series_sanitized = sanitize(series).lower()
        show_ids = self._get_show_ids()
        show_id = None
        # attempt with country
        if not show_id and country_code:
            logger.debug('Getting show id with country')
            show_id = show_ids.get('%s %s' % (series_sanitized, country_code.lower()))
        # attempt with year
        if not show_id and year:
            logger.debug('Getting show id with year')
            show_id = show_ids.get('%s %d' % (series_sanitized, year))
        # attempt clean
        if not show_id:
            logger.debug('Getting show id')
            show_id = show_ids.get(series_sanitized)
        # search as last resort
        if not show_id:
            logger.warning('Series not found in show ids')
            show_id = self._search_show_id(series)
        return show_id
    def query(self, series, season, year=None, country=None):
        """List all completed subtitles for one season of a show.
        :param str series: series name.
        :param int season: season number.
        :return: list of :class:`Addic7edSubtitle`, empty when the show id
            cannot be resolved.
        """
        # get the show id
        show_id = self.get_show_id(series, year, country)
        if show_id is None:
            logger.error('No show id found for %r (%r)', series, {'year': year, 'country': country})
            return []
        # get the page of the season of the show
        logger.info('Getting the page of show id %d, season %d', show_id, season)
        r = self.session.get(self.server_url + 'show/%d' % show_id, params={'season': season}, timeout=10)
        r.raise_for_status()
        # see _search_show_id: 304 signals request throttling
        if r.status_code == 304:
            raise TooManyRequests()
        soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser'])
        # Re-parse canonical series name/year from the page header; the
        # last 10 characters of the header text are dropped before matching.
        match = series_year_re.match(soup.select('#header font')[0].text.strip()[:-10])
        series = match.group('series')
        year = int(match.group('year')) if match.group('year') else None
        # loop over subtitle rows
        subtitles = []
        for row in soup.select('tr.epeven'):
            cells = row('td')
            # ignore incomplete subtitles
            status = cells[5].text
            if status != 'Completed':
                logger.debug('Ignoring subtitle with status %s', status)
                continue
            # read the item
            language = Language.fromaddic7ed(cells[3].text)
            hearing_impaired = bool(cells[6].text)
            page_link = self.server_url + cells[2].a['href'][1:]
            season = int(cells[0].text)
            episode = int(cells[1].text)
            title = cells[2].text
            version = cells[4].text
            download_link = cells[9].a['href'][1:]
            subtitle = Addic7edSubtitle(language, hearing_impaired, page_link, series, season, episode, title, year,
                                        version, download_link)
            logger.debug('Found subtitle %r', subtitle)
            subtitles.append(subtitle)
        return subtitles
    def list_subtitles(self, video, languages):
        """Return season query results filtered to ``video``'s episode & ``languages``."""
        return [s for s in self.query(video.series, video.season, video.year)
                if s.language in languages and s.episode == video.episode]
    def download_subtitle(self, subtitle):
        """Download ``subtitle`` and attach its content (line endings fixed)."""
        # download the subtitle
        logger.info('Downloading subtitle %r', subtitle)
        r = self.session.get(self.server_url + subtitle.download_link, headers={'Referer': subtitle.page_link},
                             timeout=10)
        r.raise_for_status()
        # detect download limit exceeded: once the quota is hit the server
        # sends back an HTML page instead of the subtitle file
        if r.headers['Content-Type'] == 'text/html':
            raise DownloadLimitExceeded
        subtitle.content = fix_line_ending(r.content)
| gpl-3.0 |
sylvestre/bedrock | bin/run-db-download.py | 5 | 2276 | #!/usr/bin/env python
import os
import sys
import requests
from db_s3_utils import (
get_db_checksum,
get_git_sha,
get_prev_db_data,
set_db_data,
JSON_DATA_FILE,
DB_FILE,
)
BUCKET_NAME = os.getenv('AWS_DB_S3_BUCKET', 'bedrock-db-dev')
REGION_NAME = os.getenv('AWS_DB_REGION', 'us-west-2')
S3_BASE_URL = 'https://s3-{}.amazonaws.com/{}'.format(
REGION_NAME,
BUCKET_NAME,
)
def get_file_url(filename):
    """Return the full S3 URL for ``filename`` under the configured bucket."""
    return '{0}/{1}'.format(S3_BASE_URL, filename)
def download_db_info():
    """Fetch & parse the remote DB metadata JSON; return None on any failure."""
    try:
        response = requests.get(get_file_url(JSON_DATA_FILE))
        response.raise_for_status()
    except requests.RequestException:
        # Network or HTTP-level failure.
        return None
    try:
        return response.json()
    except ValueError:
        # Body wasn't valid JSON.
        return None
def download_db_file(filename):
    """Stream the remote object ``filename`` into a local file of the same name."""
    response = requests.get(get_file_url(filename), stream=True)
    with open(filename, 'wb') as out:
        for block in response.iter_content(chunk_size=128):
            out.write(block)
def update_live_db_file(filename):
    # Move the freshly downloaded file into place as the live database.
    # NOTE(review): os.rename fails on Windows when DB_FILE already exists;
    # os.replace (py3.3+) is the atomic cross-platform overwrite -- confirm
    # the supported platforms/interpreter before changing.
    os.rename(filename, DB_FILE)
def main(args):
    """
    Download & install the remote DB when it differs from the local copy.
    Flags (anywhere in ``args``): ``--force`` skips the up-to-date checks;
    ``--ignore-git`` skips the git-sha match requirement.
    Returns 0 on success or no-op; returns an error string (which
    ``sys.exit`` treats as a nonzero exit with a message) on failure.
    """
    force = '--force' in args
    ignore_git = '--ignore-git' in args
    db_info = download_db_info()
    if not db_info:
        return 'ERROR: Could not get database info'
    if not force:
        # Skip the download when the local DB already matches, is newer,
        # or was built for a different git revision.
        prev_data = get_prev_db_data()
        if prev_data and prev_data['checksum'] == db_info['checksum']:
            print('Checksums match. No update required.')
            return 0
        if prev_data and prev_data['updated'] > db_info['updated']:
            print('Remote database older than local. No update required.')
            return 0
        if not ignore_git:
            git_sha = get_git_sha()
            if git_sha != db_info['git_sha']:
                print('Git hashes do not match. No update required.')
                return 0
    new_db_file = db_info['file_name']
    download_db_file(new_db_file)
    # Verify download integrity before swapping the live DB file.
    checksum = get_db_checksum(new_db_file)
    if checksum == db_info['checksum']:
        update_live_db_file(new_db_file)
        set_db_data(db_info)
        print('Database successfully updated')
        return 0
    os.remove(new_db_file)
    return 'ERROR: Checksums do not match. Bad db download. Aborting.'
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| mpl-2.0 |
nccgroup/umap | USBEndpoint.py | 1 | 3016 | # USBEndpoint.py
#
# Contains class definition for USBEndpoint.
class USBEndpoint:
    """A USB endpoint and its descriptor, with fuzzer testcase overrides."""
    # bEndpointAddress direction bit values.
    direction_out = 0x00
    direction_in = 0x01
    # bmAttributes transfer-type field values.
    transfer_type_control = 0x00
    transfer_type_isochronous = 0x01
    transfer_type_bulk = 0x02
    transfer_type_interrupt = 0x03
    # bmAttributes synchronization-type field values.
    sync_type_none = 0x00
    sync_type_async = 0x01
    sync_type_adaptive = 0x02
    sync_type_synchronous = 0x03
    # bmAttributes usage-type field values.
    usage_type_data = 0x00
    usage_type_feedback = 0x01
    usage_type_implicit_feedback = 0x02

    def __init__(self, maxusb_app, number, direction, transfer_type, sync_type,
            usage_type, max_packet_size, interval, handler):
        self.maxusb_app = maxusb_app
        self.number = number
        self.direction = direction
        self.transfer_type = transfer_type
        self.sync_type = sync_type
        self.usage_type = usage_type
        self.max_packet_size = max_packet_size
        self.interval = interval
        self.handler = handler
        # Back-reference filled in later via set_interface().
        self.interface = None
        # bRequest 1 == CLEAR_FEATURE; all other requests are unhandled here.
        self.request_handlers = {1: self.handle_clear_feature_request}

    def handle_clear_feature_request(self, req):
        # In mode 2 the response is suppressed; otherwise ACK the request
        # with a zero-length packet on endpoint 0.
        if self.maxusb_app.mode != 2:
            self.interface.configuration.device.maxusb_app.send_on_endpoint(0, b'')

    def set_interface(self, interface):
        # Record which interface this endpoint belongs to.
        self.interface = interface

    def _testcase_value(self, field, default):
        # The fuzzer can override a single descriptor field per testcase:
        # testcase[1] names the field, testcase[2] holds the mutated value.
        if self.maxusb_app.testcase[1] == field:
            return self.maxusb_app.testcase[2]
        return default

    # see Table 9-13 of USB 2.0 spec (pdf page 297)
    def get_descriptor(self):
        """Build & return this endpoint's 7-byte descriptor as a bytearray."""
        address = (self.number & 0x0f) | (self.direction << 7)
        attributes = ((self.transfer_type & 0x03)
                      | ((self.sync_type & 0x03) << 2)
                      | ((self.usage_type & 0x03) << 4))
        bLength = self._testcase_value("end_bLength", 7)
        bDescriptorType = self._testcase_value("end_bDescriptorType", 5)
        bEndpointAddress = self._testcase_value("end_bEndpointAddress", address)
        wMaxPacketSize = self._testcase_value("end_wMaxPacketSize", self.max_packet_size)
        # NOTE(review): wMaxPacketSize is emitted high byte first below, but
        # USB descriptors are little-endian (LSB first per USB 2.0 Table
        # 9-13) -- confirm whether callers compensate before changing.
        return bytearray([
            bLength,              # length of descriptor in bytes
            bDescriptorType,      # descriptor type 5 == endpoint
            bEndpointAddress,
            attributes,
            (wMaxPacketSize >> 8) & 0xff,
            wMaxPacketSize & 0xff,
            self.interval
        ])
| agpl-3.0 |
eevol/django-cities-light | cities_light/south_migrations/0008_add_region_geoname_id.py | 2 | 4208 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        # South schema migration (auto-generated): add the nullable integer
        # column 'geoname_id' to the cities_light_region table.
        # Adding field 'Region.geoname_id'
        db.add_column('cities_light_region', 'geoname_id',
                      self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
                      keep_default=False)
    def backwards(self, orm):
        # Reverse of forwards(): drop the column added by this migration.
        # Deleting field 'Region.geoname_id'
        db.delete_column('cities_light_region', 'geoname_id')
models = {
'cities_light.city': {
'Meta': {'unique_together': "(('region', 'name'),)", 'object_name': 'City'},
'alternate_names': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cities_light.Country']"}),
'geoname_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'name_ascii': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cities_light.Region']", 'null': 'True'}),
'search_names': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '4000', 'db_index': 'True', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None'})
},
'cities_light.country': {
'Meta': {'object_name': 'Country'},
'code2': ('django.db.models.fields.CharField', [], {'max_length': '2', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'code3': ('django.db.models.fields.CharField', [], {'max_length': '3', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'continent': ('django.db.models.fields.CharField', [], {'max_length': '2', 'db_index': 'True'}),
'geoname_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'name_ascii': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None'}),
'tld': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '5', 'blank': 'True'})
},
'cities_light.region': {
'Meta': {'unique_together': "(('country', 'name'),)", 'object_name': 'Region'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cities_light.Country']"}),
'geoname_code': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'geoname_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'name_ascii': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None'})
}
}
complete_apps = ['cities_light'] | mit |
wikimedia/operations-debs-python-kafka | test/record/test_util.py | 8 | 3754 | import struct
import pytest
from kafka.record import util
# Known-good varint vectors as (wire bytes, decoded value) pairs. Values use
# zigzag encoding (0 -> 0x00, -1 -> 0x01, 1 -> 0x02, ...), so each positive
# value appears alongside its negative sibling at every width boundary.
varint_data = [
    (b"\x00", 0),
    (b"\x01", -1),
    (b"\x02", 1),
    (b"\x7E", 63),
    (b"\x7F", -64),
    (b"\x80\x01", 64),
    (b"\x81\x01", -65),
    (b"\xFE\x7F", 8191),
    (b"\xFF\x7F", -8192),
    (b"\x80\x80\x01", 8192),
    (b"\x81\x80\x01", -8193),
    (b"\xFE\xFF\x7F", 1048575),
    (b"\xFF\xFF\x7F", -1048576),
    (b"\x80\x80\x80\x01", 1048576),
    (b"\x81\x80\x80\x01", -1048577),
    (b"\xFE\xFF\xFF\x7F", 134217727),
    (b"\xFF\xFF\xFF\x7F", -134217728),
    (b"\x80\x80\x80\x80\x01", 134217728),
    (b"\x81\x80\x80\x80\x01", -134217729),
    (b"\xFE\xFF\xFF\xFF\x7F", 17179869183),
    (b"\xFF\xFF\xFF\xFF\x7F", -17179869184),
    (b"\x80\x80\x80\x80\x80\x01", 17179869184),
    (b"\x81\x80\x80\x80\x80\x01", -17179869185),
    (b"\xFE\xFF\xFF\xFF\xFF\x7F", 2199023255551),
    (b"\xFF\xFF\xFF\xFF\xFF\x7F", -2199023255552),
    (b"\x80\x80\x80\x80\x80\x80\x01", 2199023255552),
    (b"\x81\x80\x80\x80\x80\x80\x01", -2199023255553),
    (b"\xFE\xFF\xFF\xFF\xFF\xFF\x7F", 281474976710655),
    (b"\xFF\xFF\xFF\xFF\xFF\xFF\x7F", -281474976710656),
    (b"\x80\x80\x80\x80\x80\x80\x80\x01", 281474976710656),
    (b"\x81\x80\x80\x80\x80\x80\x80\x01", -281474976710657),
    (b"\xFE\xFF\xFF\xFF\xFF\xFF\xFF\x7F", 36028797018963967),
    (b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x7F", -36028797018963968),
    (b"\x80\x80\x80\x80\x80\x80\x80\x80\x01", 36028797018963968),
    (b"\x81\x80\x80\x80\x80\x80\x80\x80\x01", -36028797018963969),
    (b"\xFE\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x7F", 4611686018427387903),
    (b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x7F", -4611686018427387904),
    (b"\x80\x80\x80\x80\x80\x80\x80\x80\x80\x01", 4611686018427387904),
    (b"\x81\x80\x80\x80\x80\x80\x80\x80\x80\x01", -4611686018427387905),
]
@pytest.mark.parametrize("encoded, decoded", varint_data)
def test_encode_varint(encoded, decoded):
    """Encoding the integer must yield exactly the reference byte string."""
    buf = bytearray()
    util.encode_varint(decoded, buf.append)
    assert buf == encoded
@pytest.mark.parametrize("encoded, decoded", varint_data)
def test_decode_varint(encoded, decoded):
    """Decoding must return the reference integer and advance the position
    by exactly the number of bytes consumed.  The payload is surrounded by
    sentinel bytes so that position bookkeeping is exercised."""
    framed = bytearray(b"\x01\xf0" + encoded + b"\xff\x01")
    value, pos = util.decode_varint(framed, 2)
    assert value == decoded
    assert pos - 2 == len(encoded)
@pytest.mark.parametrize("encoded, decoded", varint_data)
def test_size_of_varint(encoded, decoded):
    """The predicted encoded size must equal the reference encoding length."""
    expected_size = len(encoded)
    assert util.size_of_varint(decoded) == expected_size
@pytest.mark.parametrize("crc32_func", [util.crc32c_c, util.crc32c_py])
def test_crc32c(crc32_func):
    # Both the C-accelerated and the pure-Python CRC32C (Castagnoli)
    # implementations must agree with known reference checksums.
    def make_crc(data):
        # Pack the checksum big-endian so it can be compared byte-for-byte.
        crc = crc32_func(data)
        return struct.pack(">I", crc)
    assert make_crc(b"") == b"\x00\x00\x00\x00"
    assert make_crc(b"a") == b"\xc1\xd0\x43\x30"
    # Took from librdkafka testcase
    long_text = b"""\
This software is provided 'as-is', without any express or implied
warranty. In no event will the author be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution."""
    assert make_crc(long_text) == b"\x7d\xcd\xe1\x13"
| apache-2.0 |
tumbl3w33d/ansible | lib/ansible/modules/storage/purestorage/purefa_info.py | 13 | 38316 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2019, Simon Dodsley (simon@purestorage.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: purefa_info
version_added: '2.9'
short_description: Collect information from Pure Storage FlashArray
description:
- Collect information from a Pure Storage Flasharray running the
Purity//FA operating system. By default, the module will collect basic
information including hosts, host groups, protection
groups and volume counts. Additional information can be collected
based on the configured set of arguments.
author:
- Pure Storage ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
options:
gather_subset:
description:
- When supplied, this argument will define the information to be collected.
Possible values for this include all, minimum, config, performance,
capacity, network, subnet, interfaces, hgroups, pgroups, hosts,
admins, volumes, snapshots, pods, vgroups, offload, apps, arrays,
certs and kmip.
type: list
required: false
default: minimum
extends_documentation_fragment:
- purestorage.fa
'''
EXAMPLES = r'''
- name: collect default set of information
purefa_info:
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
register: array_info
- name: show default information
debug:
msg: "{{ array_info['purefa_info']['default'] }}"
- name: collect configuration and capacity information
purefa_info:
gather_subset:
- config
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
register: array_info
- name: show configuration information
debug:
msg: "{{ array_info['purefa_info']['config'] }}"
- name: collect all information
purefa_info:
gather_subset:
- all
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
- name: show all information
debug:
msg: "{{ array_info['purefa_info'] }}"
'''
RETURN = r'''
purefa_info:
description: Returns the information collected from the FlashArray
returned: always
type: complex
sample: {
"admins": {
"pureuser": {
"role": "array_admin",
"type": "local"
}
},
"apps": {
"offload": {
"description": "Snapshot offload to NFS or Amazon S3",
"status": "healthy",
"version": "5.2.1"
}
},
"arrays": {},
"capacity": {
"data_reduction": 11.664774599686346,
"free_space": 6995782867042,
"provisioned_space": 442391871488,
"shared_space": 3070918120,
"snapshot_space": 284597118,
"system_space": 0,
"thin_provisioning": 0.8201773449669771,
"total_capacity": 7002920315199,
"total_reduction": 64.86821472825108,
"volume_space": 3781932919
},
"config": {
"directory_service": {
"base_dn": null,
"bind_password": null,
"bind_user": null,
"check_peer": false,
"enabled": false,
"uri": [],
"user_login_attribute": null,
"user_object_class": null
},
"directory_service_roles": {
"array_admin": {
"group": null,
"group_base": null
},
"ops_admin": {
"group": null,
"group_base": null
},
"readonly": {
"group": null,
"group_base": null
},
"storage_admin": {
"group": null,
"group_base": null
}
},
"dns": {
"domain": "acme.com",
"nameservers": [
"8.8.4.4"
]
},
"global_admin": {
"lockout_duration": null,
"max_login_attempts": null,
"min_password_length": 1,
"single_sign_on_enabled": false
},
"idle_timeout": 0,
"ntp": [
"prod-ntp1.puretec.purestorage.com"
],
"phonehome": "enabled",
"proxy": "",
"relayhost": "smtp.puretec.purestorage.com",
"scsi_timeout": 60,
"senderdomain": "purestorage.com",
"smtp": [
{
"enabled": true,
"name": "flasharray-alerts@purestorage.com"
}
],
"snmp": [
{
"auth_passphrase": null,
"auth_protocol": null,
"community": "****",
"host": "10.21.23.34",
"name": "manager1",
"notification": "trap",
"privacy_passphrase": null,
"privacy_protocol": null,
"user": null,
"version": "v2c"
}
],
"syslog": [
"udp://prod-ntp2.puretec.purestorage.com:333"
]
},
"default": {
"admins": 1,
"array_model": "FA-405",
"array_name": "array",
"connected_arrays": 0,
"connection_key": "c6033033-fe69-2515-a9e8-966bb7fe4b40",
"hostgroups": 0,
"hosts": 15,
"pods": 1,
"protection_groups": 1,
"purity_version": "5.2.1",
"snapshots": 2,
"volume_groups": 1
},
"hgroups": {},
"hosts": {
"@offload": {
"hgroup": null,
"iqn": [],
"nqn": [],
"personality": null,
"preferred_array": [],
"target_port": [],
"wwn": []
},
"docker-host": {
"hgroup": null,
"iqn": [
"iqn.1994-05.com.redhat:d97adf78472"
],
"nqn": [],
"personality": null,
"preferred_array": [],
"target_port": [
"CT0.ETH4",
"CT1.ETH4"
],
"wwn": []
}
},
"interfaces": {
"CT0.ETH4": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682",
"CT1.ETH4": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682",
},
"network": {
"@offload.data0": {
"address": "10.21.200.222",
"gateway": "10.21.200.1",
"hwaddr": "52:54:30:02:b9:4e",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"app"
],
"speed": 10000000000
},
"ct0.eth0": {
"address": "10.21.200.211",
"gateway": "10.21.200.1",
"hwaddr": "ec:f4:bb:c8:8a:04",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"management"
],
"speed": 1000000000
},
"ct0.eth2": {
"address": "10.21.200.218",
"gateway": null,
"hwaddr": "ec:f4:bb:c8:8a:00",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"replication"
],
"speed": 10000000000
},
"ct0.eth4": {
"address": "10.21.200.214",
"gateway": null,
"hwaddr": "90:e2:ba:83:79:0c",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"iscsi"
],
"speed": 10000000000
},
"ct1.eth0": {
"address": "10.21.200.212",
"gateway": "10.21.200.1",
"hwaddr": "ec:f4:bb:e4:c6:3c",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"management"
],
"speed": 1000000000
},
"ct1.eth2": {
"address": "10.21.200.220",
"gateway": null,
"hwaddr": "ec:f4:bb:e4:c6:38",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"replication"
],
"speed": 10000000000
},
"ct1.eth4": {
"address": "10.21.200.216",
"gateway": null,
"hwaddr": "90:e2:ba:8b:b1:8c",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"iscsi"
],
"speed": 10000000000
},
"vir0": {
"address": "10.21.200.210",
"gateway": "10.21.200.1",
"hwaddr": "fe:ba:e9:e7:6b:0f",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"management"
],
"speed": 1000000000
}
},
"nfs_offload": {},
"performance": {
"input_per_sec": 0,
"local_queue_usec_per_op": 0,
"output_per_sec": 0,
"qos_rate_limit_usec_per_read_op": 0,
"qos_rate_limit_usec_per_write_op": 0,
"queue_depth": 0,
"queue_usec_per_read_op": 0,
"queue_usec_per_write_op": 0,
"reads_per_sec": 0,
"san_usec_per_read_op": 0,
"san_usec_per_write_op": 0,
"time": "2019-08-14T21:33:51Z",
"usec_per_read_op": 0,
"usec_per_write_op": 0,
"writes_per_sec": 0
},
"pgroups": {
"test_pg": {
"hgroups": null,
"hosts": null,
"source": "docker-host",
"targets": null,
"volumes": null
}
},
"pods": {
"test": {
"arrays": [
{
"array_id": "043be47c-1233-4399-b9d6-8fe38727dd9d",
"mediator_status": "online",
"name": "array2",
"status": "online"
}
],
"source": null
}
},
"s3_offload": {
"s3-offload": {
"access_key_id": "AKIAILNVEPWZTV4FGWZQ",
"bucket": "offload-bucket",
"protocol": "s3",
"status": "connected"
}
},
"snapshots": {
"@offload_boot.1": {
"created": "2019-03-14T15:29:20Z",
"size": 68719476736,
"source": "@offload_boot"
}
},
"subnet": {},
"vgroups": {
"test": {
"volumes": [
"test/test",
"test/test1"
]
}
},
"volumes": {
"@offload_boot": {
"bandwidth": null,
"hosts": [
[
"@offload",
1
]
],
"serial": "43BE47C12334399B00013959",
"size": 68719476736,
"source": null
},
"docker-store": {
"bandwidth": null,
"hosts": [
[
"docker-host",
1
]
],
"serial": "43BE47C12334399B00011418",
"size": 21474836480,
"source": null
}
}
}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pure import get_system, purefa_argument_spec
import time
# Minimum Purity REST API versions required for the optional feature
# groups queried below.  Each generate_*_dict helper compares these
# against the versions the array advertises before calling the
# corresponding version-gated endpoints.
ADMIN_API_VERSION = '1.14'            # admin account listing
S3_REQUIRED_API_VERSION = '1.16'      # S3 offload, DS roles, global-admin settings
LATENCY_REQUIRED_API_VERSION = '1.16'  # latency counters in the monitor endpoint
AC_REQUIRED_API_VERSION = '1.14'      # ActiveCluster: pods, vgroups, NFS offload
CAP_REQUIRED_API_VERSION = '1.6'      # space/capacity reporting
SAN_REQUIRED_API_VERSION = '1.10'     # installed applications listing
NVME_API_VERSION = '1.16'             # NVMe qualified names (NQN)
PREFERRED_API_VERSION = '1.15'        # per-host preferred-array setting
P53_API_VERSION = '1.17'              # Purity 5.3: certs, KMIP, Azure offload, etc.
def generate_default_dict(array):
    """Build the 'default' info subset: array identity plus object counts."""
    info = {}
    attrs = array.get()
    api_version = array._list_available_rest_versions()
    if AC_REQUIRED_API_VERSION in api_version:
        info['volume_groups'] = len(array.list_vgroups())
        info['connected_arrays'] = len(array.list_array_connections())
        info['pods'] = len(array.list_pods())
        info['connection_key'] = array.get(connection_key=True)['connection_key']
    hosts = array.list_hosts()
    admins = array.list_admins()
    snaps = array.list_volumes(snap=True, pending=True)
    pgroups = array.list_pgroups(pending=True)
    hgroups = array.list_hgroups()
    # Old FA arrays only report the model from the primary controller,
    # so fall back to CT1 when CT0 returns nothing.
    model = array.get_hardware('CT0')['model']
    if not model:
        model = array.get_hardware('CT1')['model']
    info['array_model'] = model
    info['array_name'] = attrs['array_name']
    info['purity_version'] = attrs['version']
    info['hosts'] = len(hosts)
    info['snapshots'] = len(snaps)
    info['protection_groups'] = len(pgroups)
    info['hostgroups'] = len(hgroups)
    info['admins'] = len(admins)
    if P53_API_VERSION in api_version:
        info['maintenance_window'] = array.list_maintenance_windows()
    return info
def generate_perf_dict(array):
    """Build the 'performance' info subset from the array monitor endpoint.

    Returns the raw monitor sample (IOPS, bandwidth, base latency, queue
    depth), augmented with SAN/queue/QoS latency counters when the REST
    API is new enough to report them.
    """
    api_version = array._list_available_rest_versions()
    has_latency = LATENCY_REQUIRED_API_VERSION in api_version
    latency_info = {}
    if has_latency:
        latency_info = array.get(action='monitor', latency=True)[0]
    # The monitor sample already carries writes/reads_per_sec,
    # input/output_per_sec, usec_per_read/write_op, queue_depth, etc.
    # (the original re-assigned each of those keys to itself — no-ops
    # that have been removed).
    perf_info = array.get(action='monitor')[0]
    if has_latency:
        # Merge the extended latency counters into the monitor sample.
        for key in ('san_usec_per_read_op', 'san_usec_per_write_op',
                    'queue_usec_per_read_op', 'queue_usec_per_write_op',
                    'qos_rate_limit_usec_per_read_op',
                    'qos_rate_limit_usec_per_write_op'):
            perf_info[key] = latency_info[key]
    return perf_info
def generate_config_dict(array):
    """Build the 'config' info subset: DNS, alerting, SNMP, directory
    service, NTP/syslog, phonehome, relay and timeout settings.

    The original implementation fetched the syslog server list twice;
    the redundant second round-trip has been removed.
    """
    config_info = {}
    api_version = array._list_available_rest_versions()
    # DNS
    config_info['dns'] = array.get_dns()
    # SMTP
    config_info['smtp'] = array.list_alert_recipients()
    # SNMP
    config_info['snmp'] = array.list_snmp_managers()
    config_info['snmp_v3_engine_id'] = array.get_snmp_engine_id()['engine_id']
    # Directory service; newer APIs expose per-role configuration separately.
    config_info['directory_service'] = array.get_directory_service()
    if S3_REQUIRED_API_VERSION in api_version:
        config_info['directory_service_roles'] = {}
        roles = array.list_directory_service_roles()
        for role in roles:
            config_info['directory_service_roles'][role['name']] = {
                'group': role['group'],
                'group_base': role['group_base'],
            }
    else:
        config_info['directory_service'].update(array.get_directory_service(groups=True))
    # NTP
    config_info['ntp'] = array.get(ntpserver=True)['ntpserver']
    # SYSLOG
    config_info['syslog'] = array.get(syslogserver=True)['syslogserver']
    # Phonehome
    config_info['phonehome'] = array.get(phonehome=True)['phonehome']
    # Proxy
    config_info['proxy'] = array.get(proxy=True)['proxy']
    # Relay Host
    config_info['relayhost'] = array.get(relayhost=True)['relayhost']
    # Sender Domain
    config_info['senderdomain'] = array.get(senderdomain=True)['senderdomain']
    # Idle Timeout
    config_info['idle_timeout'] = array.get(idle_timeout=True)['idle_timeout']
    # SCSI Timeout
    config_info['scsi_timeout'] = array.get(scsi_timeout=True)['scsi_timeout']
    # Global Admin settings
    if S3_REQUIRED_API_VERSION in api_version:
        config_info['global_admin'] = array.get_global_admin_attributes()
    return config_info
def generate_admin_dict(array):
    """Build the 'admins' info subset, keyed by admin account name."""
    admin_info = {}
    api_version = array._list_available_rest_versions()
    if ADMIN_API_VERSION in api_version:
        for account in array.list_admins():
            admin_info[account['name']] = {
                'type': account['type'],
                'role': account['role'],
            }
    return admin_info
def generate_subnet_dict(array):
    """Build the 'subnet' info subset; disabled subnets are skipped."""
    sub_info = {}
    for subnet in array.list_subnets():
        if subnet['enabled']:
            sub_info[subnet['name']] = {
                'gateway': subnet['gateway'],
                'mtu': subnet['mtu'],
                'vlan': subnet['vlan'],
                'prefix': subnet['prefix'],
                'interfaces': subnet['interfaces'],
                'services': subnet['services'],
            }
    return sub_info
def generate_network_dict(array):
    """Build the 'network' info subset, one entry per network interface.

    Interfaces attached to an enabled subnet also carry a 'subnet' sub-dict.
    """
    net_info = {}
    for port in array.list_network_interfaces():
        entry = {
            'hwaddr': port['hwaddr'],
            'mtu': port['mtu'],
            'enabled': port['enabled'],
            'speed': port['speed'],
            'address': port['address'],
            'slaves': port['slaves'],
            'services': port['services'],
            'gateway': port['gateway'],
            'netmask': port['netmask'],
        }
        if port['subnet']:
            subnet = array.get_subnet(port['subnet'])
            if subnet['enabled']:
                entry['subnet'] = {
                    'name': subnet['name'],
                    'prefix': subnet['prefix'],
                    'vlan': subnet['vlan'],
                }
        net_info[port['name']] = entry
    return net_info
def generate_capacity_dict(array):
    """Build the 'capacity' info subset from the array space report."""
    capacity_info = {}
    api_version = array._list_available_rest_versions()
    if CAP_REQUIRED_API_VERSION in api_version:
        # Provisioned space is the sum of all volume sizes, including
        # destroyed-but-pending volumes.
        volumes = array.list_volumes(pending=True)
        capacity_info['provisioned_space'] = sum(item['size'] for item in volumes)
        space = array.get(space=True)[0]
        capacity_info['free_space'] = space['capacity'] - space['total']
        capacity_info['total_capacity'] = space['capacity']
        capacity_info['data_reduction'] = space['data_reduction']
        capacity_info['system_space'] = space['system']
        capacity_info['volume_space'] = space['volumes']
        capacity_info['shared_space'] = space['shared_space']
        capacity_info['snapshot_space'] = space['snapshots']
        capacity_info['thin_provisioning'] = space['thin_provisioning']
        capacity_info['total_reduction'] = space['total_reduction']
    return capacity_info
def generate_snap_dict(array):
    """Build the 'snapshots' info subset, keyed by snapshot name."""
    snap_info = {}
    for snapshot in array.list_volumes(snap=True):
        snap_info[snapshot['name']] = {
            'size': snapshot['size'],
            'source': snapshot['source'],
            'created': snapshot['created'],
        }
    return snap_info
def generate_vol_dict(array):
    """Build the 'volumes' info subset.

    Collects base volume attributes, then augments them with QoS limits,
    protocol-endpoint (vVol) entries, host encryption key status and host
    connections, as the REST API version permits.
    """
    volume_info = {}
    for vol in array.list_volumes():
        volume_info[vol['name']] = {
            'source': vol['source'],
            'size': vol['size'],
            'serial': vol['serial'],
            'hosts': [],
            'bandwidth': ""
        }
    api_version = array._list_available_rest_versions()
    if AC_REQUIRED_API_VERSION in api_version:
        # QoS (bandwidth / IOPS) limits.
        for qvol in array.list_volumes(qos=True):
            volume = qvol['name']
            volume_info[volume]['bandwidth'] = qvol['bandwidth_limit']
            if P53_API_VERSION in api_version:
                volume_info[volume]['iops_limit'] = qvol['iops_limit']
        # Protocol endpoints (vVols) are listed separately and carry no size.
        for vvol in array.list_volumes(protocol_endpoint=True):
            volume_info[vvol['name']] = {
                'source': vvol['source'],
                'serial': vvol['serial'],
                'hosts': []
            }
        # Host encryption key status.  Reuse the api_version fetched above —
        # the original re-queried the array for the version list twice here.
        if P53_API_VERSION in api_version:
            for pe_e2ee in array.list_volumes(protocol_endpoint=True,
                                              host_encryption_key=True):
                volume_info[pe_e2ee['name']]['host_encryption_key_status'] = \
                    pe_e2ee['host_encryption_key_status']
            for e2ee in array.list_volumes(host_encryption_key=True):
                volume_info[e2ee['name']]['host_encryption_key_status'] = \
                    e2ee['host_encryption_key_status']
    # Host connections: one {host, lun} entry per connection.
    for cvol in array.list_volumes(connect=True):
        volume_info[cvol['name']]['hosts'].append(
            {'host': cvol['host'], 'lun': cvol['lun']})
    return volume_info
def generate_host_dict(array):
    """Build the 'hosts' info subset, keyed by host name."""
    api_version = array._list_available_rest_versions()
    host_info = {}
    for host in array.list_hosts():
        hostname = host['name']
        # The 'all' view is needed to learn which target ports the host sees.
        all_info = array.get_host(hostname, all=True)
        tports = all_info[0]['target_port'] if all_info else []
        host_info[hostname] = {
            'hgroup': host['hgroup'],
            'iqn': host['iqn'],
            'wwn': host['wwn'],
            'personality': array.get_host(hostname,
                                          personality=True)['personality'],
            'target_port': tports
        }
        if NVME_API_VERSION in api_version:
            host_info[hostname]['nqn'] = host['nqn']
    if PREFERRED_API_VERSION in api_version:
        for host in array.list_hosts(preferred_array=True):
            host_info[host['name']]['preferred_array'] = host['preferred_array']
    return host_info
def generate_pgroups_dict(array):
    """Build the 'pgroups' info subset, keyed by protection group name.

    Each entry carries membership, schedule and retention settings; for
    replicated groups (names containing ':') per-snapshot transfer
    statistics are added under 'snaps'.
    """
    pgroups_info = {}
    for pgroup in array.list_pgroups():
        protgroup = pgroup['name']
        pgroups_info[protgroup] = {
            'hgroups': pgroup['hgroups'],
            'hosts': pgroup['hosts'],
            'source': pgroup['source'],
            'targets': pgroup['targets'],
            'volumes': pgroup['volumes'],
        }
        prot_sched = array.get_pgroup(protgroup, schedule=True)
        prot_reten = array.get_pgroup(protgroup, retention=True)
        if prot_sched['snap_enabled'] or prot_sched['replicate_enabled']:
            # Correctly spelled frequency keys; the misspelled '*_freqyency'
            # keys are kept as aliases so existing playbooks keep working.
            pgroups_info[protgroup]['snap_frequency'] = prot_sched['snap_frequency']
            pgroups_info[protgroup]['replicate_frequency'] = prot_sched['replicate_frequency']
            pgroups_info[protgroup]['snap_freqyency'] = prot_sched['snap_frequency']
            pgroups_info[protgroup]['replicate_freqyency'] = prot_sched['replicate_frequency']
            pgroups_info[protgroup]['snap_enabled'] = prot_sched['snap_enabled']
            pgroups_info[protgroup]['replicate_enabled'] = prot_sched['replicate_enabled']
            pgroups_info[protgroup]['snap_at'] = prot_sched['snap_at']
            pgroups_info[protgroup]['replicate_at'] = prot_sched['replicate_at']
            pgroups_info[protgroup]['replicate_blackout'] = prot_sched['replicate_blackout']
            pgroups_info[protgroup]['per_day'] = prot_reten['per_day']
            pgroups_info[protgroup]['target_per_day'] = prot_reten['target_per_day']
            pgroups_info[protgroup]['target_days'] = prot_reten['target_days']
            pgroups_info[protgroup]['days'] = prot_reten['days']
            pgroups_info[protgroup]['all_for'] = prot_reten['all_for']
            pgroups_info[protgroup]['target_all_for'] = prot_reten['target_all_for']
        if ":" in protgroup:
            # Remote (replicated) pgroup: collect snapshot transfer stats.
            snap_transfers = array.get_pgroup(protgroup, snap=True, transfer=True)
            pgroups_info[protgroup]['snaps'] = {}
            for snap_transfer in snap_transfers:
                pgroups_info[protgroup]['snaps'][snap_transfer['name']] = {
                    'created': snap_transfer['created'],
                    'started': snap_transfer['started'],
                    'completed': snap_transfer['completed'],
                    'physical_bytes_written': snap_transfer['physical_bytes_written'],
                    'data_transferred': snap_transfer['data_transferred'],
                    'progress': snap_transfer['progress'],
                }
    return pgroups_info
def generate_pods_dict(array):
    """Build the 'pods' info subset, keyed by pod name."""
    pods_info = {}
    api_version = array._list_available_rest_versions()
    if AC_REQUIRED_API_VERSION in api_version:
        for pod in array.list_pods():
            pods_info[pod['name']] = {
                'source': pod['source'],
                'arrays': pod['arrays'],
            }
    return pods_info
def generate_conn_array_dict(array):
    """Build the 'arrays' info subset describing replication connections."""
    conn_array_info = {}
    api_version = array._list_available_rest_versions()
    for carray in array.list_array_connections():
        arrayname = carray['array_name']
        conn_array_info[arrayname] = {
            'array_id': carray['id'],
            'throttled': carray['throttled'],
            'version': carray['version'],
            'type': carray['type'],
            'mgmt_ip': carray['management_address'],
            'repl_ip': carray['replication_address'],
        }
        if P53_API_VERSION in api_version:
            conn_array_info[arrayname]['status'] = carray['status']
    # Replication throttling details come from a separate listing and are
    # only attached to connections that are actually throttled.
    for throttle in array.list_array_connections(throttle=True):
        arrayname = throttle['array_name']
        if conn_array_info[arrayname]['throttled']:
            conn_array_info[arrayname]['throttling'] = {
                'default_limit': throttle['default_limit'],
                'window_limit': throttle['window_limit'],
                'window': throttle['window'],
            }
    return conn_array_info
def generate_apps_dict(array):
    """Build the 'apps' info subset, keyed by installed application name."""
    apps_info = {}
    api_version = array._list_available_rest_versions()
    if SAN_REQUIRED_API_VERSION in api_version:
        for app in array.list_apps():
            apps_info[app['name']] = {
                'version': app['version'],
                'status': app['status'],
                'description': app['description'],
            }
    return apps_info
def generate_vgroups_dict(array):
    """Build the 'vgroups' info subset, keyed by volume group name."""
    vgroups_info = {}
    api_version = array._list_available_rest_versions()
    if AC_REQUIRED_API_VERSION in api_version:
        for vgroup in array.list_vgroups():
            vgroups_info[vgroup['name']] = {
                'volumes': vgroup['volumes'],
            }
    return vgroups_info
def generate_certs_dict(array):
    """Build the 'certs' info subset, keyed by certificate name."""
    certs_info = {}
    api_version = array._list_available_rest_versions()
    if P53_API_VERSION in api_version:
        for cert in array.list_certificates():
            # Validity timestamps arrive in milliseconds since the epoch;
            # render them as local-time strings.
            valid_from = time.strftime("%a, %d %b %Y %H:%M:%S %Z",
                                       time.localtime(cert['valid_from'] / 1000))
            valid_to = time.strftime("%a, %d %b %Y %H:%M:%S %Z",
                                     time.localtime(cert['valid_to'] / 1000))
            certs_info[cert['name']] = {
                'status': cert['status'],
                'issued_to': cert['issued_to'],
                'valid_from': valid_from,
                'locality': cert['locality'],
                'country': cert['country'],
                'issued_by': cert['issued_by'],
                'valid_to': valid_to,
                'state': cert['state'],
                'key_size': cert['key_size'],
                'org_unit': cert['organizational_unit'],
                'common_name': cert['common_name'],
                'organization': cert['organization'],
                'email': cert['email'],
            }
    return certs_info
def generate_kmip_dict(array):
    """Build the 'kmip' info subset describing KMIP server connections."""
    kmip_info = {}
    api_version = array._list_available_rest_versions()
    if P53_API_VERSION in api_version:
        for server in array.list_kmip():
            kmip_info[server['name']] = {
                'certificate': server['certificate'],
                'ca_cert_configured': server['ca_certificate_configured'],
                'uri': server['uri'],
            }
    return kmip_info
def generate_nfs_offload_dict(array):
    """Build the 'nfs_offload' info subset, keyed by offload target name."""
    offload_info = {}
    api_version = array._list_available_rest_versions()
    if AC_REQUIRED_API_VERSION in api_version:
        for target in array.list_nfs_offload():
            offload_info[target['name']] = {
                'status': target['status'],
                'mount_point': target['mount_point'],
                'protocol': target['protocol'],
                'mount_options': target['mount_options'],
                'address': target['address'],
            }
    return offload_info
def generate_s3_offload_dict(array):
    """Build the 's3_offload' info subset, keyed by offload target name."""
    offload_info = {}
    api_version = array._list_available_rest_versions()
    if S3_REQUIRED_API_VERSION in api_version:
        for target in array.list_s3_offload():
            name = target['name']
            offload_info[name] = {
                'status': target['status'],
                'bucket': target['bucket'],
                'protocol': target['protocol'],
                'access_key_id': target['access_key_id'],
            }
            if P53_API_VERSION in api_version:
                offload_info[name]['placement_strategy'] = target['placement_strategy']
    return offload_info
def generate_azure_offload_dict(array):
    """Build the 'azure_offload' info subset, keyed by offload target name."""
    offload_info = {}
    api_version = array._list_available_rest_versions()
    if P53_API_VERSION in api_version:
        for target in array.list_azure_offload():
            offload_info[target['name']] = {
                'status': target['status'],
                'account_name': target['account_name'],
                'protocol': target['protocol'],
                'secret_access_key': target['secret_access_key'],
                'container_name': target['container_name'],
            }
    return offload_info
def generate_hgroups_dict(array):
    """Build the 'hgroups' info subset: member hosts, associated protection
    groups, and connected volumes (as [volume, lun] pairs) per host group."""
    hgroups_info = {}
    for hgroup in array.list_hgroups():
        hgroups_info[hgroup['name']] = {
            'hosts': hgroup['hosts'],
            'pgs': [],
            'vols': [],
        }
    for record in array.list_hgroups(protect=True):
        hgroups_info[record['name']]['pgs'].append(record['protection_group'])
    for record in array.list_hgroups(connect=True):
        hgroups_info[record['name']]['vols'].append([record['vol'], record['lun']])
    return hgroups_info
def generate_interfaces_dict(array):
    """Build the 'interfaces' info subset mapping each port name to its
    WWN / IQN / NQN identifier (whichever the port exposes)."""
    api_version = array._list_available_rest_versions()
    nvme_capable = NVME_API_VERSION in api_version
    int_info = {}
    for port in array.list_ports():
        name = port['name']
        # Later assignments win: nqn > iqn > wwn when several are present.
        if port['wwn']:
            int_info[name] = port['wwn']
        if port['iqn']:
            int_info[name] = port['iqn']
        if nvme_capable and port['nqn']:
            int_info[name] = port['nqn']
    return int_info
def main():
    """Module entry point: validate gather_subset and assemble the
    requested information sections into the purefa_info result."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(dict(
        gather_subset=dict(default='minimum', type='list',)
    ))
    module = AnsibleModule(argument_spec, supports_check_mode=False)
    array = get_system(module)

    subset = [test.lower() for test in module.params['gather_subset']]
    valid_subsets = ('all', 'minimum', 'config', 'performance', 'capacity',
                     'network', 'subnet', 'interfaces', 'hgroups', 'pgroups',
                     'hosts', 'admins', 'volumes', 'snapshots', 'pods',
                     'vgroups', 'offload', 'apps', 'arrays', 'certs', 'kmip')
    if not all(test in valid_subsets for test in subset):
        # Fixed wording: the message previously read
        # "value must gather_subset must be ...".
        module.fail_json(msg="value of gather_subset must be one or more of: %s, got: %s"
                             % (",".join(valid_subsets), ",".join(subset)))

    info = {}

    # 'apps' is included in the first test because the apps section needs
    # info['default'] (the array model) to decide whether to query apps.
    if 'minimum' in subset or 'all' in subset or 'apps' in subset:
        info['default'] = generate_default_dict(array)
    if 'performance' in subset or 'all' in subset:
        info['performance'] = generate_perf_dict(array)
    if 'config' in subset or 'all' in subset:
        info['config'] = generate_config_dict(array)
    if 'capacity' in subset or 'all' in subset:
        info['capacity'] = generate_capacity_dict(array)
    if 'network' in subset or 'all' in subset:
        info['network'] = generate_network_dict(array)
    if 'subnet' in subset or 'all' in subset:
        info['subnet'] = generate_subnet_dict(array)
    if 'interfaces' in subset or 'all' in subset:
        info['interfaces'] = generate_interfaces_dict(array)
    if 'hosts' in subset or 'all' in subset:
        info['hosts'] = generate_host_dict(array)
    if 'volumes' in subset or 'all' in subset:
        info['volumes'] = generate_vol_dict(array)
    if 'snapshots' in subset or 'all' in subset:
        info['snapshots'] = generate_snap_dict(array)
    if 'hgroups' in subset or 'all' in subset:
        info['hgroups'] = generate_hgroups_dict(array)
    if 'pgroups' in subset or 'all' in subset:
        info['pgroups'] = generate_pgroups_dict(array)
    if 'pods' in subset or 'all' in subset:
        info['pods'] = generate_pods_dict(array)
    if 'admins' in subset or 'all' in subset:
        info['admins'] = generate_admin_dict(array)
    if 'vgroups' in subset or 'all' in subset:
        info['vgroups'] = generate_vgroups_dict(array)
    if 'offload' in subset or 'all' in subset:
        info['azure_offload'] = generate_azure_offload_dict(array)
        info['nfs_offload'] = generate_nfs_offload_dict(array)
        info['s3_offload'] = generate_s3_offload_dict(array)
    if 'apps' in subset or 'all' in subset:
        # Cloud Block Store arrays do not support installed applications.
        if 'CBS' not in info['default']['array_model']:
            info['apps'] = generate_apps_dict(array)
        else:
            info['apps'] = {}
    if 'arrays' in subset or 'all' in subset:
        info['arrays'] = generate_conn_array_dict(array)
    if 'certs' in subset or 'all' in subset:
        info['certs'] = generate_certs_dict(array)
    if 'kmip' in subset or 'all' in subset:
        info['kmip'] = generate_kmip_dict(array)

    module.exit_json(changed=False, purefa_info=info)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
jhbradley/moose | scripts/cluster_launcher.py | 15 | 5493 | #!/usr/bin/env python
import os, sys, re, shutil
from optparse import OptionParser, OptionGroup, Values
# Determine the MOOSE Directory
MOOSE_PYTHON_DIR = None
# dict.has_key() was removed in Python 3; the 'in' operator works on both
# Python 2 and 3 and is the idiomatic membership test.
if 'MOOSE_DIR' in os.environ:
    # Prefer an explicitly configured MOOSE checkout.
    MOOSE_PYTHON_DIR = os.path.join(os.environ['MOOSE_DIR'], 'python')
else:
    # Fall back to the moose/python directory relative to this script.
    MOOSE_PYTHON_DIR = os.path.join(os.path.split(os.path.dirname(os.path.abspath(__file__)))[0], 'python')

# Add moose/python to path
if os.path.exists(MOOSE_PYTHON_DIR):
    sys.path.append(MOOSE_PYTHON_DIR)
else:
    raise Exception('Unable to locate moose/python directory, please set MOOSE_DIR environment variable')
# Import the TestHarness and Helper functions from the MOOSE toolkit
from FactorySystem import InputParameters, Factory
from ClusterLauncher import PBSJob
import ParseGetPot
# Default file to read if only a directory is supplied
# (parseJobsFile expects this GetPot-format file inside the template dir).
job_list = 'job_list'
def getNextDirName(file_name, files):
    """Return file_name suffixed with the next unused three-digit serial.

    Scans *files* for entries matching '<file_name>_NNN' and returns
    '<file_name>_MMM' where MMM is the highest serial found plus one
    (zero-padded to three digits; 001 when no match exists).
    """
    serial_re = re.compile(file_name + r'_(\d{3})')
    highest = 0
    for entry in files:
        match = serial_re.search(entry)
        if match:
            serial = int(match.group(1))
            if serial > highest:
                highest = serial
    return '%s_%s' % (file_name, str(highest + 1).zfill(3))
class ClusterLauncher:
    def __init__(self):
        # Factory used to instantiate job objects (e.g. PBSJob) by the
        # 'type' name given in the job_list file.
        self.factory = Factory()
def parseJobsFile(self, template_dir, job_file):
jobs = []
# We expect the job list to be named "job_list"
filename = template_dir + job_file
try:
data = ParseGetPot.readInputFile(filename)
except: # ParseGetPot class
print "Parse Error: " + filename
return jobs
# We expect our root node to be called "Jobs"
if 'Jobs' in data.children:
jobs_node = data.children['Jobs']
# Get the active line
active_jobs = None
if 'active' in jobs_node.params:
active_jobs = jobs_node.params['active'].split(' ')
for jobname, job_node in jobs_node.children.iteritems():
# Make sure this job is active
if active_jobs != None and not jobname in active_jobs:
continue
# First retrieve the type so we can get the valid params
if 'type' not in job_node.params:
print "Type missing in " + filename
sys.exit(1)
params = self.factory.validParams(job_node.params['type'])
params['job_name'] = jobname
# Now update all the base level keys
params_parsed = set()
params_ignored = set()
for key, value in job_node.params.iteritems():
params_parsed.add(key)
if key in params:
if params.type(key) == list:
params[key] = value.split(' ')
else:
if re.match('".*"', value): # Strip quotes
params[key] = value[1:-1]
else:
params[key] = value
else:
params_ignored.add(key)
# Make sure that all required parameters are supplied
required_params_missing = params.required_keys() - params_parsed
if len(required_params_missing):
print 'Required Missing Parameter(s): ', required_params_missing
sys.exit(1)
if len(params_ignored):
print 'Ignored Parameter(s): ', params_ignored
jobs.append(params)
return jobs
def createAndLaunchJob(self, template_dir, job_file, specs, options):
next_dir = getNextDirName(specs['job_name'], os.listdir('.'))
os.mkdir(template_dir + next_dir)
# Log it
if options.message:
f = open(template_dir + 'jobs.log', 'a')
f.write(next_dir.ljust(20) + ': ' + options.message + '\n')
f.close()
saved_cwd = os.getcwd()
os.chdir(template_dir + next_dir)
# Turn the remaining work over to the Job instance
# To keep everything consistent we'll also append our serial number to our job name
specs['job_name'] = next_dir
job_instance = self.factory.create(specs['type'], specs['job_name'], specs)
# Copy files
job_instance.copyFiles(job_file)
# Prepare the Job Script
job_instance.prepareJobScript()
# Launch it!
job_instance.launch()
os.chdir(saved_cwd)
def registerJobType(self, type, name):
self.factory.register(type, name)
### Parameter Dump ###
def printDump(self):
self.factory.printDump("Jobs")
sys.exit(0)
def run(self, template_dir, job_file, options):
jobs = self.parseJobsFile(template_dir, job_file)
for job in jobs:
self.createAndLaunchJob(template_dir, job_file, job, options)
########################################################
def main():
    """Parse command-line options and launch every job in the job list.

    Accepts either a template directory (the default 'job_list' file inside
    it is used) or a direct path to a job-list file.
    """
    parser = OptionParser(usage='Usage: %prog [options] <template directory>')
    parser.add_option("--dump", action="store_true", dest="dump", default=False, help="Dump the parameters for the testers in GetPot Format")
    parser.add_option("-m", action="store", dest="message", help="A message that will be stored in a local log file that describes the job")
    (options, location) = parser.parse_args()

    cluster_launcher = ClusterLauncher()
    cluster_launcher.registerJobType(PBSJob, 'PBSJob')

    if options.dump:
        cluster_launcher.printDump()

    if not location:
        parser.print_help()
        sys.exit(1)

    # See if the user passed a file or a directory
    abs_location = os.path.abspath(location[0])

    if os.path.isdir(abs_location):
        # Fix: os.path.abspath strips any trailing slash, and the launcher
        # joins paths by plain string concatenation, so restore it here.
        template_dir = abs_location + '/'
        template_file = job_list
    elif os.path.isfile(abs_location):
        (template_dir, template_file) = os.path.split(abs_location)
        template_dir = template_dir + '/'
    else:
        # Fix: previously this fell through with unbound variables and
        # crashed with a NameError; report a proper usage error instead.
        parser.error('Path does not exist: ' + abs_location)

    # Launch it
    cluster_launcher.run(template_dir, template_file, options)

if __name__ == '__main__':
    main()
| lgpl-2.1 |
filodej/django-filer | filer/utils/loader.py | 38 | 1668 | #-*- coding: utf-8 -*-
"""
This function is snatched from
https://github.com/ojii/django-load/blob/3058ab9d9d4875589638cc45e84b59e7e1d7c9c3/django_load/core.py#L49
local changes:
* added check for basestring to allow values that are already an object
or method.
"""
from django.utils.importlib import import_module
def load_object(import_path):
    """
    Resolve a dotted 'import_path' (e.g. "mypackage.mymodule.MyObject") to
    the object it names, as used for MIDDLEWARE_CLASSES and similar settings.

    Everything up to the last dot is imported as a module and the trailing
    component is looked up on it.  Values that are not strings are assumed
    to already be the object or method and are returned unchanged.

    Raises TypeError when the path contains no dot, ImportError when the
    module cannot be imported, and AttributeError when the attribute is
    missing from the module.
    """
    if isinstance(import_path, basestring):
        if '.' not in import_path:
            raise TypeError(
                "'import_path' argument to 'django_load.core.load_object' "
                "must contain at least one dot.")
        mod_path, attr_name = import_path.rsplit('.', 1)
        return getattr(import_module(mod_path), attr_name)
    # Already an object or method -- pass it through untouched.
    return import_path
def storage_factory(klass, location, base_url):
    """
    Build and return an instance of the given storage class.

    args:
    * klass: must be inherit from ``django.core.files.storage.Storage``
    * location: is a string representing the PATH similar to MEDIA_ROOT
    * base_url: is a string representing the URL similar to MEDIA_URL
    """
    storage = klass(location=location, base_url=base_url)
    return storage
| bsd-3-clause |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/rest_framework/tests/test_relations_nested.py | 17 | 14625 | from __future__ import unicode_literals
from django.db import models
from django.test import TestCase
from rest_framework import serializers
from .models import OneToOneTarget
class OneToOneSource(models.Model):
    # Nullable 1-1: a target may exist with no source.  Reverse accessor is
    # OneToOneTarget.source.
    name = models.CharField(max_length=100)
    target = models.OneToOneField(OneToOneTarget, related_name='source',
                                  null=True, blank=True)


class OneToManyTarget(models.Model):
    name = models.CharField(max_length=100)


class OneToManySource(models.Model):
    # Many sources point at one target; reverse accessor is
    # OneToManyTarget.sources.
    name = models.CharField(max_length=100)
    target = models.ForeignKey(OneToManyTarget, related_name='sources')
class ReverseNestedOneToOneTests(TestCase):
    """Nested serialization of the *reverse* side of a OneToOne relation:
    the target serializer embeds its related source inline."""

    def setUp(self):
        class OneToOneSourceSerializer(serializers.ModelSerializer):
            class Meta:
                model = OneToOneSource
                fields = ('id', 'name')

        class OneToOneTargetSerializer(serializers.ModelSerializer):
            source = OneToOneSourceSerializer()

            class Meta:
                model = OneToOneTarget
                fields = ('id', 'name', 'source')

        self.Serializer = OneToOneTargetSerializer

        # Fixture: three (target, source) pairs with matching serial numbers.
        for idx in range(1, 4):
            target = OneToOneTarget(name='target-%d' % idx)
            target.save()
            source = OneToOneSource(name='source-%d' % idx, target=target)
            source.save()

    def test_one_to_one_retrieve(self):
        queryset = OneToOneTarget.objects.all()
        serializer = self.Serializer(queryset, many=True)
        expected = [
            {'id': 1, 'name': 'target-1', 'source': {'id': 1, 'name': 'source-1'}},
            {'id': 2, 'name': 'target-2', 'source': {'id': 2, 'name': 'source-2'}},
            {'id': 3, 'name': 'target-3', 'source': {'id': 3, 'name': 'source-3'}}
        ]
        self.assertEqual(serializer.data, expected)

    def test_one_to_one_create(self):
        data = {'id': 4, 'name': 'target-4', 'source': {'id': 4, 'name': 'source-4'}}
        serializer = self.Serializer(data=data)
        self.assertTrue(serializer.is_valid())
        obj = serializer.save()
        self.assertEqual(serializer.data, data)
        self.assertEqual(obj.name, 'target-4')

        # Ensure (target 4, target_source 4, source 4) are added, and
        # everything else is as expected.
        queryset = OneToOneTarget.objects.all()
        serializer = self.Serializer(queryset, many=True)
        expected = [
            {'id': 1, 'name': 'target-1', 'source': {'id': 1, 'name': 'source-1'}},
            {'id': 2, 'name': 'target-2', 'source': {'id': 2, 'name': 'source-2'}},
            {'id': 3, 'name': 'target-3', 'source': {'id': 3, 'name': 'source-3'}},
            {'id': 4, 'name': 'target-4', 'source': {'id': 4, 'name': 'source-4'}}
        ]
        self.assertEqual(serializer.data, expected)

    def test_one_to_one_create_with_invalid_data(self):
        # A missing nested 'name' must surface as a nested error dict.
        data = {'id': 4, 'name': 'target-4', 'source': {'id': 4}}
        serializer = self.Serializer(data=data)
        self.assertFalse(serializer.is_valid())
        self.assertEqual(serializer.errors, {'source': [{'name': ['This field is required.']}]})

    def test_one_to_one_update(self):
        data = {'id': 3, 'name': 'target-3-updated', 'source': {'id': 3, 'name': 'source-3-updated'}}
        instance = OneToOneTarget.objects.get(pk=3)
        serializer = self.Serializer(instance, data=data)
        self.assertTrue(serializer.is_valid())
        obj = serializer.save()
        self.assertEqual(serializer.data, data)
        self.assertEqual(obj.name, 'target-3-updated')

        # Ensure (target 3, target_source 3, source 3) are updated,
        # and everything else is as expected.
        queryset = OneToOneTarget.objects.all()
        serializer = self.Serializer(queryset, many=True)
        expected = [
            {'id': 1, 'name': 'target-1', 'source': {'id': 1, 'name': 'source-1'}},
            {'id': 2, 'name': 'target-2', 'source': {'id': 2, 'name': 'source-2'}},
            {'id': 3, 'name': 'target-3-updated', 'source': {'id': 3, 'name': 'source-3-updated'}}
        ]
        self.assertEqual(serializer.data, expected)
class ForwardNestedOneToOneTests(TestCase):
    """Nested serialization of the *forward* side of a OneToOne relation:
    the source serializer embeds its related target inline."""

    def setUp(self):
        class OneToOneTargetSerializer(serializers.ModelSerializer):
            class Meta:
                model = OneToOneTarget
                fields = ('id', 'name')

        class OneToOneSourceSerializer(serializers.ModelSerializer):
            target = OneToOneTargetSerializer()

            class Meta:
                model = OneToOneSource
                fields = ('id', 'name', 'target')

        self.Serializer = OneToOneSourceSerializer

        # Fixture: three (target, source) pairs with matching serial numbers.
        for idx in range(1, 4):
            target = OneToOneTarget(name='target-%d' % idx)
            target.save()
            source = OneToOneSource(name='source-%d' % idx, target=target)
            source.save()

    def test_one_to_one_retrieve(self):
        queryset = OneToOneSource.objects.all()
        serializer = self.Serializer(queryset, many=True)
        expected = [
            {'id': 1, 'name': 'source-1', 'target': {'id': 1, 'name': 'target-1'}},
            {'id': 2, 'name': 'source-2', 'target': {'id': 2, 'name': 'target-2'}},
            {'id': 3, 'name': 'source-3', 'target': {'id': 3, 'name': 'target-3'}}
        ]
        self.assertEqual(serializer.data, expected)

    def test_one_to_one_create(self):
        data = {'id': 4, 'name': 'source-4', 'target': {'id': 4, 'name': 'target-4'}}
        serializer = self.Serializer(data=data)
        self.assertTrue(serializer.is_valid())
        obj = serializer.save()
        self.assertEqual(serializer.data, data)
        self.assertEqual(obj.name, 'source-4')

        # Ensure (target 4, target_source 4, source 4) are added, and
        # everything else is as expected.
        queryset = OneToOneSource.objects.all()
        serializer = self.Serializer(queryset, many=True)
        expected = [
            {'id': 1, 'name': 'source-1', 'target': {'id': 1, 'name': 'target-1'}},
            {'id': 2, 'name': 'source-2', 'target': {'id': 2, 'name': 'target-2'}},
            {'id': 3, 'name': 'source-3', 'target': {'id': 3, 'name': 'target-3'}},
            {'id': 4, 'name': 'source-4', 'target': {'id': 4, 'name': 'target-4'}}
        ]
        self.assertEqual(serializer.data, expected)

    def test_one_to_one_create_with_invalid_data(self):
        # A missing nested 'name' must surface as a nested error dict.
        data = {'id': 4, 'name': 'source-4', 'target': {'id': 4}}
        serializer = self.Serializer(data=data)
        self.assertFalse(serializer.is_valid())
        self.assertEqual(serializer.errors, {'target': [{'name': ['This field is required.']}]})

    def test_one_to_one_update(self):
        data = {'id': 3, 'name': 'source-3-updated', 'target': {'id': 3, 'name': 'target-3-updated'}}
        instance = OneToOneSource.objects.get(pk=3)
        serializer = self.Serializer(instance, data=data)
        self.assertTrue(serializer.is_valid())
        obj = serializer.save()
        self.assertEqual(serializer.data, data)
        self.assertEqual(obj.name, 'source-3-updated')

        # Ensure (target 3, target_source 3, source 3) are updated,
        # and everything else is as expected.
        queryset = OneToOneSource.objects.all()
        serializer = self.Serializer(queryset, many=True)
        expected = [
            {'id': 1, 'name': 'source-1', 'target': {'id': 1, 'name': 'target-1'}},
            {'id': 2, 'name': 'source-2', 'target': {'id': 2, 'name': 'target-2'}},
            {'id': 3, 'name': 'source-3-updated', 'target': {'id': 3, 'name': 'target-3-updated'}}
        ]
        self.assertEqual(serializer.data, expected)

    def test_one_to_one_update_to_null(self):
        # The nullable relation can be cleared by passing target=None.
        data = {'id': 3, 'name': 'source-3-updated', 'target': None}
        instance = OneToOneSource.objects.get(pk=3)
        serializer = self.Serializer(instance, data=data)
        self.assertTrue(serializer.is_valid())
        obj = serializer.save()
        self.assertEqual(serializer.data, data)
        self.assertEqual(obj.name, 'source-3-updated')
        self.assertEqual(obj.target, None)

        queryset = OneToOneSource.objects.all()
        serializer = self.Serializer(queryset, many=True)
        expected = [
            {'id': 1, 'name': 'source-1', 'target': {'id': 1, 'name': 'target-1'}},
            {'id': 2, 'name': 'source-2', 'target': {'id': 2, 'name': 'target-2'}},
            {'id': 3, 'name': 'source-3-updated', 'target': None}
        ]
        self.assertEqual(serializer.data, expected)

    # TODO: Nullable 1-1 tests
    # def test_one_to_one_delete(self):
    #     data = {'id': 3, 'name': 'target-3', 'target_source': None}
    #     instance = OneToOneTarget.objects.get(pk=3)
    #     serializer = self.Serializer(instance, data=data)
    #     self.assertTrue(serializer.is_valid())
    #     serializer.save()

    #     # Ensure (target_source 3, source 3) are deleted,
    #     # and everything else is as expected.
    #     queryset = OneToOneTarget.objects.all()
    #     serializer = self.Serializer(queryset)
    #     expected = [
    #         {'id': 1, 'name': 'target-1', 'source': {'id': 1, 'name': 'source-1'}},
    #         {'id': 2, 'name': 'target-2', 'source': {'id': 2, 'name': 'source-2'}},
    #         {'id': 3, 'name': 'target-3', 'source': None}
    #     ]
    #     self.assertEqual(serializer.data, expected)
class ReverseNestedOneToManyTests(TestCase):
    """Nested serialization of the reverse side of a ForeignKey: the target
    serializer embeds its list of sources, with add/remove enabled."""

    def setUp(self):
        class OneToManySourceSerializer(serializers.ModelSerializer):
            class Meta:
                model = OneToManySource
                fields = ('id', 'name')

        class OneToManyTargetSerializer(serializers.ModelSerializer):
            # allow_add_remove lets the nested write create/delete sources.
            sources = OneToManySourceSerializer(many=True, allow_add_remove=True)

            class Meta:
                model = OneToManyTarget
                fields = ('id', 'name', 'sources')

        self.Serializer = OneToManyTargetSerializer

        # Fixture: one target owning three sources.
        target = OneToManyTarget(name='target-1')
        target.save()
        for idx in range(1, 4):
            source = OneToManySource(name='source-%d' % idx, target=target)
            source.save()

    def test_one_to_many_retrieve(self):
        queryset = OneToManyTarget.objects.all()
        serializer = self.Serializer(queryset, many=True)
        expected = [
            {'id': 1, 'name': 'target-1', 'sources': [{'id': 1, 'name': 'source-1'},
                                                      {'id': 2, 'name': 'source-2'},
                                                      {'id': 3, 'name': 'source-3'}]},
        ]
        self.assertEqual(serializer.data, expected)

    def test_one_to_many_create(self):
        # Writing an extra nested entry creates a new source row.
        data = {'id': 1, 'name': 'target-1', 'sources': [{'id': 1, 'name': 'source-1'},
                                                         {'id': 2, 'name': 'source-2'},
                                                         {'id': 3, 'name': 'source-3'},
                                                         {'id': 4, 'name': 'source-4'}]}
        instance = OneToManyTarget.objects.get(pk=1)
        serializer = self.Serializer(instance, data=data)
        self.assertTrue(serializer.is_valid())
        obj = serializer.save()
        self.assertEqual(serializer.data, data)
        self.assertEqual(obj.name, 'target-1')

        # Ensure source 4 is added, and everything else is as
        # expected.
        queryset = OneToManyTarget.objects.all()
        serializer = self.Serializer(queryset, many=True)
        expected = [
            {'id': 1, 'name': 'target-1', 'sources': [{'id': 1, 'name': 'source-1'},
                                                      {'id': 2, 'name': 'source-2'},
                                                      {'id': 3, 'name': 'source-3'},
                                                      {'id': 4, 'name': 'source-4'}]}
        ]
        self.assertEqual(serializer.data, expected)

    def test_one_to_many_create_with_invalid_data(self):
        # Errors align positionally with the nested list entries.
        data = {'id': 1, 'name': 'target-1', 'sources': [{'id': 1, 'name': 'source-1'},
                                                         {'id': 2, 'name': 'source-2'},
                                                         {'id': 3, 'name': 'source-3'},
                                                         {'id': 4}]}
        serializer = self.Serializer(data=data)
        self.assertFalse(serializer.is_valid())
        self.assertEqual(serializer.errors, {'sources': [{}, {}, {}, {'name': ['This field is required.']}]})

    def test_one_to_many_update(self):
        data = {'id': 1, 'name': 'target-1-updated', 'sources': [{'id': 1, 'name': 'source-1-updated'},
                                                                 {'id': 2, 'name': 'source-2'},
                                                                 {'id': 3, 'name': 'source-3'}]}
        instance = OneToManyTarget.objects.get(pk=1)
        serializer = self.Serializer(instance, data=data)
        self.assertTrue(serializer.is_valid())
        obj = serializer.save()
        self.assertEqual(serializer.data, data)
        self.assertEqual(obj.name, 'target-1-updated')

        # Ensure (target 1, source 1) are updated,
        # and everything else is as expected.
        queryset = OneToManyTarget.objects.all()
        serializer = self.Serializer(queryset, many=True)
        expected = [
            {'id': 1, 'name': 'target-1-updated', 'sources': [{'id': 1, 'name': 'source-1-updated'},
                                                              {'id': 2, 'name': 'source-2'},
                                                              {'id': 3, 'name': 'source-3'}]}
        ]
        self.assertEqual(serializer.data, expected)

    def test_one_to_many_delete(self):
        # Omitting an existing nested entry deletes the corresponding source.
        data = {'id': 1, 'name': 'target-1', 'sources': [{'id': 1, 'name': 'source-1'},
                                                         {'id': 3, 'name': 'source-3'}]}
        instance = OneToManyTarget.objects.get(pk=1)
        serializer = self.Serializer(instance, data=data)
        self.assertTrue(serializer.is_valid())
        serializer.save()

        # Ensure source 2 is deleted, and everything else is as
        # expected.
        queryset = OneToManyTarget.objects.all()
        serializer = self.Serializer(queryset, many=True)
        expected = [
            {'id': 1, 'name': 'target-1', 'sources': [{'id': 1, 'name': 'source-1'},
                                                      {'id': 3, 'name': 'source-3'}]}
        ]
        self.assertEqual(serializer.data, expected)
| agpl-3.0 |
Denisolt/IEEE-NYIT-MA | local/lib/python2.7/site-packages/setuptools/command/bdist_rpm.py | 1049 | 1508 | import distutils.command.bdist_rpm as orig
class bdist_rpm(orig.bdist_rpm):
    """
    Override the default bdist_rpm behavior to do the following:

    1. Run egg_info to ensure the name and version are properly calculated.
    2. Always run 'install' using --single-version-externally-managed
       to disable eggs in RPM distributions.
    3. Replace dash with underscore in the version numbers for better RPM
       compatibility.
    """

    def run(self):
        # Refresh egg metadata so name/version are current before building.
        self.run_command('egg_info')
        orig.bdist_rpm.run(self)

    def _make_spec_file(self):
        version = self.distribution.get_version()
        rpmversion = version.replace('-', '_')
        spec = orig.bdist_rpm._make_spec_file(self)

        dashed_define = '%define version ' + version
        mangled_define = '%define version ' + rpmversion

        # Rewrite each generated line: point Source0 and %setup at the
        # unmangled tarball name, force single-version installs, and swap
        # the dashed version define for the underscore form.
        rewritten = []
        for line in spec:
            line = line.replace(
                "Source0: %{name}-%{version}.tar",
                "Source0: %{name}-%{unmangled_version}.tar")
            line = line.replace(
                "setup.py install ",
                "setup.py install --single-version-externally-managed ")
            line = line.replace(
                "%setup",
                "%setup -n %{name}-%{unmangled_version}")
            rewritten.append(line.replace(dashed_define, mangled_define))
        spec = rewritten

        # Record the original (possibly dashed) version right after the
        # mangled %define so the spec can still reference it.
        insert_loc = spec.index(mangled_define) + 1
        spec.insert(insert_loc, "%define unmangled_version " + version)
        return spec
| gpl-3.0 |
apache/airflow | tests/providers/http/sensors/test_http.py | 3 | 8988 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
from unittest.mock import patch
import pytest
import requests
from airflow.exceptions import AirflowException, AirflowSensorTimeout
from airflow.models import TaskInstance
from airflow.models.dag import DAG
from airflow.providers.http.operators.http import SimpleHttpOperator
from airflow.providers.http.sensors.http import HttpSensor
from airflow.utils.timezone import datetime
# Fixed execution date and DAG id shared by every test in this module.
DEFAULT_DATE = datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
TEST_DAG_ID = 'unit_test_dag'
class TestHttpSensor(unittest.TestCase):
    """Unit tests for HttpSensor.poke, with requests.Session.send mocked
    so no network traffic occurs."""

    def setUp(self):
        args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
        self.dag = DAG(TEST_DAG_ID, default_args=args)

    @patch("airflow.providers.http.hooks.http.requests.Session.send")
    def test_poke_exception(self, mock_session_send):
        """
        Exception occurs in poke function should not be ignored.
        """
        response = requests.Response()
        response.status_code = 200
        mock_session_send.return_value = response

        def resp_check(_):
            raise AirflowException('AirflowException raised here!')

        task = HttpSensor(
            task_id='http_sensor_poke_exception',
            http_conn_id='http_default',
            endpoint='',
            request_params={},
            response_check=resp_check,
            timeout=5,
            poke_interval=1,
        )
        with pytest.raises(AirflowException, match='AirflowException raised here!'):
            task.execute(context={})

    @patch("airflow.providers.http.hooks.http.requests.Session.send")
    def test_poke_continues_for_http_500_with_extra_options_check_response_false(self, mock_session_send):
        # With check_response disabled, a 500 is not an error; the sensor
        # keeps poking until it times out.
        def resp_check(_):
            return False

        response = requests.Response()
        response.status_code = 500
        response.reason = 'Internal Server Error'
        response._content = b'Internal Server Error'
        mock_session_send.return_value = response

        task = HttpSensor(
            dag=self.dag,
            task_id='http_sensor_poke_for_code_500',
            http_conn_id='http_default',
            endpoint='',
            request_params={},
            method='HEAD',
            response_check=resp_check,
            extra_options={'check_response': False},
            timeout=5,
            poke_interval=1,
        )
        with self.assertRaises(AirflowSensorTimeout):
            task.execute(context={})

    @patch("airflow.providers.http.hooks.http.requests.Session.send")
    def test_head_method(self, mock_session_send):
        # Verify the configured HTTP method is actually used on the wire.
        def resp_check(_):
            return True

        task = HttpSensor(
            dag=self.dag,
            task_id='http_sensor_head_method',
            http_conn_id='http_default',
            endpoint='',
            request_params={},
            method='HEAD',
            response_check=resp_check,
            timeout=5,
            poke_interval=1,
        )

        task.execute(context={})

        args, kwargs = mock_session_send.call_args
        received_request = args[0]

        prep_request = requests.Request('HEAD', 'https://www.httpbin.org', {}).prepare()

        assert prep_request.url == received_request.url
        assert prep_request.method, received_request.method

    @patch("airflow.providers.http.hooks.http.requests.Session.send")
    def test_poke_context(self, mock_session_send):
        # response_check may accept template-context kwargs (execution_date).
        response = requests.Response()
        response.status_code = 200
        mock_session_send.return_value = response

        def resp_check(_, execution_date):
            if execution_date == DEFAULT_DATE:
                return True
            raise AirflowException('AirflowException raised here!')

        task = HttpSensor(
            task_id='http_sensor_poke_exception',
            http_conn_id='http_default',
            endpoint='',
            request_params={},
            response_check=resp_check,
            timeout=5,
            poke_interval=1,
            dag=self.dag,
        )

        task_instance = TaskInstance(task=task, execution_date=DEFAULT_DATE)
        task.execute(task_instance.get_template_context())

    @patch("airflow.providers.http.hooks.http.requests.Session.send")
    def test_logging_head_error_request(self, mock_session_send):
        # Each failed poke logs the HTTP reason and body; with a 5s timeout
        # and 1s interval the pair appears six times before timing out.
        def resp_check(_):
            return True

        response = requests.Response()
        response.status_code = 404
        response.reason = 'Not Found'
        response._content = b"This endpoint doesn't exist"
        mock_session_send.return_value = response

        task = HttpSensor(
            dag=self.dag,
            task_id='http_sensor_head_method',
            http_conn_id='http_default',
            endpoint='',
            request_params={},
            method='HEAD',
            response_check=resp_check,
            timeout=5,
            poke_interval=1,
        )

        with mock.patch.object(task.hook.log, 'error') as mock_errors:
            with pytest.raises(AirflowSensorTimeout):
                task.execute(None)

            assert mock_errors.called
            calls = [
                mock.call('HTTP error: %s', 'Not Found'),
                mock.call("This endpoint doesn't exist"),
                mock.call('HTTP error: %s', 'Not Found'),
                mock.call("This endpoint doesn't exist"),
                mock.call('HTTP error: %s', 'Not Found'),
                mock.call("This endpoint doesn't exist"),
                mock.call('HTTP error: %s', 'Not Found'),
                mock.call("This endpoint doesn't exist"),
                mock.call('HTTP error: %s', 'Not Found'),
                mock.call("This endpoint doesn't exist"),
                mock.call('HTTP error: %s', 'Not Found'),
                mock.call("This endpoint doesn't exist"),
            ]
            mock_errors.assert_has_calls(calls)
class FakeSession:
    """Minimal stand-in for requests.Session used by the operator tests.

    Every send() returns the same canned 200 response whose body starts as
    'apache/airflow'; prepare_request() appends '/<date>' to the body when
    the request carries a 'date' parameter.
    """

    def __init__(self):
        canned = requests.Response()
        canned.status_code = 200
        canned._content = 'apache/airflow'.encode('ascii', 'ignore')
        self.response = canned

    def send(self, *args, **kwargs):
        return self.response

    def prepare_request(self, request):
        params = request.params
        if 'date' in params:
            suffix = '/' + params['date']
            self.response._content += suffix.encode('ascii', 'ignore')
        return self.response

    def merge_environment_settings(self, _url, **kwargs):
        return kwargs
class TestHttpOpSensor(unittest.TestCase):
    """End-to-end operator/sensor runs against the FakeSession stub instead
    of a live HTTP endpoint."""

    def setUp(self):
        args = {'owner': 'airflow', 'start_date': DEFAULT_DATE_ISO}
        dag = DAG(TEST_DAG_ID, default_args=args)
        self.dag = dag

    @mock.patch('requests.Session', FakeSession)
    def test_get(self):
        op = SimpleHttpOperator(
            task_id='get_op',
            method='GET',
            endpoint='/search',
            data={"client": "ubuntu", "q": "airflow"},
            headers={},
            dag=self.dag,
        )
        op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)

    @mock.patch('requests.Session', FakeSession)
    def test_get_response_check(self):
        # The canned FakeSession body contains 'apache/airflow'.
        op = SimpleHttpOperator(
            task_id='get_op',
            method='GET',
            endpoint='/search',
            data={"client": "ubuntu", "q": "airflow"},
            response_check=lambda response: ("apache/airflow" in response.text),
            headers={},
            dag=self.dag,
        )
        op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)

    @mock.patch('requests.Session', FakeSession)
    def test_sensor(self):
        # FakeSession.prepare_request appends '/<ds>' when a 'date' param is
        # templated in, which this response_check relies on.
        sensor = HttpSensor(
            task_id='http_sensor_check',
            http_conn_id='http_default',
            endpoint='/search',
            request_params={"client": "ubuntu", "q": "airflow", 'date': '{{ds}}'},
            headers={},
            response_check=lambda response: (
                "apache/airflow/" + DEFAULT_DATE.strftime('%Y-%m-%d') in response.text
            ),
            poke_interval=5,
            timeout=15,
            dag=self.dag,
        )
        sensor.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
| apache-2.0 |
epssy/hue | desktop/core/ext-py/Django-1.6.10/django/conf/locale/sv/formats.py | 118 | 1568 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'Y-m-d'
SHORT_DATETIME_FORMAT = 'Y-m-d H:i'
FIRST_DAY_OF_WEEK = 1  # Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = (
    '%Y-%m-%d',  # '2006-10-25'
    '%m/%d/%Y',  # '10/25/2006'
    '%m/%d/%y',  # '10/25/06'
)
DATETIME_INPUT_FORMATS = (
    '%Y-%m-%d %H:%M:%S',     # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M:%S.%f',  # '2006-10-25 14:30:59.000200'
    '%Y-%m-%d %H:%M',        # '2006-10-25 14:30'
    '%Y-%m-%d',              # '2006-10-25'
    '%m/%d/%Y %H:%M:%S',     # '10/25/2006 14:30:59'
    '%m/%d/%Y %H:%M:%S.%f',  # '10/25/2006 14:30:59.000200'
    '%m/%d/%Y %H:%M',        # '10/25/2006 14:30'
    '%m/%d/%Y',              # '10/25/2006'
    '%m/%d/%y %H:%M:%S',     # '10/25/06 14:30:59'
    '%m/%d/%y %H:%M:%S.%f',  # '10/25/06 14:30:59.000200'
    '%m/%d/%y %H:%M',        # '10/25/06 14:30'
    '%m/%d/%y',              # '10/25/06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0'  # non-breaking space
NUMBER_GROUPING = 3
| apache-2.0 |
hplustree/trove | trove/guestagent/datastore/experimental/couchbase/manager.py | 3 | 4570 | # Copyright (c) 2013 eBay Software Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_log import log as logging
from trove.common.i18n import _
from trove.common import instance as rd_instance
from trove.common.notification import EndNotification
from trove.guestagent import backup
from trove.guestagent.datastore.experimental.couchbase import service
from trove.guestagent.datastore.experimental.couchbase import system
from trove.guestagent.datastore import manager
from trove.guestagent import volume
LOG = logging.getLogger(__name__)
class Manager(manager.Manager):
    """
    This is Couchbase Manager class. It is dynamically loaded
    based off of the datastore of the trove instance
    """

    def __init__(self):
        self.appStatus = service.CouchbaseAppStatus()
        self.app = service.CouchbaseApp(self.appStatus)
        super(Manager, self).__init__('couchbase')

    @property
    def status(self):
        # Status reporter consumed by the guest-agent base Manager.
        return self.appStatus

    def reset_configuration(self, context, configuration):
        # Replace the Couchbase configuration with the given contents.
        self.app.reset_configuration(configuration)

    def do_prepare(self, context, packages, databases, memory_mb, users,
                   device_path, mount_point, backup_info,
                   config_contents, root_password, overrides,
                   cluster_config, snapshot):
        """This is called from prepare in the base class."""
        self.app.install_if_needed(packages)
        if device_path:
            device = volume.VolumeDevice(device_path)
            # unmount if device is already mounted
            device.unmount_device(device_path)
            device.format()
            device.mount(mount_point)
            LOG.debug('Mounted the volume (%s).' % device_path)
        self.app.start_db_with_conf_changes(config_contents)
        LOG.debug('Securing couchbase now.')
        self.app.initial_setup()
        if backup_info:
            LOG.debug('Now going to perform restore.')
            self._perform_restore(backup_info,
                                  context,
                                  mount_point)

    def restart(self, context):
        """
        Restart this couchbase instance.
        This method is called when the guest agent
        gets a restart message from the taskmanager.
        """
        self.app.restart()

    def start_db_with_conf_changes(self, context, config_contents):
        # Apply new configuration contents and (re)start the service.
        self.app.start_db_with_conf_changes(config_contents)

    def stop_db(self, context, do_not_start_on_reboot=False):
        """
        Stop this couchbase instance.
        This method is called when the guest agent
        gets a stop message from the taskmanager.
        """
        self.app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot)

    def enable_root(self, context):
        # Enable superuser access and return the generated credentials.
        LOG.debug("Enabling root.")
        return self.app.enable_root()

    def enable_root_with_password(self, context, root_password=None):
        return self.app.enable_root(root_password)

    def is_root_enabled(self, context):
        # Root is considered enabled once the password file has been written.
        LOG.debug("Checking if root is enabled.")
        return os.path.exists(system.pwd_file)

    def _perform_restore(self, backup_info, context, restore_location):
        """
        Restores all couchbase buckets and their documents from the
        backup.
        """
        LOG.info(_("Restoring database from backup %s") %
                 backup_info['id'])
        try:
            backup.restore(context, backup_info, restore_location)
        except Exception as e:
            # Mark the instance FAILED before re-raising so the taskmanager
            # sees the restore failure.
            LOG.error(_("Error performing restore from backup %s") %
                      backup_info['id'])
            LOG.error(e)
            self.status.set_status(rd_instance.ServiceStatuses.FAILED)
            raise
        LOG.info(_("Restored database successfully"))

    def create_backup(self, context, backup_info):
        """
        Backup all couchbase buckets and their documents.
        """
        with EndNotification(context):
            backup.backup(context, backup_info)
| apache-2.0 |
sadmansk/servo | tests/wpt/web-platform-tests/webvtt/tools/categorize_results.py | 89 | 3940 | import os
import sys
import json
import fnmatch
# Results whose test path does not start with this prefix are ignored.
TEST_DIR = "/webvtt/"
# Categories file, resolved relative to this script's directory.
CATEGORIES_FILE = "../categories.json"
class Test:
    """A single subtest result extracted from a wpt results file."""

    def __init__(self, file, name, status, message):
        self.file = file
        self.name = name
        self.status = status
        self.message = message
        self.passed = (status == 'PASS')
        self.categories = []

    @classmethod
    def from_json(cls, json):
        """Build a list of Test objects from one wpt result entry.

        Entries outside TEST_DIR yield an empty list.  When the harness
        status is not 'OK', every subtest inherits the harness
        status/message instead of its own.
        """
        path = json["test"]
        if not path.startswith(TEST_DIR):
            return []
        path = path[len(TEST_DIR):]
        harness_status = json["status"]
        harness_message = json["message"]
        results = []
        for sub in json["subtests"]:
            if harness_status == 'OK':
                sub_status, sub_message = sub["status"], sub["message"]
            else:
                sub_status, sub_message = harness_status, harness_message
            results.append(cls(path, sub["name"], sub_status, sub_message))
        return results
class Category:
    """A named group of tests; several alias names may map to one category."""

    def __init__(self, names):
        self.names = set(names)
        # Maps each assigned Test to the category name it was filed under.
        self.tests = {}

    @classmethod
    def from_json(cls, json):
        return cls(json)

    def add_test(self, name, test):
        self.tests[test] = name

    def __contains__(self, name):
        return name in self.names
def parse_results(file):
    """Load a wpt results JSON file and flatten it into a list of Test objects."""
    data = json.load(file)
    tests = []
    for result in data["results"]:
        tests.extend(Test.from_json(result))
    return tests
def parse_categories(file, tests, categories=None, categories_map=None):
    """Read a categories JSON file (and its subcategory files, recursively)
    and assign each matching test in *tests* to its categories.

    :param file: open file object for the categories JSON document.
    :param tests: list of Test objects to classify (mutated in place).
    :param categories: accumulator list shared across recursive calls.
    :param categories_map: alias-name -> Category map inherited from callers.
    :returns: the accumulated list of Category objects.
    """
    data = json.load(file)
    basepath = os.path.dirname(file.name)

    categories = categories or []
    # Copy the map so aliases added at this level don't leak to the caller.
    if categories_map:
        categories_map = dict(categories_map)
    else:
        categories_map = {}

    if ":categories" in data:
        for cat_data in data[":categories"]:
            category = Category.from_json(cat_data)
            categories.append(category)
            for name in category.names:
                categories_map[name] = category

    for pattern, category_name in data.items():
        if pattern.startswith(":"):
            # ":categories"/":subcategories" are directives, not patterns.
            continue
        category = categories_map[category_name]
        file_pattern = os.path.normpath(os.path.join(basepath, pattern))
        for test in tests:
            if fnmatch.fnmatch(test.name, file_pattern) or fnmatch.fnmatch(test.file, file_pattern):
                category.add_test(category_name, test)
                test.categories.append(category)

    if ":subcategories" in data:
        for subcat_name in data[":subcategories"]:
            path = os.path.join(basepath, subcat_name)
            # FIX: use a context manager -- the original leaked the handle.
            with open(path, "r") as subfile:
                parse_categories(subfile, tests, categories, categories_map)

    return categories
def main(argv):
    """Entry point: read wpt results, categorize them, print pass rates."""
    # Guard clause: exactly one argument (a path, or '-' for stdin).
    if len(argv) != 1:
        print("USAGE: python3 categorize_results.py <file>")
        print("<file>\tA file containing wpt results. Or `-` for reading results from stdin.")
        return

    if argv[0] == '-':
        results_file = sys.stdin
    else:
        results_file = open(argv[0], "r")
    try:
        tests = parse_results(results_file)
    finally:
        # FIX: close the results file (the original leaked it); never
        # close stdin, though.
        if results_file is not sys.stdin:
            results_file.close()

    filepath = os.path.dirname(__file__)
    categories_path = os.path.join(filepath, CATEGORIES_FILE)
    # FIX: context manager instead of a never-closed open().
    with open(categories_path, "r") as categories_file:
        categories = parse_categories(categories_file, tests)

    for category in categories:
        # Bucket this category's tests by the alias they were added under.
        tests_by_name = {name: [] for name in category.names}
        for test, name in category.tests.items():
            tests_by_name[name].append(test)
        for name in category.names:
            test_group = tests_by_name[name]
            amount = len(test_group)
            if amount == 0:
                continue
            passed = sum(1 for test in test_group if test.passed)
            print("{}:\t{}/{} - {}%".format(name, passed, amount, round(passed / amount * 100, 2)))


if __name__ == "__main__":
    main(sys.argv[1:])
| mpl-2.0 |
worldbank/cv4ag | modules/gdal_polygonize.py | 2 | 6981 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#******************************************************************************
# $Id$
#
# Project: GDAL Python Interface
# Purpose: Application for converting raster data to a vector polygon layer.
# Author: Frank Warmerdam, warmerdam@pobox.com
#
#******************************************************************************
# Copyright (c) 2008, Frank Warmerdam
# Copyright (c) 2009-2013, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#******************************************************************************
import sys
from osgeo import gdal
from osgeo import ogr
from osgeo import osr
def Usage():
    """Print the command-line synopsis and abort with exit status 1."""
    print("""
gdal_polygonize [-8] [-nomask] [-mask filename] raster_file [-b band|mask]
[-q] [-f ogr_format] out_file [layer] [fieldname]
""")
    sys.exit(1)
# =============================================================================
# Mainline
# =============================================================================

# Defaults for all command-line options. NOTE(review): 'format' shadows the
# builtin; kept as-is since later sections of this script reference it.
format = 'GML'
options = []
quiet_flag = 0
src_filename = None
src_band_n = 1        # band number, or the string 'mask' / 'mask,<n>'
dst_filename = None
dst_layername = None
dst_fieldname = None
dst_field = -1
mask = 'default'      # 'default' (band mask), 'none', or a mask filename

gdal.AllRegister()
# Let GDAL pre-process general options (--optfile etc.) before parsing.
argv = gdal.GeneralCmdLineProcessor( sys.argv )
if argv is None:
    sys.exit( 0 )

# Parse command line arguments.
i = 1
while i < len(argv):
    arg = argv[i]

    if arg == '-f':
        i = i + 1
        format = argv[i]

    elif arg == '-q' or arg == '-quiet':
        quiet_flag = 1

    elif arg == '-8':
        # Use 8-connectedness when tracing polygons.
        options.append('8CONNECTED=8')

    elif arg == '-nomask':
        mask = 'none'

    elif arg == '-mask':
        i = i + 1
        mask = argv[i]

    elif arg == '-b':
        i = i + 1
        # '-b mask' or '-b mask,<n>' selects a mask band; otherwise a number.
        if argv[i].startswith('mask'):
            src_band_n = argv[i]
        else:
            src_band_n = int(argv[i])

    # Remaining positional arguments, in order:
    elif src_filename is None:
        src_filename = argv[i]

    elif dst_filename is None:
        dst_filename = argv[i]

    elif dst_layername is None:
        dst_layername = argv[i]

    elif dst_fieldname is None:
        dst_fieldname = argv[i]

    else:
        Usage()

    i = i + 1

if src_filename is None or dst_filename is None:
    Usage()

if dst_layername is None:
    dst_layername = 'out'
# =============================================================================
# Verify we have next gen bindings with the polygonize method.
# =============================================================================
try:
    gdal.Polygonize
except AttributeError:  # narrowed from a bare 'except:'
    print('')
    print('gdal.Polygonize() not available. You are likely using "old gen"')
    print('bindings or an older version of the next gen bindings.')
    print('')
    sys.exit(1)

# =============================================================================
# Open source file
# =============================================================================
src_ds = gdal.Open( src_filename )

if src_ds is None:
    print('Unable to open %s' % src_filename)
    sys.exit(1)

# Select the band (or mask band) to polygonize.
if src_band_n == 'mask':
    srcband = src_ds.GetRasterBand(1).GetMaskBand()
    # Workaround the fact that most source bands have no dataset attached
    options.append('DATASET_FOR_GEOREF=' + src_filename)
elif isinstance(src_band_n, str) and src_band_n.startswith('mask,'):
    srcband = src_ds.GetRasterBand(int(src_band_n[len('mask,'):])).GetMaskBand()
    # Workaround the fact that most source bands have no dataset attached
    options.append('DATASET_FOR_GEOREF=' + src_filename)
else:
    srcband = src_ds.GetRasterBand(src_band_n)

# FIX: the original compared with "mask is 'default'" -- identity comparison
# against a string literal is implementation-dependent (and a SyntaxWarning
# on Python >= 3.8); compare by value instead.
if mask == 'default':
    maskband = srcband.GetMaskBand()
elif mask == 'none':
    maskband = None
else:
    mask_ds = gdal.Open( mask )
    maskband = mask_ds.GetRasterBand(1)
# =============================================================================
# Try opening the destination file as an existing file.
# =============================================================================
try:
    # Silence the "file does not exist" error that ogr.Open would emit.
    gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
    dst_ds = ogr.Open( dst_filename, update=1 )
    gdal.PopErrorHandler()
except:
    dst_ds = None

# =============================================================================
# Create output file.
# =============================================================================
if dst_ds is None:
    drv = ogr.GetDriverByName(format)
    if not quiet_flag:
        print('Creating output %s of format %s.' % (dst_filename, format))
    dst_ds = drv.CreateDataSource( dst_filename )

# =============================================================================
# Find or create destination layer.
# =============================================================================
try:
    dst_layer = dst_ds.GetLayerByName(dst_layername)
except:
    dst_layer = None

if dst_layer is None:
    # New layer: inherit the source's spatial reference when available.
    srs = None
    if src_ds.GetProjectionRef() != '':
        srs = osr.SpatialReference()
        srs.ImportFromWkt( src_ds.GetProjectionRef() )

    dst_layer = dst_ds.CreateLayer(dst_layername, geom_type=ogr.wkbPolygon, srs = srs )

    if dst_fieldname is None:
        dst_fieldname = 'DN'

    # The integer field that receives each polygon's pixel value.
    fd = ogr.FieldDefn( dst_fieldname, ogr.OFTInteger )
    dst_layer.CreateField( fd )
    dst_field = 0
else:
    if dst_fieldname is not None:
        dst_field = dst_layer.GetLayerDefn().GetFieldIndex(dst_fieldname)
        if dst_field < 0:
            print("Warning: cannot find field '%s' in layer '%s'" % (dst_fieldname, dst_layername))

# =============================================================================
# Invoke algorithm.
# =============================================================================
if quiet_flag:
    prog_func = None
else:
    prog_func = gdal.TermProgress

result = gdal.Polygonize( srcband, maskband, dst_layer, dst_field, options,
                          callback = prog_func )

# Dereference everything so GDAL/OGR flush and close the datasets.
srcband = None
src_ds = None
dst_ds = None
mask_ds = None
| mit |
pdellaert/ansible | contrib/inventory/gce.py | 37 | 19275 | #!/usr/bin/env python
# Copyright: (c) 2013, Google Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
'''
GCE external inventory script
=================================
Generates inventory that Ansible can understand by making API requests
Google Compute Engine via the libcloud library. Full install/configuration
instructions for the gce* modules can be found in the comments of
ansible/test/gce_tests.py.
When run against a specific host, this script returns the following variables
based on the data obtained from the libcloud Node object:
- gce_uuid
- gce_id
- gce_image
- gce_machine_type
- gce_private_ip
- gce_public_ip
- gce_name
- gce_description
- gce_status
- gce_zone
- gce_tags
- gce_metadata
- gce_network
- gce_subnetwork
When run in --list mode, instances are grouped by the following categories:
- zone:
zone group name examples are us-central1-b, europe-west1-a, etc.
- instance tags:
An entry is created for each tag. For example, if you have two instances
with a common tag called 'foo', they will both be grouped together under
the 'tag_foo' name.
- network name:
the name of the network is appended to 'network_' (e.g. the 'default'
network will result in a group named 'network_default')
- machine type
types follow a pattern like n1-standard-4, g1-small, etc.
- running status:
group name prefixed with 'status_' (e.g. status_running, status_stopped,..)
- image:
when using an ephemeral/scratch disk, this will be set to the image name
used when creating the instance (e.g. debian-7-wheezy-v20130816). when
your instance was created with a root persistent disk it will be set to
'persistent_disk' since there is no current way to determine the image.
Examples:
Execute uname on all instances in the us-central1-a zone
$ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a"
Use the GCE inventory script to print out instance specific information
$ contrib/inventory/gce.py --host my_instance
Author: Eric Johnson <erjohnso@google.com>
Contributors: Matt Hite <mhite@hotmail.com>, Tom Melendez <supertom@google.com>,
John Roach <johnroach1985@gmail.com>
Version: 0.0.4
'''
try:
import pkg_resources
except ImportError:
# Use pkg_resources to find the correct versions of libraries and set
# sys.path appropriately when there are multiversion installs. We don't
# fail here as there is code that better expresses the errors where the
# library is used.
pass
USER_AGENT_PRODUCT = "Ansible-gce_inventory_plugin"
USER_AGENT_VERSION = "v2"
import sys
import os
import argparse
from time import time
from ansible.module_utils.six.moves import configparser
import logging
logging.getLogger('libcloud.common.google').addHandler(logging.NullHandler())
import json
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
_ = Provider.GCE
except Exception:
sys.exit("GCE inventory script requires libcloud >= 0.13")
class CloudInventoryCache(object):
    """Disk-backed JSON cache for cloud inventory data."""

    def __init__(self, cache_name='ansible-cloud-cache', cache_path='/tmp',
                 cache_max_age=300):
        # Expand '~' and create the directory on first use.
        directory = os.path.expanduser(cache_path)
        if not os.path.exists(directory):
            os.makedirs(directory)
        self.cache_path_cache = os.path.join(directory, cache_name)
        self.cache_max_age = cache_max_age

    def is_valid(self, max_age=None):
        """Return True while the cache file exists and is younger than
        *max_age* seconds (defaults to the configured cache_max_age)."""
        ttl = self.cache_max_age if max_age is None else max_age
        if not os.path.isfile(self.cache_path_cache):
            return False
        expiry = os.path.getmtime(self.cache_path_cache) + ttl
        return expiry > time()

    def get_all_data_from_cache(self, filename=''):
        """Read the cache file (or *filename*) and return the parsed JSON."""
        source = filename or self.cache_path_cache
        with open(source, 'r') as handle:
            raw = handle.read()
        return json.loads(raw)

    def write_to_cache(self, data, filename=''):
        """Serialize *data* to the cache file (or *filename*). Returns True."""
        target = filename or self.cache_path_cache
        with open(target, 'w') as handle:
            handle.write(json.dumps(data))
        return True
class GceInventory(object):
def __init__(self):
    """Read configuration, build the inventory (from cache or via API
    calls), print it as JSON and exit the process."""
    # Cache object
    self.cache = None
    # dictionary containing inventory read from disk
    self.inventory = {}

    # Read settings and parse CLI arguments
    self.parse_cli_args()
    self.config = self.get_config()
    self.drivers = self.get_gce_drivers()
    self.ip_type = self.get_inventory_options()
    if self.ip_type:
        self.ip_type = self.ip_type.lower()

    # Cache management
    start_inventory_time = time()
    cache_used = False
    if self.args.refresh_cache or not self.cache.is_valid():
        self.do_api_calls_update_cache()
    else:
        self.load_inventory_from_cache()
        cache_used = True
    # FIX: single stats assignment -- the original wrote a
    # {'use_cache': True} dict here that was immediately overwritten.
    self.inventory['_meta']['stats'] = {
        'inventory_load_time': time() - start_inventory_time,
        'cache_used': cache_used
    }

    # Just display data for specific host
    if self.args.host:
        print(self.json_format_dict(
            self.inventory['_meta']['hostvars'][self.args.host],
            pretty=self.args.pretty))
    else:
        # Otherwise, assume user wants all instances grouped.
        # (The unused parse_env_zones() call that was here is gone:
        # zone filtering already happened when the data was built.)
        print(self.json_format_dict(self.inventory,
                                    pretty=self.args.pretty))
    sys.exit(0)
def get_config(self):
    """
    Reads the settings from the gce.ini file.

    Populates a ConfigParser object with defaults and
    attempts to read an .ini-style configuration from the filename
    specified in GCE_INI_PATH. If the environment variable is
    not present, the filename defaults to gce.ini in the current
    working directory.

    Side effects: sets self.instance_states, self.instance_tags and
    self.cache. Returns the ConfigParser object.
    """
    gce_ini_default_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), "gce.ini")
    gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)

    # Create a ConfigParser.
    # This provides empty defaults to each key, so that environment
    # variable configuration (as opposed to INI configuration) is able
    # to work.
    config = configparser.ConfigParser(defaults={
        'gce_service_account_email_address': '',
        'gce_service_account_pem_file_path': '',
        'gce_project_id': '',
        'gce_zone': '',
        'libcloud_secrets': '',
        'instance_tags': '',
        'inventory_ip_type': '',
        'cache_path': '~/.ansible/tmp',
        'cache_max_age': '300'
    })
    # Ensure all sections exist even when the ini file is missing.
    if 'gce' not in config.sections():
        config.add_section('gce')
    if 'inventory' not in config.sections():
        config.add_section('inventory')
    if 'cache' not in config.sections():
        config.add_section('cache')

    config.read(gce_ini_path)

    #########
    # Section added for processing ini settings
    #########

    # Set the instance_states filter based on config file options
    self.instance_states = []
    if config.has_option('gce', 'instance_states'):
        states = config.get('gce', 'instance_states')
        # Ignore if instance_states is an empty string.
        if states:
            self.instance_states = states.split(',')

    # Set the instance_tags filter, env var overrides config from file
    # and cli param overrides all
    if self.args.instance_tags:
        self.instance_tags = self.args.instance_tags
    else:
        self.instance_tags = os.environ.get(
            'GCE_INSTANCE_TAGS', config.get('gce', 'instance_tags'))
    if self.instance_tags:
        self.instance_tags = self.instance_tags.split(',')

    # Caching
    cache_path = config.get('cache', 'cache_path')
    cache_max_age = config.getint('cache', 'cache_max_age')
    # TODO(supertom): support project-specific caches
    cache_name = 'ansible-gce.cache'
    self.cache = CloudInventoryCache(cache_path=cache_path,
                                     cache_max_age=cache_max_age,
                                     cache_name=cache_name)

    return config
def get_inventory_options(self):
"""Determine inventory options. Environment variables always
take precedence over configuration files."""
ip_type = self.config.get('inventory', 'inventory_ip_type')
# If the appropriate environment variables are set, they override
# other configuration
ip_type = os.environ.get('INVENTORY_IP_TYPE', ip_type)
return ip_type
def get_gce_drivers(self):
    """Determine the GCE authorization settings and return a list of
    libcloud drivers.

    Credential resolution order: an importable 'secrets' module, then
    the libcloud_secrets file from the config, then explicit config
    values -- with environment variables overriding args/kwargs last.
    """
    # Attempt to get GCE params from a configuration file, if one
    # exists.
    secrets_path = self.config.get('gce', 'libcloud_secrets')
    secrets_found = False

    try:
        import secrets
        args = list(secrets.GCE_PARAMS)
        kwargs = secrets.GCE_KEYWORD_PARAMS
        secrets_found = True
    except Exception:
        pass

    if not secrets_found and secrets_path:
        if not secrets_path.endswith('secrets.py'):
            err = "Must specify libcloud secrets file as "
            err += "/absolute/path/to/secrets.py"
            sys.exit(err)
        # Make the directory containing secrets.py importable.
        sys.path.append(os.path.dirname(secrets_path))
        try:
            import secrets
            args = list(getattr(secrets, 'GCE_PARAMS', []))
            kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
            secrets_found = True
        except Exception:
            pass

    if not secrets_found:
        args = [
            self.config.get('gce', 'gce_service_account_email_address'),
            self.config.get('gce', 'gce_service_account_pem_file_path')
        ]
        kwargs = {'project': self.config.get('gce', 'gce_project_id'),
                  'datacenter': self.config.get('gce', 'gce_zone')}

    # If the appropriate environment variables are set, they override
    # other configuration; process those into our args and kwargs.
    args[0] = os.environ.get('GCE_EMAIL', args[0])
    args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
    # GCE_CREDENTIALS_FILE_PATH wins over GCE_PEM_FILE_PATH when both set.
    args[1] = os.environ.get('GCE_CREDENTIALS_FILE_PATH', args[1])
    kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])
    kwargs['datacenter'] = os.environ.get('GCE_ZONE', kwargs['datacenter'])

    # One driver per comma-separated project id.
    gce_drivers = []
    projects = kwargs['project'].split(',')
    for project in projects:
        kwargs['project'] = project
        gce = get_driver(Provider.GCE)(*args, **kwargs)
        gce.connection.user_agent_append(
            '%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION),
        )
        gce_drivers.append(gce)
    return gce_drivers
def parse_env_zones(self):
    """Parse the comma-separated GCE_ZONE environment variable into a
    list of zone names. When non-empty, the result is used to filter
    the zone groups produced by group_instances()."""
    import csv
    raw = os.environ.get('GCE_ZONE', "")
    # csv handles quoting/whitespace; a single "line" yields one row.
    rows = list(csv.reader([raw], skipinitialspace=True))
    return list(rows[0])
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(
description='Produce an Ansible Inventory file based on GCE')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--host', action='store',
help='Get all information about an instance')
parser.add_argument('--instance-tags', action='store',
help='Only include instances with this tags, separated by comma')
parser.add_argument('--pretty', action='store_true', default=False,
help='Pretty format (default: False)')
parser.add_argument(
'--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests (default: False - use cache files)')
self.args = parser.parse_args()
def node_to_dict(self, inst):
    """Flatten a libcloud Node object into the dict of gce_* hostvars.

    Returns {} when *inst* is None."""
    md = {}

    if inst is None:
        return {}

    # Flatten metadata 'items' entries into a plain key -> value dict.
    if 'items' in inst.extra['metadata']:
        for entry in inst.extra['metadata']['items']:
            md[entry['key']] = entry['value']

    # Network/subnetwork names are the last component of their URLs.
    net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
    subnet = None
    if 'subnetwork' in inst.extra['networkInterfaces'][0]:
        subnet = inst.extra['networkInterfaces'][0]['subnetwork'].split('/')[-1]
    # default to external IP unless user has specified they prefer internal
    if self.ip_type == 'internal':
        ssh_host = inst.private_ips[0]
    else:
        ssh_host = inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]

    return {
        'gce_uuid': inst.uuid,
        'gce_id': inst.id,
        'gce_image': inst.image,
        'gce_machine_type': inst.size,
        'gce_private_ip': inst.private_ips[0],
        'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None,
        'gce_name': inst.name,
        'gce_description': inst.extra['description'],
        'gce_status': inst.extra['status'],
        'gce_zone': inst.extra['zone'].name,
        'gce_tags': inst.extra['tags'],
        'gce_metadata': md,
        'gce_network': net,
        'gce_subnetwork': subnet,
        # Hosts don't have a public name, so we add an IP
        'ansible_ssh_host': ssh_host
    }
def load_inventory_from_cache(self):
''' Loads inventory from JSON on disk. '''
try:
self.inventory = self.cache.get_all_data_from_cache()
hosts = self.inventory['_meta']['hostvars']
except Exception as e:
print(
"Invalid inventory file %s. Please rebuild with -refresh-cache option."
% (self.cache.cache_path_cache))
raise
def do_api_calls_update_cache(self):
    ''' Do API calls and save data in cache. '''
    # Zones from the GCE_ZONE env var filter which zone groups are built.
    zones = self.parse_env_zones()
    data = self.group_instances(zones)
    self.cache.write_to_cache(data)
    self.inventory = data

def list_nodes(self):
    """Fetch nodes from every configured driver, following pagination.

    NOTE(review): this relies on libcloud mutating gce_params in place
    (adding 'pageToken' while more pages remain) -- verify against the
    installed libcloud version."""
    all_nodes = []
    params, more_results = {'maxResults': 500}, True
    while more_results:
        for driver in self.drivers:
            driver.connection.gce_params = params
            all_nodes.extend(driver.list_nodes())
            more_results = 'pageToken' in params
    return all_nodes
def group_instances(self, zones=None):
'''Group all instances'''
groups = {}
meta = {}
meta["hostvars"] = {}
for node in self.list_nodes():
# This check filters on the desired instance states defined in the
# config file with the instance_states config option.
#
# If the instance_states list is _empty_ then _ALL_ states are returned.
#
# If the instance_states list is _populated_ then check the current
# state against the instance_states list
if self.instance_states and not node.extra['status'] in self.instance_states:
continue
# This check filters on the desired instance tags defined in the
# config file with the instance_tags config option, env var GCE_INSTANCE_TAGS,
# or as the cli param --instance-tags.
#
# If the instance_tags list is _empty_ then _ALL_ instances are returned.
#
# If the instance_tags list is _populated_ then check the current
# instance tags against the instance_tags list. If the instance has
# at least one tag from the instance_tags list, it is returned.
if self.instance_tags and not set(self.instance_tags) & set(node.extra['tags']):
continue
name = node.name
meta["hostvars"][name] = self.node_to_dict(node)
zone = node.extra['zone'].name
# To avoid making multiple requests per zone
# we list all nodes and then filter the results
if zones and zone not in zones:
continue
if zone in groups:
groups[zone].append(name)
else:
groups[zone] = [name]
tags = node.extra['tags']
for t in tags:
if t.startswith('group-'):
tag = t[6:]
else:
tag = 'tag_%s' % t
if tag in groups:
groups[tag].append(name)
else:
groups[tag] = [name]
net = node.extra['networkInterfaces'][0]['network'].split('/')[-1]
net = 'network_%s' % net
if net in groups:
groups[net].append(name)
else:
groups[net] = [name]
machine_type = node.size
if machine_type in groups:
groups[machine_type].append(name)
else:
groups[machine_type] = [name]
image = node.image or 'persistent_disk'
if image in groups:
groups[image].append(name)
else:
groups[image] = [name]
status = node.extra['status']
stat = 'status_%s' % status.lower()
if stat in groups:
groups[stat].append(name)
else:
groups[stat] = [name]
for private_ip in node.private_ips:
groups[private_ip] = [name]
if len(node.public_ips) >= 1:
for public_ip in node.public_ips:
groups[public_ip] = [name]
groups["_meta"] = meta
return groups
def json_format_dict(self, data, pretty=False):
    """Serialize *data* to a JSON string; sorted and indented when
    *pretty* is requested."""
    if pretty:
        return json.dumps(data, sort_keys=True, indent=2)
    return json.dumps(data)
# Run the script
if __name__ == '__main__':
    # Constructing GceInventory does all the work (prints JSON and exits).
    GceInventory()
| gpl-3.0 |
thomasf/offlineimap | offlineimap/folder/Base.py | 4 | 40980 | # Base folder support
# Copyright (C) 2002-2015 John Goerzen & contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os.path
import re
import time
from sys import exc_info
from offlineimap import threadutil
from offlineimap import globals
from offlineimap.ui import getglobalui
from offlineimap.error import OfflineImapError
import offlineimap.accounts
class BaseFolder(object):
def __init__(self, name, repository):
    """
    :param name: Path & name of folder minus root or reference
    :param repository: Repository() in which the folder is.
    """
    self.ui = getglobalui()
    # Save original name for folderfilter operations
    self.ffilter_name = name
    # Top level dir name is always ''
    self.root = None
    self.name = name if not name == self.getsep() else ''
    self.newmail_hook = None
    # Only set the newmail_hook if the IMAP folder is named 'INBOX'
    if self.name == 'INBOX':
        self.newmail_hook = repository.newmail_hook
    self.have_newmail = False
    self.repository = repository
    self.visiblename = repository.nametrans(name)
    # In case the visiblename becomes '.' or '/' (top-level) we use
    # '' as that is the name that e.g. the Maildir scanning will
    # return for the top-level dir.
    if self.visiblename == self.getsep():
        self.visiblename = ''

    self.config = repository.getconfig()
    # utime_from_header: global default, overridable per repository.
    utime_from_header_global = self.config.getdefaultboolean(
        "general", "utime_from_header", False)
    repo = "Repository " + repository.name
    self._utime_from_header = self.config.getdefaultboolean(repo,
        "utime_from_header", utime_from_header_global)

    # Do we need to use mail timestamp for filename prefix?
    filename_use_mail_timestamp_global = self.config.getdefaultboolean(
        "general", "filename_use_mail_timestamp", False)
    repo = "Repository " + repository.name
    self._filename_use_mail_timestamp = self.config.getdefaultboolean(repo,
        "filename_use_mail_timestamp", filename_use_mail_timestamp_global)

    # Determine if we're running static or dynamic folder filtering
    # and check filtering status
    self._dynamic_folderfilter = self.config.getdefaultboolean(
        repo, "dynamic_folderfilter", False)
    self._sync_this = repository.should_sync_folder(self.ffilter_name)
    if self._dynamic_folderfilter:
        self.ui.debug('', "Running dynamic folder filtering on '%s'[%s]"%
                      (self.ffilter_name, repository))
    elif not self._sync_this:
        self.ui.debug('', "Filtering out '%s'[%s] due to folderfilter"%
                      (self.ffilter_name, repository))

    # Passes for syncmessagesto; run in this order by the sync driver.
    self.syncmessagesto_passes = [('copying messages' , self.__syncmessagesto_copy),
                                  ('deleting messages' , self.__syncmessagesto_delete),
                                  ('syncing flags' , self.__syncmessagesto_flags)]
def getname(self):
    """Returns name"""
    return self.name

def __str__(self):
    # FIXME: remove calls of this. We have getname().
    return self.name

@property
def accountname(self):
    """Account name as string"""
    return self.repository.accountname

@property
def sync_this(self):
    """Should this folder be synced or is it e.g. filtered out?"""
    if not self._dynamic_folderfilter:
        return self._sync_this
    else:
        # Dynamic filtering: re-evaluate the folderfilter on each access.
        return self.repository.should_sync_folder(self.ffilter_name)

@property
def utime_from_header(self):
    # True when message mtimes should come from the Date: header
    # (see the config handling in __init__).
    return self._utime_from_header
def suggeststhreads(self):
    """Returns true if this folder suggests using threads for actions;
    false otherwise. Probably only IMAP will return true."""
    return 0

def waitforthread(self):
    """Implements method that waits for thread to be usable.
    Should be implemented only for folders that suggest threads."""
    raise NotImplementedError

# XXX: we may need someting like supports_quickstatus() to check
# XXX: if user specifies 'quick' flag for folder that doesn't
# XXX: support quick status queries, so one believes that quick
# XXX: status checks will be done, but it won't really be so.
def quickchanged(self, statusfolder):
    """ Runs quick check for folder changes and returns changed
    status: True -- changed, False -- not changed.

    :param statusfolder: keeps track of the last known folder state.
    """
    # Default implementation: always report a change.
    return True

def getcopyinstancelimit(self):
    """For threading folders, returns the instancelimitname for
    InstanceLimitedThreads."""
    raise NotImplementedError

def storesmessages(self):
    """Should be true for any backend that actually saves message bodies.
    (Almost all of them). False for the LocalStatus backend. Saves
    us from having to slurp up messages just for localstatus purposes."""
    return 1

def getvisiblename(self):
    """The nametrans-transposed name of the folder's name."""
    return self.visiblename

def getexplainedname(self):
    """Name that shows both real and nametrans-mangled values."""
    if self.name == self.visiblename:
        return self.name
    else:
        return "%s [remote name %s]"% (self.visiblename, self.name)

def getrepository(self):
    """Returns the repository object that this folder is within."""
    return self.repository

def getroot(self):
    """Returns the root of the folder, in a folder-specific fashion."""
    return self.root

def getsep(self):
    """Returns the separator for this folder type."""
    return self.sep

def getfullname(self):
    # Root + separator + name when a root exists; bare name otherwise.
    if self.getroot():
        return self.getroot() + self.getsep() + self.getname()
    else:
        return self.getname()
def getfolderbasename(self):
"""Return base file name of file to store Status/UID info in."""
if not self.name:
basename = '.'
else: # Avoid directory hierarchies and file names such as '/'.
basename = self.name.replace('/', '.')
# Replace with literal 'dot' if final path name is '.' as '.' is
# an invalid file name.
basename = re.sub('(^|\/)\.$','\\1dot', basename)
return basename
def check_uidvalidity(self):
"""Tests if the cached UIDVALIDITY match the real current one
If required it saves the UIDVALIDITY value. In this case the
function is not threadsafe. So don't attempt to call it from
concurrent threads.
:returns: Boolean indicating the match. Returns True in case it
implicitely saved the UIDVALIDITY."""
if self.get_saveduidvalidity() != None:
return self.get_saveduidvalidity() == self.get_uidvalidity()
else:
self.save_uidvalidity()
return True
def _getuidfilename(self):
    """provides UIDVALIDITY cache filename for class internal purposes."""
    return os.path.join(self.repository.getuiddir(),
                        self.getfolderbasename())

def get_saveduidvalidity(self):
    """Return the previously cached UIDVALIDITY value

    :returns: UIDVALIDITY as (long) number or None, if None had been
        saved yet."""
    # Memoized in _base_saved_uidvalidity once read from disk (or set
    # by save_uidvalidity()).
    if hasattr(self, '_base_saved_uidvalidity'):
        return self._base_saved_uidvalidity
    uidfilename = self._getuidfilename()
    if not os.path.exists(uidfilename):
        self._base_saved_uidvalidity = None
    else:
        file = open(uidfilename, "rt")
        # NOTE: long() is Python 2 only -- this module targets py2.
        self._base_saved_uidvalidity = long(file.readline().strip())
        file.close()
    return self._base_saved_uidvalidity
def save_uidvalidity(self):
"""Save the UIDVALIDITY value of the folder to the cache
This function is not threadsafe, so don't attempt to call it
from concurrent threads."""
newval = self.get_uidvalidity()
uidfilename = self._getuidfilename()
with open(uidfilename + ".tmp", "wt") as file:
file.write("%d\n"% newval)
os.rename(uidfilename + ".tmp", uidfilename)
self._base_saved_uidvalidity = newval
def get_uidvalidity(self):
    """Retrieve the current connections UIDVALIDITY value

    This function needs to be implemented by each Backend
    :returns: UIDVALIDITY as a (long) number"""
    # Abstract: IMAP/Maildir/etc. backends provide the real value.
    raise NotImplementedError

def cachemessagelist(self):
    """Reads the message list from disk or network and stores it in
    memory for later use. This list will not be re-read from disk or
    memory unless this function is called again."""
    # Abstract: backends populate self.messagelist here.
    raise NotImplementedError
def ismessagelistempty(self):
"""Empty everythings we know about messages."""
if len(self.messagelist.keys()) < 1:
return True
return False
def dropmessagelistcache(self):
"""Empty everythings we know about messages."""
self.messagelist = {}
def getmessagelist(self):
"""Gets the current message list.
You must call cachemessagelist() before calling this function!"""
raise NotImplementedError
def msglist_item_initializer(self, uid):
"""Returns value for empty messagelist element with given UID.
This function must initialize all fields of messagelist item
and must be called every time when one creates new messagelist
entry to ensure that all fields that must be present are present."""
raise NotImplementedError
def uidexists(self, uid):
"""Returns True if uid exists"""
return uid in self.getmessagelist()
def getmessageuidlist(self):
"""Gets a list of UIDs.
You may have to call cachemessagelist() before calling this function!"""
return self.getmessagelist().keys()
def getmessagecount(self):
"""Gets the number of messages."""
return len(self.getmessagelist())
def getmessage(self, uid):
"""Returns the content of the specified message."""
raise NotImplementedError
def getmaxage(self):
""" maxage is allowed to be either an integer or a date of the
form YYYY-mm-dd. This returns a time_struct. """
maxagestr = self.config.getdefault("Account %s"%
self.accountname, "maxage", None)
if maxagestr == None:
return None
# is it a number?
try:
maxage = int(maxagestr)
if maxage < 1:
raise OfflineImapError("invalid maxage value %d"% maxage,
OfflineImapError.ERROR.MESSAGE)
return time.gmtime(time.time() - 60*60*24*maxage)
except ValueError:
pass # maybe it was a date
# is it a date string?
try:
date = time.strptime(maxagestr, "%Y-%m-%d")
if date[0] < 1900:
raise OfflineImapError("maxage led to year %d. "
"Abort syncing."% date[0],
OfflineImapError.ERROR.MESSAGE)
return date
except ValueError:
raise OfflineImapError("invalid maxage value %s"% maxagestr,
OfflineImapError.ERROR.MESSAGE)
def getmaxsize(self):
return self.config.getdefaultint("Account %s"%
self.accountname, "maxsize", None)
def getstartdate(self):
""" Retrieve the value of the configuration option startdate """
datestr = self.config.getdefault("Repository " + self.repository.name,
'startdate', None)
try:
if not datestr:
return None
date = time.strptime(datestr, "%Y-%m-%d")
if date[0] < 1900:
raise OfflineImapError("startdate led to year %d. "
"Abort syncing."% date[0],
OfflineImapError.ERROR.MESSAGE)
return date
except ValueError:
raise OfflineImapError("invalid startdate value %s",
OfflineImapError.ERROR.MESSAGE)
def get_min_uid_file(self):
startuiddir = os.path.join(self.config.getmetadatadir(),
'Repository-' + self.repository.name, 'StartUID')
if not os.path.exists(startuiddir):
os.mkdir(startuiddir, 0o700)
return os.path.join(startuiddir, self.getfolderbasename())
def retrieve_min_uid(self):
uidfile = self.get_min_uid_file()
if not os.path.exists(uidfile):
return None
try:
fd = open(uidfile, 'rt')
min_uid = long(fd.readline().strip())
fd.close()
return min_uid
except:
raise IOError("Can't read %s"% uidfile)
def savemessage(self, uid, content, flags, rtime):
"""Writes a new message, with the specified uid.
If the uid is < 0: The backend should assign a new uid and
return it. In case it cannot assign a new uid, it returns
the negative uid passed in WITHOUT saving the message.
If the backend CAN assign a new uid, but cannot find out what
this UID is (as is the case with some IMAP servers), it
returns 0 but DOES save the message.
IMAP backend should be the only one that can assign a new
uid.
If the uid is > 0, the backend should set the uid to this, if it can.
If it cannot set the uid to that, it will save it anyway.
It will return the uid assigned in any case.
Note that savemessage() does not check against dryrun settings,
so you need to ensure that savemessage is never called in a
dryrun mode."""
raise NotImplementedError
def getmessagetime(self, uid):
"""Return the received time for the specified message."""
raise NotImplementedError
def getmessagemtime(self, uid):
"""Returns the message modification time of the specified message."""
raise NotImplementedError
def getmessageflags(self, uid):
"""Returns the flags for the specified message."""
raise NotImplementedError
def getmessagekeywords(self, uid):
"""Returns the keywords for the specified message."""
raise NotImplementedError
def savemessageflags(self, uid, flags):
"""Sets the specified message's flags to the given set.
Note that this function does not check against dryrun settings,
so you need to ensure that it is never called in a
dryrun mode."""
raise NotImplementedError
def addmessageflags(self, uid, flags):
"""Adds the specified flags to the message's flag set. If a given
flag is already present, it will not be duplicated.
Note that this function does not check against dryrun settings,
so you need to ensure that it is never called in a
dryrun mode.
:param flags: A set() of flags"""
newflags = self.getmessageflags(uid) | flags
self.savemessageflags(uid, newflags)
def addmessagesflags(self, uidlist, flags):
"""Note that this function does not check against dryrun settings,
so you need to ensure that it is never called in a
dryrun mode."""
for uid in uidlist:
if self.uidexists(uid):
self.addmessageflags(uid, flags)
def deletemessageflags(self, uid, flags):
"""Removes each flag given from the message's flag set. If a given
flag is already removed, no action will be taken for that flag.
Note that this function does not check against dryrun settings,
so you need to ensure that it is never called in a
dryrun mode."""
newflags = self.getmessageflags(uid) - flags
self.savemessageflags(uid, newflags)
def deletemessagesflags(self, uidlist, flags):
"""
Note that this function does not check against dryrun settings,
so you need to ensure that it is never called in a
dryrun mode."""
for uid in uidlist:
self.deletemessageflags(uid, flags)
def getmessagelabels(self, uid):
"""Returns the labels for the specified message."""
raise NotImplementedError
def savemessagelabels(self, uid, labels, ignorelabels=set(), mtime=0):
"""Sets the specified message's labels to the given set.
Note that this function does not check against dryrun settings,
so you need to ensure that it is never called in a
dryrun mode."""
raise NotImplementedError
def addmessagelabels(self, uid, labels):
"""Adds the specified labels to the message's labels set. If a given
label is already present, it will not be duplicated.
Note that this function does not check against dryrun settings,
so you need to ensure that it is never called in a
dryrun mode.
:param labels: A set() of labels"""
newlabels = self.getmessagelabels(uid) | labels
self.savemessagelabels(uid, newlabels)
def addmessageslabels(self, uidlist, labels):
"""Note that this function does not check against dryrun settings,
so you need to ensure that it is never called in a
dryrun mode."""
for uid in uidlist:
self.addmessagelabels(uid, labels)
def deletemessagelabels(self, uid, labels):
"""Removes each label given from the message's label set. If a given
label is already removed, no action will be taken for that label.
Note that this function does not check against dryrun settings,
so you need to ensure that it is never called in a
dryrun mode."""
newlabels = self.getmessagelabels(uid) - labels
self.savemessagelabels(uid, newlabels)
def deletemessageslabels(self, uidlist, labels):
"""
Note that this function does not check against dryrun settings,
so you need to ensure that it is never called in a
dryrun mode."""
for uid in uidlist:
self.deletemessagelabels(uid, labels)
def addmessageheader(self, content, linebreak, headername, headervalue):
"""Adds new header to the provided message.
WARNING: This function is a bit tricky, and modifying it in the wrong way,
may easily lead to data-loss.
Arguments:
- content: message content, headers and body as a single string
- linebreak: string that carries line ending
- headername: name of the header to add
- headervalue: value of the header to add
.. note::
The following documentation will not get displayed correctly after being
processed by Sphinx. View the source of this method to read it.
This has to deal with strange corner cases where the header is
missing or empty. Here are illustrations for all the cases,
showing where the header gets inserted and what the end result
is. In each illustration, '+' means the added contents. Note
that these examples assume LF for linebreak, not CRLF, so '\n'
denotes a linebreak and '\n\n' corresponds to the transition
between header and body. However if the linebreak parameter
is set to '\r\n' then you would have to substitute '\r\n' for
'\n' in the below examples.
* Case 1: No '\n\n', leading '\n'
+X-Flying-Pig-Header: i am here\n
\n
This is the body\n
next line\n
* Case 2: '\n\n' at position 0
+X-Flying-Pig-Header: i am here
\n
\n
This is the body\n
next line\n
* Case 3: No '\n\n', no leading '\n'
+X-Flying-Pig-Header: i am here\n
+\n
This is the body\n
next line\n
* Case 4: '\n\n' at non-zero position
Subject: Something wrong with OI\n
From: some@person.at
+\nX-Flying-Pig-Header: i am here
\n
\n
This is the body\n
next line\n
"""
self.ui.debug('', 'addmessageheader: called to add %s: %s'%
(headername, headervalue))
insertionpoint = content.find(linebreak * 2)
if insertionpoint == -1:
self.ui.debug('', 'addmessageheader: headers were missing')
else:
self.ui.debug('', 'addmessageheader: headers end at position %d' % insertionpoint)
mark = '==>EOH<=='
contextstart = max(0, insertionpoint - 100)
contextend = min(len(content), insertionpoint + 100)
self.ui.debug('', 'addmessageheader: header/body transition context (marked by %s): %s' %
(mark, repr(content[contextstart:insertionpoint]) + \
mark + repr(content[insertionpoint:contextend])))
# Hoping for case #4
prefix = linebreak
suffix = ''
# Case #2
if insertionpoint == 0:
prefix = ''
suffix = ''
# Either case #1 or #3
elif insertionpoint == -1:
prefix = ''
suffix = linebreak
insertionpoint = 0
# Case #3: when body starts immediately, without preceding '\n'
# (this shouldn't happen with proper mail messages, but
# we seen many broken ones), we should add '\n' to make
# new (and the only header, in this case) to be properly
# separated from the message body.
if content[0:len(linebreak)] != linebreak:
suffix = suffix + linebreak
self.ui.debug('', 'addmessageheader: insertionpoint = %d'% insertionpoint)
headers = content[0:insertionpoint]
self.ui.debug('', 'addmessageheader: headers = %s'% repr(headers))
new_header = prefix + ("%s: %s" % (headername, headervalue)) + suffix
self.ui.debug('', 'addmessageheader: new_header = ' + repr(new_header))
return headers + new_header + content[insertionpoint:]
def __find_eoh(self, content):
""" Searches for the point where mail headers end.
Either double '\n', or end of string.
Arguments:
- content: contents of the message to search in
Returns: position of the first non-header byte.
"""
eoh_cr = content.find('\n\n')
if eoh_cr == -1:
eoh_cr = len(content)
return eoh_cr
def getmessageheader(self, content, name):
"""Searches for the first occurence of the given header and returns
its value. Header name is case-insensitive.
Arguments:
- contents: message itself
- name: name of the header to be searched
Returns: header value or None if no such header was found
"""
self.ui.debug('', 'getmessageheader: called to get %s'% name)
eoh = self.__find_eoh(content)
self.ui.debug('', 'getmessageheader: eoh = %d'% eoh)
headers = content[0:eoh]
self.ui.debug('', 'getmessageheader: headers = %s'% repr(headers))
m = re.search('^%s:(.*)$' % name, headers, flags = re.MULTILINE | re.IGNORECASE)
if m:
return m.group(1).strip()
else:
return None
def getmessageheaderlist(self, content, name):
"""Searches for the given header and returns a list of values for
that header.
Arguments:
- contents: message itself
- name: name of the header to be searched
Returns: list of header values or emptylist if no such header was found
"""
self.ui.debug('', 'getmessageheaderlist: called to get %s' % name)
eoh = self.__find_eoh(content)
self.ui.debug('', 'getmessageheaderlist: eoh = %d' % eoh)
headers = content[0:eoh]
self.ui.debug('', 'getmessageheaderlist: headers = %s' % repr(headers))
return re.findall('^%s:(.*)$' % name, headers, flags = re.MULTILINE | re.IGNORECASE)
def deletemessageheaders(self, content, header_list):
"""Deletes headers in the given list from the message content.
Arguments:
- content: message itself
- header_list: list of headers to be deleted or just the header name
We expect our message to have '\n' as line endings.
"""
if type(header_list) != type([]):
header_list = [header_list]
self.ui.debug('', 'deletemessageheaders: called to delete %s'% (header_list))
if not len(header_list): return content
eoh = self.__find_eoh(content)
self.ui.debug('', 'deletemessageheaders: end of headers = %d'% eoh)
headers = content[0:eoh]
rest = content[eoh:]
self.ui.debug('', 'deletemessageheaders: headers = %s'% repr(headers))
new_headers = []
for h in headers.split('\n'):
keep_it = True
for trim_h in header_list:
if len(h) > len(trim_h) and h[0:len(trim_h)+1] == (trim_h + ":"):
keep_it = False
break
if keep_it: new_headers.append(h)
return ('\n'.join(new_headers) + rest)
def change_message_uid(self, uid, new_uid):
"""Change the message from existing uid to new_uid
If the backend supports it (IMAP does not).
:param new_uid: (optional) If given, the old UID will be changed
to a new UID. This allows backends efficient renaming of
messages if the UID has changed."""
raise NotImplementedError
def deletemessage(self, uid):
"""Note that this function does not check against dryrun settings,
so you need to ensure that it is never called in a
dryrun mode."""
raise NotImplementedError
def deletemessages(self, uidlist):
"""Note that this function does not check against dryrun settings,
so you need to ensure that it is never called in a
dryrun mode."""
for uid in uidlist:
self.deletemessage(uid)
def copymessageto(self, uid, dstfolder, statusfolder, register = 1):
"""Copies a message from self to dst if needed, updating the status
Note that this function does not check against dryrun settings,
so you need to ensure that it is never called in a
dryrun mode.
:param uid: uid of the message to be copied.
:param dstfolder: A BaseFolder-derived instance
:param statusfolder: A LocalStatusFolder instance
:param register: whether we should register a new thread."
:returns: Nothing on success, or raises an Exception."""
# Sometimes, it could be the case that if a sync takes awhile,
# a message might be deleted from the maildir before it can be
# synced to the status cache. This is only a problem with
# self.getmessage(). So, don't call self.getmessage unless
# really needed.
if register: # output that we start a new thread
self.ui.registerthread(self.repository.account)
try:
message = None
flags = self.getmessageflags(uid)
rtime = self.getmessagetime(uid)
# If any of the destinations actually stores the message body,
# load it up.
if dstfolder.storesmessages():
message = self.getmessage(uid)
# Succeeded? -> IMAP actually assigned a UID. If newid
# remained negative, no server was willing to assign us an
# UID. If newid is 0, saving succeeded, but we could not
# retrieve the new UID. Ignore message in this case.
new_uid = dstfolder.savemessage(uid, message, flags, rtime)
if new_uid > 0:
if new_uid != uid:
# Got new UID, change the local uid to match the new one.
self.change_message_uid(uid, new_uid)
statusfolder.deletemessage(uid)
# Got new UID, change the local uid.
# Save uploaded status in the statusfolder
statusfolder.savemessage(new_uid, message, flags, rtime)
# Check whether the mail has been seen
if 'S' not in flags:
self.have_newmail = True
elif new_uid == 0:
# Message was stored to dstfolder, but we can't find it's UID
# This means we can't link current message to the one created
# in IMAP. So we just delete local message and on next run
# we'll sync it back
# XXX This could cause infinite loop on syncing between two
# IMAP servers ...
self.deletemessage(uid)
else:
raise OfflineImapError("Trying to save msg (uid %d) on folder "
"%s returned invalid uid %d"% (uid, dstfolder.getvisiblename(),
new_uid), OfflineImapError.ERROR.MESSAGE)
except (KeyboardInterrupt): # bubble up CTRL-C
raise
except OfflineImapError as e:
if e.severity > OfflineImapError.ERROR.MESSAGE:
raise # bubble severe errors up
self.ui.error(e, exc_info()[2])
except Exception as e:
self.ui.error(e, exc_info()[2],
msg = "Copying message %s [acc: %s]"% (uid, self.accountname))
raise #raise on unknown errors, so we can fix those
def __syncmessagesto_copy(self, dstfolder, statusfolder):
"""Pass1: Copy locally existing messages not on the other side.
This will copy messages to dstfolder that exist locally but are
not in the statusfolder yet. The strategy is:
1) Look for messages present in self but not in statusfolder.
2) invoke copymessageto() on those which:
- If dstfolder doesn't have it yet, add them to dstfolder.
- Update statusfolder
This function checks and protects us from action in dryrun mode."""
# We have no new mail yet
self.have_newmail = False
threads = []
copylist = filter(lambda uid: not statusfolder.uidexists(uid),
self.getmessageuidlist())
num_to_copy = len(copylist)
if num_to_copy and self.repository.account.dryrun:
self.ui.info("[DRYRUN] Copy {0} messages from {1}[{2}] to {3}".format(
num_to_copy, self, self.repository, dstfolder.repository))
return
for num, uid in enumerate(copylist):
# bail out on CTRL-C or SIGTERM
if offlineimap.accounts.Account.abort_NOW_signal.is_set():
break
if uid > 0 and dstfolder.uidexists(uid):
# dst has message with that UID already, only update status
flags = self.getmessageflags(uid)
rtime = self.getmessagetime(uid)
statusfolder.savemessage(uid, None, flags, rtime)
continue
self.ui.copyingmessage(uid, num+1, num_to_copy, self, dstfolder)
# exceptions are caught in copymessageto()
if self.suggeststhreads() and not globals.options.singlethreading:
self.waitforthread()
thread = threadutil.InstanceLimitedThread(\
self.getcopyinstancelimit(),
target = self.copymessageto,
name = "Copy message from %s:%s" % (self.repository, self),
args = (uid, dstfolder, statusfolder))
thread.start()
threads.append(thread)
else:
self.copymessageto(uid, dstfolder, statusfolder,
register = 0)
for thread in threads:
thread.join()
# Execute new mail hook if we have new mail
if self.have_newmail:
if self.newmail_hook != None:
self.newmail_hook();
def __syncmessagesto_delete(self, dstfolder, statusfolder):
"""Pass 2: Remove locally deleted messages on dst.
Get all UIDS in statusfolder but not self. These are messages
that were deleted in 'self'. Delete those from dstfolder and
statusfolder.
This function checks and protects us from action in dryrun mode.
"""
deletelist = filter(lambda uid: uid >= 0 and not
self.uidexists(uid), statusfolder.getmessageuidlist())
if len(deletelist):
# Delete in statusfolder first to play safe. In case of abort, we
# won't lose message, we will just unneccessarily retransmit some.
# Delete messages from statusfolder that were either deleted by the
# user, or not being tracked (e.g. because of maxage).
statusfolder.deletemessages(deletelist)
# Filter out untracked messages
deletelist = filter(lambda uid: dstfolder.uidexists(uid), deletelist)
if len(deletelist):
self.ui.deletingmessages(deletelist, [dstfolder])
if self.repository.account.dryrun:
return #don't delete messages in dry-run mode
dstfolder.deletemessages(deletelist)
def combine_flags_and_keywords(self, uid, dstfolder):
"""Combine the message's flags and keywords using the mapping for the
destination folder."""
# Take a copy of the message flag set, otherwise
# __syncmessagesto_flags() will fail because statusflags is actually a
# reference to selfflags (which it should not, but I don't have time to
# debug THAT).
selfflags = set(self.getmessageflags(uid))
try:
keywordmap = dstfolder.getrepository().getkeywordmap()
if keywordmap is None:
return selfflags
knownkeywords = set(keywordmap.keys())
selfkeywords = self.getmessagekeywords(uid)
if not knownkeywords >= selfkeywords:
#some of the message's keywords are not in the mapping, so
#skip them
skipped_keywords = list(selfkeywords - knownkeywords)
selfkeywords &= knownkeywords
self.ui.warn("Unknown keywords skipped: %s\n"
"You may want to change your configuration to include "
"those\n" % (skipped_keywords))
keywordletterset = set([keywordmap[keyw] for keyw in selfkeywords])
#add the mapped keywords to the list of message flags
selfflags |= keywordletterset
except NotImplementedError:
pass
return selfflags
def __syncmessagesto_flags(self, dstfolder, statusfolder):
"""Pass 3: Flag synchronization.
Compare flag mismatches in self with those in statusfolder. If
msg has a valid UID and exists on dstfolder (has not e.g. been
deleted there), sync the flag change to both dstfolder and
statusfolder.
This function checks and protects us from action in ryrun mode.
"""
# For each flag, we store a list of uids to which it should be
# added. Then, we can call addmessagesflags() to apply them in
# bulk, rather than one call per message.
addflaglist = {}
delflaglist = {}
for uid in self.getmessageuidlist():
# Ignore messages with negative UIDs missed by pass 1 and
# don't do anything if the message has been deleted remotely
if uid < 0 or not dstfolder.uidexists(uid):
continue
if statusfolder.uidexists(uid):
statusflags = statusfolder.getmessageflags(uid)
else:
statusflags = set()
selfflags = self.combine_flags_and_keywords(uid, dstfolder)
addflags = selfflags - statusflags
delflags = statusflags - selfflags
for flag in addflags:
if not flag in addflaglist:
addflaglist[flag] = []
addflaglist[flag].append(uid)
for flag in delflags:
if not flag in delflaglist:
delflaglist[flag] = []
delflaglist[flag].append(uid)
for flag, uids in addflaglist.items():
self.ui.addingflags(uids, flag, dstfolder)
if self.repository.account.dryrun:
continue #don't actually add in a dryrun
dstfolder.addmessagesflags(uids, set(flag))
statusfolder.addmessagesflags(uids, set(flag))
for flag,uids in delflaglist.items():
self.ui.deletingflags(uids, flag, dstfolder)
if self.repository.account.dryrun:
continue #don't actually remove in a dryrun
dstfolder.deletemessagesflags(uids, set(flag))
statusfolder.deletemessagesflags(uids, set(flag))
def syncmessagesto(self, dstfolder, statusfolder):
"""Syncs messages in this folder to the destination dstfolder.
This is the high level entry for syncing messages in one direction.
Syncsteps are:
Pass1: Copy locally existing messages
Copy messages in self, but not statusfolder to dstfolder if not
already in dstfolder. dstfolder might assign a new UID (e.g. if
uploading to IMAP). Update statusfolder.
Pass2: Remove locally deleted messages
Get all UIDS in statusfolder but not self. These are messages
that were deleted in 'self'. Delete those from dstfolder and
statusfolder.
After this pass, the message lists should be identical wrt the
uids present (except for potential negative uids that couldn't
be placed anywhere).
Pass3: Synchronize flag changes
Compare flag mismatches in self with those in statusfolder. If
msg has a valid UID and exists on dstfolder (has not e.g. been
deleted there), sync the flag change to both dstfolder and
statusfolder.
Pass4: Synchronize label changes (Gmail only)
Compares label mismatches in self with those in statusfolder.
If msg has a valid UID and exists on dstfolder, syncs the labels
to both dstfolder and statusfolder.
:param dstfolder: Folderinstance to sync the msgs to.
:param statusfolder: LocalStatus instance to sync against.
"""
for (passdesc, action) in self.syncmessagesto_passes:
# bail out on CTRL-C or SIGTERM
if offlineimap.accounts.Account.abort_NOW_signal.is_set():
break
try:
action(dstfolder, statusfolder)
except (KeyboardInterrupt):
raise
except OfflineImapError as e:
if e.severity > OfflineImapError.ERROR.FOLDER:
raise
self.ui.error(e, exc_info()[2])
except Exception as e:
self.ui.error(e, exc_info()[2], "Syncing folder %s [acc: %s]" %\
(self, self.accountname))
raise # raise unknown Exceptions so we can fix them
def __eq__(self, other):
"""Comparisons work either on string comparing folder names or
on the same instance.
MailDirFolder('foo') == 'foo' --> True
a = MailDirFolder('foo'); a == b --> True
MailDirFolder('foo') == 'moo' --> False
MailDirFolder('foo') == IMAPFolder('foo') --> False
MailDirFolder('foo') == MaildirFolder('foo') --> False
"""
if isinstance(other, basestring):
return other == self.name
return id(self) == id(other)
def __ne__(self, other):
return not self.__eq__(other)
| gpl-2.0 |
ntts-clo/ryu | ryu/services/protocols/vrrp/monitor.py | 56 | 6586 | # Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013 Isaku Yamahata <yamahata at private email ne jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Interface monitor.
Watching packet received on this interface and parse VRRP packet.
VRRPManager creates/deletes instances of interface monitor dynamically.
"""
from ryu.base import app_manager
from ryu.controller import handler
from ryu.lib.packet import packet
from ryu.lib.packet import vlan
from ryu.lib.packet import vrrp
from ryu.services.protocols.vrrp import event as vrrp_event
class VRRPInterfaceMonitor(app_manager.RyuApp):
# subclass of VRRPInterfaceBase -> subclass of VRRPInterfaceMonitor
_CONSTRUCTORS = {}
@staticmethod
def register(interface_cls):
def _register(cls):
VRRPInterfaceMonitor._CONSTRUCTORS[interface_cls] = cls
return cls
return _register
@staticmethod
def factory(interface, config, router_name, statistics, *args, **kwargs):
cls = VRRPInterfaceMonitor._CONSTRUCTORS[interface.__class__]
app_mgr = app_manager.AppManager.get_instance()
kwargs = kwargs.copy()
kwargs['router_name'] = router_name
kwargs['vrrp_config'] = config
kwargs['vrrp_interface'] = interface
kwargs['vrrp_statistics'] = statistics
app = app_mgr.instantiate(cls, *args, **kwargs)
return app
@classmethod
def instance_name(cls, interface, vrid):
return '%s-%s-%d' % (cls.__name__, str(interface), vrid)
def __init__(self, *args, **kwargs):
super(VRRPInterfaceMonitor, self).__init__(*args, **kwargs)
self.config = kwargs['vrrp_config']
self.interface = kwargs['vrrp_interface']
self.router_name = kwargs['router_name']
self.statistics = kwargs['vrrp_statistics']
self.name = self.instance_name(self.interface, self.config.vrid)
def _parse_received_packet(self, packet_data):
# OF doesn't support VRRP packet matching, so we have to parse
# it ourselvs.
packet_ = packet.Packet(packet_data)
protocols = packet_.protocols
# we expect either of
# [ether, vlan, ip, vrrp{, padding}]
# or
# [ether, ip, vrrp{, padding}]
if len(protocols) < 2:
self.logger.debug('len(protocols) %d', len(protocols))
return
vlan_vid = self.interface.vlan_id
may_vlan = protocols[1]
if (vlan_vid is not None) != isinstance(may_vlan, vlan.vlan):
self.logger.debug('vlan_vid: %s %s', vlan_vid, type(may_vlan))
return
if vlan_vid is not None and vlan_vid != may_vlan.vid:
self.logger.debug('vlan_vid: %s vlan %s', vlan_vid, type(may_vlan))
return
# self.logger.debug('%s %s', packet_, packet_.protocols)
may_ip, may_vrrp = vrrp.vrrp.get_payload(packet_)
if not may_ip or not may_vrrp:
# self.logger.debug('may_ip %s may_vrrp %s', may_ip, may_vrrp)
return
if not vrrp.vrrp.is_valid_ttl(may_ip):
self.logger.debug('valid_ttl')
return
if may_vrrp.version != self.config.version:
self.logger.debug('vrrp version %d %d',
may_vrrp.version, self.config.version)
return
if not may_vrrp.is_valid():
self.logger.debug('valid vrrp')
return
offset = 0
for proto in packet_.protocols:
if proto == may_vrrp:
break
offset += len(proto)
if not may_vrrp.checksum_ok(
may_ip, packet_.data[offset:offset + len(may_vrrp)]):
self.logger.debug('bad checksum')
return
if may_vrrp.vrid != self.config.vrid:
self.logger.debug('vrid %d %d', may_vrrp.vrid, self.config.vrid)
return
if may_vrrp.is_ipv6 != self.config.is_ipv6:
self.logger.debug('is_ipv6 %s %s',
may_vrrp.is_ipv6, self.config.is_ipv6)
return
# TODO: Optional check rfc5798 7.1
# may_vrrp.ip_addresses equals to self.config.ip_addresses
if may_vrrp.priority == 0:
self.statistics.rx_vrrp_zero_prio_packets += 1
vrrp_received = vrrp_event.EventVRRPReceived(self.interface, packet_)
self.send_event(self.router_name, vrrp_received)
return True
def _send_vrrp_packet_received(self, packet_data):
valid = self._parse_received_packet(packet_data)
if valid is True:
self.statistics.rx_vrrp_packets += 1
else:
self.statistics.rx_vrrp_invalid_packets += 1
@handler.set_ev_handler(vrrp_event.EventVRRPTransmitRequest)
def vrrp_transmit_request_handler(self, ev):
raise NotImplementedError()
def _initialize(self):
raise NotImplementedError()
def _shutdown(self):
raise NotImplementedError()
@handler.set_ev_handler(vrrp_event.EventVRRPStateChanged)
def vrrp_state_changed_handler(self, ev):
assert ev.interface == self.interface
if ev.new_state == vrrp_event.VRRP_STATE_INITIALIZE:
# add/del packet in rule
if ev.old_state:
self._shutdown()
else:
self._initialize()
elif ev.new_state in [vrrp_event.VRRP_STATE_BACKUP,
vrrp_event.VRRP_STATE_MASTER]:
if ev.old_state == vrrp_event.VRRP_STATE_INITIALIZE:
if ev.new_state == vrrp_event.VRRP_STATE_MASTER:
self.statistics.idle_to_master_transitions += 1
else:
self.statistics.idle_to_backup_transitions += 1
elif ev.old_state == vrrp_event.VRRP_STATE_MASTER:
self.statistics.master_to_backup_transitions += 1
else:
self.statistics.backup_to_master_transitions += 1
else:
raise RuntimeError('unknown vrrp state %s' % ev.new_state)
| apache-2.0 |
zapster/graal-core | mx.graal-core/mx_graal_core.py | 1 | 36920 | #
# ----------------------------------------------------------------------------------------------------
#
# Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# ----------------------------------------------------------------------------------------------------
import os
from os.path import join, exists, getmtime
from argparse import ArgumentParser
import re
import zipfile
import subprocess
import mx
from mx_gate import Task
from mx_unittest import unittest
from mx_javamodules import as_java_module
import mx_gate
import mx_unittest
import mx_microbench
import mx_graal_benchmark # pylint: disable=unused-import
import mx_graal_tools #pylint: disable=unused-import
import argparse
import shlex
_suite = mx.suite('graal-core')
""" Prefix for running the VM. """
_vm_prefix = None
def get_vm_prefix(asList=True):
"""
Get the prefix for running the VM (e.g. "gdb --args").
"""
if asList:
return _vm_prefix.split() if _vm_prefix is not None else []
return _vm_prefix
#: The JDK used to build and run Graal.
jdk = mx.get_jdk(tag='default')
if jdk.javaCompliance < '1.8':
mx.abort('Graal requires JDK8 or later, got ' + str(jdk))
#: Specifies if Graal is being built/run against JDK8. If false, then
#: JDK9 or later is being used (checked above).
isJDK8 = jdk.javaCompliance < '1.9'
def _check_jvmci_version(jdk):
    """
    Runs a Java utility to check that `jdk` supports the minimum JVMCI API required by Graal.
    """
    simplename = 'JVMCIVersionCheck'
    name = 'com.oracle.graal.hotspot.' + simplename
    # Compile the checker into a per-JDK-version output directory so that a
    # class file built against a different JDK is never reused.
    binDir = mx.ensure_dir_exists(join(_suite.get_output_root(), '.jdk' + str(jdk.version)))
    if isinstance(_suite, mx.BinarySuite):
        # Binary suites ship no source tree; extract the checker's source
        # from the GRAAL_HOTSPOT sources jar on first use.
        javaSource = join(binDir, simplename + '.java')
        if not exists(javaSource):
            dists = [d for d in _suite.dists if d.name == 'GRAAL_HOTSPOT']
            assert len(dists) == 1, 'could not find GRAAL_HOTSPOT distribution'
            d = dists[0]
            assert exists(d.sourcesPath), 'missing expected file: ' + d.sourcesPath
            with zipfile.ZipFile(d.sourcesPath, 'r') as zf:
                with open(javaSource, 'w') as fp:
                    fp.write(zf.read(name.replace('.', '/') + '.java'))
    else:
        javaSource = join(_suite.dir, 'graal', 'com.oracle.graal.hotspot', 'src', name.replace('.', '/') + '.java')
    javaClass = join(binDir, name.replace('.', '/') + '.class')
    # Recompile only when the class file is missing or older than its source.
    if not exists(javaClass) or getmtime(javaClass) < getmtime(javaSource):
        mx.run([jdk.javac, '-d', binDir, javaSource])
    mx.run([jdk.java, '-cp', binDir, name])

# Fail fast at import time if the JDK's JVMCI API is too old.
_check_jvmci_version(jdk)
class JVMCIClasspathEntry(object):
    """
    Represents a distribution deployed on the JVMCI class path.

    :param str name: name of the `JARDistribution` to be deployed
    """

    def __init__(self, name):
        self._name = name

    def dist(self):
        """Resolves and returns the named `JARDistribution`."""
        return mx.distribution(self._name)

    def get_path(self):
        """
        Returns the path to the distribution's jar file.

        :rtype: str
        """
        return self.dist().classpath_repr()
#: The deployed Graal distributions
_jvmci_classpath = [
    JVMCIClasspathEntry('GRAAL'),
]

def add_jvmci_classpath_entry(entry):
    """
    Appends an entry to the JVMCI classpath.
    """
    _jvmci_classpath.append(entry)

#: Distributions that must be appended to the boot class path (JDK8)
#: or deployed as modules (JDK9+).
_bootclasspath_appends = []

def add_bootclasspath_append(dep):
    """
    Adds a distribution that must be appended to the boot class path
    """
    assert dep.isJARDistribution(), dep.name + ' is not a distribution'
    _bootclasspath_appends.append(dep)

# Configure JaCoCo code coverage to instrument Graal classes only.
mx_gate.add_jacoco_includes(['com.oracle.graal.*'])
mx_gate.add_jacoco_excluded_annotations(['@Snippet', '@ClassSubstitution'])
class JVMCIMicrobenchExecutor(mx_microbench.MicrobenchExecutor):
    """Microbenchmark executor that runs JMH benchmarks on the Graal-configured VM."""

    def parseVmArgs(self, vmArgs):
        # On JDK8, the harness needs direct access to JVMCI classes, so the
        # JVMCI class loader must be disabled when JVMCI is enabled.
        if isJDK8:
            if _is_jvmci_enabled(vmArgs) and '-XX:-UseJVMCIClassLoader' not in vmArgs:
                vmArgs = ['-XX:-UseJVMCIClassLoader'] + vmArgs
        return ['-server'] + _parseVmArgs(vmArgs)

    def parseForkedVmArgs(self, vmArgs):
        return ['-server'] + _parseVmArgs(vmArgs)

    def run_java(self, args):
        return run_vm(args)

mx_microbench.set_microbenchmark_executor(JVMCIMicrobenchExecutor())
def _get_XX_option_value(vmargs, name, default):
"""
Gets the value of an ``-XX:`` style HotSpot VM option.
:param list vmargs: VM arguments to inspect
:param str name: the name of the option
:param default: the default value of the option if it's not present in `vmargs`
:return: the value of the option as specified in `vmargs` or `default`
"""
for arg in reversed(vmargs):
if arg == '-XX:-' + name:
return False
if arg == '-XX:+' + name:
return True
if arg.startswith('-XX:' + name + '='):
return arg[len('-XX:' + name + '='):]
return default
def _is_jvmci_enabled(vmargs):
    """
    Determines if JVMCI is enabled according to the given VM arguments and whether JDK > 8.

    :param list vmargs: VM arguments to inspect
    """
    # When EnableJVMCI is not specified explicitly, the default is the value
    # of `isJDK8` (see module-level definition above).
    return _get_XX_option_value(vmargs, 'EnableJVMCI', isJDK8)
def ctw(args, extraVMarguments=None):
    """run CompileTheWorld"""
    defaultCtwopts = 'Inline=false'

    parser = ArgumentParser(prog='mx ctw')
    parser.add_argument('--ctwopts', action='store', help='space separated JVMCI options used for CTW compilations (default: --ctwopts="' + defaultCtwopts + '")', default=defaultCtwopts, metavar='<options>')
    parser.add_argument('--cp', '--jar', action='store', help='jar or class path denoting classes to compile', metavar='<path>')
    if not isJDK8:
        parser.add_argument('--limitmods', action='store', help='limits the set of compiled classes to only those in the listed modules', metavar='<modulename>[,<modulename>...]')

    args, vmargs = parser.parse_known_args(args)

    if args.ctwopts:
        # Replace spaces with '#' since it cannot contain spaces
        vmargs.append('-Dgraal.CompileTheWorldConfig=' + re.sub(r'\s+', '#', args.ctwopts))

    # suppress menubar and dock when running on Mac; exclude x11 classes as they may cause VM crashes (on Solaris)
    vmargs = ['-Djava.awt.headless=true'] + vmargs

    if args.cp:
        cp = os.path.abspath(args.cp)
        if not isJDK8 and not _is_jvmci_enabled(vmargs):
            mx.abort('Non-Graal CTW does not support specifying a specific class path or jar to compile')
    else:
        # No explicit class path: compile the whole JDK.
        if isJDK8:
            cp = join(jdk.home, 'jre', 'lib', 'rt.jar')
        else:
            # Compile all classes in the JRT image by default.
            cp = join(jdk.home, 'lib', 'modules')
        vmargs.append('-Dgraal.CompileTheWorldExcludeMethodFilter=sun.awt.X11.*.*')

    # Bootstrap the compiler itself first if it will be the active JIT.
    if _get_XX_option_value(vmargs + _remove_empty_entries(extraVMarguments), 'UseJVMCICompiler', False):
        vmargs.append('-XX:+BootstrapJVMCI')

    if isJDK8:
        if not _is_jvmci_enabled(vmargs):
            vmargs.extend(['-XX:+CompileTheWorld', '-Xbootclasspath/p:' + cp])
        else:
            vmargs.extend(['-Dgraal.CompileTheWorldClasspath=' + cp, '-XX:-UseJVMCIClassLoader', 'com.oracle.graal.hotspot.CompileTheWorld'])
    else:
        if _is_jvmci_enabled(vmargs):
            # To be able to load all classes in the JRT with Class.forName,
            # all JDK modules need to be made root modules.
            limitmods = frozenset(args.limitmods.split(',')) if args.limitmods else None
            nonBootJDKModules = [m.name for m in jdk.get_modules() if not m.boot and (limitmods is None or m.name in limitmods)]
            if nonBootJDKModules:
                vmargs.append('--add-modules=' + ','.join(nonBootJDKModules))
            if args.limitmods:
                vmargs.append('-DCompileTheWorld.limitmods=' + args.limitmods)
            vmargs.extend(['-Dgraal.CompileTheWorldClasspath=' + cp, 'com.oracle.graal.hotspot.CompileTheWorld'])
        else:
            vmargs.append('-XX:+CompileTheWorld')

    run_vm(vmargs + _remove_empty_entries(extraVMarguments))
class UnitTestRun:
    """A named group of unit-test suites executed as one gate task per suite."""

    def __init__(self, name, args, tags):
        self.name = name
        self.args = args
        self.tags = tags

    def run(self, suites, tasks, extraVMarguments=None):
        """Registers and runs one hosted unit-test gate task per suite in `suites`."""
        for suite in suites:
            with Task(self.name + ': hosted-product ' + suite, tasks, tags=self.tags) as t:
                # Propagate verbosity of the enclosing gate run to the harness.
                extra_args = ['--verbose', '--enable-timing'] if mx_gate.Task.verbose else []
                if t:
                    unittest(['--suite', suite, '--fail-fast'] + extra_args + self.args + _remove_empty_entries(extraVMarguments))
class BootstrapTest:
    """
    A gate task that bootstraps the Graal compiler (Graal compiling itself)
    as a smoke test.

    :param str name: task name
    :param list args: extra VM arguments for the bootstrap run
    :param tags: gate tags (a list of strings, or None) selecting when the task runs
    :param suppress: optional list of output prefixes whose duplicate lines are suppressed
    """

    def __init__(self, name, args, tags, suppress=None):
        self.name = name
        self.args = args
        self.suppress = suppress
        self.tags = tags
        # Fix: the original validation used all(not isinstance(...)), which only
        # rejected a list whose entries were *all* non-strings; a mixed list such
        # as ['x', 42] slipped through. any() rejects any non-string tag.
        if tags is not None and (type(tags) is not list or any(not isinstance(x, basestring) for x in tags)):
            mx.abort("Gate tag argument must be a list of strings, tag argument:" + str(tags))

    def run(self, tasks, extraVMarguments=None):
        """Runs the bootstrap as a gate task, forcing Graal as the only JIT."""
        with Task(self.name, tasks, tags=self.tags) as t:
            if t:
                if self.suppress:
                    out = mx.DuplicateSuppressingStream(self.suppress).write
                else:
                    out = None
                run_vm(self.args + ['-XX:+UseJVMCICompiler'] + _remove_empty_entries(extraVMarguments) + ['-XX:-TieredCompilation', '-XX:+BootstrapJVMCI', '-version'], out=out)
class MicrobenchRun:
    """A gate task that runs the JMH microbenchmarks via the configured executor."""

    def __init__(self, name, args, tags):
        self.name = name
        self.args = args
        self.tags = tags

    def run(self, tasks, extraVMarguments=None):
        taskName = self.name + ': hosted-product '
        with Task(taskName, tasks, tags=self.tags) as t:
            if t:
                # '-foe true' makes JMH fail on the first benchmark error.
                executor = mx_microbench.get_microbenchmark_executor()
                executor.microbench(_remove_empty_entries(extraVMarguments) + ['--', '-foe', 'true'] + self.args)
class GraalTags:
    # Gate tag sets: each attribute lists the gate tags under which the
    # corresponding category of tasks runs; 'fulltest' selects everything.
    bootstrap = ['bootstrap', 'fulltest']
    bootstraplite = ['bootstraplite', 'bootstrap', 'fulltest']
    bootstrapfullverify = ['bootstrapfullverify', 'fulltest']
    test = ['test', 'fulltest']
    benchmarktest = ['benchmarktest', 'fulltest']
    ctw = ['ctw', 'fulltest']
def _remove_empty_entries(a):
"""Removes empty entries. Return value is always a list."""
if not a:
return []
return [x for x in a if x]
def _gate_java_benchmark(args, successRe):
    """
    Runs a Java benchmark and aborts if the benchmark process exits with a non-zero
    exit code or the `successRe` pattern is not in the output of the benchmark process.

    :param list args: the arguments to pass to the VM
    :param str successRe: a regular expression
    """
    out = mx.OutputCapture()
    try:
        run_java(args, out=mx.TeeOutputCapture(out), err=subprocess.STDOUT)
    finally:
        # Fix: the character class previously read '[/\]' - since '\]' is an
        # escaped literal ']', the class silently swallowed 'hs_err_pid[0-9'
        # and matched the wrong set of characters. '[/\\]' correctly matches
        # a '/' or '\' path separator preceding the hs_err file name.
        jvmErrorFile = re.search(r'(([A-Z]:|/).*[/\\]hs_err_pid[0-9]+\.log)', out.data)
        if jvmErrorFile:
            jvmErrorFile = jvmErrorFile.group()
            mx.log('Dumping ' + jvmErrorFile)
            with open(jvmErrorFile, 'rb') as fp:
                mx.log(fp.read())
            os.unlink(jvmErrorFile)
    if not re.search(successRe, out.data, re.MULTILINE):
        mx.abort('Could not find benchmark success pattern: ' + successRe)
def _gate_dacapo(name, iterations, extraVMarguments=None):
    """Runs the named DaCapo 9.12 benchmark `iterations` times and checks for its PASSED line."""
    vmargs = ['-Xms2g', '-XX:+UseSerialGC', '-XX:-UseCompressedOops', '-Djava.net.preferIPv4Stack=true', '-Dgraal.ExitVMOnException=true'] + _remove_empty_entries(extraVMarguments)
    dacapoJar = mx.library('DACAPO').get_path(True)
    _gate_java_benchmark(vmargs + ['-jar', dacapoJar, name, '-n', str(iterations)], r'^===== DaCapo 9\.12 ([a-zA-Z0-9_]+) PASSED in ([0-9]+) msec =====')
def _gate_scala_dacapo(name, iterations, extraVMarguments=None):
    """Runs the named Scala DaCapo benchmark `iterations` times and checks for its PASSED line."""
    vmargs = ['-Xms2g', '-XX:+UseSerialGC', '-XX:-UseCompressedOops', '-Dgraal.ExitVMOnException=true'] + _remove_empty_entries(extraVMarguments)
    scalaDacapoJar = mx.library('DACAPO_SCALA').get_path(True)
    _gate_java_benchmark(vmargs + ['-jar', scalaDacapoJar, name, '-n', str(iterations)], r'^===== DaCapo 0\.1\.0(-SNAPSHOT)? ([a-zA-Z0-9_]+) PASSED in ([0-9]+) msec =====')
def compiler_gate_runner(suites, unit_test_runs, bootstrap_tests, tasks, extraVMarguments=None):
    """
    Schedules the full compiler gate: hosted unit tests, a microbenchmark
    smoke test, CompileTheWorld, the bootstrap tests and selected
    (Scala)DaCapo benchmark runs.
    """
    # Run unit tests in hosted mode
    for r in unit_test_runs:
        r.run(suites, tasks, ['-XX:-UseJVMCICompiler'] + _remove_empty_entries(extraVMarguments))

    # Run microbench in hosted mode (only for testing the JMH setup)
    for r in [MicrobenchRun('Microbench', ['TestJMH'], tags=GraalTags.benchmarktest)]:
        r.run(tasks, ['-XX:-UseJVMCICompiler'] + _remove_empty_entries(extraVMarguments))

    # Run ctw against rt.jar on hosted
    with Task('CTW:hosted', tasks, tags=GraalTags.ctw) as t:
        if t:
            ctw([
                '--ctwopts', 'Inline=false ExitVMOnException=true', '-esa', '-XX:-UseJVMCICompiler',
                '-Dgraal.CompileTheWorldMultiThreaded=true', '-Dgraal.InlineDuringParsing=false',
                '-Dgraal.CompileTheWorldVerbose=false', '-XX:ReservedCodeCacheSize=300m',
            ], _remove_empty_entries(extraVMarguments))

    # bootstrap tests
    for b in bootstrap_tests:
        b.run(tasks, extraVMarguments)

    # run selected DaCapo benchmarks (benchmark name -> iteration count)
    dacapos = {
        'avrora': 1,
        'batik': 1,
        'fop': 8,
        'h2': 1,
        'jython': 2,
        'luindex': 1,
        'lusearch': 4,
        'pmd': 1,
        'sunflow': 2,
        'xalan': 1,
    }
    for name, iterations in sorted(dacapos.iteritems()):
        with Task('DaCapo:' + name, tasks, tags=GraalTags.benchmarktest) as t:
            if t: _gate_dacapo(name, iterations, _remove_empty_entries(extraVMarguments) + ['-XX:+UseJVMCICompiler'])

    # run selected Scala DaCapo benchmarks (benchmark name -> iteration count)
    scala_dacapos = {
        'actors': 1,
        'apparat': 1,
        'factorie': 1,
        'kiama': 4,
        'scalac': 1,
        'scaladoc': 1,
        'scalap': 1,
        'scalariform':1,
        'scalatest': 1,
        'scalaxb': 1,
        'tmt': 1
    }
    for name, iterations in sorted(scala_dacapos.iteritems()):
        with Task('ScalaDaCapo:' + name, tasks, tags=GraalTags.benchmarktest) as t:
            if t: _gate_scala_dacapo(name, iterations, _remove_empty_entries(extraVMarguments) + ['-XX:+UseJVMCICompiler'])

    # ensure -Xbatch still works
    with Task('DaCapo_pmd:BatchMode', tasks, tags=GraalTags.test) as t:
        if t: _gate_dacapo('pmd', 1, _remove_empty_entries(extraVMarguments) + ['-XX:+UseJVMCICompiler', '-Xbatch'])

    # ensure benchmark counters still work
    with Task('DaCapo_pmd:BenchmarkCounters', tasks, tags=GraalTags.test) as t:
        if t: _gate_dacapo('pmd', 1, _remove_empty_entries(extraVMarguments) + ['-XX:+UseJVMCICompiler', '-Dgraal.LIRProfileMoves=true', '-Dgraal.GenericDynamicCounters=true', '-XX:JVMCICounterSize=10'])

    # ensure -Xcomp still works
    with Task('XCompMode:product', tasks, tags=GraalTags.test) as t:
        if t: run_vm(_remove_empty_entries(extraVMarguments) + ['-XX:+UseJVMCICompiler', '-Xcomp', '-version'])
# Default unit-test runs executed by the gate.
graal_unit_test_runs = [
    UnitTestRun('UnitTests', [], tags=GraalTags.test),
]

# Registers used for register-pressure stress; the set depends on the architecture.
_registers = 'o0,o1,o2,o3,f8,f9,d32,d34' if mx.get_arch() == 'sparcv9' else 'rbx,r11,r10,r14,xmm3,xmm11,xmm14'

# Flag bundles composed into the bootstrap test configurations below.
_assertionFlags = ['-esa']
_graalErrorFlags = ['-Dgraal.ExitVMOnException=true']
_graalEconomyFlags = ['-Dgraal.CompilerConfiguration=economy']
_verificationFlags = ['-Dgraal.VerifyGraalGraphs=true', '-Dgraal.VerifyGraalGraphEdges=true', '-Dgraal.VerifyGraalPhasesSize=true', '-Dgraal.VerifyNodeCostOnAccess=true', '-Dgraal.VerifyPhases=true']
_coopFlags = ['-XX:-UseCompressedOops']
_gcVerificationFlags = ['-XX:+UnlockDiagnosticVMOptions', '-XX:+VerifyBeforeGC', '-XX:+VerifyAfterGC']
_g1VerificationFlags = ['-XX:-UseSerialGC', '-XX:+UseG1GC']
_exceptionFlags = ['-Dgraal.StressInvokeWithExceptionNode=true']
_registerPressureFlags = ['-Dgraal.RegisterPressure=' + _registers, '-Dgraal.LIRUnlockBackendRestart=true']
_immutableCodeFlags = ['-Dgraal.ImmutableCode=true']

# Bootstrap configurations executed by the gate, from lightweight smoke
# tests ('bootstraplite') to full graph-verification runs.
graal_bootstrap_tests = [
    BootstrapTest('BootstrapWithSystemAssertionsFullVerify', _assertionFlags + _verificationFlags + _graalErrorFlags, tags=GraalTags.bootstrapfullverify),
    BootstrapTest('BootstrapWithSystemAssertions', _assertionFlags + _graalErrorFlags, tags=GraalTags.bootstraplite),
    BootstrapTest('BootstrapWithSystemAssertionsNoCoop', _assertionFlags + _coopFlags + _graalErrorFlags, tags=GraalTags.bootstrap),
    BootstrapTest('BootstrapWithGCVerification', _gcVerificationFlags + _graalErrorFlags, tags=GraalTags.bootstrap, suppress=['VerifyAfterGC:', 'VerifyBeforeGC:']),
    BootstrapTest('BootstrapWithG1GCVerification', _g1VerificationFlags + _gcVerificationFlags + _graalErrorFlags, tags=GraalTags.bootstrap, suppress=['VerifyAfterGC:', 'VerifyBeforeGC:']),
    BootstrapTest('BootstrapWithSystemAssertionsEconomy', _assertionFlags + _graalEconomyFlags + _graalErrorFlags, tags=GraalTags.bootstrap),
    BootstrapTest('BootstrapWithSystemAssertionsExceptionEdges', _assertionFlags + _exceptionFlags + _graalErrorFlags, tags=GraalTags.bootstrap),
    BootstrapTest('BootstrapWithSystemAssertionsRegisterPressure', _assertionFlags + _registerPressureFlags + _graalErrorFlags, tags=GraalTags.bootstrap),
    BootstrapTest('BootstrapWithSystemAssertionsImmutableCode', _assertionFlags + _immutableCodeFlags + ['-Dgraal.VerifyPhases=true'] + _graalErrorFlags, tags=GraalTags.bootstrap)
]
def _graal_gate_runner(args, tasks):
    """Gate entry point registered with mx_gate: runs the full compiler gate."""
    compiler_gate_runner(['graal-core', 'truffle'], graal_unit_test_runs, graal_bootstrap_tests, tasks, args.extra_vm_argument)
class ShellEscapedStringAction(argparse.Action):
    """Argparse action that shell-splits its string value into a list of
    arguments, appending the result to any value already stored on the
    destination attribute."""

    def __init__(self, option_strings, nargs=None, **kwargs):
        # Exactly one string value is expected; argparse-level nargs handling
        # would interfere with the shell splitting done in __call__.
        if nargs is not None:
            raise ValueError("nargs not allowed")
        super(ShellEscapedStringAction, self).__init__(option_strings, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        # Accumulate onto (rather than replace) previously parsed values.
        current = getattr(namespace, self.dest) or []
        setattr(namespace, self.dest, current + shlex.split(values))
# Register the gate runner and its extra command line option with mx_gate.
mx_gate.add_gate_runner(_suite, _graal_gate_runner)
mx_gate.add_gate_argument('--extra-vm-argument', action=ShellEscapedStringAction, help='add extra vm arguments to gate tasks if applicable')
def _unittest_vm_launcher(vmArgs, mainClass, mainClassArgs):
    """VM launcher used by mx_unittest: runs the test harness on the Graal VM."""
    run_vm(vmArgs + [mainClass] + mainClassArgs)
def _unittest_config_participant(config):
    """
    mx_unittest hook that rewrites the unit test VM command line so that
    Graal classes are loaded from the JVMCI class path (JDK8) or from
    deployed modules (JDK9+) instead of from the application class path.

    :param config: a (vmArgs, mainClass, mainClassArgs) tuple; a tuple of the
        same shape is returned with `vmArgs` adjusted.
    """
    vmArgs, mainClass, mainClassArgs = config
    cpIndex, cp = mx.find_classpath_arg(vmArgs)
    if cp:
        cp = _uniqify(cp.split(os.pathsep))
        if isJDK8:
            # Remove entries from class path that are in Graal or on the boot class path
            redundantClasspathEntries = set()
            for dist in [entry.dist() for entry in _jvmci_classpath]:
                redundantClasspathEntries.update((d.output_dir() for d in dist.archived_deps() if d.isJavaProject()))
                redundantClasspathEntries.add(dist.path)
            cp = os.pathsep.join([e for e in cp if e not in redundantClasspathEntries])
            vmArgs[cpIndex] = cp
        else:
            deployedModules = []
            redundantClasspathEntries = set()
            for dist in [entry.dist() for entry in _jvmci_classpath] + _bootclasspath_appends:
                deployedModule = as_java_module(dist, jdk)
                deployedModules.append(deployedModule)
                redundantClasspathEntries.update(mx.classpath(dist, preferProjects=False, jdk=jdk).split(os.pathsep))
                redundantClasspathEntries.update(mx.classpath(dist, preferProjects=True, jdk=jdk).split(os.pathsep))
            # Remove entries from the class path that are in the deployed modules
            cp = [classpathEntry for classpathEntry in cp if classpathEntry not in redundantClasspathEntries]
            vmArgs[cpIndex] = os.pathsep.join(cp)
            # Junit libraries are made into automatic modules so that they are visible to tests
            # patched into modules. These automatic modules must be declared to be read by
            # Graal which means they must also be made root modules (i.e., ``--add-modules``)
            # since ``--add-reads`` can only be applied to root modules.
            junitCp = [e.classpath_repr() for e in mx.classpath_entries(['JUNIT'])]
            junitModules = [_automatic_module_name(e) for e in junitCp]
            vmArgs.append('--module-path=' + os.pathsep.join(junitCp))
            vmArgs.append('--add-modules=' + ','.join(junitModules + [m.name for m in deployedModules]))
            for deployedModule in deployedModules:
                vmArgs.append('--add-reads=' + deployedModule.name + '=' + ','.join(junitModules))
            # Explicitly export concealed JVMCI packages required by Graal. Even though
            # normally exported via jdk.vm.ci.services.Services.exportJVMCITo(), the
            # Junit harness wants to access JVMCI classes (e.g., when loading classes
            # to find test methods) without going through that entry point.
            addedExports = {}
            for deployedModule in deployedModules:
                for concealingModule, packages in deployedModule.concealedRequires.iteritems():
                    if concealingModule == 'jdk.vm.ci':
                        for package in packages:
                            addedExports.setdefault(concealingModule + '/' + package, set()).add(deployedModule.name)
            patches = {}
            pathToProject = {p.output_dir() : p for p in mx.projects() if p.isJavaProject()}
            for classpathEntry in cp:
                # Export concealed packages used by the class path entry
                _add_exports_for_concealed_packages(classpathEntry, pathToProject, addedExports, 'ALL-UNNAMED', deployedModules)
                for deployedModule in deployedModules:
                    assert deployedModule.dist.path != classpathEntry, deployedModule.dist.path + ' should no longer be on the class path'
                    # Patch the class path entry into a module if it defines packages already defined by the module.
                    # Packages definitions cannot be split between modules.
                    classpathEntryPackages = frozenset(_defined_packages(classpathEntry))
                    if not classpathEntryPackages.isdisjoint(deployedModule.packages):
                        patches.setdefault(deployedModule.name, []).append(classpathEntry)
                        extraPackages = classpathEntryPackages - frozenset(deployedModule.exports.iterkeys())
                        if extraPackages:
                            # From http://openjdk.java.net/jeps/261:
                            # If a package found in a module definition on a patch path is not already exported
                            # by that module then it will, still, not be exported. It can be exported explicitly
                            # via either the reflection API or the --add-exports option.
                            for package in extraPackages:
                                addedExports.setdefault(deployedModule.name + '/' + package, set()).update(junitModules + ['ALL-UNNAMED'])
            for moduleName, cpEntries in patches.iteritems():
                vmArgs.append('--patch-module=' + moduleName + '=' + os.pathsep.join(cpEntries))
            vmArgs.extend(['--add-exports=' + export + '=' + ','.join(sorted(targets)) for export, targets in addedExports.iteritems()])
    if isJDK8:
        # Run the VM in a mode where application/test classes can
        # access JVMCI loaded classes.
        vmArgs.append('-XX:-UseJVMCIClassLoader')
    return (vmArgs, mainClass, mainClassArgs)

# Wire the config participant and the VM launcher into mx_unittest.
mx_unittest.add_config_participant(_unittest_config_participant)
mx_unittest.set_vm_launcher('JDK9 VM launcher', _unittest_vm_launcher, jdk)
def _uniqify(alist):
"""
Processes given list to remove all duplicate entries, preserving only the first unique instance for each entry.
:param list alist: the list to process
:return: `alist` with all duplicates removed
"""
seen = set()
return [e for e in alist if e not in seen and seen.add(e) is None]
def _defined_packages(classpathEntry):
"""
Gets the packages defined by `classpathEntry`.
"""
packages = set()
if os.path.isdir(classpathEntry):
for root, _, filenames in os.walk(classpathEntry):
if any(f.endswith('.class') for f in filenames):
package = root[len(classpathEntry) + 1:].replace(os.sep, '.')
packages.add(package)
elif classpathEntry.endswith('.zip') or classpathEntry.endswith('.jar'):
with zipfile.ZipFile(classpathEntry, 'r') as zf:
for name in zf.namelist():
if name.endswith('.class') and '/' in name:
package = name[0:name.rfind('/')].replace('/', '.')
packages.add(package)
return packages
def _automatic_module_name(modulejar):
"""
Derives the name of an automatic module from an automatic module jar according to
specification of java.lang.module.ModuleFinder.of(Path... entries).
:param str modulejar: the path to a jar file treated as an automatic module
:return: the name of the automatic module derived from `modulejar`
"""
# Drop directory prefix and .jar (or .zip) suffix
name = os.path.basename(modulejar)[0:-4]
# Find first occurrence of -${NUMBER}. or -${NUMBER}$
m = re.search(r'-(\d+(\.|$))', name)
if m:
name = name[0:m.start()]
# Finally clean up the module name
name = re.sub(r'[^A-Za-z0-9]', '.', name) # replace non-alphanumeric
name = re.sub(r'(\.)(\1)+', '.', name) # collapse repeating dots
name = re.sub(r'^\.', '', name) # drop leading dots
return re.sub(r'\.$', '', name) # drop trailing dots
def _add_exports_for_concealed_packages(classpathEntry, pathToProject, exports, module, modulepath):
    """
    Adds exports for concealed packages imported by the project whose output directory matches `classpathEntry`.

    :param str classpathEntry: a class path entry
    :param dict pathToProject: map from an output directory to its defining `JavaProject`
    :param dict exports: map from a module/package specifier to the set of modules it must be exported to
    :param str module: the name of the module containing the classes in `classpathEntry`
    :param list modulepath: modules to be searched for concealed packages
    """
    # Entries that do not correspond to a known project's output directory are ignored.
    project = pathToProject.get(classpathEntry, None)
    if project:
        concealed = project.get_concealed_imported_packages(jdk, modulepath)
        for concealingModule, packages in concealed.iteritems():
            for package in packages:
                exports.setdefault(concealingModule + '/' + package, set()).add(module)
def _extract_added_exports(args, addedExports):
"""
Extracts ``--add-exports`` entries from `args` and updates `addedExports` based on their values.
:param list args: command line arguments
:param dict addedExports: map from a module/package specifier to the set of modules it must be exported to
:return: the value of `args` minus all valid ``--add-exports`` entries
"""
res = []
for arg in args:
if arg.startswith('--add-exports='):
parts = arg[len('--add-exports='):].split('=', 1)
if len(parts) == 2:
export, targets = parts
addedExports.setdefault(export, set()).update(targets.split(','))
else:
# Invalid format - let the VM deal with it
res.append(arg)
else:
res.append(arg)
return res
def _parseVmArgs(args, addDefaultArgs=True):
    """
    Rewrites VM arguments so that Graal is available to the VM: on JDK8 via
    the JVMCI class path and boot class path, on JDK9+ via the module path
    plus the required ``--add-exports`` options.
    """
    args = mx.expand_project_in_args(args, insitu=False)
    argsPrefix = []
    jacocoArgs = mx_gate.get_jacoco_agent_args()
    if jacocoArgs:
        argsPrefix.extend(jacocoArgs)

    # add default graal.options.file
    options_file = join(mx.primary_suite().dir, 'graal.options')
    if exists(options_file):
        argsPrefix.append('-Dgraal.options.file=' + options_file)

    if '-Dgraal.PrintFlags=true' in args and '-Xcomp' not in args:
        mx.warn('Using -Dgraal.PrintFlags=true may have no effect without -Xcomp as Graal initialization is lazy')

    if isJDK8:
        argsPrefix.append('-Djvmci.class.path.append=' + os.pathsep.join((e.get_path() for e in _jvmci_classpath)))
        argsPrefix.append('-Xbootclasspath/a:' + os.pathsep.join([dep.classpath_repr() for dep in _bootclasspath_appends]))
    else:
        deployedDists = [entry.dist() for entry in _jvmci_classpath] + \
                        [e for e in _bootclasspath_appends if e.isJARDistribution()]
        deployedModules = [as_java_module(dist, jdk) for dist in deployedDists]

        # Set or update module path to include Graal and its dependencies as modules
        graalModulepath = []
        for deployedModule in deployedModules:
            graalModulepath.extend([jmd.jarpath for jmd in deployedModule.modulepath if jmd.jarpath])
            graalModulepath.append(deployedModule.jarpath)
        graalModulepath = _uniqify(graalModulepath)

        # Update added exports to include concealed JDK packages required by Graal
        addedExports = {}
        args = _extract_added_exports(args, addedExports)
        for deployedModule in deployedModules:
            for concealingModule, packages in deployedModule.concealedRequires.iteritems():
                # No need to explicitly export JVMCI - it's exported via reflection
                if concealingModule != 'jdk.vm.ci':
                    for package in packages:
                        addedExports.setdefault(concealingModule + '/' + package, set()).add(deployedModule.name)
        for export, targets in addedExports.iteritems():
            argsPrefix.append('--add-exports=' + export + '=' + ','.join(sorted(targets)))

        # Extend or set --module-path argument
        mpUpdated = False
        for mpIndex in range(len(args)):
            if args[mpIndex] == '--module-path':
                assert mpIndex + 1 < len(args), 'VM option ' + args[mpIndex] + ' requires an argument'
                args[mpIndex + 1] = os.pathsep.join(_uniqify(args[mpIndex + 1].split(os.pathsep) + graalModulepath))
                mpUpdated = True
                break
            elif args[mpIndex].startswith('--module-path='):
                mp = args[mpIndex][len('--module-path='):]
                args[mpIndex] = '--module-path=' + os.pathsep.join(_uniqify(mp.split(os.pathsep) + graalModulepath))
                mpUpdated = True
                break
        if not mpUpdated:
            argsPrefix.append('--module-path=' + os.pathsep.join(graalModulepath))

    # Set the JVMCI compiler to Graal
    argsPrefix.append('-Djvmci.Compiler=graal')

    # Warn about options the VM will silently drop after '-version'.
    if '-version' in args:
        ignoredArgs = args[args.index('-version') + 1:]
        if len(ignoredArgs) > 0:
            mx.log("Warning: The following options will be ignored by the VM because they come after the '-version' argument: " + ' '.join(ignoredArgs))
    return jdk.processArgs(argsPrefix + args, addDefaultArgs=addDefaultArgs)
def _check_bootstrap_config(args):
"""
Issues a warning if `args` denote -XX:+BootstrapJVMCI but -XX:-UseJVMCICompiler.
"""
bootstrap = False
useJVMCICompiler = False
for arg in args:
if arg == '-XX:+BootstrapJVMCI':
bootstrap = True
elif arg == '-XX:+UseJVMCICompiler':
useJVMCICompiler = True
if bootstrap and not useJVMCICompiler:
mx.warn('-XX:+BootstrapJVMCI is ignored since -XX:+UseJVMCICompiler is not enabled')
def run_java(args, nonZeroIsFatal=True, out=None, err=None, cwd=None, timeout=None, env=None, addDefaultArgs=True):
    """
    Runs the java executable of the configured JDK with JVMCI enabled and
    Graal set up as the JVMCI compiler.

    NOTE(review): the `timeout` parameter is accepted but not forwarded to
    mx.run - confirm whether this is intentional.
    """
    args = ['-XX:+UnlockExperimentalVMOptions', '-XX:+EnableJVMCI'] + _parseVmArgs(args, addDefaultArgs=addDefaultArgs)
    _check_bootstrap_config(args)
    cmd = get_vm_prefix() + [jdk.java] + ['-server'] + args
    return mx.run(cmd, nonZeroIsFatal=nonZeroIsFatal, out=out, err=err, cwd=cwd, env=env)
# Tag under which the Graal-configured JDK is registered with mx.
_JVMCI_JDK_TAG = 'jvmci'

class GraalJVMCI9JDKConfig(mx.JDKConfig):
    """
    A JDKConfig that configures Graal as the JVMCI compiler.
    """
    def __init__(self):
        mx.JDKConfig.__init__(self, jdk.home, tag=_JVMCI_JDK_TAG)

    def run_java(self, args, **kwArgs):
        # Delegate to the module-level run_java, which injects the JVMCI/Graal setup.
        return run_java(args, **kwArgs)
class GraalJDKFactory(mx.JDKFactory):
    """Factory producing the JVMCI JDK configuration on demand."""
    def getJDKConfig(self):
        return GraalJVMCI9JDKConfig()

    def description(self):
        return "JVMCI JDK with Graal"

# Make the Graal-configured JDK available under the 'jvmci' tag.
mx.addJDKFactory(_JVMCI_JDK_TAG, mx.JavaCompliance('9'), GraalJDKFactory())
def run_vm(args, nonZeroIsFatal=True, out=None, err=None, cwd=None, timeout=None, debugLevel=None, vmbuild=None):
    """run a Java program by executing the java executable in a JVMCI JDK

    NOTE(review): `debugLevel` and `vmbuild` are accepted for interface
    compatibility but are not used - confirm intended.
    """
    return run_java(args, nonZeroIsFatal=nonZeroIsFatal, out=out, err=err, cwd=cwd, timeout=timeout)
class GraalArchiveParticipant:
    """
    Archive participant that, while a distribution's jar is being assembled,
    converts @ServiceProvider-generated provider files and generated
    OptionDescriptors classes into service registrations.
    """

    def __init__(self, dist, isTest=False):
        self.dist = dist
        self.isTest = isTest

    def __opened__(self, arc, srcArc, services):
        # Remember the archive and the mutable services map used by __add__.
        self.services = services
        self.arc = arc

    def __add__(self, arcname, contents):
        providersPrefix = 'META-INF/providers/'
        if arcname.startswith(providersPrefix):
            if self.isTest:
                # The test distributions must not have their @ServiceProvider
                # generated providers converted to real services otherwise
                # bad things can happen such as InvocationPlugins being registered twice.
                pass
            else:
                provider = arcname[len(providersPrefix):]
                for service in contents.strip().split(os.linesep):
                    assert service
                    self.services.setdefault(service, []).append(provider)
            # The provider file itself is consumed in either case.
            return True
        if arcname.endswith('_OptionDescriptors.class'):
            if self.isTest:
                mx.warn('@Option defined in test code will be ignored: ' + arcname)
            else:
                # Need to create service files for the providers of the
                # jdk.vm.ci.options.Options service created by
                # jdk.vm.ci.options.processor.OptionProcessor.
                provider = arcname[:-len('.class')].replace('/', '.')
                self.services.setdefault('com.oracle.graal.options.OptionDescriptors', []).append(provider)
            return False
        # Any other entry is archived normally (implicit None, treated as falsy).

    def __addsrc__(self, arcname, contents):
        return False

    def __closing__(self):
        pass
# Command line options for wrapping the VM invocation (e.g. under a debugger).
mx.add_argument('--vmprefix', action='store', dest='vm_prefix', help='prefix for running the VM (e.g. "gdb --args")', metavar='<prefix>')
mx.add_argument('--gdb', action='store_const', const='gdb --args', dest='vm_prefix', help='alias for --vmprefix "gdb --args"')
mx.add_argument('--lldb', action='store_const', const='lldb --', dest='vm_prefix', help='alias for --vmprefix "lldb --"')

# mx commands contributed by this suite.
mx.update_commands(_suite, {
    'vm': [run_vm, '[-options] class [args...]'],
    'ctw': [ctw, '[-vmoptions|noinline|nocomplex|full]'],
})
def mx_post_parse_cmd_line(opts):
    """
    mx hook invoked after command line parsing: installs archive participants
    on all suite distributions, appends the Truffle API to the boot class
    path and records the configured VM prefix.
    """
    mx.add_ide_envvar('JVMCI_VERSION_CHECK')
    for dist in _suite.dists:
        # Distributions whose name ends with '_TEST' get the test-mode participant.
        dist.set_archiveparticipant(GraalArchiveParticipant(dist, isTest=dist.name.endswith('_TEST')))
    add_bootclasspath_append(mx.distribution('truffle:TRUFFLE_API'))
    global _vm_prefix
    _vm_prefix = opts.vm_prefix
| gpl-2.0 |
Ictp/indico | indico/ext/importer/handlers.py | 2 | 2394 | # -*- coding: utf-8 -*-
##
##
## This file is part of Indico.
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico;if not, see <http://www.gnu.org/licenses/>.
# stdlib imports
import pkg_resources
# legacy imports
from MaKaC.services.implementation.base import ServiceBase
from MaKaC.plugins.base import PluginsHolder
# indico imports
from indico.web.handlers import RHHtdocs
from indico.ext.importer.helpers import ImporterHelper
import indico.ext.importer
class RHImporterHtdocs(RHHtdocs):
    """Static file handler for Importer plugin"""
    # Filesystem location of the plugin's static resources.
    _local_path = pkg_resources.resource_filename(indico.ext.importer.__name__, "htdocs")
    # Subdirectory used for minified assets.
    _min_dir = 'importer'
class DataImportService(ServiceBase):
    """
    Fetches data from the specified importer plugin.

    Arguments:
    query - string used in importer's search phrase
    importer - name of an importer plugin being used
    size - number of returned queries (defaults to 10)
    """

    def _checkParams(self):
        ServiceBase._checkParams(self)
        # 'query' and 'importer' are mandatory; a KeyError propagates if missing.
        self._query = self._params['query']
        self._importer = self._params['importer']
        self._size = self._params.get('size', 10)

    def _getAnswer(self):
        importer = ImporterHelper.getImporter(self._importer)
        if importer:
            return importer.importData(self._query, self._size)
        # NOTE(review): implicitly returns None when the importer is unknown - confirm intended.
class GetImportersService(ServiceBase):
    """
    Returns names and ids of active importer plugins.
    """

    def _getAnswer(self):
        # Map plugin id -> human-readable name for every importer plugin.
        importers = {}
        for plugin in PluginsHolder().getPluginType('importer').getPluginList():
            importers[plugin.getId()] = plugin.getName()
        return importers
methodMap = {
"importer.import" : DataImportService,
"importer.getImporters": GetImportersService,
}
| gpl-3.0 |
ininex/geofire-python | resource/lib/python2.7/site-packages/gcloud/monitoring/test_label.py | 6 | 4111 | # Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
class TestLabelValueType(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.monitoring.label import LabelValueType
return LabelValueType
def test_one(self):
self.assertTrue(hasattr(self._getTargetClass(), 'STRING'))
def test_names(self):
for name in self._getTargetClass().__dict__:
if not name.startswith('_'):
self.assertEqual(getattr(self._getTargetClass(), name), name)
class TestLabelDescriptor(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.monitoring.label import LabelDescriptor
return LabelDescriptor
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_constructor(self):
KEY = 'response_code'
VALUE_TYPE = 'INT64'
DESCRIPTION = 'HTTP status code for the request.'
descriptor = self._makeOne(key=KEY, value_type=VALUE_TYPE,
description=DESCRIPTION)
self.assertEqual(descriptor.key, KEY)
self.assertEqual(descriptor.value_type, VALUE_TYPE)
self.assertEqual(descriptor.description, DESCRIPTION)
def test_constructor_defaults(self):
KEY = 'response_code'
descriptor = self._makeOne(key=KEY)
self.assertEqual(descriptor.key, KEY)
self.assertEqual(descriptor.value_type, 'STRING')
self.assertEqual(descriptor.description, '')
def test_from_dict(self):
KEY = 'response_code'
VALUE_TYPE = 'INT64'
DESCRIPTION = 'HTTP status code for the request.'
info = {
'key': KEY,
'valueType': VALUE_TYPE,
'description': DESCRIPTION,
}
descriptor = self._getTargetClass()._from_dict(info)
self.assertEqual(descriptor.key, KEY)
self.assertEqual(descriptor.value_type, VALUE_TYPE)
self.assertEqual(descriptor.description, DESCRIPTION)
def test_from_dict_defaults(self):
KEY = 'response_code'
info = {'key': KEY}
descriptor = self._getTargetClass()._from_dict(info)
self.assertEqual(descriptor.key, KEY)
self.assertEqual(descriptor.value_type, 'STRING')
self.assertEqual(descriptor.description, '')
def test_to_dict(self):
KEY = 'response_code'
VALUE_TYPE = 'INT64'
DESCRIPTION = 'HTTP status code for the request.'
descriptor = self._makeOne(key=KEY, value_type=VALUE_TYPE,
description=DESCRIPTION)
expected = {
'key': KEY,
'valueType': VALUE_TYPE,
'description': DESCRIPTION,
}
self.assertEqual(descriptor._to_dict(), expected)
def test_to_dict_defaults(self):
KEY = 'response_code'
descriptor = self._makeOne(key=KEY)
expected = {
'key': KEY,
'valueType': 'STRING',
}
self.assertEqual(descriptor._to_dict(), expected)
def test_equality(self):
KEY = 'response_code'
VALUE_TYPE = 'INT64'
DESCRIPTION = 'HTTP status code for the request.'
descriptor1 = self._makeOne(key=KEY, value_type=VALUE_TYPE,
description=DESCRIPTION)
descriptor2 = self._makeOne(key=KEY, value_type=VALUE_TYPE,
description=DESCRIPTION)
self.assertTrue(descriptor1 == descriptor2)
self.assertFalse(descriptor1 != descriptor2)
| mit |
suneeth51/neutron | neutron/tests/unit/plugins/ml2/test_security_group.py | 24 | 6979 | # Copyright (c) 2013 OpenStack Foundation
# Copyright 2013, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
import mock
from neutron.common import constants as const
from neutron import context
from neutron.extensions import securitygroup as ext_sg
from neutron import manager
from neutron.tests import tools
from neutron.tests.unit.agent import test_securitygroups_rpc as test_sg_rpc
from neutron.tests.unit.api.v2 import test_base
from neutron.tests.unit.extensions import test_securitygroup as test_sg
PLUGIN_NAME = 'neutron.plugins.ml2.plugin.Ml2Plugin'
NOTIFIER = 'neutron.plugins.ml2.rpc.AgentNotifierApi'
class Ml2SecurityGroupsTestCase(test_sg.SecurityGroupDBTestCase):
_plugin_name = PLUGIN_NAME
def setUp(self, plugin=None):
test_sg_rpc.set_firewall_driver(test_sg_rpc.FIREWALL_HYBRID_DRIVER)
notifier_p = mock.patch(NOTIFIER)
notifier_cls = notifier_p.start()
self.notifier = mock.Mock()
notifier_cls.return_value = self.notifier
self.useFixture(tools.AttributeMapMemento())
super(Ml2SecurityGroupsTestCase, self).setUp(PLUGIN_NAME)
def tearDown(self):
super(Ml2SecurityGroupsTestCase, self).tearDown()
class TestMl2SecurityGroups(Ml2SecurityGroupsTestCase,
test_sg.TestSecurityGroups,
test_sg_rpc.SGNotificationTestMixin):
def setUp(self):
super(TestMl2SecurityGroups, self).setUp()
self.ctx = context.get_admin_context()
plugin = manager.NeutronManager.get_plugin()
plugin.start_rpc_listeners()
def _make_port_with_new_sec_group(self, net_id):
sg = self._make_security_group(self.fmt, 'name', 'desc')
port = self._make_port(
self.fmt, net_id, security_groups=[sg['security_group']['id']])
return port['port']
def _make_port_without_sec_group(self, net_id):
port = self._make_port(
self.fmt, net_id, security_groups=[])
return port['port']
def test_security_group_get_ports_from_devices(self):
with self.network() as n:
with self.subnet(n):
orig_ports = [
self._make_port_with_new_sec_group(n['network']['id']),
self._make_port_with_new_sec_group(n['network']['id']),
self._make_port_without_sec_group(n['network']['id'])
]
plugin = manager.NeutronManager.get_plugin()
# should match full ID and starting chars
ports = plugin.get_ports_from_devices(self.ctx,
[orig_ports[0]['id'], orig_ports[1]['id'][0:8],
orig_ports[2]['id']])
self.assertEqual(len(orig_ports), len(ports))
for port_dict in ports:
p = next(p for p in orig_ports
if p['id'] == port_dict['id'])
self.assertEqual(p['id'], port_dict['id'])
self.assertEqual(p['security_groups'],
port_dict[ext_sg.SECURITYGROUPS])
self.assertEqual([], port_dict['security_group_rules'])
self.assertEqual([p['fixed_ips'][0]['ip_address']],
port_dict['fixed_ips'])
self._delete('ports', p['id'])
def test_security_group_get_ports_from_devices_with_bad_id(self):
plugin = manager.NeutronManager.get_plugin()
ports = plugin.get_ports_from_devices(self.ctx, ['bad_device_id'])
self.assertFalse(ports)
def test_security_group_no_db_calls_with_no_ports(self):
plugin = manager.NeutronManager.get_plugin()
with mock.patch(
'neutron.plugins.ml2.db.get_sg_ids_grouped_by_port'
) as get_mock:
self.assertFalse(plugin.get_ports_from_devices(self.ctx, []))
self.assertFalse(get_mock.called)
def test_large_port_count_broken_into_parts(self):
plugin = manager.NeutronManager.get_plugin()
max_ports_per_query = 5
ports_to_query = 73
for max_ports_per_query in (1, 2, 5, 7, 9, 31):
with mock.patch('neutron.plugins.ml2.db.MAX_PORTS_PER_QUERY',
new=max_ports_per_query),\
mock.patch(
'neutron.plugins.ml2.db.get_sg_ids_grouped_by_port',
return_value={}) as get_mock:
plugin.get_ports_from_devices(self.ctx,
['%s%s' % (const.TAP_DEVICE_PREFIX, i)
for i in range(ports_to_query)])
all_call_args = [x[1][1] for x in get_mock.mock_calls]
last_call_args = all_call_args.pop()
# all but last should be getting MAX_PORTS_PER_QUERY ports
self.assertTrue(
all(map(lambda x: len(x) == max_ports_per_query,
all_call_args))
)
remaining = ports_to_query % max_ports_per_query
if remaining:
self.assertEqual(remaining, len(last_call_args))
# should be broken into ceil(total/MAX_PORTS_PER_QUERY) calls
self.assertEqual(
math.ceil(ports_to_query / float(max_ports_per_query)),
get_mock.call_count
)
def test_full_uuids_skip_port_id_lookup(self):
plugin = manager.NeutronManager.get_plugin()
# when full UUIDs are provided, the _or statement should only
# have one matching 'IN' critiera for all of the IDs
with mock.patch('neutron.plugins.ml2.db.or_') as or_mock,\
mock.patch('sqlalchemy.orm.Session.query') as qmock:
fmock = qmock.return_value.outerjoin.return_value.filter
# return no ports to exit the method early since we are mocking
# the query
fmock.return_value = []
plugin.get_ports_from_devices(self.ctx,
[test_base._uuid(),
test_base._uuid()])
# the or_ function should only have one argument
or_mock.assert_called_once_with(mock.ANY)
class TestMl2SGServerRpcCallBack(
Ml2SecurityGroupsTestCase,
test_sg_rpc.SGServerRpcCallBackTestCase):
pass
| apache-2.0 |
armersong/zato | code/zato-web-admin/src/zato/admin/web/views/security/oauth.py | 6 | 2297 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2010 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
import logging
# Zato
from zato.admin.web.forms import ChangePasswordForm
from zato.admin.web.forms.security.oauth import CreateForm, EditForm
from zato.admin.web.views import change_password as _change_password, \
CreateEdit, Delete as _Delete, Index as _Index, method_allowed
from zato.common import NONCE_STORE
from zato.common.odb.model import OAuth
logger = logging.getLogger(__name__)
class Index(_Index):
method_allowed = 'GET'
url_name = 'security-oauth'
template = 'zato/security/oauth.html'
service_name = 'zato.security.oauth.get-list'
output_class = OAuth
class SimpleIO(_Index.SimpleIO):
input_required = ('cluster_id',)
output_required = ('id', 'name', 'is_active', 'username', \
'proto_version', 'sig_method', 'max_nonce_log')
output_repeated = True
def handle(self):
return {
'create_form': CreateForm(),
'edit_form': EditForm(prefix='edit'),
'change_password_form': ChangePasswordForm(),
'default_max_nonce_log': NONCE_STORE.DEFAULT_MAX_LOG,
}
class _CreateEdit(CreateEdit):
method_allowed = 'POST'
class SimpleIO(CreateEdit.SimpleIO):
input_required = ('name', 'is_active', 'username', \
'proto_version', 'sig_method', 'max_nonce_log')
output_required = ('id', 'name')
def success_message(self, item):
return 'Successfully {0} the OAuth definition [{1}]'.format(self.verb, item.name)
class Create(_CreateEdit):
url_name = 'security-oauth-create'
service_name = 'zato.security.oauth.create'
class Edit(_CreateEdit):
url_name = 'security-oauth-edit'
form_prefix = 'edit-'
service_name = 'zato.security.oauth.edit'
class Delete(_Delete):
url_name = 'security-oauth-delete'
error_message = 'Could not delete the OAuth definition'
service_name = 'zato.security.oauth.delete'
@method_allowed('POST')
def change_secret(req):
return _change_password(req, 'zato.security.oauth.change-password')
| gpl-3.0 |
rcgee/oq-hazardlib | openquake/hazardlib/tests/gsim/chiou_youngs_2014_test.py | 1 | 6695 | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2014-2016 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
from openquake.hazardlib.gsim.chiou_youngs_2014 import (
ChiouYoungs2014, ChiouYoungs2014PEER, ChiouYoungs2014NearFaultEffect)
from openquake.hazardlib.tests.gsim.utils import BaseGSIMTestCase
from openquake.hazardlib.calc import ground_motion_fields
from openquake.hazardlib import const
from openquake.hazardlib.imt import PGV
from openquake.hazardlib.site import Site, SiteCollection
from openquake.hazardlib.source.rupture import ParametricProbabilisticRupture
from openquake.hazardlib.tom import PoissonTOM
from openquake.hazardlib.geo.surface import SimpleFaultSurface
from openquake.hazardlib.geo.line import Line
from openquake.hazardlib.geo.point import Point
class ChiouYoungs2014TestCase(BaseGSIMTestCase):
GSIM_CLASS = ChiouYoungs2014
# Test data were obtained from a tool given by the authorst
# in tests/gsim/data/NGA/CY14
def test_mean_hanging_wall_normal_slip(self):
self.check('NGA/CY14/CY14_MEDIAN_MS_HW_NM.csv',
max_discrep_percentage=0.05)
def test_mean_hanging_wall_reversed_slip(self):
self.check('NGA/CY14/CY14_MEDIAN_MS_HW_RV.csv',
max_discrep_percentage=0.05)
def test_mean_hanging_wall_strike_slip(self):
self.check('NGA/CY14/CY14_MEDIAN_MS_HW_SS.csv',
max_discrep_percentage=0.05)
def test_inter_event_stddev(self):
# data generated from opensha
self.check('NGA/CY14/CY14_INTER_EVENT_SIGMA.csv',
max_discrep_percentage=0.05)
def test_intra_event_stddev(self):
# data generated from opensha
self.check('NGA/CY14/CY14_INTRA_EVENT_SIGMA.csv',
max_discrep_percentage=0.05)
def test_total_event_stddev(self):
# data generated from opensha
self.check('NGA/CY14/CY14_TOTAL_EVENT_SIGMA.csv',
max_discrep_percentage=0.05)
class ChiouYoungs2014PEERTestCase(BaseGSIMTestCase):
GSIM_CLASS = ChiouYoungs2014PEER
# First five tests use data ported from Kenneth Campbell
# tables for verifying NGA models, available from OpenSHA, see
# http://opensha.usc.edu/docs/opensha/NGA/Campbell_NGA_tests.zip
# This data is distributed under different license, see LICENSE.txt
# in tests/gsim/data/NGA
def test_mean_hanging_wall_normal_slip(self):
self.check('NGA/CY14/CY14_MEDIAN_MS_HW_NM.csv',
max_discrep_percentage=0.05)
def test_mean_hanging_wall_reversed_slip(self):
self.check('NGA/CY14/CY14_MEDIAN_MS_HW_RV.csv',
max_discrep_percentage=0.05)
def test_mean_hanging_wall_strike_slip(self):
self.check('NGA/CY14/CY14_MEDIAN_MS_HW_SS.csv',
max_discrep_percentage=0.05)
def test_total_event_stddev(self):
# Total Sigma fixes at 0.65
self.check('NGA/CY14/CY14_TOTAL_EVENT_SIGMA_PEER.csv',
max_discrep_percentage=0.05)
class ChiouYoungs2014NearFaultTestCase(BaseGSIMTestCase):
GSIM_CLASS = ChiouYoungs2014NearFaultEffect
# First five tests use data ported from Kenneth Campbell
# tables for verifying NGA models, available from OpenSHA, see
# http://opensha.usc.edu/docs/opensha/NGA/Campbell_NGA_tests.zip
# This data is distributed under different license, see LICENSE.txt
# in tests/gsim/data/NGA
def test_mean_near_fault(self):
self.check('NGA/CY14/CY14_MEDIAN_RCDPP.csv',
max_discrep_percentage=0.05)
class ChiouYoungs2014NearFaultTestCase(BaseGSIMTestCase):
GSIM_CLASS = ChiouYoungs2014NearFaultEffect
# First five tests use data ported from Kenneth Campbell
# tables for verifying NGA models, available from OpenSHA, see
# http://opensha.usc.edu/docs/opensha/NGA/Campbell_NGA_tests.zip
# This data is distributed under different license, see LICENSE.txt
# in tests/gsim/data/NGA
def test_mean_near_fault(self):
self.check('NGA/CY14/CY14_MEDIAN_RCDPP.csv',
max_discrep_percentage=0.05)
class ChiouYoungs2014NearFaultDistanceTaperTestCase(BaseGSIMTestCase):
def make_rupture(self):
# Create the rupture surface.
upper_seismogenic_depth = 3.
lower_seismogenic_depth = 15.
dip = 90.
mesh_spacing = 1.
fault_trace_start = Point(28.531397, 40.8790859336)
fault_trace_end = Point(28.85, 40.9)
fault_trace = Line([fault_trace_start, fault_trace_end])
default_arguments = {
'mag': 6.5,
'rake': 180.,
'tectonic_region_type': const.TRT.STABLE_CONTINENTAL,
'hypocenter': Point(28.709146553353872, 40.890863701462457, 11.0),
'surface': SimpleFaultSurface.from_fault_data(
fault_trace, upper_seismogenic_depth, lower_seismogenic_depth,
dip=dip, mesh_spacing=mesh_spacing),
'source_typology': object(),
'rupture_slip_direction': 0.,
'occurrence_rate': 0.01,
'temporal_occurrence_model': PoissonTOM(50)
}
kwargs = default_arguments
rupture = ParametricProbabilisticRupture(**kwargs)
return rupture
def test_mearn_nearfault_distance_taper(self):
rupture = self.make_rupture()
site1 = Site(location=Point(27.9, 41), vs30=1200.,
vs30measured=True, z1pt0=2.36, z2pt5=2.)
site2 = Site(location=Point(28.1, 41), vs30=1200.,
vs30measured=True, z1pt0=2.36, z2pt5=2.)
sites = SiteCollection([site1, site2])
fields = ground_motion_fields(
rupture=rupture,
sites=sites,
imts=[PGV()],
gsim=ChiouYoungs2014NearFaultEffect(),
truncation_level=0,
realizations=1.
)
gmf = fields[PGV()]
self.assertAlmostEquals(2.27328758, gmf[0], delta=1e-4)
self.assertAlmostEquals(3.38322998, gmf[1], delta=1e-4)
| agpl-3.0 |
serzans/wagtail | wagtail/project_template/home/migrations/0002_create_homepage.py | 61 | 1179 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def create_homepage(apps, schema_editor):
# Get models
ContentType = apps.get_model('contenttypes.ContentType')
Page = apps.get_model('wagtailcore.Page')
Site = apps.get_model('wagtailcore.Site')
HomePage = apps.get_model('home.HomePage')
# Delete the default homepage
Page.objects.get(id=2).delete()
# Create content type for homepage model
homepage_content_type, created = ContentType.objects.get_or_create(
model='homepage', app_label='home')
# Create a new homepage
homepage = HomePage.objects.create(
title="Homepage",
slug='home',
content_type=homepage_content_type,
path='00010001',
depth=2,
numchild=0,
url_path='/home/',
)
# Create a site with the new homepage set as the root
Site.objects.create(
hostname='localhost', root_page=homepage, is_default_site=True)
class Migration(migrations.Migration):
dependencies = [
('home', '0001_initial'),
]
operations = [
migrations.RunPython(create_homepage),
]
| bsd-3-clause |
zixiliuyue/pika | pika/connection.py | 2 | 82517 | """Core connection objects"""
import ast
import sys
import collections
import copy
import logging
import math
import numbers
import platform
import warnings
if sys.version_info > (3,):
import urllib.parse as urlparse # pylint: disable=E0611,F0401
else:
import urlparse
from pika import __version__
from pika import callback
import pika.channel
from pika import credentials as pika_credentials
from pika import exceptions
from pika import frame
from pika import heartbeat
from pika import utils
from pika import spec
from pika.compat import (xrange, basestring, # pylint: disable=W0622
url_unquote, dictkeys, dict_itervalues,
dict_iteritems)
BACKPRESSURE_WARNING = ("Pika: Write buffer exceeded warning threshold at "
"%i bytes and an estimated %i frames behind")
PRODUCT = "Pika Python Client Library"
LOGGER = logging.getLogger(__name__)
class InternalCloseReasons(object):
"""Internal reason codes passed to the user's on_close_callback when the
connection is terminated abruptly, without reply code/text from the broker.
AMQP 0.9.1 specification cites IETF RFC 821 for reply codes. To avoid
conflict, the `InternalCloseReasons` namespace uses negative integers. These
are invalid for sending to the broker.
"""
SOCKET_ERROR = -1
BLOCKED_CONNECTION_TIMEOUT = -2
class Parameters(object): # pylint: disable=R0902
"""Base connection parameters class definition
:param bool backpressure_detection: `DEFAULT_BACKPRESSURE_DETECTION`
:param float|None blocked_connection_timeout:
`DEFAULT_BLOCKED_CONNECTION_TIMEOUT`
:param int channel_max: `DEFAULT_CHANNEL_MAX`
:param int connection_attempts: `DEFAULT_CONNECTION_ATTEMPTS`
:param credentials: `DEFAULT_CREDENTIALS`
:param int frame_max: `DEFAULT_FRAME_MAX`
:param int heartbeat: `DEFAULT_HEARTBEAT_TIMEOUT`
:param str host: `DEFAULT_HOST`
:param str locale: `DEFAULT_LOCALE`
:param int port: `DEFAULT_PORT`
:param float retry_delay: `DEFAULT_RETRY_DELAY`
:param float socket_timeout: `DEFAULT_SOCKET_TIMEOUT`
:param bool ssl: `DEFAULT_SSL`
:param dict ssl_options: `DEFAULT_SSL_OPTIONS`
:param str virtual_host: `DEFAULT_VIRTUAL_HOST`
"""
# Declare slots to protect against accidental assignment of an invalid
# attribute
__slots__ = (
'_backpressure_detection',
'_blocked_connection_timeout',
'_channel_max',
'_client_properties',
'_connection_attempts',
'_credentials',
'_frame_max',
'_heartbeat',
'_host',
'_locale',
'_port',
'_retry_delay',
'_socket_timeout',
'_ssl',
'_ssl_options',
'_virtual_host'
)
DEFAULT_USERNAME = 'guest'
DEFAULT_PASSWORD = 'guest'
DEFAULT_BACKPRESSURE_DETECTION = False
DEFAULT_BLOCKED_CONNECTION_TIMEOUT = None
DEFAULT_CHANNEL_MAX = pika.channel.MAX_CHANNELS
DEFAULT_CLIENT_PROPERTIES = None
DEFAULT_CREDENTIALS = pika_credentials.PlainCredentials(DEFAULT_USERNAME,
DEFAULT_PASSWORD)
DEFAULT_CONNECTION_ATTEMPTS = 1
DEFAULT_FRAME_MAX = spec.FRAME_MAX_SIZE
DEFAULT_HEARTBEAT_TIMEOUT = None # None accepts server's proposal
DEFAULT_HOST = 'localhost'
DEFAULT_LOCALE = 'en_US'
DEFAULT_PORT = 5672
DEFAULT_RETRY_DELAY = 2.0
DEFAULT_SOCKET_TIMEOUT = 0.25
DEFAULT_SSL = False
DEFAULT_SSL_OPTIONS = None
DEFAULT_SSL_PORT = 5671
DEFAULT_VIRTUAL_HOST = '/'
DEFAULT_HEARTBEAT_INTERVAL = DEFAULT_HEARTBEAT_TIMEOUT # DEPRECATED
def __init__(self):
self._backpressure_detection = None
self.backpressure_detection = self.DEFAULT_BACKPRESSURE_DETECTION
# If not None, blocked_connection_timeout is the timeout, in seconds,
# for the connection to remain blocked; if the timeout expires, the
# connection will be torn down, triggering the connection's
# on_close_callback
self._blocked_connection_timeout = None
self.blocked_connection_timeout = (
self.DEFAULT_BLOCKED_CONNECTION_TIMEOUT)
self._channel_max = None
self.channel_max = self.DEFAULT_CHANNEL_MAX
self._client_properties = None
self.client_properties = self.DEFAULT_CLIENT_PROPERTIES
self._connection_attempts = None
self.connection_attempts = self.DEFAULT_CONNECTION_ATTEMPTS
self._credentials = None
self.credentials = self.DEFAULT_CREDENTIALS
self._frame_max = None
self.frame_max = self.DEFAULT_FRAME_MAX
self._heartbeat = None
self.heartbeat = self.DEFAULT_HEARTBEAT_TIMEOUT
self._host = None
self.host = self.DEFAULT_HOST
self._locale = None
self.locale = self.DEFAULT_LOCALE
self._port = None
self.port = self.DEFAULT_PORT
self._retry_delay = None
self.retry_delay = self.DEFAULT_RETRY_DELAY
self._socket_timeout = None
self.socket_timeout = self.DEFAULT_SOCKET_TIMEOUT
self._ssl = None
self.ssl = self.DEFAULT_SSL
self._ssl_options = None
self.ssl_options = self.DEFAULT_SSL_OPTIONS
self._virtual_host = None
self.virtual_host = self.DEFAULT_VIRTUAL_HOST
def __repr__(self):
"""Represent the info about the instance.
:rtype: str
"""
return ('<%s host=%s port=%s virtual_host=%s ssl=%s>' %
(self.__class__.__name__, self.host, self.port,
self.virtual_host, self.ssl))
@property
def backpressure_detection(self):
"""
:returns: boolean indicatating whether backpressure detection is
enabled. Defaults to `DEFAULT_BACKPRESSURE_DETECTION`.
"""
return self._backpressure_detection
@backpressure_detection.setter
def backpressure_detection(self, value):
"""
:param bool value: boolean indicatating whether to enable backpressure
detection
"""
if not isinstance(value, bool):
raise TypeError('backpressure_detection must be a bool, '
'but got %r' % (value,))
self._backpressure_detection = value
@property
def blocked_connection_timeout(self):
"""
:returns: None or float blocked connection timeout. Defaults to
`DEFAULT_BLOCKED_CONNECTION_TIMEOUT`.
"""
return self._blocked_connection_timeout
@blocked_connection_timeout.setter
def blocked_connection_timeout(self, value):
"""
:param value: If not None, blocked_connection_timeout is the timeout, in
seconds, for the connection to remain blocked; if the timeout
expires, the connection will be torn down, triggering the
connection's on_close_callback
"""
if value is not None:
if not isinstance(value, numbers.Real):
raise TypeError('blocked_connection_timeout must be a Real '
'number, but got %r' % (value,))
if value < 0:
raise ValueError('blocked_connection_timeout must be >= 0, but '
'got %r' % (value,))
self._blocked_connection_timeout = value
@property
def channel_max(self):
"""
:returns: max preferred number of channels. Defaults to
`DEFAULT_CHANNEL_MAX`.
:rtype: int
"""
return self._channel_max
@channel_max.setter
def channel_max(self, value):
"""
:param int value: max preferred number of channels, between 1 and
`channel.MAX_CHANNELS`, inclusive
"""
if not isinstance(value, numbers.Integral):
raise TypeError('channel_max must be an int, but got %r' % (value,))
if value < 1 or value > pika.channel.MAX_CHANNELS:
raise ValueError('channel_max must be <= %i and > 0, but got %r' %
(pika.channel.MAX_CHANNELS, value))
self._channel_max = value
@property
def client_properties(self):
"""
:returns: None or dict of client properties used to override the fields
in the default client poperties reported to RabbitMQ via
`Connection.StartOk` method. Defaults to
`DEFAULT_CLIENT_PROPERTIES`.
"""
return self._client_properties
@client_properties.setter
def client_properties(self, value):
"""
:param value: None or dict of client properties used to override the
fields in the default client poperties reported to RabbitMQ via
`Connection.StartOk` method.
"""
if not isinstance(value, (dict, type(None),)):
raise TypeError('client_properties must be dict or None, '
'but got %r' % (value,))
# Copy the mutable object to avoid accidental side-effects
self._client_properties = copy.deepcopy(value)
@property
def connection_attempts(self):
"""
:returns: number of socket connection attempts. Defaults to
`DEFAULT_CONNECTION_ATTEMPTS`.
"""
return self._connection_attempts
@connection_attempts.setter
def connection_attempts(self, value):
"""
:param int value: number of socket connection attempts of at least 1
"""
if not isinstance(value, numbers.Integral):
raise TypeError('connection_attempts must be an int')
if value < 1:
raise ValueError('connection_attempts must be > 0, but got %r' %
(value,))
self._connection_attempts = value
@property
def credentials(self):
"""
:rtype: one of the classes from `pika.credentials.VALID_TYPES`. Defaults
to `DEFAULT_CREDENTIALS`.
"""
return self._credentials
@credentials.setter
def credentials(self, value):
"""
:param value: authentication credential object of one of the classes
from `pika.credentials.VALID_TYPES`
"""
if not isinstance(value, tuple(pika_credentials.VALID_TYPES)):
raise TypeError('Credentials must be an object of type: %r, but '
'got %r' % (pika_credentials.VALID_TYPES, value))
# Copy the mutable object to avoid accidental side-effects
self._credentials = copy.deepcopy(value)
@property
def frame_max(self):
"""
:returns: desired maximum AMQP frame size to use. Defaults to
`DEFAULT_FRAME_MAX`.
"""
return self._frame_max
@frame_max.setter
def frame_max(self, value):
"""
:param int value: desired maximum AMQP frame size to use between
`spec.FRAME_MIN_SIZE` and `spec.FRAME_MAX_SIZE`, inclusive
"""
if not isinstance(value, numbers.Integral):
raise TypeError('frame_max must be an int, but got %r' % (value,))
if value < spec.FRAME_MIN_SIZE:
raise ValueError('Min AMQP 0.9.1 Frame Size is %i, but got %r',
(spec.FRAME_MIN_SIZE, value,))
elif value > spec.FRAME_MAX_SIZE:
raise ValueError('Max AMQP 0.9.1 Frame Size is %i, but got %r',
(spec.FRAME_MAX_SIZE, value,))
self._frame_max = value
@property
def heartbeat(self):
"""
:returns: desired connection heartbeat timeout for negotiation or
None to accept broker's value. 0 turns heartbeat off. Defaults to
`DEFAULT_HEARTBEAT_TIMEOUT`.
:rtype: integer, float, or None
"""
return self._heartbeat
@heartbeat.setter
def heartbeat(self, value):
"""
:param value: desired connection heartbeat timeout for negotiation or
None to accept broker's value. 0 turns heartbeat off.
"""
if value is not None:
if not isinstance(value, numbers.Integral):
raise TypeError('heartbeat must be an int, but got %r' %
(value,))
if value < 0:
raise ValueError('heartbeat must >= 0, but got %r' % (value,))
self._heartbeat = value
@property
def host(self):
"""
:returns: hostname or ip address of broker. Defaults to `DEFAULT_HOST`.
:rtype: str
"""
return self._host
@host.setter
def host(self, value):
"""
:param str value: hostname or ip address of broker
"""
if not isinstance(value, basestring):
raise TypeError('host must be a str or unicode str, but got %r' %
(value,))
self._host = value
@property
def locale(self):
"""
:returns: locale value to pass to broker; e.g., 'en_US'. Defaults to
`DEFAULT_LOCALE`.
:rtype: str
"""
return self._locale
@locale.setter
def locale(self, value):
"""
:param str value: locale value to pass to broker; e.g., "en_US"
"""
if not isinstance(value, basestring):
raise TypeError('locale must be a str, but got %r' % (value,))
self._locale = value
@property
def port(self):
"""
:returns: port number of broker's listening socket. Defaults to
`DEFAULT_PORT`.
:rtype: int
"""
return self._port
@port.setter
def port(self, value):
"""
:param int value: port number of broker's listening socket
"""
if not isinstance(value, numbers.Integral):
raise TypeError('port must be an int, but got %r' % (value,))
self._port = value
@property
def retry_delay(self):
"""
:returns: interval between socket connection attempts; see also
`connection_attempts`. Defaults to `DEFAULT_RETRY_DELAY`.
:rtype: float
"""
return self._retry_delay
@retry_delay.setter
def retry_delay(self, value):
"""
:param float value: interval between socket connection attempts; see
also `connection_attempts`.
"""
if not isinstance(value, numbers.Real):
raise TypeError('retry_delay must be a float or int, but got %r' %
(value,))
self._retry_delay = value
@property
def socket_timeout(self):
"""
:returns: socket timeout value. Defaults to `DEFAULT_SOCKET_TIMEOUT`.
:rtype: float
"""
return self._socket_timeout
@socket_timeout.setter
def socket_timeout(self, value):
"""
:param float value: socket timeout value; NOTE: this is mostly unused
now, owing to switchover to to non-blocking socket setting after
initial socket conection establishment.
"""
if value is not None:
if not isinstance(value, numbers.Real):
raise TypeError('socket_timeout must be a float or int, '
'but got %r' % (value,))
if not value > 0:
raise ValueError('socket_timeout must be > 0, but got %r' %
(value,))
self._socket_timeout = value
@property
def ssl(self):
"""
:returns: boolean indicating whether to connect via SSL. Defaults to
`DEFAULT_SSL`.
"""
return self._ssl
@ssl.setter
def ssl(self, value):
"""
:param bool value: boolean indicating whether to connect via SSL
"""
if not isinstance(value, bool):
raise TypeError('ssl must be a bool, but got %r' % (value,))
self._ssl = value
@property
def ssl_options(self):
"""
:returns: None or a dict of options to pass to `ssl.wrap_socket`.
Defaults to `DEFAULT_SSL_OPTIONS`.
"""
return self._ssl_options
@ssl_options.setter
def ssl_options(self, value):
"""
:param value: None or a dict of options to pass to `ssl.wrap_socket`.
"""
if not isinstance(value, (dict, type(None))):
raise TypeError('ssl_options must be a dict or None, but got %r' %
(value,))
# Copy the mutable object to avoid accidental side-effects
self._ssl_options = copy.deepcopy(value)
@property
def virtual_host(self):
"""
:returns: rabbitmq virtual host name. Defaults to
`DEFAULT_VIRTUAL_HOST`.
"""
return self._virtual_host
@virtual_host.setter
def virtual_host(self, value):
"""
:param str value: rabbitmq virtual host name
"""
if not isinstance(value, basestring):
raise TypeError('virtual_host must be a str, but got %r' % (value,))
self._virtual_host = value
class ConnectionParameters(Parameters):
    """Connection parameters object that is passed into the connection adapter
    upon construction.

    Only arguments that were explicitly supplied by the caller (i.e., not left
    at the `_DEFAULT` sentinel) are assigned, so unspecified options keep the
    defaults established by the `Parameters` base class.

    """

    # Protect against accidental assignment of an invalid attribute
    __slots__ = ()

    class _DEFAULT(object):
        """Sentinel that designates "argument not supplied"; internal use.

        A sentinel class (rather than None) is used so that None remains a
        legal explicit value for parameters such as heartbeat.
        """
        pass

    def __init__(self, # pylint: disable=R0913,R0914,R0912
                 host=_DEFAULT,
                 port=_DEFAULT,
                 virtual_host=_DEFAULT,
                 credentials=_DEFAULT,
                 channel_max=_DEFAULT,
                 frame_max=_DEFAULT,
                 heartbeat=_DEFAULT,
                 ssl=_DEFAULT,
                 ssl_options=_DEFAULT,
                 connection_attempts=_DEFAULT,
                 retry_delay=_DEFAULT,
                 socket_timeout=_DEFAULT,
                 locale=_DEFAULT,
                 backpressure_detection=_DEFAULT,
                 blocked_connection_timeout=_DEFAULT,
                 client_properties=_DEFAULT,
                 **kwargs):
        """Create a new ConnectionParameters instance. See `Parameters` for
        default values.

        :param str host: Hostname or IP Address to connect to
        :param int port: TCP port to connect to
        :param str virtual_host: RabbitMQ virtual host to use
        :param pika.credentials.Credentials credentials: auth credentials
        :param int channel_max: Maximum number of channels to allow
        :param int frame_max: The maximum byte size for an AMQP frame
        :param int heartbeat: Heartbeat timeout. Max between this value
            and server's proposal will be used as the heartbeat timeout. Use 0
            to deactivate heartbeats and None to accept server's proposal.
        :param bool ssl: Enable SSL
        :param dict ssl_options: None or a dict of arguments to be passed to
            ssl.wrap_socket
        :param int connection_attempts: Maximum number of retry attempts
        :param int|float retry_delay: Time to wait in seconds, before the next
            connection attempt
        :param int|float socket_timeout: Use for high latency networks
        :param str locale: Set the locale value
        :param bool backpressure_detection: DEPRECATED in favor of
            `Connection.Blocked` and `Connection.Unblocked`. See
            `Connection.add_on_connection_blocked_callback`.
        :param blocked_connection_timeout: If not None,
            the value is a non-negative timeout, in seconds, for the
            connection to remain blocked (triggered by Connection.Blocked from
            broker); if the timeout expires before connection becomes unblocked,
            the connection will be torn down, triggering the adapter-specific
            mechanism for informing client app about the closed connection (
            e.g., on_close_callback or ConnectionClosed exception) with
            `reason_code` of `InternalCloseReasons.BLOCKED_CONNECTION_TIMEOUT`.
        :type blocked_connection_timeout: None, int, float
        :param client_properties: None or dict of client properties used to
            override the fields in the default client properties reported to
            RabbitMQ via `Connection.StartOk` method.
        :param heartbeat_interval: DEPRECATED; use `heartbeat` instead, and
            don't pass both
        :raises TypeError: if both `heartbeat` and the deprecated
            `heartbeat_interval` are supplied, or if unexpected keyword
            arguments remain

        """
        super(ConnectionParameters, self).__init__()

        # Assign only the values the caller actually provided; each assignment
        # goes through the corresponding validating property setter on the
        # `Parameters` base class.
        if backpressure_detection is not self._DEFAULT:
            self.backpressure_detection = backpressure_detection

        if blocked_connection_timeout is not self._DEFAULT:
            self.blocked_connection_timeout = blocked_connection_timeout

        if channel_max is not self._DEFAULT:
            self.channel_max = channel_max

        if client_properties is not self._DEFAULT:
            self.client_properties = client_properties

        if connection_attempts is not self._DEFAULT:
            self.connection_attempts = connection_attempts

        if credentials is not self._DEFAULT:
            self.credentials = credentials

        if frame_max is not self._DEFAULT:
            self.frame_max = frame_max

        if heartbeat is not self._DEFAULT:
            self.heartbeat = heartbeat

        # Accept the deprecated heartbeat_interval kwarg, but never together
        # with the modern heartbeat arg.
        try:
            heartbeat_interval = kwargs.pop('heartbeat_interval')
        except KeyError:
            # Good, this one is deprecated
            pass
        else:
            warnings.warn('heartbeat_interval is deprecated, use heartbeat',
                          DeprecationWarning, stacklevel=2)
            if heartbeat is not self._DEFAULT:
                raise TypeError('heartbeat and deprecated heartbeat_interval '
                                'are mutually-exclusive')
            self.heartbeat = heartbeat_interval

        if host is not self._DEFAULT:
            self.host = host

        if locale is not self._DEFAULT:
            self.locale = locale

        if retry_delay is not self._DEFAULT:
            self.retry_delay = retry_delay

        if socket_timeout is not self._DEFAULT:
            self.socket_timeout = socket_timeout

        if ssl is not self._DEFAULT:
            self.ssl = ssl

        if ssl_options is not self._DEFAULT:
            self.ssl_options = ssl_options

        # Set port after SSL status is known, so that when no explicit port
        # was given the SSL-appropriate default can be selected.
        if port is not self._DEFAULT:
            self.port = port
        elif ssl is not self._DEFAULT:
            self.port = self.DEFAULT_SSL_PORT if self.ssl else self.DEFAULT_PORT

        if virtual_host is not self._DEFAULT:
            self.virtual_host = virtual_host

        if kwargs:
            raise TypeError('Unexpected kwargs: %r' % (kwargs,))
class URLParameters(Parameters):
    """Connect to RabbitMQ via an AMQP URL in the format::

        amqp://username:password@host:port/<virtual_host>[?query-string]

    Ensure that the virtual host is URI encoded when specified. For example if
    you are using the default "/" virtual host, the value should be `%2f`.

    See `Parameters` for default values.

    Valid query string values are:

        - backpressure_detection:
            DEPRECATED in favor of
            `Connection.Blocked` and `Connection.Unblocked`. See
            `Connection.add_on_connection_blocked_callback`.
        - channel_max:
            Override the default maximum channel count value
        - client_properties:
            dict of client properties used to override the fields in the
            default client properties reported to RabbitMQ via
            `Connection.StartOk` method
        - connection_attempts:
            Specify how many times pika should try and reconnect before it
            gives up
        - frame_max:
            Override the default maximum frame size for communication
        - heartbeat:
            Specify the number of seconds between heartbeat frames to ensure
            that the link between RabbitMQ and your application is up
        - locale:
            Override the default `en_US` locale value
        - ssl:
            Toggle SSL, possible values are `t`, `f`
        - ssl_options:
            Arguments passed to :meth:`ssl.wrap_socket`
        - retry_delay:
            The number of seconds to sleep before attempting to connect on
            connection failure.
        - socket_timeout:
            Override low level socket timeout value
        - blocked_connection_timeout:
            Set the timeout, in seconds, that the connection may remain blocked
            (triggered by Connection.Blocked from broker); if the timeout
            expires before connection becomes unblocked, the connection will be
            torn down, triggering the connection's on_close_callback

    :param str url: The AMQP URL to connect to

    """

    # Protect against accidental assignment of an invalid attribute
    __slots__ = ('_all_url_query_values',)

    # The name of the private function for parsing and setting a given URL
    # query arg is constructed by concatenating the query arg's name to this
    # prefix, e.g. 'heartbeat' is handled by `_set_url_heartbeat`.
    _SETTER_PREFIX = '_set_url_'

    def __init__(self, url):
        """Create a new URLParameters instance.

        :param str url: The URL value
        :raises ValueError: if the URL scheme is unsupported, a query-string
            parameter is unknown, appears more than once, or has an invalid
            value

        """
        super(URLParameters, self).__init__()

        self._all_url_query_values = None

        # Handle the Protocol scheme
        #
        # Fix up scheme amqp(s) to http(s) so urlparse won't barf on python
        # prior to 2.7. On Python 2.6.9,
        # `urlparse('amqp://127.0.0.1/%2f?socket_timeout=1')` produces an
        # incorrect path='/%2f?socket_timeout=1'
        if url[0:4].lower() == 'amqp':
            url = 'http' + url[4:]

        # TODO Is support for the alternative http(s) schemes intentional?

        parts = urlparse.urlparse(url)

        # SSL status is implied by the (rewritten) scheme: amqps -> https
        if parts.scheme == 'https':
            self.ssl = True
        elif parts.scheme == 'http':
            self.ssl = False
        elif parts.scheme:
            raise ValueError('Unexpected URL scheme %r; supported scheme '
                             'values: amqp, amqps' % (parts.scheme,))

        if parts.hostname is not None:
            self.host = parts.hostname

        # Take care of port after SSL status is known
        if parts.port is not None:
            self.port = parts.port
        else:
            self.port = self.DEFAULT_SSL_PORT if self.ssl else self.DEFAULT_PORT

        if parts.username is not None:
            self.credentials = pika_credentials.PlainCredentials(parts.username,
                                                                 parts.password)

        # Get the Virtual Host (first path segment, URI-decoded)
        if len(parts.path) > 1:
            self.virtual_host = url_unquote(parts.path.split('/')[1])

        # Handle query string values, validating and assigning them
        self._all_url_query_values = urlparse.parse_qs(parts.query)

        for name, value in dict_iteritems(self._all_url_query_values):
            try:
                # Dispatch to the matching `_set_url_<name>` deserializer
                set_value = getattr(self, self._SETTER_PREFIX + name)
            except AttributeError:
                raise ValueError('Unknown URL parameter: %r' % (name,))

            try:
                # parse_qs maps each name to a list; exactly one value is
                # accepted per parameter
                (value,) = value
            except ValueError:
                raise ValueError('Expected exactly one value for URL parameter '
                                 '%s, but got %i values: %s' % (
                                     name, len(value), value))

            set_value(value)

    def _set_url_backpressure_detection(self, value):
        """Deserialize and apply the corresponding query string arg"""
        try:
            backpressure_detection = {'t': True, 'f': False}[value]
        except KeyError:
            raise ValueError('Invalid backpressure_detection value: %r' %
                             (value,))
        self.backpressure_detection = backpressure_detection

    def _set_url_blocked_connection_timeout(self, value):
        """Deserialize and apply the corresponding query string arg"""
        try:
            blocked_connection_timeout = float(value)
        except ValueError as exc:
            raise ValueError('Invalid blocked_connection_timeout value %r: %r' %
                             (value, exc,))
        self.blocked_connection_timeout = blocked_connection_timeout

    def _set_url_channel_max(self, value):
        """Deserialize and apply the corresponding query string arg"""
        try:
            channel_max = int(value)
        except ValueError as exc:
            raise ValueError('Invalid channel_max value %r: %r' % (value, exc,))
        self.channel_max = channel_max

    def _set_url_client_properties(self, value):
        """Deserialize and apply the corresponding query string arg"""
        self.client_properties = ast.literal_eval(value)

    def _set_url_connection_attempts(self, value):
        """Deserialize and apply the corresponding query string arg"""
        try:
            connection_attempts = int(value)
        except ValueError as exc:
            raise ValueError('Invalid connection_attempts value %r: %r' %
                             (value, exc,))
        self.connection_attempts = connection_attempts

    def _set_url_frame_max(self, value):
        """Deserialize and apply the corresponding query string arg"""
        try:
            frame_max = int(value)
        except ValueError as exc:
            raise ValueError('Invalid frame_max value %r: %r' % (value, exc,))
        self.frame_max = frame_max

    def _set_url_heartbeat(self, value):
        """Deserialize and apply the corresponding query string arg"""
        # heartbeat and the deprecated heartbeat_interval are mutually
        # exclusive in the query string
        if 'heartbeat_interval' in self._all_url_query_values:
            raise ValueError('Deprecated URL parameter heartbeat_interval must '
                             'not be specified together with heartbeat')

        try:
            heartbeat_timeout = int(value)
        except ValueError as exc:
            raise ValueError('Invalid heartbeat value %r: %r' % (value, exc,))
        self.heartbeat = heartbeat_timeout

    def _set_url_heartbeat_interval(self, value):
        """Deserialize and apply the corresponding query string arg.

        DEPRECATED: use the `heartbeat` query parameter instead.
        """
        warnings.warn('heartbeat_interval is deprecated, use heartbeat',
                      DeprecationWarning, stacklevel=2)

        if 'heartbeat' in self._all_url_query_values:
            raise ValueError('Deprecated URL parameter heartbeat_interval must '
                             'not be specified together with heartbeat')

        try:
            heartbeat_timeout = int(value)
        except ValueError as exc:
            raise ValueError('Invalid heartbeat_interval value %r: %r' %
                             (value, exc,))
        self.heartbeat = heartbeat_timeout

    def _set_url_locale(self, value):
        """Deserialize and apply the corresponding query string arg"""
        self.locale = value

    def _set_url_retry_delay(self, value):
        """Deserialize and apply the corresponding query string arg"""
        try:
            retry_delay = float(value)
        except ValueError as exc:
            raise ValueError('Invalid retry_delay value %r: %r' % (value, exc,))
        self.retry_delay = retry_delay

    def _set_url_socket_timeout(self, value):
        """Deserialize and apply the corresponding query string arg"""
        try:
            socket_timeout = float(value)
        except ValueError as exc:
            raise ValueError('Invalid socket_timeout value %r: %r' %
                             (value, exc,))
        self.socket_timeout = socket_timeout

    def _set_url_ssl_options(self, value):
        """Deserialize and apply the corresponding query string arg"""
        self.ssl_options = ast.literal_eval(value)
class Connection(object):
    """This is the core class that implements communication with RabbitMQ. This
    class should not be invoked directly but rather through the use of an
    adapter such as SelectConnection or BlockingConnection.

    :param pika.connection.Parameters parameters: Connection parameters
    :param method on_open_callback: Called when the connection is opened
    :param method on_open_error_callback: Called if the connection can't
        be opened
    :param method on_close_callback: Called when the connection is closed

    """

    # Disable pylint messages concerning "method could be a function"
    # pylint: disable=R0201

    # Keys under which internal lifecycle callbacks are registered with the
    # CallbackManager (see the callbacks.add/process calls in this class)
    ON_CONNECTION_BACKPRESSURE = '_on_connection_backpressure'
    ON_CONNECTION_BLOCKED = '_on_connection_blocked'
    ON_CONNECTION_CLOSED = '_on_connection_closed'
    ON_CONNECTION_ERROR = '_on_connection_error'
    ON_CONNECTION_OPEN = '_on_connection_open'
    ON_CONNECTION_UNBLOCKED = '_on_connection_unblocked'

    # Connection state-machine values; `connection_state` holds one of these.
    # See also the is_closed/is_closing/is_open properties.
    CONNECTION_CLOSED = 0
    CONNECTION_INIT = 1
    CONNECTION_PROTOCOL = 2
    CONNECTION_START = 3
    CONNECTION_TUNE = 4
    CONNECTION_OPEN = 5
    CONNECTION_CLOSING = 6  # client-initiated close in progress

    # Human-readable names corresponding to the state values above
    _STATE_NAMES = {
        CONNECTION_CLOSED: 'CLOSED',
        CONNECTION_INIT: 'INIT',
        CONNECTION_PROTOCOL: 'PROTOCOL',
        CONNECTION_START: 'START',
        CONNECTION_TUNE: 'TUNE',
        CONNECTION_OPEN: 'OPEN',
        CONNECTION_CLOSING: 'CLOSING'
    }
    def __init__(self,
                 parameters=None,
                 on_open_callback=None,
                 on_open_error_callback=None,
                 on_close_callback=None):
        """Connection initialization expects an object that has implemented the
        Parameters class and a callback function to notify when we have
        successfully connected to the AMQP Broker.

        Available Parameters classes are the ConnectionParameters class and
        URLParameters class.

        NOTE: the constructor initiates the actual connection attempt via
        `self.connect()` as its final step.

        :param pika.connection.Parameters parameters: Connection parameters
        :param method on_open_callback: Called when the connection is opened
        :param method on_open_error_callback: Called if the connection can't
            be established: on_open_error_callback(connection, str|exception)
        :param method on_close_callback: Called when the connection is closed:
            `on_close_callback(connection, reason_code, reason_text)`, where
            `reason_code` is either an IETF RFC 821 reply code for AMQP-level
            closures or a value from `pika.connection.InternalCloseReasons` for
            internal causes, such as socket errors.

        """
        self.connection_state = self.CONNECTION_CLOSED

        # Used to hold timer if configured for Connection.Blocked timeout
        self._blocked_conn_timer = None

        # Heartbeat checker instance, when heartbeats are enabled (see
        # _create_heartbeat_checker/_remove_heartbeat)
        self.heartbeat = None

        # Set our configuration options; deep-copied so later mutation of the
        # caller's parameters object cannot affect this connection
        self.params = (copy.deepcopy(parameters) if parameters is not None else
                       ConnectionParameters())

        # Define our callback dictionary
        self.callbacks = callback.CallbackManager()

        # Attributes that will be properly initialized by _init_connection_state
        # and/or during connection handshake.
        self.server_capabilities = None
        self.server_properties = None
        self._body_max_length = None
        self.known_hosts = None
        self.closing = None
        self._frame_buffer = None
        self._channels = None
        self._backpressure_multiplier = None

        self._init_connection_state()

        # Add the on connection error callback; the user-supplied callback
        # replaces the default error handler when provided
        self.callbacks.add(0, self.ON_CONNECTION_ERROR,
                           on_open_error_callback or self._on_connection_error,
                           False)

        # On connection callback
        if on_open_callback:
            self.add_on_open_callback(on_open_callback)

        # On connection callback
        if on_close_callback:
            self.add_on_close_callback(on_close_callback)

        # Kick off the actual connection attempt
        self.connect()
def add_backpressure_callback(self, callback_method):
"""Call method "callback" when pika believes backpressure is being
applied.
:param method callback_method: The method to call
"""
self.callbacks.add(0, self.ON_CONNECTION_BACKPRESSURE, callback_method,
False)
def add_on_close_callback(self, callback_method):
"""Add a callback notification when the connection has closed. The
callback will be passed the connection, the reply_code (int) and the
reply_text (str), if sent by the remote server.
:param method callback_method: Callback to call on close
"""
self.callbacks.add(0, self.ON_CONNECTION_CLOSED, callback_method, False)
def add_on_connection_blocked_callback(self, callback_method):
"""Add a callback to be notified when RabbitMQ has sent a
``Connection.Blocked`` frame indicating that RabbitMQ is low on
resources. Publishers can use this to voluntarily suspend publishing,
instead of relying on back pressure throttling. The callback
will be passed the ``Connection.Blocked`` method frame.
See also `ConnectionParameters.blocked_connection_timeout`.
:param method callback_method: Callback to call on `Connection.Blocked`,
having the signature `callback_method(pika.frame.Method)`, where the
method frame's `method` member is of type
`pika.spec.Connection.Blocked`
"""
self.callbacks.add(0, spec.Connection.Blocked, callback_method, False)
def add_on_connection_unblocked_callback(self, callback_method):
"""Add a callback to be notified when RabbitMQ has sent a
``Connection.Unblocked`` frame letting publishers know it's ok
to start publishing again. The callback will be passed the
``Connection.Unblocked`` method frame.
:param method callback_method: Callback to call on
`Connection.Unblocked`, having the signature
`callback_method(pika.frame.Method)`, where the method frame's
`method` member is of type `pika.spec.Connection.Unblocked`
"""
self.callbacks.add(0, spec.Connection.Unblocked, callback_method, False)
def add_on_open_callback(self, callback_method):
"""Add a callback notification when the connection has opened.
:param method callback_method: Callback to call when open
"""
self.callbacks.add(0, self.ON_CONNECTION_OPEN, callback_method, False)
def add_on_open_error_callback(self, callback_method, remove_default=True):
"""Add a callback notification when the connection can not be opened.
The callback method should accept the connection object that could not
connect, and an optional error message.
:param method callback_method: Callback to call when can't connect
:param bool remove_default: Remove default exception raising callback
"""
if remove_default:
self.callbacks.remove(0, self.ON_CONNECTION_ERROR,
self._on_connection_error)
self.callbacks.add(0, self.ON_CONNECTION_ERROR, callback_method, False)
def add_timeout(self, deadline, callback_method):
"""Adapters should override to call the callback after the
specified number of seconds have elapsed, using a timer, or a
thread, or similar.
:param int deadline: The number of seconds to wait to call callback
:param method callback_method: The callback method
"""
raise NotImplementedError
def channel(self, on_open_callback, channel_number=None):
"""Create a new channel with the next available channel number or pass
in a channel number to use. Must be non-zero if you would like to
specify but it is recommended that you let Pika manage the channel
numbers.
:param method on_open_callback: The callback when the channel is opened
:param int channel_number: The channel number to use, defaults to the
next available.
:rtype: pika.channel.Channel
"""
if not self.is_open:
# TODO if state is OPENING, then ConnectionClosed might be wrong
raise exceptions.ConnectionClosed(
'Channel allocation requires an open connection: %s' % self)
if not channel_number:
channel_number = self._next_channel_number()
self._channels[channel_number] = self._create_channel(channel_number,
on_open_callback)
self._add_channel_callbacks(channel_number)
self._channels[channel_number].open()
return self._channels[channel_number]
def close(self, reply_code=200, reply_text='Normal shutdown'):
"""Disconnect from RabbitMQ. If there are any open channels, it will
attempt to close them prior to fully disconnecting. Channels which
have active consumers will attempt to send a Basic.Cancel to RabbitMQ
to cleanly stop the delivery of messages prior to closing the channel.
:param int reply_code: The code number for the close
:param str reply_text: The text reason for the close
"""
if self.is_closing or self.is_closed:
LOGGER.warning('Suppressing close request on %s', self)
return
# Initiate graceful closing of channels that are OPEN or OPENING
self._close_channels(reply_code, reply_text)
# Set our connection state
self._set_connection_state(self.CONNECTION_CLOSING)
LOGGER.info("Closing connection (%s): %s", reply_code, reply_text)
self.closing = reply_code, reply_text
# If there are channels that haven't finished closing yet, then
# _on_close_ready will finally be called from _on_channel_cleanup once
# all channels have been closed
if not self._channels:
# We can initiate graceful closing of the connection right away,
# since no more channels remain
self._on_close_ready()
else:
LOGGER.info('Connection.close is waiting for '
'%d channels to close: %s', len(self._channels), self)
    def connect(self):
        """Invoke if trying to reconnect to a RabbitMQ server. Constructing the
        Connection object should connect on its own.

        On adapter-level connection failure, decrements the remaining-attempts
        counter and either schedules a retry via `add_timeout` or reports the
        error through the ON_CONNECTION_ERROR callbacks and returns to the
        CLOSED state.

        """
        self._set_connection_state(self.CONNECTION_INIT)

        error = self._adapter_connect()
        if not error:
            # Transport is up; continue with the AMQP protocol handshake
            return self._on_connected()

        self.remaining_connection_attempts -= 1
        LOGGER.warning('Could not connect, %i attempts left',
                       self.remaining_connection_attempts)
        if self.remaining_connection_attempts > 0:
            LOGGER.info('Retrying in %i seconds', self.params.retry_delay)
            # TODO remove timeout if connection is closed before timer fires
            self.add_timeout(self.params.retry_delay, self.connect)
        else:
            # TODO connect must not call failure callback from constructor. The
            # current behavior is error-prone, because the user code may get a
            # callback upon socket connection failure before user's other state
            # may be sufficiently initialized. Constructors must either succeed
            # or raise an exception. To be forward-compatible with failure
            # reporting from fully non-blocking connection establishment,
            # connect() should set INIT state and schedule a 0-second timer to
            # continue the rest of the logic in a private method. The private
            # method should use itself instead of connect() as the callback for
            # scheduling retries.

            # TODO This should use _on_terminate for consistent behavior
            self.callbacks.process(0, self.ON_CONNECTION_ERROR, self, self,
                                   error)
            # Reset the attempt budget so a future connect() starts fresh
            self.remaining_connection_attempts = self.params.connection_attempts
            self._set_connection_state(self.CONNECTION_CLOSED)
def remove_timeout(self, timeout_id):
"""Adapters should override: Remove a timeout
:param str timeout_id: The timeout id to remove
"""
raise NotImplementedError
def set_backpressure_multiplier(self, value=10):
"""Alter the backpressure multiplier value. We set this to 10 by default.
This value is used to raise warnings and trigger the backpressure
callback.
:param int value: The multiplier value to set
"""
self._backpressure_multiplier = value
#
# Connections state properties
#
@property
def is_closed(self):
"""
Returns a boolean reporting the current connection state.
"""
return self.connection_state == self.CONNECTION_CLOSED
@property
def is_closing(self):
"""
Returns True if connection is in the process of closing due to
client-initiated `close` request, but closing is not yet complete.
"""
return self.connection_state == self.CONNECTION_CLOSING
@property
def is_open(self):
"""
Returns a boolean reporting the current connection state.
"""
return self.connection_state == self.CONNECTION_OPEN
#
# Properties that reflect server capabilities for the current connection
#
@property
def basic_nack(self):
"""Specifies if the server supports basic.nack on the active connection.
:rtype: bool
"""
return self.server_capabilities.get('basic.nack', False)
@property
def consumer_cancel_notify(self):
"""Specifies if the server supports consumer cancel notification on the
active connection.
:rtype: bool
"""
return self.server_capabilities.get('consumer_cancel_notify', False)
@property
def exchange_exchange_bindings(self):
"""Specifies if the active connection supports exchange to exchange
bindings.
:rtype: bool
"""
return self.server_capabilities.get('exchange_exchange_bindings', False)
@property
def publisher_confirms(self):
"""Specifies if the active connection can use publisher confirmations.
:rtype: bool
"""
return self.server_capabilities.get('publisher_confirms', False)
#
# Internal methods for managing the communication process
#
def _adapter_connect(self):
"""Subclasses should override to set up the outbound socket connection.
:raises: NotImplementedError
"""
raise NotImplementedError
def _adapter_disconnect(self):
"""Subclasses should override this to cause the underlying transport
(socket) to close.
:raises: NotImplementedError
"""
raise NotImplementedError
def _add_channel_callbacks(self, channel_number):
"""Add the appropriate callbacks for the specified channel number.
:param int channel_number: The channel number for the callbacks
"""
# pylint: disable=W0212
# This permits us to garbage-collect our reference to the channel
# regardless of whether it was closed by client or broker, and do so
# after all channel-close callbacks.
self._channels[channel_number]._add_on_cleanup_callback(
self._on_channel_cleanup)
def _add_connection_start_callback(self):
"""Add a callback for when a Connection.Start frame is received from
the broker.
"""
self.callbacks.add(0, spec.Connection.Start, self._on_connection_start)
def _add_connection_tune_callback(self):
"""Add a callback for when a Connection.Tune frame is received."""
self.callbacks.add(0, spec.Connection.Tune, self._on_connection_tune)
def _append_frame_buffer(self, value):
"""Append the bytes to the frame buffer.
:param str value: The bytes to append to the frame buffer
"""
self._frame_buffer += value
@property
def _buffer_size(self):
"""Return the suggested buffer size from the connection state/tune or
the default if that is None.
:rtype: int
"""
return self.params.frame_max or spec.FRAME_MAX_SIZE
def _check_for_protocol_mismatch(self, value):
"""Invoked when starting a connection to make sure it's a supported
protocol.
:param pika.frame.Method value: The frame to check
:raises: ProtocolVersionMismatch
"""
if (value.method.version_major,
value.method.version_minor) != spec.PROTOCOL_VERSION[0:2]:
# TODO This should call _on_terminate for proper callbacks and
# cleanup
raise exceptions.ProtocolVersionMismatch(frame.ProtocolHeader(),
value)
@property
def _client_properties(self):
"""Return the client properties dictionary.
:rtype: dict
"""
properties = {
'product': PRODUCT,
'platform': 'Python %s' % platform.python_version(),
'capabilities': {
'authentication_failure_close': True,
'basic.nack': True,
'connection.blocked': True,
'consumer_cancel_notify': True,
'publisher_confirms': True
},
'information': 'See http://pika.rtfd.org',
'version': __version__
}
if self.params.client_properties:
properties.update(self.params.client_properties)
return properties
def _close_channels(self, reply_code, reply_text):
"""Initiate graceful closing of channels that are in OPEN or OPENING
states, passing reply_code and reply_text.
:param int reply_code: The code for why the channels are being closed
:param str reply_text: The text reason for why the channels are closing
"""
assert self.is_open, str(self)
for channel_number in dictkeys(self._channels):
chan = self._channels[channel_number]
if not (chan.is_closing or chan.is_closed):
chan.close(reply_code, reply_text)
def _combine(self, val1, val2):
"""Pass in two values, if a is 0, return b otherwise if b is 0,
return a. If neither case matches return the smallest value.
:param int val1: The first value
:param int val2: The second value
:rtype: int
"""
return min(val1, val2) or (val1 or val2)
def _connect(self):
"""Attempt to connect to RabbitMQ
:rtype: bool
"""
warnings.warn('This method is deprecated, use Connection.connect',
DeprecationWarning)
def _create_channel(self, channel_number, on_open_callback):
"""Create a new channel using the specified channel number and calling
back the method specified by on_open_callback
:param int channel_number: The channel number to use
:param method on_open_callback: The callback when the channel is opened
"""
LOGGER.debug('Creating channel %s', channel_number)
return pika.channel.Channel(self, channel_number, on_open_callback)
def _create_heartbeat_checker(self):
"""Create a heartbeat checker instance if there is a heartbeat interval
set.
:rtype: pika.heartbeat.Heartbeat
"""
if self.params.heartbeat is not None and self.params.heartbeat > 0:
LOGGER.debug('Creating a HeartbeatChecker: %r',
self.params.heartbeat)
return heartbeat.HeartbeatChecker(self, self.params.heartbeat)
def _remove_heartbeat(self):
"""Stop the heartbeat checker if it exists
"""
if self.heartbeat:
self.heartbeat.stop()
self.heartbeat = None
def _deliver_frame_to_channel(self, value):
"""Deliver the frame to the channel specified in the frame.
:param pika.frame.Method value: The frame to deliver
"""
if not value.channel_number in self._channels:
# This should never happen and would constitute breach of the
# protocol
LOGGER.critical(
'Received %s frame for unregistered channel %i on %s',
value.NAME, value.channel_number, self)
return
# pylint: disable=W0212
self._channels[value.channel_number]._handle_content_frame(value)
def _detect_backpressure(self):
"""Attempt to calculate if TCP backpressure is being applied due to
our outbound buffer being larger than the average frame size over
a window of frames.
"""
avg_frame_size = self.bytes_sent / self.frames_sent
buffer_size = sum([len(f) for f in self.outbound_buffer])
if buffer_size > (avg_frame_size * self._backpressure_multiplier):
LOGGER.warning(BACKPRESSURE_WARNING, buffer_size,
int(buffer_size / avg_frame_size))
self.callbacks.process(0, self.ON_CONNECTION_BACKPRESSURE, self)
def _ensure_closed(self):
"""If the connection is not closed, close it."""
if self.is_open:
self.close()
def _flush_outbound(self):
"""Adapters should override to flush the contents of outbound_buffer
out along the socket.
:raises: NotImplementedError
"""
raise NotImplementedError
def _get_body_frame_max_length(self):
"""Calculate the maximum amount of bytes that can be in a body frame.
:rtype: int
"""
return (
self.params.frame_max - spec.FRAME_HEADER_SIZE - spec.FRAME_END_SIZE
)
def _get_credentials(self, method_frame):
"""Get credentials for authentication.
:param pika.frame.MethodFrame method_frame: The Connection.Start frame
:rtype: tuple(str, str)
"""
(auth_type,
response) = self.params.credentials.response_for(method_frame.method)
if not auth_type:
# TODO this should call _on_terminate for proper callbacks and
# cleanup instead
raise exceptions.AuthenticationError(self.params.credentials.TYPE)
self.params.credentials.erase_credentials()
return auth_type, response
def _has_pending_callbacks(self, value):
"""Return true if there are any callbacks pending for the specified
frame.
:param pika.frame.Method value: The frame to check
:rtype: bool
"""
return self.callbacks.pending(value.channel_number, value.method)
    def _init_connection_state(self):
        """Initialize or reset all of the internal state variables for a given
        connection. On disconnect or reconnect all of the state needs to
        be wiped.

        Also (re-)registers the Connection.Start / Connection.Close handlers
        and, when a blocked-connection timeout is configured, the
        Connection.Blocked/Unblocked callbacks.

        """
        # Connection state
        self._set_connection_state(self.CONNECTION_CLOSED)

        # Negotiated server properties
        self.server_properties = None

        # Outbound buffer for buffering writes until we're able to send them
        self.outbound_buffer = collections.deque([])

        # Inbound buffer for decoding frames
        self._frame_buffer = bytes()

        # Dict of open channels
        self._channels = dict()

        # Remaining connection attempts
        self.remaining_connection_attempts = self.params.connection_attempts

        # Data used for Heartbeat checking and back-pressure detection
        self.bytes_sent = 0
        self.bytes_received = 0
        self.frames_sent = 0
        self.frames_received = 0
        self.heartbeat = None

        # Default back-pressure multiplier value
        self._backpressure_multiplier = 10

        # When closing, hold reason why
        self.closing = 0, 'Not specified'

        # Our starting point once connected, first frame received
        self._add_connection_start_callback()

        # Add a callback handler for the Broker telling us to disconnect.
        # NOTE: As of RabbitMQ 3.6.0, RabbitMQ broker may send Connection.Close
        # to signal error during connection setup (and wait a longish time
        # before closing the TCP/IP stream). Earlier RabbitMQ versions
        # simply closed the TCP/IP stream.
        self.callbacks.add(0, spec.Connection.Close, self._on_connection_close)

        if self.params.blocked_connection_timeout is not None:
            if self._blocked_conn_timer is not None:
                # Blocked connection timer was active when teardown was
                # initiated
                self.remove_timeout(self._blocked_conn_timer)
                self._blocked_conn_timer = None

            self.add_on_connection_blocked_callback(
                self._on_connection_blocked)
            self.add_on_connection_unblocked_callback(
                self._on_connection_unblocked)
def _is_method_frame(self, value):
    """Returns true if the frame is a method frame.

    :param pika.frame.Frame value: The frame to evaluate
    :rtype: bool
    """
    return isinstance(value, frame.Method)
def _is_protocol_header_frame(self, value):
    """Returns True if it's a protocol header frame.

    :param value: The frame to evaluate
    :rtype: bool
    """
    return isinstance(value, frame.ProtocolHeader)
def _next_channel_number(self):
    """Return the next available channel number or raise an exception.

    Reuses the lowest free channel number (channel numbers start at 1).

    :rtype: int
    :raises exceptions.NoFreeChannels: when channel_max channels are open
    """
    ceiling = self.params.channel_max or pika.channel.MAX_CHANNELS
    if len(self._channels) >= ceiling:
        raise exceptions.NoFreeChannels()
    # Scan upward for the first number not currently in use; with N open
    # channels this terminates at or before N + 1.
    candidate = 1
    while candidate in self._channels:
        candidate += 1
    return candidate
def _on_channel_cleanup(self, channel):
    """Remove the channel from the dict of channels when Channel.CloseOk is
    sent. If connection is closing and no more channels remain, proceed to
    `_on_close_ready`.

    :param pika.channel.Channel channel: channel instance
    """
    try:
        del self._channels[channel.channel_number]
        LOGGER.debug('Removed channel %s', channel.channel_number)
    except KeyError:
        # Unexpected, but not fatal: log and continue with close handling
        LOGGER.error('Channel %r not in channels',
                     channel.channel_number)
    if self.is_closing:
        if not self._channels:
            # Initiate graceful closing of the connection
            self._on_close_ready()
        else:
            # Once Connection enters CLOSING state, all remaining channels
            # should also be in CLOSING state. Deviation from this would
            # prevent Connection from completing its closing procedure.
            channels_not_in_closing_state = [
                chan for chan in dict_itervalues(self._channels)
                if not chan.is_closing]
            if channels_not_in_closing_state:
                LOGGER.critical(
                    'Connection in CLOSING state has non-CLOSING '
                    'channels: %r', channels_not_in_closing_state)
def _on_close_ready(self):
    """Called when the Connection is in a state that it can close after
    a close has been requested. This happens, for example, when all of the
    channels are closed that were open when the close request was made.
    """
    if self.is_closed:
        LOGGER.warning('_on_close_ready invoked when already closed')
        return
    # self.closing holds the (reply_code, reply_text) recorded when the
    # close was requested
    reply_code, reply_text = self.closing
    self._send_connection_close(reply_code, reply_text)
def _on_connected(self):
    """Invoked when the socket is connected and it's time to start speaking
    AMQP with the broker.
    """
    self._set_connection_state(self.CONNECTION_PROTOCOL)
    # Start the communication with the RabbitMQ Broker by sending the
    # AMQP protocol header; the Connection.Start reply is handled by the
    # callback registered in _init_connection_state
    self._send_frame(frame.ProtocolHeader())
def _on_blocked_connection_timeout(self):
    """ Called when the "connection blocked timeout" expires. When this
    happens, we tear down the connection
    """
    # The timer has fired, so it's no longer pending
    self._blocked_conn_timer = None
    self._on_terminate(InternalCloseReasons.BLOCKED_CONNECTION_TIMEOUT,
                       'Blocked connection timeout expired')
def _on_connection_blocked(self, method_frame):
    """Handle Connection.Blocked notification from RabbitMQ broker

    :param pika.frame.Method method_frame: method frame having `method`
        member of type `pika.spec.Connection.Blocked`
    """
    LOGGER.warning('Received %s from broker', method_frame)
    if self._blocked_conn_timer is None:
        # Start the countdown; if the broker doesn't unblock us before the
        # timeout, the connection is torn down.
        self._blocked_conn_timer = self.add_timeout(
            self.params.blocked_connection_timeout,
            self._on_blocked_connection_timeout)
    else:
        # RabbitMQ is not supposed to repeat Connection.Blocked, but it
        # doesn't hurt to be careful
        LOGGER.warning('_blocked_conn_timer %s already set when '
                       '_on_connection_blocked is called',
                       self._blocked_conn_timer)
def _on_connection_unblocked(self, method_frame):
    """Handle Connection.Unblocked notification from RabbitMQ broker

    :param pika.frame.Method method_frame: method frame having `method`
        member of type `pika.spec.Connection.Blocked`
    """
    LOGGER.info('Received %s from broker', method_frame)
    if self._blocked_conn_timer is not None:
        # Cancel the pending blocked-connection teardown
        self.remove_timeout(self._blocked_conn_timer)
        self._blocked_conn_timer = None
    else:
        # RabbitMQ is supposed to pair Connection.Blocked/Unblocked, but it
        # doesn't hurt to be careful
        LOGGER.warning('_blocked_conn_timer was not active when '
                       '_on_connection_unblocked called')
def _on_connection_close(self, method_frame):
    """Called when the connection is closed remotely via Connection.Close
    frame from broker.

    :param pika.frame.Method method_frame: The Connection.Close frame
    """
    LOGGER.debug('_on_connection_close: frame=%s', method_frame)
    # Record the broker-supplied reason, then tear the connection down
    reason = (method_frame.method.reply_code,
              method_frame.method.reply_text)
    self.closing = reason
    self._on_terminate(reason[0], reason[1])
def _on_connection_close_ok(self, method_frame):
    """Called when Connection.CloseOk is received from remote.

    :param pika.frame.Method method_frame: The Connection.CloseOk frame
    """
    LOGGER.debug('_on_connection_close_ok: frame=%s', method_frame)
    # self.closing holds the (reply_code, reply_text) recorded when the
    # close was initiated
    self._on_terminate(self.closing[0], self.closing[1])
def _on_connection_error(self, _connection_unused, error_message=None):
    """Default behavior when the connecting connection can not connect.

    :param _connection_unused: unused connection arg (callback signature)
    :param error_message: optional description of the failure
    :raises: exceptions.AMQPConnectionError
    """
    # When no message is supplied, report the configured number of
    # connection attempts instead
    raise exceptions.AMQPConnectionError(error_message or
                                         self.params.connection_attempts)
def _on_connection_open(self, method_frame):
    """
    This is called once we have tuned the connection with the server and
    called the Connection.Open on the server and it has replied with
    Connection.Ok.

    :param pika.frame.Method method_frame: The Connection.OpenOk frame
    """
    # TODO _on_connection_open - what if user started closing it already?
    # It shouldn't transition to OPEN if in closing state. Just log and skip
    # the rest.
    self.known_hosts = method_frame.method.known_hosts
    # We're now connected at the AMQP level
    self._set_connection_state(self.CONNECTION_OPEN)
    # Call our initial callback that we're open
    self.callbacks.process(0, self.ON_CONNECTION_OPEN, self, self)
def _on_connection_start(self, method_frame):
    """This is called as a callback once we have received a Connection.Start
    from the server.

    :param pika.frame.Method method_frame: The frame received
    :raises: UnexpectedFrameError
    """
    self._set_connection_state(self.CONNECTION_START)
    # Receiving a protocol header back instead of Connection.Start is
    # unexpected at this point
    if self._is_protocol_header_frame(method_frame):
        raise exceptions.UnexpectedFrameError
    self._check_for_protocol_mismatch(method_frame)
    self._set_server_information(method_frame)
    self._add_connection_tune_callback()
    # Reply with Connection.StartOk carrying the auth type and response
    self._send_connection_start_ok(*self._get_credentials(method_frame))
@staticmethod
def _tune_heartbeat_timeout(client_value, server_value):
""" Determine heartbeat timeout per AMQP 0-9-1 rules
Per https://www.rabbitmq.com/resources/specs/amqp0-9-1.pdf,
> Both peers negotiate the limits to the lowest agreed value as follows:
> - The server MUST tell the client what limits it proposes.
> - The client responds and **MAY reduce those limits** for its
connection
When negotiating heartbeat timeout, the reasoning needs to be reversed.
The way I think it makes sense to interpret this rule for heartbeats is
that the consumable resource is the frequency of heartbeats, which is
the inverse of the timeout. The more frequent heartbeats consume more
resources than less frequent heartbeats. So, when both heartbeat
timeouts are non-zero, we should pick the max heartbeat timeout rather
than the min. The heartbeat timeout value 0 (zero) has a special
meaning - it's supposed to disable the timeout. This makes zero a
setting for the least frequent heartbeats (i.e., never); therefore, if
any (or both) of the two is zero, then the above rules would suggest
that negotiation should yield 0 value for heartbeat, effectively turning
it off.
:param client_value: None to accept server_value; otherwise, an integral
number in seconds; 0 (zero) to disable heartbeat.
:param server_value: integral value of the heartbeat timeout proposed by
broker; 0 (zero) to disable heartbeat.
:returns: the value of the heartbeat timeout to use and return to broker
"""
if client_value is None:
# Accept server's limit
timeout = server_value
elif client_value == 0 or server_value == 0:
# 0 has a special meaning "disable heartbeats", which makes it the
# least frequent heartbeat value there is
timeout = 0
else:
# Pick the one with the bigger heartbeat timeout (i.e., the less
# frequent one)
timeout = max(client_value, server_value)
return timeout
def _on_connection_tune(self, method_frame):
    """Once the Broker sends back a Connection.Tune, we will set our tuning
    variables that have been returned to us and kick off the Heartbeat
    monitor if required, send our TuneOk and then the Connection. Open rpc
    call on channel 0.

    :param pika.frame.Method method_frame: The frame received
    """
    self._set_connection_state(self.CONNECTION_TUNE)
    # Get our max channels, frames and heartbeat interval
    self.params.channel_max = self._combine(self.params.channel_max,
                                            method_frame.method.channel_max)
    self.params.frame_max = self._combine(self.params.frame_max,
                                          method_frame.method.frame_max)
    # Negotiate heartbeat timeout (see _tune_heartbeat_timeout for rules)
    self.params.heartbeat = self._tune_heartbeat_timeout(
        client_value=self.params.heartbeat,
        server_value=method_frame.method.heartbeat)
    # Calculate the maximum pieces for body frames
    self._body_max_length = self._get_body_frame_max_length()
    # Create a new heartbeat checker if needed
    self.heartbeat = self._create_heartbeat_checker()
    # Send the TuneOk response with what we've agreed upon
    self._send_connection_tune_ok()
    # Send the Connection.Open RPC call for the vhost
    self._send_connection_open()
def _on_data_available(self, data_in):
    """This is called by our Adapter, passing in the data from the socket.
    As long as we have buffer try and map out frame data.

    :param str data_in: The data that is available to read
    """
    self._append_frame_buffer(data_in)
    # Decode as many complete frames as the buffer currently holds; any
    # partial frame at the tail is left for the next read.
    while self._frame_buffer:
        consumed, decoded = self._read_frame()
        if not decoded:
            # Not enough bytes buffered for a complete frame yet
            return
        self._trim_frame_buffer(consumed)
        self._process_frame(decoded)
def _on_terminate(self, reason_code, reason_text):
    """Terminate the connection and notify registered ON_CONNECTION_ERROR
    and/or ON_CONNECTION_CLOSED callbacks

    :param integer reason_code: either IETF RFC 821 reply code for
        AMQP-level closures or a value from `InternalCloseReasons` for
        internal causes, such as socket errors
    :param str reason_text: human-readable text message describing the error
    :raises TypeError: if reason_code is not an integer
    """
    LOGGER.info(
        'Disconnected from RabbitMQ at %s:%i (%s): %s',
        self.params.host, self.params.port, reason_code,
        reason_text)
    if not isinstance(reason_code, numbers.Integral):
        raise TypeError('reason_code must be an integer, but got %r'
                        % (reason_code,))
    # Stop the heartbeat checker if it exists
    self._remove_heartbeat()
    # Remove connection management callbacks
    # TODO This call was moved here verbatim from legacy code and the
    # following doesn't seem to be right: `Connection.Open` here is
    # unexpected, we don't appear to ever register it, and the broker
    # shouldn't be sending `Connection.Open` to us, anyway.
    self._remove_callbacks(0, [spec.Connection.Close, spec.Connection.Start,
                               spec.Connection.Open])
    if self.params.blocked_connection_timeout is not None:
        self._remove_callbacks(0, [spec.Connection.Blocked,
                                   spec.Connection.Unblocked])
    # Close the socket
    self._adapter_disconnect()
    # Determine whether this was an error during connection setup
    connection_error = None
    if self.connection_state == self.CONNECTION_PROTOCOL:
        LOGGER.error('Incompatible Protocol Versions')
        connection_error = exceptions.IncompatibleProtocolError(
            reason_code,
            reason_text)
    elif self.connection_state == self.CONNECTION_START:
        LOGGER.error('Connection closed while authenticating indicating a '
                     'probable authentication error')
        connection_error = exceptions.ProbableAuthenticationError(
            reason_code,
            reason_text)
    elif self.connection_state == self.CONNECTION_TUNE:
        LOGGER.error('Connection closed while tuning the connection '
                     'indicating a probable permission error when '
                     'accessing a virtual host')
        connection_error = exceptions.ProbableAccessDeniedError(
            reason_code,
            reason_text)
    elif self.connection_state not in [self.CONNECTION_OPEN,
                                       self.CONNECTION_CLOSED,
                                       self.CONNECTION_CLOSING]:
        LOGGER.warning('Unexpected connection state on disconnect: %i',
                       self.connection_state)
    # Transition to closed state
    self._set_connection_state(self.CONNECTION_CLOSED)
    # Inform our channel proxies; iterate a snapshot of the keys and
    # re-check membership because _on_close_meta may remove entries from
    # self._channels while we loop
    for channel in dictkeys(self._channels):
        if channel not in self._channels:
            continue
        # pylint: disable=W0212
        self._channels[channel]._on_close_meta(reason_code, reason_text)
    # Inform interested parties
    if connection_error is not None:
        LOGGER.error('Connection setup failed due to %r', connection_error)
        self.callbacks.process(0,
                               self.ON_CONNECTION_ERROR,
                               self, self,
                               connection_error)
    self.callbacks.process(0, self.ON_CONNECTION_CLOSED, self, self,
                           reason_code, reason_text)
    # Reset connection properties
    self._init_connection_state()
def _process_callbacks(self, frame_value):
    """Process the callbacks for the frame if the frame is a method frame
    and if it has any callbacks pending.

    :param pika.frame.Method frame_value: The frame to process
    :rtype: bool
    """
    # Only method frames carry callback-dispatchable methods
    if not self._is_method_frame(frame_value):
        return False
    if not self._has_pending_callbacks(frame_value):
        return False
    self.callbacks.process(frame_value.channel_number,  # Prefix
                           frame_value.method,  # Key
                           self,  # Caller
                           frame_value)  # Args
    return True
def _process_frame(self, frame_value):
    """Process an inbound frame from the socket.

    :param frame_value: The frame to process
    :type frame_value: pika.frame.Frame | pika.frame.Method
    """
    # A frame type of -1 signals a protocol version mismatch; drop it
    if frame_value.frame_type < 0:
        return
    # Keep track of how many frames have been read
    self.frames_received += 1
    # If a registered callback consumed the frame, nothing more to do
    if self._process_callbacks(frame_value):
        return
    # A received heartbeat updates the checker (when one is running)
    if isinstance(frame_value, frame.Heartbeat):
        if self.heartbeat:
            self.heartbeat.received()
            return
        LOGGER.warning('Received heartbeat frame without a heartbeat '
                       'checker')
        return
    # Frames addressed beyond the base channel go to their channel proxy
    if frame_value.channel_number > 0:
        self._deliver_frame_to_channel(frame_value)
def _read_frame(self):
    """Try and read from the frame buffer and decode a frame.

    :rtype tuple: (int, pika.frame.Frame) - bytes consumed and the decoded
        frame; the frame is falsy when the buffer does not yet hold a
        complete frame (see _on_data_available)
    """
    return frame.decode_frame(self._frame_buffer)
def _remove_callback(self, channel_number, method_class):
    """Remove the specified method_frame callback if it is set for the
    specified channel number.

    :param int channel_number: The channel number to remove the callback on
    :param pika.amqp_object.Method method_class: The method class for the
        callback
    """
    # NOTE(review): the prefix is stringified here while other call sites
    # (e.g. self.callbacks.add(0, ...)) pass ints — presumably the callback
    # manager normalizes prefixes; confirm before changing this.
    self.callbacks.remove(str(channel_number), method_class)
def _remove_callbacks(self, channel_number, method_classes):
    """Remove the callbacks for the specified channel number and list of
    method frames.

    :param int channel_number: The channel number to remove the callback on
    :param sequence method_classes: The method classes (derived from
        `pika.amqp_object.Method`) for the callbacks
    """
    # Delegate each removal to the single-callback helper
    for method_class in method_classes:
        self._remove_callback(channel_number, method_class)
def _rpc(self, channel_number, method,
         callback_method=None,
         acceptable_replies=None):
    """Make an RPC call for the given callback, channel number and method.
    acceptable_replies lists out what responses we'll process from the
    server with the specified callback.

    :param int channel_number: The channel number for the RPC call
    :param pika.amqp_object.Method method: The method frame to call
    :param method callback_method: The callback for the RPC response
    :param list acceptable_replies: The replies this RPC call expects
    :raises TypeError: if acceptable_replies is neither a list nor None,
        if callback_method is not callable, or if callback_method is
        supplied without acceptable_replies
    """
    # Validate that acceptable_replies is a list or None
    if acceptable_replies and not isinstance(acceptable_replies, list):
        raise TypeError('acceptable_replies should be list or None')
    if callback_method:
        # Validate the callback is callable
        if not utils.is_callable(callback_method):
            raise TypeError('callback should be None, function or method.')
        # BUGFIX: previously a callback with acceptable_replies=None fell
        # through to `for reply in None`, raising an opaque TypeError;
        # fail explicitly (same exception type, clearer message).
        if not acceptable_replies:
            raise TypeError('acceptable_replies is required when '
                            'callback_method is supplied')
        # Register the callback for each reply we're willing to process
        for reply in acceptable_replies:
            self.callbacks.add(channel_number, reply, callback_method)
    # Send the rpc call to RabbitMQ
    self._send_method(channel_number, method)
def _send_connection_close(self, reply_code, reply_text):
    """Send a Connection.Close method frame.

    :param int reply_code: The reason for the close
    :param str reply_text: The text reason for the close
    """
    # The trailing 0, 0 are presumably the class-id/method-id fields of
    # Connection.Close (no offending method) — confirm against spec
    self._rpc(0, spec.Connection.Close(reply_code, reply_text, 0, 0),
              self._on_connection_close_ok, [spec.Connection.CloseOk])
def _send_connection_open(self):
    """Send a Connection.Open frame for the configured virtual host,
    registering _on_connection_open to run on Connection.OpenOk.
    """
    self._rpc(0, spec.Connection.Open(self.params.virtual_host,
                                      insist=True),
              self._on_connection_open, [spec.Connection.OpenOk])
def _send_connection_start_ok(self, authentication_type, response):
    """Send a Connection.StartOk frame

    :param str authentication_type: The auth type value
    :param str response: The encoded value to send
    """
    # Sent on channel 0 along with our client properties and locale
    self._send_method(0,
                      spec.Connection.StartOk(self._client_properties,
                                              authentication_type, response,
                                              self.params.locale))
def _send_connection_tune_ok(self):
    """Send a Connection.TuneOk frame carrying the negotiated channel_max,
    frame_max and heartbeat values (see _on_connection_tune).
    """
    self._send_method(0, spec.Connection.TuneOk(self.params.channel_max,
                                                self.params.frame_max,
                                                self.params.heartbeat))
def _send_frame(self, frame_value):
    """This appends the fully generated frame to send to the broker to the
    output buffer which will be then sent via the connection adapter.

    :param frame_value: The frame to write
    :type frame_value: pika.frame.Frame|pika.frame.ProtocolHeader
    :raises: exceptions.ConnectionClosed
    """
    if self.is_closed:
        LOGGER.error('Attempted to send frame when closed')
        raise exceptions.ConnectionClosed
    marshaled = frame_value.marshal()
    # Update the counters used for heartbeat / back-pressure accounting
    self.bytes_sent += len(marshaled)
    self.frames_sent += 1
    # Queue the bytes and ask the adapter to flush them
    self.outbound_buffer.append(marshaled)
    self._flush_outbound()
    if self.params.backpressure_detection:
        self._detect_backpressure()
def _send_method(self, channel_number, method, content=None):
    """Constructs a RPC method frame and then sends it to the broker.

    :param int channel_number: The channel number for the frame
    :param pika.amqp_object.Method method: The method to send
    :param tuple content: If set, is a content frame, is tuple of
        properties and body.
    """
    if not content:
        # Method-only: a single frame through the normal send path
        self._send_frame(frame.Method(channel_number, method))
        return
    # Method + content: header and body frames are batched together
    self._send_message(channel_number, method, content)
def _send_message(self, channel_number, method, content=None):
    """Send the message directly, bypassing the single _send_frame
    invocation by directly appending to the output buffer and flushing
    within a lock.

    :param int channel_number: The channel number for the frame
    :param pika.amqp_object.Method method: The method frame to send
    :param tuple content: If set, is a content frame, is tuple of
        properties and body.
    """
    properties, body = content[0], content[1]
    length = len(body)
    # Method frame first, then the content header announcing the length
    pending = [frame.Method(channel_number, method).marshal(),
               frame.Header(channel_number, length, properties).marshal()]
    if body:
        # Split the body into pieces no larger than the negotiated
        # maximum body-frame size
        step = self._body_max_length
        for offset in xrange(0, length, step):
            piece = body[offset:offset + step]
            pending.append(frame.Body(channel_number, piece).marshal())
    # Append everything at once, then flush via the adapter
    self.outbound_buffer += pending
    self.frames_sent += len(pending)
    self._flush_outbound()
    if self.params.backpressure_detection:
        self._detect_backpressure()
def _set_connection_state(self, connection_state):
    """Set the connection state.

    :param int connection_state: The connection state to set (one of the
        CONNECTION_* constants)
    """
    self.connection_state = connection_state
def _set_server_information(self, method_frame):
"""Set the server properties and capabilities
:param spec.connection.Start method_frame: The Connection.Start frame
"""
self.server_properties = method_frame.method.server_properties
self.server_capabilities = self.server_properties.get('capabilities',
dict())
if hasattr(self.server_properties, 'capabilities'):
del self.server_properties['capabilities']
def _trim_frame_buffer(self, byte_count):
"""Trim the leading N bytes off the frame buffer and increment the
counter that keeps track of how many bytes have been read/used from the
socket.
:param int byte_count: The number of bytes consumed
"""
self._frame_buffer = self._frame_buffer[byte_count:]
self.bytes_received += byte_count
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.