repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
carljm/django | django/forms/forms.py | 15 | 20129 | """
Form classes
"""
from __future__ import unicode_literals
import copy
from collections import OrderedDict
from django.core.exceptions import NON_FIELD_ERRORS, ValidationError
# BoundField is imported for backwards compatibility in Django 1.9
from django.forms.boundfield import BoundField # NOQA
from django.forms.fields import Field, FileField
# pretty_name is imported for backwards compatibility in Django 1.9
from django.forms.utils import ErrorDict, ErrorList, pretty_name # NOQA
from django.forms.widgets import Media, MediaDefiningClass
from django.utils import six
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.functional import cached_property
from django.utils.html import conditional_escape, html_safe
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
__all__ = ('BaseForm', 'Form')
class DeclarativeFieldsMetaclass(MediaDefiningClass):
    """
    Metaclass that collects Fields declared on the base classes.
    """
    def __new__(mcs, name, bases, attrs):
        # Collect fields from current class.
        current_fields = []
        for key, value in list(attrs.items()):
            if isinstance(value, Field):
                current_fields.append((key, value))
                # Remove the Field from the class namespace so it is only
                # reachable through declared_fields/base_fields.
                attrs.pop(key)
        # creation_counter preserves the order fields were declared in.
        current_fields.sort(key=lambda x: x[1].creation_counter)
        attrs['declared_fields'] = OrderedDict(current_fields)

        new_class = super(DeclarativeFieldsMetaclass, mcs).__new__(mcs, name, bases, attrs)

        # Walk through the MRO (base-most first) so subclasses override
        # same-named fields from their parents.
        declared_fields = OrderedDict()
        for base in reversed(new_class.__mro__):
            # Collect fields from base class.
            if hasattr(base, 'declared_fields'):
                declared_fields.update(base.declared_fields)

            # Field shadowing: assigning None to a field name in a subclass
            # removes the field inherited from a parent.
            for attr, value in base.__dict__.items():
                if value is None and attr in declared_fields:
                    declared_fields.pop(attr)

        new_class.base_fields = declared_fields
        new_class.declared_fields = declared_fields

        return new_class
@html_safe
@python_2_unicode_compatible
class BaseForm(object):
    # This is the main implementation of all the Form logic. Note that this
    # class is different than Form. See the comments by the Form class for more
    # information. Any improvements to the form API should be made to *this*
    # class, not to the Form class.

    # Class-level defaults; may be overridden per subclass or per instance.
    field_order = None
    prefix = None
    use_required_attribute = True

    def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
                 initial=None, error_class=ErrorList, label_suffix=None,
                 empty_permitted=False, field_order=None, use_required_attribute=None):
        # A form is "bound" as soon as any data or files are supplied.
        self.is_bound = data is not None or files is not None
        self.data = data or {}
        self.files = files or {}
        self.auto_id = auto_id
        if prefix is not None:
            self.prefix = prefix
        self.initial = initial or {}
        self.error_class = error_class
        # Translators: This is the default suffix added to form field labels
        self.label_suffix = label_suffix if label_suffix is not None else _(':')
        self.empty_permitted = empty_permitted
        self._errors = None  # Stores the errors after clean() has been called.

        # The base_fields class attribute is the *class-wide* definition of
        # fields. Because a particular *instance* of the class might want to
        # alter self.fields, we create self.fields here by copying base_fields.
        # Instances should always modify self.fields; they should not modify
        # self.base_fields.
        self.fields = copy.deepcopy(self.base_fields)
        self._bound_fields_cache = {}
        self.order_fields(self.field_order if field_order is None else field_order)

        if use_required_attribute is not None:
            self.use_required_attribute = use_required_attribute

    def order_fields(self, field_order):
        """
        Rearranges the fields according to field_order.

        field_order is a list of field names specifying the order. Fields not
        included in the list are appended in the default order for backward
        compatibility with subclasses not overriding field_order. If field_order
        is None, all fields are kept in the order defined in the class.
        Unknown fields in field_order are ignored to allow disabling fields in
        form subclasses without redefining ordering.
        """
        if field_order is None:
            return
        fields = OrderedDict()
        for key in field_order:
            try:
                fields[key] = self.fields.pop(key)
            except KeyError:  # ignore unknown fields
                pass
        fields.update(self.fields)  # add remaining fields in original order
        self.fields = fields

    def __str__(self):
        # Default string rendering is the <table> layout.
        return self.as_table()

    def __repr__(self):
        if self._errors is None:
            # full_clean() has not run yet, so validity is undetermined.
            is_valid = "Unknown"
        else:
            is_valid = self.is_bound and not bool(self._errors)
        return '<%(cls)s bound=%(bound)s, valid=%(valid)s, fields=(%(fields)s)>' % {
            'cls': self.__class__.__name__,
            'bound': self.is_bound,
            'valid': is_valid,
            'fields': ';'.join(self.fields),
        }

    def __iter__(self):
        # Iterating a form yields its BoundFields in field order.
        for name in self.fields:
            yield self[name]

    def __getitem__(self, name):
        "Returns a BoundField with the given name."
        try:
            field = self.fields[name]
        except KeyError:
            raise KeyError(
                "Key '%s' not found in '%s'. Choices are: %s." % (
                    name,
                    self.__class__.__name__,
                    ', '.join(sorted(f for f in self.fields)),
                )
            )
        # BoundFields are cached so repeated lookups return the same object.
        if name not in self._bound_fields_cache:
            self._bound_fields_cache[name] = field.get_bound_field(self, name)
        return self._bound_fields_cache[name]

    @property
    def errors(self):
        "Returns an ErrorDict for the data provided for the form"
        # Lazily triggers full_clean() on first access.
        if self._errors is None:
            self.full_clean()
        return self._errors

    def is_valid(self):
        """
        Returns True if the form has no errors. Otherwise, False. If errors are
        being ignored, returns False.
        """
        return self.is_bound and not self.errors

    def add_prefix(self, field_name):
        """
        Returns the field name with a prefix appended, if this Form has a
        prefix set.

        Subclasses may wish to override.
        """
        return '%s-%s' % (self.prefix, field_name) if self.prefix else field_name

    def add_initial_prefix(self, field_name):
        """
        Add a 'initial' prefix for checking dynamic initial values
        """
        return 'initial-%s' % self.add_prefix(field_name)

    def _html_output(self, normal_row, error_row, row_ender, help_text_html, errors_on_separate_row):
        "Helper function for outputting HTML. Used by as_table(), as_ul(), as_p()."
        top_errors = self.non_field_errors()  # Errors that should be displayed above all fields.
        output, hidden_fields = [], []

        for name, field in self.fields.items():
            html_class_attr = ''
            bf = self[name]
            # Escape and cache in local variable.
            bf_errors = self.error_class([conditional_escape(error) for error in bf.errors])
            if bf.is_hidden:
                # Hidden fields render no row; their errors surface at the top.
                if bf_errors:
                    top_errors.extend(
                        [_('(Hidden field %(name)s) %(error)s') % {'name': name, 'error': force_text(e)}
                         for e in bf_errors])
                hidden_fields.append(six.text_type(bf))
            else:
                # Create a 'class="..."' attribute if the row should have any
                # CSS classes applied.
                css_classes = bf.css_classes()
                if css_classes:
                    html_class_attr = ' class="%s"' % css_classes

                if errors_on_separate_row and bf_errors:
                    output.append(error_row % force_text(bf_errors))

                if bf.label:
                    label = conditional_escape(force_text(bf.label))
                    label = bf.label_tag(label) or ''
                else:
                    label = ''

                if field.help_text:
                    help_text = help_text_html % force_text(field.help_text)
                else:
                    help_text = ''

                output.append(normal_row % {
                    'errors': force_text(bf_errors),
                    'label': force_text(label),
                    'field': six.text_type(bf),
                    'help_text': help_text,
                    'html_class_attr': html_class_attr,
                    'css_classes': css_classes,
                    'field_name': bf.html_name,
                })

        if top_errors:
            output.insert(0, error_row % force_text(top_errors))

        if hidden_fields:  # Insert any hidden fields in the last row.
            str_hidden = ''.join(hidden_fields)
            if output:
                last_row = output[-1]
                # Chop off the trailing row_ender (e.g. '</td></tr>') and
                # insert the hidden fields.
                if not last_row.endswith(row_ender):
                    # This can happen in the as_p() case (and possibly others
                    # that users write): if there are only top errors, we may
                    # not be able to conscript the last row for our purposes,
                    # so insert a new, empty row.
                    last_row = (normal_row % {
                        'errors': '',
                        'label': '',
                        'field': '',
                        'help_text': '',
                        'html_class_attr': html_class_attr,
                        'css_classes': '',
                        'field_name': '',
                    })
                    output.append(last_row)
                output[-1] = last_row[:-len(row_ender)] + str_hidden + row_ender
            else:
                # If there aren't any rows in the output, just append the
                # hidden fields.
                output.append(str_hidden)
        return mark_safe('\n'.join(output))

    def as_table(self):
        "Returns this form rendered as HTML <tr>s -- excluding the <table></table>."
        return self._html_output(
            normal_row='<tr%(html_class_attr)s><th>%(label)s</th><td>%(errors)s%(field)s%(help_text)s</td></tr>',
            error_row='<tr><td colspan="2">%s</td></tr>',
            row_ender='</td></tr>',
            help_text_html='<br /><span class="helptext">%s</span>',
            errors_on_separate_row=False)

    def as_ul(self):
        "Returns this form rendered as HTML <li>s -- excluding the <ul></ul>."
        return self._html_output(
            normal_row='<li%(html_class_attr)s>%(errors)s%(label)s %(field)s%(help_text)s</li>',
            error_row='<li>%s</li>',
            row_ender='</li>',
            help_text_html=' <span class="helptext">%s</span>',
            errors_on_separate_row=False)

    def as_p(self):
        "Returns this form rendered as HTML <p>s."
        return self._html_output(
            normal_row='<p%(html_class_attr)s>%(label)s %(field)s%(help_text)s</p>',
            error_row='%s',
            row_ender='</p>',
            help_text_html=' <span class="helptext">%s</span>',
            errors_on_separate_row=True)

    def non_field_errors(self):
        """
        Returns an ErrorList of errors that aren't associated with a particular
        field -- i.e., from Form.clean(). Returns an empty ErrorList if there
        are none.
        """
        return self.errors.get(NON_FIELD_ERRORS, self.error_class(error_class='nonfield'))

    def add_error(self, field, error):
        """
        Update the content of `self._errors`.

        The `field` argument is the name of the field to which the errors
        should be added. If its value is None the errors will be treated as
        NON_FIELD_ERRORS.

        The `error` argument can be a single error, a list of errors, or a
        dictionary that maps field names to lists of errors. What we define as
        an "error" can be either a simple string or an instance of
        ValidationError with its message attribute set and what we define as
        list or dictionary can be an actual `list` or `dict` or an instance
        of ValidationError with its `error_list` or `error_dict` attribute set.

        If `error` is a dictionary, the `field` argument *must* be None and
        errors will be added to the fields that correspond to the keys of the
        dictionary.
        """
        if not isinstance(error, ValidationError):
            # Normalize to ValidationError and let its constructor
            # do the hard work of making sense of the input.
            error = ValidationError(error)

        if hasattr(error, 'error_dict'):
            if field is not None:
                raise TypeError(
                    "The argument `field` must be `None` when the `error` "
                    "argument contains errors for multiple fields."
                )
            else:
                error = error.error_dict
        else:
            # Normalize single-field errors to the same {field: [errors]} shape.
            error = {field or NON_FIELD_ERRORS: error.error_list}

        for field, error_list in error.items():
            if field not in self.errors:
                if field != NON_FIELD_ERRORS and field not in self.fields:
                    raise ValueError(
                        "'%s' has no field named '%s'." % (self.__class__.__name__, field))
                if field == NON_FIELD_ERRORS:
                    self._errors[field] = self.error_class(error_class='nonfield')
                else:
                    self._errors[field] = self.error_class()
            self._errors[field].extend(error_list)
            # A field with errors must not expose a cleaned value.
            if field in self.cleaned_data:
                del self.cleaned_data[field]

    def has_error(self, field, code=None):
        # Returns True if `field` has an error; with `code`, only if one of
        # its ValidationErrors carries that code.
        if code is None:
            return field in self.errors

        if field in self.errors:
            for error in self.errors.as_data()[field]:
                if error.code == code:
                    return True
        return False

    def full_clean(self):
        """
        Cleans all of self.data and populates self._errors and
        self.cleaned_data.
        """
        self._errors = ErrorDict()
        if not self.is_bound:  # Stop further processing.
            return
        self.cleaned_data = {}
        # If the form is permitted to be empty, and none of the form data has
        # changed from the initial data, short circuit any validation.
        if self.empty_permitted and not self.has_changed():
            return

        self._clean_fields()
        self._clean_form()
        self._post_clean()

    def _clean_fields(self):
        # Per-field validation: Field.clean() followed by any clean_<name>()
        # hook defined on the form.
        for name, field in self.fields.items():
            # value_from_datadict() gets the data from the data dictionaries.
            # Each widget type knows how to retrieve its own data, because some
            # widgets split data over several HTML fields.
            if field.disabled:
                # Disabled fields ignore submitted data and use the initial.
                value = self.get_initial_for_field(field, name)
            else:
                value = field.widget.value_from_datadict(self.data, self.files, self.add_prefix(name))
            try:
                if isinstance(field, FileField):
                    initial = self.get_initial_for_field(field, name)
                    value = field.clean(value, initial)
                else:
                    value = field.clean(value)
                self.cleaned_data[name] = value
                if hasattr(self, 'clean_%s' % name):
                    value = getattr(self, 'clean_%s' % name)()
                    self.cleaned_data[name] = value
            except ValidationError as e:
                self.add_error(name, e)

    def _clean_form(self):
        # Form-wide validation via the clean() hook; errors land in
        # NON_FIELD_ERRORS.
        try:
            cleaned_data = self.clean()
        except ValidationError as e:
            self.add_error(None, e)
        else:
            if cleaned_data is not None:
                self.cleaned_data = cleaned_data

    def _post_clean(self):
        """
        An internal hook for performing additional cleaning after form cleaning
        is complete. Used for model validation in model forms.
        """
        pass

    def clean(self):
        """
        Hook for doing any extra form-wide cleaning after Field.clean() has been
        called on every field. Any ValidationError raised by this method will
        not be associated with a particular field; it will have a special-case
        association with the field named '__all__'.
        """
        return self.cleaned_data

    def has_changed(self):
        """
        Returns True if data differs from initial.
        """
        return bool(self.changed_data)

    @cached_property
    def changed_data(self):
        # Names of fields whose submitted value differs from their initial
        # value; cached because it may be consulted several times per request.
        data = []
        for name, field in self.fields.items():
            prefixed_name = self.add_prefix(name)
            data_value = field.widget.value_from_datadict(self.data, self.files, prefixed_name)
            if not field.show_hidden_initial:
                # Use the BoundField's initial as this is the value passed to
                # the widget.
                initial_value = self[name].initial
            else:
                # The initial value was round-tripped through a hidden widget;
                # read it back from the submitted data.
                initial_prefixed_name = self.add_initial_prefix(name)
                hidden_widget = field.hidden_widget()
                try:
                    initial_value = field.to_python(hidden_widget.value_from_datadict(
                        self.data, self.files, initial_prefixed_name))
                except ValidationError:
                    # Always assume data has changed if validation fails.
                    data.append(name)
                    continue
            if field.has_changed(initial_value, data_value):
                data.append(name)
        return data

    @property
    def media(self):
        """
        Provide a description of all media required to render the widgets on this form
        """
        # Media objects combine (deduplicate) via the + operator.
        media = Media()
        for field in self.fields.values():
            media = media + field.widget.media
        return media

    def is_multipart(self):
        """
        Returns True if the form needs to be multipart-encoded, i.e. it has
        FileInput. Otherwise, False.
        """
        for field in self.fields.values():
            if field.widget.needs_multipart_form:
                return True
        return False

    def hidden_fields(self):
        """
        Returns a list of all the BoundField objects that are hidden fields.
        Useful for manual form layout in templates.
        """
        return [field for field in self if field.is_hidden]

    def visible_fields(self):
        """
        Returns a list of BoundField objects that aren't hidden fields.
        The opposite of the hidden_fields() method.
        """
        return [field for field in self if not field.is_hidden]

    def get_initial_for_field(self, field, field_name):
        """
        Return initial data for field on form. Use initial data from the form
        or the field, in that order. Evaluate callable values.
        """
        value = self.initial.get(field_name, field.initial)
        if callable(value):
            value = value()
        return value
class Form(six.with_metaclass(DeclarativeFieldsMetaclass, BaseForm)):
    "A collection of Fields, plus their associated data."
    # This is a separate class from BaseForm in order to abstract the way
    # self.fields is specified. This class (Form) is the one that does the
    # fancy metaclass stuff purely for the semantic sugar -- it allows one
    # to define a form using declarative syntax.
    # BaseForm itself has no way of designating self.fields.
| bsd-3-clause |
dyoung418/tensorflow | tensorflow/contrib/nn/python/ops/scaled_softplus_test.py | 66 | 3039 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for scaled_softplus.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.nn.python.ops.scaled_softplus import scaled_softplus
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import gradient_checker
from tensorflow.python.platform import test
class ScaledSoftplusTest(test.TestCase):

  def test(self):
    # Forward-pass check: compare scaled_softplus against a NumPy reference,
    # clipped (float32) and unclipped (float64).
    np.random.seed(1)  # Make it reproducible.
    x = np.random.randn(3, 4).astype(np.float32)
    x64 = np.random.randn(3, 4).astype(np.float64)
    alpha = np.random.rand() + 0.01
    clip = np.float32(0.1)
    # Reference: alpha * softplus(x / alpha), optionally clipped from above.
    y = np.minimum(alpha * np.log(1. + np.exp(x / alpha)), clip)
    y64 = alpha * np.log(1. + np.exp(x64 / alpha))
    with self.test_session(use_gpu=True) as sess:
      z = scaled_softplus(constant_op.constant(x), alpha, clip)
      z64 = scaled_softplus(constant_op.constant(x64), alpha)
      z, z64 = sess.run([z, z64])
      eps = 1e-6
      self.assertAllClose(y, z, eps)
      self.assertAllClose(y64, z64, eps)

  def testGradient(self):
    # Finite-difference gradient check for both the unclipped and clipped
    # variants; alpha broadcasts over rows, clip over columns.
    np.random.seed(1)  # Make it reproducible.
    x_shape = [5, 10]
    x_np = np.random.randn(*x_shape).astype(np.float32)
    alpha_np = np.float32(np.random.rand(1, x_shape[1]) + 0.01)
    clip_np = np.float32(np.random.rand(x_shape[0], 1) * 5.)
    with self.test_session(use_gpu=True):
      x_tf = constant_op.constant(x_np)
      alpha_tf = constant_op.constant(alpha_np)
      clip_tf = constant_op.constant(clip_np)
      y_tf = scaled_softplus(x_tf, alpha_tf)
      z_tf = scaled_softplus(x_tf, alpha_tf, clip_tf * 0.1)
      err = gradient_checker.compute_gradient_error([x_tf, alpha_tf],
                                                    [x_shape, alpha_np.shape],
                                                    y_tf, x_shape,
                                                    [x_np, alpha_np],
                                                    delta=0.002)
      err_clip = gradient_checker.compute_gradient_error(
          [x_tf, alpha_tf, clip_tf],
          [x_shape, alpha_np.shape, clip_np.shape],
          z_tf, x_shape,
          [x_np, alpha_np, clip_np],
          delta=0.002)
      eps = 2e-4
      self.assertLess(err, eps)
      self.assertLess(err_clip, eps)
if __name__ == '__main__':
  # Run the test suite when executed directly.
  test.main()
| apache-2.0 |
StevenBlack/phantomjs | src/qt/qtwebkit/Tools/CygwinDownloader/cygwin-downloader.py | 120 | 5513 | #!/usr/bin/env python
import os, random, sys, time, urllib
#
# Options
#
# Command-line flags: --dry-run skips all downloads and file writes;
# --quiet suppresses progress output.
dry_run = len(sys.argv) > 1 and "--dry-run" in set(sys.argv[1:])
quiet = len(sys.argv) > 1 and "--quiet" in set(sys.argv[1:])
#
# Functions and constants
#

def download_progress_hook(block_count, block_size, total_blocks):
    # urllib reporthook: print a dot for roughly half the callbacks so the
    # progress line grows slowly; suppressed entirely under --quiet.
    if quiet or random.random() > 0.5:
        return
    sys.stdout.write(".")
    sys.stdout.flush()
def download_url_to_file(url, file, message):
    # Fetch `url` into the local path `file`, creating parent directories as
    # needed. `message` is echoed first (unless --quiet); progress dots are
    # emitted by download_progress_hook. Under --dry-run nothing is written.
    if not quiet:
        print message + " ",
    if not dry_run:
        dir = os.path.dirname(file)
        if len(dir) and not os.path.exists(dir):
            os.makedirs(dir)
        urllib.urlretrieve(url, file, download_progress_hook)
    if not quiet:
        print
# This is mostly just the list of North America http mirrors from http://cygwin.com/mirrors.html,
# but a few have been removed that seemed unresponsive from Cupertino.
mirror_servers = ["http://cygwin.elite-systems.org/",
                  "http://mirror.mcs.anl.gov/cygwin/",
                  "http://cygwin.osuosl.org/",
                  "http://mirrors.kernel.org/sourceware/cygwin/",
                  "http://mirrors.xmission.com/cygwin/",
                  "http://sourceware.mirrors.tds.net/pub/sourceware.org/cygwin/"]

# Pick one mirror at random to spread load across the mirror network.
# (random.choice replaces the roundabout mirror_servers[random.choice(range(len(...)))].)
package_mirror_url = random.choice(mirror_servers)
def download_package(package, message):
    # Download one package archive from the chosen mirror, preserving its
    # relative path locally (setup.exe expects this on-disk layout).
    download_url_to_file(package_mirror_url + package["path"], package["path"], message)
# Packages beyond Cygwin's own Base category that the build environment
# needs. They are forced into the Base category when setup.ini is rewritten
# below, so setup.exe installs them by default.
required_packages = frozenset(["apache",
                               "bc",
                               "bison",
                               "curl",
                               "diffutils",
                               "e2fsprogs",
                               "emacs",
                               "flex",
                               "gcc",
                               "gperf",
                               "keychain",
                               "make",
                               "minires",
                               "nano",
                               "openssh",
                               "patch",
                               "perl",
                               "perl-libwin32",
                               "python",
                               "rebase",
                               "rsync",
                               "ruby",
                               "subversion",
                               "unzip",
                               "vim",
                               "zip"])
#
# Main
#

print "Using Cygwin mirror server " + package_mirror_url + " to download setup.ini..."

# Fetch the mirror's package index; a locally modified copy ("setup.ini")
# is written alongside it with the required packages promoted to Base.
urllib.urlretrieve(package_mirror_url + "setup.ini", "setup.ini.orig")
downloaded_packages_file_path = "setup.ini.orig"
downloaded_packages_file = file(downloaded_packages_file_path, "r")
if not dry_run:
    modified_packages_file = file("setup.ini", "w")

# Parse setup.ini line by line. "@ name" starts a package record; the
# category/requires/install lines that follow belong to that record.
packages = {}
current_package = ''
for line in downloaded_packages_file.readlines():
    if line[0] == "@":
        current_package = line[2:-1]
        packages[current_package] = {"name": current_package, "needs_download": False, "requires": [], "path": ""}
    elif line[:10] == "category: ":
        # Promote required packages into Base so setup.exe installs them.
        if current_package in required_packages:
            line = "category: Base\n"
        if "Base" in set(line[10:-1].split()):
            packages[current_package]["needs_download"] = True
    elif line[:10] == "requires: ":
        packages[current_package]["requires"] = line[10:].split()
        packages[current_package]["requires"].sort()
    elif line[:9] == "install: " and not len(packages[current_package]["path"]):
        # First "install:" line holds the archive path (up to the first space).
        end_of_path = line.find(" ", 9)
        if end_of_path != -1:
            packages[current_package]["path"] = line[9:end_of_path]
    if not dry_run:
        modified_packages_file.write(line)

downloaded_packages_file.close()
os.remove(downloaded_packages_file_path)
if not dry_run:
    modified_packages_file.close()

names_to_download = set()
package_names = packages.keys()
package_names.sort()

def add_package_and_dependencies(name):
    # Recursively mark `name` and everything it requires for download.
    if name in names_to_download:
        return
    if not name in packages:
        return
    packages[name]["needs_download"] = True
    names_to_download.add(name)
    for dep in packages[name]["requires"]:
        add_package_and_dependencies(dep)

# Seed the closure with every Base-category package.
for name in package_names:
    if packages[name]["needs_download"]:
        add_package_and_dependencies(name)

downloaded_so_far = 0
for name in package_names:
    if packages[name]["needs_download"]:
        downloaded_so_far += 1
        download_package(packages[name], "Downloading package %3d of %3d (%s)" % (downloaded_so_far, len(names_to_download), name))

download_url_to_file("http://cygwin.com/setup.exe", "setup.exe", "Downloading setup.exe")

seconds_to_sleep = 10

print """
Finished downloading Cygwin. In %d seconds,
I will run setup.exe. Select the "Install
from Local Directory" option and browse to
"%s"
when asked for the "Local Package Directory".
""" % (seconds_to_sleep, os.getcwd())

# Visible countdown before launching the installer.
while seconds_to_sleep > 0:
    print "%d..." % seconds_to_sleep,
    sys.stdout.flush()
    time.sleep(1)
    seconds_to_sleep -= 1
print

if not dry_run:
    # NOTE(review): os.execl is called with no argv[0]; presumably this works
    # on the Windows/Cygwin Python this targets -- confirm before porting.
    os.execl("setup.exe")
| bsd-3-clause |
camny125/grit-i18n | grit/format/c_format_unittest.py | 61 | 1968 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittest for c_format.py.
"""
import os
import sys
if __name__ == '__main__':
  # When run directly from the source tree, make the grit package root
  # importable before the grit imports below.
  sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
import StringIO
from grit import util
from grit.tool import build
class CFormatUnittest(unittest.TestCase):

  def testMessages(self):
    # Build a small <messages> grd tree and verify the generated C source:
    # quote/apostrophe escaping, newline folding, and octal escaping of
    # non-ASCII bytes.
    root = util.ParseGrdForUnittest("""
      <messages>
        <message name="IDS_QUESTIONS">Do you want to play questions?</message>
        <message name="IDS_QUOTES">
          "What's in a name, <ph name="NAME">%s<ex>Brandon</ex></ph>?"
        </message>
        <message name="IDS_LINE_BREAKS">
          Was that rhetoric?
          No.
          Statement. Two all. Game point.
        </message>
        <message name="IDS_NON_ASCII">
          \xc3\xb5\\xc2\\xa4\\\xc2\xa4\\\\xc3\\xb5\xe4\xa4\xa4
        </message>
      </messages>
      """)

    buf = StringIO.StringIO()
    build.RcBuilder.ProcessNode(root, DummyOutput('c_format', 'en'), buf)
    output = util.StripBlankLinesAndComments(buf.getvalue())
    self.assertEqual(u"""\
#include "resource.h"

const char* GetString(int id) {
  switch (id) {
    case IDS_QUESTIONS:
      return "Do you want to play questions?";
    case IDS_QUOTES:
      return "\\"What\\'s in a name, %s?\\"";
    case IDS_LINE_BREAKS:
      return "Was that rhetoric?\\nNo.\\nStatement. Two all. Game point.";
    case IDS_NON_ASCII:
      return "\\303\\265\\xc2\\xa4\\\\302\\244\\\\xc3\\xb5\\344\\244\\244";
    default:
      return 0;
  }
}""", output)
class DummyOutput(object):
  """Minimal test double exposing the accessors an output node provides."""

  def __init__(self, type, language):
    # Keep the constructor arguments under the attribute names the
    # production output node uses.
    self.type, self.language = type, language

  def GetType(self):
    return self.type

  def GetLanguage(self):
    return self.language

  def GetOutputFilename(self):
    # The unittest never reads this file; any fixed name will do.
    return 'hello.gif'
if __name__ == '__main__':
  # Run this unittest module directly.
  unittest.main()
| bsd-2-clause |
vdt/SimpleCV | SimpleCV/Features/BOFFeatureExtractor.py | 11 | 12465 | from SimpleCV.base import *
from SimpleCV.ImageClass import Image
from SimpleCV.Features.FeatureExtractorBase import *
class BOFFeatureExtractor(object):
    """
    For a discussion of bag of features please see:
    http://en.wikipedia.org/wiki/Bag_of_words_model_in_computer_vision

    Initialize the bag of features extractor. This assumes you don't have
    the feature codebook pre-computed.
    patchsz = the dimensions of each codebook patch
    numcodes = the number of different patches in the codebook.
    imglayout = the shape of the resulting image in terms of patches
    padding = the pixel padding of each patch in the resulting image.
    """
    # Class-level defaults; __init__ (or load/generate) overwrites these
    # per instance.
    mPatchSize = (11,11)
    mNumCodes = 128
    mPadding = 0
    mLayout = (8,16)
    mCodebookImg = None    # rendered codebook as a single tiled image
    mCodebook = None       # numeric codebook: one flattened patch per row

    def __init__(self, patchsz=(11,11), numcodes=128, imglayout=(8,16), padding=0):
        self.mPadding = padding
        self.mLayout = imglayout
        self.mPatchSize = patchsz
        self.mNumCodes = numcodes
    def generate(self, imgdirs, numcodes=128, sz=(11,11), imgs_per_dir=50, img_layout=(8,16), padding=0, verbose=True):
        """
        This method builds the bag of features codebook from a list of directories
        with images in them. Each directory should be broken down by image class.

        * imgdirs: This list of directories.
        * patchsz: the dimensions of each codebook patch
        * numcodes: the number of different patches in the codebook.
        * imglayout: the shape of the resulting image in terms of patches - this must
          match the size of numcodes. I.e. numcodes == img_layout[0]*img_layout[1]
        * padding: the pixel padding of each patch in the resulting image.
        * imgs_per_dir: this method can use a specified number of images per directory
        * verbose: print output

        Once the method has completed it will save the results to a local file
        using the file name codebook.png

        WARNING:
        THIS METHOD WILL TAKE FOREVER
        """
        if( numcodes != img_layout[0]*img_layout[1]):
            warnings.warn("Numcodes must match the size of image layout.")
            return None

        self.mPadding = padding
        self.mLayout = img_layout
        self.mNumCodes = numcodes
        self.mPatchSize = sz
        rawFeatures = np.zeros(sz[0]*sz[1])  # fakeout numpy so we can use vstack
        for path in imgdirs:
            fcount = 0
            files = []
            for ext in IMAGE_FORMATS:
                files.extend(glob.glob(os.path.join(path, ext)))
            nimgs = min(len(files), imgs_per_dir)
            for i in range(nimgs):
                infile = files[i]
                if verbose:
                    print(path+" "+str(i)+" of "+str(imgs_per_dir))
                    print "Opening file: " + infile
                img = Image(infile)
                newFeat = self._getPatches(img, sz)
                if verbose:
                    print " Got " + str(len(newFeat)) + " features."
                rawFeatures = np.vstack((rawFeatures, newFeat))
                del img
        rawFeatures = rawFeatures[1:,:]  # pop the fake value we put on the top
        if verbose:
            print "=================================="
            print "Got " + str(len(rawFeatures)) + " features "
            print "Doing K-Means .... this will take a long time"
        self.mCodebook = self._makeCodebook(rawFeatures, self.mNumCodes)
        self.mCodebookImg = self._codebook2Img(self.mCodebook, self.mPatchSize, self.mNumCodes, self.mLayout, self.mPadding)
        self.mCodebookImg.save('codebook.png')
    def extractPatches(self, img, sz=(11,11)):
        """
        Get patches from a single images. This is an external access method. The
        user will need to maintain the list of features. See the generate method
        as a guide to doing this by hand. Sz is the image patch size.
        """
        return self._getPatches(img, sz)
    def makeCodebook(self, featureStack, ncodes=128):
        """
        This method will return the centroids of the k-means analysis of a large
        number of images. Ncodes is the number of centroids to find.
        """
        return self._makeCodebook(featureStack, ncodes)
    def _makeCodebook(self, data, ncodes=128):
        """
        Run k-means over the raw patch features and return the ncodes
        centroids. NOTE: this is very slow for large feature stacks.
        """
        # minit='points' seeds the centroids from randomly chosen samples.
        [centroids, membership] = cluster.kmeans2(data, ncodes, minit='points')
        return(centroids)
    def _img2Codebook(self, img, patchsize, count, patch_arrangement, spacersz):
        """
        Rebuild the numeric codebook matrix from a rendered codebook image.

        img = the image
        patchsize = the patch size (ususally 11x11)
        count = total codes
        patch_arrangement = how are the patches grided in the image (eg 128 = (8x16) 256=(16x16) )
        spacersz = the number of pixels between patches
        """
        # Work on the lightness channel of the HLS conversion only.
        img = img.toHLS()
        lmat = cv.CreateImage((img.width, img.height), cv.IPL_DEPTH_8U, 1)
        patch = cv.CreateImage(patchsize, cv.IPL_DEPTH_8U, 1)
        cv.Split(img.getBitmap(), None, lmat, None, None)
        w = patchsize[0]
        h = patchsize[1]
        length = w*h
        retVal = np.zeros(length)  # placeholder row so vstack works below
        for widx in range(patch_arrangement[0]):
            for hidx in range(patch_arrangement[1]):
                # Patch origin, accounting for the spacer border around tiles.
                x = (widx*patchsize[0])+((widx+1)*spacersz)
                y = (hidx*patchsize[1])+((hidx+1)*spacersz)
                cv.SetImageROI(lmat, (x, y, w, h))
                cv.Copy(lmat, patch)
                cv.ResetImageROI(lmat)
                retVal = np.vstack((retVal, np.array(patch[:,:]).reshape(length)))
        retVal = retVal[1:,:]  # drop the placeholder zero row
        return retVal
    def _codebook2Img(self, cb, patchsize, count, patch_arrangement, spacersz):
        """
        Render the codebook matrix as one grayscale image of tiled patches.

        cb = the codebook
        patchsize = the patch size (ususally 11x11)
        count = total codes
        patch_arrangement = how are the patches grided in the image (eg 128 = (8x16) 256=(16x16) )
        spacersz = the number of pixels between patches
        """
        # Overall canvas size: patches plus a spacer border around each one.
        w = (patchsize[0]*patch_arrangement[0])+((patch_arrangement[0]+1)*spacersz)
        h = (patchsize[1]*patch_arrangement[1])+((patch_arrangement[1]+1)*spacersz)
        bm = cv.CreateImage((w, h), cv.IPL_DEPTH_8U, 1)
        cv.Zero(bm)
        img = Image(bm)
        count = 0
        for widx in range(patch_arrangement[0]):
            for hidx in range(patch_arrangement[1]):
                x = (widx*patchsize[0])+((widx+1)*spacersz)
                y = (hidx*patchsize[1])+((hidx+1)*spacersz)
                # Each codebook row reshapes back into one patch tile.
                temp = Image(cb[count,:].reshape(patchsize[0], patchsize[1]))
                img.blit(temp, pos=(x, y))
                count = count + 1
        return img
    def _getPatches(self, img, sz=None):
        """
        Tile the image's lightness channel into non-overlapping sz patches,
        histogram-equalize each patch, and return them stacked as a numpy
        array with one flattened patch per row.
        """
        #retVal = [] # may need to go to np.array
        if( sz is None ):
            sz = self.mPatchSize
        img2 = img.toHLS()
        lmat = cv.CreateImage((img.width, img.height), cv.IPL_DEPTH_8U, 1)
        patch = cv.CreateImage(self.mPatchSize, cv.IPL_DEPTH_8U, 1)
        cv.Split(img2.getBitmap(), None, lmat, None, None)
        # Number of whole patches that fit in each dimension.
        wsteps = img2.width/sz[0]
        hsteps = img2.height/sz[1]
        w = sz[0]
        h = sz[1]
        length = w*h
        retVal = np.zeros(length)  # placeholder row so vstack works below
        for widx in range(wsteps):
            for hidx in range(hsteps):
                x = (widx*sz[0])
                y = (hidx*sz[1])
                cv.SetImageROI(lmat, (x, y, w, h))
                # Equalize each patch for some lighting invariance.
                cv.EqualizeHist(lmat, patch)
                #cv.Copy(lmat,patch)
                cv.ResetImageROI(lmat)
                retVal = np.vstack((retVal, np.array(patch[:,:]).reshape(length)))
                #retVal.append()
        retVal = retVal[1:,:]  # pop the fake value we put on top of the stack
        return retVal
def load(self,datafile):
"""
Load a codebook from file using the datafile. The datafile
should point to a local image for the source patch image.
"""
myFile = open(datafile, 'r')
temp = myFile.readline()
#print(temp)
self.mNumCodes = int(myFile.readline())
#print(self.mNumCodes)
w = int(myFile.readline())
h = int(myFile.readline())
self.mPatchSize = (w,h)
#print(self.mPatchSize)
self.mPadding = int(myFile.readline())
#print(self.mPadding)
w = int(myFile.readline())
h = int(myFile.readline())
self.mLayout = (w,h)
#print(self.mLayout)
imgfname = myFile.readline().strip()
#print(imgfname)
self.mCodebookImg = Image(imgfname)
self.mCodebook = self._img2Codebook(self.mCodebookImg,
self.mPatchSize,
self.mNumCodes,
self.mLayout,
self.mPadding)
#print(self.mCodebook)
return
def save(self,imgfname,datafname):
"""
Save the bag of features codebook and data set to a local file.
"""
myFile = open(datafname,'w')
myFile.write("BOF Codebook Data\n")
myFile.write(str(self.mNumCodes)+"\n")
myFile.write(str(self.mPatchSize[0])+"\n")
myFile.write(str(self.mPatchSize[1])+"\n")
myFile.write(str(self.mPadding)+"\n")
myFile.write(str(self.mLayout[0])+"\n")
myFile.write(str(self.mLayout[1])+"\n")
myFile.write(imgfname+"\n")
myFile.close()
if(self.mCodebookImg is None):
self._codebook2Img(self.mCodebook,self.mPatchSize,self.mNumCodes,self.mLayout,self.mPadding)
self.mCodebookImg.save(imgfname)
return
def __getstate__(self):
    """Pickle support: serialize the codebook as an image, not as an array."""
    if self.mCodebookImg is None:
        # NOTE(review): return value discarded -- presumably _codebook2Img
        # caches the image on self.mCodebookImg; confirm.
        self._codebook2Img(self.mCodebook, self.mPatchSize, self.mNumCodes,
                           self.mLayout, self.mPadding)
    state = dict(self.__dict__)
    # Dropped here; __setstate__ rebuilds it from the codebook image.
    state.pop('mCodebook')
    return state
def __setstate__(self, mydict):
    """Pickle support: restore attributes, then rebuild the codebook array."""
    self.__dict__ = mydict
    # The codebook matrix was dropped by __getstate__; regenerate it from
    # the stored codebook image.
    self.mCodebook = self._img2Codebook(
        self.mCodebookImg, self.mPatchSize, self.mNumCodes,
        self.mLayout, self.mPadding)
def extract(self, img):
    """
    Extract a bag-of-features histogram for img using the codebook.

    Each patch returned by _getPatches is assigned to its nearest codebook
    code (Euclidean distance) and the normalized bin counts over all codes
    are returned as a 1D array of length self.mNumCodes.
    """
    data = self._getPatches(img)
    # Distance from every patch to every code; nearest code wins.
    dists = spsd.cdist(data, self.mCodebook)
    codes = np.argmin(dists, axis=1)
    # density=True replaces the long-deprecated (and now removed)
    # normed=True argument; bins are uniform so the semantics match.
    hist, _ = np.histogram(codes, self.mNumCodes, density=True,
                           range=(0, self.mNumCodes - 1))
    return hist
def reconstruct(self,img):
    """
    This is a "just for fun" method as a sanity check for the BOF codebook.
    The method takes in an image, extracts each codebook code, and replaces
    the image at the position with the code.
    """
    # Blank single-channel canvas the same size as the input.
    retVal = cv.CreateImage((img.width,img.height), cv.IPL_DEPTH_8U, 1)
    data = self._getPatches(img)
    # Distance of every extracted patch to every codebook entry.
    p = spsd.cdist(data,self.mCodebook)
    foo = p.shape[0]  # number of patches (unused)
    # Index of the nearest codebook code for each patch.
    codes = np.argmin(p,axis=1)
    count = 0
    # NOTE(review): integer division is expected here (Python 2 style);
    # under Python 3 '/' yields floats and range() below would fail --
    # confirm the intended interpreter.
    wsteps = img.width/self.mPatchSize[0]
    hsteps = img.height/self.mPatchSize[1]
    w=self.mPatchSize[0]
    h=self.mPatchSize[1]
    length = w*h  # pixel count per patch (unused below)
    retVal = Image(retVal)
    # Paste the nearest codebook patch back at each grid position.
    for widx in range(wsteps):
        for hidx in range(hsteps):
            x = (widx*self.mPatchSize[0])
            y = (hidx*self.mPatchSize[1])
            p = codes[count]  # note: rebinds p from distances to an index
            temp = Image(self.mCodebook[p,:].reshape(self.mPatchSize[0],self.mPatchSize[1]))
            retVal = retVal.blit(temp,pos=(x,y))
            count = count + 1
    return retVal
def getFieldNames(self):
    """
    Return the name of each field in the feature vector, in the order the
    values are returned (e.g. "CB_R0_C1" for layout row 0, column 1).
    """
    rows, cols = self.mLayout[0], self.mLayout[1]
    return ["CB_R%d_C%d" % (r, c)
            for r in range(rows)
            for c in range(cols)]
def getNumFields(self):
    """Return the length of the feature vector: one field per codebook code."""
    return self.mNumCodes
| bsd-3-clause |
mercycorps/tola-activity | htdocs/indicators/test.py | 1 | 2312 | from django.test import TestCase
from django.test import RequestFactory
from django.test import Client
from indicators.models import Indicator, IndicatorType, Objective, DisaggregationType, ReportingFrequency, CollectedData
from activitydb.models import Program, Sector
from django.contrib.auth.models import User
class IndicatorTestCase(TestCase):
    """Exercise creation of Indicator and CollectedData rows together with
    the related objects they require (type, disaggregation, frequency,
    owner)."""

    def setUp(self):
        # Build the related rows first; each is re-fetched by its natural
        # key before being attached to the indicator below.
        new_program = Program.objects.create(name="testprogram")
        new_program.save()  # NOTE(review): create() already persists; extra save
        get_program = Program.objects.get(name="testprogram")  # fetched but unused
        new_indicator_type = IndicatorType.objects.create(indicator_type="testtype")
        new_indicator_type.save()
        get_indicator_type = IndicatorType.objects.get(indicator_type="testtype")  # unused
        new_disaggregation = DisaggregationType.objects.create(disaggregation_type="disagg")
        new_disaggregation.save()
        get_disaggregation = DisaggregationType.objects.get(disaggregation_type="disagg")
        new_frequency = ReportingFrequency.objects.create(frequency="newfreq")
        new_frequency.save()
        get_frequency = ReportingFrequency.objects.get(frequency="newfreq")
        user = User.objects.create_user('john', 'lennon@thebeatles.com', 'johnpassword')
        user.save()
        get_user = User.objects.get(username='john')
        # The indicator under test, wired to the rows created above.
        new_indicator = Indicator.objects.create(name="testindicator",number="1.2.3",source="testing",
            disaggregation=get_disaggregation, baseline="10",lop_target="10", reporting_frequency=get_frequency,owner=get_user)
        new_indicator.save()
        get_indicator = Indicator.objects.get(name="testindicator")
        # One data point recorded against the indicator.
        new_collected = CollectedData.objects.create(targeted="12",achieved="20", description="somevaluecollected", indicator=get_indicator)
        new_collected.save()

    def test_indicator_exists(self):
        """Check for Indicator object"""
        get_indicator = Indicator.objects.get(name="testindicator")
        self.assertEqual(Indicator.objects.filter(id=get_indicator.id).count(), 1)

    def test_collected_exists(self):
        """Check for CollectedData object"""
        get_collected = CollectedData.objects.get(description="somevaluecollected")
        self.assertEqual(CollectedData.objects.filter(id=get_collected.id).count(), 1)
| gpl-2.0 |
wolfram74/numerical_methods_iserles_notes | venv/lib/python2.7/site-packages/zmq/ssh/forward.py | 53 | 3549 | #
# This file is adapted from a paramiko demo, and thus licensed under LGPL 2.1.
# Original Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
# Edits Copyright (C) 2010 The IPython Team
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distrubuted in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA.
"""
Sample script showing how to do local port forwarding over paramiko.
This script connects to the requested SSH server and sets up local port
forwarding (the openssh -L option) from a local port through a tunneled
connection to a destination reachable from the SSH server machine.
"""
from __future__ import print_function
import logging
import select
try: # Python 3
import socketserver
except ImportError: # Python 2
import SocketServer as socketserver
logger = logging.getLogger('ssh')
class ForwardServer (socketserver.ThreadingTCPServer):
    # Handle each incoming connection in its own daemon thread so the
    # process can exit without waiting on open tunnels.
    daemon_threads = True
    # Allow the local port to be rebound quickly after a restart.
    allow_reuse_address = True
class Handler (socketserver.BaseRequestHandler):
    """Relay bytes between an accepted local socket and a 'direct-tcpip'
    channel opened on the SSH transport.

    chain_host, chain_port and ssh_transport are supplied as class
    attributes by the subclass built in forward_tunnel().
    """

    def handle(self):
        try:
            chan = self.ssh_transport.open_channel('direct-tcpip',
                                                   (self.chain_host, self.chain_port),
                                                   self.request.getpeername())
        except Exception as e:
            logger.debug('Incoming request to %s:%d failed: %s' % (self.chain_host,
                                                                   self.chain_port,
                                                                   repr(e)))
            return
        if chan is None:
            logger.debug('Incoming request to %s:%d was rejected by the SSH server.' %
                         (self.chain_host, self.chain_port))
            return

        logger.debug('Connected! Tunnel open %r -> %r -> %r' % (self.request.getpeername(),
                                                                chan.getpeername(), (self.chain_host, self.chain_port)))
        try:
            # Pump data both ways until either side closes (recv returns b'').
            while True:
                r, w, x = select.select([self.request, chan], [], [])
                if self.request in r:
                    data = self.request.recv(1024)
                    if len(data) == 0:
                        break
                    chan.send(data)
                if chan in r:
                    data = chan.recv(1024)
                    if len(data) == 0:
                        break
                    self.request.send(data)
        finally:
            # Close both ends even if the relay loop raises, so neither the
            # channel nor the client socket is leaked.
            chan.close()
            self.request.close()
            logger.debug('Tunnel closed ')
def forward_tunnel(local_port, remote_host, remote_port, transport):
    """Serve forever, forwarding 127.0.0.1:local_port through *transport*
    to remote_host:remote_port (the openssh -L behaviour)."""
    # Handler instances have no clean way to reach the enclosing server, so
    # bake the connection parameters into a throwaway subclass instead.
    class SubHander (Handler):
        chain_host = remote_host
        chain_port = remote_port
        ssh_transport = transport

    server = ForwardServer(('127.0.0.1', local_port), SubHander)
    server.serve_forever()
__all__ = ['forward_tunnel']
| mit |
abhishekkrthakur/scikit-learn | sklearn/metrics/cluster/supervised.py | 21 | 26876 | """Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Wei LI <kuantkid@gmail.com>
# Diego Molla <dmolla-aliod@gmail.com>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
    """Return n choose 2 as an exact integer.

    Computed directly as n*(n-1)//2: exact for arbitrarily large counts and
    faster than scipy's comb(n, 2, exact=1), which this replaces (that
    helper lived in the long-removed scipy.misc namespace). Inputs are
    nonnegative counts throughout this module.
    """
    return n * (n - 1) // 2
def check_clusterings(labels_true, labels_pred):
    """Check that the two clusterings matching 1D integer arrays"""
    labels_true = np.asarray(labels_true)
    labels_pred = np.asarray(labels_pred)

    # Both inputs must be flat label vectors.
    if labels_true.ndim != 1:
        raise ValueError(
            "labels_true must be 1D: shape is %r" % (labels_true.shape,))
    if labels_pred.ndim != 1:
        raise ValueError(
            "labels_pred must be 1D: shape is %r" % (labels_pred.shape,))

    # ... and of identical length.
    if labels_true.shape != labels_pred.shape:
        raise ValueError(
            "labels_true and labels_pred must have same size, got %d and %d"
            % (labels_true.shape[0], labels_pred.shape[0]))

    return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
    """Build a contingency matrix describing the relationship between labels.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference

    labels_pred : array, shape = [n_samples]
        Cluster labels to evaluate

    eps: None or float
        If a float, that value is added to all values in the contingency
        matrix. This helps to stop NaN propagation.
        If ``None``, nothing is adjusted.

    Returns
    -------
    contingency: array, shape=[n_classes_true, n_classes_pred]
        Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples
        in true class :math:`i` and in predicted class :math:`j`. If
        ``eps is None``, the dtype of this array will be integer. If ``eps``
        is given, the dtype will be float.
    """
    classes, class_idx = np.unique(labels_true, return_inverse=True)
    clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
    n_classes = classes.shape[0]
    n_clusters = clusters.shape[0]
    # Using coo_matrix to accelerate simple histogram calculation,
    # i.e. bins are consecutive integers.
    # Currently, coo_matrix is faster than histogram2d for simple cases.
    # dtype=int (the builtin) replaces the np.int alias removed in NumPy 1.24.
    contingency = coo_matrix((np.ones(class_idx.shape[0]),
                              (class_idx, cluster_idx)),
                             shape=(n_classes, n_clusters),
                             dtype=int).toarray()
    if eps is not None:
        # don't use += as contingency is integer
        contingency = contingency + eps
    return contingency
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
    """Rand index adjusted for chance.

    The Rand Index considers all pairs of samples and counts pairs assigned
    consistently (same/different cluster) in the predicted and true
    clusterings. The raw score is then corrected for chance::

        ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)

    The ARI is close to 0.0 for random labelings regardless of the number
    of clusters/samples, and exactly 1.0 for identical clusterings (up to a
    permutation of label values). It is symmetric::

        adjusted_rand_score(a, b) == adjusted_rand_score(b, a)

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference

    labels_pred : array, shape = [n_samples]
        Cluster labels to evaluate

    Returns
    -------
    ari: float
       Similarity score between -1.0 and 1.0. Random labelings have an ARI
       close to 0.0. 1.0 stands for perfect match.

    Examples
    --------
    >>> from sklearn.metrics.cluster import adjusted_rand_score
    >>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
    1.0
    >>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
    0.0

    References
    ----------
    .. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
      Journal of Classification 1985`
      http://www.springerlink.com/content/x64124718341j1j0/

    .. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index

    See also
    --------
    adjusted_mutual_info_score: Adjusted Mutual Information
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_samples = labels_true.shape[0]
    n_classes = np.unique(labels_true).shape[0]
    n_clusters = np.unique(labels_pred).shape[0]

    # Degenerate clusterings -- empty input, everything in one cluster, or
    # every sample its own cluster on both sides -- trivially match: 1.0.
    if (n_classes == n_clusters == 1
            or n_classes == n_clusters == 0
            or n_classes == n_clusters == len(labels_true)):
        return 1.0

    contingency = contingency_matrix(labels_true, labels_pred)

    # Pair counts within true classes, predicted clusters, and their joint.
    sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
    sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
    sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())

    prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
    mean_comb = (sum_comb_k + sum_comb_c) / 2.
    return (sum_comb - prod_comb) / (mean_comb - prod_comb)
def homogeneity_completeness_v_measure(labels_true, labels_pred):
    """Compute the homogeneity, completeness and V-Measure scores at once.

    These metrics use normalized conditional entropy of the evaluated
    labeling against ground-truth class labels of the same samples.
    A labeling is *homogeneous* when each cluster contains members of a
    single class only, and *complete* when all members of a class share one
    cluster. Both scores lie in [0, 1], larger being better, and are
    invariant to permutations of the label values. The V-Measure is
    symmetric in its arguments; homogeneity and completeness are not
    (swapping the arguments swaps the two).

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        ground truth class labels to be used as a reference

    labels_pred : array, shape = [n_samples]
        cluster labels to evaluate

    Returns
    -------
    homogeneity: float
       score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling

    completeness: float
       score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling

    v_measure: float
        harmonic mean of the first two

    See also
    --------
    homogeneity_score
    completeness_score
    v_measure_score
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)

    # An empty labeling is treated as a perfect (vacuous) match.
    if len(labels_true) == 0:
        return 1.0, 1.0, 1.0

    entropy_C = entropy(labels_true)
    entropy_K = entropy(labels_pred)
    MI = mutual_info_score(labels_true, labels_pred)

    # Zero entropy means a single class/cluster: score that axis 1.0.
    homogeneity = MI / entropy_C if entropy_C else 1.0
    completeness = MI / entropy_K if entropy_K else 1.0

    denom = homogeneity + completeness
    if denom == 0.0:
        v_measure_score = 0.0
    else:
        v_measure_score = 2.0 * homogeneity * completeness / denom

    return homogeneity, completeness, v_measure_score
def homogeneity_score(labels_true, labels_pred):
    """Homogeneity metric of a cluster labeling given a ground truth.

    A clustering is homogeneous when each cluster contains only members of
    a single class. The score lies in [0, 1]; 1.0 means perfectly
    homogeneous. Label values are irrelevant (permutation-invariant), but
    the metric is *not* symmetric: swapping the arguments yields
    :func:`completeness_score` instead.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        ground truth class labels to be used as a reference

    labels_pred : array, shape = [n_samples]
        cluster labels to evaluate

    Returns
    -------
    homogeneity: float
       score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling

    Examples
    --------
    Perfect labelings, and splits of classes into finer clusters, are
    homogeneous::

      >>> from sklearn.metrics.cluster import homogeneity_score
      >>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0
      >>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
      ... # doctest: +ELLIPSIS
      1.0...

    Clusters mixing several classes are not::

      >>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
      ... # doctest: +ELLIPSIS
      0.0...

    References
    ----------
    .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
       conditional entropy-based external cluster evaluation measure
       <http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_

    See also
    --------
    completeness_score
    v_measure_score
    """
    homogeneity, _, _ = homogeneity_completeness_v_measure(labels_true,
                                                           labels_pred)
    return homogeneity
def completeness_score(labels_true, labels_pred):
    """Completeness metric of a cluster labeling given a ground truth.

    A clustering is complete when all members of a given class are assigned
    to the same cluster. The score lies in [0, 1]; 1.0 means perfectly
    complete. Label values are irrelevant (permutation-invariant), but the
    metric is *not* symmetric: swapping the arguments yields
    :func:`homogeneity_score` instead.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        ground truth class labels to be used as a reference

    labels_pred : array, shape = [n_samples]
        cluster labels to evaluate

    Returns
    -------
    completeness: float
       score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling

    Examples
    --------
    Perfect labelings, and mergers of classes into coarser clusters, are
    complete::

      >>> from sklearn.metrics.cluster import completeness_score
      >>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0
      >>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
      1.0

    Splitting a class across clusters is not::

      >>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
      0.0

    References
    ----------
    .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
       conditional entropy-based external cluster evaluation measure
       <http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_

    See also
    --------
    homogeneity_score
    v_measure_score
    """
    _, completeness, _ = homogeneity_completeness_v_measure(labels_true,
                                                            labels_pred)
    return completeness
def v_measure_score(labels_true, labels_pred):
    """V-measure cluster labeling given a ground truth.

    This score is identical to :func:`normalized_mutual_info_score`: the
    harmonic mean of homogeneity and completeness::

        v = 2 * (homogeneity * completeness) / (homogeneity + completeness)

    The score is invariant to permutations of the label values and is
    symmetric, which makes it suitable for comparing two independent label
    assignments when no ground truth is available.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        ground truth class labels to be used as a reference

    labels_pred : array, shape = [n_samples]
        cluster labels to evaluate

    Returns
    -------
    v_measure: float
       score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling

    Examples
    --------
    Perfect labelings score 1.0::

      >>> from sklearn.metrics.cluster import v_measure_score
      >>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
      1.0
      >>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0

    Incomplete or inhomogeneous labelings are penalized::

      >>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
      ... # doctest: +ELLIPSIS
      0.8...
      >>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
      ... # doctest: +ELLIPSIS
      0.0...
      >>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
      ... # doctest: +ELLIPSIS
      0.0...

    References
    ----------
    .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
       conditional entropy-based external cluster evaluation measure
       <http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_

    See also
    --------
    homogeneity_score
    completeness_score
    """
    _, _, v_measure = homogeneity_completeness_v_measure(labels_true,
                                                         labels_pred)
    return v_measure
def mutual_info_score(labels_true, labels_pred, contingency=None):
    """Mutual Information between two clusterings.

    The Mutual Information measures the similarity of two labelings of the
    same data. With :math:`P(i)` the probability of a sample falling in
    cluster :math:`U_i` and :math:`P'(j)` the probability of falling in
    cluster :math:`V_j`:

    .. math::

        MI(U,V)=\\sum_{i=1}^R \\sum_{j=1}^C P(i,j)\\log\\frac{P(i,j)}{P(i)P'(j)}

    This equals the Kullback-Leibler divergence of the joint distribution
    from the product of the marginals. The score is invariant to label
    permutations and symmetric in its arguments.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    labels_pred : array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    contingency: None or array, shape = [n_classes_true, n_classes_pred]
        A contingency matrix given by the :func:`contingency_matrix`
        function. If value is ``None``, it will be computed, otherwise the
        given value is used, with ``labels_true`` and ``labels_pred``
        ignored.

    Returns
    -------
    mi: float
       Mutual information, a non-negative value

    See also
    --------
    adjusted_mutual_info_score: Adjusted against chance Mutual Information
    normalized_mutual_info_score: Normalized Mutual Information
    """
    if contingency is None:
        labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
        contingency = contingency_matrix(labels_true, labels_pred)
    contingency = np.array(contingency, dtype='float')
    total = contingency.sum()
    row_sums = contingency.sum(axis=1)
    col_sums = contingency.sum(axis=0)
    outer = np.outer(row_sums, col_sums)

    # Only nonzero cells contribute (0 * log 0 == 0 by convention).
    nonzero = contingency != 0.0
    joint = contingency[nonzero]
    log_joint = np.log(joint)
    joint /= total

    # log(a / b) is computed as log(a) - log(b) to limit precision loss.
    log_outer = -np.log(outer[nonzero]) + log(row_sums.sum()) + log(col_sums.sum())
    terms = joint * (log_joint - log(total)) + joint * log_outer
    return terms.sum()
def adjusted_mutual_info_score(labels_true, labels_pred):
    """Adjusted Mutual Information between two clusterings.

    Adjusted Mutual Information (AMI) corrects the Mutual Information (MI)
    for chance: MI tends to grow with the number of clusters regardless of
    any real shared information. For clusterings :math:`U` and :math:`V`::

        AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]

    The score is invariant to label permutations and symmetric in its
    arguments. It is an order of magnitude slower than, e.g., the Adjusted
    Rand Index.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    labels_pred : array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    Returns
    -------
    ami: float(upperlimited by 1.0)
       The AMI returns a value of 1 when the two partitions are identical
       (ie perfectly matched). Random partitions (independent labellings)
       have an expected AMI around 0 on average hence can be negative.

    Examples
    --------
    >>> from sklearn.metrics.cluster import adjusted_mutual_info_score
    >>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
    1.0
    >>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
    0.0

    References
    ----------
    .. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures
       for Clusterings Comparison: Variants, Properties, Normalization and
       Correction for Chance, JMLR
       <http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_

    .. [2] `Wikipedia entry for the Adjusted Mutual Information
       <http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_

    See also
    --------
    adjusted_rand_score: Adjusted Rand Index
    mutual_information_score: Mutual Information (not adjusted for chance)
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_samples = labels_true.shape[0]
    n_classes = np.unique(labels_true).shape[0]
    n_clusters = np.unique(labels_pred).shape[0]

    # Degenerate case: no split on either side (or empty input) is treated
    # as a perfect match.
    if (n_classes == n_clusters == 1) or (n_classes == n_clusters == 0):
        return 1.0

    contingency = np.array(contingency_matrix(labels_true, labels_pred),
                           dtype='float')
    # Observed MI, its expectation under the permutation model, and the
    # entropies used for normalization.
    mi = mutual_info_score(labels_true, labels_pred, contingency=contingency)
    emi = expected_mutual_information(contingency, n_samples)
    h_true, h_pred = entropy(labels_true), entropy(labels_pred)
    return (mi - emi) / (max(h_true, h_pred) - emi)
def normalized_mutual_info_score(labels_true, labels_pred):
    """Normalized Mutual Information between two clusterings.

    Normalized Mutual Information (NMI) rescales the Mutual Information to
    lie between 0 (no mutual information) and 1 (perfect correlation),
    dividing by ``sqrt(H(labels_true) * H(labels_pred))``. It is *not*
    adjusted for chance; :func:`adjusted_mutual_info_score` may be
    preferred. The score is invariant to label permutations and symmetric
    in its arguments.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    labels_pred : array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    Returns
    -------
    nmi: float
       score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling

    Examples
    --------
    >>> from sklearn.metrics.cluster import normalized_mutual_info_score
    >>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
    1.0
    >>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
    0.0

    See also
    --------
    adjusted_rand_score: Adjusted Rand Index
    adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
        against chance)
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_classes = np.unique(labels_true).shape[0]
    n_clusters = np.unique(labels_pred).shape[0]

    # Degenerate case: no split on either side (or empty input) is treated
    # as a perfect match.
    if (n_classes == n_clusters == 1) or (n_classes == n_clusters == 0):
        return 1.0

    contingency = np.array(contingency_matrix(labels_true, labels_pred),
                           dtype='float')
    mi = mutual_info_score(labels_true, labels_pred, contingency=contingency)
    h_true, h_pred = entropy(labels_true), entropy(labels_pred)
    # The max() guards against division by near-zero entropies.
    return mi / max(np.sqrt(h_true * h_pred), 1e-10)
def entropy(labels):
    """Calculate the entropy (in nats) of a labeling.

    Returns 1.0 for an empty labeling: a neutral value for the
    normalizations in this module that divide by the entropy.
    """
    if len(labels) == 0:
        return 1.0
    label_idx = np.unique(labels, return_inverse=True)[1]
    # np.bincount replaces the old compat shim import; the builtin 'float'
    # replaces the np.float alias removed in NumPy 1.24.
    pi = np.bincount(label_idx).astype(float)
    pi = pi[pi > 0]
    pi_sum = np.sum(pi)
    # log(a / b) should be calculated as log(a) - log(b) for
    # possible loss of precision
    return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
| bsd-3-clause |
nitinitprof/odoo | addons/event/res_partner.py | 329 | 1228 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields
class res_partner(models.Model):
    # Extend the core partner model in place rather than creating a new table.
    _inherit = 'res.partner'

    # Marks a contact as a potential event speaker (used by the event app's
    # views/filters; no behavior is attached here).
    speaker = fields.Boolean(help="Check this box if this contact is a speaker.")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
alexlo03/ansible | lib/ansible/playbook/conditional.py | 8 | 10089 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import re
from jinja2.compiler import generate
from jinja2.exceptions import UndefinedError
from ansible.errors import AnsibleError, AnsibleUndefinedVariable
from ansible.module_utils.six import text_type
from ansible.module_utils._text import to_native
from ansible.playbook.attribute import FieldAttribute
# Reuse the CLI's global display object when running under the ansible
# entry point; fall back to a fresh Display when imported standalone
# (e.g. unit tests).
try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()

# Matches "<var> is defined" / "<var> is not defined" style tests, including
# hostvars['...'] subscripts, capturing (variable, logic, defined|undefined).
DEFINED_REGEX = re.compile(r'(hostvars\[.+\]|[\w_]+)\s+(not\s+is|is|is\s+not)\s+(defined|undefined)')
# Detects use of the lookup() plugin inside a conditional string.
LOOKUP_REGEX = re.compile(r'lookup\s*\(')
# A "bare" variable name: a single valid Python identifier, nothing else.
VALID_VAR_REGEX = re.compile("^[_A-Za-z][_a-zA-Z0-9]*$")
class Conditional:

    '''
    This is a mix-in class, to be used with Base to allow the object
    to be run conditionally when a condition is met or skipped.
    '''

    _when = FieldAttribute(isa='list', default=list, extend=True, prepend=True)

    def __init__(self, loader=None):
        # when used directly, this class needs a loader, but we want to
        # make sure we don't trample on the existing one if this class
        # is used as a mix-in with a playbook base class
        if not hasattr(self, '_loader'):
            if loader is None:
                raise AnsibleError("a loader must be specified when using Conditional() directly")
            else:
                self._loader = loader

        super(Conditional, self).__init__()

    def _validate_when(self, attr, name, value):
        # Normalize a scalar ``when:`` value into a one-element list so the
        # evaluation loop below can always iterate.
        if not isinstance(value, list):
            setattr(self, name, [value])

    def extract_defined_undefined(self, conditional):
        # Return every (variable, logic, state) triple for "is (not)
        # defined/undefined" tests found in the conditional string.
        results = []

        cond = conditional
        m = DEFINED_REGEX.search(cond)
        while m:
            results.append(m.groups())
            # advance past this match and keep scanning the remainder
            cond = cond[m.end():]
            m = DEFINED_REGEX.search(cond)

        return results

    def evaluate_conditional(self, templar, all_vars):
        '''
        Loops through the conditionals set on this object, returning
        False if any of them evaluate as such.
        '''

        # since this is a mix-in, it may not have an underlying datastructure
        # associated with it, so we pull it out now in case we need it for
        # error reporting below
        ds = None
        if hasattr(self, '_ds'):
            ds = getattr(self, '_ds')

        try:
            # this allows for direct boolean assignments to conditionals "when: False"
            if isinstance(self.when, bool):
                return self.when

            # all conditionals must pass; bail out on the first failure
            for conditional in self.when:
                if not self._check_conditional(conditional, templar, all_vars):
                    return False
        except Exception as e:
            raise AnsibleError(
                "The conditional check '%s' failed. The error was: %s" % (to_native(conditional), to_native(e)), obj=ds
            )

        return True

    def _check_conditional(self, conditional, templar, all_vars):
        '''
        This method does the low-level evaluation of each conditional
        set on this object, using jinja2 to wrap the conditionals for
        evaluation.
        '''

        # keep the untouched input for error messages below
        original = conditional
        if conditional is None or conditional == '':
            return True

        if templar.is_template(conditional):
            display.warning('when statements should not include jinja2 '
                            'templating delimiters such as {{ }} or {%% %%}. '
                            'Found: %s' % conditional)

        # pull the "bare" var out, which allows for nested conditionals
        # and things like:
        # - assert:
        #     that:
        #     - item
        #   with_items:
        #   - 1 == 1
        if conditional in all_vars and VALID_VAR_REGEX.match(conditional):
            conditional = all_vars[conditional]

        # make sure the templar is using the variables specified with this method
        templar.set_available_variables(variables=all_vars)

        try:
            # if the conditional is "unsafe", disable lookups
            disable_lookups = hasattr(conditional, '__UNSAFE__')
            conditional = templar.template(conditional, disable_lookups=disable_lookups)
            # non-string template results (e.g. a bool) are returned as-is
            if not isinstance(conditional, text_type) or conditional == "":
                return conditional

            # update the lookups flag, as the string returned above may now be unsafe
            # and we don't want future templating calls to do unsafe things
            disable_lookups |= hasattr(conditional, '__UNSAFE__')

            # First, we do some low-level jinja2 parsing involving the AST format of the
            # statement to ensure we don't do anything unsafe (using the disable_lookup flag above)
            class CleansingNodeVisitor(ast.NodeVisitor):
                def generic_visit(self, node, inside_call=False, inside_yield=False):
                    if isinstance(node, ast.Call):
                        inside_call = True
                    elif isinstance(node, ast.Yield):
                        inside_yield = True
                    elif isinstance(node, ast.Str):
                        if disable_lookups:
                            if inside_call and node.s.startswith("__"):
                                # calling things with a dunder is generally bad at this point...
                                raise AnsibleError(
                                    "Invalid access found in the conditional: '%s'" % conditional
                                )
                            elif inside_yield:
                                # we're inside a yield, so recursively parse and traverse the AST
                                # of the result to catch forbidden syntax from executing
                                parsed = ast.parse(node.s, mode='exec')
                                cnv = CleansingNodeVisitor()
                                cnv.visit(parsed)
                    # iterate over all child nodes
                    for child_node in ast.iter_child_nodes(node):
                        self.generic_visit(
                            child_node,
                            inside_call=inside_call,
                            inside_yield=inside_yield
                        )
            try:
                # compile the conditional through jinja2's own pipeline so the
                # generated Python can be AST-checked before templating runs it
                e = templar.environment.overlay()
                e.filters.update(templar._get_filters(e.filters))
                e.tests.update(templar._get_tests())

                res = e._parse(conditional, None, None)
                res = generate(res, e, None, None)
                parsed = ast.parse(res, mode='exec')

                cnv = CleansingNodeVisitor()
                cnv.visit(parsed)
            except Exception as e:
                raise AnsibleError("Invalid conditional detected: %s" % to_native(e))

            # and finally we generate and template the presented string and look at the resulting string
            presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
            val = templar.template(presented, disable_lookups=disable_lookups).strip()
            if val == "True":
                return True
            elif val == "False":
                return False
            else:
                raise AnsibleError("unable to evaluate conditional: %s" % original)
        except (AnsibleUndefinedVariable, UndefinedError) as e:
            # the templating failed, meaning most likely a variable was undefined. If we happened
            # to be looking for an undefined variable, return True, otherwise fail
            try:
                # first we extract the variable name from the error message
                var_name = re.compile(r"'(hostvars\[.+\]|[\w_]+)' is undefined").search(str(e)).groups()[0]
                # next we extract all defined/undefined tests from the conditional string
                def_undef = self.extract_defined_undefined(conditional)
                # then we loop through these, comparing the error variable name against
                # each def/undef test we found above. If there is a match, we determine
                # whether the logic/state mean the variable should exist or not and return
                # the corresponding True/False
                for (du_var, logic, state) in def_undef:
                    # when we compare the var names, normalize quotes because something
                    # like hostvars['foo'] may be tested against hostvars["foo"]
                    if var_name.replace("'", '"') == du_var.replace("'", '"'):
                        # the should exist is a xor test between a negation in the logic portion
                        # against the state (defined or undefined)
                        should_exist = ('not' in logic) != (state == 'defined')
                        if should_exist:
                            return False
                        else:
                            return True
                # as nothing above matched the failed var name, re-raise here to
                # trigger the AnsibleUndefinedVariable exception again below
                raise
            except Exception as new_e:
                raise AnsibleUndefinedVariable("error while evaluating conditional (%s): %s" % (original, e))
| gpl-3.0 |
hyller/CodeLibrary | python-cookbook-master/src/11/passing_a_socket_file_descriptor_between_processes/worker.py | 5 | 1128 | # worker.py
import socket
import struct
def recv_fd(sock):
    '''
    Receive a single file descriptor over a Unix-domain socket.

    Reads one byte of normal data plus SCM_RIGHTS ancillary data carrying
    the descriptor, acknowledges with b'OK', and returns the (dup'ed) fd.
    '''
    fd_size = struct.calcsize('i')
    msg, ancdata, flags, addr = sock.recvmsg(1, socket.CMSG_LEN(fd_size))
    level, ctype, payload = ancdata[0]
    # sanity-check that the control message really is an fd transfer
    assert level == socket.SOL_SOCKET and ctype == socket.SCM_RIGHTS
    sock.sendall(b'OK')
    (fd,) = struct.unpack('i', payload)
    return fd
def worker(server_address):
    """Connect to *server_address* (Unix socket), then loop forever:
    receive a client socket fd from the server and echo that client's
    data back until it disconnects.
    """
    serv = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    serv.connect(server_address)
    while True:
        # block until the server passes us another client descriptor
        fd = recv_fd(serv)
        print('WORKER: GOT FD', fd)
        # wrap the raw fd as a TCP socket; the with-block closes it when done
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd) as client:
            while True:
                msg = client.recv(1024)
                if not msg:
                    # empty read == peer closed; go wait for the next fd
                    break
                print('WORKER: RECV {!r}'.format(msg))
                client.send(msg)
if __name__ == '__main__':
    import sys
    # Usage: worker.py <path-to-unix-socket-of-server>
    if len(sys.argv) != 2:
        print('Usage: worker.py server_address', file=sys.stderr)
        raise SystemExit(1)
    worker(sys.argv[1])
| unlicense |
fossdevil/Assignments | Machine Learning/Assignment3Final/ML4.py | 1 | 3746 | import numpy as np
import scipy
import matplotlib.pyplot as plt
import random
# N points in d dimensions
def generatePoints(n, d):
    """Draw n points uniformly on the unit sphere in R^d.

    Each point is a standard-normal vector normalized to unit length;
    returned as a list of n plain lists of length d.
    """
    sphere_points = []
    for _ in range(n):
        gauss = np.random.normal(0, 1, d)
        squared = gauss ** 2
        radius = np.sqrt(sum(squared))
        sphere_points.append(list(gauss / radius))
    return sphere_points
def interPointDistance(points, n, d):
    """Return the n x n matrix of pairwise Euclidean distances.

    Note: ``d`` is unused; it is kept to preserve the call signature.
    The full (symmetric) matrix is computed, diagonal included.
    """
    matrix = []
    for i in range(0, n):
        row = []
        for j in range(0, n):
            gap = list(np.asarray(points[i]) - np.asarray(points[j]))
            row.append(np.linalg.norm(gap))
        matrix.append(row)
    return matrix
def projection(points, subspace, n):
    """Orthogonally project the first n points onto the span of *subspace*.

    Parameters
    ----------
    points : sequence of length-d vectors
    subspace : matrix-like, shape (k, d)
        Rows are (not necessarily orthonormal) basis vectors of the subspace.
    n : int
        Number of points to project.

    Returns
    -------
    list of np.matrix
        The projected points (np.matrix because np.asmatrix is used below).
    """
    projPoint = []
    subspacet = np.asmatrix(subspace);
    subspace = subspacet.T;
    # The projector P = B (B^T B)^{-1} B^T depends only on the subspace, so
    # compute it once instead of re-inverting inside the loop (the original
    # redid this O(k^3) work for every point with an identical result).
    inv = np.linalg.inv(np.dot(subspacet, subspace));
    projector = np.dot(np.dot(subspace, inv), subspacet);
    for i in range(0, n):
        proj = np.dot(projector, points[i]);
        projPoint.append(proj);
    return projPoint;
def subspaceGen(n,d):
subspace = [];
subv = np.zeros(d);
r = np.arange(0,d);
k = list(random.sample(r,n));
j = 0;
for i in range(0,n):
subv = np.zeros(d);
subv[k[j]] = 1;
j = j+1;
subspace.append(subv);
return subspace;
# Experiment setup: 50 random points on the unit sphere in R^200.
n = 50;
d = 200;
points50 = generatePoints(n,d);
# Baseline pairwise distances in the full 200-dimensional space.
distMat = interPointDistance(points50,n,d);
print("Please open file \"Solution4.txt\":");
filename = "Solution4.txt"
target = open(filename,'w');
target.write("The interpoint distance Matrix is as follows:\n");
for i in range(0,n):
    target.write(str(distMat[i]));
    target.write("\n");
target.write("\n");
target.write("\n");
target.write("\n");
# Random axis-aligned subspaces of dimension k = 1, 2, 3, 10, 50.
subspaces1 = np.asmatrix(subspaceGen(1,d));
subspaces2 = np.asmatrix(subspaceGen(2,d));
subspaces3 = np.asmatrix(subspaceGen(3,d));
subspaces10 = np.asmatrix(subspaceGen(10,d));
subspaces50 = np.asmatrix(subspaceGen(50,d));
# Project the sphere points onto each subspace.
projPoint1 = projection(points50,subspaces1,n);
projPoint2 = projection(points50,subspaces2,n);
projPoint3 = projection(points50,subspaces3,n);
projPoint10 = projection(points50,subspaces10,n);
projPoint50 = projection(points50,subspaces50,n);
# Pairwise distances after each projection (d is unused by the function).
distMat1 = interPointDistance(projPoint1,n,d);
distMat2 = interPointDistance(projPoint2,n,d);
distMat3 = interPointDistance(projPoint3,n,d);
distMat10 = interPointDistance(projPoint10,n,d);
distMat50 = interPointDistance(projPoint50,n,d);
# Compare sqrt(k/d)-scaled original distances against projected distances
# (Johnson-Lindenstrauss-style expected shrinkage factor sqrt(k/200)).
num = np.sqrt(1.0/200);
diff1 = list((num*np.asmatrix(distMat))-np.asmatrix(distMat1));
num = np.sqrt(2.0/200);
diff2 = list((num*np.asmatrix(distMat))-np.asmatrix(distMat2));
num = np.sqrt(3.0/200);
diff3 = list((num*np.asmatrix(distMat))-np.asmatrix(distMat3));
num = np.sqrt(10.0/200);
diff10 = list((num*np.asmatrix(distMat))-np.asmatrix(distMat10));
num = np.sqrt(50.0/200);
diff50 = list((num*np.asmatrix(distMat))-np.asmatrix(distMat50));
# Write the five difference matrices to the report file.  The original code
# repeated the same write sequence five times; this data-driven loop emits
# byte-identical output (each section except the last is followed by three
# blank lines, and the file is closed after the final section).
target.write("Difference matrix is as follows:\n");
for k, diff in [(1, diff1), (2, diff2), (3, diff3), (10, diff10), (50, diff50)]:
    target.write("For k = %d" % k);
    target.write("\n");
    for row in diff:
        target.write(str(row));
        target.write("\n");
    if k != 50:
        target.write("\n");
        target.write("\n");
        target.write("\n");
target.close();
| mit |
pravsripad/mne-python | mne/stats/_adjacency.py | 4 | 3657 | # -*- coding: utf-8 -*-
# Authors: Eric Larson <larson.eric.d@gmail.com>
#
# License: Simplified BSD
import numpy as np
from ..utils import _validate_type, _check_option
from ..utils.check import int_like
def combine_adjacency(*structure):
    """Create a sparse binary adjacency/neighbors matrix.

    Parameters
    ----------
    *structure : list
        The adjacency along each dimension. Each entry can be:

        - ndarray or sparse matrix
            A square binary adjacency matrix for the given dimension.
        - int
            The number of elements along the given dimension. A lattice
            adjacency will be generated.

    Returns
    -------
    adjacency : scipy.sparse.coo_matrix, shape (n_features, n_features)
        The adjacency matrix.
    """
    from scipy import sparse
    # Normalize every entry to a COO matrix with zeroed diagonal; the
    # diagonal is re-added once at the very end to avoid duplicate entries.
    structure = list(structure)
    for di, dim in enumerate(structure):
        name = f'structure[{di}]'
        _validate_type(dim, ('int-like', np.ndarray, sparse.spmatrix), name)
        if isinstance(dim, int_like):
            dim = int(dim)
            # Build a 1-D lattice: node i connects to i-1 and i+1.
            # Don't add the diagonal, because we explicitly remove it later:
            # dim = sparse.eye(dim, format='coo')
            # dim += sparse.eye(dim.shape[0], k=1, format='coo')
            # dim += sparse.eye(dim.shape[0], k=-1, format='coo')
            ii, jj = np.arange(0, dim - 1), np.arange(1, dim)
            edges = np.vstack([np.hstack([ii, jj]), np.hstack([jj, ii])])
            dim = sparse.coo_matrix(
                (np.ones(edges.shape[1]), edges), (dim, dim), float)
        else:
            _check_option(f'{name}.ndim', dim.ndim, [2])
            if dim.shape[0] != dim.shape[1]:
                raise ValueError(
                    f'{name} must be square, got shape {dim.shape}')
            if not isinstance(dim, sparse.coo_matrix):
                dim = sparse.coo_matrix(dim)
            else:
                # copy so the caller's matrix is not mutated below
                dim = dim.copy()
            dim.data[dim.row == dim.col] = 0.  # remove diagonal, will add later
            dim.eliminate_zeros()
            if not (dim.data == 1).all():
                raise ValueError('All adjacency values must be 0 or 1')
        structure[di] = dim
    # list of coo
    assert all(isinstance(dim, sparse.coo_matrix) for dim in structure)
    # The combined graph lives on the Cartesian product of the dimensions:
    # each per-dimension edge is replicated across all other dimensions.
    shape = np.array([d.shape[0] for d in structure], int)
    # n_others[di] = number of index combinations of all dimensions != di
    n_others = np.array([np.prod(np.concatenate([shape[:di], shape[di + 1:]]))
                         for di in range(len(structure))], int)
    n_each = np.array([dim.data.size for dim in structure], int) * n_others
    n_off = n_each.sum()  # off-diagonal terms
    n_diag = np.prod(shape)
    # vertices maps a multi-index to its flat node id
    vertices = np.arange(n_diag).reshape(shape)
    edges = np.empty((2, n_off + n_diag), int)
    # `used` is a bookkeeping check that each slot is written exactly once
    used = np.zeros(n_off, bool)
    weights = np.empty(n_off + n_diag, float)  # even though just 0/1
    offset = 0
    for di, dim in enumerate(structure):
        # Fancy-index along dimension di with this dimension's edge rows/cols,
        # leaving full slices elsewhere, to enumerate all replicated edges.
        s_l = [slice(None)] * len(shape)
        s_r = [slice(None)] * len(shape)
        s_l[di] = dim.row
        s_r[di] = dim.col
        assert dim.row.shape == dim.col.shape == dim.data.shape
        sl = slice(offset, offset + n_each[di])
        edges[:, sl] = [vertices[tuple(s_l)].ravel(),
                        vertices[tuple(s_r)].ravel()]
        weights[sl] = np.tile(dim.data, n_others[di])
        offset += n_each[di]
        assert not used[sl].any()
        used[sl] = True
    assert used.all()
    # Handle the diagonal separately at the end to avoid duplicate entries
    edges[:, n_off:] = vertices.ravel()
    weights[n_off:] = 1.
    graph = sparse.coo_matrix((weights, edges),
                              (vertices.size, vertices.size))
    return graph
| bsd-3-clause |
santosfamilyfoundation/SantosGUI | application/utils/patch_multiprocess.py | 1 | 1225 |
import os
import sys
import multiprocess.forking as forking
def patch_multiprocess():
    """On Windows, monkey-patch multiprocess's Popen so child processes
    spawned from a frozen one-file executable can locate the unpacked
    resources (sys._MEIPASS is re-exported via the _MEIPASS2 env var;
    this matches the PyInstaller bootloader protocol -- NOTE(review):
    confirm against the bundler actually in use).
    No-op on non-Windows platforms.
    """
    if sys.platform.startswith('win'):
        # First define a modified version of Popen.
        class _Popen(forking.Popen):
            def __init__(self, *args, **kw):
                if hasattr(sys, 'frozen'):
                    # We have to set original _MEIPASS2 value from sys._MEIPASS
                    # to get --onefile mode working.
                    os.putenv('_MEIPASS2', sys._MEIPASS)
                try:
                    super(_Popen, self).__init__(*args, **kw)
                finally:
                    # Always scrub the variable afterwards so the parent's
                    # environment is not polluted for later spawns.
                    if hasattr(sys, 'frozen'):
                        # On some platforms (e.g. AIX) 'os.unsetenv()' is not
                        # available. In those cases we cannot delete the variable
                        # but only set it to the empty string. The bootloader
                        # can handle this case.
                        if hasattr(os, 'unsetenv'):
                            os.unsetenv('_MEIPASS2')
                        else:
                            os.putenv('_MEIPASS2', '')

        # Second override 'Popen' class with our modified version.
        forking.Popen = _Popen
| mit |
xiangel/hue | apps/zookeeper/setup.py | 38 | 1178 | # Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
from hueversion import VERSION
# Package metadata for the ZooKeeper Browser Hue app.  The
# 'desktop.sdk.application' entry point is how Hue discovers and mounts
# the app at runtime; sources live under src/ (see package_dir).
setup(
      name = "zookeeper",
      version = VERSION,
      author = "Hue",
      url = 'http://github.com/cloudera/hue',
      description = "ZooKeeper Browser",
      packages = find_packages('src'),
      package_dir = {'': 'src'},
      install_requires = ['setuptools', 'desktop'],
      entry_points = { 'desktop.sdk.application': 'zookeeper=zookeeper' },
)
| apache-2.0 |
scripnichenko/nova | nova/api/openstack/compute/floating_ips.py | 16 | 12822 | # Copyright 2011 OpenStack Foundation
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2011 Grid Dynamics
# Copyright 2011 Eldar Nugaev, Kirill Shileev, Ilya Alekseyev
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo_log import log as logging
from oslo_utils import uuidutils
import webob
from nova.api.openstack import common
from nova.api.openstack.compute.schemas import floating_ips
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import compute
from nova.compute import utils as compute_utils
from nova import exception
from nova.i18n import _
from nova.i18n import _LW
from nova import network
LOG = logging.getLogger(__name__)
ALIAS = 'os-floating-ips'
authorize = extensions.os_compute_authorizer(ALIAS)
def _translate_floating_ip_view(floating_ip):
result = {
'id': floating_ip['id'],
'ip': floating_ip['address'],
'pool': floating_ip['pool'],
}
try:
result['fixed_ip'] = floating_ip['fixed_ip']['address']
except (TypeError, KeyError, AttributeError):
result['fixed_ip'] = None
try:
result['instance_id'] = floating_ip['fixed_ip']['instance_uuid']
except (TypeError, KeyError, AttributeError):
result['instance_id'] = None
return {'floating_ip': result}
def _translate_floating_ips_view(floating_ips):
    """Build the plural API view by flattening each single-ip view."""
    views = []
    for fip in floating_ips:
        views.append(_translate_floating_ip_view(fip)['floating_ip'])
    return {'floating_ips': views}
def get_instance_by_floating_ip_addr(self, context, address):
    """Return the instance associated with *address*, or None if the
    floating ip exists but is not attached to any instance.

    Raises HTTPNotFound when the address is unknown and HTTPConflict when
    it maps to multiple instances.  ``self`` is any controller exposing
    ``network_api`` and ``compute_api`` (module-level helper by design).
    """
    try:
        instance_id =\
            self.network_api.get_instance_id_by_floating_address(
                context, address)
    except exception.FloatingIpNotFoundForAddress as ex:
        raise webob.exc.HTTPNotFound(explanation=ex.format_message())
    except exception.FloatingIpMultipleFoundForAddress as ex:
        raise webob.exc.HTTPConflict(explanation=ex.format_message())
    # Implicitly returns None when the floating ip is unassociated.
    if instance_id:
        return common.get_instance(self.compute_api, context, instance_id)
def disassociate_floating_ip(self, context, instance, address):
    """Detach *address* from *instance*, translating network-layer errors
    into HTTP 403 responses (module-level helper shared by controllers).
    """
    try:
        self.network_api.disassociate_floating_ip(context, instance, address)
    except exception.Forbidden:
        raise webob.exc.HTTPForbidden()
    except exception.CannotDisassociateAutoAssignedFloatingIP:
        # auto-assigned ips are managed by nova itself and must stay put
        msg = _('Cannot disassociate auto assigned floating ip')
        raise webob.exc.HTTPForbidden(explanation=msg)
class FloatingIPController(object):
    """The Floating IPs API controller for the OpenStack API."""

    def __init__(self):
        # skip_policy_check=True: authorization is done per-request via
        # the module-level authorize() helper instead.
        self.compute_api = compute.API(skip_policy_check=True)
        self.network_api = network.API(skip_policy_check=True)
        super(FloatingIPController, self).__init__()

    @extensions.expected_errors((400, 404))
    def show(self, req, id):
        """Return data about the given floating ip."""
        context = req.environ['nova.context']
        authorize(context)

        try:
            floating_ip = self.network_api.get_floating_ip(context, id)
        except (exception.NotFound, exception.FloatingIpNotFound):
            msg = _("Floating ip not found for id %s") % id
            raise webob.exc.HTTPNotFound(explanation=msg)
        except exception.InvalidID as e:
            raise webob.exc.HTTPBadRequest(explanation=e.format_message())

        return _translate_floating_ip_view(floating_ip)

    @extensions.expected_errors(())
    def index(self, req):
        """Return a list of floating ips allocated to a project."""
        context = req.environ['nova.context']
        authorize(context)

        floating_ips = self.network_api.get_floating_ips_by_project(context)

        return _translate_floating_ips_view(floating_ips)

    @extensions.expected_errors((400, 403, 404))
    def create(self, req, body=None):
        """Allocate a floating ip, optionally from a named pool."""
        context = req.environ['nova.context']
        authorize(context)

        pool = None
        if body and 'pool' in body:
            pool = body['pool']

        try:
            address = self.network_api.allocate_floating_ip(context, pool)
            ip = self.network_api.get_floating_ip_by_address(context, address)
        except exception.NoMoreFloatingIps:
            # pool exhaustion -> 404, quota exhaustion (below) -> 403
            if pool:
                msg = _("No more floating ips in pool %s.") % pool
            else:
                msg = _("No more floating ips available.")
            raise webob.exc.HTTPNotFound(explanation=msg)
        except exception.FloatingIpLimitExceeded:
            if pool:
                msg = _("IP allocation over quota in pool %s.") % pool
            else:
                msg = _("IP allocation over quota.")
            raise webob.exc.HTTPForbidden(explanation=msg)
        except exception.FloatingIpPoolNotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())
        except exception.FloatingIpBadRequest as e:
            raise webob.exc.HTTPBadRequest(explanation=e.format_message())

        return _translate_floating_ip_view(ip)

    @wsgi.response(202)
    @extensions.expected_errors((400, 403, 404, 409))
    def delete(self, req, id):
        """Disassociate (if needed) and release the floating ip."""
        context = req.environ['nova.context']
        authorize(context)

        # get the floating ip object
        try:
            floating_ip = self.network_api.get_floating_ip(context, id)
        except (exception.NotFound, exception.FloatingIpNotFound):
            msg = _("Floating ip not found for id %s") % id
            raise webob.exc.HTTPNotFound(explanation=msg)
        except exception.InvalidID as e:
            raise webob.exc.HTTPBadRequest(explanation=e.format_message())

        address = floating_ip['address']

        # get the associated instance object (if any)
        instance = get_instance_by_floating_ip_addr(self, context, address)
        try:
            self.network_api.disassociate_and_release_floating_ip(
                context, instance, floating_ip)
        except exception.Forbidden:
            raise webob.exc.HTTPForbidden()
        except exception.CannotDisassociateAutoAssignedFloatingIP:
            msg = _('Cannot disassociate auto assigned floating ip')
            raise webob.exc.HTTPForbidden(explanation=msg)
class FloatingIPActionController(wsgi.Controller):
    """Server actions (addFloatingIp / removeFloatingIp) for floating ips."""

    def __init__(self, *args, **kwargs):
        super(FloatingIPActionController, self).__init__(*args, **kwargs)
        self.compute_api = compute.API(skip_policy_check=True)
        self.network_api = network.API(skip_policy_check=True)

    @extensions.expected_errors((400, 403, 404))
    @wsgi.action('addFloatingIp')
    @validation.schema(floating_ips.add_floating_ip)
    def _add_floating_ip(self, req, id, body):
        """Associate floating_ip to an instance."""
        context = req.environ['nova.context']
        authorize(context)

        address = body['addFloatingIp']['address']

        instance = common.get_instance(self.compute_api, context, id)
        cached_nwinfo = compute_utils.get_nw_info_for_instance(instance)
        if not cached_nwinfo:
            LOG.warning(
                _LW('Info cache is %r during associate') % instance.info_cache,
                instance=instance)
            msg = _('No nw_info cache associated with instance')
            raise webob.exc.HTTPBadRequest(explanation=msg)

        fixed_ips = cached_nwinfo.fixed_ips()
        if not fixed_ips:
            msg = _('No fixed ips associated to instance')
            raise webob.exc.HTTPBadRequest(explanation=msg)

        fixed_address = None
        if 'fixed_address' in body['addFloatingIp']:
            # caller pinned a specific fixed ip; verify it belongs to the
            # instance (for/else: 'else' runs only if no break occurred)
            fixed_address = body['addFloatingIp']['fixed_address']
            for fixed in fixed_ips:
                if fixed['address'] == fixed_address:
                    break
            else:
                msg = _('Specified fixed address not assigned to instance')
                raise webob.exc.HTTPBadRequest(explanation=msg)

        if not fixed_address:
            # otherwise pick the first IPv4 fixed ip on the instance
            try:
                fixed_address = next(ip['address'] for ip in fixed_ips
                                     if netaddr.valid_ipv4(ip['address']))
            except StopIteration:
                msg = _('Unable to associate floating ip %(address)s '
                        'to any fixed IPs for instance %(id)s. '
                        'Instance has no fixed IPv4 addresses to '
                        'associate.') % (
                    {'address': address, 'id': id})
                raise webob.exc.HTTPBadRequest(explanation=msg)
            if len(fixed_ips) > 1:
                LOG.warning(_LW('multiple fixed_ips exist, using the first '
                                'IPv4 fixed_ip: %s'), fixed_address)

        try:
            self.network_api.associate_floating_ip(context, instance,
                                                   floating_address=address,
                                                   fixed_address=fixed_address)
        except exception.FloatingIpAssociated:
            msg = _('floating ip is already associated')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except exception.NoFloatingIpInterface:
            msg = _('l3driver call to add floating ip failed')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except exception.InstanceUnknownCell as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())
        except exception.FloatingIpNotFoundForAddress:
            msg = _('floating ip not found')
            raise webob.exc.HTTPNotFound(explanation=msg)
        except exception.Forbidden as e:
            raise webob.exc.HTTPForbidden(explanation=e.format_message())
        except Exception as e:
            # catch-all: log the real failure, surface a generic 400
            msg = _('Unable to associate floating ip %(address)s to '
                    'fixed ip %(fixed_address)s for instance %(id)s. '
                    'Error: %(error)s') % (
                {'address': address, 'fixed_address': fixed_address,
                 'id': id, 'error': e})
            LOG.exception(msg)
            raise webob.exc.HTTPBadRequest(explanation=msg)

        return webob.Response(status_int=202)

    @extensions.expected_errors((400, 403, 404, 409))
    @wsgi.action('removeFloatingIp')
    @validation.schema(floating_ips.remove_floating_ip)
    def _remove_floating_ip(self, req, id, body):
        """Dissociate floating_ip from an instance."""
        context = req.environ['nova.context']
        authorize(context)

        address = body['removeFloatingIp']['address']

        # get the floating ip object
        try:
            floating_ip = self.network_api.get_floating_ip_by_address(context,
                                                                      address)
        except exception.FloatingIpNotFoundForAddress:
            msg = _("floating ip not found")
            raise webob.exc.HTTPNotFound(explanation=msg)

        # get the associated instance object (if any)
        instance = get_instance_by_floating_ip_addr(self, context, address)

        # disassociate if associated
        # NOTE(review): the `cond and [a] or [b]` construct below is the
        # pre-PEP-308 ternary idiom (the single-element lists keep a falsy
        # `a` from short-circuiting incorrectly); it selects the uuid or id
        # comparison depending on what kind of identifier was supplied.
        if (instance and
            floating_ip.get('fixed_ip_id') and
            (uuidutils.is_uuid_like(id) and
             [instance.uuid == id] or
             [instance.id == id])[0]):
            try:
                disassociate_floating_ip(self, context, instance, address)
            except exception.FloatingIpNotAssociated:
                msg = _('Floating ip is not associated')
                raise webob.exc.HTTPBadRequest(explanation=msg)
            return webob.Response(status_int=202)
        else:
            msg = _("Floating ip %(address)s is not associated with instance "
                    "%(id)s.") % {'address': address, 'id': id}
            raise webob.exc.HTTPConflict(explanation=msg)
class FloatingIps(extensions.V21APIExtensionBase):
    """Floating IPs support."""

    name = "FloatingIps"
    alias = ALIAS
    version = 1

    def get_resources(self):
        # Mount the CRUD controller at /os-floating-ips.
        resource = [extensions.ResourceExtension(ALIAS,
                                                 FloatingIPController())]
        return resource

    def get_controller_extensions(self):
        # Attach the add/removeFloatingIp server actions to /servers.
        controller = FloatingIPActionController()
        extension = extensions.ControllerExtension(self, 'servers', controller)
        return [extension]
| apache-2.0 |
NMGRL/pychron | pychron/furnace/tasks/nmgrl/panes.py | 2 | 12428 | # ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from __future__ import absolute_import
from threading import Thread
from enable.component_editor import ComponentEditor
from pyface.tasks.traits_dock_pane import TraitsDockPane
from pyface.tasks.traits_task_pane import TraitsTaskPane
from traits.api import Button, Bool, Str
from traitsui.api import View, Item, UItem, VGroup, HGroup, EnumEditor, spring, \
ButtonEditor, Tabbed
from pychron.core.ui.custom_label_editor import CustomLabel
from pychron.core.ui.lcd_editor import LCDEditor
from pychron.core.ui.led_editor import LEDEditor
from pychron.envisage.icon_button_editor import icon_button_editor
class ControlPane(TraitsDockPane):
name = 'Controls'
id = 'pychron.nmgrlfurnace.controls'
dump_sample_button = Button('Load')
fire_magnets_button = Button('Dump')
jitter_button = Button
jitter_label = Str('Start')
jittering = Bool
configure_jitter_button = Button
configure_dump_button = Button
refresh_states_button = Button('Refresh')
funnel_up_button = Button
funnel_down_button = Button
force_funnel_up_button = Button
funnel_set_home_button = Button('Set Home')
feeder_set_home_button = Button('Set Home')
toggle_advanced_view_button = Button
_advanced_view_state = Bool(False)
disable_button = Button
feeder_slew_positive = Button
feeder_slew_negative = Button
feeder_stop_button = Button
clear_sample_states_button = Button('Clear Dumped Samples')
    def _feeder_slew_positive_fired(self):
        # Jog the sample feeder forward (continuous slew until stopped).
        self.model.stage_manager.feeder_slew(1)

    def _feeder_slew_negative_fired(self):
        # Jog the sample feeder in reverse.
        self.model.stage_manager.feeder_slew(-1)

    def _feeder_stop_button_fired(self):
        # Halt any in-progress feeder slew.
        self.model.stage_manager.feeder_stop()

    def _feeder_set_home_button_fired(self):
        # Define the feeder's current position as its home reference.
        self.model.stage_manager.feeder.set_home()

    def _disable_button_fired(self):
        # "Disable" the furnace by commanding a zero-degree setpoint.
        self.model.setpoint = 0

    def _funnel_set_home_button_fired(self):
        # Define the funnel's current position as its home reference.
        self.model.funnel.set_home()
def _force_funnel_up_fired(self):
def func():
self.model.raise_funnel(force=True)
t = Thread(target=func)
t.start()
    def _funnel_up_button_fired(self):
        # Raise the funnel asynchronously so the GUI thread is not blocked
        # while the hardware moves.
        def func():
            self.model.raise_funnel()

        t = Thread(target=func)
        t.start()

    def _funnel_down_button_fired(self):
        # Lower the funnel asynchronously (same pattern as raise).
        def func():
            self.model.lower_funnel()

        t = Thread(target=func)
        t.start()
    def _dump_sample_button_fired(self):
        # Load/dump the current sample into the furnace.
        self.model.dump_sample()

    def _fire_magnets_button_fired(self):
        # Trigger the dump magnets directly.
        self.model.fire_magnets()

    def _jitter_button_fired(self):
        # Toggle feeder jitter: the same button starts and stops it, with
        # the label mirroring the *next* action ('Start' <-> 'Stop').
        if not self.jittering:
            self.model.start_jitter_feeder()
            self.jitter_label = 'Stop'
        else:
            self.model.stop_jitter_feeder()
            self.jitter_label = 'Start'
        self.jittering = not self.jittering
    def _configure_dump_button_fired(self):
        # Open the dump configuration dialog on the model.
        self.model.configure_dump()

    def _configure_jitter_button_fired(self):
        # Open the feeder-jitter configuration dialog on the model.
        self.model.configure_jitter_feeder()

    def _toggle_advanced_view_button_fired(self):
        # Show/hide the advanced position controls in the view.
        self._advanced_view_state = not self._advanced_view_state

    def _refresh_states_button_fired(self):
        # Re-query hardware/sample state indicators.
        self.model.refresh_states()

    def _clear_sample_states_button_fired(self):
        # Reset the record of which samples have been dumped.
        self.model.clear_sample_states()
def trait_context(self):
    """Expose additional objects to the view's extended trait names
    (e.g. ``pane.disable_button``, ``stage_manager.feeder.position``)."""
    return {'object': self.model,
            'pane': self,
            'tray_manager': self.model.stage_manager.tray_calibration_manager,
            'stage_manager': self.model.stage_manager}
def traits_view(self):
    """Assemble the main furnace view: a Controller group on top and
    tabbed Dumper/Graph panes beneath it."""
    # cali_grp = VGroup(UItem('tray_manager.calibrate',
    #                         enabled_when='stage_manager.stage_map_name',
    #                         editor=ButtonEditor(label_value='tray_manager.calibration_step')),
    #                   HGroup(Readonly('tray_manager.x', format_str='%0.3f'),
    #                          Readonly('tray_manager.y', format_str='%0.3f')),
    #                   Readonly('tray_manager.rotation', format_str='%0.3f'),
    #                   Readonly('tray_manager.scale', format_str='%0.4f'),
    #                   Readonly('tray_manager.error', format_str='%0.2f'),
    #                   UItem('tray_manager.calibrator', style='custom', editor=InstanceEditor()),
    #                   CustomLabel('tray_manager.calibration_help',
    #                               color='green',
    #                               height=75, width=300),
    #
    #                   show_border=True, label='Calibration')

    # Controller: setpoint entry, water-flow LED, disable button and the
    # temperature readback LCD plus record start/stop buttons.
    c_grp = VGroup(HGroup(Item('setpoint'),
                          UItem('water_flow_state', editor=LEDEditor(label='H2O Flow')),
                          spring, icon_button_editor('pane.disable_button', 'cancel')),
                   VGroup(UItem('temperature_readback', editor=LCDEditor())),
                   icon_button_editor('start_record_button', 'media-record',
                                      tooltip='Start recording',
                                      enabled_when='not _recording'),
                   icon_button_editor('stop_record_button',
                                      'media-playback-stop',
                                      tooltip='Stop recording',
                                      enabled_when='_recording'),
                   label='Controller', show_border=True)

    # Feeder positioning; the advanced sub-group is hidden until toggled.
    feeder_grp = VGroup(HGroup(Item('stage_manager.calibrated_position_entry', label='Hole'),
                               icon_button_editor('pane.toggle_advanced_view_button', 'cog')),
                        VGroup(Item('stage_manager.feeder.position', label='Position (units)'),
                               Item('stage_manager.feeder.velocity'),
                               Item('pane.feeder_set_home_button'),
                               HGroup(icon_button_editor('pane.feeder_slew_positive', 'arrow_left'),
                                      icon_button_editor('pane.feeder_slew_negative', 'arrow_right'),
                                      icon_button_editor('pane.feeder_stop_button', 'cancel')),
                               visible_when='pane._advanced_view_state'),
                        show_border=True, label='Position')

    funnel_grp = VGroup(HGroup(icon_button_editor('pane.funnel_up_button', 'arrow_up',
                                                  enabled_when='funnel_up_enabled', tooltip='Raise Funnel'),
                               UItem('pane.force_funnel_up_button', tooltip='Force funnel to raise'),
                               icon_button_editor('pane.funnel_down_button', 'arrow_down', tooltip='Lower Funnel',
                                                  enabled_when='funnel_down_enabled')),
                        UItem('pane.funnel_set_home_button'),
                        show_border=True, label='Funnel')

    jitter_grp = HGroup(UItem('pane.jitter_button', editor=ButtonEditor(label_value='pane.jitter_label')),
                        icon_button_editor('pane.configure_jitter_button', 'cog', tooltip='Configure Jitter'),
                        show_border=True, label='Jitter')

    dump_grp = HGroup(UItem('pane.dump_sample_button',
                            enabled_when='dump_sample_enabled',
                            tooltip='Execute the complete sample loading procedure'),
                      UItem('pane.fire_magnets_button',
                            enabled_when='not magnets_firing',
                            tooltip='Execute the magnet sequence'),
                      UItem('pane.clear_sample_states_button'),
                      icon_button_editor('pane.configure_dump_button', 'cog', tooltip='Configure Dumping'),
                      show_border=True, label='Dump')

    status_grp = HGroup(CustomLabel('status_txt', size=14))

    # Dumper tab: controls on the left, canvas on the right.
    d1 = VGroup(status_grp,
                feeder_grp, funnel_grp, jitter_grp, dump_grp)
    d2 = VGroup(
        # UItem('pane.refresh_states_button'),
        UItem('dumper_canvas', editor=ComponentEditor()))
    d_grp = HGroup(d1, d2, label='Dumper', show_border=True)

    # v_grp = VGroup(UItem('video_canvas', editor=VideoComponentEditor()),
    #                visible_when='video_enabled',
    #                label='Camera')

    # Graph tab: scan-width/scale controls plus record & snapshot buttons.
    g_grp = VGroup(Item('graph_scan_width', label='Scan Width (mins)'),
                   HGroup(Item('graph_scale', label='Scale'),
                          Item('graph_y_auto', label='Autoscale Y'),
                          Item('graph_ymax', label='Max', format_str='%0.3f', enabled_when='not graph_y_auto'),
                          Item('graph_ymin', label='Min', format_str='%0.3f', enabled_when='not graph_y_auto')),
                   HGroup(icon_button_editor('clear_button', 'clear',
                                             tooltip='Clear and reset graph'), spring),
                   HGroup(icon_button_editor('start_record_button', 'media-record',
                                             tooltip='Start recording',
                                             enabled_when='not _recording'),
                          icon_button_editor('stop_record_button',
                                             'media-playback-stop',
                                             tooltip='Stop recording',
                                             enabled_when='_recording'),
                          icon_button_editor('add_marker_button', 'flag',
                                             enabled_when='_recording'),
                          show_border=True,
                          label='Record Scan'),
                   HGroup(icon_button_editor('snapshot_button', 'camera'),
                          show_border=True, label='Snapshot', ),
                   label='Graph')
    v = View(VGroup(c_grp,
                    HGroup(Tabbed(d_grp, g_grp))))
    return v
class FurnacePane(TraitsTaskPane):
    """Central task pane showing the live graph and the stage canvas."""

    def trait_context(self):
        """Expose pane/stage/tray objects to extended trait names in the view."""
        return {'object': self.model,
                'pane': self,
                'tray_manager': self.model.stage_manager.tray_calibration_manager,
                'stage_manager': self.model.stage_manager}

    def traits_view(self):
        # Stage-map selector above the stage canvas.
        canvas_grp = VGroup(
            HGroup(UItem('stage_manager.stage_map_name', editor=EnumEditor(name='stage_manager.stage_map_names')),
                   spring),
            UItem('stage_manager.canvas', style='custom', editor=ComponentEditor()))

        v = View(VGroup(UItem('graph', style='custom'), canvas_grp))
        return v
class ExperimentFurnacePane(TraitsDockPane):
    """Dock pane exposing minimal furnace controller state during an experiment."""
    name = 'Furnace'
    id = 'pychron.experiment.furnace'

    disable_button = Button

    def _disable_button_fired(self):
        # A setpoint of 0 disables the furnace controller output.
        self.model.setpoint = 0

    def traits_view(self):
        c_grp = VGroup(HGroup(Item('setpoint'),
                              UItem('water_flow_state', editor=LEDEditor(label='H2O Flow')),
                              spring, icon_button_editor('pane.disable_button', 'cancel'),
                              Item('verbose_scan', label='Verbose Logging')),
                       VGroup(UItem('temperature_readback', editor=LCDEditor(width=100, height=50))),
                       label='Controller', show_border=True)
        v = View(c_grp)
        return v
# ============= EOF =============================================
| apache-2.0 |
tempbottle/h-store | third_party/python/boto/s3/bucketlistresultset.py | 11 | 5787 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
def bucket_lister(bucket, prefix='', delimiter='', marker='', headers=None):
    """Yield every key in *bucket*, transparently following result pages."""
    last_key = None
    while True:
        page = bucket.get_all_keys(prefix=prefix, marker=marker,
                                   delimiter=delimiter, headers=headers)
        for last_key in page:
            yield last_key
        # Resume the next request from the last key seen on this page.
        if last_key:
            marker = last_key.name
        if not page.is_truncated:
            break
class BucketListResultSet:
    """Iterable over all keys within a bucket.

    Each iteration delegates to the :func:`bucket_lister` generator, which
    pages through S3 results behind the scenes, so buckets with many
    thousands of keys can be traversed efficiently.
    """

    def __init__(self, bucket=None, prefix='', delimiter='', marker='', headers=None):
        # Remember the listing parameters; they are re-used on every
        # fresh iteration.
        self.bucket, self.prefix = bucket, prefix
        self.delimiter, self.marker = delimiter, marker
        self.headers = headers

    def __iter__(self):
        return bucket_lister(self.bucket,
                             prefix=self.prefix,
                             delimiter=self.delimiter,
                             marker=self.marker,
                             headers=self.headers)
def versioned_bucket_lister(bucket, prefix='', delimiter='',
                            key_marker='', version_id_marker='', headers=None):
    """Yield every object version in *bucket*, following result pages."""
    while True:
        page = bucket.get_all_versions(prefix=prefix, key_marker=key_marker,
                                       version_id_marker=version_id_marker,
                                       delimiter=delimiter, headers=headers)
        for version in page:
            yield version
        # Version listings supply explicit continuation markers.
        key_marker = page.next_key_marker
        version_id_marker = page.next_version_id_marker
        if not page.is_truncated:
            break
class VersionedBucketListResultSet:
    """Iterable over all object versions within a bucket.

    Each iteration delegates to the :func:`versioned_bucket_lister`
    generator, which pages through S3 results behind the scenes, so very
    large version listings can be traversed efficiently.
    """

    def __init__(self, bucket=None, prefix='', delimiter='', key_marker='',
                 version_id_marker='', headers=None):
        # Remember the listing parameters for re-use on every iteration.
        self.bucket, self.prefix = bucket, prefix
        self.delimiter = delimiter
        self.key_marker = key_marker
        self.version_id_marker = version_id_marker
        self.headers = headers

    def __iter__(self):
        return versioned_bucket_lister(self.bucket,
                                       prefix=self.prefix,
                                       delimiter=self.delimiter,
                                       key_marker=self.key_marker,
                                       version_id_marker=self.version_id_marker,
                                       headers=self.headers)
def multipart_upload_lister(bucket, key_marker='',
                            upload_id_marker='',
                            headers=None):
    """Yield every in-progress multipart upload in *bucket*, page by page."""
    while True:
        page = bucket.get_all_multipart_uploads(key_marker=key_marker,
                                                upload_id_marker=upload_id_marker,
                                                headers=headers)
        for upload in page:
            yield upload
        # Multipart listings supply explicit continuation markers.
        key_marker = page.next_key_marker
        upload_id_marker = page.next_upload_id_marker
        if not page.is_truncated:
            break
class MultiPartUploadListResultSet:
    """Iterable over all multipart uploads within a bucket.

    Each iteration delegates to the :func:`multipart_upload_lister`
    generator, which pages through S3 results behind the scenes, so very
    large upload listings can be traversed efficiently.
    """

    def __init__(self, bucket=None, key_marker='',
                 upload_id_marker='', headers=None):
        # Remember the listing parameters for re-use on every iteration.
        self.bucket = bucket
        self.key_marker, self.upload_id_marker = key_marker, upload_id_marker
        self.headers = headers

    def __iter__(self):
        return multipart_upload_lister(self.bucket,
                                       key_marker=self.key_marker,
                                       upload_id_marker=self.upload_id_marker,
                                       headers=self.headers)
| gpl-3.0 |
tiagochiavericosta/edx-platform | common/djangoapps/service_status/test.py | 132 | 1427 | """Test for async task service status"""
from django.utils import unittest
from django.test.client import Client
from django.core.urlresolvers import reverse
import json
class CeleryConfigTest(unittest.TestCase):
    """
    Test that we can get a response from Celery
    """

    def setUp(self):
        """
        Create a django test client
        """
        super(CeleryConfigTest, self).setUp()
        self.client = Client()
        # URL of the celery ping service-status endpoint.
        self.ping_url = reverse('status.service.celery.ping')

    def test_ping(self):
        """
        Try to ping celery.
        """
        # Access the service status page, which starts a delayed
        # asynchronous task
        response = self.client.get(self.ping_url)

        # HTTP response should be successful
        self.assertEqual(response.status_code, 200)

        # Expect to get a JSON-serialized dict with
        # task and time information
        result_dict = json.loads(response.content)

        # Was it successful?
        self.assertTrue(result_dict['success'])

        # We should get a "pong" message back
        self.assertEqual(result_dict['value'], "pong")

        # We don't know the other dict values exactly,
        # but we can assert that they take the right form
        self.assertIsInstance(result_dict['task_id'], unicode)
        self.assertIsInstance(result_dict['time'], float)
        self.assertTrue(result_dict['time'] > 0.0)
nelmiux/CarnotKE | jyhton/lib-python/2.7/test/test_logging.py | 7 | 60266 | #!/usr/bin/env python
#
# Copyright 2001-2012 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2012 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import cPickle
import cStringIO
import gc
import json
import os
import random
import re
import select
import socket
from SocketServer import ThreadingTCPServer, StreamRequestHandler
import struct
import sys
import tempfile
from test.test_support import captured_stdout, run_with_locale, run_unittest
import textwrap
import time
import unittest
import warnings
import weakref
try:
import threading
except ImportError:
threading = None
class BaseTest(unittest.TestCase):

    """Base class for logging tests."""

    log_format = "%(name)s -> %(levelname)s: %(message)s"
    expected_log_pat = r"^([\w.]+) -> ([\w]+): ([\d]+)$"
    message_num = 0

    def setUp(self):
        """Setup the default logging stream to an internal StringIO instance,
        so that we can examine log output as we want."""
        logger_dict = logging.getLogger().manager.loggerDict
        # Snapshot the module-level logging state under the module lock so
        # tearDown() can restore it verbatim.
        logging._acquireLock()
        try:
            self.saved_handlers = logging._handlers.copy()
            self.saved_handler_list = logging._handlerList[:]
            self.saved_loggers = logger_dict.copy()
            self.saved_level_names = logging._levelNames.copy()
        finally:
            logging._releaseLock()

        # Set two unused loggers: one non-ASCII and one Unicode.
        # This is to test correct operation when sorting existing
        # loggers in the configuration code. See issue 8201.
        logging.getLogger("\xab\xd7\xbb")
        logging.getLogger(u"\u013f\u00d6\u0047")

        self.root_logger = logging.getLogger("")
        self.original_logging_level = self.root_logger.getEffectiveLevel()

        # Route all root output into an in-memory stream for assertions.
        self.stream = cStringIO.StringIO()
        self.root_logger.setLevel(logging.DEBUG)
        self.root_hdlr = logging.StreamHandler(self.stream)
        self.root_formatter = logging.Formatter(self.log_format)
        self.root_hdlr.setFormatter(self.root_formatter)
        self.root_logger.addHandler(self.root_hdlr)

    def tearDown(self):
        """Remove our logging stream, and restore the original logging
        level."""
        self.stream.close()
        self.root_logger.removeHandler(self.root_hdlr)
        while self.root_logger.handlers:
            h = self.root_logger.handlers[0]
            self.root_logger.removeHandler(h)
            h.close()
        self.root_logger.setLevel(self.original_logging_level)
        # Restore the snapshots taken in setUp() under the module lock.
        logging._acquireLock()
        try:
            logging._levelNames.clear()
            logging._levelNames.update(self.saved_level_names)
            logging._handlers.clear()
            logging._handlers.update(self.saved_handlers)
            logging._handlerList[:] = self.saved_handler_list
            loggerDict = logging.getLogger().manager.loggerDict
            loggerDict.clear()
            loggerDict.update(self.saved_loggers)
        finally:
            logging._releaseLock()

    def assert_log_lines(self, expected_values, stream=None):
        """Match the collected log lines against the regular expression
        self.expected_log_pat, and compare the extracted group values to
        the expected_values list of tuples."""
        stream = stream or self.stream
        pat = re.compile(self.expected_log_pat)
        try:
            stream.reset()
            actual_lines = stream.readlines()
        except AttributeError:
            # StringIO.StringIO lacks a reset() method.
            actual_lines = stream.getvalue().splitlines()
        self.assertEqual(len(actual_lines), len(expected_values))
        for actual, expected in zip(actual_lines, expected_values):
            match = pat.search(actual)
            if not match:
                self.fail("Log line does not match expected pattern:\n" +
                          actual)
            self.assertEqual(tuple(match.groups()), expected)
        # Ensure there is no unexpected trailing output.
        s = stream.read()
        if s:
            self.fail("Remaining output at end of log stream:\n" + s)

    def next_message(self):
        """Generate a message consisting solely of an auto-incrementing
        integer."""
        self.message_num += 1
        return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):

    """Test builtin levels and their inheritance."""

    def test_flat(self):
        # Logging levels in a flat logger namespace.  Messages auto-increment
        # (see BaseTest.next_message), so the numbers in assert_log_lines
        # verify exactly which calls were emitted and which were filtered.
        m = self.next_message

        ERR = logging.getLogger("ERR")
        ERR.setLevel(logging.ERROR)
        INF = logging.getLogger("INF")
        INF.setLevel(logging.INFO)
        DEB = logging.getLogger("DEB")
        DEB.setLevel(logging.DEBUG)

        # These should log.
        ERR.log(logging.CRITICAL, m())
        ERR.error(m())

        INF.log(logging.CRITICAL, m())
        INF.error(m())
        INF.warn(m())
        INF.info(m())

        DEB.log(logging.CRITICAL, m())
        DEB.error(m())
        DEB.warn (m())
        DEB.info (m())
        DEB.debug(m())

        # These should not log.
        ERR.warn(m())
        ERR.info(m())
        ERR.debug(m())

        INF.debug(m())

        self.assert_log_lines([
            ('ERR', 'CRITICAL', '1'),
            ('ERR', 'ERROR', '2'),
            ('INF', 'CRITICAL', '3'),
            ('INF', 'ERROR', '4'),
            ('INF', 'WARNING', '5'),
            ('INF', 'INFO', '6'),
            ('DEB', 'CRITICAL', '7'),
            ('DEB', 'ERROR', '8'),
            ('DEB', 'WARNING', '9'),
            ('DEB', 'INFO', '10'),
            ('DEB', 'DEBUG', '11'),
        ])

    def test_nested_explicit(self):
        # Logging levels in a nested namespace, all explicitly set.
        m = self.next_message

        INF = logging.getLogger("INF")
        INF.setLevel(logging.INFO)
        INF_ERR = logging.getLogger("INF.ERR")
        INF_ERR.setLevel(logging.ERROR)

        # These should log.
        INF_ERR.log(logging.CRITICAL, m())
        INF_ERR.error(m())

        # These should not log.
        INF_ERR.warn(m())
        INF_ERR.info(m())
        INF_ERR.debug(m())

        self.assert_log_lines([
            ('INF.ERR', 'CRITICAL', '1'),
            ('INF.ERR', 'ERROR', '2'),
        ])

    def test_nested_inherited(self):
        # Logging levels in a nested namespace, inherited from parent loggers.
        m = self.next_message

        INF = logging.getLogger("INF")
        INF.setLevel(logging.INFO)
        INF_ERR = logging.getLogger("INF.ERR")
        INF_ERR.setLevel(logging.ERROR)
        # These loggers have no explicit level, so they inherit the
        # effective level of their nearest configured ancestor.
        INF_UNDEF = logging.getLogger("INF.UNDEF")
        INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
        UNDEF = logging.getLogger("UNDEF")

        # These should log.
        INF_UNDEF.log(logging.CRITICAL, m())
        INF_UNDEF.error(m())
        INF_UNDEF.warn(m())
        INF_UNDEF.info(m())
        INF_ERR_UNDEF.log(logging.CRITICAL, m())
        INF_ERR_UNDEF.error(m())

        # These should not log.
        INF_UNDEF.debug(m())
        INF_ERR_UNDEF.warn(m())
        INF_ERR_UNDEF.info(m())
        INF_ERR_UNDEF.debug(m())

        self.assert_log_lines([
            ('INF.UNDEF', 'CRITICAL', '1'),
            ('INF.UNDEF', 'ERROR', '2'),
            ('INF.UNDEF', 'WARNING', '3'),
            ('INF.UNDEF', 'INFO', '4'),
            ('INF.ERR.UNDEF', 'CRITICAL', '5'),
            ('INF.ERR.UNDEF', 'ERROR', '6'),
        ])

    def test_nested_with_virtual_parent(self):
        # Logging levels when some parent does not exist yet.
        m = self.next_message

        INF = logging.getLogger("INF")
        # Create the grandchild before its parent to exercise placeholder
        # (virtual) logger handling.
        GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
        CHILD = logging.getLogger("INF.BADPARENT")
        INF.setLevel(logging.INFO)

        # These should log.
        GRANDCHILD.log(logging.FATAL, m())
        GRANDCHILD.info(m())
        CHILD.log(logging.FATAL, m())
        CHILD.info(m())

        # These should not log.
        GRANDCHILD.debug(m())
        CHILD.debug(m())

        self.assert_log_lines([
            ('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
            ('INF.BADPARENT.UNDEF', 'INFO', '2'),
            ('INF.BADPARENT', 'CRITICAL', '3'),
            ('INF.BADPARENT', 'INFO', '4'),
        ])

    def test_invalid_name(self):
        # Logger names must be strings; a callable (here the builtin
        # ``any``) must be rejected.
        self.assertRaises(TypeError, logging.getLogger, any)
class BasicFilterTest(BaseTest):

    """Test the bundled Filter class."""

    def test_filter(self):
        # Only messages satisfying the specified criteria pass through the
        # filter: a Filter("spam.eggs") passes "spam.eggs" itself and its
        # descendants, but not siblings or ancestors.
        filter_ = logging.Filter("spam.eggs")
        handler = self.root_logger.handlers[0]
        try:
            handler.addFilter(filter_)
            spam = logging.getLogger("spam")
            spam_eggs = logging.getLogger("spam.eggs")
            spam_eggs_fish = logging.getLogger("spam.eggs.fish")
            spam_bakedbeans = logging.getLogger("spam.bakedbeans")

            spam.info(self.next_message())
            spam_eggs.info(self.next_message())  # Good.
            spam_eggs_fish.info(self.next_message())  # Good.
            spam_bakedbeans.info(self.next_message())

            self.assert_log_lines([
                ('spam.eggs', 'INFO', '2'),
                ('spam.eggs.fish', 'INFO', '3'),
            ])
        finally:
            handler.removeFilter(filter_)
#
#   First, we define our levels. There can be as many as you want - the only
#     limitations are that they should be integers, the lowest should be > 0 and
#   larger values mean less information being logged. If you need specific
#   level values which do not fit into these limitations, you can use a
#   mapping dictionary to convert between your application levels and the
#   logging system.
#
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111

# Inclusive range of all custom levels defined above.
LEVEL_RANGE = range(BORING, SILENT + 1)

#
#   Next, we define names for our levels. You don't need to do this - in which
#   case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
    SILENT: 'Silent',
    TACITURN: 'Taciturn',
    TERSE: 'Terse',
    EFFUSIVE: 'Effusive',
    SOCIABLE: 'Sociable',
    VERBOSE: 'Verbose',
    TALKATIVE: 'Talkative',
    GARRULOUS: 'Garrulous',
    CHATTERBOX: 'Chatterbox',
    BORING: 'Boring',
}
class GarrulousFilter(logging.Filter):

    """A filter that rejects records logged at exactly the GARRULOUS level."""

    def filter(self, record):
        # Drop only GARRULOUS records; everything else passes through.
        if record.levelno == GARRULOUS:
            return False
        return True
class VerySpecificFilter(logging.Filter):

    """A filter that rejects records at the SOCIABLE or TACITURN levels."""

    def filter(self, record):
        # Pass everything except these two specific custom levels.
        blocked_levels = (SOCIABLE, TACITURN)
        return record.levelno not in blocked_levels
class CustomLevelsAndFiltersTest(BaseTest):

    """Test various filtering possibilities with custom logging levels."""

    # Skip the logger name group.
    expected_log_pat = r"^[\w.]+ -> ([\w]+): ([\d]+)$"

    def setUp(self):
        BaseTest.setUp(self)
        # Register the human-readable names for the custom levels so they
        # appear in the formatted output (restored by BaseTest.tearDown).
        for k, v in my_logging_levels.items():
            logging.addLevelName(k, v)

    def log_at_all_levels(self, logger):
        """Emit one auto-numbered message at every custom level."""
        for lvl in LEVEL_RANGE:
            logger.log(lvl, self.next_message())

    def test_logger_filter(self):
        # Filter at logger level.
        self.root_logger.setLevel(VERBOSE)
        # Levels >= 'Verbose' are good.
        self.log_at_all_levels(self.root_logger)
        self.assert_log_lines([
            ('Verbose', '5'),
            ('Sociable', '6'),
            ('Effusive', '7'),
            ('Terse', '8'),
            ('Taciturn', '9'),
            ('Silent', '10'),
        ])

    def test_handler_filter(self):
        # Filter at handler level.
        self.root_logger.handlers[0].setLevel(SOCIABLE)
        try:
            # Levels >= 'Sociable' are good.
            self.log_at_all_levels(self.root_logger)
            self.assert_log_lines([
                ('Sociable', '6'),
                ('Effusive', '7'),
                ('Terse', '8'),
                ('Taciturn', '9'),
                ('Silent', '10'),
            ])
        finally:
            self.root_logger.handlers[0].setLevel(logging.NOTSET)

    def test_specific_filters(self):
        # Set a specific filter object on the handler, and then add another
        # filter object on the logger itself.
        handler = self.root_logger.handlers[0]
        specific_filter = None
        garr = GarrulousFilter()
        handler.addFilter(garr)
        try:
            self.log_at_all_levels(self.root_logger)
            first_lines = [
                # Notice how 'Garrulous' is missing
                ('Boring', '1'),
                ('Chatterbox', '2'),
                ('Talkative', '4'),
                ('Verbose', '5'),
                ('Sociable', '6'),
                ('Effusive', '7'),
                ('Terse', '8'),
                ('Taciturn', '9'),
                ('Silent', '10'),
            ]
            self.assert_log_lines(first_lines)

            specific_filter = VerySpecificFilter()
            self.root_logger.addFilter(specific_filter)
            self.log_at_all_levels(self.root_logger)
            self.assert_log_lines(first_lines + [
                # Not only 'Garrulous' is still missing, but also 'Sociable'
                # and 'Taciturn'
                ('Boring', '11'),
                ('Chatterbox', '12'),
                ('Talkative', '14'),
                ('Verbose', '15'),
                ('Effusive', '17'),
                ('Terse', '18'),
                ('Silent', '20'),
            ])
        finally:
            if specific_filter:
                self.root_logger.removeFilter(specific_filter)
            handler.removeFilter(garr)
class MemoryHandlerTest(BaseTest):

    """Tests for the MemoryHandler."""

    # Do not bother with a logger name group.
    expected_log_pat = r"^[\w.]+ -> ([\w]+): ([\d]+)$"

    def setUp(self):
        BaseTest.setUp(self)
        # Buffer up to 10 records; flush to the root handler whenever a
        # record at WARNING or above arrives.
        self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
                                                       self.root_hdlr)
        self.mem_logger = logging.getLogger('mem')
        self.mem_logger.propagate = 0
        self.mem_logger.addHandler(self.mem_hdlr)

    def tearDown(self):
        self.mem_hdlr.close()
        BaseTest.tearDown(self)

    def test_flush(self):
        # The memory handler flushes to its target handler based on specific
        # criteria (message count and message level).
        self.mem_logger.debug(self.next_message())
        self.assert_log_lines([])
        self.mem_logger.info(self.next_message())
        self.assert_log_lines([])
        # This will flush because the level is >= logging.WARNING
        self.mem_logger.warn(self.next_message())
        lines = [
            ('DEBUG', '1'),
            ('INFO', '2'),
            ('WARNING', '3'),
        ]
        self.assert_log_lines(lines)
        for n in (4, 14):
            for i in range(9):
                self.mem_logger.debug(self.next_message())
            self.assert_log_lines(lines)
            # This will flush because it's the 10th message since the last
            # flush.
            self.mem_logger.debug(self.next_message())
            lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
            self.assert_log_lines(lines)

        self.mem_logger.debug(self.next_message())
        self.assert_log_lines(lines)
class ExceptionFormatter(logging.Formatter):

    """A formatter that renders exception info as the exception class name."""

    def formatException(self, ei):
        # ``ei`` is a (type, value, traceback) triple as produced by
        # sys.exc_info(); only the type's name is reported.
        exc_type = ei[0]
        return "Got a [{}]".format(exc_type.__name__)
class ConfigFileTest(BaseTest):

    """Reading logging config from a .ini-style config file."""

    expected_log_pat = r"^([\w]+) \+\+ ([\w]+)$"

    # config0 is a standard configuration.
    config0 = """
    [loggers]
    keys=root
    [handlers]
    keys=hand1
    [formatters]
    keys=form1
    [logger_root]
    level=WARNING
    handlers=hand1
    [handler_hand1]
    class=StreamHandler
    level=NOTSET
    formatter=form1
    args=(sys.stdout,)
    [formatter_form1]
    format=%(levelname)s ++ %(message)s
    datefmt=
    """

    # config1 adds a little to the standard configuration.
    config1 = """
    [loggers]
    keys=root,parser
    [handlers]
    keys=hand1
    [formatters]
    keys=form1
    [logger_root]
    level=WARNING
    handlers=
    [logger_parser]
    level=DEBUG
    handlers=hand1
    propagate=1
    qualname=compiler.parser
    [handler_hand1]
    class=StreamHandler
    level=NOTSET
    formatter=form1
    args=(sys.stdout,)
    [formatter_form1]
    format=%(levelname)s ++ %(message)s
    datefmt=
    """

    # config1a moves the handler to the root.
    config1a = """
    [loggers]
    keys=root,parser
    [handlers]
    keys=hand1
    [formatters]
    keys=form1
    [logger_root]
    level=WARNING
    handlers=hand1
    [logger_parser]
    level=DEBUG
    handlers=
    propagate=1
    qualname=compiler.parser
    [handler_hand1]
    class=StreamHandler
    level=NOTSET
    formatter=form1
    args=(sys.stdout,)
    [formatter_form1]
    format=%(levelname)s ++ %(message)s
    datefmt=
    """

    # config2 has a subtle configuration error that should be reported
    config2 = config1.replace("sys.stdout", "sys.stbout")

    # config3 has a less subtle configuration error
    config3 = config1.replace("formatter=form1", "formatter=misspelled_name")

    # config4 specifies a custom formatter class to be loaded
    config4 = """
    [loggers]
    keys=root
    [handlers]
    keys=hand1
    [formatters]
    keys=form1
    [logger_root]
    level=NOTSET
    handlers=hand1
    [handler_hand1]
    class=StreamHandler
    level=NOTSET
    formatter=form1
    args=(sys.stdout,)
    [formatter_form1]
    class=""" + __name__ + """.ExceptionFormatter
    format=%(levelname)s:%(name)s:%(message)s
    datefmt=
    """

    # config5 specifies a custom handler class to be loaded
    config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')

    # config6 uses ', ' delimiters in the handlers and formatters sections
    config6 = """
    [loggers]
    keys=root,parser
    [handlers]
    keys=hand1, hand2
    [formatters]
    keys=form1, form2
    [logger_root]
    level=WARNING
    handlers=
    [logger_parser]
    level=DEBUG
    handlers=hand1
    propagate=1
    qualname=compiler.parser
    [handler_hand1]
    class=StreamHandler
    level=NOTSET
    formatter=form1
    args=(sys.stdout,)
    [handler_hand2]
    class=StreamHandler
    level=NOTSET
    formatter=form1
    args=(sys.stderr,)
    [formatter_form1]
    format=%(levelname)s ++ %(message)s
    datefmt=
    [formatter_form2]
    format=%(message)s
    datefmt=
    """

    # config7 adds a compiler logger.
    config7 = """
    [loggers]
    keys=root,parser,compiler
    [handlers]
    keys=hand1
    [formatters]
    keys=form1
    [logger_root]
    level=WARNING
    handlers=hand1
    [logger_compiler]
    level=DEBUG
    handlers=
    propagate=1
    qualname=compiler
    [logger_parser]
    level=DEBUG
    handlers=
    propagate=1
    qualname=compiler.parser
    [handler_hand1]
    class=StreamHandler
    level=NOTSET
    formatter=form1
    args=(sys.stdout,)
    [formatter_form1]
    format=%(levelname)s ++ %(message)s
    datefmt=
    """

    def apply_config(self, conf):
        """Feed the (dedented) config text to logging.config.fileConfig."""
        file = cStringIO.StringIO(textwrap.dedent(conf))
        logging.config.fileConfig(file)

    def test_config0_ok(self):
        # A simple config file which overrides the default settings.
        with captured_stdout() as output:
            self.apply_config(self.config0)
            logger = logging.getLogger()
            # Won't output anything
            logger.info(self.next_message())
            # Outputs a message
            logger.error(self.next_message())
            self.assert_log_lines([
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])

    def test_config1_ok(self, config=config1):
        # A config file defining a sub-parser as well.
        with captured_stdout() as output:
            self.apply_config(config)
            logger = logging.getLogger("compiler.parser")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])

    def test_config2_failure(self):
        # A simple config file which overrides the default settings.
        self.assertRaises(StandardError, self.apply_config, self.config2)

    def test_config3_failure(self):
        # A simple config file which overrides the default settings.
        self.assertRaises(StandardError, self.apply_config, self.config3)

    def test_config4_ok(self):
        # A config file specifying a custom formatter class.
        with captured_stdout() as output:
            self.apply_config(self.config4)
            logger = logging.getLogger()
            try:
                raise RuntimeError()
            except RuntimeError:
                logging.exception("just testing")
            sys.stdout.seek(0)
            self.assertEqual(output.getvalue(),
                "ERROR:root:just testing\nGot a [RuntimeError]\n")
            # Original logger output is empty
            self.assert_log_lines([])

    def test_config5_ok(self):
        self.test_config1_ok(config=self.config5)

    def test_config6_ok(self):
        self.test_config1_ok(config=self.config6)

    def test_config7_ok(self):
        with captured_stdout() as output:
            self.apply_config(self.config1a)
            logger = logging.getLogger("compiler.parser")
            # See issue #11424. compiler-hyphenated sorts
            # between compiler and compiler.xyz and this
            # was preventing compiler.xyz from being included
            # in the child loggers of compiler because of an
            # overzealous loop termination condition.
            hyphenated = logging.getLogger('compiler-hyphenated')
            # All will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            hyphenated.critical(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
                ('CRITICAL', '3'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
        with captured_stdout() as output:
            self.apply_config(self.config7)
            logger = logging.getLogger("compiler.parser")
            self.assertFalse(logger.disabled)
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            logger = logging.getLogger("compiler.lexer")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            # Will not appear
            hyphenated.critical(self.next_message())
            self.assert_log_lines([
                ('INFO', '4'),
                ('ERROR', '5'),
                ('INFO', '6'),
                ('ERROR', '7'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
class LogRecordStreamHandler(StreamRequestHandler):

    """Handler for a streaming logging request. It saves the log message in the
    TCP server's 'log_output' attribute."""

    TCP_LOG_END = "!!!END!!!"

    def handle(self):
        """Handle multiple requests - each expected to be of 4-byte length,
        followed by the LogRecord in pickle format. Logs the record
        according to whatever policy is configured locally."""
        while True:
            chunk = self.connection.recv(4)
            if len(chunk) < 4:
                break
            # Big-endian 4-byte length prefix precedes each pickled record.
            slen = struct.unpack(">L", chunk)[0]
            chunk = self.connection.recv(slen)
            # recv() may return fewer bytes than requested; keep reading
            # until the whole payload has arrived.
            while len(chunk) < slen:
                chunk = chunk + self.connection.recv(slen - len(chunk))
            obj = self.unpickle(chunk)
            record = logging.makeLogRecord(obj)
            self.handle_log_record(record)

    def unpickle(self, data):
        # NOTE: unpickling is only acceptable here because the data comes
        # from the test's own in-process sender, never untrusted input.
        return cPickle.loads(data)

    def handle_log_record(self, record):
        # If the end-of-messages sentinel is seen, tell the server to
        # terminate.
        if self.TCP_LOG_END in record.msg:
            self.server.abort = 1
            return
        self.server.log_output += record.msg + "\n"
class LogRecordSocketReceiver(ThreadingTCPServer):
    """A simple-minded TCP socket-based logging receiver suitable for test
    purposes."""
    # Allow fast rebinding of the port between test runs.
    allow_reuse_address = 1
    # Text of all received records, appended to by LogRecordStreamHandler.
    log_output = ""
    def __init__(self, host='localhost',
                 port=logging.handlers.DEFAULT_TCP_LOGGING_PORT,
                 handler=LogRecordStreamHandler):
        ThreadingTCPServer.__init__(self, (host, port), handler)
        self.abort = False      # set truthy to stop serve_until_stopped()
        self.timeout = 0.1      # select() poll interval in seconds
        self.finished = threading.Event()   # signalled when the loop exits
    def serve_until_stopped(self):
        """Poll for and handle connections until self.abort becomes true."""
        while not self.abort:
            rd, wr, ex = select.select([self.socket.fileno()], [], [],
                                       self.timeout)
            if rd:
                self.handle_request()
        # Notify the main thread that we're about to exit
        self.finished.set()
        # close the listen socket
        self.server_close()
@unittest.skipUnless(threading, 'Threading required for this test.')
class SocketHandlerTest(BaseTest):
    """Test for SocketHandler objects."""
    def setUp(self):
        """Set up a TCP server to receive log messages, and a SocketHandler
        pointing to that server's address and port."""
        BaseTest.setUp(self)
        # Port 0 asks the OS for an ephemeral port, avoiding clashes.
        self.tcpserver = LogRecordSocketReceiver(port=0)
        self.port = self.tcpserver.socket.getsockname()[1]
        self.threads = [
            threading.Thread(target=self.tcpserver.serve_until_stopped)]
        for thread in self.threads:
            thread.start()
        self.sock_hdlr = logging.handlers.SocketHandler('localhost', self.port)
        self.sock_hdlr.setFormatter(self.root_formatter)
        # Route all root-logger output through the socket handler only.
        self.root_logger.removeHandler(self.root_logger.handlers[0])
        self.root_logger.addHandler(self.sock_hdlr)
    def tearDown(self):
        """Shutdown the TCP server."""
        try:
            self.tcpserver.abort = True
            del self.tcpserver
            self.root_logger.removeHandler(self.sock_hdlr)
            self.sock_hdlr.close()
            for thread in self.threads:
                thread.join(2.0)
        finally:
            BaseTest.tearDown(self)
    def get_output(self):
        """Get the log output as received by the TCP server."""
        # Signal the TCP receiver and wait for it to terminate.
        self.root_logger.critical(LogRecordStreamHandler.TCP_LOG_END)
        self.tcpserver.finished.wait(2.0)
        return self.tcpserver.log_output
    def test_output(self):
        # The log message sent to the SocketHandler is properly received.
        logger = logging.getLogger("tcp")
        logger.error("spam")
        logger.debug("eggs")
        self.assertEqual(self.get_output(), "spam\neggs\n")
class MemoryTest(BaseTest):
    """Test memory persistence of logger objects."""
    def setUp(self):
        """Create a dict to remember potentially destroyed objects."""
        BaseTest.setUp(self)
        self._survivors = {}
    def _watch_for_survival(self, *args):
        """Watch the given objects for survival, by creating weakrefs to
        them."""
        for obj in args:
            # Record id() and repr() eagerly; the object itself is held only
            # weakly so that it can be collected if nothing else keeps it.
            key = id(obj), repr(obj)
            self._survivors[key] = weakref.ref(obj)
    def _assertTruesurvival(self):
        """Assert that all objects watched for survival have survived."""
        # (The odd method name looks like the product of a mechanical
        # assert_* rename; it simply asserts survival.)
        # Trigger cycle breaking.
        gc.collect()
        dead = []
        for (id_, repr_), ref in self._survivors.items():
            if ref() is None:
                dead.append(repr_)
        if dead:
            self.fail("%d objects should have survived "
                "but have been destroyed: %s" % (len(dead), ", ".join(dead)))
    def test_persistent_loggers(self):
        # Logger objects are persistent and retain their configuration, even
        # if visible references are destroyed.
        self.root_logger.setLevel(logging.INFO)
        foo = logging.getLogger("foo")
        self._watch_for_survival(foo)
        foo.setLevel(logging.DEBUG)
        self.root_logger.debug(self.next_message())
        foo.debug(self.next_message())
        self.assert_log_lines([
            ('foo', 'DEBUG', '2'),
        ])
        del foo
        # foo has survived.
        self._assertTruesurvival()
        # foo has retained its settings.
        bar = logging.getLogger("foo")
        bar.debug(self.next_message())
        self.assert_log_lines([
            ('foo', 'DEBUG', '2'),
            ('foo', 'DEBUG', '3'),
        ])
class EncodingTest(BaseTest):
    # Tests for how the logging package handles non-ASCII and Unicode data
    # when writing to plain files and to encoded stream writers (Python 2).
    def test_encoding_plain_file(self):
        # In Python 2.x, a plain file object is treated as having no encoding.
        log = logging.getLogger("test")
        # NOTE(review): tempfile.mktemp() is race-prone; tolerable in a
        # single-process test, but mkstemp() would be safer.
        fn = tempfile.mktemp(".log")
        # the non-ascii data we write to the log.
        data = "foo\x80"
        try:
            handler = logging.FileHandler(fn)
            log.addHandler(handler)
            try:
                # write non-ascii data to the log.
                log.warning(data)
            finally:
                log.removeHandler(handler)
                handler.close()
            # check we wrote exactly those bytes, ignoring trailing \n etc
            f = open(fn)
            try:
                self.assertEqual(f.read().rstrip(), data)
            finally:
                f.close()
        finally:
            # Always remove the temporary log file, even on failure.
            if os.path.isfile(fn):
                os.remove(fn)
    def test_encoding_cyrillic_unicode(self):
        log = logging.getLogger("test")
        #Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
        message = u'\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
        #Ensure it's written in a Cyrillic encoding
        writer_class = codecs.getwriter('cp1251')
        writer_class.encoding = 'cp1251'
        stream = cStringIO.StringIO()
        writer = writer_class(stream, 'strict')
        handler = logging.StreamHandler(writer)
        log.addHandler(handler)
        try:
            log.warning(message)
        finally:
            log.removeHandler(handler)
            handler.close()
        # check we wrote exactly those bytes, ignoring trailing \n etc
        s = stream.getvalue()
        #Compare against what the data should be when encoded in CP-1251
        self.assertEqual(s, '\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
class WarningsTest(BaseTest):
    # Verifies logging.captureWarnings(): warnings are redirected to the
    # 'py.warnings' logger while capture is on, and that passing an explicit
    # file to warnings.showwarning() bypasses the capture.
    def test_warnings(self):
        with warnings.catch_warnings():
            logging.captureWarnings(True)
            try:
                warnings.filterwarnings("always", category=UserWarning)
                file = cStringIO.StringIO()
                h = logging.StreamHandler(file)
                logger = logging.getLogger("py.warnings")
                logger.addHandler(h)
                warnings.warn("I'm warning you...")
                logger.removeHandler(h)
                s = file.getvalue()
                h.close()
                # The warning text must have been routed to the logger.
                self.assertTrue(s.find("UserWarning: I'm warning you...\n") > 0)
                #See if an explicit file uses the original implementation
                file = cStringIO.StringIO()
                warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
                                     file, "Dummy line")
                s = file.getvalue()
                file.close()
                self.assertEqual(s,
                    "dummy.py:42: UserWarning: Explicit\n Dummy line\n")
            finally:
                # Restore normal warning handling for subsequent tests.
                logging.captureWarnings(False)
def formatFunc(format, datefmt=None):
    """Formatter factory referenced by dict-based configs via '()': builds a
    plain logging.Formatter from the given format strings."""
    formatter = logging.Formatter(format, datefmt)
    return formatter
def handlerFunc():
    """Handler factory referenced by dict-based configs via '()': returns a
    default StreamHandler."""
    handler = logging.StreamHandler()
    return handler
class CustomHandler(logging.StreamHandler):
    # Used by config5/config6 to exercise resolving a handler class given as
    # a fully-qualified dotted name; adds no behaviour of its own.
    pass
class ConfigDictTest(BaseTest):
    """Reading logging config from a dictionary."""
    # Matches lines produced by the '%(levelname)s ++ %(message)s' format
    # used by the configs below.
    expected_log_pat = r"^([\w]+) \+\+ ([\w]+)$"
    # config0 is a standard configuration.
    config0 = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'root' : {
            'level' : 'WARNING',
            'handlers' : ['hand1'],
        },
    }
    # config1 adds a little to the standard configuration.
    config1 = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }
    # config2 has a subtle configuration error that should be reported
    # ('stream' refers to the non-existent sys.stdbout).
    config2 = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdbout',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }
    #As config1 but with a misspelt level on a handler
    config2a = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NTOSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }
    #As config1 but with a misspelt level on a logger
    config2b = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WRANING',
        },
    }
    # config3 has a less subtle configuration error
    # (the handler names a formatter that is never defined).
    config3 = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'misspelled_name',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }
    # config4 specifies a custom formatter class to be loaded
    config4 = {
        'version': 1,
        'formatters': {
            'form1' : {
                '()' : __name__ + '.ExceptionFormatter',
                'format' : '%(levelname)s:%(name)s:%(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'root' : {
            'level' : 'NOTSET',
            'handlers' : ['hand1'],
        },
    }
    # As config4 but using an actual callable rather than a string
    config4a = {
        'version': 1,
        'formatters': {
            'form1' : {
                '()' : ExceptionFormatter,
                'format' : '%(levelname)s:%(name)s:%(message)s',
            },
            'form2' : {
                '()' : __name__ + '.formatFunc',
                'format' : '%(levelname)s:%(name)s:%(message)s',
            },
            'form3' : {
                '()' : formatFunc,
                'format' : '%(levelname)s:%(name)s:%(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
            'hand2' : {
                '()' : handlerFunc,
            },
        },
        'root' : {
            'level' : 'NOTSET',
            'handlers' : ['hand1'],
        },
    }
    # config5 specifies a custom handler class to be loaded
    config5 = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : __name__ + '.CustomHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }
    # config6 specifies a custom handler class to be loaded
    # but has bad arguments
    config6 = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : __name__ + '.CustomHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
                '9' : 'invalid parameter name',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }
    #config 7 does not define compiler.parser but defines compiler.lexer
    #so compiler.parser should be disabled after applying it
    config7 = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'loggers' : {
            'compiler.lexer' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }
    # config8 defines a 'compiler' logger (plus an empty 'compiler.lexer')
    # and, unlike config7, leaves pre-existing loggers enabled.
    config8 = {
        'version': 1,
        'disable_existing_loggers' : False,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'loggers' : {
            'compiler' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
            'compiler.lexer' : {
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }
    # config9 sets both the handler and the logger thresholds to WARNING,
    # so INFO messages are initially dropped (see test_config_9_ok).
    config9 = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'WARNING',
                'stream' : 'ext://sys.stdout',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'WARNING',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'NOTSET',
        },
    }
    # config9a: incremental update lowering only the logger level to INFO;
    # the handler stays at WARNING, so INFO is still filtered out.
    config9a = {
        'version': 1,
        'incremental' : True,
        'handlers' : {
            'hand1' : {
                'level' : 'WARNING',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'INFO',
            },
        },
    }
    # config9b: incremental update lowering the handler level to INFO too,
    # after which INFO messages get through.
    config9b = {
        'version': 1,
        'incremental' : True,
        'handlers' : {
            'hand1' : {
                'level' : 'INFO',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'INFO',
            },
        },
    }
    #As config1 but with a filter added
    config10 = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'filters' : {
            'filt1' : {
                'name' : 'compiler.parser',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
                'filters' : ['filt1'],
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'filters' : ['filt1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
            'handlers' : ['hand1'],
        },
    }
    #As config1 but using cfg:// references
    config11 = {
        'version': 1,
        'true_formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handler_configs': {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'formatters' : 'cfg://true_formatters',
        'handlers' : {
            'hand1' : 'cfg://handler_configs[hand1]',
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }
    #As config11 but missing the version key
    config12 = {
        'true_formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handler_configs': {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'formatters' : 'cfg://true_formatters',
        'handlers' : {
            'hand1' : 'cfg://handler_configs[hand1]',
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }
    #As config11 but using an unsupported version
    config13 = {
        'version': 2,
        'true_formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handler_configs': {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'formatters' : 'cfg://true_formatters',
        'handlers' : {
            'hand1' : 'cfg://handler_configs[hand1]',
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }
    def apply_config(self, conf):
        """Apply a dict-based configuration via logging.config.dictConfig."""
        logging.config.dictConfig(conf)
    def test_config0_ok(self):
        # A simple config which overrides the default settings.
        with captured_stdout() as output:
            self.apply_config(self.config0)
            logger = logging.getLogger()
            # Won't output anything
            logger.info(self.next_message())
            # Outputs a message
            logger.error(self.next_message())
            self.assert_log_lines([
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
    def test_config1_ok(self, config=config1):
        # A config defining a sub-parser as well.
        # (Also reused by test_config5_ok/test_config11_ok with other configs.)
        with captured_stdout() as output:
            self.apply_config(config)
            logger = logging.getLogger("compiler.parser")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
    def test_config2_failure(self):
        # A simple config which overrides the default settings.
        self.assertRaises(StandardError, self.apply_config, self.config2)
    def test_config2a_failure(self):
        # A simple config which overrides the default settings.
        self.assertRaises(StandardError, self.apply_config, self.config2a)
    def test_config2b_failure(self):
        # A simple config which overrides the default settings.
        self.assertRaises(StandardError, self.apply_config, self.config2b)
    def test_config3_failure(self):
        # A simple config which overrides the default settings.
        self.assertRaises(StandardError, self.apply_config, self.config3)
    def test_config4_ok(self):
        # A config specifying a custom formatter class.
        with captured_stdout() as output:
            self.apply_config(self.config4)
            #logger = logging.getLogger()
            try:
                raise RuntimeError()
            except RuntimeError:
                logging.exception("just testing")
            sys.stdout.seek(0)
            self.assertEqual(output.getvalue(),
                "ERROR:root:just testing\nGot a [RuntimeError]\n")
            # Original logger output is empty
            self.assert_log_lines([])
    def test_config4a_ok(self):
        # A config specifying a custom formatter class.
        with captured_stdout() as output:
            self.apply_config(self.config4a)
            #logger = logging.getLogger()
            try:
                raise RuntimeError()
            except RuntimeError:
                logging.exception("just testing")
            sys.stdout.seek(0)
            self.assertEqual(output.getvalue(),
                "ERROR:root:just testing\nGot a [RuntimeError]\n")
            # Original logger output is empty
            self.assert_log_lines([])
    def test_config5_ok(self):
        self.test_config1_ok(config=self.config5)
    def test_config6_failure(self):
        self.assertRaises(StandardError, self.apply_config, self.config6)
    def test_config7_ok(self):
        with captured_stdout() as output:
            self.apply_config(self.config1)
            logger = logging.getLogger("compiler.parser")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
        with captured_stdout() as output:
            self.apply_config(self.config7)
            # config7 does not mention compiler.parser, so it gets disabled.
            logger = logging.getLogger("compiler.parser")
            self.assertTrue(logger.disabled)
            logger = logging.getLogger("compiler.lexer")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '3'),
                ('ERROR', '4'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
    #Same as test_config_7_ok but don't disable old loggers.
    def test_config_8_ok(self):
        with captured_stdout() as output:
            self.apply_config(self.config1)
            logger = logging.getLogger("compiler.parser")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
        with captured_stdout() as output:
            self.apply_config(self.config8)
            # disable_existing_loggers=False keeps compiler.parser alive.
            logger = logging.getLogger("compiler.parser")
            self.assertFalse(logger.disabled)
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            logger = logging.getLogger("compiler.lexer")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '3'),
                ('ERROR', '4'),
                ('INFO', '5'),
                ('ERROR', '6'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
    def test_config_9_ok(self):
        with captured_stdout() as output:
            self.apply_config(self.config9)
            logger = logging.getLogger("compiler.parser")
            #Nothing will be output since both handler and logger are set to WARNING
            logger.info(self.next_message())
            self.assert_log_lines([], stream=output)
            self.apply_config(self.config9a)
            #Nothing will be output since both handler is still set to WARNING
            logger.info(self.next_message())
            self.assert_log_lines([], stream=output)
            self.apply_config(self.config9b)
            #Message should now be output
            logger.info(self.next_message())
            self.assert_log_lines([
                ('INFO', '3'),
            ], stream=output)
    def test_config_10_ok(self):
        with captured_stdout() as output:
            self.apply_config(self.config10)
            logger = logging.getLogger("compiler.parser")
            logger.warning(self.next_message())
            logger = logging.getLogger('compiler')
            #Not output, because filtered
            logger.warning(self.next_message())
            logger = logging.getLogger('compiler.lexer')
            #Not output, because filtered
            logger.warning(self.next_message())
            logger = logging.getLogger("compiler.parser.codegen")
            #Output, as not filtered
            logger.error(self.next_message())
            self.assert_log_lines([
                ('WARNING', '1'),
                ('ERROR', '4'),
            ], stream=output)
    def test_config11_ok(self):
        self.test_config1_ok(self.config11)
    def test_config12_failure(self):
        self.assertRaises(StandardError, self.apply_config, self.config12)
    def test_config13_failure(self):
        self.assertRaises(StandardError, self.apply_config, self.config13)
    @unittest.skipUnless(threading, 'listen() needs threading to work')
    def setup_via_listener(self, text):
        # Helper: push a serialized config to a logging.config.listen() server.
        # Ask for a randomly assigned port (by using port 0)
        t = logging.config.listen(0)
        t.start()
        t.ready.wait()
        # Now get the port allocated
        port = t.port
        t.ready.clear()
        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.settimeout(2.0)
            sock.connect(('localhost', port))
            # Frame the payload with a 4-byte big-endian length prefix, as
            # the listener expects, and send it possibly in several pieces.
            slen = struct.pack('>L', len(text))
            s = slen + text
            sentsofar = 0
            left = len(s)
            while left > 0:
                sent = sock.send(s[sentsofar:])
                sentsofar += sent
                left -= sent
            sock.close()
        finally:
            t.ready.wait(2.0)
            logging.config.stopListening()
            t.join(2.0)
    def test_listen_config_10_ok(self):
        with captured_stdout() as output:
            self.setup_via_listener(json.dumps(self.config10))
            logger = logging.getLogger("compiler.parser")
            logger.warning(self.next_message())
            logger = logging.getLogger('compiler')
            #Not output, because filtered
            logger.warning(self.next_message())
            logger = logging.getLogger('compiler.lexer')
            #Not output, because filtered
            logger.warning(self.next_message())
            logger = logging.getLogger("compiler.parser.codegen")
            #Output, as not filtered
            logger.error(self.next_message())
            self.assert_log_lines([
                ('WARNING', '1'),
                ('ERROR', '4'),
            ], stream=output)
    def test_listen_config_1_ok(self):
        with captured_stdout() as output:
            self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1))
            logger = logging.getLogger("compiler.parser")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
class ManagerTest(BaseTest):
    # A Manager should honour a Logger subclass set via setLoggerClass and
    # reject classes that are not Logger subclasses.
    def test_manager_loggerclass(self):
        logged = []
        class MyLogger(logging.Logger):
            def _log(self, level, msg, args, exc_info=None, extra=None):
                # Capture messages instead of emitting them.
                logged.append(msg)
        man = logging.Manager(None)
        # Non-Logger classes must be rejected.
        self.assertRaises(TypeError, man.setLoggerClass, int)
        man.setLoggerClass(MyLogger)
        logger = man.getLogger('test')
        logger.warning('should appear in logged')
        # The module-level root logger is unaffected by this manager.
        logging.warning('should not appear in logged')
        self.assertEqual(logged, ['should appear in logged'])
class ChildLoggerTest(BaseTest):
    def test_child_loggers(self):
        """Logger.getChild must return the very same objects as getLogger
        called with the dotted name spelled out, both from the root logger
        and from a non-root logger."""
        r = logging.getLogger()
        l1 = logging.getLogger('abc')
        # NOTE(review): 'def.ghi' is created but never referenced again;
        # kept so the logger-manager state matches the original test exactly.
        l2 = logging.getLogger('def.ghi')
        c1 = r.getChild('xyz')
        c2 = r.getChild('uvw.xyz')
        # assertIs gives clearer failure output than assertTrue(x is y).
        self.assertIs(c1, logging.getLogger('xyz'))
        self.assertIs(c2, logging.getLogger('uvw.xyz'))
        c1 = l1.getChild('def')
        c2 = c1.getChild('ghi')
        c3 = l1.getChild('def.ghi')
        self.assertIs(c1, logging.getLogger('abc.def'))
        self.assertIs(c2, logging.getLogger('abc.def.ghi'))
        self.assertIs(c2, c3)
class HandlerTest(BaseTest):
    @unittest.skipIf(os.name == 'nt', 'WatchedFileHandler not appropriate for Windows.')
    @unittest.skipUnless(threading, 'Threading required for this test.')
    def test_race(self):
        # WatchedFileHandler must survive its file being deleted out from
        # under it by another thread while records are being emitted.
        # Issue #14632 refers.
        def remove_loop(fname, tries):
            # Background thread: repeatedly delete the log file (ignoring
            # the races where it does not currently exist).
            for _ in range(tries):
                try:
                    os.unlink(fname)
                except OSError:
                    pass
                time.sleep(0.004 * random.randint(0, 4))
        del_count = 500
        log_count = 500
        # Exercise both immediate-open and delayed-open handler modes.
        for delay in (False, True):
            fd, fn = tempfile.mkstemp('.log', 'test_logging-3-')
            os.close(fd)
            remover = threading.Thread(target=remove_loop, args=(fn, del_count))
            remover.daemon = True
            remover.start()
            h = logging.handlers.WatchedFileHandler(fn, delay=delay)
            f = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')
            h.setFormatter(f)
            try:
                for _ in range(log_count):
                    time.sleep(0.005)
                    r = logging.makeLogRecord({'msg': 'testing' })
                    h.handle(r)
            finally:
                remover.join()
                try:
                    h.close()
                except ValueError:
                    pass
                if os.path.exists(fn):
                    os.unlink(fn)
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
@run_with_locale('LC_ALL', '')
def test_main():
    """Run every test class in this module under the default locale."""
    run_unittest(BuiltinLevelsTest, BasicFilterTest,
                 CustomLevelsAndFiltersTest, MemoryHandlerTest,
                 ConfigFileTest, SocketHandlerTest, MemoryTest,
                 EncodingTest, WarningsTest, ConfigDictTest, ManagerTest,
                 ChildLoggerTest, HandlerTest)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    test_main()
| apache-2.0 |
wangyou/XX-Net | code/default/python27/1.0/lib/win32/systray/win32_adapter.py | 6 | 6689 | import ctypes
# --- user32 / gdi32 / kernel32 / shell32 entry points ------------------------
# NOTE(review): these bind the ANSI ("A") variants, so every string passed to
# them must be a byte string (see convert_to_ascii below).
RegisterWindowMessage = ctypes.windll.user32.RegisterWindowMessageA
LoadCursor = ctypes.windll.user32.LoadCursorA
LoadIcon = ctypes.windll.user32.LoadIconA
LoadImage = ctypes.windll.user32.LoadImageA
RegisterClass = ctypes.windll.user32.RegisterClassA
CreateWindowEx = ctypes.windll.user32.CreateWindowExA
UpdateWindow = ctypes.windll.user32.UpdateWindow
DefWindowProc = ctypes.windll.user32.DefWindowProcA
GetSystemMetrics = ctypes.windll.user32.GetSystemMetrics
InsertMenuItem = ctypes.windll.user32.InsertMenuItemA
PostMessage = ctypes.windll.user32.PostMessageA
PostQuitMessage = ctypes.windll.user32.PostQuitMessage
SetMenuDefaultItem = ctypes.windll.user32.SetMenuDefaultItem
GetCursorPos = ctypes.windll.user32.GetCursorPos
SetForegroundWindow = ctypes.windll.user32.SetForegroundWindow
TrackPopupMenu = ctypes.windll.user32.TrackPopupMenu
CreatePopupMenu = ctypes.windll.user32.CreatePopupMenu
CreateCompatibleDC = ctypes.windll.gdi32.CreateCompatibleDC
GetDC = ctypes.windll.user32.GetDC
CreateCompatibleBitmap = ctypes.windll.gdi32.CreateCompatibleBitmap
GetSysColorBrush = ctypes.windll.user32.GetSysColorBrush
FillRect = ctypes.windll.user32.FillRect
DrawIconEx = ctypes.windll.user32.DrawIconEx
SelectObject = ctypes.windll.gdi32.SelectObject
DeleteDC = ctypes.windll.gdi32.DeleteDC
DestroyWindow = ctypes.windll.user32.DestroyWindow
GetModuleHandle = ctypes.windll.kernel32.GetModuleHandleA
GetMessage = ctypes.windll.user32.GetMessageA
TranslateMessage = ctypes.windll.user32.TranslateMessage
DispatchMessage = ctypes.windll.user32.DispatchMessageA
Shell_NotifyIcon = ctypes.windll.shell32.Shell_NotifyIcon
DestroyIcon = ctypes.windll.user32.DestroyIcon
# --- Win32 constants ----------------------------------------------------------
# Shell_NotifyIcon actions (NIM_*).
NIM_ADD = 0
NIM_MODIFY = 1
NIM_DELETE = 2
# NOTIFYICONDATA.uFlags bits (NIF_*): which fields are valid.
NIF_ICON = 2
NIF_MESSAGE = 1
NIF_TIP = 4
# MENUITEMINFO.fMask bits (MIIM_*): which fields are valid.
MIIM_ID = 2
MIIM_SUBMENU = 4
MIIM_STRING = 64
MIIM_BITMAP = 128
# Window messages (WM_*).
WM_DESTROY = 2
WM_CLOSE = 16
WM_COMMAND = 273
WM_USER = 1024
WM_LBUTTONDBLCLK = 515
WM_RBUTTONUP = 517
WM_LBUTTONUP = 514
WM_NULL = 0
# Window-class styles (CS_*).
CS_VREDRAW = 1
CS_HREDRAW = 2
# Stock cursor / system colors / window styles.
IDC_ARROW = 32512
COLOR_WINDOW = 5
WS_OVERLAPPED = 0
WS_SYSMENU = 524288
CW_USEDEFAULT = -2147483648
# LoadImage flags and types.
LR_LOADFROMFILE = 16
LR_DEFAULTSIZE = 64
IMAGE_ICON = 1
IDI_APPLICATION = 32512
# TrackPopupMenu alignment; small-icon metrics; drawing constants.
TPM_LEFTALIGN = 0
SM_CXSMICON = 49
SM_CYSMICON = 50
COLOR_MENU = 4
DI_NORMAL = 3
class fState:
    # MENUITEMINFO.fState flag values (MFS_* from the Win32 API), used by
    # PackMENUITEMINFO below.
    MFS_DEFAULT = 0x1000
    MFS_ENABLED = 0
    MFS_DISABLED = 0x3
    MFS_CHECKED = 0x8
    MFS_HILITE = 0x80
# WPARAM is defined as UINT_PTR (unsigned type)
# LPARAM is defined as LONG_PTR (signed type)
# Choose ctypes integer types whose size matches the platform pointer size.
# NOTE(review): there is no else branch, so on a platform where neither
# c_long nor c_longlong matches the pointer size these names would stay
# undefined; that cannot happen on standard Windows builds.
if ctypes.sizeof(ctypes.c_long) == ctypes.sizeof(ctypes.c_void_p):
    WPARAM = ctypes.c_ulong
    LPARAM = ctypes.c_long
    LRESULT = ctypes.c_long
elif ctypes.sizeof(ctypes.c_longlong) == ctypes.sizeof(ctypes.c_void_p):
    WPARAM = ctypes.c_ulonglong
    LPARAM = ctypes.c_longlong
    LRESULT = ctypes.c_longlong
# Generic Win32 handle type (HWND, HMENU, HICON, ...).
HANDLE = ctypes.c_void_p
def convert_to_ascii(s):
    """Return *s* encoded as ASCII bytes, or unchanged when that fails.

    On Python 3, text input is encoded to bytes; bytes input raises
    TypeError and is returned as-is.  On Python 2, ``bytes(s, "ascii")``
    raises TypeError (bytes is str and takes one argument), so the
    original object is returned unchanged there as well.
    """
    try:
        return bytes(s, "ascii")
    except (TypeError, UnicodeEncodeError):
        # TypeError: input is already bytes (or we are on Python 2).
        # UnicodeEncodeError: text contains non-ASCII characters.
        # Anything else is a programming error and should propagate,
        # which the previous bare `except:` silently hid.
        return s
# Window-procedure callback type: LRESULT WndProc(HWND, UINT, WPARAM, LPARAM).
# NOTE(review): a WNDPROC must use the stdcall convention on 32-bit Windows
# (ctypes.WINFUNCTYPE); CFUNCTYPE only coincides with it on 64-bit Windows,
# where there is a single calling convention -- confirm before 32-bit use.
LPFN_WNDPROC = ctypes.CFUNCTYPE(LRESULT, HANDLE, ctypes.c_uint, WPARAM, LPARAM)
class WNDCLASS(ctypes.Structure):
    """ctypes mirror of the Win32 WNDCLASSA structure, passed to
    RegisterClass.  Field order and types define the binary layout and
    must not be changed."""
    _fields_ = [("style", ctypes.c_uint),
                ("lpfnWndProc", LPFN_WNDPROC),
                ("cbClsExtra", ctypes.c_int),
                ("cbWndExtra", ctypes.c_int),
                ("hInstance", HANDLE),
                ("hIcon", HANDLE),
                ("hCursor", HANDLE),
                ("hbrBackground", HANDLE),
                ("lpszMenuName", ctypes.c_char_p),
                ("lpszClassName", ctypes.c_char_p),
                ]
class POINT(ctypes.Structure):
    """ctypes mirror of the Win32 POINT structure: a 2-D coordinate made of
    two signed C longs."""
    _fields_ = [
        ('x', ctypes.c_long),
        ('y', ctypes.c_long),
    ]
class RECT(ctypes.Structure):
    """ctypes mirror of the Win32 RECT structure: a rectangle described by
    its left/top and right/bottom edges as signed C longs."""
    _fields_ = [
        ('left', ctypes.c_long),
        ('top', ctypes.c_long),
        ('right', ctypes.c_long),
        ('bottom', ctypes.c_long),
    ]
class MENUITEMINFO(ctypes.Structure):
    """ctypes mirror of the Win32 MENUITEMINFOA structure, passed to
    InsertMenuItem.  fMask selects which of the other fields are valid;
    see PackMENUITEMINFO.  Field order defines the binary layout."""
    _fields_ = [("cbSize", ctypes.c_uint),
                ("fMask", ctypes.c_uint),
                ("fType", ctypes.c_uint),
                ("fState", ctypes.c_uint),
                ("wID", ctypes.c_uint),
                ("hSubMenu", HANDLE),
                ("hbmpChecked", HANDLE),
                ("hbmpUnchecked", HANDLE),
                ("dwItemData", ctypes.c_void_p),
                ("dwTypeData", ctypes.c_char_p),
                ("cch", ctypes.c_uint),
                ("hbmpItem", HANDLE),
                ]
class MSG(ctypes.Structure):
    """ctypes mirror of the Win32 MSG structure filled by GetMessage and
    consumed by TranslateMessage/DispatchMessage (see PumpMessages)."""
    _fields_ = [("hwnd", HANDLE),
                ("message", ctypes.c_uint),
                ("wParam", WPARAM),
                ("lParam", LPARAM),
                ("time", ctypes.c_ulong),
                ("pt", POINT),
                ]
class NOTIFYICONDATA(ctypes.Structure):
    """ctypes mirror of the Win32 NOTIFYICONDATAA structure, passed to
    Shell_NotifyIcon.  Note the fixed-size char buffers: szTip (64),
    szInfo (256) and szInfoTitle (64); assigning longer byte strings to
    them raises ValueError."""
    _fields_ = [("cbSize", ctypes.c_uint),
                ("hWnd", HANDLE),
                ("uID", ctypes.c_uint),
                ("uFlags", ctypes.c_uint),
                ("uCallbackMessage", ctypes.c_uint),
                ("hIcon", HANDLE),
                ("szTip", ctypes.c_char * 64),
                ("dwState", ctypes.c_uint),
                ("dwStateMask", ctypes.c_uint),
                ("szInfo", ctypes.c_char * 256),
                ("uTimeout", ctypes.c_uint),
                ("szInfoTitle", ctypes.c_char * 64),
                ("dwInfoFlags", ctypes.c_uint),
                ("guidItem", ctypes.c_char * 16),
                ("hBalloonIcon", HANDLE),
                ]
def PackMENUITEMINFO(text=None, hbmpItem=None, wID=None, hSubMenu=None, fState=fState.MFS_ENABLED):
    """Build a MENUITEMINFO structure for InsertMenuItem.

    text     -- menu caption; run through convert_to_ascii (ANSI API).
    hbmpItem -- optional bitmap handle drawn next to the item.
    wID      -- optional command id delivered via WM_COMMAND.
    hSubMenu -- optional handle of a popup menu opened by this item.
    fState   -- MFS_* state flags (see the fState class); enabled by default.

    Only the fields corresponding to non-None arguments are marked valid
    in fMask; fState is always set.
    """
    # Not defined at module level alongside the other MIIM_* constants;
    # named locally so the initial mask below is self-documenting.
    MIIM_STATE = 1
    res = MENUITEMINFO()
    res.cbSize = ctypes.sizeof(res)
    res.fMask = MIIM_STATE  # fState is always populated
    res.fState = fState
    if hbmpItem is not None:
        res.fMask |= MIIM_BITMAP
        res.hbmpItem = hbmpItem
    if wID is not None:
        res.fMask |= MIIM_ID
        res.wID = wID
    if text is not None:
        text = convert_to_ascii(text)
        res.fMask |= MIIM_STRING
        res.dwTypeData = text
    if hSubMenu is not None:
        res.fMask |= MIIM_SUBMENU
        res.hSubMenu = hSubMenu
    return res
def LOWORD(w):
    """Extract the low-order word (bits 0-15) of *w*, mirroring the Win32
    LOWORD macro.  Always returns a non-negative int in range(0x10000)."""
    return w % 0x10000
def PumpMessages():
    """Run a standard Win32 message loop: fetch, translate and dispatch
    messages until GetMessage reports WM_QUIT (returns 0) or an error
    (returns a negative value)."""
    message = MSG()
    message_ref = ctypes.byref(message)
    while GetMessage(message_ref, None, 0, 0) > 0:
        TranslateMessage(message_ref)
        DispatchMessage(message_ref)
def NotifyData(hWnd=0, uID=0, uFlags=0, uCallbackMessage=0, hIcon=0, szTip=""):
    """Build a NOTIFYICONDATA structure for Shell_NotifyIcon.

    hWnd/uID identify the tray icon; uFlags (NIF_*) marks which fields are
    valid; uCallbackMessage is the window message used for icon events;
    hIcon is the icon handle; szTip is the tooltip text.
    """
    szTip = convert_to_ascii(szTip)
    # szTip is a fixed 64-byte buffer (63 characters plus the terminating
    # NUL).  ctypes raises ValueError for oversized assignments, so clip
    # the tooltip instead of crashing on long strings.
    szTip = szTip[:63]
    res = NOTIFYICONDATA()
    res.cbSize = ctypes.sizeof(res)
    res.hWnd = hWnd
    res.uID = uID
    res.uFlags = uFlags
    res.uCallbackMessage = uCallbackMessage
    res.hIcon = hIcon
    res.szTip = szTip
    return res
| bsd-2-clause |
qma/pants | tests/python/pants_test/build_graph/test_build_file_parser.py | 5 | 12566 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from collections import namedtuple
from textwrap import dedent
from pants.base.build_file import FilesystemBuildFile
from pants.build_graph.address import BuildFileAddress
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.build_graph.build_file_parser import BuildFileParser
from pants.build_graph.target import Target
from pants_test.base_test import BaseTest
# TODO(Eric Ayers) Explicit unit tests are missing for registered_alises, parse_spec,
# parse_build_file_family
class ErrorTarget(Target):
  # Deliberately un-constructible target type: these tests only exercise
  # BUILD file *parsing*, so actually instantiating a target is a failure.
  def __init__(self, *args, **kwargs):
    assert False, "This fake target should never be initialized in this test!"
class BuildFileParserBasicsTest(BaseTest):
  def test_addressable_exceptions(self):
    """Malformed BUILD files surface the appropriate parser exceptions."""
    # A target with no name fails while executing the BUILD file.
    self.add_to_build_file('a/BUILD', 'target()')
    build_file_a = FilesystemBuildFile(self.build_root, 'a/BUILD')
    with self.assertRaises(BuildFileParser.ExecuteError):
      self.build_file_parser.parse_build_file(build_file_a)
    # A positional argument after a keyword argument is a syntax error
    # inside the BUILD file.
    self.add_to_build_file('b/BUILD', 'target(name="foo", "bad_arg")')
    build_file_b = FilesystemBuildFile(self.build_root, 'b/BUILD')
    with self.assertRaises(BuildFileParser.BuildFileParserError):
      self.build_file_parser.parse_build_file(build_file_b)
    # A non-addressable object in a dependency list is rejected.
    self.add_to_build_file('d/BUILD', dedent(
      """
      target(
        name="foo",
        dependencies=[
          object(),
        ]
      )
      """
    ))
    build_file_d = FilesystemBuildFile(self.build_root, 'd/BUILD')
    with self.assertRaises(BuildFileParser.BuildFileParserError):
      self.build_file_parser.parse_build_file(build_file_d)
  def test_noop_parse(self):
    """An empty BUILD file parses to an empty address map."""
    self.add_to_build_file('BUILD', '')
    build_file = FilesystemBuildFile(self.build_root, '')
    address_map = set(self.build_file_parser.parse_build_file(build_file))
    self.assertEqual(len(address_map), 0)
class BuildFileParserTargetTest(BaseTest):
  @property
  def alias_groups(self):
    # Expose ErrorTarget under the 'fake' alias so BUILD files in these
    # tests can declare targets without ever constructing one.
    return BuildFileAliases(targets={'fake': ErrorTarget})
  def test_trivial_target(self):
    """A single target parses to one proxy with the expected address."""
    self.add_to_build_file('BUILD', 'fake(name="foozle")')
    build_file = FilesystemBuildFile(self.build_root, 'BUILD')
    address_map = self.build_file_parser.parse_build_file(build_file)
    self.assertEqual(len(address_map), 1)
    address, proxy = address_map.popitem()
    self.assertEqual(address, BuildFileAddress(build_file, 'foozle'))
    self.assertEqual(proxy.addressed_name, 'foozle')
    self.assertEqual(proxy.addressed_type, ErrorTarget)
  def test_sibling_build_files(self):
    """Targets spread over sibling BUILD/BUILD.* files in one directory are
    all collected into a single address map."""
    self.add_to_build_file('BUILD', dedent(
      """
      fake(name="base",
           dependencies=[
             ':foo',
           ])
      """))
    self.add_to_build_file('BUILD.foo', dedent(
      """
      fake(name="foo",
           dependencies=[
             ':bat',
           ])
      """))
    self.add_to_build_file('./BUILD.bar', dedent(
      """
      fake(name="bat")
      """))
    bar_build_file = FilesystemBuildFile(self.build_root, 'BUILD.bar')
    base_build_file = FilesystemBuildFile(self.build_root, 'BUILD')
    foo_build_file = FilesystemBuildFile(self.build_root, 'BUILD.foo')
    address_map = self.build_file_parser.address_map_from_build_file(bar_build_file)
    addresses = address_map.keys()
    self.assertEqual({bar_build_file, base_build_file, foo_build_file},
                     set([address.build_file for address in addresses]))
    self.assertEqual({':base', ':foo', ':bat'},
                     set([address.spec for address in addresses]))
  def test_build_file_duplicates(self):
    # This workspace has two targets in the same file with the same name.
    self.add_to_build_file('BUILD', 'fake(name="foo")\n')
    self.add_to_build_file('BUILD', 'fake(name="foo")\n')
    with self.assertRaises(BuildFileParser.AddressableConflictException):
      base_build_file = FilesystemBuildFile(self.build_root, 'BUILD')
      self.build_file_parser.parse_build_file(base_build_file)
  def test_sibling_build_files_duplicates(self):
    # This workspace is malformed, you can't shadow a name in a sibling BUILD file
    self.add_to_build_file('BUILD', dedent(
      """
      fake(name="base",
           dependencies=[
             ':foo',
           ])
      """))
    self.add_to_build_file('BUILD.foo', dedent(
      """
      fake(name="foo",
           dependencies=[
             ':bat',
           ])
      """))
    self.add_to_build_file('./BUILD.bar', dedent(
      """
      fake(name="base")
      """))
    with self.assertRaises(BuildFileParser.SiblingConflictException):
      base_build_file = FilesystemBuildFile(self.build_root, 'BUILD')
      self.build_file_parser.address_map_from_build_file(base_build_file)
class BuildFileParserExposedObjectTest(BaseTest):
  """Checks that plain exposed objects do not register addressable targets."""

  @property
  def alias_groups(self):
    return BuildFileAliases(objects={'fake_object': object()})

  def test_exposed_object(self):
    # Referencing an exposed object is legal but produces no addresses.
    self.add_to_build_file('BUILD', """fake_object""")
    build_file = FilesystemBuildFile(self.build_root, 'BUILD')
    address_map = self.build_file_parser.parse_build_file(build_file)
    self.assertEqual(len(address_map), 0)
class BuildFileParserExposedContextAwareObjectFactoryTest(BaseTest):
  """Exercises context-aware object factories exposed to BUILD files."""

  # Lightweight stand-ins for JVM publishing metadata.
  Jar = namedtuple('Jar', ['org', 'name', 'rev'])
  Repository = namedtuple('Repository', ['name', 'url', 'push_db_basedir'])
  Artifact = namedtuple('Artifact', ['org', 'name', 'repo'])

  class JarLibrary(Target):
    # Minimal target that records its jar dependencies.
    def __init__(self, jars=None, **kwargs):
      super(BuildFileParserExposedContextAwareObjectFactoryTest.JarLibrary, self).__init__(**kwargs)
      self.jars = jars or []

  class JvmLibrary(Target):
    # Minimal target that records an optional published artifact.
    def __init__(self, provides=None, **kwargs):
      super(BuildFileParserExposedContextAwareObjectFactoryTest.JvmLibrary, self).__init__(**kwargs)
      self.provides = provides

  class JavaLibrary(JvmLibrary):
    pass

  class ScalaLibrary(JvmLibrary):
    pass

  @classmethod
  def make_lib(cls, parse_context):
    # Factory: one BUILD-file call creates a jar plus a jar_library wrapping it.
    def real_make_lib(org, name, rev):
      dep = parse_context.create_object('jar', org=org, name=name, rev=rev)
      parse_context.create_object('jar_library', name=name, jars=[dep])
    return real_make_lib

  @classmethod
  def create_java_libraries(cls, parse_context):
    # Factory: creates paired <base>-java / <base>-scala libraries, each with
    # an optional publish artifact.
    def real_create_java_libraries(base_name,
                                   org='com.twitter',
                                   provides_java_name=None,
                                   provides_scala_name=None):

      def provides_artifact(provides_name):
        if provides_name is None:
          return None
        jvm_repo = cls.Repository(
          name='maven-central',
          url='http://maven.example.com',
          push_db_basedir=os.path.join('build-support', 'ivy', 'pushdb'),
        )
        return parse_context.create_object('artifact',
                                           org=org,
                                           name=provides_name,
                                           repo=jvm_repo)

      parse_context.create_object('java_library',
                                  name='{}-java'.format(base_name),
                                  provides=provides_artifact(provides_java_name))
      parse_context.create_object('scala_library',
                                  name='{}-scala'.format(base_name),
                                  provides=provides_artifact(provides_scala_name))
    return real_create_java_libraries

  def setUp(self):
    super(BuildFileParserExposedContextAwareObjectFactoryTest, self).setUp()
    # Paths recorded by the path_util factory during parsing.
    self._paths = set()

  def path_relative_util(self, parse_context):
    # Factory: records paths relative to the BUILD file's directory.
    def real_path_relative_util(path):
      self._paths.add(os.path.join(parse_context.rel_path, path))
    return real_path_relative_util

  @property
  def alias_groups(self):
    return BuildFileAliases(
      targets={
        'jar_library': self.JarLibrary,
        'java_library': self.JavaLibrary,
        'scala_library': self.ScalaLibrary,
      },
      context_aware_object_factories={
        'make_lib': self.make_lib,
        'create_java_libraries': self.create_java_libraries,
        'path_util': self.path_relative_util,
      },
      objects={
        'artifact': self.Artifact,
        'jar': self.Jar,
      }
    )

  def test_context_aware_object_factories(self):
    # Three factory calls should register exactly three targets and one path.
    contents = dedent("""
      create_java_libraries(base_name="create-java-libraries",
                            provides_java_name="test-java",
                            provides_scala_name="test-scala")
      make_lib("com.foo.test", "does_not_exists", "1.0")
      path_util("baz")
    """)
    self.create_file('3rdparty/BUILD', contents)

    build_file = FilesystemBuildFile(self.build_root, '3rdparty/BUILD')
    address_map = self.build_file_parser.parse_build_file(build_file)
    registered_proxies = set(address_map.values())

    self.assertEqual(len(registered_proxies), 3)
    targets_created = {}
    for target_proxy in registered_proxies:
      targets_created[target_proxy.addressed_name] = target_proxy.addressed_type
    self.assertEqual({'does_not_exists',
                      'create-java-libraries-scala',
                      'create-java-libraries-java'},
                     set(targets_created.keys()))
    self.assertEqual(targets_created['does_not_exists'], self.JarLibrary)
    self.assertEqual(targets_created['create-java-libraries-java'], self.JavaLibrary)
    self.assertEqual(targets_created['create-java-libraries-scala'], self.ScalaLibrary)
    self.assertEqual({'3rdparty/baz'}, self._paths)

  def test_raises_parse_error(self):
    self.add_to_build_file('BUILD', 'foo(name = = "baz")')
    build_file = FilesystemBuildFile(self.build_root, 'BUILD')
    with self.assertRaises(BuildFileParser.ParseError):
      self.build_file_parser.parse_build_file(build_file)

    # Test some corner cases for the context printing

    # Error at beginning of BUILD file
    build_file = self.add_to_build_file('begin/BUILD', dedent("""
      *?&INVALID! = 'foo'
      target(
        name='bar',
        dependencies= [
          ':baz',
        ],
      )
    """))
    with self.assertRaises(BuildFileParser.ParseError):
      self.build_file_parser.parse_build_file(build_file)

    # Error at end of BUILD file
    build_file = self.add_to_build_file('end/BUILD', dedent("""
      target(
        name='bar',
        dependencies= [
          ':baz',
        ],
      )
      *?&INVALID! = 'foo'
    """))
    with self.assertRaises(BuildFileParser.ParseError):
      self.build_file_parser.parse_build_file(build_file)

    # Error in the middle of BUILD file > 6 lines
    build_file = self.add_to_build_file('middle/BUILD', dedent("""
      target(
        name='bar',
        *?&INVALID! = 'foo'
        dependencies = [
          ':baz',
        ],
      )
    """))
    with self.assertRaises(BuildFileParser.ParseError):
      self.build_file_parser.parse_build_file(build_file)

    # Error in very short build file.
    build_file = self.add_to_build_file('short/BUILD', dedent("""
      target(name='bar', dependencies = [':baz'],) *?&INVALID! = 'foo'
    """))
    with self.assertRaises(BuildFileParser.ParseError):
      self.build_file_parser.parse_build_file(build_file)

  def test_raises_execute_error(self):
    # Calling an unregistered alias fails at execution time, not parse time.
    self.add_to_build_file('BUILD', 'undefined_alias(name="baz")')
    build_file = FilesystemBuildFile(self.build_root, 'BUILD')
    with self.assertRaises(BuildFileParser.ExecuteError):
      self.build_file_parser.parse_build_file(build_file)

  def test_build_file_parser_error_hierarcy(self):
    """Exception handling code depends on the fact that all explicit exceptions from BuildFileParser
    are subclassed from the BuildFileParserError base class.
    """
    def assert_build_file_parser_error(e):
      self.assertIsInstance(e, BuildFileParser.BuildFileParserError)

    assert_build_file_parser_error(BuildFileParser.BuildFileScanError())
    assert_build_file_parser_error(BuildFileParser.AddressableConflictException())
    assert_build_file_parser_error(BuildFileParser.SiblingConflictException())
    assert_build_file_parser_error(BuildFileParser.ParseError())
    assert_build_file_parser_error(BuildFileParser.ExecuteError())
| apache-2.0 |
olituks/sentinella | frontend/library/web2py/applications/welcome/models/menu.py | 30 | 6204 | # -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations
#########################################################################
## Customize your APP title, subtitle and menus here
#########################################################################
# Application branding shown in the layout's navigation bar.
response.logo = A(B('web', SPAN(2), 'py'), XML('™ '),
                  _class="brand", _href="http://www.web2py.com/")
response.title = request.application.replace('_', ' ').title()
response.subtitle = ''

## read more at http://dev.w3.org/html5/markup/meta.name.html
response.meta.author = 'Your Name <you@example.com>'
response.meta.keywords = 'web2py, python, framework'
response.meta.generator = 'Web2py Web Framework'

## your http://google.com/analytics id
response.google_analytics_id = None

#########################################################################
## this is the main application menu add/remove items as required
#########################################################################

response.menu = [
    (T('Home'), False, URL('default', 'index'), [])
]

# Toggle the development shortcut menus defined below; set False in production.
DEVELOPMENT_MENU = True
#########################################################################
## provide shortcuts for development. remove in production
#########################################################################
def _():
    """Append the development shortcut menus (admin, docs, community, plugins)
    to ``response.menu``.

    Wrapped in a throwaway function so the shortcut names (``app``, ``ctr``)
    do not leak into the model's global scope.
    """
    # shortcuts
    app = request.application
    ctr = request.controller
    # useful links to internal and external resources
    response.menu += [
        (SPAN('web2py', _class='highlighted'), False, 'http://web2py.com', [
            (T('My Sites'), False, URL('admin', 'default', 'site')),
            (T('This App'), False, URL('admin', 'default', 'design/%s' % app), [
                (T('Controller'), False,
                 URL(
                     'admin', 'default', 'edit/%s/controllers/%s.py' % (app, ctr))),
                (T('View'), False,
                 URL(
                     'admin', 'default', 'edit/%s/views/%s' % (app, response.view))),
                (T('Layout'), False,
                 URL(
                     'admin', 'default', 'edit/%s/views/layout.html' % app)),
                (T('Stylesheet'), False,
                 URL(
                     'admin', 'default', 'edit/%s/static/css/web2py.css' % app)),
                (T('DB Model'), False,
                 URL(
                     'admin', 'default', 'edit/%s/models/db.py' % app)),
                (T('Menu Model'), False,
                 URL(
                     'admin', 'default', 'edit/%s/models/menu.py' % app)),
                (T('Database'), False, URL(app, 'appadmin', 'index')),
                (T('Errors'), False, URL(
                    'admin', 'default', 'errors/' + app)),
                (T('About'), False, URL(
                    'admin', 'default', 'about/' + app)),
            ]),
            ('web2py.com', False, 'http://www.web2py.com', [
                (T('Download'), False,
                 'http://www.web2py.com/examples/default/download'),
                (T('Support'), False,
                 'http://www.web2py.com/examples/default/support'),
                (T('Demo'), False, 'http://web2py.com/demo_admin'),
                (T('Quick Examples'), False,
                 'http://web2py.com/examples/default/examples'),
                (T('FAQ'), False, 'http://web2py.com/AlterEgo'),
                (T('Videos'), False,
                 'http://www.web2py.com/examples/default/videos/'),
                (T('Free Applications'),
                 False, 'http://web2py.com/appliances'),
                (T('Plugins'), False, 'http://web2py.com/plugins'),
                (T('Layouts'), False, 'http://web2py.com/layouts'),
                (T('Recipes'), False, 'http://web2pyslices.com/'),
                (T('Semantic'), False, 'http://web2py.com/semantic'),
            ]),
            (T('Documentation'), False, 'http://www.web2py.com/book', [
                (T('Preface'), False,
                 'http://www.web2py.com/book/default/chapter/00'),
                (T('Introduction'), False,
                 'http://www.web2py.com/book/default/chapter/01'),
                (T('Python'), False,
                 'http://www.web2py.com/book/default/chapter/02'),
                (T('Overview'), False,
                 'http://www.web2py.com/book/default/chapter/03'),
                (T('The Core'), False,
                 'http://www.web2py.com/book/default/chapter/04'),
                (T('The Views'), False,
                 'http://www.web2py.com/book/default/chapter/05'),
                (T('Database'), False,
                 'http://www.web2py.com/book/default/chapter/06'),
                (T('Forms and Validators'), False,
                 'http://www.web2py.com/book/default/chapter/07'),
                (T('Email and SMS'), False,
                 'http://www.web2py.com/book/default/chapter/08'),
                (T('Access Control'), False,
                 'http://www.web2py.com/book/default/chapter/09'),
                (T('Services'), False,
                 'http://www.web2py.com/book/default/chapter/10'),
                (T('Ajax Recipes'), False,
                 'http://www.web2py.com/book/default/chapter/11'),
                (T('Components and Plugins'), False,
                 'http://www.web2py.com/book/default/chapter/12'),
                (T('Deployment Recipes'), False,
                 'http://www.web2py.com/book/default/chapter/13'),
                (T('Other Recipes'), False,
                 'http://www.web2py.com/book/default/chapter/14'),
                (T('Buy this book'), False,
                 'http://stores.lulu.com/web2py'),
            ]),
            (T('Community'), False, None, [
                (T('Groups'), False,
                 'http://www.web2py.com/examples/default/usergroups'),
                (T('Twitter'), False, 'http://twitter.com/web2py'),
                (T('Live Chat'), False,
                 'http://webchat.freenode.net/?channels=web2py'),
            ]),
            (T('Plugins'), False, None, [
                ('plugin_wiki', False,
                 'http://web2py.com/examples/default/download'),
                (T('Other Plugins'), False,
                 'http://web2py.com/plugins'),
                (T('Layout Plugins'),
                 False, 'http://web2py.com/layouts'),
            ])
        ]
    )]
# Only build the heavyweight development menu when enabled above.
if DEVELOPMENT_MENU: _()

# If auth is defined by another model file, let it extend the menu (wiki nav).
if "auth" in locals(): auth.wikimenu()
| lgpl-2.1 |
40123210/w17b_exam | static/Brython3.1.1-20150328-091302/Lib/multiprocessing/pool.py | 694 | 23263 | #
# Module providing the `Pool` class for managing a process pool
#
# multiprocessing/pool.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = ['Pool']
#
# Imports
#
import threading
import queue
import itertools
import collections
import time
from multiprocessing import Process, cpu_count, TimeoutError
from multiprocessing.util import Finalize, debug
#
# Constants representing the state of a pool
#
RUN = 0        # pool accepts and executes new tasks
CLOSE = 1      # no new tasks accepted; pending tasks are drained
TERMINATE = 2  # shut down immediately, discarding pending work

#
# Miscellaneous
#

# Process-global counter handing out a unique job id per ApplyResult/MapResult.
job_counter = itertools.count()
def mapstar(args):
    """Unpack *args* into map() and materialize the result as a list.

    Used by Pool.map/imap to run a whole chunk in one worker call.
    """
    return [value for value in map(*args)]
def starmapstar(args):
    """Apply args[0] to every argument tuple in args[1], returning a list.

    Used by Pool.starmap to run a whole chunk in one worker call.
    """
    func, argument_tuples = args[0], args[1]
    return [func(*one_call) for one_call in argument_tuples]
#
# Code run by worker processes
#
class MaybeEncodingError(Exception):
    """Wraps possible unpickleable errors, so they can be
    safely sent through the socket."""

    def __init__(self, exc, value):
        # Store reprs rather than the objects themselves: the originals may
        # not be picklable, which is exactly the failure being reported.
        self.exc, self.value = repr(exc), repr(value)
        super().__init__(self.exc, self.value)

    def __str__(self):
        return "Error sending result: '%s'. Reason: '%s'" % (self.value,
                                                             self.exc)

    def __repr__(self):
        return "<MaybeEncodingError: %s>" % str(self)
def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None):
    """Worker loop: pull (job, i, func, args, kwds) tasks from *inqueue*,
    run them, and put (job, i, (success, value)) results on *outqueue*.

    A ``None`` task is the shutdown sentinel. If *maxtasks* is given, the
    worker exits after completing that many tasks (so the pool can recycle
    it). *initializer(*initargs)* runs once before the first task.
    """
    # isinstance() instead of type() == int: the idiomatic type check
    # (and what modern CPython's pool.py uses).
    assert maxtasks is None or (isinstance(maxtasks, int) and maxtasks > 0)
    put = outqueue.put
    get = inqueue.get
    # When the queues are pipe-backed, close the unused pipe ends in this
    # process so EOF is seen promptly. Plain queues lack these attributes.
    if hasattr(inqueue, '_writer'):
        inqueue._writer.close()
        outqueue._reader.close()

    if initializer is not None:
        initializer(*initargs)

    completed = 0
    while maxtasks is None or (maxtasks and completed < maxtasks):
        try:
            task = get()
        except (EOFError, IOError):
            debug('worker got EOFError or IOError -- exiting')
            break

        if task is None:
            debug('worker got sentinel -- exiting')
            break

        job, i, func, args, kwds = task
        try:
            result = (True, func(*args, **kwds))
        except Exception as e:
            # Task failures are reported back as (False, exception), never raised.
            result = (False, e)
        try:
            put((job, i, result))
        except Exception as e:
            # The result itself may be unpicklable; report that instead.
            wrapped = MaybeEncodingError(e, result[1])
            debug("Possible encoding error while sending result: %s" % (
                wrapped))
            put((job, i, (False, wrapped)))
        completed += 1
    debug('worker exiting after %d tasks' % completed)
#
# Class representing a process pool
#
class Pool(object):
    '''
    Class which supports an async version of applying functions to arguments.
    '''
    # Process class used to spawn workers; ThreadPool overrides this.
    Process = Process

    def __init__(self, processes=None, initializer=None, initargs=(),
                 maxtasksperchild=None):
        self._setup_queues()
        self._taskqueue = queue.Queue()
        # Maps job id -> pending ApplyResult/MapResult/IMapIterator.
        self._cache = {}
        self._state = RUN
        self._maxtasksperchild = maxtasksperchild
        self._initializer = initializer
        self._initargs = initargs

        if processes is None:
            try:
                processes = cpu_count()
            except NotImplementedError:
                processes = 1
        if processes < 1:
            raise ValueError("Number of processes must be at least 1")

        if initializer is not None and not callable(initializer):
            raise TypeError('initializer must be a callable')

        self._processes = processes
        self._pool = []
        self._repopulate_pool()

        # Three daemon threads drive the pool: one replaces dead workers,
        # one feeds tasks to workers, one dispatches results to waiters.
        self._worker_handler = threading.Thread(
            target=Pool._handle_workers,
            args=(self, )
        )
        self._worker_handler.daemon = True
        self._worker_handler._state = RUN
        self._worker_handler.start()

        self._task_handler = threading.Thread(
            target=Pool._handle_tasks,
            args=(self._taskqueue, self._quick_put, self._outqueue, self._pool)
        )
        self._task_handler.daemon = True
        self._task_handler._state = RUN
        self._task_handler.start()

        self._result_handler = threading.Thread(
            target=Pool._handle_results,
            args=(self._outqueue, self._quick_get, self._cache)
        )
        self._result_handler.daemon = True
        self._result_handler._state = RUN
        self._result_handler.start()

        # Registered cleanup that runs at GC/interpreter exit or terminate().
        self._terminate = Finalize(
            self, self._terminate_pool,
            args=(self._taskqueue, self._inqueue, self._outqueue, self._pool,
                  self._worker_handler, self._task_handler,
                  self._result_handler, self._cache),
            exitpriority=15
        )

    def _join_exited_workers(self):
        """Cleanup after any worker processes which have exited due to reaching
        their specified lifetime.  Returns True if any workers were cleaned up.
        """
        cleaned = False
        # Iterate in reverse so deletion by index stays valid.
        for i in reversed(range(len(self._pool))):
            worker = self._pool[i]
            if worker.exitcode is not None:
                # worker exited
                debug('cleaning up worker %d' % i)
                worker.join()
                cleaned = True
                del self._pool[i]
        return cleaned

    def _repopulate_pool(self):
        """Bring the number of pool processes up to the specified number,
        for use after reaping workers which have exited.
        """
        for i in range(self._processes - len(self._pool)):
            w = self.Process(target=worker,
                             args=(self._inqueue, self._outqueue,
                                   self._initializer,
                                   self._initargs, self._maxtasksperchild)
                            )
            self._pool.append(w)
            w.name = w.name.replace('Process', 'PoolWorker')
            w.daemon = True
            w.start()
            debug('added worker')

    def _maintain_pool(self):
        """Clean up any exited workers and start replacements for them.
        """
        if self._join_exited_workers():
            self._repopulate_pool()

    def _setup_queues(self):
        # Pipe-backed queues for inter-process transport; ThreadPool overrides
        # this with plain queue.Queue objects.
        from .queues import SimpleQueue
        self._inqueue = SimpleQueue()
        self._outqueue = SimpleQueue()
        self._quick_put = self._inqueue._writer.send
        self._quick_get = self._outqueue._reader.recv

    def apply(self, func, args=(), kwds={}):
        '''
        Equivalent of `func(*args, **kwds)`.
        '''
        assert self._state == RUN
        return self.apply_async(func, args, kwds).get()

    def map(self, func, iterable, chunksize=None):
        '''
        Apply `func` to each element in `iterable`, collecting the results
        in a list that is returned.
        '''
        return self._map_async(func, iterable, mapstar, chunksize).get()

    def starmap(self, func, iterable, chunksize=None):
        '''
        Like `map()` method but the elements of the `iterable` are expected to
        be iterables as well and will be unpacked as arguments. Hence
        `func` and (a, b) becomes func(a, b).
        '''
        return self._map_async(func, iterable, starmapstar, chunksize).get()

    def starmap_async(self, func, iterable, chunksize=None, callback=None,
                      error_callback=None):
        '''
        Asynchronous version of `starmap()` method.
        '''
        return self._map_async(func, iterable, starmapstar, chunksize,
                               callback, error_callback)

    def imap(self, func, iterable, chunksize=1):
        '''
        Equivalent of `map()` -- can be MUCH slower than `Pool.map()`.
        '''
        if self._state != RUN:
            raise ValueError("Pool not running")
        if chunksize == 1:
            result = IMapIterator(self._cache)
            self._taskqueue.put((((result._job, i, func, (x,), {})
                                  for i, x in enumerate(iterable)), result._set_length))
            return result
        else:
            assert chunksize > 1
            # Chunk the work and unchunk lazily on the way out.
            task_batches = Pool._get_tasks(func, iterable, chunksize)
            result = IMapIterator(self._cache)
            self._taskqueue.put((((result._job, i, mapstar, (x,), {})
                                  for i, x in enumerate(task_batches)), result._set_length))
            return (item for chunk in result for item in chunk)

    def imap_unordered(self, func, iterable, chunksize=1):
        '''
        Like `imap()` method but ordering of results is arbitrary.
        '''
        if self._state != RUN:
            raise ValueError("Pool not running")
        if chunksize == 1:
            result = IMapUnorderedIterator(self._cache)
            self._taskqueue.put((((result._job, i, func, (x,), {})
                                  for i, x in enumerate(iterable)), result._set_length))
            return result
        else:
            assert chunksize > 1
            task_batches = Pool._get_tasks(func, iterable, chunksize)
            result = IMapUnorderedIterator(self._cache)
            self._taskqueue.put((((result._job, i, mapstar, (x,), {})
                                  for i, x in enumerate(task_batches)), result._set_length))
            return (item for chunk in result for item in chunk)

    def apply_async(self, func, args=(), kwds={}, callback=None,
                    error_callback=None):
        '''
        Asynchronous version of `apply()` method.
        '''
        if self._state != RUN:
            raise ValueError("Pool not running")
        result = ApplyResult(self._cache, callback, error_callback)
        self._taskqueue.put(([(result._job, None, func, args, kwds)], None))
        return result

    def map_async(self, func, iterable, chunksize=None, callback=None,
                  error_callback=None):
        '''
        Asynchronous version of `map()` method.
        '''
        return self._map_async(func, iterable, mapstar, chunksize, callback,
                               error_callback)

    def _map_async(self, func, iterable, mapper, chunksize=None, callback=None,
                   error_callback=None):
        '''
        Helper function to implement map, starmap and their async counterparts.
        '''
        if self._state != RUN:
            raise ValueError("Pool not running")
        if not hasattr(iterable, '__len__'):
            iterable = list(iterable)

        if chunksize is None:
            # Default heuristic: roughly 4 chunks per worker.
            chunksize, extra = divmod(len(iterable), len(self._pool) * 4)
            if extra:
                chunksize += 1
        if len(iterable) == 0:
            chunksize = 0

        task_batches = Pool._get_tasks(func, iterable, chunksize)
        result = MapResult(self._cache, chunksize, len(iterable), callback,
                           error_callback=error_callback)
        self._taskqueue.put((((result._job, i, mapper, (x,), {})
                              for i, x in enumerate(task_batches)), None))
        return result

    @staticmethod
    def _handle_workers(pool):
        # Runs in the worker-handler thread: restart dead workers until told
        # to stop, then post the task-handler shutdown sentinel.
        thread = threading.current_thread()

        # Keep maintaining workers until the cache gets drained, unless the pool
        # is terminated.
        while thread._state == RUN or (pool._cache and thread._state != TERMINATE):
            pool._maintain_pool()
            time.sleep(0.1)
        # send sentinel to stop workers
        pool._taskqueue.put(None)
        debug('worker handler exiting')

    @staticmethod
    def _handle_tasks(taskqueue, put, outqueue, pool):
        # Runs in the task-handler thread: stream tasks to the workers and
        # propagate the shutdown sentinels when the task queue is exhausted.
        thread = threading.current_thread()

        for taskseq, set_length in iter(taskqueue.get, None):
            i = -1
            for i, task in enumerate(taskseq):
                if thread._state:
                    debug('task handler found thread._state != RUN')
                    break
                try:
                    put(task)
                except IOError:
                    debug('could not put task on queue')
                    break
            else:
                if set_length:
                    debug('doing set_length()')
                    set_length(i+1)
                continue
            break
        else:
            debug('task handler got sentinel')

        try:
            # tell result handler to finish when cache is empty
            debug('task handler sending sentinel to result handler')
            outqueue.put(None)

            # tell workers there is no more work
            debug('task handler sending sentinel to workers')
            for p in pool:
                put(None)
        except IOError:
            debug('task handler got IOError when sending sentinels')

        debug('task handler exiting')

    @staticmethod
    def _handle_results(outqueue, get, cache):
        # Runs in the result-handler thread: route each (job, i, obj) result
        # to its waiting ApplyResult/MapResult/IMapIterator.
        thread = threading.current_thread()

        while 1:
            try:
                task = get()
            except (IOError, EOFError):
                debug('result handler got EOFError/IOError -- exiting')
                return

            if thread._state:
                assert thread._state == TERMINATE
                debug('result handler found thread._state=TERMINATE')
                break

            if task is None:
                debug('result handler got sentinel')
                break

            job, i, obj = task
            try:
                cache[job]._set(i, obj)
            except KeyError:
                # Result for a job already discarded (e.g. iterator dropped).
                pass

        # Drain remaining results for still-pending jobs after the sentinel.
        while cache and thread._state != TERMINATE:
            try:
                task = get()
            except (IOError, EOFError):
                debug('result handler got EOFError/IOError -- exiting')
                return

            if task is None:
                debug('result handler ignoring extra sentinel')
                continue
            job, i, obj = task
            try:
                cache[job]._set(i, obj)
            except KeyError:
                pass

        if hasattr(outqueue, '_reader'):
            debug('ensuring that outqueue is not full')
            # If we don't make room available in outqueue then
            # attempts to add the sentinel (None) to outqueue may
            # block.  There is guaranteed to be no more than 2 sentinels.
            try:
                for i in range(10):
                    if not outqueue._reader.poll():
                        break
                    get()
            except (IOError, EOFError):
                pass

        debug('result handler exiting: len(cache)=%s, thread._state=%s',
              len(cache), thread._state)

    @staticmethod
    def _get_tasks(func, it, size):
        # Yield (func, chunk) pairs where each chunk is up to `size` items.
        it = iter(it)
        while 1:
            x = tuple(itertools.islice(it, size))
            if not x:
                return
            yield (func, x)

    def __reduce__(self):
        raise NotImplementedError(
            'pool objects cannot be passed between processes or pickled'
            )

    def close(self):
        debug('closing pool')
        if self._state == RUN:
            self._state = CLOSE
            self._worker_handler._state = CLOSE

    def terminate(self):
        debug('terminating pool')
        self._state = TERMINATE
        self._worker_handler._state = TERMINATE
        self._terminate()

    def join(self):
        # Valid only after close() or terminate().
        debug('joining pool')
        assert self._state in (CLOSE, TERMINATE)
        self._worker_handler.join()
        self._task_handler.join()
        self._result_handler.join()
        for p in self._pool:
            p.join()

    @staticmethod
    def _help_stuff_finish(inqueue, task_handler, size):
        # task_handler may be blocked trying to put items on inqueue
        debug('removing tasks from inqueue until task handler finished')
        inqueue._rlock.acquire()
        while task_handler.is_alive() and inqueue._reader.poll():
            inqueue._reader.recv()
            time.sleep(0)

    @classmethod
    def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool,
                        worker_handler, task_handler, result_handler, cache):
        # this is guaranteed to only be called once
        debug('finalizing pool')

        worker_handler._state = TERMINATE
        task_handler._state = TERMINATE

        debug('helping task handler/workers to finish')
        cls._help_stuff_finish(inqueue, task_handler, len(pool))

        assert result_handler.is_alive() or len(cache) == 0

        result_handler._state = TERMINATE
        outqueue.put(None)                  # sentinel

        # We must wait for the worker handler to exit before terminating
        # workers because we don't want workers to be restarted behind our back.
        debug('joining worker handler')
        if threading.current_thread() is not worker_handler:
            worker_handler.join()

        # Terminate workers which haven't already finished.
        if pool and hasattr(pool[0], 'terminate'):
            debug('terminating workers')
            for p in pool:
                if p.exitcode is None:
                    p.terminate()

        debug('joining task handler')
        if threading.current_thread() is not task_handler:
            task_handler.join()

        debug('joining result handler')
        if threading.current_thread() is not result_handler:
            result_handler.join()

        if pool and hasattr(pool[0], 'terminate'):
            debug('joining pool workers')
            for p in pool:
                if p.is_alive():
                    # worker has not yet exited
                    debug('cleaning up worker %d' % p.pid)
                    p.join()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.terminate()
#
# Class whose instances are returned by `Pool.apply_async()`
#
class ApplyResult(object):
    """Future-like handle returned by Pool.apply_async().

    The result handler thread delivers the outcome via _set(); waiters block
    on an Event in get()/wait().
    """

    def __init__(self, cache, callback, error_callback):
        self._event = threading.Event()
        self._job = next(job_counter)
        self._cache = cache
        self._callback = callback
        self._error_callback = error_callback
        # Register so the result handler can find us by job id.
        cache[self._job] = self

    def ready(self):
        return self._event.is_set()

    def successful(self):
        # Only meaningful once the result has arrived.
        assert self.ready()
        return self._success

    def wait(self, timeout=None):
        self._event.wait(timeout)

    def get(self, timeout=None):
        self.wait(timeout)
        if not self.ready():
            raise TimeoutError
        if self._success:
            return self._value
        else:
            # Re-raise the exception captured in the worker.
            raise self._value

    def _set(self, i, obj):
        # Called by the result handler thread; `obj` is (success, value).
        # Callbacks run before the event is set so get() observes their effects.
        self._success, self._value = obj
        if self._callback and self._success:
            self._callback(self._value)
        if self._error_callback and not self._success:
            self._error_callback(self._value)
        self._event.set()
        del self._cache[self._job]

AsyncResult = ApplyResult       # create alias -- see #17805
#
# Class whose instances are returned by `Pool.map_async()`
#
class MapResult(ApplyResult):
    """Result handle for Pool.map_async(): accumulates chunked results in order."""

    def __init__(self, cache, chunksize, length, callback, error_callback):
        ApplyResult.__init__(self, cache, callback,
                             error_callback=error_callback)
        self._success = True
        self._value = [None] * length
        self._chunksize = chunksize
        if chunksize <= 0:
            # Empty input: complete immediately.
            self._number_left = 0
            self._event.set()
            del cache[self._job]
        else:
            self._number_left = length//chunksize + bool(length % chunksize)

    def _set(self, i, success_result):
        # Called once per chunk; i is the chunk index.
        success, result = success_result
        if success:
            self._value[i*self._chunksize:(i+1)*self._chunksize] = result
            self._number_left -= 1
            if self._number_left == 0:
                if self._callback:
                    self._callback(self._value)
                del self._cache[self._job]
                self._event.set()
        else:
            # First failing chunk fails the whole map.
            self._success = False
            self._value = result
            if self._error_callback:
                self._error_callback(self._value)
            del self._cache[self._job]
            self._event.set()
#
# Class whose instances are returned by `Pool.imap()`
#
class IMapIterator(object):
    """Iterator returned by Pool.imap(): yields results in submission order,
    buffering out-of-order arrivals in _unsorted until their turn comes."""

    def __init__(self, cache):
        self._cond = threading.Condition(threading.Lock())
        self._job = next(job_counter)
        self._cache = cache
        self._items = collections.deque()   # results ready to be yielded
        self._index = 0                     # index of the next expected result
        self._length = None                 # total count, once known
        self._unsorted = {}                 # results that arrived early
        cache[self._job] = self

    def __iter__(self):
        return self

    def next(self, timeout=None):
        self._cond.acquire()
        try:
            try:
                item = self._items.popleft()
            except IndexError:
                if self._index == self._length:
                    raise StopIteration
                self._cond.wait(timeout)
                try:
                    item = self._items.popleft()
                except IndexError:
                    if self._index == self._length:
                        raise StopIteration
                    raise TimeoutError
        finally:
            self._cond.release()

        success, value = item
        if success:
            return value
        raise value

    __next__ = next                    # XXX

    def _set(self, i, obj):
        # Called by the result handler; deliver in-order results immediately,
        # park early arrivals in _unsorted.
        self._cond.acquire()
        try:
            if self._index == i:
                self._items.append(obj)
                self._index += 1
                while self._index in self._unsorted:
                    obj = self._unsorted.pop(self._index)
                    self._items.append(obj)
                    self._index += 1
                self._cond.notify()
            else:
                self._unsorted[i] = obj

            if self._index == self._length:
                del self._cache[self._job]
        finally:
            self._cond.release()

    def _set_length(self, length):
        # Called once the task feeder knows how many results to expect.
        self._cond.acquire()
        try:
            self._length = length
            if self._index == self._length:
                self._cond.notify()
                del self._cache[self._job]
        finally:
            self._cond.release()
#
# Class whose instances are returned by `Pool.imap_unordered()`
#
class IMapUnorderedIterator(IMapIterator):
    """imap_unordered() variant: yields results as they arrive, ignoring order."""

    def _set(self, i, obj):
        self._cond.acquire()
        try:
            # No reordering: every arrival is immediately consumable.
            self._items.append(obj)
            self._index += 1
            self._cond.notify()
            if self._index == self._length:
                del self._cache[self._job]
        finally:
            self._cond.release()
#
#
#
class ThreadPool(Pool):
    """Pool variant whose workers are threads (multiprocessing.dummy.Process)
    rather than OS processes."""

    from .dummy import Process

    def __init__(self, processes=None, initializer=None, initargs=()):
        Pool.__init__(self, processes, initializer, initargs)

    def _setup_queues(self):
        # Plain thread-safe queues: no pipes or pickling needed between threads.
        self._inqueue = queue.Queue()
        self._outqueue = queue.Queue()
        self._quick_put = self._inqueue.put
        self._quick_get = self._outqueue.get

    @staticmethod
    def _help_stuff_finish(inqueue, task_handler, size):
        # put sentinels at head of inqueue to make workers finish
        inqueue.not_empty.acquire()
        try:
            inqueue.queue.clear()
            inqueue.queue.extend([None] * size)
            inqueue.not_empty.notify_all()
        finally:
            inqueue.not_empty.release()
| agpl-3.0 |
4383/street-workout-database | sport/web/commons/templatetags/common_tags.py | 1 | 3392 | __author__ = 'herve.beraud'
from datetime import datetime, timedelta
from django import template
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
from django.utils.timesince import timesince
from community.models import InformationMessage
from exercises.models import Category
from exercises.models import MuscleGroup
from exercises.models import Muscle
# Template tag library this module registers all of its tags against.
register = template.Library()
@register.inclusion_tag('common_tags/show_exercises_menu.html')
def show_exercises_menu():
    """Render the exercises menu with the number of active categories,
    muscle groups and muscles."""
    # NOTE: despite the plural names these are counts, not querysets.
    categories = Category.objects.filter(active=True).count()
    muscles_groups = MuscleGroup.objects.filter(active=True).count()
    muscles = Muscle.objects.filter(active=True).count()
    return {'categories': categories, 'muscles_group': muscles_groups, 'muscles': muscles}
@register.inclusion_tag('common_tags/image_gallery.html')
def images_gallery(images):
    """Render an image gallery for the given iterable of images."""
    return {"images": images}
@register.inclusion_tag('common_tags/grid-list-gallery.html')
def grid_list_gallery(items,
                      display_level=True,
                      display_menu=True,
                      shortcut_menu=True,
                      semantic_type="exercise",
                      margin_bottom=False
                      ):
    """Render a grid/list gallery of items with optional level badge,
    menus and bottom margin; semantic_type tags the items for the template."""
    return {"items": items,
            "display_level": display_level,
            "display_menu": display_menu,
            "shortcut_menu": shortcut_menu,
            "semantic_type": semantic_type,
            "margin_bottom": margin_bottom
            }
@register.inclusion_tag('common_tags/video_gallery.html')
def videos_gallery(videos):
    """Render a video gallery for the given iterable of videos."""
    return {"videos": videos}
@register.inclusion_tag('common_tags/grid-list-gallery-menu.html')
def grid_list_gallery_menu():
    """Render the static gallery menu; the template needs no context."""
    return {}
@register.inclusion_tag('common_tags/display_information_message.html', takes_context=True)
def display_information_message(context):
    """Render the most recent active information message, if any.

    A message flagged ``display_once`` is suppressed when the visitor's
    ``information_message_id`` cookie already records its id. The returned
    ``expiration_date`` (one year out) is formatted for the cookie
    ``expires`` attribute the template sets.
    """
    expiration_date = datetime.today() + timedelta(days=365)
    # Cookie "expires" format (RFC 6265). %H (24-hour clock) is required:
    # the previous %I without %p lost AM/PM and emitted invalid/ambiguous
    # timestamps for all afternoon hours.
    cookie_date_format = "%a, %d %b %Y %H:%M:%S GMT"
    try:
        # Latest currently-displayable message.
        information_message = InformationMessage.objects.filter(
            active=True,
            display_date__lte=datetime.now(), expiration_date__gt=datetime.now()).latest('publish_date')
        request = context['request']
        if information_message.display_once:
            try:
                already_read_information_message_id = int(request.COOKIES.get('information_message_id'))
                if already_read_information_message_id == information_message.id:
                    information_message = None
            # Cookie not found: int(None) raises TypeError -- show the message.
            except TypeError:
                pass
    except ObjectDoesNotExist:
        information_message = None
    return {"information_message": information_message, "expiration_date": expiration_date.strftime(cookie_date_format)}
@register.simple_tag
def current_version():
    """Return the site version string from settings.CURRENT_VERSION."""
    return settings.CURRENT_VERSION
@register.simple_tag
def current_revision():
    """Return the site revision string from settings.CURRENT_REVISION."""
    return settings.CURRENT_REVISION
@register.simple_tag
def last_update_date_since():
    """Return a human-readable "time since" string for the last data
    update (settings.LAST_UPDATE_DATE, a Unix timestamp)."""
    update_time = datetime.fromtimestamp(settings.LAST_UPDATE_DATE)
    return timesince(update_time, datetime.now())
@register.simple_tag
def last_update_date():
    """Return the last update as a datetime (from the Unix timestamp in
    settings.LAST_UPDATE_DATE)."""
    return datetime.fromtimestamp(settings.LAST_UPDATE_DATE)
@register.simple_tag
def last_update_status():
    """Return settings.LAST_UPDATE_STATUS."""
    return settings.LAST_UPDATE_STATUS
@register.simple_tag
def debugging():
    """Expose settings.DEBUG to templates."""
    return settings.DEBUG
| gpl-2.0 |
dydek/django | django/contrib/gis/geos/prototypes/predicates.py | 8 | 1540 | """
This module houses the GEOS ctypes prototype functions for the
unary and binary predicate operations on geometries.
"""
from ctypes import c_char, c_char_p, c_double
from django.contrib.gis.geos.libgeos import GEOM_PTR, GEOSFuncFactory
from django.contrib.gis.geos.prototypes.errcheck import check_predicate
# ## Binary & unary predicate factories ##
class UnaryPredicate(GEOSFuncFactory):
    "For GEOS unary predicate functions."
    # One geometry pointer in, a C char out; the raw return value is
    # validated/converted by check_predicate.
    argtypes = [GEOM_PTR]
    restype = c_char
    errcheck = staticmethod(check_predicate)
class BinaryPredicate(UnaryPredicate):
    "For GEOS binary predicate functions."
    # Same restype/errcheck as UnaryPredicate, but two geometry pointers.
    argtypes = [GEOM_PTR, GEOM_PTR]
# ## Unary Predicates ##
# Each binding wraps the named GEOS C predicate of the same name.
geos_hasz = UnaryPredicate('GEOSHasZ')
geos_isempty = UnaryPredicate('GEOSisEmpty')
geos_isring = UnaryPredicate('GEOSisRing')
geos_issimple = UnaryPredicate('GEOSisSimple')
geos_isvalid = UnaryPredicate('GEOSisValid')
# ## Binary Predicates ##
geos_contains = BinaryPredicate('GEOSContains')
geos_covers = BinaryPredicate('GEOSCovers')
geos_crosses = BinaryPredicate('GEOSCrosses')
geos_disjoint = BinaryPredicate('GEOSDisjoint')
geos_equals = BinaryPredicate('GEOSEquals')
# EqualsExact takes an extra double: the comparison tolerance.
geos_equalsexact = BinaryPredicate('GEOSEqualsExact', argtypes=[GEOM_PTR, GEOM_PTR, c_double])
geos_intersects = BinaryPredicate('GEOSIntersects')
geos_overlaps = BinaryPredicate('GEOSOverlaps')
# RelatePattern takes an extra char pointer: the DE-9IM pattern string.
geos_relatepattern = BinaryPredicate('GEOSRelatePattern', argtypes=[GEOM_PTR, GEOM_PTR, c_char_p])
geos_touches = BinaryPredicate('GEOSTouches')
geos_within = BinaryPredicate('GEOSWithin')
| bsd-3-clause |
openprocurement/openprocurement.auctions.dgf | openprocurement/auctions/dgf/views/other/question.py | 1 | 3280 | # -*- coding: utf-8 -*-
from openprocurement.auctions.core.utils import (
apply_patch,
context_unpack,
get_now,
json_view,
opresource,
save_auction,
)
from openprocurement.auctions.core.validation import (
validate_question_data,
validate_patch_question_data,
)
from openprocurement.auctions.core.views.mixins import AuctionQuestionResource
@opresource(name='dgfOtherAssets:Auction Questions',
            collection_path='/auctions/{auction_id}/questions',
            path='/auctions/{auction_id}/questions/{question_id}',
            auctionsprocurementMethodType="dgfOtherAssets",
            description="Auction questions")
class AuctionQuestionResource(AuctionQuestionResource):
    # NOTE: deliberately shadows the imported mixin of the same name,
    # specializing it for the "dgfOtherAssets" procurement method type.
    @json_view(content_type="application/json", validators=(validate_question_data,), permission='create_question')
    def collection_post(self):
        """Post a question.

        Allowed only while the auction is in 'active.tendering' and the
        current time is inside the enquiry period; for lot-related
        questions the lot must still be active. On success responds with
        201, a Location header, and the serialized question.
        """
        auction = self.request.validated['auction']
        if auction.status != 'active.tendering' or get_now() < auction.enquiryPeriod.startDate or get_now() > auction.enquiryPeriod.endDate:
            self.request.errors.add('body', 'data', 'Can add question only in enquiryPeriod')
            self.request.errors.status = 403
            return
        question = self.request.validated['question']
        # Lots are matched by relatedItem; a question against an inactive
        # lot is rejected.
        if any([i.status != 'active' for i in auction.lots if i.id == question.relatedItem]):
            self.request.errors.add('body', 'data', 'Can add question only in active lot status')
            self.request.errors.status = 403
            return
        auction.questions.append(question)
        if save_auction(self.request):
            self.LOGGER.info('Created auction question {}'.format(question.id),
                        extra=context_unpack(self.request, {'MESSAGE_ID': 'auction_question_create'}, {'question_id': question.id}))
            self.request.response.status = 201
            route = self.request.matched_route.name.replace("collection_", "")
            self.request.response.headers['Location'] = self.request.current_route_url(_route_name=route, question_id=question.id, _query={})
            return {'data': question.serialize("view")}
    @json_view(content_type="application/json", permission='edit_auction', validators=(validate_patch_question_data,))
    def patch(self):
        """Post an Answer.

        Allowed only in 'active.tendering' and, for lot-related questions,
        only while the lot is active. Applies the validated patch and
        returns the question serialized for the current auction status.
        """
        auction = self.request.validated['auction']
        if auction.status != 'active.tendering':
            self.request.errors.add('body', 'data', 'Can\'t update question in current ({}) auction status'.format(auction.status))
            self.request.errors.status = 403
            return
        if any([i.status != 'active' for i in auction.lots if i.id == self.request.context.relatedItem]):
            self.request.errors.add('body', 'data', 'Can update question only in active lot status')
            self.request.errors.status = 403
            return
        if apply_patch(self.request, src=self.request.context.serialize()):
            self.LOGGER.info('Updated auction question {}'.format(self.request.context.id),
                        extra=context_unpack(self.request, {'MESSAGE_ID': 'auction_question_patch'}))
            return {'data': self.request.context.serialize(auction.status)}
| apache-2.0 |
PXke/invenio | invenio/legacy/bibcirculation/utils.py | 4 | 30918 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2008, 2009, 2010, 2011, 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibCirculation Utils: Auxiliary methods of BibCirculation """
__revision__ = "$Id$"
import datetime
import random
import re
import time
from invenio.legacy.bibrecord import get_fieldvalues
from invenio.utils.url import make_invenio_opener
from invenio.legacy.search_engine import get_field_tags
from invenio.legacy.bibsched.bibtask import task_low_level_submission
from invenio.utils.text import encode_for_xml
from invenio.base.i18n import gettext_set_language
from invenio.config import CFG_SITE_URL, CFG_TMPDIR, CFG_SITE_LANG
import invenio.legacy.bibcirculation.db_layer as db
from invenio.legacy.bibcirculation.config import \
CFG_BIBCIRCULATION_WORKING_DAYS, \
CFG_BIBCIRCULATION_HOLIDAYS, \
CFG_CERN_SITE, \
CFG_BIBCIRCULATION_ITEM_STATUS_ON_LOAN, \
CFG_BIBCIRCULATION_ITEM_STATUS_ON_SHELF, \
CFG_BIBCIRCULATION_ITEM_STATUS_IN_PROCESS, \
CFG_BIBCIRCULATION_REQUEST_STATUS_PENDING, \
CFG_BIBCIRCULATION_REQUEST_STATUS_WAITING, \
CFG_BIBCIRCULATION_LOAN_STATUS_ON_LOAN, \
CFG_BIBCIRCULATION_LOAN_STATUS_EXPIRED, \
CFG_BIBCIRCULATION_LOAN_STATUS_RETURNED
DICC_REGEXP = re.compile("^\{('[^']*': ?('[^']*'|\"[^\"]+\"|[0-9]*|None)(, ?'[^']*': ?('[^']*'|\"[^\"]+\"|[0-9]*|None))*)?\}$")
BIBCIRCULATION_OPENER = make_invenio_opener('BibCirculation')
def search_user(column, string):
    """Look up borrowers by *column* ('name', 'email', or an id/ccid).

    On a CERN installation, a miss in the local borrower table falls back
    to the CERN LDAP directory; a borrower found there is inserted into
    the local table (best-effort) before being returned.

    @param column: attribute to search by.
    @param string: search value; surrounding whitespace is stripped.
    @return tuple of matching borrower rows (possibly empty).
    """
    if string is not None:
        string = string.strip()
    if CFG_CERN_SITE == 1:
        if column == 'name':
            result = db.search_borrower_by_name(string)
        else:
            if column == 'email':
                try:
                    result = db.search_borrower_by_email(string)
                except:
                    result = ()
            else:
                try:
                    result = db.search_borrower_by_ccid(string)
                except:
                    result = ()
            if result == ():
                from invenio.legacy.bibcirculation.cern_ldap \
                    import get_user_info_from_ldap
                # The LDAP helper reports 'busy' while another lookup is in
                # progress; poll once per second until it answers.
                ldap_info = 'busy'
                while ldap_info == 'busy':
                    time.sleep(1)
                    if column == 'id' or column == 'ccid':
                        ldap_info = get_user_info_from_ldap(ccid=string)
                    elif column == 'email':
                        ldap_info = get_user_info_from_ldap(email=string)
                    else:
                        ldap_info = get_user_info_from_ldap(nickname=string)
                if len(ldap_info) == 0:
                    result = ()
                else:
                    # Each LDAP attribute is a list; a missing attribute
                    # defaults to the empty string.
                    try:
                        name = ldap_info['displayName'][0]
                    except KeyError:
                        name = ""
                    try:
                        email = ldap_info['mail'][0]
                    except KeyError:
                        email = ""
                    try:
                        phone = ldap_info['telephoneNumber'][0]
                    except KeyError:
                        phone = ""
                    try:
                        address = ldap_info['physicalDeliveryOfficeName'][0]
                    except KeyError:
                        address = ""
                    try:
                        mailbox = ldap_info['postOfficeBox'][0]
                    except KeyError:
                        mailbox = ""
                    try:
                        ccid = ldap_info['employeeID'][0]
                    except KeyError:
                        ccid = ""
                    # Cache the LDAP hit in the local table; failures (e.g.
                    # duplicates) are deliberately ignored.
                    try:
                        db.new_borrower(ccid, name, email, phone,
                                        address, mailbox, '')
                    except:
                        pass
                    result = db.search_borrower_by_ccid(int(ccid))
    else:
        if column == 'name':
            result = db.search_borrower_by_name(string)
        elif column == 'email':
            result = db.search_borrower_by_email(string)
        else:
            result = db.search_borrower_by_id(string)
    return result
def update_user_info_from_ldap(user_id):
from invenio.legacy.bibcirculation.cern_ldap import get_user_info_from_ldap
ccid = db.get_borrower_ccid(user_id)
ldap_info = get_user_info_from_ldap(ccid=ccid)
if not ldap_info:
result = ()
else:
try:
name = ldap_info['displayName'][0]
except KeyError:
name = ""
try:
email = ldap_info['mail'][0]
except KeyError:
email = ""
try:
phone = ldap_info['telephoneNumber'][0]
except KeyError:
phone = ""
try:
address = ldap_info['physicalDeliveryOfficeName'][0]
except KeyError:
address = ""
try:
mailbox = ldap_info['postOfficeBox'][0]
except KeyError:
mailbox = ""
db.update_borrower(user_id, name, email, phone, address, mailbox)
result = db.search_borrower_by_ccid(int(ccid))
return result
def get_book_cover(isbn):
    """
    Retrieve book cover using Amazon web services.
    @param isbn: book's isbn
    @type isbn: string
    @return book cover
    """
    from xml.dom import minidom
    # connect to AWS
    # NOTE(review): the actual AWS call below is commented out, so
    # cover_xml stays "", minidom.parse() raises, and the placeholder
    # image is always returned. The dead code is kept for reference.
    """cover_xml = BIBCIRCULATION_OPENER.open('http://ecs.amazonaws.com/onca/xml' \
                '?Service=AWSECommerceService&AWSAccessKeyId=' \
                + CFG_BIBCIRCULATION_AMAZON_ACCESS_KEY + \
                '&Operation=ItemSearch&Condition=All&' \
                'ResponseGroup=Images&SearchIndex=Books&' \
                'Keywords=' + isbn)"""
    cover_xml=""
    # parse XML
    try:
        xml_img = minidom.parse(cover_xml)
        retrieve_book_cover = xml_img.getElementsByTagName('MediumImage')
        book_cover = retrieve_book_cover.item(0).firstChild.firstChild.data
    except:
        # Any failure (including the empty cover_xml above) falls back to
        # the generic placeholder image.
        book_cover = "%s/img/book_cover_placeholder.gif" % (CFG_SITE_URL)
    return book_cover
def book_information_from_MARC(recid):
    """
    Retrieve book's information from MARC
    @param recid: identify the record. Primary key of bibrec.
    @type recid: int
    @return tuple with title, year, author, isbn and editor.
    """
    # FIXME do the same that book_title_from_MARC
    book_title = book_title_from_MARC(recid)
    book_year = ''.join(get_fieldvalues(recid, "260__c"))
    # Authors may live under several MARC tags; collect them all and
    # join with '; '.
    author_values = []
    for tag in ('100__a', '700__a', '721__a'):
        author_values.extend(get_fieldvalues(recid, tag))
    book_author = '; '.join(author_values)
    book_isbn = ', '.join(get_fieldvalues(recid, "020__a"))
    book_editor = ', '.join(get_fieldvalues(recid, "260__a") + \
                            get_fieldvalues(recid, "260__b"))
    return (book_title, book_year, book_author, book_isbn, book_editor)
def book_title_from_MARC(recid):
    """
    Retrieve book's title from MARC
    @param recid: identify the record. Primary key of bibrec.
    @type recid: int
    @return book's title
    """
    # Walk the configured title tags in order and use the first tag that
    # yields any values, joining multiple values with ': '.
    for tag in get_field_tags('title'):
        values = get_fieldvalues(recid, tag)
        if values:
            return ': '.join(values)
    return ''
def update_status_if_expired(loan_id):
    """
    Reset a loan to the on-loan status when it is currently 'expired'.
    @param loan_id: identify the loan. Primary key of crcLOAN.
    @type loan_id: int
    """
    if db.get_loan_status(loan_id) == CFG_BIBCIRCULATION_LOAN_STATUS_EXPIRED:
        db.update_loan_status(CFG_BIBCIRCULATION_ITEM_STATUS_ON_LOAN, loan_id)
def get_next_day(date_string):
    """
    Return the day after *date_string* as a datetime (midnight).
    @param date_string: date in 'YYYY-MM-DD' format
    @type date_string: string
    @return datetime for the following day
    """
    parsed = datetime.datetime.strptime(date_string, '%Y-%m-%d')
    return parsed + datetime.timedelta(days=1)
def generate_new_due_date(days):
    """
    Generate a new due date (today + X days = new due date).
    @param days: number of days
    @type days: string
    @return new due date ('YYYY-MM-DD')
    """
    candidate = datetime.date.today() + datetime.timedelta(days=days)
    week_day = candidate.strftime('%A')
    due_date = candidate.strftime('%Y-%m-%d')
    # Roll forward one day at a time until the date lands on a configured
    # working day that is not a holiday.
    while (week_day not in CFG_BIBCIRCULATION_WORKING_DAYS
           or due_date in CFG_BIBCIRCULATION_HOLIDAYS):
        next_day = get_next_day(due_date)
        due_date = next_day.strftime('%Y-%m-%d')
        week_day = next_day.strftime('%A')
    return due_date
def renew_loan_for_X_days(barcode):
    """
    Renew a loan based on its loan period
    @param barcode: identify the item. Primary key of crcITEM.
    @type barcode: string
    @return new due date
    """
    # '4 weeks' items get 30 days; everything else gets 7.
    days = 30 if db.get_loan_period(barcode) == '4 weeks' else 7
    return generate_new_due_date(days)
def make_copy_available(request_id):
    """
    Mark the copy behind a cancelled hold request as back on shelf and
    recompute the request queue for that barcode.
    @param request_id: identify the request: Primary key of crcLOANREQUEST
    @type request_id: int
    """
    requested_barcode = db.get_requested_barcode(request_id)
    db.update_item_status(CFG_BIBCIRCULATION_ITEM_STATUS_ON_SHELF,
                          requested_barcode)
    update_requests_statuses(requested_barcode)
def print_new_loan_information(req, ln=CFG_SITE_LANG):
    """
    Create a printable format with the information of the last
    loan who has been registered on the table crcLOAN.

    The HTML page is written directly to *req*; the return value is a
    bare newline.
    @param req: request object used to send the page.
    @param ln: interface language code.
    """
    _ = gettext_set_language(ln)
    # get the last loan from crcLOAN
    (recid, borrower_id, due_date) = db.get_last_loan()
    # get book's information
    (book_title, book_year, book_author,
     book_isbn, book_editor) = book_information_from_MARC(recid)
    # get borrower's data/information (name, address, email)
    (borrower_name, borrower_address,
     borrower_mailbox, borrower_email) = db.get_borrower_data(borrower_id)
    # Generate printable format
    req.content_type = "text/html"
    req.send_http_header()
    out = """<table style='width:95%; margin:auto; max-width: 600px;'>"""
    out += """
           <tr>
             <td><img src="%s/img/CERN_CDS_logo.png"></td>
           </tr>
           </table><br />""" % (CFG_SITE_URL)
    out += """<table style='color: #79d; font-size: 82%; width:95%;
               margin:auto; max-width: 400px;'>"""
    out += """  <tr>
                    <td align="center">
                        <h2><strong>%s</strong></h2>
                    </td>
                </tr>""" % (_("Loan information"))
    out += """  <tr>
                    <td align="center"><strong>%s</strong></td>
                </tr>""" % (_("This book has been sent to you:"))
    out += """</table><br />"""
    # Book details block.
    out += """<table style='color: #79d; font-size: 82%; width:95%;
               margin:auto; max-width: 400px;'>"""
    out += """  <tr>
                    <td width="70"><strong>%s</strong></td>
                    <td style='color: black;'>%s</td>
                </tr>
                <tr>
                    <td width="70"><strong>%s</strong></td>
                    <td style='color: black;'>%s</td>
                </tr>
                <tr>
                    <td width="70"><strong>%s</strong></td>
                    <td style='color: black;'>%s</td>
                </tr>
                <tr>
                    <td width="70"><strong>%s</strong></td>
                    <td style='color: black;'>%s</td>
                </tr>
                <tr>
                    <td width="70"><strong>%s</strong></td>
                    <td style='color: black;'>%s</td>
                </tr>
                """ % (_("Title"), book_title,
                       _("Author"), book_author,
                       _("Editor"), book_editor,
                       _("ISBN"), book_isbn,
                       _("Year"), book_year)
    out += """</table><br />"""
    # Borrower details block.
    out += """<table style='color: #79d; font-size: 82%; width:95%;
               margin:auto; max-width: 400px;'>"""
    out += """  <tr>
                    <td width="70"><strong>%s</strong></td>
                    <td style='color: black;'>%s</td>
                </tr>
                <tr>
                    <td width="70"><strong>%s</strong></td>
                    <td style='color: black;'>%s</td>
                </tr>
                <tr>
                    <td width="70"><strong>%s</strong></td>
                    <td style='color: black;'>%s</td>
                </tr>
                <tr>
                    <td width="70"><strong>%s</strong></td>
                    <td style='color: black;'>%s</td>
                </tr>
                """ % (_("Name"), borrower_name,
                       _("Mailbox"), borrower_mailbox,
                       _("Address"), borrower_address,
                       _("Email"), borrower_email)
    out += """</table>
              <br />"""
    out += """<table style='color: #79d; font-size: 82%; width:95%;
               margin:auto; max-width: 400px;'>"""
    out += """  <tr>
                    <td align="center"><h2><strong>%s: %s</strong></h2></td>
                </tr>""" % (_("Due date"), due_date)
    out += """</table>"""
    # Print button.
    out += """<table style='color: #79d; font-size: 82%; width:95%;
                margin:auto; max-width: 800px;'>
                <tr>
                    <td>
                        <input type="button" onClick='window.print()'
                        value='Print' style='color: #fff;
                        background: #36c; font-weight: bold;'>
                    </td>
                </tr>
                </table>
                """
    req.write("<html>")
    req.write(out)
    req.write("</html>")
    return "\n"
def print_pending_hold_requests_information(req, ln):
    """
    Create a printable format with all the information about all
    pending hold requests.

    The HTML page is written directly to *req*; the return value is a
    bare newline.
    @param req: request object used to send the page.
    @param ln: interface language code.
    """
    _ = gettext_set_language(ln)
    requests = db.get_pdf_request_data(CFG_BIBCIRCULATION_REQUEST_STATUS_PENDING)
    req.content_type = "text/html"
    req.send_http_header()
    out = """<table style='width:100%; margin:auto; max-width: 1024px;'>"""
    out += """
           <tr>
             <td><img src="%s/img/CERN_CDS_logo.png"></td>
           </tr>
           </table><br />""" % (CFG_SITE_URL)
    out += """<table style='color: #79d; font-size: 82%;
               width:95%; margin:auto; max-width: 1024px;'>"""
    out += """  <tr>
                    <td align="center"><h2><strong>%s</strong></h2></td>
                </tr>""" % (_("List of pending hold requests"))
    out += """  <tr>
                    <td align="center"><strong>%s</strong></td>
                </tr>""" % (time.ctime())
    out += """</table><br/>"""
    # Table header row.
    out += """<table style='color: #79d; font-size: 82%;
               width:95%; margin:auto; max-width: 1024px;'>"""
    out += """<tr>
                  <td><strong>%s</strong></td>
                  <td><strong>%s</strong></td>
                  <td><strong>%s</strong></td>
                  <td><strong>%s</strong></td>
                  <td><strong>%s</strong></td>
                  <td><strong>%s</strong></td>
                  <td><strong>%s</strong></td>
              </tr>
              """ % (_("Borrower"),
                     _("Item"),
                     _("Library"),
                     _("Location"),
                     _("From"),
                     _("To"),
                     _("Request date"))
    # One row per pending request.
    for (recid, borrower_name, library_name, location,
         date_from, date_to, request_date) in requests:
        out += """<tr style='color: black;'>
                      <td class="bibcirccontent">%s</td>
                      <td class="bibcirccontent">%s</td>
                      <td class="bibcirccontent">%s</td>
                      <td class="bibcirccontent">%s</td>
                      <td class="bibcirccontent">%s</td>
                      <td class="bibcirccontent">%s</td>
                      <td class="bibcirccontent">%s</td>
                  </tr>
                  """ % (borrower_name, book_title_from_MARC(recid),
                         library_name, location, date_from, date_to,
                         request_date)
    # Back / Print buttons.
    out += """</table>
              <br />
              <br />
                  <table style='color: #79d; font-size: 82%;
                  width:95%; margin:auto; max-width: 1024px;'>
                  <tr>
                      <td>
                          <input type=button value='Back' onClick="history.go(-1)"
                           style='color: #fff; background: #36c;
                           font-weight: bold;'>
                          <input type="button" onClick='window.print()'
                           value='Print' style='color: #fff;
                           background: #36c; font-weight: bold;'>
                      </td>
                  </tr>
                  </table>"""
    req.write("<html>")
    req.write(out)
    req.write("</html>")
    return "\n"
def get_item_info_for_search_result(recid):
    """
    Get the item's info from MARC in order to create a
    search result with more details
    @param recid: identify the record. Primary key of bibrec.
    @type recid: int
    @return (author, editor, number of copies) tuple
    """
    author = ' '.join(get_fieldvalues(recid, "100__a") +
                      get_fieldvalues(recid, "100__u"))
    editor = ' , '.join(get_fieldvalues(recid, "260__a") +
                        get_fieldvalues(recid, "260__b") +
                        get_fieldvalues(recid, "260__c"))
    copies = ' '.join(get_fieldvalues(recid, "964__a"))
    return (author, editor, copies)
def update_request_data(request_id):
    """
    Refresh the item status behind a request and recompute the request
    queue for that barcode.
    @param request_id: identify the request: Primary key of crcLOANREQUEST
    @type request_id: int
    """
    barcode = db.get_request_barcode(request_id)
    if db.is_item_on_loan(barcode) is not None:
        new_status = CFG_BIBCIRCULATION_ITEM_STATUS_ON_LOAN
    else:
        new_status = CFG_BIBCIRCULATION_ITEM_STATUS_ON_SHELF
    db.update_item_status(new_status, barcode)
    update_requests_statuses(barcode)
    return True
def compare_dates(date):
    """
    Return True when *date* ('YYYY-MM-DD') is today or in the future.
    @param date: given date
    @type date: string
    @return boolean
    """
    # ISO dates compare correctly as strings.
    today = time.strftime("%Y-%m-%d")
    return date >= today
def validate_date_format(date):
    """
    Verify that *date* is a 'YYYY-MM-DD' string that is today or later.
    @param date: given date
    @type date: string
    @return boolean
    """
    try:
        time.strptime(date, "%Y-%m-%d")
    except (ValueError, TypeError):
        # ValueError: malformed date string.
        # TypeError: non-string input (e.g. None from an empty DB column),
        # which previously escaped the handler and crashed the caller.
        return False
    return compare_dates(date)
def create_ill_record(book_info):
    """
    Create a new ILL record
    @param book_info: book's information
    @type book_info: tuple
    @return MARC record

    Writes the record to a timestamped XML file under CFG_TMPDIR and
    queues a bibupload task to ingest it.
    """
    (title, author, place, publisher, year, edition, isbn) = book_info
    # All values are XML-escaped before interpolation into the template.
    ill_record = """
        <record>
        <datafield tag="020" ind1=" " ind2=" ">
            <subfield code="a">%(isbn)s</subfield>
        </datafield>
        <datafield tag="100" ind1=" " ind2=" ">
            <subfield code="a">%(author)s</subfield>
        </datafield>
        <datafield tag="245" ind1=" " ind2=" ">
            <subfield code="a">%(title)s</subfield>
        </datafield>
        <datafield tag="250" ind1=" " ind2=" ">
            <subfield code="a">%(edition)s</subfield>
        </datafield>
        <datafield tag="260" ind1=" " ind2=" ">
            <subfield code="a">%(place)s</subfield>
            <subfield code="b">%(publisher)s</subfield>
            <subfield code="c">%(year)s</subfield>
        </datafield>
        <datafield tag="980" ind1=" " ind2=" ">
            <subfield code="a">ILLBOOK</subfield>
        </datafield>
        </record>
  """ % {'isbn': encode_for_xml(isbn),
         'author': encode_for_xml(author),
         'title': encode_for_xml(title),
         'edition': encode_for_xml(edition),
         'place': encode_for_xml(place),
         'publisher': encode_for_xml(publisher),
         'year': encode_for_xml(year)}
    file_path = '%s/%s_%s.xml' % (CFG_TMPDIR, 'bibcirculation_ill_book',
                                  time.strftime("%Y%m%d_%H%M%S"))
    xml_file = open(file_path, 'w')
    xml_file.write(ill_record)
    xml_file.close()
    # Pass XML file to BibUpload.
    task_low_level_submission('bibupload', 'bibcirculation',
                              '-P', '5', '-i', file_path)
    return ill_record
def wash_recid_from_ILL_request(ill_request_id):
    """
    Extract the record id from an ILL request's stored book-info dict.
    @param ill_request_id: identify the ILL request. Primary key of crcILLREQUEST
    @type ill_request_id: int
    @return recid as int, or None when absent/unparseable
    """
    import ast
    book_info = db.get_ill_book_info(ill_request_id)
    if looks_like_dictionary(book_info):
        # literal_eval only accepts Python literals, unlike the previous
        # eval() which would execute arbitrary expressions stored in the DB.
        book_info = ast.literal_eval(book_info)
    else:
        book_info = None
    if book_info is None:
        # Previously book_info[...] raised an uncaught TypeError here.
        return None
    try:
        return int(book_info['recid'])
    except (KeyError, TypeError, ValueError):
        # Missing key, or a recid value that cannot convert to int.
        return None
def all_copies_are_missing(recid):
    """
    Verify if all copies of an item are missing
    @param recid: identify the record. Primary key of bibrec
    @type recid: int
    @return boolean (True also when the record has no copies at all)
    """
    copies_status = db.get_copies_status(recid)
    # Identity comparison instead of '== None'; an absent result counts
    # as "all missing", matching the previous behavior.
    if copies_status is None:
        return True
    # all() on an empty sequence is True, like the old counter equality.
    return all(status == 'missing' for status in copies_status)
#def has_copies(recid):
# """
# Verify if a recid is item (has copies)
#
# @param recid: identify the record. Primary key of bibrec
# @type recid: int
#
# @return boolean
# """
#
# copies_status = db.get_copies_status(recid)
#
# if copies_status is None:
# return False
# else:
# if len(copies_status) == 0:
# return False
# else:
# return True
def generate_email_body(template, loan_id, ill=0):
    """
    Generate the body of an email for loan recalls.
    @param template: email template
    @type template: string
    @param loan_id: identify the loan. Primary key of crcLOAN.
    @type loan_id: int
    @param ill: truthy for inter-library loans (template used verbatim)
    @return email(body)
    """
    if ill:
        # ILL templates carry no book placeholders; return unchanged.
        return template
    recid = db.get_loan_recid(loan_id)
    (book_title, book_year, book_author,
     book_isbn, book_editor) = book_information_from_MARC(int(recid))
    return template % (book_title, book_year, book_author,
                       book_isbn, book_editor)
def create_item_details_url(recid, ln):
    """Build the absolute admin URL for the item-details page of *recid*."""
    path = '/admin2/bibcirculation/get_item_details?ln=%s&recid=%s' % (ln,
                                                                str(recid))
    return CFG_SITE_URL + path
def tag_all_requests_as_done(barcode, user_id):
    """Mark the user's requests as done on every copy sharing this
    barcode's record and description."""
    recid = db.get_id_bibrec(barcode)
    description = db.get_item_description(barcode)
    for sibling_barcode in db.get_barcodes(recid, description):
        db.tag_requests_as_done(user_id, sibling_barcode)
def update_requests_statuses(barcode):
    """Recompute pending/waiting hold-request statuses for the record and
    description that *barcode* belongs to.

    A copy counts as available when any sibling copy is on shelf or in
    process. Returns the id of the request now at the head of the queue,
    or None when nothing is actionable.
    """
    recid = db.get_id_bibrec(barcode)
    description = db.get_item_description(barcode)
    list_of_pending_requests = db.get_requests(recid, description,
                                    CFG_BIBCIRCULATION_REQUEST_STATUS_PENDING)
    some_copy_available = False
    copies_status = db.get_copies_status(recid, description)
    if copies_status is not None:
        for status in copies_status:
            if status in (CFG_BIBCIRCULATION_ITEM_STATUS_ON_SHELF,
                          CFG_BIBCIRCULATION_ITEM_STATUS_IN_PROCESS):
                some_copy_available = True
    if len(list_of_pending_requests) == 1:
        # Single pending request: demote to waiting when no copy is free,
        # otherwise it stays pending and is returned as the head.
        if not some_copy_available:
            db.update_loan_request_status(CFG_BIBCIRCULATION_REQUEST_STATUS_WAITING,
                                          list_of_pending_requests[0][0])
        else:
            return list_of_pending_requests[0][0]
    elif len(list_of_pending_requests) == 0:
        # No pending requests: if a copy is free, promote the oldest
        # waiting request to pending.
        if some_copy_available:
            list_of_waiting_requests = db.get_requests(recid, description,
                                    CFG_BIBCIRCULATION_REQUEST_STATUS_WAITING)
            if len(list_of_waiting_requests) > 0:
                db.update_loan_request_status(CFG_BIBCIRCULATION_REQUEST_STATUS_PENDING,
                                              list_of_waiting_requests[0][0])
                return list_of_waiting_requests[0][0]
    elif len(list_of_pending_requests) > 1:
        # Several pending requests: demote them all to waiting, then (when
        # a copy is free) promote exactly one back to pending.
        for request in list_of_pending_requests:
            db.update_loan_request_status(CFG_BIBCIRCULATION_REQUEST_STATUS_WAITING,
                                          request[0])
        list_of_waiting_requests = db.get_requests(recid, description,
                                    CFG_BIBCIRCULATION_REQUEST_STATUS_WAITING)
        if some_copy_available:
            db.update_loan_request_status(CFG_BIBCIRCULATION_REQUEST_STATUS_PENDING,
                                          list_of_waiting_requests[0][0])
            return list_of_waiting_requests[0][0]
    return None
def is_periodical(recid):
    """Return True when record *recid* carries the 'PERI' value in its
    690C_a field."""
    return 'PERI' in get_fieldvalues(recid, "690C_a")
def has_date_format(date):
    """
    Return True when *date* is a string in strict 'YYYY-MM-DD' form
    (after stripping surrounding whitespace).
    @param date: candidate value (any type accepted)
    @return boolean
    """
    if not isinstance(date, str):
        return False
    date = date.strip()
    if len(date) != 10:
        # Note: the previous 'len(date) is not 10' / "date[4] is not '-'"
        # relied on CPython interning of small ints and one-char strings.
        return False
    if date[4] != '-' or date[7] != '-':
        # BOTH separators must be dashes; the old 'and' let a single bad
        # separator (e.g. '2010x07-01') slip through.
        return False
    year = date[:4]
    month = date[5:7]
    day = date[8:]
    return year.isdigit() and month.isdigit() and day.isdigit()
def generate_tmp_barcode():
    """Generate a temporary 'tmp-XXXXXXXX' barcode not yet present in the
    database."""
    while True:
        candidate = 'tmp-' + str(random.random())[-8:]
        if not db.barcode_in_use(candidate):
            return candidate
def check_database():
    """Run four consistency checks between crcITEM and crcLOAN and return
    the number of offending rows of each as a 4-tuple:

    1. items marked on-loan whose loan row is in none of the live/returned
       loan statuses;
    2. items marked on-shelf that still have an on-loan or expired loan;
    3. pairs of non-returned loans sharing the same barcode and status;
    4. loans marked expired whose due date is still in the future.
    """
    from invenio.legacy.dbquery import run_sql
    r1 = run_sql(""" SELECT it.barcode, it.status, ln.status
                     FROM crcITEM it, crcLOAN ln
                     WHERE ln.barcode=it.barcode
                     AND it.status=%s
                     AND ln.status!=%s
                     AND ln.status!=%s
                     AND ln.status!=%s
                 """, (CFG_BIBCIRCULATION_ITEM_STATUS_ON_LOAN,
                       CFG_BIBCIRCULATION_LOAN_STATUS_ON_LOAN,
                       CFG_BIBCIRCULATION_LOAN_STATUS_EXPIRED,
                       CFG_BIBCIRCULATION_LOAN_STATUS_RETURNED))
    r2 = run_sql(""" SELECT it.barcode
                     FROM crcITEM it, crcLOAN ln
                     WHERE ln.barcode=it.barcode
                     AND it.status=%s
                     AND (ln.status=%s or ln.status=%s)
                 """, (CFG_BIBCIRCULATION_ITEM_STATUS_ON_SHELF,
                       CFG_BIBCIRCULATION_LOAN_STATUS_ON_LOAN,
                       CFG_BIBCIRCULATION_LOAN_STATUS_EXPIRED))
    r3 = run_sql(""" SELECT l1.barcode, l1.id,
                            DATE_FORMAT(l1.loaned_on,'%%Y-%%m-%%d %%H:%%i:%%s'),
                            DATE_FORMAT(l2.loaned_on,'%%Y-%%m-%%d %%H:%%i:%%s')
                     FROM crcLOAN l1,
                          crcLOAN l2
                     WHERE l1.id!=l2.id
                     AND l1.status!=%s
                     AND l1.status=l2.status
                     AND l1.barcode=l2.barcode
                     ORDER BY l1.loaned_on
                 """, (CFG_BIBCIRCULATION_LOAN_STATUS_RETURNED, ))
    r4 = run_sql(""" SELECT id, id_crcBORROWER, barcode,
                            due_date, number_of_renewals
                     FROM crcLOAN
                     WHERE status=%s
                     AND due_date>NOW()
                 """, (CFG_BIBCIRCULATION_LOAN_STATUS_EXPIRED, ))
    return (len(r1), len(r2), len(r3), len(r4))
def looks_like_dictionary(candidate_string):
    """Return True when *candidate_string* matches the flat string/int/None
    dict-literal shape accepted by DICC_REGEXP."""
    return re.match(DICC_REGEXP, candidate_string) is not None
| gpl-2.0 |
adit-chandra/tensorflow | tensorflow/python/ops/confusion_matrix.py | 9 | 10906 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Confusion matrix related utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
def remove_squeezable_dimensions(
    labels, predictions, expected_rank_diff=0, name=None):
  """Squeeze last dim if ranks differ from expected by exactly 1.
  In the common case where we expect shapes to match, `expected_rank_diff`
  defaults to 0, and we squeeze the last dimension of the larger rank if they
  differ by 1.
  But, for example, if `labels` contains class IDs and `predictions` contains 1
  probability per class, we expect `predictions` to have 1 more dimension than
  `labels`, so `expected_rank_diff` would be 1. In this case, we'd squeeze
  `labels` if `rank(predictions) - rank(labels) == 0`, and
  `predictions` if `rank(predictions) - rank(labels) == 2`.
  This will use static shape if available. Otherwise, it will add graph
  operations, which could result in a performance hit.
  Args:
    labels: Label values, a `Tensor` whose dimensions match `predictions`.
    predictions: Predicted values, a `Tensor` of arbitrary dimensions.
    expected_rank_diff: Expected result of `rank(predictions) - rank(labels)`.
    name: Name of the op.
  Returns:
    Tuple of `labels` and `predictions`, possibly with last dim squeezed.
  """
  with ops.name_scope(name, 'remove_squeezable_dimensions',
                      [labels, predictions]):
    predictions = ops.convert_to_tensor(predictions)
    labels = ops.convert_to_tensor(labels)
    predictions_shape = predictions.get_shape()
    predictions_rank = predictions_shape.ndims
    labels_shape = labels.get_shape()
    labels_rank = labels_shape.ndims
    if (labels_rank is not None) and (predictions_rank is not None):
      # Use static rank.
      # Both ranks known at graph-construction time: decide which tensor
      # (if any) to squeeze without adding runtime ops.
      rank_diff = predictions_rank - labels_rank
      if (rank_diff == expected_rank_diff + 1 and
          predictions_shape.dims[-1].is_compatible_with(1)):
        predictions = array_ops.squeeze(predictions, [-1])
      elif (rank_diff == expected_rank_diff - 1 and
            labels_shape.dims[-1].is_compatible_with(1)):
        labels = array_ops.squeeze(labels, [-1])
      return labels, predictions
    # Use dynamic rank.
    # At least one rank is unknown: fall back to graph-time cond ops that
    # squeeze conditionally based on the runtime rank difference.
    rank_diff = array_ops.rank(predictions) - array_ops.rank(labels)
    if (predictions_rank is None) or (
        predictions_shape.dims[-1].is_compatible_with(1)):
      predictions = control_flow_ops.cond(
          math_ops.equal(expected_rank_diff + 1, rank_diff),
          lambda: array_ops.squeeze(predictions, [-1]),
          lambda: predictions)
    if (labels_rank is None) or (
        labels_shape.dims[-1].is_compatible_with(1)):
      labels = control_flow_ops.cond(
          math_ops.equal(expected_rank_diff - 1, rank_diff),
          lambda: array_ops.squeeze(labels, [-1]),
          lambda: labels)
    return labels, predictions
@tf_export('math.confusion_matrix', v1=[])
def confusion_matrix(labels,
                     predictions,
                     num_classes=None,
                     weights=None,
                     dtype=dtypes.int32,
                     name=None):
  """Computes the confusion matrix from predictions and labels.
  The matrix columns represent the prediction labels and the rows represent the
  real labels. The confusion matrix is always a 2-D array of shape `[n, n]`,
  where `n` is the number of valid labels for a given classification task. Both
  prediction and labels must be 1-D arrays of the same shape in order for this
  function to work.
  If `num_classes` is `None`, then `num_classes` will be set to one plus the
  maximum value in either predictions or labels. Class labels are expected to
  start at 0. For example, if `num_classes` is 3, then the possible labels
  would be `[0, 1, 2]`.
  If `weights` is not `None`, then each prediction contributes its
  corresponding weight to the total value of the confusion matrix cell.
  For example:
  ```python
    tf.math.confusion_matrix([1, 2, 4], [2, 2, 4]) ==>
      [[0 0 0 0 0]
       [0 0 1 0 0]
       [0 0 1 0 0]
       [0 0 0 0 0]
       [0 0 0 0 1]]
  ```
  Note that the possible labels are assumed to be `[0, 1, 2, 3, 4]`,
  resulting in a 5x5 confusion matrix.
  Args:
    labels: 1-D `Tensor` of real labels for the classification task.
    predictions: 1-D `Tensor` of predictions for a given classification.
    num_classes: The possible number of labels the classification task can
      have. If this value is not provided, it will be calculated
      using both predictions and labels array.
    weights: An optional `Tensor` whose shape matches `predictions`.
    dtype: Data type of the confusion matrix.
    name: Scope name.
  Returns:
    A `Tensor` of type `dtype` with shape `[n, n]` representing the confusion
    matrix, where `n` is the number of possible labels in the classification
    task.
  Raises:
    ValueError: If both predictions and labels are not 1-D vectors and have
      mismatched shapes, or if `weights` is not `None` and its shape doesn't
      match `predictions`.
  """
  with ops.name_scope(name, 'confusion_matrix',
                      (predictions, labels, num_classes, weights)) as name:
    # Drop a possible trailing singleton dimension so labels and predictions
    # end up with matching ranks.
    labels, predictions = remove_squeezable_dimensions(
        ops.convert_to_tensor(labels, name='labels'),
        ops.convert_to_tensor(
            predictions, name='predictions'))
    predictions = math_ops.cast(predictions, dtypes.int64)
    labels = math_ops.cast(labels, dtypes.int64)
    # Sanity checks - underflow or overflow can cause memory corruption.
    labels = control_flow_ops.with_dependencies(
        [check_ops.assert_non_negative(
            labels, message='`labels` contains negative values')],
        labels)
    predictions = control_flow_ops.with_dependencies(
        [check_ops.assert_non_negative(
            predictions, message='`predictions` contains negative values')],
        predictions)
    if num_classes is None:
      # Infer n as one plus the largest class index seen in either input.
      num_classes = math_ops.maximum(math_ops.reduce_max(predictions),
                                     math_ops.reduce_max(labels)) + 1
    else:
      # With an explicit class count, also guard against out-of-range indices.
      num_classes_int64 = math_ops.cast(num_classes, dtypes.int64)
      labels = control_flow_ops.with_dependencies(
          [check_ops.assert_less(
              labels, num_classes_int64, message='`labels` out of bound')],
          labels)
      predictions = control_flow_ops.with_dependencies(
          [check_ops.assert_less(
              predictions, num_classes_int64,
              message='`predictions` out of bound')],
          predictions)
    if weights is not None:
      weights = ops.convert_to_tensor(weights, name='weights')
      predictions.get_shape().assert_is_compatible_with(weights.get_shape())
      weights = math_ops.cast(weights, dtype)
    # Each (label, prediction) pair is a scatter index into the [n, n] matrix;
    # its value is 1 (or the corresponding weight). Duplicate indices are
    # summed by sparse_add below.
    shape = array_ops.stack([num_classes, num_classes])
    indices = array_ops.stack([labels, predictions], axis=1)
    values = (array_ops.ones_like(predictions, dtype)
              if weights is None else weights)
    cm_sparse = sparse_tensor.SparseTensor(
        indices=indices,
        values=values,
        dense_shape=math_ops.cast(shape, dtypes.int64))
    zero_matrix = array_ops.zeros(math_ops.cast(shape, dtypes.int32), dtype)
    # Densify by adding the sparse counts onto an all-zero matrix.
    return sparse_ops.sparse_add(zero_matrix, cm_sparse)
@tf_export(v1=['math.confusion_matrix', 'confusion_matrix'])
@deprecation.deprecated_endpoints('confusion_matrix', 'train.confusion_matrix')
def confusion_matrix_v1(labels,
                        predictions,
                        num_classes=None,
                        dtype=dtypes.int32,
                        name=None,
                        weights=None):
  """Computes the confusion matrix from predictions and labels.
  Both `labels` and `predictions` must be 1-D tensors of the same shape,
  holding class indices that start at 0. The result is an `[n, n]` matrix
  whose rows are real labels and whose columns are predicted labels, where
  `n` is `num_classes` (or, when `num_classes` is `None`, one plus the
  largest value found in either input). When `weights` is supplied, each
  (label, prediction) pair contributes its weight instead of 1.
  For example:
  ```python
    tf.math.confusion_matrix([1, 2, 4], [2, 2, 4]) ==>
      [[0 0 0 0 0]
       [0 0 1 0 0]
       [0 0 1 0 0]
       [0 0 0 0 0]
       [0 0 0 0 1]]
  ```
  Args:
    labels: 1-D `Tensor` of real labels for the classification task.
    predictions: 1-D `Tensor` of predictions for a given classification.
    num_classes: The possible number of labels the classification task can
      have. If this value is not provided, it will be calculated
      using both predictions and labels array.
    dtype: Data type of the confusion matrix.
    name: Scope name.
    weights: An optional `Tensor` whose shape matches `predictions`.
  Returns:
    A `Tensor` of type `dtype` with shape `[n, n]` representing the confusion
    matrix, where `n` is the number of possible labels in the classification
    task.
  Raises:
    ValueError: If both predictions and labels are not 1-D vectors and have
      mismatched shapes, or if `weights` is not `None` and its shape doesn't
      match `predictions`.
  """
  # The v1 endpoint keeps a legacy positional order (dtype before weights);
  # delegate by keyword so the reordering cannot misroute arguments.
  return confusion_matrix(
      labels=labels,
      predictions=predictions,
      num_classes=num_classes,
      weights=weights,
      dtype=dtype,
      name=name)
| apache-2.0 |
tonybaloney/st2contrib | packs/mmonit/actions/update_host.py | 12 | 1299 | from lib.mmonit import MmonitBaseAction
class MmonitUpdateHost(MmonitBaseAction):
    """Update an existing host entry through the M/Monit admin API.

    Only parameters the caller actually supplies (i.e. not None) are posted,
    so unspecified fields keep their current values on the M/Monit side.
    """

    def run(self, host_id, hostname, keepname=None, description=None, status=None, ipaddrout=None,
            portout=None, sslout=None, monituser=None, monitpassword=None, skew=None):
        """POST the host update to ``<url>/admin/hosts/update`` and return the
        decoded JSON response. The session is always logged out afterwards.
        """
        self.login()
        data = {"id": host_id, "hostname": hostname}
        # Data-driven instead of nine copy-pasted `if x is not None` branches.
        optional = {
            'keepname': keepname,
            'description': description,
            'status': status,
            'ipaddrout': ipaddrout,
            'portout': portout,
            'sslout': sslout,
            'monituser': monituser,
            'monitpassword': monitpassword,
            'skew': skew,
        }
        data.update({key: value for key, value in optional.items() if value is not None})
        req = self.session.post("{}/admin/hosts/update".format(self.url), data=data)
        # The original `except Exception: raise` added nothing; try/finally is
        # all that is needed to guarantee logout on both success and failure.
        try:
            return req.json()
        finally:
            self.logout()
| apache-2.0 |
mgahsan/QuantEcon.py | examples/calibrations/BGP.py | 6 | 1334 | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 18 15:32:07 2015
@author: dgevans
"""
import numpy as np
class baseline(object):
    """Baseline calibration: log utility over consumption and leisure.

    Period utility is U(c, n) = log(c) + psi * log(1 - n), with the
    first and second derivatives in c and n exposed as methods.
    """

    beta = 0.9                   # discount factor
    psi = 0.69                   # weight on leisure in utility
    Pi = 0.5 * np.ones((2, 2))   # Markov transition matrix (two states)
    G = np.array([0.1, 0.2])     # government spending by state
    Theta = np.ones(2)           # productivity by state
    transfers = False            # lump-sum transfers allowed?

    def U(self, c, n):
        """Utility of consumption c and labor n."""
        return self.psi * np.log(1 - n) + np.log(c)

    def Uc(self, c, n):
        """dU/dc = 1/c."""
        return 1. / c

    def Ucc(self, c, n):
        """d2U/dc2 = -1/c^2."""
        return -(c ** (-2))

    def Un(self, c, n):
        """dU/dn = -psi/(1-n)."""
        return -self.psi / (1 - n)

    def Unn(self, c, n):
        """d2U/dn2 = -psi/(1-n)^2."""
        return -self.psi / (1 - n) ** 2
# Model 1: the two-state baseline with G in {0.1, 0.2}.
M1 = baseline()
# Model 2: a deterministic single-state variant with constant G = 0.15.
M2 = baseline()
M2.G = np.array([0.15])
M2.Pi = np.ones((1,1))
M2.Theta = np.ones(1)
# Model 3 with time varying government spending: the chain moves
# deterministically through states 0 -> 1 -> 2, branches 50/50 into
# state 3 or 4, and is then absorbed in state 5.
M_time_example = baseline()
M_time_example.Pi = np.array([[0., 1., 0., 0., 0., 0.],
                              [0., 0., 1., 0., 0., 0.],
                              [0., 0., 0., 0.5, 0.5, 0.],
                              [0., 0., 0., 0., 0., 1.],
                              [0., 0., 0., 0., 0., 1.],
                              [0., 0., 0., 0., 0., 1.]])
M_time_example.G = np.array([0.1, 0.1, 0.1, 0.2, 0.1, 0.1])
M_time_example.Theta = np.ones(6) # Theta can in principle be random | bsd-3-clause |
Rudloff/youtube-dl | youtube_dl/extractor/radiode.py | 150 | 1819 | from __future__ import unicode_literals
from .common import InfoExtractor
class RadioDeIE(InfoExtractor):
    """Extractor for live streams hosted on radio.de and its sister domains."""
    IE_NAME = 'radio.de'
    _VALID_URL = r'https?://(?P<id>.+?)\.(?:radio\.(?:de|at|fr|pt|es|pl|it)|rad\.io)'
    _TEST = {
        'url': 'http://ndr2.radio.de/',
        'info_dict': {
            'id': 'ndr2',
            'ext': 'mp3',
            'title': 're:^NDR 2 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
            'description': 'md5:591c49c702db1a33751625ebfb67f273',
            'thumbnail': 're:^https?://.*\.png',
            'is_live': True,
        },
        'params': {
            'skip_download': True,
        }
    }

    def _real_extract(self, url):
        # The station id is the subdomain, e.g. 'ndr2' for ndr2.radio.de.
        station_id = self._match_id(url)
        page = self._download_webpage(url, station_id)
        # Station metadata is embedded in the page as a JS object literal.
        station_json = self._search_regex(
            r"'components/station/stationService':\s*\{\s*'?station'?:\s*(\{.*?\s*\}),\n",
            page, 'broadcast')
        station = self._parse_json(station_json, station_id)
        title = self._live_title(station['name'])
        description = station.get('description') or station.get('shortDescription')
        thumbnail = (station.get('picture4Url')
                     or station.get('picture4TransUrl')
                     or station.get('logo100x100'))
        formats = []
        for stream in station['streamUrls']:
            formats.append({
                'url': stream['streamUrl'],
                'ext': stream['streamContentFormat'].lower(),
                'acodec': stream['streamContentFormat'],
                'abr': stream['bitRate'],
                'asr': stream['sampleRate'],
            })
        self._sort_formats(formats)
        return {
            'id': station_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'is_live': True,
            'formats': formats,
        }
| unlicense |
nikesh-mahalka/nova | nova/api/openstack/compute/legacy_v2/contrib/extended_ips.py | 79 | 3098 | # Copyright 2013 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Extended Ips API extension."""
import itertools
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
authorize = extensions.soft_extension_authorizer('compute', 'extended_ips')
class ExtendedIpsController(wsgi.Controller):
    """Controller extension that annotates each server address with its IP type."""

    def __init__(self, *args, **kwargs):
        super(ExtendedIpsController, self).__init__(*args, **kwargs)
        self.compute_api = compute.API()

    def _extend_server(self, context, server, instance):
        # Injected attribute key, e.g. "OS-EXT-IPS:type".
        type_key = Extended_ips.alias + ':type'
        networks = common.get_networks_for_instance(context, instance)
        for label, network in networks.items():
            # NOTE(vish): ips are hidden in some states via the
            #             hide_server_addresses extension.
            if label not in server['addresses']:
                continue
            addresses = server['addresses'][label]
            every_ip = itertools.chain(network["ips"], network["floating_ips"])
            for idx, ip in enumerate(every_ip):
                addresses[idx][type_key] = ip['type']

    @wsgi.extends
    def show(self, req, resp_obj, id):
        context = req.environ['nova.context']
        if not authorize(context):
            return
        server = resp_obj.obj['server']
        # server['id'] is guaranteed to be in the cache due to
        # the core API adding it in its 'show' method.
        self._extend_server(context, server, req.get_db_instance(server['id']))

    @wsgi.extends
    def detail(self, req, resp_obj):
        context = req.environ['nova.context']
        if not authorize(context):
            return
        for server in list(resp_obj.obj['servers']):
            # server['id'] is guaranteed to be in the cache due to
            # the core API adding it in its 'detail' method.
            self._extend_server(context, server, req.get_db_instance(server['id']))
class Extended_ips(extensions.ExtensionDescriptor):
    """Adds type parameter to the ip list."""
    # Extension identity. `alias` is also used as the prefix of the
    # injected "OS-EXT-IPS:type" attribute key on server addresses.
    name = "ExtendedIps"
    alias = "OS-EXT-IPS"
    namespace = ("http://docs.openstack.org/compute/ext/"
                 "extended_ips/api/v1.1")
    updated = "2013-01-06T00:00:00Z"
    def get_controller_extensions(self):
        # Attach the controller extension to the 'servers' resource.
        controller = ExtendedIpsController()
        extension = extensions.ControllerExtension(self, 'servers', controller)
        return [extension]
| apache-2.0 |
kennedyshead/home-assistant | homeassistant/components/sharkiq/__init__.py | 2 | 3047 | """Shark IQ Integration."""
import asyncio
from contextlib import suppress
import async_timeout
from sharkiqpy import (
AylaApi,
SharkIqAuthError,
SharkIqAuthExpiringError,
SharkIqNotAuthedError,
get_ayla_api,
)
from homeassistant import exceptions
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from .const import _LOGGER, API_TIMEOUT, DOMAIN, PLATFORMS
from .update_coordinator import SharkIqUpdateCoordinator
class CannotConnect(exceptions.HomeAssistantError):
    """Error to indicate we cannot connect.

    Raised when sign-in to the Ayla networks API times out.
    """
async def async_connect_or_timeout(ayla_api: AylaApi) -> bool:
    """Connect to vacuum.

    Returns True on successful sign-in, False on an authentication error.

    Raises:
        CannotConnect: if sign-in does not complete within API_TIMEOUT.
    """
    try:
        with async_timeout.timeout(API_TIMEOUT):
            _LOGGER.debug("Initialize connection to Ayla networks API")
            await ayla_api.async_sign_in()
    except SharkIqAuthError:
        # Bad credentials: report failure rather than raising, so setup can
        # abort cleanly instead of retrying forever.
        _LOGGER.error("Authentication error connecting to Shark IQ api")
        return False
    except asyncio.TimeoutError as exc:
        _LOGGER.error("Timeout expired")
        raise CannotConnect from exc
    return True
async def async_setup_entry(hass, config_entry):
    """Initialize the sharkiq platform via config entry."""
    # Build an Ayla API client on Home Assistant's shared aiohttp session.
    ayla_api = get_ayla_api(
        username=config_entry.data[CONF_USERNAME],
        password=config_entry.data[CONF_PASSWORD],
        websession=hass.helpers.aiohttp_client.async_get_clientsession(),
    )
    try:
        if not await async_connect_or_timeout(ayla_api):
            # Authentication failed outright; do not schedule a retry.
            return False
    except CannotConnect as exc:
        # Timeout: ask Home Assistant to retry the setup later.
        raise exceptions.ConfigEntryNotReady from exc
    shark_vacs = await ayla_api.async_get_devices(False)
    device_names = ", ".join(d.name for d in shark_vacs)
    _LOGGER.debug("Found %d Shark IQ device(s): %s", len(shark_vacs), device_names)
    # One coordinator polls all discovered vacuums for this config entry.
    coordinator = SharkIqUpdateCoordinator(hass, config_entry, ayla_api, shark_vacs)
    await coordinator.async_config_entry_first_refresh()
    hass.data.setdefault(DOMAIN, {})
    hass.data[DOMAIN][config_entry.entry_id] = coordinator
    hass.config_entries.async_setup_platforms(config_entry, PLATFORMS)
    return True
async def async_disconnect_or_timeout(coordinator: SharkIqUpdateCoordinator):
    """Disconnect to vacuum."""
    _LOGGER.debug("Disconnecting from Ayla Api")
    # Best-effort sign-out: allow 5 seconds and ignore auth-state errors,
    # since the session is being torn down anyway.
    with async_timeout.timeout(5), suppress(
        SharkIqAuthError, SharkIqAuthExpiringError, SharkIqNotAuthedError
    ):
        await coordinator.ayla_api.async_sign_out()
async def async_update_options(hass, config_entry):
    """Update options."""
    # Reload the entry so changed options take effect immediately.
    await hass.config_entries.async_reload(config_entry.entry_id)
async def async_unload_entry(hass, config_entry):
    """Unload a config entry, signing out of the Ayla API on success."""
    if not await hass.config_entries.async_unload_platforms(config_entry, PLATFORMS):
        return False
    coordinator = hass.data[DOMAIN][config_entry.entry_id]
    # Sign out best-effort; an auth error during teardown is not fatal.
    with suppress(SharkIqAuthError):
        await async_disconnect_or_timeout(coordinator=coordinator)
    hass.data[DOMAIN].pop(config_entry.entry_id)
    return True
| apache-2.0 |
jturney/psi4 | psi4/driver/procrouting/wrappers_cfour.py | 7 | 36345 | #
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with functions for Psi4/Cfour interface. Portions that require
calls to Boost Python psi4 module are here, otherwise in qcdb module.
Also calls to qcdb module are here and not elsewhere in driver.
Organizationally, this module isolates qcdb code from psi4 code.
"""
import datetime
import difflib
import glob
import os
import re
import shelve
import shutil
import subprocess
import sys
import uuid

from psi4.driver.p4util.exceptions import *
def run_cfour_module(xmod):
    """Run the Cfour module executable *xmod* and return its captured stdout.

    The subprocess environment merges PSIPATH into PATH, appends the Psi4
    basis directories, and forwards CFOUR_NUM_CORES and LD_LIBRARY_PATH.

    Raises:
        ValidationError: if *xmod* cannot be found or fails to launch.
    """
    # Find environment by merging PSIPATH and PATH environment variables
    lenv = {
        'PATH': ':'.join([os.path.abspath(x) for x in os.environ.get('PSIPATH', '').split(':') if x != '']) + \
        ':' + os.environ.get('PATH') + \
        ':' + core.get_datadir() + '/basis' + \
        ':' + core.psi_top_srcdir() + '/share/basis',
        'CFOUR_NUM_CORES': os.environ.get('CFOUR_NUM_CORES'),
        'LD_LIBRARY_PATH': os.environ.get('LD_LIBRARY_PATH')
    }
    # Filter out None values as subprocess will fault on them
    lenv = {k: v for k, v in lenv.items() if v is not None}
    # Call executable xmod, capturing its stdout
    try:
        retcode = subprocess.Popen([xmod], bufsize=0, stdout=subprocess.PIPE, env=lenv)
    except OSError as e:
        # BUG FIX: the original error path referenced an undefined name
        # `cfour_executable`, turning any launch failure into a NameError.
        message = 'Program %s not found in path or execution failed: %s\n' % (xmod, e.strerror)
        sys.stderr.write(message)
        raise ValidationError(message)
    c4out = ''
    while True:
        data = retcode.stdout.readline()
        if not data:
            break
        # BUG FIX: the pipe yields bytes under Python 3; decode before
        # concatenating onto the str accumulator.
        c4out += data.decode('utf-8', errors='replace')
    #internal_p4c4_info['output'] = c4out
    return c4out
def vpt2(name, **kwargs):
    """Perform vibrational second-order perturbation computation through
    Cfour to get anharmonic frequencies. This version uses c4 for the disp
    and pt2 but gets gradients from p4.
    :type c4full: :ref:`boolean <op_py_boolean>`
    :param c4full: ``'on'`` || |dl| ``'off'`` |dr|
        Indicates whether when *name* indicates a Cfour method and *mode*
        indicates a sow/reap approach, sown files are direct ZMAT files
        and FJOBARC files are expected to reap, so that Cfour only, not
        Cfour-through-Psi4, is needed for distributed jobs.
    :param name: lowercase method name (a ``c4-``/``cfour`` name routes
        gradients through Cfour; anything else through Psi4).
    :param kwargs: recognized keys include ``vpt2_mode``/``mode``
        (``'continuous'`` or ``'sowreap'``), ``c4full``, and ``path``
        (an existing Cfour scratch directory to reuse).
    Progress is checkpointed in a :mod:`shelve` file whose ``'status'``
    entry walks the state machine: ``initialized`` -> ``harm_jobs_sown``
    -> ``harm_jobs_reaped`` -> ``anharm_jobs_sown`` -> ``anharm_jobs_reaped``
    -> ``vpt2_completed``. In sow/reap mode the function returns early at
    each ``*_sown`` state so distributed jobs can run.
    .. caution:: Some features are not yet implemented. Buy a developer a coffee.
    - Presently uses all gradients. Could mix in analytic 2nd-derivs.
    - Collect resutls.
    - Manage scratch / subdir better.
    - Allow CFOUR_BASIS
    - Consider forcing some tighter convcrit, c4 and p4
    - mixed ang/bohr signals
    - error by converting to ang in psi?
    - Expand CURRENT DIPOLE XYZ beyond SCF
    - Remember additional FJOBARC record TOTENER2 if EXCITE .ne. NONE
    - switch C --> S/R with recovery using shelf
    """
    # NOTE(review): relies on module-level names (p4util, core, qcdb, gradient,
    #   yes, write_zmat, vpt2_instructions, sown_jobs_status) that are not all
    #   visible in this file's import header -- confirm they are provided.
    lowername = name.lower()
    kwargs = p4util.kwargs_lower(kwargs)
    optstash = p4util.OptionsState(
        ['BASIS'])
    # Option mode of operation- whether vpt2 run in one job or files farmed out
    if not('vpt2_mode' in kwargs):
        if ('mode' in kwargs):
            kwargs['vpt2_mode'] = kwargs['mode']
            del kwargs['mode']
        else:
            kwargs['vpt2_mode'] = 'continuous'
    # Switches for route through code- S/R or continuous & Psi4 or Cfour gradients
    isSowReap = True if kwargs['vpt2_mode'].lower() == 'sowreap' else False
    isC4notP4 = bool(re.match('cfour', lowername)) or bool(re.match('c4-', lowername))
    isC4fully = True if ('c4full' in kwargs and yes.match(str(kwargs['c4full'])) and isC4notP4 and isSowReap) else False
    # Save submission directory and basis set
    current_directory = os.getcwd()
    user_basis = core.get_global_option('BASIS')
    # Open data persistence shelf- vital for sowreap, checkpoint for continuouw
    shelf = shelve.open(current_directory + '/' + os.path.splitext(core.outfile_name())[0] + '.shelf', writeback=True)
    # Cfour keywords to request vpt2 analysis through findif gradients
    core.set_local_option('CFOUR', 'CFOUR_VIBRATION', 'FINDIF')
    core.set_local_option('CFOUR', 'CFOUR_FREQ_ALGORITHM', 'PARALLEL')
    core.set_local_option('CFOUR', 'CFOUR_ANH_ALGORITHM', 'PARALLEL')
    core.set_local_option('CFOUR', 'CFOUR_ANHARMONIC', 'VPT2')
    core.set_local_option('CFOUR', 'CFOUR_FD_PROJECT', 'OFF')
    # When a Psi4 method is requested for vpt2, a skeleton of
    # computations in Cfour is still required to hang the gradients
    # upon. The skeleton is as cheap as possible (integrals only
    # & sto-3g) and set up here.
    if isC4notP4:
        skelname = lowername
    else:
        skelname = 'c4-scf'
        core.set_global_option('BASIS', 'STO-3G')
    # P4 'c4-scf'/'cfour'CALC_LEVEL lowername # temporary
    # C4 lowername cfour{} # temporary
    if 'status' not in shelf:
        shelf['status'] = 'initialized'
        shelf['linkage'] = os.getpid()
        shelf['zmat'] = {} # Cfour-generated ZMAT files with finite difference geometries
        shelf['fjobarc'] = {} # Cfour- or Psi4-generated ascii files with packaged gradient results
        shelf.sync()
    else:
        pass
        # how decide whether to use. keep precedent of intco.dat in mind
    # Construct and move into directory job scratch / cfour scratch / harm
    psioh = core.IOManager.shared_object()
    psio = core.IO.shared_object()
    os.chdir(psioh.get_default_path()) # psi_scratch
    cfour_tmpdir = kwargs['path'] if 'path' in kwargs else \
        'psi.' + str(os.getpid()) + '.' + psio.get_default_namespace() + \
        '.cfour.' + str(uuid.uuid4())[:8]
    if not os.path.exists(cfour_tmpdir):
        os.mkdir(cfour_tmpdir)
    os.chdir(cfour_tmpdir) # psi_scratch/cfour
    if not os.path.exists('harm'):
        os.mkdir('harm')
    os.chdir('harm') # psi_scratch/cfour/harm
    psioh.set_specific_retention(32, True) # temporary, to track p4 scratch
    #shelf['status'] = 'anharm_jobs_sown' # temporary to force backtrack
    print('STAT', shelf['status']) # temporary
    # Generate the ZMAT input file in scratch
    with open('ZMAT', 'w') as handle:
        cfour_infile = write_zmat(skelname, 1)
        handle.write(cfour_infile)
    print('\n====== Begin ZMAT input for CFOUR ======')
    print(open('ZMAT', 'r').read())
    print('======= End ZMAT input for CFOUR =======\n')
    shelf['genbas'] = open('GENBAS', 'r').read()
    # Check existing shelf consistent with generated ZMAT, store
    if ('000-000' in shelf['zmat']) and (shelf['zmat']['000-000'] != cfour_infile):
        diff = difflib.Differ().compare(shelf['zmat']['000-000'].splitlines(), cfour_infile.splitlines())
        raise ValidationError("""Input file translated to Cfour ZMAT does not match ZMAT stored in shelf.\n\n""" +
                              '\n'.join(list(diff)))
    shelf['zmat']['000-000'] = cfour_infile
    shelf.sync()
    # Reset basis after Cfour skeleton seeded
    core.set_global_option('BASIS', user_basis)
    # --- State: initialized -> sow the harmonic-frequency displacements ---
    if shelf['status'] == 'initialized':
        p4util.banner(' VPT2 Setup: Harmonic ')
        # Generate the displacements that will form the harmonic freq
        os.chdir(psioh.get_default_path() + cfour_tmpdir + '/harm') # psi_scratch/cfour/harm
        with open('partial.out', 'w') as handle:
            handle.write(run_cfour_module('xjoda'))
            handle.write(run_cfour_module('xsymcor'))
        # Read the displacements that will form the harmonic freq
        zmats0N = ['000-' + item[-3:] for item in sorted(glob.glob('zmat*'))]
        for zm12 in zmats0N:
            zm1, zm2 = zm12.split('-')
            with open('zmat' + zm2, 'r') as handle:
                shelf['zmat'][zm12] = handle.read()
                shelf.sync()
            core.print_out(' CFOUR scratch file %s for %s-%s has been read\n' % ('zmat' + zm2, zm1, zm2))
            core.print_out('%s\n' % shelf['zmat'][zm12])
        # S/R: Write distributed input files for harmonic freq
        if isSowReap:
            os.chdir(current_directory)
            inputSansMol = p4util.format_currentstate_for_input(gradient, lowername, allButMol=True, **kwargs)
            for zm12 in zmats0N:
                zm1, zm2 = zm12.split('-')
                ifile = vpt2_sow_files(zm12, shelf['linkage'], isC4notP4, isC4fully,
                    shelf['zmat'][zm12], inputSansMol, shelf['genbas'])
                with open('VPT2-' + zm12 + '.in', 'w') as handle:
                    handle.write(ifile)
            msg = vpt2_instructions('harmonic', current_directory, zmats0N)
            core.print_out(msg)
            print(msg)
        shelf['status'] = 'harm_jobs_sown'
        # S/R: Pause for distributed calculations
        if isSowReap:
            shelf.close()
            return 0.0
    # --- State: harm_jobs_sown -> reap gradients for the harmonic freq ---
    if shelf['status'] == 'harm_jobs_sown':
        zmats0N = [item for item in sorted(shelf['zmat'].keys()) if (item[:3] == '000' and item[-3:] != '000')]
        # S/R: Check that distributed calcs all completed correctly
        if isSowReap:
            msg = vpt2_instructions('harmonic', current_directory, zmats0N)
            core.print_out(msg)
            isOk, msg = sown_jobs_status(current_directory, 'VPT2', zmats0N, reap_job_validate,
                shelf['linkage'], ['CURRENT ENERGY', 'CURRENT DIPOLE', 'CURRENT GRADIENT'])
            core.print_out(msg)
            print(msg)
            if not isOk:
                shelf.close()
                return 0.0
        # Collect all results from gradients forming the harmonic freq
        for zm12 in zmats0N:
            zm1, zm2 = zm12.split('-')
            if zm12 not in shelf['fjobarc']:
                p4util.banner(' VPT2 Computation: %s ' % (zm12))
                print(' VPT2 Computation: %s ' % (zm12))
                fjobarc = vpt2_reaprun_files(zm12, shelf['linkage'], isSowReap, isC4notP4, isC4fully,
                    shelf['zmat'][zm12], current_directory, psioh.get_default_path(), cfour_tmpdir,
                    lowername, kwargs)
                shelf['fjobarc'][zm12] = fjobarc
                shelf.sync()
        shelf['status'] = 'harm_jobs_reaped'
    # --- State: harm_jobs_reaped -> assemble harmonic freq, sow anharmonic ---
    if shelf['status'] == 'harm_jobs_reaped':
        zmats0N = [item for item in sorted(shelf['zmat'].keys()) if (item[:3] == '000' and item[-3:] != '000')]
        p4util.banner(' VPT2 Results: Harmonic ')
        # Process the gradients into harmonic freq
        os.chdir(psioh.get_default_path() + cfour_tmpdir + '/harm') # psi_scratch/cfour/harm
        harmout = run_cfour_module('xjoda')
        harmout += run_cfour_module('xsymcor')
        for zm12 in zmats0N:
            zm1, zm2 = zm12.split('-')
            with open('FJOBARC', 'w') as handle:
                handle.write(shelf['fjobarc'][zm12])
            harmout += run_cfour_module('xja2fja')
            harmout += run_cfour_module('xsymcor')
            shutil.move('FJOBARC', 'fja.' + zm12)
            try:
                os.remove('zmat' + zm2)
            except OSError:
                pass
        harmout += run_cfour_module('xjoda')
        harmout += run_cfour_module('xcubic')
        core.print_out(harmout)
        with open('harm.out', 'w') as handle:
            handle.write(harmout)
        # Generate displacements along harmonic normal modes
        zmatsN0 = [item[-3:] for item in sorted(glob.glob('zmat*'))]
        os.chdir('..') # psi_scratch/cfour
        for zm1 in zmatsN0:
            zm12 = zm1 + '-000'
            with open(psioh.get_default_path() + cfour_tmpdir + '/harm/zmat' + zm1, 'r') as handle:
                shelf['zmat'][zm12] = handle.read()
                shelf.sync()
            core.print_out(' CFOUR scratch file %s for %s has been read\n' % ('zmat' + zm1, zm12))
            core.print_out('%s\n' % shelf['zmat'][zm12])
            # Collect displacements along the normal coordinates generated by the harmonic freq.
            # Further harmonic freqs are to be run at each of these to produce quartic force field.
            # To carry these out, generate displacements for findif by gradient at each displacement.
            if os.path.exists(zm1):
                shutil.rmtree(zm1)
            os.mkdir(zm1)
            os.chdir(zm1) # psi_scratch/cfour/004
            with open('ZMAT', 'w') as handle:
                handle.write(shelf['zmat'][zm12])
            shutil.copy2('../harm/GENBAS', 'GENBAS') # ln -s $ecpdir/ECPDATA $j/ECPDATA
            with open('partial.out', 'w') as handle:
                handle.write(run_cfour_module('xjoda'))
                handle.write(run_cfour_module('xsymcor'))
            # Read the displacements that will form the anharmonic freq
            zmatsNN = [item[-3:] for item in sorted(glob.glob('zmat*'))]
            for zm2 in zmatsNN:
                zm12 = zm1 + '-' + zm2
                with open(psioh.get_default_path() + cfour_tmpdir + '/' + zm1 + '/zmat' + zm2, 'r') as handle:
                    shelf['zmat'][zm12] = handle.read()
                    shelf.sync()
                core.print_out(' CFOUR scratch file %s for %s has been read\n' % ('zmat' + zm2, zm12))
                core.print_out('%s\n' % shelf['zmat'][zm12])
            os.chdir('..') # psi_scratch/cfour
        zmatsNN = [item for item in sorted(shelf['zmat'].keys()) if (item[:3] != '000' and item[-3:] != '000')]
        # S/R: Write distributed input files for anharmonic freq
        if isSowReap:
            os.chdir(current_directory)
            inputSansMol = p4util.format_currentstate_for_input(gradient, lowername, allButMol=True, **kwargs)
            for zm12 in zmatsNN:
                zm1, zm2 = zm12.split('-')
                ifile = vpt2_sow_files(zm12, shelf['linkage'], isC4notP4, isC4fully,
                    shelf['zmat'][zm12], inputSansMol, shelf['genbas'])
                # GENBAS needed here
                with open('VPT2-' + zm12 + '.in', 'w') as handle:
                    handle.write(ifile)
            msg = vpt2_instructions('anharmonic', current_directory, zmatsNN)
            core.print_out(msg)
            print(msg)
        shelf['status'] = 'anharm_jobs_sown'
        # S/R: Pause for distributed calculations
        if isSowReap:
            shelf.close()
            return 0.0
    # --- State: anharm_jobs_sown -> reap gradients for the anharmonic freq ---
    if shelf['status'] == 'anharm_jobs_sown':
        zmatsNN = [item for item in sorted(shelf['zmat'].keys()) if (item[:3] != '000' and item[-3:] != '000')]
        # S/R: Check that distributed calcs all completed correctly
        if isSowReap:
            msg = vpt2_instructions('anharmonic', current_directory, zmatsNN)
            core.print_out(msg)
            isOk, msg = sown_jobs_status(current_directory, 'VPT2', zmatsNN,
                reap_job_validate, shelf['linkage'],
                ['CURRENT ENERGY', 'CURRENT DIPOLE', 'CURRENT GRADIENT'])
            core.print_out(msg)
            print(msg)
            if not isOk:
                shelf.close()
                return 0.0
        # Collect all results from gradients forming the anharmonic freq
        for zm12 in zmatsNN:
            zm1, zm2 = zm12.split('-')
            if zm12 not in shelf['fjobarc']:
                p4util.banner(' VPT2 Computation: %s ' % (zm12))
                print(' VPT2 Computation: %s ' % (zm12))
                fjobarc = vpt2_reaprun_files(zm12, shelf['linkage'], isSowReap, isC4notP4, isC4fully,
                    shelf['zmat'][zm12], current_directory, psioh.get_default_path(), cfour_tmpdir,
                    lowername, kwargs)
                shelf['fjobarc'][zm12] = fjobarc
                shelf.sync()
        shelf['status'] = 'anharm_jobs_reaped'
    # --- State: anharm_jobs_reaped -> assemble the anharmonic analysis ---
    if shelf['status'] == 'anharm_jobs_reaped':
        zmats0N = [item for item in sorted(shelf['zmat'].keys()) if (item[:3] == '000' and item[-3:] != '000')]
        zmatsN0 = [item for item in sorted(shelf['zmat'].keys()) if (item[:3] != '000' and item[-3:] == '000')]
        zmatsNN = [item for item in sorted(shelf['zmat'].keys()) if (item[:3] != '000' and item[-3:] != '000')]
        p4util.banner(' VPT2 Results: Harmonic ')
        # Process the gradients into harmonic freq
        os.chdir(psioh.get_default_path() + cfour_tmpdir) # psi_scratch/cfour
        if os.path.exists('anharm'):
            shutil.rmtree('anharm')
        os.mkdir('anharm')
        os.chdir('harm') # psi_scratch/cfour/harm
        run_cfour_module('xclean')
        anharmout = run_cfour_module('xjoda')
        anharmout += run_cfour_module('xsymcor')
        for zm12 in zmats0N:
            zm1, zm2 = zm12.split('-')
            with open('FJOBARC', 'w') as handle:
                handle.write(shelf['fjobarc'][zm12])
            anharmout += run_cfour_module('xja2fja')
            anharmout += run_cfour_module('xsymcor')
            shutil.move('FJOBARC', 'fja.' + zm12)
        anharmout += run_cfour_module('xjoda')
        anharmout += run_cfour_module('xcubic')
        core.print_out(anharmout)
        with open('harm.out', 'w') as handle:
            handle.write(anharmout)
        # Process the gradients into harmonic freq at each normco displaced point
        os.chdir('..') # psi_scratch/cfour
        for zm11 in zmatsN0:
            zm1 = zm11[:3]
            if os.path.exists(zm1):
                shutil.rmtree(zm1)
            os.mkdir(zm1)
            os.chdir(zm1) # psi_scratch/cfour/004
            run_cfour_module('xclean')
            with open('ZMAT', 'w') as handle:
                handle.write(shelf['zmat'][zm11])
            shutil.copy2('../harm/GENBAS', 'GENBAS')
            anharmout = run_cfour_module('xjoda')
            anharmout += run_cfour_module('xsymcor')
            for zm22 in [item for item in zmatsNN if (item[:3] == zm1 and item[-3:] != '000')]:
                zm2 = zm22[-3:]
                zm12 = zm1 + '-' + zm2
                print(zm12)
                with open('FJOBARC', 'w') as handle:
                    handle.write(shelf['fjobarc'][zm12])
                anharmout += run_cfour_module('xja2fja')
                anharmout += run_cfour_module('xsymcor')
                shutil.move('FJOBARC', 'fja.' + zm12)
            anharmout += run_cfour_module('xjoda')
            anharmout += run_cfour_module('xja2fja')
            with open('FJOBARC', 'r') as handle:
                shelf['fjobarc'][zm11] = handle.read()
                shelf.sync()
            core.print_out(anharmout)
            with open('partial.out', 'w') as handle:
                handle.write(anharmout)
            os.chdir('..') # psi_scratch/cfour
        # Process the harmonic freqs at normco displacements into anharmonic freq
        p4util.banner(' VPT2 Results: Anharmonic ')
        os.chdir('anharm') # psi_scratch/cfour/anharm
        shutil.copy2('../harm/JOBARC', 'JOBARC')
        shutil.copy2('../harm/JAINDX', 'JAINDX')
        for zm12 in zmatsN0:
            with open('FJOBARC', 'w') as handle:
                handle.write(shelf['fjobarc'][zm12])
            anharmout = run_cfour_module('xja2fja')
            anharmout += run_cfour_module('xcubic')
            shutil.move('FJOBARC', 'fja.' + zm12)
        core.print_out(anharmout)
        with open('anharm.out', 'w') as handle:
            handle.write(anharmout)
        shelf['status'] = 'vpt2_completed'
    # Finish up
    os.chdir(current_directory)
    shelf.close()
    optstash.restore()
def vpt2_sow_files(item, linkage, isC4notP4, isC4fully, zmat, inputSansMol, inputGenbas):
    """Build and return the text of the input file to be sown for
    displacement *item*.

    *zmat* is the displacement's ZMAT contents, *linkage* tags the reap
    lines, and *inputSansMol* / *inputGenbas* are contents shared by all
    displacements. The mode flags select between a pure-Cfour input
    (*isC4fully*), a psi4-wrapped Cfour input (*isC4notP4*), or a plain
    psi4 input (neither).
    """
    # Reap directives appended to every psi4-driven input so results can
    # be harvested from the output file later.
    reap_orders = r"""
print_variables()
print_out('VPT2 RESULT: linkage {0} for item {1} yields CURRENT ENERGY being %r\n' % (variable('CURRENT ENERGY')))
print_out('VPT2 RESULT: linkage {0} for item {1} yields CURRENT GRADIENT being %r\n' % (p4util.mat2arr(core.get_gradient())))
print_out('VPT2 RESULT: linkage {0} for item {1} yields CURRENT DIPOLE being [%r, %r, %r]\n' % (variable('CURRENT DIPOLE X'), variable('CURRENT DIPOLE Y'), variable('CURRENT DIPOLE Z')))
""".format(linkage, item)
    # Direct Cfour: the ZMAT itself is the sown input; GENBAS rides along.
    if isC4fully:
        with open('VPT2-GENBAS', 'w') as handle:
            handle.write(inputGenbas)
        return zmat
    # Cfour run through psi4: embed GENBAS and the ZMAT in a psi4 input.
    if isC4notP4:
        pieces = [
            'extracted_genbas = """\n' + inputGenbas.replace('\n\n', '\nblankline\n') + '\n"""\n\n',
            """cfour {\n%s\n}\n\nenergy('cfour', genbas=extracted_genbas)\n\n""" % (zmat),
            reap_orders,
            r"""
print_out('VPT2 RESULT: linkage {0} for item {1} yields CURRENT MOLECULE being %r\n' % (get_active_molecule().create_psi4_string_from_molecule()))
""".format(linkage, item),
        ]
        return ''.join(pieces)
    # Plain psi4: translate the ZMAT geometry into a psi4 molecule block.
    molecule_block = p4util.format_molecule_for_input(
        qcdb.cfour.harvest_zmat(zmat).create_psi4_string_from_molecule(),
        name='disp' + item[:3] + item[-3:])
    return molecule_block + inputSansMol + reap_orders
def vpt2_reaprun_files(item, linkage, isSowReap, isC4notP4, isC4fully, zmat, outdir, scrdir, c4scrdir, lowername, kwargs):
    """Provided with the particular displacement number *item* and the
    associated *zmat* file with geometry and *linkage*, returns the
    FJOBARC contents. Depending on the mode settings of *isC4notP4*,
    *isSowReap*, and *isC4fully*, either runs (using *lowername* and
    *kwargs*) or reaps contents. *outdir* is where psi4 was invoked,
    *scrdir* is the psi4 scratch directory, and *c4scrdir* is Cfour
    scratch directory within.

    Side effects: changes the process working directory several times and
    creates/overwrites Cfour scratch subdirectories ('scr.' + item).
    """
    os.chdir(outdir)  # current_directory
    # Extract qcdb.Molecule at findif orientation
    zmmol = qcdb.cfour.harvest_zmat(zmat)
    # Cfour S/R Direct for gradients
    if isC4fully:
        # Results were computed elsewhere; just read the reaped FJOBARC.
        with open('VPT2-' + item + '.fja', 'r') as handle:
            fjobarc = handle.read()
    # Cfour for gradients
    elif isC4notP4:
        # S/R: Reap results from output file
        if isSowReap:
            isOk, msg, results = reap_job_validate(outdir, 'VPT2', item, linkage,
                ['CURRENT ENERGY', 'CURRENT DIPOLE', 'CURRENT GRADIENT', 'CURRENT MOLECULE'])
            if not isOk:
                raise ValidationError(msg)
            fje = results['CURRENT ENERGY']
            fjgrd = results['CURRENT GRADIENT']
            # NOTE(review): the comprehension variable shadows the *item*
            # parameter; harmless under Python 3 comprehension scoping, but
            # it would clobber *item* on Python 2 — confirm target version.
            fjdip = [item / constants.dipmom_au2debye for item in results['CURRENT DIPOLE']]
            c4mol = qcdb.Molecule(results['CURRENT MOLECULE'])
            c4mol.update_geometry()
        # C: Run the job and collect results
        else:
            # Prepare Cfour skeleton calc directory
            os.chdir(scrdir + c4scrdir)  # psi_scratch/cfour
            if os.path.exists('scr.' + item):
                shutil.rmtree('scr.' + item)
            os.mkdir('scr.' + item)
            os.chdir('scr.' + item)  # psi_scratch/cfour/scr.000-004
            with open('ZMAT', 'w') as handle:
                handle.write(zmat)
            shutil.copy2('../harm/GENBAS', 'GENBAS')
            #os.chdir(scrdir + '/scr.' + item)
            #run_cfour_module('xja2fja')
            #with open('FJOBARC', 'r') as handle:
            #    fjobarc = handle.read()
            # Run Cfour calc using ZMAT & GENBAS in scratch, outdir redirects to outfile
            os.chdir(outdir)  # current_directory
            core.get_active_molecule().set_name('blank_molecule_psi4_yo')
            energy('cfour', path=c4scrdir + '/scr.' + item)
            # os.chdir(scrdir + '/scr.' + item)
            fje = core.variable('CURRENT ENERGY')
            fjgrd = p4util.mat2arr(core.get_gradient())
            # Dipole converted from Debye to atomic units.
            fjdip = [core.variable('CURRENT DIPOLE X') / constants.dipmom_au2debye,
                     core.variable('CURRENT DIPOLE Y') / constants.dipmom_au2debye,
                     core.variable('CURRENT DIPOLE Z') / constants.dipmom_au2debye]
            c4mol = qcdb.Molecule(core.get_active_molecule().create_psi4_string_from_molecule())
            c4mol.update_geometry()
        # Get map btwn ZMAT and C4 orientation, then use it, grad and dipole to forge FJOBARC file
        # NOTE(review): here gradient/dipole go to format_fjobarc as kwargs,
        # whereas the psi4 branch below feeds them to backtransform instead
        # (chgeGrad/chgeDip) — confirm both call patterns are intended.
        fjobarc = qcdb.cfour.format_fjobarc(fje,
            *qcdb.cfour.backtransform(chgeMol=zmmol, permMol=c4mol), gradient=fjgrd, dipole=fjdip)
    # Psi4 for gradients
    else:
        # Prepare Cfour skeleton calc directory
        os.chdir(scrdir + c4scrdir)  # psi_scratch/cfour
        if os.path.exists('scr.' + item):
            shutil.rmtree('scr.' + item)
        os.mkdir('scr.' + item)
        os.chdir('scr.' + item)  # psi_scratch/cfour/scr.000-004
        with open('ZMAT', 'w') as handle:
            handle.write(zmat)
        shutil.copy2('../harm/GENBAS', 'GENBAS')
        # Run Cfour skeleton calc and extract qcdb.Molecule at needed C4 orientation
        with open('partial.out', 'w') as handle:
            handle.write(run_cfour_module('xjoda'))
            handle.write(run_cfour_module('xvmol'))
            handle.write(run_cfour_module('xvmol2ja'))
        core.print_out(' CFOUR scratch file %s for %s has been read\n' % ('JOBARC (binary)', item))
        # NOTE(review): JOBARC record names are normally 8 chars wide —
        # flattened source shows single trailing spaces; verify padding.
        c4mol = qcdb.cfour.jajo2mol(qcdb.jajo.getrec(['COORD ', 'ATOMCHRG', 'MAP2ZMAT', 'IFLAGS ']))
        # S/R: Reap results from output file
        if isSowReap:
            isOk, msg, results = reap_job_validate(outdir, 'VPT2', item, linkage,
                ['CURRENT ENERGY', 'CURRENT DIPOLE', 'CURRENT GRADIENT'])
            if not isOk:
                raise ValidationError(msg)
            fje = results['CURRENT ENERGY']
            fjgrd = results['CURRENT GRADIENT']
            # NOTE(review): comprehension variable shadows *item* (see above).
            fjdip = [item / constants.dipmom_au2debye for item in results['CURRENT DIPOLE']]
        # C: Run the job and collect results
        else:
            core.IO.set_default_namespace(item)
            molecule = geometry(zmmol.create_psi4_string_from_molecule(), 'disp-' + item)
            molecule.update_geometry()
            gradient(lowername, **kwargs)
            fje = core.variable('CURRENT ENERGY')
            fjgrd = p4util.mat2arr(core.get_gradient())
            fjdip = [core.variable('CURRENT DIPOLE X') / constants.dipmom_au2debye,
                     core.variable('CURRENT DIPOLE Y') / constants.dipmom_au2debye,
                     core.variable('CURRENT DIPOLE Z') / constants.dipmom_au2debye]
        # Transform results into C4 orientation (defined by c4mol) & forge FJOBARC file
        fjobarc = qcdb.cfour.format_fjobarc(fje,
            *qcdb.cfour.backtransform(chgeMol=zmmol, permMol=c4mol, chgeGrad=fjgrd, chgeDip=fjdip))
    return fjobarc
def vpt2_instructions(stage, dir, zmats):
    """Stores all the instructions to the user for running
    :py:func:`~wrappers_cfour.vpt2` in sowreap mode. Depending on the
    *stage*, Pieces together instruction strings for the appropriate
    *stage* individualized by working directory *dir* and sown inputs
    *zmats* information.

    Raises:
        ValueError: if *stage* is not 'harmonic' or 'anharmonic'.
            (Previously an unrecognized stage fell through and raised a
            confusing UnboundLocalError on *instructions*.)
    """
    # One "psi4 <in> <out>" line per sown input file.
    stepFiles = ''
    for zm12 in sorted(zmats):
        stepFiles += """ psi4 %-27s %-27s\n""" % ('VPT2-' + zm12 + '.in', 'VPT2-' + zm12 + '.out')
    # Common preamble: names the three read-only files the procedure needs.
    step0 = """
The vpt2 sow/reap procedure has been selected through mode='sowreap'. This
output file, the corresponding input file, and the data persistence file
must not be edited by the user over the course of the sow/reap procedure.
Throughout, psi4 can be invoked to move to the next stage of the procedure
or to tally up the 'sown' jobs. This output file is overwritten each time
psi4 is invoked, but all results and instructions accumulate.
This procedure involves two stages of distributed calculations, harmonic and
anharmonic, and a mimimum of three invokations of psi4 on the original input
file (including the one that initially generated this text). From the input
geometry (0), displacements are generated for which gradients are required.
Input files for these are 'sown' in the current directory (1). Upon
completion, their output files are 'reaped' into a harmonic force field (2).
At displacements along the normal coordinates, further displacements are
generated for which gradients are required. Input files for these are again
'sown' in the current directory (3). Upon completion, their output files are
'reaped' into an anharmonic force field (4), terminating the vpt2 procedure.
Follow the instructions below to continue.
(0) Read Only
--------------
%s
%s
%s
""" % (dir + '/' + os.path.splitext(core.outfile_name())[0] + '.in',
       dir + '/' + core.outfile_name(),
       dir + '/' + os.path.splitext(core.outfile_name())[0] + '.shelf')
    step1 = """
(1) Sow
--------
Run all of the VPT2-000-*.in input files on any variety of computer
architecture. The output file names must be as given below (default).
"""
    step2 = """
(2) Reap
---------
Gather all the resulting output files in this directory along with the
three read-only files from (0). Invoke psi4 again. The job will be
trivial in length (unless sto-3g integrals on the molecule are costly)
and give results for the harmonic frequency stage in this output file. It
will also supply the next set of instructions.
psi4 %-27s %-27s
""" % (os.path.splitext(core.outfile_name())[0] + '.in', core.outfile_name())
    step3 = """
(3) Sow
--------
Run all of the VPT2-*-*.in input files on any variety of computer
architecture. The output file names must be as given below (default).
"""
    step4 = """
(4) Reap
---------
Gather all the resulting output files in this directory along with the
three read-only files from (0). Invoke psi4 again. The job will be
trivial in length (unless sto-3g integrals on the molecule are costly)
and give results for the harmonic and anharmonic frequency stages in this
output file.
psi4 %-27s %-27s
""" % (os.path.splitext(core.outfile_name())[0] + '.in', core.outfile_name())
    if stage == 'harmonic':
        instructions = step0 + step1 + stepFiles + step2
    elif stage == 'anharmonic':
        instructions = step0 + step3 + stepFiles + step4
    else:
        # Fail loudly on an unknown stage instead of UnboundLocalError.
        raise ValueError("vpt2_instructions: invalid stage '%s'; "
                         "expected 'harmonic' or 'anharmonic'" % (stage,))
    return instructions
def sown_jobs_status(dir, prefix, zmats, validate_func=None, linkage=None, keys=None):
    """Evaluate the output file status of jobs in *zmats* which should
    exist at *dir* + '/' + prefix + '-' + job + '.out'. Returns string with
    formatted summary of job status and boolean of whether all complete.
    Return boolean *isOk* signals whether all *zmats* have completed and,
    if *validate_func* present, are validated.

    *validate_func*, when given, is a callable with signature
    (dir, prefix, job, linkage, keys) -> (isOkJob, msg, results);
    *linkage* and *keys* are passed through to it.
    """
    isOk = True
    msgError = ''
    instructions = '\n'
    instructions += p4util.banner(prefix + ' Status: ' + datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), strNotOutfile=True)
    instructions += '\n'
    for job in sorted(zmats):
        outfile = dir + '/' + prefix + '-' + job + '.out'
        fjafile = dir + '/' + prefix + '-' + job + '.fja'
        # Columns: name, Waiting, Running, Completed, validation status.
        formatArgs = [prefix + '-' + job, '', '', '', '']
        if os.path.isfile(outfile):
            with open(outfile, 'r') as handle:
                for line in handle:
                    # psi4's end-of-output marker signals a finished job.
                    if line.find('Buy a developer a beer!') > -1:
                        formatArgs[3] = 'Completed'
                        # Bug fix: honor the *validate_func* hook. The old
                        # code tested and called the module-level
                        # reap_job_validate unconditionally, so the
                        # documented parameter was silently ignored.
                        if validate_func is not None:
                            isOkJob, msg, temp = validate_func(dir, prefix, job, linkage, keys)
                            if isOkJob:
                                formatArgs[4] = '& Validated'
                            else:
                                isOk = False
                                msgError += msg
                                formatArgs[4] = 'INVALID'
                        break
                else:
                    # for/else: no completion marker found -> still running.
                    isOk = False
                    formatArgs[2] = 'Running'
        elif os.path.isfile(fjafile):
            # Pre-computed FJOBARC present; nothing to run or validate.
            formatArgs[3] = 'Completed'
        else:
            isOk = False
            formatArgs[1] = 'Waiting'
        instructions += """ {0:<27} {1:^10} {2:^10} {3:^10} {4:^10}\n""".format(*formatArgs)
    instructions += '\n' + msgError + '\n\n'
    return isOk, instructions
def reap_job_validate(dir, prefix, item, linkage, keys):
    """For a given output file whose path is constructed with
    *dir* + '/' + *prefix* + '-' + *item* + '.out', tests that the file
    exists and has *prefix* RESULTS lines for each piece of information
    requested in list *keys* and that those lines correspond to the
    appropriate *linkage* and *item*. Returns *keys* along with their
    scanned values in dict *reapings*, along with error and success
    messages in *instructions* and a boolean *isOk* indicating whether
    all *keys* reaped sucessfully.
    """
    isOk = True
    instructions = ''
    reapings = {}
    outfile = dir + '/' + prefix + '-' + item + '.out'
    try:
        with open(outfile, 'r') as handle:
            for line in handle:
                # Only lines that begin exactly with "<prefix> RESULT:" carry data.
                if line.find(prefix + ' RESULT:') == 0:
                    sline = line.split()
                    # Tokens 2..6 must be: linkage <linkage> for item <item>
                    if sline[2:7] == ['linkage', str(linkage), 'for', 'item', item]:
                        # Payload format: "... yields <KEY> being <VALUE>"
                        yieldsAt = line.find('yields')
                        beingAt = line.find('being')
                        if beingAt > yieldsAt > -1:
                            key = line[yieldsAt + 6:beingAt].strip()
                            val = line[beingAt + 5:].strip()
                            if key in keys:
                                # SECURITY NOTE: eval() on file contents —
                                # only safe because sown outputs are written
                                # by this same procedure; never point this
                                # at untrusted files.
                                reapings[key] = eval(val)
                                #core.print_out('  CFOUR scratch file %s for %s has been read\n' % ('JOBARC', zm12))
                        else:
                            isOk = False
                            instructions += """Outfile file %s
has corrupted sowreap result line:\n%s\n\n""" % (outfile, line)
                    else:
                        isOk = False
                        instructions += """Outfile file %s
has sowreap result of either incompatible linkage (observed: %s, expected: %s)
or incompatible job affiliation (observed: %s, expected: %s).\n\n""" % \
                            (outfile, sline[3], linkage, sline[6], item)
            else:
                # NOTE(review): for/else — with no break in the loop this
                # always runs after the full scan, checking that every
                # requested key was reaped; confirm the clause pairing
                # against the original (pre-flattening) source.
                if len(reapings) != len(keys):
                    isOk = False
                    instructions += """Output file %s
has missing results (observed: %s, expected: %s).\n\n""" % \
                        (outfile, reapings.keys(), keys)
    except IOError:
        isOk = False
        instructions += """Output file %s
that was judged present and complete at the beginning of this
job is now missing. Replace it and invoke psi4 again.\n\n""" % (outfile)
    # return file contents in instructions
    return isOk, instructions, reapings
| lgpl-3.0 |
gurneyalex/stock-logistics-warehouse | __unported__/stock_orderpoint_creator/stock_orderpoint_creator/wizard/orderpoint_creator.py | 23 | 2960 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Vaucher (Camptocamp)
# Copyright 2012 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Wizard defining stock.warehouse.orderpoint configurations for selected
products. Those configs are generated using templates """
from openerp.osv.orm import browse_record, TransientModel, fields
# Names of the wizard fields that hold orderpoint templates; the list is
# extensible via OrderpointCreator._get_template_register().
_template_register = ['orderpoint_template_id']
class OrderpointCreator(TransientModel):
    """Wizard that applies orderpoint templates to the selected products.

    For each template field registered on the wizard, old orderpoint
    instances are disabled and fresh ones are created per template.
    """
    _name = 'stock.warehouse.orderpoint.creator'
    _description = 'Orderpoint Creator'
    _columns = {'orderpoint_template_id': fields.many2many(
        'stock.warehouse.orderpoint.template',
        rel='order_point_creator_rel',
        string='Stock rule template')
    }

    def _get_template_register(self):
        """return a list of the field names which defines a template
        This is a hook to allow expending the list of template"""
        return _template_register

    def action_configure(self, cursor, uid, wiz_id, context=None):
        """ action to retrieve wizard data and launch creation of items """
        product_ids = context['active_ids']
        if isinstance(wiz_id, list):
            wiz_id = wiz_id[0]
        wizard = self.browse(cursor, uid, wiz_id, context=context)
        for field_name in self._get_template_register():
            templates = wizard[field_name]
            if not templates:
                # No template selected for this field; nothing to do.
                continue
            # A many2one browse gives a single record; normalize to a list.
            if isinstance(templates, browse_record):
                templates = [templates]
            template_osv = self.pool.get(templates[0]._model._name)
            # Retire previously generated orderpoints before recreating.
            template_osv._disable_old_instances(cursor, uid, templates,
                                                product_ids, context=context)
            for template in templates:
                template_osv.create_instances(cursor, uid, template,
                                              product_ids, context=context)
        return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
karelin/polymode | tests/old/test_sweaction.py | 5 | 2168 | #Unit test for VWE_Action
from __future__ import division
from numpy import *
from numpy.testing import *
#--------------------------------------------------------------------------
#Remove main ABCSolver pathname & add path one up in directory to PATH
import sys,os
#sys.path.append(os.path.split(os.path.abspath(os.curdir))[0])
sys.path.append(os.path.abspath(os.curdir))
#--------------------------------------------------------------------------
from VWE_Action import *
class test_VWE_Action(NumpyTestCase):
    # Placeholder test case for VWE_Action; no assertions implemented yet.
    def check_complex(self):
        # TODO: add checks for complex-valued VWE action handling.
        pass
class test_Hankelratio(NumpyTestCase):
    '''
    Check Hankel ratio function gives correct results
    '''
    def test_hankel(self):
        # Orders -200..190 in steps of 10, evaluated at one fixed complex
        # argument; 'answer' is a stored regression baseline (presumably
        # captured from a trusted earlier run — TODO confirm provenance).
        ms = arange(-200,200,10)
        x = 6.18709499137-0.000430208636459j
        answer = array([-32.30979881 -2.24876725e-03j, -30.69270826 -2.13644030e-03j, -29.07552567 -2.02411976e-03j, -27.45823466 -1.91180680e-03j,-25.84081475 -1.79950283e-03j, -24.22323996 -1.68720969e-03j, -22.60547679 -1.57492971e-03j, -20.98748133 -1.46266598e-03j,-19.36919489 -1.35042262e-03j, -17.75053715 -1.23820530e-03j, -16.13139519 -1.12602199e-03j, -14.51160509 -1.01388429e-03j,-12.89091938 -9.01809806e-04j, -11.26894597 -7.89826599e-04j, -9.64502393 -6.77982377e-04j, -8.01794435 -5.66365607e-04j,-6.38522875 -4.55161871e-04j, -4.74083918 -3.44843658e-04j, -3.06486731 -2.37145693e-04j, -1.19981619 +4.75288828e-03j,-0.08032385 +1.00314151e+00j, -1.19981619 +4.75288828e-03j, -3.06486731 -2.37145693e-04j, -4.74083918 -3.44843658e-04j,-6.38522875 -4.55161871e-04j, -8.01794435 -5.66365607e-04j, -9.64502393 -6.77982377e-04j, -11.26894597 -7.89826599e-04j,-12.89091938 -9.01809806e-04j, -14.51160509 -1.01388429e-03j, -16.13139519 -1.12602199e-03j, -17.75053715 -1.23820530e-03j,-19.36919489 -1.35042262e-03j, -20.98748133 -1.46266598e-03j, -22.60547679 -1.57492971e-03j, -24.22323996 -1.68720969e-03j,-25.84081475 -1.79950283e-03j, -27.45823466 -1.91180680e-03j, -29.07552567 -2.02411976e-03j, -30.69270826 -2.13644030e-03j])
        self.__check__(ms,x,answer)

    def __check__(self, ms, x, answer):
        # Compare hankel_ratio output element-wise against the baseline.
        h = hankel_ratio(ms, x)
        assert_almost_equal(h, answer)
if __name__ == '__main__':
    # Run this module's tests via the legacy NumpyTest driver when
    # executed directly as a script.
    NumpyTest().run()
| gpl-3.0 |
JoonghyunCho/crosswalk-tizen | tools/gyp/pylib/gyp/MSVSSettings_test.py | 778 | 65880 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the MSVSSettings.py file."""
import StringIO
import unittest
import gyp.MSVSSettings as MSVSSettings
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
    # Capture MSVSSettings warning output in an in-memory buffer so each
    # test can inspect exactly what was emitted (see _ExpectedWarnings).
    self.stderr = StringIO.StringIO()
def _ExpectedWarnings(self, expected):
    """Assert that the warnings recorded on self.stderr match *expected*,
    ignoring order and blank lines."""
    self.stderr.seek(0)
    recorded = [line for line in self.stderr.read().split('\n') if line]
    self.assertEqual(sorted(expected), sorted(recorded))
def testValidateMSVSSettings_tool_names(self):
  """Tests that only MSVS tool names are allowed."""
  # 'foo' is not a tool at all and 'ClCompile' is the MSBuild-era name,
  # so both must trigger "unrecognized tool" warnings; the genuine MSVS
  # tool names must pass silently.
  MSVSSettings.ValidateMSVSSettings(
      {'VCCLCompilerTool': {},
       'VCLinkerTool': {},
       'VCMIDLTool': {},
       'foo': {},
       'VCResourceCompilerTool': {},
       'VCLibrarianTool': {},
       'VCManifestTool': {},
       'ClCompile': {}},
      self.stderr)
  self._ExpectedWarnings([
      'Warning: unrecognized tool foo',
      'Warning: unrecognized tool ClCompile'])
def testValidateMSVSSettings_settings(self):
  """Tests that for invalid MSVS settings."""
  # Near-exhaustive settings dictionary: most values are legal; the
  # deliberately bad ones (out-of-range enum indices, unknown setting
  # names, malformed booleans) must each yield exactly one warning,
  # verified at the bottom via _ExpectedWarnings.
  MSVSSettings.ValidateMSVSSettings(
      {'VCCLCompilerTool': {
          'AdditionalIncludeDirectories': 'folder1;folder2',
          'AdditionalOptions': ['string1', 'string2'],
          'AdditionalUsingDirectories': 'folder1;folder2',
          'AssemblerListingLocation': 'a_file_name',
          'AssemblerOutput': '0',
          'BasicRuntimeChecks': '5',       # invalid: enum index out of range
          'BrowseInformation': 'fdkslj',   # invalid: not an integer
          'BrowseInformationFile': 'a_file_name',
          'BufferSecurityCheck': 'true',
          'CallingConvention': '-1',       # invalid: negative enum index
          'CompileAs': '1',
          'DebugInformationFormat': '2',   # valid index but no MSBuild mapping
          'DefaultCharIsUnsigned': 'true',
          'Detect64BitPortabilityProblems': 'true',
          'DisableLanguageExtensions': 'true',
          'DisableSpecificWarnings': 'string1;string2',
          'EnableEnhancedInstructionSet': '1',
          'EnableFiberSafeOptimizations': 'true',
          'EnableFunctionLevelLinking': 'true',
          'EnableIntrinsicFunctions': 'true',
          'EnablePREfast': 'true',
          'Enableprefast': 'bogus',        # invalid: unknown setting (case!)
          'ErrorReporting': '1',
          'ExceptionHandling': '1',
          'ExpandAttributedSource': 'true',
          'FavorSizeOrSpeed': '1',
          'FloatingPointExceptions': 'true',
          'FloatingPointModel': '1',
          'ForceConformanceInForLoopScope': 'true',
          'ForcedIncludeFiles': 'file1;file2',
          'ForcedUsingFiles': 'file1;file2',
          'GeneratePreprocessedFile': '1',
          'GenerateXMLDocumentationFiles': 'true',
          'IgnoreStandardIncludePath': 'true',
          'InlineFunctionExpansion': '1',
          'KeepComments': 'true',
          'MinimalRebuild': 'true',
          'ObjectFile': 'a_file_name',
          'OmitDefaultLibName': 'true',
          'OmitFramePointers': 'true',
          'OpenMP': 'true',
          'Optimization': '1',
          'PrecompiledHeaderFile': 'a_file_name',
          'PrecompiledHeaderThrough': 'a_file_name',
          'PreprocessorDefinitions': 'string1;string2',
          'ProgramDataBaseFileName': 'a_file_name',
          'RuntimeLibrary': '1',
          'RuntimeTypeInfo': 'true',
          'ShowIncludes': 'true',
          'SmallerTypeCheck': 'true',
          'StringPooling': 'true',
          'StructMemberAlignment': '1',
          'SuppressStartupBanner': 'true',
          'TreatWChar_tAsBuiltInType': 'true',
          'UndefineAllPreprocessorDefinitions': 'true',
          'UndefinePreprocessorDefinitions': 'string1;string2',
          'UseFullPaths': 'true',
          'UsePrecompiledHeader': '1',
          'UseUnicodeResponseFiles': 'true',
          'WarnAsError': 'true',
          'WarningLevel': '1',
          'WholeProgramOptimization': 'true',
          'XMLDocumentationFileName': 'a_file_name',
          'ZZXYZ': 'bogus'},              # invalid: unknown setting
       'VCLinkerTool': {
          'AdditionalDependencies': 'file1;file2',
          'AdditionalLibraryDirectories': 'folder1;folder2',
          'AdditionalManifestDependencies': 'file1;file2',
          'AdditionalOptions': 'a string1',
          'AddModuleNamesToAssembly': 'file1;file2',
          'AllowIsolation': 'true',
          'AssemblyDebug': '2',
          'AssemblyLinkResource': 'file1;file2',
          'BaseAddress': 'a string1',
          'CLRImageType': '2',
          'CLRThreadAttribute': '2',
          'CLRUnmanagedCodeCheck': 'true',
          'DataExecutionPrevention': '2',
          'DelayLoadDLLs': 'file1;file2',
          'DelaySign': 'true',
          'Driver': '2',
          'EmbedManagedResourceFile': 'file1;file2',
          'EnableCOMDATFolding': '2',
          'EnableUAC': 'true',
          'EntryPointSymbol': 'a string1',
          'ErrorReporting': '2',
          'FixedBaseAddress': '2',
          'ForceSymbolReferences': 'file1;file2',
          'FunctionOrder': 'a_file_name',
          'GenerateDebugInformation': 'true',
          'GenerateManifest': 'true',
          'GenerateMapFile': 'true',
          'HeapCommitSize': 'a string1',
          'HeapReserveSize': 'a string1',
          'IgnoreAllDefaultLibraries': 'true',
          'IgnoreDefaultLibraryNames': 'file1;file2',
          'IgnoreEmbeddedIDL': 'true',
          'IgnoreImportLibrary': 'true',
          'ImportLibrary': 'a_file_name',
          'KeyContainer': 'a_file_name',
          'KeyFile': 'a_file_name',
          'LargeAddressAware': '2',
          'LinkIncremental': '2',
          'LinkLibraryDependencies': 'true',
          'LinkTimeCodeGeneration': '2',
          'ManifestFile': 'a_file_name',
          'MapExports': 'true',
          'MapFileName': 'a_file_name',
          'MergedIDLBaseFileName': 'a_file_name',
          'MergeSections': 'a string1',
          'MidlCommandFile': 'a_file_name',
          'ModuleDefinitionFile': 'a_file_name',
          'OptimizeForWindows98': '1',
          'OptimizeReferences': '2',
          'OutputFile': 'a_file_name',
          'PerUserRedirection': 'true',
          'Profile': 'true',
          'ProfileGuidedDatabase': 'a_file_name',
          'ProgramDatabaseFile': 'a_file_name',
          'RandomizedBaseAddress': '2',
          'RegisterOutput': 'true',
          'ResourceOnlyDLL': 'true',
          'SetChecksum': 'true',
          'ShowProgress': '2',
          'StackCommitSize': 'a string1',
          'StackReserveSize': 'a string1',
          'StripPrivateSymbols': 'a_file_name',
          'SubSystem': '2',
          'SupportUnloadOfDelayLoadedDLL': 'true',
          'SuppressStartupBanner': 'true',
          'SwapRunFromCD': 'true',
          'SwapRunFromNet': 'true',
          'TargetMachine': '2',           # valid index, no MSBuild mapping
          'TerminalServerAware': '2',
          'TurnOffAssemblyGeneration': 'true',
          'TypeLibraryFile': 'a_file_name',
          'TypeLibraryResourceID': '33',
          'UACExecutionLevel': '2',
          'UACUIAccess': 'true',
          'UseLibraryDependencyInputs': 'true',
          'UseUnicodeResponseFiles': 'true',
          'Version': 'a string1'},
       'VCMIDLTool': {
          'AdditionalIncludeDirectories': 'folder1;folder2',
          'AdditionalOptions': 'a string1',
          'CPreprocessOptions': 'a string1',
          'DefaultCharType': '1',
          'DLLDataFileName': 'a_file_name',
          'EnableErrorChecks': '1',
          'ErrorCheckAllocations': 'true',
          'ErrorCheckBounds': 'true',
          'ErrorCheckEnumRange': 'true',
          'ErrorCheckRefPointers': 'true',
          'ErrorCheckStubData': 'true',
          'GenerateStublessProxies': 'true',
          'GenerateTypeLibrary': 'true',
          'HeaderFileName': 'a_file_name',
          'IgnoreStandardIncludePath': 'true',
          'InterfaceIdentifierFileName': 'a_file_name',
          'MkTypLibCompatible': 'true',
          'notgood': 'bogus',             # invalid: unknown setting
          'OutputDirectory': 'a string1',
          'PreprocessorDefinitions': 'string1;string2',
          'ProxyFileName': 'a_file_name',
          'RedirectOutputAndErrors': 'a_file_name',
          'StructMemberAlignment': '1',
          'SuppressStartupBanner': 'true',
          'TargetEnvironment': '1',
          'TypeLibraryName': 'a_file_name',
          'UndefinePreprocessorDefinitions': 'string1;string2',
          'ValidateParameters': 'true',
          'WarnAsError': 'true',
          'WarningLevel': '1'},
       'VCResourceCompilerTool': {
          'AdditionalOptions': 'a string1',
          'AdditionalIncludeDirectories': 'folder1;folder2',
          'Culture': '1003',
          'IgnoreStandardIncludePath': 'true',
          'notgood2': 'bogus',            # invalid: unknown setting
          'PreprocessorDefinitions': 'string1;string2',
          'ResourceOutputFileName': 'a string1',
          'ShowProgress': 'true',
          'SuppressStartupBanner': 'true',
          'UndefinePreprocessorDefinitions': 'string1;string2'},
       'VCLibrarianTool': {
          'AdditionalDependencies': 'file1;file2',
          'AdditionalLibraryDirectories': 'folder1;folder2',
          'AdditionalOptions': 'a string1',
          'ExportNamedFunctions': 'string1;string2',
          'ForceSymbolReferences': 'a string1',
          'IgnoreAllDefaultLibraries': 'true',
          'IgnoreSpecificDefaultLibraries': 'file1;file2',
          'LinkLibraryDependencies': 'true',
          'ModuleDefinitionFile': 'a_file_name',
          'OutputFile': 'a_file_name',
          'SuppressStartupBanner': 'true',
          'UseUnicodeResponseFiles': 'true'},
       'VCManifestTool': {
          'AdditionalManifestFiles': 'file1;file2',
          'AdditionalOptions': 'a string1',
          'AssemblyIdentity': 'a string1',
          'ComponentFileName': 'a_file_name',
          'DependencyInformationFile': 'a_file_name',
          'GenerateCatalogFiles': 'true',
          'InputResourceManifests': 'a string1',
          'ManifestResourceFile': 'a_file_name',
          'OutputManifestFile': 'a_file_name',
          'RegistrarScriptFile': 'a_file_name',
          'ReplacementsFile': 'a_file_name',
          'SuppressStartupBanner': 'true',
          'TypeLibraryFile': 'a_file_name',
          'UpdateFileHashes': 'truel',    # invalid: malformed boolean
          'UpdateFileHashesSearchPath': 'a_file_name',
          'UseFAT32Workaround': 'true',
          'UseUnicodeResponseFiles': 'true',
          'VerboseOutput': 'true'}},
      self.stderr)
  # One expected warning per invalid entry planted above.
  self._ExpectedWarnings([
      'Warning: for VCCLCompilerTool/BasicRuntimeChecks, '
      'index value (5) not in expected range [0, 4)',
      'Warning: for VCCLCompilerTool/BrowseInformation, '
      "invalid literal for int() with base 10: 'fdkslj'",
      'Warning: for VCCLCompilerTool/CallingConvention, '
      'index value (-1) not in expected range [0, 3)',
      'Warning: for VCCLCompilerTool/DebugInformationFormat, '
      'converted value for 2 not specified.',
      'Warning: unrecognized setting VCCLCompilerTool/Enableprefast',
      'Warning: unrecognized setting VCCLCompilerTool/ZZXYZ',
      'Warning: for VCLinkerTool/TargetMachine, '
      'converted value for 2 not specified.',
      'Warning: unrecognized setting VCMIDLTool/notgood',
      'Warning: unrecognized setting VCResourceCompilerTool/notgood2',
      'Warning: for VCManifestTool/UpdateFileHashes, '
      "expected bool; got 'truel'"
      ''])
def testValidateMSBuildSettings_settings(self):
"""Tests that for invalid MSBuild settings."""
MSVSSettings.ValidateMSBuildSettings(
{'ClCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': ['string1', 'string2'],
'AdditionalUsingDirectories': 'folder1;folder2',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': 'NoListing',
'BasicRuntimeChecks': 'StackFrameRuntimeCheck',
'BrowseInformation': 'false',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'BuildingInIDE': 'true',
'CallingConvention': 'Cdecl',
'CompileAs': 'CompileAsC',
'CompileAsManaged': 'Pure',
'CreateHotpatchableImage': 'true',
'DebugInformationFormat': 'ProgramDatabase',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'string1;string2',
'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions',
'EnableFiberSafeOptimizations': 'true',
'EnablePREfast': 'true',
'Enableprefast': 'bogus',
'ErrorReporting': 'Prompt',
'ExceptionHandling': 'SyncCThrow',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': 'Neither',
'FloatingPointExceptions': 'true',
'FloatingPointModel': 'Precise',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2',
'ForcedUsingFiles': 'file1;file2',
'FunctionLevelLinking': 'false',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': 'OnlyExplicitInline',
'IntrinsicFunctions': 'false',
'MinimalRebuild': 'true',
'MultiProcessorCompilation': 'true',
'ObjectFileName': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMPSupport': 'true',
'Optimization': 'Disabled',
'PrecompiledHeader': 'NotUsing',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderOutputFile': 'a_file_name',
'PreprocessKeepComments': 'true',
'PreprocessorDefinitions': 'string1;string2',
'PreprocessOutputPath': 'a string1',
'PreprocessSuppressLineNumbers': 'false',
'PreprocessToFile': 'false',
'ProcessorNumber': '33',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': 'MultiThreaded',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1Byte',
'SuppressStartupBanner': 'true',
'TrackerLogDirectory': 'a_folder',
'TreatSpecificWarningsAsErrors': 'string1;string2',
'TreatWarningAsError': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'string1;string2',
'UseFullPaths': 'true',
'UseUnicodeForAssemblerListing': 'true',
'WarningLevel': 'TurnOffAllWarnings',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name',
'ZZXYZ': 'bogus'},
'Link': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalManifestDependencies': 'file1;file2',
'AdditionalOptions': 'a string1',
'AddModuleNamesToAssembly': 'file1;file2',
'AllowIsolation': 'true',
'AssemblyDebug': '',
'AssemblyLinkResource': 'file1;file2',
'BaseAddress': 'a string1',
'BuildingInIDE': 'true',
'CLRImageType': 'ForceIJWImage',
'CLRSupportLastError': 'Enabled',
'CLRThreadAttribute': 'MTAThreadingAttribute',
'CLRUnmanagedCodeCheck': 'true',
'CreateHotPatchableImage': 'X86Image',
'DataExecutionPrevention': 'false',
'DelayLoadDLLs': 'file1;file2',
'DelaySign': 'true',
'Driver': 'NotSet',
'EmbedManagedResourceFile': 'file1;file2',
'EnableCOMDATFolding': 'false',
'EnableUAC': 'true',
'EntryPointSymbol': 'a string1',
'FixedBaseAddress': 'false',
'ForceFileOutput': 'Enabled',
'ForceSymbolReferences': 'file1;file2',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a string1',
'HeapReserveSize': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreEmbeddedIDL': 'true',
'IgnoreSpecificDefaultLibraries': 'a_file_list',
'ImageHasSafeExceptionHandlers': 'true',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': 'false',
'LinkDLL': 'true',
'LinkErrorReporting': 'SendErrorReport',
'LinkStatus': 'true',
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a string1',
'MidlCommandFile': 'a_file_name',
'MinimumRequiredVersion': 'a string1',
'ModuleDefinitionFile': 'a_file_name',
'MSDOSStubFileName': 'a_file_name',
'NoEntryPoint': 'true',
'OptimizeReferences': 'false',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'PreventDllBinding': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': 'false',
'RegisterOutput': 'true',
'SectionAlignment': '33',
'SetChecksum': 'true',
'ShowProgress': 'LinkVerboseREF',
'SpecifySectionAttributes': 'a string1',
'StackCommitSize': 'a string1',
'StackReserveSize': 'a string1',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': 'Console',
'SupportNobindOfDelayLoadedDLL': 'true',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNET': 'true',
'TargetMachine': 'MachineX86',
'TerminalServerAware': 'false',
'TrackerLogDirectory': 'a_folder',
'TreatLinkerWarningAsErrors': 'true',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': 'AsInvoker',
'UACUIAccess': 'true',
'Version': 'a string1'},
'ResourceCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'Culture': '0x236',
'IgnoreStandardIncludePath': 'true',
'NullTerminateStrings': 'true',
'PreprocessorDefinitions': 'string1;string2',
'ResourceOutputFileName': 'a string1',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'TrackerLogDirectory': 'a_folder',
'UndefinePreprocessorDefinitions': 'string1;string2'},
'Midl': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'ApplicationConfigurationMode': 'true',
'ClientStubFile': 'a_file_name',
'CPreprocessOptions': 'a string1',
'DefaultCharType': 'Signed',
'DllDataFileName': 'a_file_name',
'EnableErrorChecks': 'EnableCustom',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateClientFiles': 'Stub',
'GenerateServerFiles': 'None',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'LocaleID': '33',
'MkTypLibCompatible': 'true',
'OutputDirectory': 'a string1',
'PreprocessorDefinitions': 'string1;string2',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'ServerStubFile': 'a_file_name',
'StructMemberAlignment': 'NotSet',
'SuppressCompilerWarnings': 'true',
'SuppressStartupBanner': 'true',
'TargetEnvironment': 'Itanium',
'TrackerLogDirectory': 'a_folder',
'TypeLibFormat': 'NewFormat',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'string1;string2',
'ValidateAllParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '1'},
'Lib': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'DisplayLibrary': 'a string1',
'ErrorReporting': 'PromptImmediately',
'ExportNamedFunctions': 'string1;string2',
'ForceSymbolReferences': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2',
'LinkTimeCodeGeneration': 'true',
'MinimumRequiredVersion': 'a string1',
'ModuleDefinitionFile': 'a_file_name',
'Name': 'a_file_name',
'OutputFile': 'a_file_name',
'RemoveObjects': 'file1;file2',
'SubSystem': 'Console',
'SuppressStartupBanner': 'true',
'TargetMachine': 'MachineX86i',
'TrackerLogDirectory': 'a_folder',
'TreatLibWarningAsErrors': 'true',
'UseUnicodeResponseFiles': 'true',
'Verbose': 'true'},
'Manifest': {
'AdditionalManifestFiles': 'file1;file2',
'AdditionalOptions': 'a string1',
'AssemblyIdentity': 'a string1',
'ComponentFileName': 'a_file_name',
'EnableDPIAwareness': 'fal',
'GenerateCatalogFiles': 'truel',
'GenerateCategoryTags': 'true',
'InputResourceManifests': 'a string1',
'ManifestFromManagedAssembly': 'a_file_name',
'notgood3': 'bogus',
'OutputManifestFile': 'a_file_name',
'OutputResourceManifests': 'a string1',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressDependencyElement': 'true',
'SuppressStartupBanner': 'true',
'TrackerLogDirectory': 'a_folder',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'a_file_name',
'VerboseOutput': 'true'},
'ProjectReference': {
'LinkLibraryDependencies': 'true',
'UseLibraryDependencyInputs': 'true'},
'ManifestResourceCompile': {
'ResourceOutputFileName': 'a_file_name'},
'': {
'EmbedManifest': 'true',
'GenerateManifest': 'true',
'IgnoreImportLibrary': 'true',
'LinkIncremental': 'false'}},
self.stderr)
self._ExpectedWarnings([
'Warning: unrecognized setting ClCompile/Enableprefast',
'Warning: unrecognized setting ClCompile/ZZXYZ',
'Warning: unrecognized setting Manifest/notgood3',
'Warning: for Manifest/GenerateCatalogFiles, '
"expected bool; got 'truel'",
'Warning: for Lib/TargetMachine, unrecognized enumerated value '
'MachineX86i',
"Warning: for Manifest/EnableDPIAwareness, expected bool; got 'fal'"])
def testConvertToMSBuildSettings_empty(self):
"""Tests an empty conversion."""
msvs_settings = {}
expected_msbuild_settings = {}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_minimal(self):
"""Tests a minimal conversion."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/foo',
'BasicRuntimeChecks': '0',
},
'VCLinkerTool': {
'LinkTimeCodeGeneration': '1',
'ErrorReporting': '1',
'DataExecutionPrevention': '2',
},
}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/foo',
'BasicRuntimeChecks': 'Default',
},
'Link': {
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'LinkErrorReporting': 'PromptImmediately',
'DataExecutionPrevention': 'true',
},
}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
  def testConvertToMSBuildSettings_warnings(self):
    """Tests conversion that generates warnings.

    Settings with out-of-range or invalid values are dropped from the
    MSBuild output and reported as warnings; the valid settings in the same
    tool section still convert normally.
    """
    msvs_settings = {
        'VCCLCompilerTool': {
            'AdditionalIncludeDirectories': '1',
            'AdditionalOptions': '2',
            # These are incorrect values:
            'BasicRuntimeChecks': '12',
            'BrowseInformation': '21',
            'UsePrecompiledHeader': '13',
            'GeneratePreprocessedFile': '14'},
        'VCLinkerTool': {
            # These are incorrect values:
            'Driver': '10',
            'LinkTimeCodeGeneration': '31',
            'ErrorReporting': '21',
            'FixedBaseAddress': '6'},
        'VCResourceCompilerTool': {
            # Custom
            'Culture': '1003'}}
    # Only the valid settings survive; the bad ones are omitted entirely
    # (note the empty 'Link' section).
    expected_msbuild_settings = {
        'ClCompile': {
            'AdditionalIncludeDirectories': '1',
            'AdditionalOptions': '2'},
        'Link': {},
        'ResourceCompile': {
            # Custom
            'Culture': '0x03eb'}}
    actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
        msvs_settings,
        self.stderr)
    self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
    # One warning per dropped setting, in conversion order.
    self._ExpectedWarnings([
        'Warning: while converting VCCLCompilerTool/BasicRuntimeChecks to '
        'MSBuild, index value (12) not in expected range [0, 4)',
        'Warning: while converting VCCLCompilerTool/BrowseInformation to '
        'MSBuild, index value (21) not in expected range [0, 3)',
        'Warning: while converting VCCLCompilerTool/UsePrecompiledHeader to '
        'MSBuild, index value (13) not in expected range [0, 3)',
        'Warning: while converting VCCLCompilerTool/GeneratePreprocessedFile to '
        'MSBuild, value must be one of [0, 1, 2]; got 14',
        'Warning: while converting VCLinkerTool/Driver to '
        'MSBuild, index value (10) not in expected range [0, 4)',
        'Warning: while converting VCLinkerTool/LinkTimeCodeGeneration to '
        'MSBuild, index value (31) not in expected range [0, 5)',
        'Warning: while converting VCLinkerTool/ErrorReporting to '
        'MSBuild, index value (21) not in expected range [0, 3)',
        'Warning: while converting VCLinkerTool/FixedBaseAddress to '
        'MSBuild, index value (6) not in expected range [0, 3)',
        ])
  def testConvertToMSBuildSettings_full_synthetic(self):
    """Tests conversion of all the MSBuild settings.

    Every convertible VS2008 tool option is given a synthetic value, and the
    expected MSBuild dict below pins the full rename/re-encode mapping
    (numeric enum indices become symbolic names, options move between
    sections, and some settings fold into the unnamed '' project section).
    """
    # VS2008 input: one entry per supported option, keyed by old tool name.
    msvs_settings = {
        'VCCLCompilerTool': {
            'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
            'AdditionalOptions': 'a_string',
            'AdditionalUsingDirectories': 'folder1;folder2;folder3',
            'AssemblerListingLocation': 'a_file_name',
            'AssemblerOutput': '0',
            'BasicRuntimeChecks': '1',
            'BrowseInformation': '2',
            'BrowseInformationFile': 'a_file_name',
            'BufferSecurityCheck': 'true',
            'CallingConvention': '0',
            'CompileAs': '1',
            'DebugInformationFormat': '4',
            'DefaultCharIsUnsigned': 'true',
            'Detect64BitPortabilityProblems': 'true',
            'DisableLanguageExtensions': 'true',
            'DisableSpecificWarnings': 'd1;d2;d3',
            'EnableEnhancedInstructionSet': '0',
            'EnableFiberSafeOptimizations': 'true',
            'EnableFunctionLevelLinking': 'true',
            'EnableIntrinsicFunctions': 'true',
            'EnablePREfast': 'true',
            'ErrorReporting': '1',
            'ExceptionHandling': '2',
            'ExpandAttributedSource': 'true',
            'FavorSizeOrSpeed': '0',
            'FloatingPointExceptions': 'true',
            'FloatingPointModel': '1',
            'ForceConformanceInForLoopScope': 'true',
            'ForcedIncludeFiles': 'file1;file2;file3',
            'ForcedUsingFiles': 'file1;file2;file3',
            'GeneratePreprocessedFile': '1',
            'GenerateXMLDocumentationFiles': 'true',
            'IgnoreStandardIncludePath': 'true',
            'InlineFunctionExpansion': '2',
            'KeepComments': 'true',
            'MinimalRebuild': 'true',
            'ObjectFile': 'a_file_name',
            'OmitDefaultLibName': 'true',
            'OmitFramePointers': 'true',
            'OpenMP': 'true',
            'Optimization': '3',
            'PrecompiledHeaderFile': 'a_file_name',
            'PrecompiledHeaderThrough': 'a_file_name',
            'PreprocessorDefinitions': 'd1;d2;d3',
            'ProgramDataBaseFileName': 'a_file_name',
            'RuntimeLibrary': '0',
            'RuntimeTypeInfo': 'true',
            'ShowIncludes': 'true',
            'SmallerTypeCheck': 'true',
            'StringPooling': 'true',
            'StructMemberAlignment': '1',
            'SuppressStartupBanner': 'true',
            'TreatWChar_tAsBuiltInType': 'true',
            'UndefineAllPreprocessorDefinitions': 'true',
            'UndefinePreprocessorDefinitions': 'd1;d2;d3',
            'UseFullPaths': 'true',
            'UsePrecompiledHeader': '1',
            'UseUnicodeResponseFiles': 'true',
            'WarnAsError': 'true',
            'WarningLevel': '2',
            'WholeProgramOptimization': 'true',
            'XMLDocumentationFileName': 'a_file_name'},
        'VCLinkerTool': {
            'AdditionalDependencies': 'file1;file2;file3',
            'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
            'AdditionalLibraryDirectories_excluded': 'folder1;folder2;folder3',
            'AdditionalManifestDependencies': 'file1;file2;file3',
            'AdditionalOptions': 'a_string',
            'AddModuleNamesToAssembly': 'file1;file2;file3',
            'AllowIsolation': 'true',
            'AssemblyDebug': '0',
            'AssemblyLinkResource': 'file1;file2;file3',
            'BaseAddress': 'a_string',
            'CLRImageType': '1',
            'CLRThreadAttribute': '2',
            'CLRUnmanagedCodeCheck': 'true',
            'DataExecutionPrevention': '0',
            'DelayLoadDLLs': 'file1;file2;file3',
            'DelaySign': 'true',
            'Driver': '1',
            'EmbedManagedResourceFile': 'file1;file2;file3',
            'EnableCOMDATFolding': '0',
            'EnableUAC': 'true',
            'EntryPointSymbol': 'a_string',
            'ErrorReporting': '0',
            'FixedBaseAddress': '1',
            'ForceSymbolReferences': 'file1;file2;file3',
            'FunctionOrder': 'a_file_name',
            'GenerateDebugInformation': 'true',
            'GenerateManifest': 'true',
            'GenerateMapFile': 'true',
            'HeapCommitSize': 'a_string',
            'HeapReserveSize': 'a_string',
            'IgnoreAllDefaultLibraries': 'true',
            'IgnoreDefaultLibraryNames': 'file1;file2;file3',
            'IgnoreEmbeddedIDL': 'true',
            'IgnoreImportLibrary': 'true',
            'ImportLibrary': 'a_file_name',
            'KeyContainer': 'a_file_name',
            'KeyFile': 'a_file_name',
            'LargeAddressAware': '2',
            'LinkIncremental': '1',
            'LinkLibraryDependencies': 'true',
            'LinkTimeCodeGeneration': '2',
            'ManifestFile': 'a_file_name',
            'MapExports': 'true',
            'MapFileName': 'a_file_name',
            'MergedIDLBaseFileName': 'a_file_name',
            'MergeSections': 'a_string',
            'MidlCommandFile': 'a_file_name',
            'ModuleDefinitionFile': 'a_file_name',
            'OptimizeForWindows98': '1',
            'OptimizeReferences': '0',
            'OutputFile': 'a_file_name',
            'PerUserRedirection': 'true',
            'Profile': 'true',
            'ProfileGuidedDatabase': 'a_file_name',
            'ProgramDatabaseFile': 'a_file_name',
            'RandomizedBaseAddress': '1',
            'RegisterOutput': 'true',
            'ResourceOnlyDLL': 'true',
            'SetChecksum': 'true',
            'ShowProgress': '0',
            'StackCommitSize': 'a_string',
            'StackReserveSize': 'a_string',
            'StripPrivateSymbols': 'a_file_name',
            'SubSystem': '2',
            'SupportUnloadOfDelayLoadedDLL': 'true',
            'SuppressStartupBanner': 'true',
            'SwapRunFromCD': 'true',
            'SwapRunFromNet': 'true',
            'TargetMachine': '3',
            'TerminalServerAware': '2',
            'TurnOffAssemblyGeneration': 'true',
            'TypeLibraryFile': 'a_file_name',
            'TypeLibraryResourceID': '33',
            'UACExecutionLevel': '1',
            'UACUIAccess': 'true',
            'UseLibraryDependencyInputs': 'false',
            'UseUnicodeResponseFiles': 'true',
            'Version': 'a_string'},
        'VCResourceCompilerTool': {
            'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
            'AdditionalOptions': 'a_string',
            'Culture': '1003',
            'IgnoreStandardIncludePath': 'true',
            'PreprocessorDefinitions': 'd1;d2;d3',
            'ResourceOutputFileName': 'a_string',
            'ShowProgress': 'true',
            'SuppressStartupBanner': 'true',
            'UndefinePreprocessorDefinitions': 'd1;d2;d3'},
        'VCMIDLTool': {
            'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
            'AdditionalOptions': 'a_string',
            'CPreprocessOptions': 'a_string',
            'DefaultCharType': '0',
            'DLLDataFileName': 'a_file_name',
            'EnableErrorChecks': '2',
            'ErrorCheckAllocations': 'true',
            'ErrorCheckBounds': 'true',
            'ErrorCheckEnumRange': 'true',
            'ErrorCheckRefPointers': 'true',
            'ErrorCheckStubData': 'true',
            'GenerateStublessProxies': 'true',
            'GenerateTypeLibrary': 'true',
            'HeaderFileName': 'a_file_name',
            'IgnoreStandardIncludePath': 'true',
            'InterfaceIdentifierFileName': 'a_file_name',
            'MkTypLibCompatible': 'true',
            'OutputDirectory': 'a_string',
            'PreprocessorDefinitions': 'd1;d2;d3',
            'ProxyFileName': 'a_file_name',
            'RedirectOutputAndErrors': 'a_file_name',
            'StructMemberAlignment': '3',
            'SuppressStartupBanner': 'true',
            'TargetEnvironment': '1',
            'TypeLibraryName': 'a_file_name',
            'UndefinePreprocessorDefinitions': 'd1;d2;d3',
            'ValidateParameters': 'true',
            'WarnAsError': 'true',
            'WarningLevel': '4'},
        'VCLibrarianTool': {
            'AdditionalDependencies': 'file1;file2;file3',
            'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
            'AdditionalLibraryDirectories_excluded': 'folder1;folder2;folder3',
            'AdditionalOptions': 'a_string',
            'ExportNamedFunctions': 'd1;d2;d3',
            'ForceSymbolReferences': 'a_string',
            'IgnoreAllDefaultLibraries': 'true',
            'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
            'LinkLibraryDependencies': 'true',
            'ModuleDefinitionFile': 'a_file_name',
            'OutputFile': 'a_file_name',
            'SuppressStartupBanner': 'true',
            'UseUnicodeResponseFiles': 'true'},
        'VCManifestTool': {
            'AdditionalManifestFiles': 'file1;file2;file3',
            'AdditionalOptions': 'a_string',
            'AssemblyIdentity': 'a_string',
            'ComponentFileName': 'a_file_name',
            'DependencyInformationFile': 'a_file_name',
            'EmbedManifest': 'true',
            'GenerateCatalogFiles': 'true',
            'InputResourceManifests': 'a_string',
            'ManifestResourceFile': 'my_name',
            'OutputManifestFile': 'a_file_name',
            'RegistrarScriptFile': 'a_file_name',
            'ReplacementsFile': 'a_file_name',
            'SuppressStartupBanner': 'true',
            'TypeLibraryFile': 'a_file_name',
            'UpdateFileHashes': 'true',
            'UpdateFileHashesSearchPath': 'a_file_name',
            'UseFAT32Workaround': 'true',
            'UseUnicodeResponseFiles': 'true',
            'VerboseOutput': 'true'}}
    # Expected MSBuild output: new section names ('ClCompile', 'Link', ...),
    # symbolic enum values, and project-level settings under the '' key.
    expected_msbuild_settings = {
        'ClCompile': {
            'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
            'AdditionalOptions': 'a_string /J',
            'AdditionalUsingDirectories': 'folder1;folder2;folder3',
            'AssemblerListingLocation': 'a_file_name',
            'AssemblerOutput': 'NoListing',
            'BasicRuntimeChecks': 'StackFrameRuntimeCheck',
            'BrowseInformation': 'true',
            'BrowseInformationFile': 'a_file_name',
            'BufferSecurityCheck': 'true',
            'CallingConvention': 'Cdecl',
            'CompileAs': 'CompileAsC',
            'DebugInformationFormat': 'EditAndContinue',
            'DisableLanguageExtensions': 'true',
            'DisableSpecificWarnings': 'd1;d2;d3',
            'EnableEnhancedInstructionSet': 'NotSet',
            'EnableFiberSafeOptimizations': 'true',
            'EnablePREfast': 'true',
            'ErrorReporting': 'Prompt',
            'ExceptionHandling': 'Async',
            'ExpandAttributedSource': 'true',
            'FavorSizeOrSpeed': 'Neither',
            'FloatingPointExceptions': 'true',
            'FloatingPointModel': 'Strict',
            'ForceConformanceInForLoopScope': 'true',
            'ForcedIncludeFiles': 'file1;file2;file3',
            'ForcedUsingFiles': 'file1;file2;file3',
            'FunctionLevelLinking': 'true',
            'GenerateXMLDocumentationFiles': 'true',
            'IgnoreStandardIncludePath': 'true',
            'InlineFunctionExpansion': 'AnySuitable',
            'IntrinsicFunctions': 'true',
            'MinimalRebuild': 'true',
            'ObjectFileName': 'a_file_name',
            'OmitDefaultLibName': 'true',
            'OmitFramePointers': 'true',
            'OpenMPSupport': 'true',
            'Optimization': 'Full',
            'PrecompiledHeader': 'Create',
            'PrecompiledHeaderFile': 'a_file_name',
            'PrecompiledHeaderOutputFile': 'a_file_name',
            'PreprocessKeepComments': 'true',
            'PreprocessorDefinitions': 'd1;d2;d3',
            'PreprocessSuppressLineNumbers': 'false',
            'PreprocessToFile': 'true',
            'ProgramDataBaseFileName': 'a_file_name',
            'RuntimeLibrary': 'MultiThreaded',
            'RuntimeTypeInfo': 'true',
            'ShowIncludes': 'true',
            'SmallerTypeCheck': 'true',
            'StringPooling': 'true',
            'StructMemberAlignment': '1Byte',
            'SuppressStartupBanner': 'true',
            'TreatWarningAsError': 'true',
            'TreatWChar_tAsBuiltInType': 'true',
            'UndefineAllPreprocessorDefinitions': 'true',
            'UndefinePreprocessorDefinitions': 'd1;d2;d3',
            'UseFullPaths': 'true',
            'WarningLevel': 'Level2',
            'WholeProgramOptimization': 'true',
            'XMLDocumentationFileName': 'a_file_name'},
        'Link': {
            'AdditionalDependencies': 'file1;file2;file3',
            'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
            'AdditionalManifestDependencies': 'file1;file2;file3',
            'AdditionalOptions': 'a_string',
            'AddModuleNamesToAssembly': 'file1;file2;file3',
            'AllowIsolation': 'true',
            'AssemblyDebug': '',
            'AssemblyLinkResource': 'file1;file2;file3',
            'BaseAddress': 'a_string',
            'CLRImageType': 'ForceIJWImage',
            'CLRThreadAttribute': 'STAThreadingAttribute',
            'CLRUnmanagedCodeCheck': 'true',
            'DataExecutionPrevention': '',
            'DelayLoadDLLs': 'file1;file2;file3',
            'DelaySign': 'true',
            'Driver': 'Driver',
            'EmbedManagedResourceFile': 'file1;file2;file3',
            'EnableCOMDATFolding': '',
            'EnableUAC': 'true',
            'EntryPointSymbol': 'a_string',
            'FixedBaseAddress': 'false',
            'ForceSymbolReferences': 'file1;file2;file3',
            'FunctionOrder': 'a_file_name',
            'GenerateDebugInformation': 'true',
            'GenerateMapFile': 'true',
            'HeapCommitSize': 'a_string',
            'HeapReserveSize': 'a_string',
            'IgnoreAllDefaultLibraries': 'true',
            'IgnoreEmbeddedIDL': 'true',
            'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
            'ImportLibrary': 'a_file_name',
            'KeyContainer': 'a_file_name',
            'KeyFile': 'a_file_name',
            'LargeAddressAware': 'true',
            'LinkErrorReporting': 'NoErrorReport',
            'LinkTimeCodeGeneration': 'PGInstrument',
            'ManifestFile': 'a_file_name',
            'MapExports': 'true',
            'MapFileName': 'a_file_name',
            'MergedIDLBaseFileName': 'a_file_name',
            'MergeSections': 'a_string',
            'MidlCommandFile': 'a_file_name',
            'ModuleDefinitionFile': 'a_file_name',
            'NoEntryPoint': 'true',
            'OptimizeReferences': '',
            'OutputFile': 'a_file_name',
            'PerUserRedirection': 'true',
            'Profile': 'true',
            'ProfileGuidedDatabase': 'a_file_name',
            'ProgramDatabaseFile': 'a_file_name',
            'RandomizedBaseAddress': 'false',
            'RegisterOutput': 'true',
            'SetChecksum': 'true',
            'ShowProgress': 'NotSet',
            'StackCommitSize': 'a_string',
            'StackReserveSize': 'a_string',
            'StripPrivateSymbols': 'a_file_name',
            'SubSystem': 'Windows',
            'SupportUnloadOfDelayLoadedDLL': 'true',
            'SuppressStartupBanner': 'true',
            'SwapRunFromCD': 'true',
            'SwapRunFromNET': 'true',
            'TargetMachine': 'MachineARM',
            'TerminalServerAware': 'true',
            'TurnOffAssemblyGeneration': 'true',
            'TypeLibraryFile': 'a_file_name',
            'TypeLibraryResourceID': '33',
            'UACExecutionLevel': 'HighestAvailable',
            'UACUIAccess': 'true',
            'Version': 'a_string'},
        'ResourceCompile': {
            'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
            'AdditionalOptions': 'a_string',
            'Culture': '0x03eb',
            'IgnoreStandardIncludePath': 'true',
            'PreprocessorDefinitions': 'd1;d2;d3',
            'ResourceOutputFileName': 'a_string',
            'ShowProgress': 'true',
            'SuppressStartupBanner': 'true',
            'UndefinePreprocessorDefinitions': 'd1;d2;d3'},
        'Midl': {
            'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
            'AdditionalOptions': 'a_string',
            'CPreprocessOptions': 'a_string',
            'DefaultCharType': 'Unsigned',
            'DllDataFileName': 'a_file_name',
            'EnableErrorChecks': 'All',
            'ErrorCheckAllocations': 'true',
            'ErrorCheckBounds': 'true',
            'ErrorCheckEnumRange': 'true',
            'ErrorCheckRefPointers': 'true',
            'ErrorCheckStubData': 'true',
            'GenerateStublessProxies': 'true',
            'GenerateTypeLibrary': 'true',
            'HeaderFileName': 'a_file_name',
            'IgnoreStandardIncludePath': 'true',
            'InterfaceIdentifierFileName': 'a_file_name',
            'MkTypLibCompatible': 'true',
            'OutputDirectory': 'a_string',
            'PreprocessorDefinitions': 'd1;d2;d3',
            'ProxyFileName': 'a_file_name',
            'RedirectOutputAndErrors': 'a_file_name',
            'StructMemberAlignment': '4',
            'SuppressStartupBanner': 'true',
            'TargetEnvironment': 'Win32',
            'TypeLibraryName': 'a_file_name',
            'UndefinePreprocessorDefinitions': 'd1;d2;d3',
            'ValidateAllParameters': 'true',
            'WarnAsError': 'true',
            'WarningLevel': '4'},
        'Lib': {
            'AdditionalDependencies': 'file1;file2;file3',
            'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
            'AdditionalOptions': 'a_string',
            'ExportNamedFunctions': 'd1;d2;d3',
            'ForceSymbolReferences': 'a_string',
            'IgnoreAllDefaultLibraries': 'true',
            'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
            'ModuleDefinitionFile': 'a_file_name',
            'OutputFile': 'a_file_name',
            'SuppressStartupBanner': 'true',
            'UseUnicodeResponseFiles': 'true'},
        'Manifest': {
            'AdditionalManifestFiles': 'file1;file2;file3',
            'AdditionalOptions': 'a_string',
            'AssemblyIdentity': 'a_string',
            'ComponentFileName': 'a_file_name',
            'GenerateCatalogFiles': 'true',
            'InputResourceManifests': 'a_string',
            'OutputManifestFile': 'a_file_name',
            'RegistrarScriptFile': 'a_file_name',
            'ReplacementsFile': 'a_file_name',
            'SuppressStartupBanner': 'true',
            'TypeLibraryFile': 'a_file_name',
            'UpdateFileHashes': 'true',
            'UpdateFileHashesSearchPath': 'a_file_name',
            'VerboseOutput': 'true'},
        'ManifestResourceCompile': {
            'ResourceOutputFileName': 'my_name'},
        'ProjectReference': {
            'LinkLibraryDependencies': 'true',
            'UseLibraryDependencyInputs': 'false'},
        '': {
            'EmbedManifest': 'true',
            'GenerateManifest': 'true',
            'IgnoreImportLibrary': 'true',
            'LinkIncremental': 'false'}}
    actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
        msvs_settings,
        self.stderr)
    self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
    self._ExpectedWarnings([])
  def testConvertToMSBuildSettings_actual(self):
    """Tests the conversion of an actual project.

    A VS2008 project with most of the options defined was created through the
    VS2008 IDE.  It was then converted to VS2010.  The tool settings found in
    the .vcproj and .vcxproj files were converted to the two dictionaries
    msvs_settings and expected_msbuild_settings.

    Note that for many settings, the VS2010 converter adds macros like
    %(AdditionalIncludeDirectories) to make sure than inherited values are
    included.  Since the Gyp projects we generate do not use inheritance,
    we removed these macros.  They were:
        ClCompile:
            AdditionalIncludeDirectories: ';%(AdditionalIncludeDirectories)'
            AdditionalOptions: ' %(AdditionalOptions)'
            AdditionalUsingDirectories: ';%(AdditionalUsingDirectories)'
            DisableSpecificWarnings: ';%(DisableSpecificWarnings)',
            ForcedIncludeFiles: ';%(ForcedIncludeFiles)',
            ForcedUsingFiles: ';%(ForcedUsingFiles)',
            PreprocessorDefinitions: ';%(PreprocessorDefinitions)',
            UndefinePreprocessorDefinitions:
                ';%(UndefinePreprocessorDefinitions)',
        Link:
            AdditionalDependencies: ';%(AdditionalDependencies)',
            AdditionalLibraryDirectories: ';%(AdditionalLibraryDirectories)',
            AdditionalManifestDependencies:
                ';%(AdditionalManifestDependencies)',
            AdditionalOptions: ' %(AdditionalOptions)',
            AddModuleNamesToAssembly: ';%(AddModuleNamesToAssembly)',
            AssemblyLinkResource: ';%(AssemblyLinkResource)',
            DelayLoadDLLs: ';%(DelayLoadDLLs)',
            EmbedManagedResourceFile: ';%(EmbedManagedResourceFile)',
            ForceSymbolReferences: ';%(ForceSymbolReferences)',
            IgnoreSpecificDefaultLibraries:
                ';%(IgnoreSpecificDefaultLibraries)',
        ResourceCompile:
            AdditionalIncludeDirectories: ';%(AdditionalIncludeDirectories)',
            AdditionalOptions: ' %(AdditionalOptions)',
            PreprocessorDefinitions: ';%(PreprocessorDefinitions)',
        Manifest:
            AdditionalManifestFiles: ';%(AdditionalManifestFiles)',
            AdditionalOptions: ' %(AdditionalOptions)',
            InputResourceManifests: ';%(InputResourceManifests)',
    """
    # Settings as extracted from the VS2008 .vcproj file.
    msvs_settings = {
        'VCCLCompilerTool': {
            'AdditionalIncludeDirectories': 'dir1',
            'AdditionalOptions': '/more',
            'AdditionalUsingDirectories': 'test',
            'AssemblerListingLocation': '$(IntDir)\\a',
            'AssemblerOutput': '1',
            'BasicRuntimeChecks': '3',
            'BrowseInformation': '1',
            'BrowseInformationFile': '$(IntDir)\\e',
            'BufferSecurityCheck': 'false',
            'CallingConvention': '1',
            'CompileAs': '1',
            'DebugInformationFormat': '4',
            'DefaultCharIsUnsigned': 'true',
            'Detect64BitPortabilityProblems': 'true',
            'DisableLanguageExtensions': 'true',
            'DisableSpecificWarnings': 'abc',
            'EnableEnhancedInstructionSet': '1',
            'EnableFiberSafeOptimizations': 'true',
            'EnableFunctionLevelLinking': 'true',
            'EnableIntrinsicFunctions': 'true',
            'EnablePREfast': 'true',
            'ErrorReporting': '2',
            'ExceptionHandling': '2',
            'ExpandAttributedSource': 'true',
            'FavorSizeOrSpeed': '2',
            'FloatingPointExceptions': 'true',
            'FloatingPointModel': '1',
            'ForceConformanceInForLoopScope': 'false',
            'ForcedIncludeFiles': 'def',
            'ForcedUsingFiles': 'ge',
            'GeneratePreprocessedFile': '2',
            'GenerateXMLDocumentationFiles': 'true',
            'IgnoreStandardIncludePath': 'true',
            'InlineFunctionExpansion': '1',
            'KeepComments': 'true',
            'MinimalRebuild': 'true',
            'ObjectFile': '$(IntDir)\\b',
            'OmitDefaultLibName': 'true',
            'OmitFramePointers': 'true',
            'OpenMP': 'true',
            'Optimization': '3',
            'PrecompiledHeaderFile': '$(IntDir)\\$(TargetName).pche',
            'PrecompiledHeaderThrough': 'StdAfx.hd',
            'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE',
            'ProgramDataBaseFileName': '$(IntDir)\\vc90b.pdb',
            'RuntimeLibrary': '3',
            'RuntimeTypeInfo': 'false',
            'ShowIncludes': 'true',
            'SmallerTypeCheck': 'true',
            'StringPooling': 'true',
            'StructMemberAlignment': '3',
            'SuppressStartupBanner': 'false',
            'TreatWChar_tAsBuiltInType': 'false',
            'UndefineAllPreprocessorDefinitions': 'true',
            'UndefinePreprocessorDefinitions': 'wer',
            'UseFullPaths': 'true',
            'UsePrecompiledHeader': '0',
            'UseUnicodeResponseFiles': 'false',
            'WarnAsError': 'true',
            'WarningLevel': '3',
            'WholeProgramOptimization': 'true',
            'XMLDocumentationFileName': '$(IntDir)\\c'},
        'VCLinkerTool': {
            'AdditionalDependencies': 'zx',
            'AdditionalLibraryDirectories': 'asd',
            'AdditionalManifestDependencies': 's2',
            'AdditionalOptions': '/mor2',
            'AddModuleNamesToAssembly': 'd1',
            'AllowIsolation': 'false',
            'AssemblyDebug': '1',
            'AssemblyLinkResource': 'd5',
            'BaseAddress': '23423',
            'CLRImageType': '3',
            'CLRThreadAttribute': '1',
            'CLRUnmanagedCodeCheck': 'true',
            'DataExecutionPrevention': '0',
            'DelayLoadDLLs': 'd4',
            'DelaySign': 'true',
            'Driver': '2',
            'EmbedManagedResourceFile': 'd2',
            'EnableCOMDATFolding': '1',
            'EnableUAC': 'false',
            'EntryPointSymbol': 'f5',
            'ErrorReporting': '2',
            'FixedBaseAddress': '1',
            'ForceSymbolReferences': 'd3',
            'FunctionOrder': 'fssdfsd',
            'GenerateDebugInformation': 'true',
            'GenerateManifest': 'false',
            'GenerateMapFile': 'true',
            'HeapCommitSize': '13',
            'HeapReserveSize': '12',
            'IgnoreAllDefaultLibraries': 'true',
            'IgnoreDefaultLibraryNames': 'flob;flok',
            'IgnoreEmbeddedIDL': 'true',
            'IgnoreImportLibrary': 'true',
            'ImportLibrary': 'f4',
            'KeyContainer': 'f7',
            'KeyFile': 'f6',
            'LargeAddressAware': '2',
            'LinkIncremental': '0',
            'LinkLibraryDependencies': 'false',
            'LinkTimeCodeGeneration': '1',
            'ManifestFile':
                '$(IntDir)\\$(TargetFileName).2intermediate.manifest',
            'MapExports': 'true',
            'MapFileName': 'd5',
            'MergedIDLBaseFileName': 'f2',
            'MergeSections': 'f5',
            'MidlCommandFile': 'f1',
            'ModuleDefinitionFile': 'sdsd',
            'OptimizeForWindows98': '2',
            'OptimizeReferences': '2',
            'OutputFile': '$(OutDir)\\$(ProjectName)2.exe',
            'PerUserRedirection': 'true',
            'Profile': 'true',
            'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd',
            'ProgramDatabaseFile': 'Flob.pdb',
            'RandomizedBaseAddress': '1',
            'RegisterOutput': 'true',
            'ResourceOnlyDLL': 'true',
            'SetChecksum': 'false',
            'ShowProgress': '1',
            'StackCommitSize': '15',
            'StackReserveSize': '14',
            'StripPrivateSymbols': 'd3',
            'SubSystem': '1',
            'SupportUnloadOfDelayLoadedDLL': 'true',
            'SuppressStartupBanner': 'false',
            'SwapRunFromCD': 'true',
            'SwapRunFromNet': 'true',
            'TargetMachine': '1',
            'TerminalServerAware': '1',
            'TurnOffAssemblyGeneration': 'true',
            'TypeLibraryFile': 'f3',
            'TypeLibraryResourceID': '12',
            'UACExecutionLevel': '2',
            'UACUIAccess': 'true',
            'UseLibraryDependencyInputs': 'true',
            'UseUnicodeResponseFiles': 'false',
            'Version': '333'},
        'VCResourceCompilerTool': {
            'AdditionalIncludeDirectories': 'f3',
            'AdditionalOptions': '/more3',
            'Culture': '3084',
            'IgnoreStandardIncludePath': 'true',
            'PreprocessorDefinitions': '_UNICODE;UNICODE2',
            'ResourceOutputFileName': '$(IntDir)/$(InputName)3.res',
            'ShowProgress': 'true'},
        'VCManifestTool': {
            'AdditionalManifestFiles': 'sfsdfsd',
            'AdditionalOptions': 'afdsdafsd',
            'AssemblyIdentity': 'sddfdsadfsa',
            'ComponentFileName': 'fsdfds',
            'DependencyInformationFile': '$(IntDir)\\mt.depdfd',
            'EmbedManifest': 'false',
            'GenerateCatalogFiles': 'true',
            'InputResourceManifests': 'asfsfdafs',
            'ManifestResourceFile':
                '$(IntDir)\\$(TargetFileName).embed.manifest.resfdsf',
            'OutputManifestFile': '$(TargetPath).manifestdfs',
            'RegistrarScriptFile': 'sdfsfd',
            'ReplacementsFile': 'sdffsd',
            'SuppressStartupBanner': 'false',
            'TypeLibraryFile': 'sfsd',
            'UpdateFileHashes': 'true',
            'UpdateFileHashesSearchPath': 'sfsd',
            'UseFAT32Workaround': 'true',
            'UseUnicodeResponseFiles': 'false',
            'VerboseOutput': 'true'}}
    # Settings as produced by the VS2010 upgrade wizard for the same project
    # (with inheritance macros stripped; see the docstring).
    expected_msbuild_settings = {
        'ClCompile': {
            'AdditionalIncludeDirectories': 'dir1',
            'AdditionalOptions': '/more /J',
            'AdditionalUsingDirectories': 'test',
            'AssemblerListingLocation': '$(IntDir)a',
            'AssemblerOutput': 'AssemblyCode',
            'BasicRuntimeChecks': 'EnableFastChecks',
            'BrowseInformation': 'true',
            'BrowseInformationFile': '$(IntDir)e',
            'BufferSecurityCheck': 'false',
            'CallingConvention': 'FastCall',
            'CompileAs': 'CompileAsC',
            'DebugInformationFormat': 'EditAndContinue',
            'DisableLanguageExtensions': 'true',
            'DisableSpecificWarnings': 'abc',
            'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions',
            'EnableFiberSafeOptimizations': 'true',
            'EnablePREfast': 'true',
            'ErrorReporting': 'Queue',
            'ExceptionHandling': 'Async',
            'ExpandAttributedSource': 'true',
            'FavorSizeOrSpeed': 'Size',
            'FloatingPointExceptions': 'true',
            'FloatingPointModel': 'Strict',
            'ForceConformanceInForLoopScope': 'false',
            'ForcedIncludeFiles': 'def',
            'ForcedUsingFiles': 'ge',
            'FunctionLevelLinking': 'true',
            'GenerateXMLDocumentationFiles': 'true',
            'IgnoreStandardIncludePath': 'true',
            'InlineFunctionExpansion': 'OnlyExplicitInline',
            'IntrinsicFunctions': 'true',
            'MinimalRebuild': 'true',
            'ObjectFileName': '$(IntDir)b',
            'OmitDefaultLibName': 'true',
            'OmitFramePointers': 'true',
            'OpenMPSupport': 'true',
            'Optimization': 'Full',
            'PrecompiledHeader': 'NotUsing',  # Actual conversion gives ''
            'PrecompiledHeaderFile': 'StdAfx.hd',
            'PrecompiledHeaderOutputFile': '$(IntDir)$(TargetName).pche',
            'PreprocessKeepComments': 'true',
            'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE',
            'PreprocessSuppressLineNumbers': 'true',
            'PreprocessToFile': 'true',
            'ProgramDataBaseFileName': '$(IntDir)vc90b.pdb',
            'RuntimeLibrary': 'MultiThreadedDebugDLL',
            'RuntimeTypeInfo': 'false',
            'ShowIncludes': 'true',
            'SmallerTypeCheck': 'true',
            'StringPooling': 'true',
            'StructMemberAlignment': '4Bytes',
            'SuppressStartupBanner': 'false',
            'TreatWarningAsError': 'true',
            'TreatWChar_tAsBuiltInType': 'false',
            'UndefineAllPreprocessorDefinitions': 'true',
            'UndefinePreprocessorDefinitions': 'wer',
            'UseFullPaths': 'true',
            'WarningLevel': 'Level3',
            'WholeProgramOptimization': 'true',
            'XMLDocumentationFileName': '$(IntDir)c'},
        'Link': {
            'AdditionalDependencies': 'zx',
            'AdditionalLibraryDirectories': 'asd',
            'AdditionalManifestDependencies': 's2',
            'AdditionalOptions': '/mor2',
            'AddModuleNamesToAssembly': 'd1',
            'AllowIsolation': 'false',
            'AssemblyDebug': 'true',
            'AssemblyLinkResource': 'd5',
            'BaseAddress': '23423',
            'CLRImageType': 'ForceSafeILImage',
            'CLRThreadAttribute': 'MTAThreadingAttribute',
            'CLRUnmanagedCodeCheck': 'true',
            'DataExecutionPrevention': '',
            'DelayLoadDLLs': 'd4',
            'DelaySign': 'true',
            'Driver': 'UpOnly',
            'EmbedManagedResourceFile': 'd2',
            'EnableCOMDATFolding': 'false',
            'EnableUAC': 'false',
            'EntryPointSymbol': 'f5',
            'FixedBaseAddress': 'false',
            'ForceSymbolReferences': 'd3',
            'FunctionOrder': 'fssdfsd',
            'GenerateDebugInformation': 'true',
            'GenerateMapFile': 'true',
            'HeapCommitSize': '13',
            'HeapReserveSize': '12',
            'IgnoreAllDefaultLibraries': 'true',
            'IgnoreEmbeddedIDL': 'true',
            'IgnoreSpecificDefaultLibraries': 'flob;flok',
            'ImportLibrary': 'f4',
            'KeyContainer': 'f7',
            'KeyFile': 'f6',
            'LargeAddressAware': 'true',
            'LinkErrorReporting': 'QueueForNextLogin',
            'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
            'ManifestFile': '$(IntDir)$(TargetFileName).2intermediate.manifest',
            'MapExports': 'true',
            'MapFileName': 'd5',
            'MergedIDLBaseFileName': 'f2',
            'MergeSections': 'f5',
            'MidlCommandFile': 'f1',
            'ModuleDefinitionFile': 'sdsd',
            'NoEntryPoint': 'true',
            'OptimizeReferences': 'true',
            'OutputFile': '$(OutDir)$(ProjectName)2.exe',
            'PerUserRedirection': 'true',
            'Profile': 'true',
            'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd',
            'ProgramDatabaseFile': 'Flob.pdb',
            'RandomizedBaseAddress': 'false',
            'RegisterOutput': 'true',
            'SetChecksum': 'false',
            'ShowProgress': 'LinkVerbose',
            'StackCommitSize': '15',
            'StackReserveSize': '14',
            'StripPrivateSymbols': 'd3',
            'SubSystem': 'Console',
            'SupportUnloadOfDelayLoadedDLL': 'true',
            'SuppressStartupBanner': 'false',
            'SwapRunFromCD': 'true',
            'SwapRunFromNET': 'true',
            'TargetMachine': 'MachineX86',
            'TerminalServerAware': 'false',
            'TurnOffAssemblyGeneration': 'true',
            'TypeLibraryFile': 'f3',
            'TypeLibraryResourceID': '12',
            'UACExecutionLevel': 'RequireAdministrator',
            'UACUIAccess': 'true',
            'Version': '333'},
        'ResourceCompile': {
            'AdditionalIncludeDirectories': 'f3',
            'AdditionalOptions': '/more3',
            'Culture': '0x0c0c',
            'IgnoreStandardIncludePath': 'true',
            'PreprocessorDefinitions': '_UNICODE;UNICODE2',
            'ResourceOutputFileName': '$(IntDir)%(Filename)3.res',
            'ShowProgress': 'true'},
        'Manifest': {
            'AdditionalManifestFiles': 'sfsdfsd',
            'AdditionalOptions': 'afdsdafsd',
            'AssemblyIdentity': 'sddfdsadfsa',
            'ComponentFileName': 'fsdfds',
            'GenerateCatalogFiles': 'true',
            'InputResourceManifests': 'asfsfdafs',
            'OutputManifestFile': '$(TargetPath).manifestdfs',
            'RegistrarScriptFile': 'sdfsfd',
            'ReplacementsFile': 'sdffsd',
            'SuppressStartupBanner': 'false',
            'TypeLibraryFile': 'sfsd',
            'UpdateFileHashes': 'true',
            'UpdateFileHashesSearchPath': 'sfsd',
            'VerboseOutput': 'true'},
        'ProjectReference': {
            'LinkLibraryDependencies': 'false',
            'UseLibraryDependencyInputs': 'true'},
        '': {
            'EmbedManifest': 'false',
            'GenerateManifest': 'false',
            'IgnoreImportLibrary': 'true',
            'LinkIncremental': ''
        },
        'ManifestResourceCompile': {
            'ResourceOutputFileName':
                '$(IntDir)$(TargetFileName).embed.manifest.resfdsf'}
    }
    actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
        msvs_settings,
        self.stderr)
    self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
    self._ExpectedWarnings([])
# Allow running this test module directly (in addition to via a test runner).
if __name__ == '__main__':
  unittest.main()
| apache-2.0 |
VictorLowther/swift | test/unit/obj/test_expirer.py | 4 | 16349 | # Copyright (c) 2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from sys import exc_info
from time import time
from unittest import main, TestCase
from test.unit import FakeLogger
from swift.common import internal_client
from swift.obj import expirer
from swift.proxy.server import Application
def not_random():
    """Deterministic stand-in for random.random(); always returns 0.5."""
    return 0.5


# Records the argument of the most recent not_sleep() call so tests can
# assert on how long the code under test would have slept.
last_not_sleep = 0


def not_sleep(seconds):
    """Stand-in for internal_client.sleep that records instead of sleeping."""
    global last_not_sleep
    last_not_sleep = seconds
class TestObjectExpirer(TestCase):
    """Unit tests for swift.obj.expirer.ObjectExpirer.

    setUp monkey-patches ``internal_client.loadapp`` and
    ``internal_client.sleep`` so no proxy app is loaded and nothing
    actually sleeps; tearDown restores the originals.
    """
    maxDiff = None

    def setUp(self):
        global not_sleep

        self.old_loadapp = internal_client.loadapp
        self.old_sleep = internal_client.sleep

        internal_client.loadapp = lambda x: None
        internal_client.sleep = not_sleep

    def tearDown(self):
        # Bug fix: this method was named ``teardown``, which unittest never
        # calls, and it restored loadapp from the nonexistent
        # ``self.loadapp`` attribute -- so the monkey-patches from setUp
        # leaked into every later test.  Now correctly named and restoring
        # from ``self.old_loadapp``.
        internal_client.sleep = self.old_sleep
        internal_client.loadapp = self.old_loadapp

    def test_report(self):
        x = expirer.ObjectExpirer({})
        x.logger = FakeLogger()

        # A non-final report inside the report interval logs nothing.
        x.report()
        self.assertEquals(x.logger.log_dict['info'], [])

        x.logger._clear()
        x.report(final=True)
        self.assertTrue('completed' in x.logger.log_dict['info'][-1][0][0],
                        x.logger.log_dict['info'])
        self.assertTrue('so far' not in x.logger.log_dict['info'][-1][0][0],
                        x.logger.log_dict['info'])

        x.logger._clear()
        x.report_last_time = time() - x.report_interval
        x.report()
        self.assertTrue('completed' not in x.logger.log_dict['info'][-1][0][0],
                        x.logger.log_dict['info'])
        self.assertTrue('so far' in x.logger.log_dict['info'][-1][0][0],
                        x.logger.log_dict['info'])

    def test_run_once_nothing_to_do(self):
        x = expirer.ObjectExpirer({})
        x.logger = FakeLogger()
        x.swift = 'throw error because a string does not have needed methods'
        x.run_once()
        self.assertEquals(x.logger.log_dict['exception'],
                          [(("Unhandled exception",), {},
                            "'str' object has no attribute "
                            "'get_account_info'")])

    def test_run_once_calls_report(self):
        class InternalClient(object):
            def get_account_info(*a, **kw):
                return 1, 2

            def iter_containers(*a, **kw):
                return []

        x = expirer.ObjectExpirer({})
        x.logger = FakeLogger()
        x.swift = InternalClient()
        x.run_once()
        self.assertEquals(
            x.logger.log_dict['info'],
            [(('Pass beginning; 1 possible containers; '
               '2 possible objects',), {}),
             (('Pass completed in 0s; 0 objects expired',), {})])

    def test_container_timestamp_break(self):
        class InternalClient(object):
            def __init__(self, containers):
                self.containers = containers

            def get_account_info(*a, **kw):
                return 1, 2

            def iter_containers(self, *a, **kw):
                return self.containers

            def iter_objects(*a, **kw):
                raise Exception('This should not have been called')

        # A container named for a future timestamp must not be iterated.
        x = expirer.ObjectExpirer({})
        x.logger = FakeLogger()
        x.swift = InternalClient([{'name': str(int(time() + 86400))}])
        x.run_once()
        for exccall in x.logger.log_dict['exception']:
            self.assertTrue(
                'This should not have been called' not in exccall[0][0])
        self.assertEquals(
            x.logger.log_dict['info'],
            [(('Pass beginning; 1 possible containers; '
               '2 possible objects',), {}),
             (('Pass completed in 0s; 0 objects expired',), {})])

        # Reverse test to be sure it still would blow up the way expected.
        x = expirer.ObjectExpirer({})
        x.logger = FakeLogger()
        x.swift = InternalClient([{'name': str(int(time() - 86400))}])
        x.run_once()
        self.assertEquals(x.logger.log_dict['exception'],
                          [(('Unhandled exception',), {},
                            str(Exception('This should not have been called')))])

    def test_object_timestamp_break(self):
        class InternalClient(object):
            def __init__(self, containers, objects):
                self.containers = containers
                self.objects = objects

            def get_account_info(*a, **kw):
                return 1, 2

            def iter_containers(self, *a, **kw):
                return self.containers

            def delete_container(*a, **kw):
                pass

            def iter_objects(self, *a, **kw):
                return self.objects

        def should_not_be_called(*a, **kw):
            raise Exception('This should not have been called')

        # An object named for a future timestamp must not be deleted.
        x = expirer.ObjectExpirer({})
        x.logger = FakeLogger()
        x.swift = InternalClient([{'name': str(int(time() - 86400))}],
                                 [{'name': '%d-actual-obj' % int(time() + 86400)}])
        x.run_once()
        for exccall in x.logger.log_dict['exception']:
            self.assertTrue(
                'This should not have been called' not in exccall[0][0])
        self.assertEquals(x.logger.log_dict['info'],
                          [(('Pass beginning; 1 possible containers; '
                             '2 possible objects',), {}),
                           (('Pass completed in 0s; 0 objects expired',), {})])

        # Reverse test to be sure it still would blow up the way expected.
        x = expirer.ObjectExpirer({})
        x.logger = FakeLogger()
        ts = int(time() - 86400)
        x.swift = InternalClient([{'name': str(int(time() - 86400))}],
                                 [{'name': '%d-actual-obj' % ts}])
        x.delete_actual_object = should_not_be_called
        x.run_once()
        excswhiledeleting = []
        for exccall in x.logger.log_dict['exception']:
            if exccall[0][0].startswith('Exception while deleting '):
                excswhiledeleting.append(exccall[0][0])
        self.assertEquals(excswhiledeleting,
                          ['Exception while deleting object %d %d-actual-obj '
                           'This should not have been called' % (ts, ts)])

    def test_failed_delete_keeps_entry(self):
        class InternalClient(object):
            def __init__(self, containers, objects):
                self.containers = containers
                self.objects = objects

            def get_account_info(*a, **kw):
                return 1, 2

            def iter_containers(self, *a, **kw):
                return self.containers

            def delete_container(*a, **kw):
                pass

            def delete_object(*a, **kw):
                raise Exception('This should not have been called')

            def iter_objects(self, *a, **kw):
                return self.objects

        def deliberately_blow_up(actual_obj, timestamp):
            raise Exception('failed to delete actual object')

        def should_not_get_called(container, obj):
            raise Exception('This should not have been called')

        # When deleting the actual object fails, the queue entry must not
        # be removed (delete_object is never reached).
        x = expirer.ObjectExpirer({})
        x.logger = FakeLogger()
        x.iter_containers = lambda: [str(int(time() - 86400))]
        ts = int(time() - 86400)
        x.delete_actual_object = deliberately_blow_up
        x.swift = InternalClient([{'name': str(int(time() - 86400))}],
                                 [{'name': '%d-actual-obj' % ts}])
        x.run_once()
        excswhiledeleting = []
        for exccall in x.logger.log_dict['exception']:
            if exccall[0][0].startswith('Exception while deleting '):
                excswhiledeleting.append(exccall[0][0])
        self.assertEquals(excswhiledeleting,
                          ['Exception while deleting object %d %d-actual-obj '
                           'failed to delete actual object' % (ts, ts)])
        self.assertEquals(x.logger.log_dict['info'],
                          [(('Pass beginning; 1 possible containers; '
                             '2 possible objects',), {}),
                           (('Pass completed in 0s; 0 objects expired',), {})])

        # Reverse test to be sure it still would blow up the way expected.
        x = expirer.ObjectExpirer({})
        x.logger = FakeLogger()
        ts = int(time() - 86400)
        x.delete_actual_object = lambda o, t: None
        x.swift = InternalClient([{'name': str(int(time() - 86400))}],
                                 [{'name': '%d-actual-obj' % ts}])
        x.run_once()
        excswhiledeleting = []
        for exccall in x.logger.log_dict['exception']:
            if exccall[0][0].startswith('Exception while deleting '):
                excswhiledeleting.append(exccall[0][0])
        self.assertEquals(excswhiledeleting,
                          ['Exception while deleting object %d %d-actual-obj This should '
                           'not have been called' % (ts, ts)])

    def test_success_gets_counted(self):
        class InternalClient(object):
            def __init__(self, containers, objects):
                self.containers = containers
                self.objects = objects

            def get_account_info(*a, **kw):
                return 1, 2

            def iter_containers(self, *a, **kw):
                return self.containers

            def delete_container(*a, **kw):
                pass

            def delete_object(*a, **kw):
                pass

            def iter_objects(self, *a, **kw):
                return self.objects

        x = expirer.ObjectExpirer({})
        x.logger = FakeLogger()
        x.delete_actual_object = lambda o, t: None
        self.assertEquals(x.report_objects, 0)
        x.swift = InternalClient([{'name': str(int(time() - 86400))}],
                                 [{'name': '%d-actual-obj' % int(time() - 86400)}])
        x.run_once()
        self.assertEquals(x.report_objects, 1)
        self.assertEquals(x.logger.log_dict['info'],
                          [(('Pass beginning; 1 possible containers; '
                             '2 possible objects',), {}),
                           (('Pass completed in 0s; 1 objects expired',), {})])

    def test_failed_delete_continues_on(self):
        class InternalClient(object):
            def __init__(self, containers, objects):
                self.containers = containers
                self.objects = objects

            def get_account_info(*a, **kw):
                return 1, 2

            def iter_containers(self, *a, **kw):
                return self.containers

            def delete_container(*a, **kw):
                raise Exception('failed to delete container')

            def delete_object(*a, **kw):
                pass

            def iter_objects(self, *a, **kw):
                return self.objects

        def fail_delete_actual_object(actual_obj, timestamp):
            raise Exception('failed to delete actual object')

        # Every object and container failure is logged, and the pass keeps
        # going through the remaining entries.
        x = expirer.ObjectExpirer({})
        x.logger = FakeLogger()

        cts = int(time() - 86400)
        ots = int(time() - 86400)

        containers = [
            {'name': str(cts)},
            {'name': str(cts + 1)},
        ]
        objects = [
            {'name': '%d-actual-obj' % ots},
            {'name': '%d-next-obj' % ots}
        ]

        x.swift = InternalClient(containers, objects)
        x.delete_actual_object = fail_delete_actual_object
        x.run_once()
        excswhiledeleting = []
        for exccall in x.logger.log_dict['exception']:
            if exccall[0][0].startswith('Exception while deleting '):
                excswhiledeleting.append(exccall[0][0])
        self.assertEquals(excswhiledeleting, [
            'Exception while deleting object %d %d-actual-obj failed to '
            'delete actual object' % (cts, ots),
            'Exception while deleting object %d %d-next-obj failed to '
            'delete actual object' % (cts, ots),
            'Exception while deleting container %d failed to delete '
            'container' % (cts,),
            'Exception while deleting object %d %d-actual-obj failed to '
            'delete actual object' % (cts + 1, ots),
            'Exception while deleting object %d %d-next-obj failed to '
            'delete actual object' % (cts + 1, ots),
            'Exception while deleting container %d failed to delete '
            'container' % (cts + 1,)])
        self.assertEquals(x.logger.log_dict['info'],
                          [(('Pass beginning; 1 possible containers; '
                             '2 possible objects',), {}),
                           (('Pass completed in 0s; 0 objects expired',), {})])

    def test_run_forever_initial_sleep_random(self):
        global last_not_sleep

        def raise_system_exit():
            raise SystemExit('test_run_forever')

        interval = 1234
        x = expirer.ObjectExpirer({'__file__': 'unit_test',
                                   'interval': interval})
        orig_random = expirer.random
        orig_sleep = expirer.sleep
        exc = None
        try:
            expirer.random = not_random
            expirer.sleep = not_sleep
            x.run_once = raise_system_exit
            x.run_forever()
        except SystemExit as err:
            exc = err
        finally:
            expirer.random = orig_random
            expirer.sleep = orig_sleep
        # Bug fix: assert on ``exc`` captured above rather than relying on
        # the except-clause variable leaking out of its block.
        self.assertEquals(str(exc), 'test_run_forever')
        self.assertEquals(last_not_sleep, 0.5 * interval)

    def test_run_forever_catches_usual_exceptions(self):
        raises = [0]

        def raise_exceptions():
            raises[0] += 1
            if raises[0] < 2:
                raise Exception('exception %d' % raises[0])
            raise SystemExit('exiting exception %d' % raises[0])

        x = expirer.ObjectExpirer({})
        x.logger = FakeLogger()
        orig_sleep = expirer.sleep
        exc = None
        try:
            expirer.sleep = not_sleep
            x.run_once = raise_exceptions
            x.run_forever()
        except SystemExit as err:
            exc = err
        finally:
            expirer.sleep = orig_sleep
        # Bug fix: assert on ``exc``, not the leaked ``err``.
        self.assertEquals(str(exc), 'exiting exception 2')
        self.assertEquals(x.logger.log_dict['exception'],
                          [(('Unhandled exception',), {},
                            'exception 1')])

    def test_delete_actual_object(self):
        got_env = [None]

        def fake_app(env, start_response):
            got_env[0] = env
            start_response('204 No Content', [('Content-Length', '0')])
            return []

        internal_client.loadapp = lambda x: fake_app

        x = expirer.ObjectExpirer({})
        ts = '1234'
        x.delete_actual_object('/path/to/object', ts)
        self.assertEquals(got_env[0]['HTTP_X_IF_DELETE_AT'], ts)

    def test_delete_actual_object_handles_404(self):

        def fake_app(env, start_response):
            start_response('404 Not Found', [('Content-Length', '0')])
            return []

        internal_client.loadapp = lambda x: fake_app

        x = expirer.ObjectExpirer({})
        x.delete_actual_object('/path/to/object', '1234')

    def test_delete_actual_object_handles_412(self):

        def fake_app(env, start_response):
            start_response('412 Precondition Failed',
                           [('Content-Length', '0')])
            return []

        internal_client.loadapp = lambda x: fake_app

        x = expirer.ObjectExpirer({})
        x.delete_actual_object('/path/to/object', '1234')

    def test_delete_actual_object_does_not_handle_odd_stuff(self):

        def fake_app(env, start_response):
            start_response('503 Internal Server Error',
                           [('Content-Length', '0')])
            return []

        internal_client.loadapp = lambda x: fake_app

        x = expirer.ObjectExpirer({})
        exc = None
        try:
            x.delete_actual_object('/path/to/object', '1234')
        except Exception as err:
            exc = err
        # Robustness: fail with a clear message if no exception was raised
        # instead of an AttributeError on ``None`` below.
        self.assertNotEqual(exc, None)
        self.assertEquals(503, exc.resp.status_int)
# Allow running this test module directly.
if __name__ == '__main__':
    main()
| apache-2.0 |
switchkiller/ProjDjanko | lib/python2.7/site-packages/django/contrib/admin/__init__.py | 160 | 1237 | # ACTION_CHECKBOX_NAME is unused, but should stay since its import from here
# has been referenced in documentation.
from django.contrib.admin.decorators import register
from django.contrib.admin.helpers import ACTION_CHECKBOX_NAME
from django.contrib.admin.options import (HORIZONTAL, VERTICAL,
ModelAdmin, StackedInline, TabularInline)
from django.contrib.admin.filters import (ListFilter, SimpleListFilter,
FieldListFilter, BooleanFieldListFilter, RelatedFieldListFilter,
ChoicesFieldListFilter, DateFieldListFilter, AllValuesFieldListFilter,
RelatedOnlyFieldListFilter)
from django.contrib.admin.sites import AdminSite, site
from django.utils.module_loading import autodiscover_modules
__all__ = [
"register", "ACTION_CHECKBOX_NAME", "ModelAdmin", "HORIZONTAL", "VERTICAL",
"StackedInline", "TabularInline", "AdminSite", "site", "ListFilter",
"SimpleListFilter", "FieldListFilter", "BooleanFieldListFilter",
"RelatedFieldListFilter", "ChoicesFieldListFilter", "DateFieldListFilter",
"AllValuesFieldListFilter", "RelatedOnlyFieldListFilter", "autodiscover",
]
def autodiscover():
autodiscover_modules('admin', register_to=site)
default_app_config = 'django.contrib.admin.apps.AdminConfig'
| gpl-2.0 |
SmartInfrastructures/neutron | neutron/services/firewall/agents/firewall_agent_api.py | 46 | 2517 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from neutron.common import rpc as n_rpc
LOG = logging.getLogger(__name__)

# Configuration options exposed under the [fwaas] section.
FWaaSOpts = [
    cfg.StrOpt(
        'driver',
        default='',
        help=_("Name of the FWaaS Driver")),
    cfg.BoolOpt(
        'enabled',
        default=False,
        help=_("Enable FWaaS")),
]
cfg.CONF.register_opts(FWaaSOpts, 'fwaas')
class FWaaSPluginApiMixin(object):
    """Agent-side RPC proxy for calls from the FWaaS agent to the plugin."""

    def __init__(self, topic, host):
        self.host = host
        rpc_target = oslo_messaging.Target(topic=topic, version='1.0')
        self.client = n_rpc.get_client(rpc_target)

    def set_firewall_status(self, context, firewall_id, status):
        """Report a firewall's new status back to the plugin over RPC."""
        prepared = self.client.prepare()
        return prepared.call(
            context, 'set_firewall_status',
            host=self.host, firewall_id=firewall_id, status=status)

    def firewall_deleted(self, context, firewall_id):
        """Tell the plugin that a firewall's resources have been removed."""
        prepared = self.client.prepare()
        return prepared.call(
            context, 'firewall_deleted',
            host=self.host, firewall_id=firewall_id)
class FWaaSAgentRpcCallbackMixin(object):
    """Mixin for FWaaS agent Implementations."""

    def __init__(self, host):
        super(FWaaSAgentRpcCallbackMixin, self).__init__(host)

    # Concrete agent drivers are expected to override the no-op handlers
    # below with real create/update/delete behavior.

    def create_firewall(self, context, firewall, host):
        """Handle RPC cast from plugin to create a firewall."""
        pass

    def update_firewall(self, context, firewall, host):
        """Handle RPC cast from plugin to update a firewall."""
        pass

    def delete_firewall(self, context, firewall, host):
        """Handle RPC cast from plugin to delete a firewall."""
        pass
FHannes/intellij-community | python/lib/Lib/site-packages/django/utils/tzinfo.py | 313 | 2511 | "Implementation of tzinfo classes for use with datetime.datetime."
import time
from datetime import timedelta, tzinfo
from django.utils.encoding import smart_unicode, smart_str, DEFAULT_LOCALE_ENCODING
class FixedOffset(tzinfo):
    "Fixed offset in minutes east from UTC."

    def __init__(self, offset):
        """Accept either an integer number of minutes or a timedelta."""
        if isinstance(offset, timedelta):
            self.__offset = offset
            # Bug fix: timedelta normalizes negative offsets into a
            # negative ``days`` plus a *positive* ``seconds`` (e.g.
            # timedelta(minutes=-300) -> days=-1, seconds=68400), so the
            # old ``offset.seconds // 60`` produced a wrong, positive
            # minute count for negative offsets.  Use both fields.
            offset = offset.days * 24 * 60 + offset.seconds // 60
        else:
            self.__offset = timedelta(minutes=offset)

        sign = '-' if offset < 0 else '+'
        hours, minutes = divmod(abs(offset), 60)
        self.__name = u"%s%02d%02d" % (sign, hours, minutes)

    def __repr__(self):
        return self.__name

    def utcoffset(self, dt):
        return self.__offset

    def tzname(self, dt):
        return self.__name

    def dst(self, dt):
        # Fixed-offset zones have no daylight saving time.
        return timedelta(0)
class LocalTimezone(tzinfo):
    "Proxy timezone information from time module."

    def __init__(self, dt):
        tzinfo.__init__(self)
        # Cache the zone name derived from the DST state of the datetime
        # this instance was created for.
        self._tzname = self.tzname(dt)

    def __repr__(self):
        return smart_str(self._tzname)

    def utcoffset(self, dt):
        # time.altzone / time.timezone are seconds *west* of UTC (DST and
        # non-DST respectively), hence the negation.
        if self._isdst(dt):
            return timedelta(seconds=-time.altzone)
        else:
            return timedelta(seconds=-time.timezone)

    def dst(self, dt):
        # The DST adjustment is the difference between the two offsets.
        if self._isdst(dt):
            return timedelta(seconds=-time.altzone) - timedelta(seconds=-time.timezone)
        else:
            return timedelta(0)

    def tzname(self, dt):
        try:
            return smart_unicode(time.tzname[self._isdst(dt)],
                                 DEFAULT_LOCALE_ENCODING)
        except UnicodeDecodeError:
            # Platform zone names that cannot be decoded yield no name.
            return None

    def _isdst(self, dt):
        # Round-trip the naive datetime through mktime/localtime and read
        # the platform's DST flag for that instant.
        tt = (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.weekday(), 0, -1)
        try:
            stamp = time.mktime(tt)
        except (OverflowError, ValueError):
            # 32 bit systems can't handle dates after Jan 2038, and certain
            # systems can't handle dates before ~1901-12-01:
            #
            # >>> time.mktime((1900, 1, 13, 0, 0, 0, 0, 0, 0))
            # OverflowError: mktime argument out of range
            # >>> time.mktime((1850, 1, 13, 0, 0, 0, 0, 0, 0))
            # ValueError: year out of range
            #
            # In this case, we fake the date, because we only care about the
            # DST flag.
            tt = (2037,) + tt[1:]
            stamp = time.mktime(tt)
        tt = time.localtime(stamp)
        return tt.tm_isdst > 0
| apache-2.0 |
tommy-u/chaco | chaco/multi_line_plot.py | 1 | 16640 | """ Defines the MultiLinePlot class.
"""
from __future__ import with_statement
# Standard library imports
import warnings
from math import ceil, floor
# Major library imports
import numpy as np
from numpy import argsort, array, invert, isnan, take, transpose
# Enthought library imports
from enable.api import black_color_trait, ColorTrait, LineStyle
from traits.api import Float, List, Str, Trait, \
Bool, Callable, Property, cached_property, Instance, Array
from traitsui.api import Item, View, ScrubberEditor, HGroup
from array_data_source import ArrayDataSource
from base import arg_find_runs, bin_search
from base_xy_plot import BaseXYPlot
class MultiLinePlot(BaseXYPlot):
    """ A plot consisting of multiple lines.

    The data to be plotted must come from a two-dimensional array with shape M by N
    stored in a MultiArrayDataSource object.  M is the number of lines to be plotted,
    and N is the number of points in each line.

    Constructor Parameters
    ----------------------
    index : instance of an ArrayDataSource
        These are the 'x' or abscissa coordinates.

    yindex : instance of ArrayDataSource
        These are the 'y' coordinates.

    value : instance of a MultiArrayDataSource

        Note that the `scale`, `offset` and `normalized_amplitude` attributes of the
        MultiLinePlot control the projection of the traces into the (x,y)
        plot.  In simplest case, `scale=1` and `offset=0`, and `normalized_amplitude`
        controls the scaling of the traces relative to their base y value.

    global_min, global_max : float
        The minimum and maximum values of the data in `value`.  For large
        arrays, computing these could take excessive time, so they must be
        provided when an instance is created.

    normalized_amplitude : Float

    color : ColorTrait

    color_func : Callable or None
        If not None, this Callable overrides `color`.  The argument to `color_func`
        will be the integer index of the trace to be rendered.  `color_func` must
        return an RGBA 4-tuple.
        Default: None

    orientation : str
        Must be 'v' or 'h' (for 'vertical' or 'horizontal', respectively).  This is
        the orientation of the index axis (i.e. the 'x' axis).
        Default: 'h'

    fast_clip : bool
        If True, traces whose *base* 'y' coordinate is outside the value axis range
        are not plotted, even if some of the data in the curve extends into the plot
        region.
        Default: False

    line_width : float
        Width of the plotted lines.

    line_style :
        The style of the trace lines in the plot.

    The following are from the original LinePlot code, and are untested:

    selected_color
    selected_line_style
    """

    # M and N appearing in the comments are as defined in the docstring.

    yindex = Instance(ArrayDataSource)

    # amplitude = Float(0.0)

    # `scale` and `offset` provide a more general transformation, but are currently
    # untested.
    scale = Float(1.0)
    offset = Float(0.0)

    fast_clip = Bool(False)

    # The color of the lines.
    color = black_color_trait

    # A function that returns the color of lines.  Overrides `color` if not None.
    color_func = Trait(None, None, Callable)

    # The color to use to highlight the line when selected.
    selected_color = ColorTrait("lightyellow")

    # The style of the selected line.
    selected_line_style = LineStyle("solid")

    # The name of the key in self.metadata that holds the selection mask
    metadata_name = Str("selections")

    # The thickness of the line.
    line_width = Float(1.0)

    # The line dash style.
    line_style = LineStyle

    use_global_bounds = Bool(True)

    # Minimum value in the `value` data source.  This must be provided
    # in the call to the constructor.
    global_min = Float

    # Maximum value in the `value` data source.  This must be provided
    # in the call to the constructor.
    global_max = Float

    # Normalized amplitude is the value exposed to the user.
    normalized_amplitude = Float(-0.5)

    amplitude_scale = Property(Float, depends_on=['global_min', 'global_max', 'data',
                                                  'use_global_bounds', 'yindex'])

    amplitude = Property(Float, depends_on=['normalized_amplitude',
                                            'amplitude_scale'])

    #------------------------------------------------------------------------
    # Private traits
    #------------------------------------------------------------------------

    # The projected 2D numpy array.
    _trace_data = Property(Array, depends_on=['index', 'index.data_changed',
                                              'value', 'value.data_changed', 'yindex', 'yindex.data_changed',
                                              'amplitude', 'scale', 'offset'])

    # Cached list of non-NaN arrays of (x,y) data-space points; regardless of
    # self.orientation, this is always stored as (index_pt, value_pt).  This is
    # different from the default BaseXYPlot definition.
    _cached_data_pts = List

    # Cached list of non-NaN arrays of (x,y) screen-space points.
    _cached_screen_pts = List

    #------------------------------------------------------------------------
    #
    #------------------------------------------------------------------------

    def trait_view(self, obj):
        """Create a minimalist View, with just the amplitude and color attributes."""
        # Minimalist Traits UI View for customizing the plot: only the trace amplitude
        # and line color are exposed.
        view = View(
            HGroup(
                Item('use_global_bounds'),
                # Item('normalized_amplitude'),
                # Item('normalized_amplitude', editor=RangeEditor()),
                Item('normalized_amplitude',
                     editor=ScrubberEditor(increment=0.2, hover_color=0xFFFFFF, active_color=0xA0CD9E,
                                           border_color=0x0000FF)),
            ),
            Item("color", label="Trace color", style="simple"),
            width=480,
            title="Trace Plot Line Attributes",
            buttons=["OK", "Cancel"])
        return view

    #------------------------------------------------------------------------
    #
    #------------------------------------------------------------------------

    # See base_xy_plot.py for these:
    ## def hittest(self, screen_pt, threshold=7.0):
    ## def interpolate(self, index_value):

    def get_screen_points(self):
        self._gather_points()
        scrn_pts_list = [[self.map_screen(ary) for ary in line]
                         for line in self._cached_data_pts]
        return scrn_pts_list

    #------------------------------------------------------------------------
    # Private methods
    #------------------------------------------------------------------------

    @cached_property
    def _get_amplitude_scale(self):
        """
        If the amplitude is set to this value, the largest trace deviation from
        its base y coordinate will be equal to the y coordinate spacing.
        """
        # Note: Like the rest of the current code, this ignores the `scale` attribute.

        if self.yindex is not None:
            coordinates = self.yindex.get_data()
        else:
            coordinates = []

        if len(coordinates) > 1:
            dy = coordinates[1] - coordinates[0]
            if dy == 0:
                dy = 1.0
        else:
            # default coordinate spacing if there is only 1 coordinate
            dy = 1.0

        if self.use_global_bounds:
            max_abs = max(abs(self.global_min), abs(self.global_max))
        else:
            data = self.value._data
            max_abs = np.max(np.abs(data))

        if max_abs == 0:
            amp_scale = 0.5 * dy
        else:
            amp_scale = 0.5 * dy / max_abs
        return amp_scale

    @cached_property
    def _get_amplitude(self):
        amplitude = self.normalized_amplitude * self.amplitude_scale
        return amplitude

    @cached_property
    def _get__trace_data(self):
        """Compute the transformed data."""
        # Get the array from `value`
        data = self.value._data
        coordinates = self.yindex.get_data()
        channel_data = self.scale*(self.amplitude*data + coordinates[:,np.newaxis]) \
                            + self.offset
        return channel_data

    def _gather_points(self):
        """
        Collects the data points that are within the bounds of the plot and
        caches them.
        """
        if self._cache_valid:
            return

        if not self.index or not self.value:
            return

        index = self.index.get_data()
        varray = self._trace_data

        if varray.size == 0:
            self._cached_data_pts = []
            # Bug fix: this assigned ``self._cached_valid`` (a typo creating
            # a new attribute), so the cache was never marked valid on this
            # path and the points were recomputed on every draw.
            self._cache_valid = True
            return

        coordinates = self.yindex.get_data()

        if self.fast_clip:
            coord_min = float(coordinates[0])
            coord_max = coordinates[-1]
            # Bug fix: the slice bounds were floats; cast to int so they are
            # valid sequence indices.
            slice_min = max(0, int(ceil((varray.shape[0] - 1) *
                    (self.value_range.low - coord_min) / (coord_max - coord_min))))
            slice_max = min(varray.shape[0], 1 + int(floor((varray.shape[0] - 1) *
                    (self.value_range.high - coord_min) / (coord_max - coord_min))))
            varray = varray[slice_min:slice_max]
            # FIXME: The y coordinates must also be sliced to match varray.

        # Check to see if the data is completely outside the view region.
        outside = False

        # Check x coordinates.
        low, high = self.index.get_bounds()
        if low > self.index_range.high or high < self.index_range.low:
            outside = True

        # Check y coordinates.  Use varray because it is based on the yindex,
        # but has been shifted up or down depending on the values.
        ylow, yhigh = varray.min(), varray.max()
        if ylow > self.value_range.high or yhigh < self.value_range.low:
            outside = True

        if outside:
            self._cached_data_pts = []
            # Bug fix: same ``_cached_valid`` typo as above.
            self._cache_valid = True
            return

        if len(index) == 0 or varray.shape[0] == 0 or varray.shape[1] == 0 \
                or len(index) != varray.shape[1]:
            self._cached_data_pts = []
            self._cache_valid = True
            return

        size_diff = varray.shape[1] - len(index)
        if size_diff > 0:
            # Message fix: this warning previously named Chaco.LinePlot.
            warnings.warn('Chaco.MultiLinePlot: value.shape[1] %d - len(index) %d = %d\n' \
                          % (varray.shape[1], len(index), size_diff))
            index_max = len(index)
            varray = varray[:,:index_max]
        else:
            index_max = varray.shape[1]
            index = index[:index_max]

        # Split the index and value raw data into non-NaN chunks.
        # nan_mask is a boolean M by N array.
        nan_mask = invert(isnan(varray)) & invert(isnan(index))
        blocks_list = []
        for nm in nan_mask:
            blocks = [b for b in arg_find_runs(nm, "flat") if nm[b[0]] != 0]
            blocks_list.append(blocks)

        line_points = []
        for k, blocks in enumerate(blocks_list):
            points = []
            for block in blocks:
                start, end = block
                block_index = index[start:end]
                block_value = varray[k, start:end]
                index_mask = self.index_mapper.range.mask_data(block_index)

                runs = [r for r in arg_find_runs(index_mask, "flat") \
                        if index_mask[r[0]] != 0]

                # Check to see if our data view region is between two points in the
                # index data.  If so, then we have to reverse map our current view
                # into the appropriate index and draw the bracketing points.
                if runs == []:
                    data_pt = self.map_data((self.x_mapper.low_pos, self.y_mapper.low_pos))
                    if self.index.sort_order == "none":
                        indices = argsort(index)
                        sorted_index = take(index, indices)
                        sorted_value = take(varray[k], indices)
                        sort = 1
                    else:
                        sorted_index = index
                        sorted_value = varray[k]
                        if self.index.sort_order == "ascending":
                            sort = 1
                        else:
                            sort = -1
                    ndx = bin_search(sorted_index, data_pt, sort)
                    if ndx == -1:
                        # bin_search can return -1 if data_pt is outside the bounds
                        # of the source data
                        continue

                    z = transpose(array((sorted_index[ndx:ndx+2],
                                         sorted_value[ndx:ndx+2])))
                    points.append(z)
                else:
                    # Expand the width of every group of points so we draw the lines
                    # up to their next point, outside the plot area
                    data_end = len(index_mask)
                    for run in runs:
                        start, end = run
                        if start != 0:
                            start -= 1
                        if end != data_end:
                            end += 1
                        run_data = transpose(array((block_index[start:end],
                                                    block_value[start:end])))
                        points.append(run_data)
            line_points.append(points)

        self._cached_data_pts = line_points
        self._cache_valid = True
        return

    # See base_xy_plot.py for:
    ## def _downsample(self):
    ## def _downsample_vectorized(self):

    def _render(self, gc, line_points, selected_points=None):
        if len(line_points) == 0:
            return

        with gc:
            gc.set_antialias(True)
            gc.clip_to_rect(self.x, self.y, self.width, self.height)

            render = self._render_normal

            if selected_points is not None:
                gc.set_stroke_color(self.selected_color_)
                gc.set_line_width(self.line_width+10.0)
                gc.set_line_dash(self.selected_line_style_)
                render(gc, selected_points)

            if self.color_func is not None:
                # Existence of self.color_func overrides self.color.
                color_func = self.color_func
            else:
                color_func = lambda k: self.color_

            tmp = list(enumerate(line_points))
            # Note: the list is reversed for testing with _render_filled.
            for k, points in reversed(tmp):
                color = color_func(k)
                # Apply the alpha
                alpha = color[-1] if len(color) == 4 else 1
                color = color[:3] + (alpha * self.alpha,)
                gc.set_stroke_color(color)
                gc.set_line_width(self.line_width)
                gc.set_line_dash(self.line_style_)
                render(gc, points)

            # Draw the default axes, if necessary
            self._draw_default_axes(gc)

    def _render_normal(self, gc, points):
        for ary in points:
            if len(ary) > 0:
                gc.begin_path()
                gc.lines(ary)
                gc.stroke_path()
        return

    def _render_icon(self, gc, x, y, width, height):
        with gc:
            gc.set_stroke_color(self.color_)
            gc.set_line_width(self.line_width)
            gc.set_line_dash(self.line_style_)
            gc.set_antialias(0)
            gc.move_to(x, y+height/2)
            gc.line_to(x+width, y+height/2)
            gc.stroke_path()

    def _alpha_changed(self):
        self.invalidate_draw()
        self.request_redraw()
        return

    def _color_changed(self):
        self.invalidate_draw()
        self.request_redraw()
        return

    def _line_style_changed(self):
        self.invalidate_draw()
        self.request_redraw()
        return

    def _line_width_changed(self):
        self.invalidate_draw()
        self.request_redraw()
        return

    def _amplitude_changed(self):
        self.value.data_changed = True
        self.invalidate_draw()
        self.request_redraw()
        return

    def __getstate__(self):
        state = super(MultiLinePlot,self).__getstate__()
        for key in ['traits_view']:
            # Compatibility fix: ``dict.has_key`` was removed in Python 3;
            # the ``in`` operator is the equivalent test.
            if key in state:
                del state[key]
        return state
| bsd-3-clause |
mrkm4ntr/incubator-airflow | tests/providers/google/cloud/operators/test_dlp.py | 8 | 27989 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=R0904, C0111
"""
This module contains various unit tests for Google Cloud DLP Operators
"""
import unittest
from unittest import mock
from airflow.providers.google.cloud.operators.dlp import (
CloudDLPCancelDLPJobOperator,
CloudDLPCreateDeidentifyTemplateOperator,
CloudDLPCreateDLPJobOperator,
CloudDLPCreateInspectTemplateOperator,
CloudDLPCreateJobTriggerOperator,
CloudDLPCreateStoredInfoTypeOperator,
CloudDLPDeidentifyContentOperator,
CloudDLPDeleteDeidentifyTemplateOperator,
CloudDLPDeleteDLPJobOperator,
CloudDLPDeleteInspectTemplateOperator,
CloudDLPDeleteJobTriggerOperator,
CloudDLPDeleteStoredInfoTypeOperator,
CloudDLPGetDeidentifyTemplateOperator,
CloudDLPGetDLPJobOperator,
CloudDLPGetDLPJobTriggerOperator,
CloudDLPGetInspectTemplateOperator,
CloudDLPGetStoredInfoTypeOperator,
CloudDLPInspectContentOperator,
CloudDLPListDeidentifyTemplatesOperator,
CloudDLPListDLPJobsOperator,
CloudDLPListInfoTypesOperator,
CloudDLPListInspectTemplatesOperator,
CloudDLPListJobTriggersOperator,
CloudDLPListStoredInfoTypesOperator,
CloudDLPRedactImageOperator,
CloudDLPReidentifyContentOperator,
CloudDLPUpdateDeidentifyTemplateOperator,
CloudDLPUpdateInspectTemplateOperator,
CloudDLPUpdateJobTriggerOperator,
CloudDLPUpdateStoredInfoTypeOperator,
)
GCP_CONN_ID = "google_cloud_default"
ORGANIZATION_ID = "test-org"
PROJECT_ID = "test-project"
DLP_JOB_ID = "job123"
TEMPLATE_ID = "template123"
STORED_INFO_TYPE_ID = "type123"
TRIGGER_ID = "trigger123"
class TestCloudDLPCancelDLPJobOperator(unittest.TestCase):
    """Unit tests for CloudDLPCancelDLPJobOperator (hook wiring only)."""

    @mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
    def test_cancel_dlp_job(self, mock_hook):
        """execute() builds the hook with defaults and forwards the job id."""
        mock_hook.return_value.cancel_dlp_job.return_value = mock.MagicMock()
        operator = CloudDLPCancelDLPJobOperator(dlp_job_id=DLP_JOB_ID, task_id="id")
        operator.execute(context=None)
        # Hook must be constructed with the default connection, no impersonation.
        mock_hook.assert_called_once_with(
            gcp_conn_id=GCP_CONN_ID,
            impersonation_chain=None,
        )
        # All optional call parameters are expected to pass through as None.
        mock_hook.return_value.cancel_dlp_job.assert_called_once_with(
            dlp_job_id=DLP_JOB_ID,
            project_id=None,
            retry=None,
            timeout=None,
            metadata=None,
        )
class TestCloudDLPCreateDeidentifyTemplateOperator(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
def test_create_deidentify_template(self, mock_hook):
mock_hook.return_value.create_deidentify_template.return_value = mock.MagicMock()
operator = CloudDLPCreateDeidentifyTemplateOperator(organization_id=ORGANIZATION_ID, task_id="id")
operator.execute(context=None)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.create_deidentify_template.assert_called_once_with(
organization_id=ORGANIZATION_ID,
project_id=None,
deidentify_template=None,
template_id=None,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudDLPCreateDLPJobOperator(unittest.TestCase):
    """Unit tests for CloudDLPCreateDLPJobOperator (hook wiring only)."""

    @mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
    def test_create_dlp_job(self, mock_hook):
        """execute() forwards the project id and the operator's defaults."""
        mock_hook.return_value.create_dlp_job.return_value = mock.MagicMock()
        operator = CloudDLPCreateDLPJobOperator(project_id=PROJECT_ID, task_id="id")
        operator.execute(context=None)
        mock_hook.assert_called_once_with(
            gcp_conn_id=GCP_CONN_ID,
            impersonation_chain=None,
        )
        mock_hook.return_value.create_dlp_job.assert_called_once_with(
            project_id=PROJECT_ID,
            inspect_job=None,
            risk_job=None,
            job_id=None,
            retry=None,
            timeout=None,
            metadata=None,
            # The operator blocks until the DLP job finishes by default.
            wait_until_finished=True,
        )
class TestCloudDLPCreateInspectTemplateOperator(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
def test_create_inspect_template(self, mock_hook):
mock_hook.return_value.create_inspect_template.return_value = mock.MagicMock()
operator = CloudDLPCreateInspectTemplateOperator(organization_id=ORGANIZATION_ID, task_id="id")
operator.execute(context=None)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.create_inspect_template.assert_called_once_with(
organization_id=ORGANIZATION_ID,
project_id=None,
inspect_template=None,
template_id=None,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudDLPCreateJobTriggerOperator(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
def test_create_job_trigger(self, mock_hook):
mock_hook.return_value.create_job_trigger.return_value = mock.MagicMock()
operator = CloudDLPCreateJobTriggerOperator(project_id=PROJECT_ID, task_id="id")
operator.execute(context=None)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.create_job_trigger.assert_called_once_with(
project_id=PROJECT_ID,
job_trigger=None,
trigger_id=None,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudDLPCreateStoredInfoTypeOperator(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
def test_create_stored_info_type(self, mock_hook):
mock_hook.return_value.create_stored_info_type.return_value = mock.MagicMock()
operator = CloudDLPCreateStoredInfoTypeOperator(organization_id=ORGANIZATION_ID, task_id="id")
operator.execute(context=None)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.create_stored_info_type.assert_called_once_with(
organization_id=ORGANIZATION_ID,
project_id=None,
config=None,
stored_info_type_id=None,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudDLPDeidentifyContentOperator(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
def test_deidentify_content(self, mock_hook):
mock_hook.return_value.deidentify_content.return_value = mock.MagicMock()
operator = CloudDLPDeidentifyContentOperator(project_id=PROJECT_ID, task_id="id")
operator.execute(context=None)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.deidentify_content.assert_called_once_with(
project_id=PROJECT_ID,
deidentify_config=None,
inspect_config=None,
item=None,
inspect_template_name=None,
deidentify_template_name=None,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudDLPDeleteDeidentifyTemplateOperator(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
def test_delete_deidentify_template(self, mock_hook):
mock_hook.return_value.delete_deidentify_template.return_value = mock.MagicMock()
operator = CloudDLPDeleteDeidentifyTemplateOperator(
template_id=TEMPLATE_ID, organization_id=ORGANIZATION_ID, task_id="id"
)
operator.execute(context=None)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.delete_deidentify_template.assert_called_once_with(
template_id=TEMPLATE_ID,
organization_id=ORGANIZATION_ID,
project_id=None,
retry=None,
timeout=None,
metadata=None,
)
# NOTE(review): class name uses "DlpJob" while sibling tests use "DLPJob" --
# harmless for pytest discovery, but worth normalizing in a follow-up.
class TestCloudDLPDeleteDlpJobOperator(unittest.TestCase):
    """Unit tests for CloudDLPDeleteDLPJobOperator (hook wiring only)."""

    @mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
    def test_delete_dlp_job(self, mock_hook):
        """execute() forwards both the job id and the project id."""
        mock_hook.return_value.delete_dlp_job.return_value = mock.MagicMock()
        operator = CloudDLPDeleteDLPJobOperator(dlp_job_id=DLP_JOB_ID, project_id=PROJECT_ID, task_id="id")
        operator.execute(context=None)
        mock_hook.assert_called_once_with(
            gcp_conn_id=GCP_CONN_ID,
            impersonation_chain=None,
        )
        mock_hook.return_value.delete_dlp_job.assert_called_once_with(
            dlp_job_id=DLP_JOB_ID,
            project_id=PROJECT_ID,
            retry=None,
            timeout=None,
            metadata=None,
        )
class TestCloudDLPDeleteInspectTemplateOperator(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
def test_delete_inspect_template(self, mock_hook):
mock_hook.return_value.delete_inspect_template.return_value = mock.MagicMock()
operator = CloudDLPDeleteInspectTemplateOperator(
template_id=TEMPLATE_ID, organization_id=ORGANIZATION_ID, task_id="id"
)
operator.execute(context=None)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.delete_inspect_template.assert_called_once_with(
template_id=TEMPLATE_ID,
organization_id=ORGANIZATION_ID,
project_id=None,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudDLPDeleteJobTriggerOperator(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
def test_delete_job_trigger(self, mock_hook):
mock_hook.return_value.delete_job_trigger.return_value = mock.MagicMock()
operator = CloudDLPDeleteJobTriggerOperator(
job_trigger_id=TRIGGER_ID, project_id=PROJECT_ID, task_id="id"
)
operator.execute(context=None)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.delete_job_trigger.assert_called_once_with(
job_trigger_id=TRIGGER_ID,
project_id=PROJECT_ID,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudDLPDeleteStoredInfoTypeOperator(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
def test_delete_stored_info_type(self, mock_hook):
mock_hook.return_value.delete_stored_info_type.return_value = mock.MagicMock()
operator = CloudDLPDeleteStoredInfoTypeOperator(
stored_info_type_id=STORED_INFO_TYPE_ID,
organization_id=ORGANIZATION_ID,
task_id="id",
)
operator.execute(context=None)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.delete_stored_info_type.assert_called_once_with(
stored_info_type_id=STORED_INFO_TYPE_ID,
organization_id=ORGANIZATION_ID,
project_id=None,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudDLPGetDeidentifyTemplateOperator(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
def test_get_deidentify_template(self, mock_hook):
mock_hook.return_value.get_deidentify_template.return_value = mock.MagicMock()
operator = CloudDLPGetDeidentifyTemplateOperator(
template_id=TEMPLATE_ID, organization_id=ORGANIZATION_ID, task_id="id"
)
operator.execute(context=None)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.get_deidentify_template.assert_called_once_with(
template_id=TEMPLATE_ID,
organization_id=ORGANIZATION_ID,
project_id=None,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudDLPGetDlpJobOperator(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
def test_get_dlp_job(self, mock_hook):
mock_hook.return_value.get_dlp_job.return_value = mock.MagicMock()
operator = CloudDLPGetDLPJobOperator(dlp_job_id=DLP_JOB_ID, project_id=PROJECT_ID, task_id="id")
operator.execute(context=None)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.get_dlp_job.assert_called_once_with(
dlp_job_id=DLP_JOB_ID,
project_id=PROJECT_ID,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudDLPGetInspectTemplateOperator(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
def test_get_inspect_template(self, mock_hook):
mock_hook.return_value.get_inspect_template.return_value = mock.MagicMock()
operator = CloudDLPGetInspectTemplateOperator(
template_id=TEMPLATE_ID, organization_id=ORGANIZATION_ID, task_id="id"
)
operator.execute(context=None)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.get_inspect_template.assert_called_once_with(
template_id=TEMPLATE_ID,
organization_id=ORGANIZATION_ID,
project_id=None,
retry=None,
timeout=None,
metadata=None,
)
# NOTE(review): "Tripper" in the class name looks like a typo for "Trigger".
# Renaming would only affect test discovery, but confirm before changing.
class TestCloudDLPGetJobTripperOperator(unittest.TestCase):
    """Unit tests for CloudDLPGetDLPJobTriggerOperator (hook wiring only)."""

    @mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
    def test_get_job_trigger(self, mock_hook):
        """execute() forwards the trigger id and project id to the hook."""
        mock_hook.return_value.get_job_trigger.return_value = mock.MagicMock()
        operator = CloudDLPGetDLPJobTriggerOperator(
            job_trigger_id=TRIGGER_ID, project_id=PROJECT_ID, task_id="id"
        )
        operator.execute(context=None)
        mock_hook.assert_called_once_with(
            gcp_conn_id=GCP_CONN_ID,
            impersonation_chain=None,
        )
        mock_hook.return_value.get_job_trigger.assert_called_once_with(
            job_trigger_id=TRIGGER_ID,
            project_id=PROJECT_ID,
            retry=None,
            timeout=None,
            metadata=None,
        )
class TestCloudDLPGetStoredInfoTypeOperator(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
def test_get_stored_info_type(self, mock_hook):
mock_hook.return_value.get_stored_info_type.return_value = mock.MagicMock()
operator = CloudDLPGetStoredInfoTypeOperator(
stored_info_type_id=STORED_INFO_TYPE_ID,
organization_id=ORGANIZATION_ID,
task_id="id",
)
operator.execute(context=None)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.get_stored_info_type.assert_called_once_with(
stored_info_type_id=STORED_INFO_TYPE_ID,
organization_id=ORGANIZATION_ID,
project_id=None,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudDLPInspectContentOperator(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
def test_inspect_content(self, mock_hook):
mock_hook.return_value.inspect_content.return_value = mock.MagicMock()
operator = CloudDLPInspectContentOperator(project_id=PROJECT_ID, task_id="id")
operator.execute(context=None)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.inspect_content.assert_called_once_with(
project_id=PROJECT_ID,
inspect_config=None,
item=None,
inspect_template_name=None,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudDLPListDeidentifyTemplatesOperator(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
def test_list_deidentify_templates(self, mock_hook):
mock_hook.return_value.list_deidentify_templates.return_value = mock.MagicMock()
operator = CloudDLPListDeidentifyTemplatesOperator(organization_id=ORGANIZATION_ID, task_id="id")
operator.execute(context=None)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.list_deidentify_templates.assert_called_once_with(
organization_id=ORGANIZATION_ID,
project_id=None,
page_size=None,
order_by=None,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudDLPListDlpJobsOperator(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
def test_list_dlp_jobs(self, mock_hook):
mock_hook.return_value.list_dlp_jobs.return_value = mock.MagicMock()
operator = CloudDLPListDLPJobsOperator(project_id=PROJECT_ID, task_id="id")
operator.execute(context=None)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.list_dlp_jobs.assert_called_once_with(
project_id=PROJECT_ID,
results_filter=None,
page_size=None,
job_type=None,
order_by=None,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudDLPListInfoTypesOperator(unittest.TestCase):
    """Unit tests for CloudDLPListInfoTypesOperator (hook wiring only)."""

    @mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
    def test_list_info_types(self, mock_hook):
        """execute() needs no ids; every optional argument defaults to None."""
        mock_hook.return_value.list_info_types.return_value = mock.MagicMock()
        operator = CloudDLPListInfoTypesOperator(task_id="id")
        operator.execute(context=None)
        mock_hook.assert_called_once_with(
            gcp_conn_id=GCP_CONN_ID,
            impersonation_chain=None,
        )
        mock_hook.return_value.list_info_types.assert_called_once_with(
            language_code=None,
            results_filter=None,
            retry=None,
            timeout=None,
            metadata=None,
        )
class TestCloudDLPListInspectTemplatesOperator(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
def test_list_inspect_templates(self, mock_hook):
mock_hook.return_value.list_inspect_templates.return_value = mock.MagicMock()
operator = CloudDLPListInspectTemplatesOperator(organization_id=ORGANIZATION_ID, task_id="id")
operator.execute(context=None)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.list_inspect_templates.assert_called_once_with(
organization_id=ORGANIZATION_ID,
project_id=None,
page_size=None,
order_by=None,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudDLPListJobTriggersOperator(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
def test_list_job_triggers(self, mock_hook):
mock_hook.return_value.list_job_triggers.return_value = mock.MagicMock()
operator = CloudDLPListJobTriggersOperator(project_id=PROJECT_ID, task_id="id")
operator.execute(context=None)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.list_job_triggers.assert_called_once_with(
project_id=PROJECT_ID,
page_size=None,
order_by=None,
results_filter=None,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudDLPListStoredInfoTypesOperator(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
def test_list_stored_info_types(self, mock_hook):
mock_hook.return_value.list_stored_info_types.return_value = mock.MagicMock()
operator = CloudDLPListStoredInfoTypesOperator(organization_id=ORGANIZATION_ID, task_id="id")
operator.execute(context=None)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.list_stored_info_types.assert_called_once_with(
organization_id=ORGANIZATION_ID,
project_id=None,
page_size=None,
order_by=None,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudDLPRedactImageOperator(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
def test_redact_image(self, mock_hook):
mock_hook.return_value.redact_image.return_value = mock.MagicMock()
operator = CloudDLPRedactImageOperator(project_id=PROJECT_ID, task_id="id")
operator.execute(context=None)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.redact_image.assert_called_once_with(
project_id=PROJECT_ID,
inspect_config=None,
image_redaction_configs=None,
include_findings=None,
byte_item=None,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudDLPReidentifyContentOperator(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
def test_reidentify_content(self, mock_hook):
mock_hook.return_value.reidentify_content.return_value = mock.MagicMock()
operator = CloudDLPReidentifyContentOperator(project_id=PROJECT_ID, task_id="id")
operator.execute(context=None)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.reidentify_content.assert_called_once_with(
project_id=PROJECT_ID,
reidentify_config=None,
inspect_config=None,
item=None,
inspect_template_name=None,
reidentify_template_name=None,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudDLPUpdateDeidentifyTemplateOperator(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
def test_update_deidentify_template(self, mock_hook):
mock_hook.return_value.update_deidentify_template.return_value = mock.MagicMock()
operator = CloudDLPUpdateDeidentifyTemplateOperator(
template_id=TEMPLATE_ID, organization_id=ORGANIZATION_ID, task_id="id"
)
operator.execute(context=None)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.update_deidentify_template.assert_called_once_with(
template_id=TEMPLATE_ID,
organization_id=ORGANIZATION_ID,
project_id=None,
deidentify_template=None,
update_mask=None,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudDLPUpdateInspectTemplateOperator(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
def test_update_inspect_template(self, mock_hook):
mock_hook.return_value.update_inspect_template.return_value = mock.MagicMock()
operator = CloudDLPUpdateInspectTemplateOperator(
template_id=TEMPLATE_ID, organization_id=ORGANIZATION_ID, task_id="id"
)
operator.execute(context=None)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.update_inspect_template.assert_called_once_with(
template_id=TEMPLATE_ID,
organization_id=ORGANIZATION_ID,
project_id=None,
inspect_template=None,
update_mask=None,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudDLPUpdateJobTriggerOperator(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
def test_update_job_trigger(self, mock_hook):
mock_hook.return_value.update_job_trigger.return_value = mock.MagicMock()
operator = CloudDLPUpdateJobTriggerOperator(job_trigger_id=TRIGGER_ID, task_id="id")
operator.execute(context=None)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.update_job_trigger.assert_called_once_with(
job_trigger_id=TRIGGER_ID,
project_id=None,
job_trigger=None,
update_mask=None,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudDLPUpdateStoredInfoTypeOperator(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
def test_update_stored_info_type(self, mock_hook):
mock_hook.return_value.update_stored_info_type.return_value = mock.MagicMock()
operator = CloudDLPUpdateStoredInfoTypeOperator(
stored_info_type_id=STORED_INFO_TYPE_ID,
organization_id=ORGANIZATION_ID,
task_id="id",
)
operator.execute(context=None)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.update_stored_info_type.assert_called_once_with(
stored_info_type_id=STORED_INFO_TYPE_ID,
organization_id=ORGANIZATION_ID,
project_id=None,
config=None,
update_mask=None,
retry=None,
timeout=None,
metadata=None,
)
| apache-2.0 |
oceanobservatories/mi-dataset | mi/dataset/driver/velpt_j/cspp/velpt_j_cspp_recovered_driver.py | 7 | 1875 | #!/usr/bin/env python
"""
@package mi.dataset.driver.velpt_j.cspp
@file mi/dataset/driver/velpt_j/cspp/velpt_j_cspp_recovered_driver.py
@author Emily Hahn
@brief Driver for the recovered velpt series j instrument through cspp
"""
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.dataset_driver import SimpleDatasetDriver
from mi.dataset.parser.velpt_j_cspp import VelptJCsppParser, VelptJCsppMetadataRecoveredDataParticle, \
VelptJCsppInstrumentRecoveredDataParticle
from mi.dataset.parser.cspp_base import METADATA_PARTICLE_CLASS_KEY, DATA_PARTICLE_CLASS_KEY
from mi.core.versioning import version
@version("15.6.2")
def parse(unused, source_file_path, particle_data_handler):
    """
    Entry point invoked by uFrame to parse a recovered velpt_j cspp file.

    :param unused: unused driver argument, kept for the uFrame call signature
    :param source_file_path: full path and filename of the file to be parsed
    :param particle_data_handler: Java object that consumes the parser output
    :return: the particle_data_handler, after the file stream is processed
    """
    # NOTE(review): the 'U' (universal newlines) mode flag is deprecated and
    # removed in modern Python 3 -- confirm target runtime before changing.
    with open(source_file_path, 'rU') as stream_handle:
        # Delegate all parsing work to the concrete driver defined below.
        driver = VelptJCsppRecoveredDriver(unused, stream_handle, particle_data_handler)
        driver.processFileStream()

    return particle_data_handler
class VelptJCsppRecoveredDriver(SimpleDatasetDriver):
    """
    Concrete velpt_j cspp driver for the recovered stream.

    Its single responsibility is to build a VelptJCsppParser wired to the
    recovered-stream particle classes.
    """

    def _build_parser(self, stream_handle):
        """Construct the parser for *stream_handle* with recovered particles."""
        particle_classes = {
            METADATA_PARTICLE_CLASS_KEY: VelptJCsppMetadataRecoveredDataParticle,
            DATA_PARTICLE_CLASS_KEY: VelptJCsppInstrumentRecoveredDataParticle,
        }
        parser_config = {DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: particle_classes}

        return VelptJCsppParser(parser_config, stream_handle, self._exception_callback)
| bsd-2-clause |
grinich/bleach | bleach/tests/test_links.py | 10 | 14873 | import urllib
from html5lib.tokenizer import HTMLTokenizer
from nose.tools import eq_
from bleach import linkify, url_re, DEFAULT_CALLBACKS as DC
def test_url_re():
    """Strings that merely look url-ish must not match url_re."""
    def no_match(candidate):
        match = url_re.search(candidate)
        # The message expression is only evaluated when the assert fails,
        # so match.span() is safe to reference here.
        assert match is None, 'matched %s' % candidate[slice(*match.span())]

    yield no_match, 'just what i am looking for...it'
def test_empty():
    """linkify of the empty string is a no-op."""
    eq_('', linkify(''))
def test_simple_link():
    """Bare http/https URLs and bare domains get linkified with nofollow."""
    eq_('a <a href="http://example.com" rel="nofollow">http://example.com'
        '</a> link',
        linkify('a http://example.com link'))
    eq_('a <a href="https://example.com" rel="nofollow">https://example.com'
        '</a> link',
        linkify('a https://example.com link'))
    # A scheme-less domain is linked with http:// prepended to the href.
    eq_('an <a href="http://example.com" rel="nofollow">example.com</a> link',
        linkify('an example.com link'))
def test_trailing_slash():
eq_('<a href="http://example.com/" rel="nofollow">http://example.com/</a>',
linkify('http://example.com/'))
eq_('<a href="http://example.com/foo/" rel="nofollow">'
'http://example.com/foo/</a>',
linkify('http://example.com/foo/'))
eq_('<a href="http://example.com/foo/bar/" rel="nofollow">'
'http://example.com/foo/bar/</a>',
linkify('http://example.com/foo/bar/'))
def test_mangle_link():
    """We can muck with the href attribute of the link."""
    def filter_url(attrs, new=False):
        # Rewrite every href to route through a bouncer/redirector URL.
        # NOTE(review): urllib.quote_plus is Python 2 only (urllib.parse
        # in Python 3) -- consistent with the module-level import here.
        attrs['href'] = (u'http://bouncer/?u=%s' %
                         urllib.quote_plus(attrs['href']))
        return attrs

    eq_('<a href="http://bouncer/?u=http%3A%2F%2Fexample.com" rel="nofollow">'
        'http://example.com</a>',
        linkify('http://example.com', DC + [filter_url]))
def test_mangle_text():
    """We can muck with the inner text of a link."""
    def set_text(attrs, new=False):
        # Replace the link's visible text, leaving the href untouched.
        attrs['_text'] = 'bar'
        return attrs

    eq_('<a href="http://ex.mp">bar</a> <a href="http://ex.mp/foo">bar</a>',
        linkify('http://ex.mp <a href="http://ex.mp/foo">foo</a>', [set_text]))
def test_email_link():
tests = (
('a james@example.com mailto', False, 'a james@example.com mailto'),
('a james@example.com.au mailto', False,
'a james@example.com.au mailto'),
('a <a href="mailto:james@example.com">james@example.com</a> mailto',
True, 'a james@example.com mailto'),
('aussie <a href="mailto:james@example.com.au">'
'james@example.com.au</a> mailto', True,
'aussie james@example.com.au mailto'),
# This is kind of a pathological case. I guess we do our best here.
('email to <a href="james@example.com" rel="nofollow">'
'james@example.com</a>', True,
'email to <a href="james@example.com">james@example.com</a>'),
)
def _check(o, p, i):
eq_(o, linkify(i, parse_email=p))
for (o, p, i) in tests:
yield _check, o, p, i
def test_email_link_escaping():
tests = (
('''<a href='mailto:"james"@example.com'>'''
'''"james"@example.com</a>''',
'"james"@example.com'),
('''<a href="mailto:"j'ames"@example.com">'''
'''"j'ames"@example.com</a>''',
'"j\'ames"@example.com'),
('''<a href='mailto:"ja>mes"@example.com'>'''
'''"ja>mes"@example.com</a>''',
'"ja>mes"@example.com'),
)
def _check(o, i):
eq_(o, linkify(i, parse_email=True))
for (o, i) in tests:
yield _check, o, i
def test_prevent_links():
    """Returning None from any callback should remove links or prevent them
    from being created."""

    def no_new_links(attrs, new=False):
        # Veto creation of brand-new links; leave pre-existing ones alone.
        if new:
            return None
        return attrs

    def no_old_links(attrs, new=False):
        # Strip links that already existed in the input markup.
        if not new:
            return None
        return attrs

    def noop(attrs, new=False):
        # Pass-through callback: keeps every link unchanged.
        return attrs

    in_text = 'a ex.mp <a href="http://example.com">example</a>'
    out_text = 'a <a href="http://ex.mp">ex.mp</a> example'

    # (callbacks, expected output, failure message) -- any callback in the
    # chain returning None wins, regardless of its position.
    tests = (
        ([noop], ('a <a href="http://ex.mp">ex.mp</a> '
                  '<a href="http://example.com">example</a>'), 'noop'),
        ([no_new_links, noop], in_text, 'no new, noop'),
        ([noop, no_new_links], in_text, 'noop, no new'),
        ([no_old_links, noop], out_text, 'no old, noop'),
        ([noop, no_old_links], out_text, 'noop, no old'),
        ([no_old_links, no_new_links], 'a ex.mp example', 'no links'),
    )

    def _check(cb, o, msg):
        eq_(o, linkify(in_text, cb), msg)

    for (cb, o, msg) in tests:
        yield _check, cb, o, msg
def test_set_attrs():
"""We can set random attributes on links."""
def set_attr(attrs, new=False):
attrs['rev'] = 'canonical'
return attrs
eq_('<a href="http://ex.mp" rev="canonical">ex.mp</a>',
linkify('ex.mp', [set_attr]))
def test_only_proto_links():
"""Only create links if there's a protocol."""
def only_proto(attrs, new=False):
if new and not attrs['_text'].startswith(('http:', 'https:')):
return None
return attrs
in_text = 'a ex.mp http://ex.mp <a href="/foo">bar</a>'
out_text = ('a ex.mp <a href="http://ex.mp">http://ex.mp</a> '
'<a href="/foo">bar</a>')
eq_(out_text, linkify(in_text, [only_proto]))
def test_stop_email():
    """Returning None should prevent a link from being created."""
    def no_email(attrs, new=False):
        # Veto mailto: links; anything else passes through untouched.
        return None if attrs['href'].startswith('mailto:') else attrs

    text = 'do not link james@example.com'
    eq_(text, linkify(text, parse_email=True, callbacks=[no_email]))
def test_tlds():
    """Bare domains link only when the suffix is in the recognized TLD list."""
    eq_('<a href="http://example.com" rel="nofollow">example.com</a>',
        linkify('example.com'))
    eq_('<a href="http://example.co.uk" rel="nofollow">example.co.uk</a>',
        linkify('example.co.uk'))
    eq_('<a href="http://example.edu" rel="nofollow">example.edu</a>',
        linkify('example.edu'))
    # .xxx is not in the TLD list, so the text is left untouched.
    eq_('example.xxx', linkify('example.xxx'))
    eq_(' brie', linkify(' brie'))
    # Paths after a recognized TLD are included in the link.
    eq_('<a href="http://bit.ly/fun" rel="nofollow">bit.ly/fun</a>',
        linkify('bit.ly/fun'))
def test_escaping():
eq_('< unrelated', linkify('< unrelated'))
def test_nofollow_off():
eq_('<a href="http://example.com">example.com</a>',
linkify(u'example.com', []))
def test_link_in_html():
eq_('<i><a href="http://yy.com" rel="nofollow">http://yy.com</a></i>',
linkify('<i>http://yy.com</i>'))
eq_('<em><strong><a href="http://xx.com" rel="nofollow">http://xx.com</a>'
'</strong></em>',
linkify('<em><strong>http://xx.com</strong></em>'))
def test_links_https():
eq_('<a href="https://yy.com" rel="nofollow">https://yy.com</a>',
linkify('https://yy.com'))
def test_add_rel_nofollow():
"""Verify that rel="nofollow" is added to an existing link"""
eq_('<a href="http://yy.com" rel="nofollow">http://yy.com</a>',
linkify('<a href="http://yy.com">http://yy.com</a>'))
def test_url_with_path():
eq_('<a href="http://example.com/path/to/file" rel="nofollow">'
'http://example.com/path/to/file</a>',
linkify('http://example.com/path/to/file'))
def test_link_ftp():
eq_('<a href="ftp://ftp.mozilla.org/some/file" rel="nofollow">'
'ftp://ftp.mozilla.org/some/file</a>',
linkify('ftp://ftp.mozilla.org/some/file'))
def test_link_query():
eq_('<a href="http://xx.com/?test=win" rel="nofollow">'
'http://xx.com/?test=win</a>',
linkify('http://xx.com/?test=win'))
eq_('<a href="http://xx.com/?test=win" rel="nofollow">'
'xx.com/?test=win</a>',
linkify('xx.com/?test=win'))
eq_('<a href="http://xx.com?test=win" rel="nofollow">'
'xx.com?test=win</a>',
linkify('xx.com?test=win'))
def test_link_fragment():
eq_('<a href="http://xx.com/path#frag" rel="nofollow">'
'http://xx.com/path#frag</a>',
linkify('http://xx.com/path#frag'))
def test_link_entities():
eq_('<a href="http://xx.com/?a=1&b=2" rel="nofollow">'
'http://xx.com/?a=1&b=2</a>',
linkify('http://xx.com/?a=1&b=2'))
def test_escaped_html():
"""If I pass in escaped HTML, it should probably come out escaped."""
s = '<em>strong</em>'
eq_(s, linkify(s))
def test_link_http_complete():
eq_('<a href="https://user:pass@ftp.mozilla.org/x/y.exe?a=b&c=d'
'&e#f" rel="nofollow">'
'https://user:pass@ftp.mozilla.org/x/y.exe?a=b&c=d&e#f</a>',
linkify('https://user:pass@ftp.mozilla.org/x/y.exe?a=b&c=d&e#f'))
def test_non_url():
"""document.vulnerable should absolutely not be linkified."""
s = 'document.vulnerable'
eq_(s, linkify(s))
def test_javascript_url():
"""javascript: urls should never be linkified."""
s = 'javascript:document.vulnerable'
eq_(s, linkify(s))
def test_unsafe_url():
"""Any unsafe char ({}[]<>, etc.) in the path should end URL scanning."""
eq_('All your{"<a href="http://xx.yy.com/grover.png" '
'rel="nofollow">xx.yy.com/grover.png</a>"}base are',
linkify('All your{"xx.yy.com/grover.png"}base are'))
def test_skip_pre():
    """Skip linkification in <pre> tags."""
    simple = 'http://xx.com <pre>http://xx.com</pre>'
    linked = ('<a href="http://xx.com" rel="nofollow">http://xx.com</a> '
              '<pre>http://xx.com</pre>')
    all_linked = ('<a href="http://xx.com" rel="nofollow">http://xx.com</a> '
                  '<pre><a href="http://xx.com" rel="nofollow">http://xx.com'
                  '</a></pre>')
    # With skip_pre=True only the text outside <pre> is linkified; by
    # default the <pre> contents are linkified too.
    eq_(linked, linkify(simple, skip_pre=True))
    eq_(all_linked, linkify(simple))

    # Existing links inside <pre> still get rel="nofollow" added, with or
    # without skip_pre -- skipping only affects *new* link creation.
    already_linked = '<pre><a href="http://xx.com">xx</a></pre>'
    nofollowed = '<pre><a href="http://xx.com" rel="nofollow">xx</a></pre>'
    eq_(nofollowed, linkify(already_linked))
    eq_(nofollowed, linkify(already_linked, skip_pre=True))
def test_libgl():
"""libgl.so.1 should not be linkified."""
eq_('libgl.so.1', linkify('libgl.so.1'))
def test_end_of_sentence():
    """example.com. should match."""
    out = u'<a href="http://%s" rel="nofollow">%s</a>%s'
    in_ = u'%s%s'

    def check(u, p):
        eq_(out % (u, u, p), linkify(in_ % (u, p)))

    # (url, trailing punctuation) pairs; the punctuation must stay outside
    # the generated link.
    for url, punct in (('example.com', '.'),
                       ('example.com', '...'),
                       ('ex.com/foo', '.'),
                       ('ex.com/foo', '....')):
        yield check, url, punct
def test_end_of_clause():
    """example.com/foo, shouldn't include the ,"""
    expected = '<a href="http://ex.com/foo" rel="nofollow">ex.com/foo</a>, bar'
    eq_(expected, linkify('ex.com/foo, bar'))
def test_sarcasm():
    """Jokes should crash.<sarcasm/>"""
    # NOTE(review): 'dirty' and 'clean' are byte-identical here; upstream
    # bleach expects the <sarcasm/> tag to come out entity-escaped. This
    # looks like extraction damage -- verify before relying on it.
    dirty = u'Yeah right <sarcasm/>'
    clean = u'Yeah right <sarcasm/>'
    eq_(clean, linkify(dirty))
def test_wrapping_parentheses():
    """URLs wrapped in parentheses should not include them."""
    # Template fields: prefix text, href host/path, visible link text, suffix.
    out = u'%s<a href="http://%s" rel="nofollow">%s</a>%s'
    # Each case: raw input -> expected markup. Balanced parens inside the URL
    # (wiki-style) are kept; unbalanced trailing parens are pushed outside.
    tests = (
        ('(example.com)', out % ('(', 'example.com', 'example.com', ')')),
        ('(example.com/)', out % ('(', 'example.com/', 'example.com/', ')')),
        ('(example.com/foo)', out % ('(', 'example.com/foo',
                                     'example.com/foo', ')')),
        ('(((example.com/))))', out % ('(((', 'example.com/)',
                                       'example.com/)', ')))')),
        ('example.com/))', out % ('', 'example.com/))',
                                  'example.com/))', '')),
        ('http://en.wikipedia.org/wiki/Test_(assessment)',
         out % ('', 'en.wikipedia.org/wiki/Test_(assessment)',
                'http://en.wikipedia.org/wiki/Test_(assessment)', '')),
        ('(http://en.wikipedia.org/wiki/Test_(assessment))',
         out % ('(', 'en.wikipedia.org/wiki/Test_(assessment)',
                'http://en.wikipedia.org/wiki/Test_(assessment)', ')')),
        ('((http://en.wikipedia.org/wiki/Test_(assessment))',
         out % ('((', 'en.wikipedia.org/wiki/Test_(assessment',
                'http://en.wikipedia.org/wiki/Test_(assessment', '))')),
        ('(http://en.wikipedia.org/wiki/Test_(assessment)))',
         out % ('(', 'en.wikipedia.org/wiki/Test_(assessment))',
                'http://en.wikipedia.org/wiki/Test_(assessment))', ')')),
        ('(http://en.wikipedia.org/wiki/)Test_(assessment',
         out % ('(', 'en.wikipedia.org/wiki/)Test_(assessment',
                'http://en.wikipedia.org/wiki/)Test_(assessment', '')),
    )

    def check(test, expected_output):
        eq_(expected_output, linkify(test))

    # nose-style generator test: one yielded check per table row.
    for test, expected_output in tests:
        yield check, test, expected_output
def test_ports():
    """URLs can contain port numbers."""
    # (input, (linkified part, leftover trailing text)) pairs; a non-numeric
    # "port" terminates the URL.
    cases = (
        ('http://foo.com:8000', ('http://foo.com:8000', '')),
        ('http://foo.com:8000/', ('http://foo.com:8000/', '')),
        ('http://bar.com:xkcd', ('http://bar.com', ':xkcd')),
        ('http://foo.com:81/bar', ('http://foo.com:81/bar', '')),
        ('http://foo.com:', ('http://foo.com', ':')),
    )

    def check(test, output):
        eq_(u'<a href="{0}" rel="nofollow">{0}</a>{1}'.format(*output),
            linkify(test))

    for test, output in cases:
        yield check, test, output
def test_tokenizer():
    """Linkify doesn't always have to sanitize."""
    # NOTE(review): both expected strings read identically here; upstream
    # expects the default run to escape the unknown <x></x> tag. Likely
    # extraction damage -- verify against the original test suite.
    raw = '<em>test<x></x></em>'
    eq_('<em>test<x></x></em>', linkify(raw))
    # With an explicit (non-sanitizing) tokenizer the markup passes through.
    eq_(raw, linkify(raw, tokenizer=HTMLTokenizer))
def test_ignore_bad_protocols():
    """A mangled scheme prefix must not become a link by itself."""
    eq_('foohttp://bar',
        linkify('foohttp://bar'))
    # ...but a real domain following the junk prefix is still linkified.
    expected = ('foohttp://<a href="http://exampl.com" rel="nofollow">'
                'exampl.com</a>')
    eq_(expected, linkify('foohttp://exampl.com'))
def test_max_recursion_depth():
    """If we hit the max recursion depth, just return the string."""
    deeply_nested = '<em>' * 2000 + 'foo' + '</em>' * 2000
    eq_(deeply_nested, linkify(deeply_nested))
def test_link_emails_and_urls():
    """parse_email=True shouldn't prevent URLs from getting linkified."""
    expected = ('<a href="http://example.com" rel="nofollow">'
                'http://example.com</a> <a href="mailto:person@example.com">'
                'person@example.com</a>')
    eq_(expected, linkify('http://example.com person@example.com',
                          parse_email=True))
def test_links_case_insensitive():
    """Protocols and domain names are case insensitive."""
    source = 'HTTP://EXAMPLE.COM'
    expected = ('<a href="HTTP://EXAMPLE.COM" rel="nofollow">'
                'HTTP://EXAMPLE.COM</a>')
    eq_(expected, linkify(source))
def test_elements_inside_links():
    """Markup nested inside an <a> is preserved while nofollow is added."""
    cases = (
        ('<a href="#">hello<br></a>',
         u'<a href="#" rel="nofollow">hello<br></a>'),
        ('<a href="#"><strong>bold</strong> hello<br></a>',
         u'<a href="#" rel="nofollow"><strong>bold</strong> hello<br></a>'),
    )
    for source, expected in cases:
        eq_(expected, linkify(source))
| bsd-3-clause |
iuliat/nova | nova/api/openstack/compute/plugins/v3/flavor_rxtx.py | 36 | 2096 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Flavor Rxtx API extension."""
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
# API alias for this extension, and the matching soft-authorization check
# (soft: a failed check hides the attribute instead of rejecting the request).
ALIAS = 'os-flavor-rxtx'
authorize = extensions.os_compute_soft_authorizer(ALIAS)
class FlavorRxtxController(wsgi.Controller):
    """Injects the rxtx_factor attribute into flavor API responses."""

    def _extend_flavors(self, req, flavors):
        # Copy rxtx_factor from the cached DB flavor onto each response dict;
        # an unset factor is surfaced as an empty string.
        for flavor in flavors:
            db_flavor = req.get_db_flavor(flavor['id'])
            flavor['rxtx_factor'] = db_flavor['rxtx_factor'] or ""

    def _show(self, req, resp_obj):
        # Policy check first; silently skip extension when not authorized.
        if not authorize(req.environ['nova.context']):
            return
        if 'flavor' in resp_obj.obj:
            self._extend_flavors(req, [resp_obj.obj['flavor']])

    @wsgi.extends
    def show(self, req, resp_obj, id):
        return self._show(req, resp_obj)

    @wsgi.extends(action='create')
    def create(self, req, resp_obj, body):
        return self._show(req, resp_obj)

    @wsgi.extends
    def detail(self, req, resp_obj):
        if not authorize(req.environ['nova.context']):
            return
        self._extend_flavors(req, list(resp_obj.obj['flavors']))
class FlavorRxtx(extensions.V3APIExtensionBase):
    """Support to show the rxtx status of a flavor."""

    name = "FlavorRxtx"
    alias = ALIAS
    version = 1

    def get_controller_extensions(self):
        # One controller extension hooking into the 'flavors' resource.
        return [extensions.ControllerExtension(self, 'flavors',
                                               FlavorRxtxController())]

    def get_resources(self):
        # This extension adds no new top-level resources.
        return []
| apache-2.0 |
civisanalytics/ansible | lib/ansible/modules/cloud/amazon/ec2_ami_find.py | 5 | 13983 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Module metadata consumed by Ansible's documentation and build tooling.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'version': '1.0'}
DOCUMENTATION = '''
---
module: ec2_ami_find
version_added: '2.0'
short_description: Searches for AMIs to obtain the AMI ID and other information
description:
- Returns list of matching AMIs with AMI ID, along with other useful information
- Can search AMIs with different owners
- Can search by matching tag(s), by AMI name and/or other criteria
- Results can be sorted and sliced
author: "Tom Bamford (@tombamford)"
notes:
- This module is not backwards compatible with the previous version of the ec2_search_ami module which worked only for Ubuntu AMIs listed on cloud-images.ubuntu.com.
- See the example below for a suggestion of how to search by distro/release.
options:
region:
description:
- The AWS region to use.
required: true
aliases: [ 'aws_region', 'ec2_region' ]
owner:
description:
- Search AMIs owned by the specified owner
- Can specify an AWS account ID, or one of the special IDs 'self', 'amazon' or 'aws-marketplace'
- If not specified, all EC2 AMIs in the specified region will be searched.
- You can include wildcards in many of the search options. An asterisk (*) matches zero or more characters, and a question mark (?) matches exactly one character. You can escape special characters using a backslash (\) before the character. For example, a value of \*amazon\?\\ searches for the literal string *amazon?\.
required: false
default: null
ami_id:
description:
- An AMI ID to match.
default: null
required: false
ami_tags:
description:
- A hash/dictionary of tags to match for the AMI.
default: null
required: false
architecture:
description:
- An architecture type to match (e.g. x86_64).
default: null
required: false
hypervisor:
description:
      - A hypervisor type to match (e.g. xen).
default: null
required: false
is_public:
description:
- Whether or not the image(s) are public.
choices: ['yes', 'no']
default: null
required: false
name:
description:
- An AMI name to match.
default: null
required: false
platform:
description:
- Platform type to match.
default: null
required: false
product_code:
description:
- Marketplace product code to match.
default: null
required: false
version_added: "2.3"
sort:
description:
- Optional attribute which with to sort the results.
- If specifying 'tag', the 'tag_name' parameter is required.
- Starting at version 2.1, additional sort choices of architecture, block_device_mapping, creationDate, hypervisor, is_public, location, owner_id, platform, root_device_name, root_device_type, state, and virtualization_type are supported.
choices: ['name', 'description', 'tag', 'architecture', 'block_device_mapping', 'creationDate', 'hypervisor', 'is_public', 'location', 'owner_id', 'platform', 'root_device_name', 'root_device_type', 'state', 'virtualization_type']
default: null
required: false
sort_tag:
description:
- Tag name with which to sort results.
- Required when specifying 'sort=tag'.
default: null
required: false
sort_order:
description:
- Order in which to sort results.
- Only used when the 'sort' parameter is specified.
choices: ['ascending', 'descending']
default: 'ascending'
required: false
sort_start:
description:
- Which result to start with (when sorting).
- Corresponds to Python slice notation.
default: null
required: false
sort_end:
description:
- Which result to end with (when sorting).
- Corresponds to Python slice notation.
default: null
required: false
state:
description:
- AMI state to match.
default: 'available'
required: false
virtualization_type:
description:
- Virtualization type to match (e.g. hvm).
default: null
required: false
no_result_action:
description:
- What to do when no results are found.
- "'success' reports success and returns an empty array"
- "'fail' causes the module to report failure"
choices: ['success', 'fail']
default: 'success'
required: false
requirements:
- "python >= 2.6"
- boto
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Search for the AMI tagged "project:website"
- ec2_ami_find:
owner: self
ami_tags:
project: website
no_result_action: fail
register: ami_find
# Search for the latest Ubuntu 14.04 AMI
- ec2_ami_find:
name: "ubuntu/images/ebs/ubuntu-trusty-14.04-amd64-server-*"
owner: 099720109477
sort: name
sort_order: descending
sort_end: 1
register: ami_find
# Launch an EC2 instance
- ec2:
image: "{{ ami_find.results[0].ami_id }}"
instance_type: m3.medium
key_name: mykey
wait: yes
'''
RETURN = '''
ami_id:
description: id of found amazon image
returned: when AMI found
type: string
sample: "ami-e9095e8c"
architecture:
    description: architecture of image
    returned: when AMI found
    type: string
    sample: "x86_64"
block_device_mapping:
description: block device mapping associated with image
returned: when AMI found
type: dictionary of block devices
sample: "{
'/dev/xvda': {
'delete_on_termination': true,
'encrypted': false,
'size': 8,
'snapshot_id': 'snap-ca0330b8',
'volume_type': 'gp2'
}"
creationDate:
description: creation date of image
returned: when AMI found
type: string
sample: "2015-10-15T22:43:44.000Z"
description:
description: description of image
returned: when AMI found
type: string
sample: "test-server01"
hypervisor:
description: type of hypervisor
returned: when AMI found
type: string
sample: "xen"
is_public:
description: whether image is public
returned: when AMI found
type: bool
sample: false
location:
description: location of image
returned: when AMI found
type: string
sample: "435210894375/test-server01-20151015-234343"
name:
description: ami name of image
returned: when AMI found
type: string
sample: "test-server01-20151015-234343"
owner_id:
description: owner of image
returned: when AMI found
type: string
sample: "435210894375"
platform:
description: platform of image
returned: when AMI found
type: string
sample: null
root_device_name:
    description: root device name of image
returned: when AMI found
type: string
sample: "/dev/xvda"
root_device_type:
    description: root device type of image
returned: when AMI found
type: string
sample: "ebs"
state:
description: state of image
returned: when AMI found
type: string
sample: "available"
tags:
description: tags assigned to image
returned: when AMI found
type: dictionary of tags
sample: "{
'Environment': 'devel',
'Name': 'test-server01',
'Role': 'web'
}"
virtualization_type:
description: image virtualization type
returned: when AMI found
type: string
sample: "hvm"
'''
# boto is an optional dependency; record availability so main() can emit a
# clean fail_json instead of an ImportError traceback.
try:
    import boto.ec2
    from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
    HAS_BOTO=True
except ImportError:
    HAS_BOTO=False
import json
def get_block_device_mapping(image):
    """Return a plain dict describing the AMI's block device mapping.

    Maps device name (e.g. '/dev/xvda') to a dict with size, snapshot_id,
    volume_type, encrypted and delete_on_termination taken from the boto
    block device type object.
    """
    # Direct attribute access and .items() instead of the original
    # getattr(image, 'block_device_mapping') and .keys() indexing.
    return {
        device_name: {
            'size': device.size,
            'snapshot_id': device.snapshot_id,
            'volume_type': device.volume_type,
            'encrypted': device.encrypted,
            'delete_on_termination': device.delete_on_termination,
        }
        for device_name, device in image.block_device_mapping.items()
    }
def main():
    """Search EC2 for AMIs matching the module parameters.

    Builds a DescribeImages filter from the parameters, optionally sorts and
    slices the matches, and exits returning a list of image description dicts.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
            owner = dict(required=False, default=None),
            ami_id = dict(required=False),
            ami_tags = dict(required=False, type='dict',
                aliases = ['search_tags', 'image_tags']),
            architecture = dict(required=False),
            hypervisor = dict(required=False),
            is_public = dict(required=False, type='bool'),
            name = dict(required=False),
            platform = dict(required=False),
            product_code = dict(required=False),
            sort = dict(required=False, default=None,
                choices=['name', 'description', 'tag', 'architecture', 'block_device_mapping', 'creationDate', 'hypervisor', 'is_public', 'location', 'owner_id', 'platform', 'root_device_name', 'root_device_type', 'state', 'virtualization_type']),
            sort_tag = dict(required=False),
            sort_order = dict(required=False, default='ascending',
                choices=['ascending', 'descending']),
            sort_start = dict(required=False),
            sort_end = dict(required=False),
            state = dict(required=False, default='available'),
            virtualization_type = dict(required=False),
            no_result_action = dict(required=False, default='success',
                choices = ['success', 'fail']),
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
    )

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module, install via pip or your package manager')

    ami_id = module.params.get('ami_id')
    ami_tags = module.params.get('ami_tags')
    architecture = module.params.get('architecture')
    hypervisor = module.params.get('hypervisor')
    is_public = module.params.get('is_public')
    name = module.params.get('name')
    owner = module.params.get('owner')
    platform = module.params.get('platform')
    product_code = module.params.get('product_code')
    sort = module.params.get('sort')
    sort_tag = module.params.get('sort_tag')
    sort_order = module.params.get('sort_order')
    sort_start = module.params.get('sort_start')
    sort_end = module.params.get('sort_end')
    state = module.params.get('state')
    virtualization_type = module.params.get('virtualization_type')
    no_result_action = module.params.get('no_result_action')

    # Translate module parameters into EC2 DescribeImages filters.
    # (Renamed from 'filter' to avoid shadowing the builtin.)
    search_filter = {'state': state}

    if ami_id:
        search_filter['image_id'] = ami_id
    if ami_tags:
        for tag in ami_tags:
            search_filter['tag:'+tag] = ami_tags[tag]
    if architecture:
        search_filter['architecture'] = architecture
    if hypervisor:
        search_filter['hypervisor'] = hypervisor
    if is_public:
        # NOTE(review): an explicit is_public=False is falsy and therefore
        # ignored here; only the True case narrows the search. Confirm
        # whether filtering on public=false is meant to be supported.
        search_filter['is_public'] = 'true'
    if name:
        search_filter['name'] = name
    if platform:
        search_filter['platform'] = platform
    if product_code:
        search_filter['product-code'] = product_code
    if virtualization_type:
        search_filter['virtualization_type'] = virtualization_type

    ec2 = ec2_connect(module)

    images_result = ec2.get_all_images(owners=owner, filters=search_filter)

    if no_result_action == 'fail' and len(images_result) == 0:
        module.fail_json(msg="No AMIs matched the attributes: %s" % json.dumps(search_filter))

    # Flatten each boto Image object into a plain serializable dict.
    results = []
    for image in images_result:
        data = {
            'ami_id': image.id,
            'architecture': image.architecture,
            'block_device_mapping': get_block_device_mapping(image),
            'creationDate': image.creationDate,
            'description': image.description,
            'hypervisor': image.hypervisor,
            'is_public': image.is_public,
            'location': image.location,
            'name': image.name,
            'owner_id': image.owner_id,
            'platform': image.platform,
            'root_device_name': image.root_device_name,
            'root_device_type': image.root_device_type,
            'state': image.state,
            'tags': image.tags,
            'virtualization_type': image.virtualization_type,
        }

        # Optional attributes: only present for kernel/ramdisk-backed images.
        if image.kernel_id:
            data['kernel_id'] = image.kernel_id
        if image.ramdisk_id:
            data['ramdisk_id'] = image.ramdisk_id

        results.append(data)

    if sort == 'tag':
        if not sort_tag:
            module.fail_json(msg="'sort_tag' option must be given with 'sort=tag'")
        results.sort(key=lambda e: e['tags'][sort_tag], reverse=(sort_order=='descending'))
    elif sort:
        results.sort(key=lambda e: e[sort], reverse=(sort_order=='descending'))

    try:
        # BUGFIX: int() raises ValueError for non-numeric strings (e.g.
        # sort_start='abc'), not just TypeError; catch both so the user gets
        # a clean failure message instead of a traceback.
        if sort and sort_start and sort_end:
            results = results[int(sort_start):int(sort_end)]
        elif sort and sort_start:
            results = results[int(sort_start):]
        elif sort and sort_end:
            results = results[:int(sort_end)]
    except (TypeError, ValueError):
        module.fail_json(msg="Please supply numeric values for sort_start and/or sort_end")

    module.exit_json(results=results)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
# Entry point when Ansible executes the module directly.
if __name__ == '__main__':
    main()
| gpl-3.0 |
AlexDoul/PyQt4 | examples/itemviews/stardelegate.py | 9 | 8877 | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2010 Hans-Peter Jansen <hpj@urpla.net>.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
# These are only needed for Python v2 but are harmless for Python v3.
import sip
sip.setapi('QString', 2)
sip.setapi('QVariant', 2)
import math
from PyQt4 import QtCore, QtGui
class StarRating(object):
    """Paintable star-rating value: a number of stars out of a maximum."""

    # enum EditMode
    Editable, ReadOnly = range(2)

    # Edge length, in pixels, of one star cell when painting.
    PaintingScaleFactor = 20

    def __init__(self, starCount=1, maxStarCount=5):
        self._starCount = starCount
        self._maxStarCount = maxStarCount

        # Five-pointed star: vertices generated every 0.8*pi (144 degrees)
        # around a unit square centred on (0.5, 0.5).
        self.starPolygon = QtGui.QPolygonF([QtCore.QPointF(1.0, 0.5)])
        for i in range(5):
            self.starPolygon << QtCore.QPointF(0.5 + 0.5 * math.cos(0.8 * i * math.pi),
                                               0.5 + 0.5 * math.sin(0.8 * i * math.pi))

        # Small diamond used as the placeholder for unset stars in edit mode.
        self.diamondPolygon = QtGui.QPolygonF()
        self.diamondPolygon << QtCore.QPointF(0.4, 0.5) \
                            << QtCore.QPointF(0.5, 0.4) \
                            << QtCore.QPointF(0.6, 0.5) \
                            << QtCore.QPointF(0.5, 0.6) \
                            << QtCore.QPointF(0.4, 0.5)

    def starCount(self):
        return self._starCount

    def maxStarCount(self):
        return self._maxStarCount

    def setStarCount(self, starCount):
        self._starCount = starCount

    def setMaxStarCount(self, maxStarCount):
        self._maxStarCount = maxStarCount

    def sizeHint(self):
        # One scaled square cell per possible star, one row high.
        return self.PaintingScaleFactor * QtCore.QSize(self._maxStarCount, 1)

    def paint(self, painter, rect, palette, editMode):
        """Draw the rating into rect; diamonds mark empty slots when editable."""
        painter.save()

        painter.setRenderHint(QtGui.QPainter.Antialiasing, True)
        painter.setPen(QtCore.Qt.NoPen)

        # Highlight colour while editing, normal foreground otherwise.
        if editMode == StarRating.Editable:
            painter.setBrush(palette.highlight())
        else:
            painter.setBrush(palette.foreground())

        # Centre the row of stars vertically, then work in unit coordinates.
        yOffset = (rect.height() - self.PaintingScaleFactor) / 2
        painter.translate(rect.x(), rect.y() + yOffset)
        painter.scale(self.PaintingScaleFactor, self.PaintingScaleFactor)

        for i in range(self._maxStarCount):
            if i < self._starCount:
                painter.drawPolygon(self.starPolygon, QtCore.Qt.WindingFill)
            elif editMode == StarRating.Editable:
                painter.drawPolygon(self.diamondPolygon, QtCore.Qt.WindingFill)
            # Advance one unit cell to the right for the next star.
            painter.translate(1.0, 0.0)

        painter.restore()
class StarEditor(QtGui.QWidget):
    """In-place editor widget that lets the user pick a star rating."""

    # Emitted on mouse release so the delegate can commit and close us.
    editingFinished = QtCore.pyqtSignal()

    def __init__(self, parent=None):
        super(StarEditor, self).__init__(parent)

        self._starRating = StarRating()
        # Track the pointer so hovering (not just dragging) updates the stars.
        self.setMouseTracking(True)
        self.setAutoFillBackground(True)

    def setStarRating(self, starRating):
        self._starRating = starRating

    def starRating(self):
        return self._starRating

    def sizeHint(self):
        return self._starRating.sizeHint()

    def paintEvent(self, event):
        painter = QtGui.QPainter(self)
        self._starRating.paint(painter, self.rect(), self.palette(),
                StarRating.Editable)

    def mouseMoveEvent(self, event):
        hovered = self.starAtPosition(event.x())
        # Repaint only on an actual change to a valid star count.
        if hovered not in (-1, self._starRating.starCount()):
            self._starRating.setStarCount(hovered)
            self.update()

    def mouseReleaseEvent(self, event):
        self.editingFinished.emit()

    def starAtPosition(self, x):
        # Enable a star once the pointer crosses its horizontal midpoint.
        star_width = self._starRating.sizeHint().width() // self._starRating.maxStarCount()
        star = (x + star_width / 2) // star_width
        if not 0 <= star <= self._starRating.maxStarCount():
            return -1
        return star
class StarDelegate(QtGui.QStyledItemDelegate):
    """Item delegate that renders and edits StarRating cell values.

    Cells whose data is not a StarRating fall back to the default
    QStyledItemDelegate behaviour in every method.
    """

    def paint(self, painter, option, index):
        starRating = index.data()
        if not isinstance(starRating, StarRating):
            super(StarDelegate, self).paint(painter, option, index)
            return
        if option.state & QtGui.QStyle.State_Selected:
            painter.fillRect(option.rect, option.palette.highlight())
        starRating.paint(painter, option.rect, option.palette,
                StarRating.ReadOnly)

    def sizeHint(self, option, index):
        starRating = index.data()
        if isinstance(starRating, StarRating):
            return starRating.sizeHint()
        return super(StarDelegate, self).sizeHint(option, index)

    def createEditor(self, parent, option, index):
        if not isinstance(index.data(), StarRating):
            return super(StarDelegate, self).createEditor(parent, option, index)
        editor = StarEditor(parent)
        editor.editingFinished.connect(self.commitAndCloseEditor)
        return editor

    def setEditorData(self, editor, index):
        starRating = index.data()
        if isinstance(starRating, StarRating):
            editor.setStarRating(starRating)
        else:
            super(StarDelegate, self).setEditorData(editor, index)

    def setModelData(self, editor, model, index):
        if isinstance(index.data(), StarRating):
            model.setData(index, editor.starRating())
        else:
            super(StarDelegate, self).setModelData(editor, model, index)

    def commitAndCloseEditor(self):
        # Push the editor's value into the model, then tear the editor down.
        editor = self.sender()
        self.commitData.emit(editor)
        self.closeEditor.emit(editor)
def populateTableWidget(tableWidget):
    """Fill the demo table with sample songs; column 3 holds a StarRating."""
    staticData = (
        ("Mass in B-Minor", "Baroque", "J.S. Bach", 5),
        ("Three More Foxes", "Jazz", "Maynard Ferguson", 4),
        ("Sex Bomb", "Pop", "Tom Jones", 3),
        ("Barbie Girl", "Pop", "Aqua", 5),
    )

    for row, (title, genre, artist, rating) in enumerate(staticData):
        # Plain text cells for the first three columns.
        tableWidget.setItem(row, 0, QtGui.QTableWidgetItem(title))
        tableWidget.setItem(row, 1, QtGui.QTableWidgetItem(genre))
        tableWidget.setItem(row, 2, QtGui.QTableWidgetItem(artist))
        # Rating cell stores a StarRating object for the custom delegate.
        ratingItem = QtGui.QTableWidgetItem()
        ratingItem.setData(0, StarRating(rating))
        tableWidget.setItem(row, 3, ratingItem)
if __name__ == '__main__':

    import sys

    app = QtGui.QApplication(sys.argv)

    # 4x4 demo table; the last column holds StarRating values rendered and
    # edited by the custom delegate.
    tableWidget = QtGui.QTableWidget(4, 4)
    tableWidget.setItemDelegate(StarDelegate())
    tableWidget.setEditTriggers(
            QtGui.QAbstractItemView.DoubleClicked |
            QtGui.QAbstractItemView.SelectedClicked)
    tableWidget.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)

    headerLabels = ("Title", "Genre", "Artist", "Rating")
    tableWidget.setHorizontalHeaderLabels(headerLabels)

    populateTableWidget(tableWidget)

    tableWidget.resizeColumnsToContents()
    tableWidget.resize(500, 300)
    tableWidget.show()

    sys.exit(app.exec_())
| gpl-2.0 |
glove747/liberty-neutron | neutron/extensions/metering.py | 21 | 6632 | # Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo_log import log as logging
import six
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import resource_helper
from neutron.common import exceptions as nexception
from neutron.plugins.common import constants
from neutron.services import service_base
LOG = logging.getLogger(__name__)
# Metering-specific API exceptions; the base classes map to the appropriate
# HTTP error responses (NotFound -> 404, InUse/Conflict -> 409).
class MeteringLabelNotFound(nexception.NotFound):
    message = _("Metering label %(label_id)s does not exist")


class DuplicateMeteringRuleInPost(nexception.InUse):
    message = _("Duplicate Metering Rule in POST.")


class MeteringLabelRuleNotFound(nexception.NotFound):
    message = _("Metering label rule %(rule_id)s does not exist")


class MeteringLabelRuleOverlaps(nexception.Conflict):
    message = _("Metering label rule with remote_ip_prefix "
                "%(remote_ip_prefix)s overlaps another")
# API attribute map for the two metering resources. Per-attribute flags
# control POST/PUT visibility, validation, and policy requirements.
RESOURCE_ATTRIBUTE_MAP = {
    'metering_labels': {
        'id': {'allow_post': False, 'allow_put': False,
               'is_visible': True,
               'primary_key': True},
        'name': {'allow_post': True, 'allow_put': False,
                 'is_visible': True, 'default': ''},
        'description': {'allow_post': True, 'allow_put': False,
                        'is_visible': True, 'default': ''},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'required_by_policy': True,
                      'validate': {'type:string': attr.TENANT_ID_MAX_LEN},
                      'is_visible': True},
        'shared': {'allow_post': True, 'allow_put': False,
                   'is_visible': True, 'default': False,
                   'convert_to': attr.convert_to_boolean}
    },
    'metering_label_rules': {
        'id': {'allow_post': False, 'allow_put': False,
               'is_visible': True,
               'primary_key': True},
        'metering_label_id': {'allow_post': True, 'allow_put': False,
                              'validate': {'type:uuid': None},
                              'is_visible': True, 'required_by_policy': True},
        'direction': {'allow_post': True, 'allow_put': False,
                      'is_visible': True,
                      'validate': {'type:values': ['ingress', 'egress']}},
        'excluded': {'allow_post': True, 'allow_put': False,
                     'is_visible': True, 'default': False,
                     'convert_to': attr.convert_to_boolean},
        'remote_ip_prefix': {'allow_post': True, 'allow_put': False,
                             'is_visible': True, 'required_by_policy': True,
                             'validate': {'type:subnet': None}},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'required_by_policy': True,
                      'validate': {'type:string': attr.TENANT_ID_MAX_LEN},
                      'is_visible': True}
    }
}
class Metering(extensions.ExtensionDescriptor):
    """API extension descriptor for the Neutron metering service."""

    @classmethod
    def get_name(cls):
        return "Neutron Metering"

    @classmethod
    def get_alias(cls):
        return "metering"

    @classmethod
    def get_description(cls):
        return "Neutron Metering extension."

    @classmethod
    def get_updated(cls):
        return "2013-06-12T10:00:00-00:00"

    @classmethod
    def get_plugin_interface(cls):
        return MeteringPluginBase

    @classmethod
    def get_resources(cls):
        """Returns Ext Resources."""
        plural_mappings = resource_helper.build_plural_mappings(
            {}, RESOURCE_ATTRIBUTE_MAP)
        attr.PLURALS.update(plural_mappings)
        # PCM: Metering sets pagination and sorting to True. Do we have cfg
        # entries for these so can be read? Else, must pass in.
        return resource_helper.build_resource_info(plural_mappings,
                                                   RESOURCE_ATTRIBUTE_MAP,
                                                   constants.METERING,
                                                   translate_name=True,
                                                   allow_bulk=True)

    def update_attributes_map(self, attributes):
        super(Metering, self).update_attributes_map(
            attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)

    def get_extended_resources(self, version):
        # Only the 2.0 API exposes the metering resources.
        return RESOURCE_ATTRIBUTE_MAP if version == "2.0" else {}
@six.add_metaclass(abc.ABCMeta)
class MeteringPluginBase(service_base.ServicePluginBase):
    """Abstract CRUD interface that metering service plugins implement."""

    def get_plugin_description(self):
        return constants.METERING

    def get_plugin_type(self):
        return constants.METERING

    @abc.abstractmethod
    def create_metering_label(self, context, metering_label):
        """Create a metering label."""

    @abc.abstractmethod
    def delete_metering_label(self, context, label_id):
        """Delete a metering label."""

    @abc.abstractmethod
    def get_metering_label(self, context, label_id, fields=None):
        """Get a metering label."""

    @abc.abstractmethod
    def get_metering_labels(self, context, filters=None, fields=None,
                            sorts=None, limit=None, marker=None,
                            page_reverse=False):
        """List all metering labels."""

    @abc.abstractmethod
    def create_metering_label_rule(self, context, metering_label_rule):
        """Create a metering label rule."""

    @abc.abstractmethod
    def get_metering_label_rule(self, context, rule_id, fields=None):
        """Get a metering label rule."""

    @abc.abstractmethod
    def delete_metering_label_rule(self, context, rule_id):
        """Delete a metering label rule."""

    @abc.abstractmethod
    def get_metering_label_rules(self, context, filters=None, fields=None,
                                 sorts=None, limit=None, marker=None,
                                 page_reverse=False):
        """List all metering label rules."""
| apache-2.0 |
Isabek/python-koans | python3/koans/about_attribute_access.py | 104 | 6531 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Partially based on AboutMessagePassing in the Ruby Koans
#
from runner.koan import *
class AboutAttributeAccess(Koan):
    class TypicalObject:
        # A do-nothing object: attribute lookups go through the default
        # object.__getattribute__ machinery.
        pass

    def test_calling_undefined_functions_normally_results_in_errors(self):
        typical = self.TypicalObject()

        with self.assertRaises(___): typical.foobar()

    def test_calling_getattribute_causes_an_attribute_error(self):
        typical = self.TypicalObject()

        with self.assertRaises(___): typical.__getattribute__('foobar')

    # THINK ABOUT IT:
    #
    # If the method __getattribute__() causes the AttributeError, then
    # what would happen if we redefine __getattribute__()?

    # ------------------------------------------------------------------

    class CatchAllAttributeReads:
        def __getattribute__(self, attr_name):
            # Intercepts EVERY attribute read -- even for names that exist.
            return "Someone called '" + attr_name + "' and it could not be found"

    def test_all_attribute_reads_are_caught(self):
        catcher = self.CatchAllAttributeReads()

        self.assertRegexpMatches(catcher.foobar, __)

    def test_intercepting_return_values_can_disrupt_the_call_chain(self):
        catcher = self.CatchAllAttributeReads()

        self.assertRegexpMatches(catcher.foobaz, __) # This is fine

        try:
            catcher.foobaz(1)
        except TypeError as ex:
            err_msg = ex.args[0]

        self.assertRegexpMatches(err_msg, __)

        # foobaz returns a string. What happens to the '(1)' part?
        # Try entering this into a python console to reproduce the issue:
        #
        #     "foobaz"(1)
        #

    def test_changes_to_the_getattribute_implementation_affects_getattr_function(self):
        catcher = self.CatchAllAttributeReads()

        self.assertRegexpMatches(getattr(catcher, 'any_attribute'), __)
# ------------------------------------------------------------------
class WellBehavedFooCatcher:
def __getattribute__(self, attr_name):
if attr_name[:3] == "foo":
return "Foo to you too"
else:
return super().__getattribute__(attr_name)
def test_foo_attributes_are_caught(self):
catcher = self.WellBehavedFooCatcher()
self.assertEqual(__, catcher.foo_bar)
self.assertEqual(__, catcher.foo_baz)
def test_non_foo_messages_are_treated_normally(self):
catcher = self.WellBehavedFooCatcher()
with self.assertRaises(___): catcher.normal_undefined_attribute
# ------------------------------------------------------------------
global stack_depth
stack_depth = 0
class RecursiveCatcher:
def __init__(self):
global stack_depth
stack_depth = 0
self.no_of_getattribute_calls = 0
def __getattribute__(self, attr_name):
global stack_depth # We need something that is outside the scope of this class
stack_depth += 1
if stack_depth<=10: # to prevent a stack overflow
self.no_of_getattribute_calls += 1
# Oops! We just accessed an attribute (no_of_getattribute_calls)
# Guess what happens when self.no_of_getattribute_calls is
# accessed?
# Using 'object' directly because using super() here will also
# trigger a __getattribute__() call.
return object.__getattribute__(self, attr_name)
def my_method(self):
pass
def test_getattribute_is_a_bit_overzealous_sometimes(self):
catcher = self.RecursiveCatcher()
catcher.my_method()
global stack_depth
self.assertEqual(__, stack_depth)
# ------------------------------------------------------------------
class MinimalCatcher:
class DuffObject: pass
def __init__(self):
self.no_of_getattr_calls = 0
def __getattr__(self, attr_name):
self.no_of_getattr_calls += 1
return self.DuffObject
def my_method(self):
pass
def test_getattr_ignores_known_attributes(self):
catcher = self.MinimalCatcher()
catcher.my_method()
self.assertEqual(__, catcher.no_of_getattr_calls)
def test_getattr_only_catches_unknown_attributes(self):
catcher = self.MinimalCatcher()
catcher.purple_flamingos()
catcher.free_pie()
self.assertEqual(__,
type(catcher.give_me_duff_or_give_me_death()).__name__)
self.assertEqual(__, catcher.no_of_getattr_calls)
# ------------------------------------------------------------------
class PossessiveSetter(object):
def __setattr__(self, attr_name, value):
new_attr_name = attr_name
if attr_name[-5:] == 'comic':
new_attr_name = "my_" + new_attr_name
elif attr_name[-3:] == 'pie':
new_attr_name = "a_" + new_attr_name
object.__setattr__(self, new_attr_name, value)
def test_setattr_intercepts_attribute_assignments(self):
fanboy = self.PossessiveSetter()
fanboy.comic = 'The Laminator, issue #1'
fanboy.pie = 'blueberry'
self.assertEqual(__, fanboy.a_pie)
#
# NOTE: Change the prefix to make this next assert pass
#
prefix = '__'
self.assertEqual("The Laminator, issue #1", getattr(fanboy, prefix + '_comic'))
# ------------------------------------------------------------------
class ScarySetter:
def __init__(self):
self.num_of_coconuts = 9
self._num_of_private_coconuts = 2
def __setattr__(self, attr_name, value):
new_attr_name = attr_name
if attr_name[0] != '_':
new_attr_name = "altered_" + new_attr_name
object.__setattr__(self, new_attr_name, value)
def test_it_modifies_external_attribute_as_expected(self):
setter = self.ScarySetter()
setter.e = "mc hammer"
self.assertEqual(__, setter.altered_e)
def test_it_mangles_some_internal_attributes(self):
setter = self.ScarySetter()
try:
coconuts = setter.num_of_coconuts
except AttributeError:
self.assertEqual(__, setter.altered_num_of_coconuts)
def test_in_this_case_private_attributes_remain_unmangled(self):
setter = self.ScarySetter()
self.assertEqual(__, setter._num_of_private_coconuts)
| mit |
Turlough/keyczar | cpp/src/tools/scons/scons-local-1.2.0.d20090223/SCons/Tool/tex.py | 19 | 25091 | """SCons.Tool.tex
Tool-specific initialization for TeX.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/tex.py 4043 2009/02/23 09:06:45 scons"
import os.path
import re
import string
import shutil
import SCons.Action
import SCons.Node
import SCons.Node.FS
import SCons.Util
import SCons.Scanner.LaTeX
# Set True for chatty diagnostics while debugging the rerun logic.
Verbose = False
# Module-global flag shared between InternalLaTeXAuxAction and check_MD5.
must_rerun_latex = True
# these are files that just need to be checked for changes and then rerun latex
check_suffixes = ['.toc', '.lof', '.lot', '.out', '.nav', '.snm']
# these are files that require bibtex or makeindex to be run when they change
all_suffixes = check_suffixes + ['.bbl', '.idx', '.nlo', '.glo']
#
# regular expressions used to search for Latex features
# or outputs that require rerunning latex
#
# search for all .aux files opened by latex (recorded in the .log file)
openout_aux_re = re.compile(r"\\openout.*`(.*\.aux)'")
#printindex_re = re.compile(r"^[^%]*\\printindex", re.MULTILINE)
#printnomenclature_re = re.compile(r"^[^%]*\\printnomenclature", re.MULTILINE)
#printglossary_re = re.compile(r"^[^%]*\\printglossary", re.MULTILINE)
# search to find rerun warnings
warning_rerun_str = '(^LaTeX Warning:.*Rerun)|(^Package \w+ Warning:.*Rerun)'
warning_rerun_re = re.compile(warning_rerun_str, re.MULTILINE)
# search to find citation rerun warnings
rerun_citations_str = "^LaTeX Warning:.*\n.*Rerun to get citations correct"
rerun_citations_re = re.compile(rerun_citations_str, re.MULTILINE)
# search to find undefined references or citations warnings
undefined_references_str = '(^LaTeX Warning:.*undefined references)|(^Package \w+ Warning:.*undefined citations)'
undefined_references_re = re.compile(undefined_references_str, re.MULTILINE)
# used by the emitter
auxfile_re = re.compile(r".", re.MULTILINE)
tableofcontents_re = re.compile(r"^[^%\n]*\\tableofcontents", re.MULTILINE)
makeindex_re = re.compile(r"^[^%\n]*\\makeindex", re.MULTILINE)
bibliography_re = re.compile(r"^[^%\n]*\\bibliography", re.MULTILINE)
listoffigures_re = re.compile(r"^[^%\n]*\\listoffigures", re.MULTILINE)
listoftables_re = re.compile(r"^[^%\n]*\\listoftables", re.MULTILINE)
hyperref_re = re.compile(r"^[^%\n]*\\usepackage.*\{hyperref\}", re.MULTILINE)
makenomenclature_re = re.compile(r"^[^%\n]*\\makenomenclature", re.MULTILINE)
makeglossary_re = re.compile(r"^[^%\n]*\\makeglossary", re.MULTILINE)
beamer_re = re.compile(r"^[^%\n]*\\documentclass\{beamer\}", re.MULTILINE)
# search to find all files included by Latex
include_re = re.compile(r'^[^%\n]*\\(?:include|input){([^}]*)}', re.MULTILINE)
# search to find all graphics files included by Latex
includegraphics_re = re.compile(r'^[^%\n]*\\(?:includegraphics(?:\[[^\]]+\])?){([^}]*)}', re.MULTILINE)
# search to find all files opened by Latex (recorded in .log file)
openout_re = re.compile(r"\\openout.*`(.*)'")
# list of graphics file extensions for TeX and LaTeX
TexGraphics = SCons.Scanner.LaTeX.TexGraphics
LatexGraphics = SCons.Scanner.LaTeX.LatexGraphics
# An Action sufficient to build any generic tex file.
# NOTE: the *Action globals below are placeholders, created lazily in
# generate() so construction variables can be substituted at that point.
TeXAction = None
# An action to build a latex file.  This action might be needed more
# than once if we are dealing with labels and bibtex.
LaTeXAction = None
# An action to run BibTeX on a file.
BibTeXAction = None
# An action to run MakeIndex on a file.
MakeIndexAction = None
# An action to run MakeIndex (for nomencl) on a file.
MakeNclAction = None
# An action to run MakeIndex (for glossary) on a file.
MakeGlossaryAction = None
# Used as a return value of modify_env_var if the variable is not set.
_null = SCons.Scanner.LaTeX._null
modify_env_var = SCons.Scanner.LaTeX.modify_env_var
def FindFile(name,suffixes,paths,env,requireExt=False):
    """Search *paths* for *name*, trying each of *suffixes* if the bare
    name is not found.

    name       -- file name as written in the TeX source (may lack an extension)
    suffixes   -- extensions to try when the name has none (e.g. ['.tex'])
    paths      -- directories to search, in order
    env        -- SCons environment (used only for env.fs.File)
    requireExt -- when True, strip the name's extension first and re-add it
                  only if one was actually present

    Returns an SCons File node for the first hit, or None if not found.
    """
    if requireExt:
        name,ext = SCons.Util.splitext(name)
        # if the user gave an extension use it.
        if ext:
            name = name + ext
    if Verbose:
        print " searching for '%s' with extensions: " % name,suffixes
    for path in paths:
        testName = os.path.join(path,name)
        if Verbose:
            print " look for '%s'" % testName
        if os.path.exists(testName):
            if Verbose:
                print " found '%s'" % testName
            return env.fs.File(testName)
        else:
            name_ext = SCons.Util.splitext(testName)[1]
            # a name that already carries an extension is never retried
            # with the candidate suffixes
            if name_ext:
                continue
            # if no suffix try adding those passed in
            for suffix in suffixes:
                testNameExt = testName + suffix
                if Verbose:
                    print " look for '%s'" % testNameExt
                if os.path.exists(testNameExt):
                    if Verbose:
                        print " found '%s'" % testNameExt
                    return env.fs.File(testNameExt)
    if Verbose:
        print " did not find '%s'" % name
    return None
def InternalLaTeXAuxAction(XXXLaTeXAction, target = None, source= None, env=None):
    """A builder for LaTeX files that checks the output in the aux file
    and decides how many times to use LaTeXAction, and BibTeXAction.

    XXXLaTeXAction -- the concrete action to repeat: LaTeXAction for DVI
                      builds, or the PDF action when called from pdflatex.py
    target/source/env follow the usual SCons action signature.
    Returns the exit status of the last action run (0 on success).
    """
    global must_rerun_latex
    # This routine is called with two actions. In this file for DVI builds
    # with LaTeXAction and from the pdflatex.py with PDFLaTeXAction
    # set this up now for the case where the user requests a different extension
    # for the target filename
    if (XXXLaTeXAction == LaTeXAction):
        callerSuffix = ".dvi"
    else:
        callerSuffix = env['PDFSUFFIX']
    basename = SCons.Util.splitext(str(source[0]))[0]
    basedir = os.path.split(str(source[0]))[0]
    basefile = os.path.split(str(basename))[1]
    abspath = os.path.abspath(basedir)
    targetext = os.path.splitext(str(target[0]))[1]
    targetdir = os.path.split(str(target[0]))[0]
    # Point the TeX search-path variables at the source dir; remember the
    # previous values so they can be restored before returning.
    saved_env = {}
    for var in SCons.Scanner.LaTeX.LaTeX.env_variables:
        saved_env[var] = modify_env_var(env, var, abspath)
    # Create base file names with the target directory since the auxiliary files
    # will be made there.   That's because the *COM variables have the cd
    # command in the prolog.   We check
    # for the existence of files before opening them--even ones like the
    # aux file that TeX always creates--to make it possible to write tests
    # with stubs that don't necessarily generate all of the same files.
    targetbase = os.path.join(targetdir, basefile)
    # if there is a \makeindex there will be a .idx and thus
    # we have to run makeindex at least once to keep the build
    # happy even if there is no index.
    # Same for glossaries and nomenclature
    src_content = source[0].get_text_contents()
    run_makeindex = makeindex_re.search(src_content) and not os.path.exists(targetbase + '.idx')
    run_nomenclature = makenomenclature_re.search(src_content) and not os.path.exists(targetbase + '.nlo')
    run_glossary = makeglossary_re.search(src_content) and not os.path.exists(targetbase + '.glo')
    # Snapshot the content signatures of every auxiliary file so a later
    # pass can detect which ones a latex run changed.
    saved_hashes = {}
    suffix_nodes = {}
    for suffix in all_suffixes:
        theNode = env.fs.File(targetbase + suffix)
        suffix_nodes[suffix] = theNode
        saved_hashes[suffix] = theNode.get_csig()
    if Verbose:
        print "hashes: ",saved_hashes
    must_rerun_latex = True
    #
    # routine to update MD5 hash and compare
    #
    # TODO(1.5): nested scopes
    def check_MD5(filenode, suffix, saved_hashes=saved_hashes, targetbase=targetbase):
        global must_rerun_latex
        # two calls to clear old csig
        filenode.clear_memoized_values()
        filenode.ninfo = filenode.new_ninfo()
        new_md5 = filenode.get_csig()
        if saved_hashes[suffix] == new_md5:
            if Verbose:
                print "file %s not changed" % (targetbase+suffix)
            return False        # unchanged
        saved_hashes[suffix] = new_md5
        must_rerun_latex = True
        if Verbose:
            print "file %s changed, rerunning Latex, new hash = " % (targetbase+suffix), new_md5
        return True     # changed
    # generate the file name that latex will generate
    resultfilename = targetbase + callerSuffix
    # Re-run latex until the auxiliary files stabilize or $LATEXRETRIES
    # is exhausted.
    count = 0
    while (must_rerun_latex and count < int(env.subst('$LATEXRETRIES'))) :
        result = XXXLaTeXAction(target, source, env)
        if result != 0:
            return result
        count = count + 1
        must_rerun_latex = False
        # Decide if various things need to be run, or run again.
        # Read the log file to find all .aux files
        logfilename = targetbase + '.log'
        logContent = ''
        auxfiles = []
        if os.path.exists(logfilename):
            logContent = open(logfilename, "rb").read()
            auxfiles = openout_aux_re.findall(logContent)
        # Now decide if bibtex will need to be run.
        # The information that bibtex reads from the .aux file is
        # pass-independent. If we find (below) that the .bbl file is unchanged,
        # then the last latex saw a correct bibliography.
        # Therefore only do this on the first pass
        if count == 1:
            for auxfilename in auxfiles:
                target_aux = os.path.join(targetdir, auxfilename)
                if os.path.exists(target_aux):
                    content = open(target_aux, "rb").read()
                    if string.find(content, "bibdata") != -1:
                        if Verbose:
                            print "Need to run bibtex"
                        bibfile = env.fs.File(targetbase)
                        result = BibTeXAction(bibfile, bibfile, env)
                        if result != 0:
                            return result
                        must_rerun_latex = check_MD5(suffix_nodes['.bbl'],'.bbl')
                        break
        # Now decide if latex will need to be run again due to index.
        if check_MD5(suffix_nodes['.idx'],'.idx') or (count == 1 and run_makeindex):
            # We must run makeindex
            if Verbose:
                print "Need to run makeindex"
            idxfile = suffix_nodes['.idx']
            result = MakeIndexAction(idxfile, idxfile, env)
            if result != 0:
                return result
        # TO-DO: need to add a way for the user to extend this list for whatever
        # auxiliary files they create in other (or their own) packages
        # Harder is case is where an action needs to be called -- that should be rare (I hope?)
        for index in check_suffixes:
            check_MD5(suffix_nodes[index],index)
        # Now decide if latex will need to be run again due to nomenclature.
        if check_MD5(suffix_nodes['.nlo'],'.nlo') or (count == 1 and run_nomenclature):
            # We must run makeindex
            if Verbose:
                print "Need to run makeindex for nomenclature"
            nclfile = suffix_nodes['.nlo']
            result = MakeNclAction(nclfile, nclfile, env)
            if result != 0:
                return result
        # Now decide if latex will need to be run again due to glossary.
        if check_MD5(suffix_nodes['.glo'],'.glo') or (count == 1 and run_glossary):
            # We must run makeindex
            if Verbose:
                print "Need to run makeindex for glossary"
            glofile = suffix_nodes['.glo']
            result = MakeGlossaryAction(glofile, glofile, env)
            if result != 0:
                return result
        # Now decide if latex needs to be run yet again to resolve warnings.
        if warning_rerun_re.search(logContent):
            must_rerun_latex = True
            if Verbose:
                print "rerun Latex due to latex or package rerun warning"
        if rerun_citations_re.search(logContent):
            must_rerun_latex = True
            if Verbose:
                print "rerun Latex due to 'Rerun to get citations correct' warning"
        if undefined_references_re.search(logContent):
            must_rerun_latex = True
            if Verbose:
                print "rerun Latex due to undefined references or citations"
        if (count >= int(env.subst('$LATEXRETRIES')) and must_rerun_latex):
            print "reached max number of retries on Latex ,",int(env.subst('$LATEXRETRIES'))
    # end of while loop
    # NOTE(review): if $LATEXRETRIES were <= 0 the loop body never runs and
    # 'result' would be unbound below -- presumably retries is always >= 1;
    # verify before changing the default.
    # rename Latex's output to what the target name is
    if not (str(target[0]) == resultfilename  and  os.path.exists(resultfilename)):
        if os.path.exists(resultfilename):
            print "move %s to %s" % (resultfilename, str(target[0]), )
            shutil.move(resultfilename,str(target[0]))
    # Original comment (when TEXPICTS was not restored):
    # The TEXPICTS enviroment variable is needed by a dvi -> pdf step
    # later on Mac OSX so leave it
    #
    # It is also used when searching for pictures (implicit dependencies).
    # Why not set the variable again in the respective builder instead
    # of leaving local modifications in the environment? What if multiple
    # latex builds in different directories need different TEXPICTS?
    for var in SCons.Scanner.LaTeX.LaTeX.env_variables:
        if var == 'TEXPICTS':
            continue
        if saved_env[var] is _null:
            try:
                del env['ENV'][var]
            except KeyError:
                pass # was never set
        else:
            env['ENV'][var] = saved_env[var]
    return result
def LaTeXAuxAction(target = None, source= None, env=None):
    """Builder action for DVI output: run LaTeX through the shared
    aux-file rerun loop (InternalLaTeXAuxAction) with LaTeXAction."""
    return InternalLaTeXAuxAction( LaTeXAction, target, source, env )
LaTeX_re = re.compile("\\\\document(style|class)")
def is_LaTeX(flist):
    """Decide whether a list of source nodes is LaTeX rather than plain TeX.

    Returns 1 as soon as any node's text contains \\documentstyle or
    \\documentclass, otherwise 0 (integer flags kept for existing callers).
    """
    for node in flist:
        if LaTeX_re.search(node.get_text_contents()):
            return 1
    return 0
def TeXLaTeXFunction(target = None, source= None, env=None):
    """Builder body for .tex sources: sniff the source's flavor and
    dispatch to the LaTeX rerun loop or the plain TeX action.

    Returns the chosen action's exit status.
    """
    action = LaTeXAuxAction if is_LaTeX(source) else TeXAction
    return action(target, source, env)
def TeXLaTeXStrFunction(target = None, source= None, env=None):
    """String form of TeXLaTeXFunction for display purposes.

    Under --no-exec the appropriate $LATEXCOM/$TEXCOM expansion is
    returned with a trailing " ..."; otherwise an empty string (the
    real action's own output is shown when it actually runs).
    """
    if not env.GetOption("no_exec"):
        return ''
    command_var = '$LATEXCOM' if is_LaTeX(source) else "$TEXCOM"
    return env.subst(command_var,0,target,source)+" ..."
def tex_eps_emitter(target, source, env):
    """Emitter for tex/latex builds producing DVI.

    Delegates to tex_emitter_core with the TexGraphics extension list,
    so .ps and .eps graphics files are accepted.
    """
    return tex_emitter_core(target, source, env, TexGraphics)
def tex_pdf_emitter(target, source, env):
    """Emitter for pdftex/pdflatex builds.

    Delegates to tex_emitter_core with the LatexGraphics extension list,
    so .pdf, .jpg, .png, .gif and .tif graphics files are accepted.
    """
    return tex_emitter_core(target, source, env, LatexGraphics)
def ScanFiles(theFile, target, paths, file_tests, file_tests_search, env, graphics_extensions, targetdir):
# for theFile (a Node) update any file_tests and search for graphics files
# then find all included files and call ScanFiles for each of them
content = theFile.get_text_contents()
if Verbose:
print " scanning ",str(theFile)
for i in range(len(file_tests_search)):
if file_tests[i][0] == None:
file_tests[i][0] = file_tests_search[i].search(content)
# recursively call this on each of the included files
inc_files = [ ]
inc_files.extend( include_re.findall(content) )
if Verbose:
print "files included by '%s': "%str(theFile),inc_files
# inc_files is list of file names as given. need to find them
# using TEXINPUTS paths.
for src in inc_files:
srcNode = srcNode = FindFile(src,['.tex','.ltx','.latex'],paths,env,requireExt=False)
if srcNode != None:
file_test = ScanFiles(srcNode, target, paths, file_tests, file_tests_search, env, graphics_extensions, targetdir)
if Verbose:
print " done scanning ",str(theFile)
return file_tests
def tex_emitter_core(target, source, env, graphics_extensions):
    """An emitter for TeX and LaTeX sources.
    For LaTeX sources we try and find the common created files that
    are needed on subsequent runs of latex to finish tables of contents,
    bibliographies, indices, lists of figures, and hyperlink references.

    graphics_extensions selects the accepted image formats (TexGraphics
    for DVI builds, LatexGraphics for PDF builds).  Returns the possibly
    extended (target, source) pair.
    """
    targetbase = SCons.Util.splitext(str(target[0]))[0]
    basename = SCons.Util.splitext(str(source[0]))[0]
    basefile = os.path.split(str(basename))[1]
    basedir = os.path.split(str(source[0]))[0]
    targetdir = os.path.split(str(target[0]))[0]
    abspath = os.path.abspath(basedir)
    target[0].attributes.path = abspath
    #
    # file names we will make use of in searching the sources and log file
    #
    # NOTE(review): emit_suffixes, idx_exists, nlo_exists and glo_exists
    # below appear to be unused in this function -- candidates for removal.
    emit_suffixes = ['.aux', '.log', '.ilg', '.blg', '.nls', '.nlg', '.gls', '.glg'] + all_suffixes
    auxfilename = targetbase + '.aux'
    logfilename = targetbase + '.log'
    env.SideEffect(auxfilename,target[0])
    env.SideEffect(logfilename,target[0])
    env.Clean(target[0],auxfilename)
    env.Clean(target[0],logfilename)
    content = source[0].get_text_contents()
    idx_exists = os.path.exists(targetbase + '.idx')
    nlo_exists = os.path.exists(targetbase + '.nlo')
    glo_exists = os.path.exists(targetbase + '.glo')
    # set up list with the regular expressions
    # we use to find features used
    file_tests_search = [auxfile_re,
                         makeindex_re,
                         bibliography_re,
                         tableofcontents_re,
                         listoffigures_re,
                         listoftables_re,
                         hyperref_re,
                         makenomenclature_re,
                         makeglossary_re,
                         beamer_re ]
    # set up list with the file suffixes that need emitting
    # when a feature is found
    file_tests_suff = [['.aux'],
                  ['.idx', '.ind', '.ilg'],
                  ['.bbl', '.blg'],
                  ['.toc'],
                  ['.lof'],
                  ['.lot'],
                  ['.out'],
                  ['.nlo', '.nls', '.nlg'],
                  ['.glo', '.gls', '.glg'],
                  ['.nav', '.snm', '.out', '.toc'] ]
    # build the list of lists
    file_tests = []
    for i in range(len(file_tests_search)):
        file_tests.append( [None, file_tests_suff[i]] )
    # TO-DO: need to add a way for the user to extend this list for whatever
    # auxiliary files they create in other (or their own) packages
    # get path list from both env['TEXINPUTS'] and env['ENV']['TEXINPUTS']
    savedpath = modify_env_var(env, 'TEXINPUTS', abspath)
    paths = env['ENV']['TEXINPUTS']
    if SCons.Util.is_List(paths):
        pass
    else:
        # Split at os.pathsep to convert into absolute path
        # TODO(1.5)
        #paths = paths.split(os.pathsep)
        paths = string.split(paths, os.pathsep)
    # now that we have the path list restore the env
    if savedpath is _null:
        try:
            del env['ENV']['TEXINPUTS']
        except KeyError:
            pass # was never set
    else:
        env['ENV']['TEXINPUTS'] = savedpath
    if Verbose:
        print "search path ",paths
    file_tests = ScanFiles(source[0], target, paths, file_tests, file_tests_search, env, graphics_extensions, targetdir)
    # declare every suffix implied by a detected feature as a side effect
    # of the build, and clean it with the target
    for (theSearch,suffix_list) in file_tests:
        if theSearch:
            for suffix in suffix_list:
                env.SideEffect(targetbase + suffix,target[0])
                env.Clean(target[0],targetbase + suffix)
    # read log file to get all other files that latex creates and will read on the next pass
    if os.path.exists(logfilename):
        content = open(logfilename, "rb").read()
        out_files = openout_re.findall(content)
        env.SideEffect(out_files,target[0])
        env.Clean(target[0],out_files)
    return (target, source)
# Combined TeX/LaTeX dispatch action; created lazily in generate().
TeXLaTeXAction = None
def generate(env):
    """Add Builders and construction variables for TeX to an Environment.

    Creates the module-level Action singletons on first call, registers
    the combined TeX/LaTeX action and .tex emitter on the DVI builder,
    and installs default construction variables for tex, latex, bibtex,
    makeindex (index/nomenclature/glossary) and pdflatex.
    """
    # A generic tex file Action, sufficient for all tex files.
    global TeXAction
    if TeXAction is None:
        TeXAction = SCons.Action.Action("$TEXCOM", "$TEXCOMSTR")
    # An Action to build a latex file. This might be needed more
    # than once if we are dealing with labels and bibtex.
    global LaTeXAction
    if LaTeXAction is None:
        LaTeXAction = SCons.Action.Action("$LATEXCOM", "$LATEXCOMSTR")
    # Define an action to run BibTeX on a file.
    global BibTeXAction
    if BibTeXAction is None:
        BibTeXAction = SCons.Action.Action("$BIBTEXCOM", "$BIBTEXCOMSTR")
    # Define an action to run MakeIndex on a file.
    global MakeIndexAction
    if MakeIndexAction is None:
        MakeIndexAction = SCons.Action.Action("$MAKEINDEXCOM", "$MAKEINDEXCOMSTR")
    # Define an action to run MakeIndex on a file for nomenclatures.
    global MakeNclAction
    if MakeNclAction is None:
        MakeNclAction = SCons.Action.Action("$MAKENCLCOM", "$MAKENCLCOMSTR")
    # Define an action to run MakeIndex on a file for glossaries.
    global MakeGlossaryAction
    if MakeGlossaryAction is None:
        MakeGlossaryAction = SCons.Action.Action("$MAKEGLOSSARYCOM", "$MAKEGLOSSARYCOMSTR")
    # The dispatching action used for .tex sources (see TeXLaTeXFunction).
    global TeXLaTeXAction
    if TeXLaTeXAction is None:
        TeXLaTeXAction = SCons.Action.Action(TeXLaTeXFunction,
                              strfunction=TeXLaTeXStrFunction)
    # hook the .tex handling onto the DVI builder supplied by the dvi tool
    import dvi
    dvi.generate(env)
    bld = env['BUILDERS']['DVI']
    bld.add_action('.tex', TeXLaTeXAction)
    bld.add_emitter('.tex', tex_eps_emitter)
    env['TEX'] = 'tex'
    env['TEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode')
    env['TEXCOM'] = 'cd ${TARGET.dir} && $TEX $TEXFLAGS ${SOURCE.file}'
    # Duplicate from latex.py.  If latex.py goes away, then this is still OK.
    env['LATEX'] = 'latex'
    env['LATEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode')
    env['LATEXCOM'] = 'cd ${TARGET.dir} && $LATEX $LATEXFLAGS ${SOURCE.file}'
    env['LATEXRETRIES'] = 3
    env['BIBTEX'] = 'bibtex'
    env['BIBTEXFLAGS'] = SCons.Util.CLVar('')
    env['BIBTEXCOM'] = 'cd ${TARGET.dir} && $BIBTEX $BIBTEXFLAGS ${SOURCE.filebase}'
    env['MAKEINDEX'] = 'makeindex'
    env['MAKEINDEXFLAGS'] = SCons.Util.CLVar('')
    env['MAKEINDEXCOM'] = 'cd ${TARGET.dir} && $MAKEINDEX $MAKEINDEXFLAGS ${SOURCE.file}'
    env['MAKEGLOSSARY'] = 'makeindex'
    env['MAKEGLOSSARYSTYLE'] = '${SOURCE.filebase}.ist'
    env['MAKEGLOSSARYFLAGS'] = SCons.Util.CLVar('-s ${MAKEGLOSSARYSTYLE} -t ${SOURCE.filebase}.glg')
    env['MAKEGLOSSARYCOM'] = 'cd ${TARGET.dir} && $MAKEGLOSSARY ${SOURCE.filebase}.glo $MAKEGLOSSARYFLAGS -o ${SOURCE.filebase}.gls'
    env['MAKENCL'] = 'makeindex'
    env['MAKENCLSTYLE'] = '$nomencl.ist'
    env['MAKENCLFLAGS'] = '-s ${MAKENCLSTYLE} -t ${SOURCE.filebase}.nlg'
    env['MAKENCLCOM'] = 'cd ${TARGET.dir} && $MAKENCL ${SOURCE.filebase}.nlo $MAKENCLFLAGS -o ${SOURCE.filebase}.nls'
    # Duplicate from pdflatex.py.  If latex.py goes away, then this is still OK.
    env['PDFLATEX'] = 'pdflatex'
    env['PDFLATEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode')
    env['PDFLATEXCOM'] = 'cd ${TARGET.dir} && $PDFLATEX $PDFLATEXFLAGS ${SOURCE.file}'
def exists(env):
    """Tool availability check: report whether a 'tex' executable can be
    detected in the environment's search path."""
    tex_path = env.Detect('tex')
    return tex_path
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
wimnat/ansible | test/support/integration/plugins/modules/ec2_group.py | 49 | 57129 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Module metadata consumed by Ansible's documentation and plugin tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['stableinterface'],
                    'supported_by': 'core'}
DOCUMENTATION = '''
---
module: ec2_group
author: "Andrew de Quincey (@adq)"
version_added: "1.3"
requirements: [ boto3 ]
short_description: maintain an ec2 VPC security group.
description:
- Maintains ec2 security groups. This module has a dependency on python-boto >= 2.5.
options:
name:
description:
- Name of the security group.
- One of and only one of I(name) or I(group_id) is required.
- Required if I(state=present).
required: false
type: str
group_id:
description:
- Id of group to delete (works only with absent).
- One of and only one of I(name) or I(group_id) is required.
required: false
version_added: "2.4"
type: str
description:
description:
- Description of the security group. Required when C(state) is C(present).
required: false
type: str
vpc_id:
description:
- ID of the VPC to create the group in.
required: false
type: str
rules:
description:
- List of firewall inbound rules to enforce in this group (see example). If none are supplied,
no inbound rules will be enabled. Rules list may include its own name in `group_name`.
This allows idempotent loopback additions (e.g. allow group to access itself).
Rule sources list support was added in version 2.4. This allows to define multiple sources per
source type as well as multiple source types per rule. Prior to 2.4 an individual source is allowed.
In version 2.5 support for rule descriptions was added.
required: false
type: list
elements: dict
suboptions:
cidr_ip:
type: str
description:
- The IPv4 CIDR range traffic is coming from.
- You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
and I(group_name).
cidr_ipv6:
type: str
description:
- The IPv6 CIDR range traffic is coming from.
- You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
and I(group_name).
ip_prefix:
type: str
description:
- The IP Prefix U(https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-prefix-lists.html)
that traffic is coming from.
- You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
and I(group_name).
group_id:
type: str
description:
- The ID of the Security Group that traffic is coming from.
- You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
and I(group_name).
group_name:
type: str
description:
- Name of the Security Group that traffic is coming from.
- If the Security Group doesn't exist a new Security Group will be
created with I(group_desc) as the description.
- You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
and I(group_name).
group_desc:
type: str
description:
- If the I(group_name) is set and the Security Group doesn't exist a new Security Group will be
created with I(group_desc) as the description.
proto:
type: str
description:
- The IP protocol name (C(tcp), C(udp), C(icmp), C(icmpv6)) or number (U(https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers))
from_port:
type: int
description: The start of the range of ports that traffic is coming from. A value of C(-1) indicates all ports.
to_port:
type: int
description: The end of the range of ports that traffic is coming from. A value of C(-1) indicates all ports.
rule_desc:
type: str
description: A description for the rule.
rules_egress:
description:
- List of firewall outbound rules to enforce in this group (see example). If none are supplied,
a default all-out rule is assumed. If an empty list is supplied, no outbound rules will be enabled.
Rule Egress sources list support was added in version 2.4. In version 2.5 support for rule descriptions
was added.
required: false
version_added: "1.6"
type: list
elements: dict
suboptions:
cidr_ip:
type: str
description:
- The IPv4 CIDR range traffic is going to.
- You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
and I(group_name).
cidr_ipv6:
type: str
description:
- The IPv6 CIDR range traffic is going to.
- You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
and I(group_name).
ip_prefix:
type: str
description:
- The IP Prefix U(https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-prefix-lists.html)
that traffic is going to.
- You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
and I(group_name).
group_id:
type: str
description:
- The ID of the Security Group that traffic is going to.
- You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
and I(group_name).
group_name:
type: str
description:
- Name of the Security Group that traffic is going to.
- If the Security Group doesn't exist a new Security Group will be
created with I(group_desc) as the description.
- You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
and I(group_name).
group_desc:
type: str
description:
- If the I(group_name) is set and the Security Group doesn't exist a new Security Group will be
created with I(group_desc) as the description.
proto:
type: str
description:
- The IP protocol name (C(tcp), C(udp), C(icmp), C(icmpv6)) or number (U(https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers))
from_port:
type: int
description: The start of the range of ports that traffic is going to. A value of C(-1) indicates all ports.
to_port:
type: int
description: The end of the range of ports that traffic is going to. A value of C(-1) indicates all ports.
rule_desc:
type: str
description: A description for the rule.
state:
version_added: "1.4"
description:
- Create or delete a security group.
required: false
default: 'present'
choices: [ "present", "absent" ]
aliases: []
type: str
purge_rules:
version_added: "1.8"
description:
- Purge existing rules on security group that are not found in rules.
required: false
default: 'true'
aliases: []
type: bool
purge_rules_egress:
version_added: "1.8"
description:
- Purge existing rules_egress on security group that are not found in rules_egress.
required: false
default: 'true'
aliases: []
type: bool
tags:
version_added: "2.4"
description:
- A dictionary of one or more tags to assign to the security group.
required: false
type: dict
aliases: ['resource_tags']
purge_tags:
version_added: "2.4"
description:
- If yes, existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. If the I(tags) parameter is not set then
tags will not be modified.
required: false
default: yes
type: bool
extends_documentation_fragment:
- aws
- ec2
notes:
- If a rule declares a group_name and that group doesn't exist, it will be
automatically created. In that case, group_desc should be provided as well.
The module will refuse to create a depended-on group without a description.
- Preview diff mode support is added in version 2.7.
'''
EXAMPLES = '''
- name: example using security group rule descriptions
ec2_group:
name: "{{ name }}"
description: sg with rule descriptions
vpc_id: vpc-xxxxxxxx
profile: "{{ aws_profile }}"
region: us-east-1
rules:
- proto: tcp
ports:
- 80
cidr_ip: 0.0.0.0/0
rule_desc: allow all on port 80
- name: example ec2 group
ec2_group:
name: example
description: an example EC2 group
vpc_id: 12345
region: eu-west-1
aws_secret_key: SECRET
aws_access_key: ACCESS
rules:
- proto: tcp
from_port: 80
to_port: 80
cidr_ip: 0.0.0.0/0
- proto: tcp
from_port: 22
to_port: 22
cidr_ip: 10.0.0.0/8
- proto: tcp
from_port: 443
to_port: 443
# this should only be needed for EC2 Classic security group rules
# because in a VPC an ELB will use a user-account security group
group_id: amazon-elb/sg-87654321/amazon-elb-sg
- proto: tcp
from_port: 3306
to_port: 3306
group_id: 123412341234/sg-87654321/exact-name-of-sg
- proto: udp
from_port: 10050
to_port: 10050
cidr_ip: 10.0.0.0/8
- proto: udp
from_port: 10051
to_port: 10051
group_id: sg-12345678
- proto: icmp
from_port: 8 # icmp type, -1 = any type
to_port: -1 # icmp subtype, -1 = any subtype
cidr_ip: 10.0.0.0/8
- proto: all
# the containing group name may be specified here
group_name: example
- proto: all
# in the 'proto' attribute, if you specify -1, all, or a protocol number other than tcp, udp, icmp, or 58 (ICMPv6),
# traffic on all ports is allowed, regardless of any ports you specify
from_port: 10050 # this value is ignored
to_port: 10050 # this value is ignored
cidr_ip: 10.0.0.0/8
rules_egress:
- proto: tcp
from_port: 80
to_port: 80
cidr_ip: 0.0.0.0/0
cidr_ipv6: 64:ff9b::/96
group_name: example-other
# description to use if example-other needs to be created
group_desc: other example EC2 group
- name: example2 ec2 group
ec2_group:
name: example2
description: an example2 EC2 group
vpc_id: 12345
region: eu-west-1
rules:
# 'ports' rule keyword was introduced in version 2.4. It accepts a single port value or a list of values including ranges (from_port-to_port).
- proto: tcp
ports: 22
group_name: example-vpn
- proto: tcp
ports:
- 80
- 443
- 8080-8099
cidr_ip: 0.0.0.0/0
# Rule sources list support was added in version 2.4. This allows to define multiple sources per source type as well as multiple source types per rule.
- proto: tcp
ports:
- 6379
- 26379
group_name:
- example-vpn
- example-redis
- proto: tcp
ports: 5665
group_name: example-vpn
cidr_ip:
- 172.16.1.0/24
- 172.16.17.0/24
cidr_ipv6:
- 2607:F8B0::/32
- 64:ff9b::/96
group_id:
- sg-edcd9784
diff: True
- name: "Delete group by its id"
ec2_group:
region: eu-west-1
group_id: sg-33b4ee5b
state: absent
'''
RETURN = '''
group_name:
description: Security group name
sample: My Security Group
type: str
returned: on create/update
group_id:
description: Security group id
sample: sg-abcd1234
type: str
returned: on create/update
description:
description: Description of security group
sample: My Security Group
type: str
returned: on create/update
tags:
description: Tags associated with the security group
sample:
Name: My Security Group
Purpose: protecting stuff
type: dict
returned: on create/update
vpc_id:
description: ID of VPC to which the security group belongs
sample: vpc-abcd1234
type: str
returned: on create/update
ip_permissions:
description: Inbound rules associated with the security group.
sample:
- from_port: 8182
ip_protocol: tcp
ip_ranges:
- cidr_ip: "1.1.1.1/32"
ipv6_ranges: []
prefix_list_ids: []
to_port: 8182
user_id_group_pairs: []
type: list
returned: on create/update
ip_permissions_egress:
description: Outbound rules associated with the security group.
sample:
- ip_protocol: -1
ip_ranges:
- cidr_ip: "0.0.0.0/0"
ipv6_ranges: []
prefix_list_ids: []
user_id_group_pairs: []
type: list
returned: on create/update
owner_id:
description: AWS Account ID of the security group
sample: 123456789012
type: int
returned: on create/update
'''
import json
import re
import itertools
from copy import deepcopy
from time import sleep
from collections import namedtuple
from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
from ansible.module_utils.aws.iam import get_aws_account_id
from ansible.module_utils.aws.waiters import get_waiter
from ansible.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict, compare_aws_tags
from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list
from ansible.module_utils.common.network import to_ipv6_subnet, to_subnet
from ansible.module_utils.compat.ipaddress import ip_network, IPv6Network
from ansible.module_utils._text import to_text
from ansible.module_utils.six import string_types
try:
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
pass # caught by AnsibleAWSModule
# Normalized in-memory representation of a single security-group grant.
# ``target_type`` is one of ``valid_targets``; ``target`` is a CIDR string,
# prefix-list id, group id, or a (UserId, GroupId, GroupName) tuple for
# 'group' targets.
Rule = namedtuple('Rule', ['port_range', 'protocol', 'target', 'target_type', 'description'])
valid_targets = set(['ipv4', 'ipv6', 'group', 'ip_prefix'])
# Caller's AWS account id, compared against in rule_from_group_permission();
# None until populated elsewhere in the module — TODO confirm where it is set.
current_account_id = None
def rule_cmp(a, b):
    """Return True when two Rule tuples match, ignoring their descriptions."""
    protocols_match = to_text(a.protocol) == to_text(b.protocol)
    # For a matching protocol, (-1, -1) and (None, None) both mean "all ports".
    wildcard_ranges = ((None, None), (-1, -1))
    for attr in ('port_range', 'protocol', 'target', 'target_type'):
        if (attr == 'port_range' and protocols_match
                and a.port_range in wildcard_ranges
                and b.port_range in wildcard_ranges):
            continue
        if getattr(a, attr) != getattr(b, attr):
            return False
    return True
def rules_to_permissions(rules):
    """Serialize every Rule in *rules* to a boto3 permission dict."""
    return list(map(to_permission, rules))
def to_permission(rule):
    """Serialize a Rule namedtuple into a boto3 IpPermission-style dict."""
    # take a Rule, output the serialized grant
    perm = {
        'IpProtocol': rule.protocol,
    }
    perm['FromPort'], perm['ToPort'] = rule.port_range
    if rule.target_type == 'ipv4':
        perm['IpRanges'] = [{
            'CidrIp': rule.target,
        }]
        if rule.description:
            perm['IpRanges'][0]['Description'] = rule.description
    elif rule.target_type == 'ipv6':
        perm['Ipv6Ranges'] = [{
            'CidrIpv6': rule.target,
        }]
        if rule.description:
            perm['Ipv6Ranges'][0]['Description'] = rule.description
    elif rule.target_type == 'group':
        # A tuple target is (UserId, GroupId, GroupName); a bare string is a group id.
        if isinstance(rule.target, tuple):
            pair = {}
            if rule.target[0]:
                pair['UserId'] = rule.target[0]
            # group_id/group_name are mutually exclusive - give group_id more precedence as it is more specific
            if rule.target[1]:
                pair['GroupId'] = rule.target[1]
            elif rule.target[2]:
                pair['GroupName'] = rule.target[2]
            perm['UserIdGroupPairs'] = [pair]
        else:
            perm['UserIdGroupPairs'] = [{
                'GroupId': rule.target
            }]
        if rule.description:
            perm['UserIdGroupPairs'][0]['Description'] = rule.description
    elif rule.target_type not in valid_targets:
        raise ValueError('Invalid target type for rule {0}'.format(rule))
    # Normalise port ints and protocol text before handing the dict to boto3.
    return fix_port_and_protocol(perm)
def rule_from_group_permission(perm):
    """Yield Rule namedtuples for every grant inside one boto3 permission dict.

    perm: a single entry of an ``IpPermissions`` / ``IpPermissionsEgress``
    list as returned by describe_security_groups.
    """
    def ports_from_permission(p):
        # Protocol "-1" (all traffic) carries no port keys at all.
        if 'FromPort' not in p and 'ToPort' not in p:
            return (None, None)
        return (int(perm['FromPort']), int(perm['ToPort']))

    # A permission may fan out to several CIDRs / prefix lists; yield one Rule
    # per individual target.
    for target_key, target_subkey, target_type in [
            ('IpRanges', 'CidrIp', 'ipv4'),
            ('Ipv6Ranges', 'CidrIpv6', 'ipv6'),
            ('PrefixListIds', 'PrefixListId', 'ip_prefix'),
    ]:
        if target_key not in perm:
            continue
        for r in perm[target_key]:
            # there may be several IP ranges here, which is ok
            yield Rule(
                ports_from_permission(perm),
                to_text(perm['IpProtocol']),
                r[target_subkey],
                target_type,
                r.get('Description')
            )
    if perm.get('UserIdGroupPairs'):
        for pair in perm['UserIdGroupPairs']:
            target = (
                pair.get('UserId', None),
                pair.get('GroupId', None),
                pair.get('GroupName', None),
            )
            if pair.get('UserId', '').startswith('amazon-'):
                # amazon-elb and amazon-prefix rules don't need
                # group-id specified, so remove it when querying
                # from permission
                target = (
                    target[0],
                    None,
                    target[2],
                )
            # Bugfix: the original code additionally read pair['UserId'] with
            # direct indexing in an elif branch, which raised KeyError when a
            # pair carried no UserId (and the branch only rebuilt the identical
            # target tuple anyway, so it is dropped).
            yield Rule(
                ports_from_permission(perm),
                to_text(perm['IpProtocol']),
                target,
                'group',
                pair.get('Description')
            )
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0, catch_extra_error_codes=['InvalidGroup.NotFound'])
def get_security_groups_with_backoff(connection, **kwargs):
    """describe_security_groups with retries; also retries InvalidGroup.NotFound."""
    return connection.describe_security_groups(**kwargs)
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def sg_exists_with_backoff(connection, **kwargs):
    """describe_security_groups that maps InvalidGroup.NotFound to an empty result."""
    try:
        return connection.describe_security_groups(**kwargs)
    except is_boto3_error_code('InvalidGroup.NotFound'):
        # A missing group is a normal outcome here, not an error.
        return {'SecurityGroups': []}
def deduplicate_rules_args(rules):
    """Return *rules* with exact duplicates removed (None passes through)."""
    if rules is None:
        return None
    # Key each rule by its canonical JSON form; later duplicates overwrite
    # earlier, equal entries, matching the original dict(zip(...)) behaviour.
    unique = {}
    for rule in rules:
        unique[json.dumps(rule, sort_keys=True)] = rule
    return list(unique.values())
def validate_rule(module, rule):
    """Fail the module when *rule* has unknown keys or conflicting sources."""
    allowed_params = ('cidr_ip', 'cidr_ipv6', 'ip_prefix',
                      'group_id', 'group_name', 'group_desc',
                      'proto', 'from_port', 'to_port', 'rule_desc')
    if not isinstance(rule, dict):
        module.fail_json(msg='Invalid rule parameter type [%s].' % type(rule))
    for key in rule:
        if key not in allowed_params:
            module.fail_json(msg="Invalid rule parameter '{0}' for rule: {1}".format(key, rule))

    # Pairs of mutually exclusive source keys, checked in the original order
    # so the first applicable message is the one reported.
    conflicts = (
        ('group_id', 'cidr_ip', 'Specify group_id OR cidr_ip, not both'),
        ('group_name', 'cidr_ip', 'Specify group_name OR cidr_ip, not both'),
        ('group_id', 'cidr_ipv6', 'Specify group_id OR cidr_ipv6, not both'),
        ('group_name', 'cidr_ipv6', 'Specify group_name OR cidr_ipv6, not both'),
        ('cidr_ip', 'cidr_ipv6', 'Specify cidr_ip OR cidr_ipv6, not both'),
        ('group_id', 'group_name', 'Specify group_id OR group_name, not both'),
    )
    for first, second, message in conflicts:
        if first in rule and second in rule:
            module.fail_json(msg=message)
            break
def get_target_from_rule(module, client, rule, name, group, groups, vpc_id):
    """
    Returns tuple of (target_type, target, group_created) after validating rule params.
    rule: Dict describing a rule.
    name: Name of the security group being managed.
    groups: Dict of all available security groups, mutated in place as new
        groups are discovered or created.
    AWS accepts an ip range or a security group as target of a rule. This
    function validates the rule specification and returns either a non-None
    group target or a non-None ip range.
    """
    FOREIGN_SECURITY_GROUP_REGEX = r'^([^/]+)/?(sg-\S+)?/(\S+)'
    group_id = None
    group_name = None
    target_group_created = False

    validate_rule(module, rule)
    if rule.get('group_id') and re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']):
        # this is a foreign Security Group. Since you can't fetch it you must create an instance of it
        owner_id, group_id, group_name = re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']).groups()
        group_instance = dict(UserId=owner_id, GroupId=group_id, GroupName=group_name)
        groups[group_id] = group_instance
        groups[group_name] = group_instance
        # group_id/group_name are mutually exclusive - give group_id more precedence as it is more specific
        if group_id and group_name:
            group_name = None
        return 'group', (owner_id, group_id, group_name), False
    elif 'group_id' in rule:
        return 'group', rule['group_id'], False
    elif 'group_name' in rule:
        group_name = rule['group_name']
        if group_name == name:
            # Self-referencing rule: reuse the group currently being managed.
            group_id = group['GroupId']
            groups[group_id] = group
            groups[group_name] = group
        elif group_name in groups and group.get('VpcId') and groups[group_name].get('VpcId'):
            # both are VPC groups, this is ok
            group_id = groups[group_name]['GroupId']
        elif group_name in groups and not (group.get('VpcId') or groups[group_name].get('VpcId')):
            # both are EC2 classic, this is ok
            group_id = groups[group_name]['GroupId']
        else:
            auto_group = None
            filters = {'group-name': group_name}
            if vpc_id:
                filters['vpc-id'] = vpc_id
            # if we got here, either the target group does not exist, or there
            # is a mix of EC2 classic + VPC groups. Mixing of EC2 classic + VPC
            # is bad, so we have to create a new SG because no compatible group
            # exists
            if not rule.get('group_desc', '').strip():
                # retry describing the group once
                try:
                    auto_group = get_security_groups_with_backoff(client, Filters=ansible_dict_to_boto3_filter_list(filters)).get('SecurityGroups', [])[0]
                except (is_boto3_error_code('InvalidGroup.NotFound'), IndexError):
                    module.fail_json(msg="group %s will be automatically created by rule %s but "
                                         "no description was provided" % (group_name, rule))
                except ClientError as e:  # pylint: disable=duplicate-except
                    module.fail_json_aws(e)
            elif not module.check_mode:
                params = dict(GroupName=group_name, Description=rule['group_desc'])
                if vpc_id:
                    params['VpcId'] = vpc_id
                try:
                    auto_group = client.create_security_group(**params)
                    get_waiter(
                        client, 'security_group_exists',
                    ).wait(
                        GroupIds=[auto_group['GroupId']],
                    )
                except is_boto3_error_code('InvalidGroup.Duplicate'):
                    # The group exists, but didn't show up in any of our describe-security-groups calls
                    # Try searching on a filter for the name, and allow a retry window for AWS to update
                    # the model on their end.
                    try:
                        auto_group = get_security_groups_with_backoff(client, Filters=ansible_dict_to_boto3_filter_list(filters)).get('SecurityGroups', [])[0]
                    except IndexError as e:
                        module.fail_json(msg="Could not create or use existing group '{0}' in rule. Make sure the group exists".format(group_name))
                    except ClientError as e:
                        module.fail_json_aws(
                            e,
                            msg="Could not create or use existing group '{0}' in rule. Make sure the group exists".format(group_name))
            if auto_group is not None:
                group_id = auto_group['GroupId']
                groups[group_id] = auto_group
                groups[group_name] = auto_group
            # NOTE(review): set even in check mode with no group created,
            # so callers treat this branch as "would create" — confirm intended.
            target_group_created = True
        return 'group', group_id, target_group_created
    elif 'cidr_ip' in rule:
        return 'ipv4', validate_ip(module, rule['cidr_ip']), False
    elif 'cidr_ipv6' in rule:
        return 'ipv6', validate_ip(module, rule['cidr_ipv6']), False
    elif 'ip_prefix' in rule:
        return 'ip_prefix', rule['ip_prefix'], False

    module.fail_json(msg="Could not match target for rule {0}".format(rule), failed_rule=rule)
def ports_expand(ports):
    """Expand a mixed list of ports into (from_port, to_port) tuples.

    Entries may be ints, numeric strings, or 'low-high' range strings.
    """
    expanded = []
    for entry in ports:
        if not isinstance(entry, string_types):
            expanded.append((entry, entry))
        elif '-' in entry:
            low, high = entry.split('-', 1)
            expanded.append((int(low.strip()), int(high.strip())))
        else:
            single = int(entry.strip())
            expanded.append((single, single))
    return expanded
def rule_expand_ports(rule):
    """Expand a rule's 'ports' shorthand into one rule per port range."""
    if 'ports' not in rule:
        # Plain from_port/to_port rule: just coerce string ports to int,
        # mutating the rule in place like the original.
        for key in ('from_port', 'to_port'):
            if isinstance(rule.get(key), string_types):
                rule[key] = int(rule.get(key))
        return [rule]

    ports = rule['ports'] if isinstance(rule['ports'], list) else [rule['ports']]
    expanded = []
    for bounds in ports_expand(ports):
        port_rule = rule.copy()
        del port_rule['ports']
        # sorted() tolerates ranges given in either order, e.g. '8099-8080'.
        port_rule['from_port'], port_rule['to_port'] = sorted(bounds)
        expanded.append(port_rule)
    return expanded
def rules_expand_ports(rules):
    """Expand every rule's 'ports' shorthand; None/empty input passes through."""
    if not rules:
        return rules
    expanded = []
    for compact_rule in rules:
        expanded.extend(rule_expand_ports(compact_rule))
    return expanded
def rule_expand_source(rule, source_type):
    """Return one rule per source value found under *source_type*."""
    raw = rule[source_type]
    sources = raw if isinstance(raw, list) else [raw]
    all_source_keys = ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name', 'ip_prefix')

    expanded = []
    for single_source in sources:
        # Rebuild the rule without any source key, then attach exactly one.
        new_rule = {k: v for k, v in rule.items() if k not in all_source_keys}
        new_rule[source_type] = single_source
        expanded.append(new_rule)
    return expanded
def rule_expand_sources(rule):
    """Expand a rule into one rule per (source type, source value) pair."""
    expanded = []
    for stype in ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name', 'ip_prefix'):
        if stype in rule:
            expanded.extend(rule_expand_source(rule, stype))
    return expanded
def rules_expand_sources(rules):
    """Expand every rule's multi-source lists; None/empty input passes through."""
    if not rules:
        return rules
    expanded = []
    for compact_rule in rules:
        expanded.extend(rule_expand_sources(compact_rule))
    return expanded
def update_rules_description(module, client, rule_type, group_id, ip_permissions):
    """Push updated rule descriptions to AWS for one traffic direction."""
    if module.check_mode:
        return
    direction_methods = {
        "in": "update_security_group_rule_descriptions_ingress",
        "out": "update_security_group_rule_descriptions_egress",
    }
    method_name = direction_methods.get(rule_type)
    if method_name is None:
        # Unknown direction: the original silently did nothing.
        return
    try:
        getattr(client, method_name)(GroupId=group_id, IpPermissions=ip_permissions)
    except (ClientError, BotoCoreError) as e:
        module.fail_json_aws(e, msg="Unable to update rule description for group %s" % group_id)
def fix_port_and_protocol(permission):
    """Normalise a permission dict in place: int ports (dropping None) and text protocol."""
    for port_key in ('FromPort', 'ToPort'):
        if port_key not in permission:
            continue
        value = permission[port_key]
        if value is None:
            # "all ports" entries must not carry port keys at all.
            del permission[port_key]
        else:
            permission[port_key] = int(value)
    permission['IpProtocol'] = to_text(permission['IpProtocol'])
    return permission
def remove_old_permissions(client, module, revoke_ingress, revoke_egress, group_id):
    """Revoke stale ingress/egress permissions; report whether anything changed."""
    for permissions, direction in ((revoke_ingress, 'in'), (revoke_egress, 'out')):
        if permissions:
            revoke(client, module, permissions, group_id, direction)
    return bool(revoke_ingress or revoke_egress)
def revoke(client, module, ip_permissions, group_id, rule_type):
    """Revoke *ip_permissions* from a group; a no-op in check mode."""
    if module.check_mode:
        return
    try:
        if rule_type == 'in':
            client.revoke_security_group_ingress(GroupId=group_id, IpPermissions=ip_permissions)
        elif rule_type == 'out':
            client.revoke_security_group_egress(GroupId=group_id, IpPermissions=ip_permissions)
    except (BotoCoreError, ClientError) as e:
        direction = 'ingress rules' if rule_type == 'in' else 'egress rules'
        module.fail_json_aws(e, "Unable to revoke {0}: {1}".format(direction, ip_permissions))
def add_new_permissions(client, module, new_ingress, new_egress, group_id):
    """Authorize new ingress/egress permissions; report whether anything changed."""
    for permissions, direction in ((new_ingress, 'in'), (new_egress, 'out')):
        if permissions:
            authorize(client, module, permissions, group_id, direction)
    return bool(new_ingress or new_egress)
def authorize(client, module, ip_permissions, group_id, rule_type):
    """Authorize *ip_permissions* on a group; a no-op in check mode."""
    if module.check_mode:
        return
    try:
        if rule_type == 'in':
            client.authorize_security_group_ingress(GroupId=group_id, IpPermissions=ip_permissions)
        elif rule_type == 'out':
            client.authorize_security_group_egress(GroupId=group_id, IpPermissions=ip_permissions)
    except (BotoCoreError, ClientError) as e:
        direction = 'ingress rules' if rule_type == 'in' else 'egress rules'
        module.fail_json_aws(e, "Unable to authorize {0}: {1}".format(direction, ip_permissions))
def validate_ip(module, cidr_ip):
    """Validate a CIDR string, warning (not failing) when host bits are set.

    Returns the masked network form of an IPv4/IPv6 CIDR whose host bits were
    set; otherwise returns the input unchanged.
    """
    split_addr = cidr_ip.split('/')
    if len(split_addr) == 2:
        # this_ip is a IPv4 or IPv6 CIDR that may or may not have host bits set
        # Get the network bits if IPv4, and validate if IPv6.
        try:
            ip = to_subnet(split_addr[0], split_addr[1])
            if ip != cidr_ip:
                module.warn("One of your CIDR addresses ({0}) has host bits set. To get rid of this warning, "
                            "check the network mask and make sure that only network bits are set: {1}.".format(
                                cidr_ip, ip))
        except ValueError:
            # to_subnet throws a ValueError on IPv6 networks, so we should be working with v6 if we get here
            try:
                # NOTE: the isinstance() result is deliberately discarded;
                # ip_network() raising ValueError is what signals a bad value.
                isinstance(ip_network(to_text(cidr_ip)), IPv6Network)
                ip = cidr_ip
            except ValueError:
                # If a host bit is set on something other than a /128, IPv6Network will throw a ValueError
                # The ipv6_cidr in this case probably looks like "2001:DB8:A0B:12F0::1/64" and we just want the network bits
                ip6 = to_ipv6_subnet(split_addr[0]) + "/" + split_addr[1]
                if ip6 != cidr_ip:
                    module.warn("One of your IPv6 CIDR addresses ({0}) has host bits set. To get rid of this warning, "
                                "check the network mask and make sure that only network bits are set: {1}.".format(cidr_ip, ip6))
                return ip6
        return ip
    return cidr_ip
def update_tags(client, module, group_id, current_tags, tags, purge_tags):
    """Reconcile the group's tags with *tags*.

    Returns True when any tag would be added, changed, or deleted (also in
    check mode, where no API calls are made).
    """
    tags_need_modify, tags_to_delete = compare_aws_tags(current_tags, tags, purge_tags)

    if not module.check_mode:
        if tags_to_delete:
            try:
                client.delete_tags(Resources=[group_id], Tags=[{'Key': tag} for tag in tags_to_delete])
            except (BotoCoreError, ClientError) as e:
                module.fail_json_aws(e, msg="Unable to delete tags {0}".format(tags_to_delete))

        # Add/update tags
        if tags_need_modify:
            try:
                client.create_tags(Resources=[group_id], Tags=ansible_dict_to_boto3_tag_list(tags_need_modify))
            except (BotoCoreError, ClientError) as e:
                # Bugfix: previously called module.fail_json(e, ...), which
                # passes the exception as fail_json's first positional argument
                # and drops the AWS error details; fail_json_aws matches the
                # delete path above.
                module.fail_json_aws(e, msg="Unable to add tags {0}".format(tags_need_modify))

    return bool(tags_need_modify or tags_to_delete)
def update_rule_descriptions(module, group_id, present_ingress, named_tuple_ingress_list, present_egress, named_tuple_egress_list):
    """Update descriptions of rules that already exist but whose description changed.

    NOTE: mutates named_tuple_ingress_list / named_tuple_egress_list in place,
    removing the rules whose descriptions are handled here so the caller does
    not re-authorize them.  Returns True when any description was pushed.
    """
    changed = False
    client = module.client('ec2')
    ingress_needs_desc_update = []
    egress_needs_desc_update = []

    for present_rule in present_egress:
        # Same rule (ignoring description) with a differing description.
        needs_update = [r for r in named_tuple_egress_list if rule_cmp(r, present_rule) and r.description != present_rule.description]
        for r in needs_update:
            named_tuple_egress_list.remove(r)
        egress_needs_desc_update.extend(needs_update)
    for present_rule in present_ingress:
        needs_update = [r for r in named_tuple_ingress_list if rule_cmp(r, present_rule) and r.description != present_rule.description]
        for r in needs_update:
            named_tuple_ingress_list.remove(r)
        ingress_needs_desc_update.extend(needs_update)

    if ingress_needs_desc_update:
        update_rules_description(module, client, 'in', group_id, rules_to_permissions(ingress_needs_desc_update))
        changed |= True
    if egress_needs_desc_update:
        update_rules_description(module, client, 'out', group_id, rules_to_permissions(egress_needs_desc_update))
        changed |= True
    return changed
def create_security_group(client, module, name, description, vpc_id):
    """Create the security group and return its described state.

    Returns None in check mode (nothing is created).
    """
    if not module.check_mode:
        params = dict(GroupName=name, Description=description)
        if vpc_id:
            params['VpcId'] = vpc_id
        try:
            group = client.create_security_group(**params)
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg="Unable to create security group")
        # When a group is created, an egress_rule ALLOW ALL
        # to 0.0.0.0/0 is added automatically but it's not
        # reflected in the object returned by the AWS API
        # call. We re-read the group for getting an updated object
        # amazon sometimes takes a couple seconds to update the security group so wait till it exists
        while True:
            sleep(3)
            group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
            # VPC groups must show their default egress rule before we proceed.
            if group.get('VpcId') and not group.get('IpPermissionsEgress'):
                pass
            else:
                break
        return group
    return None
def wait_for_rule_propagation(module, group, desired_ingress, desired_egress, purge_ingress, purge_egress):
    """Poll describe_security_groups until the group's rules match the desired set.

    Warns (without failing) when the rules have not converged after the retry
    budget; returns the last observed group dict.
    """
    group_id = group['GroupId']
    tries = 6

    def await_rules(group, desired_rules, purge, rule_key):
        # Poll one direction (rule_key is 'IpPermissions' or 'IpPermissionsEgress').
        for i in range(tries):
            current_rules = set(sum([list(rule_from_group_permission(p)) for p in group[rule_key]], []))
            if purge and len(current_rules ^ set(desired_rules)) == 0:
                return group
            elif purge:
                conflicts = current_rules ^ set(desired_rules)
                # For cases where set comparison is equivalent, but invalid port/proto exist
                for a, b in itertools.combinations(conflicts, 2):
                    if rule_cmp(a, b):
                        conflicts.discard(a)
                        conflicts.discard(b)
                if not len(conflicts):
                    return group
            elif current_rules.issuperset(desired_rules) and not purge:
                return group
            sleep(10)
            group = get_security_groups_with_backoff(module.client('ec2'), GroupIds=[group_id])['SecurityGroups'][0]
        module.warn("Ran out of time waiting for {0} {1}. Current: {2}, Desired: {3}".format(group_id, rule_key, current_rules, desired_rules))
        return group

    group = get_security_groups_with_backoff(module.client('ec2'), GroupIds=[group_id])['SecurityGroups'][0]
    # Egress rules only exist for VPC groups, and only when the user asked
    # for egress management at all.
    if 'VpcId' in group and module.params.get('rules_egress') is not None:
        group = await_rules(group, desired_egress, purge_egress, 'IpPermissionsEgress')
    return await_rules(group, desired_ingress, purge_ingress, 'IpPermissions')
def group_exists(client, module, vpc_id, group_id, name):
    """Look up the managed group by id and/or name.

    Returns (matched_group_or_None, lookup_dict) where lookup_dict maps every
    known group id AND group name to its description dict (VPC-local names win
    over same-named groups in other VPCs).
    """
    params = {'Filters': []}
    if group_id:
        params['GroupIds'] = [group_id]
    if name:
        # Add name to filters rather than params['GroupNames']
        # because params['GroupNames'] only checks the default vpc if no vpc is provided
        params['Filters'].append({'Name': 'group-name', 'Values': [name]})
    if vpc_id:
        params['Filters'].append({'Name': 'vpc-id', 'Values': [vpc_id]})
    # Don't filter by description to maintain backwards compatibility
    try:
        security_groups = sg_exists_with_backoff(client, **params).get('SecurityGroups', [])
        all_groups = get_security_groups_with_backoff(client).get('SecurityGroups', [])
    except (BotoCoreError, ClientError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, msg="Error in describe_security_groups")

    if security_groups:
        groups = dict((group['GroupId'], group) for group in all_groups)
        groups.update(dict((group['GroupName'], group) for group in all_groups))
        if vpc_id:
            vpc_wins = dict((group['GroupName'], group) for group in all_groups if group.get('VpcId') and group['VpcId'] == vpc_id)
            groups.update(vpc_wins)
        # maintain backwards compatibility by using the last matching group
        return security_groups[-1], groups
    return None, {}
def verify_rules_with_descriptions_permitted(client, module, rules, rules_egress):
    """Fail when rule descriptions are requested but botocore cannot set them.

    Support is detected via the presence of the
    update_security_group_rule_descriptions_egress client method
    (botocore >= 1.7.2).
    """
    if not hasattr(client, "update_security_group_rule_descriptions_egress"):
        # Bugfix: the previous expression
        #   rules if rules else [] + rules_egress if rules_egress else []
        # parsed as `rules if rules else (([] + rules_egress) if ... else [])`
        # due to conditional-expression precedence, so egress descriptions
        # were never checked whenever any ingress rules were supplied.
        all_rules = (rules or []) + (rules_egress or [])
        if any('rule_desc' in rule for rule in all_rules):
            module.fail_json(msg="Using rule descriptions requires botocore version >= 1.7.2.")
def get_diff_final_resource(client, module, security_group):
    """Predict the post-change state of the security group for ``--diff`` output.

    Combines the group's current describe result with the module parameters
    (rules, tags, purge flags) to build the 'after' snapshot without calling
    any mutating API.
    """
    def get_account_id(security_group, module):
        # Owner of the group; falls back to the caller's STS account, and
        # degrades to an explanatory string when STS cannot be queried.
        try:
            owner_id = security_group.get('owner_id', module.client('sts').get_caller_identity()['Account'])
        except (BotoCoreError, ClientError) as e:
            owner_id = "Unable to determine owner_id: {0}".format(to_text(e))
        return owner_id

    def get_final_tags(security_group_tags, specified_tags, purge_tags):
        # Predict the tag set after applying specified_tags with/without purging.
        if specified_tags is None:
            return security_group_tags
        tags_need_modify, tags_to_delete = compare_aws_tags(security_group_tags, specified_tags, purge_tags)
        end_result_tags = dict((k, v) for k, v in specified_tags.items() if k not in tags_to_delete)
        end_result_tags.update(dict((k, v) for k, v in security_group_tags.items() if k not in tags_to_delete))
        end_result_tags.update(tags_need_modify)
        return end_result_tags

    def get_final_rules(client, module, security_group_rules, specified_rules, purge_rules):
        # Predict the rule list after applying specified_rules, normalized to
        # the snake_case shape of a describe result.
        if specified_rules is None:
            return security_group_rules
        if purge_rules:
            final_rules = []
        else:
            final_rules = list(security_group_rules)
        specified_rules = flatten_nested_targets(module, deepcopy(specified_rules))
        for rule in specified_rules:
            format_rule = {
                'from_port': None, 'to_port': None, 'ip_protocol': rule.get('proto', 'tcp'),
                'ip_ranges': [], 'ipv6_ranges': [], 'prefix_list_ids': [], 'user_id_group_pairs': []
            }
            if rule.get('proto', 'tcp') in ('all', '-1', -1):
                # 'all traffic' rules carry no port range at all.
                format_rule['ip_protocol'] = '-1'
                format_rule.pop('from_port')
                format_rule.pop('to_port')
            elif rule.get('ports'):
                # Normalize scalar ports and 'from-to' range strings.
                if rule.get('ports') and (isinstance(rule['ports'], string_types) or isinstance(rule['ports'], int)):
                    rule['ports'] = [rule['ports']]
                for port in rule.get('ports'):
                    if isinstance(port, string_types) and '-' in port:
                        format_rule['from_port'], format_rule['to_port'] = port.split('-')
                    else:
                        format_rule['from_port'] = format_rule['to_port'] = port
            elif rule.get('from_port') or rule.get('to_port'):
                # Fill whichever bound is missing from the other one.
                format_rule['from_port'] = rule.get('from_port', rule.get('to_port'))
                format_rule['to_port'] = rule.get('to_port', rule.get('from_port'))
            for source_type in ('cidr_ip', 'cidr_ipv6', 'prefix_list_id'):
                if rule.get(source_type):
                    rule_key = {'cidr_ip': 'ip_ranges', 'cidr_ipv6': 'ipv6_ranges', 'prefix_list_id': 'prefix_list_ids'}.get(source_type)
                    if rule.get('rule_desc'):
                        format_rule[rule_key] = [{source_type: rule[source_type], 'description': rule['rule_desc']}]
                    else:
                        if not isinstance(rule[source_type], list):
                            rule[source_type] = [rule[source_type]]
                        format_rule[rule_key] = [{source_type: target} for target in rule[source_type]]
            if rule.get('group_id') or rule.get('group_name'):
                # Resolve referenced security groups so the diff shows the
                # same fields a describe result would.
                rule_sg = camel_dict_to_snake_dict(group_exists(client, module, module.params['vpc_id'], rule.get('group_id'), rule.get('group_name'))[0])
                format_rule['user_id_group_pairs'] = [{
                    'description': rule_sg.get('description', rule_sg.get('group_desc')),
                    'group_id': rule_sg.get('group_id', rule.get('group_id')),
                    'group_name': rule_sg.get('group_name', rule.get('group_name')),
                    'peering_status': rule_sg.get('peering_status'),
                    'user_id': rule_sg.get('user_id', get_account_id(security_group, module)),
                    'vpc_id': rule_sg.get('vpc_id', module.params['vpc_id']),
                    'vpc_peering_connection_id': rule_sg.get('vpc_peering_connection_id')
                }]
                # Drop keys that could not be resolved so the diff stays clean.
                for k, v in list(format_rule['user_id_group_pairs'][0].items()):
                    if v is None:
                        format_rule['user_id_group_pairs'][0].pop(k)
            final_rules.append(format_rule)
        # Order final rules consistently
        final_rules.sort(key=get_ip_permissions_sort_key)
        return final_rules

    security_group_ingress = security_group.get('ip_permissions', [])
    specified_ingress = module.params['rules']
    purge_ingress = module.params['purge_rules']
    security_group_egress = security_group.get('ip_permissions_egress', [])
    specified_egress = module.params['rules_egress']
    purge_egress = module.params['purge_rules_egress']
    return {
        'description': module.params['description'],
        'group_id': security_group.get('group_id', 'sg-xxxxxxxx'),
        'group_name': security_group.get('group_name', module.params['name']),
        'ip_permissions': get_final_rules(client, module, security_group_ingress, specified_ingress, purge_ingress),
        'ip_permissions_egress': get_final_rules(client, module, security_group_egress, specified_egress, purge_egress),
        'owner_id': get_account_id(security_group, module),
        'tags': get_final_tags(security_group.get('tags', {}), module.params['tags'], module.params['purge_tags']),
        'vpc_id': security_group.get('vpc_id', module.params['vpc_id'])}
def flatten_nested_targets(module, rules):
    """Flatten nested lists of CIDR targets inside each rule, in place.

    Returns the same ``rules`` object (or None when None was passed).
    """
    def _walk(targets):
        # Depth-first walk yielding only string targets.
        for item in targets:
            if isinstance(item, list):
                for nested in _walk(item):
                    yield nested
            elif isinstance(item, string_types):
                yield item

    if rules is None:
        return rules
    for rule in rules:
        # Only the first list-valued target key is flattened, cidr_ip first.
        for key in ('cidr_ip', 'cidr_ipv6'):
            if isinstance(rule.get(key), list):
                rule[key] = list(_walk(rule[key]))
                break
    return rules
def get_rule_sort_key(dicts):
    """Sort key for a single rule target: the first truthy CIDR/prefix/group value."""
    for key in ('cidr_ip', 'cidr_ipv6', 'prefix_list_id', 'group_id'):
        value = dicts.get(key)
        if value:
            return value
    return None
def get_ip_permissions_sort_key(rule):
    """Sort key for a permission dict.

    Sorts the first non-empty target list in place, then returns the first
    target's identifying value; None when the rule has no targets.
    """
    target_keys = (
        ('ip_ranges', 'cidr_ip'),
        ('ipv6_ranges', 'cidr_ipv6'),
        ('prefix_list_ids', 'prefix_list_id'),
        ('user_id_group_pairs', 'group_id'),
    )
    for list_key, value_key in target_keys:
        targets = rule.get(list_key)
        if targets:
            # In-place sort also stabilizes the rule's own target ordering.
            targets.sort(key=get_rule_sort_key)
            return targets[0][value_key]
    return None
def main():
    """Entry point: create, update or delete an EC2 security group."""
    argument_spec = dict(
        name=dict(),
        group_id=dict(),
        description=dict(),
        vpc_id=dict(),
        rules=dict(type='list'),
        rules_egress=dict(type='list'),
        state=dict(default='present', type='str', choices=['present', 'absent']),
        purge_rules=dict(default=True, required=False, type='bool'),
        purge_rules_egress=dict(default=True, required=False, type='bool'),
        tags=dict(required=False, type='dict', aliases=['resource_tags']),
        purge_tags=dict(default=True, required=False, type='bool')
    )
    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_one_of=[['name', 'group_id']],
        required_if=[['state', 'present', ['name']]],
    )

    name = module.params['name']
    group_id = module.params['group_id']
    description = module.params['description']
    vpc_id = module.params['vpc_id']
    # Normalize user-supplied rules: flatten nested target lists, expand port
    # lists and multi-target rules, then drop duplicates.
    rules = flatten_nested_targets(module, deepcopy(module.params['rules']))
    rules_egress = flatten_nested_targets(module, deepcopy(module.params['rules_egress']))
    rules = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(rules)))
    rules_egress = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(rules_egress)))
    state = module.params.get('state')
    purge_rules = module.params['purge_rules']
    purge_rules_egress = module.params['purge_rules_egress']
    tags = module.params['tags']
    purge_tags = module.params['purge_tags']

    if state == 'present' and not description:
        module.fail_json(msg='Must provide description when state is present.')

    changed = False
    client = module.client('ec2')

    verify_rules_with_descriptions_permitted(client, module, rules, rules_egress)
    group, groups = group_exists(client, module, vpc_id, group_id, name)
    group_created_new = not bool(group)

    global current_account_id
    current_account_id = get_aws_account_id(module)
    before = {}
    after = {}

    # Ensure requested group is absent
    if state == 'absent':
        if group:
            # found a match, delete it
            before = camel_dict_to_snake_dict(group, ignore_list=['Tags'])
            before['tags'] = boto3_tag_list_to_ansible_dict(before.get('tags', []))
            try:
                if not module.check_mode:
                    client.delete_security_group(GroupId=group['GroupId'])
            except (BotoCoreError, ClientError) as e:
                module.fail_json_aws(e, msg="Unable to delete security group '%s'" % group)
            else:
                group = None
                changed = True
        else:
            # no match found, no changes required
            pass

    # Ensure requested group is present
    elif state == 'present':
        if group:
            # existing group
            before = camel_dict_to_snake_dict(group, ignore_list=['Tags'])
            before['tags'] = boto3_tag_list_to_ansible_dict(before.get('tags', []))
            if group['Description'] != description:
                module.warn("Group description does not match existing group. Descriptions cannot be changed without deleting "
                            "and re-creating the security group. Try using state=absent to delete, then rerunning this task.")
        else:
            # no match found, create it
            group = create_security_group(client, module, name, description, vpc_id)
            changed = True

        if tags is not None and group is not None:
            current_tags = boto3_tag_list_to_ansible_dict(group.get('Tags', []))
            changed |= update_tags(client, module, group['GroupId'], current_tags, tags, purge_tags)

    if group:
        named_tuple_ingress_list = []
        named_tuple_egress_list = []
        current_ingress = sum([list(rule_from_group_permission(p)) for p in group['IpPermissions']], [])
        current_egress = sum([list(rule_from_group_permission(p)) for p in group['IpPermissionsEgress']], [])

        for new_rules, rule_type, named_tuple_rule_list in [(rules, 'in', named_tuple_ingress_list),
                                                            (rules_egress, 'out', named_tuple_egress_list)]:
            if new_rules is None:
                continue
            for rule in new_rules:
                target_type, target, target_group_created = get_target_from_rule(
                    module, client, rule, name, group, groups, vpc_id)
                changed |= target_group_created

                # 'all traffic' rules use protocol '-1' and no port range.
                if rule.get('proto', 'tcp') in ('all', '-1', -1):
                    rule['proto'] = '-1'
                    rule['from_port'] = None
                    rule['to_port'] = None
                try:
                    int(rule.get('proto', 'tcp'))
                    # Numeric protocol specs also carry no port range.
                    rule['proto'] = to_text(rule.get('proto', 'tcp'))
                    rule['from_port'] = None
                    rule['to_port'] = None
                except ValueError:
                    # rule does not use numeric protocol spec
                    pass
                named_tuple_rule_list.append(
                    Rule(
                        port_range=(rule['from_port'], rule['to_port']),
                        protocol=to_text(rule.get('proto', 'tcp')),
                        target=target, target_type=target_type,
                        description=rule.get('rule_desc'),
                    )
                )

        if module.params.get('rules_egress') is None and 'VpcId' in group:
            # when no egress rules are specified and we're in a VPC,
            # we add in a default allow all out rule, which was the
            # default behavior before egress rules were added
            rule = Rule((None, None), '-1', '0.0.0.0/0', 'ipv4', None)
            if rule in current_egress:
                named_tuple_egress_list.append(rule)
            if rule not in current_egress:
                current_egress.append(rule)

        # Union of desired and current rules; purging decides what to revoke.
        present_ingress = list(set(named_tuple_ingress_list).union(set(current_ingress)))
        present_egress = list(set(named_tuple_egress_list).union(set(current_egress)))

        if purge_rules:
            revoke_ingress = []
            for p in present_ingress:
                if not any([rule_cmp(p, b) for b in named_tuple_ingress_list]):
                    revoke_ingress.append(to_permission(p))
        else:
            revoke_ingress = []
        if purge_rules_egress and module.params.get('rules_egress') is not None:
            # Bug fix: this comparison previously used "is []", an identity
            # test against a brand-new list object that is always False, so
            # the explicit-empty-egress branch was unreachable.
            if module.params.get('rules_egress') == []:
                # Explicitly empty egress rules: revoke everything except the
                # default allow-all-out rule.
                revoke_egress = [
                    to_permission(r) for r in set(present_egress) - set(named_tuple_egress_list)
                    if r != Rule((None, None), '-1', '0.0.0.0/0', 'ipv4', None)
                ]
            else:
                revoke_egress = []
                for p in present_egress:
                    if not any([rule_cmp(p, b) for b in named_tuple_egress_list]):
                        revoke_egress.append(to_permission(p))
        else:
            revoke_egress = []

        # named_tuple_ingress_list and named_tuple_egress_list got updated by
        # method update_rule_descriptions, deep copy these two lists to new
        # variables for the record of the 'desired' ingress and egress sg permissions
        desired_ingress = deepcopy(named_tuple_ingress_list)
        desired_egress = deepcopy(named_tuple_egress_list)

        changed |= update_rule_descriptions(module, group['GroupId'], present_ingress, named_tuple_ingress_list, present_egress, named_tuple_egress_list)

        # Revoke old rules
        changed |= remove_old_permissions(client, module, revoke_ingress, revoke_egress, group['GroupId'])

        # Authorize new rules. (Earlier duplicate list-comprehension
        # assignments of these two variables were dead code and were removed.)
        new_ingress_permissions = rules_to_permissions(set(named_tuple_ingress_list) - set(current_ingress))
        new_egress_permissions = rules_to_permissions(set(named_tuple_egress_list) - set(current_egress))
        changed |= add_new_permissions(client, module, new_ingress_permissions, new_egress_permissions, group['GroupId'])

        if group_created_new and module.params.get('rules') is None and module.params.get('rules_egress') is None:
            # A new group with no rules provided is already being awaited.
            # When it is created we wait for the default egress rule to be added by AWS
            security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
        elif changed and not module.check_mode:
            # keep pulling until current security group rules match the desired ingress and egress rules
            security_group = wait_for_rule_propagation(module, group, desired_ingress, desired_egress, purge_rules, purge_rules_egress)
        else:
            security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
        security_group = camel_dict_to_snake_dict(security_group, ignore_list=['Tags'])
        security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group.get('tags', []))

    else:
        security_group = {'group_id': None}

    if module._diff:
        if module.params['state'] == 'present':
            after = get_diff_final_resource(client, module, security_group)
            if before.get('ip_permissions'):
                before['ip_permissions'].sort(key=get_ip_permissions_sort_key)
        security_group['diff'] = [{'before': before, 'after': after}]

    module.exit_json(changed=changed, **security_group)
if __name__ == '__main__':
main()
| gpl-3.0 |
pylover/network-interfaces | network_interfaces/stanza.py | 1 | 4473 | # -*- coding: utf-8 -*-
import re
from .helpers import clean_list, list_hash
__author__ = 'vahid'
class Stanza(object):
    """Base class for a single stanza of an ``/etc/network/interfaces`` file.

    A stanza is identified by its header cells (e.g. ``iface eth0 inet static``)
    plus the file it was read from.
    """
    _type = None       # header keyword handled by a subclass (e.g. 'iface')
    _filename = None   # file the stanza was parsed from
    _headers = None    # list of header cells

    def __init__(self, filename, *headers):
        self._filename = filename
        self._headers = list(headers)

    def __repr__(self):
        return ' '.join(self._headers)

    def _headers_hash(self):
        # XOR-combine the hashes of all header cells.
        result = 0
        for h in self._headers:
            result ^= h.__hash__()
        return result

    def __hash__(self):
        return \
            self._type.__hash__() ^ \
            self._headers_hash()

    @classmethod
    def is_stanza(cls, s):
        """Return a truthy match object when *s* starts a new stanza."""
        return re.match(r'^(iface|mapping|auto|allow-|source).*', s)

    @classmethod
    def subclasses(cls):
        """Return all (recursive) subclasses of this class."""
        return cls.__subclasses__() + [g for s in cls.__subclasses__()
                                       for g in s.subclasses()]

    @classmethod
    def create(cls, header, filename):
        """Instantiate the most specific Stanza subclass for *header*."""
        # Bug fix: use a raw string for the split pattern; '\s' in a plain
        # literal is an invalid escape sequence (SyntaxWarning on modern
        # Python, slated to become an error).
        cells = re.split(r'\s+', header)
        cells = clean_list(cells)
        stanza_type = cells[0]
        subclasses = cls.subclasses()

        # Checking for exact match
        for subclass in subclasses:
            if subclass._type and stanza_type == subclass._type:
                return subclass(filename, *cells)

        # Partial start match
        for subclass in subclasses:
            if subclass._type and stanza_type.startswith(subclass._type):
                return subclass(filename, *cells)

    def validate(self, allow_correction=False):
        pass
class MultilineStanza(Stanza):
    """A stanza that carries extra indented entry lines (e.g. ``iface`` options).

    Entries are stored as lists of cells in ``self._items`` and exposed with
    both dict-style and attribute-style access; a ``_`` in a lookup key maps
    to ``-`` in the stored entry name (``dns_nameservers`` -> ``dns-nameservers``).
    """
    _items = None  # list of entry cell-lists, e.g. [['address', '10.0.0.1']]

    def __init__(self, *args, **kwargs):
        super(MultilineStanza, self).__init__(*args, **kwargs)
        self._items = []

    def __getattr__(self, item):
        # Unknown attributes fall back to entry lookup, then to the normal
        # attribute machinery so real attribute errors still surface.
        try:
            return self[item]
        except (KeyError, IndexError):
            return super(MultilineStanza, self).__getattribute__(item)

    def __setattr__(self, key, value):
        # Class-level attributes are set normally; anything else becomes an entry.
        if hasattr(self.__class__, key):
            super(Stanza, self).__setattr__(key, value)
        else:
            self[key] = value

    def __delattr__(self, item):
        if hasattr(self.__class__, item):
            super(Stanza, self).__delattr__(item)
        else:
            del self[item]

    def __contains__(self, item):
        return self.__getitem_internal(item) is not None

    def __getitem__(self, item):
        if not isinstance(item, str):
            raise TypeError(type(item))
        result = self.__getitem_internal(item)
        if not result:
            raise KeyError(item)
        return ' '.join(result[1:])

    def __setitem__(self, key, value):
        if not isinstance(key, str):
            raise TypeError(type(key))
        # Bug fix: raw string pattern; '\s' in a plain literal is an invalid
        # escape sequence on modern Python.
        values = re.split(r'\s', value)
        cells = self.__getitem_internal(key)
        if not cells:
            self.add_entry(' '.join([key] + values))
        else:
            # Replace the existing entry's value cells in place.
            del cells[1:]
            cells += values

    def __delitem__(self, item):
        if not isinstance(item, str):
            raise TypeError(type(item))
        self.__delitem_internal(item)

    def __repr__(self):
        items = [(i[0], ' '.join(i[1:]).strip()) for i in self._items]
        return '%s\n%s\n' % (
            super(MultilineStanza, self).__repr__(),
            '\n'.join([' %s %s' % (i[0], i[1]) for i in items if i[1]]))

    def __hash__(self):
        return super(MultilineStanza, self).__hash__() ^ self._items_hash()

    def update(self, other):
        """Bulk-assign entries from a dict, mapping ``_`` to ``-`` in keys."""
        if isinstance(other, dict):
            for k, v in other.items():
                self[k.replace('_', '-')] = v
        else:
            raise ValueError('A dict is required, but %s was passed.' % type(other))

    def _items_hash(self):
        # XOR-combine the hashes of all entry cell-lists.
        result = 0
        for i in self._items:
            result ^= list_hash(i)
        return result

    def add_entry(self, l):
        """Parse a raw entry line into cells and append it if not already present."""
        cells = re.split(r'\s+', l)
        cells = clean_list(cells)
        if cells and cells not in self._items:
            self._items.append(cells)

    def __getitem_internal(self, item):
        key = item.replace('_', '-')
        for i in self._items:
            if i[0] == key:
                return i
        return None

    def __delitem_internal(self, item):
        key = item.replace('_', '-')
        for i in self._items:
            if i[0] == key:
                self._items.remove(i)
                return
| gpl-3.0 |
Venturi/cms | env/lib/python2.7/site-packages/unidecode/x0d2.py | 253 | 4724 | data = (
'toels', # 0x00
'toelt', # 0x01
'toelp', # 0x02
'toelh', # 0x03
'toem', # 0x04
'toeb', # 0x05
'toebs', # 0x06
'toes', # 0x07
'toess', # 0x08
'toeng', # 0x09
'toej', # 0x0a
'toec', # 0x0b
'toek', # 0x0c
'toet', # 0x0d
'toep', # 0x0e
'toeh', # 0x0f
'tyo', # 0x10
'tyog', # 0x11
'tyogg', # 0x12
'tyogs', # 0x13
'tyon', # 0x14
'tyonj', # 0x15
'tyonh', # 0x16
'tyod', # 0x17
'tyol', # 0x18
'tyolg', # 0x19
'tyolm', # 0x1a
'tyolb', # 0x1b
'tyols', # 0x1c
'tyolt', # 0x1d
'tyolp', # 0x1e
'tyolh', # 0x1f
'tyom', # 0x20
'tyob', # 0x21
'tyobs', # 0x22
'tyos', # 0x23
'tyoss', # 0x24
'tyong', # 0x25
'tyoj', # 0x26
'tyoc', # 0x27
'tyok', # 0x28
'tyot', # 0x29
'tyop', # 0x2a
'tyoh', # 0x2b
'tu', # 0x2c
'tug', # 0x2d
'tugg', # 0x2e
'tugs', # 0x2f
'tun', # 0x30
'tunj', # 0x31
'tunh', # 0x32
'tud', # 0x33
'tul', # 0x34
'tulg', # 0x35
'tulm', # 0x36
'tulb', # 0x37
'tuls', # 0x38
'tult', # 0x39
'tulp', # 0x3a
'tulh', # 0x3b
'tum', # 0x3c
'tub', # 0x3d
'tubs', # 0x3e
'tus', # 0x3f
'tuss', # 0x40
'tung', # 0x41
'tuj', # 0x42
'tuc', # 0x43
'tuk', # 0x44
'tut', # 0x45
'tup', # 0x46
'tuh', # 0x47
'tweo', # 0x48
'tweog', # 0x49
'tweogg', # 0x4a
'tweogs', # 0x4b
'tweon', # 0x4c
'tweonj', # 0x4d
'tweonh', # 0x4e
'tweod', # 0x4f
'tweol', # 0x50
'tweolg', # 0x51
'tweolm', # 0x52
'tweolb', # 0x53
'tweols', # 0x54
'tweolt', # 0x55
'tweolp', # 0x56
'tweolh', # 0x57
'tweom', # 0x58
'tweob', # 0x59
'tweobs', # 0x5a
'tweos', # 0x5b
'tweoss', # 0x5c
'tweong', # 0x5d
'tweoj', # 0x5e
'tweoc', # 0x5f
'tweok', # 0x60
'tweot', # 0x61
'tweop', # 0x62
'tweoh', # 0x63
'twe', # 0x64
'tweg', # 0x65
'twegg', # 0x66
'twegs', # 0x67
'twen', # 0x68
'twenj', # 0x69
'twenh', # 0x6a
'twed', # 0x6b
'twel', # 0x6c
'twelg', # 0x6d
'twelm', # 0x6e
'twelb', # 0x6f
'twels', # 0x70
'twelt', # 0x71
'twelp', # 0x72
'twelh', # 0x73
'twem', # 0x74
'tweb', # 0x75
'twebs', # 0x76
'twes', # 0x77
'twess', # 0x78
'tweng', # 0x79
'twej', # 0x7a
'twec', # 0x7b
'twek', # 0x7c
'twet', # 0x7d
'twep', # 0x7e
'tweh', # 0x7f
'twi', # 0x80
'twig', # 0x81
'twigg', # 0x82
'twigs', # 0x83
'twin', # 0x84
'twinj', # 0x85
'twinh', # 0x86
'twid', # 0x87
'twil', # 0x88
'twilg', # 0x89
'twilm', # 0x8a
'twilb', # 0x8b
'twils', # 0x8c
'twilt', # 0x8d
'twilp', # 0x8e
'twilh', # 0x8f
'twim', # 0x90
'twib', # 0x91
'twibs', # 0x92
'twis', # 0x93
'twiss', # 0x94
'twing', # 0x95
'twij', # 0x96
'twic', # 0x97
'twik', # 0x98
'twit', # 0x99
'twip', # 0x9a
'twih', # 0x9b
'tyu', # 0x9c
'tyug', # 0x9d
'tyugg', # 0x9e
'tyugs', # 0x9f
'tyun', # 0xa0
'tyunj', # 0xa1
'tyunh', # 0xa2
'tyud', # 0xa3
'tyul', # 0xa4
'tyulg', # 0xa5
'tyulm', # 0xa6
'tyulb', # 0xa7
'tyuls', # 0xa8
'tyult', # 0xa9
'tyulp', # 0xaa
'tyulh', # 0xab
'tyum', # 0xac
'tyub', # 0xad
'tyubs', # 0xae
'tyus', # 0xaf
'tyuss', # 0xb0
'tyung', # 0xb1
'tyuj', # 0xb2
'tyuc', # 0xb3
'tyuk', # 0xb4
'tyut', # 0xb5
'tyup', # 0xb6
'tyuh', # 0xb7
'teu', # 0xb8
'teug', # 0xb9
'teugg', # 0xba
'teugs', # 0xbb
'teun', # 0xbc
'teunj', # 0xbd
'teunh', # 0xbe
'teud', # 0xbf
'teul', # 0xc0
'teulg', # 0xc1
'teulm', # 0xc2
'teulb', # 0xc3
'teuls', # 0xc4
'teult', # 0xc5
'teulp', # 0xc6
'teulh', # 0xc7
'teum', # 0xc8
'teub', # 0xc9
'teubs', # 0xca
'teus', # 0xcb
'teuss', # 0xcc
'teung', # 0xcd
'teuj', # 0xce
'teuc', # 0xcf
'teuk', # 0xd0
'teut', # 0xd1
'teup', # 0xd2
'teuh', # 0xd3
'tyi', # 0xd4
'tyig', # 0xd5
'tyigg', # 0xd6
'tyigs', # 0xd7
'tyin', # 0xd8
'tyinj', # 0xd9
'tyinh', # 0xda
'tyid', # 0xdb
'tyil', # 0xdc
'tyilg', # 0xdd
'tyilm', # 0xde
'tyilb', # 0xdf
'tyils', # 0xe0
'tyilt', # 0xe1
'tyilp', # 0xe2
'tyilh', # 0xe3
'tyim', # 0xe4
'tyib', # 0xe5
'tyibs', # 0xe6
'tyis', # 0xe7
'tyiss', # 0xe8
'tying', # 0xe9
'tyij', # 0xea
'tyic', # 0xeb
'tyik', # 0xec
'tyit', # 0xed
'tyip', # 0xee
'tyih', # 0xef
'ti', # 0xf0
'tig', # 0xf1
'tigg', # 0xf2
'tigs', # 0xf3
'tin', # 0xf4
'tinj', # 0xf5
'tinh', # 0xf6
'tid', # 0xf7
'til', # 0xf8
'tilg', # 0xf9
'tilm', # 0xfa
'tilb', # 0xfb
'tils', # 0xfc
'tilt', # 0xfd
'tilp', # 0xfe
'tilh', # 0xff
)
| gpl-2.0 |
realsaiko/odoo | addons/account_asset/account_asset.py | 32 | 28996 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
class account_asset_category(osv.osv):
    """Asset category: shared accounting configuration (accounts, journal) and
    the default depreciation method/schedule for the assets it groups."""
    _name = 'account.asset.category'
    _description = 'Asset category'

    _columns = {
        'name': fields.char('Name', required=True, select=1),
        'note': fields.text('Note'),
        'account_analytic_id': fields.many2one('account.analytic.account', 'Analytic account'),
        # Balance-sheet account holding the gross asset value.
        'account_asset_id': fields.many2one('account.account', 'Asset Account', required=True, domain=[('type','=','other')]),
        # Accumulated-depreciation account credited by each depreciation entry.
        'account_depreciation_id': fields.many2one('account.account', 'Depreciation Account', required=True, domain=[('type','=','other')]),
        # P&L account debited by each depreciation entry.
        'account_expense_depreciation_id': fields.many2one('account.account', 'Depr. Expense Account', required=True, domain=[('type','=','other')]),
        'journal_id': fields.many2one('account.journal', 'Journal', required=True),
        'company_id': fields.many2one('res.company', 'Company', required=True),
        'method': fields.selection([('linear','Linear'),('degressive','Degressive')], 'Computation Method', required=True, help="Choose the method to use to compute the amount of depreciation lines.\n"\
            " * Linear: Calculated on basis of: Gross Value / Number of Depreciations\n" \
            " * Degressive: Calculated on basis of: Residual Value * Degressive Factor"),
        'method_number': fields.integer('Number of Depreciations', help="The number of depreciations needed to depreciate your asset"),
        'method_period': fields.integer('Period Length', help="State here the time between 2 depreciations, in months", required=True),
        'method_progress_factor': fields.float('Degressive Factor'),
        'method_time': fields.selection([('number','Number of Depreciations'),('end','Ending Date')], 'Time Method', required=True,
                                  help="Choose the method to use to compute the dates and number of depreciation lines.\n"\
                                       " * Number of Depreciations: Fix the number of depreciation lines and the time between 2 depreciations.\n" \
                                       " * Ending Date: Choose the time between 2 depreciations and the date the depreciations won't go beyond."),
        'method_end': fields.date('Ending date'),
        'prorata':fields.boolean('Prorata Temporis', help='Indicates that the first depreciation entry for this asset have to be done from the purchase date instead of the first January'),
        'open_asset': fields.boolean('Skip Draft State', help="Check this if you want to automatically confirm the assets of this category when created by invoices."),
    }

    _defaults = {
        'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'account.asset.category', context=context),
        'method': 'linear',
        'method_number': 5,
        'method_time': 'number',
        'method_period': 12,
        'method_progress_factor': 0.3,
    }

    def onchange_account_asset(self, cr, uid, ids, account_asset_id, context=None):
        """Default the depreciation account to the selected asset account."""
        res = {'value':{}}
        if account_asset_id:
            res['value'] = {'account_depreciation_id': account_asset_id}
        return res
class account_asset_asset(osv.osv):
_name = 'account.asset.asset'
_description = 'Asset'
def unlink(self, cr, uid, ids, context=None):
    """Delete assets, refusing any that already carry posted depreciation entries."""
    records = self.browse(cr, uid, ids, context=context)
    if any(asset.account_move_line_ids for asset in records):
        raise osv.except_osv(_('Error!'), _('You cannot delete an asset that contains posted depreciation lines.'))
    return super(account_asset_asset, self).unlink(cr, uid, ids, context=context)
def _get_period(self, cr, uid, context=None):
    """Return the id of the current fiscal period, or False when none is found."""
    period_ids = self.pool.get('account.period').find(cr, uid, context=context)
    return period_ids[0] if period_ids else False
def _get_last_depreciation_date(self, cr, uid, ids, context=None):
    """
    @param id: ids of a account.asset.asset objects
    @return: Returns a dictionary of the effective dates of the last depreciation entry made for given asset ids. If there isn't any, return the purchase date of this asset
    """
    # One row per asset: the newest linked move-line date, falling back to
    # the asset's purchase date when no depreciation has been posted yet.
    cr.execute("""
        SELECT a.id as id, COALESCE(MAX(l.date),a.purchase_date) AS date
        FROM account_asset_asset a
        LEFT JOIN account_move_line l ON (l.asset_id = a.id)
        WHERE a.id IN %s
        GROUP BY a.id, a.purchase_date """, (tuple(ids),))
    return dict(cr.fetchall())
def _compute_board_amount(self, cr, uid, asset, i, residual_amount, amount_to_depr, undone_dotation_number, posted_depreciation_line_ids, total_days, depreciation_date, context=None):
    """Amount to depreciate for installment *i* of the schedule.

    The final installment always absorbs the remaining residual value so the
    board sums exactly; earlier installments follow the asset's linear or
    degressive method, optionally prorated over the first year.
    """
    if i == undone_dotation_number:
        # Last line: take whatever is left.
        return residual_amount
    depr_amount = 0
    if asset.method == 'linear':
        depr_amount = amount_to_depr / (undone_dotation_number - len(posted_depreciation_line_ids))
        if asset.prorata:
            depr_amount = amount_to_depr / asset.method_number
            remaining_days = total_days - float(depreciation_date.strftime('%j'))
            if i == 1:
                depr_amount = (amount_to_depr / asset.method_number) / total_days * remaining_days
            elif i == undone_dotation_number:
                depr_amount = (amount_to_depr / asset.method_number) / total_days * (total_days - remaining_days)
    elif asset.method == 'degressive':
        depr_amount = residual_amount * asset.method_progress_factor
        if asset.prorata:
            remaining_days = total_days - float(depreciation_date.strftime('%j'))
            if i == 1:
                depr_amount = (residual_amount * asset.method_progress_factor) / total_days * remaining_days
            elif i == undone_dotation_number:
                depr_amount = (residual_amount * asset.method_progress_factor) / total_days * (total_days - remaining_days)
    return depr_amount
def _compute_board_undone_dotation_nb(self, cr, uid, asset, depreciation_date, total_days, context=None):
    """Number of depreciation installments still to be generated for *asset*."""
    count = asset.method_number
    if asset.method_time == 'end':
        # Count how many method_period-month steps fit up to the end date.
        end_date = datetime.strptime(asset.method_end, '%Y-%m-%d')
        count = 0
        step_date = depreciation_date
        while step_date <= end_date:
            step_date = datetime(step_date.year, step_date.month, step_date.day) + relativedelta(months=+asset.method_period)
            count += 1
    if asset.prorata:
        # Prorata adds an extra, partial, first installment.
        count += 1
    return count
def compute_depreciation_board(self, cr, uid, ids, context=None):
    """(Re)generate the depreciation schedule for the given assets.

    Posted depreciation lines are kept; remaining draft lines (no move yet)
    are deleted and recomputed from the asset's residual value.
    """
    depreciation_lin_obj = self.pool.get('account.asset.depreciation.line')
    currency_obj = self.pool.get('res.currency')
    for asset in self.browse(cr, uid, ids, context=context):
        # Fully depreciated assets need no schedule.
        if asset.value_residual == 0.0:
            continue
        posted_depreciation_line_ids = depreciation_lin_obj.search(cr, uid, [('asset_id', '=', asset.id), ('move_check', '=', True)],order='depreciation_date desc')
        old_depreciation_line_ids = depreciation_lin_obj.search(cr, uid, [('asset_id', '=', asset.id), ('move_id', '=', False)])
        if old_depreciation_line_ids:
            # Drop draft lines; they will be regenerated below.
            depreciation_lin_obj.unlink(cr, uid, old_depreciation_line_ids, context=context)
        amount_to_depr = residual_amount = asset.value_residual
        if asset.prorata:
            depreciation_date = datetime.strptime(self._get_last_depreciation_date(cr, uid, [asset.id], context)[asset.id], '%Y-%m-%d')
        else:
            # depreciation_date = 1st January of purchase year
            purchase_date = datetime.strptime(asset.purchase_date, '%Y-%m-%d')
            #if we already have some previous validated entries, starting date isn't 1st January but last entry + method period
            if (len(posted_depreciation_line_ids)>0):
                last_depreciation_date = datetime.strptime(depreciation_lin_obj.browse(cr,uid,posted_depreciation_line_ids[0],context=context).depreciation_date, '%Y-%m-%d')
                depreciation_date = (last_depreciation_date+relativedelta(months=+asset.method_period))
            else:
                depreciation_date = datetime(purchase_date.year, 1, 1)
        day = depreciation_date.day
        month = depreciation_date.month
        year = depreciation_date.year
        # NOTE(review): this leap-year test (year % 4) mis-handles century
        # years; kept as-is since it matches the original behavior.
        total_days = (year % 4) and 365 or 366
        undone_dotation_number = self._compute_board_undone_dotation_nb(cr, uid, asset, depreciation_date, total_days, context=context)
        # Generate the remaining installments after the already-posted ones.
        for x in range(len(posted_depreciation_line_ids), undone_dotation_number):
            i = x + 1
            amount = self._compute_board_amount(cr, uid, asset, i, residual_amount, amount_to_depr, undone_dotation_number, posted_depreciation_line_ids, total_days, depreciation_date, context=context)
            company_currency = asset.company_id.currency_id.id
            current_currency = asset.currency_id.id
            # compute amount into company currency
            amount = currency_obj.compute(cr, uid, current_currency, company_currency, amount, context=context)
            residual_amount -= amount
            vals = {
                'amount': amount,
                'asset_id': asset.id,
                'sequence': i,
                'name': str(asset.id) +'/' + str(i),
                'remaining_value': residual_amount,
                'depreciated_value': (asset.purchase_value - asset.salvage_value) - (residual_amount + amount),
                'depreciation_date': depreciation_date.strftime('%Y-%m-%d'),
            }
            depreciation_lin_obj.create(cr, uid, vals, context=context)
            # Considering Depr. Period as months
            depreciation_date = (datetime(year, month, day) + relativedelta(months=+asset.method_period))
            day = depreciation_date.day
            month = depreciation_date.month
            year = depreciation_date.year
    return True
def validate(self, cr, uid, ids, context=None):
    """Confirm the assets by moving them to the 'open' state."""
    if context is None:
        context = {}
    return self.write(cr, uid, ids, {'state': 'open'}, context)
def set_to_close(self, cr, uid, ids, context=None):
    """Move the assets to the 'close' state."""
    vals = {'state': 'close'}
    return self.write(cr, uid, ids, vals, context=context)
def set_to_draft(self, cr, uid, ids, context=None):
    """Move the assets back to the 'draft' state."""
    vals = {'state': 'draft'}
    return self.write(cr, uid, ids, vals, context=context)
def _amount_residual(self, cr, uid, ids, name, args, context=None):
    """Function field: residual (not yet depreciated) value per asset.

    residual = gross purchase value - posted depreciation amounts - salvage value
    """
    # Sum the absolute posted amounts per asset in a single query.
    cr.execute("""SELECT
            l.asset_id as id, SUM(abs(l.debit-l.credit)) AS amount
        FROM
            account_move_line l
        WHERE
            l.asset_id IN %s GROUP BY l.asset_id """, (tuple(ids),))
    res=dict(cr.fetchall())
    for asset in self.browse(cr, uid, ids, context):
        res[asset.id] = asset.purchase_value - res.get(asset.id, 0.0) - asset.salvage_value
    # Assets with no move lines at all default to 0.0.
    for id in ids:
        res.setdefault(id, 0.0)
    return res
def onchange_company_id(self, cr, uid, ids, company_id=False, context=None):
    """On company change, propose that company's currency — or clear the
    currency when it is owned by a different company."""
    values = {}
    if company_id:
        company = self.pool.get('res.company').browse(cr, uid, company_id, context=context)
        currency = company.currency_id
        currency_owner = currency.company_id
        if currency_owner and currency_owner.id != company_id:
            values['currency_id'] = False
        else:
            values['currency_id'] = currency.id
    return {'value': values}
def onchange_purchase_salvage_value(self, cr, uid, ids, purchase_value, salvage_value, context=None):
    """Recompute the residual value when the gross or salvage value changes.

    The previous implementation computed the value inside a loop over
    ``self.browse(ids)`` even though the result never depended on the
    iterated record — so the onchange silently did nothing for unsaved
    records (empty ``ids``). Compute it once from the arguments instead.
    """
    val = {}
    if purchase_value or salvage_value:
        val['value_residual'] = purchase_value - salvage_value
    return {'value': val}
def _entry_count(self, cr, uid, ids, field_name, arg, context=None):
MoveLine = self.pool('account.move.line')
return {
asset_id: MoveLine.search_count(cr, uid, [('asset_id', '=', asset_id)], context=context)
for asset_id in ids
}
# Field definitions (OpenERP 7 / old-API style). Most fields are writable
# only while the asset is in 'draft'; afterwards they become read-only.
_columns = {
    # Link to the journal items generated by posting depreciation lines.
    'account_move_line_ids': fields.one2many('account.move.line', 'asset_id', 'Entries', readonly=True, states={'draft':[('readonly',False)]}),
    'entry_count': fields.function(_entry_count, string='# Asset Entries', type='integer'),
    'name': fields.char('Asset Name', required=True, readonly=True, states={'draft':[('readonly',False)]}),
    'code': fields.char('Reference', size=32, readonly=True, states={'draft':[('readonly',False)]}),
    'purchase_value': fields.float('Gross Value', required=True, readonly=True, states={'draft':[('readonly',False)]}),
    'currency_id': fields.many2one('res.currency','Currency',required=True, readonly=True, states={'draft':[('readonly',False)]}),
    'company_id': fields.many2one('res.company', 'Company', required=True, readonly=True, states={'draft':[('readonly',False)]}),
    'note': fields.text('Note'),
    'category_id': fields.many2one('account.asset.category', 'Asset Category', required=True, change_default=True, readonly=True, states={'draft':[('readonly',False)]}),
    # Assets can be organised as a tree; see _check_recursion constraint.
    'parent_id': fields.many2one('account.asset.asset', 'Parent Asset', readonly=True, states={'draft':[('readonly',False)]}),
    'child_ids': fields.one2many('account.asset.asset', 'parent_id', 'Children Assets', copy=True),
    'purchase_date': fields.date('Purchase Date', required=True, readonly=True, states={'draft':[('readonly',False)]}),
    # Workflow state: draft -> open (running) -> close.
    'state': fields.selection([('draft','Draft'),('open','Running'),('close','Close')], 'Status', required=True, copy=False,
        help="When an asset is created, the status is 'Draft'.\n" \
            "If the asset is confirmed, the status goes in 'Running' and the depreciation lines can be posted in the accounting.\n" \
            "You can manually close an asset when the depreciation is over. If the last line of depreciation is posted, the asset automatically goes in that status."),
    'active': fields.boolean('Active'),
    'partner_id': fields.many2one('res.partner', 'Partner', readonly=True, states={'draft':[('readonly',False)]}),
    # Depreciation computation settings (defaulted from the category; see
    # onchange_category_id).
    'method': fields.selection([('linear','Linear'),('degressive','Degressive')], 'Computation Method', required=True, readonly=True, states={'draft':[('readonly',False)]}, help="Choose the method to use to compute the amount of depreciation lines.\n"\
        " * Linear: Calculated on basis of: Gross Value / Number of Depreciations\n" \
        " * Degressive: Calculated on basis of: Residual Value * Degressive Factor"),
    'method_number': fields.integer('Number of Depreciations', readonly=True, states={'draft':[('readonly',False)]}, help="The number of depreciations needed to depreciate your asset"),
    'method_period': fields.integer('Number of Months in a Period', required=True, readonly=True, states={'draft':[('readonly',False)]}, help="The amount of time between two depreciations, in months"),
    'method_end': fields.date('Ending Date', readonly=True, states={'draft':[('readonly',False)]}),
    'method_progress_factor': fields.float('Degressive Factor', readonly=True, states={'draft':[('readonly',False)]}),
    # Computed by _amount_residual above.
    'value_residual': fields.function(_amount_residual, method=True, digits_compute=dp.get_precision('Account'), string='Residual Value'),
    'method_time': fields.selection([('number','Number of Depreciations'),('end','Ending Date')], 'Time Method', required=True, readonly=True, states={'draft':[('readonly',False)]},
        help="Choose the method to use to compute the dates and number of depreciation lines.\n"\
            " * Number of Depreciations: Fix the number of depreciation lines and the time between 2 depreciations.\n" \
            " * Ending Date: Choose the time between 2 depreciations and the date the depreciations won't go beyond."),
    # Only valid with method_time == 'number'; see _check_prorata.
    'prorata':fields.boolean('Prorata Temporis', readonly=True, states={'draft':[('readonly',False)]}, help='Indicates that the first depreciation entry for this asset have to be done from the purchase date instead of the first January'),
    'history_ids': fields.one2many('account.asset.history', 'asset_id', 'History', readonly=True),
    'depreciation_line_ids': fields.one2many('account.asset.depreciation.line', 'asset_id', 'Depreciation Lines', readonly=True, states={'draft':[('readonly',False)],'open':[('readonly',False)]}),
    'salvage_value': fields.float('Salvage Value', digits_compute=dp.get_precision('Account'), help="It is the amount you plan to have that you cannot depreciate.", readonly=True, states={'draft':[('readonly',False)]}),
}
# Default values for new assets. Callables receive the usual old-API
# arguments (model, cursor, uid, context) and are evaluated per record.
_defaults = {
    # Reference is taken from the dedicated ir.sequence.
    'code': lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'account.asset.code'),
    # Purchase date defaults to "today".
    'purchase_date': lambda obj, cr, uid, context: time.strftime('%Y-%m-%d'),
    'active': True,
    'state': 'draft',
    'method': 'linear',
    'method_number': 5,
    'method_time': 'number',
    'method_period': 12,
    'method_progress_factor': 0.3,
    # Currency/company default to the current user's company settings.
    'currency_id': lambda self,cr,uid,c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.currency_id.id,
    'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'account.asset.asset',context=context),
}
def _check_recursion(self, cr, uid, ids, context=None, parent=None):
    """Constraint helper: delegate to the ORM's generic parent/child
    cycle detection on ``parent_id``."""
    return super(account_asset_asset, self)._check_recursion(cr, uid, ids, context=context, parent=parent)
def _check_prorata(self, cr, uid, ids, context=None):
for asset in self.browse(cr, uid, ids, context=context):
if asset.prorata and asset.method_time != 'number':
return False
return True
# Model-level constraints: (checker method, error message, fields shown
# to the user when the constraint fails).
_constraints = [
    (_check_recursion, 'Error ! You cannot create recursive assets.', ['parent_id']),
    (_check_prorata, 'Prorata temporis can be applied only for time method "number of depreciations".', ['prorata']),
]
def onchange_category_id(self, cr, uid, ids, category_id, context=None):
    """When the asset category changes, copy its depreciation settings
    onto the asset form."""
    values = {}
    if category_id:
        category = self.pool.get('account.asset.category').browse(cr, uid, category_id, context=context)
        for field in ('method', 'method_number', 'method_time', 'method_period',
                      'method_progress_factor', 'method_end', 'prorata'):
            values[field] = getattr(category, field)
    return {'value': values}
def onchange_method_time(self, cr, uid, ids, method_time='number', context=None):
    """Prorata temporis is only meaningful for the 'number' time method,
    so clear the flag when another method is selected."""
    if method_time != 'number':
        return {'value': {'prorata': False}}
    return {'value': {}}
def _compute_entries(self, cr, uid, ids, period_id, context=None):
result = []
period_obj = self.pool.get('account.period')
depreciation_obj = self.pool.get('account.asset.depreciation.line')
period = period_obj.browse(cr, uid, period_id, context=context)
depreciation_ids = depreciation_obj.search(cr, uid, [('asset_id', 'in', ids), ('depreciation_date', '<=', period.date_stop), ('depreciation_date', '>=', period.date_start), ('move_check', '=', False)], context=context)
context = dict(context or {}, depreciation_date=period.date_stop)
return depreciation_obj.create_move(cr, uid, depreciation_ids, context=context)
def create(self, cr, uid, vals, context=None):
    """Create the asset, then immediately compute its depreciation board
    so the schedule is visible right away."""
    asset_id = super(account_asset_asset, self).create(cr, uid, vals, context=context)
    self.compute_depreciation_board(cr, uid, [asset_id], context=context)
    return asset_id
def open_entries(self, cr, uid, ids, context=None):
    """Button action: open the journal items linked to these assets."""
    action_context = dict(context or {})
    action_context['search_default_asset_id'] = ids
    action_context['default_asset_id'] = ids
    action = {
        'name': _('Journal Items'),
        'view_type': 'form',
        'view_mode': 'tree,form',
        'res_model': 'account.move.line',
        'view_id': False,
        'type': 'ir.actions.act_window',
        'context': action_context,
    }
    return action
class account_asset_depreciation_line(osv.osv):
    """One scheduled depreciation installment of an asset.

    Each line stores the amount to depreciate at a given date; posting a
    line (:meth:`create_move`) generates the corresponding account move.
    """
    _name = 'account.asset.depreciation.line'
    _description = 'Asset depreciation line'

    def _get_move_check(self, cr, uid, ids, name, args, context=None):
        """Function field: a line counts as 'posted' once it is linked to
        an account move."""
        res = {}
        for line in self.browse(cr, uid, ids, context=context):
            res[line.id] = bool(line.move_id)
        return res

    _columns = {
        'name': fields.char('Depreciation Name', required=True, select=1),
        'sequence': fields.integer('Sequence', required=True),
        'asset_id': fields.many2one('account.asset.asset', 'Asset', required=True, ondelete='cascade'),
        'parent_state': fields.related('asset_id', 'state', type='char', string='State of Asset'),
        'amount': fields.float('Current Depreciation', digits_compute=dp.get_precision('Account'), required=True),
        'remaining_value': fields.float('Next Period Depreciation', digits_compute=dp.get_precision('Account'),required=True),
        'depreciated_value': fields.float('Amount Already Depreciated', required=True),
        'depreciation_date': fields.date('Depreciation Date', select=1),
        'move_id': fields.many2one('account.move', 'Depreciation Entry'),
        'move_check': fields.function(_get_move_check, method=True, type='boolean', string='Posted', store=True)
    }

    def create_move(self, cr, uid, ids, context=None):
        """Create and link one account move (credit on the depreciation
        account, debit on the expense account) per depreciation line, then
        close every asset whose residual value reaches zero.

        (The original kept an unused local ``can_close = False``; it is
        removed.)

        :return: list of ids of the created account moves
        """
        context = dict(context or {})
        asset_obj = self.pool.get('account.asset.asset')
        period_obj = self.pool.get('account.period')
        move_obj = self.pool.get('account.move')
        move_line_obj = self.pool.get('account.move.line')
        currency_obj = self.pool.get('res.currency')
        created_move_ids = []
        asset_ids = []
        for line in self.browse(cr, uid, ids, context=context):
            depreciation_date = context.get('depreciation_date') or line.depreciation_date or time.strftime('%Y-%m-%d')
            period_ids = period_obj.find(cr, uid, depreciation_date, context=context)
            company_currency = line.asset_id.company_id.currency_id.id
            current_currency = line.asset_id.currency_id.id
            # The conversion date matters: convert at the posting date.
            context.update({'date': depreciation_date})
            amount = currency_obj.compute(cr, uid, current_currency, company_currency, line.amount, context=context)
            sign = (line.asset_id.category_id.journal_id.type == 'purchase' and 1) or -1
            asset_name = line.asset_id.name
            reference = line.name
            move_vals = {
                'name': asset_name,
                'date': depreciation_date,
                'ref': reference,
                'period_id': period_ids and period_ids[0] or False,
                'journal_id': line.asset_id.category_id.journal_id.id,
            }
            move_id = move_obj.create(cr, uid, move_vals, context=context)
            journal_id = line.asset_id.category_id.journal_id.id
            partner_id = line.asset_id.partner_id.id
            # Credit side: accumulated depreciation account.
            move_line_obj.create(cr, uid, {
                'name': asset_name,
                'ref': reference,
                'move_id': move_id,
                'account_id': line.asset_id.category_id.account_depreciation_id.id,
                'debit': 0.0,
                'credit': amount,
                'period_id': period_ids and period_ids[0] or False,
                'journal_id': journal_id,
                'partner_id': partner_id,
                'currency_id': company_currency != current_currency and current_currency or False,
                'amount_currency': company_currency != current_currency and - sign * line.amount or 0.0,
                'date': depreciation_date,
            })
            # Debit side: depreciation expense account, linked back to the
            # asset so _amount_residual can pick it up.
            move_line_obj.create(cr, uid, {
                'name': asset_name,
                'ref': reference,
                'move_id': move_id,
                'account_id': line.asset_id.category_id.account_expense_depreciation_id.id,
                'credit': 0.0,
                'debit': amount,
                'period_id': period_ids and period_ids[0] or False,
                'journal_id': journal_id,
                'partner_id': partner_id,
                'currency_id': company_currency != current_currency and current_currency or False,
                'amount_currency': company_currency != current_currency and sign * line.amount or 0.0,
                'analytic_account_id': line.asset_id.category_id.account_analytic_id.id,
                'date': depreciation_date,
                'asset_id': line.asset_id.id
            })
            self.write(cr, uid, line.id, {'move_id': move_id}, context=context)
            created_move_ids.append(move_id)
            asset_ids.append(line.asset_id.id)
        # we re-evaluate the assets to determine whether we can close them
        for asset in asset_obj.browse(cr, uid, list(set(asset_ids)), context=context):
            if currency_obj.is_zero(cr, uid, asset.currency_id, asset.value_residual):
                asset.write({'state': 'close'})
        return created_move_ids
class account_move_line(osv.osv):
    """Extend journal items with a link to the asset they depreciate."""
    _inherit = 'account.move.line'
    _columns = {
        # 'restrict' prevents deleting a journal item's asset while the
        # item still references it.
        'asset_id': fields.many2one('account.asset.asset', 'Asset', ondelete="restrict"),
    }
class account_asset_history(osv.osv):
    """Audit trail of changes made to an asset's depreciation settings."""
    _name = 'account.asset.history'
    _description = 'Asset history'
    _columns = {
        'name': fields.char('History name', select=1),
        # User who performed the modification.
        'user_id': fields.many2one('res.users', 'User', required=True),
        'date': fields.date('Date', required=True),
        'asset_id': fields.many2one('account.asset.asset', 'Asset', required=True),
        # Snapshot of the depreciation settings at modification time.
        'method_time': fields.selection([('number','Number of Depreciations'),('end','Ending Date')], 'Time Method', required=True,
            help="The method to use to compute the dates and number of depreciation lines.\n"\
                "Number of Depreciations: Fix the number of depreciation lines and the time between 2 depreciations.\n" \
                "Ending Date: Choose the time between 2 depreciations and the date the depreciations won't go beyond."),
        'method_number': fields.integer('Number of Depreciations', help="The number of depreciations needed to depreciate your asset"),
        'method_period': fields.integer('Period Length', help="Time in month between two depreciations"),
        'method_end': fields.date('Ending date'),
        'note': fields.text('Note'),
    }
    # Most recent change first.
    _order = 'date desc'
    _defaults = {
        'date': lambda *args: time.strftime('%Y-%m-%d'),
        'user_id': lambda self, cr, uid, ctx: uid
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
peter-scholtens/geany | tests/ctags/simple.py | 85 | 1037 | """A long string
literal
"""
from bsddb import btopen
# Public global constants
VERSION = '1.2.0'
# Flags for list() and children()
ALL = 0xff
KEY = 0x01
TREEID = 0x02
INDENT = 0x04
DATA = 0x08 # Used by dbtreedata
class one:
x = lambda x: x
y = 0
def __init__(self, filename, pathsep='', treegap=64):
"""Another string
literal"""
def __private_function__(self, key, data):
def public_function(self, key):
class this_is_ignored:
def _pack(self, i, s): '''this is''' """a""" '''string
literal'''"""
class inside_string:
"""
class so_is_this:
def _test(test, code, outcome, exception):
def ignored_function():
def more_nesting():
class deeply_nested():
def even_more():
@blah class this is seen???
@bleh def this also? good!
if __name__ == '__main__':
class two (one):
def only(arg):
# line continuation
class\
three\
(A, B,
C):
def\
foo(
x
,
y, z):
| gpl-2.0 |
CWVanderReyden/originalMyHomeNet | Lib/encodings/iso2022_jp_ext.py | 816 | 1069 | #
# iso2022_jp_ext.py: Python Unicode Codec for ISO2022_JP_EXT
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_iso2022, codecs
import _multibytecodec as mbc
codec = _codecs_iso2022.getcodec('iso2022_jp_ext')
class Codec(codecs.Codec):
    # Stateless encode/decode taken directly from the C codec object.
    encode = codec.encode
    decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    # Stateful encoder; the multibyte machinery reads the class-level codec.
    codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    # Stateful decoder; the multibyte machinery reads the class-level codec.
    codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    # Stream-oriented reader built on the same C codec.
    codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    # Stream-oriented writer built on the same C codec.
    codec = codec
def getregentry():
    """Return the CodecInfo record used to register this codec."""
    return codecs.CodecInfo(
        name='iso2022_jp_ext',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
| gpl-3.0 |
hhuang05/ardupilot | mk/VRBRAIN/Tools/genmsg/src/genmsg/deps.py | 51 | 4087 | # Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import genmsg.msg_loader
import genmsg
# pkg_name - string
# msg_file - string full path
# search_paths - dict of {'pkg':'msg_dir'}
def find_msg_dependencies_with_type(pkg_name, msg_file, search_paths):
    """Return the dependencies of a .msg file as a list of
    (dependency type name, dependency file path) tuples.

    :param pkg_name: package name (string)
    :param msg_file: full path to the .msg file (string)
    :param search_paths: dict of {'pkg': 'msg_dir'}
    """
    context = genmsg.msg_loader.MsgContext.create_default()
    type_name = genmsg.gentools.compute_full_type_name(pkg_name, os.path.basename(msg_file))
    spec = genmsg.msg_loader.load_msg_from_file(context, msg_file, type_name)
    try:
        genmsg.msg_loader.load_depends(context, spec, search_paths)
    except genmsg.InvalidMsgSpec as e:
        raise genmsg.MsgGenerationException("Cannot read .msg for %s: %s"%(type_name, str(e)))
    unique_deps = set(
        (dep, context.get_file(dep))
        for dep in context.get_all_depends(type_name)
    )
    return list(unique_deps)
def find_msg_dependencies(pkg_name, msg_file, search_paths):
    """Like find_msg_dependencies_with_type(), but return only the file paths."""
    return [path for _type, path in
            find_msg_dependencies_with_type(pkg_name, msg_file, search_paths)]
def find_srv_dependencies_with_type(pkg_name, msg_file, search_paths):
    """Return the dependencies of a .srv file (request and response parts)
    as a list of (dependency type name, dependency file path) tuples.

    :param pkg_name: package name (string)
    :param msg_file: full path to the .srv file (string)
    :param search_paths: dict of {'pkg': 'msg_dir'}
    """
    context = genmsg.msg_loader.MsgContext.create_default()
    type_name = genmsg.gentools.compute_full_type_name(pkg_name, os.path.basename(msg_file))
    spec = genmsg.msg_loader.load_srv_from_file(context, msg_file, type_name)
    try:
        genmsg.msg_loader.load_depends(context, spec, search_paths)
    except genmsg.InvalidMsgSpec as e:
        raise genmsg.MsgGenerationException("Cannot read .msg for %s: %s"%(type_name, str(e)))
    unique_deps = set()
    # A service depends on everything its request and response depend on.
    for part in (spec.request, spec.response):
        for dep in context.get_all_depends(part.full_name):
            unique_deps.add((dep, context.get_file(dep)))
    return list(unique_deps)
def find_srv_dependencies(pkg_name, msg_file, search_paths):
    """Like find_srv_dependencies_with_type(), but return only the file paths."""
    return [path for _type, path in
            find_srv_dependencies_with_type(pkg_name, msg_file, search_paths)]
#paths = {'std_msgs':'/u/mkjargaard/repositories/mkjargaard/dist-sandbox/std_msgs/msg'}
#file = '/u/mkjargaard/repositories/mkjargaard/dist-sandbox/quux_msgs/msg/QuuxString.msg'
#find_msg_dependencies('quux_msgs', file, paths)
| gpl-3.0 |
regardscitoyens/twitter-parlementaires | download_twitter.py | 1 | 1413 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os, json
from twitter import Twitter, OAuth
from twitterconfig import KEY, SECRET, OAUTH_TOKEN, OAUTH_SECRET
# Usage: download_twitter.py <owner_screen_name> <list id or slug>
if len(sys.argv) < 3:
    sys.stderr.write("Please input both Twitter list's owner_screen_name and slug\n")
    sys.exit(1)
LIST_USER, LIST_ID = sys.argv[1:3]

# Cache directory for the raw API responses.
if not os.path.isdir(".cache"):
    os.makedirs(".cache")

t = Twitter(auth=OAuth(OAUTH_TOKEN, OAUTH_SECRET, KEY, SECRET))

accounts = {}
page = 1
args = {
    "owner_screen_name": LIST_USER,
    "include_entities": "false",
    "skip_status": "true",
    "count": 5000,
    "cursor": -1
}
# A numeric second argument is a list id; anything else is a slug.
# (The original used the Python 2-only ``long`` builtin guarded by a bare
# ``except``: on Python 3 the NameError was silently swallowed and even
# numeric ids fell through to the slug branch.)
try:
    args["list_id"] = int(LIST_ID)
except ValueError:
    args["slug"] = LIST_ID

# Page through the list members via cursoring, caching each raw page.
while args["cursor"]:
    res = t.lists.members(**args)
    with open(os.path.join('.cache', 'twitter-%s-%s.json' % (LIST_USER, args["cursor"] if args["cursor"] != -1 else 0)), 'w') as f:
        json.dump(res, f)
    args["cursor"] = res.get('next_cursor', res.get('next_cursor_str', 0))
    new = 0
    # Deduplicate members by lowercased screen name.
    for account in res['users']:
        name = account['screen_name'].lower()
        if name not in accounts:
            accounts[name] = account
            new += 1
    print("[INFO/%s] page %s -> %s results including %s new ; new total: %s" % (LIST_ID, page, len(res['users']), new, len(accounts)))
    page += 1

# Final aggregated result, keyed by lowercased screen name.
with open(os.path.join('.cache', 'twitter-%s.json' % LIST_ID), 'w') as f:
    json.dump(accounts, f)
i5o/openshot-sugar | openshot/openshot/windows/TreeFiles.py | 3 | 5436 | # OpenShot Video Editor is a program that creates, modifies, and edits video files.
# Copyright (C) 2009 Jonathan Thomas
#
# This file is part of OpenShot Video Editor (http://launchpad.net/openshot/).
#
# OpenShot Video Editor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenShot Video Editor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenShot Video Editor. If not, see <http://www.gnu.org/licenses/>.
import os
import gtk, gobject, pango
from classes import project
# init the foreign language
from language import Language_Init
class OpenShotTree:
    """GTK TreeStore/TreeView wrapper for the project files list.

    Columns: 0 = thumbnail pixbuf, 1 = file name, 2 = length,
    3 = user-editable label, 4 = hidden unique id.
    """
    def __init__(self, treeview, project):
        # Add language support
        _ = Language_Init.Translator(project).lang.gettext
        # init vars
        self.treeview = treeview
        self.project = project
        # create a TreeStore
        self.store = gtk.TreeStore(gtk.gdk.Pixbuf, str, str, str, str)
        #set multiple selection mode on the tree
        selection = treeview.get_selection()
        selection.set_mode(gtk.SELECTION_MULTIPLE)
        # Set the treeview's data model
        self.treeview.set_model(self.store)
        self.treeviewAddGeneralPixbufColumn(self.treeview, _("Thumb"), 0, resizable=False, reorderable=False, project=self.project)
        self.treeviewAddGeneralTextColumn(self.treeview, _("File"), 1, resizable=True, reorderable=True, editable=False, visible=True, elipses=False, autosize=True, project=self.project)
        self.treeviewAddGeneralTextColumn(self.treeview, _("Length"), 2, resizable=True, reorderable=True, editable=False, visible=True, project=self.project)
        self.treeviewAddGeneralTextColumn(self.treeview, _("Label"), 3, resizable=True, reorderable=True, editable=True, visible=True, elipses=False, autosize=True, project=self.project)
        # Hidden column used to map rows back to project file objects.
        self.treeviewAddGeneralTextColumn(self.treeview, "unique_id", 4, resizable=True, reorderable=True, editable=True, visible=False, elipses=True, project=self.project)
        #self.row = {}
        #self.row["0"] = [None, "Choose a Video or Audio File to Begin", "", "", ""]
        #self.store.append(None, [self.row["0"]])
        #item = self.store.append(None, [[gtk.STOCK_OPEN, _("Choose a Video or Audio File to Begin"), "", "", ""]])
        # Placeholder row shown while the project has no files yet.
        item = self.store.append(None)
        self.store.set_value(item, 0, None)
        self.store.set_value(item, 1, _("Choose a Video or Audio File to Begin"))
        self.store.set_value(item, 2, "")
        self.store.set_value(item, 3, "")
        self.store.set_value(item, 4, "")
        # connect signals
        self.treeview.connect_after('drag_begin', self.on_treeFiles_drag_begin)

    def on_treeFiles_drag_begin(self, widget, *args):
        """Drag handler: flag the drag as a file drag and set its icon."""
        context = args[0]
        # update drag type
        self.project.form.drag_type = "file"
        # Get the drag icon
        play_image = gtk.image_new_from_file(os.path.join(self.project.THEMES_DIR, self.project.theme, "icons", "plus.png"))
        pixbuf = play_image.get_pixbuf()
        context.set_icon_pixbuf(pixbuf, 15, 10)

    def treeviewAddGeneralTextColumn(self, treeview, name, pos = 0, resizable=True, reorderable=False, editable=False, visible=True, elipses=False, autosize=False, project=None):
        '''Add a new text column to the model'''
        cell = gtk.CellRendererText()
        cell.set_property('editable', editable)
        if (elipses):
            cell.set_property("ellipsize", pango.ELLIPSIZE_END)
        col = gtk.TreeViewColumn(name, cell, text = pos)
        if (autosize):
            col.set_sizing(gtk.TREE_VIEW_COLUMN_AUTOSIZE)
            col.set_expand(False)
        col.set_resizable(resizable)
        col.set_reorderable(reorderable)
        col.set_visible(visible)
        treeview.append_column(col)
        treeview.set_headers_clickable(True)
        if (editable):
            model = treeview.get_model()
            cell.connect('edited', self.cell_edited,model, project)
            cell.connect('editing_started',self.cell_start_editing,model)
        if (reorderable):
            col.set_sort_column_id(pos)
        return cell, col

    def cell_start_editing(self,widget,*args):
        # update flag on form (for stop capturing keys)
        self.project.form.is_edit_mode = True

    def treeviewAddGeneralPixbufColumn(self, treeview, name, pos = 0, resizable=True, reorderable=False, project=None):
        '''Add a new gtk.gdk.Pixbuf column to the model'''
        cell = gtk.CellRendererPixbuf()
        col = gtk.TreeViewColumn(name, cell, pixbuf = pos)
        col.set_resizable(resizable)
        col.set_reorderable(reorderable)
        col.set_alignment(0.0)
        treeview.append_column(col)
        treeview.set_headers_clickable(True)
        if (reorderable):
            col.set_sort_column_id(pos)
        return cell, col

    def cell_edited(self, cell, row, new_text, model, project=None):
        # update flag on form (for stop capturing keys)
        self.project.form.is_edit_mode = False
        ##Fired when the editable label cell is edited
        #get the row that was edited
        iter = model.get_iter_from_string(row)
        # NOTE(review): ``column`` is never used, and get_data() is keyed by
        # a translated string nothing ever set — looks vestigial; confirm
        # before removing.
        column = cell.get_data(_("Label"))
        #set the edit in the model
        model.set(iter,3,new_text)
        #update the file object with the label edit
        unique_id = model.get_value(iter, 4)
        self.project.project_folder.UpdateFileLabel(unique_id, new_text, 0)

    def set_project(self, project):
        # Rebind the tree to a (possibly newly loaded) project.
        self.project = project
| gpl-3.0 |
phobson/statsmodels | statsmodels/sandbox/distributions/try_max.py | 34 | 2428 | '''
adjusted from Denis on pystatsmodels mailing list
there might still be problems with loc and scale,
'''
from __future__ import division
import numpy as np
from scipy import stats
__date__ = "2010-12-29 dec"
class MaxDist(stats.rv_continuous):
    """Distribution of the maximum of n i.i.d. draws from a scipy.stats
    continuous distribution.

    If F and f are the cdf and pdf of ``dist``, the maximum of n
    independent draws has cdf F(x)**n, pdf n*f(x)*F(x)**(n-1) and
    ppf F^{-1}(q**(1/n)).

    Example:
        maxnormal10 = MaxDist(scipy.stats.norm, 10)
        sample = maxnormal10.rvs(size=1000)
    """
    def __init__(self, dist, n):
        self.dist = dist
        self.n = n
        # NOTE: the historical ``extradoc`` keyword was deprecated and then
        # removed from rv_continuous in modern scipy, so it is no longer
        # passed to the base constructor.
        super(MaxDist, self).__init__(name='maxdist', a=dist.a, b=dist.b,
                                      longname='A maximumdistribution')

    def _pdf(self, x, *args, **kw):
        # d/dx F(x)**n = n * f(x) * F(x)**(n-1)
        return self.n * self.dist.pdf(x, *args, **kw) \
               * self.dist.cdf(x, *args, **kw)**(self.n - 1)

    def _cdf(self, x, *args, **kw):
        return self.dist.cdf(x, *args, **kw)**self.n

    def _ppf(self, q, *args, **kw):
        # y = F(x) ^ n <=> x = F-1( y ^ 1/n)
        return self.dist.ppf(q**(1. / self.n), *args, **kw)
# Demo: distribution of the maximum of 10 i.i.d. standard normal draws.
maxdistr = MaxDist(stats.norm, 10)
print(maxdistr.rvs(size=10))
print(maxdistr.stats(moments = 'mvsk'))
'''
>>> print maxdistr.stats(moments = 'mvsk')
(array(1.5387527308351818), array(0.34434382328492852), array(0.40990510188513779), array(0.33139861783918922))
>>> rvs = np.random.randn(1000,10)
>>> stats.describe(rvs.max(1))
(1000, (-0.028558517753519492, 3.6134958002753685), 1.5560520428553426, 0.34965234046170773, 0.48504309950278557, 0.17691859056779258)
>>> rvs2 = maxdistr.rvs(size=1000)
>>> stats.describe(rvs2)
(1000, (-0.015290995091401905, 3.3227019151170931), 1.5248146840651813, 0.32827518543128631, 0.23998620901199566, -0.080555658370268013)
>>> rvs2 = maxdistr.rvs(size=10000)
>>> stats.describe(rvs2)
(10000, (-0.15855091764294812, 4.1898138060896937), 1.532862047388899, 0.34361316060467512, 0.43128838106936973, 0.41115043864619061)
>>> maxdistr.pdf(1.5)
0.69513824417156755
#integrating the pdf
>>> maxdistr.expect()
1.5387527308351729
>>> maxdistr.expect(lambda x:1)
0.99999999999999956
'''
| bsd-3-clause |
Klaudit/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/abstractstep.py | 129 | 3437 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
from webkitpy.common.system.executive import ScriptError
from webkitpy.tool.steps.options import Options
class AbstractStep(object):
    """Base class for webkit-patch tool steps (Python 2 codebase).

    A step receives the shared tool and parsed options, and communicates
    with other steps through a shared ``state`` dict; ``cached_lookup``
    memoizes expensive lookups (bug, diff, changed files) in that dict.
    """
    def __init__(self, tool, options):
        self._tool = tool
        self._options = options

    def _exit(self, code):
        # Indirection over sys.exit() so tests can intercept exits.
        sys.exit(code)

    def _changed_files(self, state):
        return self.cached_lookup(state, "changed_files")

    # Lazily-evaluated producers for well-known state keys; each value is
    # a callable of (self, state) invoked by cached_lookup on a miss.
    _well_known_keys = {
        # FIXME: Should this use state.get('bug_id') or state.get('patch').bug_id() like UpdateChangeLogsWithReviewer does?
        "bug": lambda self, state: self._tool.bugs.fetch_bug(state["bug_id"]),
        # bug_title can either be a new title given by the user, or one from an existing bug.
        "bug_title": lambda self, state: self.cached_lookup(state, 'bug').title(),
        "changed_files": lambda self, state: self._tool.scm().changed_files(self._options.git_commit),
        "diff": lambda self, state: self._tool.scm().create_patch(self._options.git_commit, changed_files=self._changed_files(state)),
        # Absolute path to ChangeLog files.
        "changelogs": lambda self, state: self._tool.checkout().modified_changelogs(self._options.git_commit, changed_files=self._changed_files(state)),
    }

    def cached_lookup(self, state, key, promise=None):
        """Return state[key], computing and caching it on first access."""
        if state.get(key):
            return state[key]
        if not promise:
            promise = self._well_known_keys.get(key)
        state[key] = promise(self, state)
        return state[key]

    def did_modify_checkout(self, state):
        # Invalidate every cached value derived from the working copy.
        state["diff"] = None
        state["changelogs"] = None
        state["changed_files"] = None

    @classmethod
    def options(cls):
        return [
            # We need this option here because cached_lookup uses it. :(
            Options.git_commit,
        ]

    def run(self, state):
        # Python 2 raise syntax, kept as-is for this py2 codebase.
        raise NotImplementedError, "subclasses must implement"
| bsd-3-clause |
mzdaniel/oh-mainline | vendor/packages/Django/django/core/management/commands/shell.py | 230 | 3263 | import os
from django.core.management.base import NoArgsCommand
from optparse import make_option
class Command(NoArgsCommand):
    """Management command backing ``manage.py shell``.

    Opens an interactive interpreter with all installed-app models
    pre-loaded, preferring an enhanced shell (IPython, then bpython)
    and falling back to the stdlib ``code.interact`` REPL.
    """
    option_list = NoArgsCommand.option_list + (
        make_option('--plain', action='store_true', dest='plain',
            help='Tells Django to use plain Python, not IPython.'),
    )
    help = "Runs a Python interactive interpreter. Tries to use IPython, if it's available."
    # Candidate enhanced shells, tried in order by run_shell(); each entry
    # must be the name of a method on this class.
    shells = ['ipython', 'bpython']
    requires_model_validation = False

    def ipython(self):
        """Start an IPython shell, supporting both the >=0.11 and <0.11 APIs."""
        try:
            from IPython.frontend.terminal.embed import TerminalInteractiveShell
            shell = TerminalInteractiveShell()
            shell.mainloop()
        except ImportError:
            # IPython < 0.11
            # Explicitly pass an empty list as arguments, because otherwise
            # IPython would use sys.argv from this script.
            try:
                from IPython.Shell import IPShell
                shell = IPShell(argv=[])
                shell.mainloop()
            except ImportError:
                # IPython not found at all, raise ImportError
                raise

    def bpython(self):
        """Start a bpython shell (propagates ImportError when not installed)."""
        import bpython
        bpython.embed()

    def run_shell(self):
        """Try each known enhanced shell in order; raise ImportError if none start."""
        for shell in self.shells:
            try:
                return getattr(self, shell)()
            except ImportError:
                pass
        raise ImportError

    def handle_noargs(self, **options):
        """Entry point: load models, then start the best available shell."""
        # XXX: (Temporary) workaround for ticket #1796: force early loading of all
        # models from installed apps.
        from django.db.models.loading import get_models
        loaded_models = get_models()
        use_plain = options.get('plain', False)
        try:
            if use_plain:
                # Don't bother loading IPython, because the user wants plain Python.
                raise ImportError
            self.run_shell()
        except ImportError:
            import code
            # Set up a dictionary to serve as the environment for the shell, so
            # that tab completion works on objects that are imported at runtime.
            # See ticket 5082.
            imported_objects = {}
            try:  # Try activating rlcompleter, because it's handy.
                import readline
            except ImportError:
                pass
            else:
                # We don't have to wrap the following import in a 'try', because
                # we already know 'readline' was imported successfully.
                import rlcompleter
                readline.set_completer(rlcompleter.Completer(imported_objects).complete)
                readline.parse_and_bind("tab:complete")
            # We want to honor both $PYTHONSTARTUP and .pythonrc.py, so follow system
            # conventions and get $PYTHONSTARTUP first then import user.
            if not use_plain:
                pythonrc = os.environ.get("PYTHONSTARTUP")
                if pythonrc and os.path.isfile(pythonrc):
                    try:
                        # NOTE: execfile is Python-2-only; this file predates Py3 support.
                        execfile(pythonrc)
                    except NameError:
                        pass
                # This will import .pythonrc.py as a side-effect
                import user
            code.interact(local=imported_objects)
| agpl-3.0 |
ELNOGAL/CMNT_00040_2016_ELN_addons | eln_sale/models/sale_shop.py | 2 | 2786 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2004-2012 Pexego Sistemas Informáticos All Rights Reserved
# $Marta Vázquez Rodríguez$ <marta@pexego.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields
class SaleShop(models.Model):
    """Sale type ("shop") master data used to parameterize sale orders.

    Holds the defaults (payment term, warehouse, pricelist, analytic
    account, invoicing policy, ...) applied to orders of this type.
    """
    _name = 'sale.shop'
    _description = 'Sale Type'
    _order = 'sequence'
    # Display name of the sale type.
    name = fields.Char('Type Name', size=64, required=True)
    # Payment term proposed by default on orders of this type.
    payment_default_id = fields.Many2one(
        string='Default Payment Term',
        comodel_name='account.payment.term')
    # Warehouse goods are shipped from; mandatory.
    warehouse_id = fields.Many2one(
        string='Warehouse',
        comodel_name='stock.warehouse',
        required=True)
    # Default pricelist applied to orders of this type.
    pricelist_id = fields.Many2one(
        string='Pricelist',
        comodel_name='product.pricelist')
    # Analytic account for cost/revenue tracking; root accounts excluded
    # by the domain (a parent is required).
    project_id = fields.Many2one(
        string='Analytic Account',
        comodel_name='account.analytic.account',
        domain=[('parent_id', '!=', False)])
    company_id = fields.Many2one(
        string='Company',
        comodel_name='res.company')
    # Supplier partner associated with this sale type.
    supplier_id = fields.Many2one(
        string='Supplier',
        comodel_name='res.partner', select=True)
    # Invoicing policy applied to orders of this type.
    order_policy = fields.Selection([
        ('manual', 'On Demand'),
        ('picking', 'On Delivery Order'),
        ('prepaid', 'Before Delivery'),
        ('no_bill', 'No bill')], string='Create Invoice',
        help="On demand: A draft invoice can be created from the sales order when needed. \nOn delivery order: A draft invoice can be created from the delivery order when the products have been delivered. \nBefore delivery: A draft invoice is created from the sales order and must be paid before the products can be delivered.")
    indirect_invoicing = fields.Boolean(
        string='Indirect Invoicing',
        default=False,
        help="Check the indirect invoicing field if the shop is a shop of indirect invoicing.")
    active = fields.Boolean('Active', default=True)
    # Ordering key (see _order above).
    sequence = fields.Integer('Sequence', default=1)
| agpl-3.0 |
anitagraser/processing_pysal | ext-libs/pysal/spreg/twosls_sp_regimes.py | 4 | 33806 | '''
Spatial Two Stages Least Squares with Regimes
'''
__author__ = "Luc Anselin luc.anselin@asu.edu, Pedro V. Amaral pedro.amaral@asu.edu, David C. Folch david.folch@asu.edu"
import numpy as np
import pysal
import regimes as REGI
import user_output as USER
import summary_output as SUMMARY
import multiprocessing as mp
from twosls_regimes import TSLS_Regimes, _optimal_weight
from twosls import BaseTSLS
from utils import set_endog, set_endog_sparse, sp_att, set_warn, sphstack, spdot
from robust import hac_multi
from platform import system
class GM_Lag_Regimes(TSLS_Regimes, REGI.Regimes_Frame):
"""
Spatial two stage least squares (S2SLS) with regimes;
Anselin (1988) [1]_
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
regimes : list
List of n values with the mapping of each
observation to a regime. Assumed to be aligned with 'x'.
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
q : array
Two dimensional array with n rows and one column for each
external exogenous variable to use as instruments (note:
this should not contain any variables from x); cannot be
used in combination with h
constant_regi: ['one', 'many']
Switcher controlling the constant term setup. It may take
the following values:
* 'one': a vector of ones is appended to x and held
constant across regimes
* 'many': a vector of ones is appended to x and considered
different per regime (default)
cols2regi : list, 'all'
Argument indicating whether each
column of x should be considered as different per regime
or held constant across regimes (False).
If a list, k booleans indicating for each variable the
option (True if one per regime, False to be held constant).
If 'all' (default), all the variables vary by regime.
w : pysal W object
Spatial weights object
w_lags : integer
Orders of W to include as instruments for the spatially
lagged dependent variable. For example, w_lags=1, then
instruments are WX; if w_lags=2, then WX, WWX; and so on.
lag_q : boolean
If True, then include spatial lags of the additional
instruments (q).
regime_lag_sep: boolean
If True (default), the spatial parameter for spatial lag is also
computed according to different regimes. If False,
the spatial parameter is fixed accross regimes.
Option valid only when regime_err_sep=True
regime_err_sep: boolean
If True, a separate regression is run for each regime.
robust : string
If 'white', then a White consistent estimator of the
variance-covariance matrix is given.
If 'hac', then a HAC consistent estimator of the
variance-covariance matrix is given.
If 'ogmm', then Optimal GMM is used to estimate
betas and the variance-covariance matrix.
Default set to None.
gwk : pysal W object
Kernel spatial weights needed for HAC estimation. Note:
matrix must have ones along the main diagonal.
sig2n_k : boolean
If True, then use n-k to estimate sigma^2. If False, use n.
spat_diag : boolean
If True, then compute Anselin-Kelejian test
vm : boolean
If True, include variance-covariance matrix in summary
results
cores : boolean
Specifies if multiprocessing is to be used
Default: no multiprocessing, cores = False
Note: Multiprocessing may not work on all platforms.
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_yend : list of strings
Names of endogenous variables for use in output
name_q : list of strings
Names of instruments for use in output
name_w : string
Name of weights matrix for use in output
name_gwk : string
Name of kernel weights matrix for use in output
name_ds : string
Name of dataset for use in output
name_regimes : string
Name of regimes variable for use in output
Attributes
----------
summary : string
Summary of regression results and diagnostics (note: use in
conjunction with the print command)
betas : array
kx1 array of estimated coefficients
u : array
nx1 array of residuals
e_pred : array
nx1 array of residuals (using reduced form)
predy : array
nx1 array of predicted y values
predy_e : array
nx1 array of predicted y values (using reduced form)
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant)
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
kstar : integer
Number of endogenous variables.
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
q : array
Two dimensional array with n rows and one column for each
external exogenous variable used as instruments
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
z : array
nxk array of variables (combination of x and yend)
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
h : array
nxl array of instruments (combination of x and q)
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
robust : string
Adjustment for robust standard errors
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
vm : array
Variance covariance matrix (kxk)
pr2 : float
Pseudo R squared (squared correlation between y and ypred)
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
pr2_e : float
Pseudo R squared (squared correlation between y and ypred_e
(using reduced form))
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
utu : float
Sum of squared residuals
sig2 : float
Sigma squared used in computations
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
std_err : array
1xk array of standard errors of the betas
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
z_stat : list of tuples
z statistic; each tuple contains the pair (statistic,
p-value), where each is a float
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
ak_test : tuple
Anselin-Kelejian test; tuple contains the pair (statistic,
p-value)
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_yend : list of strings
Names of endogenous variables for use in output
name_z : list of strings
Names of exogenous and endogenous variables for use in
output
name_q : list of strings
Names of external instruments
name_h : list of strings
Names of all instruments used in ouput
name_w : string
Name of weights matrix for use in output
name_gwk : string
Name of kernel weights matrix for use in output
name_ds : string
Name of dataset for use in output
name_regimes : string
Name of regimes variable for use in output
title : string
Name of the regression method used
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
sig2n : float
Sigma squared (computed with n in the denominator)
sig2n_k : float
Sigma squared (computed with n-k in the denominator)
hth : float
H'H
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
hthi : float
(H'H)^-1
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
varb : array
(Z'H (H'H)^-1 H'Z)^-1
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
zthhthi : array
Z'H(H'H)^-1
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
pfora1a2 : array
n(zthhthi)'varb
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
regimes : list
List of n values with the mapping of each
observation to a regime. Assumed to be aligned with 'x'.
constant_regi: ['one', 'many']
Ignored if regimes=False. Constant option for regimes.
Switcher controlling the constant term setup. It may take
the following values:
* 'one': a vector of ones is appended to x and held
constant across regimes
* 'many': a vector of ones is appended to x and considered
different per regime
cols2regi : list, 'all'
Ignored if regimes=False. Argument indicating whether each
column of x should be considered as different per regime
or held constant across regimes (False).
If a list, k booleans indicating for each variable the
option (True if one per regime, False to be held constant).
If 'all', all the variables vary by regime.
regime_lag_sep : boolean
If True, the spatial parameter for spatial lag is also
computed according to different regimes. If False (default),
the spatial parameter is fixed accross regimes.
regime_err_sep : boolean
If True, a separate regression is run for each regime.
kr : int
Number of variables/columns to be "regimized" or subject
to change by regime. These will result in one parameter
estimate by regime for each variable (i.e. nr parameters per
variable)
kf : int
Number of variables/columns to be considered fixed or
global across regimes and hence only obtain one parameter
estimate
nr : int
Number of different regimes in the 'regimes' list
multi : dictionary
Only available when multiple regressions are estimated,
i.e. when regime_err_sep=True and no variable is fixed
across regimes.
Contains all attributes of each individual regression
References
----------
.. [1] Anselin, L. (1988) "Spatial Econometrics: Methods and Models".
Kluwer Academic Publishers. Dordrecht.
Examples
--------
We first need to import the needed modules, namely numpy to convert the
data we read into arrays that ``spreg`` understands and ``pysal`` to
perform all the analysis.
>>> import numpy as np
>>> import pysal
Open data on NCOVR US County Homicides (3085 areas) using pysal.open().
This is the DBF associated with the NAT shapefile. Note that
pysal.open() also reads data in CSV format; since the actual class
requires data to be passed in as numpy arrays, the user can read their
data in using any method.
>>> db = pysal.open(pysal.examples.get_path("NAT.dbf"),'r')
Extract the HR90 column (homicide rates in 1990) from the DBF file and make it the
dependent variable for the regression. Note that PySAL requires this to be
an numpy array of shape (n, 1) as opposed to the also common shape of (n, )
that other packages accept.
>>> y_var = 'HR90'
>>> y = np.array([db.by_col(y_var)]).reshape(3085,1)
Extract UE90 (unemployment rate) and PS90 (population structure) vectors from
the DBF to be used as independent variables in the regression. Other variables
can be inserted by adding their names to x_var, such as x_var = ['Var1','Var2','...]
Note that PySAL requires this to be an nxj numpy array, where j is the
number of independent variables (not including a constant). By default
this model adds a vector of ones to the independent variables passed in.
>>> x_var = ['PS90','UE90']
>>> x = np.array([db.by_col(name) for name in x_var]).T
The different regimes in this data are given according to the North and
South dummy (SOUTH).
>>> r_var = 'SOUTH'
>>> regimes = db.by_col(r_var)
Since we want to run a spatial lag model, we need to specify
the spatial weights matrix that includes the spatial configuration of the
observations. To do that, we can open an already existing gal file or
create a new one. In this case, we will create one from ``NAT.shp``.
>>> w = pysal.rook_from_shapefile(pysal.examples.get_path("NAT.shp"))
Unless there is a good reason not to do it, the weights have to be
row-standardized so every row of the matrix sums to one. Among other
things, this allows to interpret the spatial lag of a variable as the
average value of the neighboring observations. In PySAL, this can be
easily performed in the following way:
>>> w.transform = 'r'
This class runs a lag model, which means that includes the spatial lag of
the dependent variable on the right-hand side of the equation. If we want
to have the names of the variables printed in the output summary, we will
have to pass them in as well, although this is optional.
>>> model=GM_Lag_Regimes(y, x, regimes, w=w, regime_lag_sep=False, regime_err_sep=False, name_y=y_var, name_x=x_var, name_regimes=r_var, name_ds='NAT', name_w='NAT.shp')
>>> model.betas
array([[ 1.28897623],
[ 0.79777722],
[ 0.56366891],
[ 8.73327838],
[ 1.30433406],
[ 0.62418643],
[-0.39993716]])
Once the model is run, we can have a summary of the output by typing:
model.summary . Alternatively, we can obtain the standard error of
the coefficient estimates by calling:
>>> model.std_err
array([ 0.44682888, 0.14358192, 0.05655124, 1.06044865, 0.20184548,
0.06118262, 0.12387232])
In the example above, all coefficients but the spatial lag vary
according to the regime. It is also possible to have the spatial lag
varying according to the regime, which effective will result in an
independent spatial lag model estimated for each regime. To run these
models, the argument regime_lag_sep must be set to True:
>>> model=GM_Lag_Regimes(y, x, regimes, w=w, regime_lag_sep=True, name_y=y_var, name_x=x_var, name_regimes=r_var, name_ds='NAT', name_w='NAT.shp')
>>> print np.hstack((np.array(model.name_z).reshape(8,1),model.betas,np.sqrt(model.vm.diagonal().reshape(8,1))))
[['0_CONSTANT' '1.36584769' '0.39854720']
['0_PS90' '0.80875730' '0.11324884']
['0_UE90' '0.56946813' '0.04625087']
['0_W_HR90' '-0.4342438' '0.13350159']
['1_CONSTANT' '7.90731073' '1.63601874']
['1_PS90' '1.27465703' '0.24709870']
['1_UE90' '0.60167693' '0.07993322']
['1_W_HR90' '-0.2960338' '0.19934459']]
Alternatively, we can type: 'model.summary' to see the organized results output.
The class is flexible enough to accomodate a spatial lag model that,
besides the spatial lag of the dependent variable, includes other
non-spatial endogenous regressors. As an example, we will add the endogenous
variable RD90 (resource deprivation) and we decide to instrument for it with
FP89 (families below poverty):
>>> yd_var = ['RD90']
>>> yd = np.array([db.by_col(name) for name in yd_var]).T
>>> q_var = ['FP89']
>>> q = np.array([db.by_col(name) for name in q_var]).T
And we can run the model again:
>>> model = GM_Lag_Regimes(y, x, regimes, yend=yd, q=q, w=w, regime_lag_sep=False, regime_err_sep=False, name_y=y_var, name_x=x_var, name_yend=yd_var, name_q=q_var, name_regimes=r_var, name_ds='NAT', name_w='NAT.shp')
>>> model.betas
array([[ 3.42195202],
[ 1.03311878],
[ 0.14308741],
[ 8.99740066],
[ 1.91877758],
[-0.32084816],
[ 2.38918212],
[ 3.67243761],
[ 0.06959139]])
Once the model is run, we can obtain the standard error of the coefficient
estimates. Alternatively, we can have a summary of the output by typing:
model.summary
>>> model.std_err
array([ 0.49163311, 0.12237382, 0.05633464, 0.72555909, 0.17250521,
0.06749131, 0.27370369, 0.25106224, 0.05804213])
"""
def __init__(self, y, x, regimes, yend=None, q=None,
w=None, w_lags=1, lag_q=True,
robust=None, gwk=None, sig2n_k=False,
spat_diag=False, constant_regi='many',
cols2regi='all', regime_lag_sep=False, regime_err_sep=True,
cores=False, vm=False, name_y=None, name_x=None,
name_yend=None, name_q=None, name_regimes=None,
name_w=None, name_gwk=None, name_ds=None):
n = USER.check_arrays(y, x)
USER.check_y(y, n)
USER.check_weights(w, y, w_required=True)
USER.check_robust(robust, gwk)
USER.check_spat_diag(spat_diag, w)
name_x = USER.set_name_x(name_x, x, constant=True)
name_y = USER.set_name_y(name_y)
name_yend = USER.set_name_yend(name_yend, yend)
name_q = USER.set_name_q(name_q, q)
name_q.extend(
USER.set_name_q_sp(name_x, w_lags, name_q, lag_q, force_all=True))
self.name_regimes = USER.set_name_ds(name_regimes)
self.constant_regi = constant_regi
self.n = n
cols2regi = REGI.check_cols2regi(
constant_regi, cols2regi, x, yend=yend, add_cons=False)
self.cols2regi = cols2regi
self.regimes_set = REGI._get_regimes_set(regimes)
self.regimes = regimes
USER.check_regimes(self.regimes_set, self.n, x.shape[1])
if regime_err_sep == True and robust == 'hac':
set_warn(
self, "Error by regimes is incompatible with HAC estimation for Spatial Lag models. Hence, error and lag by regimes have been disabled for this model.")
regime_err_sep = False
regime_lag_sep = False
self.regime_err_sep = regime_err_sep
self.regime_lag_sep = regime_lag_sep
if regime_lag_sep == True:
if not regime_err_sep:
raise Exception, "regime_err_sep must be True when regime_lag_sep=True."
cols2regi += [True]
w_i, regi_ids, warn = REGI.w_regimes(
w, regimes, self.regimes_set, transform=True, get_ids=True, min_n=len(cols2regi) + 1)
set_warn(self, warn)
else:
cols2regi += [False]
if regime_err_sep == True and set(cols2regi) == set([True]) and constant_regi == 'many':
self.y = y
self.GM_Lag_Regimes_Multi(y, x, w_i, w, regi_ids,
yend=yend, q=q, w_lags=w_lags, lag_q=lag_q, cores=cores,
robust=robust, gwk=gwk, sig2n_k=sig2n_k, cols2regi=cols2regi,
spat_diag=spat_diag, vm=vm, name_y=name_y, name_x=name_x,
name_yend=name_yend, name_q=name_q, name_regimes=self.name_regimes,
name_w=name_w, name_gwk=name_gwk, name_ds=name_ds)
else:
if regime_lag_sep == True:
w = REGI.w_regimes_union(w, w_i, self.regimes_set)
yend2, q2 = set_endog(y, x, w, yend, q, w_lags, lag_q)
name_yend.append(USER.set_name_yend_sp(name_y))
TSLS_Regimes.__init__(self, y=y, x=x, yend=yend2, q=q2,
regimes=regimes, w=w, robust=robust, gwk=gwk,
sig2n_k=sig2n_k, spat_diag=spat_diag, vm=vm,
constant_regi=constant_regi, cols2regi=cols2regi, regime_err_sep=regime_err_sep,
name_y=name_y, name_x=name_x, name_yend=name_yend, name_q=name_q,
name_regimes=name_regimes, name_w=name_w, name_gwk=name_gwk,
name_ds=name_ds, summ=False)
if regime_lag_sep:
self.sp_att_reg(w_i, regi_ids, yend2[:, -1].reshape(self.n, 1))
else:
self.rho = self.betas[-1]
self.predy_e, self.e_pred, warn = sp_att(w, self.y, self.predy,
yend2[:, -1].reshape(self.n, 1), self.rho)
set_warn(self, warn)
self.regime_lag_sep = regime_lag_sep
self.title = "SPATIAL " + self.title
SUMMARY.GM_Lag(
reg=self, w=w, vm=vm, spat_diag=spat_diag, regimes=True)
    def GM_Lag_Regimes_Multi(self, y, x, w_i, w, regi_ids, cores=False,
                             yend=None, q=None, w_lags=1, lag_q=True,
                             robust=None, gwk=None, sig2n_k=False, cols2regi='all',
                             spat_diag=False, vm=False, name_y=None, name_x=None,
                             name_yend=None, name_q=None, name_regimes=None,
                             name_w=None, name_gwk=None, name_ds=None):
        """Estimate one spatial-lag TSLS model per regime (optionally in
        parallel via multiprocessing) and assemble the per-regime results
        into the combined attributes (betas, vm, u, predy, ...) of self.

        The triple-quoted blocks below are disabled legacy code (platform
        dispatch via is_win) kept verbatim; they are inert string
        expressions, not active logic.
        """
        # pool = mp.Pool(cores)
        self.name_ds = USER.set_name_ds(name_ds)
        name_x = USER.set_name_x(name_x, x)
        name_yend.append(USER.set_name_yend_sp(name_y))
        self.name_w = USER.set_name_w(name_w, w_i)
        self.name_gwk = USER.set_name_w(name_gwk, gwk)
        results_p = {}
        """
        for r in self.regimes_set:
            w_r = w_i[r].sparse
            if system() == 'Windows':
                is_win = True
                results_p[r] = _work(*(y,x,regi_ids,r,yend,q,w_r,w_lags,lag_q,robust,sig2n_k,self.name_ds,name_y,name_x,name_yend,name_q,self.name_w,name_regimes))
            else:
                results_p[r] = pool.apply_async(_work,args=(y,x,regi_ids,r,yend,q,w_r,w_lags,lag_q,robust,sig2n_k,self.name_ds,name_y,name_x,name_yend,name_q,self.name_w,name_regimes, ))
                is_win = False
        """
        # Launch one estimation per regime; async when multiprocessing is on.
        for r in self.regimes_set:
            w_r = w_i[r].sparse
            if cores:
                pool = mp.Pool(None)
                results_p[r] = pool.apply_async(_work, args=(
                    y, x, regi_ids, r, yend, q, w_r, w_lags, lag_q, robust, sig2n_k, self.name_ds, name_y, name_x, name_yend, name_q, self.name_w, name_regimes, ))
            else:
                results_p[r] = _work(*(y, x, regi_ids, r, yend, q, w_r, w_lags, lag_q, robust,
                                       sig2n_k, self.name_ds, name_y, name_x, name_yend, name_q, self.name_w, name_regimes))
        # Dimensions of the stacked (all-regimes) result containers.
        self.kryd = 0
        self.kr = len(cols2regi) + 1
        self.kf = 0
        self.nr = len(self.regimes_set)
        self.name_x_r = name_x + name_yend
        self.name_regimes = name_regimes
        self.vm = np.zeros((self.nr * self.kr, self.nr * self.kr), float)
        self.betas = np.zeros((self.nr * self.kr, 1), float)
        self.u = np.zeros((self.n, 1), float)
        self.predy = np.zeros((self.n, 1), float)
        self.predy_e = np.zeros((self.n, 1), float)
        self.e_pred = np.zeros((self.n, 1), float)
        """
        if not is_win:
            pool.close()
            pool.join()
        """
        if cores:
            pool.close()
            pool.join()
        results = {}
        self.name_y, self.name_x, self.name_yend, self.name_q, self.name_z, self.name_h = [
        ], [], [], [], [], []
        counter = 0
        # Collect each regime's fit and scatter it into the stacked arrays.
        for r in self.regimes_set:
            """
            if is_win:
                results[r] = results_p[r]
            else:
                results[r] = results_p[r].get()
            """
            if not cores:
                results[r] = results_p[r]
            else:
                results[r] = results_p[r].get()
            results[r].predy_e, results[r].e_pred, warn = sp_att(w_i[r], results[r].y, results[
                r].predy, results[r].yend[:, -1].reshape(results[r].n, 1), results[r].rho)
            set_warn(results[r], warn)
            results[r].w = w_i[r]
            # Block-diagonal placement of the per-regime covariance matrix.
            self.vm[(counter * self.kr):((counter + 1) * self.kr),
                    (counter * self.kr):((counter + 1) * self.kr)] = results[r].vm
            self.betas[
                (counter * self.kr):((counter + 1) * self.kr), ] = results[r].betas
            self.u[regi_ids[r], ] = results[r].u
            self.predy[regi_ids[r], ] = results[r].predy
            self.predy_e[regi_ids[r], ] = results[r].predy_e
            self.e_pred[regi_ids[r], ] = results[r].e_pred
            self.name_y += results[r].name_y
            self.name_x += results[r].name_x
            self.name_yend += results[r].name_yend
            self.name_q += results[r].name_q
            self.name_z += results[r].name_z
            self.name_h += results[r].name_h
            # Allocate the HAC instrument container on the first regime.
            if r == self.regimes_set[0]:
                self.hac_var = np.zeros((self.n, results[r].h.shape[1]), float)
            self.hac_var[regi_ids[r], ] = results[r].h
            counter += 1
        self.multi = results
        if robust == 'hac':
            hac_multi(self, gwk, constant=True)
        if robust == 'ogmm':
            set_warn(
                self, "Residuals treated as homoskedastic for the purpose of diagnostics.")
        self.chow = REGI.Chow(self)
        if spat_diag:
            pass
            #self._get_spat_diag_props(y, x, w, yend, q, w_lags, lag_q)
        SUMMARY.GM_Lag_multi(
            reg=self, multireg=self.multi, vm=vm, spat_diag=spat_diag, regimes=True, w=w)
    def sp_att_reg(self, w_i, regi_ids, wy):
        """Compute reduced-form predictions (predy_e) and prediction
        errors (e_pred) regime by regime when the spatial lag parameter
        varies across regimes (regime_lag_sep=True)."""
        predy_e_r, e_pred_r = {}, {}
        self.predy_e = np.zeros((self.n, 1), float)
        self.e_pred = np.zeros((self.n, 1), float)
        counter = 1
        for r in self.regimes_set:
            # Index arithmetic locates regime r's rho inside the stacked
            # betas vector (regime-varying block + fixed block layout).
            self.rho = self.betas[(self.kr - self.kryd) * self.nr + self.kf - (
                self.yend.shape[1] - self.nr * self.kryd) + self.kryd * counter - 1]
            self.predy_e[regi_ids[r], ], self.e_pred[regi_ids[r], ], warn = sp_att(w_i[r],
                                                                                  self.y[regi_ids[r]], self.predy[
                                                                                      regi_ids[r]],
                                                                                  wy[regi_ids[r]], self.rho)
            counter += 1
    def _get_spat_diag_props(self, y, x, w, yend, q, w_lags, lag_q):
        """Build the design (z), instrument (h) and varb matrices needed
        by the spatial diagnostics.  Currently unused: the only call site
        is commented out in GM_Lag_Regimes_Multi."""
        self._cache = {}
        yend, q = set_endog(y, x, w, yend, q, w_lags, lag_q)
        x = USER.check_constant(x)
        x = REGI.regimeX_setup(
            x, self.regimes, [True] * x.shape[1], self.regimes_set)
        # z = [X | yend] expanded by regime; the spatial lag column (last
        # yend column) is held fixed across regimes.
        self.z = sphstack(x, REGI.regimeX_setup(
            yend, self.regimes, [True] * (yend.shape[1] - 1) + [False], self.regimes_set))
        self.h = sphstack(
            x, REGI.regimeX_setup(q, self.regimes, [True] * q.shape[1], self.regimes_set))
        hthi = np.linalg.inv(spdot(self.h.T, self.h))
        zth = spdot(self.z.T, self.h)
        self.varb = np.linalg.inv(spdot(spdot(zth, hthi), zth.T))
def _work(y, x, regi_ids, r, yend, q, w_r, w_lags, lag_q, robust, sig2n_k, name_ds, name_y, name_x, name_yend, name_q, name_w, name_regimes):
    """Estimate a spatial TSLS model for the single regime ``r`` and return
    the fitted BaseTSLS instance with regime-prefixed variable names
    attached.  Used both sequentially and as a multiprocessing worker."""
    y_r = y[regi_ids[r]]
    x_r = x[regi_ids[r]]
    # Use "is not None": comparing a numpy array to None with != is an
    # elementwise operation, not a test for a missing argument.
    if yend is not None:
        yend_r = yend[regi_ids[r]]
    else:
        yend_r = yend
    if q is not None:
        q_r = q[regi_ids[r]]
    else:
        q_r = q
    yend_r, q_r = set_endog_sparse(y_r, x_r, w_r, yend_r, q_r, w_lags, lag_q)
    x_constant = USER.check_constant(x_r)
    # HAC and optimal-GMM corrections are applied after estimation, so run
    # the base estimator unadjusted in those cases.
    if robust == 'hac' or robust == 'ogmm':
        robust2 = None
    else:
        robust2 = robust
    model = BaseTSLS(
        y_r, x_constant, yend_r, q_r, robust=robust2, sig2n_k=sig2n_k)
    model.title = "SPATIAL TWO STAGE LEAST SQUARES ESTIMATION - REGIME %s" % r
    if robust == 'ogmm':
        _optimal_weight(model, sig2n_k, warn=False)
    # The spatial lag coefficient is the last beta by construction.
    model.rho = model.betas[-1]
    model.robust = USER.set_robust(robust)
    model.name_ds = name_ds
    model.name_y = '%s_%s' % (str(r), name_y)
    model.name_x = ['%s_%s' % (str(r), i) for i in name_x]
    model.name_yend = ['%s_%s' % (str(r), i) for i in name_yend]
    model.name_z = model.name_x + model.name_yend
    model.name_q = ['%s_%s' % (str(r), i) for i in name_q]
    model.name_h = model.name_x + model.name_q
    model.name_w = name_w
    model.name_regimes = name_regimes
    return model
def _test():
import doctest
start_suppress = np.get_printoptions()['suppress']
np.set_printoptions(suppress=True)
doctest.testmod()
np.set_printoptions(suppress=start_suppress)
if __name__ == '__main__':
    _test()
    # Small demonstration run on the Columbus sample data set shipped
    # with pysal: CRIME on INC with HOVAL endogenous (instrumented by
    # DISCBD), regimes defined by NSA, queen-contiguity weights.
    import numpy as np
    import pysal
    db = pysal.open(pysal.examples.get_path("columbus.dbf"), 'r')
    y_var = 'CRIME'
    y = np.array([db.by_col(y_var)]).reshape(49, 1)
    x_var = ['INC']
    x = np.array([db.by_col(name) for name in x_var]).T
    yd_var = ['HOVAL']
    yd = np.array([db.by_col(name) for name in yd_var]).T
    q_var = ['DISCBD']
    q = np.array([db.by_col(name) for name in q_var]).T
    r_var = 'NSA'
    regimes = db.by_col(r_var)
    w = pysal.queen_from_shapefile(pysal.examples.get_path("columbus.shp"))
    w.transform = 'r'
    model = GM_Lag_Regimes(y, x, regimes, yend=yd, q=q, w=w, constant_regi='many', spat_diag=True, sig2n_k=False, lag_q=True, name_y=y_var,
                           name_x=x_var, name_yend=yd_var, name_q=q_var, name_regimes=r_var, name_ds='columbus', name_w='columbus.gal', regime_err_sep=True, robust='white')
    # print() with a single argument behaves identically on Python 2
    # (parenthesized expression) and Python 3, unlike the old statement.
    print(model.summary)
| gpl-2.0 |
KellyChan/python-examples | python/aldebaran/hana/hana/motion/cartesian/motion_hulaHoop.py | 3 | 2824 | # -*- encoding: UTF-8 -*-
import sys
import motion
import almath
from naoqi import ALProxy
def StiffnessOn(proxy):
    """Enable full stiffness on every joint of the robot.

    "Body" is the alias NAOqi uses for the collection of all joints;
    stiffness is ramped to 1.0 over one second.
    """
    joint_group = "Body"
    target_stiffness = 1.0
    ramp_time = 1.0
    proxy.stiffnessInterpolation(joint_group, target_stiffness, ramp_time)
def main(robotIP):
    '''
    Example showing a Hula Hoop Motion
    with the NAO cartesian control of torso
    '''
    # NOTE: Python 2 syntax (`except Exception, e`, print statements);
    # this script targets the NAOqi Python 2 SDK.

    # Init proxies.
    try:
        motionProxy = ALProxy("ALMotion", robotIP, 9559)
    except Exception, e:
        print "Could not create proxy to ALMotion"
        print "Error was: ", e

    try:
        postureProxy = ALProxy("ALRobotPosture", robotIP, 9559)
    except Exception, e:
        print "Could not create proxy to ALRobotPosture"
        print "Error was: ", e

    # Set NAO in Stiffness On
    StiffnessOn(motionProxy)

    # Send NAO to Pose Init
    postureProxy.goToPosture("StandInit", 0.5)

    # Define the changes relative to the current position
    dx = 0.07  # translation axis X (meter)
    dy = 0.07  # translation axis Y (meter)
    dwx = 0.15  # rotation axis X (rad)
    dwy = 0.15  # rotation axis Y (rad)

    # Define a path of two hula hoop loops.  Each waypoint is a 6D torso
    # offset [dx, dy, dz, dwx, dwy, dwz] relative to the current pose.
    path = [[+dx, 0.0, 0.0, 0.0, -dwy, 0.0],  # point 01 : forward  / bend backward
            [0.0, -dy, 0.0, -dwx, 0.0, 0.0],  # point 02 : right    / bend left
            [-dx, 0.0, 0.0, 0.0, dwy, 0.0],   # point 03 : backward / bend forward
            [0.0, +dy, 0.0, dwx, 0.0, 0.0],   # point 04 : left     / bend right
            [+dx, 0.0, 0.0, 0.0, -dwy, 0.0],  # point 01 : forward  / bend backward
            [0.0, -dy, 0.0, -dwx, 0.0, 0.0],  # point 02 : right    / bend left
            [-dx, 0.0, 0.0, 0.0, dwy, 0.0],   # point 03 : backward / bend forward
            [0.0, +dy, 0.0, dwx, 0.0, 0.0],   # point 04 : left     / bend right
            [+dx, 0.0, 0.0, 0.0, -dwy, 0.0],  # point 05 : forward  / bend backward
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]   # point 06 : Back to init pose

    timeOneMove = 0.4  # seconds
    # Cumulative timestamps: one waypoint every timeOneMove seconds.
    times = []
    for i in range(len(path)):
        times.append((i + 1) * timeOneMove)

    # call the cartesian control API
    effector = "Torso"
    space = motion.FRAME_ROBOT
    axisMask = almath.AXIS_MASK_ALL
    isAbsolute = False  # path offsets are relative to the current pose
    motionProxy.positionInterpolation(effector, space, path,
                                      axisMask, times, isAbsolute)
if __name__ == "__main__":
    # Default to localhost (e.g. a local NAOqi simulator) unless a robot
    # IP is supplied on the command line.
    robotIp = "127.0.0.1"

    if len(sys.argv) <= 1:
        print "Usage python motion_hulaHoop.py robotIP (optional default: 127.0.0.1)"
    else:
        robotIp = sys.argv[1]

    main(robotIp)
| mit |
HoracioAlvarado/fwd | venv/Lib/site-packages/sqlalchemy/testing/util.py | 55 | 7544 | # testing/util.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from ..util import jython, pypy, defaultdict, decorator, py2k
import decimal
import gc
import time
import random
import sys
import types
# Pick a gc_collect/lazy_gc strategy appropriate for the running VM.
# ``jython`` and ``pypy`` are boolean flags imported from ..util above.
if jython:
    def jython_gc_collect(*args):
        """aggressive gc.collect for tests."""
        gc.collect()
        time.sleep(0.1)
        gc.collect()
        gc.collect()
        return 0

    # "lazy" gc, for VM's that don't GC on refcount == 0
    gc_collect = lazy_gc = jython_gc_collect
elif pypy:
    def pypy_gc_collect(*args):
        # Two passes: PyPy may need multiple collections to run finalizers.
        gc.collect()
        gc.collect()
    gc_collect = lazy_gc = pypy_gc_collect
else:
    # assume CPython - straight gc.collect, lazy_gc() is a pass
    gc_collect = gc.collect

    def lazy_gc():
        pass
def picklers():
    """Yield (loads, dumps) callable pairs for every available pickle
    implementation crossed with protocols -1, 0, 1 and 2.

    ``dumps`` is a one-argument callable with the protocol pre-bound.
    """
    picklers = set()
    if py2k:
        try:
            import cPickle
            picklers.add(cPickle)
        except ImportError:
            pass

    import pickle
    picklers.add(pickle)

    # yes, this thing needs this much testing
    for pickle_ in picklers:
        for protocol in -1, 0, 1, 2:
            # Bind the loop variables as argument defaults: the original
            # lambda closed over ``pickle_``/``protocol`` late, so a
            # consumer that first collected all pairs and called them
            # afterwards would get only the final protocol for every pair.
            yield pickle_.loads, (
                lambda d, pickle_=pickle_, protocol=protocol:
                pickle_.dumps(d, protocol))
def round_decimal(value, prec):
    """Round *value* to *prec* decimal places.

    Floats use the built-in round(); Decimals are shifted left,
    truncated toward negative infinity, then shifted back.
    """
    if isinstance(value, float):
        return round(value, prec)

    # can also use shift() here but that is 2.6 only
    shift = decimal.Decimal("1" + "0" * prec)
    truncated = (value * shift).to_integral(decimal.ROUND_FLOOR)
    return truncated / pow(10, prec)
class RandomSet(set):
    """A set whose iteration order and pop() choice are randomized.

    Used in tests to flush out order-dependent assumptions; all
    set-algebra results are themselves RandomSet instances.
    """

    def __iter__(self):
        members = list(set.__iter__(self))
        random.shuffle(members)
        return iter(members)

    def pop(self):
        # Remove and return a uniformly chosen member rather than an
        # arbitrary-but-deterministic one.
        choice = random.randint(0, len(self) - 1)
        victim = list(set.__iter__(self))[choice]
        self.remove(victim)
        return victim

    def union(self, other):
        return RandomSet(set.union(self, other))

    def difference(self, other):
        return RandomSet(set.difference(self, other))

    def intersection(self, other):
        return RandomSet(set.intersection(self, other))

    def copy(self):
        return RandomSet(self)
def conforms_partial_ordering(tuples, sorted_elements):
    """True if the given sorting conforms to the given partial ordering."""
    children_of = defaultdict(set)
    for parent, child in tuples:
        children_of[parent].add(child)

    for idx, element in enumerate(sorted_elements):
        # A violation exists when an element is the child of anything at
        # or after its own position in the proposed ordering.
        if any(element in children_of[later]
               for later in sorted_elements[idx:]):
            return False
    return True
def all_partial_orderings(tuples, elements):
    """Yield every total ordering of *elements* consistent with the
    (parent, child) dependency *tuples* (parents before children)."""
    parents_of = defaultdict(set)
    for parent, child in tuples:
        parents_of[child].add(parent)

    def _orderings(remaining):
        if len(remaining) == 1:
            yield list(remaining)
            return
        for candidate in remaining:
            rest = set(remaining).difference([candidate])
            # candidate may lead only if none of its parents remain.
            if not rest.intersection(parents_of[candidate]):
                for tail in _orderings(rest):
                    yield [candidate] + tail

    return iter(_orderings(elements))
def function_named(fn, name):
    """Return a function with a given __name__.

    Will assign to __name__ and return the original function if possible on
    the Python implementation, otherwise a new function will be constructed.

    This function should be phased out as much as possible
    in favor of @decorator. Tests that "generate" many named tests
    should be modernized.
    """
    try:
        fn.__name__ = name
        return fn
    except TypeError:
        # Built-in / C-level callables reject __name__ assignment;
        # rebuild a pure-Python function object around the same code.
        return types.FunctionType(fn.__code__, fn.__globals__, name,
                                  fn.__defaults__, fn.__closure__)
def run_as_contextmanager(ctx, fn, *arg, **kw):
    """Run the given function under the given contextmanager,
    simulating the behavior of 'with' to support older
    Python versions.

    This is not necessary anymore as we have placed 2.6
    as minimum Python version, however some tests are still using
    this structure.
    """
    entered = ctx.__enter__()
    try:
        outcome = fn(entered, *arg, **kw)
        ctx.__exit__(None, None, None)
        return outcome
    except BaseException:
        # Mirror 'with' semantics: give __exit__ the chance to swallow
        # the exception; a None return re-raises, anything else is
        # handed back to the caller.
        handled = ctx.__exit__(*sys.exc_info())
        if handled is None:
            raise
        return handled
def rowset(results):
    """Converts the results of sql execution into a plain set of column tuples.

    Useful for asserting the results of an unordered query.
    """
    return {tuple(row) for row in results}
def fail(msg):
    """Unconditionally fail the current test, reporting *msg*.

    Implemented via ``assert`` so it integrates with test-runner
    assertion rewriting (and is stripped under ``python -O``).
    """
    assert False, msg
@decorator
def provide_metadata(fn, *args, **kw):
    """Provide bound MetaData for a single test, dropping afterwards."""
    # Imported lazily to avoid import cycles at module load time.
    from . import config
    from . import engines
    from sqlalchemy import schema

    metadata = schema.MetaData(config.db)
    # args[0] is the test instance (``self``); stash any pre-existing
    # metadata so nested usage restores it afterwards.
    self = args[0]
    prev_meta = getattr(self, 'metadata', None)
    self.metadata = metadata
    try:
        return fn(*args, **kw)
    finally:
        # Always drop created tables and restore the previous metadata,
        # even if the test raised.
        engines.drop_all_tables(metadata, config.db)
        self.metadata = prev_meta
def force_drop_names(*names):
    """Force the given table names to be dropped after test complete,
    isolating for foreign key cycles
    """
    from . import config
    from sqlalchemy import inspect

    @decorator
    def go(fn, *args, **kw):
        try:
            return fn(*args, **kw)
        finally:
            # Uses the module-level drop_all_tables() below, restricted
            # to the requested names via include_names.
            drop_all_tables(
                config.db, inspect(config.db), include_names=names)
    return go
class adict(dict):
    """Dict keys available as attributes. Shadows."""

    def __getattribute__(self, key):
        # Prefer the mapping entry; fall back to genuine attributes
        # (e.g. dict methods such as .keys) when no entry exists.
        try:
            return self[key]
        except KeyError:
            return dict.__getattribute__(self, key)

    def __call__(self, *keys):
        """Return the values for *keys* as a tuple, in the given order."""
        return tuple(self[key] for key in keys)

    get_all = __call__
def drop_all_tables(engine, inspector, schema=None, include_names=None):
    """Drop all tables (optionally restricted to *include_names*),
    breaking foreign-key cycles by dropping constraints first."""
    from sqlalchemy import Column, Table, Integer, MetaData, \
        ForeignKeyConstraint
    from sqlalchemy.schema import DropTable, DropConstraint

    if include_names is not None:
        include_names = set(include_names)

    with engine.connect() as conn:
        # get_sorted_table_and_fkc_names yields (tablename, fkcs) in
        # dependency order; reversed() drops dependents first.  Entries
        # with tname=None carry the FK constraints that form cycles.
        for tname, fkcs in reversed(
                inspector.get_sorted_table_and_fkc_names(schema=schema)):
            if tname:
                if include_names is not None and tname not in include_names:
                    continue
                conn.execute(DropTable(
                    Table(tname, MetaData(), schema=schema)
                ))
            elif fkcs:
                if not engine.dialect.supports_alter:
                    continue
                for tname, fkc in fkcs:
                    if include_names is not None and \
                            tname not in include_names:
                        continue
                    # Build a throwaway Table/constraint pair purely so a
                    # DropConstraint DDL statement can be emitted.
                    tb = Table(
                        tname, MetaData(),
                        Column('x', Integer),
                        Column('y', Integer),
                        schema=schema
                    )
                    conn.execute(DropConstraint(
                        ForeignKeyConstraint(
                            [tb.c.x], [tb.c.y], name=fkc)
                    ))
def teardown_events(event_cls):
    """Decorator factory: clear all listeners registered on *event_cls*
    once the decorated test completes (pass or fail)."""
    @decorator
    def decorate(fn, *arg, **kw):
        try:
            return fn(*arg, **kw)
        finally:
            event_cls._clear()
    return decorate
| mit |
adrset/kernel | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";

# Optional [comm] filter and refresh [interval] in seconds.
for_comm = None
default_interval = 3
interval = default_interval

if len(sys.argv) > 3:
    sys.exit(usage)

if len(sys.argv) > 2:
    # Both arguments supplied: comm first, then interval.
    for_comm = sys.argv[1]
    interval = int(sys.argv[2])
elif len(sys.argv) > 1:
    try:
        interval = int(sys.argv[1])
    except ValueError:
        # A lone non-numeric argument is a command name, not an interval.
        for_comm = sys.argv[1]
        interval = default_interval

# autodict (from perf's Util helpers) auto-vivifies nested keys.
syscalls = autodict()
def trace_begin():
    # Called once by perf when tracing starts: spawn the background
    # thread that periodically prints and resets the per-syscall totals.
    thread.start_new_thread(print_syscall_totals, (interval,))
    pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, args):
    # perf event handler: count one entry for syscall ``id``.
    if for_comm is not None:
        # A [comm] filter was given; skip events from other commands.
        if common_comm != for_comm:
            return
    # autodict auto-vivifies, so the very first increment for a given id
    # hits a TypeError; initialize the counter there.
    try:
        syscalls[id] += 1
    except TypeError:
        syscalls[id] = 1
def print_syscall_totals(interval):
    # Runs on a background thread: every ``interval`` seconds, clear the
    # terminal, print current counts sorted descending, then reset.
    # NOTE: Python 2 print statements; this script targets perf's
    # embedded Python 2 interpreter.
    while 1:
        clear_term()
        if for_comm is not None:
            print "\nsyscall events for %s:\n\n" % (for_comm),
        else:
            print "\nsyscall events:\n\n",

        print "%-40s %10s\n" % ("event", "count"),
        print "%-40s %10s\n" % ("----------------------------------------", \
                                 "----------"),

        for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
                              reverse = True):
            try:
                print "%-40s %10d\n" % (syscall_name(id), val),
            except TypeError:
                pass
        syscalls.clear()
        time.sleep(interval)
| gpl-2.0 |
avrem/ardupilot | Tools/autotest/ardusub.py | 4 | 6516 | #!/usr/bin/env python
# Dive ArduSub in SITL
from __future__ import print_function
import os
from pymavlink import mavutil
from common import AutoTest
from common import NotAchievedException
# get location of scripts
testdir = os.path.dirname(os.path.realpath(__file__))
SITL_START_LOCATION = mavutil.location(33.810313, -118.393867, 0, 185)
class Joystick():
    """RC input channel numbers used to drive ArduSub in manual modes."""
    Pitch = 1
    Roll = 2
    Throttle = 3
    Yaw = 4
    Forward = 5
    Lateral = 6
class AutoTestSub(AutoTest):
    """Autotest suite for the ArduSub vehicle running under SITL."""

    # --- mode classification hooks consumed by the AutoTest base class ---

    @staticmethod
    def get_not_armable_mode_list():
        return []

    @staticmethod
    def get_not_disarmed_settable_modes_list():
        return []

    @staticmethod
    def get_no_position_not_settable_modes_list():
        return ["AUTO", "GUIDED", "CIRCLE", "POSHOLD"]

    @staticmethod
    def get_position_armable_modes_list():
        return []

    @staticmethod
    def get_normal_armable_modes_list():
        return ["ACRO", "ALT_HOLD", "MANUAL", "STABILIZE", "SURFACE"]

    def log_name(self):
        return "ArduSub"

    def test_filepath(self):
        return os.path.realpath(__file__)

    def default_mode(self):
        return 'MANUAL'

    def sitl_start_location(self):
        return SITL_START_LOCATION

    def default_frame(self):
        return 'vectored'

    def init(self):
        super(AutoTestSub, self).init()

        # FIXME:
        self.set_parameter("FS_GCS_ENABLE", 0)

    def is_sub(self):
        return True

    def arming_test_mission(self):
        return os.path.join(testdir, "ArduSub-Missions", "test_arming.txt")

    def dive_manual(self):
        """Drive a simple manual pattern via joystick RC overrides."""
        self.wait_ready_to_arm()
        self.arm_vehicle()

        self.set_rc(Joystick.Throttle, 1600)
        self.set_rc(Joystick.Forward, 1600)
        self.set_rc(Joystick.Lateral, 1550)

        self.wait_distance(50, accuracy=7, timeout=200)
        self.set_rc(Joystick.Yaw, 1550)

        self.wait_heading(0)
        self.set_rc(Joystick.Yaw, 1500)

        self.wait_distance(50, accuracy=7, timeout=100)
        self.set_rc(Joystick.Yaw, 1550)

        self.wait_heading(0)
        self.set_rc(Joystick.Yaw, 1500)
        self.set_rc(Joystick.Forward, 1500)
        self.set_rc(Joystick.Lateral, 1100)

        self.wait_distance(75, accuracy=7, timeout=100)
        self.set_rc_default()

        self.disarm_vehicle()
        self.progress("Manual dive OK")

    def dive_mission(self, filename):
        """Load and fly the AUTO mission in *filename* (waypoints 1-5)."""
        self.progress("Executing mission %s" % filename)
        self.load_mission(filename)
        self.set_rc_default()

        self.arm_vehicle()

        self.mavproxy.send('mode auto\n')
        self.wait_mode('AUTO')
        self.wait_waypoint(1, 5, max_dist=5)

        self.disarm_vehicle()

        self.progress("Mission OK")

    def test_gripper_mission(self):
        """Run the gripper mission, skipping if GRIP_ENABLE is absent."""
        self.context_push()
        ex = None
        try:
            try:
                self.get_parameter("GRIP_ENABLE", timeout=5)
            except NotAchievedException as e:
                self.progress("Skipping; Gripper not enabled in config?")
                return

            self.load_mission("sub-gripper-mission.txt")
            self.mavproxy.send('mode loiter\n')
            self.wait_ready_to_arm()
            self.arm_vehicle()
            self.mavproxy.send('mode auto\n')
            self.wait_mode('AUTO')
            self.mavproxy.expect("Gripper Grabbed")
            self.mavproxy.expect("Gripper Released")
        except Exception as e:
            # Defer the failure so context_pop() always runs.
            self.progress("Exception caught")
            ex = e
        self.context_pop()
        if ex is not None:
            raise ex

    def dive_set_position_target(self):
        """Move the vehicle in GUIDED via SET_POSITION_TARGET_GLOBAL_INT."""
        self.change_mode('GUIDED')
        self.wait_ready_to_arm()
        self.arm_vehicle()

        startpos = self.mav.recv_match(type='GLOBAL_POSITION_INT',
                                       blocking=True)

        lat = 5
        lon = 5
        alt = 10

        # Re-send the target until the vehicle has moved >10m, with a
        # 200 second (sim time) deadline.
        tstart = self.get_sim_time()
        while True:
            if self.get_sim_time_cached() - tstart > 200:
                raise NotAchievedException("Did not move far enough")
            # send a position-control command
            self.mav.mav.set_position_target_global_int_send(
                0, # timestamp
                1, # target system_id
                1, # target component id
                mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT_INT,
                0b1111111111111000, # mask specifying use-only-lat-lon-alt
                lat, # lat
                lon, # lon
                alt, # alt
                0, # vx
                0, # vy
                0, # vz
                0, # afx
                0, # afy
                0, # afz
                0, # yaw
                0, # yawrate
            )
            pos = self.mav.recv_match(type='GLOBAL_POSITION_INT',
                                      blocking=True)
            delta = self.get_distance_int(startpos, pos)
            self.progress("delta=%f (want >10)" % delta)
            if delta > 10:
                break
        self.change_mode('MANUAL')
        self.disarm_vehicle()

    def reboot_sitl(self):
        """Reboot SITL instance and wait it to reconnect."""
        self.mavproxy.send("reboot\n")
        self.mavproxy.expect("Init ArduSub")
        # empty mav to avoid getting old timestamps:
        while self.mav.recv_match(blocking=False):
            pass
        self.initialise_after_reboot_sitl()

    def disabled_tests(self):
        ret = super(AutoTestSub, self).disabled_tests()
        ret.update({
            "SensorConfigErrorLoop": "Sub does not instantiate AP_Stats. Also see https://github.com/ArduPilot/ardupilot/issues/10247",
        })
        return ret

    def tests(self):
        '''return list of all tests'''
        ret = super(AutoTestSub, self).tests()
        ret.extend([
            ("DiveManual", "Dive manual", self.dive_manual),

            ("DiveMission",
             "Dive mission",
             lambda: self.dive_mission("sub_mission.txt")),

            ("GripperMission",
             "Test gripper mission items",
             self.test_gripper_mission),

            ("SET_POSITION_TARGET_GLOBAL_INT",
             "Move vehicle using SET_POSITION_TARGET_GLOBAL_INT",
             self.dive_set_position_target),

            ("DownLoadLogs", "Download logs", lambda:
             self.log_download(
                 self.buildlogs_path("ArduSub-log.bin"),
                 upload_logs=len(self.fail_list) > 0)),
        ])
        return ret
| gpl-3.0 |
googleads/google-ads-python | google/ads/googleads/v7/services/services/webpage_view_service/transports/__init__.py | 2 | 1051 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import WebpageViewServiceTransport
from .grpc import WebpageViewServiceGrpcTransport
# Compile a registry of transports.
# Maps a transport label (currently only "grpc") to its transport class.
_transport_registry = (
    OrderedDict()
)  # type: Dict[str, Type[WebpageViewServiceTransport]]
_transport_registry["grpc"] = WebpageViewServiceGrpcTransport

# Public API of this sub-package.
__all__ = (
    "WebpageViewServiceTransport",
    "WebpageViewServiceGrpcTransport",
)
| apache-2.0 |
h2oai/h2o-3 | h2o-py/tests/testdir_apis/H2O_Module/pyunit_h2oset_timezone_DEPRECATED.py | 4 | 1041 | from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
from tests import pyunit_utils
import h2o
import random
def h2oset_timezone():
    """
    Python API test: h2o.set_timezone(value)
    Deprecated, set h2o.cluster().timezone instead.
    Copy from pyunit_get_set_list_timezones.py
    """
    origTZ = h2o.get_timezone()
    print("Original timezone: {0}".format(origTZ))

    timezones = h2o.list_timezones()
    # don't use the first one..it's a header for the table
    print("timezones[0]:", timezones[0])
    zone = timezones[random.randint(1, timezones.nrow - 1), 0].split(" ")[1].split(",")[0]
    print("Setting the timezone: {0}".format(zone))
    h2o.set_timezone(zone)

    newTZ = h2o.get_timezone()
    # Fix: the original message used field "{01}", which only works via
    # str.format's leading-zero digit parsing and reads as a typo; "{1}"
    # is the documented spelling and produces identical output.
    assert newTZ == zone, "Expected new timezone to be {0}, but got {1}".format(zone, newTZ)
    print("Setting the timezone back to original: {0}".format(origTZ))
    h2o.set_timezone(origTZ)
if __name__ == "__main__":
    # Run under the standalone pyunit harness when executed directly;
    # call inline when imported by the test runner.
    pyunit_utils.standalone_test(h2oset_timezone)
else:
    h2oset_timezone()
| apache-2.0 |
barnacles10/ChopSuey | tools/perf/scripts/python/failed-syscalls-by-pid.py | 944 | 1869 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
usage = "perf trace -s syscall-counts-by-pid.py [comm]\n";

# Optional [comm] filter: only count syscalls issued by that command.
for_comm = None

if len(sys.argv) > 2:
    sys.exit(usage)

if len(sys.argv) > 1:
    for_comm = sys.argv[1]

# autodict (perf helper) auto-vivifies nested keys:
# syscalls[comm][pid][syscall-id][errno] -> count
syscalls = autodict()
def trace_begin():
    # Called once by perf when the trace starts; nothing to set up.
    pass
def trace_end():
    # Called once by perf when the trace finishes; emit the report.
    print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, ret):
    # perf event handler: tally failed syscall exits per comm/pid/id/errno.
    if for_comm is not None:
        if common_comm != for_comm:
            return

    # Negative return values are -errno, i.e. failures.
    if ret < 0:
        # autodict auto-vivifies; the first increment raises TypeError.
        try:
            syscalls[common_comm][common_pid][id][ret] += 1
        except TypeError:
            syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
    # Print failed-syscall counts grouped by comm, pid, syscall id, and
    # errno, sorted by descending count within each syscall.
    # NOTE: Python 2 print statements; targets perf's embedded Python 2.
    if for_comm is not None:
        print "\nsyscall errors for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall errors:\n\n",

    print "%-30s %10s\n" % ("comm [pid]", "count"),
    print "%-30s %10s\n" % ("------------------------------", \
                            "----------"),

    comm_keys = syscalls.keys()
    for comm in comm_keys:
        pid_keys = syscalls[comm].keys()
        for pid in pid_keys:
            print "\n%s [%d]\n" % (comm, pid),
            id_keys = syscalls[comm][pid].keys()
            for id in id_keys:
                print " syscall: %-16d\n" % (id),
                ret_keys = syscalls[comm][pid][id].keys()
                for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
                    print " err = %-20d %10d\n" % (ret, val),
| gpl-2.0 |
colin2328/asciiclass | labs/lab3/worldcup_wrangler.py | 1 | 14912 | from wrangler import dw
import sys
if(len(sys.argv) < 3):
sys.exit('Error: Please include an input and output file. Example python script.py input.csv output.csv')
w = dw.DataWrangler()
# Split data repeatedly on newline into rows
w.add(dw.Split(column=["data"],
table=0,
status="active",
drop=True,
result="row",
update=False,
insert_position="right",
row=None,
on="\n",
before=None,
after=None,
ignore_between=None,
which=1,
max=0,
positions=None,
quote_character=None))
# Cut on '"'
w.add(dw.Cut(column=[],
table=0,
status="active",
drop=False,
result="column",
update=True,
insert_position="right",
row=None,
on="\"",
before=None,
after=None,
ignore_between=None,
which=1,
max=0,
positions=None))
# Cut from data between ']]' and ', '
w.add(dw.Cut(column=["data"],
table=0,
status="active",
drop=False,
result="column",
update=True,
insert_position="right",
row=None,
on=".*",
before=", ",
after="]]",
ignore_between=None,
which=1,
max=1,
positions=None))
# Cut from data before '{'
w.add(dw.Cut(column=["data"],
table=0,
status="active",
drop=False,
result="column",
update=True,
insert_position="right",
row=None,
on=".*",
before="{",
after=None,
ignore_between=None,
which=1,
max=1,
positions=None))
# Cut from data before '(\[\['
w.add(dw.Cut(column=["data"],
table=0,
status="active",
drop=False,
result="column",
update=True,
insert_position="right",
row=None,
on=".*",
before="\\(\\[\\[",
after=None,
ignore_between=None,
which=1,
max=1,
positions=None))
# Cut from data on '[\[ any number FIFA any word any word \|'
w.add(dw.Cut(column=["data"],
table=0,
status="active",
drop=False,
result="column",
update=True,
insert_position="right",
row=None,
on="\\[\\[[0-9]+ FIFA [a-zA-Z]+ [a-zA-Z]+\\|",
before=None,
after=None,
ignore_between=None,
which=1,
max="0",
positions=None))
# Cut from data on ']]'
w.add(dw.Cut(column=["data"],
table=0,
status="active",
drop=False,
result="column",
update=True,
insert_position="right",
row=None,
on="]]",
before=None,
after=None,
ignore_between=None,
which=1,
max="0",
positions=None))
# Cut from data between '1990' and ')'
w.add(dw.Cut(column=["data"],
table=0,
status="active",
drop=False,
result="column",
update=True,
insert_position="right",
row=None,
on=".*",
before="\\)",
after="1990",
ignore_between=None,
which=1,
max=1,
positions=None))
# Cut from data after '}}'
w.add(dw.Cut(column=["data"],
table=0,
status="active",
drop=False,
result="column",
update=True,
insert_position="right",
row=None,
on=".*",
before=None,
after="}}",
ignore_between=None,
which=1,
max=1,
positions=None))
# Cut from data between '1974' and ','
w.add(dw.Cut(column=["data"],
table=0,
status="active",
drop=False,
result="column",
update=True,
insert_position="right",
row=None,
on=".*",
before=",",
after="1974",
ignore_between=None,
which=1,
max=1,
positions=None))
# Cut from data on '{{ any word |'
w.add(dw.Cut(column=["data"],
table=0,
status="active",
drop=False,
result="column",
update=True,
insert_position="right",
row=None,
on="{{[a-zA-Z]+\\|",
before=None,
after=None,
ignore_between=None,
which=1,
max=1,
positions=None))
# Cut from data on '}}'
w.add(dw.Cut(column=["data"],
table=0,
status="active",
drop=False,
result="column",
update=True,
insert_position="right",
row=None,
on="}}",
before=None,
after=None,
ignore_between=None,
which=1,
max=1,
positions=None))
# Delete rows where data = '|-'
w.add(dw.Filter(column=[],
table=0,
status="active",
drop=False,
row=dw.Row(column=[],
table=0,
status="active",
drop=False,
conditions=[dw.Eq(column=[],
table=0,
status="active",
drop=False,
lcol="data",
value="|-",
op_str="=")])))
# Cut from data on '| any number '
w.add(dw.Cut(column=["data"],
table=0,
status="active",
drop=False,
result="column",
update=True,
insert_position="right",
row=None,
on="\\|\\d+",
before=None,
after=None,
ignore_between=None,
which=1,
max=1,
positions=None))
# Delete rows where data = '|align=center| —'
w.add(dw.Filter(column=[],
table=0,
status="active",
drop=False,
row=dw.Row(column=[],
table=0,
status="active",
drop=False,
conditions=[dw.Eq(column=[],
table=0,
status="active",
drop=False,
lcol="data",
value="|align=center| —",
op_str="=")])))
# Cut from data on '('
w.add(dw.Cut(column=["data"],
table=0,
status="active",
drop=False,
result="column",
update=True,
insert_position="right",
row=None,
on="\\(",
before=None,
after=None,
ignore_between=None,
which=1,
max=1,
positions=None))
# Cut from data on ')'
w.add(dw.Cut(column=["data"],
table=0,
status="active",
drop=False,
result="column",
update=True,
insert_position="right",
row=None,
on="\\)",
before=None,
after=None,
ignore_between=None,
which=1,
max=1,
positions=None))
# Delete row 94
w.add(dw.Filter(column=[],
table=0,
status="active",
drop=False,
row=dw.Row(column=[],
table=0,
status="active",
drop=False,
conditions=[dw.RowIndex(column=[],
table=0,
status="active",
drop=False,
indices=[93])])))
# Delete row 94
w.add(dw.Filter(column=[],
table=0,
status="active",
drop=False,
row=dw.Row(column=[],
table=0,
status="active",
drop=False,
conditions=[dw.RowIndex(column=[],
table=0,
status="active",
drop=False,
indices=[93])])))
# Delete row 94
w.add(dw.Filter(column=[],
table=0,
status="active",
drop=False,
row=dw.Row(column=[],
table=0,
status="active",
drop=False,
conditions=[dw.RowIndex(column=[],
table=0,
status="active",
drop=False,
indices=[93])])))
# Cut from data on '[\[#1\|\*'
w.add(dw.Cut(column=["data"],
table=0,
status="active",
drop=False,
result="column",
update=True,
insert_position="right",
row=None,
on="\\[\\[#1\\|\\*",
before=None,
after=None,
ignore_between=None,
which=1,
max=1,
positions=None))
# Cut from data on '! Team !! Titles !! Runners-up !! Third place !! Fourth place !! Top 4 <br/> finishes'
w.add(dw.Cut(column=["data"],
table=0,
status="active",
drop=False,
result="column",
update=True,
insert_position="right",
row=None,
on="! Team !! Titles !! Runners-up !! Third place !! Fourth place !! Top 4 <br/> finishes",
before=None,
after=None,
ignore_between=None,
which=1,
max="0",
positions=None))
# Wrap empty rows
w.add(dw.Wrap(column=[],
table=0,
status="active",
drop=False,
row=dw.Row(column=[],
table=0,
status="active",
drop=False,
conditions=[dw.Empty(column=[],
table=0,
status="active",
drop=False,
percent_valid=0,
num_valid=0)])))
# Fold wrap, wrap1, wrap2, wrap3... using header as a key
w.add(dw.Fold(column=["wrap","wrap1","wrap2","wrap3","wrap4","wrap5"],
table=0,
status="active",
drop=False,
keys=[-1]))
# Translate value up
w.add(dw.Translate(column=["value"],
table=0,
status="active",
drop=False,
direction="up",
values=1))
# Drop value
w.add(dw.Drop(column=["value"],
table=0,
status="active",
drop=True))
# Extract from fold between positions 4, 5
w.add(dw.Extract(column=["fold"],
table=0,
status="active",
drop=False,
result="column",
update=False,
insert_position="right",
row=None,
on=None,
before=None,
after=None,
ignore_between=None,
which=1,
max=1,
positions=[4,5]))
# Drop fold
w.add(dw.Drop(column=["fold"],
table=0,
status="active",
drop=True))
# Extract from translate on ' any word '
w.add(dw.Extract(column=["translate"],
table=0,
status="active",
drop=False,
result="column",
update=False,
insert_position="right",
row=None,
on="[a-zA-Z]+",
before=None,
after=None,
ignore_between=None,
which=1,
max=1,
positions=None))
# Fill extract1 with values from above
w.add(dw.Fill(column=["extract1"],
table=0,
status="active",
drop=False,
direction="down",
method="copy",
row=None))
# Delete rows where extract is null
w.add(dw.Filter(column=[],
table=0,
status="active",
drop=False,
row=dw.Row(column=[],
table=0,
status="active",
drop=False,
conditions=[dw.IsNull(column=[],
table=0,
status="active",
drop=False,
lcol="extract",
value=None,
op_str="is null")])))
# Delete rows where extract = '5'
w.add(dw.Filter(column=[],
table=0,
status="active",
drop=False,
row=dw.Row(column=[],
table=0,
status="active",
drop=False,
conditions=[dw.Eq(column=[],
table=0,
status="active",
drop=False,
lcol="extract",
value="5",
op_str="=")])))
# Split translate repeatedly on ',' into rows
w.add(dw.Split(column=["translate"],
table=0,
status="active",
drop=True,
result="row",
update=False,
insert_position="right",
row=None,
on=",",
before=None,
after=None,
ignore_between=None,
which=1,
max="0",
positions=None,
quote_character=None))
# Cut from translate on '*'
w.add(dw.Cut(column=["translate"],
table=0,
status="active",
drop=False,
result="column",
update=True,
insert_position="right",
row=None,
on="\\*",
before=None,
after=None,
ignore_between=None,
which=1,
max=1,
positions=None))
# Delete row 77
w.add(dw.Filter(column=[],
table=0,
status="active",
drop=False,
row=dw.Row(column=[],
table=0,
status="active",
drop=False,
conditions=[dw.RowIndex(column=[],
table=0,
status="active",
drop=False,
indices=[76])])))
w.apply_to_file(sys.argv[1]).print_csv(sys.argv[2]) | mit |
txemi/ansible | lib/ansible/modules/network/nxos/nxos_smu.py | 21 | 6168 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_smu
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Perform SMUs on Cisco NX-OS devices.
description:
- Perform software maintenance upgrades (SMUs) on Cisco NX-OS devices.
author: Gabriele Gerbino (@GGabriele)
notes:
- The module can only activate and commit a package,
not remove or deactivate it.
- Use C(transport=nxapi) to avoid connection timeout
options:
pkg:
description:
- Name of the remote package.
required: true
file_system:
description:
- The remote file system of the device. If omitted,
devices that support a file_system parameter will use
their default values.
required: false
default: null
'''
EXAMPLES = '''
- nxos_smu:
pkg: "nxos.CSCuz65185-n9k_EOR-1.0.0-7.0.3.I2.2d.lib32_n9000.rpm"
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
'''
RETURN = '''
file_system:
description: The remote file system of the device.
returned: always
type: string
sample: "bootflash:"
pkg:
description: Name of the remote package
type: string
returned: always
sample: "nxos.CSCuz65185-n9k_EOR-1.0.0-7.0.3.I2.2d.lib32_n9000.rpm"
updates:
description: commands sent to the device
returned: always
type: list
sample: ["install add bootflash:nxos.CSCuz65185-n9k_EOR-1.0.0-7.0.3.I2.2d.lib32_n9000.rpm",
"install activate bootflash:nxos.CSCuz65185-n9k_EOR-1.0.0-7.0.3.I2.2d.lib32_n9000.rpm force",
"install commit bootflash:nxos.CSCuz65185-n9k_EOR-1.0.0-7.0.3.I2.2d.lib32_n9000.rpm"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import CustomNetworkConfig
import time
import collections
import re
import re
def execute_show_command(command, module, command_type='cli_show'):
    """Run a single show command on the device and return the response body.

    ``command_type`` is retained for interface compatibility but unused:
    both the 'cli' and 'nxapi' transports issue the command through
    run_commands identically.

    Fixes: the original duplicated an identical branch per transport and
    left ``body`` unbound (UnboundLocalError) for any other transport
    value; transports are constrained to cli/nxapi by nxos_argument_spec.
    """
    return run_commands(module, [command])
def remote_file_exists(module, dst, file_system='bootflash:'):
    """Return True if *dst* exists on the device's *file_system*."""
    listing = execute_show_command(
        'dir {0}/{1}'.format(file_system, dst),
        module, command_type='cli_show_ascii')
    # The device reports a missing path with a 'No such file' message.
    return 'No such file' not in listing[0]
def apply_patch(module, commands):
    """Apply each SMU install command in order, pausing between steps.

    Fix: the original tested ``'failed' in response`` against a name that
    was never assigned (NameError on every call); capture the output of
    load_config and guard against a None/empty result before inspecting it.
    """
    for command in commands:
        response = load_config(module, [command])
        # Give the device time to settle between install stages.
        time.sleep(5)
        if response and 'failed' in response:
            module.fail_json(msg="Operation failed!", response=response)
def get_commands(module, pkg, file_system):
    """Build the ordered install add/activate/commit command list for *pkg*,
    skipping stages the device already reports as done."""
    commands = []
    # Package name with its trailing extension (e.g. '.rpm') removed, as
    # the device reports packages without it.
    fixed_pkg = '.'.join(pkg.split('.')[0:-1])

    inactive_body = execute_show_command('show install inactive', module,
                                         command_type='cli_show_ascii')
    active_body = execute_show_command('show install active', module,
                                       command_type='cli_show_ascii')

    if fixed_pkg not in inactive_body[0] and fixed_pkg not in active_body[0]:
        commands.append('install add {0}{1}'.format(file_system, pkg))
    if fixed_pkg not in active_body[0]:
        commands.append('install activate {0}{1} force'.format(
            file_system, pkg))

    install_body = execute_show_command('show install committed', module,
                                        command_type='cli_show_ascii')
    if fixed_pkg not in install_body[0]:
        commands.append('install commit {0}{1}'.format(file_system, pkg))

    return commands
def main():
    """Module entry point: validate the package exists, then patch it in."""
    argument_spec = dict(
        pkg=dict(required=True),
        file_system=dict(required=False, default='bootflash:'),
        include_defaults=dict(default=False),
        config=dict(),
        save=dict(type='bool', default=False)
    )
    argument_spec.update(nxos_argument_spec)

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    warnings = list()
    check_args(module, warnings)

    pkg = module.params['pkg']
    file_system = module.params['file_system']
    changed = False

    if not remote_file_exists(module, pkg, file_system=file_system):
        module.fail_json(msg="The requested package doesn't exist "
                             "on the device")

    commands = get_commands(module, pkg, file_system)
    if not module.check_mode and commands:
        try:
            apply_patch(module, commands)
            changed = True
        except Exception as e:
            # Bug fix: the original caught 'ShellError' and called
            # 'get_exception()', neither of which is imported anywhere in
            # this file, so any failure surfaced as a NameError rather than
            # a clean module failure.
            module.fail_json(msg=str(e))

    if 'configure' in commands:
        commands.pop(0)

    module.exit_json(changed=changed,
                     pkg=pkg,
                     file_system=file_system,
                     updates=commands)


if __name__ == '__main__':
    main()
| gpl-3.0 |
jordiclariana/ansible | lib/ansible/modules/notification/rocketchat.py | 19 | 8608 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Deepak Kothandan <deepak.kothandan@outlook.com>
# (c) 2015, Stefan Berggren <nsg@nsg.cc>
# (c) 2014, Ramon de la Fuente <ramon@delafuente.nl>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Ansible metadata describing the module's maturity and support level.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'version': '1.0'}
DOCUMENTATION = """
module: rocketchat
short_description: Send notifications to Rocket Chat
description:
- The M(rocketchat) module sends notifications to Rocket Chat via the Incoming WebHook integration
version_added: "2.2"
author: "Ramon de la Fuente (@ramondelafuente)"
options:
domain:
description:
- The domain for your environment without protocol. (i.e.
C(example.com) or C(chat.example.com))
required: true
token:
description:
- Rocket Chat Incoming Webhook integration token. This provides
authentication to Rocket Chat's Incoming webhook for posting
messages.
required: true
protocol:
description:
- Specify the protocol used to send notification messages before the webhook url. (i.e. http or https)
required: false
default: https
choices:
- 'http'
- 'https'
msg:
description:
- Message to be sent.
required: false
default: None
channel:
description:
- Channel to send the message to. If absent, the message goes to the channel selected for the I(token)
          specified during the creation of webhook.
required: false
default: None
username:
description:
- This is the sender of the message.
required: false
default: "Ansible"
icon_url:
description:
- URL for the message sender's icon.
required: false
default: "https://www.ansible.com/favicon.ico"
icon_emoji:
description:
- Emoji for the message sender. The representation for the available emojis can be
got from Rocket Chat. (for example :thumbsup:) (if I(icon_emoji) is set, I(icon_url) will not be used)
required: false
default: None
link_names:
description:
- Automatically create links for channels and usernames in I(msg).
required: false
default: 1
choices:
- 1
- 0
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices:
- 'yes'
- 'no'
color:
description:
- Allow text to use default colors - use the default of 'normal' to not send a custom color bar at the start of the message
required: false
default: 'normal'
choices:
- 'normal'
- 'good'
- 'warning'
- 'danger'
attachments:
description:
- Define a list of attachments.
required: false
default: None
"""
EXAMPLES = """
- name: Send notification message via Rocket Chat
rocketchat:
token: thetoken/generatedby/rocketchat
domain: chat.example.com
msg: '{{ inventory_hostname }} completed'
delegate_to: localhost
- name: Send notification message via Rocket Chat all options
rocketchat:
domain: chat.example.com
token: thetoken/generatedby/rocketchat
msg: '{{ inventory_hostname }} completed'
    channel: '#ansible'
username: 'Ansible on {{ inventory_hostname }}'
icon_url: http://www.example.com/some-image-file.png
link_names: 0
delegate_to: localhost
- name: insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured in rocketchat
rocketchat:
token: thetoken/generatedby/rocketchat
domain: chat.example.com
msg: '{{ inventory_hostname }} is alive!'
color: good
username: ''
icon_url: ''
delegate_to: localhost
- name: Use the attachments API
rocketchat:
token: thetoken/generatedby/rocketchat
domain: chat.example.com
attachments:
- text: Display my system load on host A and B
        color: '#ff00dd'
title: System load
fields:
- title: System A
value: 'load average: 0,74, 0,66, 0,63'
short: True
- title: System B
value: 'load average: 5,16, 4,64, 2,43'
short: True
delegate_to: localhost
"""
RETURN = """
changed:
description: A flag indicating if any change was made or not.
returned: success
type: boolean
sample: false
"""
# URL template for the incoming webhook: protocol, domain, token.
ROCKETCHAT_INCOMING_WEBHOOK = '%s://%s/hooks/%s'
def build_payload_for_rocketchat(module, text, channel, username, icon_url,
                                 icon_emoji, link_names, color, attachments):
    """Assemble the 'payload=<json>' body expected by the webhook endpoint.

    :param module: AnsibleModule (only its jsonify() method is used)
    :param text: message text, or None
    :param channel: target channel; a '#'/'@' prefix is added if missing
    :param username: sender name shown in the channel, or None
    :param icon_url: sender icon URL (used only when icon_emoji is None)
    :param icon_emoji: sender emoji; takes precedence over icon_url
    :param link_names: 1 to auto-link channels/usernames, 0 otherwise
    :param color: 'normal' for plain text, otherwise rendered via attachment
    :param attachments: optional list of attachment dicts
    :returns: string of the form 'payload=' + JSON document
    """
    payload = {}
    if color == "normal" and text is not None:
        payload = dict(text=text)
    elif text is not None:
        # Non-default colors can only be expressed through an attachment.
        payload = dict(attachments=[dict(text=text, color=color)])
    if channel is not None:
        if channel[0] in ('#', '@'):
            payload['channel'] = channel
        else:
            payload['channel'] = '#' + channel
    if username is not None:
        payload['username'] = username
    if icon_emoji is not None:
        payload['icon_emoji'] = icon_emoji
    else:
        payload['icon_url'] = icon_url
    if link_names is not None:
        payload['link_names'] = link_names
    if attachments is not None:
        # The original nested a second, redundant `if attachments is not
        # None` check here; a single test is sufficient.
        payload.setdefault('attachments', [])
        for attachment in attachments:
            # Rocket Chat requires a 'fallback'; default it to the text.
            if 'fallback' not in attachment:
                attachment['fallback'] = attachment['text']
            payload['attachments'].append(attachment)
    return "payload=" + module.jsonify(payload)
def do_notify_rocketchat(module, domain, token, protocol, payload):
    """POST the prepared payload to the Rocket Chat incoming webhook."""
    # A valid webhook token always contains at least one '/' separator.
    if token.count('/') < 1:
        module.fail_json(msg="Invalid Token specified, provide a valid token")

    webhook_url = ROCKETCHAT_INCOMING_WEBHOOK % (protocol, domain, token)
    response, info = fetch_url(module, webhook_url, data=payload)
    if info['status'] != 200:
        module.fail_json(
            msg="failed to send message, return status=%s"
                % str(info['status']))
def main():
    """Module entry point: build the payload and post it to Rocket Chat."""
    module = AnsibleModule(
        argument_spec=dict(
            # Fix: 'domain' was declared with both required=True and
            # default=None — a default on a required parameter is
            # contradictory and is rejected by Ansible's arg-spec checks.
            domain=dict(type='str', required=True),
            token=dict(type='str', required=True, no_log=True),
            protocol=dict(type='str', default='https',
                          choices=['http', 'https']),
            msg=dict(type='str', required=False, default=None),
            channel=dict(type='str', default=None),
            username=dict(type='str', default='Ansible'),
            icon_url=dict(type='str',
                          default='https://www.ansible.com/favicon.ico'),
            icon_emoji=dict(type='str', default=None),
            link_names=dict(type='int', default=1, choices=[0, 1]),
            # validate_certs is honoured by fetch_url() internally.
            validate_certs=dict(default='yes', type='bool'),
            color=dict(type='str', default='normal',
                       choices=['normal', 'good', 'warning', 'danger']),
            attachments=dict(type='list', required=False, default=None)
        )
    )

    domain = module.params['domain']
    token = module.params['token']
    protocol = module.params['protocol']
    text = module.params['msg']
    channel = module.params['channel']
    username = module.params['username']
    icon_url = module.params['icon_url']
    icon_emoji = module.params['icon_emoji']
    link_names = module.params['link_names']
    color = module.params['color']
    attachments = module.params['attachments']

    payload = build_payload_for_rocketchat(module, text, channel, username,
                                           icon_url, icon_emoji, link_names,
                                           color, attachments)
    do_notify_rocketchat(module, domain, token, protocol, payload)

    module.exit_json(msg="OK")
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
# Run the module only when executed directly (not on import).
if __name__ == '__main__':
    main()
| gpl-3.0 |
luisfdez/pywinrm | winrm/tests/kerberos/test_cffi_mini.py | 4 | 8021 | import os
from cffi import FFI
ffi = FFI()
ffi.cdef("""
// Original source could be found here:
// https://github.com/krb5/krb5/blob/master/src/lib/gssapi/generic/gssapi.hin
typedef uint32_t gss_uint32;
typedef gss_uint32 OM_uint32;
typedef struct gss_OID_desc_struct {
OM_uint32 length;
void *elements;
} gss_OID_desc, *gss_OID;
typedef struct gss_buffer_desc_struct {
size_t length;
void *value;
} gss_buffer_desc, *gss_buffer_t;
// TODO investigate why we can not inspect gss_name_t
struct gss_name_struct;
typedef struct gss_name_struct * gss_name_t;
//typedef struct gss_name_struct {
// size_t length;
// char *value;
// gss_OID type;
//} gss_name_desc, *gss_name_t;
struct gss_cred_id_struct;
typedef struct gss_cred_id_struct * gss_cred_id_t;
struct gss_ctx_id_struct;
typedef struct gss_ctx_id_struct * gss_ctx_id_t;
typedef struct gss_channel_bindings_struct {
OM_uint32 initiator_addrtype;
gss_buffer_desc initiator_address;
OM_uint32 acceptor_addrtype;
gss_buffer_desc acceptor_address;
gss_buffer_desc application_data;
} *gss_channel_bindings_t;
#define GSS_C_GSS_CODE ...
#define GSS_C_MECH_CODE ...
#define GSS_C_NO_NAME ...
#define GSS_C_NO_BUFFER ...
#define GSS_C_NO_OID ...
#define GSS_C_NO_OID_SET ...
#define GSS_C_NO_CONTEXT ...
#define GSS_C_NO_CREDENTIAL ...
#define GSS_C_NO_CHANNEL_BINDINGS ...
#define GSS_C_NO_OID ...
#define GSS_C_NO_CHANNEL_BINDINGS ...
#define GSS_C_NULL_OID ...
#define GSS_C_INDEFINITE ...
extern gss_OID GSS_C_NT_HOSTBASED_SERVICE;
OM_uint32
gss_import_name(
OM_uint32 *, /* minor_status */
gss_buffer_t, /* input_name_buffer */
gss_OID, /* input_name_type(used to be const) */
gss_name_t *); /* output_name */
OM_uint32
gss_init_sec_context(
OM_uint32 *, /* minor_status */
gss_cred_id_t, /* claimant_cred_handle */
gss_ctx_id_t *, /* context_handle */
gss_name_t, /* target_name */
gss_OID, /* mech_type (used to be const) */
OM_uint32, /* req_flags */
OM_uint32, /* time_req */
gss_channel_bindings_t, /* input_chan_bindings */
gss_buffer_t, /* input_token */
gss_OID *, /* actual_mech_type */
gss_buffer_t, /* output_token */
OM_uint32 *, /* ret_flags */
OM_uint32 *); /* time_rec */
OM_uint32
gss_display_status(
OM_uint32 *, /* minor_status */
OM_uint32, /* status_value */
int, /* status_type */
gss_OID, /* mech_type (used to be const) */
OM_uint32 *, /* message_context */
gss_buffer_t); /* status_string */
OM_uint32
gss_release_buffer(
OM_uint32 *, /* minor_status */
gss_buffer_t); /* buffer */
""")
C = ffi.verify(
"""
#include <gssapi/gssapi.h>
#include <gssapi/gssapi_generic.h>
#include <gssapi/gssapi_krb5.h>
""",
# include_dirs=['/usr/include/gssapi'], # This is not required
libraries=['gssapi_krb5'])
class GSSInternalError(Exception):
    """Raised when a GSS display-status call itself fails."""
    pass
class GSSError(Exception):
    """Base class for errors reported by the GSSAPI library."""
    pass
class CredentialsCacheNotFound(GSSError):
    """Raised when no Kerberos credentials cache is available (e.g. kinit
    was never run or the Kerberos client tools are not installed)."""
    pass
# TODO find better name
class ServerNotFoundInKerberosDatabase(GSSError):
    """Raised when the target server principal is unknown to the KDC."""
    pass
class KerberosServerNotFound(GSSError):
    """Raised when the KDC itself is unreachable.

    Usually carries a message like: Cannot resolve servers for KDC in realm
    'SOME.REALM'"""
    pass
def _gss_buffer_to_str(gss_buffer):
    """Copy a GSS buffer's contents into a Python string and release it.

    The value must be read BEFORE gss_release_buffer frees the underlying
    storage — do not reorder these two calls.
    """
    out_str = ffi.string(ffi.cast('char *', gss_buffer.value))
    C.gss_release_buffer(ffi.new('OM_uint32 *'), gss_buffer)
    return out_str
def _str_to_gss_buffer(in_str):
    """Wrap a Python string in a newly allocated gss_buffer_t (length, data)."""
    return ffi.new('gss_buffer_t', [len(in_str), ffi.new('char[]', in_str)])
def validate_gss_status(major_value, minor_value):
    """Translate a GSS major/minor status pair into a Python exception.

    A major value of 0 (GSS_S_COMPLETE) means success and returns None.
    Otherwise the human-readable major and minor status strings are fetched
    via gss_display_status and raised as a (hopefully specific) GSSError
    subclass.  Raises GSSInternalError if the display-status lookup itself
    fails.
    """
    if major_value == 0:
        return
    minor_status_p = ffi.new('OM_uint32 *')
    message_ctx_p = ffi.new('OM_uint32 *')
    status_str_buf = ffi.new('gss_buffer_t')
    # Look up the text for the MAJOR (GSS-level) status code.
    mech_type = ffi.new('gss_OID', [C.GSS_C_NO_OID])
    major_status = C.gss_display_status(
        minor_status_p, major_value, C.GSS_C_GSS_CODE, mech_type,
        message_ctx_p, status_str_buf)
    if major_status != 0:
        raise GSSInternalError(
            'Failed to get GSS major display status for last API call')
    major_status_str = _gss_buffer_to_str(status_str_buf)
    # Look up the text for the MINOR (mechanism-level) status code.
    mech_type = ffi.new('gss_OID', [C.GSS_C_NULL_OID])
    major_status = C.gss_display_status(
        minor_status_p, minor_value, C.GSS_C_MECH_CODE, mech_type,
        message_ctx_p, status_str_buf)
    if major_status != 0:
        raise GSSInternalError(
            'Failed to get GSS minor display status for last API call')
    minor_status_str = _gss_buffer_to_str(status_str_buf)
    # TODO investigate how to de-allocate memory
    # TODO replace hardcoded integers into constants/flags from cffi
    # NOTE(review): the magic numbers below look like specific krb5 minor
    # status codes (851968 == GSS_S_FAILURE); confirm against the krb5
    # error tables before relying on them.
    if major_value == 851968 and minor_value == 2529639107:
        # TODO In addition to minor_value check we need to check that kerberos
        # client is installed.
        raise CredentialsCacheNotFound(
            minor_status_str
            + '. Make sure that Kerberos Linux Client was installed. '
            + 'Run "sudo apt-get install krb5-user" for Debian/Ubuntu Linux.')
    elif major_value == 851968 and minor_value == 2529638919:
        raise ServerNotFoundInKerberosDatabase(minor_status_str)
    elif major_value == 851968 and minor_value == 2529639132:
        raise KerberosServerNotFound(
            minor_status_str
            + '. Make sure that Kerberos Server is reachable over network. '
            + 'Try use ping or telnet tools in order to check that.')
    else:
        # __main__.GSSError: (('An unsupported mechanism was requested', 65536)
        # ,('Unknown error', 0))
        # __main__.GSSError: (('A required output parameter could not be
        # written', 34078720), ('Unknown error', 0))
        raise GSSError((major_status_str, major_value), (
            minor_status_str, minor_value))
def authenticate_gss_client_init(service, principal):
    """Start a GSSAPI security context for the given service.

    :param service: target in "service@server-host" form (e.g. HTTP@host)
    :param principal: client identity in "username@realm" form; currently
        only validated for presence — it is not passed to the GSS calls yet.
    :raises GSSError: if either argument is missing or any GSS call fails.
    """
    if not service:
        raise GSSError('Service was not provided. Please specify '
                       'service in "service@server-host" format')
    if not principal:
        raise GSSError('Principal was not provided. Please specify '
                       'principal in "username@realm" format')
    minor_status_p = ffi.new('OM_uint32 *')
    # Convert the textual service name into an internal gss_name_t.
    service_buf = _str_to_gss_buffer(service)
    out_server_name_p = ffi.new('gss_name_t *')
    major_status = C.gss_import_name(
        minor_status_p, service_buf,
        C.GSS_C_NT_HOSTBASED_SERVICE, # ffi.cast('gss_OID', C.GSS_C_NO_OID),
        out_server_name_p)
    validate_gss_status(major_status, minor_status_p[0])
    # gss_flags = C.GSS_C_MUTUAL_FLAG | C.GSS_C_SEQUENCE_FLAG |
    # C.GSS_C_CONF_FLAG | C.GSS_C_INTEG_FLAG
    gss_flags = 0
    input_token = ffi.new('gss_buffer_t')
    output_token = ffi.new('gss_buffer_t')
    ret_flags = ffi.new('OM_uint32 *')
    # First (and only) pass of the init-sec-context handshake.
    major_status = C.gss_init_sec_context(
        minor_status_p, ffi.NULL, ffi.cast(
            'gss_ctx_id_t *', C.GSS_C_NO_CONTEXT), out_server_name_p[0],
        ffi.cast('gss_OID', C.GSS_C_NO_OID),
        gss_flags,
        0,
        # ffi.cast('gss_channel_bindings_t', C.GSS_C_NO_CHANNEL_BINDINGS),
        ffi.NULL,
        input_token,
        # ffi.cast('gss_OID *', C.GSS_C_NO_OID),
        ffi.NULL,
        output_token,
        ret_flags,
        # ffi.cast('OM_uint32 *', C.GSS_C_INDEFINITE))
        ffi.NULL)
    validate_gss_status(major_status, minor_status_p[0])
# Manual smoke test: target service/principal come from the environment.
if __name__ == '__main__':
    krb_service = os.environ.get('WINRM_KRB_SERVICE', 'HTTP@server-host')
    krb_principal = os.environ.get('WINRM_KRB_PRINCIPAL', 'username@realm')
    # FIXME: Investigate how to pass server name and fix following error
    # __main__.GSSError: (('A required output parameter could not be written',
    # 34078720), ('Unknown error', 0))
    authenticate_gss_client_init(krb_service, krb_principal)
| mit |
andrmuel/gr-dab | python/qa/qa_ofdm_move_and_insert_zero.py | 1 | 1294 | #!/usr/bin/env python
from gnuradio import gr, gr_unittest, blocks
import grdab
class qa_ofdm_move_and_insert_zero(gr_unittest.TestCase):
    """
    @brief QA for the block that moves the signal to the middle of the band and inserts the zero carrier in the middle.

    This class implements a test bench to verify the corresponding C++ class.
    """

    def setUp(self):
        # Fresh flowgraph for every test case.
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def test_001_ofdm_move_and_insert_zero(self):
        """Feed two 4-carrier symbols through the block and verify the
        10-point output: left padding, first half, inserted zero carrier,
        second half, right padding."""
        num_carriers = 4
        fft_length = 10
        # Fix: the original also assigned an unused local
        # 'd_zeros_on_left = 3'; removed.
        src_data0 = range(0, 8)
        expected_result0 = ([0, 0, 0] + [0, 1] + [0] + [2, 3] + [0, 0]
                            + [0, 0, 0] + [4, 5] + [0] + [6, 7] + [0, 0])
        expected_result0 = [complex(x) for x in expected_result0]
        src0 = blocks.vector_source_c(src_data0)
        s2v0 = blocks.stream_to_vector(gr.sizeof_gr_complex, num_carriers)
        ofdm_move_and_insert_zero = grdab.ofdm_move_and_insert_zero(
            fft_length, num_carriers)
        v2s0 = blocks.vector_to_stream(gr.sizeof_gr_complex, fft_length)
        dst0 = blocks.vector_sink_c()
        self.tb.connect(src0, s2v0, ofdm_move_and_insert_zero, v2s0, dst0)
        self.tb.run()
        result_data0 = dst0.data()
        self.assertComplexTuplesAlmostEqual(expected_result0, result_data0, 6)
# Run the QA suite only when executed directly.
if __name__ == '__main__':
    gr_unittest.main()
| gpl-3.0 |
eonpatapon/nova | nova/tests/unit/virt/disk/vfs/test_guestfs.py | 47 | 11421 | # Copyright (C) 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
from nova import exception
from nova import test
from nova.tests.unit.virt.disk.vfs import fakeguestfs
from nova.virt.disk.vfs import guestfs as vfsimpl
from nova.virt.image import model as imgmodel
class VirtDiskVFSGuestFSTest(test.NoDBTestCase):
    """Tests for the libguestfs-backed VFS implementation.

    The real guestfs binding is monkey-patched with a fake, so these tests
    can inspect the drives, mounts and file operations VFSGuestFS performs
    without requiring a libguestfs appliance.
    """

    def setUp(self):
        super(VirtDiskVFSGuestFSTest, self).setUp()
        self.useFixture(
            fixtures.MonkeyPatch('nova.virt.disk.vfs.guestfs.guestfs',
                                 fakeguestfs))
        # One image of each flavour the driver knows how to mount.
        self.qcowfile = imgmodel.LocalFileImage("/dummy.qcow2",
                                                imgmodel.FORMAT_QCOW2)
        self.rawfile = imgmodel.LocalFileImage("/dummy.img",
                                               imgmodel.FORMAT_RAW)
        self.lvmfile = imgmodel.LocalBlockImage("/dev/volgroup/myvol")
        self.rbdfile = imgmodel.RBDImage("myvol", "mypool",
                                         "cthulu",
                                         "arrrrrgh",
                                         ["server1:123", "server2:123"])

    def _do_test_appliance_setup_inspect(self, image, drives, forcetcg):
        """Common body for the appliance setup/inspect tests below."""
        if forcetcg:
            vfsimpl.force_tcg()
        else:
            vfsimpl.force_tcg(False)

        vfs = vfsimpl.VFSGuestFS(
            image,
            partition=-1)
        vfs.setup()

        if forcetcg:
            self.assertEqual("force_tcg", vfs.handle.backend_settings)
            vfsimpl.force_tcg(False)
        else:
            self.assertIsNone(vfs.handle.backend_settings)

        self.assertTrue(vfs.handle.running)
        self.assertEqual(drives,
                         vfs.handle.drives)
        self.assertEqual(3, len(vfs.handle.mounts))
        self.assertEqual("/dev/mapper/guestvgf-lv_root",
                         vfs.handle.mounts[0][1])
        self.assertEqual("/dev/vda1",
                         vfs.handle.mounts[1][1])
        self.assertEqual("/dev/mapper/guestvgf-lv_home",
                         vfs.handle.mounts[2][1])
        self.assertEqual("/", vfs.handle.mounts[0][2])
        self.assertEqual("/boot", vfs.handle.mounts[1][2])
        self.assertEqual("/home", vfs.handle.mounts[2][2])

        handle = vfs.handle
        vfs.teardown()

        self.assertIsNone(vfs.handle)
        self.assertFalse(handle.running)
        self.assertTrue(handle.closed)
        self.assertEqual(0, len(handle.mounts))

    def test_appliance_setup_inspect_auto(self):
        drives = [("/dummy.qcow2", {"format": "qcow2"})]
        self._do_test_appliance_setup_inspect(self.qcowfile, drives, False)

    def test_appliance_setup_inspect_tcg(self):
        drives = [("/dummy.qcow2", {"format": "qcow2"})]
        self._do_test_appliance_setup_inspect(self.qcowfile, drives, True)

    def test_appliance_setup_inspect_raw(self):
        drives = [("/dummy.img", {"format": "raw"})]
        self._do_test_appliance_setup_inspect(self.rawfile, drives, True)

    def test_appliance_setup_inspect_lvm(self):
        drives = [("/dev/volgroup/myvol", {"format": "raw"})]
        self._do_test_appliance_setup_inspect(self.lvmfile, drives, True)

    def test_appliance_setup_inspect_rbd(self):
        drives = [("mypool/myvol", {"format": "raw",
                                    "protocol": "rbd",
                                    "username": "cthulu",
                                    "secret": "arrrrrgh",
                                    "server": ["server1:123",
                                               "server2:123"]})]
        self._do_test_appliance_setup_inspect(self.rbdfile, drives, True)

    def test_appliance_setup_inspect_no_root_raises(self):
        vfs = vfsimpl.VFSGuestFS(self.qcowfile,
                                 partition=-1)
        # call setup to init the handle so we can stub it
        vfs.setup()
        self.assertIsNone(vfs.handle.backend_settings)

        def fake_inspect_os():
            return []

        self.stubs.Set(vfs.handle, 'inspect_os', fake_inspect_os)
        self.assertRaises(exception.NovaException, vfs.setup_os_inspect)

    def test_appliance_setup_inspect_multi_boots_raises(self):
        vfs = vfsimpl.VFSGuestFS(self.qcowfile,
                                 partition=-1)
        # call setup to init the handle so we can stub it
        vfs.setup()
        self.assertIsNone(vfs.handle.backend_settings)

        def fake_inspect_os():
            return ['fake1', 'fake2']

        self.stubs.Set(vfs.handle, 'inspect_os', fake_inspect_os)
        self.assertRaises(exception.NovaException, vfs.setup_os_inspect)

    def test_appliance_setup_static_nopart(self):
        vfs = vfsimpl.VFSGuestFS(self.qcowfile,
                                 partition=None)
        vfs.setup()
        self.assertIsNone(vfs.handle.backend_settings)
        self.assertTrue(vfs.handle.running)
        self.assertEqual(1, len(vfs.handle.mounts))
        self.assertEqual("/dev/sda", vfs.handle.mounts[0][1])
        self.assertEqual("/", vfs.handle.mounts[0][2])

        handle = vfs.handle
        vfs.teardown()

        self.assertIsNone(vfs.handle)
        self.assertFalse(handle.running)
        self.assertTrue(handle.closed)
        self.assertEqual(0, len(handle.mounts))

    def test_appliance_setup_static_part(self):
        vfs = vfsimpl.VFSGuestFS(self.qcowfile,
                                 partition=2)
        vfs.setup()
        self.assertIsNone(vfs.handle.backend_settings)
        self.assertTrue(vfs.handle.running)
        self.assertEqual(1, len(vfs.handle.mounts))
        self.assertEqual("/dev/sda2", vfs.handle.mounts[0][1])
        self.assertEqual("/", vfs.handle.mounts[0][2])

        handle = vfs.handle
        vfs.teardown()

        self.assertIsNone(vfs.handle)
        self.assertFalse(handle.running)
        self.assertTrue(handle.closed)
        self.assertEqual(0, len(handle.mounts))

    def test_makepath(self):
        vfs = vfsimpl.VFSGuestFS(self.qcowfile)
        vfs.setup()
        vfs.make_path("/some/dir")
        vfs.make_path("/other/dir")
        self.assertIn("/some/dir", vfs.handle.files)
        self.assertIn("/other/dir", vfs.handle.files)
        self.assertTrue(vfs.handle.files["/some/dir"]["isdir"])
        self.assertTrue(vfs.handle.files["/other/dir"]["isdir"])
        vfs.teardown()

    def test_append_file(self):
        vfs = vfsimpl.VFSGuestFS(self.qcowfile)
        vfs.setup()
        vfs.append_file("/some/file", " Goodbye")
        self.assertIn("/some/file", vfs.handle.files)
        self.assertEqual("Hello World Goodbye",
                         vfs.handle.files["/some/file"]["content"])
        vfs.teardown()

    def test_replace_file(self):
        vfs = vfsimpl.VFSGuestFS(self.qcowfile)
        vfs.setup()
        vfs.replace_file("/some/file", "Goodbye")
        self.assertIn("/some/file", vfs.handle.files)
        self.assertEqual("Goodbye",
                         vfs.handle.files["/some/file"]["content"])
        vfs.teardown()

    def test_read_file(self):
        vfs = vfsimpl.VFSGuestFS(self.qcowfile)
        vfs.setup()
        self.assertEqual("Hello World", vfs.read_file("/some/file"))
        vfs.teardown()

    def test_has_file(self):
        vfs = vfsimpl.VFSGuestFS(self.qcowfile)
        vfs.setup()
        vfs.read_file("/some/file")
        self.assertTrue(vfs.has_file("/some/file"))
        self.assertFalse(vfs.has_file("/other/file"))
        vfs.teardown()

    def test_set_permissions(self):
        vfs = vfsimpl.VFSGuestFS(self.qcowfile)
        vfs.setup()
        vfs.read_file("/some/file")
        self.assertEqual(0o700, vfs.handle.files["/some/file"]["mode"])
        vfs.set_permissions("/some/file", 0o7777)
        self.assertEqual(0o7777, vfs.handle.files["/some/file"]["mode"])
        vfs.teardown()

    def test_set_ownership(self):
        vfs = vfsimpl.VFSGuestFS(self.qcowfile)
        vfs.setup()
        vfs.read_file("/some/file")
        self.assertEqual(100, vfs.handle.files["/some/file"]["uid"])
        self.assertEqual(100, vfs.handle.files["/some/file"]["gid"])
        vfs.set_ownership("/some/file", "fred", None)
        self.assertEqual(105, vfs.handle.files["/some/file"]["uid"])
        self.assertEqual(100, vfs.handle.files["/some/file"]["gid"])
        vfs.set_ownership("/some/file", None, "users")
        self.assertEqual(105, vfs.handle.files["/some/file"]["uid"])
        self.assertEqual(500, vfs.handle.files["/some/file"]["gid"])
        vfs.set_ownership("/some/file", "joe", "admins")
        self.assertEqual(110, vfs.handle.files["/some/file"]["uid"])
        self.assertEqual(600, vfs.handle.files["/some/file"]["gid"])
        vfs.teardown()

    def test_close_on_error(self):
        vfs = vfsimpl.VFSGuestFS(self.qcowfile)
        vfs.setup()
        self.assertFalse(vfs.handle.kwargs['close_on_exit'])
        vfs.teardown()
        self.stubs.Set(fakeguestfs.GuestFS, 'SUPPORT_CLOSE_ON_EXIT', False)
        vfs = vfsimpl.VFSGuestFS(self.qcowfile)
        vfs.setup()
        self.assertNotIn('close_on_exit', vfs.handle.kwargs)
        vfs.teardown()

    def test_python_return_dict(self):
        vfs = vfsimpl.VFSGuestFS(self.qcowfile)
        vfs.setup()
        self.assertFalse(vfs.handle.kwargs['python_return_dict'])
        vfs.teardown()
        self.stubs.Set(fakeguestfs.GuestFS, 'SUPPORT_RETURN_DICT', False)
        vfs = vfsimpl.VFSGuestFS(self.qcowfile)
        vfs.setup()
        self.assertNotIn('python_return_dict', vfs.handle.kwargs)
        vfs.teardown()

    def test_setup_debug_disable(self):
        vfs = vfsimpl.VFSGuestFS(self.qcowfile)
        vfs.setup()
        self.assertFalse(vfs.handle.trace_enabled)
        self.assertFalse(vfs.handle.verbose_enabled)
        self.assertIsNone(vfs.handle.event_callback)

    def test_setup_debug_enabled(self):
        self.flags(debug=True, group='guestfs')
        vfs = vfsimpl.VFSGuestFS(self.qcowfile)
        vfs.setup()
        self.assertTrue(vfs.handle.trace_enabled)
        self.assertTrue(vfs.handle.verbose_enabled)
        self.assertIsNotNone(vfs.handle.event_callback)

    def test_get_format_fs(self):
        vfs = vfsimpl.VFSGuestFS(self.rawfile)
        vfs.setup()
        self.assertIsNotNone(vfs.handle)
        # Bug fix: this was assertTrue('ext3', ...), where the second
        # argument is treated as the failure *message*, so the assertion
        # could never fail.  assertEqual performs the intended comparison.
        self.assertEqual('ext3', vfs.get_image_fs())
        vfs.teardown()

    @mock.patch.object(vfsimpl.VFSGuestFS, 'setup_os')
    def test_setup_mount(self, setup_os):
        vfs = vfsimpl.VFSGuestFS(self.qcowfile)
        vfs.setup()
        self.assertTrue(setup_os.called)

    @mock.patch.object(vfsimpl.VFSGuestFS, 'setup_os')
    def test_setup_mount_false(self, setup_os):
        vfs = vfsimpl.VFSGuestFS(self.qcowfile)
        vfs.setup(mount=False)
        self.assertFalse(setup_os.called)
| apache-2.0 |
senser/xmppBot | ZenPacks/community/xmppBot/Jabber/plugins/setjid.py | 1 | 4107 | """Check if the sender is a valid zenoss admin. For access control"""
from Jabber.Plugins import Plugin
from Jabber.ZenAdapter import ZenAdapter
from Jabber.Options import Options
from optparse import OptionError
import transaction
class SetJid(Plugin):
  """Map a JabberID to an existing Zenoss user (access-control mapping)."""
  name = 'mapuser'
  capabilities = ['setjid', 'mapuser', 'help']
  def call(self, args, sender, log, **kw):
    """Handle the mapuser command: parse -u/-j and store the mapping."""
    log.debug('mapuser plugin running with %s' % args)
    opts = self.options()
    # parse the options
    try:
      (options, arguments) = opts.parse_args(args)
      log.debug('Done parsing arguments. Options are "%s", arguments expanded to %s' % (options, arguments))
    except OptionError, message:
      return str(message)
    if options.zenUser is None or options.jabberId is None:
      return 'NO. -u and -j are both required.'
    adapter = ZenAdapter()
    jabberId = options.jabberId.lower()
    haveUser = False
    for user in adapter.userSettings():
      if user.id.lower() == options.zenUser.lower():
        haveUser = True
        try:
          currentId = user.getProperty('JabberId')
        except AttributeError:
          # User exists but has no JabberId property yet.
          currentId = False
        if currentId:
          if options.jabberId == currentId.lower():
            if options.force:
              self.mapIds(jabberId, user)
              return 'This user mapping already looks like this. Forced option was used, so I set it anyway.'
            else:
              return 'This user mapping already looks like this.'
          # Strip the XMPP resource suffix before comparing to the sender.
          if '/' in sender:
            sender = sender.split('/')[0]
          if currentId.lower() == sender.lower():
            # NOTE(review): these two branches appear inverted — the
            # options.force path returns the refusal message while the
            # non-force path performs the change and claims force was
            # used.  Left as-is; verify intent before changing.
            if options.force:
              return 'This is your Zenoss user id, and the mapping is already set correctly. Changing it will prevent you from communicating with me. If you really want to change it, do so from the Zenoss interface or -f.'
            else:
              self.mapIds(jabberId, user)
              return 'This is your Zenoss user id, and the mapping is already set correctly. However, the force option was used, so I set it anyway. Since this will probably break communication with me, you can change it back from the Zope interface.'
        log.debug('Setting the jabberid mapping property to %s for zenuser %s' % (jabberId, user))
        self.mapIds(jabberId, user)
        break
    if haveUser:
      return 'JabberId for this user has been saved. Thanks.'
    else:
      return 'Sorry! I Could not find a Zenoss user by the name %s' % options.zenUser
  def mapIds(self, jabberId, zenUser):
    """Persist the JabberId property on the user and commit the ZODB txn."""
    self.setPropertyIfNeeded(zenUser)
    zenUser._updateProperty('JabberId', jabberId)
    transaction.commit()
  def setPropertyIfNeeded(self, zenUser):
    """Ensure the 'JabberId' string property exists on the user object."""
    if not zenUser.hasProperty('JabberId'):
      zenUser.manage_addProperty('JabberId', '', 'string')
      zenUser._setProperty('JabberId', '', 'string')
    try:
      zenUser.getProperty('JabberId')
    except AttributeError:
      zenUser.manage_addProperty('JabberId', '', 'string')
      # unnecessary?
      #zenUser._setProperty('JabberId', '', 'string')
  def private(self):
    # This command may be used in public chats as well as direct messages.
    return False
  def options(self):
    """Build the option parser for -u/-j/-f."""
    # NOTE(review): description/prog look copied from the 'ack' plugin.
    parser = Options(description = 'Acknowledge events by eventid', prog = 'ack')
    parser.add_option('-u', '--user', dest='zenUser', help='Zenoss username (must already exist in zenoss).')
    parser.add_option('-j', '--jid', dest='jabberId', help='JabberID to map to the zenoss user.')
    parser.add_option('-f', '--force', dest='force', action='store_true', help='Force association even if it could disallow your own user. USE WITH CAUTION.')
    return parser
  def help(self):
    """Return the usage text for this plugin."""
    opts = self.options()
    return str(opts.help())
| gpl-2.0 |
laurentiush/bioformats | components/autogen/src/gen-meta-support.py | 7 | 4779 | #!/usr/bin/env bash
###
# #%L
# Bio-Formats autogen package for programmatically generating source code.
# %%
# Copyright (C) 2007 - 2015 Open Microscopy Environment:
# - Board of Regents of the University of Wisconsin-Madison
# - Glencoe Software, Inc.
# - University of Dundee
# %%
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 2 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/gpl-2.0.html>.
# #L%
###
from os import listdir
import re
from os.path import basename, dirname, join, abspath, isfile
# Preamble written verbatim at the top of the generated meta-support.txt.
HEADER = """# This file documents the metadata support for each file format that
# Bio-Formats can handle. Default value for unlisted properties is Missing,
# indicating that the property cannot be represented in the format, or our
# knowledge regarding the property regarding this format is incomplete.
# To define the status of a property, use the syntax:
#
# Entity.Property = Status [Comment]
#
# "Status" is one of Yes, No, Partial or Missing.
# There is usually no need to specify Missing status, as it is the default.
#
# "Comment" is optional extra text for specifying further details, such as
# when the status changed. This value can include a revision, a ticket, a
# datestamp or any other appropriate information.
#
# As a shortcut for every property of a given entity, you can write:
#
# Entity [Comment]
#
# Examples:
#
# Dimensions = Yes since r2351
# Objective.NominalMagnification = Yes added on 2008 Jan 8
# ImagingEnvironment.Temperature = Partial see ticket #167 for details
"""
# Paths are resolved relative to this script's location in components/.
currentDir = dirname(__file__)
outputFile = join(currentDir, 'meta-support.txt')
componentsDir = abspath(join(currentDir, '..', '..'))
def is_file(f, ftype=".java"):
    """Return True when f is an existing regular file ending in ftype."""
    return f.endswith(ftype) and isfile(f)
def get_xml_elements():
    """List all XML element names from the OME model source tree."""
    elements = []
    modelDir = join(
        componentsDir, 'ome-xml', 'build', 'src', 'ome', 'xml', 'model')
    for f in sorted(listdir(modelDir)):
        if not is_file(join(modelDir, f)):
            continue
        # Bug fix: rstrip('.java') strips a trailing *character set*
        # ('.', 'j', 'a', 'v'), not a suffix — e.g. 'TiffData.java'
        # became 'TiffDat'.  Slice off the suffix length instead; the
        # is_file() check above guarantees the name ends with '.java'.
        elements.append(basename(f)[:-len('.java')])
    return elements
def get_readers():
    """List the full paths of all GPL and BSD reader source files,
    sorted case-insensitively within each component."""
    readers = []
    for component in ('formats-gpl', 'formats-bsd'):
        formatsDir = join(componentsDir, component, 'src', 'loci', 'formats', 'in')
        for name in sorted(listdir(formatsDir), key=str.lower):
            path = join(formatsDir, name)
            if is_file(path, ftype="Reader.java"):
                readers.append(path)
    return readers
def split_element(s, elements):
    """Split ``s`` into "Element.Property" using the longest element from
    ``elements`` that prefixes it; return None when nothing matches.

    Raises if more than two elements match, which would make the choice
    ambiguous.
    """
    matches = [element for element in elements if s.startswith(element)]
    if not matches:
        return
    if len(matches) > 2:
        raise Exception('Found more than 2 matching XML elements')
    # When two elements match (one a prefix of the other), prefer the longest.
    prefix = max(matches, key=len)
    return "%s.%s" % (prefix, s[len(prefix):])
# Look for MetadataStore setter methods, e.g. store.setImageID(...).
# (Raw string avoids relying on Python leaving unknown escapes intact.)
pattern = re.compile(r'store\.set(\w+)')
# Register MetadataStore setter calls made in MetadataTools; these are
# common to every reader.
metadatatools = join(componentsDir, 'formats-api', 'src', 'loci', 'formats',
                     'MetadataTools.java')
commonElements = []
with open(metadatatools) as f:
    commonElements = pattern.findall(f.read())
# Read XML elements from the model
xml_elements = get_xml_elements()
with open(outputFile, 'w') as f:
    f.write(HEADER)
    for reader in get_readers():
        # BUG FIX: slice off the exact '.java' suffix; str.rstrip('.java')
        # would also strip trailing 'j'/'a'/'v'/'.' characters from the name.
        readername = basename(reader)[:-len('.java')]
        # Parenthesized print works under both Python 2 and Python 3.
        print("Parsing %s" % readername)
        f.write("[%s]\n" % readername)
        # Close the reader source promptly instead of leaking the handle.
        with open(reader) as source:
            text = source.read()
        # Find MetadataStore setter calls
        r = pattern.findall(text)
        r.extend(commonElements)
        if not r:
            f.write("\n")
            continue
        # Enforce unique elements
        r = set(r)
        for metadata_element in sorted(r):
            split_metadata = split_element(metadata_element, xml_elements)
            if split_metadata:
                f.write("%s = Yes\n" % split_metadata)
        f.write("\n")
| gpl-2.0 |
Bysmyyr/chromium-crosswalk | third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py | 6 | 24521 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import re
import time
from webkitpy.layout_tests.controllers import repaint_overlay
from webkitpy.layout_tests.controllers import test_result_writer
from webkitpy.layout_tests.port.driver import DeviceFailure, DriverInput, DriverOutput
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models.test_results import TestResult
from webkitpy.layout_tests.models import testharness_results
_log = logging.getLogger(__name__)
def run_single_test(
        port, options, results_directory, worker_name, primary_driver,
        secondary_driver, test_input, stop_when_done):
    """Run one layout test and return its TestResult.

    Wraps SingleTestRunner.run() so that a DeviceFailure raised by the
    driver is logged and reported as a TestResult with device_failed=True
    instead of propagating to the caller.
    """
    runner = SingleTestRunner(
        port, options, results_directory, worker_name, primary_driver,
        secondary_driver, test_input, stop_when_done)
    try:
        return runner.run()
    except DeviceFailure as e:
        _log.error("device failed: %s", str(e))
        return TestResult(test_input.test_name, device_failed=True)
class SingleTestRunner(object):
(ALONGSIDE_TEST, PLATFORM_DIR, VERSION_DIR, UPDATE) = ('alongside', 'platform', 'version', 'update')
def __init__(self, port, options, results_directory, worker_name,
             primary_driver, secondary_driver, test_input, stop_when_done):
    """Prepare to run a single test.

    port: platform-specific Port object.
    options: parsed command-line options.
    results_directory: directory where result artifacts are written.
    worker_name: name of the worker process (used in log messages).
    primary_driver: driver used to run the test itself.
    secondary_driver: optional driver used for references of virtual tests
        that run their references with the default flags.
    test_input: TestInput describing the test to run.
    stop_when_done: if True, stop the driver after this test.
    """
    self._port = port
    self._filesystem = port.host.filesystem
    self._options = options
    self._results_directory = results_directory
    self._driver = primary_driver
    self._reference_driver = primary_driver
    self._timeout = test_input.timeout
    self._worker_name = worker_name
    self._test_name = test_input.test_name
    self._should_run_pixel_test = test_input.should_run_pixel_test
    self._reference_files = test_input.reference_files
    self._should_add_missing_baselines = test_input.should_add_missing_baselines
    self._stop_when_done = stop_when_done

    # If this is a virtual test that uses the default flags instead of the
    # virtual flags for its references, run it on the secondary driver so
    # that the primary driver does not need to be restarted.
    if (secondary_driver and
            self._port.is_virtual_test(self._test_name) and
            not self._port.lookup_virtual_reference_args(self._test_name)):
        self._reference_driver = secondary_driver

    if self._reference_files:
        # Detect and report a test which has a wrong combination of expectation files.
        # For example, if 'foo.html' has two expectation files, 'foo-expected.html' and
        # 'foo-expected.txt', we should warn users. One test file must be used exclusively
        # in either layout tests or reftests, but not in both.
        for suffix in ('.txt', '.png', '.wav'):
            expected_filename = self._port.expected_filename(self._test_name, suffix)
            if self._filesystem.exists(expected_filename):
                _log.error('%s is a reftest, but has an unused expectation file. Please remove %s.',
                           self._test_name, expected_filename)
def _expected_driver_output(self):
    """Return a DriverOutput built from the stored expectations
    (text, image, checksum, audio) for this test."""
    return DriverOutput(self._port.expected_text(self._test_name),
                        self._port.expected_image(self._test_name),
                        self._port.expected_checksum(self._test_name),
                        self._port.expected_audio(self._test_name))
def _should_fetch_expected_checksum(self):
return self._should_run_pixel_test and not (self._options.new_baseline or self._options.reset_results)
def _driver_input(self):
    """Build the DriverInput describing how to run this test.

    The image hash is used to avoid doing an image dump if the
    checksums match, so it should be set to a blank value if we
    are generating a new baseline. (Otherwise, an image from a
    previous run will be copied into the baseline.)
    """
    image_hash = None
    if self._should_fetch_expected_checksum():
        image_hash = self._port.expected_checksum(self._test_name)

    test_base = self._port.lookup_virtual_test_base(self._test_name)
    if test_base:
        # If the file actually exists under the virtual dir, we want to use it (largely for virtual references),
        # but we want to use the extra command line args either way.
        if self._filesystem.exists(self._port.abspath_for_test(self._test_name)):
            test_name = self._test_name
        else:
            test_name = test_base
        args = self._port.lookup_virtual_test_args(self._test_name)
    else:
        test_name = self._test_name
        args = self._port.lookup_physical_test_args(self._test_name)
    return DriverInput(test_name, self._timeout, image_hash, self._should_run_pixel_test, args)
def run(self):
    """Run the test and return a TestResult.

    Dispatches to sanitizer mode, reftest mode, rebaseline mode or the
    ordinary compare mode depending on options and reference files.
    """
    if self._options.enable_sanitizer:
        return self._run_sanitized_test()
    if self._reference_files:
        if self._options.reset_results:
            # Reftests have no baselines to reset, so report them skipped.
            reftest_type = set([reference_file[0] for reference_file in self._reference_files])
            result = TestResult(self._test_name, reftest_type=reftest_type)
            result.type = test_expectations.SKIP
            return result
        return self._run_reftest()
    if self._options.reset_results:
        return self._run_rebaseline()
    return self._run_compare_test()
def _run_sanitized_test(self):
    """Run the test, ignoring its output and reporting only crashes/timeouts."""
    # running a sanitized test means that we ignore the actual test output and just look
    # for timeouts and crashes (real or forced by the driver). Most crashes should
    # indicate problems found by a sanitizer (ASAN, LSAN, etc.), but we will report
    # on other crashes and timeouts as well in order to detect at least *some* basic failures.
    driver_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
    expected_driver_output = self._expected_driver_output()
    failures = self._handle_error(driver_output)
    test_result = TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(),
                             pid=driver_output.pid)
    test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, driver_output, expected_driver_output, test_result.failures)
    return test_result
def _run_compare_test(self):
    """Run the test, compare its output against the stored expectations,
    optionally write missing baselines, and record the result."""
    driver_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
    expected_driver_output = self._expected_driver_output()
    test_result = self._compare_output(expected_driver_output, driver_output)
    if self._should_add_missing_baselines:
        self._add_missing_baselines(test_result, driver_output)
    test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, driver_output, expected_driver_output, test_result.failures)
    return test_result
def _run_rebaseline(self):
    """Run the test and overwrite its baselines with the new output."""
    driver_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
    failures = self._handle_error(driver_output)
    test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, driver_output, None, failures)
    # FIXME: If the test crashed or timed out, it might be better to avoid
    # writing new baselines.
    self._overwrite_baselines(driver_output)
    return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(),
                      pid=driver_output.pid)

# Matches the first line of a render tree dump; used to decide whether a
# text baseline is platform-specific.
_render_tree_dump_pattern = re.compile(r"^layer at \(\d+,\d+\) size \d+x\d+\n")
def _add_missing_baselines(self, test_result, driver_output):
    """Write a baseline for each output type (text, audio, image) that the
    test produced but had no expectation for."""
    missingImage = test_result.has_failure_matching_types(test_failures.FailureMissingImage, test_failures.FailureMissingImageHash)
    if test_result.has_failure_matching_types(test_failures.FailureMissingResult):
        self._save_baseline_data(driver_output.text, '.txt', self._location_for_new_baseline(driver_output.text, '.txt'))
    if test_result.has_failure_matching_types(test_failures.FailureMissingAudio):
        self._save_baseline_data(driver_output.audio, '.wav', self._location_for_new_baseline(driver_output.audio, '.wav'))
    if missingImage:
        self._save_baseline_data(driver_output.image, '.png', self._location_for_new_baseline(driver_output.image, '.png'))
def _location_for_new_baseline(self, data, extension):
if self._options.add_platform_exceptions:
return self.VERSION_DIR
if extension == '.png':
return self.PLATFORM_DIR
if extension == '.wav':
return self.ALONGSIDE_TEST
if extension == '.txt' and self._render_tree_dump_pattern.match(data):
return self.PLATFORM_DIR
return self.ALONGSIDE_TEST
def _overwrite_baselines(self, driver_output):
    """Overwrite the text/audio (and, when pixel testing, image) baselines
    with the driver's output."""
    # With --add-platform-exceptions the new baselines go into the version
    # directory; otherwise the existing baseline locations are updated.
    location = self.VERSION_DIR if self._options.add_platform_exceptions else self.UPDATE
    self._save_baseline_data(driver_output.text, '.txt', location)
    self._save_baseline_data(driver_output.audio, '.wav', location)
    if self._should_run_pixel_test:
        self._save_baseline_data(driver_output.image, '.png', location)
def _save_baseline_data(self, data, extension, location):
    """Write data to the '-expected<extension>' baseline file for this test.

    data: the new baseline contents, or None to skip writing.
    extension: baseline suffix ('.txt', '.wav' or '.png').
    location: one of ALONGSIDE_TEST, VERSION_DIR, PLATFORM_DIR, UPDATE.
    """
    if data is None:
        return
    port = self._port
    fs = self._filesystem
    if location == self.ALONGSIDE_TEST:
        output_dir = fs.dirname(port.abspath_for_test(self._test_name))
    elif location == self.VERSION_DIR:
        output_dir = fs.join(port.baseline_version_dir(), fs.dirname(self._test_name))
    elif location == self.PLATFORM_DIR:
        output_dir = fs.join(port.baseline_platform_dir(), fs.dirname(self._test_name))
    elif location == self.UPDATE:
        output_dir = fs.dirname(port.expected_filename(self._test_name, extension))
    else:
        raise AssertionError('unrecognized baseline location: %s' % location)

    fs.maybe_make_directory(output_dir)
    output_basename = fs.basename(fs.splitext(self._test_name)[0] + "-expected" + extension)
    output_path = fs.join(output_dir, output_basename)
    _log.info('Writing new expected result "%s"' % port.relative_test_filename(output_path))
    port.update_baseline(output_path, data)
def _handle_error(self, driver_output, reference_filename=None):
    """Returns test failures if some unusual errors happen in driver's run.

    Args:
        driver_output: The output from the driver.
        reference_filename: The full path to the reference file which produced the driver_output.
            This arg is optional and should be used only in reftests until we have a better way to know
            which html file is used for producing the driver_output.
    """
    failures = []
    fs = self._filesystem
    if driver_output.timeout:
        failures.append(test_failures.FailureTimeout(bool(reference_filename)))

    if reference_filename:
        testname = self._port.relative_test_filename(reference_filename)
    else:
        testname = self._test_name

    if driver_output.crash:
        failures.append(test_failures.FailureCrash(bool(reference_filename),
                                                   driver_output.crashed_process_name,
                                                   driver_output.crashed_pid,
                                                   self._port.output_contains_sanitizer_messages(driver_output.crash_log)))
        if driver_output.error:
            _log.debug("%s %s crashed, (stderr lines):" % (self._worker_name, testname))
        else:
            _log.debug("%s %s crashed, (no stderr)" % (self._worker_name, testname))
    elif driver_output.leak:
        failures.append(test_failures.FailureLeak(bool(reference_filename),
                                                  driver_output.leak_log))
        _log.debug("%s %s leaked" % (self._worker_name, testname))
    elif driver_output.error:
        _log.debug("%s %s output stderr lines:" % (self._worker_name, testname))
    # Log any stderr lines regardless of which branch was taken above.
    for line in driver_output.error.splitlines():
        _log.debug(" %s" % line)
    return failures
def _compare_output(self, expected_driver_output, driver_output):
    """Compare actual driver output against expectations and return a
    TestResult listing every failure found."""
    failures = []
    failures.extend(self._handle_error(driver_output))

    if driver_output.crash:
        # Don't continue any more if we already have a crash.
        # In case of timeouts, we continue since we still want to see the text and image output.
        return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(),
                          pid=driver_output.pid)

    is_testharness_test, testharness_failures = self._compare_testharness_test(driver_output, expected_driver_output)
    if is_testharness_test:
        failures.extend(testharness_failures)
    else:
        failures.extend(self._compare_text(expected_driver_output.text, driver_output.text))
        failures.extend(self._compare_audio(expected_driver_output.audio, driver_output.audio))
        if self._should_run_pixel_test:
            failures.extend(self._compare_image(expected_driver_output, driver_output))
    has_repaint_overlay = (repaint_overlay.result_contains_repaint_rects(expected_driver_output.text) or
                           repaint_overlay.result_contains_repaint_rects(driver_output.text))
    return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(),
                      pid=driver_output.pid, has_repaint_overlay=has_repaint_overlay)
def _compare_testharness_test(self, driver_output, expected_driver_output):
    """Detect a testharness.js test and check whether it passed.

    Returns (is_testharness_test, failures). A test is treated as a
    testharness test only when no baselines of any kind exist and the
    output is neither image, audio, nor a render tree dump.
    """
    if expected_driver_output.image or expected_driver_output.audio or expected_driver_output.text:
        return False, []

    if driver_output.image or driver_output.audio or self._is_render_tree(driver_output.text):
        return False, []

    text = driver_output.text or ''

    if not testharness_results.is_testharness_output(text):
        return False, []
    if not testharness_results.is_testharness_output_passing(text):
        return True, [test_failures.FailureTestHarnessAssertion()]
    return True, []
def _is_render_tree(self, text):
return text and "layer at (0,0) size 800x600" in text
def _compare_text(self, expected_text, actual_text):
failures = []
if (expected_text and actual_text and
# Assuming expected_text is already normalized.
self._port.do_text_results_differ(expected_text, self._get_normalized_output_text(actual_text))):
failures.append(test_failures.FailureTextMismatch())
elif actual_text and not expected_text:
failures.append(test_failures.FailureMissingResult())
return failures
def _compare_audio(self, expected_audio, actual_audio):
failures = []
if (expected_audio and actual_audio and
self._port.do_audio_results_differ(expected_audio, actual_audio)):
failures.append(test_failures.FailureAudioMismatch())
elif actual_audio and not expected_audio:
failures.append(test_failures.FailureMissingAudio())
return failures
def _get_normalized_output_text(self, output):
"""Returns the normalized text output, i.e. the output in which
the end-of-line characters are normalized to "\n"."""
# Running tests on Windows produces "\r\n". The "\n" part is helpfully
# changed to "\r\n" by our system (Python/Cygwin), resulting in
# "\r\r\n", when, in fact, we wanted to compare the text output with
# the normalized text expectation files.
return output.replace("\r\r\n", "\r\n").replace("\r\n", "\n")
# FIXME: This function also creates the image diff. Maybe that work should
# be handled elsewhere?
def _compare_image(self, expected_driver_output, driver_output):
    """Compare pixel output against the expected image/checksum; return a
    list of failures (possibly empty). May set image_diff/error on
    driver_output as a side effect."""
    failures = []
    # If we didn't produce a hash file, this test must be text-only.
    if driver_output.image_hash is None:
        return failures
    if not expected_driver_output.image:
        failures.append(test_failures.FailureMissingImage())
    elif not expected_driver_output.image_hash:
        failures.append(test_failures.FailureMissingImageHash())
    elif driver_output.image_hash != expected_driver_output.image_hash:
        diff, err_str = self._port.diff_image(expected_driver_output.image, driver_output.image)
        if err_str:
            _log.warning(' %s : %s' % (self._test_name, err_str))
            failures.append(test_failures.FailureImageHashMismatch())
            driver_output.error = (driver_output.error or '') + err_str
        else:
            driver_output.image_diff = diff
            if driver_output.image_diff:
                failures.append(test_failures.FailureImageHashMismatch())
            else:
                # See https://bugs.webkit.org/show_bug.cgi?id=69444 for why this isn't a full failure.
                _log.warning(' %s -> pixel hash failed (but diff passed)' % self._test_name)
    return failures
def _run_reftest(self):
    """Run a reference test: run the test itself, then its references,
    short-circuiting as soon as a pass/fail can be decided."""
    test_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
    total_test_time = 0
    reference_output = None
    test_result = None

    # If the test crashed, or timed out, there's no point in running the reference at all.
    # This can save a lot of execution time if we have a lot of crashes or timeouts.
    if test_output.crash or test_output.timeout:
        expected_driver_output = DriverOutput(text=None, image=None, image_hash=None, audio=None)
        test_result = self._compare_output(expected_driver_output, test_output)
        if test_output.crash:
            test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, test_output, expected_driver_output, test_result.failures)
        return test_result

    # A reftest can have multiple match references and multiple mismatch references;
    # the test fails if any mismatch matches and all of the matches don't match.
    # To minimize the number of references we have to check, we run all of the mismatches first,
    # then the matches, and short-circuit out as soon as we can.
    # Note that sorting by the expectation sorts "!=" before "==" so this is easy to do.
    putAllMismatchBeforeMatch = sorted
    reference_test_names = []
    for expectation, reference_filename in putAllMismatchBeforeMatch(self._reference_files):
        if self._port.lookup_virtual_test_base(self._test_name):
            args = self._port.lookup_virtual_reference_args(self._test_name)
        else:
            args = self._port.lookup_physical_reference_args(self._test_name)
        reference_test_name = self._port.relative_test_filename(reference_filename)
        reference_test_names.append(reference_test_name)
        driver_input = DriverInput(reference_test_name, self._timeout, image_hash=None, should_run_pixel_test=True, args=args)
        reference_output = self._reference_driver.run_test(driver_input, self._stop_when_done)
        test_result = self._compare_output_with_reference(reference_output, test_output, reference_filename, expectation == '!=')

        if (expectation == '!=' and test_result.failures) or (expectation == '==' and not test_result.failures):
            break
        total_test_time += test_result.test_run_time

    assert(reference_output)
    test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, test_output, reference_output, test_result.failures)

    # FIXME: We don't really deal with a mix of reftest types properly. We pass in a set() to reftest_type
    # and only really handle the first of the references in the result.
    reftest_type = list(set([reference_file[0] for reference_file in self._reference_files]))
    return TestResult(self._test_name, test_result.failures, total_test_time + test_result.test_run_time,
                      test_result.has_stderr, reftest_type=reftest_type, pid=test_result.pid,
                      references=reference_test_names)
def _compare_output_with_reference(self, reference_driver_output, actual_driver_output, reference_filename, mismatch):
    """Compare the test's image output against one reference.

    mismatch: True for a '!=' reference (images must differ), False for a
    '==' reference (images must match).
    """
    total_test_time = reference_driver_output.test_time + actual_driver_output.test_time
    has_stderr = reference_driver_output.has_stderr() or actual_driver_output.has_stderr()
    failures = []
    failures.extend(self._handle_error(actual_driver_output))
    if failures:
        # Don't continue any more if we already have crash or timeout.
        return TestResult(self._test_name, failures, total_test_time, has_stderr)
    failures.extend(self._handle_error(reference_driver_output, reference_filename=reference_filename))
    if failures:
        return TestResult(self._test_name, failures, total_test_time, has_stderr, pid=actual_driver_output.pid)

    if not reference_driver_output.image_hash and not actual_driver_output.image_hash:
        failures.append(test_failures.FailureReftestNoImagesGenerated(reference_filename))
    elif mismatch:
        # '!=' reference: identical hashes are only a failure if the pixel
        # diff confirms the images really are identical.
        if reference_driver_output.image_hash == actual_driver_output.image_hash:
            diff, err_str = self._port.diff_image(reference_driver_output.image, actual_driver_output.image)
            if not diff:
                failures.append(test_failures.FailureReftestMismatchDidNotOccur(reference_filename))
            elif err_str:
                _log.error(err_str)
            else:
                _log.warning(" %s -> ref test hashes matched but diff failed" % self._test_name)
    elif reference_driver_output.image_hash != actual_driver_output.image_hash:
        # '==' reference: differing hashes are only a failure if the pixel
        # diff confirms the images really differ.
        diff, err_str = self._port.diff_image(reference_driver_output.image, actual_driver_output.image)
        if diff:
            failures.append(test_failures.FailureReftestMismatch(reference_filename))
        elif err_str:
            _log.error(err_str)
        else:
            _log.warning(" %s -> ref test hashes didn't match but diff passed" % self._test_name)
    return TestResult(self._test_name, failures, total_test_time, has_stderr, pid=actual_driver_output.pid)
| bsd-3-clause |
ojengwa/oh-mainline | vendor/packages/zope.interface/src/zope/interface/common/interfaces.py | 22 | 4219 | ##############################################################################
#
# Copyright (c) 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Interfaces for standard python exceptions
"""
from zope.interface import Interface
from zope.interface import classImplements
# Interface hierarchy mirroring Python's builtin exception hierarchy.
class IException(Interface): pass
class IStandardError(IException): pass
class IWarning(IException): pass
class ISyntaxError(IStandardError): pass
class ILookupError(IStandardError): pass
class IValueError(IStandardError): pass
class IRuntimeError(IStandardError): pass
class IArithmeticError(IStandardError): pass
class IAssertionError(IStandardError): pass
class IAttributeError(IStandardError): pass
class IDeprecationWarning(IWarning): pass
class IEOFError(IStandardError): pass
class IEnvironmentError(IStandardError): pass
class IFloatingPointError(IArithmeticError): pass
class IIOError(IEnvironmentError): pass
class IImportError(IStandardError): pass
class IIndentationError(ISyntaxError): pass
class IIndexError(ILookupError): pass
class IKeyError(ILookupError): pass
class IKeyboardInterrupt(IStandardError): pass
class IMemoryError(IStandardError): pass
class INameError(IStandardError): pass
class INotImplementedError(IRuntimeError): pass
class IOSError(IEnvironmentError): pass
class IOverflowError(IArithmeticError): pass
class IOverflowWarning(IWarning): pass
class IReferenceError(IStandardError): pass
class IRuntimeWarning(IWarning): pass
class IStopIteration(IException): pass
class ISyntaxWarning(IWarning): pass
class ISystemError(IStandardError): pass
class ISystemExit(IException): pass
class ITabError(IIndentationError): pass
class ITypeError(IStandardError): pass
class IUnboundLocalError(INameError): pass
class IUnicodeError(IValueError): pass
class IUserWarning(IWarning): pass
class IZeroDivisionError(IArithmeticError): pass

# Declare that the builtin exception classes implement these interfaces.
classImplements(ArithmeticError, IArithmeticError)
classImplements(AssertionError, IAssertionError)
classImplements(AttributeError, IAttributeError)
classImplements(DeprecationWarning, IDeprecationWarning)
classImplements(EnvironmentError, IEnvironmentError)
classImplements(EOFError, IEOFError)
classImplements(Exception, IException)
classImplements(FloatingPointError, IFloatingPointError)
classImplements(ImportError, IImportError)
classImplements(IndentationError, IIndentationError)
classImplements(IndexError, IIndexError)
classImplements(IOError, IIOError)
classImplements(KeyboardInterrupt, IKeyboardInterrupt)
classImplements(KeyError, IKeyError)
classImplements(LookupError, ILookupError)
classImplements(MemoryError, IMemoryError)
classImplements(NameError, INameError)
classImplements(NotImplementedError, INotImplementedError)
classImplements(OSError, IOSError)
classImplements(OverflowError, IOverflowError)
try:
    classImplements(OverflowWarning, IOverflowWarning)
except NameError:
    pass # OverflowWarning was removed in Python 2.5
classImplements(ReferenceError, IReferenceError)
classImplements(RuntimeError, IRuntimeError)
classImplements(RuntimeWarning, IRuntimeWarning)
try:
    classImplements(StandardError, IStandardError)
except NameError:
    pass # StandardError does not exist in Python 3
classImplements(StopIteration, IStopIteration)
classImplements(SyntaxError, ISyntaxError)
classImplements(SyntaxWarning, ISyntaxWarning)
classImplements(SystemError, ISystemError)
classImplements(SystemExit, ISystemExit)
classImplements(TabError, ITabError)
classImplements(TypeError, ITypeError)
classImplements(UnboundLocalError, IUnboundLocalError)
classImplements(UnicodeError, IUnicodeError)
classImplements(UserWarning, IUserWarning)
classImplements(ValueError, IValueError)
classImplements(Warning, IWarning)
classImplements(ZeroDivisionError, IZeroDivisionError)
| agpl-3.0 |
sshwsfc/django-xadmin | xadmin/migrations/0001_initial.py | 17 | 2846 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-20 13:46
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for xadmin: Bookmark, UserSettings and UserWidget."""

    initial = True

    dependencies = [
        # Bookmark.content_type is a ForeignKey to contenttypes.ContentType,
        # so this migration must depend on the contenttypes app; without it
        # Django may try to apply this migration before the referenced
        # table exists.
        ('contenttypes', '0002_remove_content_type_name'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Bookmark',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=128, verbose_name='Title')),
                ('url_name', models.CharField(max_length=64, verbose_name='Url Name')),
                ('query', models.CharField(blank=True, max_length=1000, verbose_name='Query String')),
                ('is_share', models.BooleanField(default=False, verbose_name='Is Shared')),
                ('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
                ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='user')),
            ],
            options={
                'verbose_name': 'Bookmark',
                'verbose_name_plural': 'Bookmarks',
            },
        ),
        migrations.CreateModel(
            name='UserSettings',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('key', models.CharField(max_length=256, verbose_name='Settings Key')),
                ('value', models.TextField(verbose_name='Settings Content')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='user')),
            ],
            options={
                'verbose_name': 'User Setting',
                'verbose_name_plural': 'User Settings',
            },
        ),
        migrations.CreateModel(
            name='UserWidget',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('page_id', models.CharField(max_length=256, verbose_name='Page')),
                ('widget_type', models.CharField(max_length=50, verbose_name='Widget Type')),
                ('value', models.TextField(verbose_name='Widget Params')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='user')),
            ],
            options={
                'verbose_name': 'User Widget',
                'verbose_name_plural': 'User Widgets',
            },
        ),
    ]
| bsd-3-clause |
inovtec-solutions/OpenERP | openerp/addons/event/event.py | 18 | 23687 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, timedelta
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp import SUPERUSER_ID
class event_type(osv.osv):
    """ Event Type """
    _name = 'event.type'
    _description = __doc__
    # Column definitions; the default_* fields pre-fill new events created
    # with this type.
    _columns = {
        'name': fields.char('Event Type', size=64, required=True),
        'default_reply_to': fields.char('Default Reply-To', size=64, help="The email address of the organizer which is put in the 'Reply-To' of all emails sent automatically at event or registrations confirmation. You can also put your email address of your mail gateway if you use one."),
        'default_email_event': fields.many2one('email.template', 'Event Confirmation Email', help="It will select this default confirmation event mail value when you choose this event"),
        'default_email_registration': fields.many2one('email.template', 'Registration Confirmation Email', help="It will select this default confirmation registration mail value when you choose this event"),
        'default_registration_min': fields.integer('Default Minimum Registration', help="It will select this default minimum value when you choose this event"),
        'default_registration_max': fields.integer('Default Maximum Registration', help="It will select this default maximum value when you choose this event"),
    }
    # 0 means "no limit" for both bounds.
    _defaults = {
        'default_registration_min': 0,
        'default_registration_max': 0,
    }

# Instantiation registers the model in the ORM pool (pre-v7 OpenERP style).
event_type()
class event_event(osv.osv):
"""Event"""
_name = 'event.event'
_description = __doc__
_order = 'date_begin'
_inherit = ['mail.thread', 'ir.needaction_mixin']
def name_get(self, cr, uid, ids, context=None):
    """Return display names formatted as "Name (start[ - end])".

    Only the date part of the begin/end datetimes is shown, and the end
    date is appended only when it differs from the start date.
    """
    if not ids:
        return []
    if isinstance(ids, (long, int)):
        ids = [ids]
    res = []
    for record in self.browse(cr, uid, ids, context=context):
        date = record.date_begin.split(" ")[0]
        date_end = record.date_end.split(" ")[0]
        if date != date_end:
            date += ' - ' + date_end
        display_name = record.name + ' (' + date + ')'
        res.append((record['id'], display_name))
    return res
def copy(self, cr, uid, id, default=None, context=None):
""" Reset the state and the registrations while copying an event
"""
if not default:
default = {}
default.update({
'state': 'draft',
'registration_ids': False,
})
return super(event_event, self).copy(cr, uid, id, default=default, context=context)
def button_draft(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'draft'}, context=context)
def button_cancel(self, cr, uid, ids, context=None):
registration = self.pool.get('event.registration')
reg_ids = registration.search(cr, uid, [('event_id','in',ids)], context=context)
for event_reg in registration.browse(cr,uid,reg_ids,context=context):
if event_reg.state == 'done':
raise osv.except_osv(_('Error!'),_("You have already set a registration for this event as 'Attended'. Please reset it to draft if you want to cancel this event.") )
registration.write(cr, uid, reg_ids, {'state': 'cancel'}, context=context)
return self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
def button_done(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'done'}, context=context)
def check_registration_limits(self, cr, uid, ids, context=None):
for self.event in self.browse(cr, uid, ids, context=context):
total_confirmed = self.event.register_current
if total_confirmed < self.event.register_min or total_confirmed > self.event.register_max and self.event.register_max!=0:
raise osv.except_osv(_('Error!'),_("The total of confirmed registration for the event '%s' does not meet the expected minimum/maximum. Please reconsider those limits before going further.") % (self.event.name))
def check_registration_limits_before(self, cr, uid, ids, no_of_registration, context=None):
for event in self.browse(cr, uid, ids, context=context):
available_seats = event.register_avail
if available_seats and no_of_registration > available_seats:
raise osv.except_osv(_('Warning!'),_("Only %d Seats are Available!") % (available_seats))
elif available_seats == 0:
raise osv.except_osv(_('Warning!'),_("No Tickets Available!"))
def confirm_event(self, cr, uid, ids, context=None):
register_pool = self.pool.get('event.registration')
if self.event.email_confirmation_id:
#send reminder that will confirm the event for all the people that were already confirmed
reg_ids = register_pool.search(cr, uid, [
('event_id', '=', self.event.id),
('state', 'not in', ['draft', 'cancel'])], context=context)
register_pool.mail_user_confirm(cr, uid, reg_ids)
return self.write(cr, uid, ids, {'state': 'confirm'}, context=context)
def button_confirm(self, cr, uid, ids, context=None):
""" Confirm Event and send confirmation email to all register peoples
"""
if isinstance(ids, (int, long)):
ids = [ids]
self.check_registration_limits(cr, uid, ids, context=context)
return self.confirm_event(cr, uid, ids, context=context)
def _get_register(self, cr, uid, ids, fields, args, context=None):
"""Get Confirm or uncofirm register value.
@param ids: List of Event registration type's id
@param fields: List of function fields(register_current and register_prospect).
@param context: A standard dictionary for contextual values
@return: Dictionary of function fields value.
"""
res = {}
for event in self.browse(cr, uid, ids, context=context):
res[event.id] = {}
reg_open = reg_done = reg_draft =0
for registration in event.registration_ids:
if registration.state == 'open':
reg_open += registration.nb_register
elif registration.state == 'done':
reg_done += registration.nb_register
elif registration.state == 'draft':
reg_draft += registration.nb_register
for field in fields:
number = 0
if field == 'register_current':
number = reg_open
elif field == 'register_attended':
number = reg_done
elif field == 'register_prospect':
number = reg_draft
elif field == 'register_avail':
#the number of ticket is unlimited if the event.register_max field is not set.
#In that cas we arbitrary set it to 9999, it is used in the kanban view to special case the display of the 'subscribe' button
number = event.register_max - reg_open if event.register_max != 0 else 9999
res[event.id][field] = number
return res
def _subscribe_fnc(self, cr, uid, ids, fields, args, context=None):
"""This functional fields compute if the current user (uid) is already subscribed or not to the event passed in parameter (ids)
"""
register_pool = self.pool.get('event.registration')
res = {}
for event in self.browse(cr, uid, ids, context=context):
res[event.id] = False
curr_reg_id = register_pool.search(cr, uid, [('user_id', '=', uid), ('event_id', '=' ,event.id)])
if curr_reg_id:
for reg in register_pool.browse(cr, uid, curr_reg_id, context=context):
if reg.state in ('open','done'):
res[event.id]= True
continue
return res
_columns = {
'name': fields.char('Name', size=64, required=True, translate=True, readonly=False, states={'done': [('readonly', True)]}),
'user_id': fields.many2one('res.users', 'Responsible User', readonly=False, states={'done': [('readonly', True)]}),
'type': fields.many2one('event.type', 'Type of Event', readonly=False, states={'done': [('readonly', True)]}),
'register_max': fields.integer('Maximum Registrations', help="You can for each event define a maximum registration level. If you have too much registrations you are not able to confirm your event. (put 0 to ignore this rule )", readonly=True, states={'draft': [('readonly', False)]}),
'register_min': fields.integer('Minimum Registrations', help="You can for each event define a minimum registration level. If you do not enough registrations you are not able to confirm your event. (put 0 to ignore this rule )", readonly=True, states={'draft': [('readonly', False)]}),
'register_current': fields.function(_get_register, string='Confirmed Registrations', multi='register_numbers'),
'register_avail': fields.function(_get_register, string='Available Registrations', multi='register_numbers',type='integer'),
'register_prospect': fields.function(_get_register, string='Unconfirmed Registrations', multi='register_numbers'),
'register_attended': fields.function(_get_register, string='# of Participations', multi='register_numbers'),
'registration_ids': fields.one2many('event.registration', 'event_id', 'Registrations', readonly=False, states={'done': [('readonly', True)]}),
'date_begin': fields.datetime('Start Date', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'date_end': fields.datetime('End Date', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'state': fields.selection([
('draft', 'Unconfirmed'),
('cancel', 'Cancelled'),
('confirm', 'Confirmed'),
('done', 'Done')],
'Status', readonly=True, required=True,
track_visibility='onchange',
help='If event is created, the status is \'Draft\'.If event is confirmed for the particular dates the status is set to \'Confirmed\'. If the event is over, the status is set to \'Done\'.If event is cancelled the status is set to \'Cancelled\'.'),
'email_registration_id' : fields.many2one('email.template','Registration Confirmation Email', help='This field contains the template of the mail that will be automatically sent each time a registration for this event is confirmed.'),
'email_confirmation_id' : fields.many2one('email.template','Event Confirmation Email', help="If you set an email template, each participant will receive this email announcing the confirmation of the event."),
'reply_to': fields.char('Reply-To Email', size=64, readonly=False, states={'done': [('readonly', True)]}, help="The email address of the organizer is likely to be put here, with the effect to be in the 'Reply-To' of the mails sent automatically at event or registrations confirmation. You can also put the email address of your mail gateway if you use one."),
'main_speaker_id': fields.many2one('res.partner','Main Speaker', readonly=False, states={'done': [('readonly', True)]}, help="Speaker who will be giving speech at the event."),
'address_id': fields.many2one('res.partner','Location Address', readonly=False, states={'done': [('readonly', True)]}),
'street': fields.related('address_id','street',type='char',string='Street'),
'street2': fields.related('address_id','street2',type='char',string='Street2'),
'state_id': fields.related('address_id','state_id',type='many2one', relation="res.country.state", string='State'),
'zip': fields.related('address_id','zip',type='char',string='zip'),
'city': fields.related('address_id','city',type='char',string='city'),
'speaker_confirmed': fields.boolean('Speaker Confirmed', readonly=False, states={'done': [('readonly', True)]}),
'country_id': fields.related('address_id', 'country_id',
type='many2one', relation='res.country', string='Country', readonly=False, states={'done': [('readonly', True)]}),
'note': fields.text('Description', readonly=False, states={'done': [('readonly', True)]}),
'company_id': fields.many2one('res.company', 'Company', required=False, change_default=True, readonly=False, states={'done': [('readonly', True)]}),
'is_subscribed' : fields.function(_subscribe_fnc, type="boolean", string='Subscribed'),
}
_defaults = {
'state': 'draft',
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'event.event', context=c),
'user_id': lambda obj, cr, uid, context: uid,
}
def subscribe_to_event(self, cr, uid, ids, context=None):
register_pool = self.pool.get('event.registration')
user_pool = self.pool.get('res.users')
num_of_seats = int(context.get('ticket', 1))
self.check_registration_limits_before(cr, uid, ids, num_of_seats, context=context)
user = user_pool.browse(cr, uid, uid, context=context)
curr_reg_ids = register_pool.search(cr, uid, [('user_id', '=', user.id), ('event_id', '=' , ids[0])])
#the subscription is done with SUPERUSER_ID because in case we share the kanban view, we want anyone to be able to subscribe
if not curr_reg_ids:
curr_reg_ids = [register_pool.create(cr, SUPERUSER_ID, {'event_id': ids[0] ,'email': user.email, 'name':user.name, 'user_id': user.id, 'nb_register': num_of_seats})]
else:
register_pool.write(cr, uid, curr_reg_ids, {'nb_register': num_of_seats}, context=context)
return register_pool.confirm_registration(cr, SUPERUSER_ID, curr_reg_ids, context=context)
def unsubscribe_to_event(self, cr, uid, ids, context=None):
register_pool = self.pool.get('event.registration')
#the unsubscription is done with SUPERUSER_ID because in case we share the kanban view, we want anyone to be able to unsubscribe
curr_reg_ids = register_pool.search(cr, SUPERUSER_ID, [('user_id', '=', uid), ('event_id', '=', ids[0])])
return register_pool.button_reg_cancel(cr, SUPERUSER_ID, curr_reg_ids, context=context)
def _check_closing_date(self, cr, uid, ids, context=None):
for event in self.browse(cr, uid, ids, context=context):
if event.date_end < event.date_begin:
return False
return True
_constraints = [
(_check_closing_date, 'Error ! Closing Date cannot be set before Beginning Date.', ['date_end']),
]
def onchange_event_type(self, cr, uid, ids, type_event, context=None):
if type_event:
type_info = self.pool.get('event.type').browse(cr,uid,type_event,context)
dic ={
'reply_to': type_info.default_reply_to,
'email_registration_id': type_info.default_email_registration.id,
'email_confirmation_id': type_info.default_email_event.id,
'register_min': type_info.default_registration_min,
'register_max': type_info.default_registration_max,
}
return {'value': dic}
def on_change_address_id(self, cr, uid, ids, address_id, context=None):
values = {}
if not address_id:
return values
address = self.pool.get('res.partner').browse(cr, uid, address_id, context=context)
values.update({
'street' : address.street,
'street2' : address.street2,
'city' : address.city,
'country_id' : address.country_id and address.country_id.id or False,
'state_id' : address.state_id and address.state_id.id or False,
'zip' : address.zip,
})
return {'value' : values}
def onchange_start_date(self, cr, uid, ids, date_begin=False, date_end=False, context=None):
res = {'value':{}}
if date_end:
return res
if date_begin and isinstance(date_begin, str):
date_begin = datetime.strptime(date_begin, "%Y-%m-%d %H:%M:%S")
date_end = date_begin + timedelta(hours=1)
res['value'] = {'date_end': date_end.strftime("%Y-%m-%d %H:%M:%S")}
return res
class event_registration(osv.osv):
    """Event Registration"""
    _name= 'event.registration'
    _description = __doc__
    _inherit = ['mail.thread', 'ir.needaction_mixin']
    _columns = {
        'id': fields.integer('ID'),
        'origin': fields.char('Source Document', size=124,readonly=True,help="Reference of the sales order which created the registration"),
        'nb_register': fields.integer('Number of Participants', required=True, readonly=True, states={'draft': [('readonly', False)]}),
        'event_id': fields.many2one('event.event', 'Event', required=True, readonly=True, states={'draft': [('readonly', False)]}),
        'partner_id': fields.many2one('res.partner', 'Partner', states={'done': [('readonly', True)]}),
        'create_date': fields.datetime('Creation Date' , readonly=True),
        'date_closed': fields.datetime('Attended Date', readonly=True),
        'date_open': fields.datetime('Registration Date', readonly=True),
        'reply_to': fields.related('event_id','reply_to',string='Reply-to Email', type='char', size=128, readonly=True,),
        'log_ids': fields.one2many('mail.message', 'res_id', 'Logs', domain=[('model','=',_name)]),
        'event_end_date': fields.related('event_id','date_end', type='datetime', string="Event End Date", readonly=True),
        'event_begin_date': fields.related('event_id', 'date_begin', type='datetime', string="Event Start Date", readonly=True),
        'user_id': fields.many2one('res.users', 'User', states={'done': [('readonly', True)]}),
        'company_id': fields.related('event_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True, states={'draft':[('readonly',False)]}),
        'state': fields.selection([('draft', 'Unconfirmed'),
                                   ('cancel', 'Cancelled'),
                                   ('open', 'Confirmed'),
                                   ('done', 'Attended')], 'Status',
                                  track_visibility='onchange',
                                  size=16, readonly=True),
        'email': fields.char('Email', size=64),
        'phone': fields.char('Phone', size=64),
        'name': fields.char('Name', size=128, select=True),
    }
    _defaults = {
        'nb_register': 1,
        'state': 'draft',
    }
    _order = 'name, create_date desc'

    def do_draft(self, cr, uid, ids, context=None):
        """Reset the registrations to the draft state."""
        return self.write(cr, uid, ids, {'state': 'draft'}, context=context)

    def confirm_registration(self, cr, uid, ids, context=None):
        """Confirm the registrations and log a note on their events."""
        for reg in self.browse(cr, uid, ids, context=context or {}):
            self.pool.get('event.event').message_post(cr, uid, [reg.event_id.id], body=_('New registration confirmed: %s.') % (reg.name or '', ),subtype="event.mt_event_registration", context=context)
        return self.write(cr, uid, ids, {'state': 'open'}, context=context)

    def registration_open(self, cr, uid, ids, context=None):
        """ Open Registration
        """
        event_obj = self.pool.get('event.event')
        for register in self.browse(cr, uid, ids, context=context):
            event_id = register.event_id.id
            no_of_registration = register.nb_register
            event_obj.check_registration_limits_before(cr, uid, [event_id], no_of_registration, context=context)
        res = self.confirm_registration(cr, uid, ids, context=context)
        self.mail_user(cr, uid, ids, context=context)
        return res

    def button_reg_close(self, cr, uid, ids, context=None):
        """ Close Registration
        """
        if context is None:
            context = {}
        today = fields.datetime.now()
        for registration in self.browse(cr, uid, ids, context=context):
            if today >= registration.event_id.date_begin:
                values = {'state': 'done', 'date_closed': today}
                # BUG FIX: only close the registration that passed the date
                # check; the original wrote 'done' to every id in the batch
                # even before the remaining records were checked.
                self.write(cr, uid, [registration.id], values)
            else:
                raise osv.except_osv(_('Error!'), _("You must wait for the starting day of the event to do this action."))
        return True

    def button_reg_cancel(self, cr, uid, ids, context=None, *args):
        """Cancel the registrations."""
        return self.write(cr, uid, ids, {'state': 'cancel'})

    def mail_user(self, cr, uid, ids, context=None):
        """
        Send email to user with email_template when registration is done
        """
        for registration in self.browse(cr, uid, ids, context=context):
            if registration.event_id.state == 'confirm' and registration.event_id.email_confirmation_id.id:
                self.mail_user_confirm(cr, uid, ids, context=context)
            else:
                template_id = registration.event_id.email_registration_id.id
                if template_id:
                    # send_mail queues the message; its return value is unused.
                    self.pool.get('email.template').send_mail(cr, uid, template_id, registration.id)
        return True

    def mail_user_confirm(self, cr, uid, ids, context=None):
        """
        Send email to user when the event is confirmed
        """
        for registration in self.browse(cr, uid, ids, context=context):
            template_id = registration.event_id.email_confirmation_id.id
            if template_id:
                self.pool.get('email.template').send_mail(cr, uid, template_id, registration.id)
        return True

    def onchange_contact_id(self, cr, uid, ids, contact, partner, context=None):
        """Fill email/name/phone from the selected contact."""
        if not contact:
            return {}
        addr_obj = self.pool.get('res.partner')
        contact_id = addr_obj.browse(cr, uid, contact, context=context)
        return {'value': {
            'email':contact_id.email,
            'name':contact_id.name,
            'phone':contact_id.phone,
            }}

    def onchange_partner_id(self, cr, uid, ids, part, context=None):
        """Fill contact details from the partner's default address."""
        res_obj = self.pool.get('res.partner')
        data = {}
        if not part:
            return {'value': data}
        addr = res_obj.address_get(cr, uid, [part]).get('default', False)
        if addr:
            d = self.onchange_contact_id(cr, uid, ids, addr, part, context)
            data.update(d['value'])
        return {'value': data}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
shashisp/blumix-webpy | app/applications/welcome/controllers/default.py | 1 | 1858 | # -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations
#########################################################################
## This is a sample controller
## - index is the default action of any application
## - user is required for authentication and authorization
## - download is for downloading files uploaded in the db (does streaming)
#########################################################################
def index():
    """Default landing action for the application.

    Rendered by views/default/index.html or views/generic.html.  For a
    simple wiki, replace the body with ``return auth.wiki()``.
    """
    # response.flash = T("Hello World")
    return {"message": "Hello from MyApp"}
def user():
    """
    Expose the built-in web2py auth actions:
        http://..../[app]/default/user/login
        http://..../[app]/default/user/logout
        http://..../[app]/default/user/register
        http://..../[app]/default/user/profile
        http://..../[app]/default/user/retrieve_password
        http://..../[app]/default/user/change_password
        http://..../[app]/default/user/manage_users (requires membership in a group)
    Use @auth.requires_login()
        @auth.requires_membership('group name')
        @auth.requires_permission('read','table name',record_id)
    to decorate functions that need access control.
    """
    # ``auth`` is injected into the controller namespace by web2py; calling it
    # dispatches to the sub-action named in the URL and returns its form.
    return dict(form=auth())
@cache.action()
def download():
    """
    Allows downloading of uploaded files (streams from the db):
        http://..../[app]/default/download/[filename]
    """
    # ``response``, ``request`` and ``db`` are injected by web2py;
    # @cache.action() adds client-side caching headers to the response.
    return response.download(request, db)
def call():
    """
    Exposes services. For example:
        http://..../[app]/default/call/jsonrpc
    Decorate with @services.jsonrpc the functions to expose.
    Supports xml, json, xmlrpc, jsonrpc, amfrpc, rss, csv.
    """
    # ``service`` is the web2py Service dispatcher injected into the namespace.
    return service()
| mit |
lrowe/splinter | tests/test_webdriver_phantomjs.py | 1 | 2438 | import unittest
from splinter import Browser
from .fake_webapp import EXAMPLE_APP
from .base import WebDriverTests
class PhantomJSBrowserTest(WebDriverTests, unittest.TestCase):
    """Run the shared WebDriver test suite against the phantomjs driver.

    PhantomJS (via ghostdriver) does not implement alert/prompt handling
    or right/double mouse clicks, so the corresponding inherited tests
    are overridden here to assert NotImplementedError instead.
    """
    @classmethod
    def setUpClass(cls):
        # One shared browser instance for the whole class; closed in
        # tearDownClass.
        cls.browser = Browser("phantomjs")
    @classmethod
    def tearDownClass(cls):
        cls.browser.quit()
    def setUp(self):
        # Every test starts from the example app's front page.
        self.browser.visit(EXAMPLE_APP)
    def test_get_alert(self):
        with self.assertRaises(NotImplementedError):
            self.browser.get_alert()
    def test_right_click(self):
        with self.assertRaises(NotImplementedError):
            self.browser.find_by_id('visible').right_click()
    def test_double_click(self):
        with self.assertRaises(NotImplementedError):
            self.browser.find_by_id('visible').double_click()
    def test_access_prompts_and_be_able_to_fill_then(self):
        with self.assertRaises(NotImplementedError):
            self.browser.get_alert()
    def test_access_confirm_and_accept_and_dismiss_them_using_with(self):
        with self.assertRaises(NotImplementedError):
            self.browser.get_alert()
    def test_access_confirm_and_accept_and_dismiss_them(self):
        with self.assertRaises(NotImplementedError):
            self.browser.get_alert()
    def test_access_alerts_using_with(self):
        with self.assertRaises(NotImplementedError):
            self.browser.get_alert()
    def test_access_alerts_and_accept_them(self):
        with self.assertRaises(NotImplementedError):
            self.browser.get_alert()
    def test_can_work_on_popups(self):
        # FIXME: Check https://github.com/detro/ghostdriver/issues/180 to see if
        # we can implement this test
        pass
class PhantomJSBrowserTestWithCustomHeaders(unittest.TestCase):
    """Verify that custom HTTP headers passed to the phantomjs driver are
    actually sent with each request (echoed back by the test app's
    ``/headers`` endpoint)."""
    @classmethod
    def setUpClass(cls):
        custom_headers = {'X-Splinter-Customheaders-1': 'Hello',
                          'X-Splinter-Customheaders-2': 'Bye'}
        cls.browser = Browser("phantomjs", custom_headers=custom_headers)
    def test_create_a_phantomjs_with_custom_headers(self):
        self.browser.visit(EXAMPLE_APP + 'headers')
        self.assertTrue(
            self.browser.is_text_present('X-Splinter-Customheaders-1: Hello'))
        self.assertTrue(
            self.browser.is_text_present('X-Splinter-Customheaders-2: Bye'))
    @classmethod
    def tearDownClass(cls):
        cls.browser.quit()
| bsd-3-clause |
LIS/lis-tempest | tempest/lib/services/compute/keypairs_client.py | 2 | 2114 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils as json
from tempest.lib.api_schema.response.compute.v2_1 import keypairs as schema
from tempest.lib.common import rest_client
from tempest.lib.services.compute import base_compute_client
class KeyPairsClient(base_compute_client.BaseComputeClient):
    """Client for the Compute v2.1 ``os-keypairs`` API."""

    def list_keypairs(self):
        """List the keypairs associated with the account."""
        resp, raw = self.get("os-keypairs")
        parsed = json.loads(raw)
        self.validate_response(schema.list_keypairs, resp, parsed)
        return rest_client.ResponseBody(resp, parsed)

    def show_keypair(self, keypair_name):
        """Show details of the named keypair."""
        resp, raw = self.get("os-keypairs/%s" % keypair_name)
        parsed = json.loads(raw)
        self.validate_response(schema.get_keypair, resp, parsed)
        return rest_client.ResponseBody(resp, parsed)

    def create_keypair(self, **kwargs):
        """Create a keypair.

        Available params: see http://developer.openstack.org/
                              api-ref-compute-v2.1.html#createKeypair
        """
        request_body = json.dumps({'keypair': kwargs})
        resp, raw = self.post("os-keypairs", body=request_body)
        parsed = json.loads(raw)
        self.validate_response(schema.create_keypair, resp, parsed)
        return rest_client.ResponseBody(resp, parsed)

    def delete_keypair(self, keypair_name):
        """Delete the named keypair."""
        resp, raw = self.delete("os-keypairs/%s" % keypair_name)
        self.validate_response(schema.delete_keypair, resp, raw)
        return rest_client.ResponseBody(resp, raw)
| apache-2.0 |
fbradyirl/home-assistant | homeassistant/components/github/sensor.py | 2 | 7295 | """Support for GitHub."""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_NAME,
CONF_ACCESS_TOKEN,
CONF_NAME,
CONF_PATH,
CONF_URL,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
# Configuration key for the list of repositories to monitor.
CONF_REPOS = "repositories"
# Names of the extra state attributes exposed by GitHubSensor.
ATTR_LATEST_COMMIT_MESSAGE = "latest_commit_message"
ATTR_LATEST_COMMIT_SHA = "latest_commit_sha"
ATTR_LATEST_RELEASE_URL = "latest_release_url"
ATTR_LATEST_OPEN_ISSUE_URL = "latest_open_issue_url"
ATTR_OPEN_ISSUES = "open_issues"
ATTR_LATEST_OPEN_PULL_REQUEST_URL = "latest_open_pull_request_url"
ATTR_OPEN_PULL_REQUESTS = "open_pull_requests"
ATTR_PATH = "path"
ATTR_STARGAZERS = "stargazers"
DEFAULT_NAME = "GitHub"
# Poll the GitHub API at most once every 5 minutes.
SCAN_INTERVAL = timedelta(seconds=300)
# Schema for one entry under ``repositories:`` — a path plus optional name.
REPO_SCHEMA = vol.Schema(
    {vol.Required(CONF_PATH): cv.string, vol.Optional(CONF_NAME): cv.string}
)
# Platform schema: token, optional GitHub Enterprise URL, repository list.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_ACCESS_TOKEN): cv.string,
        vol.Optional(CONF_URL): cv.url,
        vol.Required(CONF_REPOS): vol.All(cv.ensure_list, [REPO_SCHEMA]),
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the GitHub sensor platform.

    Builds one GitHubData/GitHubSensor pair per configured repository and
    aborts the whole platform if any repository fails to initialize.
    """
    entities = []
    for repo_conf in config[CONF_REPOS]:
        repo_data = GitHubData(
            repository=repo_conf,
            access_token=config.get(CONF_ACCESS_TOKEN),
            server_url=config.get(CONF_URL),
        )
        if repo_data.setup_error is True:
            _LOGGER.error(
                "Error setting up GitHub platform. %s",
                "Check previous errors for details",
            )
            return
        entities.append(GitHubSensor(repo_data))
    add_entities(entities, True)
class GitHubSensor(Entity):
    """Representation of a GitHub sensor."""

    def __init__(self, github_data):
        """Initialize the sensor from its GitHubData source."""
        self._github_data = github_data
        self._unique_id = github_data.repository_path
        self._name = None
        self._state = None
        self._available = False
        self._repository_path = None
        self._latest_commit_message = None
        self._latest_commit_sha = None
        self._latest_release_url = None
        self._open_issue_count = None
        self._latest_open_issue_url = None
        self._pull_request_count = None
        self._latest_open_pr_url = None
        self._stargazers = None

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def unique_id(self):
        """Return the unique ID for the sensor."""
        return self._unique_id

    @property
    def state(self):
        """Return the state of the sensor (latest commit SHA)."""
        return self._state

    @property
    def available(self):
        """Return True if entity is available."""
        return self._available

    @property
    def device_state_attributes(self):
        """Return the extra state attributes."""
        attributes = {
            ATTR_PATH: self._repository_path,
            ATTR_NAME: self._name,
            ATTR_LATEST_COMMIT_MESSAGE: self._latest_commit_message,
            ATTR_LATEST_COMMIT_SHA: self._latest_commit_sha,
            ATTR_LATEST_RELEASE_URL: self._latest_release_url,
            ATTR_LATEST_OPEN_ISSUE_URL: self._latest_open_issue_url,
            ATTR_OPEN_ISSUES: self._open_issue_count,
            ATTR_LATEST_OPEN_PULL_REQUEST_URL: self._latest_open_pr_url,
            ATTR_OPEN_PULL_REQUESTS: self._pull_request_count,
            ATTR_STARGAZERS: self._stargazers,
        }
        return attributes

    @property
    def icon(self):
        """Return the icon to use in the frontend."""
        return "mdi:github-circle"

    def update(self):
        """Collect updated data from the GitHub API."""
        data = self._github_data
        data.update()
        self._name = data.name
        self._state = data.latest_commit_sha
        self._repository_path = data.repository_path
        self._available = data.available
        self._latest_commit_message = data.latest_commit_message
        self._latest_commit_sha = data.latest_commit_sha
        self._latest_release_url = data.latest_release_url
        self._open_issue_count = data.open_issue_count
        self._latest_open_issue_url = data.latest_open_issue_url
        self._pull_request_count = data.pull_request_count
        self._latest_open_pr_url = data.latest_open_pr_url
        self._stargazers = data.stargazers
class GitHubData:
    """GitHub Data object.

    Wraps one repository's PyGithub access.  __init__ validates the
    connection; on failure it sets ``setup_error`` and returns early,
    leaving the data attributes undefined.  ``update()`` refreshes the
    attributes read by GitHubSensor.update().
    """

    def __init__(self, repository, access_token=None, server_url=None):
        """Set up GitHub."""
        # Imported lazily so the module can be loaded without PyGithub.
        import github

        self._github = github
        self.setup_error = False
        try:
            if server_url is not None:
                # GitHub Enterprise instances serve the API under /api/v3.
                server_url += "/api/v3"
                self._github_obj = github.Github(access_token, base_url=server_url)
            else:
                self._github_obj = github.Github(access_token)
            self.repository_path = repository[CONF_PATH]
            repo = self._github_obj.get_repo(self.repository_path)
        except self._github.GithubException as err:
            # NOTE(review): assumes repository_path was assigned before the
            # exception (i.e. the failure came from get_repo) — confirm that
            # github.Github() itself cannot raise GithubException here.
            _LOGGER.error("GitHub error for %s: %s", self.repository_path, err)
            self.setup_error = True
            return
        self.name = repository.get(CONF_NAME, repo.name)
        self.available = False
        self.latest_commit_message = None
        self.latest_commit_sha = None
        self.latest_release_url = None
        self.open_issue_count = None
        self.latest_open_issue_url = None
        self.pull_request_count = None
        self.latest_open_pr_url = None
        self.stargazers = None

    def update(self):
        """Update GitHub Sensor."""
        try:
            repo = self._github_obj.get_repo(self.repository_path)
            self.stargazers = repo.stargazers_count
            open_issues = repo.get_issues(state="open", sort="created")
            if open_issues is not None:
                self.open_issue_count = open_issues.totalCount
                if open_issues.totalCount > 0:
                    self.latest_open_issue_url = open_issues[0].html_url
            open_pull_requests = repo.get_pulls(state="open", sort="created")
            if open_pull_requests is not None:
                self.pull_request_count = open_pull_requests.totalCount
                if open_pull_requests.totalCount > 0:
                    self.latest_open_pr_url = open_pull_requests[0].html_url
            latest_commit = repo.get_commits()[0]
            self.latest_commit_sha = latest_commit.sha
            self.latest_commit_message = latest_commit.commit.message
            releases = repo.get_releases()
            if releases and releases.totalCount > 0:
                self.latest_release_url = releases[0].html_url
            self.available = True
        except self._github.GithubException as err:
            # On any API failure, mark the sensor unavailable; attributes
            # updated before the failure keep their new values.
            _LOGGER.error("GitHub error for %s: %s", self.repository_path, err)
            self.available = False
| apache-2.0 |
lukesanantonio/gen | gen.py | 2 | 14437 | # Gen v0.1
# A generic, JSON-based asset pipeline for heterogeneous setups and
# unusual configurations.
#
# This is free and unencumbered software released into the public domain.
# For more information, please refer to <http://unlicense.org/>
import os
import shutil
import json
import subprocess
import jinja2
import jinja2.meta
import sys
import imp
import argparse
import logging
import copy
import time
# Helper functions
def in_out_file(asset_root, dist_root, f):
    """Return the (source, destination) paths of *f* under the two roots."""
    source = os.path.join(asset_root, f)
    destination = os.path.join(dist_root, f)
    return source, destination
def is_newer(i_file, o_file):
    """Return True if *o_file* is missing or older than *i_file*.

    Equal modification times count as up to date, so a rebuild only
    happens when the input is strictly newer than the output.
    """
    if not os.path.exists(o_file):
        return True
    return os.path.getmtime(i_file) > os.path.getmtime(o_file)
def find_asset_object(assets, directory):
    """Return the asset whose 'root' equals *directory* or one of its parents.

    The search walks upward from the normalized *directory*; the filesystem
    root itself is never matched.  Returns None when nothing matches.
    """
    directory = os.path.normpath(directory)
    parent = os.path.dirname(directory)
    # Stop once the parent equals the directory (we reached the top).
    while directory != parent:
        match = next((a for a in assets if a['root'] == directory), None)
        if match is not None:
            return match
        directory, parent = parent, os.path.dirname(parent)
    return None
# Exceptions
class AssetRootNotFound(Exception):
    """Raised when an asset's root directory cannot be located."""
class ValidationError(Exception):
    """An input object failed validation; *obj* is the offending object."""

    def __init__(self, msg, obj):
        """Store *obj* alongside the standard exception message."""
        super().__init__(msg)
        self.obj = obj
class InputTypeError(ValidationError):
    """Raised when an input object has the wrong type."""

    def __init__(self, obj, expected_type):
        message = "Input object '{0}' must be type: '{1}'".format(
            repr(obj), str(expected_type))
        super().__init__(message, obj)
        self.expected_type = expected_type
class InputAttributeError(ValidationError):
    """Raised when an input object is missing a required attribute."""

    def __init__(self, obj, attr):
        message = "Input object '{0}' must have attribute: '{1}'".format(
            repr(obj), str(attr))
        super().__init__(message, obj)
        self.attr = attr
class SourceNotFoundError(ValidationError):
    """Raised when an input's source file does not exist on disk."""

    def __init__(self, obj, fname):
        message = "Source file '{0}' doesn't exist.".format(fname)
        super().__init__(message, obj)
        self.fname = fname
class Environment:
    """Holds the absolute asset root and distribution root for a build."""

    def __init__(self, root, dist_root):
        """Store absolute versions of *root* and *dist_root*."""
        self.root, self.dist_root = (
            os.path.abspath(path) for path in (root, dist_root))
class Output:
    """Reports pipeline events through a logger (stdout by default).

    The constructor attaches a stdout handler and sets the level to
    WARNING, so info/debug messages are hidden unless the caller lowers
    the level afterwards.
    """

    def __init__(self, logger=None):
        self.log = logger or logging.getLogger(__name__)
        self.log.addHandler(logging.StreamHandler(sys.stdout))
        self.log.setLevel(logging.WARNING)

    def on_transform(self, in_file, out_file):
        """Log one input-to-output transformation."""
        self.log.info('{0} => {1}'.format(os.path.relpath(in_file),
                                          os.path.relpath(out_file)))

    def on_skip(self, out_file):
        """Log that an up-to-date output was skipped."""
        self.log.debug('Skipping {0}'.format(os.path.relpath(out_file)))

    def on_command(self, args):
        """Log the external command about to be executed."""
        self.log.info('Running: {0}'.format(' '.join(args)))

    def on_error(self, msg):
        """Log an error message."""
        self.log.error(msg)

    def on_remove(self, filename, **kwargs):
        """Log a removal; optional kwargs 'adj' and 'filetype' refine it."""
        adjective = kwargs.get('adj', '') + ' '
        kind = kwargs.get('filetype') or 'file'
        self.log.info('Removing {0}{1}: {2}'.format(adjective, kind, filename))
class Operations:
    """File-producing primitives used by the asset classes.

    Every operation creates the destination directory if needed, copies
    the source file's stat info (mtime) onto the output so the mtime
    cache stays meaningful, and reports the action through ``self.out``.
    """

    def __init__(self, out=None):
        # Reporting sink; defaults to a fresh Output().
        self.out = out or Output()

    def copy(self, input_file, output_file):
        """Copy *input_file* to *output_file*, creating parent dirs."""
        os.makedirs(os.path.dirname(output_file), exist_ok=True)
        shutil.copy(input_file, output_file)
        # Preserve timestamps so dependency checks compare correctly.
        shutil.copystat(input_file, output_file)
        self.out.on_transform(input_file, output_file)

    def file_from_content(self, input_file, content, output_file):
        """Write *content* to *output_file*, stat-copied from *input_file*."""
        os.makedirs(os.path.dirname(output_file), exist_ok=True)
        with open(output_file, "w") as f:
            f.write(content)
        shutil.copystat(input_file, output_file)
        self.out.on_transform(input_file, output_file)

    def subprocess_transform(self, prg, options, input_file, output_file):
        """Run ``prg [options] input_file output_file`` to build the output.

        Reports success and copies stat info only when the subprocess
        exits with status 0.  (The original tested the truthiness of the
        return code, which triggered the success path on *failure*.)
        """
        args = [prg, input_file, output_file]
        args[1:1] = options
        os.makedirs(os.path.dirname(output_file), exist_ok=True)
        self.out.on_command(args)
        # subprocess.call returns the exit status; 0 means success.
        if subprocess.call(args) == 0:
            self.out.on_transform(input_file, output_file)
            shutil.copystat(input_file, output_file)
class BaseAsset:
    """Common behaviour for asset types.

    Subclasses implement validate()/list_output()/get_dependencies()/
    install(); this base validates every input up front and stores the
    asset configuration.
    """

    def __init__(self, root, dist, inputs, ops, options, env):
        """Validate *inputs* and record the asset configuration.

        ``root`` and ``dist`` are made absolute.  Each input is passed
        through ``self.validate``; a ValidationError is re-raised with
        the input index prefixed to its message.  If validate() is not
        implemented by the subclass, validation is skipped entirely.
        """
        self.root = os.path.abspath(root)
        self.dist = os.path.abspath(dist)
        for index, item in enumerate(inputs):
            try:
                self.validate(item)
            except ValidationError as e:
                # Re-raise with the failing input's index in the message.
                # BaseException.args is a tuple, so rebuild it instead of
                # assigning to e.args[0] (which raises TypeError).
                tb = sys.exc_info()[2]
                e.args = ('input[{0}]: '.format(index) + str(e.args[0]),) \
                    + e.args[1:]
                raise e.with_traceback(tb)
            except NotImplementedError:
                # If validation isn't implemented that doesn't really
                # matter.  Just bail.
                break
        # If there was validation, it passed.
        self.inputs = inputs
        self.operations = ops
        self.options = options
        self.env = env

    def list_output(self):
        """Return the files (relative to dist) this asset produces."""
        raise NotImplementedError

    def get_dependencies(self, fname):
        """Return a list of files relative to self.root that fname requires."""
        raise NotImplementedError

    def install(self, filename):
        """Generate a single output file."""
        raise NotImplementedError

    def validate(self, input_obj):
        """Check one input object; raise ValidationError if invalid."""
        raise NotImplementedError

    def install_all(self):
        """Install all files and return what files were installed."""
        to_install = self.list_output()
        for f in to_install:
            self.install(f)
        return to_install
class StaticAsset(BaseAsset):
    """Asset type that copies source files verbatim into the dist tree."""

    def validate(self, fname):
        """Ensure *fname* is a string naming an existing source path."""
        if not isinstance(fname, str):
            raise InputTypeError(fname, str)
        if not os.path.exists(os.path.join(self.root, fname)):
            raise SourceNotFoundError(fname, fname)

    def _get_source_list(self, input_obj):
        """Expand *input_obj* (file or directory) into root-relative paths."""
        absolute = os.path.join(self.root, input_obj)
        if not os.path.isdir(absolute):
            # A plain file: one-element list, relative to the root.
            return [os.path.relpath(absolute, self.root)]
        # A directory: recurse into every entry it contains.
        collected = []
        for entry in os.listdir(absolute):
            entry_path = os.path.normpath(os.path.join(absolute, entry))
            collected.extend(self._get_source_list(entry_path))
        return collected

    def get_dependencies(self, filename):
        """A static file depends only on itself."""
        return [filename]

    def list_output(self):
        """All files named (directly or via directories) by the inputs."""
        found = []
        for item in self.inputs:
            found.extend(self._get_source_list(item))
        return found

    def install(self, filename):
        """Copy one file from the asset root into the dist tree."""
        in_f, out_f = in_out_file(self.root, self.dist, filename)
        self.operations.copy(in_f, out_f)
class Jinja2Asset(BaseAsset):
    """Asset type that renders Jinja2 templates into the dist tree."""

    def validate(self, input_obj):
        """Each input must be a dict whose 'filename' names a real file."""
        if not isinstance(input_obj, dict):
            raise InputTypeError(input_obj, dict)
        f = input_obj.get('filename', None)
        if f is None:
            raise InputAttributeError(input_obj, 'filename')
        if not os.path.exists(os.path.join(self.root, f)):
            raise SourceNotFoundError(input_obj, f)

    def get_dependencies(self, filename):
        """Return *filename* plus every template it transitively references.

        Parses the template into a Jinja2 AST and follows all
        extends/include/import references recursively.
        """
        depends = [filename]
        source = os.path.join(self.root, filename)
        # Context manager so the handle is closed promptly; the original
        # `open(source).read()` leaked it to the garbage collector.
        with open(source) as template_file:
            ast = jinja2.Environment().parse(template_file.read())
        template_depends = jinja2.meta.find_referenced_templates(ast)
        for dependency in template_depends:
            # find_referenced_templates may yield None for dynamic names.
            if dependency:
                dependency = os.path.join(os.path.dirname(source), dependency)
                dependency = os.path.normpath(dependency)
                dependency = os.path.relpath(dependency, self.root)
                depends.extend(self.get_dependencies(dependency))
        return depends

    def list_output(self):
        """One output per input: the template's own filename."""
        output = []
        for i in self.inputs:
            output.append(i['filename'])
        return output

    def install(self, filename):
        """Render the template for *filename* into the dist tree."""
        # Set up our Jinja2 environment rooted at the asset root.
        loader = jinja2.FileSystemLoader(self.root)
        self.__jinja2env = jinja2.Environment(loader=loader)
        # Find the asset input object matching this filename.
        # TODO Make this process automatic in the base class.
        input_obj = None
        for i in self.inputs:
            if i['filename'] == filename:
                input_obj = i
                break
        if input_obj is None:
            raise ValueError("Cannot find input object with filename: '{0}'"
                             .format(filename))
        # The filename is relative to the asset root, but that is where
        # the FileSystemLoader looks, so it works out.
        template = self.__jinja2env.get_template(filename)
        if 'parameters' in input_obj:
            rendered_template = template.render(input_obj['parameters'])
        else:
            rendered_template = template.render()
        in_f, out_f = in_out_file(self.root, self.dist, filename)
        self.operations.file_from_content(in_f, rendered_template, out_f)
class ScssAsset(StaticAsset):
    """Asset type that compiles .scss sources to .css via the scss tool."""

    def get_dependencies(self, filename):
        """Map an output .css name back to its .scss source."""
        return [os.path.splitext(filename)[0] + '.scss']

    def list_output(self):
        """Same files as StaticAsset, but renamed to a .css extension."""
        names = super().list_output()
        return [os.path.splitext(name)[0] + '.css' for name in names]

    def install(self, filename):
        """Run the scss compiler to produce one .css output file."""
        in_f = os.path.join(self.root, os.path.splitext(filename)[0] + '.scss')
        out_f = os.path.join(self.dist, filename)
        # Translate any configured search paths into --load-path options.
        command_options = []
        for path in self.options.get('search_paths', []):
            command_options.extend(
                ['--load-path', os.path.join(self.env.dist_root, path)])
        self.operations.subprocess_transform('scss', command_options,
                                             in_f, out_f)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--assets-file', default=None,
                        help="Specify the assets json file " +
                        "(default ./assets.json).")
    parser.add_argument('-w', '--watch', action="store_true", default=False,
                        help="Stay open and watch for file changes.")
    parser.add_argument('-v', '--verbose', action="count", default=0,
                        help="Log files copied to stderr.")
    arguments = parser.parse_args()

    out = Output()
    if arguments.verbose == 1:
        out.log.setLevel(logging.INFO)
    elif arguments.verbose > 1:
        out.log.setLevel(logging.DEBUG)

    # Parse the assets.json file.
    try:
        assets_json_filename = arguments.assets_file or 'assets.json'
        with open(assets_json_filename) as assets_file:
            assets_json = json.load(assets_file)
    except OSError:
        out.on_error("Failed to open '" + assets_json_filename + "'!")
        sys.exit(1)

    env = Environment(os.getcwd(),
                      os.path.abspath(assets_json.get('dist', 'dist/')))
    # Registry mapping the 'type' key to the implementing asset class.
    transformations = {'static': StaticAsset, 'jinja2': Jinja2Asset,
                       'scss': ScssAsset}

    while True:
        # Load up our cached modification times.
        try:
            with open('.gencache.json') as cache_file:
                cache = json.load(cache_file)
        except OSError:
            cache = {}
        cache_to_write = copy.copy(cache)

        output = []
        for asset in assets_json.get('assets', []):
            # Find the asset-specific dist dir.
            asset_dist = os.path.join(env.dist_root,
                                      asset.get('dist', asset['root']))
            asset_dist = os.path.normpath(asset_dist)
            # Find our asset class (the original also had a dead
            # `asset_obj = asset_type` assignment here).
            asset_type = transformations.get(asset['type'])
            if asset_type:
                try:
                    asset_obj = asset_type(asset['root'], asset_dist,
                                           asset['input'], Operations(out),
                                           asset.get('type_options', {}), env)
                except ValidationError as e:
                    out.on_error(e)
                    continue
            else:
                out.on_error("No plugin available to handle '" +
                             asset['type'] + "' assets.")
                continue
            for f in asset_obj.list_output():
                # Regenerate the output only when one of its dependencies
                # is newer than the cached mtime.
                depends = asset_obj.get_dependencies(f)
                regeneration_required = False
                for dependency in depends:
                    dependency_source = os.path.join(asset['root'], dependency)
                    dependency_mtime = os.path.getmtime(dependency_source)
                    if cache.get(dependency, 0) < dependency_mtime:
                        # Update the cache and force a rebuild.
                        cache_to_write[dependency] = dependency_mtime
                        regeneration_required = True
                if regeneration_required:
                    asset_obj.install(f)
                else:
                    out.on_skip(f)
                output.append(os.path.join(asset_dist, f))

        # Write the cache.
        with open('.gencache.json', 'w') as cache_file:
            json.dump(cache_to_write, cache_file)

        # Prune stale files and empty directories from the dist tree.
        for dirname, dirs, files in os.walk(env.dist_root, topdown=False):
            for f in files:
                # f becomes absolute here; check if it should still exist.
                f = os.path.join(dirname, f)
                if f not in output:
                    out.on_remove(os.path.relpath(f), adj='old')
                    os.remove(f)
            for d in dirs:
                d = os.path.join(dirname, d)
                if len(os.listdir(d)) == 0:
                    # Report the directory itself (the original logged the
                    # last *file* name here by mistake).
                    out.on_remove(os.path.relpath(d), adj='empty',
                                  filetype='directory')
                    os.rmdir(d)

        if not arguments.watch:
            break
        # Poll for changes four times a second while watching.
        time.sleep(.25)
# License: Unlicense
# MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Implements the DistUtils command 'build_ext'
"""
from distutils.command.build_ext import build_ext
from distutils.command.install import install
from distutils.command.install_lib import install_lib
from distutils.errors import DistutilsExecError
from distutils.util import get_platform
from distutils.dir_util import copy_tree
from distutils import log
from glob import glob
import os
import shlex
import struct
from subprocess import Popen, PIPE, STDOUT, check_call
import sys
import platform
import shutil
# True on a 64-bit interpreter build (sys.maxsize exceeds 2**32 there).
ARCH_64BIT = sys.maxsize > 2**32  # Works with Python 2.6 and greater
# Human-readable architecture label used in log/print output.
py_arch = '64-bit' if ARCH_64BIT else '32-bit'

# (long-option, short-option, help) triples shared by the C Extension
# build and install commands.
CEXT_OPTIONS = [
    ('with-mysql-capi=', None,
     "Location of MySQL C API installation or path to mysql_config"),
    ('extra-compile-args=', None,
     "Extra compile args"),
    ('extra-link-args=', None,
     "Extra link args")
]

# Option enabling static linking of the C libraries.
CEXT_STATIC_OPTIONS = [
    ('static', None,
     "Link C libraries statically with the C Extension"),
]

# Install-time option to ship byte code only (no .py sources).
INSTALL_OPTIONS = [
    ('byte-code-only=', None,
     "Remove Python .py files; leave byte code .pyc only"),
]
def win_dll_is64bit(dll_file):
    """Check if a Windows DLL is 64 bit or not

    Returns True if the library dll_file is 64bit.

    Raises ValueError when magic of header is invalid.
    Raises IOError when file could not be read.
    Raises OSError when execute on none-Windows platform.

    Returns True or False.
    """
    if os.name != 'nt':
        raise OSError("win_ddl_is64bit only useful on Windows")

    with open(dll_file, 'rb') as fp:
        # IMAGE_DOS_HEADER: the file must start with the 'MZ' magic.
        if fp.read(2) != b'MZ':
            raise ValueError("Wrong magic in header")
        # e_lfanew at offset 60 points to the PE header.
        fp.seek(60)
        (offset,) = struct.unpack("I", fp.read(4))
        # IMAGE_FILE_HEADER: signature followed by the machine field.
        fp.seek(offset)
        signature, machine = struct.unpack("<4sH", fp.read(6))
        if machine == 0x014c:  # IMAGE_FILE_MACHINE_I386
            return False
        elif machine in (0x8664, 0x2000):  # AMD64 / 64-bit variants
            return True
        # NOTE(review): any other machine value falls through and
        # returns None (preserved from the original behaviour).
def unix_lib_is64bit(lib_file):
    """Check if a library on UNIX is 64 bit or not

    This function uses the `file` command to check if a library on
    UNIX-like platforms is 32 or 64 bit.

    Returns True if the library is 64bit.

    Raises ValueError when magic of header is invalid.
    Raises IOError when file could not be read.
    Raises OSError when execute on none-Windows platform.

    Returns True or False.
    """
    if os.name != 'posix':
        raise OSError("unix_lib_is64bit only useful on UNIX-like systems")

    # When given a directory, locate the preferred libmysqlclient shared
    # library inside it.  (The original called the non-existent
    # os.isdir(), which raised AttributeError.)
    if os.path.isdir(lib_file):
        mysqlclient_libs = []
        for root, _, files in os.walk(lib_file):
            for filename in files:
                filepath = os.path.join(root, filename)
                if filename.startswith('libmysqlclient') and \
                   not os.path.islink(filepath) and \
                   '_r' not in filename and \
                   '.a' not in filename:
                    mysqlclient_libs.append(filepath)
            if mysqlclient_libs:
                break
        # give priority to .so files instead of .a
        mysqlclient_libs.sort()
        # Guard against an empty result instead of raising IndexError.
        if mysqlclient_libs:
            lib_file = mysqlclient_libs[-1]

    log.debug("# Using file command to test lib_file {0}".format(lib_file))
    # The same 'file -L' invocation works on Solaris and other Unices
    # (the original had two identical branches keyed on a broken
    # platform.uname() comparison).
    cmd_list = ['file', '-L', lib_file]
    # universal_newlines=True decodes the output so the substring checks
    # below work on Python 3 (bytes vs str) as well as Python 2.
    prc = Popen(cmd_list, stdin=PIPE, stderr=STDOUT, stdout=PIPE,
                universal_newlines=True)
    stdout = prc.communicate()[0]
    stdout = stdout.split(':')[1]
    log.debug("# lib_file {0} stdout: {1}".format(lib_file, stdout))
    if 'x86_64' in stdout or 'x86-64' in stdout or '32-bit' not in stdout:
        return True
    return False
def parse_mysql_config_info(options, stdout):
    """Parse mysql_config output into a dict of build information.

    *options* is the list of mysql_config options that were queried and
    *stdout* the tool's output, one line per option.  Returns a dict
    with (among others) 'version' as an int tuple, 'lib_dir'/'libs' and
    'lib_r_dir'/'libs_r' split out of the -L/-l linker flags, and
    'include' as a list of -I paths.
    """
    log.debug("# stdout: {0}".format(stdout))
    info = {}
    for option, line in zip(options, stdout.split('\n')):
        log.debug("# option: {0}".format(option))
        log.debug("# line: {0}".format(line))
        info[option] = line.strip()

    # The version may carry a suffix such as '5.7.20-log'; keep the
    # numeric part and turn it into a comparable tuple of ints.
    ver = info['version']
    if '-' in ver:
        ver, _ = ver.split('-', 2)
    info['version'] = tuple(int(part) for part in ver.split('.')[0:3])

    # Split '-L<dir> -l<lib> ...' into a directory and library names.
    linker_args = shlex.split(info['libs'])
    info['lib_dir'] = linker_args[0].replace('-L', '')
    info['libs'] = [flag.replace('-l', '') for flag in linker_args[1:]]
    if platform.uname()[0] == 'SunOS':
        # Solaris adds -R run-path flags; strip those too.
        info['lib_dir'] = info['lib_dir'].replace('-R', '')
        info['libs'] = [flag.replace('-R', '') for flag in info['libs']]
    log.debug("# info['libs']: ")
    for lib in info['libs']:
        log.debug("# {0}".format(lib))

    # Same split for the re-entrant (thread-safe) client library.
    linker_args = shlex.split(info['libs_r'])
    info['lib_r_dir'] = linker_args[0].replace('-L', '')
    info['libs_r'] = [flag.replace('-l', '') for flag in linker_args[1:]]

    info['include'] = [x.strip() for x in info['include'].split('-I')[1:]]
    return info
def get_mysql_config_info(mysql_config):
    """Get MySQL information using mysql_config tool

    Returns a dict.
    """
    # Query all options in one mysql_config invocation; output comes
    # back one line per option, in the same order.
    options = ['cflags', 'include', 'libs', 'libs_r', 'plugindir', 'version']
    cmd = [mysql_config] + [ "--{0}".format(opt) for opt in options ]
    try:
        proc = Popen(cmd, stdout=PIPE, universal_newlines=True)
        stdout, _ = proc.communicate()
    except OSError as exc:
        raise DistutilsExecError("Failed executing mysql_config: {0}".format(
            str(exc)))
    info = parse_mysql_config_info(options, stdout)
    # Try to figure out the architecture
    info['arch'] = None
    if os.name == 'posix':
        # Build a glob pattern that should match the client library
        # files under the reported lib directory.
        if platform.uname()[0] == 'SunOS':
            print("info['lib_dir']: {0}".format(info['lib_dir']))
            print("info['libs'][0]: {0}".format(info['libs'][0]))
            pathname = os.path.abspath(os.path.join(info['lib_dir'],
                                                    'lib',
                                                    info['libs'][0])) + '/*'
        else:
            pathname = os.path.join(info['lib_dir'],
                                    'lib' + info['libs'][0]) + '*'
        print("# Looking mysqlclient_lib at path: {0}".format(pathname))
        log.debug("# searching mysqlclient_lib at: %s", pathname)
        libs = glob(pathname)
        # Keep only real (non-symlink) non-reentrant shared libraries.
        mysqlclient_libs = []
        for filepath in libs:
            _, filename = os.path.split(filepath)
            log.debug("# filename {0}".format(filename))
            if filename.startswith('libmysqlclient') and \
               not os.path.islink(filepath) and \
               '_r' not in filename and \
               '.a' not in filename:
                mysqlclient_libs.append(filepath)
        mysqlclient_libs.sort()
        stdout = None
        try:
            # NOTE(review): raises IndexError (not the caught OSError)
            # when no matching library file was found -- confirm intent.
            log.debug("# mysqlclient_lib: {0}".format(mysqlclient_libs[-1]))
            for mysqlclient_lib in mysqlclient_libs:
                log.debug("#+ {0}".format(mysqlclient_lib))
            log.debug("# tested mysqlclient_lib[-1]: "
                      "{0}".format(mysqlclient_libs[-1]))
            # Ask the `file` command about the library's word size.
            if platform.uname()[0] == 'SunOS':
                print("mysqlclient_lib: {0}".format(mysqlclient_libs[-1]))
                cmd_list = ['file', mysqlclient_libs[-1]]
            else:
                cmd_list = ['file', '-L', mysqlclient_libs[-1]]
            proc = Popen(cmd_list, stdout=PIPE,
                         universal_newlines=True)
            stdout, _ = proc.communicate()
            # Drop the leading 'path:' prefix from the file(1) output.
            stdout = stdout.split(':')[1]
        except OSError as exc:
            raise DistutilsExecError(
                "Although the system seems POSIX, the file-command could not "
                "be executed: {0}".format(str(exc)))
        if stdout:
            if '64' in stdout:
                info['arch'] = "x86_64"
            else:
                info['arch'] = "i386"
        else:
            raise DistutilsExecError(
                "Failed getting out put from the file-command"
            )
    else:
        raise DistutilsExecError(
            "Cannot determine architecture on {0} systems".format(os.name))
    return info
def remove_cext(distribution):
    """Remove the C Extension from the distribution

    This function can be useful in Distutils commands for creating
    pure Python modules.
    """
    # Collect first, then remove, so the list is never mutated while
    # being iterated.
    doomed = [ext for ext in distribution.ext_modules
              if ext.name == '_mysql_connector']
    for ext in doomed:
        distribution.ext_modules.remove(ext)
class BuildExtDynamic(build_ext):
    """Build Connector/Python C Extension"""
    description = "build Connector/Python C Extension"
    # Accept the C-Extension specific command line options as well.
    user_options = build_ext.user_options + CEXT_OPTIONS
    # Minimum required MySQL C API version, an (x, y, z) tuple.
    # NOTE(review): None here, but dereferenced unconditionally in
    # _finalize_connector_c(); it must be assigned elsewhere before this
    # command runs -- confirm where.
    min_connector_c_version = None
    # Architecture of the MySQL C API binaries: 'x86_64' or 'i386'.
    arch = None
    # Cached dict produced by get_mysql_config_info(), when used.
    _mysql_config_info = None
    def initialize_options(self):
        """Reset all C-Extension related options to their defaults."""
        build_ext.initialize_options(self)
        self.extra_compile_args = None
        self.extra_link_args = None
        self.with_mysql_capi = None
    def _finalize_connector_c(self, connc_loc):
        """Finalize the --with-connector-c command line argument
        """
        platform = get_platform()
        self._mysql_config_info = None
        min_version = BuildExtDynamic.min_connector_c_version
        err_invalid_loc = "MySQL C API location is invalid; was %s"
        mysql_config = None
        err_version = "MySQL C API {0}.{1}.{2} or later required".format(
            *BuildExtDynamic.min_connector_c_version)
        if not os.path.exists(connc_loc):
            log.error(err_invalid_loc, connc_loc)
            sys.exit(1)
        if os.path.isdir(connc_loc):
            # if directory, and no mysql_config is available, figure out the
            # lib/ and include/ folders from the the filesystem
            mysql_config = os.path.join(connc_loc, 'bin', 'mysql_config')
            if os.path.isfile(mysql_config) and \
                os.access(mysql_config, os.X_OK):
                connc_loc = mysql_config
                log.debug("# connc_loc: {0}".format(connc_loc))
            else:
                # Probably using MS Windows
                myconfigh = os.path.join(connc_loc, 'include', 'my_config.h')
                if not os.path.exists(myconfigh):
                    log.error("MySQL C API installation invalid "
                              "(my_config.h not found)")
                    sys.exit(1)
                else:
                    # Scan my_config.h for '#define VERSION "x.y.z"' and
                    # enforce the minimum supported C API version.
                    with open(myconfigh, 'rb') as fp:
                        for line in fp.readlines():
                            if b'#define VERSION' in line:
                                version = tuple([
                                    int(v) for v in
                                    line.split()[2].replace(
                                        b'"', b'').split(b'.')
                                ])
                                if version < min_version:
                                    log.error(err_version);
                                    sys.exit(1)
                                break
                # On Windows we check libmysql.dll
                if os.name == 'nt':
                    lib = os.path.join(self.with_mysql_capi, 'lib',
                                       'libmysql.dll')
                    connc_64bit = win_dll_is64bit(lib)
                # On OSX we check libmysqlclient.dylib
                elif 'macos' in platform:
                    lib = os.path.join(self.with_mysql_capi, 'lib',
                                       'libmysqlclient.dylib')
                    connc_64bit = unix_lib_is64bit(lib)
                # On other Unices we check libmysqlclient (follow symlinks)
                elif os.name == 'posix':
                    connc_64bit = unix_lib_is64bit(connc_loc)
                else:
                    raise OSError("Unsupported platform: %s" % os.name)
                include_dirs = [os.path.join(connc_loc, 'include')]
                if os.name == 'nt':
                    libraries = ['libmysql']
                else:
                    libraries = ['-lmysqlclient']
                library_dirs = os.path.join(connc_loc, 'lib')
                log.debug("# connc_64bit: {0}".format(connc_64bit))
                if connc_64bit:
                    self.arch = 'x86_64'
                else:
                    self.arch = 'i386'
        # We were given the location of the mysql_config tool (not on Windows)
        if not os.name == 'nt' and os.path.isfile(connc_loc) \
            and os.access(connc_loc, os.X_OK):
            mysql_config = connc_loc
            # Check mysql_config
            myc_info = get_mysql_config_info(mysql_config)
            log.debug("# myc_info: {0}".format(myc_info))
            if myc_info['version'] < min_version:
                log.error(err_version)
                sys.exit(1)
            include_dirs = myc_info['include']
            libraries = myc_info['libs']
            library_dirs = myc_info['lib_dir']
            self._mysql_config_info = myc_info
            self.arch = self._mysql_config_info['arch']
            connc_64bit = self.arch == 'x86_64'
        for include_dir in include_dirs:
            if not os.path.exists(include_dir):
                log.error(err_invalid_loc, connc_loc)
                sys.exit(1)
        # Set up the build_ext class
        self.include_dirs.extend(include_dirs)
        self.libraries.extend(libraries)
        self.library_dirs.append(library_dirs)
        # We try to offer a nice message when the architecture of Python
        # is not the same as MySQL Connector/C binaries.
        print("# self.arch: {0}".format(self.arch))
        if ARCH_64BIT != connc_64bit:
            log.error("Python is {0}, but does not "
                      "match MySQL C API {1} architecture, "
                      "type: {2}"
                      "".format(py_arch,
                                '64-bit' if connc_64bit else '32-bit',
                                self.arch))
            sys.exit(1)
    def finalize_options(self):
        """Pull shared options from the 'install' command, then finalize."""
        self.set_undefined_options(
            'install',
            ('extra_compile_args', 'extra_compile_args'),
            ('extra_link_args', 'extra_link_args'),
            ('with_mysql_capi', 'with_mysql_capi'))
        build_ext.finalize_options(self)
        print("# Python architecture: {0}".format(py_arch))
        print("# Python ARCH_64BIT: {0}".format(ARCH_64BIT))
        if self.with_mysql_capi:
            self._finalize_connector_c(self.with_mysql_capi)
    def fix_compiler(self):
        """Adjust the configured compiler flags for the target platform."""
        platform = get_platform()
        cc = self.compiler
        if not cc:
            return
        if 'macosx-10.9' in platform:
            # Clang on OS X 10.9 rejects this legacy GCC-only flag.
            for needle in ['-mno-fused-madd']:
                try:
                    cc.compiler.remove(needle)
                    cc.compiler_so.remove(needle)
                except ValueError:
                    # We are removing, so OK when needle not there
                    pass
        # Filter '-arch <other>' pairs out of every flag list on the
        # compiler, keeping only the architecture we build for.
        for name, args in cc.__dict__.items():
            if not args or not isinstance(args, list):
                continue
            new_args = []
            enum_args = enumerate(args)
            for i, arg in enum_args:
                if arg == '-arch':
                    # Skip not needed architecture
                    if args[i+1] != self.arch:
                        next(enum_args)
                    else:
                        new_args.append(arg)
                else:
                    new_args.append(arg)
            try:
                # NOTE(review): compiler objects have no setattr() method,
                # so this raises AttributeError and the fallback below
                # always performs the actual assignment.
                cc.setattr(name, new_args)
            except AttributeError:
                # Old class
                cc.__dict__[name] = new_args
        # Add system headers to Extensions extra_compile_args
        sysheaders = [ '-isystem' + dir for dir in cc.include_dirs]
        for ext in self.extensions:
            # Add extra compile args
            if self.extra_compile_args:
                ext.extra_compile_args.extend(self.extra_compile_args.split())
            # Add extra link args
            if self.extra_link_args:
                ext.extra_link_args.extend(self.extra_link_args.split())
            # Add system headers
            for sysheader in sysheaders:
                if sysheader not in ext.extra_compile_args:
                    ext.extra_compile_args.append(sysheader)
        # Stop warnings about unknown pragma
        # NOTE(review): 'ext' is the leaked loop variable, so this flag is
        # appended to the last extension only -- confirm intent.
        if os.name != 'nt':
            ext.extra_compile_args.append('-Wno-unknown-pragmas')
    def run(self):
        """Run the command"""
        if os.name == 'nt':
            for ext in self.extensions:
                # Use the multithread, static version of the run-time library
                ext.extra_compile_args.append("/MT")
                # Add extra compile args
                if self.extra_compile_args:
                    ext.extra_compile_args.extend(self.extra_compile_args.split())
                # Add extra link args
                if self.extra_link_args:
                    ext.extra_link_args.extend(self.extra_link_args.split())
            build_ext.run(self)
        else:
            # Let distutils do everything except the actual compilation,
            # fix up the compiler flags, then compile for real.
            self.real_build_extensions = self.build_extensions
            self.build_extensions = lambda: None
            build_ext.run(self)
            self.fix_compiler()
            self.real_build_extensions()
class BuildExtStatic(BuildExtDynamic):
    """Build and Link libraries statically with the C Extensions"""
    user_options = build_ext.user_options + CEXT_OPTIONS
    def finalize_options(self):
        """Finalize options and propagate them to the 'install' command."""
        install_obj = self.distribution.get_command_obj('install')
        install_obj.with_mysql_capi = self.with_mysql_capi
        install_obj.extra_compile_args = self.extra_compile_args
        install_obj.extra_link_args = self.extra_link_args
        install_obj.static = True
        # Only fetch from 'install' the options that were not given on
        # our own command line.
        options_pairs = []
        if not self.extra_compile_args:
            options_pairs.append(('extra_compile_args', 'extra_compile_args'))
        if not self.extra_link_args:
            options_pairs.append(('extra_link_args', 'extra_link_args'))
        if not self.with_mysql_capi:
            options_pairs.append(('with_mysql_capi', 'with_mysql_capi'))
        if options_pairs:
            self.set_undefined_options('install', *options_pairs)
        build_ext.finalize_options(self)
        print("# Python architecture: {0}".format(py_arch))
        print("# Python ARCH_64BIT: {0}".format(ARCH_64BIT))
        # Private copies of the C API libs/headers live under build_temp.
        self.connc_lib = os.path.join(self.build_temp, 'connc', 'lib')
        self.connc_include = os.path.join(self.build_temp, 'connc', 'include')
        if self.with_mysql_capi:
            self._finalize_connector_c(self.with_mysql_capi)
    def _finalize_connector_c(self, connc_loc):
        """Copy the C API into build_temp, keeping only static libraries."""
        if not os.path.isdir(connc_loc):
            log.error("MySQL C API should be a directory")
            sys.exit(1)
        log.info("Copying MySQL libraries")
        copy_tree(os.path.join(connc_loc, 'lib'), self.connc_lib)
        log.info("Copying MySQL header files")
        copy_tree(os.path.join(connc_loc, 'include'), self.connc_include)
        # Remove all but static libraries to force static linking
        if os.name == 'posix':
            log.info("Removing non-static MySQL libraries from %s" % self.connc_lib)
            for lib_file in os.listdir(self.connc_lib):
                lib_file_path = os.path.join(self.connc_lib, lib_file)
                if os.path.isfile(lib_file_path) and not lib_file.endswith('.a'):
                    os.unlink(os.path.join(self.connc_lib, lib_file))
    def fix_compiler(self):
        """Apply the base fixes, then point extensions at the copied C API."""
        BuildExtDynamic.fix_compiler(self)
        include_dirs = []
        library_dirs = []
        libraries = []
        if os.name == 'posix':
            include_dirs.append(self.connc_include)
            library_dirs.append(self.connc_lib)
            if self.with_mysql_capi:
                libraries.append("mysqlclient")
            # As we statically link and the "libmysqlclient.a" library
            # carry no information what it depends on, we need to
            # manually add library dependencies here.
            if platform.system() not in ["Darwin", "Windows"]:
                libraries.append("rt")
        for ext in self.extensions:
            ext.include_dirs.extend(include_dirs)
            ext.library_dirs.extend(library_dirs)
            ext.libraries.extend(libraries)
            # Add extra compile args
            if self.extra_compile_args:
                ext.extra_compile_args.extend(self.extra_compile_args.split())
            # Add extra link args
            if self.extra_link_args:
                ext.extra_link_args.extend(self.extra_link_args.split())
class InstallLib(install_lib):
    """install_lib command supporting the --byte-code-only option."""
    user_options = install_lib.user_options + CEXT_OPTIONS + INSTALL_OPTIONS
    boolean_options = ['byte-code-only']
    def initialize_options(self):
        install_lib.initialize_options(self)
        self.byte_code_only = None
    def finalize_options(self):
        install_lib.finalize_options(self)
        # Inherit --byte-code-only from 'install' and the build dir from
        # 'build' when they were not given explicitly.
        self.set_undefined_options('install',
                                   ('byte_code_only', 'byte_code_only'))
        self.set_undefined_options('build', ('build_base', 'build_dir'))
    def run(self):
        """Build, install, byte-compile, and optionally drop .py sources."""
        self.build()
        outfiles = self.install()
        # (Optionally) compile .py to .pyc
        if outfiles is not None and self.distribution.has_pure_modules():
            self.byte_compile(outfiles)
        if self.byte_code_only:
            for source_file in outfiles:
                # Keep mysql/__init__.py so the package stays importable.
                if os.path.join('mysql', '__init__.py') in source_file:
                    continue
                log.info("Removing %s", source_file)
                os.remove(source_file)
class Install(install):
    """Install Connector/Python C Extension"""
    description = "install MySQL Connector/Python"
    user_options = install.user_options + CEXT_OPTIONS + INSTALL_OPTIONS + \
                   CEXT_STATIC_OPTIONS
    boolean_options = ['byte-code-only', 'static']
    # True when the C Extension must be built and installed; set when
    # --with-mysql-capi is given.
    need_ext = False
    def initialize_options(self):
        install.initialize_options(self)
        self.extra_compile_args = None
        self.extra_link_args = None
        self.with_mysql_capi = None
        self.byte_code_only = None
        self.static = None
    def finalize_options(self):
        """Select the build_ext variant and forward our options to it."""
        if self.static:
            log.info("Linking C Extension statically with libraries")
            self.distribution.cmdclass['build_ext'] = BuildExtStatic
        if self.byte_code_only is None:
            self.byte_code_only = False
        build_ext_obj = self.distribution.get_command_obj('build_ext')
        build_ext_obj.with_mysql_capi = self.with_mysql_capi
        build_ext_obj.extra_compile_args = self.extra_compile_args
        build_ext_obj.extra_link_args = self.extra_link_args
        build_ext_obj.static = self.static
        if self.with_mysql_capi:
            self.need_ext = True
        if not self.need_ext:
            # No C API location given: ship the pure-Python package only.
            remove_cext(self.distribution)
        install.finalize_options(self)
    def run(self):
        if not self.need_ext:
            log.info("Not Installing MySQL C Extension")
        else:
            log.info("Installing MySQL C Extension")
        install.run(self)
| apache-2.0 |
natefoo/pip | pip/_vendor/requests/packages/chardet/euctwfreq.py | 3133 | 34872 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# EUCTW frequency table
# Converted from big5 work
# by Taiwan's Mandarin Promotion Council
# <http:#www.edu.tw:81/mandr/>
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Idea Distribution Ratio = 0.74851/(1-0.74851) =2.98
# Random Distribution Ration = 512/(5401-512)=0.105
#
# Typical Distribution Ratio about 25% of Ideal one, still much higher than RDR
# Detection threshold: per the analysis above, the typical distribution
# ratio is about 25% of the ideal one (2.98 * 0.25 ~= 0.75).
EUCTW_TYPICAL_DISTRIBUTION_RATIO = 0.75

# Char to FreqOrder table: number of meaningful entries in the
# EUCTWCharToFreqOrder tuple defined below.
EUCTW_TABLE_SIZE = 8102
EUCTWCharToFreqOrder = (
1,1800,1506, 255,1431, 198, 9, 82, 6,7310, 177, 202,3615,1256,2808, 110, # 2742
3735, 33,3241, 261, 76, 44,2113, 16,2931,2184,1176, 659,3868, 26,3404,2643, # 2758
1198,3869,3313,4060, 410,2211, 302, 590, 361,1963, 8, 204, 58,4296,7311,1931, # 2774
63,7312,7313, 317,1614, 75, 222, 159,4061,2412,1480,7314,3500,3068, 224,2809, # 2790
3616, 3, 10,3870,1471, 29,2774,1135,2852,1939, 873, 130,3242,1123, 312,7315, # 2806
4297,2051, 507, 252, 682,7316, 142,1914, 124, 206,2932, 34,3501,3173, 64, 604, # 2822
7317,2494,1976,1977, 155,1990, 645, 641,1606,7318,3405, 337, 72, 406,7319, 80, # 2838
630, 238,3174,1509, 263, 939,1092,2644, 756,1440,1094,3406, 449, 69,2969, 591, # 2854
179,2095, 471, 115,2034,1843, 60, 50,2970, 134, 806,1868, 734,2035,3407, 180, # 2870
995,1607, 156, 537,2893, 688,7320, 319,1305, 779,2144, 514,2374, 298,4298, 359, # 2886
2495, 90,2707,1338, 663, 11, 906,1099,2545, 20,2436, 182, 532,1716,7321, 732, # 2902
1376,4062,1311,1420,3175, 25,2312,1056, 113, 399, 382,1949, 242,3408,2467, 529, # 2918
3243, 475,1447,3617,7322, 117, 21, 656, 810,1297,2295,2329,3502,7323, 126,4063, # 2934
706, 456, 150, 613,4299, 71,1118,2036,4064, 145,3069, 85, 835, 486,2114,1246, # 2950
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,7324,2127,2354, 347,3736, 221, # 2966
3503,3110,7325,1955,1153,4065, 83, 296,1199,3070, 192, 624, 93,7326, 822,1897, # 2982
2810,3111, 795,2064, 991,1554,1542,1592, 27, 43,2853, 859, 139,1456, 860,4300, # 2998
437, 712,3871, 164,2392,3112, 695, 211,3017,2096, 195,3872,1608,3504,3505,3618, # 3014
3873, 234, 811,2971,2097,3874,2229,1441,3506,1615,2375, 668,2076,1638, 305, 228, # 3030
1664,4301, 467, 415,7327, 262,2098,1593, 239, 108, 300, 200,1033, 512,1247,2077, # 3046
7328,7329,2173,3176,3619,2673, 593, 845,1062,3244, 88,1723,2037,3875,1950, 212, # 3062
266, 152, 149, 468,1898,4066,4302, 77, 187,7330,3018, 37, 5,2972,7331,3876, # 3078
7332,7333, 39,2517,4303,2894,3177,2078, 55, 148, 74,4304, 545, 483,1474,1029, # 3094
1665, 217,1869,1531,3113,1104,2645,4067, 24, 172,3507, 900,3877,3508,3509,4305, # 3110
32,1408,2811,1312, 329, 487,2355,2247,2708, 784,2674, 4,3019,3314,1427,1788, # 3126
188, 109, 499,7334,3620,1717,1789, 888,1217,3020,4306,7335,3510,7336,3315,1520, # 3142
3621,3878, 196,1034, 775,7337,7338, 929,1815, 249, 439, 38,7339,1063,7340, 794, # 3158
3879,1435,2296, 46, 178,3245,2065,7341,2376,7342, 214,1709,4307, 804, 35, 707, # 3174
324,3622,1601,2546, 140, 459,4068,7343,7344,1365, 839, 272, 978,2257,2572,3409, # 3190
2128,1363,3623,1423, 697, 100,3071, 48, 70,1231, 495,3114,2193,7345,1294,7346, # 3206
2079, 462, 586,1042,3246, 853, 256, 988, 185,2377,3410,1698, 434,1084,7347,3411, # 3222
314,2615,2775,4308,2330,2331, 569,2280, 637,1816,2518, 757,1162,1878,1616,3412, # 3238
287,1577,2115, 768,4309,1671,2854,3511,2519,1321,3737, 909,2413,7348,4069, 933, # 3254
3738,7349,2052,2356,1222,4310, 765,2414,1322, 786,4311,7350,1919,1462,1677,2895, # 3270
1699,7351,4312,1424,2437,3115,3624,2590,3316,1774,1940,3413,3880,4070, 309,1369, # 3286
1130,2812, 364,2230,1653,1299,3881,3512,3882,3883,2646, 525,1085,3021, 902,2000, # 3302
1475, 964,4313, 421,1844,1415,1057,2281, 940,1364,3116, 376,4314,4315,1381, 7, # 3318
2520, 983,2378, 336,1710,2675,1845, 321,3414, 559,1131,3022,2742,1808,1132,1313, # 3334
265,1481,1857,7352, 352,1203,2813,3247, 167,1089, 420,2814, 776, 792,1724,3513, # 3350
4071,2438,3248,7353,4072,7354, 446, 229, 333,2743, 901,3739,1200,1557,4316,2647, # 3366
1920, 395,2744,2676,3740,4073,1835, 125, 916,3178,2616,4317,7355,7356,3741,7357, # 3382
7358,7359,4318,3117,3625,1133,2547,1757,3415,1510,2313,1409,3514,7360,2145, 438, # 3398
2591,2896,2379,3317,1068, 958,3023, 461, 311,2855,2677,4074,1915,3179,4075,1978, # 3414
383, 750,2745,2617,4076, 274, 539, 385,1278,1442,7361,1154,1964, 384, 561, 210, # 3430
98,1295,2548,3515,7362,1711,2415,1482,3416,3884,2897,1257, 129,7363,3742, 642, # 3446
523,2776,2777,2648,7364, 141,2231,1333, 68, 176, 441, 876, 907,4077, 603,2592, # 3462
710, 171,3417, 404, 549, 18,3118,2393,1410,3626,1666,7365,3516,4319,2898,4320, # 3478
7366,2973, 368,7367, 146, 366, 99, 871,3627,1543, 748, 807,1586,1185, 22,2258, # 3494
379,3743,3180,7368,3181, 505,1941,2618,1991,1382,2314,7369, 380,2357, 218, 702, # 3510
1817,1248,3418,3024,3517,3318,3249,7370,2974,3628, 930,3250,3744,7371, 59,7372, # 3526
585, 601,4078, 497,3419,1112,1314,4321,1801,7373,1223,1472,2174,7374, 749,1836, # 3542
690,1899,3745,1772,3885,1476, 429,1043,1790,2232,2116, 917,4079, 447,1086,1629, # 3558
7375, 556,7376,7377,2020,1654, 844,1090, 105, 550, 966,1758,2815,1008,1782, 686, # 3574
1095,7378,2282, 793,1602,7379,3518,2593,4322,4080,2933,2297,4323,3746, 980,2496, # 3590
544, 353, 527,4324, 908,2678,2899,7380, 381,2619,1942,1348,7381,1341,1252, 560, # 3606
3072,7382,3420,2856,7383,2053, 973, 886,2080, 143,4325,7384,7385, 157,3886, 496, # 3622
4081, 57, 840, 540,2038,4326,4327,3421,2117,1445, 970,2259,1748,1965,2081,4082, # 3638
3119,1234,1775,3251,2816,3629, 773,1206,2129,1066,2039,1326,3887,1738,1725,4083, # 3654
279,3120, 51,1544,2594, 423,1578,2130,2066, 173,4328,1879,7386,7387,1583, 264, # 3670
610,3630,4329,2439, 280, 154,7388,7389,7390,1739, 338,1282,3073, 693,2857,1411, # 3686
1074,3747,2440,7391,4330,7392,7393,1240, 952,2394,7394,2900,1538,2679, 685,1483, # 3702
4084,2468,1436, 953,4085,2054,4331, 671,2395, 79,4086,2441,3252, 608, 567,2680, # 3718
3422,4087,4088,1691, 393,1261,1791,2396,7395,4332,7396,7397,7398,7399,1383,1672, # 3734
3748,3182,1464, 522,1119, 661,1150, 216, 675,4333,3888,1432,3519, 609,4334,2681, # 3750
2397,7400,7401,7402,4089,3025, 0,7403,2469, 315, 231,2442, 301,3319,4335,2380, # 3766
7404, 233,4090,3631,1818,4336,4337,7405, 96,1776,1315,2082,7406, 257,7407,1809, # 3782
3632,2709,1139,1819,4091,2021,1124,2163,2778,1777,2649,7408,3074, 363,1655,3183, # 3798
7409,2975,7410,7411,7412,3889,1567,3890, 718, 103,3184, 849,1443, 341,3320,2934, # 3814
1484,7413,1712, 127, 67, 339,4092,2398, 679,1412, 821,7414,7415, 834, 738, 351, # 3830
2976,2146, 846, 235,1497,1880, 418,1992,3749,2710, 186,1100,2147,2746,3520,1545, # 3846
1355,2935,2858,1377, 583,3891,4093,2573,2977,7416,1298,3633,1078,2549,3634,2358, # 3862
78,3750,3751, 267,1289,2099,2001,1594,4094, 348, 369,1274,2194,2175,1837,4338, # 3878
1820,2817,3635,2747,2283,2002,4339,2936,2748, 144,3321, 882,4340,3892,2749,3423, # 3894
4341,2901,7417,4095,1726, 320,7418,3893,3026, 788,2978,7419,2818,1773,1327,2859, # 3910
3894,2819,7420,1306,4342,2003,1700,3752,3521,2359,2650, 787,2022, 506, 824,3636, # 3926
534, 323,4343,1044,3322,2023,1900, 946,3424,7421,1778,1500,1678,7422,1881,4344, # 3942
165, 243,4345,3637,2521, 123, 683,4096, 764,4346, 36,3895,1792, 589,2902, 816, # 3958
626,1667,3027,2233,1639,1555,1622,3753,3896,7423,3897,2860,1370,1228,1932, 891, # 3974
2083,2903, 304,4097,7424, 292,2979,2711,3522, 691,2100,4098,1115,4347, 118, 662, # 3990
7425, 611,1156, 854,2381,1316,2861, 2, 386, 515,2904,7426,7427,3253, 868,2234, # 4006
1486, 855,2651, 785,2212,3028,7428,1040,3185,3523,7429,3121, 448,7430,1525,7431, # 4022
2164,4348,7432,3754,7433,4099,2820,3524,3122, 503, 818,3898,3123,1568, 814, 676, # 4038
1444, 306,1749,7434,3755,1416,1030, 197,1428, 805,2821,1501,4349,7435,7436,7437, # 4054
1993,7438,4350,7439,7440,2195, 13,2779,3638,2980,3124,1229,1916,7441,3756,2131, # 4070
7442,4100,4351,2399,3525,7443,2213,1511,1727,1120,7444,7445, 646,3757,2443, 307, # 4086
7446,7447,1595,3186,7448,7449,7450,3639,1113,1356,3899,1465,2522,2523,7451, 519, # 4102
7452, 128,2132, 92,2284,1979,7453,3900,1512, 342,3125,2196,7454,2780,2214,1980, # 4118
3323,7455, 290,1656,1317, 789, 827,2360,7456,3758,4352, 562, 581,3901,7457, 401, # 4134
4353,2248, 94,4354,1399,2781,7458,1463,2024,4355,3187,1943,7459, 828,1105,4101, # 4150
1262,1394,7460,4102, 605,4356,7461,1783,2862,7462,2822, 819,2101, 578,2197,2937, # 4166
7463,1502, 436,3254,4103,3255,2823,3902,2905,3425,3426,7464,2712,2315,7465,7466, # 4182
2332,2067, 23,4357, 193, 826,3759,2102, 699,1630,4104,3075, 390,1793,1064,3526, # 4198
7467,1579,3076,3077,1400,7468,4105,1838,1640,2863,7469,4358,4359, 137,4106, 598, # 4214
3078,1966, 780, 104, 974,2938,7470, 278, 899, 253, 402, 572, 504, 493,1339,7471, # 4230
3903,1275,4360,2574,2550,7472,3640,3029,3079,2249, 565,1334,2713, 863, 41,7473, # 4246
7474,4361,7475,1657,2333, 19, 463,2750,4107, 606,7476,2981,3256,1087,2084,1323, # 4262
2652,2982,7477,1631,1623,1750,4108,2682,7478,2864, 791,2714,2653,2334, 232,2416, # 4278
7479,2983,1498,7480,2654,2620, 755,1366,3641,3257,3126,2025,1609, 119,1917,3427, # 4294
862,1026,4109,7481,3904,3760,4362,3905,4363,2260,1951,2470,7482,1125, 817,4110, # 4310
4111,3906,1513,1766,2040,1487,4112,3030,3258,2824,3761,3127,7483,7484,1507,7485, # 4326
2683, 733, 40,1632,1106,2865, 345,4113, 841,2524, 230,4364,2984,1846,3259,3428, # 4342
7486,1263, 986,3429,7487, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562,3907, # 4358
3908,2939, 967,2751,2655,1349, 592,2133,1692,3324,2985,1994,4114,1679,3909,1901, # 4374
2185,7488, 739,3642,2715,1296,1290,7489,4115,2198,2199,1921,1563,2595,2551,1870, # 4390
2752,2986,7490, 435,7491, 343,1108, 596, 17,1751,4365,2235,3430,3643,7492,4366, # 4406
294,3527,2940,1693, 477, 979, 281,2041,3528, 643,2042,3644,2621,2782,2261,1031, # 4422
2335,2134,2298,3529,4367, 367,1249,2552,7493,3530,7494,4368,1283,3325,2004, 240, # 4438
1762,3326,4369,4370, 836,1069,3128, 474,7495,2148,2525, 268,3531,7496,3188,1521, # 4454
1284,7497,1658,1546,4116,7498,3532,3533,7499,4117,3327,2684,1685,4118, 961,1673, # 4470
2622, 190,2005,2200,3762,4371,4372,7500, 570,2497,3645,1490,7501,4373,2623,3260, # 4486
1956,4374, 584,1514, 396,1045,1944,7502,4375,1967,2444,7503,7504,4376,3910, 619, # 4502
7505,3129,3261, 215,2006,2783,2553,3189,4377,3190,4378, 763,4119,3763,4379,7506, # 4518
7507,1957,1767,2941,3328,3646,1174, 452,1477,4380,3329,3130,7508,2825,1253,2382, # 4534
2186,1091,2285,4120, 492,7509, 638,1169,1824,2135,1752,3911, 648, 926,1021,1324, # 4550
4381, 520,4382, 997, 847,1007, 892,4383,3764,2262,1871,3647,7510,2400,1784,4384, # 4566
1952,2942,3080,3191,1728,4121,2043,3648,4385,2007,1701,3131,1551, 30,2263,4122, # 4582
7511,2026,4386,3534,7512, 501,7513,4123, 594,3431,2165,1821,3535,3432,3536,3192, # 4598
829,2826,4124,7514,1680,3132,1225,4125,7515,3262,4387,4126,3133,2336,7516,4388, # 4614
4127,7517,3912,3913,7518,1847,2383,2596,3330,7519,4389, 374,3914, 652,4128,4129, # 4630
375,1140, 798,7520,7521,7522,2361,4390,2264, 546,1659, 138,3031,2445,4391,7523, # 4646
2250, 612,1848, 910, 796,3765,1740,1371, 825,3766,3767,7524,2906,2554,7525, 692, # 4662
444,3032,2624, 801,4392,4130,7526,1491, 244,1053,3033,4131,4132, 340,7527,3915, # 4678
1041,2987, 293,1168, 87,1357,7528,1539, 959,7529,2236, 721, 694,4133,3768, 219, # 4694
1478, 644,1417,3331,2656,1413,1401,1335,1389,3916,7530,7531,2988,2362,3134,1825, # 4710
730,1515, 184,2827, 66,4393,7532,1660,2943, 246,3332, 378,1457, 226,3433, 975, # 4726
3917,2944,1264,3537, 674, 696,7533, 163,7534,1141,2417,2166, 713,3538,3333,4394, # 4742
3918,7535,7536,1186, 15,7537,1079,1070,7538,1522,3193,3539, 276,1050,2716, 758, # 4758
1126, 653,2945,3263,7539,2337, 889,3540,3919,3081,2989, 903,1250,4395,3920,3434, # 4774
3541,1342,1681,1718, 766,3264, 286, 89,2946,3649,7540,1713,7541,2597,3334,2990, # 4790
7542,2947,2215,3194,2866,7543,4396,2498,2526, 181, 387,1075,3921, 731,2187,3335, # 4806
7544,3265, 310, 313,3435,2299, 770,4134, 54,3034, 189,4397,3082,3769,3922,7545, # 4822
1230,1617,1849, 355,3542,4135,4398,3336, 111,4136,3650,1350,3135,3436,3035,4137, # 4838
2149,3266,3543,7546,2784,3923,3924,2991, 722,2008,7547,1071, 247,1207,2338,2471, # 4854
1378,4399,2009, 864,1437,1214,4400, 373,3770,1142,2216, 667,4401, 442,2753,2555, # 4870
3771,3925,1968,4138,3267,1839, 837, 170,1107, 934,1336,1882,7548,7549,2118,4139, # 4886
2828, 743,1569,7550,4402,4140, 582,2384,1418,3437,7551,1802,7552, 357,1395,1729, # 4902
3651,3268,2418,1564,2237,7553,3083,3772,1633,4403,1114,2085,4141,1532,7554, 482, # 4918
2446,4404,7555,7556,1492, 833,1466,7557,2717,3544,1641,2829,7558,1526,1272,3652, # 4934
4142,1686,1794, 416,2556,1902,1953,1803,7559,3773,2785,3774,1159,2316,7560,2867, # 4950
4405,1610,1584,3036,2419,2754, 443,3269,1163,3136,7561,7562,3926,7563,4143,2499, # 4966
3037,4406,3927,3137,2103,1647,3545,2010,1872,4144,7564,4145, 431,3438,7565, 250, # 4982
97, 81,4146,7566,1648,1850,1558, 160, 848,7567, 866, 740,1694,7568,2201,2830, # 4998
3195,4147,4407,3653,1687, 950,2472, 426, 469,3196,3654,3655,3928,7569,7570,1188, # 5014
424,1995, 861,3546,4148,3775,2202,2685, 168,1235,3547,4149,7571,2086,1674,4408, # 5030
3337,3270, 220,2557,1009,7572,3776, 670,2992, 332,1208, 717,7573,7574,3548,2447, # 5046
3929,3338,7575, 513,7576,1209,2868,3339,3138,4409,1080,7577,7578,7579,7580,2527, # 5062
3656,3549, 815,1587,3930,3931,7581,3550,3439,3777,1254,4410,1328,3038,1390,3932, # 5078
1741,3933,3778,3934,7582, 236,3779,2448,3271,7583,7584,3657,3780,1273,3781,4411, # 5094
7585, 308,7586,4412, 245,4413,1851,2473,1307,2575, 430, 715,2136,2449,7587, 270, # 5110
199,2869,3935,7588,3551,2718,1753, 761,1754, 725,1661,1840,4414,3440,3658,7589, # 5126
7590, 587, 14,3272, 227,2598, 326, 480,2265, 943,2755,3552, 291, 650,1883,7591, # 5142
1702,1226, 102,1547, 62,3441, 904,4415,3442,1164,4150,7592,7593,1224,1548,2756, # 5158
391, 498,1493,7594,1386,1419,7595,2055,1177,4416, 813, 880,1081,2363, 566,1145, # 5174
4417,2286,1001,1035,2558,2599,2238, 394,1286,7596,7597,2068,7598, 86,1494,1730, # 5190
3936, 491,1588, 745, 897,2948, 843,3340,3937,2757,2870,3273,1768, 998,2217,2069, # 5206
397,1826,1195,1969,3659,2993,3341, 284,7599,3782,2500,2137,2119,1903,7600,3938, # 5222
2150,3939,4151,1036,3443,1904, 114,2559,4152, 209,1527,7601,7602,2949,2831,2625, # 5238
2385,2719,3139, 812,2560,7603,3274,7604,1559, 737,1884,3660,1210, 885, 28,2686, # 5254
3553,3783,7605,4153,1004,1779,4418,7606, 346,1981,2218,2687,4419,3784,1742, 797, # 5270
1642,3940,1933,1072,1384,2151, 896,3941,3275,3661,3197,2871,3554,7607,2561,1958, # 5286
4420,2450,1785,7608,7609,7610,3942,4154,1005,1308,3662,4155,2720,4421,4422,1528, # 5302
2600, 161,1178,4156,1982, 987,4423,1101,4157, 631,3943,1157,3198,2420,1343,1241, # 5318
1016,2239,2562, 372, 877,2339,2501,1160, 555,1934, 911,3944,7611, 466,1170, 169, # 5334
1051,2907,2688,3663,2474,2994,1182,2011,2563,1251,2626,7612, 992,2340,3444,1540, # 5350
2721,1201,2070,2401,1996,2475,7613,4424, 528,1922,2188,1503,1873,1570,2364,3342, # 5366
3276,7614, 557,1073,7615,1827,3445,2087,2266,3140,3039,3084, 767,3085,2786,4425, # 5382
1006,4158,4426,2341,1267,2176,3664,3199, 778,3945,3200,2722,1597,2657,7616,4427, # 5398
7617,3446,7618,7619,7620,3277,2689,1433,3278, 131, 95,1504,3946, 723,4159,3141, # 5414
1841,3555,2758,2189,3947,2027,2104,3665,7621,2995,3948,1218,7622,3343,3201,3949, # 5430
4160,2576, 248,1634,3785, 912,7623,2832,3666,3040,3786, 654, 53,7624,2996,7625, # 5446
1688,4428, 777,3447,1032,3950,1425,7626, 191, 820,2120,2833, 971,4429, 931,3202, # 5462
135, 664, 783,3787,1997, 772,2908,1935,3951,3788,4430,2909,3203, 282,2723, 640, # 5478
1372,3448,1127, 922, 325,3344,7627,7628, 711,2044,7629,7630,3952,2219,2787,1936, # 5494
3953,3345,2220,2251,3789,2300,7631,4431,3790,1258,3279,3954,3204,2138,2950,3955, # 5510
3956,7632,2221, 258,3205,4432, 101,1227,7633,3280,1755,7634,1391,3281,7635,2910, # 5526
2056, 893,7636,7637,7638,1402,4161,2342,7639,7640,3206,3556,7641,7642, 878,1325, # 5542
1780,2788,4433, 259,1385,2577, 744,1183,2267,4434,7643,3957,2502,7644, 684,1024, # 5558
4162,7645, 472,3557,3449,1165,3282,3958,3959, 322,2152, 881, 455,1695,1152,1340, # 5574
660, 554,2153,4435,1058,4436,4163, 830,1065,3346,3960,4437,1923,7646,1703,1918, # 5590
7647, 932,2268, 122,7648,4438, 947, 677,7649,3791,2627, 297,1905,1924,2269,4439, # 5606
2317,3283,7650,7651,4164,7652,4165, 84,4166, 112, 989,7653, 547,1059,3961, 701, # 5622
3558,1019,7654,4167,7655,3450, 942, 639, 457,2301,2451, 993,2951, 407, 851, 494, # 5638
4440,3347, 927,7656,1237,7657,2421,3348, 573,4168, 680, 921,2911,1279,1874, 285, # 5654
790,1448,1983, 719,2167,7658,7659,4441,3962,3963,1649,7660,1541, 563,7661,1077, # 5670
7662,3349,3041,3451, 511,2997,3964,3965,3667,3966,1268,2564,3350,3207,4442,4443, # 5686
7663, 535,1048,1276,1189,2912,2028,3142,1438,1373,2834,2952,1134,2012,7664,4169, # 5702
1238,2578,3086,1259,7665, 700,7666,2953,3143,3668,4170,7667,4171,1146,1875,1906, # 5718
4444,2601,3967, 781,2422, 132,1589, 203, 147, 273,2789,2402, 898,1786,2154,3968, # 5734
3969,7668,3792,2790,7669,7670,4445,4446,7671,3208,7672,1635,3793, 965,7673,1804, # 5750
2690,1516,3559,1121,1082,1329,3284,3970,1449,3794, 65,1128,2835,2913,2759,1590, # 5766
3795,7674,7675, 12,2658, 45, 976,2579,3144,4447, 517,2528,1013,1037,3209,7676, # 5782
3796,2836,7677,3797,7678,3452,7679,2602, 614,1998,2318,3798,3087,2724,2628,7680, # 5798
2580,4172, 599,1269,7681,1810,3669,7682,2691,3088, 759,1060, 489,1805,3351,3285, # 5814
1358,7683,7684,2386,1387,1215,2629,2252, 490,7685,7686,4173,1759,2387,2343,7687, # 5830
4448,3799,1907,3971,2630,1806,3210,4449,3453,3286,2760,2344, 874,7688,7689,3454, # 5846
3670,1858, 91,2914,3671,3042,3800,4450,7690,3145,3972,2659,7691,3455,1202,1403, # 5862
3801,2954,2529,1517,2503,4451,3456,2504,7692,4452,7693,2692,1885,1495,1731,3973, # 5878
2365,4453,7694,2029,7695,7696,3974,2693,1216, 237,2581,4174,2319,3975,3802,4454, # 5894
4455,2694,3560,3457, 445,4456,7697,7698,7699,7700,2761, 61,3976,3672,1822,3977, # 5910
7701, 687,2045, 935, 925, 405,2660, 703,1096,1859,2725,4457,3978,1876,1367,2695, # 5926
3352, 918,2105,1781,2476, 334,3287,1611,1093,4458, 564,3146,3458,3673,3353, 945, # 5942
2631,2057,4459,7702,1925, 872,4175,7703,3459,2696,3089, 349,4176,3674,3979,4460, # 5958
3803,4177,3675,2155,3980,4461,4462,4178,4463,2403,2046, 782,3981, 400, 251,4179, # 5974
1624,7704,7705, 277,3676, 299,1265, 476,1191,3804,2121,4180,4181,1109, 205,7706, # 5990
2582,1000,2156,3561,1860,7707,7708,7709,4464,7710,4465,2565, 107,2477,2157,3982, # 6006
3460,3147,7711,1533, 541,1301, 158, 753,4182,2872,3562,7712,1696, 370,1088,4183, # 6022
4466,3563, 579, 327, 440, 162,2240, 269,1937,1374,3461, 968,3043, 56,1396,3090, # 6038
2106,3288,3354,7713,1926,2158,4467,2998,7714,3564,7715,7716,3677,4468,2478,7717, # 6054
2791,7718,1650,4469,7719,2603,7720,7721,3983,2661,3355,1149,3356,3984,3805,3985, # 6070
7722,1076, 49,7723, 951,3211,3289,3290, 450,2837, 920,7724,1811,2792,2366,4184, # 6086
1908,1138,2367,3806,3462,7725,3212,4470,1909,1147,1518,2423,4471,3807,7726,4472, # 6102
2388,2604, 260,1795,3213,7727,7728,3808,3291, 708,7729,3565,1704,7730,3566,1351, # 6118
1618,3357,2999,1886, 944,4185,3358,4186,3044,3359,4187,7731,3678, 422, 413,1714, # 6134
3292, 500,2058,2345,4188,2479,7732,1344,1910, 954,7733,1668,7734,7735,3986,2404, # 6150
4189,3567,3809,4190,7736,2302,1318,2505,3091, 133,3092,2873,4473, 629, 31,2838, # 6166
2697,3810,4474, 850, 949,4475,3987,2955,1732,2088,4191,1496,1852,7737,3988, 620, # 6182
3214, 981,1242,3679,3360,1619,3680,1643,3293,2139,2452,1970,1719,3463,2168,7738, # 6198
3215,7739,7740,3361,1828,7741,1277,4476,1565,2047,7742,1636,3568,3093,7743, 869, # 6214
2839, 655,3811,3812,3094,3989,3000,3813,1310,3569,4477,7744,7745,7746,1733, 558, # 6230
4478,3681, 335,1549,3045,1756,4192,3682,1945,3464,1829,1291,1192, 470,2726,2107, # 6246
2793, 913,1054,3990,7747,1027,7748,3046,3991,4479, 982,2662,3362,3148,3465,3216, # 6262
3217,1946,2794,7749, 571,4480,7750,1830,7751,3570,2583,1523,2424,7752,2089, 984, # 6278
4481,3683,1959,7753,3684, 852, 923,2795,3466,3685, 969,1519, 999,2048,2320,1705, # 6294
7754,3095, 615,1662, 151, 597,3992,2405,2321,1049, 275,4482,3686,4193, 568,3687, # 6310
3571,2480,4194,3688,7755,2425,2270, 409,3218,7756,1566,2874,3467,1002, 769,2840, # 6326
194,2090,3149,3689,2222,3294,4195, 628,1505,7757,7758,1763,2177,3001,3993, 521, # 6342
1161,2584,1787,2203,2406,4483,3994,1625,4196,4197, 412, 42,3096, 464,7759,2632, # 6358
4484,3363,1760,1571,2875,3468,2530,1219,2204,3814,2633,2140,2368,4485,4486,3295, # 6374
1651,3364,3572,7760,7761,3573,2481,3469,7762,3690,7763,7764,2271,2091, 460,7765, # 6390
4487,7766,3002, 962, 588,3574, 289,3219,2634,1116, 52,7767,3047,1796,7768,7769, # 6406
7770,1467,7771,1598,1143,3691,4198,1984,1734,1067,4488,1280,3365, 465,4489,1572, # 6422
510,7772,1927,2241,1812,1644,3575,7773,4490,3692,7774,7775,2663,1573,1534,7776, # 6438
7777,4199, 536,1807,1761,3470,3815,3150,2635,7778,7779,7780,4491,3471,2915,1911, # 6454
2796,7781,3296,1122, 377,3220,7782, 360,7783,7784,4200,1529, 551,7785,2059,3693, # 6470
1769,2426,7786,2916,4201,3297,3097,2322,2108,2030,4492,1404, 136,1468,1479, 672, # 6486
1171,3221,2303, 271,3151,7787,2762,7788,2049, 678,2727, 865,1947,4493,7789,2013, # 6502
3995,2956,7790,2728,2223,1397,3048,3694,4494,4495,1735,2917,3366,3576,7791,3816, # 6518
509,2841,2453,2876,3817,7792,7793,3152,3153,4496,4202,2531,4497,2304,1166,1010, # 6534
552, 681,1887,7794,7795,2957,2958,3996,1287,1596,1861,3154, 358, 453, 736, 175, # 6550
478,1117, 905,1167,1097,7796,1853,1530,7797,1706,7798,2178,3472,2287,3695,3473, # 6566
3577,4203,2092,4204,7799,3367,1193,2482,4205,1458,2190,2205,1862,1888,1421,3298, # 6582
2918,3049,2179,3474, 595,2122,7800,3997,7801,7802,4206,1707,2636, 223,3696,1359, # 6598
751,3098, 183,3475,7803,2797,3003, 419,2369, 633, 704,3818,2389, 241,7804,7805, # 6614
7806, 838,3004,3697,2272,2763,2454,3819,1938,2050,3998,1309,3099,2242,1181,7807, # 6630
1136,2206,3820,2370,1446,4207,2305,4498,7808,7809,4208,1055,2605, 484,3698,7810, # 6646
3999, 625,4209,2273,3368,1499,4210,4000,7811,4001,4211,3222,2274,2275,3476,7812, # 6662
7813,2764, 808,2606,3699,3369,4002,4212,3100,2532, 526,3370,3821,4213, 955,7814, # 6678
1620,4214,2637,2427,7815,1429,3700,1669,1831, 994, 928,7816,3578,1260,7817,7818, # 6694
7819,1948,2288, 741,2919,1626,4215,2729,2455, 867,1184, 362,3371,1392,7820,7821, # 6710
4003,4216,1770,1736,3223,2920,4499,4500,1928,2698,1459,1158,7822,3050,3372,2877, # 6726
1292,1929,2506,2842,3701,1985,1187,2071,2014,2607,4217,7823,2566,2507,2169,3702, # 6742
2483,3299,7824,3703,4501,7825,7826, 666,1003,3005,1022,3579,4218,7827,4502,1813, # 6758
2253, 574,3822,1603, 295,1535, 705,3823,4219, 283, 858, 417,7828,7829,3224,4503, # 6774
4504,3051,1220,1889,1046,2276,2456,4004,1393,1599, 689,2567, 388,4220,7830,2484, # 6790
802,7831,2798,3824,2060,1405,2254,7832,4505,3825,2109,1052,1345,3225,1585,7833, # 6806
809,7834,7835,7836, 575,2730,3477, 956,1552,1469,1144,2323,7837,2324,1560,2457, # 6822
3580,3226,4005, 616,2207,3155,2180,2289,7838,1832,7839,3478,4506,7840,1319,3704, # 6838
3705,1211,3581,1023,3227,1293,2799,7841,7842,7843,3826, 607,2306,3827, 762,2878, # 6854
1439,4221,1360,7844,1485,3052,7845,4507,1038,4222,1450,2061,2638,4223,1379,4508, # 6870
2585,7846,7847,4224,1352,1414,2325,2921,1172,7848,7849,3828,3829,7850,1797,1451, # 6886
7851,7852,7853,7854,2922,4006,4007,2485,2346, 411,4008,4009,3582,3300,3101,4509, # 6902
1561,2664,1452,4010,1375,7855,7856, 47,2959, 316,7857,1406,1591,2923,3156,7858, # 6918
1025,2141,3102,3157, 354,2731, 884,2224,4225,2407, 508,3706, 726,3583, 996,2428, # 6934
3584, 729,7859, 392,2191,1453,4011,4510,3707,7860,7861,2458,3585,2608,1675,2800, # 6950
919,2347,2960,2348,1270,4511,4012, 73,7862,7863, 647,7864,3228,2843,2255,1550, # 6966
1346,3006,7865,1332, 883,3479,7866,7867,7868,7869,3301,2765,7870,1212, 831,1347, # 6982
4226,4512,2326,3830,1863,3053, 720,3831,4513,4514,3832,7871,4227,7872,7873,4515, # 6998
7874,7875,1798,4516,3708,2609,4517,3586,1645,2371,7876,7877,2924, 669,2208,2665, # 7014
2429,7878,2879,7879,7880,1028,3229,7881,4228,2408,7882,2256,1353,7883,7884,4518, # 7030
3158, 518,7885,4013,7886,4229,1960,7887,2142,4230,7888,7889,3007,2349,2350,3833, # 7046
516,1833,1454,4014,2699,4231,4519,2225,2610,1971,1129,3587,7890,2766,7891,2961, # 7062
1422, 577,1470,3008,1524,3373,7892,7893, 432,4232,3054,3480,7894,2586,1455,2508, # 7078
2226,1972,1175,7895,1020,2732,4015,3481,4520,7896,2733,7897,1743,1361,3055,3482, # 7094
2639,4016,4233,4521,2290, 895, 924,4234,2170, 331,2243,3056, 166,1627,3057,1098, # 7110
7898,1232,2880,2227,3374,4522, 657, 403,1196,2372, 542,3709,3375,1600,4235,3483, # 7126
7899,4523,2767,3230, 576, 530,1362,7900,4524,2533,2666,3710,4017,7901, 842,3834, # 7142
7902,2801,2031,1014,4018, 213,2700,3376, 665, 621,4236,7903,3711,2925,2430,7904, # 7158
2431,3302,3588,3377,7905,4237,2534,4238,4525,3589,1682,4239,3484,1380,7906, 724, # 7174
2277, 600,1670,7907,1337,1233,4526,3103,2244,7908,1621,4527,7909, 651,4240,7910, # 7190
1612,4241,2611,7911,2844,7912,2734,2307,3058,7913, 716,2459,3059, 174,1255,2701, # 7206
4019,3590, 548,1320,1398, 728,4020,1574,7914,1890,1197,3060,4021,7915,3061,3062, # 7222
3712,3591,3713, 747,7916, 635,4242,4528,7917,7918,7919,4243,7920,7921,4529,7922, # 7238
3378,4530,2432, 451,7923,3714,2535,2072,4244,2735,4245,4022,7924,1764,4531,7925, # 7254
4246, 350,7926,2278,2390,2486,7927,4247,4023,2245,1434,4024, 488,4532, 458,4248, # 7270
4025,3715, 771,1330,2391,3835,2568,3159,2159,2409,1553,2667,3160,4249,7928,2487, # 7286
2881,2612,1720,2702,4250,3379,4533,7929,2536,4251,7930,3231,4252,2768,7931,2015, # 7302
2736,7932,1155,1017,3716,3836,7933,3303,2308, 201,1864,4253,1430,7934,4026,7935, # 7318
7936,7937,7938,7939,4254,1604,7940, 414,1865, 371,2587,4534,4535,3485,2016,3104, # 7334
4536,1708, 960,4255, 887, 389,2171,1536,1663,1721,7941,2228,4027,2351,2926,1580, # 7350
7942,7943,7944,1744,7945,2537,4537,4538,7946,4539,7947,2073,7948,7949,3592,3380, # 7366
2882,4256,7950,4257,2640,3381,2802, 673,2703,2460, 709,3486,4028,3593,4258,7951, # 7382
1148, 502, 634,7952,7953,1204,4540,3594,1575,4541,2613,3717,7954,3718,3105, 948, # 7398
3232, 121,1745,3837,1110,7955,4259,3063,2509,3009,4029,3719,1151,1771,3838,1488, # 7414
4030,1986,7956,2433,3487,7957,7958,2093,7959,4260,3839,1213,1407,2803, 531,2737, # 7430
2538,3233,1011,1537,7960,2769,4261,3106,1061,7961,3720,3721,1866,2883,7962,2017, # 7446
120,4262,4263,2062,3595,3234,2309,3840,2668,3382,1954,4542,7963,7964,3488,1047, # 7462
2704,1266,7965,1368,4543,2845, 649,3383,3841,2539,2738,1102,2846,2669,7966,7967, # 7478
1999,7968,1111,3596,2962,7969,2488,3842,3597,2804,1854,3384,3722,7970,7971,3385, # 7494
2410,2884,3304,3235,3598,7972,2569,7973,3599,2805,4031,1460, 856,7974,3600,7975, # 7510
2885,2963,7976,2886,3843,7977,4264, 632,2510, 875,3844,1697,3845,2291,7978,7979, # 7526
4544,3010,1239, 580,4545,4265,7980, 914, 936,2074,1190,4032,1039,2123,7981,7982, # 7542
7983,3386,1473,7984,1354,4266,3846,7985,2172,3064,4033, 915,3305,4267,4268,3306, # 7558
1605,1834,7986,2739, 398,3601,4269,3847,4034, 328,1912,2847,4035,3848,1331,4270, # 7574
3011, 937,4271,7987,3602,4036,4037,3387,2160,4546,3388, 524, 742, 538,3065,1012, # 7590
7988,7989,3849,2461,7990, 658,1103, 225,3850,7991,7992,4547,7993,4548,7994,3236, # 7606
1243,7995,4038, 963,2246,4549,7996,2705,3603,3161,7997,7998,2588,2327,7999,4550, # 7622
8000,8001,8002,3489,3307, 957,3389,2540,2032,1930,2927,2462, 870,2018,3604,1746, # 7638
2770,2771,2434,2463,8003,3851,8004,3723,3107,3724,3490,3390,3725,8005,1179,3066, # 7654
8006,3162,2373,4272,3726,2541,3163,3108,2740,4039,8007,3391,1556,2542,2292, 977, # 7670
2887,2033,4040,1205,3392,8008,1765,3393,3164,2124,1271,1689, 714,4551,3491,8009, # 7686
2328,3852, 533,4273,3605,2181, 617,8010,2464,3308,3492,2310,8011,8012,3165,8013, # 7702
8014,3853,1987, 618, 427,2641,3493,3394,8015,8016,1244,1690,8017,2806,4274,4552, # 7718
8018,3494,8019,8020,2279,1576, 473,3606,4275,3395, 972,8021,3607,8022,3067,8023, # 7734
8024,4553,4554,8025,3727,4041,4042,8026, 153,4555, 356,8027,1891,2888,4276,2143, # 7750
408, 803,2352,8028,3854,8029,4277,1646,2570,2511,4556,4557,3855,8030,3856,4278, # 7766
8031,2411,3396, 752,8032,8033,1961,2964,8034, 746,3012,2465,8035,4279,3728, 698, # 7782
4558,1892,4280,3608,2543,4559,3609,3857,8036,3166,3397,8037,1823,1302,4043,2706, # 7798
3858,1973,4281,8038,4282,3167, 823,1303,1288,1236,2848,3495,4044,3398, 774,3859, # 7814
8039,1581,4560,1304,2849,3860,4561,8040,2435,2161,1083,3237,4283,4045,4284, 344, # 7830
1173, 288,2311, 454,1683,8041,8042,1461,4562,4046,2589,8043,8044,4563, 985, 894, # 7846
8045,3399,3168,8046,1913,2928,3729,1988,8047,2110,1974,8048,4047,8049,2571,1194, # 7862
425,8050,4564,3169,1245,3730,4285,8051,8052,2850,8053, 636,4565,1855,3861, 760, # 7878
1799,8054,4286,2209,1508,4566,4048,1893,1684,2293,8055,8056,8057,4287,4288,2210, # 7894
479,8058,8059, 832,8060,4049,2489,8061,2965,2490,3731, 990,3109, 627,1814,2642, # 7910
4289,1582,4290,2125,2111,3496,4567,8062, 799,4291,3170,8063,4568,2112,1737,3013, # 7926
1018, 543, 754,4292,3309,1676,4569,4570,4050,8064,1489,8065,3497,8066,2614,2889, # 7942
4051,8067,8068,2966,8069,8070,8071,8072,3171,4571,4572,2182,1722,8073,3238,3239, # 7958
1842,3610,1715, 481, 365,1975,1856,8074,8075,1962,2491,4573,8076,2126,3611,3240, # 7974
433,1894,2063,2075,8077, 602,2741,8078,8079,8080,8081,8082,3014,1628,3400,8083, # 7990
3172,4574,4052,2890,4575,2512,8084,2544,2772,8085,8086,8087,3310,4576,2891,8088, # 8006
4577,8089,2851,4578,4579,1221,2967,4053,2513,8090,8091,8092,1867,1989,8093,8094, # 8022
8095,1895,8096,8097,4580,1896,4054, 318,8098,2094,4055,4293,8099,8100, 485,8101, # 8038
938,3862, 553,2670, 116,8102,3863,3612,8103,3498,2671,2773,3401,3311,2807,8104, # 8054
3613,2929,4056,1747,2930,2968,8105,8106, 207,8107,8108,2672,4581,2514,8109,3015, # 8070
890,3614,3864,8110,1877,3732,3402,8111,2183,2353,3403,1652,8112,8113,8114, 941, # 8086
2294, 208,3499,4057,2019, 330,4294,3865,2892,2492,3733,4295,8115,8116,8117,8118, # 8102
#Everything below is of no interest for detection purpose
2515,1613,4582,8119,3312,3866,2516,8120,4058,8121,1637,4059,2466,4583,3867,8122, # 8118
2493,3016,3734,8123,8124,2192,8125,8126,2162,8127,8128,8129,8130,8131,8132,8133, # 8134
8134,8135,8136,8137,8138,8139,8140,8141,8142,8143,8144,8145,8146,8147,8148,8149, # 8150
8150,8151,8152,8153,8154,8155,8156,8157,8158,8159,8160,8161,8162,8163,8164,8165, # 8166
8166,8167,8168,8169,8170,8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181, # 8182
8182,8183,8184,8185,8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197, # 8198
8198,8199,8200,8201,8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213, # 8214
8214,8215,8216,8217,8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229, # 8230
8230,8231,8232,8233,8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245, # 8246
8246,8247,8248,8249,8250,8251,8252,8253,8254,8255,8256,8257,8258,8259,8260,8261, # 8262
8262,8263,8264,8265,8266,8267,8268,8269,8270,8271,8272,8273,8274,8275,8276,8277, # 8278
8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,8290,8291,8292,8293, # 8294
8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,8308,8309, # 8310
8310,8311,8312,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322,8323,8324,8325, # 8326
8326,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337,8338,8339,8340,8341, # 8342
8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353,8354,8355,8356,8357, # 8358
8358,8359,8360,8361,8362,8363,8364,8365,8366,8367,8368,8369,8370,8371,8372,8373, # 8374
8374,8375,8376,8377,8378,8379,8380,8381,8382,8383,8384,8385,8386,8387,8388,8389, # 8390
8390,8391,8392,8393,8394,8395,8396,8397,8398,8399,8400,8401,8402,8403,8404,8405, # 8406
8406,8407,8408,8409,8410,8411,8412,8413,8414,8415,8416,8417,8418,8419,8420,8421, # 8422
8422,8423,8424,8425,8426,8427,8428,8429,8430,8431,8432,8433,8434,8435,8436,8437, # 8438
8438,8439,8440,8441,8442,8443,8444,8445,8446,8447,8448,8449,8450,8451,8452,8453, # 8454
8454,8455,8456,8457,8458,8459,8460,8461,8462,8463,8464,8465,8466,8467,8468,8469, # 8470
8470,8471,8472,8473,8474,8475,8476,8477,8478,8479,8480,8481,8482,8483,8484,8485, # 8486
8486,8487,8488,8489,8490,8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501, # 8502
8502,8503,8504,8505,8506,8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517, # 8518
8518,8519,8520,8521,8522,8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533, # 8534
8534,8535,8536,8537,8538,8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549, # 8550
8550,8551,8552,8553,8554,8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,8565, # 8566
8566,8567,8568,8569,8570,8571,8572,8573,8574,8575,8576,8577,8578,8579,8580,8581, # 8582
8582,8583,8584,8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597, # 8598
8598,8599,8600,8601,8602,8603,8604,8605,8606,8607,8608,8609,8610,8611,8612,8613, # 8614
8614,8615,8616,8617,8618,8619,8620,8621,8622,8623,8624,8625,8626,8627,8628,8629, # 8630
8630,8631,8632,8633,8634,8635,8636,8637,8638,8639,8640,8641,8642,8643,8644,8645, # 8646
8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,8657,8658,8659,8660,8661, # 8662
8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672,8673,8674,8675,8676,8677, # 8678
8678,8679,8680,8681,8682,8683,8684,8685,8686,8687,8688,8689,8690,8691,8692,8693, # 8694
8694,8695,8696,8697,8698,8699,8700,8701,8702,8703,8704,8705,8706,8707,8708,8709, # 8710
8710,8711,8712,8713,8714,8715,8716,8717,8718,8719,8720,8721,8722,8723,8724,8725, # 8726
8726,8727,8728,8729,8730,8731,8732,8733,8734,8735,8736,8737,8738,8739,8740,8741) # 8742
# flake8: noqa
| mit |
PeterWangIntel/chromium-crosswalk | build/android/pylib/utils/parallelizer.py | 51 | 7129 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Wrapper that allows method execution in parallel.
This class wraps a list of objects of the same type, emulates their
interface, and executes any functions called on the objects in parallel
in ReraiserThreads.
This means that, given a list of objects:
class Foo:
def __init__(self):
self.baz = Baz()
def bar(self, my_param):
// do something
list_of_foos = [Foo(1), Foo(2), Foo(3)]
we can take a sequential operation on that list of objects:
for f in list_of_foos:
f.bar('Hello')
and run it in parallel across all of the objects:
Parallelizer(list_of_foos).bar('Hello')
It can also handle (non-method) attributes of objects, so that this:
for f in list_of_foos:
f.baz.myBazMethod()
can be run in parallel with:
Parallelizer(list_of_foos).baz.myBazMethod()
Because it emulates the interface of the wrapped objects, a Parallelizer
can be passed to a method or function that takes objects of that type:
def DoesSomethingWithFoo(the_foo):
the_foo.bar('Hello')
the_foo.bar('world')
the_foo.baz.myBazMethod
DoesSomethingWithFoo(Parallelizer(list_of_foos))
Note that this class spins up a thread for each object. Using this class
to parallelize operations that are already fast will incur a net performance
penalty.
"""
# pylint: disable=protected-access
from pylib.utils import reraiser_thread
from pylib.utils import watchdog_timer
_DEFAULT_TIMEOUT = 30
_DEFAULT_RETRIES = 3
class Parallelizer(object):
  """Allows parallel execution of method calls across a group of objects."""

  def __init__(self, objs):
    # Refuse empty groups: every emulated call fans out one thread per object.
    assert (objs is not None and len(objs) > 0), (
        "Passed empty list to 'Parallelizer'")
    # |_orig_objs| is the original group, kept for thread naming and for
    # constructing derived Parallelizers; |_objs| is the currently emulated
    # value (may temporarily be a ReraiserThreadGroup while a call is in
    # flight).
    self._orig_objs = objs
    self._objs = objs

  def __getattr__(self, name):
    """Emulate getting the |name| attribute of |self|.

    Args:
      name: The name of the attribute to retrieve.
    Returns:
      A Parallelizer emulating the |name| attribute of |self|.
    """
    # Resolve any outstanding asynchronous call before dereferencing.
    self.pGet(None)

    r = type(self)(self._orig_objs)
    r._objs = [getattr(o, name) for o in self._objs]
    return r

  def __getitem__(self, index):
    """Emulate getting the value of |self| at |index|.

    Returns:
      A Parallelizer emulating the value of |self| at |index|.
    """
    self.pGet(None)

    r = type(self)(self._orig_objs)
    r._objs = [o[index] for o in self._objs]
    return r

  def __call__(self, *args, **kwargs):
    """Emulate calling |self| with |args| and |kwargs|.

    Note that this call is asynchronous. Call pFinish on the return value to
    block until the call finishes.

    Returns:
      A Parallelizer wrapping the ReraiserThreadGroup running the call in
      parallel.
    Raises:
      AttributeError if the wrapped objects aren't callable.
    """
    self.pGet(None)

    if not self._objs:
      raise AttributeError('Nothing to call.')
    for o in self._objs:
      if not callable(o):
        raise AttributeError("'%s' is not callable" % o.__name__)

    r = type(self)(self._orig_objs)
    # One ReraiserThread per wrapped callable; threads are named after the
    # originating object so failures are attributable to a device.
    r._objs = reraiser_thread.ReraiserThreadGroup(
        [reraiser_thread.ReraiserThread(
            o, args=args, kwargs=kwargs,
            name='%s.%s' % (str(d), o.__name__))
         for d, o in zip(self._orig_objs, self._objs)])
    r._objs.StartAll()  # pylint: disable=W0212
    return r

  def pFinish(self, timeout):
    """Finish any outstanding asynchronous operations.

    Args:
      timeout: The maximum number of seconds to wait for an individual
               result to return, or None to wait forever.
    Returns:
      self, now emulating the return values.
    """
    self._assertNoShadow('pFinish')
    if isinstance(self._objs, reraiser_thread.ReraiserThreadGroup):
      self._objs.JoinAll()
      # Replace the in-flight thread group with the per-object return values.
      self._objs = self._objs.GetAllReturnValues(
          watchdog_timer.WatchdogTimer(timeout))
    return self

  def pGet(self, timeout):
    """Get the current wrapped objects.

    Args:
      timeout: Same as |pFinish|.
    Returns:
      A list of the results, in order of the provided devices.
    Raises:
      Any exception raised by any of the called functions.
    """
    self._assertNoShadow('pGet')
    self.pFinish(timeout)
    return self._objs

  def pMap(self, f, *args, **kwargs):
    """Map a function across the current wrapped objects in parallel.

    This calls f(o, *args, **kwargs) for each o in the set of wrapped objects.

    Note that this call is asynchronous. Call pFinish on the return value to
    block until the call finishes.

    Args:
      f: The function to call.
      args: The positional args to pass to f.
      kwargs: The keyword args to pass to f.
    Returns:
      A Parallelizer wrapping the ReraiserThreadGroup running the map in
      parallel.
    """
    self._assertNoShadow('pMap')
    r = type(self)(self._orig_objs)
    r._objs = reraiser_thread.ReraiserThreadGroup(
        [reraiser_thread.ReraiserThread(
            f, args=tuple([o] + list(args)), kwargs=kwargs,
            name='%s(%s)' % (f.__name__, d))
         for d, o in zip(self._orig_objs, self._objs)])
    r._objs.StartAll()  # pylint: disable=W0212
    return r

  def _assertNoShadow(self, attr_name):
    """Ensures that |attr_name| isn't shadowing part of the wrapped objects.

    If the wrapped objects _do_ have an |attr_name| attribute, it will be
    inaccessible to clients.

    Args:
      attr_name: The attribute to check.
    Raises:
      AssertionError if the wrapped objects have an attribute named 'attr_name'
      or '_assertNoShadow'.
    """
    if isinstance(self._objs, reraiser_thread.ReraiserThreadGroup):
      assert not hasattr(self._objs, '_assertNoShadow')
      assert not hasattr(self._objs, attr_name)
    else:
      assert not any(hasattr(o, '_assertNoShadow') for o in self._objs)
      assert not any(hasattr(o, attr_name) for o in self._objs)
class SyncParallelizer(Parallelizer):
  """A Parallelizer whose calls block until every wrapped object finishes."""

  #override
  def __call__(self, *args, **kwargs):
    """Emulate calling |self| with |args| and |kwargs|.

    Note that this call is synchronous.

    Returns:
      A Parallelizer emulating the value returned from calling |self| with
      |args| and |kwargs|.
    Raises:
      AttributeError if the wrapped objects aren't callable.
    """
    pending = super(SyncParallelizer, self).__call__(*args, **kwargs)
    # pFinish joins all threads and returns the same Parallelizer, now
    # emulating the return values.
    return pending.pFinish(None)

  #override
  def pMap(self, f, *args, **kwargs):
    """Map a function across the current wrapped objects in parallel.

    This calls f(o, *args, **kwargs) for each o in the set of wrapped objects.

    Note that this call is synchronous.

    Args:
      f: The function to call.
      args: The positional args to pass to f.
      kwargs: The keyword args to pass to f.
    Returns:
      A Parallelizer wrapping the ReraiserThreadGroup running the map in
      parallel.
    """
    pending = super(SyncParallelizer, self).pMap(f, *args, **kwargs)
    return pending.pFinish(None)
| bsd-3-clause |
axbaretto/beam | sdks/python/.tox/docs/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/whitespace.py | 353 | 1139 | from __future__ import absolute_import, division, unicode_literals
import re
from . import base
from ..constants import rcdataElements, spaceCharacters
spaceCharacters = "".join(spaceCharacters)
SPACES_REGEX = re.compile("[%s]+" % spaceCharacters)
class Filter(base.Filter):
    """Collapse runs of whitespace outside space-preserving elements."""

    spacePreserveElements = frozenset(["pre", "textarea"] + list(rcdataElements))

    def __iter__(self):
        # Nesting depth of elements whose whitespace must be kept verbatim.
        depth = 0
        for token in base.Filter.__iter__(self):
            token_type = token["type"]
            if token_type == "StartTag":
                if depth or token["name"] in self.spacePreserveElements:
                    depth += 1
            elif token_type == "EndTag":
                if depth:
                    depth -= 1
            elif not depth:
                if token_type == "SpaceCharacters":
                    # Only touch non-empty data to not introduce spaces
                    # where there were none.
                    if token["data"]:
                        token["data"] = " "
                elif token_type == "Characters":
                    token["data"] = collapse_spaces(token["data"])
            yield token
def collapse_spaces(text):
    """Return *text* with each run of whitespace collapsed to one space."""
    collapsed = SPACES_REGEX.sub(' ', text)
    return collapsed
| apache-2.0 |
ric03uec/boto | boto/sdb/db/manager/sdbmanager.py | 17 | 27150 | # Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010 Chris Moyer http://coredumped.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import boto
import re
from boto.utils import find_class
import uuid
from boto.sdb.db.key import Key
from boto.sdb.db.blob import Blob
from boto.sdb.db.property import ListProperty, MapProperty
from datetime import datetime, date, time
from boto.exception import SDBPersistenceError, S3ResponseError
ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
class TimeDecodeError(Exception):
    """Raised when a stored time string cannot be decoded (e.g. tz-aware)."""
class SDBConverter(object):
    """
    Responsible for converting base Python types to format compatible
    with underlying database. For SimpleDB, that means everything
    needs to be converted to a string when stored in SimpleDB and from
    a string when retrieved.

    To convert a value, pass it to the encode or decode method. The
    encode method will take a Python native value and convert to DB
    format. The decode method will take a DB format value and convert
    it to Python native format. To find the appropriate method to
    call, the generic encode/decode methods will look for the
    type-specific method by searching for a method
    called "encode_<type name>" or "decode_<type name>".
    """
    def __init__(self, manager):
        # Do a delayed import to prevent possible circular import errors.
        from boto.sdb.db.model import Model
        self.model_class = Model
        self.manager = manager
        # Dispatch table mapping a Python type to its (encoder, decoder) pair.
        self.type_map = {bool: (self.encode_bool, self.decode_bool),
                         int: (self.encode_int, self.decode_int),
                         long: (self.encode_long, self.decode_long),
                         float: (self.encode_float, self.decode_float),
                         self.model_class: (
                             self.encode_reference, self.decode_reference
                         ),
                         Key: (self.encode_reference, self.decode_reference),
                         datetime: (self.encode_datetime, self.decode_datetime),
                         date: (self.encode_date, self.decode_date),
                         time: (self.encode_time, self.decode_time),
                         Blob: (self.encode_blob, self.decode_blob),
                         str: (self.encode_string, self.decode_string),
                         }

    def encode(self, item_type, value):
        """Encode a single native ``value`` of type ``item_type`` for SDB."""
        try:
            # Any Model subclass is stored as a reference.
            if self.model_class in item_type.mro():
                item_type = self.model_class
        except:
            pass
        if item_type in self.type_map:
            encode = self.type_map[item_type][0]
            return encode(value)
        # Types with no registered encoder pass through unchanged.
        return value

    def decode(self, item_type, value):
        """Decode a stored ``value`` back into a native ``item_type``."""
        if item_type in self.type_map:
            decode = self.type_map[item_type][1]
            return decode(value)
        return value

    def encode_list(self, prop, value):
        """Encode a ListProperty value as a list of 'NNN:value' strings."""
        if value in (None, []):
            return []
        if not isinstance(value, list):
            # This is a little trick to avoid encoding when it's just a single value,
            # since that most likely means it's from a query
            item_type = getattr(prop, "item_type")
            return self.encode(item_type, value)
        # Just enumerate(value) won't work here because
        # we need to add in some zero padding
        # We support lists up to 1,000 attributes, since
        # SDB technically only supports 1024 attributes anyway
        values = {}
        for k, v in enumerate(value):
            values["%03d" % k] = v
        return self.encode_map(prop, values)

    def encode_map(self, prop, value):
        """Encode a MapProperty dict as a list of 'quoted-key:value' strings."""
        import urllib
        if value is None:
            return None
        if not isinstance(value, dict):
            raise ValueError('Expected a dict value, got %s' % type(value))
        new_value = []
        for key in value:
            item_type = getattr(prop, "item_type")
            if self.model_class in item_type.mro():
                item_type = self.model_class
            encoded_value = self.encode(item_type, value[key])
            if encoded_value is not None:
                # URL-quote the key so a ':' in the key can't corrupt the
                # 'key:value' storage format.
                new_value.append('%s:%s' % (urllib.quote(key), encoded_value))
        return new_value

    def encode_prop(self, prop, value):
        """Encode ``value`` according to the property's kind."""
        if isinstance(prop, ListProperty):
            return self.encode_list(prop, value)
        elif isinstance(prop, MapProperty):
            return self.encode_map(prop, value)
        else:
            return self.encode(prop.data_type, value)

    def decode_list(self, prop, value):
        """Decode a stored list, restoring element order from the index keys."""
        if not isinstance(value, list):
            value = [value]
        if hasattr(prop, 'item_type'):
            item_type = getattr(prop, "item_type")
            dec_val = {}
            for val in value:
                if val is not None:
                    k, v = self.decode_map_element(item_type, val)
                    try:
                        k = int(k)
                    except:
                        # NOTE(review): non-numeric keys fall back to using the
                        # decoded value itself as the dict key — confirm this
                        # is intentional rather than e.g. keeping ``k``.
                        k = v
                    dec_val[k] = v
            value = dec_val.values()
        return value

    def decode_map(self, prop, value):
        """Decode a stored map back into a dict of native values."""
        if not isinstance(value, list):
            value = [value]
        ret_value = {}
        item_type = getattr(prop, "item_type")
        for val in value:
            k, v = self.decode_map_element(item_type, val)
            ret_value[k] = v
        return ret_value

    def decode_map_element(self, item_type, value):
        """Decode a single element for a map"""
        import urllib
        key = value
        if ":" in value:
            # Split on the first ':' only; the key was URL-quoted on encode.
            key, value = value.split(':', 1)
            key = urllib.unquote(key)
        if self.model_class in item_type.mro():
            # References are rehydrated lazily by id.
            value = item_type(id=value)
        else:
            value = self.decode(item_type, value)
        return (key, value)

    def decode_prop(self, prop, value):
        """Decode ``value`` according to the property's kind."""
        if isinstance(prop, ListProperty):
            return self.decode_list(prop, value)
        elif isinstance(prop, MapProperty):
            return self.decode_map(prop, value)
        else:
            return self.decode(prop.data_type, value)

    def encode_int(self, value):
        # Offset by 2**31 and zero-pad so negative ints sort correctly as
        # strings (SDB compares lexicographically).
        value = int(value)
        value += 2147483648
        return '%010d' % value

    def decode_int(self, value):
        try:
            value = int(value)
        except:
            # Corrupt data decodes to 0 rather than raising.
            boto.log.error("Error, %s is not an integer" % value)
            value = 0
        value = int(value)
        value -= 2147483648
        return int(value)

    def encode_long(self, value):
        # Same scheme as encode_int, offset by 2**63 for 64-bit range.
        value = long(value)
        value += 9223372036854775808
        return '%020d' % value

    def decode_long(self, value):
        value = long(value)
        value -= 9223372036854775808
        return value

    def encode_bool(self, value):
        # Accepts real booleans as well as 'true'/'yes' style strings.
        if value == True or str(value).lower() in ("true", "yes"):
            return 'true'
        else:
            return 'false'

    def decode_bool(self, value):
        if value.lower() == 'true':
            return True
        else:
            return False

    def encode_float(self, value):
        """
        Encode a float so that it sorts lexicographically.

        See http://tools.ietf.org/html/draft-wood-ldapext-float-00.
        """
        s = '%e' % value
        l = s.split('e')
        mantissa = l[0].ljust(18, '0')
        exponent = l[1]
        if value == 0.0:
            case = '3'
            exponent = '000'
        elif mantissa[0] != '-' and exponent[0] == '+':
            # Positive value, positive exponent.
            case = '5'
            exponent = exponent[1:].rjust(3, '0')
        elif mantissa[0] != '-' and exponent[0] == '-':
            # Positive value, negative exponent: bias the exponent by 999.
            case = '4'
            exponent = 999 + int(exponent)
            exponent = '%03d' % exponent
        elif mantissa[0] == '-' and exponent[0] == '-':
            # Negative value, negative exponent: complement the mantissa.
            case = '2'
            mantissa = '%f' % (10 + float(mantissa))
            mantissa = mantissa.ljust(18, '0')
            exponent = exponent[1:].rjust(3, '0')
        else:
            # Negative value, positive exponent.
            case = '1'
            mantissa = '%f' % (10 + float(mantissa))
            mantissa = mantissa.ljust(18, '0')
            exponent = 999 - int(exponent)
            exponent = '%03d' % exponent
        return '%s %s %s' % (case, exponent, mantissa)

    def decode_float(self, value):
        """Invert encode_float: 'case exponent mantissa' -> float."""
        case = value[0]
        exponent = value[2:5]
        mantissa = value[6:]
        if case == '3':
            return 0.0
        elif case == '5':
            # Positive value, positive exponent: stored verbatim.
            pass
        elif case == '4':
            exponent = '%03d' % (int(exponent) - 999)
        elif case == '2':
            mantissa = '%f' % (float(mantissa) - 10)
            exponent = '-' + exponent
        else:
            mantissa = '%f' % (float(mantissa) - 10)
            exponent = '%03d' % abs((int(exponent) - 999))
        return float(mantissa + 'e' + exponent)

    def encode_datetime(self, value):
        if isinstance(value, basestring):
            # Already a string; assume it is pre-formatted.
            return value
        if isinstance(value, datetime):
            return value.strftime(ISO8601)
        else:
            return value.isoformat()

    def decode_datetime(self, value):
        """Handles both Dates and DateTime objects"""
        if value is None:
            return value
        try:
            if "T" in value:
                if "." in value:
                    # Handle true "isoformat()" dates, which may have a microsecond on at the end of them
                    return datetime.strptime(value.split(".")[0], "%Y-%m-%dT%H:%M:%S")
                else:
                    return datetime.strptime(value, ISO8601)
            else:
                # No time component: treat as a plain date.
                value = value.split("-")
                return date(int(value[0]), int(value[1]), int(value[2]))
        except Exception, e:
            # Unparseable timestamps decode to None rather than raising.
            return None

    def encode_date(self, value):
        if isinstance(value, basestring):
            return value
        return value.isoformat()

    def decode_date(self, value):
        try:
            value = value.split("-")
            return date(int(value[0]), int(value[1]), int(value[2]))
        except:
            # Unparseable dates decode to None rather than raising.
            return None

    # Times are stored via isoformat(), the same as dates.
    encode_time = encode_date

    def decode_time(self, value):
        """ converts strings in the form of HH:MM:SS.mmmmmm
            (created by datetime.time.isoformat()) to
            datetime.time objects.

            Timezone-aware strings ("HH:MM:SS.mmmmmm+HH:MM") won't
            be handled right now and will raise TimeDecodeError.
        """
        if '-' in value or '+' in value:
            # TODO: Handle tzinfo
            raise TimeDecodeError("Can't handle timezone aware objects: %r" % value)
        tmp = value.split('.')
        arg = map(int, tmp[0].split(':'))
        if len(tmp) == 2:
            # Preserve the microsecond component, if present.
            arg.append(int(tmp[1]))
        return time(*arg)

    def encode_reference(self, value):
        # References are stored as the referenced object's id string.
        if value in (None, 'None', '', ' '):
            return None
        if isinstance(value, basestring):
            return value
        else:
            return value.id

    def decode_reference(self, value):
        if not value or value == "None":
            return None
        # Returned as the raw id; the property layer rehydrates the object.
        return value

    def encode_blob(self, value):
        """Store the blob's contents in S3 and return its 's3://...' id."""
        if not value:
            return None
        if isinstance(value, basestring):
            # Already an id string.
            return value
        if not value.id:
            # New blob: allocate a fresh S3 key under the manager's bucket.
            bucket = self.manager.get_blob_bucket()
            key = bucket.new_key(str(uuid.uuid4()))
            value.id = "s3://%s/%s" % (key.bucket.name, key.name)
        else:
            match = re.match("^s3:\/\/([^\/]*)\/(.*)$", value.id)
            if match:
                s3 = self.manager.get_s3_connection()
                bucket = s3.get_bucket(match.group(1), validate=False)
                key = bucket.get_key(match.group(2))
            else:
                raise SDBPersistenceError("Invalid Blob ID: %s" % value.id)

        if value.value is not None:
            key.set_contents_from_string(value.value)
        return value.id

    def decode_blob(self, value):
        """Resolve an 's3://bucket/key' id back into a Blob object."""
        if not value:
            return None
        match = re.match("^s3:\/\/([^\/]*)\/(.*)$", value)
        if match:
            s3 = self.manager.get_s3_connection()
            bucket = s3.get_bucket(match.group(1), validate=False)
            try:
                key = bucket.get_key(match.group(2))
            except S3ResponseError, e:
                # Inaccessible (Forbidden) blobs decode to None; any other
                # S3 failure propagates.
                if e.reason != "Forbidden":
                    raise
                return None
        else:
            return None
        if key:
            return Blob(file=key, id="s3://%s/%s" % (key.bucket.name, key.name))
        else:
            return None

    def encode_string(self, value):
        """Convert ASCII, Latin-1 or UTF-8 to pure Unicode"""
        if not isinstance(value, str):
            return value
        try:
            return unicode(value, 'utf-8')
        except:
            # really, this should throw an exception.
            # in the interest of not breaking current
            # systems, however:
            arr = []
            for ch in value:
                arr.append(unichr(ord(ch)))
            return u"".join(arr)

    def decode_string(self, value):
        """Decoding a string is really nothing, just
        return the value as-is"""
        return value
class SDBManager(object):
    """Persistence manager that stores Model objects in Amazon SimpleDB.

    Per-value encoding/decoding is delegated to SDBConverter; Blob
    properties are stored in S3 via a companion bucket.
    """
    def __init__(self, cls, db_name, db_user, db_passwd,
                 db_host, db_port, db_table, ddl_dir, enable_ssl,
                 consistent=None):
        self.cls = cls
        self.db_name = db_name
        self.db_user = db_user
        self.db_passwd = db_passwd
        self.db_host = db_host
        self.db_port = db_port
        self.db_table = db_table
        self.ddl_dir = ddl_dir
        self.enable_ssl = enable_ssl
        self.s3 = None
        self.bucket = None
        self.converter = SDBConverter(self)
        # Connection and domain are created lazily by the properties below.
        self._sdb = None
        self._domain = None
        if consistent is None and hasattr(cls, "__consistent__"):
            consistent = cls.__consistent__
        self.consistent = consistent

    @property
    def sdb(self):
        # Lazily-created SimpleDB connection.
        if self._sdb is None:
            self._connect()
        return self._sdb

    @property
    def domain(self):
        # Lazily-resolved SimpleDB domain (created on first use if missing).
        if self._domain is None:
            self._connect()
        return self._domain

    def _connect(self):
        """Open the SimpleDB connection and resolve (or create) the domain."""
        args = dict(aws_access_key_id=self.db_user,
                    aws_secret_access_key=self.db_passwd,
                    is_secure=self.enable_ssl)
        try:
            # Select the region whose endpoint matches db_host, if any.
            region = [x for x in boto.sdb.regions() if x.endpoint == self.db_host][0]
            args['region'] = region
        except IndexError:
            pass
        self._sdb = boto.connect_sdb(**args)
        # This assumes that the domain has already been created
        # It's much more efficient to do it this way rather than
        # having this make a roundtrip each time to validate.
        # The downside is that if the domain doesn't exist, it breaks
        self._domain = self._sdb.lookup(self.db_name, validate=False)
        if not self._domain:
            self._domain = self._sdb.create_domain(self.db_name)

    def _object_lister(self, cls, query_lister):
        # Generator that turns raw query result items into Model instances,
        # skipping any item that fails to resolve to an object.
        for item in query_lister:
            obj = self.get_object(cls, item.name, item)
            if obj:
                yield obj

    def encode_value(self, prop, value):
        """Encode ``value`` for storage; falls back to str() with no prop."""
        if value is None:
            return None
        if not prop:
            return str(value)
        return self.converter.encode_prop(prop, value)

    def decode_value(self, prop, value):
        """Decode a stored value back to its native Python form."""
        return self.converter.decode_prop(prop, value)

    def get_s3_connection(self):
        # Lazily-created S3 connection (for Blob storage).
        if not self.s3:
            self.s3 = boto.connect_s3(self.db_user, self.db_passwd)
        return self.s3

    def get_blob_bucket(self, bucket_name=None):
        """Return (creating if necessary) the S3 bucket used for Blobs.

        NOTE(review): the ``bucket_name`` parameter is ignored — the name is
        always derived from the access key id and domain name. Confirm
        whether callers rely on passing a custom name.
        """
        s3 = self.get_s3_connection()
        bucket_name = "%s-%s" % (s3.aws_access_key_id, self.domain.name)
        bucket_name = bucket_name.lower()
        try:
            self.bucket = s3.get_bucket(bucket_name)
        except:
            self.bucket = s3.create_bucket(bucket_name)
        return self.bucket

    def load_object(self, obj):
        """Populate ``obj``'s properties from its stored attributes (once)."""
        if not obj._loaded:
            a = self.domain.get_attributes(obj.id, consistent_read=self.consistent)
            if '__type__' in a:
                for prop in obj.properties(hidden=False):
                    if prop.name in a:
                        value = self.decode_value(prop, a[prop.name])
                        value = prop.make_value_from_datastore(value)
                        try:
                            setattr(obj, prop.name, value)
                        except Exception, e:
                            # Property validation failures are logged, not fatal.
                            boto.log.exception(e)
            obj._loaded = True

    def get_object(self, cls, id, a=None):
        """Instantiate the object stored under item name ``id``.

        ``cls`` may be None (or a mismatched class), in which case the real
        class is resolved from the stored __module__/__type__ attributes.
        ``a`` is an optional pre-fetched attribute dict to avoid a round trip.
        """
        obj = None
        if not a:
            a = self.domain.get_attributes(id, consistent_read=self.consistent)
        if '__type__' in a:
            if not cls or a['__type__'] != cls.__name__:
                cls = find_class(a['__module__'], a['__type__'])
            if cls:
                params = {}
                for prop in cls.properties(hidden=False):
                    if prop.name in a:
                        value = self.decode_value(prop, a[prop.name])
                        value = prop.make_value_from_datastore(value)
                        params[prop.name] = value
                obj = cls(id, **params)
                obj._loaded = True
            else:
                s = '(%s) class %s.%s not found' % (id, a['__module__'], a['__type__'])
                boto.log.info('sdbmanager: %s' % s)
        return obj

    def get_object_from_id(self, id):
        """Fetch an object by id, resolving its class from stored metadata."""
        return self.get_object(None, id)

    def query(self, query):
        """Run a Query object and yield matching Model instances."""
        query_str = "select * from `%s` %s" % (self.domain.name, self._build_filter_part(query.model_class, query.filters, query.sort_by, query.select))
        if query.limit:
            query_str += " limit %s" % query.limit
        rs = self.domain.select(query_str, max_items=query.limit, next_token = query.next_token)
        # Expose the raw result set so callers can read next_token.
        query.rs = rs
        return self._object_lister(query.model_class, rs)

    def count(self, cls, filters, quick=True, sort_by=None, select=None):
        """
        Get the number of results that would
        be returned in this query
        """
        query = "select count(*) from `%s` %s" % (self.domain.name, self._build_filter_part(cls, filters, sort_by, select))
        count = 0
        for row in self.domain.select(query):
            count += int(row['Count'])
            # NOTE(review): ``quick`` short-circuits after the first count
            # page; with quick=False all pages are summed — but both paths
            # return ``count``, so the flag only affects pagination depth.
            if quick:
                return count
        return count

    def _build_filter(self, property, name, op, val):
        """Build a single SQL-like comparison clause for a select query."""
        if name == "__id__":
            name = 'itemName()'
        if name != "itemName()":
            name = '`%s`' % name
        if val is None:
            # Null comparisons use 'is null' / 'is not null' syntax.
            if op in ('is', '='):
                return "%(name)s is null" % {"name": name}
            elif op in ('is not', '!='):
                return "%s is not null" % name
            else:
                val = ""
        if property.__class__ == ListProperty:
            # List values are stored as 'NNN:value', so equality becomes a
            # suffix 'like' match.
            if op in ("is", "="):
                op = "like"
            elif op in ("!=", "not"):
                op = "not like"
            if not(op in ["like", "not like"] and val.startswith("%")):
                val = "%%:%s" % val
        # Escape single quotes by doubling them, per SimpleDB Select syntax.
        return "%s %s '%s'" % (name, op, val.replace("'", "''"))

    def _build_filter_part(self, cls, filters, order_by=None, select=None):
        """
        Build the WHERE (and ORDER BY) part of a select query.

        ``filters`` may be a raw string (used verbatim) or a list of
        (property-spec, value) pairs which are ANDed together.
        """
        import types
        query_parts = []

        order_by_filtered = False

        if order_by:
            if order_by[0] == "-":
                order_by_method = "DESC"
                order_by = order_by[1:]
            else:
                order_by_method = "ASC"

        if select:
            if order_by and order_by in select:
                order_by_filtered = True
            query_parts.append("(%s)" % select)

        if isinstance(filters, basestring):
            # Raw filter string: just add the type restriction and ordering.
            query = "WHERE %s AND `__type__` = '%s'" % (filters, cls.__name__)
            if order_by in ["__id__", "itemName()"]:
                query += " ORDER BY itemName() %s" % order_by_method
            elif order_by is not None:
                query += " ORDER BY `%s` %s" % (order_by, order_by_method)
            return query

        for filter in filters:
            filter_parts = []
            filter_props = filter[0]
            if not isinstance(filter_props, list):
                filter_props = [filter_props]
            for filter_prop in filter_props:
                (name, op) = filter_prop.strip().split(" ", 1)
                value = filter[1]
                property = cls.find_property(name)
                if name == order_by:
                    order_by_filtered = True
                if types.TypeType(value) == types.ListType:
                    # A list of values becomes an OR of comparisons.
                    filter_parts_sub = []
                    for val in value:
                        val = self.encode_value(property, val)
                        if isinstance(val, list):
                            for v in val:
                                filter_parts_sub.append(self._build_filter(property, name, op, v))
                        else:
                            filter_parts_sub.append(self._build_filter(property, name, op, val))
                    filter_parts.append("(%s)" % (" OR ".join(filter_parts_sub)))
                else:
                    val = self.encode_value(property, value)
                    if isinstance(val, list):
                        for v in val:
                            filter_parts.append(self._build_filter(property, name, op, v))
                    else:
                        filter_parts.append(self._build_filter(property, name, op, val))
            query_parts.append("(%s)" % (" or ".join(filter_parts)))

        # Restrict to this class or any of its subclasses.
        type_query = "(`__type__` = '%s'" % cls.__name__
        for subclass in self._get_all_decendents(cls).keys():
            type_query += " or `__type__` = '%s'" % subclass
        type_query += ")"
        query_parts.append(type_query)

        order_by_query = ""

        if order_by:
            if not order_by_filtered:
                # SimpleDB requires the sort attribute to appear in a
                # predicate, so add a match-anything LIKE on it.
                query_parts.append("`%s` LIKE '%%'" % order_by)
            if order_by in ["__id__", "itemName()"]:
                order_by_query = " ORDER BY itemName() %s" % order_by_method
            else:
                order_by_query = " ORDER BY `%s` %s" % (order_by, order_by_method)

        if len(query_parts) > 0:
            return "WHERE %s %s" % (" AND ".join(query_parts), order_by_query)
        else:
            return ""

    def _get_all_decendents(self, cls):
        """Get all decendents for a given class"""
        decendents = {}
        for sc in cls.__sub_classes__:
            decendents[sc.__name__] = sc
            # Recurse to include the full subclass tree.
            decendents.update(self._get_all_decendents(sc))
        return decendents

    def query_gql(self, query_string, *args, **kwds):
        raise NotImplementedError("GQL queries not supported in SimpleDB")

    def save_object(self, obj, expected_value=None):
        """Persist ``obj``, assigning a uuid4 id if it has none.

        ``expected_value`` is a (prop_name, value) pair used for a
        conditional put. Properties with None/empty-list values are deleted
        from the item rather than stored.
        """
        if not obj.id:
            obj.id = str(uuid.uuid4())

        attrs = {'__type__': obj.__class__.__name__,
                 '__module__': obj.__class__.__module__,
                 '__lineage__': obj.get_lineage()}
        del_attrs = []
        for property in obj.properties(hidden=False):
            value = property.get_value_for_datastore(obj)
            if value is not None:
                value = self.encode_value(property, value)
            if value == []:
                value = None
            if value is None:
                del_attrs.append(property.name)
                continue
            attrs[property.name] = value
            if property.unique:
                # NOTE(review): check-then-write uniqueness test; not atomic,
                # so concurrent saves can still race.
                try:
                    args = {property.name: value}
                    obj2 = obj.find(**args).next()
                    if obj2.id != obj.id:
                        raise SDBPersistenceError("Error: %s must be unique!" % property.name)
                except(StopIteration):
                    pass
        # Convert the Expected value to SDB format
        if expected_value:
            prop = obj.find_property(expected_value[0])
            v = expected_value[1]
            if v is not None and not isinstance(v, bool):
                v = self.encode_value(prop, v)
            expected_value[1] = v
        self.domain.put_attributes(obj.id, attrs, replace=True, expected_value=expected_value)
        if len(del_attrs) > 0:
            self.domain.delete_attributes(obj.id, del_attrs)
        return obj

    def delete_object(self, obj):
        """Delete all stored attributes for ``obj``."""
        self.domain.delete_attributes(obj.id)

    def set_property(self, prop, obj, name, value):
        """Set one property on ``obj`` and write it through to SimpleDB."""
        setattr(obj, name, value)
        value = prop.get_value_for_datastore(obj)
        value = self.encode_value(prop, value)
        if prop.unique:
            # Same non-atomic uniqueness check as in save_object.
            try:
                args = {prop.name: value}
                obj2 = obj.find(**args).next()
                if obj2.id != obj.id:
                    raise SDBPersistenceError("Error: %s must be unique!" % prop.name)
            except(StopIteration):
                pass
        self.domain.put_attributes(obj.id, {name: value}, replace=True)

    def get_property(self, prop, obj, name):
        """Fetch one property value from SimpleDB and set it on ``obj``."""
        a = self.domain.get_attributes(obj.id, consistent_read=self.consistent)

        # try to get the attribute value from SDB
        if name in a:
            value = self.decode_value(prop, a[name])
            value = prop.make_value_from_datastore(value)
            setattr(obj, prop.name, value)
            return value
        raise AttributeError('%s not found' % name)

    def set_key_value(self, obj, name, value):
        """Store a raw (unencoded) attribute on the object's item."""
        self.domain.put_attributes(obj.id, {name: value}, replace=True)

    def delete_key_value(self, obj, name):
        """Delete a raw attribute from the object's item."""
        self.domain.delete_attributes(obj.id, name)

    def get_key_value(self, obj, name):
        """Fetch a raw attribute from the object's item, or None."""
        a = self.domain.get_attributes(obj.id, name, consistent_read=self.consistent)
        if name in a:
            return a[name]
        else:
            return None

    def get_raw_item(self, obj):
        """Return the underlying SimpleDB item for ``obj``."""
        return self.domain.get_item(obj.id)
| mit |
thorwhalen/ut | ml/skwrap/feature_extraction/dict_vectorizer.py | 1 | 7588 |
__author__ = 'thor'
from sklearn.feature_extraction import DictVectorizer
from sklearn.externals import six
import numpy as np
from pandas import DataFrame
from collections import Counter
class IterDictVectorizer(DictVectorizer):
    """Transforms lists of feature-value mappings or rows of a dataframe to vectors.
    It is like DictVectorizer (whose description was copied below), but:
    (1) works with pandas DataFrame X input (rows become feature-value mappings dict)
    (2) a minimum number of feature=value counts can be specified (by min_count)
    (3) The fit is faster than with DictVectorizer (at least with DataFrame input)
    This transformer turns lists of mappings (dict-like objects) of feature
    names to feature values into Numpy arrays or scipy.sparse matrices for use
    with scikit-learn estimators.
    When feature values are strings, this transformer will do a binary one-hot
    (aka one-of-K) coding: one boolean-valued feature is constructed for each
    of the possible string values that the feature can take on. For instance,
    a feature "f" that can take on the values "ham" and "spam" will become two
    features in the output, one signifying "f=ham", the other "f=spam".
    Features that do not occur in a sample (mapping) will have a zero value
    in the resulting array/matrix.
    Parameters
    ----------
    dtype : callable, optional
        The type of feature values. Passed to Numpy array/scipy.sparse matrix
        constructors as the dtype argument.
    separator: string, optional
        Separator string used when constructing new features for one-hot
        coding.
    sparse: boolean, optional.
        Whether transform should produce scipy.sparse matrices.
        True by default.
    sort: boolean, optional.
        Whether ``feature_names_`` and ``vocabulary_`` should be sorted when fitting.
        True by default.
    min_count: positive float or int:
        If min_count >= 1, min_count is the minimum number of feature=value count.
        If min_count < 1, min_count represent the minimum proportion of the data that should have feature=value
    Attributes
    ----------
    vocabulary_ : dict
        A dictionary mapping feature names to feature indices.
    feature_names_ : list
        A list of length n_features containing the feature names (e.g., "f=ham"
        and "f=spam").
    Examples
    --------
    >>> from sklearn.feature_extraction import DictVectorizer
    >>> v = DictVectorizer(sparse=False)
    >>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
    >>> X = v.fit_transform(D)
    >>> X
    array([[ 2.,  0.,  1.],
           [ 0.,  1.,  3.]])
    >>> v.inverse_transform(X) == \
        [{'bar': 2.0, 'foo': 1.0}, {'baz': 1.0, 'foo': 3.0}]
    True
    >>> v.transform({'foo': 4, 'unseen_feature': 3})
    array([[ 0.,  0.,  4.]])
    >>> from ut.ml.skwrap.feature_extraction import IterDictVectorizer
    >>> from pandas import DataFrame
    >>> v = IterDictVectorizer(sparse=False)
    >>> D = DataFrame([{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}])
    >>> X = v.fit_transform(D)
    >>> X
    array([[ 2.,  0.,  1.],
           [ 0.,  1.,  3.]])
    See also
    --------
    FeatureHasher : performs vectorization using only a hash function.
    sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
        encoded as columns of integers.
    """
    def __init__(self, dtype=np.float64, separator="=", sparse=True, sort=True, min_count=0):
        self.dtype = dtype
        self.separator = separator
        self.sparse = sparse
        self.sort = sort
        self.min_count = min_count

    def fit(self, X, y=None):
        """Learn a list of feature name -> indices mappings.
        Parameters
        ----------
        X : pandas DataFrame, or Mapping or iterable over Mappings
            Dict(s) or Mapping(s) from feature names (arbitrary Python
            objects) to feature values (strings or convertible to dtype),
            or a DataFrame whose rows play that role.
        y : (ignored)
        Returns
        -------
        self
        """
        feature_template = "{}" + self.separator + "{}"
        feature_names = []
        if isinstance(X, DataFrame):
            # Count occurrences of each column=value pair, ignoring NaNs.
            counts_of = {col: Counter(val.dropna()) for col, val in X.items()}
            # min_count < 1 is interpreted as a fraction of the number of rows.
            _min_count = self.min_count * len(X) if self.min_count < 1 else self.min_count
            self.feature_counts_ = {}
            self.df_columns_ = set()
            for col, value_counts in counts_of.items():
                for value, count in value_counts.items():
                    if count >= _min_count:
                        self.feature_counts_[feature_template.format(col, value)] = count
                        self.df_columns_.add(col)
            feature_names = list(self.feature_counts_.keys())
        else:
            seen = set()
            for x in X:
                for f, v in six.iteritems(x):
                    # NOTE: only string values get one-hot "f=v" names here;
                    # the DataFrame branch one-hots every value.
                    if isinstance(v, six.string_types):
                        f = feature_template.format(f, v)
                    if f not in seen:
                        seen.add(f)
                        feature_names.append(f)
        if self.sort:
            feature_names.sort()
        # BUG FIX: the vocabulary was previously rebuilt only when sort=True,
        # which left vocabulary_ empty for DataFrame input with sort=False.
        # Build it unconditionally from the (possibly sorted) feature names.
        self.feature_names_ = feature_names
        self.vocabulary_ = dict((f, i) for i, f in enumerate(feature_names))
        return self

    def transform(self, X, y=None):
        """Transform X (DataFrame rows or iterable of dicts) to a feature matrix."""
        if isinstance(X, DataFrame):
            # Each row becomes a {column: value} dict with NaNs dropped.
            X = (row.dropna().to_dict() for _, row in X.iterrows())
        return super(IterDictVectorizer, self).transform(X)

    def fit_transform(self, X, y=None):
        """Fit to X, then transform it (two passes over the data)."""
        self.fit(X)
        return self.transform(X)
class IterDictVectorizerWithText(object):
    """Combine an IterDictVectorizer (for categorical/numeric columns) with
    per-column text vectorizers (e.g. CountVectorizer) for text columns.

    Parameters mirror IterDictVectorizer; ``text_vectorizers`` maps column
    names to text-vectorizer instances that will be fitted on those columns.
    """
    def __init__(self, dtype=np.float64, separator="=", sparse=True, sort=True, min_count=0,
                 text_vectorizers=None):
        self.dict_vectorizer = IterDictVectorizer(
            dtype=dtype, separator=separator, sparse=sparse, sort=sort, min_count=min_count
        )
        # BUG FIX: a mutable default ({}) was shared across instances;
        # create a fresh dict per instance instead.
        self.text_vectorizers = {} if text_vectorizers is None else text_vectorizers

    def fit(self, X, y=None):
        """Fit the dict vectorizer on non-text columns and each text
        vectorizer on its column; merge all vocabularies with offsets.

        Raises RuntimeError when text_vectorizers names columns absent from X.
        Returns self.
        """
        # input validation
        assert isinstance(X, DataFrame), "X must be a pandas DataFrame"
        if not set(self.text_vectorizers.keys()).issubset(X.columns):
            # BUG FIX: the error was previously constructed but never raised.
            raise RuntimeError(
                "The following columns were specified in text_vectorizers, but were not in X:\n" +
                "  {}".format(set(self.text_vectorizers.keys()).difference(X.columns)))
        # carry out the normal IterDictVectorizer.fit() for columns not in text_vectorizers.
        # Sorted list rather than a raw set: pandas rejects set indexers, and a
        # deterministic order keeps fit/transform column-aligned.
        self.dict_vectorizer_cols_ = sorted(set(X.columns).difference(self.text_vectorizers))
        self.dict_vectorizer.fit(X[self.dict_vectorizer_cols_])
        self.vocabulary_ = self.dict_vectorizer.vocabulary_
        # fit each text vectorizer and splice its vocabulary in, offset so
        # that all feature indices stay distinct
        for col in sorted(set(X.columns).intersection(self.text_vectorizers)):
            self.text_vectorizers[col].fit(X[col])
            offset = len(self.vocabulary_)
            # BUG FIX: read the fitted vectorizer's vocabulary_ mapping
            # (the vectorizer object itself has no .items()).
            self.vocabulary_ = dict(self.vocabulary_,
                                    **{k: v + offset
                                       for k, v in self.text_vectorizers[col].vocabulary_.items()})
        self.feature_names_ = list(self.vocabulary_.keys())
        return self

    def transform(self, X, y=None):
        """Transform X into a single horizontally stacked feature matrix."""
        X1 = self.dict_vectorizer.transform(X[self.dict_vectorizer_cols_])
        # BUG FIX: materialize the per-column blocks (np.hstack needs a
        # sequence, not a lazy map) and iterate in the same sorted order
        # used during fit so columns line up with the vocabulary offsets.
        X2 = np.hstack([self.text_vectorizers[col].transform(X[col])
                        for col in sorted(self.text_vectorizers)])
        return np.hstack((X1, X2))
| mit |
brandond/ansible | lib/ansible/modules/network/fortios/fortios_firewall_ippool6.py | 24 | 8050 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2018 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# the lib use python logging can get it if the following is set in your
# Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_ippool6
short_description: Configure IPv6 IP pools in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by
allowing the user to configure firewall feature and ippool6 category.
Examples includes all options and need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: false
firewall_ippool6:
description:
- Configure IPv6 IP pools.
default: null
suboptions:
state:
description:
- Indicates whether to create or remove the object
choices:
- present
- absent
comments:
description:
- Comment.
endip:
description:
- "Final IPv6 address (inclusive) in the range for the address pool (format xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx, Default: ::)."
name:
description:
- IPv6 IP pool name.
required: true
startip:
description:
- "First IPv6 address (inclusive) in the range for the address pool (format xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx, Default: ::)."
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure IPv6 IP pools.
fortios_firewall_ippool6:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
firewall_ippool6:
state: "present"
comments: "<your_own_comment>"
endip: "<your_own_value>"
name: "default_name_5"
startip: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "key1"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
    """Open the global FortiOS session using the module parameters in *data*."""
    fos.debug('on')
    # HTTPS defaults to on; only an explicit falsy 'https' flag disables it.
    use_https = data.get('https', True)
    fos.https('on' if use_https else 'off')
    fos.login(data['host'], data['username'], data['password'])
def filter_firewall_ippool6_data(json):
    """Return a copy of *json* restricted to the ippool6 API fields,
    dropping keys that are absent or None."""
    wanted = ('comments', 'endip', 'name', 'startip')
    return {key: json[key]
            for key in wanted
            if key in json and json[key] is not None}
def firewall_ippool6(data, fos):
    """Apply the requested ippool6 state (present/absent) through the fos API
    and return the raw API response."""
    vdom = data['vdom']
    params = data['firewall_ippool6']
    payload = filter_firewall_ippool6_data(params)
    state = params['state']
    if state == "present":
        return fos.set('firewall',
                       'ippool6',
                       data=payload,
                       vdom=vdom)
    if state == "absent":
        return fos.delete('firewall',
                          'ippool6',
                          mkey=payload['name'],
                          vdom=vdom)
def fortios_firewall(data, fos):
    """Log in, run the first configured firewall sub-task, then log out.

    Returns the (is_error, has_changed, result) triple expected by main().
    """
    login(data)
    # BUG FIX: dispatch through an explicit table instead of eval() on a
    # method-name string.
    dispatch = {'firewall_ippool6': firewall_ippool6}
    resp = None
    for method_name in dispatch:
        if data[method_name]:
            resp = dispatch[method_name](data, fos)
            break
    fos.logout()
    if resp is None:
        # BUG FIX: previously 'resp' was unbound (NameError) when no
        # sub-task data was supplied; report a clean error instead.
        return True, False, {'status': 'error', 'reason': 'no task data provided'}
    return not resp['status'] == "success", resp['status'] == "success", resp
def main():
    """Module entry point: declare the argument spec, connect to the
    FortiGate and apply the requested ippool6 configuration."""
    fields = {
        "host": {"required": True, "type": "str"},
        "username": {"required": True, "type": "str"},
        "password": {"required": False, "type": "str", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        # NOTE(review): default is the *string* "False" for a bool option —
        # Ansible coerces it, presumably intentional; confirm.
        "https": {"required": False, "type": "bool", "default": "False"},
        "firewall_ippool6": {
            "required": False, "type": "dict",
            "options": {
                "state": {"required": True, "type": "str",
                          "choices": ["present", "absent"]},
                "comments": {"required": False, "type": "str"},
                "endip": {"required": False, "type": "str"},
                "name": {"required": True, "type": "str"},
                "startip": {"required": False, "type": "str"}
            }
        }
    }
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)
    # fortiosapi is an optional runtime dependency; fail the module cleanly
    # rather than crash with ImportError.
    try:
        from fortiosapi import FortiOSAPI
    except ImportError:
        module.fail_json(msg="fortiosapi module is required")
    global fos
    fos = FortiOSAPI()
    is_error, has_changed, result = fortios_firewall(module.params, fos)
    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 |
microcom/odoo | addons/base_gengo/controller/gengo_callback.py | 18 | 2338 | # -*- coding: utf-8 -*-
import openerp
from openerp import SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.web.http import request
from werkzeug.wrappers import BaseResponse as Response
import json
class website_gengo(http.Controller):
    """HTTP controller receiving translation-job callbacks from Gengo."""

    def get_gengo_key(self, cr):
        """Return the private Gengo key from ir.config_parameter ('' when unset)."""
        icp = request.registry['ir.config_parameter']
        return icp.get_param(cr, SUPERUSER_ID, request.registry['base.gengo.translations'].GENGO_KEY, default="")

    @http.route('/website/gengo_callback', type='http', auth='none', csrf=False)
    def gengo_callback(self, **post):
        """Handle a Gengo job callback.

        Validates the callback's shared key ('pgk') and, for approved jobs
        whose source text is unchanged, writes the translated text onto every
        matching ir.translation record.
        """
        # BUG FIX: removed a leftover debug ``print`` statement that wrote to
        # stdout on every callback.
        cr, uid, context = request.cr, openerp.SUPERUSER_ID, request.context
        translation_pool = request.registry['ir.translation']
        if post and post.get('job') and post.get('pgk'):
            if post.get('pgk') != self.get_gengo_key(cr):
                return Response("Bad authentication", status=104)
            job = json.loads(post['job'], 'utf-8')
            tid = job.get('custom_data', False)
            if (job.get('status') == 'approved') and tid:
                term = translation_pool.browse(cr, uid, int(tid), context=context)
                # Refuse to save if the source text was altered since the
                # job was submitted.
                if term.src != job.get('body_src'):
                    return Response("Text Altered - Not saved", status=418)
                # Update the record itself OR any identical pending term.
                domain = [
                    '|',
                    ('id', "=", int(tid)),
                    '&', '&', '&', '&', '&',
                    ('state', '=', term.state),
                    ('gengo_translation', '=', term.gengo_translation),
                    ('src', "=", term.src),
                    ('type', "=", term.type),
                    ('name', "=", term.name),
                    ('lang', "=", term.lang),
                    #('order_id', "=", term.order_id),
                ]
                all_ir_tanslations = translation_pool.search(cr, uid, domain, context=context or {})
                if all_ir_tanslations:
                    vals = {'state': 'translated', 'value': job.get('body_tgt')}
                    translation_pool.write(cr, uid, all_ir_tanslations, vals, context=context)
                    return Response("OK", status=200)
                else:
                    return Response("No terms found", status=412)
        return Response("Not saved", status=418)
| agpl-3.0 |
CYBERBUGJR/Diamond | src/collectors/udp/test/testudp.py | 30 | 2265 | #!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from diamond.collector import Collector
from udp import UDPCollector
################################################################################
class TestUDPCollector(CollectorTestCase):
    """Unit tests for UDPCollector, driven by fixture copies of /proc/net/snmp."""
    def setUp(self, allowed_names=None):
        """Build a collector configured with *allowed_names* (default: all metrics)."""
        if not allowed_names:
            allowed_names = []
        config = get_collector_config('UDPCollector', {
            'allowed_names': allowed_names,
            'interval': 1
        })
        self.collector = UDPCollector(config, None)
    def test_import(self):
        """The collector class should be importable and truthy."""
        self.assertTrue(UDPCollector)
    @patch('os.access', Mock(return_value=True))
    @patch('__builtin__.open')
    @patch.object(Collector, 'publish')
    def test_should_open_proc_net_snmp(self, publish_mock, open_mock):
        """collect() must read its stats from /proc/net/snmp exactly once."""
        UDPCollector.PROC = ['/proc/net/snmp']
        open_mock.return_value = StringIO('')
        self.collector.collect()
        open_mock.assert_called_once_with('/proc/net/snmp')
    @patch.object(Collector, 'publish')
    def test_should_work_with_real_data(self, publish_mock):
        """Two successive collections should publish the expected UDP metrics."""
        self.setUp([])
        UDPCollector.PROC = [
            self.getFixturePath('proc_net_snmp_1'),
        ]
        self.collector.collect()
        # First pass publishes nothing — presumably it only seeds the
        # previous-values baseline for derivative metrics; confirm in collector.
        self.assertPublishedMany(publish_mock, {})
        UDPCollector.PROC = [
            self.getFixturePath('proc_net_snmp_2'),
        ]
        self.collector.collect()
        metrics = {
            'InDatagrams': 352320636.0,
            'InErrors': 5.0,
            'NoPorts': 449.0,
            'OutDatagrams': 352353358.0,
        }
        self.setDocExample(collector=self.collector.__class__.__name__,
                           metrics=metrics,
                           defaultpath=self.collector.config['path'])
        self.assertPublishedMany(publish_mock, metrics)
################################################################################
if __name__ == "__main__":
unittest.main()
| mit |
bmbove/omxremote | cherrypy/_cpchecker.py | 41 | 14499 | import os
import warnings
import cherrypy
from cherrypy._cpcompat import iteritems, copykeys, builtins
class Checker(object):
    """A checker for CherryPy sites and their mounted applications.
    When this object is called at engine startup, it executes each
    of its own methods whose names start with ``check_``. If you wish
    to disable selected checks, simply add a line in your global
    config which sets the appropriate method to False::
        [global]
        checker.check_skipped_app_config = False
    You may also dynamically add or replace ``check_*`` methods in this way.
    """
    on = True
    """If True (the default), run all checks; if False, turn off all checks."""
    def __init__(self):
        self._populate_known_types()
    def __call__(self):
        """Run all check_* methods."""
        if self.on:
            # Temporarily install our own warning formatter so checker
            # output stands out; always restore the original afterwards.
            oldformatwarning = warnings.formatwarning
            warnings.formatwarning = self.formatwarning
            try:
                for name in dir(self):
                    if name.startswith("check_"):
                        method = getattr(self, name)
                        if method and hasattr(method, '__call__'):
                            method()
            finally:
                warnings.formatwarning = oldformatwarning
    def formatwarning(self, message, category, filename, lineno, line=None):
        """Function to format a warning."""
        return "CherryPy Checker:\n%s\n\n" % message
    # This value should be set inside _cpconfig.
    global_config_contained_paths = False
    def check_app_config_entries_dont_start_with_script_name(self):
        """Check for Application config with sections that repeat script_name."""
        for sn, app in cherrypy.tree.apps.items():
            if not isinstance(app, cherrypy.Application):
                continue
            if not app.config:
                continue
            if sn == '':
                continue
            sn_atoms = sn.strip("/").split("/")
            for key in app.config.keys():
                key_atoms = key.strip("/").split("/")
                if key_atoms[:len(sn_atoms)] == sn_atoms:
                    warnings.warn(
                        "The application mounted at %r has config " \
                        "entries that start with its script name: %r" % (sn, key))
    def check_site_config_entries_in_app_config(self):
        """Check for mounted Applications that have site-scoped config."""
        for sn, app in iteritems(cherrypy.tree.apps):
            if not isinstance(app, cherrypy.Application):
                continue
            msg = []
            for section, entries in iteritems(app.config):
                if section.startswith('/'):
                    for key, value in iteritems(entries):
                        # Only these four prefixes are site-wide namespaces.
                        for n in ("engine.", "server.", "tree.", "checker."):
                            if key.startswith(n):
                                msg.append("[%s] %s = %s" % (section, key, value))
            if msg:
                msg.insert(0,
                    "The application mounted at %r contains the following "
                    "config entries, which are only allowed in site-wide "
                    "config. Move them to a [global] section and pass them "
                    "to cherrypy.config.update() instead of tree.mount()." % sn)
                warnings.warn(os.linesep.join(msg))
    def check_skipped_app_config(self):
        """Check for mounted Applications that have no config."""
        for sn, app in cherrypy.tree.apps.items():
            if not isinstance(app, cherrypy.Application):
                continue
            if not app.config:
                msg = "The Application mounted at %r has an empty config." % sn
                if self.global_config_contained_paths:
                    msg += (" It looks like the config you passed to "
                            "cherrypy.config.update() contains application-"
                            "specific sections. You must explicitly pass "
                            "application config via "
                            "cherrypy.tree.mount(..., config=app_config)")
                warnings.warn(msg)
                # NOTE: returns after the first empty-config app, so at most
                # one such warning is issued per run.
                return
    def check_app_config_brackets(self):
        """Check for Application config with extraneous brackets in section names."""
        for sn, app in cherrypy.tree.apps.items():
            if not isinstance(app, cherrypy.Application):
                continue
            if not app.config:
                continue
            for key in app.config.keys():
                if key.startswith("[") or key.endswith("]"):
                    warnings.warn(
                        "The application mounted at %r has config " \
                        "section names with extraneous brackets: %r. "
                        "Config *files* need brackets; config *dicts* "
                        "(e.g. passed to tree.mount) do not." % (sn, key))
    def check_static_paths(self):
        """Check Application config for incorrect static paths."""
        # Use the dummy Request object in the main thread.
        request = cherrypy.request
        for sn, app in cherrypy.tree.apps.items():
            if not isinstance(app, cherrypy.Application):
                continue
            request.app = app
            for section in app.config:
                # get_resource will populate request.config
                request.get_resource(section + "/dummy.html")
                conf = request.config.get
                if conf("tools.staticdir.on", False):
                    msg = ""
                    root = conf("tools.staticdir.root")
                    dir = conf("tools.staticdir.dir")
                    if dir is None:
                        msg = "tools.staticdir.dir is not set."
                    else:
                        fulldir = ""
                        if os.path.isabs(dir):
                            fulldir = dir
                            if root:
                                msg = ("dir is an absolute path, even "
                                       "though a root is provided.")
                                testdir = os.path.join(root, dir[1:])
                                if os.path.exists(testdir):
                                    msg += ("\nIf you meant to serve the "
                                            "filesystem folder at %r, remove "
                                            "the leading slash from dir." % testdir)
                        else:
                            if not root:
                                msg = "dir is a relative path and no root provided."
                            else:
                                fulldir = os.path.join(root, dir)
                                if not os.path.isabs(fulldir):
                                    msg = "%r is not an absolute path." % fulldir
                        if fulldir and not os.path.exists(fulldir):
                            if msg:
                                msg += "\n"
                            msg += ("%r (root + dir) is not an existing "
                                    "filesystem path." % fulldir)
                    if msg:
                        warnings.warn("%s\nsection: [%s]\nroot: %r\ndir: %r"
                                      % (msg, section, root, dir))
    # -------------------------- Compatibility -------------------------- #
    obsolete = {
        'server.default_content_type': 'tools.response_headers.headers',
        'log_access_file': 'log.access_file',
        'log_config_options': None,
        'log_file': 'log.error_file',
        'log_file_not_found': None,
        'log_request_headers': 'tools.log_headers.on',
        'log_to_screen': 'log.screen',
        'show_tracebacks': 'request.show_tracebacks',
        'throw_errors': 'request.throw_errors',
        'profiler.on': ('cherrypy.tree.mount(profiler.make_app('
                        'cherrypy.Application(Root())))'),
        }
    deprecated = {}
    def _compat(self, config):
        """Process config and warn on each obsolete or deprecated entry."""
        for section, conf in config.items():
            if isinstance(conf, dict):
                for k, v in conf.items():
                    if k in self.obsolete:
                        warnings.warn("%r is obsolete. Use %r instead.\n"
                                      "section: [%s]" %
                                      (k, self.obsolete[k], section))
                    elif k in self.deprecated:
                        warnings.warn("%r is deprecated. Use %r instead.\n"
                                      "section: [%s]" %
                                      (k, self.deprecated[k], section))
            else:
                if section in self.obsolete:
                    warnings.warn("%r is obsolete. Use %r instead."
                                  % (section, self.obsolete[section]))
                elif section in self.deprecated:
                    warnings.warn("%r is deprecated. Use %r instead."
                                  % (section, self.deprecated[section]))
    def check_compatibility(self):
        """Process config and warn on each obsolete or deprecated entry."""
        self._compat(cherrypy.config)
        for sn, app in cherrypy.tree.apps.items():
            if not isinstance(app, cherrypy.Application):
                continue
            self._compat(app.config)
    # ------------------------ Known Namespaces ------------------------ #
    extra_config_namespaces = []
    def _known_ns(self, app):
        """Warn on dotted config keys of *app* whose first atom is not a
        known namespace, with a hint for common mistakes."""
        ns = ["wsgi"]
        ns.extend(copykeys(app.toolboxes))
        ns.extend(copykeys(app.namespaces))
        ns.extend(copykeys(app.request_class.namespaces))
        ns.extend(copykeys(cherrypy.config.namespaces))
        ns += self.extra_config_namespaces
        for section, conf in app.config.items():
            is_path_section = section.startswith("/")
            if is_path_section and isinstance(conf, dict):
                for k, v in conf.items():
                    atoms = k.split(".")
                    if len(atoms) > 1:
                        if atoms[0] not in ns:
                            # Spit out a special warning if a known
                            # namespace is preceded by "cherrypy."
                            if (atoms[0] == "cherrypy" and atoms[1] in ns):
                                msg = ("The config entry %r is invalid; "
                                       "try %r instead.\nsection: [%s]"
                                       % (k, ".".join(atoms[1:]), section))
                            else:
                                msg = ("The config entry %r is invalid, because "
                                       "the %r config namespace is unknown.\n"
                                       "section: [%s]" % (k, atoms[0], section))
                            warnings.warn(msg)
                        elif atoms[0] == "tools":
                            if atoms[1] not in dir(cherrypy.tools):
                                msg = ("The config entry %r may be invalid, "
                                       "because the %r tool was not found.\n"
                                       "section: [%s]" % (k, atoms[1], section))
                                warnings.warn(msg)
    def check_config_namespaces(self):
        """Process config and warn on each unknown config namespace."""
        for sn, app in cherrypy.tree.apps.items():
            if not isinstance(app, cherrypy.Application):
                continue
            self._known_ns(app)
    # -------------------------- Config Types -------------------------- #
    known_config_types = {}
    def _populate_known_types(self):
        """Record the builtin type of each default value found on the main
        cherrypy objects, keyed by its config name (e.g. "request.x")."""
        b = [x for x in vars(builtins).values()
             if type(x) is type(str)]
        def traverse(obj, namespace):
            for name in dir(obj):
                # Hack for 3.2's warning about body_params
                if name == 'body_params':
                    continue
                vtype = type(getattr(obj, name, None))
                if vtype in b:
                    self.known_config_types[namespace + "." + name] = vtype
        traverse(cherrypy.request, "request")
        traverse(cherrypy.response, "response")
        traverse(cherrypy.server, "server")
        traverse(cherrypy.engine, "engine")
        traverse(cherrypy.log, "log")
    def _known_types(self, config):
        """Warn when a config value's type differs from its default's type."""
        msg = ("The config entry %r in section %r is of type %r, "
               "which does not match the expected type %r.")
        for section, conf in config.items():
            if isinstance(conf, dict):
                for k, v in conf.items():
                    if v is not None:
                        expected_type = self.known_config_types.get(k, None)
                        vtype = type(v)
                        if expected_type and vtype != expected_type:
                            warnings.warn(msg % (k, section, vtype.__name__,
                                                 expected_type.__name__))
            else:
                k, v = section, conf
                if v is not None:
                    expected_type = self.known_config_types.get(k, None)
                    vtype = type(v)
                    if expected_type and vtype != expected_type:
                        warnings.warn(msg % (k, section, vtype.__name__,
                                             expected_type.__name__))
    def check_config_types(self):
        """Assert that config values are of the same type as default values."""
        self._known_types(cherrypy.config)
        for sn, app in cherrypy.tree.apps.items():
            if not isinstance(app, cherrypy.Application):
                continue
            self._known_types(app.config)
    # -------------------- Specific config warnings -------------------- #
    def check_localhost(self):
        """Warn if any socket_host is 'localhost'. See #711."""
        for k, v in cherrypy.config.items():
            if k == 'server.socket_host' and v == 'localhost':
                warnings.warn("The use of 'localhost' as a socket host can "
                    "cause problems on newer systems, since 'localhost' can "
                    "map to either an IPv4 or an IPv6 address. You should "
                    "use '127.0.0.1' or '[::1]' instead.")
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.