repo_name
stringlengths
5
100
path
stringlengths
4
294
copies
stringclasses
990 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
Beauhurst/django
tests/model_fields/test_decimalfield.py
7
3676
import unittest from decimal import Decimal from django.core import validators from django.core.exceptions import ValidationError from django.db import connection, models from django.test import TestCase from .models import BigD, Foo class DecimalFieldTests(TestCase): def test_to_python(self): f = models.DecimalField(max_digits=4, decimal_places=2) self.assertEqual(f.to_python(3), Decimal('3')) self.assertEqual(f.to_python('3.14'), Decimal('3.14')) # to_python() converts floats and honors max_digits. self.assertEqual(f.to_python(3.1415926535897), Decimal('3.142')) self.assertEqual(f.to_python(2.4), Decimal('2.400')) # Uses default rounding of ROUND_HALF_EVEN. self.assertEqual(f.to_python(2.0625), Decimal('2.062')) self.assertEqual(f.to_python(2.1875), Decimal('2.188')) with self.assertRaises(ValidationError): f.to_python('abc') def test_default(self): f = models.DecimalField(default=Decimal('0.00')) self.assertEqual(f.get_default(), Decimal('0.00')) def test_format(self): f = models.DecimalField(max_digits=5, decimal_places=1) self.assertEqual(f._format(f.to_python(2)), '2.0') self.assertEqual(f._format(f.to_python('2.6')), '2.6') self.assertIsNone(f._format(None)) def test_get_prep_value(self): f = models.DecimalField(max_digits=5, decimal_places=1) self.assertIsNone(f.get_prep_value(None)) self.assertEqual(f.get_prep_value('2.4'), Decimal('2.4')) def test_filter_with_strings(self): """ Should be able to filter decimal fields using strings (#8023). """ foo = Foo.objects.create(a='abc', d=Decimal('12.34')) self.assertEqual(list(Foo.objects.filter(d='12.34')), [foo]) def test_save_without_float_conversion(self): """ Ensure decimals don't go through a corrupting float conversion during save (#5079). 
""" bd = BigD(d='12.9') bd.save() bd = BigD.objects.get(pk=bd.pk) self.assertEqual(bd.d, Decimal('12.9')) @unittest.skipIf(connection.vendor == 'sqlite', 'SQLite stores values rounded to 15 significant digits.') def test_fetch_from_db_without_float_rounding(self): big_decimal = BigD.objects.create(d=Decimal('.100000000000000000000000000005')) big_decimal.refresh_from_db() self.assertEqual(big_decimal.d, Decimal('.100000000000000000000000000005')) def test_lookup_really_big_value(self): """ Really big values can be used in a filter statement. """ # This should not crash. Foo.objects.filter(d__gte=100000000000) def test_max_digits_validation(self): field = models.DecimalField(max_digits=2) expected_message = validators.DecimalValidator.messages['max_digits'] % {'max': 2} with self.assertRaisesMessage(ValidationError, expected_message): field.clean(100, None) def test_max_decimal_places_validation(self): field = models.DecimalField(decimal_places=1) expected_message = validators.DecimalValidator.messages['max_decimal_places'] % {'max': 1} with self.assertRaisesMessage(ValidationError, expected_message): field.clean(Decimal('0.99'), None) def test_max_whole_digits_validation(self): field = models.DecimalField(max_digits=3, decimal_places=1) expected_message = validators.DecimalValidator.messages['max_whole_digits'] % {'max': 2} with self.assertRaisesMessage(ValidationError, expected_message): field.clean(Decimal('999'), None)
bsd-3-clause
alshedivat/tensorflow
tensorflow/python/keras/utils/layer_utils.py
10
12454
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=protected-access """Utilities related to layer/model functionality. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.keras import backend as K from tensorflow.python.keras.utils.conv_utils import convert_kernel from tensorflow.python.util.tf_export import tf_export @tf_export('keras.utils.get_source_inputs') def get_source_inputs(tensor, layer=None, node_index=None): """Returns the list of input tensors necessary to compute `tensor`. Output will always be a list of tensors (potentially with 1 element). Arguments: tensor: The tensor to start from. layer: Origin layer of the tensor. Will be determined via tensor._keras_history if not provided. node_index: Origin node index of the tensor. Returns: List of input tensors. """ if not hasattr(tensor, '_keras_history'): return tensor if layer is None or node_index: layer, node_index, _ = tensor._keras_history if not layer._inbound_nodes: return [tensor] else: node = layer._inbound_nodes[node_index] if not node.inbound_layers: # Reached an Input layer, stop recursion. 
return node.input_tensors else: source_tensors = [] for i in range(len(node.inbound_layers)): x = node.input_tensors[i] layer = node.inbound_layers[i] node_index = node.node_indices[i] previous_sources = get_source_inputs(x, layer, node_index) # Avoid input redundancy. for x in previous_sources: if x not in source_tensors: source_tensors.append(x) return source_tensors def count_params(weights): """Count the total number of scalars composing the weights. Arguments: weights: An iterable containing the weights on which to compute params Returns: The total number of scalars composing the weights """ return int(np.sum([np.prod(p.get_shape().as_list()) for p in set(weights)])) def print_summary(model, line_length=None, positions=None, print_fn=None): """Prints a summary of a model. Arguments: model: Keras model instance. line_length: Total length of printed lines (e.g. set this to adapt the display to different terminal window sizes). positions: Relative or absolute positions of log elements in each line. If not provided, defaults to `[.33, .55, .67, 1.]`. print_fn: Print function to use. It will be called on each line of the summary. You can set it to a custom function in order to capture the string summary. It defaults to `print` (prints to stdout). """ if print_fn is None: print_fn = print if model.__class__.__name__ == 'Sequential': sequential_like = True elif not model._is_graph_network: # We treat subclassed models as a simple sequence of layers, for logging # purposes. 
sequential_like = True else: sequential_like = True nodes_by_depth = model._nodes_by_depth.values() nodes = [] for v in nodes_by_depth: if (len(v) > 1) or (len(v) == 1 and len(v[0].inbound_layers) > 1): # if the model has multiple nodes # or if the nodes have multiple inbound_layers # the model is no longer sequential sequential_like = False break nodes += v if sequential_like: # search for shared layers for layer in model.layers: flag = False for node in layer._inbound_nodes: if node in nodes: if flag: sequential_like = False break else: flag = True if not sequential_like: break if sequential_like: line_length = line_length or 65 positions = positions or [.45, .85, 1.] if positions[-1] <= 1: positions = [int(line_length * p) for p in positions] # header names for the different log elements to_display = ['Layer (type)', 'Output Shape', 'Param #'] else: line_length = line_length or 98 positions = positions or [.33, .55, .67, 1.] if positions[-1] <= 1: positions = [int(line_length * p) for p in positions] # header names for the different log elements to_display = ['Layer (type)', 'Output Shape', 'Param #', 'Connected to'] relevant_nodes = [] for v in model._nodes_by_depth.values(): relevant_nodes += v def print_row(fields, positions): line = '' for i in range(len(fields)): if i > 0: line = line[:-1] + ' ' line += str(fields[i]) line = line[:positions[i]] line += ' ' * (positions[i] - len(line)) print_fn(line) print_fn('_' * line_length) print_row(to_display, positions) print_fn('=' * line_length) def print_layer_summary(layer): """Prints a summary for a single layer. Arguments: layer: target layer. """ try: output_shape = layer.output_shape except AttributeError: output_shape = 'multiple' except RuntimeError: # output_shape unknown in Eager mode. output_shape = '?' 
name = layer.name cls_name = layer.__class__.__name__ fields = [name + ' (' + cls_name + ')', output_shape, layer.count_params()] print_row(fields, positions) def print_layer_summary_with_connections(layer): """Prints a summary for a single layer (including topological connections). Arguments: layer: target layer. """ try: output_shape = layer.output_shape except AttributeError: output_shape = 'multiple' connections = [] for node in layer._inbound_nodes: if relevant_nodes and node not in relevant_nodes: # node is not part of the current network continue for i in range(len(node.inbound_layers)): inbound_layer = node.inbound_layers[i].name inbound_node_index = node.node_indices[i] inbound_tensor_index = node.tensor_indices[i] connections.append(inbound_layer + '[' + str(inbound_node_index) + '][' + str(inbound_tensor_index) + ']') name = layer.name cls_name = layer.__class__.__name__ if not connections: first_connection = '' else: first_connection = connections[0] fields = [ name + ' (' + cls_name + ')', output_shape, layer.count_params(), first_connection ] print_row(fields, positions) if len(connections) > 1: for i in range(1, len(connections)): fields = ['', '', '', connections[i]] print_row(fields, positions) layers = model.layers for i in range(len(layers)): if sequential_like: print_layer_summary(layers[i]) else: print_layer_summary_with_connections(layers[i]) if i == len(layers) - 1: print_fn('=' * line_length) else: print_fn('_' * line_length) model._check_trainable_weights_consistency() if hasattr(model, '_collected_trainable_weights'): trainable_count = count_params(model._collected_trainable_weights) else: trainable_count = count_params(model.trainable_weights) non_trainable_count = count_params(model.non_trainable_weights) print_fn('Total params: {:,}'.format(trainable_count + non_trainable_count)) print_fn('Trainable params: {:,}'.format(trainable_count)) print_fn('Non-trainable params: {:,}'.format(non_trainable_count)) print_fn('_' * line_length) def 
gather_trainable_weights(trainable, sub_layers, extra_variables): """Lists the trainable weights for an object with sub-layers. Args: trainable: Whether the object collecting the variables is trainable. sub_layers: A flat list of Layer objects owned by this object, to collect variables from. extra_variables: Any extra variables to include. Their `.trainable` property is used to categorize them. Returns: A list of collected trainable weights/variables. """ if not trainable: return [] weights = [] for layer in sub_layers: weights += layer.trainable_weights trainable_extra_variables = [ v for v in extra_variables if v.trainable] return weights + trainable_extra_variables def gather_non_trainable_weights(trainable, sub_layers, extra_variables): """Lists the non-trainable weights for an object with sub-layers. Args: trainable: Whether the object collecting the variables is trainable. sub_layers: A flat list of Layer objects owned by this object, to collect variables from. extra_variables: Any extra variables to include. Their `.trainable` property is used to categorize them. Returns: A list of collected non-trainable weights/variables. """ trainable_extra_variables = [] non_trainable_extra_variables = [] for v in extra_variables: if v.trainable: trainable_extra_variables.append(v) else: non_trainable_extra_variables.append(v) weights = [] for layer in sub_layers: weights += layer.non_trainable_weights if not trainable: trainable_weights = [] for layer in sub_layers: trainable_weights += layer.trainable_weights return (trainable_weights + trainable_extra_variables + weights + non_trainable_extra_variables) return weights + non_trainable_extra_variables @tf_export('keras.utils.convert_all_kernels_in_model') def convert_all_kernels_in_model(model): """Converts all convolution kernels in a model from Theano to TensorFlow. Also works from TensorFlow to Theano. Arguments: model: target model for the conversion. 
""" # Note: SeparableConvolution not included # since only supported by TF. conv_classes = { 'Conv1D', 'Conv2D', 'Conv3D', 'Conv2DTranspose', } to_assign = [] for layer in model.layers: if layer.__class__.__name__ in conv_classes: original_kernel = K.get_value(layer.kernel) converted_kernel = convert_kernel(original_kernel) to_assign.append((layer.kernel, converted_kernel)) K.batch_set_value(to_assign) def convert_dense_weights_data_format(dense, previous_feature_map_shape, target_data_format='channels_first'): """Utility useful when changing a convnet's `data_format`. When porting the weights of a convnet from one data format to the other, if the convnet includes a `Flatten` layer (applied to the last convolutional feature map) followed by a `Dense` layer, the weights of that `Dense` layer should be updated to reflect the new dimension ordering. Arguments: dense: The target `Dense` layer. previous_feature_map_shape: A shape tuple of 3 integers, e.g. `(512, 7, 7)`. The shape of the convolutional feature map right before the `Flatten` layer that came before the target `Dense` layer. target_data_format: One of "channels_last", "channels_first". Set it "channels_last" if converting a "channels_first" model to "channels_last", or reciprocally. """ assert target_data_format in {'channels_last', 'channels_first'} kernel, bias = dense.get_weights() for i in range(kernel.shape[1]): if target_data_format == 'channels_first': c, h, w = previous_feature_map_shape original_fm_shape = (h, w, c) ki = kernel[:, i].reshape(original_fm_shape) ki = np.transpose(ki, (2, 0, 1)) # last -> first else: h, w, c = previous_feature_map_shape original_fm_shape = (c, h, w) ki = kernel[:, i].reshape(original_fm_shape) ki = np.transpose(ki, (1, 2, 0)) # first -> last kernel[:, i] = np.reshape(ki, (np.prod(previous_feature_map_shape),)) dense.set_weights([kernel, bias])
apache-2.0
mapr/hue
desktop/core/ext-py/django-extensions-1.5.0/django_extensions/management/email_notifications.py
37
5300
import sys import traceback from optparse import make_option from django.conf import settings from django.core.mail import send_mail from django.core.management.base import BaseCommand class EmailNotificationCommand(BaseCommand): """A BaseCommand subclass which adds sending email fuctionality. Subclasses will have an extra command line option ``--email-notification`` and will be able to send emails by calling ``send_email_notification()`` if SMTP host and port are specified in settings. The handling of the command line option is left to the management command implementation. Configuration is done in settings.EMAIL_NOTIFICATIONS dict. Configuration example:: EMAIL_NOTIFICATIONS = { 'scripts.my_script': { 'subject': 'my_script subject', 'body': 'my_script body', 'from_email': 'from_email@example.com', 'recipients': ('recipient0@example.com',), 'no_admins': False, 'no_traceback': False, 'notification_level': 0, 'fail_silently': False }, 'scripts.another_script': { ... }, ... } Configuration explained: subject: Email subject. body: Email body. from_email: Email from address. recipients: Sequence of email recipient addresses. no_admins: When True do not include ADMINS to recipients. no_traceback: When True do not include traceback to email body. notification_level: 0: send email on fail, 1: send email always. fail_silently: Parameter passed to django's send_mail(). """ option_list = BaseCommand.option_list + ( make_option('--email-notifications', action='store_true', dest='email_notifications', help='Send email notifications for command.'), make_option('--email-exception', action='store_true', dest='email_exception', help='Send email for command exceptions.'), ) def run_from_argv(self, argv): """Overriden in order to access the command line arguments.""" self.argv_string = ' '.join(argv) super(EmailNotificationCommand, self).run_from_argv(argv) def execute(self, *args, **options): """Overriden in order to send emails on unhandled exception. 
If an unhandled exception in ``def handle(self, *args, **options)`` occurs and `--email-exception` is set or `self.email_exception` is set to True send an email to ADMINS with the traceback and then reraise the exception. """ try: super(EmailNotificationCommand, self).execute(*args, **options) except Exception as e: if (options.get('email_exception', False) or getattr(self, 'email_exception', False)): self.send_email_notification(include_traceback=True) raise e def send_email_notification(self, notification_id=None, include_traceback=False, verbosity=1): """Send email notifications. Reads settings from settings.EMAIL_NOTIFICATIONS dict, if available, using ``notification_id`` as a key or else provides reasonable defaults. """ # Load email notification settings if available if notification_id is not None: try: email_settings = settings.EMAIL_NOTIFICATIONS.get(notification_id, {}) except AttributeError: email_settings = {} else: email_settings = {} # Exit if no traceback found and not in 'notify always' mode if (not include_traceback and not email_settings.get('notification_level', 0)): print(self.style.ERROR("Exiting, not in 'notify always' mode.")) return # Set email fields. 
subject = email_settings.get('subject', "Django extensions email notification.") body = email_settings.get( 'body', "Reporting execution of command: '%s'" % self.argv_string ) # Include traceback if (include_traceback and not email_settings.get('no_traceback', False)): try: exc_type, exc_value, exc_traceback = sys.exc_info() trb = ''.join(traceback.format_tb(exc_traceback)) body += "\n\nTraceback:\n\n%s\n" % trb finally: del exc_traceback # Set from address from_email = email_settings.get('from_email', settings.DEFAULT_FROM_EMAIL) # Calculate recipients recipients = list(email_settings.get('recipients', [])) if not email_settings.get('no_admins', False): recipients.extend([a[1] for a in settings.ADMINS]) if not recipients: if verbosity > 0: print(self.style.ERROR("No email recipients available.")) return # Send email... send_mail(subject, body, from_email, recipients, fail_silently=email_settings.get('fail_silently', True))
apache-2.0
Eric-Zhong/odoo
addons/calendar/__init__.py
391
1038
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Business Applications # Copyright (c) 2011 OpenERP S.A. <http://openerp.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from . import calendar import controllers import contacts
agpl-3.0
Eivindbergman/Skrapa
libs/tornado/test/routing_test.py
24
7594
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import absolute_import, division, print_function from tornado.httputil import HTTPHeaders, HTTPMessageDelegate, HTTPServerConnectionDelegate, ResponseStartLine from tornado.routing import HostMatches, PathMatches, ReversibleRouter, Router, Rule, RuleRouter from tornado.testing import AsyncHTTPTestCase from tornado.web import Application, HTTPError, RequestHandler from tornado.wsgi import WSGIContainer class BasicRouter(Router): def find_handler(self, request, **kwargs): class MessageDelegate(HTTPMessageDelegate): def __init__(self, connection): self.connection = connection def finish(self): self.connection.write_headers( ResponseStartLine("HTTP/1.1", 200, "OK"), HTTPHeaders({"Content-Length": "2"}), b"OK" ) self.connection.finish() return MessageDelegate(request.connection) class BasicRouterTestCase(AsyncHTTPTestCase): def get_app(self): return BasicRouter() def test_basic_router(self): response = self.fetch("/any_request") self.assertEqual(response.body, b"OK") resources = {} class GetResource(RequestHandler): def get(self, path): if path not in resources: raise HTTPError(404) self.finish(resources[path]) class PostResource(RequestHandler): def post(self, path): resources[path] = self.request.body class HTTPMethodRouter(Router): def __init__(self, app): self.app = app def find_handler(self, request, **kwargs): handler = GetResource if request.method == "GET" else PostResource return 
self.app.get_handler_delegate(request, handler, path_args=[request.path]) class HTTPMethodRouterTestCase(AsyncHTTPTestCase): def get_app(self): return HTTPMethodRouter(Application()) def test_http_method_router(self): response = self.fetch("/post_resource", method="POST", body="data") self.assertEqual(response.code, 200) response = self.fetch("/get_resource") self.assertEqual(response.code, 404) response = self.fetch("/post_resource") self.assertEqual(response.code, 200) self.assertEqual(response.body, b"data") def _get_named_handler(handler_name): class Handler(RequestHandler): def get(self, *args, **kwargs): if self.application.settings.get("app_name") is not None: self.write(self.application.settings["app_name"] + ": ") self.finish(handler_name + ": " + self.reverse_url(handler_name)) return Handler FirstHandler = _get_named_handler("first_handler") SecondHandler = _get_named_handler("second_handler") class CustomRouter(ReversibleRouter): def __init__(self): super(CustomRouter, self).__init__() self.routes = {} def add_routes(self, routes): self.routes.update(routes) def find_handler(self, request, **kwargs): if request.path in self.routes: app, handler = self.routes[request.path] return app.get_handler_delegate(request, handler) def reverse_url(self, name, *args): handler_path = '/' + name return handler_path if handler_path in self.routes else None class CustomRouterTestCase(AsyncHTTPTestCase): def get_app(self): class CustomApplication(Application): def reverse_url(self, name, *args): return router.reverse_url(name, *args) router = CustomRouter() app1 = CustomApplication(app_name="app1") app2 = CustomApplication(app_name="app2") router.add_routes({ "/first_handler": (app1, FirstHandler), "/second_handler": (app2, SecondHandler), "/first_handler_second_app": (app2, FirstHandler), }) return router def test_custom_router(self): response = self.fetch("/first_handler") self.assertEqual(response.body, b"app1: first_handler: /first_handler") response = 
self.fetch("/second_handler") self.assertEqual(response.body, b"app2: second_handler: /second_handler") response = self.fetch("/first_handler_second_app") self.assertEqual(response.body, b"app2: first_handler: /first_handler") class ConnectionDelegate(HTTPServerConnectionDelegate): def start_request(self, server_conn, request_conn): class MessageDelegate(HTTPMessageDelegate): def __init__(self, connection): self.connection = connection def finish(self): response_body = b"OK" self.connection.write_headers( ResponseStartLine("HTTP/1.1", 200, "OK"), HTTPHeaders({"Content-Length": str(len(response_body))})) self.connection.write(response_body) self.connection.finish() return MessageDelegate(request_conn) class RuleRouterTest(AsyncHTTPTestCase): def get_app(self): app = Application() def request_callable(request): request.write(b"HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nOK") request.finish() app.add_handlers(".*", [ (HostMatches("www.example.com"), [ (PathMatches("/first_handler"), "tornado.test.routing_test.SecondHandler", {}, "second_handler") ]), Rule(PathMatches("/first_handler"), FirstHandler, name="first_handler"), Rule(PathMatches("/request_callable"), request_callable), ("/connection_delegate", ConnectionDelegate()) ]) return app def test_rule_based_router(self): response = self.fetch("/first_handler") self.assertEqual(response.body, b"first_handler: /first_handler") response = self.fetch("/first_handler", headers={'Host': 'www.example.com'}) self.assertEqual(response.body, b"second_handler: /first_handler") response = self.fetch("/connection_delegate") self.assertEqual(response.body, b"OK") response = self.fetch("/request_callable") self.assertEqual(response.body, b"OK") response = self.fetch("/404") self.assertEqual(response.code, 404) class WSGIContainerTestCase(AsyncHTTPTestCase): def get_app(self): wsgi_app = WSGIContainer(self.wsgi_app) class Handler(RequestHandler): def get(self, *args, **kwargs): self.finish(self.reverse_url("tornado")) return 
RuleRouter([ (PathMatches("/tornado.*"), Application([(r"/tornado/test", Handler, {}, "tornado")])), (PathMatches("/wsgi"), wsgi_app), ]) def wsgi_app(self, environ, start_response): start_response("200 OK", []) return [b"WSGI"] def test_wsgi_container(self): response = self.fetch("/tornado/test") self.assertEqual(response.body, b"/tornado/test") response = self.fetch("/wsgi") self.assertEqual(response.body, b"WSGI")
gpl-3.0
arnavd96/Cinemiezer
myvenv/lib/python3.4/site-packages/music21/alpha/trecento/trecentoCadence.py
1
4279
# -*- coding: utf-8 -*- import unittest import re import copy from music21 import tinyNotation from music21 import environment environLocal = environment.Environment() class CadenceNoteToken(tinyNotation.NoteToken): ''' Subclass of NoteToken where 2.. represents a dotted dotted half note (that is, a dotted half tied to a dotted quarter) instead of a double dotted note. This makes entering Trecento music (which uses this note value often) much easier. 1.. and 4.. etc. are similarly transformed. ''' def dots(self, n, search, pm, t, parent): ''' adds the appropriate number of dots to the right place. Subclassed in TrecentoNotation where two dots has a different meaning. ''' dots = len(search.group(1)) if dots == 1: n.duration.dots = 1 elif dots == 2: n.duration.dotGroups = (1, 1) t = re.sub(pm, '', t) return t class CadenceRestToken(tinyNotation.RestToken): ''' Subclass of RestToken where 2.. represents a dotted dotted half note (that is, a dotted half tied to a dotted quarter) instead of a double dotted note. This makes entering Trecento music (which uses this note value often) much easier. 1.. and 4.. etc. are similarly transformed. ''' def dots(self, n, search, pm, t, parent): ''' adds the appropriate number of dots to the right place. Subclassed in TrecentoNotation where two dots has a different meaning. ''' dots = len(search.group(1)) if dots == 1: n.duration.dots = 1 elif dots == 2: n.duration.dotGroups = (1, 1) t = re.sub(pm, '', t) return t class CadenceConverter(tinyNotation.Converter): ''' Subclass of Tiny Notation that calls these tokens instead of the defaults >>> dLucaGloriaIncipit = alpha.trecento.trecentoCadence.CadenceConverter("6/8 c'2. 
d'8 c'4 a8 f4 f8 a4 c'4 c'8").parse().stream >>> dLucaGloriaIncipit.rightBarline = 'final' >>> dLucaGloriaIncipit.elements (<music21.stream.Measure 1 offset=0.0>, <music21.stream.Measure 2 offset=3.0>, <music21.stream.Measure 3 offset=6.0>) ''' def __init__(self, stringRep=""): super(CadenceConverter, self).__init__(stringRep) self.tokenMap = [ (r'(\d+\/\d+)', tinyNotation.TimeSignatureToken), (r'r(\S*)', CadenceRestToken), (r'(\S*)', CadenceNoteToken), # last ] ###### test routines class Test(unittest.TestCase): def runTest(self): pass def testCopyAndDeepcopy(self): '''Test copying all objects defined in this module ''' import sys for part in sys.modules[self.__module__].__dict__: if part.startswith('_') or part.startswith('__'): continue elif part in ['Test', 'TestExternal']: continue elif callable(part): #environLocal.printDebug(['testing copying on', part]) obj = getattr(self.__module__, part)() a = copy.copy(obj) b = copy.deepcopy(obj) self.assertNotEqual(a, obj) self.assertNotEqual(b, obj) def testDotGroups(self): cn = CadenceConverter('c#2..') cn.parse() a = cn.stream.flat.notes[0] # returns the stored music21 note. self.assertEqual(a.name, 'C#') self.assertEqual(a.duration.type, 'half') self.assertEqual(a.duration.dotGroups, (1,1)) self.assertEqual(a.duration.quarterLength, 4.5) class TestExternal(unittest.TestCase): ''' These objects generate PNGs, etc. ''' def runTest(self): pass def testTrecentoLine(self): ''' should display a 6 beat long line with some triplets ''' st = CadenceConverter('e2 f8 e f trip{g16 f e} d8 c B trip{d16 c B}').parse().stream self.assertAlmostEqual(st.duration.quarterLength, 6.0) st.show() if __name__ == "__main__": import music21 music21.mainTest(Test) #------------------------------------------------------------------------------ # eof
mit
alejo8591/angular-labs
lab15/order/models.py
1
2516
#-*- encoding:utf-8 -*- from django.db import models import datetime from django.utils.text import slugify class Customer(models.Model): customer_name = models.CharField(max_length=128, blank=True, null=True, verbose_name='Nombre', help_text='Ingrese el Nombre Completo.') customer_slug = models.SlugField(max_length=128, blank=True, null=True) customer_address = models.CharField(max_length=64, blank=True, null=True, verbose_name='Direccion', help_text='Ingrese la Direccion del Cliente.') customer_phone = models.CharField(max_length=24, blank=True, null=True, verbose_name='Telefono', help_text='Ingrese el teléfono del Cliente.') """ Campos para auditar el cliente con respecto a la creacion y la actualizacion """ date_created_customer = models.DateTimeField(auto_now=True) date_updated_customer = models.DateTimeField() def save(self, *args, **kwargs): # Tomando la info del tiempo en ese instante date = datetime.datetime.now() self.date_updated_customer = date self.customer_slug = slugify(self.customer_name) super(Customer, self).save(*args, **kwargs) def __str__(self): return self.customer_name class Product(models.Model): product_name = models.CharField(max_length=128, blank=True, null=True, verbose_name='Producto', help_text='Ingrese el Nombre del Producto.') product_price = models.DecimalField(max_digits=64, decimal_places=2, verbose_name='Precio', help_text='Precio del Producto') product_type = models.CharField(max_length=128, blank=True, null=True, verbose_name='Tipo de Producto', help_text='Ingrese el Tipo de Producto al que pertence.') product_description = models.TextField(max_length=400, verbose_name='Descripción del Producto', help_text='Ingrese la Descripción del Producto.') product_likes = models.IntegerField(null=True, blank=True, default=0) def __str__(self): return self.product_name class Stock(models.Model): stock_product_id = models.ForeignKey('Product') stock_quantity = models.IntegerField(max_length=24, verbose_name='Cantidad del Producto', 
help_text='Ingrese la Cantidad de Producto Disponible') def __str__(self): return self.stock_product_id.product_name class Order(models.Model): order_customer_id = models.ForeignKey('Customer') order_product_id = models.ForeignKey('Product') order_amount = models.IntegerField(max_length=64) order_date = models.DateField(auto_now=True) def __str__(self): return self.order_product_id.customer_name
mit
vikatory/kbengine
kbe/src/lib/python/Lib/email/mime/audio.py
159
2674
# Copyright (C) 2001-2007 Python Software Foundation
# Author: Anthony Baxter
# Contact: email-sig@python.org

"""Class representing audio/* type MIME documents."""

__all__ = ['MIMEAudio']

import sndhdr

from io import BytesIO
from email import encoders
from email.mime.nonmultipart import MIMENonMultipart


# Map sndhdr's detected format names onto audio/* MIME subtypes.  Formats
# known to sndhdr but absent here simply have no registered MIME type.
# Additional ones to be added to sndhdr?  midi, mp3, realaudio, wma??
_sndhdr_MIMEmap = {
    'au':   'basic',
    'wav':  'x-wav',
    'aiff': 'x-aiff',
    'aifc': 'x-aiff',
}


def _whatsnd(data):
    """Guess the MIME audio subtype of *data*, or return None.

    sndhdr.what() has an awkward interface, so the individual test
    functions are driven directly here: each probe gets the first 512
    bytes both as a bytes object and as a file-like stream.
    """
    header = data[:512]
    stream = BytesIO(header)
    for probe in sndhdr.tests:
        outcome = probe(header, stream)
        if outcome is not None:
            return _sndhdr_MIMEmap.get(outcome[0])
    return None


class MIMEAudio(MIMENonMultipart):
    """Class for generating audio/* MIME documents."""

    def __init__(self, _audiodata, _subtype=None,
                 _encoder=encoders.encode_base64, **_params):
        """Create an audio/* type MIME document.

        _audiodata: raw audio data as a byte string.  When _subtype is not
        supplied, the subtype is guessed from the data via the standard
        `sndhdr' module; if that also fails, TypeError is raised.

        _encoder: callable invoked with this instance to encode the payload
        for transport; it should use get_payload()/set_payload() and add
        any required headers (e.g. Content-Transfer-Encoding).  Defaults
        to Base64 encoding.

        Remaining keyword arguments become parameters on the Content-Type
        header via the base-class constructor.
        """
        subtype = _subtype if _subtype is not None else _whatsnd(_audiodata)
        if subtype is None:
            raise TypeError('Could not find audio MIME subtype')
        MIMENonMultipart.__init__(self, 'audio', subtype, **_params)
        self.set_payload(_audiodata)
        _encoder(self)
lgpl-3.0
mobify/python-driver
benchmarks/future_batches.py
11
1463
# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from base import benchmark, BenchmarkThread from six.moves import queue log = logging.getLogger(__name__) class Runner(BenchmarkThread): def run(self): futures = queue.Queue(maxsize=121) self.start_profile() for i in range(self.num_queries): if i > 0 and i % 120 == 0: # clear the existing queue while True: try: futures.get_nowait().result() except queue.Empty: break future = self.session.execute_async(self.query, self.values) futures.put_nowait(future) while True: try: futures.get_nowait().result() except queue.Empty: break self.finish_profile() if __name__ == "__main__": benchmark(Runner)
apache-2.0
Pluto-tv/blink-crosswalk
Tools/Scripts/webkitpy/style/checkers/cpp_unittest.py
7
237385
# -*- coding: utf-8; -*- # # Copyright (C) 2011 Google Inc. All rights reserved. # Copyright (C) 2009 Torch Mobile Inc. # Copyright (C) 2009 Apple Inc. All rights reserved. # Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Unit test for cpp_style.py.""" # FIXME: Add a good test that tests UpdateIncludeState. 
import os import random import re import unittest import cpp as cpp_style from cpp import CppChecker from ..filter import FilterConfiguration from webkitpy.common.system.filesystem import FileSystem # This class works as an error collector and replaces cpp_style.Error # function for the unit tests. We also verify each category we see # is in STYLE_CATEGORIES, to help keep that list up to date. class ErrorCollector: _all_style_categories = CppChecker.categories # This is a list including all categories seen in any unit test. _seen_style_categories = {} def __init__(self, assert_fn, filter=None, lines_to_check=None): """assert_fn: a function to call when we notice a problem. filter: filters the errors that we are concerned about.""" self._assert_fn = assert_fn self._errors = [] self._lines_to_check = lines_to_check if not filter: filter = FilterConfiguration() self._filter = filter def __call__(self, line_number, category, confidence, message): self._assert_fn(category in self._all_style_categories, 'Message "%s" has category "%s",' ' which is not in STYLE_CATEGORIES' % (message, category)) if self._lines_to_check and not line_number in self._lines_to_check: return False if self._filter.should_check(category, ""): self._seen_style_categories[category] = 1 self._errors.append('%s [%s] [%d]' % (message, category, confidence)) return True def results(self): if len(self._errors) < 2: return ''.join(self._errors) # Most tests expect to have a string. else: return self._errors # Let's give a list if there is more than one. def result_list(self): return self._errors def verify_all_categories_are_seen(self): """Fails if there's a category in _all_style_categories - _seen_style_categories. This should only be called after all tests are run, so _seen_style_categories has had a chance to fully populate. Since this isn't called from within the normal unittest framework, we can't use the normal unittest assert macros. Instead we just exit when we see an error. 
Good thing this test is always run last! """ for category in self._all_style_categories: if category not in self._seen_style_categories: import sys sys.exit('FATAL ERROR: There are no tests for category "%s"' % category) class CppFunctionsTest(unittest.TestCase): """Supports testing functions that do not need CppStyleTestBase.""" def test_convert_to_lower_with_underscores(self): self.assertEqual(cpp_style._convert_to_lower_with_underscores('ABC'), 'abc') self.assertEqual(cpp_style._convert_to_lower_with_underscores('aB'), 'a_b') self.assertEqual(cpp_style._convert_to_lower_with_underscores('isAName'), 'is_a_name') self.assertEqual(cpp_style._convert_to_lower_with_underscores('AnotherTest'), 'another_test') self.assertEqual(cpp_style._convert_to_lower_with_underscores('PassRefPtr<MyClass>'), 'pass_ref_ptr<my_class>') self.assertEqual(cpp_style._convert_to_lower_with_underscores('_ABC'), '_abc') def test_create_acronym(self): self.assertEqual(cpp_style._create_acronym('ABC'), 'ABC') self.assertEqual(cpp_style._create_acronym('IsAName'), 'IAN') self.assertEqual(cpp_style._create_acronym('PassRefPtr<MyClass>'), 'PRP<MC>') def test_is_c_or_objective_c(self): clean_lines = cpp_style.CleansedLines(['']) clean_objc_lines = cpp_style.CleansedLines(['#import "header.h"']) self.assertTrue(cpp_style._FileState(clean_lines, 'c').is_c_or_objective_c()) self.assertTrue(cpp_style._FileState(clean_lines, 'm').is_c_or_objective_c()) self.assertFalse(cpp_style._FileState(clean_lines, 'cpp').is_c_or_objective_c()) self.assertFalse(cpp_style._FileState(clean_lines, 'cc').is_c_or_objective_c()) self.assertFalse(cpp_style._FileState(clean_lines, 'h').is_c_or_objective_c()) self.assertTrue(cpp_style._FileState(clean_objc_lines, 'h').is_c_or_objective_c()) def test_parameter(self): # Test type. 
parameter = cpp_style.Parameter('ExceptionCode', 13, 1) self.assertEqual(parameter.type, 'ExceptionCode') self.assertEqual(parameter.name, '') self.assertEqual(parameter.row, 1) # Test type and name. parameter = cpp_style.Parameter('PassRefPtr<MyClass> parent', 19, 1) self.assertEqual(parameter.type, 'PassRefPtr<MyClass>') self.assertEqual(parameter.name, 'parent') self.assertEqual(parameter.row, 1) # Test type, no name, with default value. parameter = cpp_style.Parameter('MyClass = 0', 7, 0) self.assertEqual(parameter.type, 'MyClass') self.assertEqual(parameter.name, '') self.assertEqual(parameter.row, 0) # Test type, name, and default value. parameter = cpp_style.Parameter('MyClass a = 0', 7, 0) self.assertEqual(parameter.type, 'MyClass') self.assertEqual(parameter.name, 'a') self.assertEqual(parameter.row, 0) def test_single_line_view(self): start_position = cpp_style.Position(row=1, column=1) end_position = cpp_style.Position(row=3, column=1) single_line_view = cpp_style.SingleLineView(['0', 'abcde', 'fgh', 'i'], start_position, end_position) self.assertEqual(single_line_view.single_line, 'bcde fgh i') self.assertEqual(single_line_view.convert_column_to_row(0), 1) self.assertEqual(single_line_view.convert_column_to_row(4), 1) self.assertEqual(single_line_view.convert_column_to_row(5), 2) self.assertEqual(single_line_view.convert_column_to_row(8), 2) self.assertEqual(single_line_view.convert_column_to_row(9), 3) self.assertEqual(single_line_view.convert_column_to_row(100), 3) start_position = cpp_style.Position(row=0, column=3) end_position = cpp_style.Position(row=0, column=4) single_line_view = cpp_style.SingleLineView(['abcdef'], start_position, end_position) self.assertEqual(single_line_view.single_line, 'd') def test_create_skeleton_parameters(self): self.assertEqual(cpp_style.create_skeleton_parameters(''), '') self.assertEqual(cpp_style.create_skeleton_parameters(' '), ' ') self.assertEqual(cpp_style.create_skeleton_parameters('long'), 'long,') 
self.assertEqual(cpp_style.create_skeleton_parameters('const unsigned long int'), ' int,') self.assertEqual(cpp_style.create_skeleton_parameters('long int*'), ' int ,') self.assertEqual(cpp_style.create_skeleton_parameters('PassRefPtr<Foo> a'), 'PassRefPtr a,') self.assertEqual(cpp_style.create_skeleton_parameters( 'ComplexTemplate<NestedTemplate1<MyClass1, MyClass2>, NestedTemplate1<MyClass1, MyClass2> > param, int second'), 'ComplexTemplate param, int second,') self.assertEqual(cpp_style.create_skeleton_parameters('int = 0, Namespace::Type& a'), 'int , Type a,') # Create skeleton parameters is a bit too aggressive with function variables, but # it allows for parsing other parameters and declarations like this are rare. self.assertEqual(cpp_style.create_skeleton_parameters('void (*fn)(int a, int b), Namespace::Type& a'), 'void , Type a,') # This doesn't look like functions declarations but the simplifications help to eliminate false positives. self.assertEqual(cpp_style.create_skeleton_parameters('b{d}'), 'b ,') def test_find_parameter_name_index(self): self.assertEqual(cpp_style.find_parameter_name_index(' int a '), 5) self.assertEqual(cpp_style.find_parameter_name_index(' PassRefPtr '), 16) self.assertEqual(cpp_style.find_parameter_name_index('double'), 6) def test_parameter_list(self): elided_lines = ['int blah(PassRefPtr<MyClass> paramName,', 'const Other1Class& foo,', 'const ComplexTemplate<Class1, NestedTemplate<P1, P2> >* const * param = new ComplexTemplate<Class1, NestedTemplate<P1, P2> >(34, 42),', 'int* myCount = 0);'] start_position = cpp_style.Position(row=0, column=8) end_position = cpp_style.Position(row=3, column=16) expected_parameters = ({'type': 'PassRefPtr<MyClass>', 'name': 'paramName', 'row': 0}, {'type': 'const Other1Class&', 'name': 'foo', 'row': 1}, {'type': 'const ComplexTemplate<Class1, NestedTemplate<P1, P2> >* const *', 'name': 'param', 'row': 2}, {'type': 'int*', 'name': 'myCount', 'row': 3}) index = 0 for parameter in 
cpp_style.parameter_list(elided_lines, start_position, end_position): expected_parameter = expected_parameters[index] self.assertEqual(parameter.type, expected_parameter['type']) self.assertEqual(parameter.name, expected_parameter['name']) self.assertEqual(parameter.row, expected_parameter['row']) index += 1 self.assertEqual(index, len(expected_parameters)) def test_check_parameter_against_text(self): error_collector = ErrorCollector(self.assertTrue) parameter = cpp_style.Parameter('FooF ooF', 4, 1) self.assertFalse(cpp_style._check_parameter_name_against_text(parameter, 'FooF', error_collector)) self.assertEqual(error_collector.results(), 'The parameter name "ooF" adds no information, so it should be removed. [readability/parameter_name] [5]') class CppStyleTestBase(unittest.TestCase): """Provides some useful helper functions for cpp_style tests. Attributes: min_confidence: An integer that is the current minimum confidence level for the tests. """ # FIXME: Refactor the unit tests so the confidence level is passed # explicitly, just like it is in the real code. min_confidence = 1; # Helper function to avoid needing to explicitly pass confidence # in all the unit test calls to cpp_style.process_file_data(). def process_file_data(self, filename, file_extension, lines, error, fs=None): """Call cpp_style.process_file_data() with the min_confidence.""" return cpp_style.process_file_data(filename, file_extension, lines, error, self.min_confidence, fs) def perform_lint(self, code, filename, basic_error_rules, fs=None, lines_to_check=None): error_collector = ErrorCollector(self.assertTrue, FilterConfiguration(basic_error_rules), lines_to_check) lines = code.split('\n') extension = filename.split('.')[1] self.process_file_data(filename, extension, lines, error_collector, fs) return error_collector.results() # Perform lint on single line of input and return the error message. 
def perform_single_line_lint(self, code, filename): basic_error_rules = ('-build/header_guard', '-legal/copyright', '-readability/fn_size', '-readability/parameter_name', '-readability/pass_ptr', '-whitespace/ending_newline') return self.perform_lint(code, filename, basic_error_rules) # Perform lint over multiple lines and return the error message. def perform_multi_line_lint(self, code, file_extension): basic_error_rules = ('-build/header_guard', '-legal/copyright', '-readability/parameter_name', '-whitespace/ending_newline') return self.perform_lint(code, 'test.' + file_extension, basic_error_rules) # Only keep some errors related to includes, namespaces and rtti. def perform_language_rules_check(self, filename, code, lines_to_check=None): basic_error_rules = ('-', '+build/include', '+build/include_order', '+build/namespaces', '+runtime/rtti') return self.perform_lint(code, filename, basic_error_rules, lines_to_check=lines_to_check) # Only keep function length errors. def perform_function_lengths_check(self, code): basic_error_rules = ('-', '+readability/fn_size') return self.perform_lint(code, 'test.cpp', basic_error_rules) # Only keep pass ptr errors. def perform_pass_ptr_check(self, code): basic_error_rules = ('-', '+readability/pass_ptr') return self.perform_lint(code, 'test.cpp', basic_error_rules) # Only keep leaky pattern errors. def perform_leaky_pattern_check(self, code): basic_error_rules = ('-', '+runtime/leaky_pattern') return self.perform_lint(code, 'test.cpp', basic_error_rules) # Only include what you use errors. 
def perform_include_what_you_use(self, code, filename='foo.h', fs=None): basic_error_rules = ('-', '+build/include_what_you_use') return self.perform_lint(code, filename, basic_error_rules, fs) def perform_avoid_static_cast_of_objects(self, code, filename='foo.cpp', fs=None): basic_error_rules = ('-', '+runtime/casting') return self.perform_lint(code, filename, basic_error_rules, fs) # Perform lint and compare the error message with "expected_message". def assert_lint(self, code, expected_message, file_name='foo.cpp'): self.assertEqual(expected_message, self.perform_single_line_lint(code, file_name)) def assert_lint_one_of_many_errors_re(self, code, expected_message_re, file_name='foo.cpp'): messages = self.perform_single_line_lint(code, file_name) for message in messages: if re.search(expected_message_re, message): return self.assertEqual(expected_message_re, messages) def assert_multi_line_lint(self, code, expected_message, file_name='foo.h'): file_extension = file_name[file_name.rfind('.') + 1:] self.assertEqual(expected_message, self.perform_multi_line_lint(code, file_extension)) def assert_multi_line_lint_re(self, code, expected_message_re, file_name='foo.h'): file_extension = file_name[file_name.rfind('.') + 1:] message = self.perform_multi_line_lint(code, file_extension) if not re.search(expected_message_re, message): self.fail('Message was:\n' + message + 'Expected match to "' + expected_message_re + '"') def assert_language_rules_check(self, file_name, code, expected_message, lines_to_check=None): self.assertEqual(expected_message, self.perform_language_rules_check(file_name, code, lines_to_check)) def assert_include_what_you_use(self, code, expected_message): self.assertEqual(expected_message, self.perform_include_what_you_use(code)) def assert_blank_lines_check(self, lines, start_errors, end_errors): error_collector = ErrorCollector(self.assertTrue) self.process_file_data('foo.cpp', 'cpp', lines, error_collector) self.assertEqual( start_errors, 
error_collector.results().count( 'Blank line at the start of a code block. Is this needed?' ' [whitespace/blank_line] [2]')) self.assertEqual( end_errors, error_collector.results().count( 'Blank line at the end of a code block. Is this needed?' ' [whitespace/blank_line] [3]')) def assert_positions_equal(self, position, tuple_position): """Checks if the two positions are equal. position: a cpp_style.Position object. tuple_position: a tuple (row, column) to compare against.""" self.assertEqual(position, cpp_style.Position(tuple_position[0], tuple_position[1]), 'position %s, tuple_position %s' % (position, tuple_position)) class FunctionDetectionTest(CppStyleTestBase): def perform_function_detection(self, lines, function_information, detection_line=0): clean_lines = cpp_style.CleansedLines(lines) function_state = cpp_style._FunctionState(5) error_collector = ErrorCollector(self.assertTrue) cpp_style.detect_functions(clean_lines, detection_line, function_state, error_collector) if not function_information: self.assertEqual(function_state.in_a_function, False) return self.assertEqual(function_state.in_a_function, True) self.assertEqual(function_state.current_function, function_information['name'] + '()') self.assertEqual(function_state.modifiers_and_return_type(), function_information['modifiers_and_return_type']) self.assertEqual(function_state.is_pure, function_information['is_pure']) self.assertEqual(function_state.is_declaration, function_information['is_declaration']) self.assert_positions_equal(function_state.function_name_start_position, function_information['function_name_start_position']) self.assert_positions_equal(function_state.parameter_start_position, function_information['parameter_start_position']) self.assert_positions_equal(function_state.parameter_end_position, function_information['parameter_end_position']) self.assert_positions_equal(function_state.body_start_position, function_information['body_start_position']) 
self.assert_positions_equal(function_state.end_position, function_information['end_position']) expected_parameters = function_information.get('parameter_list') if expected_parameters: actual_parameters = function_state.parameter_list() self.assertEqual(len(actual_parameters), len(expected_parameters)) for index in range(len(expected_parameters)): actual_parameter = actual_parameters[index] expected_parameter = expected_parameters[index] self.assertEqual(actual_parameter.type, expected_parameter['type']) self.assertEqual(actual_parameter.name, expected_parameter['name']) self.assertEqual(actual_parameter.row, expected_parameter['row']) def test_basic_function_detection(self): self.perform_function_detection( ['void theTestFunctionName(int) {', '}'], {'name': 'theTestFunctionName', 'modifiers_and_return_type': 'void', 'function_name_start_position': (0, 5), 'parameter_start_position': (0, 24), 'parameter_end_position': (0, 29), 'body_start_position': (0, 30), 'end_position': (1, 1), 'is_pure': False, 'is_declaration': False}) def test_function_declaration_detection(self): self.perform_function_detection( ['void aFunctionName(int);'], {'name': 'aFunctionName', 'modifiers_and_return_type': 'void', 'function_name_start_position': (0, 5), 'parameter_start_position': (0, 18), 'parameter_end_position': (0, 23), 'body_start_position': (0, 23), 'end_position': (0, 24), 'is_pure': False, 'is_declaration': True}) self.perform_function_detection( ['CheckedInt<T> operator /(const CheckedInt<T> &lhs, const CheckedInt<T> &rhs);'], {'name': 'operator /', 'modifiers_and_return_type': 'CheckedInt<T>', 'function_name_start_position': (0, 14), 'parameter_start_position': (0, 24), 'parameter_end_position': (0, 76), 'body_start_position': (0, 76), 'end_position': (0, 77), 'is_pure': False, 'is_declaration': True}) self.perform_function_detection( ['CheckedInt<T> operator -(const CheckedInt<T> &lhs, const CheckedInt<T> &rhs);'], {'name': 'operator -', 'modifiers_and_return_type': 
'CheckedInt<T>', 'function_name_start_position': (0, 14), 'parameter_start_position': (0, 24), 'parameter_end_position': (0, 76), 'body_start_position': (0, 76), 'end_position': (0, 77), 'is_pure': False, 'is_declaration': True}) self.perform_function_detection( ['CheckedInt<T> operator !=(const CheckedInt<T> &lhs, const CheckedInt<T> &rhs);'], {'name': 'operator !=', 'modifiers_and_return_type': 'CheckedInt<T>', 'function_name_start_position': (0, 14), 'parameter_start_position': (0, 25), 'parameter_end_position': (0, 77), 'body_start_position': (0, 77), 'end_position': (0, 78), 'is_pure': False, 'is_declaration': True}) self.perform_function_detection( ['CheckedInt<T> operator +(const CheckedInt<T> &lhs, const CheckedInt<T> &rhs);'], {'name': 'operator +', 'modifiers_and_return_type': 'CheckedInt<T>', 'function_name_start_position': (0, 14), 'parameter_start_position': (0, 24), 'parameter_end_position': (0, 76), 'body_start_position': (0, 76), 'end_position': (0, 77), 'is_pure': False, 'is_declaration': True}) def test_pure_function_detection(self): self.perform_function_detection( ['virtual void theTestFunctionName(int = 0);'], {'name': 'theTestFunctionName', 'modifiers_and_return_type': 'virtual void', 'function_name_start_position': (0, 13), 'parameter_start_position': (0, 32), 'parameter_end_position': (0, 41), 'body_start_position': (0, 41), 'end_position': (0, 42), 'is_pure': False, 'is_declaration': True}) self.perform_function_detection( ['virtual void theTestFunctionName(int) = 0;'], {'name': 'theTestFunctionName', 'modifiers_and_return_type': 'virtual void', 'function_name_start_position': (0, 13), 'parameter_start_position': (0, 32), 'parameter_end_position': (0, 37), 'body_start_position': (0, 41), 'end_position': (0, 42), 'is_pure': True, 'is_declaration': True}) # Hopefully, no one writes code like this but it is a tricky case. 
self.perform_function_detection( ['virtual void theTestFunctionName(int)', ' = ', ' 0 ;'], {'name': 'theTestFunctionName', 'modifiers_and_return_type': 'virtual void', 'function_name_start_position': (0, 13), 'parameter_start_position': (0, 32), 'parameter_end_position': (0, 37), 'body_start_position': (2, 3), 'end_position': (2, 4), 'is_pure': True, 'is_declaration': True}) def test_ignore_macros(self): self.perform_function_detection(['void aFunctionName(int); \\'], None) def test_non_functions(self): # This case exposed an error because the open brace was in quotes. self.perform_function_detection( ['asm(', ' "stmdb sp!, {r1-r3}" "\n"', ');'], # This isn't a function but it looks like one to our simple # algorithm and that is ok. {'name': 'asm', 'modifiers_and_return_type': '', 'function_name_start_position': (0, 0), 'parameter_start_position': (0, 3), 'parameter_end_position': (2, 1), 'body_start_position': (2, 1), 'end_position': (2, 2), 'is_pure': False, 'is_declaration': True}) # Simple test case with something that is not a function. self.perform_function_detection(['class Stuff;'], None) def test_parameter_list(self): # A function with no arguments. function_state = self.perform_function_detection( ['void functionName();'], {'name': 'functionName', 'modifiers_and_return_type': 'void', 'function_name_start_position': (0, 5), 'parameter_start_position': (0, 17), 'parameter_end_position': (0, 19), 'body_start_position': (0, 19), 'end_position': (0, 20), 'is_pure': False, 'is_declaration': True, 'parameter_list': ()}) # A function with one argument. 
function_state = self.perform_function_detection( ['void functionName(int);'], {'name': 'functionName', 'modifiers_and_return_type': 'void', 'function_name_start_position': (0, 5), 'parameter_start_position': (0, 17), 'parameter_end_position': (0, 22), 'body_start_position': (0, 22), 'end_position': (0, 23), 'is_pure': False, 'is_declaration': True, 'parameter_list': ({'type': 'int', 'name': '', 'row': 0},)}) # A function with unsigned and short arguments function_state = self.perform_function_detection( ['void functionName(unsigned a, short b, long c, long long short unsigned int);'], {'name': 'functionName', 'modifiers_and_return_type': 'void', 'function_name_start_position': (0, 5), 'parameter_start_position': (0, 17), 'parameter_end_position': (0, 76), 'body_start_position': (0, 76), 'end_position': (0, 77), 'is_pure': False, 'is_declaration': True, 'parameter_list': ({'type': 'unsigned', 'name': 'a', 'row': 0}, {'type': 'short', 'name': 'b', 'row': 0}, {'type': 'long', 'name': 'c', 'row': 0}, {'type': 'long long short unsigned int', 'name': '', 'row': 0})}) # Some parameter type with modifiers and no parameter names. function_state = self.perform_function_detection( ['virtual void determineARIADropEffects(Vector<String>*&, const unsigned long int*&, const MediaPlayer::Preload, Other<Other2, Other3<P1, P2> >, int);'], {'name': 'determineARIADropEffects', 'modifiers_and_return_type': 'virtual void', 'parameter_start_position': (0, 37), 'function_name_start_position': (0, 13), 'parameter_end_position': (0, 147), 'body_start_position': (0, 147), 'end_position': (0, 148), 'is_pure': False, 'is_declaration': True, 'parameter_list': ({'type': 'Vector<String>*&', 'name': '', 'row': 0}, {'type': 'const unsigned long int*&', 'name': '', 'row': 0}, {'type': 'const MediaPlayer::Preload', 'name': '', 'row': 0}, {'type': 'Other<Other2, Other3<P1, P2> >', 'name': '', 'row': 0}, {'type': 'int', 'name': '', 'row': 0})}) # Try parsing a function with a very complex definition. 
function_state = self.perform_function_detection( ['#define MyMacro(a) a', 'virtual', 'AnotherTemplate<Class1, Class2> aFunctionName(PassRefPtr<MyClass> paramName,', 'const Other1Class& foo,', 'const ComplexTemplate<Class1, NestedTemplate<P1, P2> >* const * param = new ComplexTemplate<Class1, NestedTemplate<P1, P2> >(34, 42),', 'int* myCount = 0);'], {'name': 'aFunctionName', 'modifiers_and_return_type': 'virtual AnotherTemplate<Class1, Class2>', 'function_name_start_position': (2, 32), 'parameter_start_position': (2, 45), 'parameter_end_position': (5, 17), 'body_start_position': (5, 17), 'end_position': (5, 18), 'is_pure': False, 'is_declaration': True, 'parameter_list': ({'type': 'PassRefPtr<MyClass>', 'name': 'paramName', 'row': 2}, {'type': 'const Other1Class&', 'name': 'foo', 'row': 3}, {'type': 'const ComplexTemplate<Class1, NestedTemplate<P1, P2> >* const *', 'name': 'param', 'row': 4}, {'type': 'int*', 'name': 'myCount', 'row': 5})}, detection_line=2) class CppStyleTest(CppStyleTestBase): def test_asm_lines_ignored(self): self.assert_lint( '__asm mov [registration], eax', '') # Test get line width. def test_get_line_width(self): self.assertEqual(0, cpp_style.get_line_width('')) self.assertEqual(10, cpp_style.get_line_width(u'x' * 10)) self.assertEqual(16, cpp_style.get_line_width(u'都|道|府|県|支庁')) def test_find_next_multi_line_comment_start(self): self.assertEqual(1, cpp_style.find_next_multi_line_comment_start([''], 0)) lines = ['a', 'b', '/* c'] self.assertEqual(2, cpp_style.find_next_multi_line_comment_start(lines, 0)) lines = ['char a[] = "/*";'] # not recognized as comment. 
        self.assertEqual(1, cpp_style.find_next_multi_line_comment_start(lines, 0))

    def test_find_next_multi_line_comment_end(self):
        """find_next_multi_line_comment_end() returns the index of the line with the closing */."""
        self.assertEqual(1, cpp_style.find_next_multi_line_comment_end([''], 0))
        lines = ['a', 'b', ' c */']
        self.assertEqual(2, cpp_style.find_next_multi_line_comment_end(lines, 0))

    def test_remove_multi_line_comments_from_range(self):
        """Lines inside a /*...*/ block are replaced by '// dummy' placeholders in place."""
        lines = ['a', ' /* comment ', ' * still comment', ' comment */ ', 'b']
        cpp_style.remove_multi_line_comments_from_range(lines, 1, 4)
        self.assertEqual(['a', '// dummy', '// dummy', '// dummy', 'b'], lines)

    def test_position(self):
        """Position supports (row, column) access, ordering comparisons, and str()."""
        position = cpp_style.Position(3, 4)
        self.assert_positions_equal(position, (3, 4))
        self.assertEqual(position.row, 3)
        # Ordering is row-major: row dominates, column breaks ties.
        self.assertTrue(position > cpp_style.Position(position.row - 1, position.column + 1))
        self.assertTrue(position > cpp_style.Position(position.row, position.column - 1))
        self.assertTrue(position < cpp_style.Position(position.row, position.column + 1))
        self.assertTrue(position < cpp_style.Position(position.row + 1, position.column - 1))
        self.assertEqual(position.__str__(), '(3, 4)')

    def test_rfind_in_lines(self):
        """_rfind_in_lines() searches backwards from start_position for a regex match."""
        not_found_position = cpp_style.Position(10, 11)
        start_position = cpp_style.Position(2, 2)
        lines = ['ab', 'ace', 'test']
        self.assertEqual(not_found_position,
                         cpp_style._rfind_in_lines('st', lines, start_position, not_found_position))
        self.assertTrue(cpp_style.Position(1, 1) ==
                        cpp_style._rfind_in_lines('a', lines, start_position, not_found_position))
        self.assertEqual(cpp_style.Position(2, 2),
                         cpp_style._rfind_in_lines('(te|a)', lines, start_position, not_found_position))

    def test_close_expression(self):
        """close_expression() finds the position just past the matching closing bracket."""
        self.assertEqual(cpp_style.Position(1, -1),
                         cpp_style.close_expression([')('], cpp_style.Position(0, 1)))
        self.assertEqual(cpp_style.Position(1, -1),
                         cpp_style.close_expression([') ()'], cpp_style.Position(0, 1)))
        self.assertEqual(cpp_style.Position(0, 4),
                         cpp_style.close_expression([')[)]'], cpp_style.Position(0, 1)))
        self.assertEqual(cpp_style.Position(0, 5),
                         cpp_style.close_expression(['}{}{}'], cpp_style.Position(0, 3)))
        self.assertEqual(cpp_style.Position(1, 1),
                         cpp_style.close_expression(['}{}{', '}'], cpp_style.Position(0, 3)))
        self.assertEqual(cpp_style.Position(2, -1),
                         cpp_style.close_expression(['][][', ' '], cpp_style.Position(0, 3)))

    def test_spaces_at_end_of_line(self):
        """Trailing whitespace on a line is flagged."""
        self.assert_lint(
            '// Hello there ',
            'Line ends in whitespace. Consider deleting these extra spaces.'
            ' [whitespace/end_of_line] [4]')

    # Test C-style cast cases.
    def test_cstyle_cast(self):
        """C-style casts like (int)x are flagged with the suggested C++ cast."""
        self.assert_lint(
            'int a = (int)1.0;',
            'Using C-style cast. Use static_cast<int>(...) instead'
            ' [readability/casting] [4]')
        # In a .c file, reinterpret_cast is suggested for pointer casts.
        self.assert_lint(
            'int *a = (int *)DEFINED_VALUE;',
            'Using C-style cast. Use reinterpret_cast<int *>(...) instead'
            ' [readability/casting] [4]', 'foo.c')
        self.assert_lint(
            'uint16 a = (uint16)1.0;',
            'Using C-style cast. Use static_cast<uint16>(...) instead'
            ' [readability/casting] [4]')
        self.assert_lint(
            'int32 a = (int32)1.0;',
            'Using C-style cast. Use static_cast<int32>(...) instead'
            ' [readability/casting] [4]')
        self.assert_lint(
            'uint64 a = (uint64)1.0;',
            'Using C-style cast. Use static_cast<uint64>(...) instead'
            ' [readability/casting] [4]')

    # Test taking address of casts (runtime/casting)
    def test_runtime_casting(self):
        """Taking the address of a cast result (&static_cast<...>(x)) is flagged."""
        self.assert_lint(
            'int* x = &static_cast<int*>(foo);',
            'Are you taking an address of a cast? '
            'This is dangerous: could be a temp var. '
            'Take the address before doing the cast, rather than after'
            ' [runtime/casting] [4]')
        # dynamic_cast additionally triggers the RTTI warning.
        self.assert_lint(
            'int* x = &dynamic_cast<int *>(foo);',
            ['Are you taking an address of a cast? '
             'This is dangerous: could be a temp var. '
             'Take the address before doing the cast, rather than after'
             ' [runtime/casting] [4]',
             'Do not use dynamic_cast<>. If you need to cast within a class '
             'hierarchy, use static_cast<> to upcast. Google doesn\'t support '
             'RTTI. [runtime/rtti] [5]'])
        self.assert_lint(
            'int* x = &reinterpret_cast<int *>(foo);',
            'Are you taking an address of a cast? '
            'This is dangerous: could be a temp var. '
            'Take the address before doing the cast, rather than after'
            ' [runtime/casting] [4]')

        # It's OK to cast an address.
        self.assert_lint(
            'int* x = reinterpret_cast<int *>(&foo);',
            '')

    def test_runtime_selfinit(self):
        """Initializing a member with itself in an initializer list (r_(r_)) is flagged."""
        self.assert_lint(
            'Foo::Foo(Bar r, Bel l) : r_(r_), l_(l_) { }',
            'You seem to be initializing a member variable with itself.'
            ' [runtime/init] [4]')
        self.assert_lint(
            'Foo::Foo(Bar r, Bel l) : r_(r), l_(l) { }',
            '')
        # Initializing one member from another member is fine.
        self.assert_lint(
            'Foo::Foo(Bar r) : r_(r), l_(r_), ll_(l_) { }',
            '')

    def test_runtime_rtti(self):
        """dynamic_cast<> is flagged in both implementation and header files."""
        statement = 'int* x = dynamic_cast<int*>(&foo);'
        error_message = (
            'Do not use dynamic_cast<>. If you need to cast within a class '
            'hierarchy, use static_cast<> to upcast. Google doesn\'t support '
            'RTTI. [runtime/rtti] [5]')
        # dynamic_cast is disallowed in most files.
        self.assert_language_rules_check('foo.cpp', statement, error_message)
        self.assert_language_rules_check('foo.h', statement, error_message)

    # Tests for static_cast readability.
    def test_static_cast_on_objects_with_toFoo(self):
        """When the header declares a toFoo() helper, static_cast to Foo* suggests using it."""
        mock_header_contents = ['inline Foo* toFoo(Bar* bar)']
        fs = FileSystem()
        orig_read_text_file_fn = fs.read_text_file

        # Stub out header reading so the checker "sees" our mock header.
        def mock_read_text_file_fn(path):
            return mock_header_contents

        try:
            fs.read_text_file = mock_read_text_file_fn
            message = self.perform_avoid_static_cast_of_objects(
                'Foo* x = static_cast<Foo*>(bar);',
                filename='casting.cpp',
                fs=fs)
            self.assertEqual(message,
                             'static_cast of class objects is not allowed. Use toFoo defined in Foo.h.'
                             ' [runtime/casting] [4]')
        finally:
            # Always restore the real file reader, even if the assertion fails.
            fs.read_text_file = orig_read_text_file_fn

    def test_static_cast_on_objects_without_toFoo(self):
        """When no toFoo() helper exists, the checker suggests adding one."""
        mock_header_contents = ['inline FooBar* toFooBar(Bar* bar)']
        fs = FileSystem()
        orig_read_text_file_fn = fs.read_text_file

        # Stub out header reading so the checker "sees" our mock header.
        def mock_read_text_file_fn(path):
            return mock_header_contents

        try:
            fs.read_text_file = mock_read_text_file_fn
            message = self.perform_avoid_static_cast_of_objects(
                'Foo* x = static_cast<Foo*>(bar);',
                filename='casting.cpp',
                fs=fs)
            self.assertEqual(message,
                             'static_cast of class objects is not allowed. Add toFoo in Foo.h and use it instead.'
                             ' [runtime/casting] [4]')
        finally:
            fs.read_text_file = orig_read_text_file_fn

    # We cannot test this functionality because of difference of
    # function definitions. Anyway, we may never enable this.
    #
    # # Test for unnamed arguments in a method.
    # def test_check_for_unnamed_params(self):
    #     message = ('All parameters should be named in a function'
    #                ' [readability/function] [3]')
    #     self.assert_lint('virtual void A(int*) const;', message)
    #     self.assert_lint('virtual void B(void (*fn)(int*));', message)
    #     self.assert_lint('virtual void C(int*);', message)
    #     self.assert_lint('void *(*f)(void *) = x;', message)
    #     self.assert_lint('void Method(char*) {', message)
    #     self.assert_lint('void Method(char*);', message)
    #     self.assert_lint('void Method(char* /*x*/);', message)
    #     self.assert_lint('typedef void (*Method)(int32);', message)
    #     self.assert_lint('static void operator delete[](void*) throw();', message)
    #
    #     self.assert_lint('virtual void D(int* p);', '')
    #     self.assert_lint('void operator delete(void* x) throw();', '')
    #     self.assert_lint('void Method(char* x)\n{', '')
    #     self.assert_lint('void Method(char* /*x*/)\n{', '')
    #     self.assert_lint('void Method(char* x);', '')
    #     self.assert_lint('typedef void (*Method)(int32 x);', '')
    #     self.assert_lint('static void operator delete[](void* x) throw();', '')
    #     self.assert_lint('static void operator delete[](void* /*x*/) throw();', '')
    #
    #     # This one should technically warn, but doesn't because the function
    #     # pointer is confusing.
    #     self.assert_lint('virtual void E(void (*fn)(int* p));', '')

    # Test deprecated casts such as int(d)
    def test_deprecated_cast(self):
        """Function-style casts like int(2.2) are flagged; constructors are not."""
        self.assert_lint(
            'int a = int(2.2);',
            'Using deprecated casting style. '
            'Use static_cast<int>(...) instead'
            ' [readability/casting] [4]')
        # Checks for false positives...
        self.assert_lint(
            'int a = int(); // Constructor, o.k.',
            '')
        self.assert_lint(
            'X::X() : a(int()) { } // default Constructor, o.k.',
            '')
        self.assert_lint(
            'operator bool(); // Conversion operator, o.k.',
            '')

    # The second parameter to a gMock method definition is a function signature
    # that often looks like a bad cast but should not picked up by lint.
    def test_mock_method(self):
        """gMock MOCK_METHOD* signatures must not be mistaken for deprecated casts."""
        self.assert_lint(
            'MOCK_METHOD0(method, int());',
            '')
        self.assert_lint(
            'MOCK_CONST_METHOD1(method, float(string));',
            '')
        self.assert_lint(
            'MOCK_CONST_METHOD2_T(method, double(float, float));',
            '')

    # Test sizeof(type) cases.
    def test_sizeof_type(self):
        """sizeof(type) is flagged in favor of sizeof(varname)."""
        self.assert_lint(
            'sizeof(int);',
            'Using sizeof(type). Use sizeof(varname) instead if possible'
            ' [runtime/sizeof] [1]')
        self.assert_lint(
            'sizeof(int *);',
            'Using sizeof(type). Use sizeof(varname) instead if possible'
            ' [runtime/sizeof] [1]')

    # Test typedef cases. There was a bug that cpp_style misidentified
    # typedef for pointer to function as C-style cast and produced
    # false-positive error messages.
    def test_typedef_for_pointer_to_function(self):
        """typedefs of function pointers must not be flagged as C-style casts (regression test)."""
        self.assert_lint(
            'typedef void (*Func)(int x);',
            '')
        self.assert_lint(
            'typedef void (*Func)(int *x);',
            '')
        self.assert_lint(
            'typedef void Func(int x);',
            '')
        self.assert_lint(
            'typedef void Func(int *x);',
            '')

    def test_include_what_you_use_no_implementation_files(self):
        """include-what-you-use warnings fire for headers but not for .cpp files."""
        code = 'std::vector<int> foo;'
        self.assertEqual('Add #include <vector> for vector<>'
                         ' [build/include_what_you_use] [4]',
                         self.perform_include_what_you_use(code, 'foo.h'))
        self.assertEqual('',
                         self.perform_include_what_you_use(code, 'foo.cpp'))

    def test_include_what_you_use(self):
        """The checker suggests missing standard headers for used STL symbols."""
        self.assert_include_what_you_use(
            '''#include <vector>
std::vector<int> foo;
''',
            '')
        # NOTE(review): pair<> is accepted from several containers' headers
        # below; only the bare-<vector> case further down is expected to warn.
        self.assert_include_what_you_use(
            '''#include <map>
std::pair<int,int> foo;
''',
            '')
        self.assert_include_what_you_use(
            '''#include <multimap>
std::pair<int,int> foo;
''',
            '')
        self.assert_include_what_you_use(
            '''#include <hash_map>
std::pair<int,int> foo;
''',
            '')
        self.assert_include_what_you_use(
            '''#include <utility>
std::pair<int,int> foo;
''',
            '')
        # DECLARE_/DEFINE_string macros must not trigger <string> suggestions.
        self.assert_include_what_you_use(
            '''#include <vector>
DECLARE_string(foobar);
''',
            '')
        self.assert_include_what_you_use(
            '''#include <vector>
DEFINE_string(foobar, "", "");
''',
            '')
        self.assert_include_what_you_use(
            '''#include <vector>
std::pair<int,int> foo;
''',
            'Add #include <utility> for pair<>'
            ' [build/include_what_you_use] [4]')
        self.assert_include_what_you_use(
            '''#include "base/foobar.h"
std::vector<int> foo;
''',
            'Add #include <vector> for vector<>'
            ' [build/include_what_you_use] [4]')
        self.assert_include_what_you_use(
            '''#include <vector>
std::set<int> foo;
''',
            'Add #include <set> for set<>'
            ' [build/include_what_you_use] [4]')
        self.assert_include_what_you_use(
            '''#include "base/foobar.h"
hash_map<int, int> foobar;
''',
            'Add #include <hash_map> for hash_map<>'
            ' [build/include_what_you_use] [4]')
        self.assert_include_what_you_use(
            '''#include "base/foobar.h"
bool foobar = std::less<int>(0,1);
''',
            'Add #include <functional> for less<>'
            ' [build/include_what_you_use] [4]')
        self.assert_include_what_you_use(
            '''#include "base/foobar.h"
bool foobar = min<int>(0,1);
''',
            'Add #include <algorithm> for min [build/include_what_you_use] [4]')
        self.assert_include_what_you_use(
            'void a(const string &foobar);',
            'Add #include <string> for string [build/include_what_you_use] [4]')
        self.assert_include_what_you_use(
            '''#include "base/foobar.h"
bool foobar = swap(0,1);
''',
            'Add #include <algorithm> for swap [build/include_what_you_use] [4]')
        self.assert_include_what_you_use(
            '''#include "base/foobar.h"
bool foobar = transform(a.begin(), a.end(), b.start(), Foo);
''',
            'Add #include <algorithm> for transform '
            '[build/include_what_you_use] [4]')
        self.assert_include_what_you_use(
            '''#include "base/foobar.h"
bool foobar = min_element(a.begin(), a.end());
''',
            'Add #include <algorithm> for min_element '
            '[build/include_what_you_use] [4]')
        # Member functions named like algorithms (foo->swap) must not warn.
        self.assert_include_what_you_use(
            '''foo->swap(0,1);
foo.swap(0,1);
''',
            '')
        self.assert_include_what_you_use(
            '''#include <string>
void a(const std::multimap<int,string> &foobar);
''',
            'Add #include <map> for multimap<>'
            ' [build/include_what_you_use] [4]')
        self.assert_include_what_you_use(
            '''#include <queue>
void a(const std::priority_queue<int> &foobar);
''',
            '')
        self.assert_include_what_you_use(
            '''#include "base/basictypes.h"
#include "base/port.h"
#include <assert.h>
#include <string>
#include <vector>
vector<string> hajoa;''',
            '')
        self.assert_include_what_you_use(
            '''#include <string>
int i = numeric_limits<int>::max()
''',
            'Add #include <limits> for numeric_limits<>'
            ' [build/include_what_you_use] [4]')
        self.assert_include_what_you_use(
            '''#include <limits>
int i = numeric_limits<int>::max()
''',
            '')

        # Test the UpdateIncludeState code path.
        mock_header_contents = ['#include "blah/foo.h"', '#include "blah/bar.h"']
        fs = FileSystem()
        orig_read_text_file_fn = fs.read_text_file

        # Stub out header reading so the checker "sees" our mock header.
        def mock_read_text_file_fn(path):
            return mock_header_contents

        try:
            fs.read_text_file = mock_read_text_file_fn
            message = self.perform_include_what_you_use(
                '#include "config.h"\n'
                '#include "blah/a.h"\n',
                filename='blah/a.cpp',
                fs=fs)
            self.assertEqual(message, '')

            # <set> included via the mocked companion header satisfies the check.
            mock_header_contents = ['#include <set>']
            message = self.perform_include_what_you_use(
                '''#include "config.h"
#include "blah/a.h"
std::set<int> foo;''',
                filename='blah/a.cpp',
                fs=fs)
            self.assertEqual(message, '')

            # If there's just a .cpp and the header can't be found then it's ok.
            message = self.perform_include_what_you_use(
                '''#include "config.h"
#include "blah/a.h"
std::set<int> foo;''',
                filename='blah/a.cpp')
            self.assertEqual(message, '')

            # Make sure we find the headers with relative paths.
            mock_header_contents = ['']
            message = self.perform_include_what_you_use(
                '''#include "config.h"
#include "%s%sa.h"
std::set<int> foo;''' % (os.path.basename(os.getcwd()), os.path.sep),
                filename='a.cpp',
                fs=fs)
            self.assertEqual(message, 'Add #include <set> for set<> '
                                      '[build/include_what_you_use] [4]')
        finally:
            # Always restore the real file reader.
            fs.read_text_file = orig_read_text_file_fn

    def test_files_belong_to_same_module(self):
        """files_belong_to_same_module() pairs sources/tests with their headers."""
        f = cpp_style.files_belong_to_same_module
        self.assertEqual((True, ''), f('a.cpp', 'a.h'))
        self.assertEqual((True, ''), f('base/google.cpp', 'base/google.h'))
        self.assertEqual((True, ''), f('base/google_test.cpp', 'base/google.h'))
        self.assertEqual((True, ''),
                         f('base/google_unittest.cpp', 'base/google.h'))
        self.assertEqual((True, ''),
                         f('base/internal/google_unittest.cpp', 'base/public/google.h'))
        # The second tuple element is the common path prefix, when any.
        self.assertEqual((True, 'xxx/yyy/'),
                         f('xxx/yyy/base/internal/google_unittest.cpp', 'base/public/google.h'))
        self.assertEqual((True, 'xxx/yyy/'),
                         f('xxx/yyy/base/google_unittest.cpp', 'base/public/google.h'))
        self.assertEqual((True, ''),
                         f('base/google_unittest.cpp', 'base/google-inl.h'))
        self.assertEqual((True, '/home/build/google3/'),
                         f('/home/build/google3/base/google.cpp', 'base/google.h'))
        self.assertEqual((False, ''),
                         f('/home/build/google3/base/google.cpp', 'basu/google.h'))
        self.assertEqual((False, ''), f('a.cpp', 'b.h'))

    def test_cleanse_line(self):
        """cleanse_comments() strips // and /*...*/ comments, preserving code."""
        self.assertEqual('int foo = 0; ',
                         cpp_style.cleanse_comments('int foo = 0; // danger!'))
        self.assertEqual('int o = 0;',
                         cpp_style.cleanse_comments('int /* foo */ o = 0;'))
        self.assertEqual('foo(int a, int b);',
                         cpp_style.cleanse_comments('foo(int a /* abc */, int b);'))
        self.assertEqual('f(a, b);',
                         cpp_style.cleanse_comments('f(a, /* name */ b);'))
        self.assertEqual('f(a, b);',
                         cpp_style.cleanse_comments('f(a /* name */, b);'))
        self.assertEqual('f(a, b);',
                         cpp_style.cleanse_comments('f(a, /* name */b);'))

    def test_multi_line_comments(self):
        """Well-formed /*...*/ blocks pass; unterminated ones report two errors."""
        # missing explicit is bad
        self.assert_multi_line_lint(
            r'''int a = 0;
/* multi-liner
class Foo {
Foo(int f); // should cause a lint warning in code
}
*/ ''',
            '')
        self.assert_multi_line_lint(
            '''\
/* int a = 0; multi-liner
static const int b = 0;''',
            ['Could not find end of multi-line comment'
             ' [readability/multiline_comment] [5]',
             'Complex multi-line /*...*/-style comment found. '
             'Lint may give bogus warnings. Consider replacing these with '
             '//-style comments, with #if 0...#endif, or with more clearly '
             'structured multi-line comments. [readability/multiline_comment] [5]'])
        self.assert_multi_line_lint(
            r''' /* multi-line comment''',
            ['Could not find end of multi-line comment'
             ' [readability/multiline_comment] [5]',
             'Complex multi-line /*...*/-style comment found. '
             'Lint may give bogus warnings. Consider replacing these with '
             '//-style comments, with #if 0...#endif, or with more clearly '
             'structured multi-line comments. [readability/multiline_comment] [5]'])
        # A /* hidden behind // is not a multi-line comment start.
        self.assert_multi_line_lint(
            r''' // /* comment, but not multi-line''',
            '')

    def test_multiline_strings(self):
        """A backslash-continued string literal is flagged on every line it spans."""
        multiline_string_error_message = (
            'Multi-line string ("...") found. This lint script doesn\'t '
            'do well with such strings, and may give bogus warnings. They\'re '
            'ugly and unnecessary, and you should use concatenation instead".'
            ' [readability/multiline_string] [5]')

        file_path = 'mydir/foo.cpp'

        error_collector = ErrorCollector(self.assertTrue)
        self.process_file_data(file_path, 'cpp',
                               ['const char* str = "This is a\\',
                                ' multiline string.";'],
                               error_collector)
        self.assertEqual(
            2,  # One per line.
            error_collector.result_list().count(multiline_string_error_message))

    # Test non-explicit single-argument constructors
    def test_explicit_single_argument_constructors(self):
        """Single-argument constructors must be declared explicit."""
        # missing explicit is bad
        self.assert_multi_line_lint(
            '''\
class Foo {
    Foo(int f);
};''',
            'Single-argument constructors should be marked explicit.'
            ' [runtime/explicit] [5]')
        # missing explicit is bad, even with whitespace
        self.assert_multi_line_lint(
            '''\
class Foo {
    Foo (int f);
};''',
            ['Extra space before ( in function call [whitespace/parens] [4]',
             'Single-argument constructors should be marked explicit.'
             ' [runtime/explicit] [5]'])
        # missing explicit, with distracting comment, is still bad
        self.assert_multi_line_lint(
            '''\
class Foo {
    Foo(int f); // simpler than Foo(blargh, blarg)
};''',
            'Single-argument constructors should be marked explicit.'
            ' [runtime/explicit] [5]')
        # missing explicit, with qualified classname
        self.assert_multi_line_lint(
            '''\
class Qualifier::AnotherOne::Foo {
    Foo(int f);
};''',
            'Single-argument constructors should be marked explicit.'
            ' [runtime/explicit] [5]')
        # structs are caught as well.
        self.assert_multi_line_lint(
            '''\
struct Foo {
    Foo(int f);
};''',
            'Single-argument constructors should be marked explicit.'
            ' [runtime/explicit] [5]')
        # Templatized classes are caught as well.
        self.assert_multi_line_lint(
            '''\
template<typename T> class Foo {
    Foo(int f);
};''',
            'Single-argument constructors should be marked explicit.'
            ' [runtime/explicit] [5]')
        # proper style is okay
        self.assert_multi_line_lint(
            '''\
class Foo {
    explicit Foo(int f);
};''',
            '')
        # two argument constructor is okay
        self.assert_multi_line_lint(
            '''\
class Foo {
    Foo(int f, int b);
};''',
            '')
        # two argument constructor, across two lines, is okay
        self.assert_multi_line_lint(
            '''\
class Foo {
    Foo(int f,
        int b);
};''',
            '')
        # non-constructor (but similar name), is okay
        self.assert_multi_line_lint(
            '''\
class Foo {
    aFoo(int f);
};''',
            '')
        # constructor with void argument is okay
        self.assert_multi_line_lint(
            '''\
class Foo {
    Foo(void);
};''',
            '')
        # single argument method is okay
        self.assert_multi_line_lint(
            '''\
class Foo {
    Bar(int b);
};''',
            '')
        # comments should be ignored
        self.assert_multi_line_lint(
            '''\
class Foo {
    // Foo(int f);
};''',
            '')
        # single argument function following class definition is okay
        # (okay, it's not actually valid, but we don't want a false positive)
        self.assert_multi_line_lint(
            '''\
class Foo {
    Foo(int f, int b);
};
Foo(int f);''',
            '')
        # single argument function is okay
        self.assert_multi_line_lint(
            '''static Foo(int f);''',
            '')
        # single argument copy constructor is okay.
        self.assert_multi_line_lint(
            '''\
class Foo {
    Foo(const Foo&);
};''',
            '')
        self.assert_multi_line_lint(
            '''\
class Foo {
    Foo(Foo&);
};''',
            '')

    def test_slash_star_comment_on_single_line(self):
        """A /*...*/ closed on the same line is fine; an unterminated one errors."""
        self.assert_multi_line_lint(
            '''/* static */ Foo(int f);''',
            '')
        self.assert_multi_line_lint(
            '''/*/ static */ Foo(int f);''',
            '')
        self.assert_multi_line_lint(
            '''/*/ static Foo(int f);''',
            'Could not find end of multi-line comment'
            ' [readability/multiline_comment] [5]')
        self.assert_multi_line_lint(
            ''' /*/ static Foo(int f);''',
            'Could not find end of multi-line comment'
            ' [readability/multiline_comment] [5]')

    # Test suspicious usage of "if" like this:
    # if (a == b) {
    #   DoSomething();
    # } if (a == c) { // Should be "else if".
    #   DoSomething(); // This gets called twice if a == b && a == c.
    # }
    def test_suspicious_usage_of_if(self):
        """'} if (...)' on one line suggests a missing 'else if'."""
        self.assert_lint(
            ' if (a == b) {',
            '')
        self.assert_lint(
            ' } if (a == b) {',
            'Did you mean "else if"? If not, start a new line for "if".'
            ' [readability/braces] [4]')

    # Test suspicious usage of memset. Specifically, a 0
    # as the final argument is almost certainly an error.
    def test_suspicious_usage_of_memset(self):
        """memset with 0 as the last argument (swapped args) is flagged."""
        # Normal use is okay.
        self.assert_lint(
            ' memset(buf, 0, sizeof(buf))',
            '')

        # A 0 as the final argument is almost certainly an error.
        self.assert_lint(
            ' memset(buf, sizeof(buf), 0)',
            'Did you mean "memset(buf, 0, sizeof(buf))"?'
            ' [runtime/memset] [4]')
        self.assert_lint(
            ' memset(buf, xsize * ysize, 0)',
            'Did you mean "memset(buf, 0, xsize * ysize)"?'
            ' [runtime/memset] [4]')

        # There is legitimate test code that uses this form.
        # This is okay since the second argument is a literal.
        self.assert_lint(
            " memset(buf, 'y', 0)",
            '')
        self.assert_lint(
            ' memset(buf, 4, 0)',
            '')
        self.assert_lint(
            ' memset(buf, -1, 0)',
            '')
        self.assert_lint(
            ' memset(buf, 0xF1, 0)',
            '')
        self.assert_lint(
            ' memset(buf, 0xcd, 0)',
            '')

    def test_check_posix_threading(self):
        """Non-reentrant POSIX functions (rand, strtok) get *_r suggestions."""
        self.assert_lint('sctime_r()', '')
        self.assert_lint('strtok_r()', '')
        self.assert_lint(' strtok_r(foo, ba, r)', '')
        # Identifiers merely containing 'rand' must not warn.
        self.assert_lint('brand()', '')
        self.assert_lint('_rand()', '')
        self.assert_lint('.rand()', '')
        self.assert_lint('>rand()', '')
        self.assert_lint('rand()',
                         'Consider using rand_r(...) instead of rand(...)'
                         ' for improved thread safety.'
                         ' [runtime/threadsafe_fn] [2]')
        self.assert_lint('strtok()',
                         'Consider using strtok_r(...) '
                         'instead of strtok(...)'
                         ' for improved thread safety.'
                         ' [runtime/threadsafe_fn] [2]')

    # Test potential format string bugs like printf(foo).
    def test_format_strings(self):
        """printf-family calls whose only argument is a variable are flagged."""
        self.assert_lint('printf("foo")', '')
        self.assert_lint('printf("foo: %s", foo)', '')
        self.assert_lint('DocidForPrintf(docid)', '')  # Should not trigger.
        self.assert_lint(
            'printf(foo)',
            'Potential format string bug. Do printf("%s", foo) instead.'
            ' [runtime/printf] [4]')
        self.assert_lint(
            'printf(foo.c_str())',
            'Potential format string bug. '
            'Do printf("%s", foo.c_str()) instead.'
            ' [runtime/printf] [4]')
        self.assert_lint(
            'printf(foo->c_str())',
            'Potential format string bug. '
            'Do printf("%s", foo->c_str()) instead.'
            ' [runtime/printf] [4]')
        self.assert_lint(
            'StringPrintf(foo)',
            'Potential format string bug. Do StringPrintf("%s", foo) instead.'
            ''
            ' [runtime/printf] [4]')

    # Variable-length arrays are not permitted.
    def test_variable_length_array_detection(self):
        """Array sizes must be compile-time constants; variables/calls are flagged."""
        errmsg = ('Do not use variable-length arrays. Use an appropriately named '
                  "('k' followed by CamelCase) compile-time constant for the size."
                  ' [runtime/arrays] [1]')

        self.assert_lint('int a[any_old_variable];', errmsg)
        self.assert_lint('int doublesize[some_var * 2];', errmsg)
        self.assert_lint('int a[afunction()];', errmsg)
        self.assert_lint('int a[function(kMaxFooBars)];', errmsg)
        self.assert_lint('bool aList[items_->size()];', errmsg)
        self.assert_lint('namespace::Type buffer[len+1];', errmsg)

        self.assert_lint('int a[64];', '')
        self.assert_lint('int a[0xFF];', '')
        self.assert_lint('int first[256], second[256];', '')
        self.assert_lint('int arrayName[kCompileTimeConstant];', '')
        self.assert_lint('char buf[somenamespace::kBufSize];', '')
        self.assert_lint('int arrayName[ALL_CAPS];', '')
        self.assert_lint('AClass array1[foo::bar::ALL_CAPS];', '')
        self.assert_lint('int a[kMaxStrLen + 1];', '')
        self.assert_lint('int a[sizeof(foo)];', '')
        self.assert_lint('int a[sizeof(*foo)];', '')
        self.assert_lint('int a[sizeof foo];', '')
        self.assert_lint('int a[sizeof(struct Foo)];', '')
        self.assert_lint('int a[128 - sizeof(const bar)];', '')
        self.assert_lint('int a[(sizeof(foo) * 4)];', '')
        # Only the operator-spacing error is expected here, not runtime/arrays.
        self.assert_lint('int a[(arraysize(fixed_size_array)/2) << 1];',
                         'Missing spaces around / [whitespace/operators] [3]')
        self.assert_lint('delete a[some_var];', '')
        self.assert_lint('return a[some_var];', '')

    # Brace usage
    def test_braces(self):
        """Braces followed by ';' are fine for struct definitions and array initializers."""
        # Braces shouldn't be followed by a ; unless they're defining a struct
        # or initializing an array
        self.assert_lint('int a[3] = { 1, 2, 3 };', '')
        self.assert_lint(
            '''\
const int foo[] =
    {1, 2, 3 };''',
            '')
        # For single line, unmatched '}' with a ';' is ignored (not enough context)
        self.assert_multi_line_lint(
            '''\
int a[3] = { 1,
    2,
    3 };''',
            '')
        self.assert_multi_line_lint(
            '''\
int a[2][3] = { { 1, 2 },
    { 3, 4 } };''',
            '')
        self.assert_multi_line_lint(
            '''\
int a[2][3] = { { 1, 2 },
    { 3, 4 } };''',
            '')

    # CHECK/EXPECT_TRUE/EXPECT_FALSE replacements
    def test_check_check(self):
        """CHECK/EXPECT/ASSERT with binary comparisons suggest the *_EQ/_NE/... forms."""
        self.assert_lint('CHECK(x == 42)',
                         'Consider using CHECK_EQ instead of CHECK(a == b)'
                         ' [readability/check] [2]')
        self.assert_lint('CHECK(x != 42)',
                         'Consider using CHECK_NE instead of CHECK(a != b)'
                         ' [readability/check] [2]')
        self.assert_lint('CHECK(x >= 42)',
                         'Consider using CHECK_GE instead of CHECK(a >= b)'
                         ' [readability/check] [2]')
        self.assert_lint('CHECK(x > 42)',
                         'Consider using CHECK_GT instead of CHECK(a > b)'
                         ' [readability/check] [2]')
        self.assert_lint('CHECK(x <= 42)',
                         'Consider using CHECK_LE instead of CHECK(a <= b)'
                         ' [readability/check] [2]')
        self.assert_lint('CHECK(x < 42)',
                         'Consider using CHECK_LT instead of CHECK(a < b)'
                         ' [readability/check] [2]')

        self.assert_lint('DCHECK(x == 42)',
                         'Consider using DCHECK_EQ instead of DCHECK(a == b)'
                         ' [readability/check] [2]')
        self.assert_lint('DCHECK(x != 42)',
                         'Consider using DCHECK_NE instead of DCHECK(a != b)'
                         ' [readability/check] [2]')
        self.assert_lint('DCHECK(x >= 42)',
                         'Consider using DCHECK_GE instead of DCHECK(a >= b)'
                         ' [readability/check] [2]')
        self.assert_lint('DCHECK(x > 42)',
                         'Consider using DCHECK_GT instead of DCHECK(a > b)'
                         ' [readability/check] [2]')
        self.assert_lint('DCHECK(x <= 42)',
                         'Consider using DCHECK_LE instead of DCHECK(a <= b)'
                         ' [readability/check] [2]')
        self.assert_lint('DCHECK(x < 42)',
                         'Consider using DCHECK_LT instead of DCHECK(a < b)'
                         ' [readability/check] [2]')

        self.assert_lint(
            'EXPECT_TRUE("42" == x)',
            'Consider using EXPECT_EQ instead of EXPECT_TRUE(a == b)'
            ' [readability/check] [2]')
        self.assert_lint(
            'EXPECT_TRUE("42" != x)',
            'Consider using EXPECT_NE instead of EXPECT_TRUE(a != b)'
            ' [readability/check] [2]')
        self.assert_lint(
            'EXPECT_TRUE(+42 >= x)',
            'Consider using EXPECT_GE instead of EXPECT_TRUE(a >= b)'
            ' [readability/check] [2]')
        self.assert_lint(
            'EXPECT_TRUE_M(-42 > x)',
            'Consider using EXPECT_GT_M instead of EXPECT_TRUE_M(a > b)'
            ' [readability/check] [2]')
        self.assert_lint(
            'EXPECT_TRUE_M(42U <= x)',
            'Consider using EXPECT_LE_M instead of EXPECT_TRUE_M(a <= b)'
            ' [readability/check] [2]')
        self.assert_lint(
            'EXPECT_TRUE_M(42L < x)',
            'Consider using EXPECT_LT_M instead of EXPECT_TRUE_M(a < b)'
            ' [readability/check] [2]')

        # EXPECT_FALSE/ASSERT_FALSE suggest the *negated* comparison macro.
        self.assert_lint(
            'EXPECT_FALSE(x == 42)',
            'Consider using EXPECT_NE instead of EXPECT_FALSE(a == b)'
            ' [readability/check] [2]')
        self.assert_lint(
            'EXPECT_FALSE(x != 42)',
            'Consider using EXPECT_EQ instead of EXPECT_FALSE(a != b)'
            ' [readability/check] [2]')
        self.assert_lint(
            'EXPECT_FALSE(x >= 42)',
            'Consider using EXPECT_LT instead of EXPECT_FALSE(a >= b)'
            ' [readability/check] [2]')
        self.assert_lint(
            'ASSERT_FALSE(x > 42)',
            'Consider using ASSERT_LE instead of ASSERT_FALSE(a > b)'
            ' [readability/check] [2]')
        self.assert_lint(
            'ASSERT_FALSE(x <= 42)',
            'Consider using ASSERT_GT instead of ASSERT_FALSE(a <= b)'
            ' [readability/check] [2]')
        self.assert_lint(
            'ASSERT_FALSE_M(x < 42)',
            'Consider using ASSERT_GE_M instead of ASSERT_FALSE_M(a < b)'
            ' [readability/check] [2]')

        # Iterator comparisons and shift expressions are not flagged.
        self.assert_lint('CHECK(some_iterator == obj.end())', '')
        self.assert_lint('EXPECT_TRUE(some_iterator == obj.end())', '')
        self.assert_lint('EXPECT_FALSE(some_iterator == obj.end())', '')

        self.assert_lint('CHECK(CreateTestFile(dir, (1 << 20)));', '')
        self.assert_lint('CHECK(CreateTestFile(dir, (1 >> 20)));', '')

        self.assert_lint('CHECK(x<42)',
                         ['Missing spaces around <'
                          ' [whitespace/operators] [3]',
                          'Consider using CHECK_LT instead of CHECK(a < b)'
                          ' [readability/check] [2]'])
        self.assert_lint('CHECK(x>42)',
                         'Consider using CHECK_GT instead of CHECK(a > b)'
                         ' [readability/check] [2]')

        self.assert_lint(
            ' EXPECT_TRUE(42 < x) // Random comment.',
            'Consider using EXPECT_LT instead of EXPECT_TRUE(a < b)'
            ' [readability/check] [2]')
        self.assert_lint(
            'EXPECT_TRUE( 42 < x )',
            ['Extra space after ( in function call'
             ' [whitespace/parens] [4]',
             'Consider using EXPECT_LT instead of EXPECT_TRUE(a < b)'
             ' [readability/check] [2]'])
        self.assert_lint(
            'CHECK("foo" == "foo")',
            'Consider using CHECK_EQ instead of CHECK(a == b)'
            ' [readability/check] [2]')

        self.assert_lint('CHECK_EQ("foo", "foo")', '')

    def test_brace_at_begin_of_line(self):
        """Braces belong at end of line, except for function-definition bodies."""
        self.assert_lint('{',
                         'This { should be at the end of the previous line'
                         ' [whitespace/braces] [4]')
        self.assert_multi_line_lint(
            '#endif\n'
            '{\n'
            '}\n',
            '')
        self.assert_multi_line_lint(
            'if (condition) {',
            '')
        self.assert_multi_line_lint(
            '    MACRO1(macroArg) {',
            '')
        self.assert_multi_line_lint(
            'ACCESSOR_GETTER(MessageEventPorts) {',
            'Place brace on its own line for function definitions. [whitespace/braces] [4]')
        self.assert_multi_line_lint(
            'int foo() {',
            'Place brace on its own line for function definitions. [whitespace/braces] [4]')
        self.assert_multi_line_lint(
            'int foo() const {',
            'Place brace on its own line for function definitions. [whitespace/braces] [4]')
        self.assert_multi_line_lint(
            'int foo() override {',
            'Place brace on its own line for function definitions. [whitespace/braces] [4]')
        self.assert_multi_line_lint(
            'int foo() final {',
            'Place brace on its own line for function definitions. [whitespace/braces] [4]')
        self.assert_multi_line_lint(
            'int foo() const\n'
            '{\n'
            '}\n',
            '')
        self.assert_multi_line_lint(
            'int foo() override\n'
            '{\n'
            '}\n',
            '')
        self.assert_multi_line_lint(
            'int foo() final\n'
            '{\n'
            '}\n',
            '')
        # A brace ending a multi-line condition is acceptable.
        self.assert_multi_line_lint(
            'if (condition\n'
            '    && condition2\n'
            '    && condition3) {\n'
            '}\n',
            '')
        self.assert_multi_line_lint(
            'if (condition) {\n'
            '    {\n'
            '    }\n',
            '')
        self.assert_multi_line_lint(
            'int foo()\n'
            '{\n'
            '    {\n'
            '    }\n'
            '}\n',
            '')

    def test_mismatching_spaces_in_parens(self):
        """Spaces just inside parentheses of control statements are flagged."""
        self.assert_lint('if (foo ) {', 'Extra space before ) in if'
                         ' [whitespace/parens] [5]')
        self.assert_lint('switch ( foo) {', 'Extra space after ( in switch'
                         ' [whitespace/parens] [5]')
        self.assert_lint('for (foo; ba; bar ) {', 'Extra space before ) in for'
                         ' [whitespace/parens] [5]')
        self.assert_lint('for ((foo); (ba); (bar) ) {', 'Extra space before ) in for'
                         ' [whitespace/parens] [5]')
        # An empty first clause makes 'for ( ;' acceptable.
        self.assert_lint('for (; foo; bar) {', '')
        self.assert_lint('for (; (foo); (bar)) {', '')
        self.assert_lint('for ( ; foo; bar) {', '')
        self.assert_lint('for ( ; (foo); (bar)) {', '')
        self.assert_lint('for ( ; foo; bar ) {', 'Extra space before ) in for'
                         ' [whitespace/parens] [5]')
        self.assert_lint('for ( ; (foo); (bar) ) {', 'Extra space before ) in for'
                         ' [whitespace/parens] [5]')
        self.assert_lint('for (foo; bar; ) {', '')
        self.assert_lint('for ((foo); (bar); ) {', '')
        self.assert_lint('foreach (foo, foos ) {', 'Extra space before ) in foreach'
                         ' [whitespace/parens] [5]')
        self.assert_lint('foreach ( foo, foos) {', 'Extra space after ( in foreach'
                         ' [whitespace/parens] [5]')
        self.assert_lint('while ( foo) {', 'Extra space after ( in while'
                         ' [whitespace/parens] [5]')

    def test_spacing_for_fncall(self):
        """Function calls must have no space before ( or just after it."""
        self.assert_lint('if (foo) {', '')
        self.assert_lint('for (foo;bar;baz) {', '')
        self.assert_lint('foreach (foo, foos) {', '')
        self.assert_lint('while (foo) {', '')
        self.assert_lint('switch (foo) {', '')
        self.assert_lint('new (RenderArena()) RenderInline(document())', '')
        self.assert_lint('foo( bar)', 'Extra space after ( in function call'
                         ' [whitespace/parens] [4]')
        # Line continuations after ( are not flagged.
        self.assert_lint('foobar( \\', '')
        self.assert_lint('foobar(     \\', '')
        self.assert_lint('( a + b)', 'Extra space after ('
                         ' [whitespace/parens] [2]')
        self.assert_lint('((a+b))', '')
        self.assert_lint('foo (foo)', 'Extra space before ( in function call'
                         ' [whitespace/parens] [4]')
        self.assert_lint('#elif (foo(bar))', '')
        self.assert_lint('#elif (foo(bar) && foo(baz))', '')
        # Function-pointer typedefs and declarations legitimately have 'name ('.
        self.assert_lint('typedef foo (*foo)(foo)', '')
        self.assert_lint('typedef foo (*foo12bar_)(foo)', '')
        self.assert_lint('typedef foo (Foo::*bar)(foo)', '')
        self.assert_lint('foo (Foo::*bar)(',
                         'Extra space before ( in function call'
                         ' [whitespace/parens] [4]')
        self.assert_lint('typedef foo (Foo::*bar)(', '')
        self.assert_lint('(foo)(bar)', '')
        self.assert_lint('Foo (*foo)(bar)', '')
        self.assert_lint('Foo (*foo)(Bar bar,', '')
        self.assert_lint('char (*p)[sizeof(foo)] = &foo', '')
        self.assert_lint('char (&ref)[sizeof(foo)] = &foo', '')
        self.assert_lint('const char32 (*table[])[6];', '')

    def test_spacing_before_braces(self):
        """A { opening a block needs a preceding space."""
        self.assert_lint('if (foo){', 'Missing space before {'
                         ' [whitespace/braces] [5]')
        self.assert_lint('for{', 'Missing space before {'
                         ' [whitespace/braces] [5]')
        self.assert_lint('for {', '')
        self.assert_lint('EXPECT_DEBUG_DEATH({', '')

    def test_spacing_between_braces(self):
        """An empty block is '{ }' or '{}'; extra interior spaces are flagged."""
        self.assert_lint('    { }', '')
        self.assert_lint('    {}', '')
        self.assert_lint('    {   }', 'Too many spaces inside { }. [whitespace/braces] [5]')

    def test_spacing_around_else(self):
        """'else' needs a space after the closing } and before its own {."""
        self.assert_lint('}else {', 'Missing space before else'
                         ' [whitespace/braces] [5]')
        self.assert_lint('} else{', 'Missing space before {'
                         ' [whitespace/braces] [5]')
        self.assert_lint('} else {', '')
        self.assert_lint('} else if', '')

    def test_spacing_for_binary_ops(self):
        """Binary operators need surrounding spaces; template brackets are exempt."""
        self.assert_lint('if (foo<=bar) {', 'Missing spaces around <='
                         ' [whitespace/operators] [3]')
        self.assert_lint('if (foo<bar) {', 'Missing spaces around <'
                         ' [whitespace/operators] [3]')
        self.assert_lint('if (foo<bar->baz) {', 'Missing spaces around <'
                         ' [whitespace/operators] [3]')
        self.assert_lint('if (foo<bar->bar) {', 'Missing spaces around <'
                         ' [whitespace/operators] [3]')
        self.assert_lint('typedef hash_map<Foo, Bar', 'Missing spaces around <'
                         ' [whitespace/operators] [3]')
        self.assert_lint('typedef hash_map<FoooooType, BaaaaarType,', '')
        self.assert_lint('a<Foo> t+=b;', 'Missing spaces around +='
                         ' [whitespace/operators] [3]')
        self.assert_lint('a<Foo> t-=b;', 'Missing spaces around -='
                         ' [whitespace/operators] [3]')
        self.assert_lint('a<Foo*> t*=b;', 'Missing spaces around *='
                         ' [whitespace/operators] [3]')
        self.assert_lint('a<Foo*> t/=b;', 'Missing spaces around /='
                         ' [whitespace/operators] [3]')
        self.assert_lint('a<Foo*> t|=b;', 'Missing spaces around |='
                         ' [whitespace/operators] [3]')
        self.assert_lint('a<Foo*> t&=b;', 'Missing spaces around &='
                         ' [whitespace/operators] [3]')
        self.assert_lint('a<Foo*> t<<=b;', 'Missing spaces around <<='
                         ' [whitespace/operators] [3]')
        self.assert_lint('a<Foo*> t>>=b;', 'Missing spaces around >>='
                         ' [whitespace/operators] [3]')
        self.assert_lint('a<Foo*> t>>=&b|c;', 'Missing spaces around >>='
                         ' [whitespace/operators] [3]')
        self.assert_lint('a<Foo*> t<<=*b/c;', 'Missing spaces around <<='
                         ' [whitespace/operators] [3]')
        self.assert_lint('a<Foo> t -= b;', '')
        self.assert_lint('a<Foo> t += b;', '')
        self.assert_lint('a<Foo*> t *= b;', '')
        self.assert_lint('a<Foo*> t /= b;', '')
        self.assert_lint('a<Foo*> t |= b;', '')
        self.assert_lint('a<Foo*> t &= b;', '')
        self.assert_lint('a<Foo*> t <<= b;', '')
        self.assert_lint('a<Foo*> t >>= b;', '')
        self.assert_lint('a<Foo*> t >>= &b|c;', 'Missing spaces around |'
                         ' [whitespace/operators] [3]')
        self.assert_lint('a<Foo*> t <<= *b/c;', 'Missing spaces around /'
                         ' [whitespace/operators] [3]')
        self.assert_lint('a<Foo*> t <<= b/c; //Test', [
            'Should have a space between // and comment '
            '[whitespace/comments] [4]', 'Missing'
            ' spaces around / [whitespace/operators] [3]'])
        self.assert_lint('a<Foo*> t <<= b||c; //Test', ['One space before end'
                         ' of line comments [whitespace/comments] [5]',
                         'Should have a space between // and comment '
                         '[whitespace/comments] [4]',
                         'Missing spaces around || [whitespace/operators] [3]'])
        self.assert_lint('a<Foo*> t <<= b&&c; // Test', 'Missing spaces around'
                         ' && [whitespace/operators] [3]')
        self.assert_lint('a<Foo*> t <<= b&&&c; // Test', 'Missing spaces around'
                         ' && [whitespace/operators] [3]')
        self.assert_lint('a<Foo*> t <<= b&&*c; // Test', 'Missing spaces around'
                         ' && [whitespace/operators] [3]')
        self.assert_lint('a<Foo*> t <<= b && *c; // Test', '')
        self.assert_lint('a<Foo*> t <<= b && &c; // Test', '')
        self.assert_lint('a<Foo*> t <<= b || &c; /*Test', 'Complex multi-line '
                         '/*...*/-style comment found. Lint may give bogus '
                         'warnings. Consider replacing these with //-style'
                         ' comments, with #if 0...#endif, or with more clearly'
                         ' structured multi-line comments. [readability/multiline_comment] [5]')
        self.assert_lint('a<Foo&> t <<= &b | &c;', '')
        self.assert_lint('a<Foo*> t <<= &b & &c; // Test', '')
        self.assert_lint('a<Foo*> t <<= *b / &c; // Test', '')
        self.assert_lint('if (a=b == 1)', 'Missing spaces around = [whitespace/operators] [4]')
        self.assert_lint('a = 1<<20', 'Missing spaces around << [whitespace/operators] [3]')
        self.assert_lint('a = 1>> 20', 'Missing spaces around >> [whitespace/operators] [3]')
        self.assert_lint('a = 1 >>20', 'Missing spaces around >> [whitespace/operators] [3]')
        self.assert_lint('a = 1>>20', 'Missing spaces around >> [whitespace/operators] [3]')
        # Nested template brackets: >> is the preferred closer, > > is flagged.
        self.assert_lint('func(OwnPtr<Vector<Foo>>)', '')
        self.assert_lint('func(OwnPtr<Vector<Foo>> foo)', '')
        self.assert_lint('func(OwnPtr<HashMap<Foo, Member<Bar>>>)', '')
        self.assert_lint('func(OwnPtr<Vector<Foo> >)',
                         'Use >> for ending template instead of > >. [readability/templatebrackets] [3]')
        self.assert_lint('func(OwnPtr<HashMap<Foo, Member<Bar>> >)',
                         'Use >> for ending template instead of > >. [readability/templatebrackets] [3]')
        self.assert_lint('func(OwnPtr<HashMap<Foo, Member<Bar> >>)',
                         'Use >> for ending template instead of > >. [readability/templatebrackets] [3]')
        self.assert_lint('func(OwnPtr<HashMap<Foo, Member<Bar> > >)',
                         'Use >> for ending template instead of > >. [readability/templatebrackets] [3]')
        self.assert_lint('Vector< ::Foo>)',
                         'Use <:: for template start instead of < ::. [readability/templatebrackets] [3]')
        self.assert_lint('Vector<Vector< ::Foo>>)',
                         'Use <:: for template start instead of < ::. [readability/templatebrackets] [3]')
        # FIXME: The following test should not show any error.
        self.assert_lint('func(OwnPtr<HashMap<Foo, Member<Bar\n    >>>)',
                         'Missing spaces around < [whitespace/operators] [3]')
        self.assert_lint('if (a = b == 1)', '')
        self.assert_lint('a = 1 << 20', '')
        self.assert_multi_line_lint('#include <sys/io.h>\n', '')
        self.assert_multi_line_lint('#import <foo/bar.h>\n', '')

    def test_operator_methods(self):
        """Overloaded operator declarations must not trip operator-spacing checks."""
        self.assert_lint('String operator+(const String&, const String&);', '')
        self.assert_lint('String operator/(const String&, const String&);', '')
        self.assert_lint('bool operator==(const String&, const String&);', '')
        self.assert_lint('String& operator-=(const String&, const String&);', '')
        self.assert_lint('String& operator+=(const String&, const String&);', '')
        self.assert_lint('String& operator*=(const String&, const String&);', '')
        self.assert_lint('String& operator%=(const String&, const String&);', '')
        self.assert_lint('String& operator&=(const String&, const String&);', '')
        self.assert_lint('String& operator<<=(const String&, const String&);', '')
        self.assert_lint('String& operator>>=(const String&, const String&);', '')
        self.assert_lint('String& operator|=(const String&, const String&);', '')
        self.assert_lint('String& operator^=(const String&, const String&);', '')

    def test_spacing_before_last_semicolon(self):
        """Whitespace before a trailing ';' and semicolon-only statements are flagged."""
        self.assert_lint('call_function() ;',
                         'Extra space before last semicolon. If this should be an '
                         'empty statement, use { } instead.'
                         ' [whitespace/semicolon] [5]')
        self.assert_lint('while (true) ;',
                         'Extra space before last semicolon. If this should be an '
                         'empty statement, use { } instead.'
                         ' [whitespace/semicolon] [5]')
        self.assert_lint('default:;',
                         'Semicolon defining empty statement. Use { } instead.'
                         ' [whitespace/semicolon] [5]')
        self.assert_lint('      ;',
                         'Line contains only semicolon. If this should be an empty '
                         'statement, use { } instead.'
                         ' [whitespace/semicolon] [5]')
        self.assert_lint('for (int i = 0; ;', '')

    # Static or global STL strings.
    def test_static_or_global_stlstrings(self):
        """Static/global std::string constants should be flagged in favor of C strings."""
        self.assert_lint('string foo;',
                         'For a static/global string constant, use a C style '
                         'string instead: "char foo[]".'
                         ' [runtime/string] [4]')
        self.assert_lint('string kFoo = "hello"; // English',
                         'For a static/global string constant, use a C style '
                         'string instead: "char kFoo[]".'
                         ' [runtime/string] [4]')
        self.assert_lint('static string foo;',
                         'For a static/global string constant, use a C style '
                         'string instead: "static char foo[]".'
                         ' [runtime/string] [4]')
        self.assert_lint('static const string foo;',
                         'For a static/global string constant, use a C style '
                         'string instead: "static const char foo[]".'
                         ' [runtime/string] [4]')
        self.assert_lint('string Foo::bar;',
                         'For a static/global string constant, use a C style '
                         'string instead: "char Foo::bar[]".'
                         ' [runtime/string] [4]')
        # Rare case.
        self.assert_lint('string foo("foobar");',
                         'For a static/global string constant, use a C style '
                         'string instead: "char foo[]".'
                         ' [runtime/string] [4]')
        # Should not catch local or member variables.
        self.assert_lint(' string foo', '')
        # Should not catch functions.
        self.assert_lint('string EmptyString() { return ""; }', '')
        self.assert_lint('string EmptyString () { return ""; }', '')
        self.assert_lint('string VeryLongNameFunctionSometimesEndsWith(\n'
                         ' VeryLongNameType veryLongNameVariable) { }', '')
        self.assert_lint('template<>\n'
                         'string FunctionTemplateSpecialization<SomeType>(\n'
                         ' int x) { return ""; }', '')
        self.assert_lint('template<>\n'
                         'string FunctionTemplateSpecialization<vector<A::B>* >(\n'
                         ' int x) { return ""; }', '')
        # should not catch methods of template classes.
        self.assert_lint('string Class<Type>::Method() const\n'
                         '{\n'
                         ' return "";\n'
                         '}\n', '')
        self.assert_lint('string Class<Type>::Method(\n'
                         ' int arg) const\n'
                         '{\n'
                         ' return "";\n'
                         '}\n', '')

    def test_no_spaces_in_function_calls(self):
        """No space is allowed before the closing paren of a function call."""
        self.assert_lint('TellStory(1, 3);', '')
        self.assert_lint('TellStory(1, 3 );', 'Extra space before )'
                         ' [whitespace/parens] [2]')
        self.assert_lint('TellStory(1 /* wolf */, 3 /* pigs */);', '')
        self.assert_multi_line_lint('#endif\n );', '')

    def test_one_spaces_between_code_and_comments(self):
        """End-of-line comments must be preceded by exactly one space."""
        self.assert_lint('} // namespace foo', '')
        self.assert_lint('}// namespace foo',
                         'One space before end of line comments'
                         ' [whitespace/comments] [5]')
        self.assert_lint('printf("foo"); // Outside quotes.', '')
        self.assert_lint('int i = 0; // Having one space is fine.','')
        # NOTE(review): the next two inputs look identical to the clean case;
        # the doubled/tripled spaces before '//' were probably collapsed by
        # formatting. Verify against the upstream file.
        self.assert_lint('int i = 0; // Having two spaces is bad.',
                         'One space before end of line comments'
                         ' [whitespace/comments] [5]')
        self.assert_lint('int i = 0; // Having three spaces is bad.',
                         'One space before end of line comments'
                         ' [whitespace/comments] [5]')
        self.assert_lint('// Top level comment', '')
        self.assert_lint(' // Line starts with four spaces.', '')
        # Comments right after an opening brace are fine.
        self.assert_lint('foo();\n'
                         '{ // A scope is opening.', '')
        self.assert_lint(' foo();\n'
                         ' { // An indented scope is opening.', '')
        self.assert_lint('if (foo) { // not a pure scope', '')
        # '//' inside string literals is not a comment.
        self.assert_lint('printf("// In quotes.")', '')
        self.assert_lint('printf("\\"%s // In quotes.")', '')
        self.assert_lint('printf("%s", "// In quotes.")', '')

    def test_line_ending_in_whitespace(self):
        """Trailing whitespace at the end of a line is reported."""
        self.assert_lint('int a; // This is a sentence.', '')
        self.assert_lint('int a; // This is a sentence. ',
                         'Line ends in whitespace. Consider deleting these extra spaces. [whitespace/end_of_line] [4]')

    def test_space_after_comment_marker(self):
        """'//' must be followed by a space, except separator rows and '///'."""
        self.assert_lint('//', '')
        self.assert_lint('//x', 'Should have a space between // and comment'
                         ' [whitespace/comments] [4]')
        self.assert_lint('// x', '')
        self.assert_lint('//----', '')
        self.assert_lint('//====', '')
        self.assert_lint('//////', '')
        self.assert_lint('////// x', '')
        self.assert_lint('/// x', '')
        self.assert_lint('////x', 'Should have a space between // and comment'
                         ' [whitespace/comments] [4]')

    def test_newline_at_eof(self):
        """A missing final newline is reported exactly once."""
        def do_test(self, data, is_missing_eof):
            error_collector = ErrorCollector(self.assertTrue)
            self.process_file_data('foo.cpp', 'cpp', data.split('\n'),
                                   error_collector)
            # The warning appears only once.
            self.assertEqual(
                int(is_missing_eof),
                error_collector.results().count(
                    'Could not find a newline character at the end of the file.'
                    ' [whitespace/ending_newline] [5]'))

        do_test(self, '// Newline\n// at EOF\n', False)
        do_test(self, '// No newline\n// at EOF', True)

    def test_invalid_utf8(self):
        """Invalid UTF-8 byte sequences are reported exactly once per file."""
        # NOTE(review): uses the Python 2 unicode() builtin; this test is not
        # Python 3 compatible as written.
        def do_test(self, raw_bytes, has_invalid_utf8):
            error_collector = ErrorCollector(self.assertTrue)
            self.process_file_data('foo.cpp', 'cpp',
                                   unicode(raw_bytes, 'utf8', 'replace').split('\n'),
                                   error_collector)
            # The warning appears only once.
            self.assertEqual(
                int(has_invalid_utf8),
                error_collector.results().count(
                    'Line contains invalid UTF-8'
                    ' (or Unicode replacement character).'
                    ' [readability/utf8] [5]'))

        do_test(self, 'Hello world\n', False)
        do_test(self, '\xe9\x8e\xbd\n', False)
        do_test(self, '\xe9x\x8e\xbd\n', True)
        # This is the encoding of the replacement character itself (which
        # you can see by evaluating codecs.getencoder('utf8')(u'\ufffd')).
        do_test(self, '\xef\xbf\xbd\n', True)

    def test_is_blank_line(self):
        """is_blank_line() is true only for empty or whitespace-only lines."""
        self.assertTrue(cpp_style.is_blank_line(''))
        self.assertTrue(cpp_style.is_blank_line(' '))
        self.assertTrue(cpp_style.is_blank_line(' \t\r\n'))
        self.assertTrue(not cpp_style.is_blank_line('int a;'))
        self.assertTrue(not cpp_style.is_blank_line('{'))

    def test_blank_lines_check(self):
        """Blank-line warnings at the start/end of code blocks."""
        self.assert_blank_lines_check(['{\n', '\n', '\n', '}\n'], 1, 1)
        self.assert_blank_lines_check([' if (foo) {\n', '\n', ' }\n'], 1, 1)
        self.assert_blank_lines_check(
            ['\n', '// {\n', '\n', '\n', '// Comment\n', '{\n', '}\n'], 0, 0)
        self.assert_blank_lines_check(['\n', 'run("{");\n', '\n'], 0, 0)
        self.assert_blank_lines_check(['\n', ' if (foo) { return 0; }\n', '\n'], 0, 0)

    def test_allow_blank_line_before_closing_namespace(self):
        """A blank line right before '} // namespace' is not flagged."""
        error_collector = ErrorCollector(self.assertTrue)
        self.process_file_data('foo.cpp', 'cpp',
                               ['namespace {', '', '} // namespace'],
                               error_collector)
        self.assertEqual(0, error_collector.results().count(
            'Blank line at the end of a code block. Is this needed?'
            ' [whitespace/blank_line] [3]'))

    def test_allow_blank_line_before_if_else_chain(self):
        """Blank lines before '} else ...' are allowed, except before the final '}'."""
        error_collector = ErrorCollector(self.assertTrue)
        self.process_file_data('foo.cpp', 'cpp',
                               ['if (hoge) {',
                                '',  # No warning
                                '} else if (piyo) {',
                                '',  # No warning
                                '} else if (piyopiyo) {',
                                ' hoge = true;',  # No warning
                                '} else {',
                                '',  # Warning on this line
                                '}'],
                               error_collector)
        self.assertEqual(1, error_collector.results().count(
            'Blank line at the end of a code block. Is this needed?'
            ' [whitespace/blank_line] [3]'))

    def test_else_on_same_line_as_closing_braces(self):
        """'else' on its own line after '}' draws a whitespace/newline warning."""
        error_collector = ErrorCollector(self.assertTrue)
        # NOTE(review): there is no comma after ' else {', so it concatenates
        # with the following '' literal and the effective list item is just
        # ' else {'. Confirm this is intentional.
        self.process_file_data('foo.cpp', 'cpp',
                               ['if (hoge) {',
                                '',
                                '}',
                                ' else {'  # Warning on this line
                                '',
                                '}'],
                               error_collector)
        self.assertEqual(1, error_collector.results().count(
            'An else should appear on the same line as the preceding }'
            ' [whitespace/newline] [4]'))

    def test_else_clause_not_on_same_line_as_else(self):
        """The else body must not share a line with the 'else' keyword."""
        self.assert_lint(' else DoSomethingElse();',
                         'Else clause should never be on same line as else '
                         '(use 2 lines) [whitespace/newline] [4]')
        self.assert_lint(' else ifDoSomethingElse();',
                         'Else clause should never be on same line as else '
                         '(use 2 lines) [whitespace/newline] [4]')
        self.assert_lint(' else if (blah) {', '')
        self.assert_lint(' variable_ends_in_else = true;', '')

    def test_comma(self):
        """A comma must be followed by a space."""
        self.assert_lint('a = f(1,2);',
                         'Missing space after , [whitespace/comma] [3]')
        self.assert_lint('int tmp=a,a=b,b=tmp;',
                         ['Missing spaces around = [whitespace/operators] [4]',
                          'Missing space after , [whitespace/comma] [3]'])
        self.assert_lint('f(a, /* name */ b);', '')
        self.assert_lint('f(a, /* name */b);', '')

    def test_declaration(self):
        """Extra spaces inside declarations are flagged."""
        # NOTE(review): the warning-case inputs look identical to the clean
        # cases here; the doubled spaces were probably collapsed by
        # formatting. Verify against the upstream file.
        self.assert_lint('int a;', '')
        self.assert_lint('int a;', 'Extra space between int and a [whitespace/declaration] [3]')
        self.assert_lint('int* a;', 'Extra space between int* and a [whitespace/declaration] [3]')
        self.assert_lint('else if { }', '')
        self.assert_lint('else if { }', 'Extra space between else and if [whitespace/declaration] [3]')

    def test_pointer_reference_marker_location(self):
        """'*'/'&' bind to the type name in .cpp files and to the variable in .c files."""
        self.assert_lint('int* b;', '', 'foo.cpp')
        self.assert_lint('int *b;', 'Declaration has space between type name and * in int *b [whitespace/declaration] [3]', 'foo.cpp')
        self.assert_lint('return *b;', '', 'foo.cpp')
        self.assert_lint('delete *b;', '', 'foo.cpp')
        self.assert_lint('int *b;', '', 'foo.c')
        self.assert_lint('int* b;', 'Declaration has space between * and variable name in int* b [whitespace/declaration] [3]', 'foo.c')
        self.assert_lint('int& b;', '', 'foo.cpp')
        self.assert_lint('int &b;', 'Declaration has space between type name and & in int &b [whitespace/declaration] [3]', 'foo.cpp')
        self.assert_lint('return &b;', '', 'foo.cpp')

    def test_indent(self):
        """Line-start indentation must be a multiple of four spaces."""
        # NOTE(review): the one/three/four-space leading runs in these inputs
        # appear collapsed to single spaces by formatting; verify upstream.
        self.assert_lint('static int noindent;', '')
        self.assert_lint(' int fourSpaceIndent;', '')
        self.assert_lint(' int oneSpaceIndent;',
                         'Weird number of spaces at line-start. '
                         'Are you using a 4-space indent? [whitespace/indent] [3]')
        self.assert_lint(' int threeSpaceIndent;',
                         'Weird number of spaces at line-start. '
                         'Are you using a 4-space indent? [whitespace/indent] [3]')
        self.assert_lint(' char* oneSpaceIndent = "public:";',
                         'Weird number of spaces at line-start. '
                         'Are you using a 4-space indent? [whitespace/indent] [3]')
        self.assert_lint(' public:',
                         'Weird number of spaces at line-start. '
                         'Are you using a 4-space indent? [whitespace/indent] [3]')
        self.assert_lint(' public:',
                         'Weird number of spaces at line-start. '
                         'Are you using a 4-space indent? [whitespace/indent] [3]')
        self.assert_lint(' public:',
                         'Weird number of spaces at line-start. '
                         'Are you using a 4-space indent? [whitespace/indent] [3]')
        self.assert_multi_line_lint(
            'class Foo {\n'
            'public:\n'
            ' enum Bar {\n'
            ' Alpha,\n'
            ' Beta,\n'
            '#if ENABLED_BETZ\n'
            ' Charlie,\n'
            '#endif\n'
            ' };\n'
            '};',
            '')
        self.assert_multi_line_lint(
            'if (true) {\n'
            ' myFunction(reallyLongParam1, reallyLongParam2,\n'
            ' reallyLongParam3);\n'
            '}\n',
            'Weird number of spaces at line-start. Are you using a 4-space indent? [whitespace/indent] [3]')
        self.assert_multi_line_lint(
            'if (true) {\n'
            ' myFunction(reallyLongParam1, reallyLongParam2,\n'
            ' reallyLongParam3);\n'
            '}\n',
            'When wrapping a line, only indent 4 spaces. [whitespace/indent] [3]')

    def test_not_alabel(self):
        """A qualified name ending in '::' must not be mistaken for a goto label."""
        self.assert_lint('MyVeryLongNamespace::MyVeryLongClassName::', '')

    def test_tab(self):
        """Tabs anywhere on a line are reported."""
        self.assert_lint('\tint a;',
                         'Tab found; better to use spaces [whitespace/tab] [1]')
        self.assert_lint('int a = 5;\t// set a to 5',
                         'Tab found; better to use spaces [whitespace/tab] [1]')

    def test_unnamed_namespaces_in_headers(self):
        """Unnamed namespaces are disallowed in headers but fine in .cpp files."""
        self.assert_language_rules_check(
            'foo.h', 'namespace {',
            'Do not use unnamed namespaces in header files. See'
            ' http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
            ' for more information. [build/namespaces] [4]')
        # namespace registration macros are OK.
        self.assert_language_rules_check('foo.h', 'namespace { \\', '')
        # named namespaces are OK.
        self.assert_language_rules_check('foo.h', 'namespace foo {', '')
        self.assert_language_rules_check('foo.h', 'namespace foonamespace {', '')
        self.assert_language_rules_check('foo.cpp', 'namespace {', '')
        self.assert_language_rules_check('foo.cpp', 'namespace foo {', '')

    def test_build_class(self):
        """Parsing of class declarations, including ones the linter cannot finish."""
        # Test that the linter can parse to the end of class definitions,
        # and that it will report when it can't.
        # Use multi-line linter because it performs the ClassState check.
        self.assert_multi_line_lint(
            'class Foo {',
            'Failed to find complete declaration of class Foo'
            ' [build/class] [5]')
        # Don't warn on forward declarations of various types.
        self.assert_multi_line_lint(
            'class Foo;',
            '')
        self.assert_multi_line_lint(
            '''\
struct Foo* foo = NewFoo();''',
            '')
        # Here is an example where the linter gets confused, even though
        # the code doesn't violate the style guide.
        self.assert_multi_line_lint(
            'class Foo\n'
            '#ifdef DERIVE_FROM_GOO\n'
            ' : public Goo {\n'
            '#else\n'
            ' : public Hoo {\n'
            '#endif\n'
            '};',
            'Failed to find complete declaration of class Foo'
            ' [build/class] [5]')

    def test_build_end_comment(self):
        """Text after #endif must be inside a comment."""
        # The crosstool compiler we currently use will fail to compile the
        # code in this test, so we might consider removing the lint check.
self.assert_lint('#endif Not a comment', 'Uncommented text after #endif is non-standard.' ' Use a comment.' ' [build/endif_comment] [5]') def test_build_forward_decl(self): # The crosstool compiler we currently use will fail to compile the # code in this test, so we might consider removing the lint check. self.assert_lint('class Foo::Goo;', 'Inner-style forward declarations are invalid.' ' Remove this line.' ' [build/forward_decl] [5]') def test_build_header_guard(self): file_path = 'mydir/Foo.h' # We can't rely on our internal stuff to get a sane path on the open source # side of things, so just parse out the suggested header guard. This # doesn't allow us to test the suggested header guard, but it does let us # test all the other header tests. error_collector = ErrorCollector(self.assertTrue) self.process_file_data(file_path, 'h', [], error_collector) expected_guard = '' matcher = re.compile( 'No \#ifndef header guard found\, suggested CPP variable is\: ([A-Za-z_0-9]+) ') for error in error_collector.result_list(): matches = matcher.match(error) if matches: expected_guard = matches.group(1) break # Make sure we extracted something for our header guard. 
        self.assertNotEqual(expected_guard, '')

        # Wrong guard
        error_collector = ErrorCollector(self.assertTrue)
        self.process_file_data(file_path, 'h',
                               ['#ifndef FOO_H', '#define FOO_H'],
                               error_collector)
        self.assertEqual(
            1,
            error_collector.result_list().count(
                '#ifndef header guard has wrong style, please use: %s'
                ' [build/header_guard] [5]' % expected_guard),
            error_collector.result_list())

        # No define
        error_collector = ErrorCollector(self.assertTrue)
        self.process_file_data(file_path, 'h',
                               ['#ifndef %s' % expected_guard],
                               error_collector)
        self.assertEqual(
            1,
            error_collector.result_list().count(
                'No #ifndef header guard found, suggested CPP variable is: %s'
                ' [build/header_guard] [5]' % expected_guard),
            error_collector.result_list())

        # Mismatched define
        error_collector = ErrorCollector(self.assertTrue)
        self.process_file_data(file_path, 'h',
                               ['#ifndef %s' % expected_guard,
                                '#define FOO_H'],
                               error_collector)
        self.assertEqual(
            1,
            error_collector.result_list().count(
                'No #ifndef header guard found, suggested CPP variable is: %s'
                ' [build/header_guard] [5]' % expected_guard),
            error_collector.result_list())

        # No header guard errors
        error_collector = ErrorCollector(self.assertTrue)
        self.process_file_data(file_path, 'h',
                               ['#ifndef %s' % expected_guard,
                                '#define %s' % expected_guard,
                                '#endif // %s' % expected_guard],
                               error_collector)
        for line in error_collector.result_list():
            if line.find('build/header_guard') != -1:
                self.fail('Unexpected error: %s' % line)

        # Completely incorrect header guard
        error_collector = ErrorCollector(self.assertTrue)
        self.process_file_data(file_path, 'h',
                               ['#ifndef FOO',
                                '#define FOO',
                                '#endif // FOO'],
                               error_collector)
        self.assertEqual(
            1,
            error_collector.result_list().count(
                '#ifndef header guard has wrong style, please use: %s'
                ' [build/header_guard] [5]' % expected_guard),
            error_collector.result_list())

        # Special case for flymake
        error_collector = ErrorCollector(self.assertTrue)
        self.process_file_data('mydir/Foo_flymake.h', 'h',
                               ['#ifndef %s' % expected_guard,
                                '#define %s' % expected_guard,
                                '#endif // %s' % expected_guard],
                               error_collector)
        for line in error_collector.result_list():
            if line.find('build/header_guard') != -1:
                self.fail('Unexpected error: %s' % line)

        error_collector = ErrorCollector(self.assertTrue)
        self.process_file_data('mydir/Foo_flymake.h', 'h', [], error_collector)
        self.assertEqual(
            1,
            error_collector.result_list().count(
                'No #ifndef header guard found, suggested CPP variable is: %s'
                ' [build/header_guard] [5]' % expected_guard),
            error_collector.result_list())

        # Verify that we don't blindly suggest the WTF prefix for all headers.
        self.assertFalse(expected_guard.startswith('WTF_'))

        # Allow the WTF_ prefix for files in that directory.
        header_guard_filter = FilterConfiguration(('-', '+build/header_guard'))
        error_collector = ErrorCollector(self.assertTrue, header_guard_filter)
        self.process_file_data('Source/JavaScriptCore/wtf/TestName.h', 'h',
                               ['#ifndef WTF_TestName_h', '#define WTF_TestName_h'],
                               error_collector)
        self.assertEqual(0, len(error_collector.result_list()),
                         error_collector.result_list())

        # Also allow the non WTF_ prefix for files in that directory.
        error_collector = ErrorCollector(self.assertTrue, header_guard_filter)
        self.process_file_data('Source/JavaScriptCore/wtf/TestName.h', 'h',
                               ['#ifndef TestName_h', '#define TestName_h'],
                               error_collector)
        self.assertEqual(0, len(error_collector.result_list()),
                         error_collector.result_list())

        # Verify that we suggest the WTF prefix version.
        error_collector = ErrorCollector(self.assertTrue, header_guard_filter)
        self.process_file_data('Source/JavaScriptCore/wtf/TestName.h', 'h',
                               ['#ifndef BAD_TestName_h', '#define BAD_TestName_h'],
                               error_collector)
        self.assertEqual(
            1,
            error_collector.result_list().count(
                '#ifndef header guard has wrong style, please use: WTF_TestName_h'
                ' [build/header_guard] [5]'),
            error_collector.result_list())

        # Verify that the Chromium-style header guard is allowed as well.
        error_collector = ErrorCollector(self.assertTrue, header_guard_filter)
        self.process_file_data('Source/foo/testname.h', 'h',
                               ['#ifndef BLINK_FOO_TESTNAME_H_',
                                '#define BLINK_FOO_TESTNAME_H_'],
                               error_collector)
        self.assertEqual(0, len(error_collector.result_list()),
                         error_collector.result_list())

    def test_build_printf_format(self):
        """%, [, (, { must not be backslash-escaped inside printf-family format strings."""
        self.assert_lint(
            r'printf("\%%d", value);',
            '%, [, (, and { are undefined character escapes. Unescape them.'
            ' [build/printf_format] [3]')
        self.assert_lint(
            r'snprintf(buffer, sizeof(buffer), "\[%d", value);',
            '%, [, (, and { are undefined character escapes. Unescape them.'
            ' [build/printf_format] [3]')
        self.assert_lint(
            r'fprintf(file, "\(%d", value);',
            '%, [, (, and { are undefined character escapes. Unescape them.'
            ' [build/printf_format] [3]')
        self.assert_lint(
            r'vsnprintf(buffer, sizeof(buffer), "\\\{%d", ap);',
            '%, [, (, and { are undefined character escapes. Unescape them.'
            ' [build/printf_format] [3]')
        # Don't warn if double-slash precedes the symbol
        self.assert_lint(r'printf("\\%%%d", value);', '')

    def test_runtime_printf_format(self):
        """%q and positional %N$ printf conversions are flagged."""
        self.assert_lint(
            r'fprintf(file, "%q", value);',
            '%q in format strings is deprecated. Use %ll instead.'
            ' [runtime/printf_format] [3]')
        self.assert_lint(
            r'aprintf(file, "The number is %12q", value);',
            '%q in format strings is deprecated. Use %ll instead.'
            ' [runtime/printf_format] [3]')
        self.assert_lint(
            r'printf(file, "The number is" "%-12q", value);',
            '%q in format strings is deprecated. Use %ll instead.'
            ' [runtime/printf_format] [3]')
        self.assert_lint(
            r'printf(file, "The number is" "%+12q", value);',
            '%q in format strings is deprecated. Use %ll instead.'
            ' [runtime/printf_format] [3]')
        self.assert_lint(
            r'printf(file, "The number is" "% 12q", value);',
            '%q in format strings is deprecated. Use %ll instead.'
            ' [runtime/printf_format] [3]')
        self.assert_lint(
            r'snprintf(file, "Never mix %d and %1$d parmaeters!", value);',
            '%N$ formats are unconventional. Try rewriting to avoid them.'
            ' [runtime/printf_format] [2]')

    def assert_lintLogCodeOnError(self, code, expected_message):
        """Like assert_lint, but includes the offending input code in the failure message."""
        # Special assert_lint which logs the input code on error.
        result = self.perform_single_line_lint(code, 'foo.cpp')
        if result != expected_message:
            self.fail('For code: "%s"\nGot: "%s"\nExpected: "%s"'
                      % (code, result, expected_message))

    def test_build_storage_class(self):
        """A storage-class specifier must come first in a declaration."""
        qualifiers = [None, 'const', 'volatile']
        signs = [None, 'signed', 'unsigned']
        types = ['void', 'char', 'int', 'float', 'double',
                 'schar', 'int8', 'uint8', 'int16', 'uint16',
                 'int32', 'uint32', 'int64', 'uint64']
        storage_classes = ['auto', 'extern', 'register', 'static', 'typedef']

        build_storage_class_error_message = (
            'Storage class (static, extern, typedef, etc) should be first.'
            ' [build/storage_class] [5]')

        # Some explicit cases. Legal in C++, deprecated in C99.
        self.assert_lint('const int static foo = 5;',
                         build_storage_class_error_message)
        self.assert_lint('char static foo;',
                         build_storage_class_error_message)
        self.assert_lint('double const static foo = 2.0;',
                         build_storage_class_error_message)
        self.assert_lint('uint64 typedef unsignedLongLong;',
                         build_storage_class_error_message)
        self.assert_lint('int register foo = 0;',
                         build_storage_class_error_message)

        # Since there are a very large number of possibilities, randomly
        # construct declarations.
        # Make sure that the declaration is logged if there's an error.
        # Seed generator with an integer for absolute reproducibility.
        random.seed(25)
        for unused_i in range(10):
            # Build up random list of non-storage-class declaration specs.
other_decl_specs = [random.choice(qualifiers), random.choice(signs), random.choice(types)] # remove None other_decl_specs = filter(lambda x: x is not None, other_decl_specs) # shuffle random.shuffle(other_decl_specs) # insert storage class after the first storage_class = random.choice(storage_classes) insertion_point = random.randint(1, len(other_decl_specs)) decl_specs = (other_decl_specs[0:insertion_point] + [storage_class] + other_decl_specs[insertion_point:]) self.assert_lintLogCodeOnError( ' '.join(decl_specs) + ';', build_storage_class_error_message) # but no error if storage class is first self.assert_lintLogCodeOnError( storage_class + ' ' + ' '.join(other_decl_specs), '') def test_legal_copyright(self): legal_copyright_message = ( 'No copyright message found. ' 'You should have a line: "Copyright [year] <Copyright Owner>"' ' [legal/copyright] [5]') copyright_line = '// Copyright 2008 Google Inc. All Rights Reserved.' file_path = 'mydir/googleclient/foo.cpp' # There should be a copyright message in the first 10 lines error_collector = ErrorCollector(self.assertTrue) self.process_file_data(file_path, 'cpp', [], error_collector) self.assertEqual( 1, error_collector.result_list().count(legal_copyright_message)) error_collector = ErrorCollector(self.assertTrue) self.process_file_data( file_path, 'cpp', ['' for unused_i in range(10)] + [copyright_line], error_collector) self.assertEqual( 1, error_collector.result_list().count(legal_copyright_message)) # Test that warning isn't issued if Copyright line appears early enough. 
        error_collector = ErrorCollector(self.assertTrue)
        self.process_file_data(file_path, 'cpp', [copyright_line],
                               error_collector)
        for message in error_collector.result_list():
            if message.find('legal/copyright') != -1:
                self.fail('Unexpected error: %s' % message)

        error_collector = ErrorCollector(self.assertTrue)
        self.process_file_data(
            file_path, 'cpp',
            ['' for unused_i in range(9)] + [copyright_line],
            error_collector)
        for message in error_collector.result_list():
            if message.find('legal/copyright') != -1:
                self.fail('Unexpected error: %s' % message)

    def test_invalid_increment(self):
        """'*count++' increments the pointer, not the value; it is flagged."""
        self.assert_lint('*count++;',
                         'Changing pointer instead of value (or unused value of '
                         'operator*). [runtime/invalid_increment] [5]')

    # Integral bitfields must be declared with either signed or unsigned keyword.
    def test_plain_integral_bitfields(self):
        """Plain integral bitfields must be explicitly signed or unsigned."""
        errmsg = ('Please declare integral type bitfields with either signed or unsigned. [runtime/bitfields] [5]')

        self.assert_lint('int a : 30;', errmsg)
        self.assert_lint('mutable short a : 14;', errmsg)
        self.assert_lint('const char a : 6;', errmsg)
        self.assert_lint('long int a : 30;', errmsg)
        # A ternary with ':' is not a bitfield declaration.
        self.assert_lint('int a = 1 ? 0 : 30;', '')

    # A mixture of unsigned and bool bitfields in a class will generate a warning.
    def test_mixing_unsigned_bool_bitfields(self):
        """Mixing bool and unsigned bitfields in one class draws a warning."""
        def errmsg(bool_bitfields, unsigned_bitfields, name):
            # Build the expected mixed-bitfields warning for the given members.
            bool_list = ', '.join(bool_bitfields)
            unsigned_list = ', '.join(unsigned_bitfields)
            return ('The class %s contains mixed unsigned and bool bitfields, '
                    'which will pack into separate words on the MSVC compiler.\n'
                    'Bool bitfields are [%s].\nUnsigned bitfields are [%s].\n'
                    'Consider converting bool bitfields to unsigned. [runtime/bitfields] [5]'
                    % (name, bool_list, unsigned_list))

        def build_test_case(bitfields, name, will_warn, extra_warnings=[]):
            # Synthesize a class body from (type, member, bits) tuples and lint it.
            # NOTE(review): mutable default argument; harmless here because the
            # list is never mutated, but a tuple default would be safer.
            bool_bitfields = []
            unsigned_bitfields = []
            test_string = 'class %s {\n' % (name,)
            line = 2
            for bitfield in bitfields:
                test_string += ' %s %s : %d;\n' % bitfield
                if bitfield[0] == 'bool':
                    bool_bitfields.append('%d: %s' % (line, bitfield[1]))
                elif bitfield[0].startswith('unsigned'):
                    unsigned_bitfields.append('%d: %s' % (line, bitfield[1]))
                line += 1
            test_string += '}\n'
            error = ''
            if will_warn:
                error = errmsg(bool_bitfields, unsigned_bitfields, name)
            if extra_warnings and error:
                error = extra_warnings + [error]
            self.assert_multi_line_lint(test_string, error)

        build_test_case([('bool', 'm_boolMember', 4),
                         ('unsigned', 'm_unsignedMember', 3)],
                        'MyClass', True)
        build_test_case([('bool', 'm_boolMember', 4),
                         ('bool', 'm_anotherBool', 3)],
                        'MyClass', False)
        build_test_case([('unsigned', 'm_unsignedMember', 4),
                         ('unsigned', 'm_anotherUnsigned', 3)],
                        'MyClass', False)
        build_test_case([('bool', 'm_boolMember', 4),
                         ('bool', 'm_anotherbool', 3),
                         ('bool', 'm_moreBool', 1),
                         ('bool', 'm_lastBool', 1),
                         ('unsigned int', 'm_tokenUnsigned', 4)],
                        'MyClass', True,
                        ['Omit int when using unsigned [runtime/unsigned] [1]'])

        self.assert_multi_line_lint('class NoProblemsHere {\n'
                                    ' bool m_boolMember;\n'
                                    ' unsigned m_unsignedMember;\n'
                                    ' unsigned m_bitField1 : 1;\n'
                                    ' unsigned m_bitField4 : 4;\n'
                                    '}\n', '')

    # Bitfields which are not declared unsigned or bool will generate a warning.
    def test_unsigned_bool_bitfields(self):
        """Bitfields must be declared unsigned (or bool)."""
        def errmsg(member, name, bit_type):
            return ('Member %s of class %s defined as a bitfield of type %s. '
                    'Please declare all bitfields as unsigned. [runtime/bitfields] [4]'
                    % (member, name, bit_type))

        def warning_bitfield_test(member, name, bit_type, bits):
            # Lint a one-member class and expect the bitfield warning.
            self.assert_multi_line_lint('class %s {\n%s %s: %d;\n}\n'
                                        % (name, bit_type, member, bits),
                                        errmsg(member, name, bit_type))

        def safe_bitfield_test(member, name, bit_type, bits):
            # Lint a one-member class and expect no warning.
            self.assert_multi_line_lint('class %s {\n%s %s: %d;\n}\n'
                                        % (name, bit_type, member, bits),
                                        '')

        warning_bitfield_test('a', 'A', 'int32_t', 25)
        warning_bitfield_test('m_someField', 'SomeClass', 'signed', 4)
        warning_bitfield_test('m_someField', 'SomeClass', 'SomeEnum', 2)

        safe_bitfield_test('a', 'A', 'unsigned', 22)
        safe_bitfield_test('m_someField', 'SomeClass', 'bool', 1)
        safe_bitfield_test('m_someField', 'SomeClass', 'unsigned', 2)

        # Declarations in 'Expected' or 'SameSizeAs' classes are OK.
        warning_bitfield_test('m_bitfields', 'SomeClass', 'int32_t', 32)
        safe_bitfield_test('m_bitfields', 'ExpectedSomeClass', 'int32_t', 32)
        safe_bitfield_test('m_bitfields', 'SameSizeAsSomeClass', 'int32_t', 32)


class CleansedLinesTest(unittest.TestCase):
    """Tests for cpp_style.CleansedLines (comment and string stripping)."""

    def test_init(self):
        """raw_lines keeps input verbatim; lines strips comments; elided also empties strings."""
        lines = ['Line 1',
                 'Line 2',
                 'Line 3 // Comment test',
                 'Line 4 "foo"']
        clean_lines = cpp_style.CleansedLines(lines)
        self.assertEqual(lines, clean_lines.raw_lines)
        self.assertEqual(4, clean_lines.num_lines())
        self.assertEqual(['Line 1',
                          'Line 2',
                          'Line 3 ',
                          'Line 4 "foo"'],
                         clean_lines.lines)
        self.assertEqual(['Line 1',
                          'Line 2',
                          'Line 3 ',
                          'Line 4 ""'],
                         clean_lines.elided)

    def test_init_empty(self):
        """An empty input produces an empty CleansedLines."""
        clean_lines = cpp_style.CleansedLines([])
        self.assertEqual([], clean_lines.raw_lines)
        self.assertEqual(0, clean_lines.num_lines())

    def test_collapse_strings(self):
        """collapse_strings() empties string/char literal contents, even malformed ones."""
        collapse = cpp_style.CleansedLines.collapse_strings
        self.assertEqual('""', collapse('""'))  # "" (empty)
        self.assertEqual('"""', collapse('"""'))  # """ (bad)
        self.assertEqual('""', collapse('"xyz"'))  # "xyz" (string)
        self.assertEqual('""', collapse('"\\\""'))  # "\"" (string)
        self.assertEqual('""', collapse('"\'"'))  # "'" (string)
        self.assertEqual('"\"', collapse('"\"'))  # "\" (bad)
        self.assertEqual('""', collapse('"\\\\"'))  # "\\" (string)
        self.assertEqual('"', collapse('"\\\\\\"'))  # "\\\" (bad)
        self.assertEqual('""', collapse('"\\\\\\\\"'))  # "\\\\" (string)
        self.assertEqual('\'\'', collapse('\'\''))  # '' (empty)
        self.assertEqual('\'\'', collapse('\'a\''))  # 'a' (char)
        self.assertEqual('\'\'', collapse('\'\\\'\''))  # '\'' (char)
        self.assertEqual('\'', collapse('\'\\\''))  # '\' (bad)
        self.assertEqual('', collapse('\\012'))  # '\012' (char)
        self.assertEqual('', collapse('\\xfF0'))  # '\xfF0' (char)
        self.assertEqual('', collapse('\\n'))  # '\n' (char)
        self.assertEqual('\#', collapse('\\#'))  # '\#' (bad)
        self.assertEqual('StringReplace(body, "", "");',
                         collapse('StringReplace(body, "\\\\", "\\\\\\\\");'))
        self.assertEqual('\'\' ""', collapse('\'"\' "foo"'))


class OrderOfIncludesTest(CppStyleTestBase):
    """Tests for include-suffix helpers."""
    # NOTE(review): this class name is defined twice in this file; the second
    # definition below shadows this one at module level, so these tests likely
    # never run under unittest discovery. Confirm and rename or merge.

    def setUp(self):
        self.include_state = cpp_style._IncludeState()
        # Cheat os.path.abspath called in FileInfo class.
        self.os_path_abspath_orig = os.path.abspath
        os.path.abspath = lambda value: value

    def tearDown(self):
        # Restore the patched os.path.abspath.
        os.path.abspath = self.os_path_abspath_orig

    def test_try_drop_common_suffixes(self):
        """_drop_common_suffixes() removes -inl/_inl/.cpp/_test style suffixes."""
        self.assertEqual('foo/foo', cpp_style._drop_common_suffixes('foo/foo-inl.h'))
        self.assertEqual('foo/bar/foo', cpp_style._drop_common_suffixes('foo/bar/foo_inl.h'))
        self.assertEqual('foo/foo', cpp_style._drop_common_suffixes('foo/foo.cpp'))
        self.assertEqual('foo/foo_unusualinternal',
                         cpp_style._drop_common_suffixes('foo/foo_unusualinternal.h'))
        self.assertEqual('', cpp_style._drop_common_suffixes('_test.cpp'))
        self.assertEqual('test', cpp_style._drop_common_suffixes('test.cpp'))


class OrderOfIncludesTest(CppStyleTestBase):
    """Tests for include-order checking. Duplicate class name — see the NOTE
    on the identically named class above."""

    def setUp(self):
        self.include_state = cpp_style._IncludeState()
        # Cheat os.path.abspath called in FileInfo class.
self.os_path_abspath_orig = os.path.abspath self.os_path_isfile_orig = os.path.isfile os.path.abspath = lambda value: value def tearDown(self): os.path.abspath = self.os_path_abspath_orig os.path.isfile = self.os_path_isfile_orig def test_check_next_include_order__no_config(self): self.assertEqual('Header file should not contain WebCore config.h.', self.include_state.check_next_include_order(cpp_style._CONFIG_HEADER, True, True)) def test_check_next_include_order__no_self(self): self.assertEqual('Header file should not contain itself.', self.include_state.check_next_include_order(cpp_style._PRIMARY_HEADER, True, True)) # Test actual code to make sure that header types are correctly assigned. self.assert_language_rules_check('Foo.h', '#include "Foo.h"\n', 'Header file should not contain itself. Should be: alphabetically sorted.' ' [build/include_order] [4]') self.assert_language_rules_check('FooBar.h', '#include "Foo.h"\n', '') def test_check_next_include_order__likely_then_config(self): self.assertEqual('Found header this file implements before WebCore config.h.', self.include_state.check_next_include_order(cpp_style._PRIMARY_HEADER, False, True)) self.assertEqual('Found WebCore config.h after a header this file implements.', self.include_state.check_next_include_order(cpp_style._CONFIG_HEADER, False, True)) def test_check_next_include_order__other_then_config(self): self.assertEqual('Found other header before WebCore config.h.', self.include_state.check_next_include_order(cpp_style._OTHER_HEADER, False, True)) self.assertEqual('Found WebCore config.h after other header.', self.include_state.check_next_include_order(cpp_style._CONFIG_HEADER, False, True)) def test_check_next_include_order__config_then_other_then_likely(self): self.assertEqual('', self.include_state.check_next_include_order(cpp_style._CONFIG_HEADER, False, True)) self.assertEqual('Found other header before a header this file implements.', 
self.include_state.check_next_include_order(cpp_style._OTHER_HEADER, False, True)) self.assertEqual('Found header this file implements after other header.', self.include_state.check_next_include_order(cpp_style._PRIMARY_HEADER, False, True)) def test_check_alphabetical_include_order(self): self.assert_language_rules_check('foo.h', '#include "a.h"\n' '#include "c.h"\n' '#include "b.h"\n', 'Alphabetical sorting problem. [build/include_order] [4]') self.assert_language_rules_check('foo.h', '#include "a.h"\n' '#include "b.h"\n' '#include "c.h"\n', '') self.assert_language_rules_check('foo.h', '#include <assert.h>\n' '#include "bar.h"\n', 'Alphabetical sorting problem. [build/include_order] [4]') self.assert_language_rules_check('foo.h', '#include "bar.h"\n' '#include <assert.h>\n', '') def test_check_alphabetical_include_order_errors_reported_for_both_lines(self): # If one of the two lines of out of order headers are filtered, the error should be # reported on the other line. self.assert_language_rules_check('foo.h', '#include "a.h"\n' '#include "c.h"\n' '#include "b.h"\n', 'Alphabetical sorting problem. [build/include_order] [4]', lines_to_check=[2]) self.assert_language_rules_check('foo.h', '#include "a.h"\n' '#include "c.h"\n' '#include "b.h"\n', 'Alphabetical sorting problem. [build/include_order] [4]', lines_to_check=[3]) # If no lines are filtered, the error should be reported only once. self.assert_language_rules_check('foo.h', '#include "a.h"\n' '#include "c.h"\n' '#include "b.h"\n', 'Alphabetical sorting problem. [build/include_order] [4]') def test_check_line_break_after_own_header(self): self.assert_language_rules_check('foo.cpp', '#include "config.h"\n' '#include "foo.h"\n' '#include "bar.h"\n', 'You should add a blank line after implementation file\'s own header. 
[build/include_order] [4]') self.assert_language_rules_check('foo.cpp', '#include "config.h"\n' '#include "foo.h"\n' '\n' '#include "bar.h"\n', '') def test_check_preprocessor_in_include_section(self): self.assert_language_rules_check('foo.cpp', '#include "config.h"\n' '#include "foo.h"\n' '\n' '#ifdef BAZ\n' '#include "baz.h"\n' '#else\n' '#include "foobar.h"\n' '#endif"\n' '#include "bar.h"\n', # No flag because previous is in preprocessor section '') self.assert_language_rules_check('foo.cpp', '#include "config.h"\n' '#include "foo.h"\n' '\n' '#ifdef BAZ\n' '#include "baz.h"\n' '#endif"\n' '#include "bar.h"\n' '#include "a.h"\n', # Should still flag this. 'Alphabetical sorting problem. [build/include_order] [4]') self.assert_language_rules_check('foo.cpp', '#include "config.h"\n' '#include "foo.h"\n' '\n' '#ifdef BAZ\n' '#include "baz.h"\n' '#include "bar.h"\n' #Should still flag this '#endif"\n', 'Alphabetical sorting problem. [build/include_order] [4]') self.assert_language_rules_check('foo.cpp', '#include "config.h"\n' '#include "foo.h"\n' '\n' '#ifdef BAZ\n' '#include "baz.h"\n' '#endif"\n' '#ifdef FOOBAR\n' '#include "foobar.h"\n' '#endif"\n' '#include "bar.h"\n' '#include "a.h"\n', # Should still flag this. 'Alphabetical sorting problem. [build/include_order] [4]') # Check that after an already included error, the sorting rules still work. self.assert_language_rules_check('foo.cpp', '#include "config.h"\n' '#include "foo.h"\n' '\n' '#include "foo.h"\n' '#include "g.h"\n', '"foo.h" already included at foo.cpp:2 [build/include] [4]') def test_primary_header(self): # File with non-existing primary header should not produce errors. self.assert_language_rules_check('foo.cpp', '#include "config.h"\n' '\n' '#include "bar.h"\n', '') # Pretend that header files exist. os.path.isfile = lambda filename: True # Missing include for existing primary header -> error. 
self.assert_language_rules_check('foo.cpp', '#include "config.h"\n' '\n' '#include "bar.h"\n', 'Found other header before a header this file implements. ' 'Should be: config.h, primary header, blank line, and then ' 'alphabetically sorted. [build/include_order] [4]') # Having include for existing primary header -> no error. self.assert_language_rules_check('foo.cpp', '#include "config.h"\n' '#include "foo.h"\n' '\n' '#include "bar.h"\n', '') os.path.isfile = self.os_path_isfile_orig def test_public_primary_header(self): # System header is not considered a primary header. self.assert_language_rules_check('foo.cpp', '#include "config.h"\n' '#include <other/foo.h>\n' '\n' '#include "a.h"\n', 'Alphabetical sorting problem. [build/include_order] [4]') # ...except that it starts with public/. self.assert_language_rules_check('foo.cpp', '#include "config.h"\n' '#include <public/foo.h>\n' '\n' '#include "a.h"\n', '') # Even if it starts with public/ its base part must match with the source file name. self.assert_language_rules_check('foo.cpp', '#include "config.h"\n' '#include <public/foop.h>\n' '\n' '#include "a.h"\n', 'Alphabetical sorting problem. [build/include_order] [4]') def test_check_wtf_includes(self): self.assert_language_rules_check('foo.cpp', '#include "config.h"\n' '#include "foo.h"\n' '\n' '#include <wtf/Assertions.h>\n', 'wtf includes should be "wtf/file.h" instead of <wtf/file.h>.' ' [build/include] [4]') self.assert_language_rules_check('foo.cpp', '#include "config.h"\n' '#include "foo.h"\n' '\n' '#include "wtf/Assertions.h"\n', '') def test_check_cc_includes(self): self.assert_language_rules_check('bar/chromium/foo.cpp', '#include "config.h"\n' '#include "foo.h"\n' '\n' '#include "cc/CCProxy.h"\n', 'cc includes should be "CCFoo.h" instead of "cc/CCFoo.h".' 
' [build/include] [4]') def test_classify_include(self): classify_include = cpp_style._classify_include include_state = cpp_style._IncludeState() self.assertEqual(cpp_style._CONFIG_HEADER, classify_include('foo/foo.cpp', 'config.h', False, include_state)) self.assertEqual(cpp_style._PRIMARY_HEADER, classify_include('foo/internal/foo.cpp', 'foo/public/foo.h', False, include_state)) self.assertEqual(cpp_style._PRIMARY_HEADER, classify_include('foo/internal/foo.cpp', 'foo/other/public/foo.h', False, include_state)) self.assertEqual(cpp_style._OTHER_HEADER, classify_include('foo/internal/foo.cpp', 'foo/other/public/foop.h', False, include_state)) self.assertEqual(cpp_style._OTHER_HEADER, classify_include('foo/foo.cpp', 'string', True, include_state)) self.assertEqual(cpp_style._PRIMARY_HEADER, classify_include('fooCustom.cpp', 'foo.h', False, include_state)) self.assertEqual(cpp_style._PRIMARY_HEADER, classify_include('PrefixFooCustom.cpp', 'Foo.h', False, include_state)) self.assertEqual(cpp_style._MOC_HEADER, classify_include('foo.cpp', 'foo.moc', False, include_state)) self.assertEqual(cpp_style._MOC_HEADER, classify_include('foo.cpp', 'moc_foo.cpp', False, include_state)) # <public/foo.h> must be considered as primary even if is_system is True. self.assertEqual(cpp_style._PRIMARY_HEADER, classify_include('foo/foo.cpp', 'public/foo.h', True, include_state)) self.assertEqual(cpp_style._OTHER_HEADER, classify_include('foo.cpp', 'foo.h', True, include_state)) self.assertEqual(cpp_style._OTHER_HEADER, classify_include('foo.cpp', 'public/foop.h', True, include_state)) # Qt private APIs use _p.h suffix. self.assertEqual(cpp_style._PRIMARY_HEADER, classify_include('foo.cpp', 'foo_p.h', False, include_state)) # Tricky example where both includes might be classified as primary. 
self.assert_language_rules_check('ScrollbarThemeWince.cpp', '#include "config.h"\n' '#include "ScrollbarThemeWince.h"\n' '\n' '#include "Scrollbar.h"\n', '') self.assert_language_rules_check('ScrollbarThemeWince.cpp', '#include "config.h"\n' '#include "Scrollbar.h"\n' '\n' '#include "ScrollbarThemeWince.h"\n', 'Found header this file implements after a header this file implements.' ' Should be: config.h, primary header, blank line, and then alphabetically sorted.' ' [build/include_order] [4]') self.assert_language_rules_check('ResourceHandleWin.cpp', '#include "config.h"\n' '#include "ResourceHandle.h"\n' '\n' '#include "ResourceHandleWin.h"\n', '') def test_try_drop_common_suffixes(self): self.assertEqual('foo/foo', cpp_style._drop_common_suffixes('foo/foo-inl.h')) self.assertEqual('foo/bar/foo', cpp_style._drop_common_suffixes('foo/bar/foo_inl.h')) self.assertEqual('foo/foo', cpp_style._drop_common_suffixes('foo/foo.cpp')) self.assertEqual('foo/foo_unusualinternal', cpp_style._drop_common_suffixes('foo/foo_unusualinternal.h')) self.assertEqual('', cpp_style._drop_common_suffixes('_test.cpp')) self.assertEqual('test', cpp_style._drop_common_suffixes('test.cpp')) self.assertEqual('test', cpp_style._drop_common_suffixes('test.cpp')) class CheckForFunctionLengthsTest(CppStyleTestBase): def setUp(self): # Reducing these thresholds for the tests speeds up tests significantly. self.old_normal_trigger = cpp_style._FunctionState._NORMAL_TRIGGER self.old_test_trigger = cpp_style._FunctionState._TEST_TRIGGER cpp_style._FunctionState._NORMAL_TRIGGER = 10 cpp_style._FunctionState._TEST_TRIGGER = 25 def tearDown(self): cpp_style._FunctionState._NORMAL_TRIGGER = self.old_normal_trigger cpp_style._FunctionState._TEST_TRIGGER = self.old_test_trigger # FIXME: Eliminate the need for this function. 
    def set_min_confidence(self, min_confidence):
        """Set new test confidence and return old test confidence."""
        old_min_confidence = self.min_confidence
        self.min_confidence = min_confidence
        return old_min_confidence

    def assert_function_lengths_check(self, code, expected_message):
        """Check warnings for long function bodies are as expected.

        Args:
          code: C++ source code expected to generate a warning message.
          expected_message: Message expected to be generated by the C++ code.
        """
        self.assertEqual(expected_message,
                         self.perform_function_lengths_check(code))

    def trigger_lines(self, error_level):
        """Return number of lines needed to trigger a function length warning.

        Args:
          error_level: --v setting for cpp_style.

        Returns:
          Number of lines needed to trigger a function length warning.
        """
        # The trigger doubles for each confidence level above the base.
        return cpp_style._FunctionState._NORMAL_TRIGGER * 2 ** error_level

    def trigger_test_lines(self, error_level):
        """Return number of lines needed to trigger a test function length warning.

        Args:
          error_level: --v setting for cpp_style.

        Returns:
          Number of lines needed to trigger a test function length warning.
        """
        # Same doubling rule as trigger_lines(), but from the (larger) test base.
        return cpp_style._FunctionState._TEST_TRIGGER * 2 ** error_level

    def assert_function_length_check_definition(self, lines, error_level):
        """Generate long function definition and check warnings are as expected.

        Args:
          lines: Number of lines to generate.
          error_level: --v setting for cpp_style.
        """
        trigger_level = self.trigger_lines(self.min_confidence)
        self.assert_function_lengths_check(
            'void test(int x)' + self.function_body(lines),
            ('Small and focused functions are preferred: '
             'test() has %d non-comment lines '
             '(error triggered by exceeding %d lines).'
             ' [readability/fn_size] [%d]'
             % (lines, trigger_level, error_level)))

    def assert_function_length_check_definition_ok(self, lines):
        """Generate shorter function definition and check no warning is produced.

        Args:
          lines: Number of lines to generate.
        """
        self.assert_function_lengths_check(
            'void test(int x)' + self.function_body(lines),
            '')

    def assert_function_length_check_at_error_level(self, error_level):
        """Generate and check function at the trigger level for --v setting.

        Args:
          error_level: --v setting for cpp_style.
        """
        self.assert_function_length_check_definition(self.trigger_lines(error_level),
                                                     error_level)

    def assert_function_length_check_below_error_level(self, error_level):
        """Generate and check function just below the trigger level for --v setting.

        Args:
          error_level: --v setting for cpp_style.
        """
        # One line below this level's trigger still fires at the level below it.
        self.assert_function_length_check_definition(self.trigger_lines(error_level) - 1,
                                                     error_level - 1)

    def assert_function_length_check_above_error_level(self, error_level):
        """Generate and check function just above the trigger level for --v setting.

        Args:
          error_level: --v setting for cpp_style.
        """
        self.assert_function_length_check_definition(self.trigger_lines(error_level) + 1,
                                                     error_level)

    def function_body(self, number_of_lines):
        # Brace-wrapped body made of `number_of_lines` identical statement lines.
        return ' {\n' + ' this_is_just_a_test();\n' * number_of_lines + '}'

    def function_body_with_blank_lines(self, number_of_lines):
        # Like function_body(), but each statement is followed by a blank line.
        return ' {\n' + ' this_is_just_a_test();\n\n' * number_of_lines + '}'

    def function_body_with_no_lints(self, number_of_lines):
        # Like function_body(), but every statement carries a NOLINT marker.
        return ' {\n' + ' this_is_just_a_test(); // NOLINT\n' * number_of_lines + '}'

    # Test line length checks.
def test_function_length_check_declaration(self): self.assert_function_lengths_check( 'void test();', # Not a function definition '') def test_function_length_check_declaration_with_block_following(self): self.assert_function_lengths_check( ('void test();\n' + self.function_body(66)), # Not a function definition '') def test_function_length_check_class_definition(self): self.assert_function_lengths_check( # Not a function definition 'class Test' + self.function_body(66) + ';', '') def test_function_length_check_trivial(self): self.assert_function_lengths_check( 'void test() {}', # Not counted '') def test_function_length_check_empty(self): self.assert_function_lengths_check( 'void test() {\n}', '') def test_function_length_check_definition_below_severity0(self): old_min_confidence = self.set_min_confidence(0) self.assert_function_length_check_definition_ok(self.trigger_lines(0) - 1) self.set_min_confidence(old_min_confidence) def test_function_length_check_definition_at_severity0(self): old_min_confidence = self.set_min_confidence(0) self.assert_function_length_check_definition_ok(self.trigger_lines(0)) self.set_min_confidence(old_min_confidence) def test_function_length_check_definition_above_severity0(self): old_min_confidence = self.set_min_confidence(0) self.assert_function_length_check_above_error_level(0) self.set_min_confidence(old_min_confidence) def test_function_length_check_definition_below_severity1v0(self): old_min_confidence = self.set_min_confidence(0) self.assert_function_length_check_below_error_level(1) self.set_min_confidence(old_min_confidence) def test_function_length_check_definition_at_severity1v0(self): old_min_confidence = self.set_min_confidence(0) self.assert_function_length_check_at_error_level(1) self.set_min_confidence(old_min_confidence) def test_function_length_check_definition_below_severity1(self): self.assert_function_length_check_definition_ok(self.trigger_lines(1) - 1) def 
test_function_length_check_definition_at_severity1(self): self.assert_function_length_check_definition_ok(self.trigger_lines(1)) def test_function_length_check_definition_above_severity1(self): self.assert_function_length_check_above_error_level(1) def test_function_length_check_definition_severity1_plus_indented(self): error_level = 1 error_lines = self.trigger_lines(error_level) + 1 trigger_level = self.trigger_lines(self.min_confidence) indent_spaces = ' ' self.assert_function_lengths_check( re.sub(r'(?m)^(.)', indent_spaces + r'\1', 'void test_indent(int x)\n' + self.function_body(error_lines)), ('Small and focused functions are preferred: ' 'test_indent() has %d non-comment lines ' '(error triggered by exceeding %d lines).' ' [readability/fn_size] [%d]') % (error_lines, trigger_level, error_level)) def test_function_length_check_definition_severity1_plus_blanks(self): error_level = 1 error_lines = self.trigger_lines(error_level) + 1 trigger_level = self.trigger_lines(self.min_confidence) self.assert_function_lengths_check( 'void test_blanks(int x)' + self.function_body(error_lines), ('Small and focused functions are preferred: ' 'test_blanks() has %d non-comment lines ' '(error triggered by exceeding %d lines).' ' [readability/fn_size] [%d]') % (error_lines, trigger_level, error_level)) def test_function_length_check_complex_definition_severity1(self): error_level = 1 error_lines = self.trigger_lines(error_level) + 1 trigger_level = self.trigger_lines(self.min_confidence) self.assert_function_lengths_check( ('my_namespace::my_other_namespace::MyVeryLongTypeName<Type1, bool func(const Element*)>*\n' 'my_namespace::my_other_namespace<Type3, Type4>::~MyFunction<Type5<Type6, Type7> >(int arg1, char* arg2)' + self.function_body(error_lines)), ('Small and focused functions are preferred: ' 'my_namespace::my_other_namespace<Type3, Type4>::~MyFunction<Type5<Type6, Type7> >()' ' has %d non-comment lines ' '(error triggered by exceeding %d lines).' 
' [readability/fn_size] [%d]') % (error_lines, trigger_level, error_level)) def test_function_length_check_definition_severity1_for_test(self): error_level = 1 error_lines = self.trigger_test_lines(error_level) + 1 trigger_level = self.trigger_test_lines(self.min_confidence) self.assert_function_lengths_check( 'TEST_F(Test, Mutator)' + self.function_body(error_lines), ('Small and focused functions are preferred: ' 'TEST_F(Test, Mutator) has %d non-comment lines ' '(error triggered by exceeding %d lines).' ' [readability/fn_size] [%d]') % (error_lines, trigger_level, error_level)) def test_function_length_check_definition_severity1_for_split_line_test(self): error_level = 1 error_lines = self.trigger_test_lines(error_level) + 1 trigger_level = self.trigger_test_lines(self.min_confidence) self.assert_function_lengths_check( ('TEST_F(GoogleUpdateRecoveryRegistryProtectedTest,\n' ' FixGoogleUpdate_AllValues_MachineApp)' # note: 4 spaces + self.function_body(error_lines)), ('Small and focused functions are preferred: ' 'TEST_F(GoogleUpdateRecoveryRegistryProtectedTest, ' # 1 space 'FixGoogleUpdate_AllValues_MachineApp) has %d non-comment lines ' '(error triggered by exceeding %d lines).' ' [readability/fn_size] [%d]') % (error_lines, trigger_level, error_level)) def test_function_length_check_definition_severity1_for_bad_test_doesnt_break(self): error_level = 1 error_lines = self.trigger_test_lines(error_level) + 1 trigger_level = self.trigger_test_lines(self.min_confidence) # Since the function name isn't valid, the function detection algorithm # will skip it, so no error is produced. 
self.assert_function_lengths_check( ('TEST_F(' + self.function_body(error_lines)), '') def test_function_length_check_definition_severity1_with_embedded_no_lints(self): error_level = 1 error_lines = self.trigger_lines(error_level) + 1 trigger_level = self.trigger_lines(self.min_confidence) self.assert_function_lengths_check( 'void test(int x)' + self.function_body_with_no_lints(error_lines), ('Small and focused functions are preferred: ' 'test() has %d non-comment lines ' '(error triggered by exceeding %d lines).' ' [readability/fn_size] [%d]') % (error_lines, trigger_level, error_level)) def test_function_length_check_definition_severity1_with_no_lint(self): self.assert_function_lengths_check( ('void test(int x)' + self.function_body(self.trigger_lines(1)) + ' // NOLINT -- long function'), '') def test_function_length_check_definition_below_severity2(self): self.assert_function_length_check_below_error_level(2) def test_function_length_check_definition_severity2(self): self.assert_function_length_check_at_error_level(2) def test_function_length_check_definition_above_severity2(self): self.assert_function_length_check_above_error_level(2) def test_function_length_check_definition_below_severity3(self): self.assert_function_length_check_below_error_level(3) def test_function_length_check_definition_severity3(self): self.assert_function_length_check_at_error_level(3) def test_function_length_check_definition_above_severity3(self): self.assert_function_length_check_above_error_level(3) def test_function_length_check_definition_below_severity4(self): self.assert_function_length_check_below_error_level(4) def test_function_length_check_definition_severity4(self): self.assert_function_length_check_at_error_level(4) def test_function_length_check_definition_above_severity4(self): self.assert_function_length_check_above_error_level(4) def test_function_length_check_definition_below_severity5(self): self.assert_function_length_check_below_error_level(5) def 
test_function_length_check_definition_at_severity5(self): self.assert_function_length_check_at_error_level(5) def test_function_length_check_definition_above_severity5(self): self.assert_function_length_check_above_error_level(5) def test_function_length_check_definition_huge_lines(self): # 5 is the limit self.assert_function_length_check_definition(self.trigger_lines(6), 5) def test_function_length_not_determinable(self): # Macro invocation without terminating semicolon. self.assert_function_lengths_check( 'MACRO(arg)', '') # Macro with underscores self.assert_function_lengths_check( 'MACRO_WITH_UNDERSCORES(arg1, arg2, arg3)', '') self.assert_function_lengths_check( 'NonMacro(arg)', 'Lint failed to find start of function body.' ' [readability/fn_size] [5]') class NoNonVirtualDestructorsTest(CppStyleTestBase): def test_no_error(self): self.assert_multi_line_lint( '''\ class Foo { virtual ~Foo(); virtual void foo(); };''', '') self.assert_multi_line_lint( '''\ class Foo { virtual inline ~Foo(); virtual void foo(); };''', '') self.assert_multi_line_lint( '''\ class Foo { inline virtual ~Foo(); virtual void foo(); };''', '') self.assert_multi_line_lint( '''\ class Foo::Goo { virtual ~Goo(); virtual void goo(); };''', '') self.assert_multi_line_lint( 'class Foo { void foo(); };', 'More than one command on the same line [whitespace/newline] [4]') self.assert_multi_line_lint( 'class MyClass {\n' ' int getIntValue() { ASSERT(m_ptr); return *m_ptr; }\n' '};\n', '') self.assert_multi_line_lint( 'class MyClass {\n' ' int getIntValue()\n' ' {\n' ' ASSERT(m_ptr); return *m_ptr;\n' ' }\n' '};\n', 'More than one command on the same line [whitespace/newline] [4]') self.assert_multi_line_lint( '''\ class Qualified::Goo : public Foo { virtual void goo(); };''', '') def test_no_destructor_when_virtual_needed(self): self.assert_multi_line_lint_re( '''\ class Foo { virtual void foo(); };''', 'The class Foo probably needs a virtual destructor') def test_enum_casing(self): 
self.assert_multi_line_lint( '''\ enum Foo { FOO_ONE = 1, FOO_TWO }; enum { FOO_ONE }; enum {FooOne, fooTwo}; enum { FOO_ONE };''', ['enum members should use InterCaps with an initial capital letter. [readability/enum_casing] [4]'] * 5) self.assert_multi_line_lint( '''\ enum Foo { fooOne = 1, FooTwo = 2 };''', 'enum members should use InterCaps with an initial capital letter. [readability/enum_casing] [4]') self.assert_multi_line_lint( '''\ enum Foo { FooOne = 1, FooTwo, kFooConst, } fooVar = FooOne; enum { FooOne, FooTwo }; enum { FooOne, FooTwo } fooVar = FooTwo; enum { FooOne= FooTwo } foo; enum Enum123 { FooOne, FooTwo = FooOne, };''', '') self.assert_multi_line_lint( '''\ // WebIDL enum enum Foo { FOO_ONE = 1, FOO_TWO = 2, };''', '') self.assert_multi_line_lint( '''\ // WebKitIDL enum enum Foo { FOO_ONE, FOO_TWO };''', '') def test_destructor_non_virtual_when_virtual_needed(self): self.assert_multi_line_lint_re( '''\ class Foo { ~Foo(); virtual void foo(); };''', 'The class Foo probably needs a virtual destructor') def test_no_warn_when_derived(self): self.assert_multi_line_lint( '''\ class Foo : public Goo { virtual void foo(); };''', '') def test_internal_braces(self): self.assert_multi_line_lint_re( '''\ class Foo { enum Goo { Goo }; virtual void foo(); };''', 'The class Foo probably needs a virtual destructor') def test_inner_class_needs_virtual_destructor(self): self.assert_multi_line_lint_re( '''\ class Foo { class Goo { virtual void goo(); }; };''', 'The class Goo probably needs a virtual destructor') def test_outer_class_needs_virtual_destructor(self): self.assert_multi_line_lint_re( '''\ class Foo { class Goo { }; virtual void foo(); };''', 'The class Foo probably needs a virtual destructor') def test_qualified_class_needs_virtual_destructor(self): self.assert_multi_line_lint_re( '''\ class Qualified::Foo { virtual void foo(); };''', 'The class Qualified::Foo probably needs a virtual destructor') def test_multi_line_declaration_no_error(self): 
self.assert_multi_line_lint_re( '''\ class Foo : public Goo { virtual void foo(); };''', '') def test_multi_line_declaration_with_error(self): self.assert_multi_line_lint( '''\ class Foo { virtual void foo(); };''', ['This { should be at the end of the previous line ' '[whitespace/braces] [4]', 'The class Foo probably needs a virtual destructor due to having ' 'virtual method(s), one declared at line 3. [runtime/virtual] [4]']) class PassPtrTest(CppStyleTestBase): # For http://webkit.org/coding/RefPtr.html def assert_pass_ptr_check(self, code, expected_message): """Check warnings for Pass*Ptr are as expected. Args: code: C++ source code expected to generate a warning message. expected_message: Message expected to be generated by the C++ code. """ self.assertEqual(expected_message, self.perform_pass_ptr_check(code)) def test_pass_ref_ptr_in_function(self): self.assert_pass_ptr_check( 'int myFunction()\n' '{\n' ' PassRefPtr<Type1> variable = variable2;\n' '}', 'Local variables should never be PassRefPtr (see ' 'http://webkit.org/coding/RefPtr.html). [readability/pass_ptr] [5]') def test_pass_own_ptr_in_function(self): self.assert_pass_ptr_check( 'int myFunction()\n' '{\n' ' PassOwnPtr<Type1> variable = variable2;\n' '}', 'Local variables should never be PassOwnPtr (see ' 'http://webkit.org/coding/RefPtr.html). [readability/pass_ptr] [5]') def test_pass_other_type_ptr_in_function(self): self.assert_pass_ptr_check( 'int myFunction()\n' '{\n' ' PassOtherTypePtr<Type1> variable;\n' '}', 'Local variables should never be PassOtherTypePtr (see ' 'http://webkit.org/coding/RefPtr.html). 
[readability/pass_ptr] [5]') def test_pass_ref_ptr_return_value(self): self.assert_pass_ptr_check( 'PassRefPtr<Type1>\n' 'myFunction(int)\n' '{\n' '}', '') self.assert_pass_ptr_check( 'PassRefPtr<Type1> myFunction(int)\n' '{\n' '}', '') self.assert_pass_ptr_check( 'PassRefPtr<Type1> myFunction();\n', '') self.assert_pass_ptr_check( 'OwnRefPtr<Type1> myFunction();\n', '') self.assert_pass_ptr_check( 'RefPtr<Type1> myFunction(int)\n' '{\n' '}', 'The return type should use PassRefPtr instead of RefPtr. [readability/pass_ptr] [5]') self.assert_pass_ptr_check( 'OwnPtr<Type1> myFunction(int)\n' '{\n' '}', 'The return type should use PassOwnPtr instead of OwnPtr. [readability/pass_ptr] [5]') def test_ref_ptr_parameter_value(self): self.assert_pass_ptr_check( 'int myFunction(PassRefPtr<Type1>)\n' '{\n' '}', '') self.assert_pass_ptr_check( 'int myFunction(RefPtr<Type1>)\n' '{\n' '}', 'The parameter type should use PassRefPtr instead of RefPtr. [readability/pass_ptr] [5]') self.assert_pass_ptr_check( 'int myFunction(RefPtr<Type1>&)\n' '{\n' '}', '') self.assert_pass_ptr_check( 'int myFunction(RefPtr<Type1>*)\n' '{\n' '}', '') self.assert_pass_ptr_check( 'int myFunction(RefPtr<Type1>* = 0)\n' '{\n' '}', '') self.assert_pass_ptr_check( 'int myFunction(RefPtr<Type1>* = 0)\n' '{\n' '}', '') def test_own_ptr_parameter_value(self): self.assert_pass_ptr_check( 'int myFunction(PassOwnPtr<Type1>)\n' '{\n' '}', '') self.assert_pass_ptr_check( 'int myFunction(OwnPtr<Type1>)\n' '{\n' '}', 'The parameter type should use PassOwnPtr instead of OwnPtr. [readability/pass_ptr] [5]') self.assert_pass_ptr_check( 'int myFunction(OwnPtr<Type1>& simple)\n' '{\n' '}', '') def test_ref_ptr_member_variable(self): self.assert_pass_ptr_check( 'class Foo {' ' RefPtr<Type1> m_other;\n' '};\n', '') class LeakyPatternTest(CppStyleTestBase): def assert_leaky_pattern_check(self, code, expected_message): """Check warnings for leaky patterns are as expected. 
Args: code: C++ source code expected to generate a warning message. expected_message: Message expected to be generated by the C++ code. """ self.assertEqual(expected_message, self.perform_leaky_pattern_check(code)) def test_get_dc(self): self.assert_leaky_pattern_check( 'HDC hdc = GetDC(hwnd);', 'Use the class HWndDC instead of calling GetDC to avoid potential ' 'memory leaks. [runtime/leaky_pattern] [5]') def test_get_dc(self): self.assert_leaky_pattern_check( 'HDC hdc = GetDCEx(hwnd, 0, 0);', 'Use the class HWndDC instead of calling GetDCEx to avoid potential ' 'memory leaks. [runtime/leaky_pattern] [5]') def test_own_get_dc(self): self.assert_leaky_pattern_check( 'HWndDC hdc(hwnd);', '') def test_create_dc(self): self.assert_leaky_pattern_check( 'HDC dc2 = ::CreateDC();', 'Use adoptPtr and OwnPtr<HDC> when calling CreateDC to avoid potential ' 'memory leaks. [runtime/leaky_pattern] [5]') self.assert_leaky_pattern_check( 'adoptPtr(CreateDC());', '') def test_create_compatible_dc(self): self.assert_leaky_pattern_check( 'HDC dc2 = CreateCompatibleDC(dc);', 'Use adoptPtr and OwnPtr<HDC> when calling CreateCompatibleDC to avoid potential ' 'memory leaks. [runtime/leaky_pattern] [5]') self.assert_leaky_pattern_check( 'adoptPtr(CreateCompatibleDC(dc));', '') class WebKitStyleTest(CppStyleTestBase): # for http://webkit.org/coding/coding-style.html def test_indentation(self): # 1. Use spaces, not tabs. Tabs should only appear in files that # require them for semantic meaning, like Makefiles. self.assert_multi_line_lint( 'class Foo {\n' ' int goo;\n' '};', '') self.assert_multi_line_lint( 'class Foo {\n' '\tint goo;\n' '};', 'Tab found; better to use spaces [whitespace/tab] [1]') # 2. The indent size is 4 spaces. self.assert_multi_line_lint( 'class Foo {\n' ' int goo;\n' '};', '') self.assert_multi_line_lint( 'class Foo {\n' ' int goo;\n' '};', 'Weird number of spaces at line-start. Are you using a 4-space indent? [whitespace/indent] [3]') # 3. 
In a header, code inside a namespace should not be indented. self.assert_multi_line_lint( 'namespace WebCore {\n\n' 'class Document {\n' ' int myVariable;\n' '};\n' '}', '', 'foo.h') self.assert_multi_line_lint( 'namespace OuterNamespace {\n' ' namespace InnerNamespace {\n' ' class Document {\n' '};\n' '};\n' '}', 'Code inside a namespace should not be indented. [whitespace/indent] [4]', 'foo.h') self.assert_multi_line_lint( 'namespace OuterNamespace {\n' ' class Document {\n' ' namespace InnerNamespace {\n' '};\n' '};\n' '}', 'Code inside a namespace should not be indented. [whitespace/indent] [4]', 'foo.h') self.assert_multi_line_lint( 'namespace WebCore {\n' '#if 0\n' ' class Document {\n' '};\n' '#endif\n' '}', 'Code inside a namespace should not be indented. [whitespace/indent] [4]', 'foo.h') self.assert_multi_line_lint( 'namespace WebCore {\n' 'class Document {\n' '};\n' '}', '', 'foo.h') # 4. In an implementation file (files with the extension .cpp, .c # or .mm), code inside a namespace should not be indented. self.assert_multi_line_lint( 'namespace WebCore {\n\n' 'Document::Foo()\n' ' : foo(bar)\n' ' , boo(far)\n' '{\n' ' stuff();\n' '}', '', 'foo.cpp') self.assert_multi_line_lint( 'namespace OuterNamespace {\n' 'namespace InnerNamespace {\n' 'Document::Foo() { }\n' ' void* p;\n' '}\n' '}\n', 'Code inside a namespace should not be indented. [whitespace/indent] [4]', 'foo.cpp') self.assert_multi_line_lint( 'namespace OuterNamespace {\n' 'namespace InnerNamespace {\n' 'Document::Foo() { }\n' '}\n' ' void* p;\n' '}\n', 'Code inside a namespace should not be indented. [whitespace/indent] [4]', 'foo.cpp') self.assert_multi_line_lint( 'namespace WebCore {\n\n' ' const char* foo = "start:;"\n' ' "dfsfsfs";\n' '}\n', 'Code inside a namespace should not be indented. 
[whitespace/indent] [4]', 'foo.cpp') self.assert_multi_line_lint( 'namespace WebCore {\n\n' 'const char* foo(void* a = ";", // ;\n' ' void* b);\n' ' void* p;\n' '}\n', 'Code inside a namespace should not be indented. [whitespace/indent] [4]', 'foo.cpp') self.assert_multi_line_lint( 'namespace WebCore {\n\n' 'const char* foo[] = {\n' ' "void* b);", // ;\n' ' "asfdf",\n' ' }\n' ' void* p;\n' '}\n', 'Code inside a namespace should not be indented. [whitespace/indent] [4]', 'foo.cpp') self.assert_multi_line_lint( 'namespace WebCore {\n\n' 'const char* foo[] = {\n' ' "void* b);", // }\n' ' "asfdf",\n' ' }\n' '}\n', '', 'foo.cpp') self.assert_multi_line_lint( ' namespace WebCore {\n\n' ' void Document::Foo()\n' ' {\n' 'start: // infinite loops are fun!\n' ' goto start;\n' ' }', 'namespace should never be indented. [whitespace/indent] [4]', 'foo.cpp') self.assert_multi_line_lint( 'namespace WebCore {\n' ' Document::Foo() { }\n' '}', 'Code inside a namespace should not be indented.' ' [whitespace/indent] [4]', 'foo.cpp') self.assert_multi_line_lint( 'namespace WebCore {\n' '#define abc(x) x; \\\n' ' x\n' '}', '', 'foo.cpp') self.assert_multi_line_lint( 'namespace WebCore {\n' '#define abc(x) x; \\\n' ' x\n' ' void* x;' '}', 'Code inside a namespace should not be indented. [whitespace/indent] [4]', 'foo.cpp') # 5. A case label should line up with its switch statement. The # case statement is indented. 
self.assert_multi_line_lint( ' switch (condition) {\n' ' case fooCondition:\n' ' case barCondition:\n' ' i++;\n' ' break;\n' ' default:\n' ' i--;\n' ' }\n', '') self.assert_multi_line_lint( ' switch (condition) {\n' ' case fooCondition:\n' ' switch (otherCondition) {\n' ' default:\n' ' return;\n' ' }\n' ' default:\n' ' i--;\n' ' }\n', '') self.assert_multi_line_lint( ' switch (condition) {\n' ' case fooCondition: break;\n' ' default: return;\n' ' }\n', '') self.assert_multi_line_lint( ' switch (condition) {\n' ' case fooCondition:\n' ' case barCondition:\n' ' i++;\n' ' break;\n' ' default:\n' ' i--;\n' ' }\n', 'A case label should not be indented, but line up with its switch statement.' ' [whitespace/indent] [4]') self.assert_multi_line_lint( ' switch (condition) {\n' ' case fooCondition:\n' ' break;\n' ' default:\n' ' i--;\n' ' }\n', 'A case label should not be indented, but line up with its switch statement.' ' [whitespace/indent] [4]') self.assert_multi_line_lint( ' switch (condition) {\n' ' case fooCondition:\n' ' case barCondition:\n' ' switch (otherCondition) {\n' ' default:\n' ' return;\n' ' }\n' ' default:\n' ' i--;\n' ' }\n', 'A case label should not be indented, but line up with its switch statement.' ' [whitespace/indent] [4]') self.assert_multi_line_lint( ' switch (condition) {\n' ' case fooCondition:\n' ' case barCondition:\n' ' i++;\n' ' break;\n\n' ' default:\n' ' i--;\n' ' }\n', 'Non-label code inside switch statements should be indented.' ' [whitespace/indent] [4]') self.assert_multi_line_lint( ' switch (condition) {\n' ' case fooCondition:\n' ' case barCondition:\n' ' switch (otherCondition) {\n' ' default:\n' ' return;\n' ' }\n' ' default:\n' ' i--;\n' ' }\n', 'Non-label code inside switch statements should be indented.' ' [whitespace/indent] [4]') # 6. Boolean expressions at the same nesting level that span # multiple lines should have their operators on the left side of # the line instead of the right side. 
self.assert_multi_line_lint( ' return attr->name() == srcAttr\n' ' || attr->name() == lowsrcAttr;\n', '') self.assert_multi_line_lint( ' return attr->name() == srcAttr ||\n' ' attr->name() == lowsrcAttr;\n', 'Boolean expressions that span multiple lines should have their ' 'operators on the left side of the line instead of the right side.' ' [whitespace/operators] [4]') def test_spacing(self): # 1. Do not place spaces around unary operators. self.assert_multi_line_lint( 'i++;', '') self.assert_multi_line_lint( 'i ++;', 'Extra space for operator ++; [whitespace/operators] [4]') # 2. Do place spaces around binary and ternary operators. self.assert_multi_line_lint( 'y = m * x + b;', '') self.assert_multi_line_lint( 'f(a, b);', '') self.assert_multi_line_lint( 'c = a | b;', '') self.assert_multi_line_lint( 'return condition ? 1 : 0;', '') self.assert_multi_line_lint( 'y=m*x+b;', 'Missing spaces around = [whitespace/operators] [4]') self.assert_multi_line_lint( 'f(a,b);', 'Missing space after , [whitespace/comma] [3]') self.assert_multi_line_lint( 'c = a|b;', 'Missing spaces around | [whitespace/operators] [3]') # FIXME: We cannot catch this lint error. # self.assert_multi_line_lint( # 'return condition ? 1:0;', # '') # 3. Place spaces between control statements and their parentheses. self.assert_multi_line_lint( ' if (condition)\n' ' doIt();\n', '') self.assert_multi_line_lint( ' if(condition)\n' ' doIt();\n', 'Missing space before ( in if( [whitespace/parens] [5]') # 4. Do not place spaces between a function and its parentheses, # or between a parenthesis and its content. self.assert_multi_line_lint( 'f(a, b);', '') self.assert_multi_line_lint( 'f (a, b);', 'Extra space before ( in function call [whitespace/parens] [4]') self.assert_multi_line_lint( 'f( a, b );', ['Extra space after ( in function call [whitespace/parens] [4]', 'Extra space before ) [whitespace/parens] [2]']) def test_line_breaking(self): # 1. Each statement should get its own line. 
self.assert_multi_line_lint( ' x++;\n' ' y++;\n' ' if (condition);\n' ' doIt();\n', '') self.assert_multi_line_lint( ' if (condition) \\\n' ' doIt();\n', '') self.assert_multi_line_lint( ' x++; y++;', 'More than one command on the same line [whitespace/newline] [4]') self.assert_multi_line_lint( ' if (condition) doIt();\n', 'More than one command on the same line in if [whitespace/parens] [4]') # Ensure that having a # in the line doesn't hide the error. self.assert_multi_line_lint( ' x++; char a[] = "#";', 'More than one command on the same line [whitespace/newline] [4]') # Ignore preprocessor if's. self.assert_multi_line_lint( '#if (condition) || (condition2)\n', '') # 2. An else statement should go on the same line as a preceding # close brace if one is present, else it should line up with the # if statement. self.assert_multi_line_lint( 'if (condition) {\n' ' doSomething();\n' ' doSomethingAgain();\n' '} else {\n' ' doSomethingElse();\n' ' doSomethingElseAgain();\n' '}\n', '') self.assert_multi_line_lint( 'if (condition)\n' ' doSomething();\n' 'else\n' ' doSomethingElse();\n', '') self.assert_multi_line_lint( 'if (condition) {\n' ' doSomething();\n' '} else {\n' ' doSomethingElse();\n' ' doSomethingElseAgain();\n' '}\n', '') self.assert_multi_line_lint( '#define TEST_ASSERT(expression) do { if (!(expression)) { TestsController::shared().testFailed(__FILE__, __LINE__, #expression); return; } } while (0)\n', '') self.assert_multi_line_lint( '#define TEST_ASSERT(expression) do { if ( !(expression)) { TestsController::shared().testFailed(__FILE__, __LINE__, #expression); return; } } while (0)\n', 'Extra space after ( in if [whitespace/parens] [5]') # FIXME: currently we only check first conditional, so we cannot detect errors in next ones. 
# self.assert_multi_line_lint( # '#define TEST_ASSERT(expression) do { if (!(expression)) { TestsController::shared().testFailed(__FILE__, __LINE__, #expression); return; } } while (0 )\n', # 'Mismatching spaces inside () in if [whitespace/parens] [5]') self.assert_multi_line_lint( 'WTF_MAKE_NONCOPYABLE(ClassName); WTF_MAKE_FAST_ALLOCATED;\n', '') self.assert_multi_line_lint( 'if (condition) {\n' ' doSomething();\n' ' doSomethingAgain();\n' '}\n' 'else {\n' ' doSomethingElse();\n' ' doSomethingElseAgain();\n' '}\n', 'An else should appear on the same line as the preceding } [whitespace/newline] [4]') self.assert_multi_line_lint( 'if (condition) doSomething(); else doSomethingElse();\n', ['More than one command on the same line [whitespace/newline] [4]', 'Else clause should never be on same line as else (use 2 lines) [whitespace/newline] [4]', 'More than one command on the same line in if [whitespace/parens] [4]']) self.assert_multi_line_lint( 'if (condition) doSomething(); else {\n' ' doSomethingElse();\n' '}\n', ['More than one command on the same line in if [whitespace/parens] [4]', 'If one part of an if-else statement uses curly braces, the other part must too. [whitespace/braces] [4]']) self.assert_multi_line_lint( 'void func()\n' '{\n' ' while (condition) { }\n' ' return 0;\n' '}\n', '') self.assert_multi_line_lint( 'void func()\n' '{\n' ' for (i = 0; i < 42; i++) { foobar(); }\n' ' return 0;\n' '}\n', 'More than one command on the same line in for [whitespace/parens] [4]') # 3. An else if statement should be written as an if statement # when the prior if concludes with a return statement. 
self.assert_multi_line_lint( 'if (motivated) {\n' ' if (liquid)\n' ' return money;\n' '} else if (tired) {\n' ' break;\n' '}', '') self.assert_multi_line_lint( 'if (condition)\n' ' doSomething();\n' 'else if (otherCondition)\n' ' doSomethingElse();\n', '') self.assert_multi_line_lint( 'if (condition)\n' ' doSomething();\n' 'else\n' ' doSomethingElse();\n', '') self.assert_multi_line_lint( 'if (condition)\n' ' returnValue = foo;\n' 'else if (otherCondition)\n' ' returnValue = bar;\n', '') self.assert_multi_line_lint( 'if (condition)\n' ' returnValue = foo;\n' 'else\n' ' returnValue = bar;\n', '') self.assert_multi_line_lint( 'if (condition)\n' ' doSomething();\n' 'else if (liquid)\n' ' return money;\n' 'else if (broke)\n' ' return favor;\n' 'else\n' ' sleep(28800);\n', '') self.assert_multi_line_lint( 'if (liquid) {\n' ' prepare();\n' ' return money;\n' '} else if (greedy) {\n' ' keep();\n' ' return nothing;\n' '}\n', 'An else if statement should be written as an if statement when the ' 'prior "if" concludes with a return, break, continue or goto statement.' ' [readability/control_flow] [4]') self.assert_multi_line_lint( ' if (stupid) {\n' 'infiniteLoop:\n' ' goto infiniteLoop;\n' ' } else if (evil)\n' ' goto hell;\n', ['If one part of an if-else statement uses curly braces, the other part must too. [whitespace/braces] [4]', 'An else if statement should be written as an if statement when the ' 'prior "if" concludes with a return, break, continue or goto statement.' ' [readability/control_flow] [4]']) self.assert_multi_line_lint( 'if (liquid)\n' '{\n' ' prepare();\n' ' return money;\n' '}\n' 'else if (greedy)\n' ' keep();\n', ['If one part of an if-else statement uses curly braces, the other part must too. 
[whitespace/braces] [4]', 'This { should be at the end of the previous line [whitespace/braces] [4]', 'An else should appear on the same line as the preceding } [whitespace/newline] [4]', 'An else if statement should be written as an if statement when the ' 'prior "if" concludes with a return, break, continue or goto statement.' ' [readability/control_flow] [4]']) self.assert_multi_line_lint( 'if (gone)\n' ' return;\n' 'else if (here)\n' ' go();\n', 'An else if statement should be written as an if statement when the ' 'prior "if" concludes with a return, break, continue or goto statement.' ' [readability/control_flow] [4]') self.assert_multi_line_lint( 'if (gone)\n' ' return;\n' 'else\n' ' go();\n', 'An else statement can be removed when the prior "if" concludes ' 'with a return, break, continue or goto statement.' ' [readability/control_flow] [4]') self.assert_multi_line_lint( 'if (motivated) {\n' ' prepare();\n' ' continue;\n' '} else {\n' ' cleanUp();\n' ' break;\n' '}\n', 'An else statement can be removed when the prior "if" concludes ' 'with a return, break, continue or goto statement.' ' [readability/control_flow] [4]') self.assert_multi_line_lint( 'if (tired)\n' ' break;\n' 'else {\n' ' prepare();\n' ' continue;\n' '}\n', ['If one part of an if-else statement uses curly braces, the other part must too. [whitespace/braces] [4]', 'An else statement can be removed when the prior "if" concludes ' 'with a return, break, continue or goto statement.' ' [readability/control_flow] [4]']) def test_braces(self): # 1. Function definitions: place each brace on its own line. self.assert_multi_line_lint( 'int main()\n' '{\n' ' doSomething();\n' '}\n', '') self.assert_multi_line_lint( 'int main() {\n' ' doSomething();\n' '}\n', 'Place brace on its own line for function definitions. [whitespace/braces] [4]') # 2. Other braces: place the open brace on the line preceding the # code block; place the close brace on its own line. 
self.assert_multi_line_lint( 'class MyClass {\n' ' int foo;\n' '};\n', '') self.assert_multi_line_lint( 'namespace WebCore {\n' 'int foo;\n' '};\n', '') self.assert_multi_line_lint( 'for (int i = 0; i < 10; i++) {\n' ' DoSomething();\n' '};\n', '') self.assert_multi_line_lint( 'class MyClass\n' '{\n' ' int foo;\n' '};\n', 'This { should be at the end of the previous line [whitespace/braces] [4]') self.assert_multi_line_lint( 'if (condition)\n' '{\n' ' int foo;\n' '}\n', 'This { should be at the end of the previous line [whitespace/braces] [4]') self.assert_multi_line_lint( 'for (int i = 0; i < 10; i++)\n' '{\n' ' int foo;\n' '}\n', 'This { should be at the end of the previous line [whitespace/braces] [4]') self.assert_multi_line_lint( 'while (true)\n' '{\n' ' int foo;\n' '}\n', 'This { should be at the end of the previous line [whitespace/braces] [4]') self.assert_multi_line_lint( 'foreach (Foo* foo, foos)\n' '{\n' ' int bar;\n' '}\n', 'This { should be at the end of the previous line [whitespace/braces] [4]') self.assert_multi_line_lint( 'switch (type)\n' '{\n' 'case foo: return;\n' '}\n', 'This { should be at the end of the previous line [whitespace/braces] [4]') self.assert_multi_line_lint( 'if (condition)\n' '{\n' ' int foo;\n' '}\n', 'This { should be at the end of the previous line [whitespace/braces] [4]') self.assert_multi_line_lint( 'for (int i = 0; i < 10; i++)\n' '{\n' ' int foo;\n' '}\n', 'This { should be at the end of the previous line [whitespace/braces] [4]') self.assert_multi_line_lint( 'while (true)\n' '{\n' ' int foo;\n' '}\n', 'This { should be at the end of the previous line [whitespace/braces] [4]') self.assert_multi_line_lint( 'switch (type)\n' '{\n' 'case foo: return;\n' '}\n', 'This { should be at the end of the previous line [whitespace/braces] [4]') self.assert_multi_line_lint( 'else if (type)\n' '{\n' 'case foo: return;\n' '}\n', 'This { should be at the end of the previous line [whitespace/braces] [4]') # 3. 
Curly braces are not required for single-line conditionals and # loop bodies, but are required for single-statement bodies that # span multiple lines. # # Positive tests # self.assert_multi_line_lint( 'if (condition1)\n' ' statement1();\n' 'else\n' ' statement2();\n', '') self.assert_multi_line_lint( 'if (condition1)\n' ' statement1();\n' 'else if (condition2)\n' ' statement2();\n', '') self.assert_multi_line_lint( 'if (condition1)\n' ' statement1();\n' 'else if (condition2)\n' ' statement2();\n' 'else\n' ' statement3();\n', '') self.assert_multi_line_lint( 'for (; foo; bar)\n' ' int foo;\n', '') self.assert_multi_line_lint( 'for (; foo; bar) {\n' ' int foo;\n' '}\n', '') self.assert_multi_line_lint( 'foreach (foo, foos) {\n' ' int bar;\n' '}\n', '') self.assert_multi_line_lint( 'foreach (foo, foos)\n' ' int bar;\n', '') self.assert_multi_line_lint( 'while (true) {\n' ' int foo;\n' '}\n', '') self.assert_multi_line_lint( 'while (true)\n' ' int foo;\n', '') self.assert_multi_line_lint( 'if (condition1) {\n' ' statement1();\n' '} else {\n' ' statement2();\n' '}\n', '') self.assert_multi_line_lint( 'if (condition1) {\n' ' statement1();\n' '} else if (condition2) {\n' ' statement2();\n' '}\n', '') self.assert_multi_line_lint( 'if (condition1) {\n' ' statement1();\n' '} else if (condition2) {\n' ' statement2();\n' '} else {\n' ' statement3();\n' '}\n', '') self.assert_multi_line_lint( 'if (condition1) {\n' ' statement1();\n' ' statement1_2();\n' '} else if (condition2) {\n' ' statement2();\n' ' statement2_2();\n' '}\n', '') self.assert_multi_line_lint( 'if (condition1) {\n' ' statement1();\n' ' statement1_2();\n' '} else if (condition2) {\n' ' statement2();\n' ' statement2_2();\n' '} else {\n' ' statement3();\n' ' statement3_2();\n' '}\n', '') # # Negative tests # self.assert_multi_line_lint( 'if (condition)\n' ' doSomething(\n' ' spanningMultipleLines);\n', 'A conditional or loop body must use braces if the statement is more than one line long. 
[whitespace/braces] [4]') self.assert_multi_line_lint( 'if (condition)\n' ' // Single-line comment\n' ' doSomething();\n', 'A conditional or loop body must use braces if the statement is more than one line long. [whitespace/braces] [4]') self.assert_multi_line_lint( 'if (condition1)\n' ' statement1();\n' 'else if (condition2)\n' ' // Single-line comment\n' ' statement2();\n', 'A conditional or loop body must use braces if the statement is more than one line long. [whitespace/braces] [4]') self.assert_multi_line_lint( 'if (condition1)\n' ' statement1();\n' 'else if (condition2)\n' ' statement2();\n' 'else\n' ' // Single-line comment\n' ' statement3();\n', 'A conditional or loop body must use braces if the statement is more than one line long. [whitespace/braces] [4]') self.assert_multi_line_lint( 'for (; foo; bar)\n' ' // Single-line comment\n' ' int foo;\n', 'A conditional or loop body must use braces if the statement is more than one line long. [whitespace/braces] [4]') self.assert_multi_line_lint( 'foreach (foo, foos)\n' ' // Single-line comment\n' ' int bar;\n', 'A conditional or loop body must use braces if the statement is more than one line long. [whitespace/braces] [4]') self.assert_multi_line_lint( 'while (true)\n' ' // Single-line comment\n' ' int foo;\n' '\n', 'A conditional or loop body must use braces if the statement is more than one line long. [whitespace/braces] [4]') # 4. If one part of an if-else statement uses curly braces, the # other part must too. self.assert_multi_line_lint( 'if (condition1) {\n' ' doSomething1();\n' ' doSomething1_2();\n' '} else if (condition2)\n' ' doSomething2();\n' 'else\n' ' doSomething3();\n', 'If one part of an if-else statement uses curly braces, the other part must too. 
[whitespace/braces] [4]') self.assert_multi_line_lint( 'if (condition1)\n' ' doSomething1();\n' 'else if (condition2) {\n' ' doSomething2();\n' ' doSomething2_2();\n' '} else\n' ' doSomething3();\n', 'If one part of an if-else statement uses curly braces, the other part must too. [whitespace/braces] [4]') self.assert_multi_line_lint( 'if (condition1) {\n' ' doSomething1();\n' '} else if (condition2) {\n' ' doSomething2();\n' ' doSomething2_2();\n' '} else\n' ' doSomething3();\n', 'If one part of an if-else statement uses curly braces, the other part must too. [whitespace/braces] [4]') self.assert_multi_line_lint( 'if (condition1)\n' ' doSomething1();\n' 'else if (condition2)\n' ' doSomething2();\n' 'else {\n' ' doSomething3();\n' ' doSomething3_2();\n' '}\n', 'If one part of an if-else statement uses curly braces, the other part must too. [whitespace/braces] [4]') self.assert_multi_line_lint( 'if (condition1) {\n' ' doSomething1();\n' ' doSomething1_2();\n' '} else if (condition2)\n' ' doSomething2();\n' 'else {\n' ' doSomething3();\n' ' doSomething3_2();\n' '}\n', 'If one part of an if-else statement uses curly braces, the other part must too. [whitespace/braces] [4]') self.assert_multi_line_lint( 'if (condition1)\n' ' doSomething1();\n' 'else if (condition2) {\n' ' doSomething2();\n' ' doSomething2_2();\n' '} else {\n' ' doSomething3();\n' ' doSomething3_2();\n' '}\n', 'If one part of an if-else statement uses curly braces, the other part must too. [whitespace/braces] [4]') # 5. Control clauses without a body should use empty braces. self.assert_multi_line_lint( 'for ( ; current; current = current->next) { }\n', '') self.assert_multi_line_lint( 'for ( ; current;\n' ' current = current->next) { }\n', 'Weird number of spaces at line-start. Are you using a 4-space indent? [whitespace/indent] [3]') self.assert_multi_line_lint( 'for ( ; current; current = current->next);\n', 'Semicolon defining empty statement for this loop. Use { } instead. 
[whitespace/semicolon] [5]') self.assert_multi_line_lint( 'while (true);\n', 'Semicolon defining empty statement for this loop. Use { } instead. [whitespace/semicolon] [5]') self.assert_multi_line_lint( '} while (true);\n', '') def test_null_false_zero(self): # 1. In C++, the null pointer value should be written as 0. In C, # it should be written as NULL. In Objective-C and Objective-C++, # follow the guideline for C or C++, respectively, but use nil to # represent a null Objective-C object. self.assert_lint( 'functionCall(NULL)', 'Use 0 instead of NULL.' ' [readability/null] [5]', 'foo.cpp') self.assert_lint( "// Don't use NULL in comments since it isn't in code.", 'Use 0 or null instead of NULL (even in *comments*).' ' [readability/null] [4]', 'foo.cpp') self.assert_lint( '"A string with NULL" // and a comment with NULL is tricky to flag correctly in cpp_style.', 'Use 0 or null instead of NULL (even in *comments*).' ' [readability/null] [4]', 'foo.cpp') self.assert_lint( '"A string containing NULL is ok"', '', 'foo.cpp') self.assert_lint( 'if (aboutNULL)', '', 'foo.cpp') self.assert_lint( 'myVariable = NULLify', '', 'foo.cpp') # Make sure that the NULL check does not apply to C and Objective-C files. 
self.assert_lint( 'functionCall(NULL)', '', 'foo.c') self.assert_lint( 'functionCall(NULL)', '', 'foo.m') # Make sure that the NULL check does not apply to g_object_{set,get} and # g_str{join,concat} self.assert_lint( 'g_object_get(foo, "prop", &bar, NULL);', '') self.assert_lint( 'g_object_set(foo, "prop", bar, NULL);', '') self.assert_lint( 'g_build_filename(foo, bar, NULL);', '') self.assert_lint( 'gst_bin_add_many(foo, bar, boo, NULL);', '') self.assert_lint( 'gst_bin_remove_many(foo, bar, boo, NULL);', '') self.assert_lint( 'gst_element_link_many(foo, bar, boo, NULL);', '') self.assert_lint( 'gst_element_unlink_many(foo, bar, boo, NULL);', '') self.assert_lint( 'gst_structure_get(foo, "value", G_TYPE_INT, &value, NULL);', '') self.assert_lint( 'gst_structure_set(foo, "value", G_TYPE_INT, value, NULL);', '') self.assert_lint( 'gst_structure_remove_fields(foo, "value", "bar", NULL);', '') self.assert_lint( 'gst_structure_new("foo", "value", G_TYPE_INT, value, NULL);', '') self.assert_lint( 'gst_structure_id_new(FOO, VALUE, G_TYPE_INT, value, NULL);', '') self.assert_lint( 'gst_structure_id_set(FOO, VALUE, G_TYPE_INT, value, NULL);', '') self.assert_lint( 'gst_structure_id_get(FOO, VALUE, G_TYPE_INT, &value, NULL);', '') self.assert_lint( 'gst_caps_new_simple(mime, "value", G_TYPE_INT, &value, NULL);', '') self.assert_lint( 'gst_caps_new_full(structure1, structure2, NULL);', '') self.assert_lint( 'gchar* result = g_strconcat("part1", "part2", "part3", NULL);', '') self.assert_lint( 'gchar* result = g_strconcat("part1", NULL);', '') self.assert_lint( 'gchar* result = g_strjoin(",", "part1", "part2", "part3", NULL);', '') self.assert_lint( 'gchar* result = g_strjoin(",", "part1", NULL);', '') self.assert_lint( 'gchar* result = gdk_pixbuf_save_to_callback(pixbuf, function, data, type, error, NULL);', '') self.assert_lint( 'gchar* result = gdk_pixbuf_save_to_buffer(pixbuf, function, data, type, error, NULL);', '') self.assert_lint( 'gchar* result = 
gdk_pixbuf_save_to_stream(pixbuf, function, data, type, error, NULL);', '') self.assert_lint( 'gtk_widget_style_get(style, "propertyName", &value, "otherName", &otherValue, NULL);', '') self.assert_lint( 'gtk_style_context_get_style(context, "propertyName", &value, "otherName", &otherValue, NULL);', '') self.assert_lint( 'gtk_style_context_get(context, static_cast<GtkStateFlags>(0), "property", &value, NULL);', '') self.assert_lint( 'gtk_widget_style_get_property(style, NULL, NULL);', 'Use 0 instead of NULL. [readability/null] [5]', 'foo.cpp') self.assert_lint( 'gtk_widget_style_get_valist(style, NULL, NULL);', 'Use 0 instead of NULL. [readability/null] [5]', 'foo.cpp') # 2. C++ and C bool values should be written as true and # false. Objective-C BOOL values should be written as YES and NO. # FIXME: Implement this. # 3. Tests for true/false and null/non-null should be done without # equality comparisons. self.assert_lint_one_of_many_errors_re( 'if (string != NULL)', r'Tests for true/false and null/non-null should be done without equality comparisons\.') self.assert_lint( 'if (p == nullptr)', 'Tests for true/false and null/non-null should be done without equality comparisons.' ' [readability/comparison_to_boolean] [5]') self.assert_lint( 'if (condition == true)', 'Tests for true/false and null/non-null should be done without equality comparisons.' ' [readability/comparison_to_boolean] [5]') self.assert_lint( 'if (myVariable != /* Why would anyone put a comment here? */ false)', 'Tests for true/false and null/non-null should be done without equality comparisons.' ' [readability/comparison_to_boolean] [5]') self.assert_lint_one_of_many_errors_re( 'if (NULL == thisMayBeNull)', r'Tests for true/false and null/non-null should be done without equality comparisons\.') self.assert_lint( 'if (nullptr /* funny place for a comment */ == p)', 'Tests for true/false and null/non-null should be done without equality comparisons.' 
' [readability/comparison_to_boolean] [5]') self.assert_lint( 'if (true != anotherCondition)', 'Tests for true/false and null/non-null should be done without equality comparisons.' ' [readability/comparison_to_boolean] [5]') self.assert_lint( 'if (false == myBoolValue)', 'Tests for true/false and null/non-null should be done without equality comparisons.' ' [readability/comparison_to_boolean] [5]') self.assert_lint( 'if (fontType == trueType)', '') self.assert_lint( 'if (othertrue == fontType)', '') self.assert_lint( 'if (LIKELY(foo == 0))', '') self.assert_lint( 'if (UNLIKELY(foo == 0))', '') self.assert_lint( 'if ((a - b) == 0.5)', '') self.assert_lint( 'if (0.5 == (a - b))', '') self.assert_lint( 'if (LIKELY(foo == NULL))', 'Use 0 instead of NULL. [readability/null] [5]') self.assert_lint( 'if (UNLIKELY(foo == NULL))', 'Use 0 instead of NULL. [readability/null] [5]') def test_directive_indentation(self): self.assert_lint( " #if FOO", "preprocessor directives (e.g., #ifdef, #define, #import) should never be indented." " [whitespace/indent] [4]", "foo.cpp") def test_using_std(self): self.assert_lint( 'using std::min;', "Use 'using namespace std;' instead of 'using std::min;'." " [build/using_std] [4]", 'foo.cpp') def test_using_std_swap_ignored(self): self.assert_lint( 'using std::swap;', '', 'foo.cpp') def test_max_macro(self): self.assert_lint( 'int i = MAX(0, 1);', '', 'foo.c') self.assert_lint( 'int i = MAX(0, 1);', 'Use std::max() or std::max<type>() instead of the MAX() macro.' ' [runtime/max_min_macros] [4]', 'foo.cpp') self.assert_lint( 'inline int foo() { return MAX(0, 1); }', 'Use std::max() or std::max<type>() instead of the MAX() macro.' ' [runtime/max_min_macros] [4]', 'foo.h') def test_min_macro(self): self.assert_lint( 'int i = MIN(0, 1);', '', 'foo.c') self.assert_lint( 'int i = MIN(0, 1);', 'Use std::min() or std::min<type>() instead of the MIN() macro.' 
' [runtime/max_min_macros] [4]', 'foo.cpp') self.assert_lint( 'inline int foo() { return MIN(0, 1); }', 'Use std::min() or std::min<type>() instead of the MIN() macro.' ' [runtime/max_min_macros] [4]', 'foo.h') def test_ctype_fucntion(self): self.assert_lint( 'int i = isascii(8);', 'Use equivelent function in <wtf/ASCIICType.h> instead of the ' 'isascii() function. [runtime/ctype_function] [4]', 'foo.cpp') def test_names(self): name_underscore_error_message = " is incorrectly named. Don't use underscores in your identifier names. [readability/naming/underscores] [4]" name_tooshort_error_message = " is incorrectly named. Don't use the single letter 'l' as an identifier name. [readability/naming] [4]" # Basic cases from WebKit style guide. self.assert_lint('struct Data;', '') self.assert_lint('size_t bufferSize;', '') self.assert_lint('class HTMLDocument;', '') self.assert_lint('String mimeType();', '') self.assert_lint('size_t buffer_size;', 'buffer_size' + name_underscore_error_message) self.assert_lint('short m_length;', '') self.assert_lint('short _length;', '_length' + name_underscore_error_message) self.assert_lint('short length_;', 'length_' + name_underscore_error_message) self.assert_lint('unsigned _length;', '_length' + name_underscore_error_message) self.assert_lint('unsigned long _length;', '_length' + name_underscore_error_message) self.assert_lint('unsigned long long _length;', '_length' + name_underscore_error_message) # Allow underscores in Objective C files. 
self.assert_lint('unsigned long long _length;', '', 'foo.m') self.assert_lint('unsigned long long _length;', '', 'foo.mm') self.assert_lint('#import "header_file.h"\n' 'unsigned long long _length;', '', 'foo.h') self.assert_lint('unsigned long long _length;\n' '@interface WebFullscreenWindow;', '', 'foo.h') self.assert_lint('unsigned long long _length;\n' '@implementation WebFullscreenWindow;', '', 'foo.h') self.assert_lint('unsigned long long _length;\n' '@class WebWindowFadeAnimation;', '', 'foo.h') # Variable name 'l' is easy to confuse with '1' self.assert_lint('int l;', 'l' + name_tooshort_error_message) self.assert_lint('size_t l;', 'l' + name_tooshort_error_message) self.assert_lint('long long l;', 'l' + name_tooshort_error_message) # Pointers, references, functions, templates, and adjectives. self.assert_lint('char* under_score;', 'under_score' + name_underscore_error_message) self.assert_lint('const int UNDER_SCORE;', 'UNDER_SCORE' + name_underscore_error_message) self.assert_lint('static inline const char const& const under_score;', 'under_score' + name_underscore_error_message) self.assert_lint('WebCore::LayoutObject* under_score;', 'under_score' + name_underscore_error_message) self.assert_lint('int func_name();', 'func_name' + name_underscore_error_message) self.assert_lint('RefPtr<LayoutObject*> under_score;', 'under_score' + name_underscore_error_message) self.assert_lint('WTF::Vector<WTF::RefPtr<const LayoutObject* const>> under_score;', 'under_score' + name_underscore_error_message) self.assert_lint('int under_score[];', 'under_score' + name_underscore_error_message) self.assert_lint('struct dirent* under_score;', 'under_score' + name_underscore_error_message) self.assert_lint('long under_score;', 'under_score' + name_underscore_error_message) self.assert_lint('long long under_score;', 'under_score' + name_underscore_error_message) self.assert_lint('long double under_score;', 'under_score' + name_underscore_error_message) self.assert_lint('long 
long int under_score;', 'under_score' + name_underscore_error_message) # Declarations in control statement. self.assert_lint('if (int under_score = 42) {', 'under_score' + name_underscore_error_message) self.assert_lint('else if (int under_score = 42) {', 'under_score' + name_underscore_error_message) self.assert_lint('for (int under_score = 42; cond; i++) {', 'under_score' + name_underscore_error_message) self.assert_lint('while (foo & under_score = bar) {', 'under_score' + name_underscore_error_message) self.assert_lint('for (foo * under_score = p; cond; i++) {', 'under_score' + name_underscore_error_message) self.assert_lint('for (foo * under_score; cond; i++) {', 'under_score' + name_underscore_error_message) self.assert_lint('while (foo & value_in_thirdparty_library) {', '') self.assert_lint('while (foo * value_in_thirdparty_library) {', '') self.assert_lint('if (mli && S_OK == mli->foo()) {', '') # More member variables and functions. self.assert_lint('int SomeClass::s_validName', '') self.assert_lint('int m_under_score;', 'm_under_score' + name_underscore_error_message) self.assert_lint('int SomeClass::s_under_score = 0;', 'SomeClass::s_under_score' + name_underscore_error_message) self.assert_lint('int SomeClass::under_score = 0;', 'SomeClass::under_score' + name_underscore_error_message) # Other statements. self.assert_lint('return INT_MAX;', '') self.assert_lint('return_t under_score;', 'under_score' + name_underscore_error_message) self.assert_lint('goto under_score;', 'under_score' + name_underscore_error_message) self.assert_lint('delete static_cast<Foo*>(p);', '') # Multiple variables in one line. 
self.assert_lint('void myFunction(int variable1, int another_variable);', 'another_variable' + name_underscore_error_message) self.assert_lint('int variable1, another_variable;', 'another_variable' + name_underscore_error_message) self.assert_lint('int first_variable, secondVariable;', 'first_variable' + name_underscore_error_message) self.assert_lint('void my_function(int variable_1, int variable_2);', ['my_function' + name_underscore_error_message, 'variable_1' + name_underscore_error_message, 'variable_2' + name_underscore_error_message]) self.assert_lint('for (int variable_1, variable_2;;) {', ['variable_1' + name_underscore_error_message, 'variable_2' + name_underscore_error_message]) # There is an exception for op code functions but only in the JavaScriptCore directory. self.assert_lint('void this_op_code(int var1, int var2)', '', 'Source/JavaScriptCore/foo.cpp') self.assert_lint('void op_code(int var1, int var2)', '', 'Source/JavaScriptCore/foo.cpp') self.assert_lint('void this_op_code(int var1, int var2)', 'this_op_code' + name_underscore_error_message) # GObject requires certain magical names in class declarations. self.assert_lint('void webkit_dom_object_init();', '') self.assert_lint('void webkit_dom_object_class_init();', '') # There is an exception for GTK+ API. self.assert_lint('void webkit_web_view_load(int var1, int var2)', '', 'Source/Webkit/gtk/webkit/foo.cpp') self.assert_lint('void webkit_web_view_load(int var1, int var2)', '', 'Source/Webkit2/UIProcess/gtk/foo.cpp') # Test that this doesn't also apply to files not in a 'gtk' directory. self.assert_lint('void webkit_web_view_load(int var1, int var2)', 'webkit_web_view_load is incorrectly named. Don\'t use underscores in your identifier names.' ' [readability/naming/underscores] [4]', 'Source/Webkit/webkit/foo.cpp') # Test that this doesn't also apply to names that don't start with 'webkit_'. 
self.assert_lint_one_of_many_errors_re('void otherkit_web_view_load(int var1, int var2)', 'otherkit_web_view_load is incorrectly named. Don\'t use underscores in your identifier names.' ' [readability/naming/underscores] [4]', 'Source/Webkit/webkit/foo.cpp') # There is an exception for some unit tests that begin with "tst_". self.assert_lint('void tst_QWebFrame::arrayObjectEnumerable(int var1, int var2)', '') # The Qt API uses names that begin with "qt_" or "_q_". self.assert_lint('void QTFrame::qt_drt_is_awesome(int var1, int var2)', '') self.assert_lint('void QTFrame::_q_drt_is_awesome(int var1, int var2)', '') self.assert_lint('void qt_drt_is_awesome(int var1, int var2);', '') self.assert_lint('void _q_drt_is_awesome(int var1, int var2);', '') # Cairo forward-declarations should not be a failure. self.assert_lint('typedef struct _cairo cairo_t;', '') self.assert_lint('typedef struct _cairo_surface cairo_surface_t;', '') self.assert_lint('typedef struct _cairo_scaled_font cairo_scaled_font_t;', '') # EFL forward-declarations should not be a failure. self.assert_lint('typedef struct _Ecore_Evas Ecore_Evas;', '') self.assert_lint('typedef struct _Ecore_Pipe Ecore_Pipe;', '') self.assert_lint('typedef struct _Eina_Rectangle Eina_Rectangle;', '') self.assert_lint('typedef struct _Evas_Object Evas_Object;', '') self.assert_lint('typedef struct _Ewk_History_Item Ewk_History_Item;', '') # NPAPI functions that start with NPN_, NPP_ or NP_ are allowed. self.assert_lint('void NPN_Status(NPP, const char*)', '') self.assert_lint('NPError NPP_SetWindow(NPP instance, NPWindow *window)', '') self.assert_lint('NPObject* NP_Allocate(NPP, NPClass*)', '') # const_iterator is allowed as well. self.assert_lint('typedef VectorType::const_iterator const_iterator;', '') # vm_throw is allowed as well. self.assert_lint('int vm_throw;', '') # Bitfields. self.assert_lint('unsigned _fillRule : 1;', '_fillRule' + name_underscore_error_message) # new operators in initialization. 
self.assert_lint('OwnPtr<uint32_t> variable(new uint32_t);', '') self.assert_lint('OwnPtr<uint32_t> variable(new (expr) uint32_t);', '') self.assert_lint('OwnPtr<uint32_t> under_score(new uint32_t);', 'under_score' + name_underscore_error_message) # Conversion operator declaration. self.assert_lint('operator int64_t();', '') def test_parameter_names(self): # Leave meaningless variable names out of function declarations. meaningless_variable_name_error_message = 'The parameter name "%s" adds no information, so it should be removed. [readability/parameter_name] [5]' parameter_error_rules = ('-', '+readability/parameter_name') # No variable name, so no error. self.assertEqual('', self.perform_lint('void func(int);', 'test.cpp', parameter_error_rules)) # Verify that copying the name of the set function causes the error (with some odd casing). self.assertEqual(meaningless_variable_name_error_message % 'itemCount', self.perform_lint('void setItemCount(size_t itemCount);', 'test.cpp', parameter_error_rules)) self.assertEqual(meaningless_variable_name_error_message % 'abcCount', self.perform_lint('void setABCCount(size_t abcCount);', 'test.cpp', parameter_error_rules)) # Verify that copying a type name will trigger the warning (even if the type is a template parameter). self.assertEqual(meaningless_variable_name_error_message % 'context', self.perform_lint('void funct(PassRefPtr<ScriptExecutionContext> context);', 'test.cpp', parameter_error_rules)) # Verify that acronyms as variable names trigger the error (for both set functions and type names). self.assertEqual(meaningless_variable_name_error_message % 'ec', self.perform_lint('void setExceptionCode(int ec);', 'test.cpp', parameter_error_rules)) self.assertEqual(meaningless_variable_name_error_message % 'ec', self.perform_lint('void funct(ExceptionCode ec);', 'test.cpp', parameter_error_rules)) # 'object' alone, appended, or as part of an acronym is meaningless. 
self.assertEqual(meaningless_variable_name_error_message % 'object', self.perform_lint('void funct(RenderView object);', 'test.cpp', parameter_error_rules)) self.assertEqual(meaningless_variable_name_error_message % 'viewObject', self.perform_lint('void funct(RenderView viewObject);', 'test.cpp', parameter_error_rules)) self.assertEqual(meaningless_variable_name_error_message % 'rvo', self.perform_lint('void funct(RenderView rvo);', 'test.cpp', parameter_error_rules)) # Check that r, g, b, and a are allowed. self.assertEqual('', self.perform_lint('void setRGBAValues(int r, int g, int b, int a);', 'test.cpp', parameter_error_rules)) # Verify that a simple substring match isn't done which would cause false positives. self.assertEqual('', self.perform_lint('void setNateLateCount(size_t elate);', 'test.cpp', parameter_error_rules)) self.assertEqual('', self.perform_lint('void funct(NateLate elate);', 'test.cpp', parameter_error_rules)) # Don't have generate warnings for functions (only declarations). self.assertEqual('', self.perform_lint('void funct(PassRefPtr<ScriptExecutionContext> context)\n' '{\n' '}\n', 'test.cpp', parameter_error_rules)) def test_comments(self): # A comment at the beginning of a line is ok. 
self.assert_lint('// comment', '') self.assert_lint(' // comment', '') self.assert_lint('} // namespace WebCore', 'One space before end of line comments' ' [whitespace/comments] [5]') def test_redundant_virtual(self): self.assert_lint('virtual void fooMethod() override;', '"virtual" is redundant since function is already declared as "override" [readability/inheritance] [4]') self.assert_lint('virtual void fooMethod(\n) override {}', '"virtual" is redundant since function is already declared as "override" [readability/inheritance] [4]') self.assert_lint('virtual void fooMethod() final;', '"virtual" is redundant since function is already declared as "final" [readability/inheritance] [4]') self.assert_lint('virtual void fooMethod(\n) final {}', '"virtual" is redundant since function is already declared as "final" [readability/inheritance] [4]') def test_redundant_override(self): self.assert_lint('void fooMethod() override final;', '"override" is redundant since function is already declared as "final" [readability/inheritance] [4]') self.assert_lint('void fooMethod(\n) override final {}', '"override" is redundant since function is already declared as "final" [readability/inheritance] [4]') self.assert_lint('void fooMethod() final override;', '"override" is redundant since function is already declared as "final" [readability/inheritance] [4]') self.assert_lint('void fooMethod(\n) final override {}', '"override" is redundant since function is already declared as "final" [readability/inheritance] [4]') def test_webkit_export_check(self): webkit_export_error_rules = ('-', '+readability/webkit_export') self.assertEqual('', self.perform_lint('WEBKIT_EXPORT int foo();\n', 'WebKit/chromium/public/test.h', webkit_export_error_rules)) self.assertEqual('', self.perform_lint('WEBKIT_EXPORT int foo();\n', 'WebKit/chromium/tests/test.h', webkit_export_error_rules)) self.assertEqual('WEBKIT_EXPORT should only be used in header files. 
[readability/webkit_export] [5]', self.perform_lint('WEBKIT_EXPORT int foo();\n', 'WebKit/chromium/public/test.cpp', webkit_export_error_rules)) self.assertEqual('WEBKIT_EXPORT should only appear in the chromium public (or tests) directory. [readability/webkit_export] [5]', self.perform_lint('WEBKIT_EXPORT int foo();\n', 'WebKit/chromium/src/test.h', webkit_export_error_rules)) self.assertEqual('WEBKIT_EXPORT should not be used on a function with a body. [readability/webkit_export] [5]', self.perform_lint('WEBKIT_EXPORT int foo() { }\n', 'WebKit/chromium/public/test.h', webkit_export_error_rules)) self.assertEqual('WEBKIT_EXPORT should not be used on a function with a body. [readability/webkit_export] [5]', self.perform_lint('WEBKIT_EXPORT inline int foo()\n' '{\n' '}\n', 'WebKit/chromium/public/test.h', webkit_export_error_rules)) self.assertEqual('WEBKIT_EXPORT should not be used with a pure virtual function. [readability/webkit_export] [5]', self.perform_lint('{}\n' 'WEBKIT_EXPORT\n' 'virtual\n' 'int\n' 'foo() = 0;\n', 'WebKit/chromium/public/test.h', webkit_export_error_rules)) self.assertEqual('', self.perform_lint('{}\n' 'WEBKIT_EXPORT\n' 'virtual\n' 'int\n' 'foo() = 0;\n', 'test.h', webkit_export_error_rules)) def test_other(self): # FIXME: Implement this. pass class CppCheckerTest(unittest.TestCase): """Tests CppChecker class.""" def mock_handle_style_error(self): pass def _checker(self): return CppChecker("foo", "h", self.mock_handle_style_error, 3) def test_init(self): """Test __init__ constructor.""" checker = self._checker() self.assertEqual(checker.file_extension, "h") self.assertEqual(checker.file_path, "foo") self.assertEqual(checker.handle_style_error, self.mock_handle_style_error) self.assertEqual(checker.min_confidence, 3) def test_eq(self): """Test __eq__ equality function.""" checker1 = self._checker() checker2 = self._checker() # == calls __eq__. 
self.assertTrue(checker1 == checker2) def mock_handle_style_error2(self): pass # Verify that a difference in any argument cause equality to fail. checker = CppChecker("foo", "h", self.mock_handle_style_error, 3) self.assertFalse(checker == CppChecker("bar", "h", self.mock_handle_style_error, 3)) self.assertFalse(checker == CppChecker("foo", "c", self.mock_handle_style_error, 3)) self.assertFalse(checker == CppChecker("foo", "h", mock_handle_style_error2, 3)) self.assertFalse(checker == CppChecker("foo", "h", self.mock_handle_style_error, 4)) def test_ne(self): """Test __ne__ inequality function.""" checker1 = self._checker() checker2 = self._checker() # != calls __ne__. # By default, __ne__ always returns true on different objects. # Thus, just check the distinguishing case to verify that the # code defines __ne__. self.assertFalse(checker1 != checker2)
bsd-3-clause
erkanay/django
django/contrib/auth/admin.py
37
8018
from django.db import transaction from django.conf import settings from django.conf.urls import url from django.contrib import admin from django.contrib.admin.options import IS_POPUP_VAR from django.contrib.auth import update_session_auth_hash from django.contrib.auth.forms import (UserCreationForm, UserChangeForm, AdminPasswordChangeForm) from django.contrib.auth.models import User, Group from django.contrib import messages from django.core.exceptions import PermissionDenied from django.http import HttpResponseRedirect, Http404 from django.shortcuts import get_object_or_404 from django.template.response import TemplateResponse from django.utils.html import escape from django.utils.decorators import method_decorator from django.utils.translation import ugettext, ugettext_lazy as _ from django.views.decorators.csrf import csrf_protect from django.views.decorators.debug import sensitive_post_parameters csrf_protect_m = method_decorator(csrf_protect) sensitive_post_parameters_m = method_decorator(sensitive_post_parameters()) class GroupAdmin(admin.ModelAdmin): search_fields = ('name',) ordering = ('name',) filter_horizontal = ('permissions',) def formfield_for_manytomany(self, db_field, request=None, **kwargs): if db_field.name == 'permissions': qs = kwargs.get('queryset', db_field.rel.to.objects) # Avoid a major performance hit resolving permission names which # triggers a content_type load: kwargs['queryset'] = qs.select_related('content_type') return super(GroupAdmin, self).formfield_for_manytomany( db_field, request=request, **kwargs) class UserAdmin(admin.ModelAdmin): add_form_template = 'admin/auth/user/add_form.html' change_user_password_template = None fieldsets = ( (None, {'fields': ('username', 'password')}), (_('Personal info'), {'fields': ('first_name', 'last_name', 'email')}), (_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser', 'groups', 'user_permissions')}), (_('Important dates'), {'fields': ('last_login', 'date_joined')}), ) 
add_fieldsets = ( (None, { 'classes': ('wide',), 'fields': ('username', 'password1', 'password2'), }), ) form = UserChangeForm add_form = UserCreationForm change_password_form = AdminPasswordChangeForm list_display = ('username', 'email', 'first_name', 'last_name', 'is_staff') list_filter = ('is_staff', 'is_superuser', 'is_active', 'groups') search_fields = ('username', 'first_name', 'last_name', 'email') ordering = ('username',) filter_horizontal = ('groups', 'user_permissions',) def get_fieldsets(self, request, obj=None): if not obj: return self.add_fieldsets return super(UserAdmin, self).get_fieldsets(request, obj) def get_form(self, request, obj=None, **kwargs): """ Use special form during user creation """ defaults = {} if obj is None: defaults['form'] = self.add_form defaults.update(kwargs) return super(UserAdmin, self).get_form(request, obj, **defaults) def get_urls(self): return [ url(r'^(\d+)/password/$', self.admin_site.admin_view(self.user_change_password)), ] + super(UserAdmin, self).get_urls() def lookup_allowed(self, lookup, value): # See #20078: we don't want to allow any lookups involving passwords. if lookup.startswith('password'): return False return super(UserAdmin, self).lookup_allowed(lookup, value) @sensitive_post_parameters_m @csrf_protect_m @transaction.atomic def add_view(self, request, form_url='', extra_context=None): # It's an error for a user to have add permission but NOT change # permission for users. If we allowed such users to add users, they # could create superusers, which would mean they would essentially have # the permission to change users. To avoid the problem entirely, we # disallow users from adding users if they don't have change # permission. if not self.has_change_permission(request): if self.has_add_permission(request) and settings.DEBUG: # Raise Http404 in debug mode so that the user gets a helpful # error message. raise Http404( 'Your user does not have the "Change user" permission. 
In ' 'order to add users, Django requires that your user ' 'account have both the "Add user" and "Change user" ' 'permissions set.') raise PermissionDenied if extra_context is None: extra_context = {} username_field = self.model._meta.get_field(self.model.USERNAME_FIELD) defaults = { 'auto_populated_fields': (), 'username_help_text': username_field.help_text, } extra_context.update(defaults) return super(UserAdmin, self).add_view(request, form_url, extra_context) @sensitive_post_parameters_m def user_change_password(self, request, id, form_url=''): if not self.has_change_permission(request): raise PermissionDenied user = get_object_or_404(self.get_queryset(request), pk=id) if request.method == 'POST': form = self.change_password_form(user, request.POST) if form.is_valid(): form.save() change_message = self.construct_change_message(request, form, None) self.log_change(request, user, change_message) msg = ugettext('Password changed successfully.') messages.success(request, msg) update_session_auth_hash(request, form.user) return HttpResponseRedirect('..') else: form = self.change_password_form(user) fieldsets = [(None, {'fields': list(form.base_fields)})] adminForm = admin.helpers.AdminForm(form, fieldsets, {}) context = { 'title': _('Change password: %s') % escape(user.get_username()), 'adminForm': adminForm, 'form_url': form_url, 'form': form, 'is_popup': (IS_POPUP_VAR in request.POST or IS_POPUP_VAR in request.GET), 'add': True, 'change': False, 'has_delete_permission': False, 'has_change_permission': True, 'has_absolute_url': False, 'opts': self.model._meta, 'original': user, 'save_as': False, 'show_save': True, } context.update(admin.site.each_context()) return TemplateResponse(request, self.change_user_password_template or 'admin/auth/user/change_password.html', context, current_app=self.admin_site.name) def response_add(self, request, obj, post_url_continue=None): """ Determines the HttpResponse for the add_view stage. 
It mostly defers to its superclass implementation but is customized because the User model has a slightly different workflow. """ # We should allow further modification of the user just added i.e. the # 'Save' button should behave like the 'Save and continue editing' # button except in two scenarios: # * The user has pressed the 'Save and add another' button # * We are adding a user in a popup if '_addanother' not in request.POST and IS_POPUP_VAR not in request.POST: request.POST['_continue'] = 1 return super(UserAdmin, self).response_add(request, obj, post_url_continue) admin.site.register(Group, GroupAdmin) admin.site.register(User, UserAdmin)
bsd-3-clause
aseldawy/spatialhadoop
src/contrib/hod/testing/testRingmasterRPCs.py
182
5759
#Licensed to the Apache Software Foundation (ASF) under one #or more contributor license agreements. See the NOTICE file #distributed with this work for additional information #regarding copyright ownership. The ASF licenses this file #to you under the Apache License, Version 2.0 (the #"License"); you may not use this file except in compliance #with the License. You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 #Unless required by applicable law or agreed to in writing, software #distributed under the License is distributed on an "AS IS" BASIS, #WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #See the License for the specific language governing permissions and #limitations under the License. import unittest, os, sys, re, threading, time import logging myDirectory = os.path.realpath(sys.argv[0]) rootDirectory = re.sub("/testing/.*", "", myDirectory) sys.path.append(rootDirectory) from testing.lib import BaseTestSuite excludes = ['test_MINITEST1', 'test_MINITEST2'] from hodlib.GridServices import * from hodlib.Common.desc import ServiceDesc from hodlib.RingMaster.ringMaster import _LogMasterSources configuration = { 'hod': {}, 'resource_manager': { 'id': 'torque', 'batch-home': '/home/y/' }, 'ringmaster': { 'max-connect' : 2, 'max-master-failures' : 5 }, 'hodring': { }, 'gridservice-mapred': { 'id': 'mapred' } , 'gridservice-hdfs': { 'id': 'hdfs' }, 'servicedesc' : {} , 'nodepooldesc': {} , } # All test-case classes should have the naming convention test_.* class test_MINITEST1(unittest.TestCase): def setUp(self): pass # All testMethods have to have their names start with 'test' def testSuccess(self): pass def testFailure(self): pass def tearDown(self): pass class test_Multiple_Workers(unittest.TestCase): def setUp(self): self.config = configuration self.config['ringmaster']['workers_per_ring'] = 2 hdfsDesc = self.config['servicedesc']['hdfs'] = ServiceDesc(self.config['gridservice-hdfs']) mrDesc = 
self.config['servicedesc']['mapred'] = ServiceDesc(self.config['gridservice-mapred']) self.hdfs = Hdfs(hdfsDesc, [], 0, 19, workers_per_ring = \ self.config['ringmaster']['workers_per_ring']) self.mr = MapReduce(mrDesc, [],1, 19, workers_per_ring = \ self.config['ringmaster']['workers_per_ring']) self.log = logging.getLogger() pass # All testMethods have to have their names start with 'test' def testWorkersCount(self): self.serviceDict = {} self.serviceDict[self.hdfs.getName()] = self.hdfs self.serviceDict[self.mr.getName()] = self.mr self.rpcSet = _LogMasterSources(self.serviceDict, self.config, None, self.log, None) cmdList = self.rpcSet.getCommand('host1') self.assertEquals(len(cmdList), 2) self.assertEquals(cmdList[0].dict['argv'][0], 'namenode') self.assertEquals(cmdList[1].dict['argv'][0], 'namenode') addParams = ['fs.default.name=host1:51234', 'dfs.http.address=host1:5125' ] self.rpcSet.addMasterParams('host1', addParams) # print "NN is launched" cmdList = self.rpcSet.getCommand('host2') self.assertEquals(len(cmdList), 1) self.assertEquals(cmdList[0].dict['argv'][0], 'jobtracker') addParams = ['mapred.job.tracker=host2:51236', 'mapred.job.tracker.http.address=host2:51237'] self.rpcSet.addMasterParams('host2', addParams) # print "JT is launched" cmdList = self.rpcSet.getCommand('host3') # Verify the workers count per ring : TTs + DNs self.assertEquals(len(cmdList), self.config['ringmaster']['workers_per_ring'] * 2) pass def testFailure(self): pass def tearDown(self): pass class test_GetCommand(unittest.TestCase): def setUp(self): self.config = configuration hdfsDesc = self.config['servicedesc']['hdfs'] = ServiceDesc(self.config['gridservice-hdfs']) mrDesc = self.config['servicedesc']['mapred'] = ServiceDesc(self.config['gridservice-mapred']) # API : serviceObj = service(desc, workDirs, reqNodes, version) self.hdfs = Hdfs(hdfsDesc, [], 0, 17) self.hdfsExternal = HdfsExternal(hdfsDesc, [], 17) self.mr = MapReduce(mrDesc, [],1, 17) self.mrExternal = 
MapReduceExternal(mrDesc, [], 17) self.log = logging.getLogger() pass # All testMethods have to have their names start with 'test' def testBothInternal(self): self.serviceDict = {} self.serviceDict[self.hdfs.getName()] = self.hdfs self.serviceDict[self.mr.getName()] = self.mr self.rpcSet = _LogMasterSources(self.serviceDict, self.config, None, self.log, None) cmdList = self.rpcSet.getCommand('localhost') self.assertEquals(cmdList.__len__(), 2) self.assertEquals(cmdList[0].dict['argv'][0], 'namenode') self.assertEquals(cmdList[1].dict['argv'][0], 'namenode') pass def tearDown(self): pass class RingmasterRPCsTestSuite(BaseTestSuite): def __init__(self): # suite setup BaseTestSuite.__init__(self, __name__, excludes) pass def cleanUp(self): # suite tearDown pass def RunRingmasterRPCsTests(): # modulename_suite suite = RingmasterRPCsTestSuite() testResult = suite.runTests() suite.cleanUp() return testResult if __name__ == "__main__": RunRingmasterRPCsTests()
apache-2.0
Varentsov/servo
tests/dromaeo/run_dromaeo.py
111
2507
#!/usr/bin/env python # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. import os import subprocess import sys import BaseHTTPServer import SimpleHTTPServer import urlparse import json # Port to run the HTTP server on for Dromaeo. TEST_SERVER_PORT = 8192 # Run servo and print / parse the results for a specific Dromaeo module. def run_servo(servo_exe, tests): url = "http://localhost:{0}/dromaeo/web/index.html?{1}&automated&post_json".format(TEST_SERVER_PORT, tests) args = [servo_exe, url, "-z", "-f"] return subprocess.Popen(args) # Print usage if command line args are incorrect def print_usage(): print("USAGE: {0} tests servo_binary dromaeo_base_dir".format(sys.argv[0])) # Handle the POST at the end class RequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): def do_POST(self): self.send_response(200) self.end_headers() self.wfile.write("<HTML>POST OK.<BR><BR>") length = int(self.headers.getheader('content-length')) parameters = urlparse.parse_qs(self.rfile.read(length)) self.server.got_post = True self.server.post_data = parameters['data'] def log_message(self, format, *args): return if __name__ == '__main__': if len(sys.argv) == 4: tests = sys.argv[1] servo_exe = sys.argv[2] base_dir = sys.argv[3] os.chdir(base_dir) # Ensure servo binary can be found if not os.path.isfile(servo_exe): print("Unable to find {0}. 
This script expects an existing build of Servo.".format(servo_exe)) sys.exit(1) # Start the test server server = BaseHTTPServer.HTTPServer(('', TEST_SERVER_PORT), RequestHandler) print("Testing Dromaeo on Servo!") proc = run_servo(servo_exe, tests) server.got_post = False while not server.got_post: server.handle_request() data = json.loads(server.post_data[0]) n = 0 l = 0 for test in data: n = max(n, len(data[test])) l = max(l, len(test)) print("\n Test{0} | Time".format(" " * (l - len("Test")))) print("-{0}-|-{1}-".format("-" * l, "-" * n)) for test in data: print(" {0}{1} | {2}".format(test, " " * (l - len(test)), data[test])) proc.kill() else: print_usage()
mpl-2.0
AnotherIvan/calibre
src/calibre/gui2/store/stores/mills_boon_uk_plugin.py
15
3027
# -*- coding: utf-8 -*- from __future__ import (unicode_literals, division, absolute_import, print_function) store_version = 2 # Needed for dynamic plugin loading __license__ = 'GPL 3' __copyright__ = '2011, John Schember <john@nachtimwald.com>' __docformat__ = 'restructuredtext en' import urllib2 from contextlib import closing from lxml import html from PyQt5.Qt import QUrl from calibre import browser, url_slash_cleaner from calibre.gui2 import open_url from calibre.gui2.store import StorePlugin from calibre.gui2.store.basic_config import BasicStoreConfig from calibre.gui2.store.search_result import SearchResult from calibre.gui2.store.web_store_dialog import WebStoreDialog class MillsBoonUKStore(BasicStoreConfig, StorePlugin): def open(self, parent=None, detail_item=None, external=False): url = 'http://www.awin1.com/awclick.php?mid=1150&id=120917' detail_url = 'http://www.awin1.com/cread.php?awinmid=1150&awinaffid=120917&clickref=&p=' if external or self.config.get('open_external', False): if detail_item: url = detail_url + detail_item open_url(QUrl(url_slash_cleaner(url))) else: detail_url = None if detail_item: detail_url = url + detail_item d = WebStoreDialog(self.gui, url, parent, detail_url) d.setWindowTitle(self.name) d.set_tags(self.config.get('tags', '')) d.exec_() def search(self, query, max_results=10, timeout=60): base_url = 'http://www.millsandboon.co.uk' url = base_url + '/search?format=ebook&q=' + urllib2.quote(query) #print(url) br = browser() counter = max_results with closing(br.open(url, timeout=timeout)) as f: doc = html.fromstring(f.read()) for data in doc.xpath('//article[contains(@class, "group")]'): if counter <= 0: break id_ = ''.join(data.xpath('.//div[@class="img-wrapper"]/a/@href')).strip() id_ = base_url + id_ if not id_: continue cover_url = ''.join(data.xpath('.//div[@class="img-wrapper"]/a/img/@src')) title = ''.join(data.xpath('.//div[@class="img-wrapper"]/a/img/@alt')).strip() author = 
''.join(data.xpath('.//a[@class="author"]/text()')) price = ''.join(data.xpath('.//li[@class="productAttribute" and child::span[text()="eBook"]]/input/@value')) format_ = ''.join(data.xpath('.//p[@class="doc-meta-format"]/span[last()]/text()')) drm = SearchResult.DRM_LOCKED counter -= 1 s = SearchResult() s.cover_url = cover_url s.title = title.strip() s.author = author.strip() s.price = price s.detail_item = id_ s.drm = drm s.formats = format_ yield s
gpl-3.0
MebiusHKU/flask-web
flask/lib/python2.7/site-packages/pip/utils/__init__.py
186
26814
from __future__ import absolute_import import contextlib import errno import locale import logging import re import os import posixpath import shutil import stat import subprocess import sys import tarfile import zipfile from pip.exceptions import InstallationError from pip.compat import console_to_str, stdlib_pkgs from pip.locations import ( site_packages, user_site, running_under_virtualenv, virtualenv_no_global, write_delete_marker_file, ) from pip._vendor import pkg_resources from pip._vendor.six.moves import input from pip._vendor.six import PY2 from pip._vendor.retrying import retry if PY2: from io import BytesIO as StringIO else: from io import StringIO __all__ = ['rmtree', 'display_path', 'backup_dir', 'ask', 'Inf', 'normalize_name', 'splitext', 'format_size', 'is_installable_dir', 'is_svn_page', 'file_contents', 'split_leading_dir', 'has_leading_dir', 'make_path_relative', 'normalize_path', 'renames', 'get_terminal_size', 'get_prog', 'unzip_file', 'untar_file', 'unpack_file', 'call_subprocess', 'captured_stdout', 'remove_tracebacks', 'ensure_dir', 'ARCHIVE_EXTENSIONS', 'SUPPORTED_EXTENSIONS', 'get_installed_version'] logger = logging.getLogger(__name__) BZ2_EXTENSIONS = ('.tar.bz2', '.tbz') ZIP_EXTENSIONS = ('.zip', '.whl') TAR_EXTENSIONS = ('.tar.gz', '.tgz', '.tar') ARCHIVE_EXTENSIONS = ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS try: import bz2 # noqa SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS except ImportError: logger.debug('bz2 module is not available') SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS def import_or_raise(pkg_or_module_string, ExceptionType, *args, **kwargs): try: return __import__(pkg_or_module_string) except ImportError: raise ExceptionType(*args, **kwargs) def ensure_dir(path): """os.path.makedirs without EEXIST.""" try: os.makedirs(path) except OSError as e: if e.errno != errno.EEXIST: raise def get_prog(): try: if os.path.basename(sys.argv[0]) in ('__main__.py', '-c'): return "%s -m pip" % 
sys.executable except (AttributeError, TypeError, IndexError): pass return 'pip' # Retry every half second for up to 3 seconds @retry(stop_max_delay=3000, wait_fixed=500) def rmtree(dir, ignore_errors=False): shutil.rmtree(dir, ignore_errors=ignore_errors, onerror=rmtree_errorhandler) def rmtree_errorhandler(func, path, exc_info): """On Windows, the files in .svn are read-only, so when rmtree() tries to remove them, an exception is thrown. We catch that here, remove the read-only attribute, and hopefully continue without problems.""" # if file type currently read only if os.stat(path).st_mode & stat.S_IREAD: # convert to read/write os.chmod(path, stat.S_IWRITE) # use the original function to repeat the operation func(path) return else: raise def display_path(path): """Gives the display value for a given path, making it relative to cwd if possible.""" path = os.path.normcase(os.path.abspath(path)) if sys.version_info[0] == 2: path = path.decode(sys.getfilesystemencoding(), 'replace') path = path.encode(sys.getdefaultencoding(), 'replace') if path.startswith(os.getcwd() + os.path.sep): path = '.' 
+ path[len(os.getcwd()):] return path def backup_dir(dir, ext='.bak'): """Figure out the name of a directory to back up the given dir to (adding .bak, .bak2, etc)""" n = 1 extension = ext while os.path.exists(dir + extension): n += 1 extension = ext + str(n) return dir + extension def ask_path_exists(message, options): for action in os.environ.get('PIP_EXISTS_ACTION', '').split(): if action in options: return action return ask(message, options) def ask(message, options): """Ask the message interactively, with the given possible responses""" while 1: if os.environ.get('PIP_NO_INPUT'): raise Exception( 'No input was expected ($PIP_NO_INPUT set); question: %s' % message ) response = input(message) response = response.strip().lower() if response not in options: print( 'Your response (%r) was not one of the expected responses: ' '%s' % (response, ', '.join(options)) ) else: return response class _Inf(object): """I am bigger than everything!""" def __eq__(self, other): if self is other: return True else: return False def __ne__(self, other): return not self.__eq__(other) def __lt__(self, other): return False def __le__(self, other): return False def __gt__(self, other): return True def __ge__(self, other): return True def __repr__(self): return 'Inf' Inf = _Inf() # this object is not currently used as a sortable in our code del _Inf _normalize_re = re.compile(r'[^a-z]', re.I) def normalize_name(name): return _normalize_re.sub('-', name.lower()) def format_size(bytes): if bytes > 1000 * 1000: return '%.1fMB' % (bytes / 1000.0 / 1000) elif bytes > 10 * 1000: return '%ikB' % (bytes / 1000) elif bytes > 1000: return '%.1fkB' % (bytes / 1000.0) else: return '%ibytes' % bytes def is_installable_dir(path): """Return True if `path` is a directory containing a setup.py file.""" if not os.path.isdir(path): return False setup_py = os.path.join(path, 'setup.py') if os.path.isfile(setup_py): return True return False def is_svn_page(html): """ Returns true if the page appears to be 
the index page of an svn repository """ return (re.search(r'<title>[^<]*Revision \d+:', html) and re.search(r'Powered by (?:<a[^>]*?>)?Subversion', html, re.I)) def file_contents(filename): with open(filename, 'rb') as fp: return fp.read().decode('utf-8') def split_leading_dir(path): path = str(path) path = path.lstrip('/').lstrip('\\') if '/' in path and (('\\' in path and path.find('/') < path.find('\\')) or '\\' not in path): return path.split('/', 1) elif '\\' in path: return path.split('\\', 1) else: return path, '' def has_leading_dir(paths): """Returns true if all the paths have the same leading path name (i.e., everything is in one subdirectory in an archive)""" common_prefix = None for path in paths: prefix, rest = split_leading_dir(path) if not prefix: return False elif common_prefix is None: common_prefix = prefix elif prefix != common_prefix: return False return True def make_path_relative(path, rel_to): """ Make a filename relative, where the filename path, and it is relative to rel_to >>> make_path_relative('/usr/share/something/a-file.pth', ... '/usr/share/another-place/src/Directory') '../../../something/a-file.pth' >>> make_path_relative('/usr/share/something/a-file.pth', ... '/home/user/src/Directory') '../../../usr/share/something/a-file.pth' >>> make_path_relative('/usr/share/a-file.pth', '/usr/share/') 'a-file.pth' """ path_filename = os.path.basename(path) path = os.path.dirname(path) path = os.path.normpath(os.path.abspath(path)) rel_to = os.path.normpath(os.path.abspath(rel_to)) path_parts = path.strip(os.path.sep).split(os.path.sep) rel_to_parts = rel_to.strip(os.path.sep).split(os.path.sep) while path_parts and rel_to_parts and path_parts[0] == rel_to_parts[0]: path_parts.pop(0) rel_to_parts.pop(0) full_parts = ['..'] * len(rel_to_parts) + path_parts + [path_filename] if full_parts == ['']: return '.' 
+ os.path.sep return os.path.sep.join(full_parts) def normalize_path(path, resolve_symlinks=True): """ Convert a path to its canonical, case-normalized, absolute version. """ path = os.path.expanduser(path) if resolve_symlinks: path = os.path.realpath(path) else: path = os.path.abspath(path) return os.path.normcase(path) def splitext(path): """Like os.path.splitext, but take off .tar too""" base, ext = posixpath.splitext(path) if base.lower().endswith('.tar'): ext = base[-4:] + ext base = base[:-4] return base, ext def renames(old, new): """Like os.renames(), but handles renaming across devices.""" # Implementation borrowed from os.renames(). head, tail = os.path.split(new) if head and tail and not os.path.exists(head): os.makedirs(head) shutil.move(old, new) head, tail = os.path.split(old) if head and tail: try: os.removedirs(head) except OSError: pass def is_local(path): """ Return True if path is within sys.prefix, if we're running in a virtualenv. If we're not in a virtualenv, all paths are considered "local." """ if not running_under_virtualenv(): return True return normalize_path(path).startswith(normalize_path(sys.prefix)) def dist_is_local(dist): """ Return True if given Distribution object is installed locally (i.e. within current virtualenv). Always True if we're not in a virtualenv. """ return is_local(dist_location(dist)) def dist_in_usersite(dist): """ Return True if given Distribution is installed in user site. """ norm_path = normalize_path(dist_location(dist)) return norm_path.startswith(normalize_path(user_site)) def dist_in_site_packages(dist): """ Return True if given Distribution is installed in distutils.sysconfig.get_python_lib(). 
""" return normalize_path( dist_location(dist) ).startswith(normalize_path(site_packages)) def dist_is_editable(dist): """Is distribution an editable install?""" # TODO: factor out determining editableness out of FrozenRequirement from pip import FrozenRequirement req = FrozenRequirement.from_dist(dist, []) return req.editable def get_installed_distributions(local_only=True, skip=stdlib_pkgs, include_editables=True, editables_only=False, user_only=False): """ Return a list of installed Distribution objects. If ``local_only`` is True (default), only return installations local to the current virtualenv, if in a virtualenv. ``skip`` argument is an iterable of lower-case project names to ignore; defaults to stdlib_pkgs If ``editables`` is False, don't report editables. If ``editables_only`` is True , only report editables. If ``user_only`` is True , only report installations in the user site directory. """ if local_only: local_test = dist_is_local else: def local_test(d): return True if include_editables: def editable_test(d): return True else: def editable_test(d): return not dist_is_editable(d) if editables_only: def editables_only_test(d): return dist_is_editable(d) else: def editables_only_test(d): return True if user_only: user_test = dist_in_usersite else: def user_test(d): return True return [d for d in pkg_resources.working_set if local_test(d) and d.key not in skip and editable_test(d) and editables_only_test(d) and user_test(d) ] def egg_link_path(dist): """ Return the path for the .egg-link file if it exists, otherwise, None. There's 3 scenarios: 1) not in a virtualenv try to find in site.USER_SITE, then site_packages 2) in a no-global virtualenv try to find in site_packages 3) in a yes-global virtualenv try to find in site_packages, then site.USER_SITE (don't look in global location) For #1 and #3, there could be odd cases, where there's an egg-link in 2 locations. This method will just return the first one found. 
""" sites = [] if running_under_virtualenv(): if virtualenv_no_global(): sites.append(site_packages) else: sites.append(site_packages) if user_site: sites.append(user_site) else: if user_site: sites.append(user_site) sites.append(site_packages) for site in sites: egglink = os.path.join(site, dist.project_name) + '.egg-link' if os.path.isfile(egglink): return egglink def dist_location(dist): """ Get the site-packages location of this distribution. Generally this is dist.location, except in the case of develop-installed packages, where dist.location is the source code location, and we want to know where the egg-link file is. """ egg_link = egg_link_path(dist) if egg_link: return egg_link return dist.location def get_terminal_size(): """Returns a tuple (x, y) representing the width(x) and the height(x) in characters of the terminal window.""" def ioctl_GWINSZ(fd): try: import fcntl import termios import struct cr = struct.unpack( 'hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234') ) except: return None if cr == (0, 0): return None return cr cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2) if not cr: try: fd = os.open(os.ctermid(), os.O_RDONLY) cr = ioctl_GWINSZ(fd) os.close(fd) except: pass if not cr: cr = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80)) return int(cr[1]), int(cr[0]) def current_umask(): """Get the current umask which involves having to set it temporarily.""" mask = os.umask(0) os.umask(mask) return mask def unzip_file(filename, location, flatten=True): """ Unzip the file (with path `filename`) to the destination `location`. All files are written based on system defaults and umask (i.e. permissions are not preserved), except that regular file members with any execute permissions (user, group, or world) have "chmod +x" applied after being written. Note that for windows, any execute changes using os.chmod are no-ops per the python docs. 
""" ensure_dir(location) zipfp = open(filename, 'rb') try: zip = zipfile.ZipFile(zipfp, allowZip64=True) leading = has_leading_dir(zip.namelist()) and flatten for info in zip.infolist(): name = info.filename data = zip.read(name) fn = name if leading: fn = split_leading_dir(name)[1] fn = os.path.join(location, fn) dir = os.path.dirname(fn) if fn.endswith('/') or fn.endswith('\\'): # A directory ensure_dir(fn) else: ensure_dir(dir) fp = open(fn, 'wb') try: fp.write(data) finally: fp.close() mode = info.external_attr >> 16 # if mode and regular file and any execute permissions for # user/group/world? if mode and stat.S_ISREG(mode) and mode & 0o111: # make dest file have execute for user/group/world # (chmod +x) no-op on windows per python docs os.chmod(fn, (0o777 - current_umask() | 0o111)) finally: zipfp.close() def untar_file(filename, location): """ Untar the file (with path `filename`) to the destination `location`. All files are written based on system defaults and umask (i.e. permissions are not preserved), except that regular file members with any execute permissions (user, group, or world) have "chmod +x" applied after being written. Note that for windows, any execute changes using os.chmod are no-ops per the python docs. 
""" ensure_dir(location) if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'): mode = 'r:gz' elif filename.lower().endswith(BZ2_EXTENSIONS): mode = 'r:bz2' elif filename.lower().endswith('.tar'): mode = 'r' else: logger.warning( 'Cannot determine compression type for file %s', filename, ) mode = 'r:*' tar = tarfile.open(filename, mode) try: # note: python<=2.5 doesn't seem to know about pax headers, filter them leading = has_leading_dir([ member.name for member in tar.getmembers() if member.name != 'pax_global_header' ]) for member in tar.getmembers(): fn = member.name if fn == 'pax_global_header': continue if leading: fn = split_leading_dir(fn)[1] path = os.path.join(location, fn) if member.isdir(): ensure_dir(path) elif member.issym(): try: tar._extract_member(member, path) except Exception as exc: # Some corrupt tar files seem to produce this # (specifically bad symlinks) logger.warning( 'In the tar file %s the member %s is invalid: %s', filename, member.name, exc, ) continue else: try: fp = tar.extractfile(member) except (KeyError, AttributeError) as exc: # Some corrupt tar files seem to produce this # (specifically bad symlinks) logger.warning( 'In the tar file %s the member %s is invalid: %s', filename, member.name, exc, ) continue ensure_dir(os.path.dirname(path)) destfp = open(path, 'wb') try: shutil.copyfileobj(fp, destfp) finally: destfp.close() fp.close() # member have any execute permissions for user/group/world? 
if member.mode & 0o111: # make dest file have execute for user/group/world # no-op on windows per python docs os.chmod(path, (0o777 - current_umask() | 0o111)) finally: tar.close() def unpack_file(filename, location, content_type, link): filename = os.path.realpath(filename) if (content_type == 'application/zip' or filename.lower().endswith(ZIP_EXTENSIONS) or zipfile.is_zipfile(filename)): unzip_file( filename, location, flatten=not filename.endswith('.whl') ) elif (content_type == 'application/x-gzip' or tarfile.is_tarfile(filename) or filename.lower().endswith(TAR_EXTENSIONS + BZ2_EXTENSIONS)): untar_file(filename, location) elif (content_type and content_type.startswith('text/html') and is_svn_page(file_contents(filename))): # We don't really care about this from pip.vcs.subversion import Subversion Subversion('svn+' + link.url).unpack(location) else: # FIXME: handle? # FIXME: magic signatures? logger.critical( 'Cannot unpack file %s (downloaded from %s, content-type: %s); ' 'cannot detect archive format', filename, location, content_type, ) raise InstallationError( 'Cannot determine archive format of %s' % location ) def remove_tracebacks(output): pattern = (r'(?:\W+File "(?:.*)", line (?:.*)\W+(?:.*)\W+\^\W+)?' 
r'Syntax(?:Error|Warning): (?:.*)') output = re.sub(pattern, '', output) if PY2: return output # compileall.compile_dir() prints different messages to stdout # in Python 3 return re.sub(r"\*\*\* Error compiling (?:.*)", '', output) def call_subprocess(cmd, show_stdout=True, cwd=None, raise_on_returncode=True, command_level=logging.DEBUG, command_desc=None, extra_environ=None): if command_desc is None: cmd_parts = [] for part in cmd: if ' ' in part or '\n' in part or '"' in part or "'" in part: part = '"%s"' % part.replace('"', '\\"') cmd_parts.append(part) command_desc = ' '.join(cmd_parts) if show_stdout: stdout = None else: stdout = subprocess.PIPE logger.log(command_level, "Running command %s", command_desc) env = os.environ.copy() if extra_environ: env.update(extra_environ) try: proc = subprocess.Popen( cmd, stderr=subprocess.STDOUT, stdin=None, stdout=stdout, cwd=cwd, env=env) except Exception as exc: logger.critical( "Error %s while executing command %s", exc, command_desc, ) raise all_output = [] if stdout is not None: while True: line = console_to_str(proc.stdout.readline()) if not line: break line = line.rstrip() all_output.append(line + '\n') logger.debug(line) if not all_output: returned_stdout, returned_stderr = proc.communicate() all_output = [returned_stdout or ''] proc.wait() if proc.returncode: if raise_on_returncode: if all_output: logger.info( 'Complete output from command %s:', command_desc, ) logger.info( ''.join(all_output) + '\n----------------------------------------' ) raise InstallationError( 'Command "%s" failed with error code %s in %s' % (command_desc, proc.returncode, cwd)) else: logger.warning( 'Command "%s" had error code %s in %s', command_desc, proc.returncode, cwd, ) if stdout is not None: return remove_tracebacks(''.join(all_output)) def read_text_file(filename): """Return the contents of *filename*. 
Try to decode the file contents with utf-8, the preferred system encoding (e.g., cp1252 on some Windows machines), and latin1, in that order. Decoding a byte string with latin1 will never raise an error. In the worst case, the returned string will contain some garbage characters. """ with open(filename, 'rb') as fp: data = fp.read() encodings = ['utf-8', locale.getpreferredencoding(False), 'latin1'] for enc in encodings: try: data = data.decode(enc) except UnicodeDecodeError: continue break assert type(data) != bytes # Latin1 should have worked. return data def _make_build_dir(build_dir): os.makedirs(build_dir) write_delete_marker_file(build_dir) class FakeFile(object): """Wrap a list of lines in an object with readline() to make ConfigParser happy.""" def __init__(self, lines): self._gen = (l for l in lines) def readline(self): try: try: return next(self._gen) except NameError: return self._gen.next() except StopIteration: return '' def __iter__(self): return self._gen class StreamWrapper(StringIO): @classmethod def from_stream(cls, orig_stream): cls.orig_stream = orig_stream return cls() # compileall.compile_dir() needs stdout.encoding to print to stdout @property def encoding(self): return self.orig_stream.encoding @contextlib.contextmanager def captured_output(stream_name): """Return a context manager used by captured_stdout/stdin/stderr that temporarily replaces the sys stream *stream_name* with a StringIO. Taken from Lib/support/__init__.py in the CPython repo. """ orig_stdout = getattr(sys, stream_name) setattr(sys, stream_name, StreamWrapper.from_stream(orig_stdout)) try: yield getattr(sys, stream_name) finally: setattr(sys, stream_name, orig_stdout) def captured_stdout(): """Capture the output of sys.stdout: with captured_stdout() as stdout: print('hello') self.assertEqual(stdout.getvalue(), 'hello\n') Taken from Lib/support/__init__.py in the CPython repo. 
""" return captured_output('stdout') class cached_property(object): """A property that is only computed once per instance and then replaces itself with an ordinary attribute. Deleting the attribute resets the property. Source: https://github.com/bottlepy/bottle/blob/0.11.5/bottle.py#L175 """ def __init__(self, func): self.__doc__ = getattr(func, '__doc__') self.func = func def __get__(self, obj, cls): if obj is None: # We're being accessed from the class itself, not from an object return self value = obj.__dict__[self.func.__name__] = self.func(obj) return value def get_installed_version(dist_name): """Get the installed version of dist_name avoiding pkg_resources cache""" # Create a requirement that we'll look for inside of setuptools. req = pkg_resources.Requirement.parse(dist_name) # We want to avoid having this cached, so we need to construct a new # working set each time. working_set = pkg_resources.WorkingSet() # Get the installed distribution from our working set dist = working_set.find(req) # Check to see if we got an installed distribution or not, if we did # we want to return it's version. return dist.version if dist else None
bsd-3-clause
flccrakers/dj-tango
djtango/data.py
1
14930
# -*- coding: utf-8 -*- import sqlite3, os from djtango import utils from djtango.tangosong import TangoSong class djDataConnection: def __init__(self, home, databaseName='djtango.db'): # print(home) if not os.path.isdir(home): os.makedirs(home) self.path = os.path.join(home, databaseName) self.pathTangoDatabase = os.path.join(home, 'el-recodo.db') self.typeList = {} def getDataFromSql(self, sqlfile): ret = "" with open(sqlfile) as file: for line in file: ret += line return ret def createDatabase(self): open(self.path, 'w').close() open(self.pathTangoDatabase, 'w').close conn = sqlite3.connect(self.path) cursor = conn.cursor() # create the table script = self.getDataFromSql('./djtango/sql/databaseCreation.sql') print(script) cursor.executescript(script) # fill the default table script = self.getDataFromSql('./djtango/sql/databaseFill.sql') print(script) cursor.executescript(script) conn.commit() conn.close() def existTangoInTangoDatabase(self, tango): conn = sqlite3.connect(self.pathTangoDatabase) sql = "SELECT * FROM tangos WHERE norm_artist = ? and norm_title = ?" cursor = conn.cursor() cursor.execute(sql, (utils.remove_accents(tango.artist).lower(), utils.remove_accents(tango.title).lower(),)) rows = cursor.fetchall() conn.commit() conn.close() return rows def updateTitleArtistInTangoDatabase(self, ID, artist, title): conn = sqlite3.connect(self.pathTangoDatabase) cursor = conn.cursor() sql = """ UPDATE tangos SET norm_artist = ?, norm_title = ? WHERE ID = ? """ # print (sql) cursor.execute(sql, (artist, title, ID)) conn.commit() conn.close() def getAllTangInTangoDatabase(self): conn = sqlite3.connect(self.pathTangoDatabase) cursor = conn.cursor() sql = "SELECT ID, artist, title FROM tangos" cursor.execute(sql) rows = cursor.fetchall() conn.commit() conn.close() return rows def updatePath(self, ID, newpath): # print(str(ID)+" "+newpath) conn = sqlite3.connect(self.path) cursor = conn.cursor() sql = """ UPDATE tangos SET tangopath = ? WHERE ID = ? 
""" # print (sql) cursor.execute(sql, (newpath, ID)) # cursor.execute(sql, (ID,)) conn.commit() conn.close() def searchTango(self, tango): conn = sqlite3.connect(self.path) cursor = conn.cursor() sql = "SELECT * FROM tangos WHERE title = ? and artist = ? and album = ? and genre = ?" # print (sql) # print (tango.path) cursor.execute(sql, (tango.title, tango.artist, tango.album, tango.type)) rows = cursor.fetchall() conn.commit() conn.close() # if len(rows) > 0: # print("found tangos") # else: # print("no tango") tangoList = [] for row in rows: ctango = TangoSong(row[1], row[0]) ctango.title = row[2] ctango.artist = row[3] ctango.album = row[4] ctango.type = row[5] if ctango.type == 0: ctango.type = 5 ctango.year = row[6] ctango.bpmHuman = row[7] ctango.bpmFromFile = row[8] ctango.duration = row[9] ctango.singer = row[10] ctango.composer = row[11] ctango.author = row[12] ctango.tstart = row[13] ctango.tend = row[14] ctango.treated = row[15] tangoList.append(ctango) # print (ctango.type) return tangoList def existTango(self, tango): conn = sqlite3.connect(self.path) cursor = conn.cursor() sql = "SELECT * FROM tangos WHERE tangopath = ?" 
# print (sql) # print (tango.path) cursor.execute(sql, (tango.path,)) rows = cursor.fetchall() conn.commit() conn.close() if len(rows) > 0: return True else: return False def insertTango(self, tango): conn = sqlite3.connect(self.path) cursor = conn.cursor() # don't insert a tango who already exist if self.existTango(tango): return if not self.typeList: sql = "SELECT * FROM tangoType" cursor.execute(sql) rows = cursor.fetchall() for row in rows: self.typeList[row[1]] = row[0] if str(tango.type).lower() in self.typeList: tango.type = self.typeList[str(tango.type).lower()] else: tango.type = self.typeList['unknown'] sql = "INSERT INTO tangos (tangopath, title, artist, album, genre, year) VALUES(?,?,?,?,?,?)" cursor.execute(sql, tango.listDB()) # print ("inserting "+str(tango.path)) ret = cursor.lastrowid # print("in Data last row: "+str(ret)) conn.commit() conn.close() return ret # def insertManyTango(self, tangoList): # conn = sqlite3.connect(self.path) # cursor = conn.cursor() # sql = "INSERT INTO tangos (path, title, artist, album, genre, year) VALUES(?,?,?,?,?,?)" def getAllTangos(self): conn = sqlite3.connect(self.path) cursor = conn.cursor() # sql = "SELECT tangos.ID, tangos.path, tangos.title, tangos.artist, tangos.album, tangoType.type, tangos.year \ # FROM tangos, tangoType\ # WHERE tangos.genre = tangoType.ID" sql = "SELECT * from tangos"; cursor.execute(sql) rows = cursor.fetchall() conn.close() tangoList = [] for row in rows: ctango = TangoSong(row[1], row[0]) ctango.title = row[2] ctango.artist = row[3] ctango.album = row[4] ctango.type = row[5] if ctango.type == 0: ctango.type = 5 ctango.year = row[6] ctango.bpmHuman = row[7] ctango.bpmFromFile = row[8] ctango.duration = row[9] ctango.singer = row[10] ctango.composer = row[11] ctango.author = row[12] ctango.tstart = row[13] ctango.tend = row[14] ctango.treated = row[15] tangoList.append(ctango) # print (ctango.type) return tangoList def getTangoFromMilonga(self, name): # print("in data geting milonga") 
ID = self.getMilongaID(name) conn = sqlite3.connect(self.path) cursor = conn.cursor() sql = "SELECT * FROM tangos, Milonga_Tango WHERE tangos.ID = Milonga_Tango.idTango AND Milonga_Tango.IdMilonga = " + str( ID) cursor.execute(sql) rows = cursor.fetchall() conn.close() tangoList = [] for row in rows: # print(row) ctango = TangoSong(row[1], row[0]) ctango.title = row[2] ctango.artist = row[3] ctango.album = row[4] ctango.type = row[5] if ctango.type == 0: ctango.type = 5 ctango.year = row[6] ctango.bpmHuman = row[7] ctango.bpmFromFile = row[8] # print("duration in database: "+str(row[9])) ctango.duration = row[9] ctango.singer = row[10] ctango.composer = row[11] ctango.author = row[12] ctango.tstart = row[13] ctango.tend = row[14] ctango.treated = row[15] tangoList.append(ctango) # print (ctango.type) return tangoList def getTangoFromListID(self, listID): s = ',' listIDstring = s.join(["'" + str(ID) + "'" for ID in listID]) sql = "SELECT * FROM tangos WHERE ID IN (" + listIDstring + ")" # print (sql) conn = sqlite3.connect(self.path) cursor = conn.cursor() cursor.execute(sql) rows = cursor.fetchall() conn.close() tangoList = [] for row in rows: # print(row) ctango = TangoSong(row[1], row[0]) ctango.title = row[2] ctango.artist = row[3] ctango.album = row[4] ctango.type = row[5] if ctango.type == 0: ctango.type = 5 ctango.year = row[6] ctango.bpmHuman = row[7] ctango.bpmFromFile = row[8] # print("duration in database: "+str(row[9])) ctango.duration = row[9] ctango.singer = row[10] ctango.composer = row[11] ctango.author = row[12] ctango.tstart = row[13] ctango.tend = row[14] ctango.treated = row[15] tangoList.append(ctango) # print (ctango.type) return tangoList def getTangoTypeList(self): conn = sqlite3.connect(self.path) cursor = conn.cursor() typeList = {} sql = "SELECT * FROM tangoType" cursor.execute(sql) rows = cursor.fetchall() conn.close() for row in rows: typeList[row[0]] = row # rint (row) return typeList def updateTango(self, tango): # print("will update 
tango") conn = sqlite3.connect(self.path) cursor = conn.cursor() sql = """ UPDATE tangos SET title = ?, artist = ?, album = ?, genre = ?, year = ?, bpmHuman = ?, bpmFromFile = ?, duration = ?, tangopath = ?, tstart = ?, tend = ?, author=?, singer=?, composer = ?, treated = ? WHERE ID = ? """ # print (tango.listUpdateDB()) cursor.execute(sql, tango.listUpdateDB()) conn.commit() conn.close() def deleteTango(self, ID): conn = sqlite3.connect(self.path) cursor = conn.cursor() print("deleting tango ID " + str(ID)) sql = """ DELETE FROM tangos WHERE ID = ? """ # print (tango.listUpdateDB()) cursor.execute(sql, [ID, ]) conn.commit() conn.close() def updateBPM(self, tango): print("will update in database " + str(tango.bpmHuman)) conn = sqlite3.connect(self.path) cursor = conn.cursor() sql = """ UPDATE tangos SET bpmHuman = ? WHERE ID = ? """ # print (sql) cursor.execute(sql, (tango.bpmHuman, tango.ID)) conn.commit() conn.close() def updateProperties(self, durationFadOut, fadoutTime, writeTagBox, normalize, TYPE): # print("I will update the database for preferences") conn = sqlite3.connect(self.path) cursor = conn.cursor() sql = """UPDATE preferences SET timeCortina = ?, timeFadOut = ?, writeID3tag = ?, normalize = ? 
""" cursor.execute(sql, (fadoutTime / 1000, durationFadOut / 1000, writeTagBox, normalize)) conn.commit() conn.close() # print (TYPE) # TODO : add a finction that update the type self.updateType(TYPE) def updateSongPath(self, songPath): # print("I will the song path") conn = sqlite3.connect(self.path) cursor = conn.cursor() sql = """UPDATE preferences SET baseDir = ?""" cursor.execute(sql, (songPath,)) conn.commit() conn.close() def updateType(self, TYPE): conn = sqlite3.connect(self.path) cursor = conn.cursor() # print (TYPE) # print(TYPE.length) sql = """DELETE FROM tangoType""" cursor.execute(sql) # "INSERT INTO tangos (path, title, artist, album, genre, year) VALUES(?,?,?,?,?,?)" sql = "INSERT INTO tangoType (ID, type, R, G, B, T) VALUES(?,?,?,?,?,?)" for nb in TYPE: # print (TYPE[nb][0]) cursor.execute(sql, TYPE[nb]) conn.commit() conn.close() def setNewSongAvailable(self, value): conn = sqlite3.connect(self.path) cursor = conn.cursor() val = 0 if value: val = 1 sql = "UPDATE preferences SET newSongAvailable= " + val cursor.execute(sql) conn.commit() conn.colse() def getPreferences(self): ret = {} conn = sqlite3.connect(self.path) cursor = conn.cursor() sql = "SELECT * FROM preferences" cursor.execute(sql) rows = cursor.fetchall() conn.close() # print(rows) for row in rows: ret['path'] = row[0] ret['cortinaDuration'] = row[1] ret['fadoutTime'] = row[2] ret['writeTag'] = row[3] ret['normalize'] = row[4] return ret def getMilongaID(self, name): conn = sqlite3.connect(self.path) cursor = conn.cursor() sql = "SELECT * FROM Milonga WHERE Name = ?" 
cursor.execute(sql, (name,)) rows = cursor.fetchall() conn.commit() conn.close() if len(rows) > 0: return rows[0][0] else: return 0 def getListOfMilongas(self): ret = [] conn = sqlite3.connect(self.path) cursor = conn.cursor() sql = "SELECT * FROM Milonga" cursor.execute(sql) rows = cursor.fetchall() conn.commit() conn.close() for row in reversed(rows): ret.append(row[1]) return ret def deleteMilonga(self, milongaID=0, name=''): if milongaID == 0 and name == '': return False conn = sqlite3.connect(self.path) cursor = conn.cursor() if milongaID == 0 and not name == '': milongaID = self.getMilongaID(name) # if milongaID > 0: sql = "DELETE FROM Milonga WHERE ID = ?" cursor.execute(sql, (milongaID,)) sql = "DELETE FROM Milonga_TANGO WHERE IdMilonga = ?" cursor.execute(sql, (milongaID,)) conn.commit() conn.close() return True def saveMilonga(self, name, tangoList): conn = sqlite3.connect(self.path) cursor = conn.cursor() milongaID = self.getMilongaID(name) if milongaID > 0: self.deleteMilonga(milongaID) sql = "INSERT INTO Milonga (Name) VALUES(?)" cursor.execute(sql, (name,)) ID = cursor.lastrowid # print(ID) count = 1 for tangoId in tangoList: sql = "INSERT INTO Milonga_Tango (IdMilonga, IdTango, Ord) VALUES(?,?,?)" cursor.execute(sql, (ID, tangoId, count)) count += 1 conn.commit() conn.close()
gpl-3.0
mollstam/UnrealPy
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/SQLAlchemy-1.0.6/examples/dogpile_caching/model.py
30
3026
"""model.py

The datamodel, which represents Person that has multiple
Address objects, each with PostalCode, City, Country.

Person --(1..n)--> Address
Address --(has a)--> PostalCode
PostalCode --(has a)--> City
City --(has a)--> Country

"""
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.orm import relationship

# NOTE(review): FromCache appears unused in this module; presumably it is
# re-exported for callers that build cached queries -- confirm before removing.
from .caching_query import FromCache, RelationshipCache
from .environment import Base, bootstrap


class Country(Base):
    # Leaf of the geography chain: a country has only a name.
    __tablename__ = 'country'

    id = Column(Integer, primary_key=True)
    name = Column(String(100), nullable=False)

    def __init__(self, name):
        self.name = name


class City(Base):
    # A city belongs to exactly one Country (non-nullable FK).
    __tablename__ = 'city'

    id = Column(Integer, primary_key=True)
    name = Column(String(100), nullable=False)
    country_id = Column(Integer, ForeignKey('country.id'), nullable=False)
    country = relationship(Country)

    def __init__(self, name, country):
        self.name = name
        self.country = country


class PostalCode(Base):
    # A postal code belongs to exactly one City (non-nullable FK).
    __tablename__ = 'postal_code'

    id = Column(Integer, primary_key=True)
    code = Column(String(10), nullable=False)
    city_id = Column(Integer, ForeignKey('city.id'), nullable=False)
    city = relationship(City)

    @property
    def country(self):
        # Convenience hop: postal code -> city -> country.
        return self.city.country

    def __init__(self, code, city):
        self.code = code
        self.city = city


class Address(Base):
    # One row per street address of a Person; postal_code_id is nullable.
    __tablename__ = 'address'

    id = Column(Integer, primary_key=True)
    person_id = Column(Integer, ForeignKey('person.id'), nullable=False)
    street = Column(String(200), nullable=False)
    postal_code_id = Column(Integer, ForeignKey('postal_code.id'))
    postal_code = relationship(PostalCode)

    @property
    def city(self):
        # Convenience hop: address -> postal code -> city.
        return self.postal_code.city

    @property
    def country(self):
        # Convenience hop: address -> postal code -> country.
        return self.postal_code.country

    def __str__(self):
        # Tab-separated: "<street>\t<city>, <code>\t<country>"
        return "%s\t"\
            "%s, %s\t"\
            "%s" % (self.street, self.city.name,
                    self.postal_code.code, self.country.name)


class Person(Base):
    # Root entity; addresses is an unordered set (collection_class=set).
    __tablename__ = 'person'

    id = Column(Integer, primary_key=True)
    name = Column(String(100), nullable=False)
    addresses = relationship(Address, collection_class=set)

    def __init__(self, name, *addresses):
        self.name = name
        self.addresses = set(addresses)

    def __str__(self):
        return self.name

    def __repr__(self):
        return "Person(name=%r)" % self.name

    def format_full(self):
        # Person name followed by each address, tab-separated.
        return "\t".join([str(x) for x in [self] + list(self.addresses)])


# Caching options.  A set of three RelationshipCache options
# which can be applied to Query(), causing the "lazy load"
# of these attributes to be loaded from cache.
cache_address_bits = RelationshipCache(PostalCode.city, "default").\
    and_(
        RelationshipCache(City.country, "default")
    ).and_(
        RelationshipCache(Address.postal_code, "default")
    )

bootstrap()
mit
yoki/phantomjs
src/breakpad/src/third_party/protobuf/protobuf/python/google/protobuf/internal/python_message.py
259
40284
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

# This code is meant to work on Python 2.4 and above only.
#
# TODO(robinson): Helpers for verbose, common checks like seeing if a
# descriptor's cpp_type is CPPTYPE_MESSAGE.

"""Contains a metaclass and helper functions used to create
protocol message classes from Descriptor objects at runtime.

Recall that a metaclass is the "type" of a class.
(A class is to a metaclass what an instance is to a class.)

In this case, we use the GeneratedProtocolMessageType metaclass
to inject all the useful functionality into the classes
output by the protocol compiler at compile-time.

The upshot of all this is that the real implementation
details for ALL pure-Python protocol buffers are *here in
this file*.
"""

__author__ = 'robinson@google.com (Will Robinson)'

# Python 2 only: prefer the C-accelerated StringIO when available.
try:
  from cStringIO import StringIO
except ImportError:
  from StringIO import StringIO
import struct
import weakref

# We use "as" to avoid name collisions with variables.
from google.protobuf.internal import containers
from google.protobuf.internal import decoder
from google.protobuf.internal import encoder
from google.protobuf.internal import message_listener as message_listener_mod
from google.protobuf.internal import type_checkers
from google.protobuf.internal import wire_format
from google.protobuf import descriptor as descriptor_mod
from google.protobuf import message as message_mod
from google.protobuf import text_format

# Shorthand used throughout this module.
_FieldDescriptor = descriptor_mod.FieldDescriptor


def NewMessage(descriptor, dictionary):
  """Prepares *dictionary* (the class dict of a message class under
  construction) before the class object is created: injects class
  attributes for nested extensions and a __slots__ entry.

  Args:
    descriptor: Descriptor object describing the message type.
    dictionary: Class dictionary that will be passed to type().
  """
  _AddClassAttributesForNestedExtensions(descriptor, dictionary)
  _AddSlots(descriptor, dictionary)


def InitMessage(descriptor, cls):
  """Finishes initialization of a freshly created message class *cls*:
  sets up decoder/extension registries, attaches per-field helpers, and
  installs all properties, static methods and message methods.

  Args:
    descriptor: Descriptor object describing the message type.
    cls: The just-created message class to populate.
  """
  cls._decoders_by_tag = {}
  cls._extensions_by_name = {}
  cls._extensions_by_number = {}
  if (descriptor.has_options and
      descriptor.GetOptions().message_set_wire_format):
    cls._decoders_by_tag[decoder.MESSAGE_SET_ITEM_TAG] = (
        decoder.MessageSetItemDecoder(cls._extensions_by_number))

  # Attach stuff to each FieldDescriptor for quick lookup later on.
  for field in descriptor.fields:
    _AttachFieldHelpers(cls, field)

  _AddEnumValues(descriptor, cls)
  _AddInitMethod(descriptor, cls)
  _AddPropertiesForFields(descriptor, cls)
  _AddPropertiesForExtensions(descriptor, cls)
  _AddStaticMethods(cls)
  _AddMessageMethods(descriptor, cls)
  _AddPrivateHelperMethods(cls)


# Stateless helpers for GeneratedProtocolMessageType below.
# Outside clients should not access these directly. # # I opted not to make any of these methods on the metaclass, to make it more # clear that I'm not really using any state there and to keep clients from # thinking that they have direct access to these construction helpers. def _PropertyName(proto_field_name): """Returns the name of the public property attribute which clients can use to get and (in some cases) set the value of a protocol message field. Args: proto_field_name: The protocol message field name, exactly as it appears (or would appear) in a .proto file. """ # TODO(robinson): Escape Python keywords (e.g., yield), and test this support. # nnorwitz makes my day by writing: # """ # FYI. See the keyword module in the stdlib. This could be as simple as: # # if keyword.iskeyword(proto_field_name): # return proto_field_name + "_" # return proto_field_name # """ # Kenton says: The above is a BAD IDEA. People rely on being able to use # getattr() and setattr() to reflectively manipulate field values. If we # rename the properties, then every such user has to also make sure to apply # the same transformation. Note that currently if you name a field "yield", # you can still access it just fine using getattr/setattr -- it's not even # that cumbersome to do so. # TODO(kenton): Remove this method entirely if/when everyone agrees with my # position. return proto_field_name def _VerifyExtensionHandle(message, extension_handle): """Verify that the given extension handle is valid.""" if not isinstance(extension_handle, _FieldDescriptor): raise KeyError('HasExtension() expects an extension handle, got: %s' % extension_handle) if not extension_handle.is_extension: raise KeyError('"%s" is not an extension.' % extension_handle.full_name) if extension_handle.containing_type is not message.DESCRIPTOR: raise KeyError('Extension "%s" extends message type "%s", but this ' 'message is of type "%s".' 
% (extension_handle.full_name, extension_handle.containing_type.full_name, message.DESCRIPTOR.full_name)) def _AddSlots(message_descriptor, dictionary): """Adds a __slots__ entry to dictionary, containing the names of all valid attributes for this message type. Args: message_descriptor: A Descriptor instance describing this message type. dictionary: Class dictionary to which we'll add a '__slots__' entry. """ dictionary['__slots__'] = ['_cached_byte_size', '_cached_byte_size_dirty', '_fields', '_is_present_in_parent', '_listener', '_listener_for_children', '__weakref__'] def _IsMessageSetExtension(field): return (field.is_extension and field.containing_type.has_options and field.containing_type.GetOptions().message_set_wire_format and field.type == _FieldDescriptor.TYPE_MESSAGE and field.message_type == field.extension_scope and field.label == _FieldDescriptor.LABEL_OPTIONAL) def _AttachFieldHelpers(cls, field_descriptor): is_repeated = (field_descriptor.label == _FieldDescriptor.LABEL_REPEATED) is_packed = (field_descriptor.has_options and field_descriptor.GetOptions().packed) if _IsMessageSetExtension(field_descriptor): field_encoder = encoder.MessageSetItemEncoder(field_descriptor.number) sizer = encoder.MessageSetItemSizer(field_descriptor.number) else: field_encoder = type_checkers.TYPE_TO_ENCODER[field_descriptor.type]( field_descriptor.number, is_repeated, is_packed) sizer = type_checkers.TYPE_TO_SIZER[field_descriptor.type]( field_descriptor.number, is_repeated, is_packed) field_descriptor._encoder = field_encoder field_descriptor._sizer = sizer field_descriptor._default_constructor = _DefaultValueConstructorForField( field_descriptor) def AddDecoder(wiretype, is_packed): tag_bytes = encoder.TagBytes(field_descriptor.number, wiretype) cls._decoders_by_tag[tag_bytes] = ( type_checkers.TYPE_TO_DECODER[field_descriptor.type]( field_descriptor.number, is_repeated, is_packed, field_descriptor, field_descriptor._default_constructor)) 
AddDecoder(type_checkers.FIELD_TYPE_TO_WIRE_TYPE[field_descriptor.type], False) if is_repeated and wire_format.IsTypePackable(field_descriptor.type): # To support wire compatibility of adding packed = true, add a decoder for # packed values regardless of the field's options. AddDecoder(wire_format.WIRETYPE_LENGTH_DELIMITED, True) def _AddClassAttributesForNestedExtensions(descriptor, dictionary): extension_dict = descriptor.extensions_by_name for extension_name, extension_field in extension_dict.iteritems(): assert extension_name not in dictionary dictionary[extension_name] = extension_field def _AddEnumValues(descriptor, cls): """Sets class-level attributes for all enum fields defined in this message. Args: descriptor: Descriptor object for this message type. cls: Class we're constructing for this message type. """ for enum_type in descriptor.enum_types: for enum_value in enum_type.values: setattr(cls, enum_value.name, enum_value.number) def _DefaultValueConstructorForField(field): """Returns a function which returns a default value for a field. Args: field: FieldDescriptor object for this field. The returned function has one argument: message: Message instance containing this field, or a weakref proxy of same. That function in turn returns a default value for this field. The default value may refer back to |message| via a weak reference. """ if field.label == _FieldDescriptor.LABEL_REPEATED: if field.default_value != []: raise ValueError('Repeated field default value not empty list: %s' % ( field.default_value)) if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: # We can't look at _concrete_class yet since it might not have # been set. (Depends on order in which we initialize the classes). 
message_type = field.message_type def MakeRepeatedMessageDefault(message): return containers.RepeatedCompositeFieldContainer( message._listener_for_children, field.message_type) return MakeRepeatedMessageDefault else: type_checker = type_checkers.GetTypeChecker(field.cpp_type, field.type) def MakeRepeatedScalarDefault(message): return containers.RepeatedScalarFieldContainer( message._listener_for_children, type_checker) return MakeRepeatedScalarDefault if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: # _concrete_class may not yet be initialized. message_type = field.message_type def MakeSubMessageDefault(message): result = message_type._concrete_class() result._SetListener(message._listener_for_children) return result return MakeSubMessageDefault def MakeScalarDefault(message): return field.default_value return MakeScalarDefault def _AddInitMethod(message_descriptor, cls): """Adds an __init__ method to cls.""" fields = message_descriptor.fields def init(self, **kwargs): self._cached_byte_size = 0 self._cached_byte_size_dirty = len(kwargs) > 0 self._fields = {} self._is_present_in_parent = False self._listener = message_listener_mod.NullMessageListener() self._listener_for_children = _Listener(self) for field_name, field_value in kwargs.iteritems(): field = _GetFieldByName(message_descriptor, field_name) if field is None: raise TypeError("%s() got an unexpected keyword argument '%s'" % (message_descriptor.name, field_name)) if field.label == _FieldDescriptor.LABEL_REPEATED: copy = field._default_constructor(self) if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: # Composite for val in field_value: copy.add().MergeFrom(val) else: # Scalar copy.extend(field_value) self._fields[field] = copy elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: copy = field._default_constructor(self) copy.MergeFrom(field_value) self._fields[field] = copy else: setattr(self, field_name, field_value) init.__module__ = None init.__doc__ = None cls.__init__ = init def 
_GetFieldByName(message_descriptor, field_name): """Returns a field descriptor by field name. Args: message_descriptor: A Descriptor describing all fields in message. field_name: The name of the field to retrieve. Returns: The field descriptor associated with the field name. """ try: return message_descriptor.fields_by_name[field_name] except KeyError: raise ValueError('Protocol message has no "%s" field.' % field_name) def _AddPropertiesForFields(descriptor, cls): """Adds properties for all fields in this protocol message type.""" for field in descriptor.fields: _AddPropertiesForField(field, cls) if descriptor.is_extendable: # _ExtensionDict is just an adaptor with no state so we allocate a new one # every time it is accessed. cls.Extensions = property(lambda self: _ExtensionDict(self)) def _AddPropertiesForField(field, cls): """Adds a public property for a protocol message field. Clients can use this property to get and (in the case of non-repeated scalar fields) directly set the value of a protocol message field. Args: field: A FieldDescriptor for this field. cls: The class we're constructing. """ # Catch it if we add other types that we should # handle specially here. assert _FieldDescriptor.MAX_CPPTYPE == 10 constant_name = field.name.upper() + "_FIELD_NUMBER" setattr(cls, constant_name, field.number) if field.label == _FieldDescriptor.LABEL_REPEATED: _AddPropertiesForRepeatedField(field, cls) elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: _AddPropertiesForNonRepeatedCompositeField(field, cls) else: _AddPropertiesForNonRepeatedScalarField(field, cls) def _AddPropertiesForRepeatedField(field, cls): """Adds a public property for a "repeated" protocol message field. Clients can use this property to get the value of the field, which will be either a _RepeatedScalarFieldContainer or _RepeatedCompositeFieldContainer (see below). 
Note that when clients add values to these containers, we perform type-checking in the case of repeated scalar fields, and we also set any necessary "has" bits as a side-effect. Args: field: A FieldDescriptor for this field. cls: The class we're constructing. """ proto_field_name = field.name property_name = _PropertyName(proto_field_name) def getter(self): field_value = self._fields.get(field) if field_value is None: # Construct a new object to represent this field. field_value = field._default_constructor(self) # Atomically check if another thread has preempted us and, if not, swap # in the new object we just created. If someone has preempted us, we # take that object and discard ours. # WARNING: We are relying on setdefault() being atomic. This is true # in CPython but we haven't investigated others. This warning appears # in several other locations in this file. field_value = self._fields.setdefault(field, field_value) return field_value getter.__module__ = None getter.__doc__ = 'Getter for %s.' % proto_field_name # We define a setter just so we can throw an exception with a more # helpful error message. def setter(self, new_value): raise AttributeError('Assignment not allowed to repeated field ' '"%s" in protocol message object.' % proto_field_name) doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name setattr(cls, property_name, property(getter, setter, doc=doc)) def _AddPropertiesForNonRepeatedScalarField(field, cls): """Adds a public property for a nonrepeated, scalar protocol message field. Clients can use this property to get and directly set the value of the field. Note that when the client sets the value of a field by using this property, all necessary "has" bits are set as a side-effect, and we also perform type-checking. Args: field: A FieldDescriptor for this field. cls: The class we're constructing. 
""" proto_field_name = field.name property_name = _PropertyName(proto_field_name) type_checker = type_checkers.GetTypeChecker(field.cpp_type, field.type) default_value = field.default_value valid_values = set() def getter(self): return self._fields.get(field, default_value) getter.__module__ = None getter.__doc__ = 'Getter for %s.' % proto_field_name def setter(self, new_value): type_checker.CheckValue(new_value) self._fields[field] = new_value # Check _cached_byte_size_dirty inline to improve performance, since scalar # setters are called frequently. if not self._cached_byte_size_dirty: self._Modified() setter.__module__ = None setter.__doc__ = 'Setter for %s.' % proto_field_name # Add a property to encapsulate the getter/setter. doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name setattr(cls, property_name, property(getter, setter, doc=doc)) def _AddPropertiesForNonRepeatedCompositeField(field, cls): """Adds a public property for a nonrepeated, composite protocol message field. A composite field is a "group" or "message" field. Clients can use this property to get the value of the field, but cannot assign to the property directly. Args: field: A FieldDescriptor for this field. cls: The class we're constructing. """ # TODO(robinson): Remove duplication with similar method # for non-repeated scalars. proto_field_name = field.name property_name = _PropertyName(proto_field_name) message_type = field.message_type def getter(self): field_value = self._fields.get(field) if field_value is None: # Construct a new object to represent this field. field_value = message_type._concrete_class() field_value._SetListener(self._listener_for_children) # Atomically check if another thread has preempted us and, if not, swap # in the new object we just created. If someone has preempted us, we # take that object and discard ours. # WARNING: We are relying on setdefault() being atomic. This is true # in CPython but we haven't investigated others. 
This warning appears # in several other locations in this file. field_value = self._fields.setdefault(field, field_value) return field_value getter.__module__ = None getter.__doc__ = 'Getter for %s.' % proto_field_name # We define a setter just so we can throw an exception with a more # helpful error message. def setter(self, new_value): raise AttributeError('Assignment not allowed to composite field ' '"%s" in protocol message object.' % proto_field_name) # Add a property to encapsulate the getter. doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name setattr(cls, property_name, property(getter, setter, doc=doc)) def _AddPropertiesForExtensions(descriptor, cls): """Adds properties for all fields in this protocol message type.""" extension_dict = descriptor.extensions_by_name for extension_name, extension_field in extension_dict.iteritems(): constant_name = extension_name.upper() + "_FIELD_NUMBER" setattr(cls, constant_name, extension_field.number) def _AddStaticMethods(cls): # TODO(robinson): This probably needs to be thread-safe(?) def RegisterExtension(extension_handle): extension_handle.containing_type = cls.DESCRIPTOR _AttachFieldHelpers(cls, extension_handle) # Try to insert our extension, failing if an extension with the same number # already exists. actual_handle = cls._extensions_by_number.setdefault( extension_handle.number, extension_handle) if actual_handle is not extension_handle: raise AssertionError( 'Extensions "%s" and "%s" both try to extend message type "%s" with ' 'field number %d.' % (extension_handle.full_name, actual_handle.full_name, cls.DESCRIPTOR.full_name, extension_handle.number)) cls._extensions_by_name[extension_handle.full_name] = extension_handle handle = extension_handle # avoid line wrapping if _IsMessageSetExtension(handle): # MessageSet extension. Also register under type name. 
cls._extensions_by_name[ extension_handle.message_type.full_name] = extension_handle cls.RegisterExtension = staticmethod(RegisterExtension) def FromString(s): message = cls() message.MergeFromString(s) return message cls.FromString = staticmethod(FromString) def _IsPresent(item): """Given a (FieldDescriptor, value) tuple from _fields, return true if the value should be included in the list returned by ListFields().""" if item[0].label == _FieldDescriptor.LABEL_REPEATED: return bool(item[1]) elif item[0].cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: return item[1]._is_present_in_parent else: return True def _AddListFieldsMethod(message_descriptor, cls): """Helper for _AddMessageMethods().""" def ListFields(self): all_fields = [item for item in self._fields.iteritems() if _IsPresent(item)] all_fields.sort(key = lambda item: item[0].number) return all_fields cls.ListFields = ListFields def _AddHasFieldMethod(message_descriptor, cls): """Helper for _AddMessageMethods().""" singular_fields = {} for field in message_descriptor.fields: if field.label != _FieldDescriptor.LABEL_REPEATED: singular_fields[field.name] = field def HasField(self, field_name): try: field = singular_fields[field_name] except KeyError: raise ValueError( 'Protocol message has no singular "%s" field.' % field_name) if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: value = self._fields.get(field) return value is not None and value._is_present_in_parent else: return field in self._fields cls.HasField = HasField def _AddClearFieldMethod(message_descriptor, cls): """Helper for _AddMessageMethods().""" def ClearField(self, field_name): try: field = message_descriptor.fields_by_name[field_name] except KeyError: raise ValueError('Protocol message has no "%s" field.' % field_name) if field in self._fields: # Note: If the field is a sub-message, its listener will still point # at us. That's fine, because the worst than can happen is that it # will call _Modified() and invalidate our byte size. Big deal. 
del self._fields[field] # Always call _Modified() -- even if nothing was changed, this is # a mutating method, and thus calling it should cause the field to become # present in the parent message. self._Modified() cls.ClearField = ClearField def _AddClearExtensionMethod(cls): """Helper for _AddMessageMethods().""" def ClearExtension(self, extension_handle): _VerifyExtensionHandle(self, extension_handle) # Similar to ClearField(), above. if extension_handle in self._fields: del self._fields[extension_handle] self._Modified() cls.ClearExtension = ClearExtension def _AddClearMethod(message_descriptor, cls): """Helper for _AddMessageMethods().""" def Clear(self): # Clear fields. self._fields = {} self._Modified() cls.Clear = Clear def _AddHasExtensionMethod(cls): """Helper for _AddMessageMethods().""" def HasExtension(self, extension_handle): _VerifyExtensionHandle(self, extension_handle) if extension_handle.label == _FieldDescriptor.LABEL_REPEATED: raise KeyError('"%s" is repeated.' % extension_handle.full_name) if extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: value = self._fields.get(extension_handle) return value is not None and value._is_present_in_parent else: return extension_handle in self._fields cls.HasExtension = HasExtension def _AddEqualsMethod(message_descriptor, cls): """Helper for _AddMessageMethods().""" def __eq__(self, other): if (not isinstance(other, message_mod.Message) or other.DESCRIPTOR != self.DESCRIPTOR): return False if self is other: return True return self.ListFields() == other.ListFields() cls.__eq__ = __eq__ def _AddStrMethod(message_descriptor, cls): """Helper for _AddMessageMethods().""" def __str__(self): return text_format.MessageToString(self) cls.__str__ = __str__ def _AddUnicodeMethod(unused_message_descriptor, cls): """Helper for _AddMessageMethods().""" def __unicode__(self): return text_format.MessageToString(self, as_utf8=True).decode('utf-8') cls.__unicode__ = __unicode__ def _AddSetListenerMethod(cls): 
"""Helper for _AddMessageMethods().""" def SetListener(self, listener): if listener is None: self._listener = message_listener_mod.NullMessageListener() else: self._listener = listener cls._SetListener = SetListener def _BytesForNonRepeatedElement(value, field_number, field_type): """Returns the number of bytes needed to serialize a non-repeated element. The returned byte count includes space for tag information and any other additional space associated with serializing value. Args: value: Value we're serializing. field_number: Field number of this value. (Since the field number is stored as part of a varint-encoded tag, this has an impact on the total bytes required to serialize the value). field_type: The type of the field. One of the TYPE_* constants within FieldDescriptor. """ try: fn = type_checkers.TYPE_TO_BYTE_SIZE_FN[field_type] return fn(field_number, value) except KeyError: raise message_mod.EncodeError('Unrecognized field type: %d' % field_type) def _AddByteSizeMethod(message_descriptor, cls): """Helper for _AddMessageMethods().""" def ByteSize(self): if not self._cached_byte_size_dirty: return self._cached_byte_size size = 0 for field_descriptor, field_value in self.ListFields(): size += field_descriptor._sizer(field_value) self._cached_byte_size = size self._cached_byte_size_dirty = False self._listener_for_children.dirty = False return size cls.ByteSize = ByteSize def _AddSerializeToStringMethod(message_descriptor, cls): """Helper for _AddMessageMethods().""" def SerializeToString(self): # Check if the message has all of its required fields set. 
errors = [] if not self.IsInitialized(): raise message_mod.EncodeError( 'Message is missing required fields: ' + ','.join(self.FindInitializationErrors())) return self.SerializePartialToString() cls.SerializeToString = SerializeToString def _AddSerializePartialToStringMethod(message_descriptor, cls): """Helper for _AddMessageMethods().""" def SerializePartialToString(self): out = StringIO() self._InternalSerialize(out.write) return out.getvalue() cls.SerializePartialToString = SerializePartialToString def InternalSerialize(self, write_bytes): for field_descriptor, field_value in self.ListFields(): field_descriptor._encoder(write_bytes, field_value) cls._InternalSerialize = InternalSerialize def _AddMergeFromStringMethod(message_descriptor, cls): """Helper for _AddMessageMethods().""" def MergeFromString(self, serialized): length = len(serialized) try: if self._InternalParse(serialized, 0, length) != length: # The only reason _InternalParse would return early is if it # encountered an end-group tag. raise message_mod.DecodeError('Unexpected end-group tag.') except IndexError: raise message_mod.DecodeError('Truncated message.') except struct.error, e: raise message_mod.DecodeError(e) return length # Return this for legacy reasons. 
cls.MergeFromString = MergeFromString local_ReadTag = decoder.ReadTag local_SkipField = decoder.SkipField decoders_by_tag = cls._decoders_by_tag def InternalParse(self, buffer, pos, end): self._Modified() field_dict = self._fields while pos != end: (tag_bytes, new_pos) = local_ReadTag(buffer, pos) field_decoder = decoders_by_tag.get(tag_bytes) if field_decoder is None: new_pos = local_SkipField(buffer, new_pos, end, tag_bytes) if new_pos == -1: return pos pos = new_pos else: pos = field_decoder(buffer, new_pos, end, self, field_dict) return pos cls._InternalParse = InternalParse def _AddIsInitializedMethod(message_descriptor, cls): """Adds the IsInitialized and FindInitializationError methods to the protocol message class.""" required_fields = [field for field in message_descriptor.fields if field.label == _FieldDescriptor.LABEL_REQUIRED] def IsInitialized(self, errors=None): """Checks if all required fields of a message are set. Args: errors: A list which, if provided, will be populated with the field paths of all missing required fields. Returns: True iff the specified message has all required fields set. """ # Performance is critical so we avoid HasField() and ListFields(). 
for field in required_fields: if (field not in self._fields or (field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE and not self._fields[field]._is_present_in_parent)): if errors is not None: errors.extend(self.FindInitializationErrors()) return False for field, value in self._fields.iteritems(): if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: if field.label == _FieldDescriptor.LABEL_REPEATED: for element in value: if not element.IsInitialized(): if errors is not None: errors.extend(self.FindInitializationErrors()) return False elif value._is_present_in_parent and not value.IsInitialized(): if errors is not None: errors.extend(self.FindInitializationErrors()) return False return True cls.IsInitialized = IsInitialized def FindInitializationErrors(self): """Finds required fields which are not initialized. Returns: A list of strings. Each string is a path to an uninitialized field from the top-level message, e.g. "foo.bar[5].baz". """ errors = [] # simplify things for field in required_fields: if not self.HasField(field.name): errors.append(field.name) for field, value in self.ListFields(): if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: if field.is_extension: name = "(%s)" % field.full_name else: name = field.name if field.label == _FieldDescriptor.LABEL_REPEATED: for i in xrange(len(value)): element = value[i] prefix = "%s[%d]." % (name, i) sub_errors = element.FindInitializationErrors() errors += [ prefix + error for error in sub_errors ] else: prefix = name + "." 
sub_errors = value.FindInitializationErrors() errors += [ prefix + error for error in sub_errors ] return errors cls.FindInitializationErrors = FindInitializationErrors def _AddMergeFromMethod(cls): LABEL_REPEATED = _FieldDescriptor.LABEL_REPEATED CPPTYPE_MESSAGE = _FieldDescriptor.CPPTYPE_MESSAGE def MergeFrom(self, msg): if not isinstance(msg, cls): raise TypeError( "Parameter to MergeFrom() must be instance of same class.") assert msg is not self self._Modified() fields = self._fields for field, value in msg._fields.iteritems(): if field.label == LABEL_REPEATED: field_value = fields.get(field) if field_value is None: # Construct a new object to represent this field. field_value = field._default_constructor(self) fields[field] = field_value field_value.MergeFrom(value) elif field.cpp_type == CPPTYPE_MESSAGE: if value._is_present_in_parent: field_value = fields.get(field) if field_value is None: # Construct a new object to represent this field. field_value = field._default_constructor(self) fields[field] = field_value field_value.MergeFrom(value) else: self._fields[field] = value cls.MergeFrom = MergeFrom def _AddMessageMethods(message_descriptor, cls): """Adds implementations of all Message methods to cls.""" _AddListFieldsMethod(message_descriptor, cls) _AddHasFieldMethod(message_descriptor, cls) _AddClearFieldMethod(message_descriptor, cls) if message_descriptor.is_extendable: _AddClearExtensionMethod(cls) _AddHasExtensionMethod(cls) _AddClearMethod(message_descriptor, cls) _AddEqualsMethod(message_descriptor, cls) _AddStrMethod(message_descriptor, cls) _AddUnicodeMethod(message_descriptor, cls) _AddSetListenerMethod(cls) _AddByteSizeMethod(message_descriptor, cls) _AddSerializeToStringMethod(message_descriptor, cls) _AddSerializePartialToStringMethod(message_descriptor, cls) _AddMergeFromStringMethod(message_descriptor, cls) _AddIsInitializedMethod(message_descriptor, cls) _AddMergeFromMethod(cls) def _AddPrivateHelperMethods(cls): """Adds implementation of 
private helper methods to cls.""" def Modified(self): """Sets the _cached_byte_size_dirty bit to true, and propagates this to our listener iff this was a state change. """ # Note: Some callers check _cached_byte_size_dirty before calling # _Modified() as an extra optimization. So, if this method is ever # changed such that it does stuff even when _cached_byte_size_dirty is # already true, the callers need to be updated. if not self._cached_byte_size_dirty: self._cached_byte_size_dirty = True self._listener_for_children.dirty = True self._is_present_in_parent = True self._listener.Modified() cls._Modified = Modified cls.SetInParent = Modified class _Listener(object): """MessageListener implementation that a parent message registers with its child message. In order to support semantics like: foo.bar.baz.qux = 23 assert foo.HasField('bar') ...child objects must have back references to their parents. This helper class is at the heart of this support. """ def __init__(self, parent_message): """Args: parent_message: The message whose _Modified() method we should call when we receive Modified() messages. """ # This listener establishes a back reference from a child (contained) object # to its parent (containing) object. We make this a weak reference to avoid # creating cyclic garbage when the client finishes with the 'parent' object # in the tree. if isinstance(parent_message, weakref.ProxyType): self._parent_message_weakref = parent_message else: self._parent_message_weakref = weakref.proxy(parent_message) # As an optimization, we also indicate directly on the listener whether # or not the parent message is dirty. This way we can avoid traversing # up the tree in the common case. self.dirty = False def Modified(self): if self.dirty: return try: # Propagate the signal to our parents iff this is the first field set. 
self._parent_message_weakref._Modified() except ReferenceError: # We can get here if a client has kept a reference to a child object, # and is now setting a field on it, but the child's parent has been # garbage-collected. This is not an error. pass # TODO(robinson): Move elsewhere? This file is getting pretty ridiculous... # TODO(robinson): Unify error handling of "unknown extension" crap. # TODO(robinson): Support iteritems()-style iteration over all # extensions with the "has" bits turned on? class _ExtensionDict(object): """Dict-like container for supporting an indexable "Extensions" field on proto instances. Note that in all cases we expect extension handles to be FieldDescriptors. """ def __init__(self, extended_message): """extended_message: Message instance for which we are the Extensions dict. """ self._extended_message = extended_message def __getitem__(self, extension_handle): """Returns the current value of the given extension handle.""" _VerifyExtensionHandle(self._extended_message, extension_handle) result = self._extended_message._fields.get(extension_handle) if result is not None: return result if extension_handle.label == _FieldDescriptor.LABEL_REPEATED: result = extension_handle._default_constructor(self._extended_message) elif extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: result = extension_handle.message_type._concrete_class() try: result._SetListener(self._extended_message._listener_for_children) except ReferenceError: pass else: # Singular scalar -- just return the default without inserting into the # dict. return extension_handle.default_value # Atomically check if another thread has preempted us and, if not, swap # in the new object we just created. If someone has preempted us, we # take that object and discard ours. # WARNING: We are relying on setdefault() being atomic. This is true # in CPython but we haven't investigated others. This warning appears # in several other locations in this file. 
result = self._extended_message._fields.setdefault( extension_handle, result) return result def __eq__(self, other): if not isinstance(other, self.__class__): return False my_fields = self._extended_message.ListFields() other_fields = other._extended_message.ListFields() # Get rid of non-extension fields. my_fields = [ field for field in my_fields if field.is_extension ] other_fields = [ field for field in other_fields if field.is_extension ] return my_fields == other_fields def __ne__(self, other): return not self == other def __hash__(self): raise TypeError('unhashable object') # Note that this is only meaningful for non-repeated, scalar extension # fields. Note also that we may have to call _Modified() when we do # successfully set a field this way, to set any necssary "has" bits in the # ancestors of the extended message. def __setitem__(self, extension_handle, value): """If extension_handle specifies a non-repeated, scalar extension field, sets the value of that field. """ _VerifyExtensionHandle(self._extended_message, extension_handle) if (extension_handle.label == _FieldDescriptor.LABEL_REPEATED or extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE): raise TypeError( 'Cannot assign to extension "%s" because it is a repeated or ' 'composite type.' % extension_handle.full_name) # It's slightly wasteful to lookup the type checker each time, # but we expect this to be a vanishingly uncommon case anyway. type_checker = type_checkers.GetTypeChecker( extension_handle.cpp_type, extension_handle.type) type_checker.CheckValue(value) self._extended_message._fields[extension_handle] = value self._extended_message._Modified() def _FindExtensionByName(self, name): """Tries to find a known extension with the specified name. Args: name: Extension full name. Returns: Extension field descriptor. """ return self._extended_message._extensions_by_name.get(name, None)
bsd-3-clause
Hasimir/letsencrypt
letsencrypt/tests/account_test.py
28
6651
"""Tests for letsencrypt.account.""" import datetime import os import shutil import stat import tempfile import unittest import mock import pytz from acme import jose from acme import messages from letsencrypt import errors from letsencrypt.tests import test_util KEY = jose.JWKRSA.load(test_util.load_vector("rsa512_key_2.pem")) class AccountTest(unittest.TestCase): """Tests for letsencrypt.account.Account.""" def setUp(self): from letsencrypt.account import Account self.regr = mock.MagicMock() self.meta = Account.Meta( creation_host="test.letsencrypt.org", creation_dt=datetime.datetime( 2015, 7, 4, 14, 4, 10, tzinfo=pytz.UTC)) self.acc = Account(self.regr, KEY, self.meta) with mock.patch("letsencrypt.account.socket") as mock_socket: mock_socket.getfqdn.return_value = "test.letsencrypt.org" with mock.patch("letsencrypt.account.datetime") as mock_dt: mock_dt.datetime.now.return_value = self.meta.creation_dt self.acc_no_meta = Account(self.regr, KEY) def test_init(self): self.assertEqual(self.regr, self.acc.regr) self.assertEqual(KEY, self.acc.key) self.assertEqual(self.meta, self.acc_no_meta.meta) def test_id(self): self.assertEqual( self.acc.id, "2ba35a3bdf380ed76a5ac9e740568395") def test_slug(self): self.assertEqual( self.acc.slug, "test.letsencrypt.org@2015-07-04T14:04:10Z (2ba3)") def test_repr(self): self.assertEqual( repr(self.acc), "<Account(2ba35a3bdf380ed76a5ac9e740568395)>") class ReportNewAccountTest(unittest.TestCase): """Tests for letsencrypt.account.report_new_account.""" def setUp(self): self.config = mock.MagicMock(config_dir="/etc/letsencrypt") reg = messages.Registration.from_data(email="rhino@jungle.io") self.acc = mock.MagicMock(regr=messages.RegistrationResource( uri=None, new_authzr_uri=None, body=reg)) def _call(self): from letsencrypt.account import report_new_account report_new_account(self.acc, self.config) @mock.patch("letsencrypt.client.zope.component.queryUtility") def test_no_reporter(self, mock_zope): mock_zope.return_value = None 
self._call() @mock.patch("letsencrypt.client.zope.component.queryUtility") def test_it(self, mock_zope): self._call() call_list = mock_zope().add_message.call_args_list self.assertTrue(self.config.config_dir in call_list[0][0][0]) self.assertTrue( ", ".join(self.acc.regr.body.emails) in call_list[1][0][0]) class AccountMemoryStorageTest(unittest.TestCase): """Tests for letsencrypt.account.AccountMemoryStorage.""" def setUp(self): from letsencrypt.account import AccountMemoryStorage self.storage = AccountMemoryStorage() def test_it(self): account = mock.Mock(id="x") self.assertEqual([], self.storage.find_all()) self.assertRaises(errors.AccountNotFound, self.storage.load, "x") self.storage.save(account) self.assertEqual([account], self.storage.find_all()) self.assertEqual(account, self.storage.load("x")) self.storage.save(account) self.assertEqual([account], self.storage.find_all()) class AccountFileStorageTest(unittest.TestCase): """Tests for letsencrypt.account.AccountFileStorage.""" def setUp(self): self.tmp = tempfile.mkdtemp() self.config = mock.MagicMock( accounts_dir=os.path.join(self.tmp, "accounts")) from letsencrypt.account import AccountFileStorage self.storage = AccountFileStorage(self.config) from letsencrypt.account import Account self.acc = Account( regr=messages.RegistrationResource( uri=None, new_authzr_uri=None, body=messages.Registration()), key=KEY) def tearDown(self): shutil.rmtree(self.tmp) def test_init_creates_dir(self): self.assertTrue(os.path.isdir(self.config.accounts_dir)) def test_save_and_restore(self): self.storage.save(self.acc) account_path = os.path.join(self.config.accounts_dir, self.acc.id) self.assertTrue(os.path.exists(account_path)) for file_name in "regr.json", "meta.json", "private_key.json": self.assertTrue(os.path.exists( os.path.join(account_path, file_name))) self.assertEqual("0400", oct(os.stat(os.path.join( account_path, "private_key.json"))[stat.ST_MODE] & 0o777)) # restore self.assertEqual(self.acc, 
self.storage.load(self.acc.id)) def test_find_all(self): self.storage.save(self.acc) self.assertEqual([self.acc], self.storage.find_all()) def test_find_all_none_empty_list(self): self.assertEqual([], self.storage.find_all()) def test_find_all_accounts_dir_absent(self): os.rmdir(self.config.accounts_dir) self.assertEqual([], self.storage.find_all()) def test_find_all_load_skips(self): self.storage.load = mock.MagicMock( side_effect=["x", errors.AccountStorageError, "z"]) with mock.patch("letsencrypt.account.os.listdir") as mock_listdir: mock_listdir.return_value = ["x", "y", "z"] self.assertEqual(["x", "z"], self.storage.find_all()) def test_load_non_existent_raises_error(self): self.assertRaises(errors.AccountNotFound, self.storage.load, "missing") def test_load_id_mismatch_raises_error(self): self.storage.save(self.acc) shutil.move(os.path.join(self.config.accounts_dir, self.acc.id), os.path.join(self.config.accounts_dir, "x" + self.acc.id)) self.assertRaises(errors.AccountStorageError, self.storage.load, "x" + self.acc.id) def test_load_ioerror(self): self.storage.save(self.acc) mock_open = mock.mock_open() mock_open.side_effect = IOError with mock.patch("__builtin__.open", mock_open): self.assertRaises( errors.AccountStorageError, self.storage.load, self.acc.id) def test_save_ioerrors(self): mock_open = mock.mock_open() mock_open.side_effect = IOError # TODO: [None, None, IOError] with mock.patch("__builtin__.open", mock_open): self.assertRaises( errors.AccountStorageError, self.storage.save, self.acc) if __name__ == "__main__": unittest.main() # pragma: no cover
apache-2.0
david30907d/feedback_django
spirit/core/utils/markdown/block.py
2
4445
# -*- coding: utf-8 -*- from __future__ import unicode_literals import re import copy import mistune from .parsers.poll import PollParser class BlockGrammar(mistune.BlockGrammar): # todo: remove all *_link #link_block = re.compile( # r'^https?://[^\s]+' # r'(?:\n+|$)' #) audio_link = re.compile( r'^https?://[^\s]+\.(mp3|ogg|wav)' r'(\?[^\s]+)?' r'(?:\n+|$)' ) image_link = re.compile( r'^https?://[^\s]+/(?P<image_name>[^\s]+)\.' r'(?P<extension>png|jpg|jpeg|gif|bmp|tif|tiff)' r'(\?[^\s]+)?' r'(?:\n+|$)' ) video_link = re.compile( r'^https?://[^\s]+\.(mov|mp4|webm|ogv)' r'(\?[^\s]+)?' r'(?:\n+|$)' ) # Try to get the video ID. Works for URLs of the form: # * https://www.youtube.com/watch?v=Z0UISCEe52Y # * http://youtu.be/afyK1HSFfgw # * https://www.youtube.com/embed/vsF0K3Ou1v0 youtube = re.compile( r'^https?://(www\.)?' r'(youtube\.com/watch\?v=' r'|youtu\.be/' r'|youtube\.com/embed/)' r'(?P<id>[a-zA-Z0-9_\-]{11})' r'(?:\n+|$)' ) # Try to get the video ID. Works for URLs of the form: # * https://vimeo.com/11111111 # * https://www.vimeo.com/11111111 # * https://player.vimeo.com/video/11111111 # * https://vimeo.com/channels/11111111 # * https://vimeo.com/groups/name/videos/11111111 # * https://vimeo.com/album/2222222/video/11111111 # * https://vimeo.com/11111111?param=value vimeo = re.compile( r'^https?://(www\.|player\.)?' r'vimeo\.com/' r'(channels/' r'|groups/[^/]+/videos/' r'|album/(\d+)/video/' r'|video/)?' r'(?P<id>\d+)' r'(\?[^\s]+)?' r'(?:\n+|$)' ) # Capture polls: # [poll name=foo min=1 max=1 close=1d mode=default] # # Which opt you prefer? # 1. opt 1 # 2. opt 2 # [/poll] poll = re.compile( r'^(?:\[poll' r'((?:\s+name=(?P<name>[\w\-_]+))' r'(?:\s+min=(?P<min>\d+))?' r'(?:\s+max=(?P<max>\d+))?' r'(?:\s+close=(?P<close>\d+)d)?' r'(?:\s+mode=(?P<mode>(default|secret)))?' r'|(?P<invalid_params>[^\]]*))' r'\])\n' r'((?:#\s*(?P<title>[^\n]+\n))?' 
r'(?P<choices>(?:\d+\.\s*[^\n]+\n){2,})' r'|(?P<invalid_body>(?:[^\n]+\n)*))' r'(?:\[/poll\])' ) class BlockLexer(mistune.BlockLexer): default_rules = copy.copy(mistune.BlockLexer.default_rules) default_rules.insert(0, 'audio_link') default_rules.insert(0, 'image_link') default_rules.insert(0, 'video_link') default_rules.insert(0, 'youtube') default_rules.insert(0, 'vimeo') default_rules.insert(0, 'poll') def __init__(self, rules=None, **kwargs): if rules is None: rules = BlockGrammar() super(BlockLexer, self).__init__(rules=rules, **kwargs) self.polls = { 'polls': [], 'choices': [] } def parse_audio_link(self, m): self.tokens.append({ 'type': 'audio_link', 'link': m.group(0).strip() }) def parse_image_link(self, m): link = m.group(0).strip() title = m.group('image_name').strip() self.tokens.append({ 'type': 'image_link', 'src': link, 'title': title, 'text': title }) def parse_video_link(self, m): self.tokens.append({ 'type': 'video_link', 'link': m.group(0).strip() }) def parse_youtube(self, m): self.tokens.append({ 'type': 'youtube', 'video_id': m.group("id") }) def parse_vimeo(self, m): self.tokens.append({ 'type': 'vimeo', 'video_id': m.group("id") }) def parse_poll(self, m): parser = PollParser(polls=self.polls, data=m.groupdict()) if parser.is_valid(): poll = parser.cleaned_data['poll'] choices = parser.cleaned_data['choices'] self.polls['polls'].append(poll) self.polls['choices'].extend(choices) self.tokens.append({ 'type': 'poll', 'name': poll['name'] }) else: self.tokens.append({ 'type': 'poll', 'raw': m.group(0) })
mit
alephu5/Soundbyte
environment/lib/python3.3/site-packages/scipy/linalg/interpolative.py
3
30950
#****************************************************************************** # Copyright (C) 2013 Kenneth L. Ho # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. Redistributions in binary # form must reproduce the above copyright notice, this list of conditions and # the following disclaimer in the documentation and/or other materials # provided with the distribution. # # None of the names of the copyright holders may be used to endorse or # promote products derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. #****************************************************************************** # Python module for interfacing with `id_dist`. r""" ====================================================================== Interpolative matrix decomposition (:mod:`scipy.linalg.interpolative`) ====================================================================== .. moduleauthor:: Kenneth L. Ho <klho@stanford.edu> .. versionadded:: 0.13 .. 
currentmodule:: scipy.linalg.interpolative An interpolative decomposition (ID) of a matrix :math:`A \in \mathbb{C}^{m \times n}` of rank :math:`k \leq \min \{ m, n \}` is a factorization .. math:: A \Pi = \begin{bmatrix} A \Pi_{1} & A \Pi_{2} \end{bmatrix} = A \Pi_{1} \begin{bmatrix} I & T \end{bmatrix}, where :math:`\Pi = [\Pi_{1}, \Pi_{2}]` is a permutation matrix with :math:`\Pi_{1} \in \{ 0, 1 \}^{n \times k}`, i.e., :math:`A \Pi_{2} = A \Pi_{1} T`. This can equivalently be written as :math:`A = BP`, where :math:`B = A \Pi_{1}` and :math:`P = [I, T] \Pi^{\mathsf{T}}` are the *skeleton* and *interpolation matrices*, respectively. If :math:`A` does not have exact rank :math:`k`, then there exists an approximation in the form of an ID such that :math:`A = BP + E`, where :math:`\| E \| \sim \sigma_{k + 1}` is on the order of the :math:`(k + 1)`-th largest singular value of :math:`A`. Note that :math:`\sigma_{k + 1}` is the best possible error for a rank-:math:`k` approximation and, in fact, is achieved by the singular value decomposition (SVD) :math:`A \approx U S V^{*}`, where :math:`U \in \mathbb{C}^{m \times k}` and :math:`V \in \mathbb{C}^{n \times k}` have orthonormal columns and :math:`S = \mathop{\mathrm{diag}} (\sigma_{i}) \in \mathbb{C}^{k \times k}` is diagonal with nonnegative entries. The principal advantages of using an ID over an SVD are that: - it is cheaper to construct; - it preserves the structure of :math:`A`; and - it is more efficient to compute with in light of the identity submatrix of :math:`P`. Routines ======== Main functionality: .. autosummary:: :toctree: generated/ interp_decomp reconstruct_matrix_from_id reconstruct_interp_matrix reconstruct_skel_matrix id_to_svd svd estimate_spectral_norm estimate_spectral_norm_diff estimate_rank Support functions: .. 
autosummary:: :toctree: generated/ seed rand References ========== This module uses the ID software package [1]_ by Martinsson, Rokhlin, Shkolnisky, and Tygert, which is a Fortran library for computing IDs using various algorithms, including the rank-revealing QR approach of [2]_ and the more recent randomized methods described in [3]_, [4]_, and [5]_. This module exposes its functionality in a way convenient for Python users. Note that this module does not add any functionality beyond that of organizing a simpler and more consistent interface. We advise the user to consult also the `documentation for the ID package <https://cims.nyu.edu/~tygert/id_doc.pdf>`_. .. [1] P.G. Martinsson, V. Rokhlin, Y. Shkolnisky, M. Tygert. "ID: a software package for low-rank approximation of matrices via interpolative decompositions, version 0.2." http://cims.nyu.edu/~tygert/id_doc.pdf. .. [2] H. Cheng, Z. Gimbutas, P.G. Martinsson, V. Rokhlin. "On the compression of low rank matrices." *SIAM J. Sci. Comput.* 26 (4): 1389--1404, 2005. `doi:10.1137/030602678 <http://dx.doi.org/10.1137/030602678>`_. .. [3] E. Liberty, F. Woolfe, P.G. Martinsson, V. Rokhlin, M. Tygert. "Randomized algorithms for the low-rank approximation of matrices." *Proc. Natl. Acad. Sci. U.S.A.* 104 (51): 20167--20172, 2007. `doi:10.1073/pnas.0709640104 <http://dx.doi.org/10.1073/pnas.0709640104>`_. .. [4] P.G. Martinsson, V. Rokhlin, M. Tygert. "A randomized algorithm for the decomposition of matrices." *Appl. Comput. Harmon. Anal.* 30 (1): 47--68, 2011. `doi:10.1016/j.acha.2010.02.003 <http://dx.doi.org/10.1016/j.acha.2010.02.003>`_. .. [5] F. Woolfe, E. Liberty, V. Rokhlin, M. Tygert. "A fast randomized algorithm for the approximation of matrices." *Appl. Comput. Harmon. Anal.* 25 (3): 335--366, 2008. `doi:10.1016/j.acha.2007.12.002 <http://dx.doi.org/10.1016/j.acha.2007.12.002>`_. 
Tutorial ======== Initializing ------------ The first step is to import :mod:`scipy.linalg.interpolative` by issuing the command: >>> import scipy.linalg.interpolative as sli Now let's build a matrix. For this, we consider a Hilbert matrix, which is well known to have low rank: >>> from scipy.linalg import hilbert >>> n = 1000 >>> A = hilbert(n) We can also do this explicitly via: >>> import numpy as np >>> n = 1000 >>> A = np.empty((n, n), order='F') >>> for j in range(n): >>> for i in range(n): >>> A[i,j] = 1. / (i + j + 1) Note the use of the flag ``order='F'`` in :func:`numpy.empty`. This instantiates the matrix in Fortran-contiguous order and is important for avoiding data copying when passing to the backend. We then define multiplication routines for the matrix by regarding it as a :class:`scipy.sparse.linalg.LinearOperator`: >>> from scipy.sparse.linalg import aslinearoperator >>> L = aslinearoperator(A) This automatically sets up methods describing the action of the matrix and its adjoint on a vector. Computing an ID --------------- We have several choices of algorithm to compute an ID. These fall largely according to two dichotomies: 1. how the matrix is represented, i.e., via its entries or via its action on a vector; and 2. whether to approximate it to a fixed relative precision or to a fixed rank. We step through each choice in turn below. In all cases, the ID is represented by three parameters: 1. a rank ``k``; 2. an index array ``idx``; and 3. interpolation coefficients ``proj``. The ID is specified by the relation ``np.dot(A[:,idx[:k]], proj) == A[:,idx[k:]]``. From matrix entries ................... We first consider a matrix given in terms of its entries. To compute an ID to a fixed precision, type: >>> k, idx, proj = sli.interp_decomp(A, eps) where ``eps < 1`` is the desired precision. To compute an ID to a fixed rank, use: >>> idx, proj = sli.interp_decomp(A, k) where ``k >= 1`` is the desired rank.
Both algorithms use random sampling and are usually faster than the corresponding older, deterministic algorithms, which can be accessed via the commands: >>> k, idx, proj = sli.interp_decomp(A, eps, rand=False) and: >>> idx, proj = sli.interp_decomp(A, k, rand=False) respectively. From matrix action .................. Now consider a matrix given in terms of its action on a vector as a :class:`scipy.sparse.linalg.LinearOperator`. To compute an ID to a fixed precision, type: >>> k, idx, proj = sli.interp_decomp(L, eps) To compute an ID to a fixed rank, use: >>> idx, proj = sli.interp_decomp(L, k) These algorithms are randomized. Reconstructing an ID -------------------- The ID routines above do not output the skeleton and interpolation matrices explicitly but instead return the relevant information in a more compact (and sometimes more useful) form. To build these matrices, write: >>> B = sli.reconstruct_skel_matrix(A, k, idx) for the skeleton matrix and: >>> P = sli.reconstruct_interp_matrix(idx, proj) for the interpolation matrix. The ID approximation can then be computed as: >>> C = np.dot(B, P) This can also be constructed directly using: >>> C = sli.reconstruct_matrix_from_id(B, idx, proj) without having to first compute ``P``. Alternatively, this can be done explicitly as well using: >>> B = A[:,idx[:k]] >>> P = np.hstack([np.eye(k), proj])[:,np.argsort(idx)] >>> C = np.dot(B, P) Computing an SVD ---------------- An ID can be converted to an SVD via the command: >>> U, S, V = sli.id_to_svd(B, idx, proj) The SVD approximation is then: >>> C = np.dot(U, np.dot(np.diag(S), np.dot(V.conj().T))) The SVD can also be computed "fresh" by combining both the ID and conversion steps into one command. Following the various ID algorithms above, there are correspondingly various SVD algorithms that one can employ. From matrix entries ................... We consider first SVD algorithms for a matrix given in terms of its entries. 
To compute an SVD to a fixed precision, type: >>> U, S, V = sli.svd(A, eps) To compute an SVD to a fixed rank, use: >>> U, S, V = sli.svd(A, k) Both algorithms use random sampling; for the determinstic versions, issue the keyword ``rand=False`` as above. From matrix action .................. Now consider a matrix given in terms of its action on a vector. To compute an SVD to a fixed precision, type: >>> U, S, V = sli.svd(L, eps) To compute an SVD to a fixed rank, use: >>> U, S, V = sli.svd(L, k) Utility routines ---------------- Several utility routines are also available. To estimate the spectral norm of a matrix, use: >>> snorm = sli.estimate_spectral_norm(A) This algorithm is based on the randomized power method and thus requires only matrix-vector products. The number of iterations to take can be set using the keyword ``its`` (default: ``its=20``). The matrix is interpreted as a :class:`scipy.sparse.linalg.LinearOperator`, but it is also valid to supply it as a :class:`numpy.ndarray`, in which case it is trivially converted using :func:`scipy.sparse.linalg.aslinearoperator`. The same algorithm can also estimate the spectral norm of the difference of two matrices ``A1`` and ``A2`` as follows: >>> diff = sli.estimate_spectral_norm_diff(A1, A2) This is often useful for checking the accuracy of a matrix approximation. Some routines in :mod:`scipy.linalg.interpolative` require estimating the rank of a matrix as well. This can be done with either: >>> k = sli.estimate_rank(A, eps) or: >>> k = sli.estimate_rank(L, eps) depending on the representation. The parameter ``eps`` controls the definition of the numerical rank. Finally, the random number generation required for all randomized routines can be controlled via :func:`scipy.linalg.interpolative.seed`. To reset the seed values to their original values, use: >>> sli.seed('default') To specify the seed values, use: >>> sli.seed(s) where ``s`` must be an integer or array of 55 floats. 
If an integer, the array of floats is obtained by using `np.random.rand` with the given integer seed. To simply generate some random numbers, type: >>> sli.rand(n) where ``n`` is the number of random numbers to generate. Remarks ------- The above functions all automatically detect the appropriate interface and work with both real and complex data types, passing input arguments to the proper backend routine. """ import scipy.linalg._interpolative_backend as backend import numpy as np _DTYPE_ERROR = ValueError("invalid input dtype (input must be float64 or complex128)") _TYPE_ERROR = TypeError("invalid input type (must be array or LinearOperator)") def _is_real(A): try: if A.dtype == np.complex128: return False elif A.dtype == np.float64: return True else: raise _DTYPE_ERROR except AttributeError: raise _TYPE_ERROR def seed(seed=None): """ Seed the internal random number generator used in this ID package. The generator is a lagged Fibonacci method with 55-element internal state. Parameters ---------- seed : int, sequence, 'default', optional If 'default', the random seed is reset to a default value. If `seed` is a sequence containing 55 floating-point numbers in range [0,1], these are used to set the internal state of the generator. If the value is an integer, the internal state is obtained from `numpy.random.RandomState` (MT19937) with the integer used as the initial seed. If `seed` is omitted (None), `numpy.random` is used to initialize the generator. """ # For details, see :func:`backend.id_srand`, :func:`backend.id_srandi`, # and :func:`backend.id_srando`. 
if isinstance(seed, str) and seed == 'default': backend.id_srando() elif hasattr(seed, '__len__'): state = np.asfortranarray(seed, dtype=float) if state.shape != (55,): raise ValueError("invalid input size") elif state.min() < 0 or state.max() > 1: raise ValueError("values not in range [0,1]") backend.id_srandi(state) elif seed is None: backend.id_srandi(np.random.rand(55)) else: rnd = np.random.RandomState(seed) backend.id_srandi(rnd.rand(55)) def rand(*shape): """ Generate standard uniform pseudorandom numbers via a very efficient lagged Fibonacci method. This routine is used for all random number generation in this package and can affect ID and SVD results. Parameters ---------- shape Shape of output array """ # For details, see :func:`backend.id_srand`, and :func:`backend.id_srando`. return backend.id_srand(np.prod(shape)).reshape(shape) def interp_decomp(A, eps_or_k, rand=True): """ Compute ID of a matrix. An ID of a matrix `A` is a factorization defined by a rank `k`, a column index array `idx`, and interpolation coefficients `proj` such that:: numpy.dot(A[:,idx[:k]], proj) = A[:,idx[k:]] The original matrix can then be reconstructed as:: numpy.hstack([A[:,idx[:k]], numpy.dot(A[:,idx[:k]], proj)] )[:,numpy.argsort(idx)] or via the routine :func:`reconstruct_matrix_from_id`. This can equivalently be written as:: numpy.dot(A[:,idx[:k]], numpy.hstack([numpy.eye(k), proj]) )[:,np.argsort(idx)] in terms of the skeleton and interpolation matrices:: B = A[:,idx[:k]] and:: P = numpy.hstack([numpy.eye(k), proj])[:,np.argsort(idx)] respectively. See also :func:`reconstruct_interp_matrix` and :func:`reconstruct_skel_matrix`. The ID can be computed to any relative precision or rank (depending on the value of `eps_or_k`). 
If a precision is specified (`eps_or_k < 1`), then this function has the output signature:: k, idx, proj = interp_decomp(A, eps_or_k) Otherwise, if a rank is specified (`eps_or_k >= 1`), then the output signature is:: idx, proj = interp_decomp(A, eps_or_k) .. This function automatically detects the form of the input parameters and passes them to the appropriate backend. For details, see :func:`backend.iddp_id`, :func:`backend.iddp_aid`, :func:`backend.iddp_rid`, :func:`backend.iddr_id`, :func:`backend.iddr_aid`, :func:`backend.iddr_rid`, :func:`backend.idzp_id`, :func:`backend.idzp_aid`, :func:`backend.idzp_rid`, :func:`backend.idzr_id`, :func:`backend.idzr_aid`, and :func:`backend.idzr_rid`. Parameters ---------- A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator` with `rmatvec` Matrix to be factored eps_or_k : float or int Relative error (if `eps_or_k < 1`) or rank (if `eps_or_k >= 1`) of approximation. rand : bool, optional Whether to use random sampling if `A` is of type :class:`numpy.ndarray` (randomized algorithms are always used if `A` is of type :class:`scipy.sparse.linalg.LinearOperator`). Returns ------- k : int Rank required to achieve specified relative precision if `eps_or_k < 1`. idx : :class:`numpy.ndarray` Column index array. proj : :class:`numpy.ndarray` Interpolation coefficients. 
""" from scipy.sparse.linalg import LinearOperator real = _is_real(A) if isinstance(A, np.ndarray): if eps_or_k < 1: eps = eps_or_k if rand: if real: k, idx, proj = backend.iddp_aid(eps, A) else: k, idx, proj = backend.idzp_aid(eps, A) else: if real: k, idx, proj = backend.iddp_id(eps, A) else: k, idx, proj = backend.idzp_id(eps, A) return k, idx - 1, proj else: k = int(eps_or_k) if rand: if real: idx, proj = backend.iddr_aid(A, k) else: idx, proj = backend.idzr_aid(A, k) else: if real: idx, proj = backend.iddr_id(A, k) else: idx, proj = backend.idzr_id(A, k) return idx - 1, proj elif isinstance(A, LinearOperator): m, n = A.shape matveca = A.rmatvec if eps_or_k < 1: eps = eps_or_k if real: k, idx, proj = backend.iddp_rid(eps, m, n, matveca) else: k, idx, proj = backend.idzp_rid(eps, m, n, matveca) return k, idx - 1, proj else: k = int(eps_or_k) if real: idx, proj = backend.iddr_rid(m, n, matveca, k) else: idx, proj = backend.idzr_rid(m, n, matveca, k) return idx - 1, proj else: raise _TYPE_ERROR def reconstruct_matrix_from_id(B, idx, proj): """ Reconstruct matrix from its ID. A matrix `A` with skeleton matrix `B` and ID indices and coefficients `idx` and `proj`, respectively, can be reconstructed as:: numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)] See also :func:`reconstruct_interp_matrix` and :func:`reconstruct_skel_matrix`. .. This function automatically detects the matrix data type and calls the appropriate backend. For details, see :func:`backend.idd_reconid` and :func:`backend.idz_reconid`. Parameters ---------- B : :class:`numpy.ndarray` Skeleton matrix. idx : :class:`numpy.ndarray` Column index array. proj : :class:`numpy.ndarray` Interpolation coefficients. Returns ------- :class:`numpy.ndarray` Reconstructed matrix. """ if _is_real(B): return backend.idd_reconid(B, idx + 1, proj) else: return backend.idz_reconid(B, idx + 1, proj) def reconstruct_interp_matrix(idx, proj): """ Reconstruct interpolation matrix from ID. 
The interpolation matrix can be reconstructed from the ID indices and coefficients `idx` and `proj`, respectively, as:: P = numpy.hstack([numpy.eye(proj.shape[0]), proj])[:,numpy.argsort(idx)] The original matrix can then be reconstructed from its skeleton matrix `B` via:: numpy.dot(B, P) See also :func:`reconstruct_matrix_from_id` and :func:`reconstruct_skel_matrix`. .. This function automatically detects the matrix data type and calls the appropriate backend. For details, see :func:`backend.idd_reconint` and :func:`backend.idz_reconint`. Parameters ---------- idx : :class:`numpy.ndarray` Column index array. proj : :class:`numpy.ndarray` Interpolation coefficients. Returns ------- :class:`numpy.ndarray` Interpolation matrix. """ if _is_real(proj): return backend.idd_reconint(idx + 1, proj) else: return backend.idz_reconint(idx + 1, proj) def reconstruct_skel_matrix(A, k, idx): """ Reconstruct skeleton matrix from ID. The skeleton matrix can be reconstructed from the original matrix `A` and its ID rank and indices `k` and `idx`, respectively, as:: B = A[:,idx[:k]] The original matrix can then be reconstructed via:: numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)] See also :func:`reconstruct_matrix_from_id` and :func:`reconstruct_interp_matrix`. .. This function automatically detects the matrix data type and calls the appropriate backend. For details, see :func:`backend.idd_copycols` and :func:`backend.idz_copycols`. Parameters ---------- A : :class:`numpy.ndarray` Original matrix. k : int Rank of ID. idx : :class:`numpy.ndarray` Column index array. Returns ------- :class:`numpy.ndarray` Skeleton matrix. """ if _is_real(A): return backend.idd_copycols(A, k, idx + 1) else: return backend.idz_copycols(A, k, idx + 1) def id_to_svd(B, idx, proj): """ Convert ID to SVD. 
The SVD reconstruction of a matrix with skeleton matrix `B` and ID indices and coefficients `idx` and `proj`, respectively, is:: U, S, V = id_to_svd(B, idx, proj) A = numpy.dot(U, numpy.dot(numpy.diag(S), V.conj().T)) See also :func:`svd`. .. This function automatically detects the matrix data type and calls the appropriate backend. For details, see :func:`backend.idd_id2svd` and :func:`backend.idz_id2svd`. Parameters ---------- B : :class:`numpy.ndarray` Skeleton matrix. idx : :class:`numpy.ndarray` Column index array. proj : :class:`numpy.ndarray` Interpolation coefficients. Returns ------- U : :class:`numpy.ndarray` Left singular vectors. S : :class:`numpy.ndarray` Singular values. V : :class:`numpy.ndarray` Right singular vectors. """ if _is_real(B): U, V, S = backend.idd_id2svd(B, idx + 1, proj) else: U, V, S = backend.idz_id2svd(B, idx + 1, proj) return U, S, V def estimate_spectral_norm(A, its=20): """ Estimate spectral norm of a matrix by the randomized power method. .. This function automatically detects the matrix data type and calls the appropriate backend. For details, see :func:`backend.idd_snorm` and :func:`backend.idz_snorm`. Parameters ---------- A : :class:`scipy.sparse.linalg.LinearOperator` Matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the `matvec` and `rmatvec` methods (to apply the matrix and its adjoint). its : int Number of power method iterations. Returns ------- float Spectral norm estimate. """ from scipy.sparse.linalg import aslinearoperator A = aslinearoperator(A) m, n = A.shape matvec = lambda x: A. matvec(x) matveca = lambda x: A.rmatvec(x) if _is_real(A): return backend.idd_snorm(m, n, matveca, matvec, its=its) else: return backend.idz_snorm(m, n, matveca, matvec, its=its) def estimate_spectral_norm_diff(A, B, its=20): """ Estimate spectral norm of the difference of two matrices by the randomized power method. .. This function automatically detects the matrix data type and calls the appropriate backend. 
For details, see :func:`backend.idd_diffsnorm` and :func:`backend.idz_diffsnorm`. Parameters ---------- A : :class:`scipy.sparse.linalg.LinearOperator` First matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the `matvec` and `rmatvec` methods (to apply the matrix and its adjoint). B : :class:`scipy.sparse.linalg.LinearOperator` Second matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the `matvec` and `rmatvec` methods (to apply the matrix and its adjoint). its : int Number of power method iterations. Returns ------- float Spectral norm estimate of matrix difference. """ from scipy.sparse.linalg import aslinearoperator A = aslinearoperator(A) B = aslinearoperator(B) m, n = A.shape matvec1 = lambda x: A. matvec(x) matveca1 = lambda x: A.rmatvec(x) matvec2 = lambda x: B. matvec(x) matveca2 = lambda x: B.rmatvec(x) if _is_real(A): return backend.idd_diffsnorm( m, n, matveca1, matveca2, matvec1, matvec2, its=its) else: return backend.idz_diffsnorm( m, n, matveca1, matveca2, matvec1, matvec2, its=its) def svd(A, eps_or_k, rand=True): """ Compute SVD of a matrix via an ID. An SVD of a matrix `A` is a factorization:: A = numpy.dot(U, numpy.dot(numpy.diag(S), V.conj().T)) where `U` and `V` have orthonormal columns and `S` is nonnegative. The SVD can be computed to any relative precision or rank (depending on the value of `eps_or_k`). See also :func:`interp_decomp` and :func:`id_to_svd`. .. This function automatically detects the form of the input parameters and passes them to the appropriate backend. For details, see :func:`backend.iddp_svd`, :func:`backend.iddp_asvd`, :func:`backend.iddp_rsvd`, :func:`backend.iddr_svd`, :func:`backend.iddr_asvd`, :func:`backend.iddr_rsvd`, :func:`backend.idzp_svd`, :func:`backend.idzp_asvd`, :func:`backend.idzp_rsvd`, :func:`backend.idzr_svd`, :func:`backend.idzr_asvd`, and :func:`backend.idzr_rsvd`. 
Parameters ---------- A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator` Matrix to be factored, given as either a :class:`numpy.ndarray` or a :class:`scipy.sparse.linalg.LinearOperator` with the `matvec` and `rmatvec` methods (to apply the matrix and its adjoint). eps_or_k : float or int Relative error (if `eps_or_k < 1`) or rank (if `eps_or_k >= 1`) of approximation. rand : bool, optional Whether to use random sampling if `A` is of type :class:`numpy.ndarray` (randomized algorithms are always used if `A` is of type :class:`scipy.sparse.linalg.LinearOperator`). Returns ------- U : :class:`numpy.ndarray` Left singular vectors. S : :class:`numpy.ndarray` Singular values. V : :class:`numpy.ndarray` Right singular vectors. """ from scipy.sparse.linalg import LinearOperator real = _is_real(A) if isinstance(A, np.ndarray): if eps_or_k < 1: eps = eps_or_k if rand: if real: U, V, S = backend.iddp_asvd(eps, A) else: U, V, S = backend.idzp_asvd(eps, A) else: if real: U, V, S = backend.iddp_svd(eps, A) else: U, V, S = backend.idzp_svd(eps, A) else: k = int(eps_or_k) if rand: if real: U, V, S = backend.iddr_asvd(A, k) else: U, V, S = backend.idzr_asvd(A, k) else: if real: U, V, S = backend.iddr_svd(A, k) else: U, V, S = backend.idzr_svd(A, k) elif isinstance(A, LinearOperator): m, n = A.shape matvec = lambda x: A.matvec(x) matveca = lambda x: A.rmatvec(x) if eps_or_k < 1: eps = eps_or_k if real: U, V, S = backend.iddp_rsvd(eps, m, n, matveca, matvec) else: U, V, S = backend.idzp_rsvd(eps, m, n, matveca, matvec) else: k = int(eps_or_k) if real: U, V, S = backend.iddr_rsvd(m, n, matveca, matvec, k) else: U, V, S = backend.idzr_rsvd(m, n, matveca, matvec, k) else: raise _TYPE_ERROR return U, S, V def estimate_rank(A, eps): """ Estimate matrix rank to a specified relative precision using randomized methods. 
The matrix `A` can be given as either a :class:`numpy.ndarray` or a :class:`scipy.sparse.linalg.LinearOperator`, with different algorithms used for each case. If `A` is of type :class:`numpy.ndarray`, then the output rank is typically about 8 higher than the actual numerical rank. .. This function automatically detects the form of the input parameters and passes them to the appropriate backend. For details, see :func:`backend.idd_estrank`, :func:`backend.idd_findrank`, :func:`backend.idz_estrank`, and :func:`backend.idz_findrank`. Parameters ---------- A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator` Matrix whose rank is to be estimated, given as either a :class:`numpy.ndarray` or a :class:`scipy.sparse.linalg.LinearOperator` with the `rmatvec` method (to apply the matrix adjoint). eps : float Relative error for numerical rank definition. Returns ------- int Estimated matrix rank. """ from scipy.sparse.linalg import LinearOperator real = _is_real(A) if isinstance(A, np.ndarray): if real: rank = backend.idd_estrank(eps, A) else: rank = backend.idz_estrank(eps, A) if rank == 0: # special return value for nearly full rank rank = min(A.shape) return rank elif isinstance(A, LinearOperator): m, n = A.shape matveca = A.rmatvec if real: return backend.idd_findrank(eps, m, n, matveca) else: return backend.idz_findrank(eps, m, n, matveca) else: raise _TYPE_ERROR
gpl-3.0
c2corg/v6_api
c2corg_api/views/document_history.py
1
2850
from c2corg_api.caching import cache_document_history
from c2corg_api.models import DBSession
from c2corg_api.models.cache_version import get_cache_key
from c2corg_api.models.document import DocumentLocale, DOCUMENT_TYPE
from c2corg_api.models.document_history import DocumentVersion
from c2corg_api.views import cors_policy, etag_cache
from c2corg_api.views.document_version import serialize_version
from c2corg_api.views.validation import validate_lang, validate_id
from c2corg_api.caching import get_or_create
from cornice.resource import resource, view
from pyramid.httpexceptions import HTTPNotFound
from sqlalchemy.orm import joinedload


@resource(path='/document/{id}/history/{lang}', cors_policy=cors_policy)
class HistoryDocumentRest(object):
    """ Unique class for returning history of a document.
    """

    def __init__(self, request):
        # Pyramid request for the current call; validated route params are
        # read from `request.validated` in `get()`.
        self.request = request

    @view(validators=[validate_id, validate_lang])
    def get(self):
        """Return the version history of one document locale.

        The response is served from the document-history cache when
        possible, and an ETag is set/checked so that unchanged content is
        answered with '304 Not Modified'.

        Raises
        ------
        HTTPNotFound
            If no cache key (i.e. no version) exists for the document, or
            (from `_get_history`) if the document has no locale for the
            requested language.
        """
        document_id = self.request.validated['id']
        lang = self.request.validated['lang']

        def create_response():
            # Built lazily: only runs on a cache miss.
            return self._get_history(document_id, lang)

        # The history entry point does not specify a precise document type,
        # so the generic DOCUMENT_TYPE is used for the cache key.
        cache_key = get_cache_key(
            document_id, lang, document_type=DOCUMENT_TYPE)
        if not cache_key:
            raise HTTPNotFound(
                'no version for document {0}'.format(document_id))
        else:
            # set and check the etag: if the etag value provided in the
            # request equals the current etag, return 'NotModified'
            etag_cache(self.request, cache_key)

            return get_or_create(
                cache_document_history, cache_key, create_response)

    def _get_history(self, document_id, lang):
        """Load the locale title and all versions for (document_id, lang)."""
        # FIXME conditional permission check (when outings implemented)
        # is_outing = DBSession.query(Outing) \
        #   .filter(Outing.document_id == document_id).count()
        # if is_outing > 0:
        #    # validate permission (authenticated + associated)
        #    # return 403 if not correct
        title = DBSession.query(DocumentLocale.title) \
            .filter(DocumentLocale.document_id == document_id) \
            .filter(DocumentLocale.lang == lang) \
            .first()

        if not title:
            raise HTTPNotFound('no locale document for "{0}"'.format(lang))

        # joinedload avoids one query per version for metadata/user.
        versions = DBSession.query(DocumentVersion) \
            .options(joinedload('history_metadata').joinedload('user')) \
            .filter(DocumentVersion.document_id == document_id) \
            .filter(DocumentVersion.lang == lang) \
            .order_by(DocumentVersion.id) \
            .all()

        return {
            'title': title.title,
            'versions': [serialize_version(v) for v in versions]
        }
agpl-3.0
kmolab/kmolab.github.io
data/Brython-3.3.4/Lib/test/test_list.py
14
4245
"""Tests for the built-in ``list`` type.

Most coverage is inherited from ``list_tests.CommonTest``; the methods
here add list-specific regression tests.
"""
import sys
from test import support, list_tests
import pickle


class ListTest(list_tests.CommonTest):
    # The concrete sequence type exercised by the inherited CommonTest
    # machinery.
    type2test = list

    def test_basic(self):
        # Constructing from various iterables.
        self.assertEqual(list([]), [])
        l0_3 = [0, 1, 2, 3]
        l0_3_bis = list(l0_3)
        self.assertEqual(l0_3, l0_3_bis)
        # list(x) must copy, not alias.
        self.assertTrue(l0_3 is not l0_3_bis)
        self.assertEqual(list(()), [])
        self.assertEqual(list((0, 1, 2, 3)), [0, 1, 2, 3])
        self.assertEqual(list(''), [])
        self.assertEqual(list('spam'), ['s', 'p', 'a', 'm'])

        if sys.maxsize == 0x7fffffff:
            # This test can currently only work on 32-bit machines.
            # XXX If/when PySequence_Length() returns a ssize_t, it should be
            # XXX re-enabled.
            # Verify clearing of bug #556025.
            # This assumes that the max data size (sys.maxint) == max
            # address size this also assumes that the address size is at
            # least 4 bytes with 8 byte addresses, the bug is not well
            # tested
            #
            # Note: This test is expected to SEGV under Cygwin 1.3.12 or
            # earlier due to a newlib bug.  See the following mailing list
            # thread for the details:
            # http://sources.redhat.com/ml/newlib/2002/msg00369.html
            self.assertRaises(MemoryError, list, range(sys.maxsize // 2))

        # This code used to segfault in Py2.4a3
        x = []
        x.extend(-y for y in x)
        self.assertEqual(x, [])

    def test_truth(self):
        super().test_truth()
        # Empty list is falsy, non-empty is truthy.
        self.assertTrue(not [])
        self.assertTrue([42])

    def test_identity(self):
        # Each [] literal creates a fresh object.
        self.assertTrue([] is not [])

    def test_len(self):
        super().test_len()
        self.assertEqual(len([]), 0)
        self.assertEqual(len([0]), 1)
        self.assertEqual(len([0, 1, 2]), 3)

    def test_overflow(self):
        # Repeating a list so that its length would exceed sys.maxsize
        # must raise, not crash.
        lst = [4, 5, 6, 7]
        n = int((sys.maxsize*2+2) // len(lst))
        def mul(a, b): return a * b
        def imul(a, b): a *= b
        self.assertRaises((MemoryError, OverflowError), mul, lst, n)
        self.assertRaises((MemoryError, OverflowError), imul, lst, n)

    def test_repr_large(self):
        # Check the repr of large list objects
        def check(n):
            l = [0] * n
            s = repr(l)
            self.assertEqual(s,
                '[' + ', '.join(['0'] * n) + ']')
        check(10)       # check our checking code
        check(1000000)

    def test_iterator_pickle(self):
        # Userlist iterators don't support pickling yet since
        # they are based on generators.
        data = self.type2test([4, 5, 6, 7])
        it = itorg = iter(data)
        d = pickle.dumps(it)
        it = pickle.loads(d)
        # Round-trip preserves the iterator type and remaining items.
        self.assertEqual(type(itorg), type(it))
        self.assertEqual(self.type2test(it), self.type2test(data))

        # Pickling a partially-consumed iterator keeps its position.
        it = pickle.loads(d)
        next(it)
        d = pickle.dumps(it)
        self.assertEqual(self.type2test(it), self.type2test(data)[1:])

    def test_reversed_pickle(self):
        data = self.type2test([4, 5, 6, 7])
        it = itorg = reversed(data)
        d = pickle.dumps(it)
        it = pickle.loads(d)
        self.assertEqual(type(itorg), type(it))
        self.assertEqual(self.type2test(it), self.type2test(reversed(data)))

        # Same check after consuming one element.
        it = pickle.loads(d)
        next(it)
        d = pickle.dumps(it)
        self.assertEqual(self.type2test(it), self.type2test(reversed(data))[1:])

    def test_no_comdat_folding(self):
        # Issue 8847: In the PGO build, the MSVC linker's COMDAT folding
        # optimization causes failures in code that relies on distinct
        # function addresses.
        class L(list): pass
        with self.assertRaises(TypeError):
            (3,) + L([1,2])


def test_main(verbose=None):
    """Run the suite; with ``verbose`` also report total refcounts."""
    print('test_main')
    support.run_unittest(ListTest)

    # verify reference counting
    import sys
    if verbose and hasattr(sys, "gettotalrefcount"):
        import gc
        counts = [None] * 5
        for i in range(len(counts)):
            support.run_unittest(ListTest)
            gc.collect()
            counts[i] = sys.gettotalrefcount()
        print(counts)


if __name__ == "__main__":
    test_main(verbose=True)
agpl-3.0
ccnmtl/lettuce
tests/integration/lib/Django-1.3/django/conf/locale/sv/formats.py
232
1365
# -*- encoding: utf-8 -*- # This file is distributed under the same license as the Django package. # # The *_FORMAT strings use the Django date format syntax, # see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = 'j F Y' TIME_FORMAT = 'H:i' DATETIME_FORMAT = 'j F Y H:i' YEAR_MONTH_FORMAT = 'F Y' MONTH_DAY_FORMAT = 'j F' SHORT_DATE_FORMAT = 'Y-m-d' SHORT_DATETIME_FORMAT = 'Y-m-d H:i' FIRST_DAY_OF_WEEK = 1 # The *_INPUT_FORMATS strings use the Python strftime format syntax, # see http://docs.python.org/library/datetime.html#strftime-strptime-behavior DATE_INPUT_FORMATS = ( '%Y-%m-%d', # '2006-10-25' '%m/%d/%Y', # '10/25/2006' '%m/%d/%y', # '10/25/06' ) TIME_INPUT_FORMATS = ( '%H:%M:%S', # '14:30:59' '%H:%M', # '14:30' ) DATETIME_INPUT_FORMATS = ( '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59' '%Y-%m-%d %H:%M', # '2006-10-25 14:30' '%Y-%m-%d', # '2006-10-25' '%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59' '%m/%d/%Y %H:%M', # '10/25/2006 14:30' '%m/%d/%Y', # '10/25/2006' '%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59' '%m/%d/%y %H:%M', # '10/25/06 14:30' '%m/%d/%y', # '10/25/06' ) DECIMAL_SEPARATOR = ',' THOUSAND_SEPARATOR = ' ' NUMBER_GROUPING = 3
gpl-3.0
kikocorreoso/brython
www/src/Lib/unittest/test/test_runner.py
4
36766
import io import os import sys import pickle import subprocess import unittest from unittest.case import _Outcome from unittest.test.support import (LoggingResult, ResultWithNoStartTestRunStopTestRun) def resultFactory(*_): return unittest.TestResult() def getRunner(): return unittest.TextTestRunner(resultclass=resultFactory, stream=io.StringIO()) def runTests(*cases): suite = unittest.TestSuite() for case in cases: tests = unittest.defaultTestLoader.loadTestsFromTestCase(case) suite.addTests(tests) runner = getRunner() # creating a nested suite exposes some potential bugs realSuite = unittest.TestSuite() realSuite.addTest(suite) # adding empty suites to the end exposes potential bugs suite.addTest(unittest.TestSuite()) realSuite.addTest(unittest.TestSuite()) return runner.run(realSuite) def cleanup(ordering, blowUp=False): if not blowUp: ordering.append('cleanup_good') else: ordering.append('cleanup_exc') raise Exception('CleanUpExc') class TestCleanUp(unittest.TestCase): def testCleanUp(self): class TestableTest(unittest.TestCase): def testNothing(self): pass test = TestableTest('testNothing') self.assertEqual(test._cleanups, []) cleanups = [] def cleanup1(*args, **kwargs): cleanups.append((1, args, kwargs)) def cleanup2(*args, **kwargs): cleanups.append((2, args, kwargs)) test.addCleanup(cleanup1, 1, 2, 3, four='hello', five='goodbye') test.addCleanup(cleanup2) self.assertEqual(test._cleanups, [(cleanup1, (1, 2, 3), dict(four='hello', five='goodbye')), (cleanup2, (), {})]) self.assertTrue(test.doCleanups()) self.assertEqual(cleanups, [(2, (), {}), (1, (1, 2, 3), dict(four='hello', five='goodbye'))]) def testCleanUpWithErrors(self): class TestableTest(unittest.TestCase): def testNothing(self): pass test = TestableTest('testNothing') outcome = test._outcome = _Outcome() CleanUpExc = Exception('foo') exc2 = Exception('bar') def cleanup1(): raise CleanUpExc def cleanup2(): raise exc2 test.addCleanup(cleanup1) test.addCleanup(cleanup2) 
self.assertFalse(test.doCleanups()) self.assertFalse(outcome.success) ((_, (Type1, instance1, _)), (_, (Type2, instance2, _))) = reversed(outcome.errors) self.assertEqual((Type1, instance1), (Exception, CleanUpExc)) self.assertEqual((Type2, instance2), (Exception, exc2)) def testCleanupInRun(self): blowUp = False ordering = [] class TestableTest(unittest.TestCase): def setUp(self): ordering.append('setUp') if blowUp: raise Exception('foo') def testNothing(self): ordering.append('test') def tearDown(self): ordering.append('tearDown') test = TestableTest('testNothing') def cleanup1(): ordering.append('cleanup1') def cleanup2(): ordering.append('cleanup2') test.addCleanup(cleanup1) test.addCleanup(cleanup2) def success(some_test): self.assertEqual(some_test, test) ordering.append('success') result = unittest.TestResult() result.addSuccess = success test.run(result) self.assertEqual(ordering, ['setUp', 'test', 'tearDown', 'cleanup2', 'cleanup1', 'success']) blowUp = True ordering = [] test = TestableTest('testNothing') test.addCleanup(cleanup1) test.run(result) self.assertEqual(ordering, ['setUp', 'cleanup1']) def testTestCaseDebugExecutesCleanups(self): ordering = [] class TestableTest(unittest.TestCase): def setUp(self): ordering.append('setUp') self.addCleanup(cleanup1) def testNothing(self): ordering.append('test') def tearDown(self): ordering.append('tearDown') test = TestableTest('testNothing') def cleanup1(): ordering.append('cleanup1') test.addCleanup(cleanup2) def cleanup2(): ordering.append('cleanup2') test.debug() self.assertEqual(ordering, ['setUp', 'test', 'tearDown', 'cleanup1', 'cleanup2']) class TestClassCleanup(unittest.TestCase): def test_addClassCleanUp(self): class TestableTest(unittest.TestCase): def testNothing(self): pass test = TestableTest('testNothing') self.assertEqual(test._class_cleanups, []) class_cleanups = [] def class_cleanup1(*args, **kwargs): class_cleanups.append((3, args, kwargs)) def class_cleanup2(*args, **kwargs): 
class_cleanups.append((4, args, kwargs)) TestableTest.addClassCleanup(class_cleanup1, 1, 2, 3, four='hello', five='goodbye') TestableTest.addClassCleanup(class_cleanup2) self.assertEqual(test._class_cleanups, [(class_cleanup1, (1, 2, 3), dict(four='hello', five='goodbye')), (class_cleanup2, (), {})]) TestableTest.doClassCleanups() self.assertEqual(class_cleanups, [(4, (), {}), (3, (1, 2, 3), dict(four='hello', five='goodbye'))]) def test_run_class_cleanUp(self): ordering = [] blowUp = True class TestableTest(unittest.TestCase): @classmethod def setUpClass(cls): ordering.append('setUpClass') cls.addClassCleanup(cleanup, ordering) if blowUp: raise Exception() def testNothing(self): ordering.append('test') @classmethod def tearDownClass(cls): ordering.append('tearDownClass') runTests(TestableTest) self.assertEqual(ordering, ['setUpClass', 'cleanup_good']) ordering = [] blowUp = False runTests(TestableTest) self.assertEqual(ordering, ['setUpClass', 'test', 'tearDownClass', 'cleanup_good']) def test_debug_executes_classCleanUp(self): ordering = [] class TestableTest(unittest.TestCase): @classmethod def setUpClass(cls): ordering.append('setUpClass') cls.addClassCleanup(cleanup, ordering) def testNothing(self): ordering.append('test') @classmethod def tearDownClass(cls): ordering.append('tearDownClass') suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestableTest) suite.debug() self.assertEqual(ordering, ['setUpClass', 'test', 'tearDownClass', 'cleanup_good']) def test_doClassCleanups_with_errors_addClassCleanUp(self): class TestableTest(unittest.TestCase): def testNothing(self): pass def cleanup1(): raise Exception('cleanup1') def cleanup2(): raise Exception('cleanup2') TestableTest.addClassCleanup(cleanup1) TestableTest.addClassCleanup(cleanup2) with self.assertRaises(Exception) as e: TestableTest.doClassCleanups() self.assertEqual(e, 'cleanup1') def test_with_errors_addCleanUp(self): ordering = [] class TestableTest(unittest.TestCase): @classmethod def 
setUpClass(cls): ordering.append('setUpClass') cls.addClassCleanup(cleanup, ordering) def setUp(self): ordering.append('setUp') self.addCleanup(cleanup, ordering, blowUp=True) def testNothing(self): pass @classmethod def tearDownClass(cls): ordering.append('tearDownClass') result = runTests(TestableTest) self.assertEqual(result.errors[0][1].splitlines()[-1], 'Exception: CleanUpExc') self.assertEqual(ordering, ['setUpClass', 'setUp', 'cleanup_exc', 'tearDownClass', 'cleanup_good']) def test_run_with_errors_addClassCleanUp(self): ordering = [] class TestableTest(unittest.TestCase): @classmethod def setUpClass(cls): ordering.append('setUpClass') cls.addClassCleanup(cleanup, ordering, blowUp=True) def setUp(self): ordering.append('setUp') self.addCleanup(cleanup, ordering) def testNothing(self): ordering.append('test') @classmethod def tearDownClass(cls): ordering.append('tearDownClass') result = runTests(TestableTest) self.assertEqual(result.errors[0][1].splitlines()[-1], 'Exception: CleanUpExc') self.assertEqual(ordering, ['setUpClass', 'setUp', 'test', 'cleanup_good', 'tearDownClass', 'cleanup_exc']) def test_with_errors_in_addClassCleanup_and_setUps(self): ordering = [] class_blow_up = False method_blow_up = False class TestableTest(unittest.TestCase): @classmethod def setUpClass(cls): ordering.append('setUpClass') cls.addClassCleanup(cleanup, ordering, blowUp=True) if class_blow_up: raise Exception('ClassExc') def setUp(self): ordering.append('setUp') if method_blow_up: raise Exception('MethodExc') def testNothing(self): ordering.append('test') @classmethod def tearDownClass(cls): ordering.append('tearDownClass') result = runTests(TestableTest) self.assertEqual(result.errors[0][1].splitlines()[-1], 'Exception: CleanUpExc') self.assertEqual(ordering, ['setUpClass', 'setUp', 'test', 'tearDownClass', 'cleanup_exc']) ordering = [] class_blow_up = True method_blow_up = False result = runTests(TestableTest) self.assertEqual(result.errors[0][1].splitlines()[-1], 
'Exception: ClassExc') self.assertEqual(result.errors[1][1].splitlines()[-1], 'Exception: CleanUpExc') self.assertEqual(ordering, ['setUpClass', 'cleanup_exc']) ordering = [] class_blow_up = False method_blow_up = True result = runTests(TestableTest) self.assertEqual(result.errors[0][1].splitlines()[-1], 'Exception: MethodExc') self.assertEqual(result.errors[1][1].splitlines()[-1], 'Exception: CleanUpExc') self.assertEqual(ordering, ['setUpClass', 'setUp', 'tearDownClass', 'cleanup_exc']) class TestModuleCleanUp(unittest.TestCase): def test_add_and_do_ModuleCleanup(self): module_cleanups = [] def module_cleanup1(*args, **kwargs): module_cleanups.append((3, args, kwargs)) def module_cleanup2(*args, **kwargs): module_cleanups.append((4, args, kwargs)) class Module(object): unittest.addModuleCleanup(module_cleanup1, 1, 2, 3, four='hello', five='goodbye') unittest.addModuleCleanup(module_cleanup2) self.assertEqual(unittest.case._module_cleanups, [(module_cleanup1, (1, 2, 3), dict(four='hello', five='goodbye')), (module_cleanup2, (), {})]) unittest.case.doModuleCleanups() self.assertEqual(module_cleanups, [(4, (), {}), (3, (1, 2, 3), dict(four='hello', five='goodbye'))]) self.assertEqual(unittest.case._module_cleanups, []) def test_doModuleCleanup_with_errors_in_addModuleCleanup(self): module_cleanups = [] def module_cleanup_good(*args, **kwargs): module_cleanups.append((3, args, kwargs)) def module_cleanup_bad(*args, **kwargs): raise Exception('CleanUpExc') class Module(object): unittest.addModuleCleanup(module_cleanup_good, 1, 2, 3, four='hello', five='goodbye') unittest.addModuleCleanup(module_cleanup_bad) self.assertEqual(unittest.case._module_cleanups, [(module_cleanup_good, (1, 2, 3), dict(four='hello', five='goodbye')), (module_cleanup_bad, (), {})]) with self.assertRaises(Exception) as e: unittest.case.doModuleCleanups() self.assertEqual(str(e.exception), 'CleanUpExc') self.assertEqual(unittest.case._module_cleanups, []) def 
test_addModuleCleanup_arg_errors(self): cleanups = [] def cleanup(*args, **kwargs): cleanups.append((args, kwargs)) class Module(object): unittest.addModuleCleanup(cleanup, 1, 2, function='hello') with self.assertRaises(TypeError): unittest.addModuleCleanup(function=cleanup, arg='hello') with self.assertRaises(TypeError): unittest.addModuleCleanup() unittest.case.doModuleCleanups() self.assertEqual(cleanups, [((1, 2), {'function': 'hello'})]) def test_run_module_cleanUp(self): blowUp = True ordering = [] class Module(object): @staticmethod def setUpModule(): ordering.append('setUpModule') unittest.addModuleCleanup(cleanup, ordering) if blowUp: raise Exception('setUpModule Exc') @staticmethod def tearDownModule(): ordering.append('tearDownModule') class TestableTest(unittest.TestCase): @classmethod def setUpClass(cls): ordering.append('setUpClass') def testNothing(self): ordering.append('test') @classmethod def tearDownClass(cls): ordering.append('tearDownClass') TestableTest.__module__ = 'Module' sys.modules['Module'] = Module result = runTests(TestableTest) self.assertEqual(ordering, ['setUpModule', 'cleanup_good']) self.assertEqual(result.errors[0][1].splitlines()[-1], 'Exception: setUpModule Exc') ordering = [] blowUp = False runTests(TestableTest) self.assertEqual(ordering, ['setUpModule', 'setUpClass', 'test', 'tearDownClass', 'tearDownModule', 'cleanup_good']) self.assertEqual(unittest.case._module_cleanups, []) def test_run_multiple_module_cleanUp(self): blowUp = True blowUp2 = False ordering = [] class Module1(object): @staticmethod def setUpModule(): ordering.append('setUpModule') unittest.addModuleCleanup(cleanup, ordering) if blowUp: raise Exception() @staticmethod def tearDownModule(): ordering.append('tearDownModule') class Module2(object): @staticmethod def setUpModule(): ordering.append('setUpModule2') unittest.addModuleCleanup(cleanup, ordering) if blowUp2: raise Exception() @staticmethod def tearDownModule(): ordering.append('tearDownModule2') 
class TestableTest(unittest.TestCase): @classmethod def setUpClass(cls): ordering.append('setUpClass') def testNothing(self): ordering.append('test') @classmethod def tearDownClass(cls): ordering.append('tearDownClass') class TestableTest2(unittest.TestCase): @classmethod def setUpClass(cls): ordering.append('setUpClass2') def testNothing(self): ordering.append('test2') @classmethod def tearDownClass(cls): ordering.append('tearDownClass2') TestableTest.__module__ = 'Module1' sys.modules['Module1'] = Module1 TestableTest2.__module__ = 'Module2' sys.modules['Module2'] = Module2 runTests(TestableTest, TestableTest2) self.assertEqual(ordering, ['setUpModule', 'cleanup_good', 'setUpModule2', 'setUpClass2', 'test2', 'tearDownClass2', 'tearDownModule2', 'cleanup_good']) ordering = [] blowUp = False blowUp2 = True runTests(TestableTest, TestableTest2) self.assertEqual(ordering, ['setUpModule', 'setUpClass', 'test', 'tearDownClass', 'tearDownModule', 'cleanup_good', 'setUpModule2', 'cleanup_good']) ordering = [] blowUp = False blowUp2 = False runTests(TestableTest, TestableTest2) self.assertEqual(ordering, ['setUpModule', 'setUpClass', 'test', 'tearDownClass', 'tearDownModule', 'cleanup_good', 'setUpModule2', 'setUpClass2', 'test2', 'tearDownClass2', 'tearDownModule2', 'cleanup_good']) self.assertEqual(unittest.case._module_cleanups, []) def test_debug_module_executes_cleanUp(self): ordering = [] class Module(object): @staticmethod def setUpModule(): ordering.append('setUpModule') unittest.addModuleCleanup(cleanup, ordering) @staticmethod def tearDownModule(): ordering.append('tearDownModule') class TestableTest(unittest.TestCase): @classmethod def setUpClass(cls): ordering.append('setUpClass') def testNothing(self): ordering.append('test') @classmethod def tearDownClass(cls): ordering.append('tearDownClass') TestableTest.__module__ = 'Module' sys.modules['Module'] = Module suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestableTest) suite.debug() 
self.assertEqual(ordering, ['setUpModule', 'setUpClass', 'test', 'tearDownClass', 'tearDownModule', 'cleanup_good']) self.assertEqual(unittest.case._module_cleanups, []) def test_addClassCleanup_arg_errors(self): cleanups = [] def cleanup(*args, **kwargs): cleanups.append((args, kwargs)) class TestableTest(unittest.TestCase): @classmethod def setUpClass(cls): cls.addClassCleanup(cleanup, 1, 2, function=3, cls=4) with self.assertRaises(TypeError): cls.addClassCleanup(function=cleanup, arg='hello') def testNothing(self): pass with self.assertRaises(TypeError): TestableTest.addClassCleanup() with self.assertRaises(TypeError): unittest.TestCase.addCleanup(cls=TestableTest(), function=cleanup) runTests(TestableTest) self.assertEqual(cleanups, [((1, 2), {'function': 3, 'cls': 4})]) def test_addCleanup_arg_errors(self): cleanups = [] def cleanup(*args, **kwargs): cleanups.append((args, kwargs)) class TestableTest(unittest.TestCase): def setUp(self2): self2.addCleanup(cleanup, 1, 2, function=3, self=4) with self.assertWarns(DeprecationWarning): self2.addCleanup(function=cleanup, arg='hello') def testNothing(self): pass with self.assertRaises(TypeError): TestableTest().addCleanup() with self.assertRaises(TypeError): unittest.TestCase.addCleanup(self=TestableTest(), function=cleanup) runTests(TestableTest) self.assertEqual(cleanups, [((), {'arg': 'hello'}), ((1, 2), {'function': 3, 'self': 4})]) def test_with_errors_in_addClassCleanup(self): ordering = [] class Module(object): @staticmethod def setUpModule(): ordering.append('setUpModule') unittest.addModuleCleanup(cleanup, ordering) @staticmethod def tearDownModule(): ordering.append('tearDownModule') class TestableTest(unittest.TestCase): @classmethod def setUpClass(cls): ordering.append('setUpClass') cls.addClassCleanup(cleanup, ordering, blowUp=True) def testNothing(self): ordering.append('test') @classmethod def tearDownClass(cls): ordering.append('tearDownClass') TestableTest.__module__ = 'Module' sys.modules['Module'] 
= Module result = runTests(TestableTest) self.assertEqual(result.errors[0][1].splitlines()[-1], 'Exception: CleanUpExc') self.assertEqual(ordering, ['setUpModule', 'setUpClass', 'test', 'tearDownClass', 'cleanup_exc', 'tearDownModule', 'cleanup_good']) def test_with_errors_in_addCleanup(self): ordering = [] class Module(object): @staticmethod def setUpModule(): ordering.append('setUpModule') unittest.addModuleCleanup(cleanup, ordering) @staticmethod def tearDownModule(): ordering.append('tearDownModule') class TestableTest(unittest.TestCase): def setUp(self): ordering.append('setUp') self.addCleanup(cleanup, ordering, blowUp=True) def testNothing(self): ordering.append('test') def tearDown(self): ordering.append('tearDown') TestableTest.__module__ = 'Module' sys.modules['Module'] = Module result = runTests(TestableTest) self.assertEqual(result.errors[0][1].splitlines()[-1], 'Exception: CleanUpExc') self.assertEqual(ordering, ['setUpModule', 'setUp', 'test', 'tearDown', 'cleanup_exc', 'tearDownModule', 'cleanup_good']) def test_with_errors_in_addModuleCleanup_and_setUps(self): ordering = [] module_blow_up = False class_blow_up = False method_blow_up = False class Module(object): @staticmethod def setUpModule(): ordering.append('setUpModule') unittest.addModuleCleanup(cleanup, ordering, blowUp=True) if module_blow_up: raise Exception('ModuleExc') @staticmethod def tearDownModule(): ordering.append('tearDownModule') class TestableTest(unittest.TestCase): @classmethod def setUpClass(cls): ordering.append('setUpClass') if class_blow_up: raise Exception('ClassExc') def setUp(self): ordering.append('setUp') if method_blow_up: raise Exception('MethodExc') def testNothing(self): ordering.append('test') @classmethod def tearDownClass(cls): ordering.append('tearDownClass') TestableTest.__module__ = 'Module' sys.modules['Module'] = Module result = runTests(TestableTest) self.assertEqual(result.errors[0][1].splitlines()[-1], 'Exception: CleanUpExc') self.assertEqual(ordering, 
['setUpModule', 'setUpClass', 'setUp', 'test', 'tearDownClass', 'tearDownModule', 'cleanup_exc']) ordering = [] module_blow_up = True class_blow_up = False method_blow_up = False result = runTests(TestableTest) self.assertEqual(result.errors[0][1].splitlines()[-1], 'Exception: CleanUpExc') self.assertEqual(result.errors[1][1].splitlines()[-1], 'Exception: ModuleExc') self.assertEqual(ordering, ['setUpModule', 'cleanup_exc']) ordering = [] module_blow_up = False class_blow_up = True method_blow_up = False result = runTests(TestableTest) self.assertEqual(result.errors[0][1].splitlines()[-1], 'Exception: ClassExc') self.assertEqual(result.errors[1][1].splitlines()[-1], 'Exception: CleanUpExc') self.assertEqual(ordering, ['setUpModule', 'setUpClass', 'tearDownModule', 'cleanup_exc']) ordering = [] module_blow_up = False class_blow_up = False method_blow_up = True result = runTests(TestableTest) self.assertEqual(result.errors[0][1].splitlines()[-1], 'Exception: MethodExc') self.assertEqual(result.errors[1][1].splitlines()[-1], 'Exception: CleanUpExc') self.assertEqual(ordering, ['setUpModule', 'setUpClass', 'setUp', 'tearDownClass', 'tearDownModule', 'cleanup_exc']) def test_module_cleanUp_with_multiple_classes(self): ordering =[] def cleanup1(): ordering.append('cleanup1') def cleanup2(): ordering.append('cleanup2') def cleanup3(): ordering.append('cleanup3') class Module(object): @staticmethod def setUpModule(): ordering.append('setUpModule') unittest.addModuleCleanup(cleanup1) @staticmethod def tearDownModule(): ordering.append('tearDownModule') class TestableTest(unittest.TestCase): def setUp(self): ordering.append('setUp') self.addCleanup(cleanup2) def testNothing(self): ordering.append('test') def tearDown(self): ordering.append('tearDown') class OtherTestableTest(unittest.TestCase): def setUp(self): ordering.append('setUp2') self.addCleanup(cleanup3) def testNothing(self): ordering.append('test2') def tearDown(self): ordering.append('tearDown2') 
TestableTest.__module__ = 'Module' OtherTestableTest.__module__ = 'Module' sys.modules['Module'] = Module runTests(TestableTest, OtherTestableTest) self.assertEqual(ordering, ['setUpModule', 'setUp', 'test', 'tearDown', 'cleanup2', 'setUp2', 'test2', 'tearDown2', 'cleanup3', 'tearDownModule', 'cleanup1']) class Test_TextTestRunner(unittest.TestCase): """Tests for TextTestRunner.""" def setUp(self): # clean the environment from pre-existing PYTHONWARNINGS to make # test_warnings results consistent self.pythonwarnings = os.environ.get('PYTHONWARNINGS') if self.pythonwarnings: del os.environ['PYTHONWARNINGS'] def tearDown(self): # bring back pre-existing PYTHONWARNINGS if present if self.pythonwarnings: os.environ['PYTHONWARNINGS'] = self.pythonwarnings def test_init(self): runner = unittest.TextTestRunner() self.assertFalse(runner.failfast) self.assertFalse(runner.buffer) self.assertEqual(runner.verbosity, 1) self.assertEqual(runner.warnings, None) self.assertTrue(runner.descriptions) self.assertEqual(runner.resultclass, unittest.TextTestResult) self.assertFalse(runner.tb_locals) def test_multiple_inheritance(self): class AResult(unittest.TestResult): def __init__(self, stream, descriptions, verbosity): super(AResult, self).__init__(stream, descriptions, verbosity) class ATextResult(unittest.TextTestResult, AResult): pass # This used to raise an exception due to TextTestResult not passing # on arguments in its __init__ super call ATextResult(None, None, 1) def testBufferAndFailfast(self): class Test(unittest.TestCase): def testFoo(self): pass result = unittest.TestResult() runner = unittest.TextTestRunner(stream=io.StringIO(), failfast=True, buffer=True) # Use our result object runner._makeResult = lambda: result runner.run(Test('testFoo')) self.assertTrue(result.failfast) self.assertTrue(result.buffer) def test_locals(self): runner = unittest.TextTestRunner(stream=io.StringIO(), tb_locals=True) result = runner.run(unittest.TestSuite()) self.assertEqual(True, 
result.tb_locals) def testRunnerRegistersResult(self): class Test(unittest.TestCase): def testFoo(self): pass originalRegisterResult = unittest.runner.registerResult def cleanup(): unittest.runner.registerResult = originalRegisterResult self.addCleanup(cleanup) result = unittest.TestResult() runner = unittest.TextTestRunner(stream=io.StringIO()) # Use our result object runner._makeResult = lambda: result self.wasRegistered = 0 def fakeRegisterResult(thisResult): self.wasRegistered += 1 self.assertEqual(thisResult, result) unittest.runner.registerResult = fakeRegisterResult runner.run(unittest.TestSuite()) self.assertEqual(self.wasRegistered, 1) def test_works_with_result_without_startTestRun_stopTestRun(self): class OldTextResult(ResultWithNoStartTestRunStopTestRun): separator2 = '' def printErrors(self): pass class Runner(unittest.TextTestRunner): def __init__(self): super(Runner, self).__init__(io.StringIO()) def _makeResult(self): return OldTextResult() runner = Runner() runner.run(unittest.TestSuite()) def test_startTestRun_stopTestRun_called(self): class LoggingTextResult(LoggingResult): separator2 = '' def printErrors(self): pass class LoggingRunner(unittest.TextTestRunner): def __init__(self, events): super(LoggingRunner, self).__init__(io.StringIO()) self._events = events def _makeResult(self): return LoggingTextResult(self._events) events = [] runner = LoggingRunner(events) runner.run(unittest.TestSuite()) expected = ['startTestRun', 'stopTestRun'] self.assertEqual(events, expected) def test_pickle_unpickle(self): # Issue #7197: a TextTestRunner should be (un)pickleable. This is # required by test_multiprocessing under Windows (in verbose mode). stream = io.StringIO("foo") runner = unittest.TextTestRunner(stream) for protocol in range(2, pickle.HIGHEST_PROTOCOL + 1): s = pickle.dumps(runner, protocol) obj = pickle.loads(s) # StringIO objects never compare equal, a cheap test instead. 
self.assertEqual(obj.stream.getvalue(), stream.getvalue()) def test_resultclass(self): def MockResultClass(*args): return args STREAM = object() DESCRIPTIONS = object() VERBOSITY = object() runner = unittest.TextTestRunner(STREAM, DESCRIPTIONS, VERBOSITY, resultclass=MockResultClass) self.assertEqual(runner.resultclass, MockResultClass) expectedresult = (runner.stream, DESCRIPTIONS, VERBOSITY) self.assertEqual(runner._makeResult(), expectedresult) def test_warnings(self): """ Check that warnings argument of TextTestRunner correctly affects the behavior of the warnings. """ # see #10535 and the _test_warnings file for more information def get_parse_out_err(p): return [b.splitlines() for b in p.communicate()] opts = dict(stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=os.path.dirname(__file__)) ae_msg = b'Please use assertEqual instead.' at_msg = b'Please use assertTrue instead.' # no args -> all the warnings are printed, unittest warnings only once p = subprocess.Popen([sys.executable, '-E', '_test_warnings.py'], **opts) with p: out, err = get_parse_out_err(p) self.assertIn(b'OK', err) # check that the total number of warnings in the output is correct self.assertEqual(len(out), 12) # check that the numbers of the different kind of warnings is correct for msg in [b'dw', b'iw', b'uw']: self.assertEqual(out.count(msg), 3) for msg in [ae_msg, at_msg, b'rw']: self.assertEqual(out.count(msg), 1) args_list = ( # passing 'ignore' as warnings arg -> no warnings [sys.executable, '_test_warnings.py', 'ignore'], # -W doesn't affect the result if the arg is passed [sys.executable, '-Wa', '_test_warnings.py', 'ignore'], # -W affects the result if the arg is not passed [sys.executable, '-Wi', '_test_warnings.py'] ) # in all these cases no warnings are printed for args in args_list: p = subprocess.Popen(args, **opts) with p: out, err = get_parse_out_err(p) self.assertIn(b'OK', err) self.assertEqual(len(out), 0) # passing 'always' as warnings arg -> all the warnings printed, # 
unittest warnings only once p = subprocess.Popen([sys.executable, '_test_warnings.py', 'always'], **opts) with p: out, err = get_parse_out_err(p) self.assertIn(b'OK', err) self.assertEqual(len(out), 14) for msg in [b'dw', b'iw', b'uw', b'rw']: self.assertEqual(out.count(msg), 3) for msg in [ae_msg, at_msg]: self.assertEqual(out.count(msg), 1) def testStdErrLookedUpAtInstantiationTime(self): # see issue 10786 old_stderr = sys.stderr f = io.StringIO() sys.stderr = f try: runner = unittest.TextTestRunner() self.assertTrue(runner.stream.stream is f) finally: sys.stderr = old_stderr def testSpecifiedStreamUsed(self): # see issue 10786 f = io.StringIO() runner = unittest.TextTestRunner(f) self.assertTrue(runner.stream.stream is f) if __name__ == "__main__": unittest.main()
bsd-3-clause
gromikakao/lge-kernel-gproj
scripts/build-all.py
1250
9474
#! /usr/bin/env python # Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of Code Aurora nor # the names of its contributors may be used to endorse or promote # products derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # Build the kernel for all targets using the Android build environment. # # TODO: Accept arguments to indicate what to build. 
import glob from optparse import OptionParser import subprocess import os import os.path import shutil import sys version = 'build-all.py, version 0.01' build_dir = '../all-kernels' make_command = ["vmlinux", "modules"] make_env = os.environ make_env.update({ 'ARCH': 'arm', 'CROSS_COMPILE': 'arm-none-linux-gnueabi-', 'KCONFIG_NOTIMESTAMP': 'true' }) all_options = {} def error(msg): sys.stderr.write("error: %s\n" % msg) def fail(msg): """Fail with a user-printed message""" error(msg) sys.exit(1) def check_kernel(): """Ensure that PWD is a kernel directory""" if (not os.path.isfile('MAINTAINERS') or not os.path.isfile('arch/arm/mach-msm/Kconfig')): fail("This doesn't seem to be an MSM kernel dir") def check_build(): """Ensure that the build directory is present.""" if not os.path.isdir(build_dir): try: os.makedirs(build_dir) except OSError as exc: if exc.errno == errno.EEXIST: pass else: raise def update_config(file, str): print 'Updating %s with \'%s\'\n' % (file, str) defconfig = open(file, 'a') defconfig.write(str + '\n') defconfig.close() def scan_configs(): """Get the full list of defconfigs appropriate for this tree.""" names = {} for n in glob.glob('arch/arm/configs/[fm]sm[0-9-]*_defconfig'): names[os.path.basename(n)[:-10]] = n for n in glob.glob('arch/arm/configs/qsd*_defconfig'): names[os.path.basename(n)[:-10]] = n for n in glob.glob('arch/arm/configs/apq*_defconfig'): names[os.path.basename(n)[:-10]] = n return names class Builder: def __init__(self, logname): self.logname = logname self.fd = open(logname, 'w') def run(self, args): devnull = open('/dev/null', 'r') proc = subprocess.Popen(args, stdin=devnull, env=make_env, bufsize=0, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) count = 0 # for line in proc.stdout: rawfd = proc.stdout.fileno() while True: line = os.read(rawfd, 1024) if not line: break self.fd.write(line) self.fd.flush() if all_options.verbose: sys.stdout.write(line) sys.stdout.flush() else: for i in range(line.count('\n')): count += 1 
if count == 64: count = 0 print sys.stdout.write('.') sys.stdout.flush() print result = proc.wait() self.fd.close() return result failed_targets = [] def build(target): dest_dir = os.path.join(build_dir, target) log_name = '%s/log-%s.log' % (build_dir, target) print 'Building %s in %s log %s' % (target, dest_dir, log_name) if not os.path.isdir(dest_dir): os.mkdir(dest_dir) defconfig = 'arch/arm/configs/%s_defconfig' % target dotconfig = '%s/.config' % dest_dir savedefconfig = '%s/defconfig' % dest_dir shutil.copyfile(defconfig, dotconfig) devnull = open('/dev/null', 'r') subprocess.check_call(['make', 'O=%s' % dest_dir, '%s_defconfig' % target], env=make_env, stdin=devnull) devnull.close() if not all_options.updateconfigs: build = Builder(log_name) result = build.run(['make', 'O=%s' % dest_dir] + make_command) if result != 0: if all_options.keep_going: failed_targets.append(target) fail_or_error = error else: fail_or_error = fail fail_or_error("Failed to build %s, see %s" % (target, build.logname)) # Copy the defconfig back. if all_options.configs or all_options.updateconfigs: devnull = open('/dev/null', 'r') subprocess.check_call(['make', 'O=%s' % dest_dir, 'savedefconfig'], env=make_env, stdin=devnull) devnull.close() shutil.copyfile(savedefconfig, defconfig) def build_many(allconf, targets): print "Building %d target(s)" % len(targets) for target in targets: if all_options.updateconfigs: update_config(allconf[target], all_options.updateconfigs) build(target) if failed_targets: fail('\n '.join(["Failed targets:"] + [target for target in failed_targets])) def main(): global make_command check_kernel() check_build() configs = scan_configs() usage = (""" %prog [options] all -- Build all targets %prog [options] target target ... 
-- List specific targets %prog [options] perf -- Build all perf targets %prog [options] noperf -- Build all non-perf targets""") parser = OptionParser(usage=usage, version=version) parser.add_option('--configs', action='store_true', dest='configs', help="Copy configs back into tree") parser.add_option('--list', action='store_true', dest='list', help='List available targets') parser.add_option('-v', '--verbose', action='store_true', dest='verbose', help='Output to stdout in addition to log file') parser.add_option('--oldconfig', action='store_true', dest='oldconfig', help='Only process "make oldconfig"') parser.add_option('--updateconfigs', dest='updateconfigs', help="Update defconfigs with provided option setting, " "e.g. --updateconfigs=\'CONFIG_USE_THING=y\'") parser.add_option('-j', '--jobs', type='int', dest="jobs", help="Number of simultaneous jobs") parser.add_option('-l', '--load-average', type='int', dest='load_average', help="Don't start multiple jobs unless load is below LOAD_AVERAGE") parser.add_option('-k', '--keep-going', action='store_true', dest='keep_going', default=False, help="Keep building other targets if a target fails") parser.add_option('-m', '--make-target', action='append', help='Build the indicated make target (default: %s)' % ' '.join(make_command)) (options, args) = parser.parse_args() global all_options all_options = options if options.list: print "Available targets:" for target in configs.keys(): print " %s" % target sys.exit(0) if options.oldconfig: make_command = ["oldconfig"] elif options.make_target: make_command = options.make_target if options.jobs: make_command.append("-j%d" % options.jobs) if options.load_average: make_command.append("-l%d" % options.load_average) if args == ['all']: build_many(configs, configs.keys()) elif args == ['perf']: targets = [] for t in configs.keys(): if "perf" in t: targets.append(t) build_many(configs, targets) elif args == ['noperf']: targets = [] for t in configs.keys(): if "perf" not in t: 
targets.append(t) build_many(configs, targets) elif len(args) > 0: targets = [] for t in args: if t not in configs.keys(): parser.error("Target '%s' not one of %s" % (t, configs.keys())) targets.append(t) build_many(configs, targets) else: parser.error("Must specify a target to build, or 'all'") if __name__ == "__main__": main()
gpl-2.0
lordtangent/arsenalsuite
cpp/lib/PyQt4/examples/tutorial/t10.py
11
4041
#!/usr/bin/env python # PyQt tutorial 10 import sys from PyQt4 import QtCore, QtGui class LCDRange(QtGui.QWidget): valueChanged = QtCore.pyqtSignal(int) def __init__(self, parent=None): super(LCDRange, self).__init__(parent) lcd = QtGui.QLCDNumber(2) lcd.setSegmentStyle(QtGui.QLCDNumber.Filled) self.slider = QtGui.QSlider(QtCore.Qt.Horizontal) self.slider.setRange(0, 99) self.slider.setValue(0) self.slider.valueChanged.connect(lcd.display) self.slider.valueChanged.connect(self.valueChanged) layout = QtGui.QVBoxLayout() layout.addWidget(lcd) layout.addWidget(self.slider) self.setLayout(layout) self.setFocusProxy(self.slider) def value(self): return self.slider.value() def setValue(self, value): self.slider.setValue(value) def setRange(self, minValue, maxValue): if minValue < 0 or maxValue > 99 or minValue > maxValue: QtCore.qWarning("LCDRange::setRange(%d, %d)\n" "\tRange must be 0..99\n" "\tand minValue must not be greater than maxValue" % (minValue, maxValue)) return self.slider.setRange(minValue, maxValue) class CannonField(QtGui.QWidget): angleChanged = QtCore.pyqtSignal(int) forceChanged = QtCore.pyqtSignal(int) def __init__(self, parent=None): super(CannonField, self).__init__(parent) self.currentAngle = 45 self.currentForce = 0 self.setPalette(QtGui.QPalette(QtGui.QColor(250, 250, 200))) self.setAutoFillBackground(True) def angle(self): return self.currentAngle def setAngle(self, angle): if angle < 5: angle = 5 if angle > 70: angle = 70; if self.currentAngle == angle: return self.currentAngle = angle self.update(self.cannonRect()) self.angleChanged.emit(self.currentAngle) def force(self): return self.currentForce def setForce(self, force): if force < 0: force = 0 if self.currentForce == force: return self.currentForce = force; self.forceChanged.emit(self.currentForce) def paintEvent(self, event): painter = QtGui.QPainter(self) painter.setPen(QtCore.Qt.NoPen) painter.setBrush(QtCore.Qt.blue) painter.translate(0, self.height()) painter.drawPie(QtCore.QRect(-35, 
-35, 70, 70), 0, 90 * 16) painter.rotate(-self.currentAngle) painter.drawRect(QtCore.QRect(33, -5, 20, 10)) def cannonRect(self): result = QtCore.QRect(0, 0, 50, 50) result.moveBottomLeft(self.rect().bottomLeft()) return result class MyWidget(QtGui.QWidget): def __init__(self, parent=None): super(MyWidget, self).__init__(parent) quit = QtGui.QPushButton("&Quit") quit.setFont(QtGui.QFont("Times", 18, QtGui.QFont.Bold)) quit.clicked.connect(QtGui.qApp.quit) angle = LCDRange() angle.setRange(5, 70) force = LCDRange() force.setRange(10, 50) cannonField = CannonField() angle.valueChanged.connect(cannonField.setAngle) cannonField.angleChanged.connect(angle.setValue) force.valueChanged.connect(cannonField.setForce) cannonField.forceChanged.connect(force.setValue) leftLayout = QtGui.QVBoxLayout() leftLayout.addWidget(angle) leftLayout.addWidget(force) gridLayout = QtGui.QGridLayout() gridLayout.addWidget(quit, 0, 0) gridLayout.addLayout(leftLayout, 1, 0) gridLayout.addWidget(cannonField, 1, 1, 2, 1) gridLayout.setColumnStretch(1, 10) self.setLayout(gridLayout) angle.setValue(60) force.setValue(25) angle.setFocus() app = QtGui.QApplication(sys.argv) widget = MyWidget() widget.setGeometry(100, 100, 500, 355) widget.show() sys.exit(app.exec_())
gpl-2.0
mstriemer/olympia
src/olympia/devhub/tests/test_utils.py
3
37461
import json import os.path from copy import deepcopy import mock from celery.result import AsyncResult from django.conf import settings from django.test.utils import override_settings from olympia import amo from olympia.amo.tests import TestCase from olympia.addons.models import Addon from olympia.amo.tests import addon_factory, version_factory from olympia.devhub import tasks, utils from olympia.devhub.tasks import annotate_validation_results from olympia.files.models import ( File, FileUpload, FileValidation, ValidationAnnotation) from olympia.versions.models import Version def merge_dicts(base, changes): res = base.copy() res.update(changes) return res class TestValidationComparator(TestCase): SIGNING_SUMMARY = {'high': 0, 'medium': 0, 'low': 0, 'trivial': 0} def setUp(self): super(TestValidationComparator, self).setUp() self.old_msg = {} self.new_msg = {} self.expected_msg = {} def compare(self, old, changes, expected_changes): """Compare two messages, and assert that the expected annotations are present. `old` is a message dict from the previous set of results, `new` is a dict containing the set of property changes between the old message and the new one, and `expected` is the full set of annotations expected to be added to the new message.""" # Clear and update the original dicts so they can be referenced # in the structures passed as arguments. for msg in self.old_msg, self.new_msg, self.expected_msg: msg.clear() self.old_msg.update(old) self.new_msg.update(merge_dicts(old, changes)) if expected_changes is not None: self.expected_msg.update( merge_dicts(self.new_msg, expected_changes)) if ('signing_severity' in self.expected_msg and 'ignore_duplicates' not in self.expected_msg): # The annotator should add an ignore_duplicates key to all # signing-related messages that don't have one. 
if utils.ValidationComparator.message_key(self.expected_msg): self.expected_msg['ignore_duplicates'] = ( utils.ValidationComparator.is_ignorable(self.expected_msg)) results = self.run_comparator(self.old_msg, self.new_msg.copy()) if expected_changes is not None: assert results['messages'] == [self.expected_msg] if 'signing_severity' in self.new_msg: summary = merge_dicts(self.SIGNING_SUMMARY, {self.new_msg['signing_severity']: 1}) summaries = (results['signing_summary'], results['signing_ignored_summary']) # If the message is ignored, we should see it counted only in the # ignored message summary, otherwise we should expect to see it # counted only in the main summary. if self.expected_msg.get('ignored'): assert summaries == (self.SIGNING_SUMMARY, summary) else: assert summaries == (summary, self.SIGNING_SUMMARY) return results def run_comparator(self, old, new): return (utils.ValidationComparator({'messages': [old]}) .compare_results({'messages': [new]})) def test_compare_data(self): """Test that the `compare` merges data as expected.""" A = {'id': ('a', 'b', 'c'), 'file': 'thing.js', 'context': ('x', 'y', 'z'), 'thing': 'stuff'} B = {'thing': 'other_thing', 'foo': 'bar'} C = {'matched': A} A_plus_B = {'id': ('a', 'b', 'c'), 'file': 'thing.js', 'context': ('x', 'y', 'z'), 'thing': 'other_thing', 'foo': 'bar'} FINAL = {'id': ('a', 'b', 'c'), 'file': 'thing.js', 'context': ('x', 'y', 'z'), 'thing': 'other_thing', 'foo': 'bar', 'matched': A} self.compare(A, B, C) assert self.old_msg == A assert self.new_msg == A_plus_B assert self.expected_msg == FINAL self.compare(A, B, {'matched': self.old_msg}) assert self.old_msg == A assert self.new_msg == A_plus_B assert self.expected_msg == FINAL def test_compare_nested_matches(self): """Test that nested matches are not included.""" old = { 'id': ('a', 'b', 'c'), 'file': 'thing.js', 'context': ('x', 'y', 'z'), 'thing': 'stuff', 'matched': {'something': 'else'}, } old_without_matched = old.copy() del 
old_without_matched['matched'] changes = { 'thing': 'other_thing', 'foo': 'bar', } expected_result = { 'id': ('a', 'b', 'c'), 'file': 'thing.js', 'context': ('x', 'y', 'z'), 'thing': 'other_thing', 'foo': 'bar', 'matched': old_without_matched, } results = self.compare(old, changes, expected_changes=None) assert results['messages'] == [expected_result] def test_compare_results(self): """Test that `compare` tests results correctly.""" with mock.patch.object(self, 'run_comparator') as comparator: MSG = {'id': (), 'context': (), 'file': 'file.js', 'signing_severity': 'low'} EXPECTED = {'matched': MSG, 'ignored': True} FINAL = merge_dicts(MSG, EXPECTED) comparator.return_value = { 'messages': [FINAL], 'signing_summary': {'low': 0, 'medium': 0, 'high': 0, 'trivial': 0}, 'signing_ignored_summary': {'low': 1, 'medium': 0, 'high': 0, 'trivial': 0}} # Signing summary with ignored messages: self.compare(MSG, {}, EXPECTED) comparator.return_value['signing_summary']['low'] = 1 try: self.compare(MSG, {}, EXPECTED) except AssertionError: pass else: assert False, 'Bad signing summary passed.' comparator.return_value['signing_summary']['low'] = 0 comparator.return_value['signing_ignored_summary']['low'] = 0 try: self.compare(MSG, {}, EXPECTED) except AssertionError: pass else: assert False, 'Bad ignored signing summary passed.' # Signing summary without ignored messages: CHANGES = {'id': ('a', 'b', 'c')} FINAL = merge_dicts(MSG, CHANGES) comparator.return_value['messages'] = [FINAL] comparator.return_value['signing_summary']['low'] = 1 self.compare(MSG, CHANGES, {}) comparator.return_value['signing_summary']['low'] = 0 try: self.compare(MSG, CHANGES, {}) except AssertionError: pass else: assert False, 'Bad signing summary passed.' comparator.return_value['signing_summary']['low'] = 1 comparator.return_value['signing_ignored_summary']['low'] = 1 try: self.compare(MSG, CHANGES, {}) except AssertionError: pass else: assert False, 'Bad ignored signing summary passed.' 
def test_matching_message(self): """Test the behavior of matching messages.""" # Low severity messages are ignored unless flagged as not ignorable. for severity in 'low', 'trivial': self.compare({'id': ('a', 'b', 'c'), 'signing_severity': severity, 'context': ('x', 'y', 'z'), 'file': 'foo.js'}, {}, {'ignored': True, 'matched': self.old_msg}) self.compare({'id': ('a', 'b', 'c'), 'signing_severity': severity, 'ignore_duplicates': False, 'context': ('x', 'y', 'z'), 'file': 'foo.js'}, {}, {'ignored': False, 'matched': self.old_msg}) # Other severities are ignored only when flagged as ignorable. for severity in 'medium', 'high': self.compare({'id': ('a', 'b', 'c'), 'signing_severity': severity, 'context': ('x', 'y', 'z'), 'file': 'foo.js'}, {}, {'ignored': False, 'matched': self.old_msg}) self.compare({'id': ('a', 'b', 'c'), 'signing_severity': severity, 'ignore_duplicates': True, 'context': ('x', 'y', 'z'), 'file': 'foo.js'}, {}, {'ignored': True, 'matched': self.old_msg}) # Check that unchecked properties don't matter. self.compare({'id': ('a', 'b', 'c'), 'signing_severity': 'low', 'context': ('x', 'y', 'z'), 'file': 'foo.js'}, {'thing': 'stuff'}, {'ignored': True, 'matched': self.old_msg}) # Non-signing messages should be matched, but not annotated for # ignorability. self.compare({'id': ('a', 'b', 'c'), 'context': ('x', 'y', 'z'), 'file': 'foo.js'}, {}, {'matched': self.old_msg}) def test_non_matching_messages(self): """Test that messages which should not match, don't.""" message = {'id': ('a', 'b', 'c'), 'signing_severity': 'low', 'context': ('x', 'y', 'z'), 'context_data': {'function': 'foo_bar'}, 'file': 'foo.js'} self.compare(message, {'id': ('d', 'e', 'f')}, {}) self.compare(message, {'signing_severity': 'high'}, {}) self.compare(message, {'context': ('a', 'b', 'c')}, {}) self.compare(message, {'context_data': {}}, {}) self.compare(message, {'file': 'these-are-not-the-droids.js'}, {}) # No context in the old message. 
msg = merge_dicts(message, {'context': None}) self.compare(msg, {'context': ('a', 'b', 'c')}, {}) self.compare(msg, {'context': None}, {}) del msg['context'] self.compare(msg, {'context': ('a', 'b', 'c')}, {}) self.compare(msg, {'context': None}, {}) # No signing severity in the old message. msg = merge_dicts(message, {'signing_severity': None}) self.compare(msg, {'signing_severity': 'low'}, {}) del msg['signing_severity'] self.compare(msg, {'signing_severity': 'low'}, {}) # Token non-signing message. self.compare({'id': ('a', 'b', 'c'), 'context': ('x', 'y', 'z'), 'file': 'foo.js'}, {'id': ()}, {}) def test_file_tuples(self): """Test that messages with file tuples, rather than strings, are treated correctly.""" file_tuple = (u'thing.jar', u'foo.js') file_list = list(file_tuple) file_string = u'/'.join(file_tuple) message = {'id': ('a', 'b', 'c'), 'signing_severity': 'low', 'context': ('x', 'y', 'z'), 'context_data': {'function': 'foo_bar'}, 'file': file_tuple} matches = {'ignored': True, 'matched': self.old_msg} # Tuple, no changes, matches. self.compare(message, {}, matches) self.compare(message, {'file': file_list}, matches) self.compare(message, {'file': file_string}, matches) # Changes, fails. self.compare(message, {'file': 'foo thing.js'}, {}) # List, no changes, matches. message['file'] = file_list self.compare(message, {}, matches) self.compare(message, {'file': file_list}, matches) self.compare(message, {'file': file_string}, matches) # Changes, fails. self.compare(message, {'file': 'foo thing.js'}, {}) # String, no changes, matches. message['file'] = file_string self.compare(message, {}, matches) self.compare(message, {'file': file_list}, matches) self.compare(message, {'file': file_string}, matches) # Changes, fails. 
self.compare(message, {'file': 'foo thing.js'}, {}) def test_json_deserialization(self): """Test that the JSON deserializer returns the expected hashable objects.""" assert (utils.json_decode('["foo", ["bar", "baz"], 12, null, ' '[], false]') == ('foo', ('bar', 'baz'), 12, None, (), False)) def test_annotate_results(self): """Test that results are annotated as expected.""" RESULTS = deepcopy(amo.VALIDATOR_SKELETON_RESULTS) RESULTS['messages'] = [ {'id': ['foo', 'bar'], 'context': ['foo', 'bar', 'baz'], 'file': 'foo', 'signing_severity': 'low'}, {'id': ['a', 'b'], 'context': ['c', 'd', 'e'], 'file': 'f', 'ignore_duplicates': False, 'signing_severity': 'high'}, {'id': ['z', 'y'], 'context': ['x', 'w', 'v'], 'file': 'u', 'signing_severity': 'high'}, ] HASH = 'xxx' def annotation(hash_, message, **kw): """Create a ValidationAnnotation object for the given file hash, and the key of the given message, with the given keywords.""" key = utils.ValidationComparator.message_key(message) return ValidationAnnotation( file_hash=hash_, message_key=json.dumps(key), **kw) # Create two annotations for this file, and one for a message in this # file, but with the wrong hash. ValidationAnnotation.objects.bulk_create(( annotation(HASH, RESULTS['messages'][0], ignore_duplicates=False), annotation(HASH, RESULTS['messages'][1], ignore_duplicates=True), annotation('zzz', RESULTS['messages'][2], ignore_duplicates=True), )) # Annote a copy of the results. annotated = deepcopy(RESULTS) utils.ValidationComparator(annotated).annotate_results(HASH) # The two annotations for this file should be applied. assert annotated['messages'][0]['ignore_duplicates'] is False assert annotated['messages'][1]['ignore_duplicates'] is True # The annotation for the wrong file should not be applied, and # `ignore_duplicates` should be set to the default for the messge # severity (false). 
assert annotated['messages'][2]['ignore_duplicates'] is False def test_is_ignorable(self): """Test that is_ignorable returns the correct value in all relevant circumstances.""" MESSAGE = {'id': ['foo', 'bar', 'baz'], 'message': 'Foo', 'description': 'Foo', 'context': ['foo', 'bar', 'baz'], 'file': 'foo.js', 'line': 1} IGNORABLE_TYPES = ('notice', 'warning') OTHER_TYPES = ('error',) IGNORABLE_SEVERITIES = ('trivial', 'low') OTHER_SEVERITIES = ('medium', 'high') def is_ignorable(**kw): """Return true if the base message with the given keyword overrides is ignorable.""" msg = merge_dicts(MESSAGE, kw) return utils.ValidationComparator.is_ignorable(msg) # Non-ignorable types are not ignorable regardless of severity. for type_ in OTHER_TYPES: for severity in IGNORABLE_SEVERITIES + OTHER_SEVERITIES: assert not is_ignorable(signing_severity=severity, type=type_) # Non-ignorable severities are not ignorable regardless of type. for severity in OTHER_SEVERITIES: for type_ in IGNORABLE_TYPES + OTHER_TYPES: assert not is_ignorable(signing_severity=severity, type=type_) # Ignorable types with ignorable severities are ignorable. for severity in IGNORABLE_SEVERITIES: for type_ in IGNORABLE_TYPES: assert is_ignorable(signing_severity=severity, type=type_) class TestValidationAnnotatorBase(TestCase): def setUp(self): # FIXME: Switch to factory_boy. # self.file = FileFactory(version__version='1.0') # self.file_1_1 = FileFactory(version__version='1.1', # version__addon=self.file.version.addon) # self.file_upload = FileUploadFactory(file=XPIFactory( # guid=self.addon.guid, version=self.xpi_version)) # Create File objects for version 1.0 and 1.1. 
self.addon = Addon.objects.create(guid='test-desktop@nowhere', slug='test-amo-addon') self.version = Version.objects.create(version='1.0', addon=self.addon) self.file = File.objects.create(filename='desktop.xpi', version=self.version, status=amo.STATUS_PUBLIC) self.version_1_1 = Version.objects.create(version='1.1', addon=self.addon) self.file_1_1 = File.objects.create(filename='desktop.xpi', version=self.version_1_1) # Creating the files and versions above resets this. self.addon.update(status=amo.STATUS_PUBLIC) # Create a FileUpload object for an XPI containing version 1.1. path = os.path.join(settings.ROOT, 'src/olympia/devhub/tests/addons/desktop.xpi') self.file_upload = FileUpload.objects.create(path=path) self.xpi_version = '1.1' # Patch validation tasks that we expect the annotator to call. self.patchers = [] self.save_file = self.patch( 'olympia.devhub.tasks.handle_file_validation_result').subtask self.save_upload = self.patch( 'olympia.devhub.tasks.handle_upload_validation_result').subtask self.validate_file = self.patch( 'olympia.devhub.tasks.validate_file').subtask self.validate_upload = self.patch( 'olympia.devhub.tasks.validate_file_path').subtask def patch(self, thing): """Patch the given "thing", and revert the patch on test teardown.""" patcher = mock.patch(thing) self.addCleanup(patcher.stop) return patcher.start() def check_upload(self, file_, listed=True): """Check that our file upload is matched to the given file.""" # Create an annotator, make sure it matches the expected older file. va = utils.ValidationAnnotator(self.file_upload, listed=listed) assert va.prev_file == file_ # Make sure we run the correct validation task for the matched file, # if there is a match. if file_: self.validate_file.assert_called_once_with( [file_.pk], {'hash_': file_.original_hash, 'is_webextension': False}) else: assert not self.validate_file.called # Make sure we run the correct validation task for the upload. 
self.validate_upload.assert_called_once_with( [self.file_upload.path], {'hash_': self.file_upload.hash, 'listed': listed, 'is_webextension': False}) # Make sure we run the correct save validation task, with a # fallback error handler. channel = (amo.RELEASE_CHANNEL_LISTED if listed else amo.RELEASE_CHANNEL_UNLISTED) self.save_upload.assert_has_calls([ mock.call([mock.ANY, self.file_upload.pk, channel], {'annotate': False}, immutable=True), mock.call([self.file_upload.pk, channel], link_error=mock.ANY)]) def check_file(self, file_new, file_old): """Check that the given new file is matched to the given old file.""" # Create an annotator, make sure it matches the expected older file. va = utils.ValidationAnnotator(file_new) assert va.prev_file == file_old # We shouldn't be attempting to validate a bare upload. assert not self.validate_upload.called # Make sure we run the correct validation tasks for both files, # or only one validation task if there's no match. if file_old: self.validate_file.assert_has_calls([ mock.call([file_new.pk], { 'hash_': file_new.original_hash, 'is_webextension': False}), mock.call([file_old.pk], { 'hash_': file_old.original_hash, 'is_webextension': False}) ]) else: self.validate_file.assert_called_once_with( [file_new.pk], {'hash_': file_new.original_hash, 'is_webextension': False}) # Make sure we run the correct save validation task, with a # fallback error handler. 
self.save_file.assert_has_calls([ mock.call([mock.ANY, file_new.pk, file_new.version.channel], {'annotate': False}, immutable=True), mock.call([file_new.pk, file_new.version.channel], link_error=mock.ANY)]) class TestValidationAnnotatorUnlisted(TestValidationAnnotatorBase): def setUp(self): super(TestValidationAnnotatorUnlisted, self).setUp() self.make_addon_unlisted(self.addon) self.file_1_1.reload() self.file.reload() def test_find_fileupload_prev_version(self): """Test that the correct previous version is found for a new upload.""" self.check_upload(self.file, listed=False) def test_find_file_prev_version(self): """Test that the correct previous version is found for a File.""" self.check_file(self.file_1_1, self.file) def test_find_future_fileupload_version(self): """Test that a future version will not be matched.""" self.version.update(version='1.2') self.check_upload(None, listed=False) def test_find_future_file(self): """Test that a future version will not be matched.""" self.version.update(version='1.2') self.check_file(self.file_1_1, None) def test_update_annotations(self): """Test that annotations are correctly copied from an old file to a new one.""" HASH_0 = 'xxx' HASH_1 = 'yyy' RESULTS = deepcopy(amo.VALIDATOR_SKELETON_RESULTS) RESULTS['messages'] = [ {'id': ['foo'], 'context': ['foo'], 'file': 'foo'}, {'id': ['baz'], 'context': ['baz'], 'file': 'baz'}, ] self.file.update(original_hash=HASH_0) self.file_1_1.update(original_hash=HASH_1) # Attach the validation results to our previous version's file, # and update the object's cached foreign key value. 
self.file.validation = FileValidation.objects.create( file=self.file_1_1, validation=json.dumps(RESULTS)) def annotation(hash_, key, **kw): return ValidationAnnotation(file_hash=hash_, message_key=key, **kw) def key(metasyntatic_variable): """Return an arbitrary, but valid, message key for the given arbitrary string.""" return '[["{0}"], ["{0}"], "{0}", null, false]'.format( metasyntatic_variable) # Create two annotations which match the above messages, and # one which does not. ValidationAnnotation.objects.bulk_create(( annotation(HASH_0, key('foo'), ignore_duplicates=True), annotation(HASH_0, key('bar'), ignore_duplicates=True), annotation(HASH_0, key('baz'), ignore_duplicates=False), )) # Create the annotator and make sure it links our target # file to the previous version. annotator = utils.ValidationAnnotator(self.file_1_1) assert annotator.prev_file == self.file annotator.update_annotations() # The two annotations which match messages in the above # validation results should be duplicated for this version. # The third annotation should not. assert (set(ValidationAnnotation.objects.filter(file_hash=HASH_1) .values_list('message_key', 'ignore_duplicates')) == set(((key('foo'), True), (key('baz'), False)))) class TestValidationAnnotatorListed(TestValidationAnnotatorBase): def test_full_to_full_fileupload(self): """Test that a full reviewed version is matched to the nearest full reviewed version.""" self.version_1_1.update(version='1.0.1') self.file_1_1.update(status=amo.STATUS_PUBLIC) self.check_upload(self.file_1_1) def test_full_to_unreviewed(self): """Test that a full reviewed version is not matched to an unreviewed version.""" self.file_1_1.update(status=amo.STATUS_AWAITING_REVIEW) self.check_upload(self.file) # We can't prevent matching against beta versions # until we change the file upload process to allow flagging # beta versions prior to validation. 
def test_full_to_full_file(self): """Test that a full reviewed version is matched to the nearest full reviewed version.""" self.file_1_1.update(status=amo.STATUS_PUBLIC) self.check_file(self.file_1_1, self.file) for status in amo.STATUS_AWAITING_REVIEW, amo.STATUS_BETA: self.validate_file.reset_mock() self.save_file.reset_mock() self.file.update(status=status) self.check_file(self.file_1_1, None) @mock.patch('olympia.devhub.utils.chain') def test_run_once_per_file(self, chain): """Tests that only a single validation task is run for a given file.""" task = mock.Mock() chain.return_value = task task.delay.return_value = mock.Mock(task_id='42') assert isinstance(tasks.validate(self.file), mock.Mock) assert task.delay.call_count == 1 assert isinstance(tasks.validate(self.file), AsyncResult) assert task.delay.call_count == 1 assert isinstance(tasks.validate(self.file_1_1), mock.Mock) assert task.delay.call_count == 2 @mock.patch('olympia.devhub.utils.chain') def test_run_once_file_upload(self, chain): """Tests that only a single validation task is run for a given file upload.""" task = mock.Mock() chain.return_value = task task.delay.return_value = mock.Mock(task_id='42') assert isinstance( tasks.validate(self.file_upload, listed=True), mock.Mock) assert task.delay.call_count == 1 assert isinstance( tasks.validate(self.file_upload, listed=True), AsyncResult) assert task.delay.call_count == 1 def test_cache_key(self): """Tests that the correct cache key is generated for a given object.""" assert (utils.ValidationAnnotator(self.file).cache_key == 'validation-task:files.File:{0}:None'.format(self.file.pk)) assert (utils.ValidationAnnotator(self.file_upload, listed=False) .cache_key == 'validation-task:files.FileUpload:{0}:False'.format( self.file_upload.pk)) @mock.patch('olympia.devhub.utils.parse_addon') def test_search_plugin(self, parse_addon): """Test that search plugins are handled correctly.""" parse_addon.return_value = {'guid': None, 'version': '20140103'} addon = 
addon_factory(type=amo.ADDON_SEARCH, version_kw={'version': '20140101'}) assert addon.guid is None self.check_upload(None) self.validate_upload.reset_mock() self.save_file.reset_mock() version = version_factory(addon=addon, version='20140102') self.check_file(version.files.get(), None) class TestValidationAnnotatorBeta(TestValidationAnnotatorBase): def setUp(self): super(TestValidationAnnotatorBeta, self).setUp() self.xpi_version = '1.1b1' parse_addon = self.patch('olympia.devhub.utils.parse_addon') parse_addon.return_value = {'version': self.xpi_version, 'guid': self.addon.guid} def test_match_beta_to_release(self): """Test that a beta submission is matched to the latest approved release version.""" self.check_upload(self.file) def test_match_beta_to_signed_beta(self): """Test that a beta submission is matched to a prior signed beta version.""" self.file_1_1.update(status=amo.STATUS_BETA, is_signed=True) self.version_1_1.update(version='1.1b0') self.check_upload(self.file_1_1) def test_match_beta_to_unsigned_beta(self): """Test that a beta submission is not matched to a prior unsigned beta version.""" self.file_1_1.update(status=amo.STATUS_BETA) self.version_1_1.update(version='1.1b0') self.check_upload(self.file) # This is technically in tasks at the moment, but may make more sense as a # class method of ValidationAnnotator in the future. 
class TestAnnotateValidation(TestCase):
    """Test the `annotate_validation_results` task."""

    # Minimal validation result with a single low-severity signing message;
    # tests deepcopy this rather than mutating it (see get_validation).
    VALIDATION = {
        'messages': [{'id': ('a', 'b', 'c'),
                      'signing_severity': 'low',
                      'context': ('a', 'b', 'c'),
                      'file': 'foo.js'}]
    }

    def get_validation(self):
        """Return a safe-to-mutate, skeleton validation result set."""
        return deepcopy(self.VALIDATION)

    def test_multiple_validations(self):
        """Test that multiple validations, to be merged by
        ValidationComparator, work."""
        result = annotate_validation_results((self.get_validation(),
                                              self.get_validation()))
        # Identical old/new messages match each other, so the merged message
        # carries a 'matched' copy of the original.
        assert (result['messages'][0]['matched'] ==
                self.VALIDATION['messages'][0])

    def test_single_validation(self):
        """Test that passing a single validation result works."""
        result = annotate_validation_results(self.get_validation())
        assert (result['messages'][0]['id'] ==
                self.VALIDATION['messages'][0]['id'])

    def test_signing_summary_added(self):
        """Test that if a signing summary is missing, an empty one is
        added."""
        # Guard: the fixture itself must not already carry a summary.
        assert 'signing_summary' not in self.VALIDATION
        result = annotate_validation_results(self.get_validation())
        assert (result['signing_summary'] ==
                {'high': 0, 'medium': 0, 'low': 0, 'trivial': 0})

    def test_passed_based_on_signing_summary(self):
        """Test that the 'passed_auto_validation' flag is correctly added,
        based on signing summary."""
        # No severities at all -> passes.
        result = annotate_validation_results(self.get_validation())
        assert result['passed_auto_validation'] is True

        # An explicit non-zero 'low' count -> fails.
        validation = self.get_validation()
        validation['signing_summary'] = {'high': 0, 'medium': 0, 'low': 1,
                                         'trivial': 0}
        result = annotate_validation_results(validation)
        assert result['passed_auto_validation'] is False

        # Two identical validations: the low-severity message matches and is
        # ignored, so it counts only in the ignored summary and the run still
        # passes.
        result = annotate_validation_results((self.get_validation(),
                                              self.get_validation()))
        assert result['passed_auto_validation'] is True
        assert (result['signing_summary'] ==
                {'high': 0, 'medium': 0, 'low': 0, 'trivial': 0})
        assert (result['signing_ignored_summary'] ==
                {'high': 0, 'medium': 0, 'low': 1, 'trivial': 0})


class TestLimitValidationResults(TestCase):
    """Test that higher priority messages are truncated last."""

    def make_validation(self, types):
        """Take a list of error types or signing severities and make a
        validation results dict.

        Each entry in `types` is either a message type ('error', 'warning',
        'notice') or a signing severity ('low', 'medium', 'high'); a severity
        entry produces a 'warning' message annotated with that severity.
        """
        validation = {
            'messages': [],
            'errors': 0,
            'warnings': 0,
            'notices': 0,
        }
        severities = ['low', 'medium', 'high']
        for type_ in types:
            if type_ in severities:
                severity = type_
                type_ = 'warning'
            else:
                severity = None
            # Bump the matching counter ('errors'/'warnings'/'notices').
            validation[type_ + 's'] += 1
            validation['messages'].append({'type': type_})
            if severity is not None:
                validation['messages'][-1]['signing_severity'] = severity
        return validation

    @override_settings(VALIDATOR_MESSAGE_LIMIT=2)
    def test_errors_are_first(self):
        # 4 messages, limit 2: both errors survive, plus a leading
        # "truncated" notice message.
        validation = self.make_validation(
            ['error', 'warning', 'notice', 'error'])
        utils.limit_validation_results(validation)
        limited = validation['messages']
        assert len(limited) == 3
        assert '2 messages were truncated' in limited[0]['message']
        assert limited[1]['type'] == 'error'
        assert limited[2]['type'] == 'error'

    @override_settings(VALIDATOR_MESSAGE_LIMIT=3)
    def test_signing_severity_comes_second(self):
        # After errors, the signing-severity-annotated warning outranks the
        # plain warnings/notices for the remaining slot.
        validation = self.make_validation(
            ['error', 'warning', 'medium', 'notice', 'warning', 'error'])
        utils.limit_validation_results(validation)
        limited = validation['messages']
        assert len(limited) == 4
        assert '3 messages were truncated' in limited[0]['message']
        assert limited[1]['type'] == 'error'
        assert limited[2]['type'] == 'error'
        assert limited[3]['type'] == 'warning'
        assert limited[3]['signing_severity'] == 'medium'


class TestFixAddonsLinterOutput(TestCase):
    """Test conversion of addons-linter output to the legacy validator
    result shape."""

    def test_fix_output(self):
        # Raw addons-linter output: separate errors/notices/warnings lists
        # plus a summary, as emitted by the JS linter.
        original_output = {
            'count': 4,
            'summary': {
                'errors': 0,
                'notices': 0,
                'warnings': 4
            },
            'metadata': {
                'manifestVersion': 2,
                'name': 'My Dogs New Tab',
                'type': 1,
                'version': '2.13.15',
                'architecture': 'extension',
                'emptyFiles': [],
                'jsLibs': {
                    'lib/vendor/jquery.js': 'jquery.2.1.4.jquery.js'
                }
            },
            'errors': [],
            'notices': [],
            'warnings': [
                {
                    '_type': 'warning',
                    'code': 'MANIFEST_PERMISSIONS',
                    'message': '/permissions: Unknown permissions ...',
                    'description': 'See https://mzl.la/1R1n1t0 ...',
                    'file': 'manifest.json'
                },
                {
                    '_type': 'warning',
                    'code': 'MANIFEST_PERMISSIONS',
                    'message': '/permissions: Unknown permissions ...',
                    'description': 'See https://mzl.la/1R1n1t0 ....',
                    'file': 'manifest.json'
                },
                {
                    '_type': 'warning',
                    'code': 'MANIFEST_CSP',
                    'message': '\'content_security_policy\' is ...',
                    'description': 'A custom content_security_policy ...'
                },
                {
                    '_type': 'warning',
                    'code': 'NO_DOCUMENT_WRITE',
                    'message': 'Use of document.write strongly discouraged.',
                    'description': 'document.write will fail in...',
                    'column': 13,
                    'file': 'lib/vendor/knockout.js',
                    'line': 5449
                }
            ]
        }

        fixed = utils.fix_addons_linter_output(original_output)

        # The fixed output gains the fields the old validator contract
        # requires: flat 'messages' with uid/id/type/tier, compatibility and
        # signing summaries, an ending tier, and 'identified_files' derived
        # from the linter's 'jsLibs' metadata.
        assert fixed['success']
        assert fixed['warnings'] == 4
        assert 'uid' in fixed['messages'][0]
        assert 'id' in fixed['messages'][0]
        assert 'type' in fixed['messages'][0]
        assert fixed['messages'][0]['tier'] == 1
        assert fixed['compatibility_summary'] == {
            'warnings': 0,
            'errors': 0,
            'notices': 0,
        }
        assert fixed['ending_tier'] == 5
        assert fixed['signing_summary'] == {
            'low': 0, 'medium': 0, 'high': 0, 'trivial': 0
        }
        assert fixed['metadata']['identified_files'] == {
            'lib/vendor/jquery.js': {'path': 'jquery.2.1.4.jquery.js'}
        }
bsd-3-clause
fake-name/ReadableWebProxy
WebMirror/management/rss_parser_funcs/feed_parse_extractMojoTranslations.py
1
1942
def extractMojoTranslations(item):
    """Parse a 'Mojo Translations' feed item into a release message.

    Returns None for items that should be discarded outright, a release
    message (via buildReleaseMessageWithType) when a known series tag is
    present, and False when no series tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])

    # Guard clauses: drop items with no chapter/volume/fragment info,
    # previews, synopsis posts, and the (apparently manga) 'Air Master' tag.
    if not (chp or vol or frag):
        return None
    if 'preview' in item['title'].lower():
        return None
    if 'Synopsis - Summaries' in item['tags']:
        return None
    if 'Air Master' in item['tags']:
        return None

    # Feed tag -> (canonical series name, translation type).  Checked in
    # order; the first matching tag wins.
    series_for_tag = (
        ('Duke\'s Daughter and Knight Captain(62)',
         'The Duke\'s Daughter Is the Knight Captain\'s (62) Young Wife',
         'translated'),
        ('Drop!!',
         'Drop!! ~A Tale of the Fragrance Princess~',
         'translated'),
        ('Heibon na Watashi wa Tonikaku Hibon',
         'Heibon na Watashi wa Tonikaku Hibon',
         'translated'),
        ('Around 30',
         'I am the Newly Born Woman of Around Thirty',
         'translated'),
        ('The corner is fine. Please don\'t mind me',
         'The corner is fine. Please don\'t mind me',
         'translated'),
        ('Isekai de Mofumofu Nadenade Suru Tame ni Ganbattemasu',
         'Isekai de Mofumofu Nadenade Suru Tame ni Ganbattemasu',
         'translated'),
        # Alias spelling of the same series as above.
        ('Isekai de Mofumofu Nadenade no Tame ni Ganbattemasu',
         'Isekai de Mofumofu Nadenade Suru Tame ni Ganbattemasu',
         'translated'),
        ('Yankee wa Isekai de Seirei ni Aisaremasu',
         'Yankee wa Isekai de Seirei ni Aisaremasu',
         'translated'),
        ('Akuyaku Reijou wa Danna-sama wo Yasesasetai',
         'Akuyaku Reijou wa Danna-sama wo Yasesasetai',
         'translated'),
    )

    for tag, series_name, release_type in series_for_tag:
        if tag in item['tags']:
            return buildReleaseMessageWithType(
                item, series_name, vol, chp,
                frag=frag, postfix=postfix, tl_type=release_type)

    return False
bsd-3-clause
ds-hwang/chromium-crosswalk
tools/perf/benchmarks/tab_switching.py
5
3903
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from core import perf_benchmark

from measurements import tab_switching
import page_sets
from telemetry import benchmark

# Every benchmark in this module records the same metric
# (MPArch.RWH_TabSwitchPaintDuration); the variants differ only in which
# page set is opened and, for some, how many times the page set repeats.


@benchmark.Enabled('has tabs')
@benchmark.Disabled('android')  # http://crbug.com/460084
class TabSwitchingTop10(perf_benchmark.PerfBenchmark):
  """This test records the MPArch.RWH_TabSwitchPaintDuration histogram.

  The histogram is a measure of the time between when a tab was requested
  to be shown, and when first paint occurred. The script opens 10 pages in
  different tabs, waits for them to load, and then switches to each tab
  and records the metric. The pages were chosen from Alexa top ranking
  sites.
  """
  test = tab_switching.TabSwitching
  page_set = page_sets.Top10PageSet

  @classmethod
  def Name(cls):
    return 'tab_switching.top_10'


@benchmark.Enabled('has tabs')
@benchmark.Disabled('android')  # http://crbug.com/460084
class TabSwitchingTypical25(perf_benchmark.PerfBenchmark):
  """This test records the MPArch.RWH_TabSwitchPaintDuration histogram.

  The histogram is a measure of the time between when a tab was requested
  to be shown, and when first paint occurred. The script opens 25 pages in
  different tabs, waits for them to load, and then switches to each tab
  and records the metric. The pages were chosen from Alexa top ranking
  sites.
  """
  test = tab_switching.TabSwitching

  # Builds the story set directly (rather than a page_set attribute) so the
  # no-page-interactions variant of Typical25 is used.
  def CreateStorySet(self, options):
    return page_sets.Typical25PageSet(run_no_page_interactions=True)

  @classmethod
  def Name(cls):
    return 'tab_switching.typical_25'


@benchmark.Disabled('android')  # http://crbug.com/460084
@benchmark.Enabled('has tabs')
class TabSwitchingFiveBlankTabs(perf_benchmark.PerfBenchmark):
  """This test records the MPArch.RWH_TabSwitchPaintDuration histogram.

  The histogram is a measure of the time between when a tab was requested
  to be shown, and when first paint occurred. The script opens 5 blank
  pages in different tabs, waits for them to load, and then switches to
  each tab and records the metric. Blank pages are used to detect
  unnecessary idle wakeups.
  """
  test = tab_switching.TabSwitching
  page_set = page_sets.FiveBlankPagesPageSet
  # Repeat the tiny page set to get a usable sample size.
  options = {'pageset_repeat': 10}

  @classmethod
  def Name(cls):
    return 'tab_switching.five_blank_pages'


@benchmark.Enabled('has tabs')
# http://crbug.com/460084, http://crbug.com/488067
@benchmark.Disabled('android', 'linux')
class TabSwitchingToughEnergyCases(perf_benchmark.PerfBenchmark):
  """This test records the MPArch.RWH_TabSwitchPaintDuration histogram.

  The histogram is a measure of the time between when a tab was requested
  to be shown, and when first paint occurred. The script opens each page
  in a different tab, waits for them to load, and then switches to each
  tab and records the metric. The pages were written by hand to stress
  energy usage.
  """
  test = tab_switching.TabSwitching
  page_set = page_sets.ToughEnergyCasesPageSet
  # Repeat to get a usable sample size.
  options = {'pageset_repeat': 10}

  @classmethod
  def Name(cls):
    return 'tab_switching.tough_energy_cases'


@benchmark.Enabled('has tabs')
@benchmark.Disabled('android')  # http://crbug.com/460084
class TabSwitchingToughImageCases(perf_benchmark.PerfBenchmark):
  """This test records the MPArch.RWH_TabSwitchPaintDuration histogram.

  The histogram is a measure of the time between when a tab was requested
  to be shown, and when first paint occurred. The script opens each page
  in different tabs, waits for them to load, and then switches to each
  tab and records the metric. The pages were chosen by hand to stress the
  image decoding system.
  """
  test = tab_switching.TabSwitching
  page_set = page_sets.ToughImageCasesPageSet

  @classmethod
  def Name(cls):
    return 'tab_switching.tough_image_cases'
bsd-3-clause
FireWRT/OpenWrt-Firefly-Libraries
staging_dir/host/lib/python3.4/idlelib/idle_test/test_textview.py
79
2871
'''Test the functions and main class method of textView.py.

Since all methods and functions create (or destroy) a TextViewer, which
is a widget containing multiple widgets, all tests must be gui tests.
Using mock Text would not change this. Other mocks are used to retrieve
information about calls.

The coverage is essentially 100%.
'''
from test.support import requires
requires('gui')

import unittest
import os
from tkinter import Tk
from idlelib import textView as tv
from idlelib.idle_test.mock_idle import Func
from idlelib.idle_test.mock_tk import Mbox


def setUpModule():
    # One shared Tk root for every test in the module; created/destroyed
    # once because Tk startup is expensive.
    global root
    root = Tk()


def tearDownModule():
    global root
    root.destroy()  # pyflakes falsely sees root as undefined
    del root


class TV(tv.TextViewer):  # used by TextViewTest
    # Mock out the modal-dialog machinery so tests can observe whether it
    # was invoked without actually blocking on a modal window.
    transient = Func()
    grab_set = Func()
    wait_window = Func()


class TextViewTest(unittest.TestCase):
    """Tests for the TextViewer class itself (via the TV mock subclass)."""

    def setUp(self):
        # Reset the shared mock call-records between tests.
        TV.transient.__init__()
        TV.grab_set.__init__()
        TV.wait_window.__init__()

    def test_init_modal(self):
        # Default construction is modal: all three dialog calls happen.
        view = TV(root, 'Title', 'test text')
        self.assertTrue(TV.transient.called)
        self.assertTrue(TV.grab_set.called)
        self.assertTrue(TV.wait_window.called)
        view.Ok()

    def test_init_nonmodal(self):
        # modal=False must skip every modal-dialog call.
        view = TV(root, 'Title', 'test text', modal=False)
        self.assertFalse(TV.transient.called)
        self.assertFalse(TV.grab_set.called)
        self.assertFalse(TV.wait_window.called)
        view.Ok()

    def test_ok(self):
        # Ok() must destroy the window.
        view = TV(root, 'Title', 'test text', modal=False)
        view.destroy = Func()
        view.Ok()
        self.assertTrue(view.destroy.called)
        del view.destroy  # unmask real function
        # NOTE(review): bare attribute access below is a no-op; upstream
        # CPython calls view.destroy() here to clean up the widget — confirm.
        view.destroy


class textviewTest(unittest.TestCase):
    """Tests for the module-level view_text/view_file helpers."""

    @classmethod
    def setUpClass(cls):
        # Replace tkMessageBox with a mock so the missing-file path does not
        # pop up a real dialog.
        cls.orig_mbox = tv.tkMessageBox
        tv.tkMessageBox = Mbox

    @classmethod
    def tearDownClass(cls):
        tv.tkMessageBox = cls.orig_mbox
        del cls.orig_mbox

    def test_view_text(self):
        # If modal True, tkinter will error with 'can't invoke "event" command'
        view = tv.view_text(root, 'Title', 'test text', modal=False)
        self.assertIsInstance(view, tv.TextViewer)

    def test_view_file(self):
        # Viewing this very test file should show its contents.
        test_dir = os.path.dirname(__file__)
        testfile = os.path.join(test_dir, 'test_textview.py')
        view = tv.view_file(root, 'Title', testfile, modal=False)
        self.assertIsInstance(view, tv.TextViewer)
        self.assertIn('Test', view.textView.get('1.0', '1.end'))
        view.Ok()

        # Mock messagebox will be used and view_file will not return anything
        testfile = os.path.join(test_dir, '../notthere.py')
        view = tv.view_file(root, 'Title', testfile, modal=False)
        self.assertIsNone(view)


if __name__ == '__main__':
    unittest.main(verbosity=2)
gpl-2.0
cocosli/antlr4
runtime/Python3/src/antlr4/PredictionContext.py
6
24709
#
# [The "BSD license"]
# Copyright (c) 2012 Terence Parr
# Copyright (c) 2012 Sam Harwell
# Copyright (c) 2014 Eric Vergnaud
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
#    derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#/
from io import StringIO
from antlr4.RuleContext import RuleContext
from antlr4.atn.ATN import ATN
from antlr4.atn.ATNState import ATNState


class PredictionContext(object):
    """Base of the graph-structured stack the ATN simulator uses to model
    rule-invocation contexts (singleton, array and empty variants below)."""

    # Represents {@code $} in local context prediction, which means wildcard.
    # {@code #+x = #}.
    # Assigned an EmptyPredictionContext instance after the subclasses below
    # are defined.
    EMPTY = None

    # Represents {@code $} in an array in full context mode, when {@code $}
    # doesn't mean wildcard: {@code $ + x = [$,x]}. Here,
    # {@code $} = {@link #EMPTY_RETURN_STATE}.
    EMPTY_RETURN_STATE = 0x7FFFFFFF

    globalNodeCount = 1
    id = globalNodeCount

    # Stores the computed hash code of this {@link PredictionContext}. The hash
    # code is computed in parts to match the following reference algorithm.
    #
    # <pre>
    # private int referenceHashCode() {
    #   int hash = {@link MurmurHash#initialize MurmurHash.initialize}({@link #INITIAL_HASH});
    #   for (int i = 0; i &lt; {@link #size()}; i++) {
    #     hash = {@link MurmurHash#update MurmurHash.update}(hash, {@link #getParent getParent}(i));
    #   }
    #   for (int i = 0; i &lt; {@link #size()}; i++) {
    #     hash = {@link MurmurHash#update MurmurHash.update}(hash, {@link #getReturnState getReturnState}(i));
    #   }
    #   hash = {@link MurmurHash#finish MurmurHash.finish}(hash, 2 * {@link #size()});
    #   return hash;
    # }
    # </pre>
    #/
    def __init__(self, cachedHashCode:int):
        self.cachedHashCode = cachedHashCode

    def isEmpty(self):
        # This means only the {@link #EMPTY} context is in set.
        return self is self.EMPTY

    def hasEmptyPath(self):
        return self.getReturnState(len(self) - 1) == self.EMPTY_RETURN_STATE

    def __hash__(self):
        return self.cachedHashCode


def calculateHashCode(parent:PredictionContext, returnState:int):
    # Hash a single (parent, returnState) pair; the list variant is
    # calculateListsHashCode() defined later in this module.
    return hash(str(parent) + str(returnState))


def calculateEmptyHashCode():
    return hash("")


# Used to cache {@link PredictionContext} objects. Its used for the shared
# context cash associated with contexts in DFA states. This cache
# can be used for both lexers and parsers.
class PredictionContextCache(object):

    def __init__(self):
        self.cache = dict()

    # Add a context to the cache and return it. If the context already exists,
    # return that one instead and do not add a new context to the cache.
    # Protect shared cache from unsafe thread access.
    #
    def add(self, ctx:PredictionContext):
        if ctx == PredictionContext.EMPTY:
            return PredictionContext.EMPTY
        existing = self.cache.get(ctx, None)
        if existing is not None:
            return existing
        self.cache[ctx] = ctx
        return ctx

    def get(self, ctx:PredictionContext):
        return self.cache.get(ctx, None)

    def __len__(self):
        return len(self.cache)


class SingletonPredictionContext(PredictionContext):
    """Context with exactly one parent and one return state."""

    @staticmethod
    def create(parent:PredictionContext, returnState:int):
        if returnState == PredictionContext.EMPTY_RETURN_STATE and parent is None:
            # someone can pass in the bits of an array ctx that mean $
            return SingletonPredictionContext.EMPTY
        else:
            return SingletonPredictionContext(parent, returnState)

    def __init__(self, parent:PredictionContext, returnState:int):
        assert returnState != ATNState.INVALID_STATE_NUMBER
        hashCode = calculateHashCode(parent, returnState) if parent is not None else calculateEmptyHashCode()
        super().__init__(hashCode)
        self.parentCtx = parent
        self.returnState = returnState

    def __len__(self):
        return 1

    def getParent(self, index:int):
        assert index == 0
        return self.parentCtx

    def getReturnState(self, index:int):
        assert index == 0
        return self.returnState

    def __eq__(self, other):
        if self is other:
            return True
        elif other is None:
            return False
        elif not isinstance(other, SingletonPredictionContext):
            return False
        elif hash(self) != hash(other):
            return False  # can't be same if hash is different
        else:
            return self.returnState == other.returnState and self.parentCtx == other.parentCtx

    def __hash__(self):
        return self.cachedHashCode

    def __str__(self):
        up = "" if self.parentCtx is None else str(self.parentCtx)
        if len(up) == 0:
            if self.returnState == self.EMPTY_RETURN_STATE:
                return "$"
            else:
                return str(self.returnState)
        else:
            return str(self.returnState) + " " + up


class EmptyPredictionContext(SingletonPredictionContext):
    """The {@code $} wildcard context (no parent, EMPTY_RETURN_STATE)."""

    def __init__(self):
        super().__init__(None, self.EMPTY_RETURN_STATE)

    def isEmpty(self):
        return True

    def getParent(self, index:int):
        return None

    def getReturnState(self, index:int):
        return self.returnState

    def __eq__(self, other):
        return self is other

    # BUG FIX: in Python 3 a class that defines __eq__ gets __hash__ = None
    # unless it declares one, which would make EMPTY unusable as a dict key
    # (the caches above hash contexts). Restore the inherited implementation.
    __hash__ = SingletonPredictionContext.__hash__

    def __str__(self):
        return "$"


PredictionContext.EMPTY = EmptyPredictionContext()


class ArrayPredictionContext(PredictionContext):
    # Parent can be null only if full ctx mode and we make an array
    # from {@link #EMPTY} and non-empty. We merge {@link #EMPTY} by using
    # null parent and returnState == {@link #EMPTY_RETURN_STATE}.

    def __init__(self, parents:list, returnStates:list):
        # BUG FIX: the original called calculateHashCode(parents, returnStates),
        # but that helper hashes a single (parent, returnState) pair; the list
        # variant is calculateListsHashCode() (defined later in this module).
        super().__init__(calculateListsHashCode(parents, returnStates))
        assert parents is not None and len(parents) > 0
        assert returnStates is not None and len(returnStates) > 0
        self.parents = parents
        self.returnStates = returnStates

    def isEmpty(self):
        # since EMPTY_RETURN_STATE can only appear in the last position, we
        # don't need to verify that size==1
        return self.returnStates[0] == PredictionContext.EMPTY_RETURN_STATE

    def __len__(self):
        return len(self.returnStates)

    def getParent(self, index:int):
        return self.parents[index]

    def getReturnState(self, index:int):
        return self.returnStates[index]

    def __eq__(self, other):
        if self is other:
            return True
        elif not isinstance(other, ArrayPredictionContext):
            return False
        elif hash(self) != hash(other):
            return False  # can't be same if hash is different
        else:
            return self.returnStates == other.returnStates and self.parents == other.parents

    # BUG FIX: keep instances hashable under Python 3 despite __eq__ above
    # (contexts are stored as dict keys in PredictionContextCache).
    def __hash__(self):
        return self.cachedHashCode

    def __str__(self):
        if self.isEmpty():
            return "[]"
        with StringIO() as buf:
            buf.write("[")
            for i in range(0, len(self.returnStates)):
                if i > 0:
                    buf.write(", ")
                if self.returnStates[i] == PredictionContext.EMPTY_RETURN_STATE:
                    buf.write("$")
                    continue
                # BUG FIX: StringIO.write() requires str, not int.
                buf.write(str(self.returnStates[i]))
                if self.parents[i] is not None:
                    buf.write(' ')
                    buf.write(str(self.parents[i]))
                else:
                    buf.write("null")
            buf.write("]")
            return buf.getvalue()


# Convert a {@link RuleContext} tree to a {@link PredictionContext} graph.
# Return {@link #EMPTY} if {@code outerContext} is empty or null.
#/
def PredictionContextFromRuleContext(atn:ATN, outerContext:RuleContext=None):
    if outerContext is None:
        outerContext = RuleContext.EMPTY
    # if we are in RuleContext of start rule, s, then PredictionContext
    # is EMPTY. Nobody called us. (if we are empty, return empty)
    if outerContext.parentCtx is None or outerContext is RuleContext.EMPTY:
        return PredictionContext.EMPTY
    # If we have a parent, convert it to a PredictionContext graph
    parent = PredictionContextFromRuleContext(atn, outerContext.parentCtx)
    state = atn.states[outerContext.invokingState]
    transition = state.transitions[0]
    return SingletonPredictionContext.create(parent, transition.followState.stateNumber)


def calculateListsHashCode(parents:list, returnStates:list):
    # Hash the two parallel lists by string concatenation (matches the
    # single-pair calculateHashCode() strategy above).
    # NOTE: annotations fixed -- returnStates is a list, not an int.
    with StringIO() as s:
        for parent in parents:
            s.write(str(parent))
        for returnState in returnStates:
            s.write(str(returnState))
        return hash(s.getvalue())


def merge(a:PredictionContext, b:PredictionContext, rootIsWildcard:bool, mergeCache:dict):
    """Merge two prediction contexts, dispatching to the singleton or array
    merge routine. mergeCache (may be None) memoizes merge results."""
    assert a is not None and b is not None  # must be empty context, never null
    # share same graph if both same
    if a == b:
        return a
    if isinstance(a, SingletonPredictionContext) and isinstance(b, SingletonPredictionContext):
        return mergeSingletons(a, b, rootIsWildcard, mergeCache)
    # At least one of a or b is array.
    # If one is $ and rootIsWildcard, return $ as wildcard.
    if rootIsWildcard:
        if isinstance(a, EmptyPredictionContext):
            return a
        if isinstance(b, EmptyPredictionContext):
            return b
    # convert singleton so both are arrays to normalize
    # BUG FIX: SingletonPredictionContext stores its parent in .parentCtx,
    # not .parent -- the original raised AttributeError here.
    if isinstance(a, SingletonPredictionContext):
        a = ArrayPredictionContext([a.parentCtx], [a.returnState])
    if isinstance(b, SingletonPredictionContext):
        b = ArrayPredictionContext([b.parentCtx], [b.returnState])
    return mergeArrays(a, b, rootIsWildcard, mergeCache)

#
# Merge two {@link SingletonPredictionContext} instances.
#
# <p>Stack tops equal, parents merge is same; return left graph.<br>
# <embed src="images/SingletonMerge_SameRootSamePar.svg" type="image/svg+xml"/></p>
#
# <p>Same stack top, parents differ; merge parents giving array node, then
# remainders of those graphs. A new root node is created to point to the
# merged parents.<br>
# <embed src="images/SingletonMerge_SameRootDiffPar.svg" type="image/svg+xml"/></p>
#
# <p>Different stack tops pointing to same parent. Make array node for the
# root where both element in the root point to the same (original)
# parent.<br>
# <embed src="images/SingletonMerge_DiffRootSamePar.svg" type="image/svg+xml"/></p>
#
# <p>Different stack tops pointing to different parents. Make array node for
# the root where each element points to the corresponding original
# parent.<br>
# <embed src="images/SingletonMerge_DiffRootDiffPar.svg" type="image/svg+xml"/></p>
#
# @param a the first {@link SingletonPredictionContext}
# @param b the second {@link SingletonPredictionContext}
# @param rootIsWildcard {@code true} if this is a local-context merge,
# otherwise false to indicate a full-context merge
# @param mergeCache dict memoizing previous merges, or None
#/
def mergeSingletons(a:SingletonPredictionContext, b:SingletonPredictionContext, rootIsWildcard:bool, mergeCache:dict):
    if mergeCache is not None:
        # BUG FIX: mergeCache is a plain dict; the original Java-style
        # mergeCache.get(a, b) treated b as the *default value* (returning b
        # for any cache miss) and dict has no .put(). Key on the ordered pair.
        previous = mergeCache.get((a, b), None)
        if previous is not None:
            return previous
        previous = mergeCache.get((b, a), None)
        if previous is not None:
            return previous

    rootMerge = mergeRoot(a, b, rootIsWildcard)
    if rootMerge is not None:
        if mergeCache is not None:
            mergeCache[(a, b)] = rootMerge
        return rootMerge

    if a.returnState == b.returnState:
        parent = merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache)
        # if parent is same as existing a or b parent or reduced to a parent, return it
        if parent == a.parentCtx:
            return a  # ax + bx = ax, if a=b
        if parent == b.parentCtx:
            return b  # ax + bx = bx, if a=b
        # else: ax + ay = a'[x,y]
        # merge parents x and y, giving array node with x,y then remainders
        # of those graphs. dup a, a' points at merged array.
        # New joined parent so create new singleton pointing to it, a'.
        a_ = SingletonPredictionContext.create(parent, a.returnState)
        if mergeCache is not None:
            mergeCache[(a, b)] = a_
        return a_
    else:  # a != b payloads differ
        # see if we can collapse parents due to $+x parents if local ctx
        singleParent = None
        if a is b or (a.parentCtx is not None and a.parentCtx == b.parentCtx):
            # ax + bx = [a,b]x
            singleParent = a.parentCtx
        if singleParent is not None:  # parents are same
            # sort payloads and use same parent
            payloads = [a.returnState, b.returnState]
            if a.returnState > b.returnState:
                payloads[0] = b.returnState
                payloads[1] = a.returnState
            parents = [singleParent, singleParent]
            a_ = ArrayPredictionContext(parents, payloads)
            if mergeCache is not None:
                mergeCache[(a, b)] = a_
            return a_
        # parents differ and can't merge them. Just pack together
        # into array; can't merge.
        # ax + by = [ax,by]
        payloads = [a.returnState, b.returnState]
        parents = [a.parentCtx, b.parentCtx]
        if a.returnState > b.returnState:  # sort by payload
            payloads[0] = b.returnState
            payloads[1] = a.returnState
            parents = [b.parentCtx, a.parentCtx]
        a_ = ArrayPredictionContext(parents, payloads)
        if mergeCache is not None:
            mergeCache[(a, b)] = a_
        return a_

#
# Handle case where at least one of {@code a} or {@code b} is
# {@link #EMPTY}. In the following diagrams, the symbol {@code $} is used
# to represent {@link #EMPTY}.
#
# <h2>Local-Context Merges</h2>
#
# <p>These local-context merge operations are used when {@code rootIsWildcard}
# is true.</p>
#
# <p>{@link #EMPTY} is superset of any graph; return {@link #EMPTY}.<br>
# <embed src="images/LocalMerge_EmptyRoot.svg" type="image/svg+xml"/></p>
#
# <p>{@link #EMPTY} and anything is {@code #EMPTY}, so merged parent is
# {@code #EMPTY}; return left graph.<br>
# <embed src="images/LocalMerge_EmptyParent.svg" type="image/svg+xml"/></p>
#
# <p>Special case of last merge if local context.<br>
# <embed src="images/LocalMerge_DiffRoots.svg" type="image/svg+xml"/></p>
#
# <h2>Full-Context Merges</h2>
#
# <p>These full-context merge operations are used when {@code rootIsWildcard}
# is false.</p>
#
# <p><embed src="images/FullMerge_EmptyRoots.svg" type="image/svg+xml"/></p>
#
# <p>Must keep all contexts; {@link #EMPTY} in array is a special value (and
# null parent).<br>
# <embed src="images/FullMerge_EmptyRoot.svg" type="image/svg+xml"/></p>
#
# <p><embed src="images/FullMerge_SameRoot.svg" type="image/svg+xml"/></p>
#
# @param a the first {@link SingletonPredictionContext}
# @param b the second {@link SingletonPredictionContext}
# @param rootIsWildcard {@code true} if this is a local-context merge,
# otherwise false to indicate a full-context merge
#/
def mergeRoot(a:SingletonPredictionContext, b:SingletonPredictionContext, rootIsWildcard:bool):
    if rootIsWildcard:
        if a == PredictionContext.EMPTY:
            return PredictionContext.EMPTY  # # + b = #
        if b == PredictionContext.EMPTY:
            return PredictionContext.EMPTY  # a + # = #
    else:
        if a == PredictionContext.EMPTY and b == PredictionContext.EMPTY:
            return PredictionContext.EMPTY  # $ + $ = $
        elif a == PredictionContext.EMPTY:  # $ + x = [$,x]
            payloads = [b.returnState, PredictionContext.EMPTY_RETURN_STATE]
            parents = [b.parentCtx, None]
            return ArrayPredictionContext(parents, payloads)
        elif b == PredictionContext.EMPTY:  # x + $ = [$,x] ($ is always first if present)
            payloads = [a.returnState, PredictionContext.EMPTY_RETURN_STATE]
            parents = [a.parentCtx, None]
            return ArrayPredictionContext(parents, payloads)
    return None


#
# Merge two {@link ArrayPredictionContext} instances.
#
# <p>Different tops, different parents.<br>
# <embed src="images/ArrayMerge_DiffTopDiffPar.svg" type="image/svg+xml"/></p>
#
# <p>Shared top, same parents.<br>
# <embed src="images/ArrayMerge_ShareTopSamePar.svg" type="image/svg+xml"/></p>
#
# <p>Shared top, different parents.<br>
# <embed src="images/ArrayMerge_ShareTopDiffPar.svg" type="image/svg+xml"/></p>
#
# <p>Shared top, all shared parents.<br>
# <embed src="images/ArrayMerge_ShareTopSharePar.svg" type="image/svg+xml"/></p>
#
# <p>Equal tops, merge parents and reduce top to
# {@link SingletonPredictionContext}.<br>
# <embed src="images/ArrayMerge_EqualTop.svg" type="image/svg+xml"/></p>
#/
def mergeArrays(a:ArrayPredictionContext, b:ArrayPredictionContext, rootIsWildcard:bool, mergeCache:dict):
    if mergeCache is not None:
        # BUG FIX: dict.get(a, b) returns b as the default; key the cache on
        # the ordered pair instead (dict also has no .put()).
        previous = mergeCache.get((a, b), None)
        if previous is not None:
            return previous
        previous = mergeCache.get((b, a), None)
        if previous is not None:
            return previous

    # merge sorted payloads a + b => M
    i = 0  # walks a
    j = 0  # walks b
    k = 0  # walks target M array
    # BUG FIX: "[] * n" is still the empty list, so every mergedX[k] = ...
    # below raised IndexError; pre-size with [None] * n. Also fixed the
    # a.returnState -> a.returnStates typo.
    mergedReturnStates = [None] * (len(a.returnStates) + len(b.returnStates))
    mergedParents = [None] * len(mergedReturnStates)
    # walk and merge to yield mergedParents, mergedReturnStates
    while i < len(a.returnStates) and j < len(b.returnStates):
        a_parent = a.parents[i]
        b_parent = b.parents[j]
        if a.returnStates[i] == b.returnStates[j]:
            # same payload (stack tops are equal), must yield merged singleton
            payload = a.returnStates[i]
            # $+$ = $
            bothDollars = payload == PredictionContext.EMPTY_RETURN_STATE and \
                            a_parent is None and b_parent is None
            ax_ax = (a_parent is not None and b_parent is not None) and a_parent == b_parent  # ax+ax -> ax
            if bothDollars or ax_ax:
                mergedParents[k] = a_parent  # choose left
                mergedReturnStates[k] = payload
            else:  # ax+ay -> a'[x,y]
                mergedParent = merge(a_parent, b_parent, rootIsWildcard, mergeCache)
                mergedParents[k] = mergedParent
                mergedReturnStates[k] = payload
            i += 1  # hop over left one as usual
            j += 1  # but also skip one in right side since we merge
        elif a.returnStates[i] < b.returnStates[j]:  # copy a[i] to M
            mergedParents[k] = a_parent
            mergedReturnStates[k] = a.returnStates[i]
            i += 1
        else:  # b > a, copy b[j] to M
            mergedParents[k] = b_parent
            mergedReturnStates[k] = b.returnStates[j]
            j += 1
        k += 1

    # copy over any payloads remaining in either array
    if i < len(a.returnStates):
        for p in range(i, len(a.returnStates)):
            mergedParents[k] = a.parents[p]
            mergedReturnStates[k] = a.returnStates[p]
            k += 1
    else:
        for p in range(j, len(b.returnStates)):
            mergedParents[k] = b.parents[p]
            mergedReturnStates[k] = b.returnStates[p]
            k += 1

    # trim merged if we combined a few that had same stack tops
    if k < len(mergedParents):  # write index < last position; trim
        if k == 1:  # for just one merged element, return singleton top
            a_ = SingletonPredictionContext.create(mergedParents[0], mergedReturnStates[0])
            if mergeCache is not None:
                mergeCache[(a, b)] = a_
            return a_
        mergedParents = mergedParents[0:k]
        mergedReturnStates = mergedReturnStates[0:k]

    M = ArrayPredictionContext(mergedParents, mergedReturnStates)
    # if we created same array as a or b, return that instead
    # TODO: track whether this is possible above during merge sort for speed
    if M == a:
        if mergeCache is not None:
            mergeCache[(a, b)] = a
        return a
    if M == b:
        if mergeCache is not None:
            mergeCache[(a, b)] = b
        return b
    combineCommonParents(mergedParents)
    if mergeCache is not None:
        mergeCache[(a, b)] = M
    return M

#
# Make pass over all <em>M</em> {@code parents}; merge any {@code equals()}
# ones.
#/ def combineCommonParents(parents:list): uniqueParents = dict() for p in range(0, len(parents)): parent = parents[p] if uniqueParents.get(parent, None) is None: uniqueParents[parent] = parent for p in range(0, len(parents)): parents[p] = uniqueParents[parents[p]] def getCachedPredictionContext(context:PredictionContext, contextCache:PredictionContextCache, visited:dict): if context.isEmpty(): return context existing = visited.get(context) if existing is not None: return existing existing = contextCache.get(context) if existing is not None: visited[context] = existing return existing changed = False parents = [None] * len(context) for i in range(0, len(parents)): parent = getCachedPredictionContext(context.getParent(i), contextCache, visited) if changed or parent is not context.getParent(i): if not changed: parents = [None] * len(context) for j in range(0, len(context)): parents[j] = context.getParent(j) changed = True parents[i] = parent if not changed: contextCache.add(context) visited[context] = context return context updated = None if len(parents) == 0: updated = PredictionContext.EMPTY elif len(parents) == 1: updated = SingletonPredictionContext.create(parents[0], context.getReturnState(0)) else: updated = ArrayPredictionContext(parents, context.returnStates) contextCache.add(updated) visited[updated] = updated visited[context] = updated return updated # # extra structures, but cut/paste/morphed works, so leave it. 
# seems to do a breadth-first walk
# public static List<PredictionContext> getAllNodes(PredictionContext context) {
#    Map<PredictionContext, PredictionContext> visited =
#        new IdentityHashMap<PredictionContext, PredictionContext>();
#    Deque<PredictionContext> workList = new ArrayDeque<PredictionContext>();
#    workList.add(context);
#    visited.put(context, context);
#    List<PredictionContext> nodes = new ArrayList<PredictionContext>();
#    while (!workList.isEmpty()) {
#        PredictionContext current = workList.pop();
#        nodes.add(current);
#        for (int i = 0; i < current.size(); i++) {
#            PredictionContext parent = current.getParent(i);
#            if ( parent!=null && visited.put(parent, parent) == null) {
#                workList.push(parent);
#            }
#        }
#    }
#    return nodes;
# }

# ter's recursive version of Sam's getAllNodes()
def getAllContextNodes(context:PredictionContext, nodes:list=None, visited:dict=None):
    """Collect every node reachable from `context` (depth-first) into `nodes`,
    using `visited` to break cycles; returns the `nodes` list."""
    if nodes is None:
        nodes = list()
        return getAllContextNodes(context, nodes, visited)
    elif visited is None:
        visited = dict()
        return getAllContextNodes(context, nodes, visited)
    else:
        if context is None or visited.get(context, None) is not None:
            return nodes
        # BUG FIX: Java leftovers -- Python dict has no .put() and list has
        # no .add(); both calls raised AttributeError on the first node.
        visited[context] = context
        nodes.append(context)
        for i in range(0, len(context)):
            getAllContextNodes(context.getParent(i), nodes, visited)
        return nodes
bsd-3-clause
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_mld_snooping.py
6
1325
#!/usr/bin/env python
import xml.etree.ElementTree as ET


class brocade_mld_snooping(object):
    """Auto generated class.

    Builds netconf-style <config> payloads for the brocade-mld-snooping
    YANG module and hands them to the supplied callback.
    """
    def __init__(self, **kwargs):
        # callback(config_element) performs the actual device RPC
        self._callback = kwargs.pop('callback')

    # BUG FIX: this method was generated twice with identical bodies; the
    # second definition silently shadowed the first, so the duplicate has
    # been removed.
    def mld_snooping_ipv6_mld_snooping_enable(self, **kwargs):
        """Auto Generated Code

        Build <config><mld-snooping><ipv6><mld><snooping><enable/> and pass
        it to the callback (overridable per call via the `callback` kwarg).
        """
        config = ET.Element("config")
        mld_snooping = ET.SubElement(config, "mld-snooping", xmlns="urn:brocade.com:mgmt:brocade-mld-snooping")
        ipv6 = ET.SubElement(mld_snooping, "ipv6")
        mld = ET.SubElement(ipv6, "mld")
        snooping = ET.SubElement(mld, "snooping")
        enable = ET.SubElement(snooping, "enable")
        callback = kwargs.pop('callback', self._callback)
        return callback(config)
apache-2.0
RPGOne/Skynet
numpy-master/numpy/lib/tests/test__iotools.py
148
14018
from __future__ import division, absolute_import, print_function import sys import time from datetime import date import numpy as np from numpy.compat import asbytes, asbytes_nested from numpy.testing import ( run_module_suite, TestCase, assert_, assert_equal, assert_allclose, assert_raises ) from numpy.lib._iotools import ( LineSplitter, NameValidator, StringConverter, has_nested_fields, easy_dtype, flatten_dtype ) class TestLineSplitter(TestCase): "Tests the LineSplitter class." def test_no_delimiter(self): "Test LineSplitter w/o delimiter" strg = asbytes(" 1 2 3 4 5 # test") test = LineSplitter()(strg) assert_equal(test, asbytes_nested(['1', '2', '3', '4', '5'])) test = LineSplitter('')(strg) assert_equal(test, asbytes_nested(['1', '2', '3', '4', '5'])) def test_space_delimiter(self): "Test space delimiter" strg = asbytes(" 1 2 3 4 5 # test") test = LineSplitter(asbytes(' '))(strg) assert_equal(test, asbytes_nested(['1', '2', '3', '4', '', '5'])) test = LineSplitter(asbytes(' '))(strg) assert_equal(test, asbytes_nested(['1 2 3 4', '5'])) def test_tab_delimiter(self): "Test tab delimiter" strg = asbytes(" 1\t 2\t 3\t 4\t 5 6") test = LineSplitter(asbytes('\t'))(strg) assert_equal(test, asbytes_nested(['1', '2', '3', '4', '5 6'])) strg = asbytes(" 1 2\t 3 4\t 5 6") test = LineSplitter(asbytes('\t'))(strg) assert_equal(test, asbytes_nested(['1 2', '3 4', '5 6'])) def test_other_delimiter(self): "Test LineSplitter on delimiter" strg = asbytes("1,2,3,4,,5") test = LineSplitter(asbytes(','))(strg) assert_equal(test, asbytes_nested(['1', '2', '3', '4', '', '5'])) # strg = asbytes(" 1,2,3,4,,5 # test") test = LineSplitter(asbytes(','))(strg) assert_equal(test, asbytes_nested(['1', '2', '3', '4', '', '5'])) def test_constant_fixed_width(self): "Test LineSplitter w/ fixed-width fields" strg = asbytes(" 1 2 3 4 5 # test") test = LineSplitter(3)(strg) assert_equal(test, asbytes_nested(['1', '2', '3', '4', '', '5', ''])) # strg = asbytes(" 1 3 4 5 6# test") test = 
LineSplitter(20)(strg) assert_equal(test, asbytes_nested(['1 3 4 5 6'])) # strg = asbytes(" 1 3 4 5 6# test") test = LineSplitter(30)(strg) assert_equal(test, asbytes_nested(['1 3 4 5 6'])) def test_variable_fixed_width(self): strg = asbytes(" 1 3 4 5 6# test") test = LineSplitter((3, 6, 6, 3))(strg) assert_equal(test, asbytes_nested(['1', '3', '4 5', '6'])) # strg = asbytes(" 1 3 4 5 6# test") test = LineSplitter((6, 6, 9))(strg) assert_equal(test, asbytes_nested(['1', '3 4', '5 6'])) # ----------------------------------------------------------------------------- class TestNameValidator(TestCase): def test_case_sensitivity(self): "Test case sensitivity" names = ['A', 'a', 'b', 'c'] test = NameValidator().validate(names) assert_equal(test, ['A', 'a', 'b', 'c']) test = NameValidator(case_sensitive=False).validate(names) assert_equal(test, ['A', 'A_1', 'B', 'C']) test = NameValidator(case_sensitive='upper').validate(names) assert_equal(test, ['A', 'A_1', 'B', 'C']) test = NameValidator(case_sensitive='lower').validate(names) assert_equal(test, ['a', 'a_1', 'b', 'c']) # check exceptions assert_raises(ValueError, NameValidator, case_sensitive='foobar') def test_excludelist(self): "Test excludelist" names = ['dates', 'data', 'Other Data', 'mask'] validator = NameValidator(excludelist=['dates', 'data', 'mask']) test = validator.validate(names) assert_equal(test, ['dates_', 'data_', 'Other_Data', 'mask_']) def test_missing_names(self): "Test validate missing names" namelist = ('a', 'b', 'c') validator = NameValidator() assert_equal(validator(namelist), ['a', 'b', 'c']) namelist = ('', 'b', 'c') assert_equal(validator(namelist), ['f0', 'b', 'c']) namelist = ('a', 'b', '') assert_equal(validator(namelist), ['a', 'b', 'f0']) namelist = ('', 'f0', '') assert_equal(validator(namelist), ['f1', 'f0', 'f2']) def test_validate_nb_names(self): "Test validate nb names" namelist = ('a', 'b', 'c') validator = NameValidator() assert_equal(validator(namelist, nbfields=1), ('a',)) 
assert_equal(validator(namelist, nbfields=5, defaultfmt="g%i"), ['a', 'b', 'c', 'g0', 'g1']) def test_validate_wo_names(self): "Test validate no names" namelist = None validator = NameValidator() assert_(validator(namelist) is None) assert_equal(validator(namelist, nbfields=3), ['f0', 'f1', 'f2']) # ----------------------------------------------------------------------------- def _bytes_to_date(s): if sys.version_info[0] >= 3: return date(*time.strptime(s.decode('latin1'), "%Y-%m-%d")[:3]) else: return date(*time.strptime(s, "%Y-%m-%d")[:3]) class TestStringConverter(TestCase): "Test StringConverter" def test_creation(self): "Test creation of a StringConverter" converter = StringConverter(int, -99999) assert_equal(converter._status, 1) assert_equal(converter.default, -99999) def test_upgrade(self): "Tests the upgrade method." converter = StringConverter() assert_equal(converter._status, 0) # test int assert_equal(converter.upgrade(asbytes('0')), 0) assert_equal(converter._status, 1) # On systems where integer defaults to 32-bit, the statuses will be # offset by one, so we check for this here. import numpy.core.numeric as nx status_offset = int(nx.dtype(nx.integer).itemsize < nx.dtype(nx.int64).itemsize) # test int > 2**32 assert_equal(converter.upgrade(asbytes('17179869184')), 17179869184) assert_equal(converter._status, 1 + status_offset) # test float assert_allclose(converter.upgrade(asbytes('0.')), 0.0) assert_equal(converter._status, 2 + status_offset) # test complex assert_equal(converter.upgrade(asbytes('0j')), complex('0j')) assert_equal(converter._status, 3 + status_offset) # test str assert_equal(converter.upgrade(asbytes('a')), asbytes('a')) assert_equal(converter._status, len(converter._mapper) - 1) def test_missing(self): "Tests the use of missing values." 
converter = StringConverter(missing_values=(asbytes('missing'), asbytes('missed'))) converter.upgrade(asbytes('0')) assert_equal(converter(asbytes('0')), 0) assert_equal(converter(asbytes('')), converter.default) assert_equal(converter(asbytes('missing')), converter.default) assert_equal(converter(asbytes('missed')), converter.default) try: converter('miss') except ValueError: pass def test_upgrademapper(self): "Tests updatemapper" dateparser = _bytes_to_date StringConverter.upgrade_mapper(dateparser, date(2000, 1, 1)) convert = StringConverter(dateparser, date(2000, 1, 1)) test = convert(asbytes('2001-01-01')) assert_equal(test, date(2001, 1, 1)) test = convert(asbytes('2009-01-01')) assert_equal(test, date(2009, 1, 1)) test = convert(asbytes('')) assert_equal(test, date(2000, 1, 1)) def test_string_to_object(self): "Make sure that string-to-object functions are properly recognized" conv = StringConverter(_bytes_to_date) assert_equal(conv._mapper[-2][0](0), 0j) assert_(hasattr(conv, 'default')) def test_keep_default(self): "Make sure we don't lose an explicit default" converter = StringConverter(None, missing_values=asbytes(''), default=-999) converter.upgrade(asbytes('3.14159265')) assert_equal(converter.default, -999) assert_equal(converter.type, np.dtype(float)) # converter = StringConverter( None, missing_values=asbytes(''), default=0) converter.upgrade(asbytes('3.14159265')) assert_equal(converter.default, 0) assert_equal(converter.type, np.dtype(float)) def test_keep_default_zero(self): "Check that we don't lose a default of 0" converter = StringConverter(int, default=0, missing_values=asbytes("N/A")) assert_equal(converter.default, 0) def test_keep_missing_values(self): "Check that we're not losing missing values" converter = StringConverter(int, default=0, missing_values=asbytes("N/A")) assert_equal( converter.missing_values, set(asbytes_nested(['', 'N/A']))) def test_int64_dtype(self): "Check that int64 integer types can be specified" converter = 
StringConverter(np.int64, default=0) val = asbytes("-9223372036854775807") assert_(converter(val) == -9223372036854775807) val = asbytes("9223372036854775807") assert_(converter(val) == 9223372036854775807) def test_uint64_dtype(self): "Check that uint64 integer types can be specified" converter = StringConverter(np.uint64, default=0) val = asbytes("9223372043271415339") assert_(converter(val) == 9223372043271415339) class TestMiscFunctions(TestCase): def test_has_nested_dtype(self): "Test has_nested_dtype" ndtype = np.dtype(np.float) assert_equal(has_nested_fields(ndtype), False) ndtype = np.dtype([('A', '|S3'), ('B', float)]) assert_equal(has_nested_fields(ndtype), False) ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])]) assert_equal(has_nested_fields(ndtype), True) def test_easy_dtype(self): "Test ndtype on dtypes" # Simple case ndtype = float assert_equal(easy_dtype(ndtype), np.dtype(float)) # As string w/o names ndtype = "i4, f8" assert_equal(easy_dtype(ndtype), np.dtype([('f0', "i4"), ('f1', "f8")])) # As string w/o names but different default format assert_equal(easy_dtype(ndtype, defaultfmt="field_%03i"), np.dtype([('field_000', "i4"), ('field_001', "f8")])) # As string w/ names ndtype = "i4, f8" assert_equal(easy_dtype(ndtype, names="a, b"), np.dtype([('a', "i4"), ('b', "f8")])) # As string w/ names (too many) ndtype = "i4, f8" assert_equal(easy_dtype(ndtype, names="a, b, c"), np.dtype([('a', "i4"), ('b', "f8")])) # As string w/ names (not enough) ndtype = "i4, f8" assert_equal(easy_dtype(ndtype, names=", b"), np.dtype([('f0', "i4"), ('b', "f8")])) # ... 
(with different default format) assert_equal(easy_dtype(ndtype, names="a", defaultfmt="f%02i"), np.dtype([('a', "i4"), ('f00', "f8")])) # As list of tuples w/o names ndtype = [('A', int), ('B', float)] assert_equal(easy_dtype(ndtype), np.dtype([('A', int), ('B', float)])) # As list of tuples w/ names assert_equal(easy_dtype(ndtype, names="a,b"), np.dtype([('a', int), ('b', float)])) # As list of tuples w/ not enough names assert_equal(easy_dtype(ndtype, names="a"), np.dtype([('a', int), ('f0', float)])) # As list of tuples w/ too many names assert_equal(easy_dtype(ndtype, names="a,b,c"), np.dtype([('a', int), ('b', float)])) # As list of types w/o names ndtype = (int, float, float) assert_equal(easy_dtype(ndtype), np.dtype([('f0', int), ('f1', float), ('f2', float)])) # As list of types w names ndtype = (int, float, float) assert_equal(easy_dtype(ndtype, names="a, b, c"), np.dtype([('a', int), ('b', float), ('c', float)])) # As simple dtype w/ names ndtype = np.dtype(float) assert_equal(easy_dtype(ndtype, names="a, b, c"), np.dtype([(_, float) for _ in ('a', 'b', 'c')])) # As simple dtype w/o names (but multiple fields) ndtype = np.dtype(float) assert_equal( easy_dtype(ndtype, names=['', '', ''], defaultfmt="f%02i"), np.dtype([(_, float) for _ in ('f00', 'f01', 'f02')])) def test_flatten_dtype(self): "Testing flatten_dtype" # Standard dtype dt = np.dtype([("a", "f8"), ("b", "f8")]) dt_flat = flatten_dtype(dt) assert_equal(dt_flat, [float, float]) # Recursive dtype dt = np.dtype([("a", [("aa", '|S1'), ("ab", '|S2')]), ("b", int)]) dt_flat = flatten_dtype(dt) assert_equal(dt_flat, [np.dtype('|S1'), np.dtype('|S2'), int]) # dtype with shaped fields dt = np.dtype([("a", (float, 2)), ("b", (int, 3))]) dt_flat = flatten_dtype(dt) assert_equal(dt_flat, [float, int]) dt_flat = flatten_dtype(dt, True) assert_equal(dt_flat, [float] * 2 + [int] * 3) # dtype w/ titles dt = np.dtype([(("a", "A"), "f8"), (("b", "B"), "f8")]) dt_flat = flatten_dtype(dt) assert_equal(dt_flat, 
[float, float]) if __name__ == "__main__": run_module_suite()
bsd-3-clause
gqwest-erp/server
openerp/conf/__init__.py
442
1974
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2011 OpenERP s.a. (<http://openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## """ Library-wide configuration variables. For now, configuration code is in openerp.tools.config. It is in mainly unprocessed form, e.g. addons_path is a string with commas-separated paths. The aim is to have code related to configuration (command line parsing, configuration file loading and saving, ...) in this module and provide real Python variables, e.g. addons_paths is really a list of paths. To initialize properly this module, openerp.tools.config.parse_config() must be used. """ import deprecation # Paths to search for OpenERP addons. addons_paths = [] # List of server-wide modules to load. Those modules are supposed to provide # features not necessarily tied to a particular database. This is in contrast # to modules that are always bound to a specific database when they are # installed (i.e. the majority of OpenERP addons). This is set with the --load # command-line option. server_wide_modules = [] # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
samarthmed/emacs-config
.python-environments/default/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/request.py
853
5751
try:
    from urllib.parse import urlencode
except ImportError:
    from urllib import urlencode

from .filepost import encode_multipart_formdata


__all__ = ['RequestMethods']


class RequestMethods(object):
    """
    Convenience mixin for classes who implement a :meth:`urlopen` method, such
    as :class:`~urllib3.connectionpool.HTTPConnectionPool` and
    :class:`~urllib3.poolmanager.PoolManager`.

    Provides behavior for making common types of HTTP request methods and
    decides which type of request field encoding to use.

    Specifically,

    :meth:`.request_encode_url` is for sending requests whose fields are
    encoded in the URL (such as GET, HEAD, DELETE).

    :meth:`.request_encode_body` is for sending requests whose fields are
    encoded in the *body* of the request using multipart or www-form-urlencoded
    (such as for POST, PUT, PATCH).

    :meth:`.request` is for making any kind of request, it will look up the
    appropriate encoding format and use one of the above two methods to make
    the request.

    Initializer parameters:

    :param headers:
        Headers to include with all requests, unless other headers are given
        explicitly.
    """

    # Methods whose fields belong in the query string rather than the body.
    _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])

    def __init__(self, headers=None):
        self.headers = headers or {}

    def urlopen(self, method, url, body=None, headers=None,
                encode_multipart=True, multipart_boundary=None,
                **kw):  # Abstract
        # BUG FIX: was `raise NotImplemented(...)`. `NotImplemented` is the
        # rich-comparison singleton, not an exception class — calling it
        # raises a confusing TypeError instead of signaling "abstract method".
        raise NotImplementedError("Classes extending RequestMethods must implement "
                                  "their own ``urlopen`` method.")

    def request(self, method, url, fields=None, headers=None, **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the appropriate encoding of
        ``fields`` based on the ``method`` used.

        This is a convenience method that requires the least amount of manual
        effort. It can be used in most situations, while still having the
        option to drop down to more specific methods when necessary, such as
        :meth:`request_encode_url`, :meth:`request_encode_body`,
        or even the lowest level :meth:`urlopen`.
        """
        method = method.upper()

        if method in self._encode_url_methods:
            return self.request_encode_url(method, url, fields=fields,
                                           headers=headers,
                                           **urlopen_kw)
        else:
            return self.request_encode_body(method, url, fields=fields,
                                            headers=headers,
                                            **urlopen_kw)

    def request_encode_url(self, method, url, fields=None, **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the ``fields`` encoded in
        the url. This is useful for request methods like GET, HEAD, DELETE, etc.
        """
        if fields:
            url += '?' + urlencode(fields)
        return self.urlopen(method, url, **urlopen_kw)

    def request_encode_body(self, method, url, fields=None, headers=None,
                            encode_multipart=True, multipart_boundary=None,
                            **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the ``fields`` encoded in
        the body. This is useful for request methods like POST, PUT, PATCH, etc.

        When ``encode_multipart=True`` (default), then
        :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode
        the payload with the appropriate content type. Otherwise
        :meth:`urllib.urlencode` is used with the
        'application/x-www-form-urlencoded' content type.

        Multipart encoding must be used when posting files, and it's reasonably
        safe to use it in other times too. However, it may break request
        signing, such as with OAuth.

        Supports an optional ``fields`` parameter of key/value strings AND
        key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
        the MIME type is optional. For example::

            fields = {
                'foo': 'bar',
                'fakefile': ('foofile.txt', 'contents of foofile'),
                'realfile': ('barfile.txt', open('realfile').read()),
                'typedfile': ('bazfile.bin', open('bazfile').read(),
                              'image/jpeg'),
                'nonamefile': 'contents of nonamefile field',
            }

        When uploading a file, providing a filename (the first parameter of the
        tuple) is optional but recommended to best mimick behavior of browsers.

        Note that if ``headers`` are supplied, the 'Content-Type' header will
        be overwritten because it depends on the dynamic random boundary string
        which is used to compose the body of the request. The random boundary
        string can be explicitly set with the ``multipart_boundary`` parameter.
        """
        if headers is None:
            headers = self.headers

        extra_kw = {'headers': {}}

        if fields:
            # ``fields`` and ``body`` are mutually exclusive ways of supplying
            # a payload; silently preferring one would hide caller bugs.
            if 'body' in urlopen_kw:
                raise TypeError('request got values for both \'fields\' and \'body\', can only specify one.')

            if encode_multipart:
                body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary)
            else:
                body, content_type = urlencode(fields), 'application/x-www-form-urlencoded'

            extra_kw['body'] = body
            extra_kw['headers'] = {'Content-Type': content_type}

        # Caller-supplied headers are merged on top of the generated
        # Content-Type header; explicit urlopen kwargs win last.
        extra_kw['headers'].update(headers)
        extra_kw.update(urlopen_kw)

        return self.urlopen(method, url, **extra_kw)
gpl-2.0
bliti/django-nonrel-1.5
django/contrib/gis/gdal/driver.py
221
2469
# prerequisites imports from ctypes import c_void_p from django.contrib.gis.gdal.base import GDALBase from django.contrib.gis.gdal.error import OGRException from django.contrib.gis.gdal.prototypes import ds as capi from django.utils import six from django.utils.encoding import force_bytes # For more information, see the OGR C API source code: # http://www.gdal.org/ogr/ogr__api_8h.html # # The OGR_Dr_* routines are relevant here. class Driver(GDALBase): "Wraps an OGR Data Source Driver." # Case-insensitive aliases for OGR Drivers. _alias = {'esri' : 'ESRI Shapefile', 'shp' : 'ESRI Shapefile', 'shape' : 'ESRI Shapefile', 'tiger' : 'TIGER', 'tiger/line' : 'TIGER', } def __init__(self, dr_input): "Initializes an OGR driver on either a string or integer input." if isinstance(dr_input, six.string_types): # If a string name of the driver was passed in self._register() # Checking the alias dictionary (case-insensitive) to see if an alias # exists for the given driver. if dr_input.lower() in self._alias: name = self._alias[dr_input.lower()] else: name = dr_input # Attempting to get the OGR driver by the string name. dr = capi.get_driver_by_name(force_bytes(name)) elif isinstance(dr_input, int): self._register() dr = capi.get_driver(dr_input) elif isinstance(dr_input, c_void_p): dr = dr_input else: raise OGRException('Unrecognized input type for OGR Driver: %s' % str(type(dr_input))) # Making sure we get a valid pointer to the OGR Driver if not dr: raise OGRException('Could not initialize OGR Driver on input: %s' % str(dr_input)) self.ptr = dr def __str__(self): "Returns the string name of the OGR Driver." return capi.get_driver_name(self.ptr) def _register(self): "Attempts to register all the data source drivers." 
# Only register all if the driver count is 0 (or else all drivers # will be registered over and over again) if not self.driver_count: capi.register_all() # Driver properties @property def driver_count(self): "Returns the number of OGR data source drivers registered." return capi.get_driver_count()
bsd-3-clause
dursk/django
django/contrib/gis/geos/geometry.py
1
23672
""" This module contains the 'base' GEOSGeometry object -- all GEOS Geometries inherit from this object. """ from __future__ import unicode_literals import json from ctypes import addressof, byref, c_double from django.contrib.gis import gdal from django.contrib.gis.geometry.regex import hex_regex, json_regex, wkt_regex from django.contrib.gis.geos import prototypes as capi from django.contrib.gis.geos.base import GEOSBase from django.contrib.gis.geos.coordseq import GEOSCoordSeq from django.contrib.gis.geos.error import GEOSException from django.contrib.gis.geos.libgeos import GEOM_PTR from django.contrib.gis.geos.mutable_list import ListMixin from django.contrib.gis.geos.prepared import PreparedGeometry from django.contrib.gis.geos.prototypes.io import ( ewkb_w, wkb_r, wkb_w, wkt_r, wkt_w, ) from django.utils import six from django.utils.encoding import force_bytes, force_text class GEOSGeometry(GEOSBase, ListMixin): "A class that, generally, encapsulates a GEOS geometry." _GEOS_CLASSES = None ptr_type = GEOM_PTR has_cs = False # Only Point, LineString, LinearRing have coordinate sequences def __init__(self, geo_input, srid=None): """ The base constructor for GEOS geometry objects, and may take the following inputs: * strings: - WKT - HEXEWKB (a PostGIS-specific canonical form) - GeoJSON (requires GDAL) * buffer: - WKB The `srid` keyword is used to specify the Source Reference Identifier (SRID) number for this Geometry. If not set, the SRID will be None. """ if isinstance(geo_input, bytes): geo_input = force_text(geo_input) if isinstance(geo_input, six.string_types): wkt_m = wkt_regex.match(geo_input) if wkt_m: # Handling WKT input. if wkt_m.group('srid'): srid = int(wkt_m.group('srid')) g = wkt_r().read(force_bytes(wkt_m.group('wkt'))) elif hex_regex.match(geo_input): # Handling HEXEWKB input. g = wkb_r().read(force_bytes(geo_input)) elif json_regex.match(geo_input): # Handling GeoJSON input. 
if not gdal.HAS_GDAL: raise ValueError('Initializing geometry from JSON input requires GDAL.') g = wkb_r().read(gdal.OGRGeometry(geo_input).wkb) else: raise ValueError('String or unicode input unrecognized as WKT EWKT, and HEXEWKB.') elif isinstance(geo_input, GEOM_PTR): # When the input is a pointer to a geometry (GEOM_PTR). g = geo_input elif isinstance(geo_input, six.memoryview): # When the input is a buffer (WKB). g = wkb_r().read(geo_input) elif isinstance(geo_input, GEOSGeometry): g = capi.geom_clone(geo_input.ptr) else: # Invalid geometry type. raise TypeError('Improper geometry input type: %s' % str(type(geo_input))) if g: # Setting the pointer object with a valid pointer. self.ptr = g else: raise GEOSException('Could not initialize GEOS Geometry with given input.') # Post-initialization setup. self._post_init(srid) def _post_init(self, srid): "Helper routine for performing post-initialization setup." # Setting the SRID, if given. if srid and isinstance(srid, int): self.srid = srid # Setting the class type (e.g., Point, Polygon, etc.) if GEOSGeometry._GEOS_CLASSES is None: # Lazy-loaded variable to avoid import conflicts with GEOSGeometry. from .linestring import LineString, LinearRing from .point import Point from .polygon import Polygon from .collections import ( GeometryCollection, MultiPoint, MultiLineString, MultiPolygon) GEOSGeometry._GEOS_CLASSES = { 0: Point, 1: LineString, 2: LinearRing, 3: Polygon, 4: MultiPoint, 5: MultiLineString, 6: MultiPolygon, 7: GeometryCollection, } self.__class__ = GEOSGeometry._GEOS_CLASSES[self.geom_typeid] # Setting the coordinate sequence for the geometry (will be None on # geometries that do not have coordinate sequences) self._set_cs() def __del__(self): """ Destroys this Geometry; in other words, frees the memory used by the GEOS C++ object. 
""" if self._ptr and capi: capi.destroy_geom(self._ptr) def __copy__(self): """ Returns a clone because the copy of a GEOSGeometry may contain an invalid pointer location if the original is garbage collected. """ return self.clone() def __deepcopy__(self, memodict): """ The `deepcopy` routine is used by the `Node` class of django.utils.tree; thus, the protocol routine needs to be implemented to return correct copies (clones) of these GEOS objects, which use C pointers. """ return self.clone() def __str__(self): "EWKT is used for the string representation." return self.ewkt def __repr__(self): "Short-hand representation because WKT may be very large." return '<%s object at %s>' % (self.geom_type, hex(addressof(self.ptr))) # Pickling support def __getstate__(self): # The pickled state is simply a tuple of the WKB (in string form) # and the SRID. return bytes(self.wkb), self.srid def __setstate__(self, state): # Instantiating from the tuple state that was pickled. wkb, srid = state ptr = wkb_r().read(six.memoryview(wkb)) if not ptr: raise GEOSException('Invalid Geometry loaded from pickled state.') self.ptr = ptr self._post_init(srid) # Comparison operators def __eq__(self, other): """ Equivalence testing, a Geometry may be compared with another Geometry or a WKT representation. """ if isinstance(other, six.string_types): return self.wkt == other elif isinstance(other, GEOSGeometry): return self.equals_exact(other) else: return False def __ne__(self, other): "The not equals operator." return not (self == other) # ### Geometry set-like operations ### # Thanks to Sean Gillies for inspiration: # http://lists.gispython.org/pipermail/community/2007-July/001034.html # g = g1 | g2 def __or__(self, other): "Returns the union of this Geometry and the other." return self.union(other) # g = g1 & g2 def __and__(self, other): "Returns the intersection of this Geometry and the other." 
return self.intersection(other) # g = g1 - g2 def __sub__(self, other): "Return the difference this Geometry and the other." return self.difference(other) # g = g1 ^ g2 def __xor__(self, other): "Return the symmetric difference of this Geometry and the other." return self.sym_difference(other) # #### Coordinate Sequence Routines #### def _set_cs(self): "Sets the coordinate sequence for this Geometry." if self.has_cs: self._cs = GEOSCoordSeq(capi.get_cs(self.ptr), self.hasz) else: self._cs = None @property def coord_seq(self): "Returns a clone of the coordinate sequence for this Geometry." if self.has_cs: return self._cs.clone() # #### Geometry Info #### @property def geom_type(self): "Returns a string representing the Geometry type, e.g. 'Polygon'" return capi.geos_type(self.ptr).decode() @property def geom_typeid(self): "Returns an integer representing the Geometry type." return capi.geos_typeid(self.ptr) @property def num_geom(self): "Returns the number of geometries in the Geometry." return capi.get_num_geoms(self.ptr) @property def num_coords(self): "Returns the number of coordinates in the Geometry." return capi.get_num_coords(self.ptr) @property def num_points(self): "Returns the number points, or coordinates, in the Geometry." return self.num_coords @property def dims(self): "Returns the dimension of this Geometry (0=point, 1=line, 2=surface)." return capi.get_dims(self.ptr) def normalize(self): "Converts this Geometry to normal form (or canonical form)." return capi.geos_normalize(self.ptr) # #### Unary predicates #### @property def empty(self): """ Returns a boolean indicating whether the set of points in this Geometry are empty. """ return capi.geos_isempty(self.ptr) @property def hasz(self): "Returns whether the geometry has a 3D dimension." return capi.geos_hasz(self.ptr) @property def ring(self): "Returns whether or not the geometry is a ring." return capi.geos_isring(self.ptr) @property def simple(self): "Returns false if the Geometry not simple." 
return capi.geos_issimple(self.ptr) @property def valid(self): "This property tests the validity of this Geometry." return capi.geos_isvalid(self.ptr) @property def valid_reason(self): """ Returns a string containing the reason for any invalidity. """ return capi.geos_isvalidreason(self.ptr).decode() # #### Binary predicates. #### def contains(self, other): "Returns true if other.within(this) returns true." return capi.geos_contains(self.ptr, other.ptr) def crosses(self, other): """ Returns true if the DE-9IM intersection matrix for the two Geometries is T*T****** (for a point and a curve,a point and an area or a line and an area) 0******** (for two curves). """ return capi.geos_crosses(self.ptr, other.ptr) def disjoint(self, other): """ Returns true if the DE-9IM intersection matrix for the two Geometries is FF*FF****. """ return capi.geos_disjoint(self.ptr, other.ptr) def equals(self, other): """ Returns true if the DE-9IM intersection matrix for the two Geometries is T*F**FFF*. """ return capi.geos_equals(self.ptr, other.ptr) def equals_exact(self, other, tolerance=0): """ Returns true if the two Geometries are exactly equal, up to a specified tolerance. """ return capi.geos_equalsexact(self.ptr, other.ptr, float(tolerance)) def intersects(self, other): "Returns true if disjoint returns false." return capi.geos_intersects(self.ptr, other.ptr) def overlaps(self, other): """ Returns true if the DE-9IM intersection matrix for the two Geometries is T*T***T** (for two points or two surfaces) 1*T***T** (for two curves). """ return capi.geos_overlaps(self.ptr, other.ptr) def relate_pattern(self, other, pattern): """ Returns true if the elements in the DE-9IM intersection matrix for the two Geometries match the elements in pattern. 
""" if not isinstance(pattern, six.string_types) or len(pattern) > 9: raise GEOSException('invalid intersection matrix pattern') return capi.geos_relatepattern(self.ptr, other.ptr, force_bytes(pattern)) def touches(self, other): """ Returns true if the DE-9IM intersection matrix for the two Geometries is FT*******, F**T***** or F***T****. """ return capi.geos_touches(self.ptr, other.ptr) def within(self, other): """ Returns true if the DE-9IM intersection matrix for the two Geometries is T*F**F***. """ return capi.geos_within(self.ptr, other.ptr) # #### SRID Routines #### def get_srid(self): "Gets the SRID for the geometry, returns None if no SRID is set." s = capi.geos_get_srid(self.ptr) if s == 0: return None else: return s def set_srid(self, srid): "Sets the SRID for the geometry." capi.geos_set_srid(self.ptr, 0 if srid is None else srid) srid = property(get_srid, set_srid) # #### Output Routines #### @property def ewkt(self): """ Returns the EWKT (SRID + WKT) of the Geometry. Note that Z values are only included in this representation if GEOS >= 3.3.0. """ if self.get_srid(): return 'SRID=%s;%s' % (self.srid, self.wkt) else: return self.wkt @property def wkt(self): "Returns the WKT (Well-Known Text) representation of this Geometry." return wkt_w(3 if self.hasz else 2).write(self).decode() @property def hex(self): """ Returns the WKB of this Geometry in hexadecimal form. Please note that the SRID is not included in this representation because it is not a part of the OGC specification (use the `hexewkb` property instead). """ # A possible faster, all-python, implementation: # str(self.wkb).encode('hex') return wkb_w(3 if self.hasz else 2).write_hex(self) @property def hexewkb(self): """ Returns the EWKB of this Geometry in hexadecimal form. This is an extension of the WKB specification that includes SRID value that are a part of this geometry. 
""" return ewkb_w(3 if self.hasz else 2).write_hex(self) @property def json(self): """ Returns GeoJSON representation of this Geometry. """ return json.dumps({'type': self.__class__.__name__, 'coordinates': self.coords}) geojson = json @property def wkb(self): """ Returns the WKB (Well-Known Binary) representation of this Geometry as a Python buffer. SRID and Z values are not included, use the `ewkb` property instead. """ return wkb_w(3 if self.hasz else 2).write(self) @property def ewkb(self): """ Return the EWKB representation of this Geometry as a Python buffer. This is an extension of the WKB specification that includes any SRID value that are a part of this geometry. """ return ewkb_w(3 if self.hasz else 2).write(self) @property def kml(self): "Returns the KML representation of this Geometry." gtype = self.geom_type return '<%s>%s</%s>' % (gtype, self.coord_seq.kml, gtype) @property def prepared(self): """ Returns a PreparedGeometry corresponding to this geometry -- it is optimized for the contains, intersects, and covers operations. """ return PreparedGeometry(self) # #### GDAL-specific output routines #### @property def ogr(self): "Returns the OGR Geometry for this Geometry." if not gdal.HAS_GDAL: raise GEOSException('GDAL required to convert to an OGRGeometry.') if self.srid: try: return gdal.OGRGeometry(self.wkb, self.srid) except gdal.SRSException: pass return gdal.OGRGeometry(self.wkb) @property def srs(self): "Returns the OSR SpatialReference for SRID of this Geometry." if not gdal.HAS_GDAL: raise GEOSException('GDAL required to return a SpatialReference object.') if self.srid: try: return gdal.SpatialReference(self.srid) except gdal.SRSException: pass return None @property def crs(self): "Alias for `srs` property." return self.srs def transform(self, ct, clone=False): """ Requires GDAL. Transforms the geometry according to the given transformation object, which may be an integer SRID, and WKT or PROJ.4 string. 
By default, the geometry is transformed in-place and nothing is returned. However if the `clone` keyword is set, then this geometry will not be modified and a transformed clone will be returned instead. """ srid = self.srid if ct == srid: # short-circuit where source & dest SRIDs match if clone: return self.clone() else: return if (srid is None) or (srid < 0): raise GEOSException("Calling transform() with no SRID set is not supported") if not gdal.HAS_GDAL: raise GEOSException("GDAL library is not available to transform() geometry.") # Creating an OGR Geometry, which is then transformed. g = self.ogr g.transform(ct) # Getting a new GEOS pointer ptr = wkb_r().read(g.wkb) if clone: # User wants a cloned transformed geometry returned. return GEOSGeometry(ptr, srid=g.srid) if ptr: # Reassigning pointer, and performing post-initialization setup # again due to the reassignment. capi.destroy_geom(self.ptr) self.ptr = ptr self._post_init(g.srid) else: raise GEOSException('Transformed WKB was invalid.') # #### Topology Routines #### def _topology(self, gptr): "Helper routine to return Geometry from the given pointer." return GEOSGeometry(gptr, srid=self.srid) @property def boundary(self): "Returns the boundary as a newly allocated Geometry object." return self._topology(capi.geos_boundary(self.ptr)) def buffer(self, width, quadsegs=8): """ Returns a geometry that represents all points whose distance from this Geometry is less than or equal to distance. Calculations are in the Spatial Reference System of this Geometry. The optional third parameter sets the number of segment used to approximate a quarter circle (defaults to 8). (Text from PostGIS documentation at ch. 6.1.3) """ return self._topology(capi.geos_buffer(self.ptr, width, quadsegs)) @property def centroid(self): """ The centroid is equal to the centroid of the set of component Geometries of highest dimension (since the lower-dimension geometries contribute zero "weight" to the centroid). 
""" return self._topology(capi.geos_centroid(self.ptr)) @property def convex_hull(self): """ Returns the smallest convex Polygon that contains all the points in the Geometry. """ return self._topology(capi.geos_convexhull(self.ptr)) def difference(self, other): """ Returns a Geometry representing the points making up this Geometry that do not make up other. """ return self._topology(capi.geos_difference(self.ptr, other.ptr)) @property def envelope(self): "Return the envelope for this geometry (a polygon)." return self._topology(capi.geos_envelope(self.ptr)) def intersection(self, other): "Returns a Geometry representing the points shared by this Geometry and other." return self._topology(capi.geos_intersection(self.ptr, other.ptr)) @property def point_on_surface(self): "Computes an interior point of this Geometry." return self._topology(capi.geos_pointonsurface(self.ptr)) def relate(self, other): "Returns the DE-9IM intersection matrix for this Geometry and the other." return capi.geos_relate(self.ptr, other.ptr).decode() def simplify(self, tolerance=0.0, preserve_topology=False): """ Returns the Geometry, simplified using the Douglas-Peucker algorithm to the specified tolerance (higher tolerance => less points). If no tolerance provided, defaults to 0. By default, this function does not preserve topology - e.g. polygons can be split, collapse to lines or disappear holes can be created or disappear, and lines can cross. By specifying preserve_topology=True, the result will have the same dimension and number of components as the input. This is significantly slower. """ if preserve_topology: return self._topology(capi.geos_preservesimplify(self.ptr, tolerance)) else: return self._topology(capi.geos_simplify(self.ptr, tolerance)) def sym_difference(self, other): """ Returns a set combining the points in this Geometry not in other, and the points in other not in this Geometry. 
""" return self._topology(capi.geos_symdifference(self.ptr, other.ptr)) def union(self, other): "Returns a Geometry representing all the points in this Geometry and other." return self._topology(capi.geos_union(self.ptr, other.ptr)) # #### Other Routines #### @property def area(self): "Returns the area of the Geometry." return capi.geos_area(self.ptr, byref(c_double())) def distance(self, other): """ Returns the distance between the closest points on this Geometry and the other. Units will be in those of the coordinate system of the Geometry. """ if not isinstance(other, GEOSGeometry): raise TypeError('distance() works only on other GEOS Geometries.') return capi.geos_distance(self.ptr, other.ptr, byref(c_double())) @property def extent(self): """ Returns the extent of this geometry as a 4-tuple, consisting of (xmin, ymin, xmax, ymax). """ from .point import Point env = self.envelope if isinstance(env, Point): xmin, ymin = env.tuple xmax, ymax = xmin, ymin else: xmin, ymin = env[0][0] xmax, ymax = env[0][2] return (xmin, ymin, xmax, ymax) @property def length(self): """ Returns the length of this Geometry (e.g., 0 for point, or the circumference of a Polygon). """ return capi.geos_length(self.ptr, byref(c_double())) def clone(self): "Clones this Geometry." return GEOSGeometry(capi.geom_clone(self.ptr), srid=self.srid) class ProjectInterpolateMixin(object): """ Used for LineString and MultiLineString. 
""" def interpolate(self, distance): return self._topology(capi.geos_interpolate(self.ptr, distance)) def interpolate_normalized(self, distance): return self._topology(capi.geos_interpolate_normalized(self.ptr, distance)) def project(self, point): from .point import Point if not isinstance(point, Point): raise TypeError('locate_point argument must be a Point') return capi.geos_project(self.ptr, point.ptr) def project_normalized(self, point): from .point import Point if not isinstance(point, Point): raise TypeError('locate_point argument must be a Point') return capi.geos_project_normalized(self.ptr, point.ptr)
bsd-3-clause
andrewjtech123/rebilly-validschema
python-client /Shopify/apis/update_api.py
1
14173
# coding: utf-8 """ Draft Orders API ### Draft Orders API This API is about operations you can perform on the draft order object as defind below: Field | Definition | Data Type -----|-----------|------------------------- `id`| `id` of the draft order |`string` `order_id` | The id of the order associated to the draft order, once created. | `string` `name` | Name of the draft order, format #D<number>, where number, is an sequential identifier unique to the shop, starting at 1. For example #D133 | `integer` `customer` | Customer object will be serialized with only the default address, however only the ID can be set in order to associate the customer to the draft order. Setting the value to null removes the customer from the draft order. | `object` `shipping_address` | The mailing address to where the draft order will be shipped. | `string` `billing_address` | The mailing address associated with the payment method. | `string` `note` | The text of an optional note that a shop owner can attach to the draft order. | `string` `email` | The email address used for sending notifications. |`string` `currency` | The three letter code for the currency to be used for the payment. | `string` `invoice_sent_at` | DateTime when the invoice was emailed to the customer by Shopify. | `dateTime` `invoice_url` | The url to send to the customer so that they can complete the checkout. When using `send_invoice`, this url is emailed to the customer. This field can be used so that an API client can use another method of communication to provide the url to the customer. | `string` `line_item`[ ] | | array of `line_item` objects `metafields`[ ] | | array of `metafield` objects `shipping_line` | | `object` `tags` | Tags are additional short descriptors, commonly used for filtering and searching, formatted as a string of comma-separated values. Each individual tag is limited to 40 characters in length. | `string` `tax_exempt` | Sets whether taxes are exempt for this draft order. 
If this value is `false`, Shopify will honor the `tax_exempt` value for each `line_item`. | `boolean` `tax_lines` | Tax lines describing the sum of each type of tax line for line items and shipping line. | array of `tax_line` objects `discount` | Order level discount. | `string` `taxes_included` | Shop settings taxes are included in the price | `boolean` `total_tax` | Total tax amount | `integer` `completed_at` | Date at which an order was created and the draft order moved to “completed” status. | `DateTime` `created_at` | By default, this auto-generated property is the date and time when the order was created in Shopify, in ISO 8601 format. If you are importing orders to the Shopify platform from another system, the writable `processed_at` property will override the `created_at` date. | `dateTime` `updated_at` | | `dateTime` `status` | String describing the state of the draft order. Possible values are `open`, `invoice sent`, or `completed`. | `string` OpenAPI spec version: 1.0.0 Generated by: https://github.com/swagger-api/swagger-codegen.git Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from __future__ import absolute_import import sys import os import re # python 2 and python 3 compatibility library from six import iteritems from ..configuration import Configuration from ..api_client import ApiClient class UpdateApi(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. 
Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): config = Configuration() if api_client: self.api_client = api_client else: if not config.api_client: config.api_client = ApiClient() self.api_client = config.api_client def admin_draft_ordersid_complete_json_put(self, id, **kwargs): """ Mark a draft order as paid or pending payment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.admin_draft_ordersid_complete_json_put(id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: (required) :param int payment_gateway_id: Gateway used to receive payment. Either ID of custom manual gateway, or null if using the default “Mark As Paid” gateway. :param bool payment_pending: true/false :return: DraftOrder If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.admin_draft_ordersid_complete_json_put_with_http_info(id, **kwargs) else: (data) = self.admin_draft_ordersid_complete_json_put_with_http_info(id, **kwargs) return data def admin_draft_ordersid_complete_json_put_with_http_info(self, id, **kwargs): """ Mark a draft order as paid or pending payment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.admin_draft_ordersid_complete_json_put_with_http_info(id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: (required) :param int payment_gateway_id: Gateway used to receive payment. 
Either ID of custom manual gateway, or null if using the default “Mark As Paid” gateway. :param bool payment_pending: true/false :return: DraftOrder If the method is called asynchronously, returns the request thread. """ all_params = ['id', 'payment_gateway_id', 'payment_pending'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method admin_draft_ordersid_complete_json_put" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'id' is set if ('id' not in params) or (params['id'] is None): raise ValueError("Missing the required parameter `id` when calling `admin_draft_ordersid_complete_json_put`") resource_path = '/admin/draft_orders/#{id}/complete.json'.replace('{format}', 'json') path_params = {} if 'id' in params: path_params['id'] = params['id'] query_params = {} if 'payment_gateway_id' in params: query_params['payment_gateway_id'] = params['payment_gateway_id'] if 'payment_pending' in params: query_params['paymentPending'] = params['payment_pending'] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept([]) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type([]) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='DraftOrder', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def admin_draft_ordersid_json_put(self, id, **kwargs): """ Modify draft order You can modify an existing draft order. 
### Example Only showing the parameters needed to select a particular shipping rate as returned from `GET /admin/draft_orders/#{id}/shipping_rates` draft_order: { shipping_line: { handle: , phone: } } This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.admin_draft_ordersid_json_put(id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: draft order id (required) :param DraftOrder draft_order: the draft order to modify. :return: DraftOrder If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.admin_draft_ordersid_json_put_with_http_info(id, **kwargs) else: (data) = self.admin_draft_ordersid_json_put_with_http_info(id, **kwargs) return data def admin_draft_ordersid_json_put_with_http_info(self, id, **kwargs): """ Modify draft order You can modify an existing draft order. ### Example Only showing the parameters needed to select a particular shipping rate as returned from `GET /admin/draft_orders/#{id}/shipping_rates` draft_order: { shipping_line: { handle: , phone: } } This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.admin_draft_ordersid_json_put_with_http_info(id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: draft order id (required) :param DraftOrder draft_order: the draft order to modify. :return: DraftOrder If the method is called asynchronously, returns the request thread. 
""" all_params = ['id', 'draft_order'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method admin_draft_ordersid_json_put" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'id' is set if ('id' not in params) or (params['id'] is None): raise ValueError("Missing the required parameter `id` when calling `admin_draft_ordersid_json_put`") resource_path = '/admin/draft_orders/#{id}.json'.replace('{format}', 'json') path_params = {} if 'id' in params: path_params['id'] = params['id'] query_params = {} header_params = {} form_params = [] local_var_files = {} body_params = None if 'draft_order' in params: body_params = params['draft_order'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept([]) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type([]) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='DraftOrder', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'))
mit
vitan/hue
desktop/core/ext-py/Django-1.6.10/django/contrib/gis/tests/geoadmin/tests.py
104
3001
from __future__ import absolute_import from django.contrib.gis.geos import HAS_GEOS from django.contrib.gis.tests.utils import HAS_SPATIAL_DB from django.test import TestCase from django.test.utils import override_settings from django.utils.unittest import skipUnless if HAS_GEOS and HAS_SPATIAL_DB: from django.contrib.gis import admin from django.contrib.gis.geos import Point from .models import City GOOGLE_MAPS_API_KEY = 'XXXX' @skipUnless(HAS_GEOS and HAS_SPATIAL_DB, "Geos and spatial db are required.") class GeoAdminTest(TestCase): urls = 'django.contrib.gis.tests.geoadmin.urls' def test_ensure_geographic_media(self): geoadmin = admin.site._registry[City] admin_js = geoadmin.media.render_js() self.assertTrue(any([geoadmin.openlayers_url in js for js in admin_js])) def test_olmap_OSM_rendering(self): geoadmin = admin.site._registry[City] result = geoadmin.get_map_widget(City._meta.get_field('point'))( ).render('point', Point(-79.460734, 40.18476)) self.assertIn( """geodjango_point.layers.base = new OpenLayers.Layer.OSM("OpenStreetMap (Mapnik)");""", result) def test_olmap_WMS_rendering(self): geoadmin = admin.GeoModelAdmin(City, admin.site) result = geoadmin.get_map_widget(City._meta.get_field('point'))( ).render('point', Point(-79.460734, 40.18476)) self.assertIn( """geodjango_point.layers.base = new OpenLayers.Layer.WMS("OpenLayers WMS", "http://vmap0.tiles.osgeo.org/wms/vmap0", {layers: \'basic\', format: 'image/jpeg'});""", result) def test_olwidget_has_changed(self): """ Check that changes are accurately noticed by OpenLayersWidget. 
""" geoadmin = admin.site._registry[City] form = geoadmin.get_changelist_form(None)() has_changed = form.fields['point']._has_changed initial = Point(13.4197458572965953, 52.5194108501149799, srid=4326) data_same = "SRID=3857;POINT(1493879.2754093995 6894592.019687599)" data_almost_same = "SRID=3857;POINT(1493879.2754093990 6894592.019687590)" data_changed = "SRID=3857;POINT(1493884.0527237 6894593.8111804)" self.assertTrue(has_changed(None, data_changed)) self.assertTrue(has_changed(initial, "")) self.assertFalse(has_changed(None, "")) self.assertFalse(has_changed(initial, data_same)) self.assertFalse(has_changed(initial, data_almost_same)) self.assertTrue(has_changed(initial, data_changed)) @override_settings(GOOGLE_MAPS_API_KEY=GOOGLE_MAPS_API_KEY) def test_google_map_scripts(self): """ Testing GoogleMap.scripts() output. See #20773. """ from django.contrib.gis.maps.google.gmap import GoogleMap google_map = GoogleMap() scripts = google_map.scripts self.assertIn(GOOGLE_MAPS_API_KEY, scripts) self.assertIn("new GMap2", scripts)
apache-2.0
RJRetro/mame
scripts/build/file2str.py
51
1610
#!/usr/bin/python ## ## license:BSD-3-Clause ## copyright-holders:Aaron Giles, Andrew Gardner from __future__ import with_statement import sys import os if len(sys.argv) < 4: print('Usage:') print(' file2str <source.lay> <output.h> <varname> [<type>]') print('') print('The default <type> is char, with an assumed NULL terminator') sys.exit(0) terminate = 1 srcfile = sys.argv[1] dstfile = sys.argv[2] varname = sys.argv[3] if len(sys.argv) >= 5: type = sys.argv[4] terminate = 0 else: type = 'char' try: myfile = open(srcfile, 'rb') except IOError: sys.stderr.write("Unable to open source file '%s'\n" % srcfile) sys.exit(-1) byteCount = os.path.getsize(srcfile) try: dst = open(dstfile,'w') dst.write('extern const %s %s[];\n' % ( type, varname )) dst.write('const %s %s[] =\n{\n\t' % ( type, varname)) offs = 0 with open(srcfile, "rb") as src: while True: chunk = src.read(16) if chunk: for b in chunk: # For Python 2.x compatibility. if isinstance(b, str): b = ord(b) dst.write('0x%02x' % b) offs += 1 if offs != byteCount: dst.write(',') else: break if offs != byteCount: dst.write('\n\t') if terminate == 1: dst.write(',0x00') dst.write('\n};\n') dst.close() except IOError: sys.stderr.write("Unable to open output file '%s'\n" % dstfile) sys.exit(-1)
gpl-2.0
amozie/amozie
testzie/voice_train_test.py
1
13790
# coding: utf-8 import tensorflow as tf import numpy as np import os from collections import Counter import librosa from joblib import Parallel, delayed wav_path = 'data/wav/train' label_file = 'data/doc/trans/train.word.txt' def get_wav_files(wav_path=wav_path): wav_files = [] for (dirpath, dirnames, filenames) in os.walk(wav_path): for filename in filenames: if filename.endswith(".wav") or filename.endswith(".WAV"): filename_path = os.sep.join([dirpath, filename]) if os.stat(filename_path).st_size < 240000: continue wav_files.append(filename_path) return wav_files wav_files = get_wav_files() def get_wav_label(wav_files=wav_files, label_file=label_file): labels_dict = {} with open(label_file, "r", encoding='utf-8') as f: for label in f: label = label.strip("\n") label_id, label_text = label.split(' ', 1) labels_dict[label_id] = label_text labels = [] new_wav_files = [] for wav_file in wav_files: wav_id = os.path.basename(wav_file).split(".")[0] if wav_id in labels_dict: labels.append(labels_dict[wav_id]) new_wav_files.append(wav_file) return new_wav_files, labels def get_wav_length(wav): import numpy as np import librosa print(wav) wav, sr = librosa.load(wav) mfcc = np.transpose(librosa.feature.mfcc(wav, sr), [1, 0]) return len(mfcc) pointer = 0 def get_next_batches(batch_size, wav_max_len): global pointer batches_wavs = [] batches_labels = [] for i in range(batch_size): wav, sr = librosa.load(wav_files[pointer]) mfcc = np.transpose(librosa.feature.mfcc(wav, sr), [1, 0]) batches_wavs.append(mfcc.tolist()) batches_labels.append(labels_vector[pointer]) pointer += 1 # 取零补齐 # label append 0 , 0 对应的字符 # mfcc 默认的计算长度为20(n_mfcc of mfcc) 作为channel length for mfcc in batches_wavs: while len(mfcc) < wav_max_len: mfcc.append([0] * 20) for label in batches_labels: while len(label) < label_max_len: label.append(0) return batches_wavs, batches_labels conv1d_index = 0 def conv1d_layer(input_tensor, size, dim, activation, scale, bias): global conv1d_index with 
tf.variable_scope("conv1d_" + str(conv1d_index)): W = tf.get_variable('W', (size, input_tensor.get_shape().as_list()[-1], dim), dtype=tf.float32, initializer=tf.random_uniform_initializer(minval=-scale, maxval=scale)) if bias: b = tf.get_variable('b', [dim], dtype=tf.float32, initializer=tf.constant_initializer(0)) out = tf.nn.conv1d(input_tensor, W, stride=1, padding='SAME') + (b if bias else 0) if not bias: beta = tf.get_variable('beta', dim, dtype=tf.float32, initializer=tf.constant_initializer(0)) gamma = tf.get_variable('gamma', dim, dtype=tf.float32, initializer=tf.constant_initializer(1)) mean_running = tf.get_variable('mean', dim, dtype=tf.float32, initializer=tf.constant_initializer(0)) variance_running = tf.get_variable('variance', dim, dtype=tf.float32, initializer=tf.constant_initializer(1)) mean, variance = tf.nn.moments(out, axes=list(range(len(out.get_shape()) - 1))) def update_running_stat(): decay = 0.99 # 定义了均值方差指数衰减 见 http://blog.csdn.net/liyuan123zhouhui/article/details/70698264 update_op = [mean_running.assign(mean_running * decay + mean * (1 - decay)), variance_running.assign(variance_running * decay + variance * (1 - decay))] # 指定先执行均值方差的更新运算 见 http://blog.csdn.net/u012436149/article/details/72084744 with tf.control_dependencies(update_op): return tf.identity(mean), tf.identity(variance) # 条件运算(https://applenob.github.io/tf_9.html) 按照作者这里的指定 是不进行指数衰减的 m, v = tf.cond(tf.Variable(False, trainable=False), update_running_stat, lambda: (mean_running, variance_running)) out = tf.nn.batch_normalization(out, m, v, beta, gamma, 1e-8) if activation == 'tanh': out = tf.nn.tanh(out) elif activation == 'sigmoid': out = tf.nn.sigmoid(out) conv1d_index += 1 return out # 极黑卷积层 https://www.zhihu.com/question/57414498 # 其输入参数中要包含一个大于 1 的rate 输出 channels与输入相同 aconv1d_index = 0 def aconv1d_layer(input_tensor, size, rate, activation, scale, bias): global aconv1d_index with tf.variable_scope('aconv1d_' + str(aconv1d_index)): shape = 
input_tensor.get_shape().as_list() # 利用 2 维极黑卷积函数计算相应 1 维卷积,expand_dims squeeze做了相应维度处理 # 实际 上一个 tf.nn.conv1d 在之前的tensorflow版本中是没有的,其的一个实现也是经过维度调整后调用 tf.nn.conv2d W = tf.get_variable('W', (1, size, shape[-1], shape[-1]), dtype=tf.float32, initializer=tf.random_uniform_initializer(minval=-scale, maxval=scale)) if bias: b = tf.get_variable('b', [shape[-1]], dtype=tf.float32, initializer=tf.constant_initializer(0)) out = tf.nn.atrous_conv2d(tf.expand_dims(input_tensor, dim=1), W, rate=rate, padding='SAME') out = tf.squeeze(out, [1]) if not bias: beta = tf.get_variable('beta', shape[-1], dtype=tf.float32, initializer=tf.constant_initializer(0)) gamma = tf.get_variable('gamma', shape[-1], dtype=tf.float32, initializer=tf.constant_initializer(1)) mean_running = tf.get_variable('mean', shape[-1], dtype=tf.float32, initializer=tf.constant_initializer(0)) variance_running = tf.get_variable('variance', shape[-1], dtype=tf.float32, initializer=tf.constant_initializer(1)) mean, variance = tf.nn.moments(out, axes=list(range(len(out.get_shape()) - 1))) def update_running_stat(): decay = 0.99 update_op = [mean_running.assign(mean_running * decay + mean * (1 - decay)), variance_running.assign(variance_running * decay + variance * (1 - decay))] with tf.control_dependencies(update_op): return tf.identity(mean), tf.identity(variance) m, v = tf.cond(tf.Variable(False, trainable=False), update_running_stat, lambda: (mean_running, variance_running)) out = tf.nn.batch_normalization(out, m, v, beta, gamma, 1e-8) if activation == 'tanh': out = tf.nn.tanh(out) elif activation == 'sigmoid': out = tf.nn.sigmoid(out) aconv1d_index += 1 return out def speech_to_text_network(n_dim=128, n_blocks=3): out = conv1d_layer(input_tensor=X, size=1, dim=n_dim, activation='tanh', scale=0.14, bias=False) def residual_block(input_sensor, size, rate): conv_filter = aconv1d_layer(input_tensor=input_sensor, size=size, rate=rate, activation='tanh', scale=0.03, bias=False) conv_gate = 
aconv1d_layer(input_tensor=input_sensor, size=size, rate=rate, activation='sigmoid', scale=0.03, bias=False) out = conv_filter * conv_gate out = conv1d_layer(out, size=1, dim=n_dim, activation='tanh', scale=0.08, bias=False) return out + input_sensor, out skip = 0 for _ in range(n_blocks): for r in [1, 2, 4, 8, 16]: out, s = residual_block(out, size=7, rate=r) skip += s logit = conv1d_layer(skip, size=1, dim=skip.get_shape().as_list()[-1], activation='tanh', scale=0.08, bias=False) # 最后卷积层输出是词汇表大小 logit = conv1d_layer(logit, size=1, dim=words_size, activation=None, scale=0.04, bias=True) return logit # 作者自己定义了优化器 class MaxPropOptimizer(tf.train.Optimizer): def __init__(self, learning_rate=0.001, beta2=0.999, use_locking=False, name="MaxProp"): super(MaxPropOptimizer, self).__init__(use_locking, name) self._lr = learning_rate self._beta2 = beta2 self._lr_t = None self._beta2_t = None def _prepare(self): self._lr_t = tf.convert_to_tensor(self._lr, name="learning_rate") self._beta2_t = tf.convert_to_tensor(self._beta2, name="beta2") def _create_slots(self, var_list): for v in var_list: self._zeros_slot(v, "m", self._name) def _apply_dense(self, grad, var): lr_t = tf.cast(self._lr_t, var.dtype.base_dtype) beta2_t = tf.cast(self._beta2_t, var.dtype.base_dtype) if var.dtype.base_dtype == tf.float16: eps = 1e-7 else: eps = 1e-8 m = self.get_slot(var, "m") m_t = m.assign(tf.maximum(beta2_t * m + eps, tf.abs(grad))) g_t = grad / m_t var_update = tf.assign_sub(var, lr_t * g_t) return tf.group(*[var_update, m_t]) def _apply_sparse(self, grad, var): return self._apply_dense(grad, var) def train_speech_to_text_network(wav_max_len): logit = speech_to_text_network() # CTC loss indices = tf.where(tf.not_equal(tf.cast(Y, tf.float32), 0.)) target = tf.SparseTensor(indices=indices, values=tf.gather_nd(Y, indices) - 1, dense_shape=tf.cast(tf.shape(Y), tf.int64)) loss = tf.nn.ctc_loss(target, logit, sequence_len, time_major=False) # optimizer lr = tf.Variable(0.001, dtype=tf.float32, 
trainable=False) optimizer = MaxPropOptimizer(learning_rate=lr, beta2=0.99) var_list = [t for t in tf.trainable_variables()] gradient = optimizer.compute_gradients(loss, var_list=var_list) optimizer_op = optimizer.apply_gradients(gradient) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) saver = tf.train.Saver(tf.global_variables()) for epoch in range(16): sess.run(tf.assign(lr, 0.001 * (0.97 ** epoch))) global pointer pointer = 0 for batch in range(n_batch): batches_wavs, batches_labels = get_next_batches(batch_size, wav_max_len) train_loss, _ = sess.run([loss, optimizer_op], feed_dict={X: batches_wavs, Y: batches_labels}) print(epoch, batch, train_loss) if epoch % 1 == 0: # 之前是5 saver.save(sess, r'D:\\tensorflow\\Speech_Recognition\\speech.module', global_step=epoch) # 训练 # train_speech_to_text_network() # 语音识别 # 把 batch_size 改为1 def speech_to_text(wav_file): wav, sr = librosa.load(wav_file, mono=True) mfcc = np.transpose(np.expand_dims(librosa.feature.mfcc(wav, sr), axis=0), [0, 2, 1]) logit = speech_to_text_network() saver = tf.train.Saver() with tf.Session() as sess: saver.restore(sess, tf.train.latest_checkpoint('.')) decoded = tf.transpose(logit, perm=[1, 0, 2]) decoded, _ = tf.nn.ctc_beam_search_decoder(decoded, sequence_len, merge_repeated=False) decoded = sess.run(decoded, feed_dict={X: mfcc}) # predict = tf.sparse_to_dense(decoded[0].indices, decoded[0].shape, decoded[0].values) + 1 print(decoded) predict = tf.sparse_to_dense(decoded[0].indices, decoded[0].dense_shape, decoded[0].values) + 1 # predict = decode_sparse_tensor(decoded[0]) predict = sess.run(predict) print(predict) if __name__ == "__main__": wav_files = get_wav_files() wav_files, labels = get_wav_label() print(u"样本数 :", len(wav_files)) all_words = [] for label in labels: # 字符分解 all_words += [word for word in label] counter = Counter(all_words) count_pairs = sorted(counter.items(), key=lambda x: -x[1]) words, _ = zip(*count_pairs) words_size = len(words) print(u"词汇表大小:", 
words_size) word_num_map = dict(zip(words, range(len(words)))) # 当字符不在已经收集的words中时,赋予其应当的num,这是一个动态的结果 to_num = lambda word: word_num_map.get(word, len(words)) # 将单个file的标签映射为num 返回对应list,最终all file组成嵌套list labels_vector = [list(map(to_num, label)) for label in labels] label_max_len = np.max([len(label) for label in labels_vector]) print(u"最长句子的字数:" + str(label_max_len)) # 下面仅仅计算了语音特征相应的最长的长度。 # 如果仅仅是计算长度是否需要施加变换后计算长度? parallel_read = False if parallel_read: wav_max_len = np.max(Parallel(n_jobs=7)(delayed(get_wav_length)(wav) for wav in wav_files)) else: wav_max_len = 673 print("最长的语音", wav_max_len) batch_size = 8 n_batch = len(wav_files) // batch_size X = tf.placeholder(dtype=tf.float32, shape=[batch_size, None, 20]) # 实际mfcc中的元素并非同号,不严格的情况下如此得到序列长度也是可行的 sequence_len = tf.reduce_sum(tf.cast(tf.not_equal(tf.reduce_sum(X, reduction_indices=2), 0.), tf.int32), reduction_indices=1) Y = tf.placeholder(dtype=tf.int32, shape=[batch_size, None]) train_speech_to_text_network(wav_max_len)
apache-2.0
JRock007/boxxy
dist/Boxxy.app/Contents/Resources/__boot__.py
1
1861
def _reset_sys_path(): # Clear generic sys.path[0] import sys, os resources = os.environ['RESOURCEPATH'] while sys.path[0] == resources: del sys.path[0] _reset_sys_path() def _chdir_resource(): import os os.chdir(os.environ['RESOURCEPATH']) _chdir_resource() def _disable_linecache(): import linecache def fake_getline(*args, **kwargs): return '' linecache.orig_getline = linecache.getline linecache.getline = fake_getline _disable_linecache() import re, sys cookie_re = re.compile(b"coding[:=]\s*([-\w.]+)") if sys.version_info[0] == 2: default_encoding = 'ascii' else: default_encoding = 'utf-8' def guess_encoding(fp): for i in range(2): ln = fp.readline() m = cookie_re.search(ln) if m is not None: return m.group(1).decode('ascii') return default_encoding def _run(): global __file__ import os, site sys.frozen = 'macosx_app' base = os.environ['RESOURCEPATH'] argv0 = os.path.basename(os.environ['ARGVZERO']) script = SCRIPT_MAP.get(argv0, DEFAULT_SCRIPT) path = os.path.join(base, script) sys.argv[0] = __file__ = path if sys.version_info[0] == 2: with open(path, 'rU') as fp: source = fp.read() + "\n" else: with open(path, 'rb') as fp: encoding = guess_encoding(fp) with open(path, 'r', encoding=encoding) as fp: source = fp.read() + '\n' exec(compile(source, path, 'exec'), globals(), globals()) def _setup_ctypes(): from ctypes.macholib import dyld import os frameworks = os.path.join(os.environ['RESOURCEPATH'], '..', 'Frameworks') dyld.DEFAULT_FRAMEWORK_FALLBACK.insert(0, frameworks) dyld.DEFAULT_LIBRARY_FALLBACK.insert(0, frameworks) _setup_ctypes() DEFAULT_SCRIPT='main.py' SCRIPT_MAP={} _run()
mit
ujuo/opencv
opencv-3.2.0/build/install/share/OpenCV/samples/python/find_obj.py
5
6389
#!/usr/bin/env python ''' Feature-based image matching sample. Note, that you will need the https://github.com/opencv/opencv_contrib repo for SIFT and SURF USAGE find_obj.py [--feature=<sift|surf|orb|akaze|brisk>[-flann]] [ <image1> <image2> ] --feature - Feature to use. Can be sift, surf, orb or brisk. Append '-flann' to feature name to use Flann-based matcher instead bruteforce. Press left mouse button on a feature point to see its matching point. ''' # Python 2/3 compatibility from __future__ import print_function import numpy as np import cv2 from common import anorm, getsize FLANN_INDEX_KDTREE = 1 # bug: flann enums are missing FLANN_INDEX_LSH = 6 def init_feature(name): chunks = name.split('-') if chunks[0] == 'sift': detector = cv2.xfeatures2d.SIFT_create() norm = cv2.NORM_L2 elif chunks[0] == 'surf': detector = cv2.xfeatures2d.SURF_create(800) norm = cv2.NORM_L2 elif chunks[0] == 'orb': detector = cv2.ORB_create(400) norm = cv2.NORM_HAMMING elif chunks[0] == 'akaze': detector = cv2.AKAZE_create() norm = cv2.NORM_HAMMING elif chunks[0] == 'brisk': detector = cv2.BRISK_create() norm = cv2.NORM_HAMMING else: return None, None if 'flann' in chunks: if norm == cv2.NORM_L2: flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5) else: flann_params= dict(algorithm = FLANN_INDEX_LSH, table_number = 6, # 12 key_size = 12, # 20 multi_probe_level = 1) #2 matcher = cv2.FlannBasedMatcher(flann_params, {}) # bug : need to pass empty dict (#1329) else: matcher = cv2.BFMatcher(norm) return detector, matcher def filter_matches(kp1, kp2, matches, ratio = 0.75): mkp1, mkp2 = [], [] for m in matches: if len(m) == 2 and m[0].distance < m[1].distance * ratio: m = m[0] mkp1.append( kp1[m.queryIdx] ) mkp2.append( kp2[m.trainIdx] ) p1 = np.float32([kp.pt for kp in mkp1]) p2 = np.float32([kp.pt for kp in mkp2]) kp_pairs = zip(mkp1, mkp2) return p1, p2, list(kp_pairs) def explore_match(win, img1, img2, kp_pairs, status = None, H = None): h1, w1 = img1.shape[:2] h2, w2 = 
img2.shape[:2] vis = np.zeros((max(h1, h2), w1+w2), np.uint8) vis[:h1, :w1] = img1 vis[:h2, w1:w1+w2] = img2 vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR) if H is not None: corners = np.float32([[0, 0], [w1, 0], [w1, h1], [0, h1]]) corners = np.int32( cv2.perspectiveTransform(corners.reshape(1, -1, 2), H).reshape(-1, 2) + (w1, 0) ) cv2.polylines(vis, [corners], True, (255, 255, 255)) if status is None: status = np.ones(len(kp_pairs), np.bool_) p1, p2 = [], [] # python 2 / python 3 change of zip unpacking for kpp in kp_pairs: p1.append(np.int32(kpp[0].pt)) p2.append(np.int32(np.array(kpp[1].pt) + [w1, 0])) green = (0, 255, 0) red = (0, 0, 255) white = (255, 255, 255) kp_color = (51, 103, 236) for (x1, y1), (x2, y2), inlier in zip(p1, p2, status): if inlier: col = green cv2.circle(vis, (x1, y1), 2, col, -1) cv2.circle(vis, (x2, y2), 2, col, -1) else: col = red r = 2 thickness = 3 cv2.line(vis, (x1-r, y1-r), (x1+r, y1+r), col, thickness) cv2.line(vis, (x1-r, y1+r), (x1+r, y1-r), col, thickness) cv2.line(vis, (x2-r, y2-r), (x2+r, y2+r), col, thickness) cv2.line(vis, (x2-r, y2+r), (x2+r, y2-r), col, thickness) vis0 = vis.copy() for (x1, y1), (x2, y2), inlier in zip(p1, p2, status): if inlier: cv2.line(vis, (x1, y1), (x2, y2), green) cv2.imshow(win, vis) def onmouse(event, x, y, flags, param): cur_vis = vis if flags & cv2.EVENT_FLAG_LBUTTON: cur_vis = vis0.copy() r = 8 m = (anorm(np.array(p1) - (x, y)) < r) | (anorm(np.array(p2) - (x, y)) < r) idxs = np.where(m)[0] kp1s, kp2s = [], [] for i in idxs: (x1, y1), (x2, y2) = p1[i], p2[i] col = (red, green)[status[i]] cv2.line(cur_vis, (x1, y1), (x2, y2), col) kp1, kp2 = kp_pairs[i] kp1s.append(kp1) kp2s.append(kp2) cur_vis = cv2.drawKeypoints(cur_vis, kp1s, None, flags=4, color=kp_color) cur_vis[:,w1:] = cv2.drawKeypoints(cur_vis[:,w1:], kp2s, None, flags=4, color=kp_color) cv2.imshow(win, cur_vis) cv2.setMouseCallback(win, onmouse) return vis if __name__ == '__main__': print(__doc__) import sys, getopt opts, args = 
getopt.getopt(sys.argv[1:], '', ['feature=']) opts = dict(opts) feature_name = opts.get('--feature', 'brisk') try: fn1, fn2 = args except: fn1 = '../data/box.png' fn2 = '../data/box_in_scene.png' img1 = cv2.imread(fn1, 0) img2 = cv2.imread(fn2, 0) detector, matcher = init_feature(feature_name) if img1 is None: print('Failed to load fn1:', fn1) sys.exit(1) if img2 is None: print('Failed to load fn2:', fn2) sys.exit(1) if detector is None: print('unknown feature:', feature_name) sys.exit(1) print('using', feature_name) kp1, desc1 = detector.detectAndCompute(img1, None) kp2, desc2 = detector.detectAndCompute(img2, None) print('img1 - %d features, img2 - %d features' % (len(kp1), len(kp2))) def match_and_draw(win): print('matching...') raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) #2 p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches) if len(p1) >= 4: H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0) print('%d / %d inliers/matched' % (np.sum(status), len(status))) else: H, status = None, None print('%d matches found, not enough for homography estimation' % len(p1)) vis = explore_match(win, img1, img2, kp_pairs, status, H) match_and_draw('find_obj') cv2.waitKey() cv2.destroyAllWindows()
gpl-3.0
ccastell/Transfer-System
Website/TransferSystem/TransferSystem/settings.py
1
3511
""" Django settings for TransferSystem project. Generated by 'django-admin startproject' using Django 1.11. For more information on this file, see https://docs.djangoproject.com/en/1.11/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.11/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'vqmx94p9d3ee6qw@@lra3oe4-_%o-92cv)oe5*@$7z3&)5qro)' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'polls', 'TransferApp', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'TransferSystem.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ os.path.join(BASE_DIR, "TransferApp/templates"), ], }, ], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 
'TransferSystem.wsgi.application' # Database # https://docs.djangoproject.com/en/1.11/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.11/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True STATICFILES_DIRS = [ os.path.join(BASE_DIR, "static"), os.path.join(BASE_DIR, "TransferApp/static"), ] # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.11/howto/static-files/ STATIC_URL = '/static/'
apache-2.0
natefoo/ansible-modules-extras
monitoring/sensu_check.py
14
13493
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2014, Anders Ingemann <aim@secoya.dk> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # DOCUMENTATION = ''' --- module: sensu_check short_description: Manage Sensu checks version_added: 2.0 description: - Manage the checks that should be run on a machine by I(Sensu). - Most options do not have a default and will not be added to the check definition unless specified. - All defaults except I(path), I(state), I(backup) and I(metric) are not managed by this module, - they are simply specified for your convenience. options: name: description: - The name of the check - This is the key that is used to determine whether a check exists required: true state: description: - Whether the check should be present or not choices: [ 'present', 'absent' ] required: false default: present path: description: - Path to the json file of the check to be added/removed. - Will be created if it does not exist (unless I(state=absent)). - The parent folders need to exist when I(state=present), otherwise an error will be thrown required: false default: /etc/sensu/conf.d/checks.json backup: description: - Create a backup file (if yes), including the timestamp information so - you can get the original file back if you somehow clobbered it incorrectly. 
choices: [ 'yes', 'no' ] required: false default: no command: description: - Path to the sensu check to run (not required when I(state=absent)) required: true handlers: description: - List of handlers to notify when the check fails required: false default: [] subscribers: description: - List of subscribers/channels this check should run for - See sensu_subscribers to subscribe a machine to a channel required: false default: [] interval: description: - Check interval in seconds required: false default: null timeout: description: - Timeout for the check required: false default: 10 handle: description: - Whether the check should be handled or not choices: [ 'yes', 'no' ] required: false default: yes subdue_begin: description: - When to disable handling of check failures required: false default: null subdue_end: description: - When to enable handling of check failures required: false default: null dependencies: description: - Other checks this check depends on, if dependencies fail, - handling of this check will be disabled required: false default: [] metric: description: - Whether the check is a metric choices: [ 'yes', 'no' ] required: false default: no standalone: description: - Whether the check should be scheduled by the sensu client or server - This option obviates the need for specifying the I(subscribers) option choices: [ 'yes', 'no' ] required: false default: no publish: description: - Whether the check should be scheduled at all. 
- You can still issue it via the sensu api choices: [ 'yes', 'no' ] required: false default: yes occurrences: description: - Number of event occurrences before the handler should take action required: false default: 1 refresh: description: - Number of seconds handlers should wait before taking second action required: false default: null aggregate: description: - Classifies the check as an aggregate check, - making it available via the aggregate API choices: [ 'yes', 'no' ] required: false default: no low_flap_threshold: description: - The low threshhold for flap detection required: false default: null high_flap_threshold: description: - The low threshhold for flap detection required: false default: null custom: version_added: "2.1" description: - A hash/dictionary of custom parameters for mixing to the configuration. - You can't rewrite others module parameters using this required: false default: {} source: version_added: "2.1" description: - The check source, used to create a JIT Sensu client for an external resource (e.g. a network switch). required: false default: null requirements: [ ] author: "Anders Ingemann (@andsens)" ''' EXAMPLES = ''' # Fetch metrics about the CPU load every 60 seconds, # the sensu server has a handler called 'relay' which forwards stats to graphite - name: get cpu metrics sensu_check: name=cpu_load command=/etc/sensu/plugins/system/cpu-mpstat-metrics.rb metric=yes handlers=relay subscribers=common interval=60 # Check whether nginx is running - name: check nginx process sensu_check: name=nginx_running command='/etc/sensu/plugins/processes/check-procs.rb -f /var/run/nginx.pid' handlers=default subscribers=nginx interval=60 # Stop monitoring the disk capacity. # Note that the check will still show up in the sensu dashboard, # to remove it completely you need to issue a DELETE request to the sensu api. 
- name: check disk sensu_check: name=check_disk_capacity state=absent ''' try: import json except ImportError: try: import simplejson as json except ImportError: # Let snippet from module_utils/basic.py return a proper error in this case pass def sensu_check(module, path, name, state='present', backup=False): changed = False reasons = [] stream = None try: try: stream = open(path, 'r') config = json.load(stream) except IOError, e: if e.errno is 2: # File not found, non-fatal if state == 'absent': reasons.append('file did not exist and state is `absent\'') return changed, reasons config = {} else: module.fail_json(msg=str(e)) except ValueError: msg = '{path} contains invalid JSON'.format(path=path) module.fail_json(msg=msg) finally: if stream: stream.close() if 'checks' not in config: if state == 'absent': reasons.append('`checks\' section did not exist and state is `absent\'') return changed, reasons config['checks'] = {} changed = True reasons.append('`checks\' section did not exist') if state == 'absent': if name in config['checks']: del config['checks'][name] changed = True reasons.append('check was present and state is `absent\'') if state == 'present': if name not in config['checks']: check = {} config['checks'][name] = check changed = True reasons.append('check was absent and state is `present\'') else: check = config['checks'][name] simple_opts = ['command', 'handlers', 'subscribers', 'interval', 'timeout', 'handle', 'dependencies', 'standalone', 'publish', 'occurrences', 'refresh', 'aggregate', 'low_flap_threshold', 'high_flap_threshold', 'source', ] for opt in simple_opts: if module.params[opt] is not None: if opt not in check or check[opt] != module.params[opt]: check[opt] = module.params[opt] changed = True reasons.append('`{opt}\' did not exist or was different'.format(opt=opt)) else: if opt in check: del check[opt] changed = True reasons.append('`{opt}\' was removed'.format(opt=opt)) if module.params['custom']: # Convert to json custom_params = 
module.params['custom'] overwrited_fields = set(custom_params.keys()) & set(simple_opts + ['type','subdue','subdue_begin','subdue_end']) if overwrited_fields: msg = 'You can\'t overwriting standard module parameters via "custom". You are trying overwrite: {opt}'.format(opt=list(overwrited_fields)) module.fail_json(msg=msg) for k,v in custom_params.items(): if k in config['checks'][name]: if not config['checks'][name][k] == v: changed = True reasons.append('`custom param {opt}\' was changed'.format(opt=k)) else: changed = True reasons.append('`custom param {opt}\' was added'.format(opt=k)) check[k] = v simple_opts += custom_params.keys() # Remove obsolete custom params for opt in set(config['checks'][name].keys()) - set(simple_opts + ['type','subdue','subdue_begin','subdue_end']): changed = True reasons.append('`custom param {opt}\' was deleted'.format(opt=opt)) del check[opt] if module.params['metric']: if 'type' not in check or check['type'] != 'metric': check['type'] = 'metric' changed = True reasons.append('`type\' was not defined or not `metric\'') if not module.params['metric'] and 'type' in check: del check['type'] changed = True reasons.append('`type\' was defined') if module.params['subdue_begin'] is not None and module.params['subdue_end'] is not None: subdue = {'begin': module.params['subdue_begin'], 'end': module.params['subdue_end'], } if 'subdue' not in check or check['subdue'] != subdue: check['subdue'] = subdue changed = True reasons.append('`subdue\' did not exist or was different') else: if 'subdue' in check: del check['subdue'] changed = True reasons.append('`subdue\' was removed') if changed and not module.check_mode: if backup: module.backup_local(path) try: try: stream = open(path, 'w') stream.write(json.dumps(config, indent=2) + '\n') except IOError, e: module.fail_json(msg=str(e)) finally: if stream: stream.close() return changed, reasons def main(): arg_spec = {'name': {'type': 'str', 'required': True}, 'path': {'type': 'str', 'default': 
'/etc/sensu/conf.d/checks.json'}, 'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']}, 'backup': {'type': 'bool', 'default': 'no'}, 'command': {'type': 'str'}, 'handlers': {'type': 'list'}, 'subscribers': {'type': 'list'}, 'interval': {'type': 'int'}, 'timeout': {'type': 'int'}, 'handle': {'type': 'bool'}, 'subdue_begin': {'type': 'str'}, 'subdue_end': {'type': 'str'}, 'dependencies': {'type': 'list'}, 'metric': {'type': 'bool', 'default': 'no'}, 'standalone': {'type': 'bool'}, 'publish': {'type': 'bool'}, 'occurrences': {'type': 'int'}, 'refresh': {'type': 'int'}, 'aggregate': {'type': 'bool'}, 'low_flap_threshold': {'type': 'int'}, 'high_flap_threshold': {'type': 'int'}, 'custom': {'type': 'dict'}, 'source': {'type': 'str'}, } required_together = [['subdue_begin', 'subdue_end']] module = AnsibleModule(argument_spec=arg_spec, required_together=required_together, supports_check_mode=True) if module.params['state'] != 'absent' and module.params['command'] is None: module.fail_json(msg="missing required arguments: %s" % ",".join(['command'])) path = module.params['path'] name = module.params['name'] state = module.params['state'] backup = module.params['backup'] changed, reasons = sensu_check(module, path, name, state, backup) module.exit_json(path=path, changed=changed, msg='OK', name=name, reasons=reasons) from ansible.module_utils.basic import * main()
gpl-3.0
thomasleese/chatterbox
chatterbox/cli.py
1
1394
from argparse import ArgumentParser

from .database import Database
from .importer import Importer
from .irc import Bot
from .generator import Generator


def import_command(args):
    """Import a directory of source text into the database."""
    database = Database(args.database)
    importer = Importer(database)
    importer.import_directory(args.path)


def speak_command(args):
    """Generate a single sentence from the database and print it."""
    database = Database(args.database)
    generator = Generator(database)
    print(generator.generate_sentence())


def irc_command(args):
    """Run an IRC bot that speaks using the sentence generator."""
    database = Database(args.database)
    generator = Generator(database)
    bot = Bot(generator, args.channels, args.nickname, args.server, args.port)
    bot.start()


def main():
    """Command-line entry point: parse arguments and dispatch to a sub-command."""
    parser = ArgumentParser()
    parser.add_argument('--database', default='chatterbox.sqlite3')

    subparsers = parser.add_subparsers()

    parser_import = subparsers.add_parser('import')
    parser_import.add_argument('path')
    parser_import.set_defaults(func=import_command)

    parser_speak = subparsers.add_parser('speak')
    parser_speak.set_defaults(func=speak_command)

    parser_irc = subparsers.add_parser('irc')
    parser_irc.add_argument('server')
    parser_irc.add_argument('nickname')
    parser_irc.add_argument('channels', metavar='channel', nargs='+')
    parser_irc.add_argument('-p', '--port', default=6667, type=int)
    parser_irc.set_defaults(func=irc_command)

    args = parser.parse_args()
    # BUG FIX: on Python 3, sub-commands created via add_subparsers() are
    # optional by default, so running the CLI with no sub-command used to
    # crash with AttributeError because 'func' was never set.  Show usage
    # and exit with a non-zero status instead.
    if not hasattr(args, 'func'):
        parser.print_help()
        parser.exit(2)
    args.func(args)
mit
gusai-francelabs/datafari
windows/python/Lib/test/crashers/nasty_eq_vs_dict.py
168
1046
# from http://mail.python.org/pipermail/python-dev/2001-June/015239.html # if you keep changing a dictionary while looking up a key, you can # provoke an infinite recursion in C # At the time neither Tim nor Michael could be bothered to think of a # way to fix it. class Yuck: def __init__(self): self.i = 0 def make_dangerous(self): self.i = 1 def __hash__(self): # direct to slot 4 in table of size 8; slot 12 when size 16 return 4 + 8 def __eq__(self, other): if self.i == 0: # leave dict alone pass elif self.i == 1: # fiddle to 16 slots self.__fill_dict(6) self.i = 2 else: # fiddle to 8 slots self.__fill_dict(4) self.i = 1 return 1 def __fill_dict(self, n): self.i = 0 dict.clear() for i in range(n): dict[i] = i dict[self] = "OK!" y = Yuck() dict = {y: "OK!"} z = Yuck() y.make_dangerous() print dict[z]
apache-2.0
AdaptiveApplications/carnegie
tarc_bus_locator_client/geopy-0.95.1/geopy/geohash.py
82
2525
from geopy import Point


class Geohash(object):
    """Encode latitude/longitude points to geohash strings and back.

    The geohash is built by interleaved binary bisection of the longitude
    (even bits) and latitude (odd bits) intervals, five bits per output
    character, using the standard geohash base-32 alphabet.
    """

    # Standard geohash base-32 alphabet: digits plus lowercase letters,
    # excluding the easily confused 'a', 'i', 'l' and 'o'.
    ENCODE_MAP = '0123456789bcdefghjkmnpqrstuvwxyz'
    DECODE_MAP = dict([(char, i) for i, char in enumerate(ENCODE_MAP)])

    def __init__(self, point_class=Point, precision=12):
        # point_class is used by decode() to construct the returned point.
        # NOTE(review): encode() builds a plain Point regardless of
        # point_class; preserved as-is for backward compatibility.
        self.point_class = point_class
        self.precision = precision

    def encode(self, *args, **kwargs):
        """Return the geohash string for a point.

        Accepts any arguments the Point constructor accepts, plus an
        optional ``precision`` keyword overriding the instance default
        (number of output characters).
        """
        precision = kwargs.pop('precision', self.precision)
        point = Point(*args, **kwargs)

        lat_min, latitude, lat_max = -90, 0, 90
        long_min, longitude, long_max = -180, 0, 180
        # Renamed from `bytes` (shadowed the builtin); also dropped an
        # unused local `string` and switched xrange -> range (identical
        # behaviour at these sizes, Python 2/3 compatible).
        hash_bytes = []
        odd_bit = False
        for i in range(precision):
            byte = 0
            for bit in (16, 8, 4, 2, 1):
                if odd_bit:
                    # Odd bits halve the latitude interval.
                    if point.latitude >= latitude:
                        byte |= bit
                        lat_min = latitude
                    else:
                        lat_max = latitude
                    latitude = (lat_min + lat_max) / 2.
                else:
                    # Even bits halve the longitude interval.
                    if point.longitude >= longitude:
                        byte |= bit
                        long_min = longitude
                    else:
                        long_max = longitude
                    longitude = (long_min + long_max) / 2.
                odd_bit = not odd_bit
            hash_bytes.append(byte)

        return ''.join([self.ENCODE_MAP[byte] for byte in hash_bytes])

    def decode(self, string):
        """Decode a geohash string into a point.

        Returns an instance of ``self.point_class`` with an ``error``
        attribute attached: the (latitude, longitude) half-widths of the
        final bounding box.  Raises ValueError on characters outside the
        geohash alphabet.
        """
        lat_min, latitude, lat_max = -90, 0, 90
        long_min, longitude, long_max = -180, 0, 180
        odd_bit = False
        for char in string:
            try:
                byte = self.DECODE_MAP[char]
            except KeyError:
                # BUG FIX: the message previously interpolated an undefined
                # name `c`, so invalid input raised NameError instead of the
                # intended ValueError.
                raise ValueError("Invalid hash: unexpected character %r." % (char,))
            else:
                for bit in (16, 8, 4, 2, 1):
                    if odd_bit:
                        if byte & bit:
                            lat_min = latitude
                        else:
                            lat_max = latitude
                        latitude = (lat_min + lat_max) / 2.
                    else:
                        if byte & bit:
                            long_min = longitude
                        else:
                            long_max = longitude
                        longitude = (long_min + long_max) / 2.
                    odd_bit = not odd_bit

        point = self.point_class((latitude, longitude))
        point.error = (lat_max - latitude, long_max - longitude)
        return point
mit
tos-kamiya/pyrem_torq
src/samples/tinyawk/tinyawk.py
1
17046
# tinyawk # an interpreter of a extremely small subset of AWK programming language. # supported reserved words are: # BEGIN END NF NR if else next print while # ( ) [ ] { } ; , < <= > >= == != && || ! - + * / % $ # note that: # - escape sequences in string are not supported. # - regular expression is Python's one. # - all numbers are integers, no floating points. # - assignment to NF, NR or $* is undefined behavior. from pyrem_torq import * from pyrem_torq.expression import * from pyrem_torq.treeseq import seq_split_nodes_of_label def split_to_str(text): p = re.compile("|".join([ r"/[^/\r\n]*/", r'"[^"\r\n]*"', r"\d+", # literals (regex, string, integer) r"#[^\r\n]*", # comment r"[ \t]+", r"\r\n|\r|\n", # white spaces, newline r"[a-zA-Z_](\w|_)*", # identifier r"[<>!=]=|&&|[|][|]", r"[-+*/%<>!=()${},;]|\[|\]", # operators r"." # invalid chars ])) return [ 'code' ] + utility.split_to_strings(text, pattern=p) def tokenizing_expr_iter(): # identify reserved words, literals, identifiers e = Search(script.compile(r""" (r_BEGIN <- "BEGIN") | (r_END <- "END") | (r_next <- "next") | (r_print <- "print") | (r_if <- "if") | (r_else <- "else") | (r_while <- "while") | (id <- r"^[a-zA-Z_]") | (l_integer <- r"^[0-9]") | (l_string <- r"^\"") | (l_regex <- r"^/") | (op_gt <- ">") | (op_ge <- ">=") | (op_lt <- "<") | (op_le <- "<=") | (op_ne <- "!=") | (op_eq <- "==") | (op_and <- "&&") | (op_or <- "||") | (op_plus <- "+") | (op_minus <- "-") | (op_mul <- "*") | (op_div <- "/") | (op_mod <- "%") | (op_assign <- "=") | (op_not <- "!") | (op_dollar <- "$") | (LP <- "(") | (RP <- ")") | (LB <- "{") | (RB <- "}") | (LK <- "[") | (RK <- "]") | (comma <- ",") | (semicolon <- ";") | (newline <- "\r\n" | "\r" | "\n") | (null <- r"^[ \t#]") | any, error("unexpected character") ;""")) yield "identify reserved words, literals, identifiers", e # identify statement-terminating new-line chars e = Search(script.compile(r""" (comma | LB | op_or | op_and | r_else), (null <- newline) ;""")) yield 
"remove neglected new-line characters", e def stmt_parsing_expr_iter(): # parse pattern-actions and blocks. stmtLevelBlock = script.compile(""" (block <- (null <- LB), *(req^(RB), @0), (newline <-), (null <- RB)) # '}' can be a terminator of a statement, so insert a dummy new-line just before '}' | any ;""") actionLevelBlock = script.compile(r""" (pa <- ((r_BEGIN | r_END) | (expr_empty <-)), (block <- (null <- LB), *(req^(RB), @stmtLevelBlock), (newline <-), (null <- RB)), (null <- newline)) | (pa <- (expr <- +any^(LB | newline)), ((block <- (null <- LB), *(req^(RB), @stmtLevelBlock), (newline <-), (null <- RB)) | (block_empty <-)), (null <- newline)) | (null <- newline) ;""", replaces={ 'stmtLevelBlock' : stmtLevelBlock }) e = [0,None] * actionLevelBlock yield "parse pattern-actions and blocks", e # parse statements getSimpleStmt = script.compile(r""" (stmt <- (r_null_statement <-), semicolon | (r_print_empty <- r_print), (semicolon | newline) | r_print, (expr <- +any^(semicolon | newline)), (semicolon | newline) | r_print, error("invalid print statement") | r_next, (semicolon | newline) | r_next, error("invaild 'next' statement") | (expr <- +any^(semicolon | newline)), (semicolon | newline)) ;""") e = Search(script.compile(r""" (stmt <- (r_if, (null <- LP), (expr <- +any^(newline | RP)), (null <- RP), ?(null <- newline), ((block :: ~@0) | @getSimpleStmt), *((r_elif <- r_else, r_if), (null <- LP), (expr <- +any^(newline | RP)), (null <- RP), ?(null <- newline), ((block :: ~@0) | @getSimpleStmt)), ?(r_else, ((block :: ~@0) | @getSimpleStmt)))) | r_if, error("invalid 'if' statement") | r_else, error("'else' doesn't have a matching 'if'") | (stmt <- r_while, (null <- LP), (expr <- +any^(newline | RP)), (null <- RP), ((block :: ~@0) | @getSimpleStmt)) | r_while, error("invalid 'while' statement") | @getSimpleStmt | (null <- newline) | (block :: ~@0) | (pa :: (r_BEGIN | r_END | expr_empty | expr), (block :: ~@0)) | any, error("unexpected token") ;""", replaces={ 
'getSimpleStmt' : getSimpleStmt })) yield "parse statements", e def expr_parsing_expr_iter(): def operator_parser_iter(): def markNull(expr): return BuildToNode("null", expr) n = Node kit = extra.operator_builer.OperatorBuilder() kit.atomic_term_expr = Or(n("l_integer"), n("l_string"), n("l_regex"), n("id")) kit.composed_term_node_labels = ( "expr", ) kit.generated_term_label = "expr" yield "paren", kit.build_O_expr(( markNull(n("LP")), markNull(n("RP")) )) # Drop parentheses chars yield "index", kit.build_tO_expr(( n("LK"), n("RK") )) yield "unary ops", kit.build_Ot_expr(n("op_minus"), n("op_plus"), n("op_not"), n("op_dollar")) yield "binary mul/div", kit.build_tOt_expr(n("op_mul"), n("op_div"), n("op_mod")) yield "binary add/sub", kit.build_tOt_expr(n("op_minus"), n("op_plus")) yield "binary string concatenate", kit.build_tOt_expr(InsertNode("op_cat")) # The concatenation operator is epsilon, so that insert a token 'op_cat' where the operator appears yield "binary compare ops", kit.build_tOt_expr(\ n("op_gt"), n("op_ge"), n("op_lt"), n("op_le"), n("op_ne"), n("op_eq")) yield "binary logical-and", kit.build_tOt_expr(n("op_and")) yield "binary logical-or", kit.build_tOt_expr(n("op_or")) yield "comma", kit.build_tOt_expr(n("comma")) yield "binary assign op", kit.build_tOt_expr(n("op_assign")) for desc, eParser in operator_parser_iter(): e = Search(script.compile("""(expr :: @eParser) | (stmt :: ~@0) | (block :: ~@0) | (pa :: ~@0) ;""", replaces={ "eParser" : eParser })) yield "expression " + desc, e yield "remove redundant paren", Search(script.compile(""" req(expr :: expr | l_integer | l_string | l_regex | id), ([]expr :: @0) | (expr :: id, LK, *(req^(RK), @0), RK) | (expr :: ~@0) | (stmt :: ~@0) | (block :: ~@0) | (pa :: ~@0) | LB, error("unclosed '{'") | RB, error("unexpected '}'") | LP, error("unclosed '('") | RP, error("unexpected ')'") | id, LK, error("unclosed '['") | LK, error("unexpected '['") | RK, error("unexpected ']'") | any ;""")) someExpr = 
script.compile("(l_integer | l_string | l_regex | id | (expr :: ~@0));") yield "reform comma expressions", Search(script.compile(""" (r_print, ([]expr :: @someExpr, +(comma, @someExpr))) | (expr :: @someExpr, LK, ([]expr :: @someExpr, +((null <- comma), @someExpr), RK)) | (stmt :: ~@0) | (block :: ~@0) | (pa :: ~@0) | comma, error("unexpected comma (,)") ;""", replaces={ "someExpr" : someExpr })) class ExprInterpreter(object): def __init__(self): self.nr, self.line, self.curFields = None, None, None self.varTable = {} def eval_expr(self, exprNode): def cast_to_int(s): return 0 if not s else int(s) # an empty string is converted to 0 if exprNode[0] != "expr": assert len(exprNode) == 2 lbl, val = exprNode[0], exprNode[1] if lbl == "id": return self.varTable[val] # may raise KeyError elif lbl == "l_integer": return int(val) elif lbl == "l_string": return val[1:-1] # remove enclosing double quotes. elif lbl == "l_regex": reString = val[1:-1] # remove /-chars. return 1 if re.match(reString, self.line) else 0 assert False seq = exprNode[1:] assert seq if seq[0][0] in ( "op_minus", "op_plus", "op_not", "op_dollar" ): # unary assert len(seq) >= 2 value = self.eval_expr(seq[-1]) for op in reversed(seq[:-1]): opLbl = op[0] if opLbl == "op_minus": value = -cast_to_int(value) elif opLbl == "op_plus": value = cast_to_int(value) elif opLbl == "op_not": value = (1 if value in (0, '') else 0) elif opLbl == "op_dollar": index = cast_to_int(value) if index < 0: raise IndexError value = self.curFields[index] # may raise IndexError else: assert False return value assert len(seq) >= 3 seq1lbl = seq[1][0] if seq1lbl == "LK": # a[...] 
#assert seq[0][0] == "id"; assert seq[-1][0] == "RK" indexStr = "\t".join(str(self.eval_expr(v)) for v in seq[2::2]) var = self.varTable.setdefault(seq[0][1], {}) return var.setdefault(indexStr, "") if seq1lbl == "op_cat": return "".join(str(self.eval_expr(v)) for v in seq[0::2]) if seq1lbl in ( "op_mul", "op_div", "op_mod", "op_minus", "op_plus" ): value = cast_to_int(self.eval_expr(seq[0])) for op, rightExpr in zip(seq[1::2], seq[2::2]): opLbl = op[0] rightValue = cast_to_int(self.eval_expr(rightExpr)) if opLbl == "op_mul": value = value * rightValue elif opLbl == "op_div": value = value // rightValue elif opLbl == "op_mod": value = value % rightValue elif opLbl == "op_minus": value = value - rightValue elif opLbl == "op_plus": value = value + rightValue else: assert False return value if seq1lbl in ( "op_gt", "op_ge", "op_lt", "op_le", "op_ne", "op_eq" ): assert len(seq) == 3 leftValue = self.eval_expr(seq[0]) opLbl = seq[1][0] rightValue = self.eval_expr(seq[2]) if isinstance(leftValue, str): rightValue = str(rightValue) elif isinstance(rightValue, str): leftValue = str(leftValue) if opLbl == "op_gt": return leftValue > rightValue elif opLbl == "op_ge": return leftValue >= rightValue elif opLbl == "op_lt": return leftValue < rightValue elif opLbl == "op_le": return leftValue <= rightValue elif opLbl == "op_ne": return leftValue != rightValue elif opLbl == "op_eq": return leftValue == rightValue else: assert False return value if seq1lbl == "op_and": for e in seq[0::2]: value = self.eval_expr(e) if value in (0, ''): return 0 return value if seq1lbl == "op_or": for e in seq[0::2]: value = self.eval_expr(e) if value not in (0, ''): return value return 0 if seq1lbl == "op_assign": assingedValue = self.eval_expr(seq[-1]) for e, op in zip(seq[0::2], seq[1::2]): assert op[0] == "op_assign" if e[0] == "id": self.varTable[e[1]] = assingedValue elif e[0] == "expr" and len(e) >= 5 and e[2][0] == "LK": #assert e[1][0] == "id"; assert e[-1][0] == "RK" indexStr = 
"\t".join(str(self.eval_expr(v)) for v in e[3::2]) var = self.varTable.setdefault(e[1][1], {}) var[indexStr] = assingedValue else: assert False # invalid l-value return assingedValue assert False # unknown operator/invalid expression class StmtInterpreter(ExprInterpreter): class NextStmt(Exception): pass def __init__(self, ast): ExprInterpreter.__init__(self) self.beginActions = []; self.endActions = []; self.patternActions = [] d = { "r_BEGIN" : self.beginActions, "r_END" : self.endActions } for paNode in ast[1:]: exprNode, blockNode = paNode[1], paNode[2] d.get(exprNode[0], self.patternActions).append(( exprNode, blockNode )) def expects_input(self): return len(self.patternActions + self.endActions) > 0 def apply_begin(self): self.nr, self.line, self.curFields = 0, None, [] self.varTable.update([ ( "NR", self.nr ), ( "NF", 0 ) ]) for _, blockNode in self.beginActions: self.exec_stmt(blockNode) def apply_end(self): self.nr, self.line, self.curFields = -1, None, [] self.varTable.update([ ( "NF", 0 ) ]) for _, blockNode in self.endActions: self.exec_stmt(blockNode) def apply_line(self, nr, line): assert nr >= 1 fields = line.split() self.nr, self.line, self.curFields = nr, line, [ line ] + fields self.varTable.update([ ( "NR", self.nr ), ( "NF", len(fields) ) ]) try: for exprNode, blockNode in self.patternActions: if exprNode[0] == "expr_empty" or self.eval_expr(exprNode): self.exec_stmt(blockNode) except StmtInterpreter.NextStmt: pass def exec_stmt(self, stmtNode): if stmtNode[-1][0] in ("newline", "semicolon"): stmtNode = stmtNode[:-1] if stmtNode[0] == "block_empty": stmtNode = [ "stmt", "r_print_empty" ] if stmtNode[0] == "block": for stmt in stmtNode[1:]: self.exec_stmt(stmt) return assert stmtNode[0] == "stmt" cmdLbl = stmtNode[1][0] if cmdLbl == "r_if": seq = stmtNode[2:] while seq: if self.eval_expr(seq[0]) not in (0, ''): self.exec_stmt(seq[1]) break # while seq if len(seq) == 2: break # while seq if seq[2][0] == "r_else": self.exec_stmt(seq[3]) break # 
while seq seq = seq[3:] elif cmdLbl == "r_while": while self.eval_expr(stmtNode[2]) not in (0, ''): self.exec_stmt(stmtNode[3]) elif cmdLbl == "r_print": print " ".join(str(self.eval_expr(v)) for v in stmtNode[2::2]) elif cmdLbl == "r_print_empty": print self.line elif cmdLbl == "r_next": raise StmtInterpreter.NextStmt elif cmdLbl == "r_null_statement": pass else: assert len(stmtNode) == 2 self.eval_expr(stmtNode[1]) def main(debugTrace=False): import sys if len(sys.argv) == 1: print "usage: tinyawk -f <script> [ <input> ]\nAn interpreter of a awk-like small language." return assert len(sys.argv) in (3, 4) assert sys.argv[1] == "-f" scriptFile = sys.argv[2] inputFile = sys.argv[3] if len(sys.argv) == 4 else None debugWrite = sys.stderr.write if debugTrace else None f = open(scriptFile, "r") try: script = f.read() finally: f.close() script = script + "\n" # prepare for missing new-line char at the last line # parsing seq = split_to_str(script) des = [] des.extend(tokenizing_expr_iter()) des.extend(stmt_parsing_expr_iter()) des.extend(expr_parsing_expr_iter()) for desc, expr in des: if debugWrite: debugWrite("\n".join(treeseq.seq_pretty(treeseq.seq_remove_strattrs(seq))) + "\n") # prints a seq debugWrite("step: %s\n" % desc) newSeq = expr.parse(seq) if newSeq is None: sys.exit("parse error") seq = newSeq seq = seq_split_nodes_of_label(seq, "null")[0] if debugWrite: debugWrite("\n".join(treeseq.seq_pretty(treeseq.seq_remove_strattrs(seq))) + "\n") # prints a seq seq = treeseq.seq_remove_strattrs(seq) # interpretation interp = StmtInterpreter(seq) def dbgwrite(): if debugWrite: debugWrite("variables=%s\n" % repr(interp.varTable)) interp.apply_begin(); dbgwrite() if interp.expects_input(): f = open(inputFile, "r") if inputFile else sys.stdin for lnum, L in enumerate(f): interp.apply_line(lnum+1, L.rstrip()); dbgwrite() if inputFile: f.close() interp.apply_end(); dbgwrite() if __name__ == '__main__': main(debugTrace=True)
mit
mcltn/ansible-modules-extras
cloud/vmware/vmware_migrate_vmk.py
71
7217
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Joseph Callen <jcallen () csc.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = '''
---
module: vmware_migrate_vmk
short_description: Migrate a VMK interface from VSS to VDS
description:
    - Migrate a VMK interface from VSS to VDS
version_added: 2.0
author: "Joseph Callen (@jcpowermac), Russell Teague (@mtnbikenc)"
notes:
    - Tested on vSphere 5.5
requirements:
    - "python >= 2.6"
    - PyVmomi
options:
    esxi_hostname:
        description:
            - ESXi hostname to be managed
        required: True
    device:
        description:
            - VMK interface name
        required: True
    current_switch_name:
        description:
            - Switch VMK interface is currently on
        required: True
    current_portgroup_name:
        description:
            - Portgroup name VMK interface is currently on
        required: True
    migrate_switch_name:
        description:
            - Switch name to migrate VMK interface to
        required: True
    migrate_portgroup_name:
        description:
            - Portgroup name to migrate VMK interface to
        required: True
extends_documentation_fragment: vmware.documentation
'''

EXAMPLES = '''
Example from Ansible playbook

    - name: Migrate Management vmk
      local_action:
        module: vmware_migrate_vmk
        hostname: vcsa_host
        username: vcsa_user
        password: vcsa_pass
        esxi_hostname: esxi_hostname
        device: vmk1
        current_switch_name: temp_vswitch
        current_portgroup_name: esx-mgmt
        migrate_switch_name: dvSwitch
        migrate_portgroup_name: Management
'''

try:
    from pyVmomi import vim, vmodl
    HAS_PYVMOMI = True
except ImportError:
    HAS_PYVMOMI = False


class VMwareMigrateVmk(object):
    """Migrate one VMK interface on an ESXi host between a standard
    vSwitch (VSS) and a distributed vSwitch (VDS)."""

    def __init__(self, module):
        self.module = module
        self.host_system = None
        self.migrate_switch_name = self.module.params['migrate_switch_name']
        self.migrate_portgroup_name = self.module.params['migrate_portgroup_name']
        self.device = self.module.params['device']
        self.esxi_hostname = self.module.params['esxi_hostname']
        self.current_portgroup_name = self.module.params['current_portgroup_name']
        self.current_switch_name = self.module.params['current_switch_name']
        # connect_to_api comes from ansible.module_utils.vmware (wildcard
        # import at the bottom of this file, per the legacy module layout).
        self.content = connect_to_api(module)

    def process_state(self):
        """Determine where the VMK currently lives and run the matching
        state handler.  Any pyVmomi fault is reported via fail_json."""
        try:
            vmk_migration_states = {
                'migrate_vss_vds': self.state_migrate_vss_vds,
                'migrate_vds_vss': self.state_migrate_vds_vss,
                'migrated': self.state_exit_unchanged
            }
            # NOTE(review): check_vmk_current_state() can return None when the
            # device is not found on the host; that surfaces here as a dict
            # KeyError which the generic handler below turns into fail_json.
            vmk_migration_states[self.check_vmk_current_state()]()
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)
        except Exception as e:
            self.module.fail_json(msg=str(e))

    def state_exit_unchanged(self):
        # The VMK is already on the target VDS portgroup; nothing to do.
        self.module.exit_json(changed=False)

    def state_migrate_vds_vss(self):
        # Reverse migration (VDS -> VSS) is not supported by this module.
        self.module.exit_json(changed=False, msg="Currently Not Implemented")

    def create_host_vnic_config(self, dv_switch_uuid, portgroup_key):
        """Build the VirtualNic.Config that re-points self.device at the
        given distributed portgroup (identified by switch UUID + pg key)."""
        host_vnic_config = vim.host.VirtualNic.Config()
        host_vnic_config.spec = vim.host.VirtualNic.Specification()

        host_vnic_config.changeOperation = "edit"
        host_vnic_config.device = self.device
        # Clearing the legacy portgroup name detaches the VMK from the VSS.
        host_vnic_config.portgroup = ""
        host_vnic_config.spec.distributedVirtualPort = vim.dvs.PortConnection()
        host_vnic_config.spec.distributedVirtualPort.switchUuid = dv_switch_uuid
        host_vnic_config.spec.distributedVirtualPort.portgroupKey = portgroup_key

        return host_vnic_config

    def create_port_group_config(self):
        """Build the PortGroup.Config that removes the old VSS portgroup."""
        port_group_config = vim.host.PortGroup.Config()
        port_group_config.spec = vim.host.PortGroup.Specification()

        port_group_config.changeOperation = "remove"
        port_group_config.spec.name = self.current_portgroup_name
        port_group_config.spec.vlanId = -1
        port_group_config.spec.vswitchName = self.current_switch_name
        port_group_config.spec.policy = vim.host.NetworkPolicy()

        return port_group_config

    def state_migrate_vss_vds(self):
        """Perform the VSS -> VDS migration in a single UpdateNetworkConfig
        call (remove the old portgroup + re-point the vnic atomically)."""
        host_network_system = self.host_system.configManager.networkSystem

        dv_switch = find_dvs_by_name(self.content, self.migrate_switch_name)
        pg = find_dvspg_by_name(dv_switch, self.migrate_portgroup_name)

        config = vim.host.NetworkConfig()
        config.portgroup = [self.create_port_group_config()]
        config.vnic = [self.create_host_vnic_config(dv_switch.uuid, pg.key)]
        host_network_system.UpdateNetworkConfig(config, "modify")
        self.module.exit_json(changed=True)

    def check_vmk_current_state(self):
        """Classify the VMK's current location.

        :returns: one of the keys of the dispatch dict in process_state, or
            None if self.device is not present on the host at all.
        """
        self.host_system = find_hostsystem_by_name(self.content, self.esxi_hostname)

        for vnic in self.host_system.configManager.networkSystem.networkInfo.vnic:
            if vnic.device == self.device:
                if vnic.spec.distributedVirtualPort is None:
                    # Still a classic (VSS) vnic.
                    if vnic.portgroup == self.current_portgroup_name:
                        return "migrate_vss_vds"
                else:
                    dvs = find_dvs_by_name(self.content, self.current_switch_name)
                    if dvs is None:
                        # Already on a VDS other than current_switch_name.
                        return "migrated"
                    if vnic.spec.distributedVirtualPort.switchUuid == dvs.uuid:
                        return "migrate_vds_vss"


def main():
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(esxi_hostname=dict(required=True, type='str'),
                              device=dict(required=True, type='str'),
                              current_switch_name=dict(required=True, type='str'),
                              current_portgroup_name=dict(required=True, type='str'),
                              migrate_switch_name=dict(required=True, type='str'),
                              migrate_portgroup_name=dict(required=True, type='str')))

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)

    if not HAS_PYVMOMI:
        # BUG FIX: this previously read `self.module.fail_json(...)`, but
        # `self` does not exist at function scope, so a missing pyVmomi
        # raised NameError instead of the intended error message.
        module.fail_json(msg='pyvmomi required for this module')

    vmware_migrate_vmk = VMwareMigrateVmk(module)
    vmware_migrate_vmk.process_state()

# Legacy Ansible modules import shared utils at the bottom of the file.
from ansible.module_utils.vmware import *
from ansible.module_utils.basic import *

if __name__ == '__main__':
    main()
gpl-3.0
jaez/finna-be-octo-robot
contrib/testgen/gen_base58_test_vectors.py
1064
4344
#!/usr/bin/env python
'''
Generate valid and invalid base58 address and private key test vectors.

Usage:
    gen_base58_test_vectors.py valid 50 > ../../src/test/data/base58_keys_valid.json
    gen_base58_test_vectors.py invalid 50 > ../../src/test/data/base58_keys_invalid.json
'''
# 2012 Wladimir J. van der Laan
# Released under MIT License
import os
from itertools import islice
from base58 import b58encode, b58decode, b58encode_chk, b58decode_chk, b58chars
import random
from binascii import b2a_hex

# key types (version-byte prefixes)
PUBKEY_ADDRESS = 48
SCRIPT_ADDRESS = 5
PUBKEY_ADDRESS_TEST = 111
SCRIPT_ADDRESS_TEST = 196
PRIVKEY = 176
PRIVKEY_TEST = 239

metadata_keys = ['isPrivkey', 'isTestnet', 'addrType', 'isCompressed']
# templates for valid sequences
templates = [
  # prefix, payload_size, suffix, metadata
  #                                  None = N/A
  ((PUBKEY_ADDRESS,),      20, (),   (False, False, 'pubkey', None)),
  ((SCRIPT_ADDRESS,),      20, (),   (False, False, 'script', None)),
  ((PUBKEY_ADDRESS_TEST,), 20, (),   (False, True,  'pubkey', None)),
  ((SCRIPT_ADDRESS_TEST,), 20, (),   (False, True,  'script', None)),
  ((PRIVKEY,),             32, (),   (True,  False, None,  False)),
  ((PRIVKEY,),             32, (1,), (True,  False, None,  True)),
  ((PRIVKEY_TEST,),        32, (),   (True,  True,  None,  False)),
  ((PRIVKEY_TEST,),        32, (1,), (True,  True,  None,  True))
]

def is_valid(v):
    '''Check vector v for validity.

    A vector is valid when its checksum decodes and the decoded payload
    matches one of the (prefix, size, suffix) templates above.
    '''
    result = b58decode_chk(v)
    if result is None:
        return False
    # NOTE: removed a dead `valid = False` local that was never read; the
    # function returns directly from inside the loop.
    for template in templates:
        prefix = str(bytearray(template[0]))
        suffix = str(bytearray(template[2]))
        if result.startswith(prefix) and result.endswith(suffix):
            if (len(result) - len(prefix) - len(suffix)) == template[1]:
                return True
    return False

def gen_valid_vectors():
    '''Generate valid test vectors (infinite generator, cycles templates).'''
    while True:
        for template in templates:
            prefix = str(bytearray(template[0]))
            payload = os.urandom(template[1])
            suffix = str(bytearray(template[2]))
            rv = b58encode_chk(prefix + payload + suffix)
            assert is_valid(rv)
            metadata = dict([(x,y) for (x,y) in zip(metadata_keys,template[3]) if y is not None])
            yield (rv, b2a_hex(payload), metadata)

def gen_invalid_vector(template, corrupt_prefix, randomize_payload_size, corrupt_suffix):
    '''Generate possibly invalid vector by corrupting parts of a template.'''
    if corrupt_prefix:
        prefix = os.urandom(1)
    else:
        prefix = str(bytearray(template[0]))

    if randomize_payload_size:
        payload = os.urandom(max(int(random.expovariate(0.5)), 50))
    else:
        payload = os.urandom(template[1])

    if corrupt_suffix:
        suffix = os.urandom(len(template[2]))
    else:
        suffix = str(bytearray(template[2]))

    return b58encode_chk(prefix + payload + suffix)

def randbool(p = 0.5):
    '''Return True with P(p)'''
    return random.random() < p

def gen_invalid_vectors():
    '''Generate invalid test vectors (infinite generator).'''
    # start with some manual edge-cases
    yield "",
    yield "x",
    while True:
        # kinds of invalid vectors:
        #   invalid prefix
        #   invalid payload length
        #   invalid (randomized) suffix (add random data)
        #   corrupt checksum
        for template in templates:
            val = gen_invalid_vector(template, randbool(0.2), randbool(0.2), randbool(0.2))
            if random.randint(0,10)<1: # line corruption
                if randbool(): # add random character to end
                    val += random.choice(b58chars)
                else: # replace random character in the middle
                    n = random.randint(0, len(val))
                    val = val[0:n] + random.choice(b58chars) + val[n+1:]
            if not is_valid(val):
                yield val,

if __name__ == '__main__':
    import sys, json
    iters = {'valid':gen_valid_vectors, 'invalid':gen_invalid_vectors}
    try:
        uiter = iters[sys.argv[1]]
    except IndexError:
        uiter = gen_valid_vectors
    try:
        count = int(sys.argv[2])
    except IndexError:
        count = 0
    data = list(islice(uiter(), count))
    json.dump(data, sys.stdout, sort_keys=True, indent=4)
    sys.stdout.write('\n')
mit
uwplse/synapse
experiments/plots/all-benchmarks.py
1
2813
import collections
import csv
import os
import re
import subprocess
import sys
import tempfile


def rename_benchmark(bm):
    """Map a raw benchmark name (usually an s-expression like "(hd-d0 3)")
    to the short, zero-padded label used in the plot."""
    if "hd-d0" in bm:
        return "%02d-d0" % int(re.match("\(.+ ([0-9]+)\)", bm).group(1))
    elif "hd-d5" in bm:
        return "%02d-d5" % int(re.match("\(.+ ([0-9]+)\)", bm).group(1))
    elif "array-search" in bm:
        return "arraysearch-%02d" % int(re.match("\(.+ ([0-9]+)\)", bm).group(1))
    elif "inversek2j-theta1" in bm:
        return "inversek2j-1"
    elif "inversek2j-theta2" in bm:
        return "inversek2j-2"
    elif "qm" in bm:
        return "%s" % re.match("\(qm (.+)\)", bm).group(1)
    else:
        return bm[1:-1]  # s-expr: strip surrounding parentheses


def main():
    """Rewrite <argv1>.out.csv into <argv1>.csv with renamed benchmarks,
    then render <argv1>.pdf via an R/ggplot2 script run through Rscript."""
    base = sys.argv[1]
    out_csv = "%s.csv" % base

    # FIX: use context managers so the CSV handles are closed even if a row
    # is malformed (the original closed them only on the success path).
    with open("%s.out.csv" % base) as f:
        rdr = csv.DictReader(f)
        with open(out_csv, "w") as f2:
            f2.write("\"benchmark\",\"group\",\"time\",\"timeout\"\n")
            for row in rdr:
                f2.write("%s,%s,%s,%s\n" % (rename_benchmark(row["benchmark"]), row["group"], row["time"], row["timeout"]))

    # FIX: the temp file previously leaked if Rscript failed; the context
    # manager guarantees deletion.  mode='w' keeps text writes portable.
    with tempfile.NamedTemporaryFile(mode='w') as fR:
        fR.write('''
library(ggplot2)
library(grid)
library(reshape2)
library(gridExtra)
library(scales)

df <- read.csv("%s")
df$timeout_label = sapply(df$timeout, function(x) if (x=="True") return("*") else return(""))
df$group <- factor(df$group, c("arraysearch", "qm", "hd-d0", "hd-d5", "parrot"))
levels(df$group)[levels(df$group)=="hd-d0"] <- "Hacker's Delight d0"
levels(df$group)[levels(df$group)=="hd-d5"] <- "Hacker's Delight d5"
levels(df$group)[levels(df$group)=="parrot"] <- "Parrot"
levels(df$group)[levels(df$group)=="arraysearch"] <- "Array Search"
levels(df$group)[levels(df$group)=="qm"] <- "CIA"
clean_names <- gsub("arraysearch-0(.)", "arraysearch-\\\\1", df$benchmark)
labels <- setNames(clean_names, df$benchmark)
print(labels)
p <- ggplot(df, aes(x=benchmark, y=time))
p <- p + geom_bar(stat="identity", fill="#356384", width=0.85)
p <- p + facet_grid(. ~ group, scales="free_x", space="free_x")
p <- p + geom_text(aes(label=timeout_label, x=benchmark, y=time+1), size=3)
p <- p + theme_bw(9)
p <- p + theme(plot.margin=unit(c(0.2, 0.2, 0, 0), "cm"))
p <- p + scale_y_log10(expand=c(0,0), breaks=c(10, 100, 1000, 10000), limits=c(1, 20000))
p <- p + scale_x_discrete(labels=labels)
p <- p + labs(x="Benchmark", y="Solving time (secs)")
p <- p + theme(legend.position="none")
p <- p + theme(axis.text.x=element_text(angle=90, vjust=0.5, hjust=1.0, size=5,margin=margin(0)))
p <- p + theme(strip.background=element_rect(fill="#eeeeee", size=0.4, colour="#aaaaaa"))
p <- p + theme(panel.border=element_rect(fill=NA, size=0.4, colour="#aaaaaa"))
p <- p + theme(axis.ticks.x=element_blank())
ggsave("./%s.pdf", p, width=7, height=2.16)
''' % (out_csv, base))
        fR.flush()
        subprocess.check_call(["Rscript", fR.name])


# Guarding the entry point makes the module importable (e.g. for testing
# rename_benchmark) while preserving the original command-line behavior.
if __name__ == "__main__":
    main()
mit
beddari/calico
calico/monotonic.py
4
1697
# -*- coding: utf-8 -*-
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
calico.monotonic
~~~~~~~~~~~~~~~~

Monotonic clock functions.

Prefer monotonic_time() over time.time() for timing and timer-pop
calculations: the wall clock can be non-monotonic and may jump wildly,
especially inside a VM.
"""
import logging
_log = logging.getLogger(__name__)

__all__ = ["monotonic_time"]

import ctypes
import os

# Clock id from <linux/time.h>: raw hardware-based monotonic clock.
CLOCK_MONOTONIC_RAW = 4


class Timespec(ctypes.Structure):
    # Mirrors the C `struct timespec` filled in by clock_gettime(2).
    _fields_ = [
        ('tv_sec', ctypes.c_long),
        ('tv_nsec', ctypes.c_long),
    ]


librt = ctypes.CDLL('librt.so.1', use_errno=True)
clock_gettime = librt.clock_gettime
clock_gettime.argtypes = [ctypes.c_int, ctypes.POINTER(Timespec)]


def monotonic_time():
    """
    Return a time in seconds measured from an unspecified epoch (which
    may differ between processes).  Monotonic for the life of a process.
    """
    ts = Timespec()
    rc = clock_gettime(CLOCK_MONOTONIC_RAW, ctypes.byref(ts))
    if rc != 0:
        err = ctypes.get_errno()
        raise OSError(err, os.strerror(err))
    return ts.tv_sec + ts.tv_nsec * 1e-9
apache-2.0
feigames/Odoo
addons/account_bank_statement_extensions/wizard/confirm_statement_line.py
381
1490
# -*- encoding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#
#    Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.osv import osv


class confirm_statement_line(osv.osv_memory):
    """Wizard that marks the selected bank statement lines as confirmed."""
    _name = 'confirm.statement.line'
    _description = 'Confirm selected statement lines'

    def confirm_lines(self, cr, uid, ids, context):
        # The lines the user selected in the list view arrive via the
        # context; flip each one's workflow state to 'confirm'.
        selected_line_ids = context['active_ids']
        statement_line_model = self.pool.get('account.bank.statement.line')
        statement_line_model.write(
            cr, uid, selected_line_ids, {'state': 'confirm'}, context=context)
        return {}

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
cycloverid/plate-tectonics
test/googletest/test/gtest_catch_exceptions_test.py
2139
9901
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.  All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Tests Google Test's exception catching behavior.

This script invokes gtest_catch_exceptions_test_ and
gtest_catch_exceptions_ex_test_ (programs written with
Google Test) and verifies their output.
"""

__author__ = 'vladl@google.com (Vlad Losev)'

import os

import gtest_test_utils

# Constants.
FLAG_PREFIX = '--gtest_'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
NO_CATCH_EXCEPTIONS_FLAG = FLAG_PREFIX + 'catch_exceptions=0'
FILTER_FLAG = FLAG_PREFIX + 'filter'

# Path to the gtest_catch_exceptions_ex_test_ binary, compiled with
# exceptions enabled.
EX_EXE_PATH = gtest_test_utils.GetTestExecutablePath(
    'gtest_catch_exceptions_ex_test_')

# Path to the gtest_catch_exceptions_test_ binary, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
    'gtest_catch_exceptions_no_ex_test_')

environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar

# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely.  Therefore they are incompatible with
# the premature-exit-file protocol by design.  Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)

TEST_LIST = gtest_test_utils.Subprocess(
    [EXE_PATH, LIST_TESTS_FLAG], env=environ).output

SUPPORTS_SEH_EXCEPTIONS = 'ThrowsSehException' in TEST_LIST

if SUPPORTS_SEH_EXCEPTIONS:
  BINARY_OUTPUT = gtest_test_utils.Subprocess([EXE_PATH], env=environ).output

EX_BINARY_OUTPUT = gtest_test_utils.Subprocess(
    [EX_EXE_PATH], env=environ).output


# The tests.
if SUPPORTS_SEH_EXCEPTIONS:
  # pylint:disable-msg=C6302
  class CatchSehExceptionsTest(gtest_test_utils.TestCase):
    """Tests exception-catching behavior."""

    def TestSehExceptions(self, test_output):
      self.assert_('SEH exception with code 0x2a thrown '
                   'in the test fixture\'s constructor'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown '
                   'in the test fixture\'s destructor'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in SetUpTestCase()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in TearDownTestCase()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in SetUp()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in TearDown()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in the test body'
                   in test_output)

    def testCatchesSehExceptionsWithCxxExceptionsEnabled(self):
      self.TestSehExceptions(EX_BINARY_OUTPUT)

    def testCatchesSehExceptionsWithCxxExceptionsDisabled(self):
      self.TestSehExceptions(BINARY_OUTPUT)


class CatchCxxExceptionsTest(gtest_test_utils.TestCase):
  """Tests C++ exception-catching behavior.

     Tests in this test case verify that:
     * C++ exceptions are caught and logged as C++ (not SEH) exceptions
     * Exception thrown affect the remainder of the test work flow in the
       expected manner.
  """

  def testCatchesCxxExceptionsInFixtureConstructor(self):
    self.assert_('C++ exception with description '
                 '"Standard C++ exception" thrown '
                 'in the test fixture\'s constructor'
                 in EX_BINARY_OUTPUT)
    self.assert_('unexpected' not in EX_BINARY_OUTPUT,
                 'This failure belongs in this test only if '
                 '"CxxExceptionInConstructorTest" (no quotes) '
                 'appears on the same line as words "called unexpectedly"')

  if ('CxxExceptionInDestructorTest.ThrowsExceptionInDestructor' in
      EX_BINARY_OUTPUT):

    def testCatchesCxxExceptionsInFixtureDestructor(self):
      self.assert_('C++ exception with description '
                   '"Standard C++ exception" thrown '
                   'in the test fixture\'s destructor'
                   in EX_BINARY_OUTPUT)
      self.assert_('CxxExceptionInDestructorTest::TearDownTestCase() '
                   'called as expected.'
                   in EX_BINARY_OUTPUT)

  def testCatchesCxxExceptionsInSetUpTestCase(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in SetUpTestCase()'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInConstructorTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest constructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest::SetUp() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest::TearDown() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest test body '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)

  def testCatchesCxxExceptionsInTearDownTestCase(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in TearDownTestCase()'
                 in EX_BINARY_OUTPUT)

  def testCatchesCxxExceptionsInSetUp(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in SetUp()'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTest::TearDown() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('unexpected' not in EX_BINARY_OUTPUT,
                 'This failure belongs in this test only if '
                 '"CxxExceptionInSetUpTest" (no quotes) '
                 'appears on the same line as words "called unexpectedly"')

  def testCatchesCxxExceptionsInTearDown(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in TearDown()'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTearDownTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTearDownTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)

  def testCatchesCxxExceptionsInTestBody(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in the test body'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTestBodyTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTestBodyTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTestBodyTest::TearDown() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)

  def testCatchesNonStdCxxExceptions(self):
    self.assert_('Unknown C++ exception thrown in the test body'
                 in EX_BINARY_OUTPUT)

  def testUnhandledCxxExceptionsAbortTheProgram(self):
    # Filters out SEH exception tests on Windows. Unhandled SEH exceptions
    # cause tests to show pop-up windows there.
    # FIX: local constant was misspelled FITLER_OUT_SEH_TESTS_FLAG.
    FILTER_OUT_SEH_TESTS_FLAG = FILTER_FLAG + '=-*Seh*'
    # By default, Google Test doesn't catch the exceptions.
    uncaught_exceptions_ex_binary_output = gtest_test_utils.Subprocess(
        [EX_EXE_PATH,
         NO_CATCH_EXCEPTIONS_FLAG,
         FILTER_OUT_SEH_TESTS_FLAG],
        env=environ).output

    self.assert_('Unhandled C++ exception terminating the program'
                 in uncaught_exceptions_ex_binary_output)
    self.assert_('unexpected' not in uncaught_exceptions_ex_binary_output)


if __name__ == '__main__':
  gtest_test_utils.Main()
lgpl-3.0
dimagol/trex-core
scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_client.py
1
61546
#!/router/bin/python # internal libs import sys import os import socket import errno import time import re import copy import binascii from distutils.util import strtobool from collections import deque, OrderedDict import json import traceback import signal try: from . import outer_packages from .trex_status_e import TRexStatus from .trex_exceptions import * from .trex_exceptions import exception_handler from .general_utils import * except Exception as e: # is __main__ import outer_packages from trex_status_e import TRexStatus from trex_exceptions import * from trex_exceptions import exception_handler from general_utils import * # external libs import jsonrpclib from jsonrpclib import ProtocolError, AppError class CTRexClient(object): """ This class defines the client side of the RESTfull interaction with TRex """ def __init__(self, trex_host, max_history_size = 100, filtered_latency_amount = 0.001, trex_daemon_port = 8090, master_daemon_port = 8091, trex_zmq_port = 4500, verbose = False, debug_image = False, trex_args = '', timeout = 30): """ Instantiate a TRex client object, and connecting it to listening daemon-server :parameters: trex_host : str a string of the TRex ip address or hostname. max_history_size : int a number to set the maximum history size of a single TRex run. Each sampling adds a new item to history. default value : **100** filtered_latency_amount : float Ignore high latency for this ammount of packets. (by default take value of 99.9% measurements) default value : **0.001** trex_daemon_port : int the port number on which the trex-daemon server can be reached default value: **8090** master_daemon_port : int the port number on which the master-daemon server can be reached default value: **8091** trex_zmq_port : int the port number on which trex's zmq module will interact with daemon server default value: **4500** verbose : bool sets a verbose output on supported class method. 
default value : **False** trex_args : string additional arguments passed to TRex. For example, "-w 3 --no-watchdog" timeout : int timeout in seconds to wait for socket response default value: **30** :raises: socket errors, in case server could not be reached. """ try: self.trex_host = socket.gethostbyname(trex_host) except: # give it another try self.trex_host = socket.gethostbyname(trex_host) self.trex_daemon_port = trex_daemon_port self.master_daemon_port = master_daemon_port self.trex_zmq_port = trex_zmq_port self.seq = None self._last_sample = time.time() self.__default_user = get_current_user() self.verbose = verbose self.result_obj = CTRexResult(max_history_size, filtered_latency_amount) self.history = jsonrpclib.history.History() self.master_daemon_path = "http://{hostname}:{port}/".format( hostname = self.trex_host, port = master_daemon_port ) self.master_daemon = jsonrpclib.Server(self.master_daemon_path, history = self.history, timeout = timeout) self.trex_server_path = "http://{hostname}:{port}/".format( hostname = self.trex_host, port = trex_daemon_port ) self.server = jsonrpclib.Server(self.trex_server_path, history = self.history, timeout = timeout) self.debug_image = debug_image self.trex_args = trex_args self.sample_to_run_finish = self.sample_until_finish # alias for legacy def add (self, x, y): try: return self.server.add(x,y) except AppError as err: self._handle_AppError_exception(err.args[0]) except ProtocolError: raise finally: self.prompt_verbose_data() # internal method which polls for TRex state until it's running or timeout happens def _block_to_success(self, timeout, poll_interval = 1): if not timeout: raise ValueError("'timeout' should be positive integer in case of 'block_to_success'") start_time = time.time() while time.time() < start_time + timeout: status = self.get_running_status() if status['state'] == TRexStatus.Running: return if status['state'] == TRexStatus.Idle: raise Exception('TRex is back to Idle state, verbose output:\n%s' 
% status['verbose']) time.sleep(poll_interval) raise Exception("Timeout of %ss happened during wait for TRex to become in 'Running' state" % timeout) def start_trex (self, f, d, block_to_success = True, timeout = 40, user = None, trex_development = False, **trex_cmd_options): """ Request to start a TRex run on server in stateful mode. :parameters: f : str a path (on server) for the injected traffic data (.yaml file) d : int the desired duration of the test. must be at least 30 seconds long. block_to_success : bool determine if this method blocks until TRex changes state from 'Starting' to either 'Idle' or 'Running' default value : **True** timeout : int maximum time (in seconds) to wait in blocking state until TRex changes state from 'Starting' to either 'Idle' or 'Running' default value: **40** user : str the identity of the the run issuer. trex_cmd_options : key, val sets desired TRex options using key=val syntax, separated by comma. for keys with no value, state key=True :return: **True** on success :raises: + :exc:`ValueError`, in case 'd' parameter inserted with wrong value. + :exc:`trex_exceptions.TRexError`, in case one of the trex_cmd_options raised an exception at server. + :exc:`trex_exceptions.TRexInUseError`, in case TRex is already taken. + :exc:`trex_exceptions.TRexRequestDenied`, in case TRex is reserved for another user than the one trying start TRex. + ProtocolError, in case of error in JSON-RPC protocol. 
""" user = user or self.__default_user try: d = int(d) except ValueError: raise ValueError('d parameter must be integer, specifying how long TRex run.') trex_cmd_options.update( {'f' : f, 'd' : d} ) self.result_obj.latency_checked = 'l' in trex_cmd_options if 'k' in trex_cmd_options: timeout += int(trex_cmd_options['k']) # during 'k' seconds TRex stays in 'Starting' state self.result_obj.clear_results() try: issue_time = time.time() retval = self.server.start_trex(trex_cmd_options, user, False, None, False, self.debug_image, self.trex_args) except AppError as err: self._handle_AppError_exception(err.args[0]) except ProtocolError: raise finally: self.prompt_verbose_data() if block_to_success: self._block_to_success(timeout) if retval!=0: self.seq = retval # update seq num only on successful submission return True else: # TRex is has been started by another user raise TRexInUseError('TRex is already being used by another user or process. Try again once TRex is back in IDLE state.') def start_stateless(self, block_to_success = True, timeout = 40, user = None, **trex_cmd_options): """ Request to start a TRex run on server in stateless mode. :parameters: block_to_success : bool determine if this method blocks until TRex changes state from 'Starting' to either 'Idle' or 'Running' default value : **True** timeout : int maximum time (in seconds) to wait in blocking state until TRex changes state from 'Starting' to either 'Idle' or 'Running' default value: **40** user : str the identity of the the run issuer. trex_cmd_options : key, val sets desired TRex options using key=val syntax, separated by comma. for keys with no value, state key=True :return: **True** on success :raises: + :exc:`trex_exceptions.TRexError`, in case one of the trex_cmd_options raised an exception at server. + :exc:`trex_exceptions.TRexInUseError`, in case TRex is already taken. + :exc:`trex_exceptions.TRexRequestDenied`, in case TRex is reserved for another user than the one trying start TRex. 
+ ProtocolError, in case of error in JSON-RPC protocol. """ try: user = user or self.__default_user self.result_obj.latency_checked = False self.result_obj.clear_results() retval = self.server.start_trex(trex_cmd_options, user, False, None, True, self.debug_image, self.trex_args) except AppError as err: self._handle_AppError_exception(err.args[0]) except ProtocolError: raise finally: self.prompt_verbose_data() if block_to_success: self._block_to_success(timeout) if retval!=0: self.seq = retval # update seq num only on successful submission return True else: # TRex is has been started by another user raise TRexInUseError('TRex is already being used by another user or process. Try again once TRex is back in IDLE state.') def stop_trex (self): """ Request to stop a TRex run on server. The request is only valid if the stop initiator is the same client as the TRex run initiator. :parameters: None :return: + **True** on successful termination + **False** if request issued but TRex wasn't running. :raises: + :exc:`trex_exceptions.TRexRequestDenied`, in case TRex ir running but started by another user. + :exc:`trex_exceptions.TRexIncompleteRunError`, in case one of failed TRex run (unexpected termination). + ProtocolError, in case of error in JSON-RPC protocol. """ try: return self.server.stop_trex(self.seq) except AppError as err: self._handle_AppError_exception(err.args[0]) except ProtocolError: raise finally: self.prompt_verbose_data() def force_kill (self, confirm = True): """ Force killing of running TRex process (if exists) on the server. .. tip:: This method is a safety method and **overrides any running or reserved resources**, and as such isn't designed to be used on a regular basis. Always consider using :func:`trex_client.CTRexClient.stop_trex` instead. In the end of this method, TRex will return to IDLE state with no reservation. 
:parameters: confirm : bool Prompt a user confirmation before continue terminating TRex session :return: + **True** on successful termination + **False** otherwise. :raises: + ProtocolError, in case of error in JSON-RPC protocol. """ if confirm: prompt = "WARNING: This will terminate active TRex session indiscriminately.\nAre you sure? " sys.stdout.write('%s [y/n]\n' % prompt) while True: try: if strtobool(user_input().lower()): break else: return except ValueError: sys.stdout.write('Please respond with \'y\' or \'n\'.\n') try: return self.server.force_trex_kill() except AppError as err: # Silence any kind of application errors- by design return False except ProtocolError: raise finally: self.prompt_verbose_data() def kill_all_trexes(self, timeout = 15): """ Kills running TRex processes (if exists) on the server, not only owned by current daemon. Raises exception upon error killing. :return: + **True** if processes killed/not running + **False** otherwise. """ try: poll_rate = 0.1 # try Ctrl+C, usual kill, -9 for signal_name in [signal.SIGINT, signal.SIGTERM, signal.SIGKILL]: self.server.kill_all_trexes(signal_name) for i in range(int(timeout / poll_rate)): if not self.get_trex_cmds(): return True time.sleep(poll_rate) if self.get_trex_cmds(): return False return True except AppError as err: self._handle_AppError_exception(err.args[0]) finally: self.prompt_verbose_data() def get_trex_cmds(self): """ Gets list of running TRex pids and command lines. Can be used to verify if any TRex is running. 
:return: List of tuples (pid, command) of running TRexes """ try: return self.server.get_trex_cmds() except AppError as err: self._handle_AppError_exception(err.args[0]) finally: self.prompt_verbose_data() def get_trex_path(self): ''' Returns TRex path on server ''' try: return str(self.master_daemon.get_trex_path()) except AppError as err: self._handle_AppError_exception(err.args[0]) finally: self.prompt_verbose_data() def wait_until_kickoff_finish(self, timeout = 40): """ Block the client application until TRex changes state from 'Starting' to either 'Idle' or 'Running' The request is only valid if the stop initiator is the same client as the TRex run initiator. :parameters: timeout : int maximum time (in seconds) to wait in blocking state until TRex changes state from 'Starting' to either 'Idle' or 'Running' :return: + **True** on successful termination + **False** if request issued but TRex wasn't running. :raises: + :exc:`trex_exceptions.TRexIncompleteRunError`, in case one of failed TRex run (unexpected termination). + ProtocolError, in case of error in JSON-RPC protocol. .. note:: Exceptions are throws only when start_trex did not block in the first place, i.e. `block_to_success` parameter was set to `False` """ try: return self.server.wait_until_kickoff_finish(timeout) except AppError as err: self._handle_AppError_exception(err.args[0]) except ProtocolError: raise finally: self.prompt_verbose_data() def is_running (self, dump_out = False): """ Poll for TRex running status. If TRex is running, a history item will be added into result_obj and processed. .. tip:: This method is especially useful for iterating until TRex run is finished. :parameters: dump_out : dict if passed, the pointer object is cleared and the latest dump stored in it. :return: + **True** if TRex is running. + **False** if TRex is not running. :raises: + :exc:`trex_exceptions.TRexIncompleteRunError`, in case one of failed TRex run (unexpected termination). 
+ :exc:`TypeError`, in case JSON stream decoding error. + ProtocolError, in case of error in JSON-RPC protocol. """ try: res = self.get_running_info() if res == {}: return False if (dump_out != False) and (isinstance(dump_out, dict)): # save received dump to given 'dump_out' pointer dump_out.clear() dump_out.update(res) return True except TRexWarning as err: if err.code == -12: # TRex is either still at 'Starting' state or in Idle state, however NO error occured return False except TRexException: raise except ProtocolError as err: raise #is printed by self.get_running_info() #finally: # self.prompt_verbose_data() def is_idle (self): """ Poll for TRex running status, check if TRex is in Idle state. :parameters: None :return: + **True** if TRex is idle. + **False** if TRex is starting or running. :raises: + :exc:`trex_exceptions.TRexIncompleteRunError`, in case one of failed TRex run (unexpected termination). + :exc:`TypeError`, in case JSON stream decoding error. + ProtocolError, in case of error in JSON-RPC protocol. """ try: if self.get_running_status()['state'] == TRexStatus.Idle: return True return False except TRexException: raise except ProtocolError as err: raise finally: self.prompt_verbose_data() def get_trex_files_path (self): """ Fetches the local path in which files are stored when pushed to TRex server from client. :parameters: None :return: string representation of the desired path .. note:: The returned path represents a path on the TRex server **local machine** :raises: ProtocolError, in case of error in JSON-RPC protocol. """ try: return (self.server.get_files_path() + '/') except AppError as err: self._handle_AppError_exception(err.args[0]) except ProtocolError: raise finally: self.prompt_verbose_data() def get_running_status (self): """ Fetches the current TRex status. If available, a verbose data will accompany the state itself. :parameters: None :return: dictionary with 'state' and 'verbose' keys. 
:raises: ProtocolError, in case of error in JSON-RPC protocol. """ try: res = self.server.get_running_status() res['state'] = TRexStatus(res['state']) return res except AppError as err: self._handle_AppError_exception(err.args[0]) except ProtocolError: raise finally: self.prompt_verbose_data() def get_running_info (self): """ Performs single poll of TRex running data and process it into the result object (named `result_obj`). .. tip:: This method will throw an exception if TRex isn't running. Always consider using :func:`trex_client.CTRexClient.is_running` which handles a single poll operation in safer manner. :parameters: None :return: dictionary containing the most updated data dump from TRex. :raises: + :exc:`trex_exceptions.TRexIncompleteRunError`, in case one of failed TRex run (unexpected termination). + :exc:`TypeError`, in case JSON stream decoding error. + ProtocolError, in case of error in JSON-RPC protocol. """ if not self.is_query_relevance(): # if requested in timeframe smaller than the original sample rate, return the last known data without interacting with server return self.result_obj.get_latest_dump() else: try: latest_dump = json.loads( self.server.get_running_info() ) # latest dump is not a dict, but json string. decode it. self.result_obj.update_result_data(latest_dump) return latest_dump except TypeError as inst: raise TypeError('JSON-RPC data decoding failed. Check out incoming JSON stream.') except AppError as err: self._handle_AppError_exception(err.args[0]) except ProtocolError: raise finally: self.prompt_verbose_data() def sample_until_condition (self, condition_func, time_between_samples = 1): """ Automatically sets ongoing sampling of TRex data, with sampling rate described by time_between_samples. On each fetched dump, the condition_func is applied on the result objects, and if returns True, the sampling will stop. :parameters: condition_func : function function that operates on result_obj and checks if a condition has been met .. 
note:: `condition_finc` is applied on `CTRexResult` object. Make sure to design a relevant method. time_between_samples : int determines the time between each sample of the server default value : **1** :return: the first result object (see :class:`CTRexResult` for further details) of the TRex run on which the condition has been met. :raises: + :exc:`UserWarning`, in case the condition_func method condition hasn't been met + :exc:`trex_exceptions.TRexIncompleteRunError`, in case one of failed TRex run (unexpected termination). + :exc:`TypeError`, in case JSON stream decoding error. + ProtocolError, in case of error in JSON-RPC protocol. + :exc:`Exception`, in case the condition_func suffered from any kind of exception """ # make sure TRex is running. raise exceptions here if any self.wait_until_kickoff_finish() try: while self.is_running(): results = self.get_result_obj() if condition_func(results): # if condition satisfied, stop TRex and return result object self.stop_trex() return results time.sleep(time_between_samples) except TRexWarning: # means we're back to Idle state, and didn't meet our condition raise UserWarning("TRex results condition wasn't met during TRex run.") except Exception: # this could come from provided method 'condition_func' raise def sample_until_finish (self, time_between_samples = 1): """ Automatically samples TRex data with sampling rate described by time_between_samples until TRex run finishes. :parameters: time_between_samples : int determines the time between each sample of the server default value : **1** :return: the latest result object (see :class:`CTRexResult` for further details) with sampled data. :raises: + :exc:`UserWarning`, in case the condition_func method condition hasn't been met + :exc:`trex_exceptions.TRexIncompleteRunError`, in case one of failed TRex run (unexpected termination). + :exc:`TypeError`, in case JSON stream decoding error. + ProtocolError, in case of error in JSON-RPC protocol. 
""" self.wait_until_kickoff_finish() try: while self.is_running(): time.sleep(time_between_samples) except TRexWarning: pass # try to get final server dump try: latest_server_dump = json.loads(self.server.get_latest_dump()) if latest_server_dump != self.result_obj.get_latest_dump(): self.result_obj.update_result_data(latest_server_dump) except ProtocolError: pass results = self.get_result_obj() return results def sample_x_seconds (self, sample_time, time_between_samples = 1): """ Automatically sets ongoing sampling of TRex data for sample_time seconds, with sampling rate described by time_between_samples. Does not stop the TRex afterwards! .. tip:: Useful for changing the device (Router, ASA etc.) configuration after given time. :parameters: sample_time : int sample the TRex this number of seconds time_between_samples : int determines the time between each sample of the server default value : **1** :return: the first result object (see :class:`CTRexResult` for further details) of the TRex run after given sample_time. :raises: + :exc:`UserWarning`, in case the TRex run ended before sample_time duration + :exc:`trex_exceptions.TRexIncompleteRunError`, in case one of failed TRex run (unexpected termination). + :exc:`TypeError`, in case JSON stream decoding error. + ProtocolError, in case of error in JSON-RPC protocol. """ # make sure TRex is running. raise exceptions here if any self.wait_until_kickoff_finish() end_time = time.time() + sample_time while self.is_running(): if time.time() < end_time: time.sleep(time_between_samples) else: return self.get_result_obj() raise UserWarning("TRex has stopped at %s seconds (before expected %s seconds)\nTry increasing test duration or decreasing sample_time" % (elapsed_time, sample_time)) def get_result_obj (self, copy_obj = True): """ Returns the result object of the trex_client's instance. By default, returns a **copy** of the objects (so that changes to the original object are masked). 
:parameters: copy_obj : bool False means that a reference to the original (possibly changing) object are passed defaul value : **True** :return: the latest result object (see :class:`CTRexResult` for further details) with sampled data. """ if copy_obj: return copy.deepcopy(self.result_obj) else: return self.result_obj def is_reserved (self): """ Checks if TRex is currently reserved to any user or not. :parameters: None :return: + **True** if TRex is reserved. + **False** otherwise. :raises: ProtocolError, in case of error in JSON-RPC protocol. """ try: return self.server.is_reserved() except AppError as err: self._handle_AppError_exception(err.args[0]) except ProtocolError: raise finally: self.prompt_verbose_data() def get_trex_daemon_log (self): """ Get Trex daemon log. :return: String representation of TRex daemon log :raises: + :exc:`trex_exceptions.TRexRequestDenied`, in case file could not be read. + ProtocolError, in case of error in JSON-RPC protocol. """ try: res = binascii.a2b_base64(self.server.get_trex_daemon_log()) if type(res) is bytes: return res.decode() return res except AppError as err: self._handle_AppError_exception(err.args[0]) except ProtocolError: raise finally: self.prompt_verbose_data() def get_trex_log (self): """ Get TRex CLI output log :return: String representation of TRex log :raises: + :exc:`trex_exceptions.TRexRequestDenied`, in case file could not be fetched at server side. + ProtocolError, in case of error in JSON-RPC protocol. """ try: res = binascii.a2b_base64(self.server.get_trex_log()) if type(res) is bytes: return res.decode() return res except AppError as err: self._handle_AppError_exception(err.args[0]) except ProtocolError: raise finally: self.prompt_verbose_data() def get_trex_version (self): """ Get TRex version details. :return: Trex details (Version, User, Date, Uuid, Git SHA) as ordered dictionary :raises: + :exc:`trex_exceptions.TRexRequestDenied`, in case TRex version could not be determined. 
+ ProtocolError, in case of error in JSON-RPC protocol. + KeyError is case one of the keys is missing in response """ try: version_dict = OrderedDict() res = binascii.a2b_base64(self.server.get_trex_version()) if type(res) is bytes: res = res.decode() result_lines = res.split('\n') for line in result_lines: if not line: continue key, value = line.strip().split(':', 1) version_dict[key.strip()] = value.strip() for key in ('Version', 'User', 'Date', 'Uuid', 'Git SHA'): if key not in version_dict: raise KeyError('get_trex_version: got server response without key: {0}'.format(key)) return version_dict except AppError as err: self._handle_AppError_exception(err.args[0]) except ProtocolError: raise finally: self.prompt_verbose_data() def reserve_trex (self, user = None): """ Reserves the usage of TRex to a certain user. When TRex is reserved, it can't be reserved. :parameters: user : str a username of the desired owner of TRex default: current logged user :return: **True** if reservation made successfully :raises: + :exc:`trex_exceptions.TRexRequestDenied`, in case TRex is reserved for another user than the one trying to make the reservation. + :exc:`trex_exceptions.TRexInUseError`, in case TRex is currently running. + ProtocolError, in case of error in JSON-RPC protocol. """ username = user or self.__default_user try: return self.server.reserve_trex(user = username) except AppError as err: self._handle_AppError_exception(err.args[0]) except ProtocolError: raise finally: self.prompt_verbose_data() def cancel_reservation (self, user = None): """ Cancels a current reservation of TRex to a certain user. When TRex is reserved, no other user can start new TRex runs. :parameters: user : str a username of the desired owner of TRex default: current logged user :return: + **True** if reservation canceled successfully, + **False** if there was no reservation at all. 
:raises: + :exc:`trex_exceptions.TRexRequestDenied`, in case TRex is reserved for another user than the one trying to cancel the reservation. + ProtocolError, in case of error in JSON-RPC protocol. """ username = user or self.__default_user try: return self.server.cancel_reservation(user = username) except AppError as err: self._handle_AppError_exception(err.args[0]) except ProtocolError: raise finally: self.prompt_verbose_data() def get_files_list (self, path): """ Gets a list of dirs and files either from /tmp/trex_files or path relative to TRex server. :parameters: path : str a path to directory to read. :return: Tuple: list of dirs and list of files in given path :raises: + :exc:`trex_exceptions.TRexRequestDenied`, in case TRex is reserved for another user than the one trying to cancel the reservation. + ProtocolError, in case of error in JSON-RPC protocol. """ try: return self.server.get_files_list(path) except AppError as err: self._handle_AppError_exception(err.args[0]) except ProtocolError: raise finally: self.prompt_verbose_data() def get_file(self, filepath): """ Gets content of file as bytes string from /tmp/trex_files or TRex server directory. :parameters: filepath : str a path to a file at server. it can be either relative to TRex server or absolute path starting with /tmp/trex_files :return: Content of the file :raises: + :exc:`trex_exceptions.TRexRequestDenied`, in case TRex is reserved for another user than the one trying to cancel the reservation. + ProtocolError, in case of error in JSON-RPC protocol. """ try: return binascii.a2b_base64(self.server.get_file(filepath)) except AppError as err: self._handle_AppError_exception(err.args[0]) except ProtocolError: raise finally: self.prompt_verbose_data() def get_trex_config(self): """ Get Trex config file (/etc/trex_cfg.yaml). :return: String representation of TRex config file :raises: + :exc:`trex_exceptions.TRexRequestDenied`, in case file could not be read. 
+ ProtocolError, in case of error in JSON-RPC protocol. """ try: res = binascii.a2b_base64(self.server.get_trex_config()) if type(res) is bytes: return res.decode() return res except AppError as err: self._handle_AppError_exception(err.args[0]) except ProtocolError: raise finally: self.prompt_verbose_data() def push_files (self, filepaths): """ Pushes a file (or a list of files) to store locally on server. :parameters: filepaths : str or list a path to a file to be pushed to server. if a list of paths is passed, all of those will be pushed to server :return: + **True** if file(s) copied successfully. + **False** otherwise. :raises: + :exc:`IOError`, in case specified file wasn't found or could not be accessed. + ProtocolError, in case of error in JSON-RPC protocol. """ paths_list = None if isinstance(filepaths, str): paths_list = [filepaths] elif isinstance(filepaths, list): paths_list = filepaths else: raise TypeError("filepaths argument must be of type str or list") for filepath in paths_list: try: if not os.path.exists(filepath): raise IOError(errno.ENOENT, "The requested `{fname}` file wasn't found. Operation aborted.".format( fname = filepath) ) else: filename = os.path.basename(filepath) with open(filepath, 'rb') as f: file_content = f.read() self.server.push_file(filename, binascii.b2a_base64(file_content).decode()) finally: self.prompt_verbose_data() return True def is_query_relevance(self): """ Checks if time between any two consecutive server queries (asking for live running data) passed. .. note:: The allowed minimum time between each two consecutive samples is 0.5 seconds. :parameters: None :return: + **True** if more than 0.5 seconds has been past from last server query. + **False** otherwise. 
""" cur_time = time.time() if cur_time-self._last_sample < 0.5: return False else: self._last_sample = cur_time return True def call_server_mathod_safely (self, method_to_call): try: return method_to_call() except socket.error as e: if e.errno == errno.ECONNREFUSED: raise SocketError(errno.ECONNREFUSED, "Connection to TRex daemon server was refused. Please make sure the server is up.") def check_server_connectivity (self): """ Checks TRex daemon server for connectivity. """ try: socket.gethostbyname(self.trex_host) return self.server.connectivity_check() except socket.gaierror as e: raise socket.gaierror(e.errno, "Could not resolve server hostname. Please make sure hostname entered correctly.") except socket.error as e: if e.errno == errno.ECONNREFUSED: raise socket.error(errno.ECONNREFUSED, "Connection to TRex daemon server was refused. Please make sure the server is up.") raise finally: self.prompt_verbose_data() def master_add(self, x, y): ''' Sanity check for Master daemon ''' try: return self.master_daemon.add(x,y) except AppError as err: self._handle_AppError_exception(err.args[0]) finally: self.prompt_verbose_data() def check_master_connectivity (self): ''' Check Master daemon for connectivity. Return True upon success ''' try: socket.gethostbyname(self.trex_host) return self.master_daemon.check_connectivity() except socket.gaierror as e: raise socket.gaierror(e.errno, "Could not resolve server hostname. Please make sure hostname entered correctly.") except socket.error as e: if e.errno == errno.ECONNREFUSED: raise socket.error(errno.ECONNREFUSED, "Connection to Master daemon was refused. Please make sure the server is up.") raise finally: self.prompt_verbose_data() def is_trex_daemon_running(self): ''' Check if TRex server daemon is running. 
Returns True/False ''' try: return self.master_daemon.is_trex_daemon_running() except AppError as err: self._handle_AppError_exception(err.args[0]) finally: self.prompt_verbose_data() def restart_trex_daemon(self, tries = 1): ''' Restart TRex server daemon. Useful after update. Will not fail if daemon is initially stopped. ''' for _ in range(tries): try: return self.master_daemon.restart_trex_daemon() except AppError as err: self._handle_AppError_exception(err.args[0]) except Exception as e: print('Exception during request: %s' % e) finally: self.prompt_verbose_data() time.sleep(1) raise def start_trex_daemon(self): ''' Start TRex server daemon. :return: + **True** if success. + **False** if TRex server daemon already running. ''' try: return self.master_daemon.start_trex_daemon() except AppError as err: self._handle_AppError_exception(err.args[0]) finally: self.prompt_verbose_data() def stop_trex_daemon(self): ''' Stop TRex server daemon. :return: + **True** if success. + **False** if TRex server daemon already running. ''' try: return self.master_daemon.stop_trex_daemon() except AppError as err: self._handle_AppError_exception(err.args[0]) finally: self.prompt_verbose_data() def prompt_verbose_data(self): """ This method prompts any verbose data available, only if `verbose` option has been turned on. """ if self.verbose: print ('\n') print ("(*) JSON-RPC request:", self.history.request) print ("(*) JSON-RPC response:", self.history.response) def __verbose_print(self, print_str): """ This private method prints the `print_str` string only in case self.verbose flag is turned on. :parameters: print_str : str a string to be printed :returns: None """ if self.verbose: print (print_str) def _handle_AppError_exception(self, err): """ This private method triggres the TRex dedicated exception generation in case a general ProtocolError has been raised. """ # handle known exceptions based on known error codes. 
# if error code is not known, raise ProtocolError exc = exception_handler.gen_exception(err) exc.__cause__ = None # remove "During handling of the above exception, another exception occurred:" in Python3.3+ raise exc class CTRexResult(object): """ A class containing all results received from TRex. Ontop to containing the results, this class offers easier data access and extended results processing options """ def __init__(self, max_history_size, filtered_latency_amount = 0.001): """ Instatiate a TRex result object :parameters: max_history_size : int A number to set the maximum history size of a single TRex run. Each sampling adds a new item to history. filtered_latency_amount : float Ignore high latency for this ammount of packets. (by default take into account 99.9%) """ self._history = deque(maxlen = max_history_size) self.clear_results() self.latency_checked = True self.filtered_latency_amount = filtered_latency_amount self.set_warmup_default() def set_warmup_default (self): self.set_warmup(0.96) def set_warmup (self,new_warmup_max): self.warmup_max = new_warmup_max def __repr__(self): return ("Is valid history? {arg}\n".format( arg = self.is_valid_hist() ) + "Done warmup? 
{arg}\n".format( arg = self.is_done_warmup() ) + "Expected tx rate: {arg}\n".format( arg = self.get_expected_tx_rate() ) + "Current tx rate: {arg}\n".format( arg = self.get_current_tx_rate() ) + "Minimum latency: {arg}\n".format( arg = self.get_min_latency() ) + "Maximum latency: {arg}\n".format( arg = self.get_max_latency() ) + "Average latency: {arg}\n".format( arg = self.get_avg_latency() ) + "Average window latency: {arg}\n".format( arg = self.get_avg_window_latency() ) + "Total drops: {arg}\n".format( arg = self.get_total_drops() ) + "Drop rate: {arg}\n".format( arg = self.get_drop_rate() ) + "History size so far: {arg}\n".format( arg = len(self._history) ) ) def get_expected_tx_rate (self): """ Fetches the expected TX rate in various units representation :parameters: None :return: dictionary containing the expected TX rate, where the key is the measurement units, and the value is the measurement value. """ return self._expected_tx_rate def get_current_tx_rate (self): """ Fetches the current TX rate in various units representation :parameters: None :return: dictionary containing the current TX rate, where the key is the measurement units, and the value is the measurement value. """ return self._current_tx_rate def get_max_latency (self): """ Fetches the maximum latency measured on each of the interfaces :parameters: None :return: dictionary containing the maximum latency, where the key is the measurement interface (`c` indicates client), and the value is the measurement value. """ return self._max_latency def get_min_latency (self): """ Fetches the minimum latency measured on each of the interfaces :parameters: None :return: dictionary containing the maximum latency, where the key is the measurement interface (`c` indicates client), and the value is the measurement value. 
""" return self._min_latency def get_is_latency_exists (self): """ return True if latency information exists :parameters: None :return: True or False """ if self._min_latency != None: return True; else: return False; def get_jitter_latency (self): """ Fetches the jitter latency measured on each of the interfaces from the start of TRex run :parameters: None :return: dictionary containing the average latency, where the key is the measurement interface (`c` indicates client), and the value is the measurement value. The `all` key represents the average of all interfaces' average """ return self._jitter_latency def get_avg_latency (self): """ Fetches the average latency measured on each of the interfaces from the start of TRex run :parameters: None :return: dictionary containing the average latency, where the key is the measurement interface (`c` indicates client), and the value is the measurement value. The `all` key represents the average of all interfaces' average """ return self._avg_latency def get_avg_window_latency (self): """ Fetches the average latency measured on each of the interfaces from all the sampled currently stored in window. :parameters: None :return: dictionary containing the average latency, where the key is the measurement interface (`c` indicates client), and the value is the measurement value. The `all` key represents the average of all interfaces' average """ return self._avg_window_latency def get_total_drops (self): """ Fetches the total number of drops identified from the moment TRex run began. :parameters: None :return: total drops count (as int) """ return self._total_drops def get_drop_rate (self): """ Fetches the most recent drop rate in pkts/sec units. :parameters: None :return: current drop rate (as float) """ return self._drop_rate def is_valid_hist (self): """ Checks if result obejct contains valid data. :parameters: None :return: + **True** if history is valid. + **False** otherwise. 
""" return self.valid def set_valid_hist (self, valid_stat = True): """ Sets result obejct validity status. :parameters: valid_stat : bool defines the validity status dafault value : **True** :return: None """ self.valid = valid_stat def is_done_warmup (self): """ Checks if TRex latest results TX-rate indicates that TRex has reached its expected TX-rate. :parameters: None :return: + **True** if expected TX-rate has been reached. + **False** otherwise. """ return self._done_warmup def get_last_value (self, tree_path_to_key, regex = None): """ A dynamic getter from the latest sampled data item stored in the result object. :parameters: tree_path_to_key : str defines a path to desired data. .. tip:: | Use '.' to enter one level deeper in dictionary hierarchy. | Use '[i]' to access the i'th indexed object of an array. regex : regex apply a regex to filter results out from a multiple results set. Filter applies only on keys of dictionary type. dafault value : **None** :return: + a list of values relevant to the specified path + None if no results were fetched or the history isn't valid. """ if not self.is_valid_hist(): return None else: return CTRexResult.__get_value_by_path(self._history[-1], tree_path_to_key, regex) def get_value_list (self, tree_path_to_key, regex = None, filter_none = True): """ A dynamic getter from all sampled data items stored in the result object. :parameters: tree_path_to_key : str defines a path to desired data. .. tip:: | Use '.' to enter one level deeper in dictionary hierarchy. | Use '[i]' to access the i'th indexed object of an array. regex : regex apply a regex to filter results out from a multiple results set. Filter applies only on keys of dictionary type. dafault value : **None** filter_none : bool specify if None results should be filtered out or not. dafault value : **True** :return: + a list of values relevant to the specified path. Each item on the list refers to a single server sample. 
+ None if no results were fetched or the history isn't valid. """ if not self.is_valid_hist(): return None else: raw_list = list( map(lambda x: CTRexResult.__get_value_by_path(x, tree_path_to_key, regex), self._history) ) if filter_none: return list (filter(lambda x: x!=None, raw_list) ) else: return raw_list def get_latest_dump(self): """ A getter to the latest sampled data item stored in the result object. :parameters: None :return: + a dictionary of the latest data item + an empty dictionary if history is empty. """ if len(self._history): return self._history[-1] return {} def get_ports_count(self): """ Returns number of ports based on TRex result :return: + number of ports in TRex result + -1 if history is empty. """ if not len(self._history): return -1 return len(self.get_last_value('trex-global.data', 'opackets-\d+')) def update_result_data (self, latest_dump): """ Integrates a `latest_dump` dictionary into the CTRexResult object. :parameters: latest_dump : dict a dictionary with the items desired to be integrated into the object history and stats :return: None """ # add latest dump to history if latest_dump: self._history.append(latest_dump) if not self.valid: self.valid = True # parse important fields and calculate averages and others if self._expected_tx_rate is None: # get the expected data only once since it doesn't change self._expected_tx_rate = CTRexResult.__get_value_by_path(latest_dump, "trex-global.data", "m_tx_expected_\w+") self._current_tx_rate = CTRexResult.__get_value_by_path(latest_dump, "trex-global.data", "m_tx_(?!expected_)\w+") if not self._done_warmup and self._expected_tx_rate is not None: # check for up to 4% change between expected and actual if (self._current_tx_rate['m_tx_bps'] > self.warmup_max * self._expected_tx_rate['m_tx_expected_bps']): self._done_warmup = True latest_dump['warmup_barrier'] = True # handle latency data if self.latency_checked and 'trex-latecny-v2' in latest_dump and 'trex-latecny' in latest_dump: # fix typos, 
by "pointer" if 'trex-latecny-v2' in latest_dump and 'trex-latency-v2' not in latest_dump: latest_dump['trex-latency-v2'] = latest_dump['trex-latecny-v2'] if 'trex-latecny' in latest_dump and 'trex-latency' not in latest_dump: latest_dump['trex-latency'] = latest_dump['trex-latecny'] latency_per_port = self.get_last_value("trex-latency-v2.data", "port-") self._max_latency = self.__get_filtered_max_latency(latency_per_port, self.filtered_latency_amount) self._min_latency = self.__get_filtered_min_latency(latency_per_port) avg_latency = self.get_last_value("trex-latency.data", "avg-") self._avg_latency = CTRexResult.__avg_all_and_rename_keys(avg_latency) jitter_latency = self.get_last_value("trex-latency.data", "jitter-") self._jitter_latency = CTRexResult.__avg_all_and_rename_keys(jitter_latency) avg_win_latency_list = self.get_value_list("trex-latency.data", "avg-") self._avg_window_latency = CTRexResult.__calc_latency_win_stats(avg_win_latency_list) tx_pkts = CTRexResult.__get_value_by_path(latest_dump, "trex-global.data.m_total_tx_pkts") rx_pkts = CTRexResult.__get_value_by_path(latest_dump, "trex-global.data.m_total_rx_pkts") if tx_pkts is not None and rx_pkts is not None: self._total_drops = tx_pkts - rx_pkts self._drop_rate = CTRexResult.__get_value_by_path(latest_dump, "trex-global.data.m_rx_drop_bps") def clear_results (self): """ Clears all results and sets the history's validity to `False` :parameters: None :return: None """ self.valid = False self._done_warmup = False self._expected_tx_rate = None self._current_tx_rate = None self._max_latency = None self._min_latency = None self._avg_latency = None self._jitter_latency = None self._avg_window_latency = None self._total_drops = None self._drop_rate = None self._history.clear() @staticmethod def __get_value_by_path (dct, tree_path, regex = None): try: for i, p in re.findall(r'(\d+)|([\w|-]+)', tree_path): dct = dct[p or int(i)] if regex is not None and isinstance(dct, dict): res = {} for key,val in 
dct.items(): match = re.match(regex, key) if match: res[key]=val return res else: return dct except (KeyError, TypeError): return None @staticmethod def __calc_latency_win_stats (latency_win_list): res = {'all' : None } port_dict = {'all' : []} list( map(lambda x: CTRexResult.__update_port_dict(x, port_dict), latency_win_list) ) # finally, calculate everages for each list res['all'] = float("%.3f" % (sum(port_dict['all'])/float(len(port_dict['all']))) ) port_dict.pop('all') for port, avg_list in port_dict.items(): res[port] = float("%.3f" % (sum(avg_list)/float(len(avg_list))) ) return res @staticmethod def __update_port_dict (src_avg_dict, dest_port_dict): all_list = src_avg_dict.values() dest_port_dict['all'].extend(all_list) for key, val in src_avg_dict.items(): reg_res = re.match("avg-(\d+)", key) if reg_res: tmp_key = "port"+reg_res.group(1) if tmp_key in dest_port_dict: dest_port_dict[tmp_key].append(val) else: dest_port_dict[tmp_key] = [val] @staticmethod def __avg_all_and_rename_keys (src_dict): res = {} all_list = src_dict.values() res['all'] = float("%.3f" % (sum(all_list)/float(len(all_list))) ) for key, val in src_dict.items(): reg_res = re.match("avg-(\d+)", key) if reg_res: tmp_key = "port"+reg_res.group(1) res[tmp_key] = val # don't touch original fields values return res @staticmethod def __get_filtered_min_latency(src_dict): result = {} if src_dict: for port, data in src_dict.items(): if not port.startswith('port-'): continue res = data['hist']['min_usec'] min_port = 'min-%s' % port[5:] result[min_port] = int(res) return(result); @staticmethod def __get_filtered_max_latency (src_dict, filtered_latency_amount = 0.001): result = {} if src_dict: for port, data in src_dict.items(): if not port.startswith('port-'): continue max_port = 'max-%s' % port[5:] res = data['hist'] if not len(res['histogram']): result[max_port] = 0 continue result[max_port] = 5 # if sum below will not get to filtered amount, use this value sum_high = 0.0 for elem in 
reversed(res['histogram']): sum_high += elem['val'] if sum_high >= filtered_latency_amount * res['cnt']: result[max_port] = elem['key'] + int('5' + repr(elem['key'])[2:]) break return result # history iterator after warmup period def _get_steady_state_history_iterator(self): if not self.is_done_warmup(): raise Exception('Warm-up period not finished') for index, res in enumerate(self._history): if 'warmup_barrier' in res: for steady_state_index in range(index, max(index, len(self._history) - 1)): yield self._history[steady_state_index] return for index in range(len(self._history) - 1): yield self._history[index] def get_avg_steady_state_value(self, tree_path_to_key): ''' Gets average value after warmup period. For example: <result object>.get_avg_steady_state_value('trex-global.data.m_tx_bps') Usually more accurate than latest history value. :parameters: tree_path_to_key : str defines a path to desired data. :return: average value at steady state :raises: KeyError in case steady state period was not reached or tree_path_to_key was not found in result. ''' values_arr = [self.__get_value_by_path(res, tree_path_to_key) for res in self._get_steady_state_history_iterator()] values_arr = list(filter(lambda x: x is not None, values_arr)) if not values_arr: raise KeyError('All the keys are None, probably wrong tree_path_to_key: %s' % tree_path_to_key) return sum(values_arr) / float(len(values_arr)) if __name__ == "__main__": c = CTRexClient('127.0.0.1') print('restarting daemon') c.restart_trex_daemon() print('kill any running') c.kill_all_trexes() print('start') c.start_stateless() print('sleep') time.sleep(5) print('done')
apache-2.0
nzavagli/UnrealPy
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/django-1.8.2/tests/gis_tests/distapp/tests.py
13
19350
from __future__ import unicode_literals from django.contrib.gis.geos import HAS_GEOS from django.contrib.gis.measure import D # alias for Distance from django.db import connection from django.db.models import Q from django.test import TestCase, skipUnlessDBFeature from ..utils import no_oracle, oracle, postgis, spatialite if HAS_GEOS: from django.contrib.gis.geos import GEOSGeometry, LineString, Point from .models import (AustraliaCity, Interstate, SouthTexasInterstate, SouthTexasCity, SouthTexasCityFt, CensusZipcode, SouthTexasZipcode) @skipUnlessDBFeature("gis_enabled") class DistanceTest(TestCase): fixtures = ['initial'] if HAS_GEOS: # A point we are testing distances with -- using a WGS84 # coordinate that'll be implicitly transformed to that to # the coordinate system of the field, EPSG:32140 (Texas South Central # w/units in meters) stx_pnt = GEOSGeometry('POINT (-95.370401017314293 29.704867409475465)', 4326) # Another one for Australia au_pnt = GEOSGeometry('POINT (150.791 -34.4919)', 4326) def get_names(self, qs): cities = [c.name for c in qs] cities.sort() return cities def test_init(self): """ Test initialization of distance models. """ self.assertEqual(9, SouthTexasCity.objects.count()) self.assertEqual(9, SouthTexasCityFt.objects.count()) self.assertEqual(11, AustraliaCity.objects.count()) self.assertEqual(4, SouthTexasZipcode.objects.count()) self.assertEqual(4, CensusZipcode.objects.count()) self.assertEqual(1, Interstate.objects.count()) self.assertEqual(1, SouthTexasInterstate.objects.count()) @skipUnlessDBFeature("supports_dwithin_lookup") def test_dwithin(self): """ Test the `dwithin` lookup type. """ # Distances -- all should be equal (except for the # degree/meter pair in au_cities, that's somewhat # approximate). tx_dists = [(7000, 22965.83), D(km=7), D(mi=4.349)] au_dists = [(0.5, 32000), D(km=32), D(mi=19.884)] # Expected cities for Australia and Texas. 
tx_cities = ['Downtown Houston', 'Southside Place'] au_cities = ['Mittagong', 'Shellharbour', 'Thirroul', 'Wollongong'] # Performing distance queries on two projected coordinate systems one # with units in meters and the other in units of U.S. survey feet. for dist in tx_dists: if isinstance(dist, tuple): dist1, dist2 = dist else: dist1 = dist2 = dist qs1 = SouthTexasCity.objects.filter(point__dwithin=(self.stx_pnt, dist1)) qs2 = SouthTexasCityFt.objects.filter(point__dwithin=(self.stx_pnt, dist2)) for qs in qs1, qs2: self.assertEqual(tx_cities, self.get_names(qs)) # Now performing the `dwithin` queries on a geodetic coordinate system. for dist in au_dists: if isinstance(dist, D) and not oracle: type_error = True else: type_error = False if isinstance(dist, tuple): if oracle: dist = dist[1] else: dist = dist[0] # Creating the query set. qs = AustraliaCity.objects.order_by('name') if type_error: # A ValueError should be raised on PostGIS when trying to pass # Distance objects into a DWithin query using a geodetic field. self.assertRaises(ValueError, AustraliaCity.objects.filter(point__dwithin=(self.au_pnt, dist)).count) else: self.assertListEqual(au_cities, self.get_names(qs.filter(point__dwithin=(self.au_pnt, dist)))) @skipUnlessDBFeature("has_distance_method") def test_distance_projected(self): """ Test the `distance` GeoQuerySet method on projected coordinate systems. """ # The point for La Grange, TX lagrange = GEOSGeometry('POINT(-96.876369 29.905320)', 4326) # Reference distances in feet and in meters. Got these values from # using the provided raw SQL statements. 
# SELECT ST_Distance(point, ST_Transform(ST_GeomFromText('POINT(-96.876369 29.905320)', 4326), 32140)) # FROM distapp_southtexascity; m_distances = [147075.069813, 139630.198056, 140888.552826, 138809.684197, 158309.246259, 212183.594374, 70870.188967, 165337.758878, 139196.085105] # SELECT ST_Distance(point, ST_Transform(ST_GeomFromText('POINT(-96.876369 29.905320)', 4326), 2278)) # FROM distapp_southtexascityft; # Oracle 11 thinks this is not a projected coordinate system, so it's # not tested. ft_distances = [482528.79154625, 458103.408123001, 462231.860397575, 455411.438904354, 519386.252102563, 696139.009211594, 232513.278304279, 542445.630586414, 456679.155883207] # Testing using different variations of parameters and using models # with different projected coordinate systems. dist1 = SouthTexasCity.objects.distance(lagrange, field_name='point').order_by('id') dist2 = SouthTexasCity.objects.distance(lagrange).order_by('id') # Using GEOSGeometry parameter if spatialite or oracle: dist_qs = [dist1, dist2] else: dist3 = SouthTexasCityFt.objects.distance(lagrange.ewkt).order_by('id') # Using EWKT string parameter. dist4 = SouthTexasCityFt.objects.distance(lagrange).order_by('id') dist_qs = [dist1, dist2, dist3, dist4] # Original query done on PostGIS, have to adjust AlmostEqual tolerance # for Oracle. tol = 2 if oracle else 5 # Ensuring expected distances are returned for each distance queryset. for qs in dist_qs: for i, c in enumerate(qs): self.assertAlmostEqual(m_distances[i], c.distance.m, tol) self.assertAlmostEqual(ft_distances[i], c.distance.survey_ft, tol) @skipUnlessDBFeature("has_distance_method", "supports_distance_geodetic") def test_distance_geodetic(self): """ Test the `distance` GeoQuerySet method on geodetic coordinate systems. """ tol = 2 if oracle else 5 # Testing geodetic distance calculation with a non-point geometry # (a LineString of Wollongong and Shellharbour coords). 
ls = LineString(((150.902, -34.4245), (150.87, -34.5789))) # Reference query: # SELECT ST_distance_sphere(point, ST_GeomFromText('LINESTRING(150.9020 -34.4245,150.8700 -34.5789)', 4326)) # FROM distapp_australiacity ORDER BY name; distances = [1120954.92533513, 140575.720018241, 640396.662906304, 60580.9693849269, 972807.955955075, 568451.8357838, 40435.4335201384, 0, 68272.3896586844, 12375.0643697706, 0] qs = AustraliaCity.objects.distance(ls).order_by('name') for city, distance in zip(qs, distances): # Testing equivalence to within a meter. self.assertAlmostEqual(distance, city.distance.m, 0) # Got the reference distances using the raw SQL statements: # SELECT ST_distance_spheroid(point, ST_GeomFromText('POINT(151.231341 -33.952685)', 4326), # 'SPHEROID["WGS 84",6378137.0,298.257223563]') FROM distapp_australiacity WHERE (NOT (id = 11)); # SELECT ST_distance_sphere(point, ST_GeomFromText('POINT(151.231341 -33.952685)', 4326)) # FROM distapp_australiacity WHERE (NOT (id = 11)); st_distance_sphere if connection.ops.postgis and connection.ops.proj_version_tuple() >= (4, 7, 0): # PROJ.4 versions 4.7+ have updated datums, and thus different # distance values. 
spheroid_distances = [60504.0628957201, 77023.9489850262, 49154.8867574404, 90847.4358768573, 217402.811919332, 709599.234564757, 640011.483550888, 7772.00667991925, 1047861.78619339, 1165126.55236034] sphere_distances = [60580.9693849267, 77144.0435286473, 49199.4415344719, 90804.7533823494, 217713.384600405, 709134.127242793, 639828.157159169, 7786.82949717788, 1049204.06569028, 1162623.7238134] else: spheroid_distances = [60504.0628825298, 77023.948962654, 49154.8867507115, 90847.435881812, 217402.811862568, 709599.234619957, 640011.483583758, 7772.00667666425, 1047861.7859506, 1165126.55237647] sphere_distances = [60580.7612632291, 77143.7785056615, 49199.2725132184, 90804.4414289463, 217712.63666124, 709131.691061906, 639825.959074112, 7786.80274606706, 1049200.46122281, 1162619.7297006] # Testing with spheroid distances first. hillsdale = AustraliaCity.objects.get(name='Hillsdale') qs = AustraliaCity.objects.exclude(id=hillsdale.id).distance(hillsdale.point, spheroid=True).order_by('id') for i, c in enumerate(qs): self.assertAlmostEqual(spheroid_distances[i], c.distance.m, tol) if postgis: # PostGIS uses sphere-only distances by default, testing these as well. qs = AustraliaCity.objects.exclude(id=hillsdale.id).distance(hillsdale.point).order_by('id') for i, c in enumerate(qs): self.assertAlmostEqual(sphere_distances[i], c.distance.m, tol) @no_oracle # Oracle already handles geographic distance calculation. @skipUnlessDBFeature("has_distance_method") def test_distance_transform(self): """ Test the `distance` GeoQuerySet method used with `transform` on a geographic field. """ # We'll be using a Polygon (created by buffering the centroid # of 77005 to 100m) -- which aren't allowed in geographic distance # queries normally, however our field has been transformed to # a non-geographic system. 
z = SouthTexasZipcode.objects.get(name='77005') # Reference query: # SELECT ST_Distance(ST_Transform("distapp_censuszipcode"."poly", 32140), # ST_GeomFromText('<buffer_wkt>', 32140)) # FROM "distapp_censuszipcode"; dists_m = [3553.30384972258, 1243.18391525602, 2186.15439472242] # Having our buffer in the SRID of the transformation and of the field # -- should get the same results. The first buffer has no need for # transformation SQL because it is the same SRID as what was given # to `transform()`. The second buffer will need to be transformed, # however. buf1 = z.poly.centroid.buffer(100) buf2 = buf1.transform(4269, clone=True) ref_zips = ['77002', '77025', '77401'] for buf in [buf1, buf2]: qs = CensusZipcode.objects.exclude(name='77005').transform(32140).distance(buf).order_by('name') self.assertListEqual(ref_zips, self.get_names(qs)) for i, z in enumerate(qs): self.assertAlmostEqual(z.distance.m, dists_m[i], 5) @skipUnlessDBFeature("supports_distances_lookups") def test_distance_lookups(self): """ Test the `distance_lt`, `distance_gt`, `distance_lte`, and `distance_gte` lookup types. """ # Retrieving the cities within a 20km 'donut' w/a 7km radius 'hole' # (thus, Houston and Southside place will be excluded as tested in # the `test02_dwithin` above). qs1 = SouthTexasCity.objects.filter(point__distance_gte=(self.stx_pnt, D(km=7))).filter( point__distance_lte=(self.stx_pnt, D(km=20)), ) # Can't determine the units on SpatiaLite from PROJ.4 string, and # Oracle 11 incorrectly thinks it is not projected. if spatialite or oracle: dist_qs = (qs1,) else: qs2 = SouthTexasCityFt.objects.filter(point__distance_gte=(self.stx_pnt, D(km=7))).filter( point__distance_lte=(self.stx_pnt, D(km=20)), ) dist_qs = (qs1, qs2) for qs in dist_qs: cities = self.get_names(qs) self.assertEqual(cities, ['Bellaire', 'Pearland', 'West University Place']) # Doing a distance query using Polygons instead of a Point. 
z = SouthTexasZipcode.objects.get(name='77005') qs = SouthTexasZipcode.objects.exclude(name='77005').filter(poly__distance_lte=(z.poly, D(m=275))) self.assertEqual(['77025', '77401'], self.get_names(qs)) # If we add a little more distance 77002 should be included. qs = SouthTexasZipcode.objects.exclude(name='77005').filter(poly__distance_lte=(z.poly, D(m=300))) self.assertEqual(['77002', '77025', '77401'], self.get_names(qs)) @skipUnlessDBFeature("supports_distances_lookups", "supports_distance_geodetic") def test_geodetic_distance_lookups(self): """ Test distance lookups on geodetic coordinate systems. """ # Line is from Canberra to Sydney. Query is for all other cities within # a 100km of that line (which should exclude only Hobart & Adelaide). line = GEOSGeometry('LINESTRING(144.9630 -37.8143,151.2607 -33.8870)', 4326) dist_qs = AustraliaCity.objects.filter(point__distance_lte=(line, D(km=100))) self.assertEqual(9, dist_qs.count()) self.assertEqual(['Batemans Bay', 'Canberra', 'Hillsdale', 'Melbourne', 'Mittagong', 'Shellharbour', 'Sydney', 'Thirroul', 'Wollongong'], self.get_names(dist_qs)) # Too many params (4 in this case) should raise a ValueError. queryset = AustraliaCity.objects.filter(point__distance_lte=('POINT(5 23)', D(km=100), 'spheroid', '4')) self.assertRaises(ValueError, len, queryset) # Not enough params should raise a ValueError. self.assertRaises(ValueError, len, AustraliaCity.objects.filter(point__distance_lte=('POINT(5 23)',))) # Getting all cities w/in 550 miles of Hobart. hobart = AustraliaCity.objects.get(name='Hobart') qs = AustraliaCity.objects.exclude(name='Hobart').filter(point__distance_lte=(hobart.point, D(mi=550))) cities = self.get_names(qs) self.assertEqual(cities, ['Batemans Bay', 'Canberra', 'Melbourne']) # Cities that are either really close or really far from Wollongong -- # and using different units of distance. 
wollongong = AustraliaCity.objects.get(name='Wollongong') d1, d2 = D(yd=19500), D(nm=400) # Yards (~17km) & Nautical miles. # Normal geodetic distance lookup (uses `distance_sphere` on PostGIS. gq1 = Q(point__distance_lte=(wollongong.point, d1)) gq2 = Q(point__distance_gte=(wollongong.point, d2)) qs1 = AustraliaCity.objects.exclude(name='Wollongong').filter(gq1 | gq2) # Geodetic distance lookup but telling GeoDjango to use `distance_spheroid` # instead (we should get the same results b/c accuracy variance won't matter # in this test case). querysets = [qs1] if connection.features.has_distance_spheroid_method: gq3 = Q(point__distance_lte=(wollongong.point, d1, 'spheroid')) gq4 = Q(point__distance_gte=(wollongong.point, d2, 'spheroid')) qs2 = AustraliaCity.objects.exclude(name='Wollongong').filter(gq3 | gq4) querysets.append(qs2) for qs in querysets: cities = self.get_names(qs) self.assertEqual(cities, ['Adelaide', 'Hobart', 'Shellharbour', 'Thirroul']) @skipUnlessDBFeature("has_area_method") def test_area(self): """ Test the `area` GeoQuerySet method. """ # Reference queries: # SELECT ST_Area(poly) FROM distapp_southtexaszipcode; area_sq_m = [5437908.90234375, 10183031.4389648, 11254471.0073242, 9881708.91772461] # Tolerance has to be lower for Oracle tol = 2 for i, z in enumerate(SouthTexasZipcode.objects.order_by('name').area()): self.assertAlmostEqual(area_sq_m[i], z.area.sq_m, tol) @skipUnlessDBFeature("has_length_method") def test_length(self): """ Test the `length` GeoQuerySet method. """ # Reference query (should use `length_spheroid`). # SELECT ST_length_spheroid(ST_GeomFromText('<wkt>', 4326) 'SPHEROID["WGS 84",6378137,298.257223563, # AUTHORITY["EPSG","7030"]]'); len_m1 = 473504.769553813 len_m2 = 4617.668 if connection.features.supports_distance_geodetic: qs = Interstate.objects.length() tol = 2 if oracle else 3 self.assertAlmostEqual(len_m1, qs[0].length.m, tol) else: # Does not support geodetic coordinate systems. 
self.assertRaises(ValueError, Interstate.objects.length) # Now doing length on a projected coordinate system. i10 = SouthTexasInterstate.objects.length().get(name='I-10') self.assertAlmostEqual(len_m2, i10.length.m, 2) @skipUnlessDBFeature("has_perimeter_method") def test_perimeter(self): """ Test the `perimeter` GeoQuerySet method. """ # Reference query: # SELECT ST_Perimeter(distapp_southtexaszipcode.poly) FROM distapp_southtexaszipcode; perim_m = [18404.3550889361, 15627.2108551001, 20632.5588368978, 17094.5996143697] tol = 2 if oracle else 7 for i, z in enumerate(SouthTexasZipcode.objects.order_by('name').perimeter()): self.assertAlmostEqual(perim_m[i], z.perimeter.m, tol) # Running on points; should return 0. for i, c in enumerate(SouthTexasCity.objects.perimeter(model_att='perim')): self.assertEqual(0, c.perim.m) @skipUnlessDBFeature("has_area_method", "has_distance_method") def test_measurement_null_fields(self): """ Test the measurement GeoQuerySet methods on fields with NULL values. """ # Creating SouthTexasZipcode w/NULL value. SouthTexasZipcode.objects.create(name='78212') # Performing distance/area queries against the NULL PolygonField, # and ensuring the result of the operations is None. htown = SouthTexasCity.objects.get(name='Downtown Houston') z = SouthTexasZipcode.objects.distance(htown.point).area().get(name='78212') self.assertIsNone(z.distance) self.assertIsNone(z.area) @skipUnlessDBFeature("has_distance_method") def test_distance_order_by(self): qs = SouthTexasCity.objects.distance(Point(3, 3)).order_by( 'distance' ).values_list('name', flat=True).filter(name__in=('San Antonio', 'Pearland')) self.assertQuerysetEqual(qs, ['San Antonio', 'Pearland'], lambda x: x)
mit
ehashman/oh-mainline
vendor/packages/django-extensions/django_extensions/management/commands/syncdata.py
44
9559
""" SyncData ======== Django command similar to 'loaddata' but also deletes. After 'syncdata' has run, the database will have the same data as the fixture - anything missing will of been added, anything different will of been updated, and anything extra will of been deleted. """ from django.core.management.base import BaseCommand from django.core.management.color import no_style from optparse import make_option import sys import os class Command(BaseCommand): """ syncdata command """ help = 'Makes the current database have the same data as the fixture(s), no more, no less.' args = "fixture [fixture ...]" def remove_objects_not_in(self, objects_to_keep, verbosity): """ Deletes all the objects in the database that are not in objects_to_keep. - objects_to_keep: A map where the keys are classes, and the values are a set of the objects of that class we should keep. """ for class_ in objects_to_keep.keys(): current = class_.objects.all() current_ids = set([x.id for x in current]) keep_ids = set([x.id for x in objects_to_keep[class_]]) remove_these_ones = current_ids.difference(keep_ids) if remove_these_ones: for obj in current: if obj.id in remove_these_ones: obj.delete() if verbosity >= 2: print "Deleted object: %s" % unicode(obj) if verbosity > 0 and remove_these_ones: num_deleted = len(remove_these_ones) if num_deleted > 1: type_deleted = unicode(class_._meta.verbose_name_plural) else: type_deleted = unicode(class_._meta.verbose_name) print "Deleted %s %s" % (str(num_deleted), type_deleted) def handle(self, *fixture_labels, **options): """ Main method of a Django command """ from django.db.models import get_apps from django.core import serializers from django.db import connection, transaction from django.conf import settings self.style = no_style() verbosity = int(options.get('verbosity', 1)) show_traceback = options.get('traceback', False) # Keep a count of the installed objects and fixtures fixture_count = 0 object_count = 0 objects_per_fixture = [] models = set() 
humanize = lambda dirname: dirname and "'%s'" % dirname or 'absolute path' # Get a cursor (even though we don't need one yet). This has # the side effect of initializing the test database (if # it isn't already initialized). cursor = connection.cursor() # Start transaction management. All fixtures are installed in a # single transaction to ensure that all references are resolved. transaction.commit_unless_managed() transaction.enter_transaction_management() transaction.managed(True) app_fixtures = [os.path.join(os.path.dirname(app.__file__), 'fixtures') \ for app in get_apps()] for fixture_label in fixture_labels: parts = fixture_label.split('.') if len(parts) == 1: fixture_name = fixture_label formats = serializers.get_public_serializer_formats() else: fixture_name, format = '.'.join(parts[:-1]), parts[-1] if format in serializers.get_public_serializer_formats(): formats = [format] else: formats = [] if formats: if verbosity > 1: print "Loading '%s' fixtures..." % fixture_name else: sys.stderr.write( self.style.ERROR("Problem installing fixture '%s': %s is not a known " + \ "serialization format." % (fixture_name, format)) ) transaction.rollback() transaction.leave_transaction_management() return if os.path.isabs(fixture_name): fixture_dirs = [fixture_name] else: fixture_dirs = app_fixtures + list(settings.FIXTURE_DIRS) + [''] for fixture_dir in fixture_dirs: if verbosity > 1: print "Checking %s for fixtures..." % humanize(fixture_dir) label_found = False for format in formats: serializer = serializers.get_serializer(format) if verbosity > 1: print "Trying %s for %s fixture '%s'..." % \ (humanize(fixture_dir), format, fixture_name) try: full_path = os.path.join(fixture_dir, '.'.join([fixture_name, format])) fixture = open(full_path, 'r') if label_found: fixture.close() print self.style.ERROR("Multiple fixtures named '%s' in %s. Aborting." 
% (fixture_name, humanize(fixture_dir))) transaction.rollback() transaction.leave_transaction_management() return else: fixture_count += 1 objects_per_fixture.append(0) if verbosity > 0: print "Installing %s fixture '%s' from %s." % \ (format, fixture_name, humanize(fixture_dir)) try: objects_to_keep = {} objects = serializers.deserialize(format, fixture) for obj in objects: object_count += 1 objects_per_fixture[-1] += 1 class_ = obj.object.__class__ if not class_ in objects_to_keep: objects_to_keep[class_] = set() objects_to_keep[class_].add(obj.object) models.add(class_) obj.save() self.remove_objects_not_in(objects_to_keep, verbosity) label_found = True except (SystemExit, KeyboardInterrupt): raise except Exception: import traceback fixture.close() transaction.rollback() transaction.leave_transaction_management() if show_traceback: traceback.print_exc() else: sys.stderr.write( self.style.ERROR("Problem installing fixture '%s': %s\n" % (full_path, traceback.format_exc()))) return fixture.close() except: if verbosity > 1: print "No %s fixture '%s' in %s." % \ (format, fixture_name, humanize(fixture_dir)) # If any of the fixtures we loaded contain 0 objects, assume that an # error was encountered during fixture loading. if 0 in objects_per_fixture: sys.stderr.write( self.style.ERROR("No fixture data found for '%s'. (File format may be invalid.)" % (fixture_name))) transaction.rollback() transaction.leave_transaction_management() return # If we found even one object in a fixture, we need to reset the # database sequences. if object_count > 0: sequence_sql = connection.ops.sequence_reset_sql(self.style, models) if sequence_sql: if verbosity > 1: print "Resetting sequences" for line in sequence_sql: cursor.execute(line) transaction.commit() transaction.leave_transaction_management() if object_count == 0: if verbosity > 1: print "No fixtures found." 
else: if verbosity > 0: print "Installed %d object(s) from %d fixture(s)" % (object_count, fixture_count) # Close the DB connection. This is required as a workaround for an # edge case in MySQL: if the same connection is used to # create tables, load data, and query, the query can return # incorrect results. See Django #7572, MySQL #37735. connection.close() # Backwards compatibility for Django r9110 if not [opt for opt in Command.option_list if opt.dest == 'verbosity']: Command.option_list += ( make_option('--verbosity', '-v', action="store", dest="verbosity", default='1', type='choice', choices=['0', '1', '2'], help="Verbosity level; 0=minimal output, 1=normal output, 2=all output"), )
agpl-3.0
nrwahl2/ansible
lib/ansible/modules/network/ios/ios_static_route.py
4
6963
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2017, Ansible by Red Hat, inc # # This file is part of Ansible by Red Hat # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'network'} DOCUMENTATION = """ --- module: ios_static_route version_added: "2.4" author: "Ricardo Carrillo Cruz (@rcarrillocruz)" short_description: Manage static IP routes on Cisco IOS network devices description: - This module provides declarative management of static IP routes on Cisco IOS network devices. options: prefix: description: - Network prefix of the static route. mask: description: - Network prefix mask of the static route. next_hop: description: - Next hop IP of the static route. admin_distance: description: - Admin distance of the static route. default: 1 aggregate: description: List of static route definitions state: description: - State of the static route configuration. 
default: present choices: ['present', 'absent'] """ EXAMPLES = """ - name: configure static route ios_static_route: prefix: 192.168.2.0 mask: 255.255.255.0 next_hop: 10.0.0.1 - name: remove configuration ios_static_route: prefix: 192.168.2.0 mask: 255.255.255.0 next_hop: 10.0.0.1 state: absent - name: Add static route aggregates ios_static_route: aggregate: - { prefix: 172.16.32.0, mask: 255.255.255.0, next_hop: 10.0.0.8 } - { prefix: 172.16.33.0, mask: 255.255.255.0, next_hop: 10.0.0.8 } - name: Add static route aggregates ios_static_route: aggregate: - { prefix: 172.16.32.0, mask: 255.255.255.0, next_hop: 10.0.0.8 } - { prefix: 172.16.33.0, mask: 255.255.255.0, next_hop: 10.0.0.8 } state: absent """ RETURN = """ commands: description: The list of configuration mode commands to send to the device returned: always type: list sample: - ip route 192.168.2.0 255.255.255.0 10.0.0.1 """ from copy import deepcopy from ansible.module_utils._text import to_text from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.connection import exec_command from ansible.module_utils.network_common import remove_default_spec from ansible.module_utils.ios import load_config, run_commands from ansible.module_utils.ios import ios_argument_spec, check_args from ipaddress import ip_network import re def map_obj_to_commands(updates, module): commands = list() want, have = updates for w in want: prefix = w['prefix'] mask = w['mask'] next_hop = w['next_hop'] admin_distance = w['admin_distance'] state = w['state'] del w['state'] if state == 'absent' and w in have: commands.append('no ip route %s %s %s' % (prefix, mask, next_hop)) elif state == 'present' and w not in have: commands.append('ip route %s %s %s %s' % (prefix, mask, next_hop, admin_distance)) return commands def map_config_to_obj(module): obj = [] rc, out, err = exec_command(module, 'show ip static route') match = re.search(r'.*Static local RIB for default\s*(.*)$', out, re.DOTALL) if match and match.group(1): 
for r in match.group(1).splitlines(): splitted_line = r.split() cidr = ip_network(to_text(splitted_line[1])) prefix = str(cidr.network_address) mask = str(cidr.netmask) next_hop = splitted_line[4] admin_distance = splitted_line[2][1] obj.append({'prefix': prefix, 'mask': mask, 'next_hop': next_hop, 'admin_distance': admin_distance}) return obj def map_params_to_obj(module, required_together=None): obj = [] aggregate = module.params.get('aggregate') if aggregate: for item in aggregate: for key in item: if item.get(key) is None: item[key] = module.params[key] module._check_required_together(required_together, item) d = item.copy() d['admin_distance'] = str(module.params['admin_distance']) obj.append(d) else: obj.append({ 'prefix': module.params['prefix'].strip(), 'mask': module.params['mask'].strip(), 'next_hop': module.params['next_hop'].strip(), 'admin_distance': str(module.params['admin_distance']), 'state': module.params['state'] }) return obj def main(): """ main entry point for module execution """ element_spec = dict( prefix=dict(type='str'), mask=dict(type='str'), next_hop=dict(type='str'), admin_distance=dict(default=1, type='int'), state=dict(default='present', choices=['present', 'absent']) ) aggregate_spec = deepcopy(element_spec) aggregate_spec['prefix'] = dict(required=True) # remove default in aggregate spec, to handle common arguments remove_default_spec(aggregate_spec) argument_spec = dict( aggregate=dict(type='list', elements='dict', options=aggregate_spec), ) argument_spec.update(element_spec) argument_spec.update(ios_argument_spec) required_one_of = [['aggregate', 'prefix']] required_together = [['prefix', 'mask', 'next_hop']] mutually_exclusive = [['aggregate', 'prefix']] module = AnsibleModule(argument_spec=argument_spec, required_one_of=required_one_of, required_together=required_together, mutually_exclusive=mutually_exclusive, supports_check_mode=True) warnings = list() check_args(module, warnings) result = {'changed': False} if warnings: 
result['warnings'] = warnings want = map_params_to_obj(module, required_together=required_together) have = map_config_to_obj(module) commands = map_obj_to_commands((want, have), module) result['commands'] = commands if commands: if not module.check_mode: load_config(module, commands) result['changed'] = True module.exit_json(**result) if __name__ == '__main__': main()
gpl-3.0
punalpatel/st2
st2client/st2client/config.py
6
1186
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

__all__ = [
    'get_config',
    'set_config'
]

# Module-level holder for the parsed config dictionary shared by the client.
CONFIG = {}


def get_config():
    """
    Return the currently stored parsed config dictionary.

    :rtype: ``dict``
    """
    # Reading a module-level name needs no ``global`` declaration.
    return CONFIG


def set_config(config):
    """
    Replace the stored parsed config dictionary and return it.

    :type config: ``dict``

    :rtype: ``dict``
    """
    global CONFIG
    CONFIG = config
    return config
apache-2.0
jmartinm/invenio
modules/bibfield/lib/functions/is_type_str.py
24
1206
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.


def is_type_str(field):
    """Recursively check that ``field`` holds only string values.

    Lists are checked element by element and dicts value by value
    (keys are intentionally not inspected).  For scalars, only truthy
    non-string values are rejected; falsy scalars such as ``0``, ``''``
    and ``None`` are accepted, matching the original contract where an
    "empty" field is considered valid.

    :param field: value to validate (scalar, list or dict, possibly nested)
    :return: ``True`` if every contained value is a string (or falsy),
        ``False`` otherwise
    """
    if isinstance(field, list):
        for value in field:
            if not is_type_str(value):
                return False
    elif isinstance(field, dict):
        # ``values()`` works on both Python 2 and 3; the original used the
        # Python-2-only ``itervalues()``, which was removed in Python 3.
        for value in field.values():
            if not is_type_str(value):
                return False
    elif field and not isinstance(field, str):
        return False
    return True
gpl-2.0
mdrumond/tensorflow
tensorflow/contrib/lookup/__init__.py
78
1628
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Ops for lookup operations. @@string_to_index @@string_to_index_table_from_file @@string_to_index_table_from_tensor @@index_table_from_file @@index_table_from_tensor @@index_to_string @@index_to_string_table_from_file @@index_to_string_table_from_tensor @@LookupInterface @@InitializableLookupTableBase @@IdTableWithHashBuckets @@HashTable @@MutableHashTable @@MutableDenseHashTable @@TableInitializerBase @@KeyValueTensorInitializer @@TextFileIndex @@TextFileInitializer @@TextFileIdTableInitializer @@TextFileStringTableInitializer @@HasherSpec @@StrongHashSpec @@FastHashSpec """ from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=unused-import,wildcard-import from tensorflow.contrib.lookup.lookup_ops import * # pylint: enable=unused-import,wildcard-import from tensorflow.python.util.all_util import remove_undocumented remove_undocumented(__name__)
apache-2.0
noironetworks/nova
nova/keymgr/barbican.py
59
14399
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Key manager implementation for Barbican """ import array import base64 import binascii from barbicanclient import client as barbican_client from keystoneclient import session from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from nova import exception from nova.i18n import _ from nova.i18n import _LE from nova.keymgr import key as keymgr_key from nova.keymgr import key_mgr barbican_opts = [ cfg.StrOpt('catalog_info', default='key-manager:barbican:public', help='Info to match when looking for barbican in the service ' 'catalog. Format is: separated values of the form: ' '<service_type>:<service_name>:<endpoint_type>'), cfg.StrOpt('endpoint_template', help='Override service catalog lookup with template for ' 'barbican endpoint e.g. 
' 'http://localhost:9311/v1/%(project_id)s'), cfg.StrOpt('os_region_name', help='Region name of this node'), ] CONF = cfg.CONF BARBICAN_OPT_GROUP = 'barbican' CONF.register_opts(barbican_opts, group=BARBICAN_OPT_GROUP) session.Session.register_conf_options(CONF, BARBICAN_OPT_GROUP) LOG = logging.getLogger(__name__) class BarbicanKeyManager(key_mgr.KeyManager): """Key Manager Interface that wraps the Barbican client API.""" def __init__(self): self._barbican_client = None self._base_url = None def _get_barbican_client(self, ctxt): """Creates a client to connect to the Barbican service. :param ctxt: the user context for authentication :return: a Barbican Client object :raises Forbidden: if the ctxt is None """ if not self._barbican_client: # Confirm context is provided, if not raise forbidden if not ctxt: msg = _("User is not authorized to use key manager.") LOG.error(msg) raise exception.Forbidden(msg) try: _SESSION = session.Session.load_from_conf_options( CONF, BARBICAN_OPT_GROUP) auth = ctxt.get_auth_plugin() service_type, service_name, interface = (CONF. barbican. catalog_info. 
split(':')) region_name = CONF.barbican.os_region_name service_parameters = {'service_type': service_type, 'service_name': service_name, 'interface': interface, 'region_name': region_name} if CONF.barbican.endpoint_template: self._base_url = (CONF.barbican.endpoint_template % ctxt.to_dict()) else: self._base_url = _SESSION.get_endpoint( auth, **service_parameters) # the barbican endpoint can't have the '/v1' on the end self._barbican_endpoint = self._base_url.rpartition('/')[0] sess = session.Session(auth=auth) self._barbican_client = barbican_client.Client( session=sess, endpoint=self._barbican_endpoint) except Exception as e: with excutils.save_and_reraise_exception(): LOG.error(_LE("Error creating Barbican client: %s"), e) return self._barbican_client def create_key(self, ctxt, expiration=None, name='Nova Compute Key', payload_content_type='application/octet-stream', mode='CBC', algorithm='AES', length=256): """Creates a key. :param ctxt: contains information of the user and the environment for the request (nova/context.py) :param expiration: the date the key will expire :param name: a friendly name for the secret :param payload_content_type: the format/type of the secret data :param mode: the algorithm mode (e.g. 
CBC or CTR mode) :param algorithm: the algorithm associated with the secret :param length: the bit length of the secret :return: the UUID of the new key :raises Exception: if key creation fails """ barbican_client = self._get_barbican_client(ctxt) try: key_order = barbican_client.orders.create_key( name, algorithm, length, mode, payload_content_type, expiration) order_ref = key_order.submit() order = barbican_client.orders.get(order_ref) return self._retrieve_secret_uuid(order.secret_ref) except Exception as e: with excutils.save_and_reraise_exception(): LOG.error(_LE("Error creating key: %s"), e) def store_key(self, ctxt, key, expiration=None, name='Nova Compute Key', payload_content_type='application/octet-stream', payload_content_encoding='base64', algorithm='AES', bit_length=256, mode='CBC', from_copy=False): """Stores (i.e., registers) a key with the key manager. :param ctxt: contains information of the user and the environment for the request (nova/context.py) :param key: the unencrypted secret data. Known as "payload" to the barbicanclient api :param expiration: the expiration time of the secret in ISO 8601 format :param name: a friendly name for the key :param payload_content_type: the format/type of the secret data :param payload_content_encoding: the encoding of the secret data :param algorithm: the algorithm associated with this secret key :param bit_length: the bit length of this secret key :param mode: the algorithm mode used with this secret key :param from_copy: establishes whether the function is being used to copy a key. 
In case of the latter, it does not try to decode the key :returns: the UUID of the stored key :raises Exception: if key storage fails """ barbican_client = self._get_barbican_client(ctxt) try: if key.get_algorithm(): algorithm = key.get_algorithm() if payload_content_type == 'text/plain': payload_content_encoding = None encoded_key = key.get_encoded() elif (payload_content_type == 'application/octet-stream' and not from_copy): key_list = key.get_encoded() string_key = ''.join(map(lambda byte: "%02x" % byte, key_list)) encoded_key = base64.b64encode(binascii.unhexlify(string_key)) else: encoded_key = key.get_encoded() secret = barbican_client.secrets.create(name, encoded_key, payload_content_type, payload_content_encoding, algorithm, bit_length, mode, expiration) secret_ref = secret.store() return self._retrieve_secret_uuid(secret_ref) except Exception as e: with excutils.save_and_reraise_exception(): LOG.error(_LE("Error storing key: %s"), e) def copy_key(self, ctxt, key_id): """Copies (i.e., clones) a key stored by barbican. :param ctxt: contains information of the user and the environment for the request (nova/context.py) :param key_id: the UUID of the key to copy :return: the UUID of the key copy :raises Exception: if key copying fails """ try: secret = self._get_secret(ctxt, key_id) con_type = secret.content_types['default'] secret_data = self._get_secret_data(secret, payload_content_type=con_type) key = keymgr_key.SymmetricKey(secret.algorithm, secret_data) copy_uuid = self.store_key(ctxt, key, secret.expiration, secret.name, con_type, 'base64', secret.algorithm, secret.bit_length, secret.mode, True) return copy_uuid except Exception as e: with excutils.save_and_reraise_exception(): LOG.error(_LE("Error copying key: %s"), e) def _create_secret_ref(self, key_id): """Creates the URL required for accessing a secret. 
:param key_id: the UUID of the key to copy :return: the URL of the requested secret """ if not key_id: msg = "Key ID is None" raise exception.KeyManagerError(msg) return self._base_url + "/secrets/" + key_id def _retrieve_secret_uuid(self, secret_ref): """Retrieves the UUID of the secret from the secret_ref. :param secret_ref: the href of the secret :return: the UUID of the secret """ # The secret_ref is assumed to be of a form similar to # http://host:9311/v1/secrets/d152fa13-2b41-42ca-a934-6c21566c0f40 # with the UUID at the end. This command retrieves everything # after the last '/', which is the UUID. return secret_ref.rpartition('/')[2] def _get_secret_data(self, secret, payload_content_type='application/octet-stream'): """Retrieves the secret data given a secret and content_type. :param ctxt: contains information of the user and the environment for the request (nova/context.py) :param secret: the secret from barbican with the payload of data :param payload_content_type: the format/type of the secret data :returns: the secret data :raises Exception: if data cannot be retrieved """ try: generated_data = secret.payload if payload_content_type == 'application/octet-stream': secret_data = base64.b64encode(generated_data) else: secret_data = generated_data return secret_data except Exception as e: with excutils.save_and_reraise_exception(): LOG.error(_LE("Error getting secret data: %s"), e) def _get_secret(self, ctxt, key_id): """Returns the metadata of the secret. 
:param ctxt: contains information of the user and the environment for the request (nova/context.py) :param key_id: UUID of the secret :return: the secret's metadata :raises Exception: if there is an error retrieving the data """ barbican_client = self._get_barbican_client(ctxt) try: secret_ref = self._create_secret_ref(key_id) return barbican_client.secrets.get(secret_ref) except Exception as e: with excutils.save_and_reraise_exception(): LOG.error(_LE("Error getting secret metadata: %s"), e) def get_key(self, ctxt, key_id, payload_content_type='application/octet-stream'): """Retrieves the specified key. :param ctxt: contains information of the user and the environment for the request (nova/context.py) :param key_id: the UUID of the key to retrieve :param payload_content_type: The format/type of the secret data :return: SymmetricKey representation of the key :raises Exception: if key retrieval fails """ try: secret = self._get_secret(ctxt, key_id) secret_data = self._get_secret_data(secret, payload_content_type) if payload_content_type == 'application/octet-stream': # convert decoded string to list of unsigned ints for each byte key_data = array.array('B', base64.b64decode(secret_data)).tolist() else: key_data = secret_data key = keymgr_key.SymmetricKey(secret.algorithm, key_data) return key except Exception as e: with excutils.save_and_reraise_exception(): LOG.error(_LE("Error getting key: %s"), e) def delete_key(self, ctxt, key_id): """Deletes the specified key. :param ctxt: contains information of the user and the environment for the request (nova/context.py) :param key_id: the UUID of the key to delete :raises Exception: if key deletion fails """ barbican_client = self._get_barbican_client(ctxt) try: secret_ref = self._create_secret_ref(key_id) barbican_client.secrets.delete(secret_ref) except Exception as e: with excutils.save_and_reraise_exception(): LOG.error(_LE("Error deleting key: %s"), e)
apache-2.0
shawnsschen/shadowsocks
shadowsocks/manager.py
925
9692
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2015 clowwindy # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import absolute_import, division, print_function, \ with_statement import errno import traceback import socket import logging import json import collections from shadowsocks import common, eventloop, tcprelay, udprelay, asyncdns, shell BUF_SIZE = 1506 STAT_SEND_LIMIT = 100 class Manager(object): def __init__(self, config): self._config = config self._relays = {} # (tcprelay, udprelay) self._loop = eventloop.EventLoop() self._dns_resolver = asyncdns.DNSResolver() self._dns_resolver.add_to_loop(self._loop) self._statistics = collections.defaultdict(int) self._control_client_addr = None try: manager_address = config['manager_address'] if ':' in manager_address: addr = manager_address.rsplit(':', 1) addr = addr[0], int(addr[1]) addrs = socket.getaddrinfo(addr[0], addr[1]) if addrs: family = addrs[0][0] else: logging.error('invalid address: %s', manager_address) exit(1) else: addr = manager_address family = socket.AF_UNIX self._control_socket = socket.socket(family, socket.SOCK_DGRAM) self._control_socket.bind(addr) self._control_socket.setblocking(False) except (OSError, IOError) as e: logging.error(e) logging.error('can not bind to manager address') exit(1) self._loop.add(self._control_socket, eventloop.POLL_IN, self) self._loop.add_periodic(self.handle_periodic) port_password = config['port_password'] del config['port_password'] for port, password in 
port_password.items(): a_config = config.copy() a_config['server_port'] = int(port) a_config['password'] = password self.add_port(a_config) def add_port(self, config): port = int(config['server_port']) servers = self._relays.get(port, None) if servers: logging.error("server already exists at %s:%d" % (config['server'], port)) return logging.info("adding server at %s:%d" % (config['server'], port)) t = tcprelay.TCPRelay(config, self._dns_resolver, False, self.stat_callback) u = udprelay.UDPRelay(config, self._dns_resolver, False, self.stat_callback) t.add_to_loop(self._loop) u.add_to_loop(self._loop) self._relays[port] = (t, u) def remove_port(self, config): port = int(config['server_port']) servers = self._relays.get(port, None) if servers: logging.info("removing server at %s:%d" % (config['server'], port)) t, u = servers t.close(next_tick=False) u.close(next_tick=False) del self._relays[port] else: logging.error("server not exist at %s:%d" % (config['server'], port)) def handle_event(self, sock, fd, event): if sock == self._control_socket and event == eventloop.POLL_IN: data, self._control_client_addr = sock.recvfrom(BUF_SIZE) parsed = self._parse_command(data) if parsed: command, config = parsed a_config = self._config.copy() if config: # let the command override the configuration file a_config.update(config) if 'server_port' not in a_config: logging.error('can not find server_port in config') else: if command == 'add': self.add_port(a_config) self._send_control_data(b'ok') elif command == 'remove': self.remove_port(a_config) self._send_control_data(b'ok') elif command == 'ping': self._send_control_data(b'pong') else: logging.error('unknown command %s', command) def _parse_command(self, data): # commands: # add: {"server_port": 8000, "password": "foobar"} # remove: {"server_port": 8000"} data = common.to_str(data) parts = data.split(':', 1) if len(parts) < 2: return data, None command, config_json = parts try: config = shell.parse_json_in_str(config_json) return 
command, config except Exception as e: logging.error(e) return None def stat_callback(self, port, data_len): self._statistics[port] += data_len def handle_periodic(self): r = {} i = 0 def send_data(data_dict): if data_dict: # use compact JSON format (without space) data = common.to_bytes(json.dumps(data_dict, separators=(',', ':'))) self._send_control_data(b'stat: ' + data) for k, v in self._statistics.items(): r[k] = v i += 1 # split the data into segments that fit in UDP packets if i >= STAT_SEND_LIMIT: send_data(r) r.clear() send_data(r) self._statistics.clear() def _send_control_data(self, data): if self._control_client_addr: try: self._control_socket.sendto(data, self._control_client_addr) except (socket.error, OSError, IOError) as e: error_no = eventloop.errno_from_exception(e) if error_no in (errno.EAGAIN, errno.EINPROGRESS, errno.EWOULDBLOCK): return else: shell.print_exception(e) if self._config['verbose']: traceback.print_exc() def run(self): self._loop.run() def run(config): Manager(config).run() def test(): import time import threading import struct from shadowsocks import encrypt logging.basicConfig(level=5, format='%(asctime)s %(levelname)-8s %(message)s', datefmt='%Y-%m-%d %H:%M:%S') enc = [] eventloop.TIMEOUT_PRECISION = 1 def run_server(): config = { 'server': '127.0.0.1', 'local_port': 1081, 'port_password': { '8381': 'foobar1', '8382': 'foobar2' }, 'method': 'aes-256-cfb', 'manager_address': '127.0.0.1:6001', 'timeout': 60, 'fast_open': False, 'verbose': 2 } manager = Manager(config) enc.append(manager) manager.run() t = threading.Thread(target=run_server) t.start() time.sleep(1) manager = enc[0] cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) cli.connect(('127.0.0.1', 6001)) # test add and remove time.sleep(1) cli.send(b'add: {"server_port":7001, "password":"asdfadsfasdf"}') time.sleep(1) assert 7001 in manager._relays data, addr = cli.recvfrom(1506) assert b'ok' in data cli.send(b'remove: {"server_port":8381}') time.sleep(1) assert 8381 
not in manager._relays data, addr = cli.recvfrom(1506) assert b'ok' in data logging.info('add and remove test passed') # test statistics for TCP header = common.pack_addr(b'google.com') + struct.pack('>H', 80) data = encrypt.encrypt_all(b'asdfadsfasdf', 'aes-256-cfb', 1, header + b'GET /\r\n\r\n') tcp_cli = socket.socket() tcp_cli.connect(('127.0.0.1', 7001)) tcp_cli.send(data) tcp_cli.recv(4096) tcp_cli.close() data, addr = cli.recvfrom(1506) data = common.to_str(data) assert data.startswith('stat: ') data = data.split('stat:')[1] stats = shell.parse_json_in_str(data) assert '7001' in stats logging.info('TCP statistics test passed') # test statistics for UDP header = common.pack_addr(b'127.0.0.1') + struct.pack('>H', 80) data = encrypt.encrypt_all(b'foobar2', 'aes-256-cfb', 1, header + b'test') udp_cli = socket.socket(type=socket.SOCK_DGRAM) udp_cli.sendto(data, ('127.0.0.1', 8382)) tcp_cli.close() data, addr = cli.recvfrom(1506) data = common.to_str(data) assert data.startswith('stat: ') data = data.split('stat:')[1] stats = json.loads(data) assert '8382' in stats logging.info('UDP statistics test passed') manager._loop.stop() t.join() if __name__ == '__main__': test()
apache-2.0
eleonrk/SickRage
lib/bs4/tests/test_tree.py
20
78279
# -*- coding: utf-8 -*- """Tests for Beautiful Soup's tree traversal methods. The tree traversal methods are the main advantage of using Beautiful Soup over just using a parser. Different parsers will build different Beautiful Soup trees given the same markup, but all Beautiful Soup trees can be traversed with the methods tested here. """ from pdb import set_trace import copy import pickle import re import warnings from bs4 import BeautifulSoup from bs4.builder import ( builder_registry, HTMLParserTreeBuilder, ) from bs4.element import ( PY3K, CData, Comment, Declaration, Doctype, NavigableString, SoupStrainer, Tag, ) from bs4.testing import ( SoupTest, skipIf, ) XML_BUILDER_PRESENT = (builder_registry.lookup("xml") is not None) LXML_PRESENT = (builder_registry.lookup("lxml") is not None) class TreeTest(SoupTest): def assertSelects(self, tags, should_match): """Make sure that the given tags have the correct text. This is used in tests that define a bunch of tags, each containing a single string, and then select certain strings by some mechanism. """ self.assertEqual([tag.string for tag in tags], should_match) def assertSelectsIDs(self, tags, should_match): """Make sure that the given tags have the correct IDs. This is used in tests that define a bunch of tags, each containing a single string, and then select certain strings by some mechanism. """ self.assertEqual([tag['id'] for tag in tags], should_match) class TestFind(TreeTest): """Basic tests of the find() method. find() just calls find_all() with limit=1, so it's not tested all that thouroughly here. 
""" def test_find_tag(self): soup = self.soup("<a>1</a><b>2</b><a>3</a><b>4</b>") self.assertEqual(soup.find("b").string, "2") def test_unicode_text_find(self): soup = self.soup(u'<h1>Räksmörgås</h1>') self.assertEqual(soup.find(string=u'Räksmörgås'), u'Räksmörgås') def test_unicode_attribute_find(self): soup = self.soup(u'<h1 id="Räksmörgås">here it is</h1>') str(soup) self.assertEqual("here it is", soup.find(id=u'Räksmörgås').text) def test_find_everything(self): """Test an optimization that finds all tags.""" soup = self.soup("<a>foo</a><b>bar</b>") self.assertEqual(2, len(soup.find_all())) def test_find_everything_with_name(self): """Test an optimization that finds all tags with a given name.""" soup = self.soup("<a>foo</a><b>bar</b><a>baz</a>") self.assertEqual(2, len(soup.find_all('a'))) class TestFindAll(TreeTest): """Basic tests of the find_all() method.""" def test_find_all_text_nodes(self): """You can search the tree for text nodes.""" soup = self.soup("<html>Foo<b>bar</b>\xbb</html>") # Exact match. self.assertEqual(soup.find_all(string="bar"), [u"bar"]) self.assertEqual(soup.find_all(text="bar"), [u"bar"]) # Match any of a number of strings. self.assertEqual( soup.find_all(text=["Foo", "bar"]), [u"Foo", u"bar"]) # Match a regular expression. self.assertEqual(soup.find_all(text=re.compile('.*')), [u"Foo", u"bar", u'\xbb']) # Match anything. self.assertEqual(soup.find_all(text=True), [u"Foo", u"bar", u'\xbb']) def test_find_all_limit(self): """You can limit the number of items returned by find_all.""" soup = self.soup("<a>1</a><a>2</a><a>3</a><a>4</a><a>5</a>") self.assertSelects(soup.find_all('a', limit=3), ["1", "2", "3"]) self.assertSelects(soup.find_all('a', limit=1), ["1"]) self.assertSelects( soup.find_all('a', limit=10), ["1", "2", "3", "4", "5"]) # A limit of 0 means no limit. 
self.assertSelects( soup.find_all('a', limit=0), ["1", "2", "3", "4", "5"]) def test_calling_a_tag_is_calling_findall(self): soup = self.soup("<a>1</a><b>2<a id='foo'>3</a></b>") self.assertSelects(soup('a', limit=1), ["1"]) self.assertSelects(soup.b(id="foo"), ["3"]) def test_find_all_with_self_referential_data_structure_does_not_cause_infinite_recursion(self): soup = self.soup("<a></a>") # Create a self-referential list. l = [] l.append(l) # Without special code in _normalize_search_value, this would cause infinite # recursion. self.assertEqual([], soup.find_all(l)) def test_find_all_resultset(self): """All find_all calls return a ResultSet""" soup = self.soup("<a></a>") result = soup.find_all("a") self.assertTrue(hasattr(result, "source")) result = soup.find_all(True) self.assertTrue(hasattr(result, "source")) result = soup.find_all(text="foo") self.assertTrue(hasattr(result, "source")) class TestFindAllBasicNamespaces(TreeTest): def test_find_by_namespaced_name(self): soup = self.soup('<mathml:msqrt>4</mathml:msqrt><a svg:fill="red">') self.assertEqual("4", soup.find("mathml:msqrt").string) self.assertEqual("a", soup.find(attrs= { "svg:fill" : "red" }).name) class TestFindAllByName(TreeTest): """Test ways of finding tags by tag name.""" def setUp(self): super(TreeTest, self).setUp() self.tree = self.soup("""<a>First tag.</a> <b>Second tag.</b> <c>Third <a>Nested tag.</a> tag.</c>""") def test_find_all_by_tag_name(self): # Find all the <a> tags. self.assertSelects( self.tree.find_all('a'), ['First tag.', 'Nested tag.']) def test_find_all_by_name_and_text(self): self.assertSelects( self.tree.find_all('a', text='First tag.'), ['First tag.']) self.assertSelects( self.tree.find_all('a', text=True), ['First tag.', 'Nested tag.']) self.assertSelects( self.tree.find_all('a', text=re.compile("tag")), ['First tag.', 'Nested tag.']) def test_find_all_on_non_root_element(self): # You can call find_all on any node, not just the root. 
self.assertSelects(self.tree.c.find_all('a'), ['Nested tag.']) def test_calling_element_invokes_find_all(self): self.assertSelects(self.tree('a'), ['First tag.', 'Nested tag.']) def test_find_all_by_tag_strainer(self): self.assertSelects( self.tree.find_all(SoupStrainer('a')), ['First tag.', 'Nested tag.']) def test_find_all_by_tag_names(self): self.assertSelects( self.tree.find_all(['a', 'b']), ['First tag.', 'Second tag.', 'Nested tag.']) def test_find_all_by_tag_dict(self): self.assertSelects( self.tree.find_all({'a' : True, 'b' : True}), ['First tag.', 'Second tag.', 'Nested tag.']) def test_find_all_by_tag_re(self): self.assertSelects( self.tree.find_all(re.compile('^[ab]$')), ['First tag.', 'Second tag.', 'Nested tag.']) def test_find_all_with_tags_matching_method(self): # You can define an oracle method that determines whether # a tag matches the search. def id_matches_name(tag): return tag.name == tag.get('id') tree = self.soup("""<a id="a">Match 1.</a> <a id="1">Does not match.</a> <b id="b">Match 2.</a>""") self.assertSelects( tree.find_all(id_matches_name), ["Match 1.", "Match 2."]) def test_find_with_multi_valued_attribute(self): soup = self.soup( "<div class='a b'>1</div><div class='a c'>2</div><div class='a d'>3</div>" ) r1 = soup.find('div', 'a d'); r2 = soup.find('div', re.compile(r'a d')); r3, r4 = soup.find_all('div', ['a b', 'a d']); self.assertEqual('3', r1.string) self.assertEqual('3', r2.string) self.assertEqual('1', r3.string) self.assertEqual('3', r4.string) class TestFindAllByAttribute(TreeTest): def test_find_all_by_attribute_name(self): # You can pass in keyword arguments to find_all to search by # attribute. tree = self.soup(""" <a id="first">Matching a.</a> <a id="second"> Non-matching <b id="first">Matching b.</b>a. 
</a>""") self.assertSelects(tree.find_all(id='first'), ["Matching a.", "Matching b."]) def test_find_all_by_utf8_attribute_value(self): peace = u"םולש".encode("utf8") data = u'<a title="םולש"></a>'.encode("utf8") soup = self.soup(data) self.assertEqual([soup.a], soup.find_all(title=peace)) self.assertEqual([soup.a], soup.find_all(title=peace.decode("utf8"))) self.assertEqual([soup.a], soup.find_all(title=[peace, "something else"])) def test_find_all_by_attribute_dict(self): # You can pass in a dictionary as the argument 'attrs'. This # lets you search for attributes like 'name' (a fixed argument # to find_all) and 'class' (a reserved word in Python.) tree = self.soup(""" <a name="name1" class="class1">Name match.</a> <a name="name2" class="class2">Class match.</a> <a name="name3" class="class3">Non-match.</a> <name1>A tag called 'name1'.</name1> """) # This doesn't do what you want. self.assertSelects(tree.find_all(name='name1'), ["A tag called 'name1'."]) # This does what you want. self.assertSelects(tree.find_all(attrs={'name' : 'name1'}), ["Name match."]) self.assertSelects(tree.find_all(attrs={'class' : 'class2'}), ["Class match."]) def test_find_all_by_class(self): tree = self.soup(""" <a class="1">Class 1.</a> <a class="2">Class 2.</a> <b class="1">Class 1.</b> <c class="3 4">Class 3 and 4.</c> """) # Passing in the class_ keyword argument will search against # the 'class' attribute. self.assertSelects(tree.find_all('a', class_='1'), ['Class 1.']) self.assertSelects(tree.find_all('c', class_='3'), ['Class 3 and 4.']) self.assertSelects(tree.find_all('c', class_='4'), ['Class 3 and 4.']) # Passing in a string to 'attrs' will also search the CSS class. 
self.assertSelects(tree.find_all('a', '1'), ['Class 1.']) self.assertSelects(tree.find_all(attrs='1'), ['Class 1.', 'Class 1.']) self.assertSelects(tree.find_all('c', '3'), ['Class 3 and 4.']) self.assertSelects(tree.find_all('c', '4'), ['Class 3 and 4.']) def test_find_by_class_when_multiple_classes_present(self): tree = self.soup("<gar class='foo bar'>Found it</gar>") f = tree.find_all("gar", class_=re.compile("o")) self.assertSelects(f, ["Found it"]) f = tree.find_all("gar", class_=re.compile("a")) self.assertSelects(f, ["Found it"]) # If the search fails to match the individual strings "foo" and "bar", # it will be tried against the combined string "foo bar". f = tree.find_all("gar", class_=re.compile("o b")) self.assertSelects(f, ["Found it"]) def test_find_all_with_non_dictionary_for_attrs_finds_by_class(self): soup = self.soup("<a class='bar'>Found it</a>") self.assertSelects(soup.find_all("a", re.compile("ba")), ["Found it"]) def big_attribute_value(value): return len(value) > 3 self.assertSelects(soup.find_all("a", big_attribute_value), []) def small_attribute_value(value): return len(value) <= 3 self.assertSelects( soup.find_all("a", small_attribute_value), ["Found it"]) def test_find_all_with_string_for_attrs_finds_multiple_classes(self): soup = self.soup('<a class="foo bar"></a><a class="foo"></a>') a, a2 = soup.find_all("a") self.assertEqual([a, a2], soup.find_all("a", "foo")) self.assertEqual([a], soup.find_all("a", "bar")) # If you specify the class as a string that contains a # space, only that specific value will be found. 
self.assertEqual([a], soup.find_all("a", class_="foo bar")) self.assertEqual([a], soup.find_all("a", "foo bar")) self.assertEqual([], soup.find_all("a", "bar foo")) def test_find_all_by_attribute_soupstrainer(self): tree = self.soup(""" <a id="first">Match.</a> <a id="second">Non-match.</a>""") strainer = SoupStrainer(attrs={'id' : 'first'}) self.assertSelects(tree.find_all(strainer), ['Match.']) def test_find_all_with_missing_attribute(self): # You can pass in None as the value of an attribute to find_all. # This will match tags that do not have that attribute set. tree = self.soup("""<a id="1">ID present.</a> <a>No ID present.</a> <a id="">ID is empty.</a>""") self.assertSelects(tree.find_all('a', id=None), ["No ID present."]) def test_find_all_with_defined_attribute(self): # You can pass in None as the value of an attribute to find_all. # This will match tags that have that attribute set to any value. tree = self.soup("""<a id="1">ID present.</a> <a>No ID present.</a> <a id="">ID is empty.</a>""") self.assertSelects( tree.find_all(id=True), ["ID present.", "ID is empty."]) def test_find_all_with_numeric_attribute(self): # If you search for a number, it's treated as a string. tree = self.soup("""<a id=1>Unquoted attribute.</a> <a id="1">Quoted attribute.</a>""") expected = ["Unquoted attribute.", "Quoted attribute."] self.assertSelects(tree.find_all(id=1), expected) self.assertSelects(tree.find_all(id="1"), expected) def test_find_all_with_list_attribute_values(self): # You can pass a list of attribute values instead of just one, # and you'll get tags that match any of the values. tree = self.soup("""<a id="1">1</a> <a id="2">2</a> <a id="3">3</a> <a>No ID.</a>""") self.assertSelects(tree.find_all(id=["1", "3", "4"]), ["1", "3"]) def test_find_all_with_regular_expression_attribute_value(self): # You can pass a regular expression as an attribute value, and # you'll get tags whose values for that attribute match the # regular expression. 
tree = self.soup("""<a id="a">One a.</a> <a id="aa">Two as.</a> <a id="ab">Mixed as and bs.</a> <a id="b">One b.</a> <a>No ID.</a>""") self.assertSelects(tree.find_all(id=re.compile("^a+$")), ["One a.", "Two as."]) def test_find_by_name_and_containing_string(self): soup = self.soup("<b>foo</b><b>bar</b><a>foo</a>") a = soup.a self.assertEqual([a], soup.find_all("a", text="foo")) self.assertEqual([], soup.find_all("a", text="bar")) self.assertEqual([], soup.find_all("a", text="bar")) def test_find_by_name_and_containing_string_when_string_is_buried(self): soup = self.soup("<a>foo</a><a><b><c>foo</c></b></a>") self.assertEqual(soup.find_all("a"), soup.find_all("a", text="foo")) def test_find_by_attribute_and_containing_string(self): soup = self.soup('<b id="1">foo</b><a id="2">foo</a>') a = soup.a self.assertEqual([a], soup.find_all(id=2, text="foo")) self.assertEqual([], soup.find_all(id=1, text="bar")) class TestIndex(TreeTest): """Test Tag.index""" def test_index(self): tree = self.soup("""<div> <a>Identical</a> <b>Not identical</b> <a>Identical</a> <c><d>Identical with child</d></c> <b>Also not identical</b> <c><d>Identical with child</d></c> </div>""") div = tree.div for i, element in enumerate(div.contents): self.assertEqual(i, div.index(element)) self.assertRaises(ValueError, tree.index, 1) class TestParentOperations(TreeTest): """Test navigation and searching through an element's parents.""" def setUp(self): super(TestParentOperations, self).setUp() self.tree = self.soup('''<ul id="empty"></ul> <ul id="top"> <ul id="middle"> <ul id="bottom"> <b>Start here</b> </ul> </ul>''') self.start = self.tree.b def test_parent(self): self.assertEqual(self.start.parent['id'], 'bottom') self.assertEqual(self.start.parent.parent['id'], 'middle') self.assertEqual(self.start.parent.parent.parent['id'], 'top') def test_parent_of_top_tag_is_soup_object(self): top_tag = self.tree.contents[0] self.assertEqual(top_tag.parent, self.tree) def test_soup_object_has_no_parent(self): 
self.assertEqual(None, self.tree.parent) def test_find_parents(self): self.assertSelectsIDs( self.start.find_parents('ul'), ['bottom', 'middle', 'top']) self.assertSelectsIDs( self.start.find_parents('ul', id="middle"), ['middle']) def test_find_parent(self): self.assertEqual(self.start.find_parent('ul')['id'], 'bottom') self.assertEqual(self.start.find_parent('ul', id='top')['id'], 'top') def test_parent_of_text_element(self): text = self.tree.find(text="Start here") self.assertEqual(text.parent.name, 'b') def test_text_element_find_parent(self): text = self.tree.find(text="Start here") self.assertEqual(text.find_parent('ul')['id'], 'bottom') def test_parent_generator(self): parents = [parent['id'] for parent in self.start.parents if parent is not None and 'id' in parent.attrs] self.assertEqual(parents, ['bottom', 'middle', 'top']) class ProximityTest(TreeTest): def setUp(self): super(TreeTest, self).setUp() self.tree = self.soup( '<html id="start"><head></head><body><b id="1">One</b><b id="2">Two</b><b id="3">Three</b></body></html>') class TestNextOperations(ProximityTest): def setUp(self): super(TestNextOperations, self).setUp() self.start = self.tree.b def test_next(self): self.assertEqual(self.start.next_element, "One") self.assertEqual(self.start.next_element.next_element['id'], "2") def test_next_of_last_item_is_none(self): last = self.tree.find(text="Three") self.assertEqual(last.next_element, None) def test_next_of_root_is_none(self): # The document root is outside the next/previous chain. 
self.assertEqual(self.tree.next_element, None) def test_find_all_next(self): self.assertSelects(self.start.find_all_next('b'), ["Two", "Three"]) self.start.find_all_next(id=3) self.assertSelects(self.start.find_all_next(id=3), ["Three"]) def test_find_next(self): self.assertEqual(self.start.find_next('b')['id'], '2') self.assertEqual(self.start.find_next(text="Three"), "Three") def test_find_next_for_text_element(self): text = self.tree.find(text="One") self.assertEqual(text.find_next("b").string, "Two") self.assertSelects(text.find_all_next("b"), ["Two", "Three"]) def test_next_generator(self): start = self.tree.find(text="Two") successors = [node for node in start.next_elements] # There are two successors: the final <b> tag and its text contents. tag, contents = successors self.assertEqual(tag['id'], '3') self.assertEqual(contents, "Three") class TestPreviousOperations(ProximityTest): def setUp(self): super(TestPreviousOperations, self).setUp() self.end = self.tree.find(text="Three") def test_previous(self): self.assertEqual(self.end.previous_element['id'], "3") self.assertEqual(self.end.previous_element.previous_element, "Two") def test_previous_of_first_item_is_none(self): first = self.tree.find('html') self.assertEqual(first.previous_element, None) def test_previous_of_root_is_none(self): # The document root is outside the next/previous chain. # XXX This is broken! #self.assertEqual(self.tree.previous_element, None) pass def test_find_all_previous(self): # The <b> tag containing the "Three" node is the predecessor # of the "Three" node itself, which is why "Three" shows up # here. 
self.assertSelects( self.end.find_all_previous('b'), ["Three", "Two", "One"]) self.assertSelects(self.end.find_all_previous(id=1), ["One"]) def test_find_previous(self): self.assertEqual(self.end.find_previous('b')['id'], '3') self.assertEqual(self.end.find_previous(text="One"), "One") def test_find_previous_for_text_element(self): text = self.tree.find(text="Three") self.assertEqual(text.find_previous("b").string, "Three") self.assertSelects( text.find_all_previous("b"), ["Three", "Two", "One"]) def test_previous_generator(self): start = self.tree.find(text="One") predecessors = [node for node in start.previous_elements] # There are four predecessors: the <b> tag containing "One" # the <body> tag, the <head> tag, and the <html> tag. b, body, head, html = predecessors self.assertEqual(b['id'], '1') self.assertEqual(body.name, "body") self.assertEqual(head.name, "head") self.assertEqual(html.name, "html") class SiblingTest(TreeTest): def setUp(self): super(SiblingTest, self).setUp() markup = '''<html> <span id="1"> <span id="1.1"></span> </span> <span id="2"> <span id="2.1"></span> </span> <span id="3"> <span id="3.1"></span> </span> <span id="4"></span> </html>''' # All that whitespace looks good but makes the tests more # difficult. Get rid of it. markup = re.compile("\n\s*").sub("", markup) self.tree = self.soup(markup) class TestNextSibling(SiblingTest): def setUp(self): super(TestNextSibling, self).setUp() self.start = self.tree.find(id="1") def test_next_sibling_of_root_is_none(self): self.assertEqual(self.tree.next_sibling, None) def test_next_sibling(self): self.assertEqual(self.start.next_sibling['id'], '2') self.assertEqual(self.start.next_sibling.next_sibling['id'], '3') # Note the difference between next_sibling and next_element. 
self.assertEqual(self.start.next_element['id'], '1.1') def test_next_sibling_may_not_exist(self): self.assertEqual(self.tree.html.next_sibling, None) nested_span = self.tree.find(id="1.1") self.assertEqual(nested_span.next_sibling, None) last_span = self.tree.find(id="4") self.assertEqual(last_span.next_sibling, None) def test_find_next_sibling(self): self.assertEqual(self.start.find_next_sibling('span')['id'], '2') def test_next_siblings(self): self.assertSelectsIDs(self.start.find_next_siblings("span"), ['2', '3', '4']) self.assertSelectsIDs(self.start.find_next_siblings(id='3'), ['3']) def test_next_sibling_for_text_element(self): soup = self.soup("Foo<b>bar</b>baz") start = soup.find(text="Foo") self.assertEqual(start.next_sibling.name, 'b') self.assertEqual(start.next_sibling.next_sibling, 'baz') self.assertSelects(start.find_next_siblings('b'), ['bar']) self.assertEqual(start.find_next_sibling(text="baz"), "baz") self.assertEqual(start.find_next_sibling(text="nonesuch"), None) class TestPreviousSibling(SiblingTest): def setUp(self): super(TestPreviousSibling, self).setUp() self.end = self.tree.find(id="4") def test_previous_sibling_of_root_is_none(self): self.assertEqual(self.tree.previous_sibling, None) def test_previous_sibling(self): self.assertEqual(self.end.previous_sibling['id'], '3') self.assertEqual(self.end.previous_sibling.previous_sibling['id'], '2') # Note the difference between previous_sibling and previous_element. 
self.assertEqual(self.end.previous_element['id'], '3.1') def test_previous_sibling_may_not_exist(self): self.assertEqual(self.tree.html.previous_sibling, None) nested_span = self.tree.find(id="1.1") self.assertEqual(nested_span.previous_sibling, None) first_span = self.tree.find(id="1") self.assertEqual(first_span.previous_sibling, None) def test_find_previous_sibling(self): self.assertEqual(self.end.find_previous_sibling('span')['id'], '3') def test_previous_siblings(self): self.assertSelectsIDs(self.end.find_previous_siblings("span"), ['3', '2', '1']) self.assertSelectsIDs(self.end.find_previous_siblings(id='1'), ['1']) def test_previous_sibling_for_text_element(self): soup = self.soup("Foo<b>bar</b>baz") start = soup.find(text="baz") self.assertEqual(start.previous_sibling.name, 'b') self.assertEqual(start.previous_sibling.previous_sibling, 'Foo') self.assertSelects(start.find_previous_siblings('b'), ['bar']) self.assertEqual(start.find_previous_sibling(text="Foo"), "Foo") self.assertEqual(start.find_previous_sibling(text="nonesuch"), None) class TestTagCreation(SoupTest): """Test the ability to create new tags.""" def test_new_tag(self): soup = self.soup("") new_tag = soup.new_tag("foo", bar="baz") self.assertTrue(isinstance(new_tag, Tag)) self.assertEqual("foo", new_tag.name) self.assertEqual(dict(bar="baz"), new_tag.attrs) self.assertEqual(None, new_tag.parent) def test_tag_inherits_self_closing_rules_from_builder(self): if XML_BUILDER_PRESENT: xml_soup = BeautifulSoup("", "lxml-xml") xml_br = xml_soup.new_tag("br") xml_p = xml_soup.new_tag("p") # Both the <br> and <p> tag are empty-element, just because # they have no contents. self.assertEqual(b"<br/>", xml_br.encode()) self.assertEqual(b"<p/>", xml_p.encode()) html_soup = BeautifulSoup("", "html.parser") html_br = html_soup.new_tag("br") html_p = html_soup.new_tag("p") # The HTML builder users HTML's rules about which tags are # empty-element tags, and the new tags reflect these rules. 
self.assertEqual(b"<br/>", html_br.encode()) self.assertEqual(b"<p></p>", html_p.encode()) def test_new_string_creates_navigablestring(self): soup = self.soup("") s = soup.new_string("foo") self.assertEqual("foo", s) self.assertTrue(isinstance(s, NavigableString)) def test_new_string_can_create_navigablestring_subclass(self): soup = self.soup("") s = soup.new_string("foo", Comment) self.assertEqual("foo", s) self.assertTrue(isinstance(s, Comment)) class TestTreeModification(SoupTest): def test_attribute_modification(self): soup = self.soup('<a id="1"></a>') soup.a['id'] = 2 self.assertEqual(soup.decode(), self.document_for('<a id="2"></a>')) del(soup.a['id']) self.assertEqual(soup.decode(), self.document_for('<a></a>')) soup.a['id2'] = 'foo' self.assertEqual(soup.decode(), self.document_for('<a id2="foo"></a>')) def test_new_tag_creation(self): builder = builder_registry.lookup('html')() soup = self.soup("<body></body>", builder=builder) a = Tag(soup, builder, 'a') ol = Tag(soup, builder, 'ol') a['href'] = 'http://foo.com/' soup.body.insert(0, a) soup.body.insert(1, ol) self.assertEqual( soup.body.encode(), b'<body><a href="http://foo.com/"></a><ol></ol></body>') def test_append_to_contents_moves_tag(self): doc = """<p id="1">Don't leave me <b>here</b>.</p> <p id="2">Don\'t leave!</p>""" soup = self.soup(doc) second_para = soup.find(id='2') bold = soup.b # Move the <b> tag to the end of the second paragraph. soup.find(id='2').append(soup.b) # The <b> tag is now a child of the second paragraph. 
self.assertEqual(bold.parent, second_para) self.assertEqual( soup.decode(), self.document_for( '<p id="1">Don\'t leave me .</p>\n' '<p id="2">Don\'t leave!<b>here</b></p>')) def test_replace_with_returns_thing_that_was_replaced(self): text = "<a></a><b><c></c></b>" soup = self.soup(text) a = soup.a new_a = a.replace_with(soup.c) self.assertEqual(a, new_a) def test_unwrap_returns_thing_that_was_replaced(self): text = "<a><b></b><c></c></a>" soup = self.soup(text) a = soup.a new_a = a.unwrap() self.assertEqual(a, new_a) def test_replace_with_and_unwrap_give_useful_exception_when_tag_has_no_parent(self): soup = self.soup("<a><b>Foo</b></a><c>Bar</c>") a = soup.a a.extract() self.assertEqual(None, a.parent) self.assertRaises(ValueError, a.unwrap) self.assertRaises(ValueError, a.replace_with, soup.c) def test_replace_tag_with_itself(self): text = "<a><b></b><c>Foo<d></d></c></a><a><e></e></a>" soup = self.soup(text) c = soup.c soup.c.replace_with(c) self.assertEqual(soup.decode(), self.document_for(text)) def test_replace_tag_with_its_parent_raises_exception(self): text = "<a><b></b></a>" soup = self.soup(text) self.assertRaises(ValueError, soup.b.replace_with, soup.a) def test_insert_tag_into_itself_raises_exception(self): text = "<a><b></b></a>" soup = self.soup(text) self.assertRaises(ValueError, soup.a.insert, 0, soup.a) def test_replace_with_maintains_next_element_throughout(self): soup = self.soup('<p><a>one</a><b>three</b></p>') a = soup.a b = a.contents[0] # Make it so the <a> tag has two text children. a.insert(1, "two") # Now replace each one with the empty string. left, right = a.contents left.replaceWith('') right.replaceWith('') # The <b> tag is still connected to the tree. 
self.assertEqual("three", soup.b.string) def test_replace_final_node(self): soup = self.soup("<b>Argh!</b>") soup.find(text="Argh!").replace_with("Hooray!") new_text = soup.find(text="Hooray!") b = soup.b self.assertEqual(new_text.previous_element, b) self.assertEqual(new_text.parent, b) self.assertEqual(new_text.previous_element.next_element, new_text) self.assertEqual(new_text.next_element, None) def test_consecutive_text_nodes(self): # A builder should never create two consecutive text nodes, # but if you insert one next to another, Beautiful Soup will # handle it correctly. soup = self.soup("<a><b>Argh!</b><c></c></a>") soup.b.insert(1, "Hooray!") self.assertEqual( soup.decode(), self.document_for( "<a><b>Argh!Hooray!</b><c></c></a>")) new_text = soup.find(text="Hooray!") self.assertEqual(new_text.previous_element, "Argh!") self.assertEqual(new_text.previous_element.next_element, new_text) self.assertEqual(new_text.previous_sibling, "Argh!") self.assertEqual(new_text.previous_sibling.next_sibling, new_text) self.assertEqual(new_text.next_sibling, None) self.assertEqual(new_text.next_element, soup.c) def test_insert_string(self): soup = self.soup("<a></a>") soup.a.insert(0, "bar") soup.a.insert(0, "foo") # The string were added to the tag. self.assertEqual(["foo", "bar"], soup.a.contents) # And they were converted to NavigableStrings. self.assertEqual(soup.a.contents[0].next_element, "bar") def test_insert_tag(self): builder = self.default_builder soup = self.soup( "<a><b>Find</b><c>lady!</c><d></d></a>", builder=builder) magic_tag = Tag(soup, builder, 'magictag') magic_tag.insert(0, "the") soup.a.insert(1, magic_tag) self.assertEqual( soup.decode(), self.document_for( "<a><b>Find</b><magictag>the</magictag><c>lady!</c><d></d></a>")) # Make sure all the relationships are hooked up correctly. 
b_tag = soup.b self.assertEqual(b_tag.next_sibling, magic_tag) self.assertEqual(magic_tag.previous_sibling, b_tag) find = b_tag.find(text="Find") self.assertEqual(find.next_element, magic_tag) self.assertEqual(magic_tag.previous_element, find) c_tag = soup.c self.assertEqual(magic_tag.next_sibling, c_tag) self.assertEqual(c_tag.previous_sibling, magic_tag) the = magic_tag.find(text="the") self.assertEqual(the.parent, magic_tag) self.assertEqual(the.next_element, c_tag) self.assertEqual(c_tag.previous_element, the) def test_append_child_thats_already_at_the_end(self): data = "<a><b></b></a>" soup = self.soup(data) soup.a.append(soup.b) self.assertEqual(data, soup.decode()) def test_move_tag_to_beginning_of_parent(self): data = "<a><b></b><c></c><d></d></a>" soup = self.soup(data) soup.a.insert(0, soup.d) self.assertEqual("<a><d></d><b></b><c></c></a>", soup.decode()) def test_insert_works_on_empty_element_tag(self): # This is a little strange, since most HTML parsers don't allow # markup like this to come through. But in general, we don't # know what the parser would or wouldn't have allowed, so # I'm letting this succeed for now. 
soup = self.soup("<br/>") soup.br.insert(1, "Contents") self.assertEqual(str(soup.br), "<br>Contents</br>") def test_insert_before(self): soup = self.soup("<a>foo</a><b>bar</b>") soup.b.insert_before("BAZ") soup.a.insert_before("QUUX") self.assertEqual( soup.decode(), self.document_for("QUUX<a>foo</a>BAZ<b>bar</b>")) soup.a.insert_before(soup.b) self.assertEqual( soup.decode(), self.document_for("QUUX<b>bar</b><a>foo</a>BAZ")) def test_insert_after(self): soup = self.soup("<a>foo</a><b>bar</b>") soup.b.insert_after("BAZ") soup.a.insert_after("QUUX") self.assertEqual( soup.decode(), self.document_for("<a>foo</a>QUUX<b>bar</b>BAZ")) soup.b.insert_after(soup.a) self.assertEqual( soup.decode(), self.document_for("QUUX<b>bar</b><a>foo</a>BAZ")) def test_insert_after_raises_exception_if_after_has_no_meaning(self): soup = self.soup("") tag = soup.new_tag("a") string = soup.new_string("") self.assertRaises(ValueError, string.insert_after, tag) self.assertRaises(NotImplementedError, soup.insert_after, tag) self.assertRaises(ValueError, tag.insert_after, tag) def test_insert_before_raises_notimplementederror_if_before_has_no_meaning(self): soup = self.soup("") tag = soup.new_tag("a") string = soup.new_string("") self.assertRaises(ValueError, string.insert_before, tag) self.assertRaises(NotImplementedError, soup.insert_before, tag) self.assertRaises(ValueError, tag.insert_before, tag) def test_replace_with(self): soup = self.soup( "<p>There's <b>no</b> business like <b>show</b> business</p>") no, show = soup.find_all('b') show.replace_with(no) self.assertEqual( soup.decode(), self.document_for( "<p>There's business like <b>no</b> business</p>")) self.assertEqual(show.parent, None) self.assertEqual(no.parent, soup.p) self.assertEqual(no.next_element, "no") self.assertEqual(no.next_sibling, " business") def test_replace_first_child(self): data = "<a><b></b><c></c></a>" soup = self.soup(data) soup.b.replace_with(soup.c) self.assertEqual("<a><c></c></a>", soup.decode()) def 
test_replace_last_child(self): data = "<a><b></b><c></c></a>" soup = self.soup(data) soup.c.replace_with(soup.b) self.assertEqual("<a><b></b></a>", soup.decode()) def test_nested_tag_replace_with(self): soup = self.soup( """<a>We<b>reserve<c>the</c><d>right</d></b></a><e>to<f>refuse</f><g>service</g></e>""") # Replace the entire <b> tag and its contents ("reserve the # right") with the <f> tag ("refuse"). remove_tag = soup.b move_tag = soup.f remove_tag.replace_with(move_tag) self.assertEqual( soup.decode(), self.document_for( "<a>We<f>refuse</f></a><e>to<g>service</g></e>")) # The <b> tag is now an orphan. self.assertEqual(remove_tag.parent, None) self.assertEqual(remove_tag.find(text="right").next_element, None) self.assertEqual(remove_tag.previous_element, None) self.assertEqual(remove_tag.next_sibling, None) self.assertEqual(remove_tag.previous_sibling, None) # The <f> tag is now connected to the <a> tag. self.assertEqual(move_tag.parent, soup.a) self.assertEqual(move_tag.previous_element, "We") self.assertEqual(move_tag.next_element.next_element, soup.e) self.assertEqual(move_tag.next_sibling, None) # The gap where the <f> tag used to be has been mended, and # the word "to" is now connected to the <g> tag. 
to_text = soup.find(text="to") g_tag = soup.g self.assertEqual(to_text.next_element, g_tag) self.assertEqual(to_text.next_sibling, g_tag) self.assertEqual(g_tag.previous_element, to_text) self.assertEqual(g_tag.previous_sibling, to_text) def test_unwrap(self): tree = self.soup(""" <p>Unneeded <em>formatting</em> is unneeded</p> """) tree.em.unwrap() self.assertEqual(tree.em, None) self.assertEqual(tree.p.text, "Unneeded formatting is unneeded") def test_wrap(self): soup = self.soup("I wish I was bold.") value = soup.string.wrap(soup.new_tag("b")) self.assertEqual(value.decode(), "<b>I wish I was bold.</b>") self.assertEqual( soup.decode(), self.document_for("<b>I wish I was bold.</b>")) def test_wrap_extracts_tag_from_elsewhere(self): soup = self.soup("<b></b>I wish I was bold.") soup.b.next_sibling.wrap(soup.b) self.assertEqual( soup.decode(), self.document_for("<b>I wish I was bold.</b>")) def test_wrap_puts_new_contents_at_the_end(self): soup = self.soup("<b>I like being bold.</b>I wish I was bold.") soup.b.next_sibling.wrap(soup.b) self.assertEqual(2, len(soup.b.contents)) self.assertEqual( soup.decode(), self.document_for( "<b>I like being bold.I wish I was bold.</b>")) def test_extract(self): soup = self.soup( '<html><body>Some content. <div id="nav">Nav crap</div> More content.</body></html>') self.assertEqual(len(soup.body.contents), 3) extracted = soup.find(id="nav").extract() self.assertEqual( soup.decode(), "<html><body>Some content. More content.</body></html>") self.assertEqual(extracted.decode(), '<div id="nav">Nav crap</div>') # The extracted tag is now an orphan. self.assertEqual(len(soup.body.contents), 2) self.assertEqual(extracted.parent, None) self.assertEqual(extracted.previous_element, None) self.assertEqual(extracted.next_element.next_element, None) # The gap where the extracted tag used to be has been mended. content_1 = soup.find(text="Some content. 
") content_2 = soup.find(text=" More content.") self.assertEqual(content_1.next_element, content_2) self.assertEqual(content_1.next_sibling, content_2) self.assertEqual(content_2.previous_element, content_1) self.assertEqual(content_2.previous_sibling, content_1) def test_extract_distinguishes_between_identical_strings(self): soup = self.soup("<a>foo</a><b>bar</b>") foo_1 = soup.a.string bar_1 = soup.b.string foo_2 = soup.new_string("foo") bar_2 = soup.new_string("bar") soup.a.append(foo_2) soup.b.append(bar_2) # Now there are two identical strings in the <a> tag, and two # in the <b> tag. Let's remove the first "foo" and the second # "bar". foo_1.extract() bar_2.extract() self.assertEqual(foo_2, soup.a.string) self.assertEqual(bar_2, soup.b.string) def test_extract_multiples_of_same_tag(self): soup = self.soup(""" <html> <head> <script>foo</script> </head> <body> <script>bar</script> <a></a> </body> <script>baz</script> </html>""") [soup.script.extract() for i in soup.find_all("script")] self.assertEqual("<body>\n\n<a></a>\n</body>", unicode(soup.body)) def test_extract_works_when_element_is_surrounded_by_identical_strings(self): soup = self.soup( '<html>\n' '<body>hi</body>\n' '</html>') soup.find('body').extract() self.assertEqual(None, soup.find('body')) def test_clear(self): """Tag.clear()""" soup = self.soup("<p><a>String <em>Italicized</em></a> and another</p>") # clear using extract() a = soup.a soup.p.clear() self.assertEqual(len(soup.p.contents), 0) self.assertTrue(hasattr(a, "contents")) # clear using decompose() em = a.em a.clear(decompose=True) self.assertEqual(0, len(em.contents)) def test_string_set(self): """Tag.string = 'string'""" soup = self.soup("<a></a> <b><c></c></b>") soup.a.string = "foo" self.assertEqual(soup.a.contents, ["foo"]) soup.b.string = "bar" self.assertEqual(soup.b.contents, ["bar"]) def test_string_set_does_not_affect_original_string(self): soup = self.soup("<a><b>foo</b><c>bar</c>") soup.b.string = soup.c.string 
self.assertEqual(soup.a.encode(), b"<a><b>bar</b><c>bar</c></a>") def test_set_string_preserves_class_of_string(self): soup = self.soup("<a></a>") cdata = CData("foo") soup.a.string = cdata self.assertTrue(isinstance(soup.a.string, CData)) class TestElementObjects(SoupTest): """Test various features of element objects.""" def test_len(self): """The length of an element is its number of children.""" soup = self.soup("<top>1<b>2</b>3</top>") # The BeautifulSoup object itself contains one element: the # <top> tag. self.assertEqual(len(soup.contents), 1) self.assertEqual(len(soup), 1) # The <top> tag contains three elements: the text node "1", the # <b> tag, and the text node "3". self.assertEqual(len(soup.top), 3) self.assertEqual(len(soup.top.contents), 3) def test_member_access_invokes_find(self): """Accessing a Python member .foo invokes find('foo')""" soup = self.soup('<b><i></i></b>') self.assertEqual(soup.b, soup.find('b')) self.assertEqual(soup.b.i, soup.find('b').find('i')) self.assertEqual(soup.a, None) def test_deprecated_member_access(self): soup = self.soup('<b><i></i></b>') with warnings.catch_warnings(record=True) as w: tag = soup.bTag self.assertEqual(soup.b, tag) self.assertEqual( '.bTag is deprecated, use .find("b") instead.', str(w[0].message)) def test_has_attr(self): """has_attr() checks for the presence of an attribute. Please note note: has_attr() is different from __in__. has_attr() checks the tag's attributes and __in__ checks the tag's chidlren. """ soup = self.soup("<foo attr='bar'>") self.assertTrue(soup.foo.has_attr('attr')) self.assertFalse(soup.foo.has_attr('attr2')) def test_attributes_come_out_in_alphabetical_order(self): markup = '<b a="1" z="5" m="3" f="2" y="4"></b>' self.assertSoupEquals(markup, '<b a="1" f="2" m="3" y="4" z="5"></b>') def test_string(self): # A tag that contains only a text node makes that node # available as .string. 
        soup = self.soup("<b>foo</b>")
        self.assertEqual(soup.b.string, 'foo')

    def test_empty_tag_has_no_string(self):
        # A tag with no children has no .string.
        soup = self.soup("<b></b>")
        self.assertEqual(soup.b.string, None)

    def test_tag_with_multiple_children_has_no_string(self):
        # A tag with multiple children has no .string.
        soup = self.soup("<a>foo<b></b><b></b></b>")
        self.assertEqual(soup.b.string, None)

        soup = self.soup("<a>foo<b></b>bar</b>")
        self.assertEqual(soup.b.string, None)

        # Even if all the children are strings, due to trickery,
        # it won't work--but this would be a good optimization.
        soup = self.soup("<a>foo</b>")
        soup.a.insert(1, "bar")
        self.assertEqual(soup.a.string, None)

    def test_tag_with_recursive_string_has_string(self):
        # A tag with a single child which has a .string inherits that
        # .string.
        soup = self.soup("<a><b>foo</b></a>")
        self.assertEqual(soup.a.string, "foo")
        self.assertEqual(soup.string, "foo")

    def test_lack_of_string(self):
        """Only a tag containing a single text node has a .string."""
        soup = self.soup("<b>f<i>e</i>o</b>")
        self.assertFalse(soup.b.string)

        soup = self.soup("<b></b>")
        self.assertFalse(soup.b.string)

    def test_all_text(self):
        """Tag.text and Tag.get_text(sep=u"") -> all child text, concatenated"""
        soup = self.soup("<a>a<b>r</b> <r> t </r></a>")
        self.assertEqual(soup.a.text, "ar t ")
        self.assertEqual(soup.a.get_text(strip=True), "art")
        self.assertEqual(soup.a.get_text(","), "a,r, , t ")
        self.assertEqual(soup.a.get_text(",", strip=True), "a,r,t")

    def test_get_text_ignores_comments(self):
        soup = self.soup("foo<!--IGNORE-->bar")
        self.assertEqual(soup.get_text(), "foobar")

        self.assertEqual(
            soup.get_text(types=(NavigableString, Comment)), "fooIGNOREbar")
        self.assertEqual(
            soup.get_text(types=None), "fooIGNOREbar")

    def test_all_strings_ignores_comments(self):
        soup = self.soup("foo<!--IGNORE-->bar")
        self.assertEqual(['foo', 'bar'], list(soup.strings))


class TestCDAtaListAttributes(SoupTest):

    """Testing cdata-list attributes like 'class'.
""" def test_single_value_becomes_list(self): soup = self.soup("<a class='foo'>") self.assertEqual(["foo"],soup.a['class']) def test_multiple_values_becomes_list(self): soup = self.soup("<a class='foo bar'>") self.assertEqual(["foo", "bar"], soup.a['class']) def test_multiple_values_separated_by_weird_whitespace(self): soup = self.soup("<a class='foo\tbar\nbaz'>") self.assertEqual(["foo", "bar", "baz"],soup.a['class']) def test_attributes_joined_into_string_on_output(self): soup = self.soup("<a class='foo\tbar'>") self.assertEqual(b'<a class="foo bar"></a>', soup.a.encode()) def test_get_attribute_list(self): soup = self.soup("<a id='abc def'>") self.assertEqual(['abc def'], soup.a.get_attribute_list('id')) def test_accept_charset(self): soup = self.soup('<form accept-charset="ISO-8859-1 UTF-8">') self.assertEqual(['ISO-8859-1', 'UTF-8'], soup.form['accept-charset']) def test_cdata_attribute_applying_only_to_one_tag(self): data = '<a accept-charset="ISO-8859-1 UTF-8"></a>' soup = self.soup(data) # We saw in another test that accept-charset is a cdata-list # attribute for the <form> tag. But it's not a cdata-list # attribute for any other tag. self.assertEqual('ISO-8859-1 UTF-8', soup.a['accept-charset']) def test_string_has_immutable_name_property(self): string = self.soup("s").string self.assertEqual(None, string.name) def t(): string.name = 'foo' self.assertRaises(AttributeError, t) class TestPersistence(SoupTest): "Testing features like pickle and deepcopy." 
def setUp(self): super(TestPersistence, self).setUp() self.page = """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN" "http://www.w3.org/TR/REC-html40/transitional.dtd"> <html> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8"> <title>Beautiful Soup: We called him Tortoise because he taught us.</title> <link rev="made" href="mailto:leonardr@segfault.org"> <meta name="Description" content="Beautiful Soup: an HTML parser optimized for screen-scraping."> <meta name="generator" content="Markov Approximation 1.4 (module: leonardr)"> <meta name="author" content="Leonard Richardson"> </head> <body> <a href="foo">foo</a> <a href="foo"><b>bar</b></a> </body> </html>""" self.tree = self.soup(self.page) def test_pickle_and_unpickle_identity(self): # Pickling a tree, then unpickling it, yields a tree identical # to the original. dumped = pickle.dumps(self.tree, 2) loaded = pickle.loads(dumped) self.assertEqual(loaded.__class__, BeautifulSoup) self.assertEqual(loaded.decode(), self.tree.decode()) def test_deepcopy_identity(self): # Making a deepcopy of a tree yields an identical tree. copied = copy.deepcopy(self.tree) self.assertEqual(copied.decode(), self.tree.decode()) def test_copy_preserves_encoding(self): soup = BeautifulSoup(b'<p>&nbsp;</p>', 'html.parser') encoding = soup.original_encoding copy = soup.__copy__() self.assertEqual(u"<p> </p>", unicode(copy)) self.assertEqual(encoding, copy.original_encoding) def test_unicode_pickle(self): # A tree containing Unicode characters can be pickled. 
html = u"<b>\N{SNOWMAN}</b>" soup = self.soup(html) dumped = pickle.dumps(soup, pickle.HIGHEST_PROTOCOL) loaded = pickle.loads(dumped) self.assertEqual(loaded.decode(), soup.decode()) def test_copy_navigablestring_is_not_attached_to_tree(self): html = u"<b>Foo<a></a></b><b>Bar</b>" soup = self.soup(html) s1 = soup.find(string="Foo") s2 = copy.copy(s1) self.assertEqual(s1, s2) self.assertEqual(None, s2.parent) self.assertEqual(None, s2.next_element) self.assertNotEqual(None, s1.next_sibling) self.assertEqual(None, s2.next_sibling) self.assertEqual(None, s2.previous_element) def test_copy_navigablestring_subclass_has_same_type(self): html = u"<b><!--Foo--></b>" soup = self.soup(html) s1 = soup.string s2 = copy.copy(s1) self.assertEqual(s1, s2) self.assertTrue(isinstance(s2, Comment)) def test_copy_entire_soup(self): html = u"<div><b>Foo<a></a></b><b>Bar</b></div>end" soup = self.soup(html) soup_copy = copy.copy(soup) self.assertEqual(soup, soup_copy) def test_copy_tag_copies_contents(self): html = u"<div><b>Foo<a></a></b><b>Bar</b></div>end" soup = self.soup(html) div = soup.div div_copy = copy.copy(div) # The two tags look the same, and evaluate to equal. self.assertEqual(unicode(div), unicode(div_copy)) self.assertEqual(div, div_copy) # But they're not the same object. self.assertFalse(div is div_copy) # And they don't have the same relation to the parse tree. The # copy is not associated with a parse tree at all. self.assertEqual(None, div_copy.parent) self.assertEqual(None, div_copy.previous_element) self.assertEqual(None, div_copy.find(string='Bar').next_element) self.assertNotEqual(None, div.find(string='Bar').next_element) class TestSubstitutions(SoupTest): def test_default_formatter_is_minimal(self): markup = u"<b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>" soup = self.soup(markup) decoded = soup.decode(formatter="minimal") # The < is converted back into &lt; but the e-with-acute is left alone. 
self.assertEqual( decoded, self.document_for( u"<b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>")) def test_formatter_html(self): markup = u"<b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>" soup = self.soup(markup) decoded = soup.decode(formatter="html") self.assertEqual( decoded, self.document_for("<b>&lt;&lt;Sacr&eacute; bleu!&gt;&gt;</b>")) def test_formatter_minimal(self): markup = u"<b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>" soup = self.soup(markup) decoded = soup.decode(formatter="minimal") # The < is converted back into &lt; but the e-with-acute is left alone. self.assertEqual( decoded, self.document_for( u"<b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>")) def test_formatter_null(self): markup = u"<b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>" soup = self.soup(markup) decoded = soup.decode(formatter=None) # Neither the angle brackets nor the e-with-acute are converted. # This is not valid HTML, but it's what the user wanted. self.assertEqual(decoded, self.document_for(u"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>")) def test_formatter_custom(self): markup = u"<b>&lt;foo&gt;</b><b>bar</b>" soup = self.soup(markup) decoded = soup.decode(formatter = lambda x: x.upper()) # Instead of normal entity conversion code, the custom # callable is called on every string. 
self.assertEqual( decoded, self.document_for(u"<b><FOO></b><b>BAR</b>")) def test_formatter_is_run_on_attribute_values(self): markup = u'<a href="http://a.com?a=b&c=é">e</a>' soup = self.soup(markup) a = soup.a expect_minimal = u'<a href="http://a.com?a=b&amp;c=é">e</a>' self.assertEqual(expect_minimal, a.decode()) self.assertEqual(expect_minimal, a.decode(formatter="minimal")) expect_html = u'<a href="http://a.com?a=b&amp;c=&eacute;">e</a>' self.assertEqual(expect_html, a.decode(formatter="html")) self.assertEqual(markup, a.decode(formatter=None)) expect_upper = u'<a href="HTTP://A.COM?A=B&C=É">E</a>' self.assertEqual(expect_upper, a.decode(formatter=lambda x: x.upper())) def test_formatter_skips_script_tag_for_html_documents(self): doc = """ <script type="text/javascript"> console.log("< < hey > > "); </script> """ encoded = BeautifulSoup(doc, 'html.parser').encode() self.assertTrue(b"< < hey > >" in encoded) def test_formatter_skips_style_tag_for_html_documents(self): doc = """ <style type="text/css"> console.log("< < hey > > "); </style> """ encoded = BeautifulSoup(doc, 'html.parser').encode() self.assertTrue(b"< < hey > >" in encoded) def test_prettify_leaves_preformatted_text_alone(self): soup = self.soup("<div> foo <pre> \tbar\n \n </pre> baz ") # Everything outside the <pre> tag is reformatted, but everything # inside is left alone. 
self.assertEqual( u'<div>\n foo\n <pre> \tbar\n \n </pre>\n baz\n</div>', soup.div.prettify()) def test_prettify_accepts_formatter(self): soup = BeautifulSoup("<html><body>foo</body></html>", 'html.parser') pretty = soup.prettify(formatter = lambda x: x.upper()) self.assertTrue("FOO" in pretty) def test_prettify_outputs_unicode_by_default(self): soup = self.soup("<a></a>") self.assertEqual(unicode, type(soup.prettify())) def test_prettify_can_encode_data(self): soup = self.soup("<a></a>") self.assertEqual(bytes, type(soup.prettify("utf-8"))) def test_html_entity_substitution_off_by_default(self): markup = u"<b>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</b>" soup = self.soup(markup) encoded = soup.b.encode("utf-8") self.assertEqual(encoded, markup.encode('utf-8')) def test_encoding_substitution(self): # Here's the <meta> tag saying that a document is # encoded in Shift-JIS. meta_tag = ('<meta content="text/html; charset=x-sjis" ' 'http-equiv="Content-type"/>') soup = self.soup(meta_tag) # Parse the document, and the charset apprears unchanged. self.assertEqual(soup.meta['content'], 'text/html; charset=x-sjis') # Encode the document into some encoding, and the encoding is # substituted into the meta tag. utf_8 = soup.encode("utf-8") self.assertTrue(b"charset=utf-8" in utf_8) euc_jp = soup.encode("euc_jp") self.assertTrue(b"charset=euc_jp" in euc_jp) shift_jis = soup.encode("shift-jis") self.assertTrue(b"charset=shift-jis" in shift_jis) utf_16_u = soup.encode("utf-16").decode("utf-16") self.assertTrue("charset=utf-16" in utf_16_u) def test_encoding_substitution_doesnt_happen_if_tag_is_strained(self): markup = ('<head><meta content="text/html; charset=x-sjis" ' 'http-equiv="Content-type"/></head><pre>foo</pre>') # Beautiful Soup used to try to rewrite the meta tag even if the # meta tag got filtered out by the strainer. This test makes # sure that doesn't happen. 
strainer = SoupStrainer('pre') soup = self.soup(markup, parse_only=strainer) self.assertEqual(soup.contents[0].name, 'pre') class TestEncoding(SoupTest): """Test the ability to encode objects into strings.""" def test_unicode_string_can_be_encoded(self): html = u"<b>\N{SNOWMAN}</b>" soup = self.soup(html) self.assertEqual(soup.b.string.encode("utf-8"), u"\N{SNOWMAN}".encode("utf-8")) def test_tag_containing_unicode_string_can_be_encoded(self): html = u"<b>\N{SNOWMAN}</b>" soup = self.soup(html) self.assertEqual( soup.b.encode("utf-8"), html.encode("utf-8")) def test_encoding_substitutes_unrecognized_characters_by_default(self): html = u"<b>\N{SNOWMAN}</b>" soup = self.soup(html) self.assertEqual(soup.b.encode("ascii"), b"<b>&#9731;</b>") def test_encoding_can_be_made_strict(self): html = u"<b>\N{SNOWMAN}</b>" soup = self.soup(html) self.assertRaises( UnicodeEncodeError, soup.encode, "ascii", errors="strict") def test_decode_contents(self): html = u"<b>\N{SNOWMAN}</b>" soup = self.soup(html) self.assertEqual(u"\N{SNOWMAN}", soup.b.decode_contents()) def test_encode_contents(self): html = u"<b>\N{SNOWMAN}</b>" soup = self.soup(html) self.assertEqual( u"\N{SNOWMAN}".encode("utf8"), soup.b.encode_contents( encoding="utf8")) def test_deprecated_renderContents(self): html = u"<b>\N{SNOWMAN}</b>" soup = self.soup(html) self.assertEqual( u"\N{SNOWMAN}".encode("utf8"), soup.b.renderContents()) def test_repr(self): html = u"<b>\N{SNOWMAN}</b>" soup = self.soup(html) if PY3K: self.assertEqual(html, repr(soup)) else: self.assertEqual(b'<b>\\u2603</b>', repr(soup)) class TestNavigableStringSubclasses(SoupTest): def test_cdata(self): # None of the current builders turn CDATA sections into CData # objects, but you can create them manually. 
soup = self.soup("") cdata = CData("foo") soup.insert(1, cdata) self.assertEqual(str(soup), "<![CDATA[foo]]>") self.assertEqual(soup.find(text="foo"), "foo") self.assertEqual(soup.contents[0], "foo") def test_cdata_is_never_formatted(self): """Text inside a CData object is passed into the formatter. But the return value is ignored. """ self.count = 0 def increment(*args): self.count += 1 return "BITTER FAILURE" soup = self.soup("") cdata = CData("<><><>") soup.insert(1, cdata) self.assertEqual( b"<![CDATA[<><><>]]>", soup.encode(formatter=increment)) self.assertEqual(1, self.count) def test_doctype_ends_in_newline(self): # Unlike other NavigableString subclasses, a DOCTYPE always ends # in a newline. doctype = Doctype("foo") soup = self.soup("") soup.insert(1, doctype) self.assertEqual(soup.encode(), b"<!DOCTYPE foo>\n") def test_declaration(self): d = Declaration("foo") self.assertEqual("<?foo?>", d.output_ready()) class TestSoupSelector(TreeTest): HTML = """ <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd"> <html> <head> <title>The title</title> <link rel="stylesheet" href="blah.css" type="text/css" id="l1"> </head> <body> <custom-dashed-tag class="dashed" id="dash1">Hello there.</custom-dashed-tag> <div id="main" class="fancy"> <div id="inner"> <h1 id="header1">An H1</h1> <p>Some text</p> <p class="onep" id="p1">Some more text</p> <h2 id="header2">An H2</h2> <p class="class1 class2 class3" id="pmulti">Another</p> <a href="http://bob.example.org/" rel="friend met" id="bob">Bob</a> <h2 id="header3">Another H2</h2> <a id="me" href="http://simonwillison.net/" rel="me">me</a> <span class="s1"> <a href="#" id="s1a1">span1a1</a> <a href="#" id="s1a2">span1a2 <span id="s1a2s1">test</span></a> <span class="span2"> <a href="#" id="s2a1">span2a1</a> </span> <span class="span3"></span> <custom-dashed-tag class="dashed" id="dash2"/> <div data-tag="dashedvalue" id="data1"/> </span> </div> <x id="xid"> <z id="zida"/> <z id="zidab"/> <z 
id="zidac"/> </x> <y id="yid"> <z id="zidb"/> </y> <p lang="en" id="lang-en">English</p> <p lang="en-gb" id="lang-en-gb">English UK</p> <p lang="en-us" id="lang-en-us">English US</p> <p lang="fr" id="lang-fr">French</p> </div> <div id="footer"> </div> """ def setUp(self): self.soup = BeautifulSoup(self.HTML, 'html.parser') def assertSelects(self, selector, expected_ids, **kwargs): el_ids = [el['id'] for el in self.soup.select(selector, **kwargs)] el_ids.sort() expected_ids.sort() self.assertEqual(expected_ids, el_ids, "Selector %s, expected [%s], got [%s]" % ( selector, ', '.join(expected_ids), ', '.join(el_ids) ) ) assertSelect = assertSelects def assertSelectMultiple(self, *tests): for selector, expected_ids in tests: self.assertSelect(selector, expected_ids) def test_one_tag_one(self): els = self.soup.select('title') self.assertEqual(len(els), 1) self.assertEqual(els[0].name, 'title') self.assertEqual(els[0].contents, [u'The title']) def test_one_tag_many(self): els = self.soup.select('div') self.assertEqual(len(els), 4) for div in els: self.assertEqual(div.name, 'div') el = self.soup.select_one('div') self.assertEqual('main', el['id']) def test_select_one_returns_none_if_no_match(self): match = self.soup.select_one('nonexistenttag') self.assertEqual(None, match) def test_tag_in_tag_one(self): els = self.soup.select('div div') self.assertSelects('div div', ['inner', 'data1']) def test_tag_in_tag_many(self): for selector in ('html div', 'html body div', 'body div'): self.assertSelects(selector, ['data1', 'main', 'inner', 'footer']) def test_limit(self): self.assertSelects('html div', ['main'], limit=1) self.assertSelects('html body div', ['inner', 'main'], limit=2) self.assertSelects('body div', ['data1', 'main', 'inner', 'footer'], limit=10) def test_tag_no_match(self): self.assertEqual(len(self.soup.select('del')), 0) def test_invalid_tag(self): self.assertRaises(ValueError, self.soup.select, 'tag%t') def test_select_dashed_tag_ids(self): 
self.assertSelects('custom-dashed-tag', ['dash1', 'dash2']) def test_select_dashed_by_id(self): dashed = self.soup.select('custom-dashed-tag[id=\"dash2\"]') self.assertEqual(dashed[0].name, 'custom-dashed-tag') self.assertEqual(dashed[0]['id'], 'dash2') def test_dashed_tag_text(self): self.assertEqual(self.soup.select('body > custom-dashed-tag')[0].text, u'Hello there.') def test_select_dashed_matches_find_all(self): self.assertEqual(self.soup.select('custom-dashed-tag'), self.soup.find_all('custom-dashed-tag')) def test_header_tags(self): self.assertSelectMultiple( ('h1', ['header1']), ('h2', ['header2', 'header3']), ) def test_class_one(self): for selector in ('.onep', 'p.onep', 'html p.onep'): els = self.soup.select(selector) self.assertEqual(len(els), 1) self.assertEqual(els[0].name, 'p') self.assertEqual(els[0]['class'], ['onep']) def test_class_mismatched_tag(self): els = self.soup.select('div.onep') self.assertEqual(len(els), 0) def test_one_id(self): for selector in ('div#inner', '#inner', 'div div#inner'): self.assertSelects(selector, ['inner']) def test_bad_id(self): els = self.soup.select('#doesnotexist') self.assertEqual(len(els), 0) def test_items_in_id(self): els = self.soup.select('div#inner p') self.assertEqual(len(els), 3) for el in els: self.assertEqual(el.name, 'p') self.assertEqual(els[1]['class'], ['onep']) self.assertFalse(els[0].has_attr('class')) def test_a_bunch_of_emptys(self): for selector in ('div#main del', 'div#main div.oops', 'div div#main'): self.assertEqual(len(self.soup.select(selector)), 0) def test_multi_class_support(self): for selector in ('.class1', 'p.class1', '.class2', 'p.class2', '.class3', 'p.class3', 'html p.class2', 'div#inner .class2'): self.assertSelects(selector, ['pmulti']) def test_multi_class_selection(self): for selector in ('.class1.class3', '.class3.class2', '.class1.class2.class3'): self.assertSelects(selector, ['pmulti']) def test_child_selector(self): self.assertSelects('.s1 > a', ['s1a1', 's1a2']) 
self.assertSelects('.s1 > a span', ['s1a2s1']) def test_child_selector_id(self): self.assertSelects('.s1 > a#s1a2 span', ['s1a2s1']) def test_attribute_equals(self): self.assertSelectMultiple( ('p[class="onep"]', ['p1']), ('p[id="p1"]', ['p1']), ('[class="onep"]', ['p1']), ('[id="p1"]', ['p1']), ('link[rel="stylesheet"]', ['l1']), ('link[type="text/css"]', ['l1']), ('link[href="blah.css"]', ['l1']), ('link[href="no-blah.css"]', []), ('[rel="stylesheet"]', ['l1']), ('[type="text/css"]', ['l1']), ('[href="blah.css"]', ['l1']), ('[href="no-blah.css"]', []), ('p[href="no-blah.css"]', []), ('[href="no-blah.css"]', []), ) def test_attribute_tilde(self): self.assertSelectMultiple( ('p[class~="class1"]', ['pmulti']), ('p[class~="class2"]', ['pmulti']), ('p[class~="class3"]', ['pmulti']), ('[class~="class1"]', ['pmulti']), ('[class~="class2"]', ['pmulti']), ('[class~="class3"]', ['pmulti']), ('a[rel~="friend"]', ['bob']), ('a[rel~="met"]', ['bob']), ('[rel~="friend"]', ['bob']), ('[rel~="met"]', ['bob']), ) def test_attribute_startswith(self): self.assertSelectMultiple( ('[rel^="style"]', ['l1']), ('link[rel^="style"]', ['l1']), ('notlink[rel^="notstyle"]', []), ('[rel^="notstyle"]', []), ('link[rel^="notstyle"]', []), ('link[href^="bla"]', ['l1']), ('a[href^="http://"]', ['bob', 'me']), ('[href^="http://"]', ['bob', 'me']), ('[id^="p"]', ['pmulti', 'p1']), ('[id^="m"]', ['me', 'main']), ('div[id^="m"]', ['main']), ('a[id^="m"]', ['me']), ('div[data-tag^="dashed"]', ['data1']) ) def test_attribute_endswith(self): self.assertSelectMultiple( ('[href$=".css"]', ['l1']), ('link[href$=".css"]', ['l1']), ('link[id$="1"]', ['l1']), ('[id$="1"]', ['data1', 'l1', 'p1', 'header1', 's1a1', 's2a1', 's1a2s1', 'dash1']), ('div[id$="1"]', ['data1']), ('[id$="noending"]', []), ) def test_attribute_contains(self): self.assertSelectMultiple( # From test_attribute_startswith ('[rel*="style"]', ['l1']), ('link[rel*="style"]', ['l1']), ('notlink[rel*="notstyle"]', []), ('[rel*="notstyle"]', 
[]), ('link[rel*="notstyle"]', []), ('link[href*="bla"]', ['l1']), ('[href*="http://"]', ['bob', 'me']), ('[id*="p"]', ['pmulti', 'p1']), ('div[id*="m"]', ['main']), ('a[id*="m"]', ['me']), # From test_attribute_endswith ('[href*=".css"]', ['l1']), ('link[href*=".css"]', ['l1']), ('link[id*="1"]', ['l1']), ('[id*="1"]', ['data1', 'l1', 'p1', 'header1', 's1a1', 's1a2', 's2a1', 's1a2s1', 'dash1']), ('div[id*="1"]', ['data1']), ('[id*="noending"]', []), # New for this test ('[href*="."]', ['bob', 'me', 'l1']), ('a[href*="."]', ['bob', 'me']), ('link[href*="."]', ['l1']), ('div[id*="n"]', ['main', 'inner']), ('div[id*="nn"]', ['inner']), ('div[data-tag*="edval"]', ['data1']) ) def test_attribute_exact_or_hypen(self): self.assertSelectMultiple( ('p[lang|="en"]', ['lang-en', 'lang-en-gb', 'lang-en-us']), ('[lang|="en"]', ['lang-en', 'lang-en-gb', 'lang-en-us']), ('p[lang|="fr"]', ['lang-fr']), ('p[lang|="gb"]', []), ) def test_attribute_exists(self): self.assertSelectMultiple( ('[rel]', ['l1', 'bob', 'me']), ('link[rel]', ['l1']), ('a[rel]', ['bob', 'me']), ('[lang]', ['lang-en', 'lang-en-gb', 'lang-en-us', 'lang-fr']), ('p[class]', ['p1', 'pmulti']), ('[blah]', []), ('p[blah]', []), ('div[data-tag]', ['data1']) ) def test_quoted_space_in_selector_name(self): html = """<div style="display: wrong">nope</div> <div style="display: right">yes</div> """ soup = BeautifulSoup(html, 'html.parser') [chosen] = soup.select('div[style="display: right"]') self.assertEqual("yes", chosen.string) def test_unsupported_pseudoclass(self): self.assertRaises( NotImplementedError, self.soup.select, "a:no-such-pseudoclass") self.assertRaises( NotImplementedError, self.soup.select, "a:nth-of-type(a)") def test_nth_of_type(self): # Try to select first paragraph els = self.soup.select('div#inner p:nth-of-type(1)') self.assertEqual(len(els), 1) self.assertEqual(els[0].string, u'Some text') # Try to select third paragraph els = self.soup.select('div#inner p:nth-of-type(3)') 
self.assertEqual(len(els), 1) self.assertEqual(els[0].string, u'Another') # Try to select (non-existent!) fourth paragraph els = self.soup.select('div#inner p:nth-of-type(4)') self.assertEqual(len(els), 0) # Pass in an invalid value. self.assertRaises( ValueError, self.soup.select, 'div p:nth-of-type(0)') def test_nth_of_type_direct_descendant(self): els = self.soup.select('div#inner > p:nth-of-type(1)') self.assertEqual(len(els), 1) self.assertEqual(els[0].string, u'Some text') def test_id_child_selector_nth_of_type(self): self.assertSelects('#inner > p:nth-of-type(2)', ['p1']) def test_select_on_element(self): # Other tests operate on the tree; this operates on an element # within the tree. inner = self.soup.find("div", id="main") selected = inner.select("div") # The <div id="inner"> tag was selected. The <div id="footer"> # tag was not. self.assertSelectsIDs(selected, ['inner', 'data1']) def test_overspecified_child_id(self): self.assertSelects(".fancy #inner", ['inner']) self.assertSelects(".normal #inner", []) def test_adjacent_sibling_selector(self): self.assertSelects('#p1 + h2', ['header2']) self.assertSelects('#p1 + h2 + p', ['pmulti']) self.assertSelects('#p1 + #header2 + .class1', ['pmulti']) self.assertEqual([], self.soup.select('#p1 + p')) def test_general_sibling_selector(self): self.assertSelects('#p1 ~ h2', ['header2', 'header3']) self.assertSelects('#p1 ~ #header2', ['header2']) self.assertSelects('#p1 ~ h2 + a', ['me']) self.assertSelects('#p1 ~ h2 + [rel="me"]', ['me']) self.assertEqual([], self.soup.select('#inner ~ h2')) def test_dangling_combinator(self): self.assertRaises(ValueError, self.soup.select, 'h1 >') def test_sibling_combinator_wont_select_same_tag_twice(self): self.assertSelects('p[lang] ~ p', ['lang-en-gb', 'lang-en-us', 'lang-fr']) # Test the selector grouping operator (the comma) def test_multiple_select(self): self.assertSelects('x, y', ['xid', 'yid']) def test_multiple_select_with_no_space(self): self.assertSelects('x,y', 
['xid', 'yid']) def test_multiple_select_with_more_space(self): self.assertSelects('x, y', ['xid', 'yid']) def test_multiple_select_duplicated(self): self.assertSelects('x, x', ['xid']) def test_multiple_select_sibling(self): self.assertSelects('x, y ~ p[lang=fr]', ['xid', 'lang-fr']) def test_multiple_select_tag_and_direct_descendant(self): self.assertSelects('x, y > z', ['xid', 'zidb']) def test_multiple_select_direct_descendant_and_tags(self): self.assertSelects('div > x, y, z', ['xid', 'yid', 'zida', 'zidb', 'zidab', 'zidac']) def test_multiple_select_indirect_descendant(self): self.assertSelects('div x,y, z', ['xid', 'yid', 'zida', 'zidb', 'zidab', 'zidac']) def test_invalid_multiple_select(self): self.assertRaises(ValueError, self.soup.select, ',x, y') self.assertRaises(ValueError, self.soup.select, 'x,,y') def test_multiple_select_attrs(self): self.assertSelects('p[lang=en], p[lang=en-gb]', ['lang-en', 'lang-en-gb']) def test_multiple_select_ids(self): self.assertSelects('x, y > z[id=zida], z[id=zidab], z[id=zidb]', ['xid', 'zidb', 'zidab']) def test_multiple_select_nested(self): self.assertSelects('body > div > x, y > z', ['xid', 'zidb'])
gpl-3.0
40223227/w16b_test
wsgi.py
2
4770
#@+leo-ver=5-thin #@+node:2014fall.20141212095015.1775: * @file wsgi.py # coding=utf-8 # 上面的程式內容編碼必須在程式的第一或者第二行才會有作用 ################# (1) 模組導入區 # 導入 cherrypy 模組, 為了在 OpenShift 平台上使用 cherrypy 模組, 必須透過 setup.py 安裝 #@@language python #@@tabwidth -4 #@+<<declarations>> #@+node:2014fall.20141212095015.1776: ** <<declarations>> (wsgi) import cherrypy # 導入 Python 內建的 os 模組, 因為 os 模組為 Python 內建, 所以無需透過 setup.py 安裝 import os # 導入 random 模組 import random # 導入 gear 模組 import gear ################# (2) 廣域變數設定區 # 確定程式檔案所在目錄, 在 Windows 下有最後的反斜線 _curdir = os.path.join(os.getcwd(), os.path.dirname(__file__)) # 設定在雲端與近端的資料儲存目錄 if 'OPENSHIFT_REPO_DIR' in os.environ.keys(): # 表示程式在雲端執行 download_root_dir = os.environ['OPENSHIFT_DATA_DIR'] data_dir = os.environ['OPENSHIFT_DATA_DIR'] else: # 表示程式在近端執行 download_root_dir = _curdir + "/local_data/" data_dir = _curdir + "/local_data/" '''以下為近端 input() 與 for 迴圈應用的程式碼, 若要將程式送到 OpenShift 執行, 除了採用 CherryPy 網際框架外, 還要轉為 html 列印 # 利用 input() 取得的資料型別為字串 toprint = input("要印甚麼內容?") # 若要將 input() 取得的字串轉為整數使用, 必須利用 int() 轉換 repeat_no = int(input("重複列印幾次?")) for i in range(repeat_no): print(toprint) ''' #@-<<declarations>> #@+others #@+node:2014fall.20141212095015.1777: ** class Hello ################# (3) 程式類別定義區 # 以下改用 CherryPy 網際框架程式架構 # 以下為 Hello 類別的設計內容, 其中的 object 使用, 表示 Hello 類別繼承 object 的所有特性, 包括方法與屬性設計 class Hello(object): # Hello 類別的啟動設定 _cp_config = { 'tools.encode.encoding': 'utf-8', 'tools.sessions.on' : True, 'tools.sessions.storage_type' : 'file', #'tools.sessions.locking' : 'explicit', # session 以檔案儲存, 而且位於 data_dir 下的 tmp 目錄 'tools.sessions.storage_path' : data_dir+'/tmp', # session 有效時間設為 60 分鐘 'tools.sessions.timeout' : 60 } #@+others #@+node:2014fall.20141215194146.1791: *3* index # 以 @ 開頭的 cherrypy.expose 為 decorator, 用來表示隨後的成員方法, 可以直接讓使用者以 URL 連結執行 @cherrypy.expose # index 方法為 CherryPy 各類別成員方法中的內建(default)方法, 當使用者執行時未指定方法, 系統將會優先執行 index 方法 # 有 self 的方法為類別中的成員方法, Python 程式透過此一 self 在各成員方法間傳遞物件內容 def index(self): outstring = ''' 
<!DOCTYPE html> <html> <head> <meta http-equiv="content-type" content="text/html;charset=utf-8"> </head> <body> <a href="spur">a_學號</a><br /> </body> </html> ''' return outstring #@+node:2015.20150330144929.1713: *3* spur @cherrypy.expose # N 為齒數, M 為模數, P 為壓力角 def spur(self, N=40223227, M=5, P=15): outstring = ''' <!DOCTYPE html> <html> <head> <meta http-equiv="content-type" content="text/html;charset=utf-8"> <!-- 載入 brython.js --> <script type="text/javascript" src="/static/Brython3.1.3-20150514-095342/brython.js"></script> </head> <!-- 啟動 brython() --> <body onload="brython()"> <form method=POST action=spuraction> 個人學號:<input type=text name=N value='''+str(N)+'''><br /> </form> <br /><a href="index">index</a><br /> </body> </html> ''' return outstring #@-others #@-others ################# (4) 程式啟動區 # 配合程式檔案所在目錄設定靜態目錄或靜態檔案 application_conf = {'/static':{ 'tools.staticdir.on': True, # 程式執行目錄下, 必須自行建立 static 目錄 'tools.staticdir.dir': _curdir+"/static"}, '/downloads':{ 'tools.staticdir.on': True, 'tools.staticdir.dir': data_dir+"/downloads"}, '/images':{ 'tools.staticdir.on': True, 'tools.staticdir.dir': data_dir+"/images"} } root = Hello() root.gear = gear.Gear() if 'OPENSHIFT_REPO_DIR' in os.environ.keys(): # 表示在 OpenSfhit 執行 application = cherrypy.Application(root, config=application_conf) else: # 表示在近端執行 cherrypy.quickstart(root, config=application_conf) #@-leo
gpl-3.0
av8ramit/tensorflow
tensorflow/python/keras/_impl/keras/optimizers_test.py
8
5783
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Keras optimizers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.keras._impl import keras from tensorflow.python.keras._impl.keras import testing_utils from tensorflow.python.platform import test from tensorflow.python.training.adam import AdamOptimizer def _get_model(input_dim, num_hidden, output_dim): model = keras.models.Sequential() model.add(keras.layers.Dense(num_hidden, activation='relu', input_shape=(input_dim,))) model.add(keras.layers.Dense(output_dim, activation='softmax')) return model def _test_optimizer(optimizer, target=0.75): np.random.seed(1337) (x_train, y_train), _ = testing_utils.get_test_data(train_samples=1000, test_samples=200, input_shape=(10,), num_classes=2) y_train = keras.utils.to_categorical(y_train) model = _get_model(x_train.shape[1], 20, y_train.shape[1]) model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy']) history = model.fit(x_train, y_train, epochs=2, batch_size=16, verbose=0) assert history.history['acc'][-1] >= target config = keras.optimizers.serialize(optimizer) optim = keras.optimizers.deserialize(config) new_config = keras.optimizers.serialize(optim) new_config['class_name'] = 
new_config['class_name'].lower() assert config == new_config # Test constraints. model = keras.models.Sequential() dense = keras.layers.Dense(10, input_shape=(x_train.shape[1],), kernel_constraint=lambda x: 0. * x + 1., bias_constraint=lambda x: 0. * x + 2., activation='relu') model.add(dense) model.add(keras.layers.Dense(y_train.shape[1], activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy']) model.train_on_batch(x_train[:10], y_train[:10]) kernel, bias = dense.get_weights() np.testing.assert_allclose(kernel, 1., atol=1e-3) np.testing.assert_allclose(bias, 2., atol=1e-3) class KerasOptimizersTest(test.TestCase): def test_sgd(self): with self.test_session(): _test_optimizer(keras.optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True)) def test_rmsprop(self): with self.test_session(): _test_optimizer(keras.optimizers.RMSprop()) _test_optimizer(keras.optimizers.RMSprop(decay=1e-3)) def test_adagrad(self): with self.test_session(): _test_optimizer(keras.optimizers.Adagrad()) _test_optimizer(keras.optimizers.Adagrad(decay=1e-3)) def test_adadelta(self): with self.test_session(): _test_optimizer(keras.optimizers.Adadelta(), target=0.6) # Accuracy seems dependent on the initialization. Even adding tf.Print # nodes in the graph seemed to affect the initialization seed, and hence # the accuracy. 
_test_optimizer(keras.optimizers.Adadelta(decay=1e-3), target=0.4) def test_adam(self): with self.test_session(): _test_optimizer(keras.optimizers.Adam()) _test_optimizer(keras.optimizers.Adam(decay=1e-3)) _test_optimizer(keras.optimizers.Adam(amsgrad=True)) def test_adamax(self): with self.test_session(): _test_optimizer(keras.optimizers.Adamax()) _test_optimizer(keras.optimizers.Adamax(decay=1e-3)) def test_nadam(self): with self.test_session(): _test_optimizer(keras.optimizers.Nadam()) def test_clipnorm(self): with self.test_session(): _test_optimizer(keras.optimizers.SGD(lr=0.01, momentum=0.9, clipnorm=0.5)) def test_clipvalue(self): with self.test_session(): _test_optimizer(keras.optimizers.SGD(lr=0.01, momentum=0.9, clipvalue=0.5)) def test_tfoptimizer(self): optimizer = keras.optimizers.TFOptimizer(AdamOptimizer(0.01)) model = keras.models.Sequential() model.add(keras.layers.Dense( 2, input_shape=(3,), kernel_constraint=keras.constraints.MaxNorm(1))) # This is possible model.compile(loss='mean_squared_error', optimizer=optimizer) model.fit(np.random.random((5, 3)), np.random.random((5, 2)), epochs=1, batch_size=5, verbose=0) # not supported with self.assertRaises(NotImplementedError): _ = optimizer.weights with self.assertRaises(NotImplementedError): optimizer.get_config() with self.assertRaises(NotImplementedError): optimizer.from_config(None) if __name__ == '__main__': test.main()
apache-2.0
log2timeline/dfvfs
dfvfs/lib/gzipfile.py
2
19892
# -*- coding: utf-8 -*- """Gzip compressed stream file.""" # Note: do not rename file to gzip.py this can cause the exception: # AttributeError: 'module' object has no attribute 'GzipFile' # when using pip. import collections import os from dtfabric.runtime import fabric as dtfabric_fabric from dfvfs.compression import zlib_decompressor from dfvfs.lib import data_format from dfvfs.lib import errors class _GzipDecompressorState(object): """Deflate decompressor wrapper for reading a gzip member. This class encapsulates the state of a deflate decompression object, as well as the location of the decompressor's source data. Attributes: uncompressed_offset (int): offset into the uncompressed data in a gzip member last emitted by the state object. """ _MAXIMUM_READ_SIZE = 16 * 1024 * 1024 def __init__(self, stream_start): """Initializes a gzip member decompressor wrapper. Args: stream_start (int): offset to the compressed stream within the containing file object. """ self._compressed_data = b'' self._decompressor = zlib_decompressor.DeflateDecompressor() self._last_read = stream_start self.uncompressed_offset = 0 def Read(self, file_object): """Reads the next uncompressed data from the gzip stream. Args: file_object (FileIO): file object that contains the compressed stream. Returns: bytes: next uncompressed data from the compressed stream. """ file_object.seek(self._last_read, os.SEEK_SET) read_data = file_object.read(self._MAXIMUM_READ_SIZE) self._last_read = file_object.get_offset() compressed_data = b''.join([self._compressed_data, read_data]) decompressed_data, remaining_compressed_data = ( self._decompressor.Decompress(compressed_data)) self._compressed_data = remaining_compressed_data self.uncompressed_offset += len(decompressed_data) return decompressed_data def GetUnusedData(self): """Retrieves any bytes past the end of the compressed data. 
See https://docs.python.org/2/library/zlib.html#zlib.Decompress.unused_data Unused data can be any bytes after a Deflate compressed block (or chunk). Returns: bytes: data past the end of the compressed data, if any has been read from the gzip file. """ return self._decompressor.unused_data class GzipMember(data_format.DataFormat): """Gzip member. Gzip files have no index of members, so each member must be read sequentially before metadata and random seeks are possible. This class provides caching of gzip member data during the initial read of each member. Attributes: comment (str): comment stored in the member. member_end_offset (int): offset to the end of the member in the parent file object. member_start_offset (int): offset to the start of the member in the parent file object. operating_system (int): type of file system on which the compression took place. original_filename (str): original filename of the uncompressed file. uncompressed_data_offset (int): offset of the start of the uncompressed data in this member relative to the whole gzip file's uncompressed data. uncompressed_data_size (int): total size of the data in this gzip member after decompression. 
""" _DATA_TYPE_FABRIC_DEFINITION_FILE = os.path.join( os.path.dirname(__file__), 'gzipfile.yaml') with open(_DATA_TYPE_FABRIC_DEFINITION_FILE, 'rb') as file_object: _DATA_TYPE_FABRIC_DEFINITION = file_object.read() _DATA_TYPE_FABRIC = dtfabric_fabric.DataTypeFabric( yaml_definition=_DATA_TYPE_FABRIC_DEFINITION) _MEMBER_HEADER = _DATA_TYPE_FABRIC.CreateDataTypeMap( 'gzip_member_header') _MEMBER_HEADER_SIZE = _MEMBER_HEADER.GetByteSize() _MEMBER_FOOTER = _DATA_TYPE_FABRIC.CreateDataTypeMap( 'gzip_member_footer') _MEMBER_FOOTER_SIZE = _MEMBER_FOOTER.GetByteSize() _UINT16LE = _DATA_TYPE_FABRIC.CreateDataTypeMap('uint16le') _UINT16LE_SIZE = _UINT16LE.GetByteSize() _CSTRING = _DATA_TYPE_FABRIC.CreateDataTypeMap('cstring') _GZIP_SIGNATURE = 0x8b1f _COMPRESSION_METHOD_DEFLATE = 8 _FLAG_FTEXT = 0x01 _FLAG_FHCRC = 0x02 _FLAG_FEXTRA = 0x04 _FLAG_FNAME = 0x08 _FLAG_FCOMMENT = 0x10 # The maximum size of the uncompressed data cache. _UNCOMPRESSED_DATA_CACHE_SIZE = 2 * 1024 * 1024 def __init__( self, file_object, member_start_offset, uncompressed_data_offset): """Initializes a gzip member. Args: file_object (FileIO): file-like object, containing the gzip member. member_start_offset (int): offset to the beginning of the gzip member in the containing file. uncompressed_data_offset (int): offset of the start of the uncompressed data in this member relative to the whole gzip file's uncompressed data. """ self._cache = b'' # End offset of the cached uncompressed data of the member. self._cache_end_offset = None # Start offset of the cached uncompressed data of the member. 
self._cache_start_offset = None self.comment = None self.modification_time = None self.operating_system = None self.original_filename = None file_size = file_object.get_size() file_object.seek(member_start_offset, os.SEEK_SET) self._ReadMemberHeader(file_object) data_offset = 0 uncompressed_data_size = 0 compressed_data_offset = file_object.get_offset() decompressor_state = _GzipDecompressorState(compressed_data_offset) # Read the member data to determine the uncompressed data size and # the offset of the member footer. file_offset = compressed_data_offset while file_offset < file_size: data_offset += uncompressed_data_size decompressed_data = decompressor_state.Read(file_object) uncompressed_data_size += len(decompressed_data) # Note that unused data will be set when the decompressor reads beyond # the end of the compressed data stream. unused_data = decompressor_state.GetUnusedData() if unused_data: file_object.seek(-len(unused_data), os.SEEK_CUR) file_offset = file_object.get_offset() break file_offset = file_object.get_offset() # Do not read the the last member footer if it is missing, which is # a common corruption scenario. if file_offset < file_size: self._ReadStructure( file_object, file_offset, self._MEMBER_FOOTER_SIZE, self._MEMBER_FOOTER, 'member footer') member_end_offset = file_object.get_offset() # Initialize the member with data. self._file_object = file_object self._file_object.seek(member_start_offset, os.SEEK_SET) # Cache uncompressed data of gzip files that fit entirely in the cache. if (data_offset == 0 and uncompressed_data_size < self._UNCOMPRESSED_DATA_CACHE_SIZE): self._cache = decompressed_data self._cache_start_offset = 0 self._cache_end_offset = uncompressed_data_size # Offset to the beginning of the compressed data in the file object. self._compressed_data_start = compressed_data_offset self._decompressor_state = _GzipDecompressorState(compressed_data_offset) # Offset to the start of the member in the parent file object. 
self.member_start_offset = member_start_offset # Offset to the end of the member in the parent file object. self.member_end_offset = member_end_offset # Total size of the data in this gzip member after decompression. self.uncompressed_data_size = uncompressed_data_size # Offset of the start of the uncompressed data in this member relative to # the whole gzip file's uncompressed data. self.uncompressed_data_offset = uncompressed_data_offset def _ReadMemberHeader(self, file_object): """Reads a member header. Args: file_object (FileIO): file-like object to read from. Raises: FileFormatError: if the member header cannot be read. """ file_offset = file_object.get_offset() member_header = self._ReadStructure( file_object, file_offset, self._MEMBER_HEADER_SIZE, self._MEMBER_HEADER, 'member header') if member_header.signature != self._GZIP_SIGNATURE: raise errors.FileFormatError( 'Unsupported signature: 0x{0:04x}.'.format(member_header.signature)) if member_header.compression_method != self._COMPRESSION_METHOD_DEFLATE: raise errors.FileFormatError( 'Unsupported compression method: {0:d}.'.format( member_header.compression_method)) self.modification_time = member_header.modification_time self.operating_system = member_header.operating_system if member_header.flags & self._FLAG_FEXTRA: file_offset = file_object.get_offset() extra_field_data_size = self._ReadStructure( file_object, file_offset, self._UINT16LE_SIZE, self._UINT16LE, 'extra field data size') file_object.seek(extra_field_data_size, os.SEEK_CUR) if member_header.flags & self._FLAG_FNAME: file_offset = file_object.get_offset() string_value = self._ReadString( file_object, file_offset, self._CSTRING, 'original filename') self.original_filename = string_value.rstrip('\x00') if member_header.flags & self._FLAG_FCOMMENT: file_offset = file_object.get_offset() string_value = self._ReadString( file_object, file_offset, self._CSTRING, 'comment') self.comment = string_value.rstrip('\x00') if member_header.flags & 
self._FLAG_FHCRC: file_object.read(2) def _ResetDecompressorState(self): """Resets the state of the internal decompression object.""" self._decompressor_state = _GzipDecompressorState( self._compressed_data_start) def FlushCache(self): """Empties the cache that holds cached decompressed data.""" self._cache = b'' self._cache_start_offset = None self._cache_end_offset = None self._ResetDecompressorState() def GetCacheSize(self): """Determines the size of the uncompressed cached data. Returns: int: number of cached bytes. """ if not self._cache_start_offset or not self._cache_end_offset: return 0 return self._cache_end_offset - self._cache_start_offset def IsCacheFull(self): """Checks whether the uncompressed data cache is full. Returns: bool: True if the cache is full. """ return self.GetCacheSize() >= self._UNCOMPRESSED_DATA_CACHE_SIZE def ReadAtOffset(self, offset, size=None): """Reads a byte string from the gzip member at the specified offset. The function will read a byte string of the specified size or all of the remaining data if no size was specified. Args: offset (int): offset within the uncompressed data in this member to read from. size (Optional[int]): maximum number of bytes to read, where None represents all remaining data, to a maximum of the uncompressed cache size. Returns: bytes: data read. Raises: IOError: if the read failed. ValueError: if a negative read size or offset is specified. 
""" if size is not None and size < 0: raise ValueError('Invalid size value {0!s}'.format(size)) if offset < 0: raise ValueError('Invalid offset value {0!s}'.format(offset)) if size == 0 or offset >= self.uncompressed_data_size: return b'' if self._cache_start_offset is None: self._LoadDataIntoCache(self._file_object, offset) if offset > self._cache_end_offset or offset < self._cache_start_offset: self.FlushCache() self._LoadDataIntoCache(self._file_object, offset) cache_offset = offset - self._cache_start_offset if not size: return self._cache[cache_offset:] data_end_offset = cache_offset + size if data_end_offset > self._cache_end_offset: return self._cache[cache_offset:] return self._cache[cache_offset:data_end_offset] def _LoadDataIntoCache(self, file_object, minimum_offset): """Reads and decompresses the data in the member. This function already loads as much data as possible in the cache, up to UNCOMPRESSED_DATA_CACHE_SIZE bytes. Args: file_object (FileIO): file-like object. minimum_offset (int): offset into this member's uncompressed data at which the cache should start. """ # Decompression can only be performed from beginning to end of the stream. # So, if data before the current position of the decompressor in the stream # is required, it's necessary to throw away the current decompression # state and start again. if minimum_offset < self._decompressor_state.uncompressed_offset: self._ResetDecompressorState() while not self.IsCacheFull(): decompressed_data = self._decompressor_state.Read(file_object) # Note that decompressed_data will be empty if there is no data left # to read and decompress. 
if not decompressed_data: break decompressed_data_length = len(decompressed_data) decompressed_end_offset = self._decompressor_state.uncompressed_offset decompressed_start_offset = ( decompressed_end_offset - decompressed_data_length) data_to_add = decompressed_data added_data_start_offset = decompressed_start_offset if decompressed_start_offset < minimum_offset: data_to_add = None if decompressed_start_offset < minimum_offset < decompressed_end_offset: data_add_offset = decompressed_end_offset - minimum_offset data_to_add = decompressed_data[-data_add_offset:] added_data_start_offset = decompressed_end_offset - data_add_offset if not self.IsCacheFull() and data_to_add: self._cache = b''.join([self._cache, data_to_add]) if self._cache_start_offset is None: self._cache_start_offset = added_data_start_offset if self._cache_end_offset is None: self._cache_end_offset = self._cache_start_offset + len(data_to_add) else: self._cache_end_offset += len(data_to_add) # If there's no more data in the member, the unused_data value is # populated in the decompressor. When this situation arises, we rewind # to the end of the compressed_data section. unused_data = self._decompressor_state.GetUnusedData() if unused_data: seek_offset = -len(unused_data) file_object.seek(seek_offset, os.SEEK_CUR) self._ResetDecompressorState() break class GzipCompressedStream(object): """File-like object of a gzip compressed stream (file). The gzip file format is defined in RFC1952: http://www.zlib.org/rfc-gzip.html Attributes: uncompressed_data_size (int): total size of the decompressed data stored in the gzip file. 
""" def __init__(self): """Initializes a file-like object.""" super(GzipCompressedStream, self).__init__() self._compressed_data_size = -1 self._current_offset = 0 self._file_object = None self._members_by_end_offset = collections.OrderedDict() self.uncompressed_data_size = 0 @property def members(self): """list(GzipMember): members in the gzip file.""" return list(self._members_by_end_offset.values()) def _GetMemberForOffset(self, offset): """Finds the member whose data includes the provided offset. Args: offset (int): offset in the uncompressed data to find the containing member for. Returns: GzipMember: gzip file member or None if not available. Raises: ValueError: if the provided offset is outside of the bounds of the uncompressed data. """ if offset < 0 or offset >= self.uncompressed_data_size: raise ValueError('Offset {0:d} is larger than file size {1:d}.'.format( offset, self.uncompressed_data_size)) for end_offset, member in self._members_by_end_offset.items(): if offset < end_offset: return member return None def Open(self, file_object): """Opens the file-like object defined by path specification. Args: file_object (FileIO): file-like object that contains the gzip compressed stream. Raises: IOError: if the file-like object could not be opened. OSError: if the file-like object could not be opened. """ file_size = file_object.get_size() file_object.seek(0, os.SEEK_SET) uncompressed_data_offset = 0 next_member_offset = 0 while next_member_offset < file_size: member = GzipMember( file_object, next_member_offset, uncompressed_data_offset) uncompressed_data_offset = ( uncompressed_data_offset + member.uncompressed_data_size) self._members_by_end_offset[uncompressed_data_offset] = member self.uncompressed_data_size += member.uncompressed_data_size next_member_offset = member.member_end_offset self._file_object = file_object # Note: that the following functions do not follow the style guide # because they are part of the file-like object interface. 
# pylint: disable=invalid-name def close(self): """Closes the file-like object.""" self._members_by_end_offset = [] if self._file_object: self._file_object = None def read(self, size=None): """Reads a byte string from the gzip file at the current offset. The function will read a byte string up to the specified size or all of the remaining data if no size was specified. Args: size (Optional[int]): number of bytes to read, where None is all remaining data. Returns: bytes: data read. Raises: IOError: if the read failed. OSError: if the read failed. """ data = b'' while ((size and len(data) < size) and self._current_offset < self.uncompressed_data_size): member = self._GetMemberForOffset(self._current_offset) member_offset = self._current_offset - member.uncompressed_data_offset data_read = member.ReadAtOffset(member_offset, size) if not data_read: break self._current_offset += len(data_read) data = b''.join([data, data_read]) return data def seek(self, offset, whence=os.SEEK_SET): """Seeks to an offset within the file-like object. Args: offset (int): offset to seek to. whence (Optional(int)): value that indicates whether offset is an absolute or relative position within the file. Raises: IOError: if the seek failed or the file has not been opened. OSError: if the seek failed or the file has not been opened. """ if not self._file_object: raise IOError('Not opened.') if whence == os.SEEK_CUR: offset += self._current_offset elif whence == os.SEEK_END: offset += self.uncompressed_data_size elif whence != os.SEEK_SET: raise IOError('Unsupported whence.') if offset < 0: raise IOError('Invalid offset value less than zero.') self._current_offset = offset def get_offset(self): """Retrieves the current offset into the file-like object. Returns: int: current offset into the file-like object. Raises: IOError: if the file-like object has not been opened. OSError: if the file-like object has not been opened. 
""" if not self._file_object: raise IOError('Not opened.') return self._current_offset def get_size(self): """Retrieves the size of the file-like object. Returns: int: size of the file-like object data. Raises: IOError: if the file-like object has not been opened. OSError: if the file-like object has not been opened. """ if not self._file_object: raise IOError('Not opened.') return self.uncompressed_data_size
apache-2.0
damdam-s/OpenUpgrade
addons/calendar/contacts.py
389
1414
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Business Applications # Copyright (c) 2011 OpenERP S.A. <http://openerp.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, osv class calendar_contacts(osv.osv): _name = 'calendar.contacts' _columns = { 'user_id': fields.many2one('res.users','Me'), 'partner_id': fields.many2one('res.partner','Employee',required=True, domain=[]), 'active':fields.boolean('active'), } _defaults = { 'user_id': lambda self, cr, uid, ctx: uid, 'active' : True, }
agpl-3.0
adsabs/orcid-service
orcid_service/tests/base.py
1
1380
from flask_testing import TestCase
import testing.postgresql

from orcid_service.models import Base


class TestCaseDatabase(TestCase):
    """
    Base test class for when databases are being used.

    Lifecycle: setUpClass boots a throw-away PostgreSQL server via
    testing.postgresql; each test gets a fresh schema (setUp creates all
    tables, tearDown drops them); tearDownClass stops the server.
    """

    # Connection parameters for the temporary PostgreSQL instance.
    # NOTE(review): testing.postgresql normally picks a free port itself;
    # this pins port 1234 — confirm nothing else binds it on CI hosts.
    postgresql_url_dict = {
        'port': 1234,
        'host': '127.0.0.1',
        'user': 'postgres',
        'database': 'test'
    }
    # SQLAlchemy URL assembled from the dict above (no password; the
    # temporary server runs with trust auth).
    postgresql_url = 'postgresql://{user}@{host}:{port}/{database}' \
        .format(
            user=postgresql_url_dict['user'],
            host=postgresql_url_dict['host'],
            port=postgresql_url_dict['port'],
            database=postgresql_url_dict['database']
        )

    def create_app(self):
        '''Start the wsgi application'''
        # Imported lazily so merely importing this module does not pull in
        # the full application (and its side effects).
        from orcid_service import app
        a = app.create_app(**{
            'SQLALCHEMY_DATABASE_URI': self.postgresql_url,
            'SQLALCHEMY_ECHO': False,
            'TESTING': True,
            'PROPAGATE_EXCEPTIONS': True,
            'TRAP_BAD_REQUEST_ERRORS': True
        })
        return a

    @classmethod
    def setUpClass(cls):
        # Start the temporary PostgreSQL server once for the whole class.
        cls.postgresql = \
            testing.postgresql.Postgresql(**cls.postgresql_url_dict)

    @classmethod
    def tearDownClass(cls):
        # Shut the temporary server down after all tests in the class ran.
        cls.postgresql.stop()

    def setUp(self):
        # Create every table declared on the shared declarative Base.
        Base.metadata.create_all(bind=self.app.db.engine)

    def tearDown(self):
        # Discard the session and drop all tables so tests stay isolated.
        self.app.db.session.remove()
        self.app.db.drop_all()
mit
balloob/home-assistant
homeassistant/components/octoprint/binary_sensor.py
21
2538
"""Support for monitoring OctoPrint binary sensors.""" import logging import requests from homeassistant.components.binary_sensor import BinarySensorEntity from . import BINARY_SENSOR_TYPES, DOMAIN as COMPONENT_DOMAIN _LOGGER = logging.getLogger(__name__) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the available OctoPrint binary sensors.""" if discovery_info is None: return name = discovery_info["name"] base_url = discovery_info["base_url"] monitored_conditions = discovery_info["sensors"] octoprint_api = hass.data[COMPONENT_DOMAIN][base_url] devices = [] for octo_type in monitored_conditions: new_sensor = OctoPrintBinarySensor( octoprint_api, octo_type, BINARY_SENSOR_TYPES[octo_type][2], name, BINARY_SENSOR_TYPES[octo_type][3], BINARY_SENSOR_TYPES[octo_type][0], BINARY_SENSOR_TYPES[octo_type][1], "flags", ) devices.append(new_sensor) add_entities(devices, True) class OctoPrintBinarySensor(BinarySensorEntity): """Representation an OctoPrint binary sensor.""" def __init__( self, api, condition, sensor_type, sensor_name, unit, endpoint, group, tool=None ): """Initialize a new OctoPrint sensor.""" self.sensor_name = sensor_name if tool is None: self._name = f"{sensor_name} {condition}" else: self._name = f"{sensor_name} {condition}" self.sensor_type = sensor_type self.api = api self._state = False self._unit_of_measurement = unit self.api_endpoint = endpoint self.api_group = group self.api_tool = tool _LOGGER.debug("Created OctoPrint binary sensor %r", self) @property def name(self): """Return the name of the sensor.""" return self._name @property def is_on(self): """Return true if binary sensor is on.""" return bool(self._state) @property def device_class(self): """Return the class of this sensor, from DEVICE_CLASSES.""" return None def update(self): """Update state of sensor.""" try: self._state = self.api.update( self.sensor_type, self.api_endpoint, self.api_group, self.api_tool ) except requests.exceptions.ConnectionError: # Error 
calling the api, already logged in api.update() return
apache-2.0
vipul-sharma20/oh-mainline
vendor/packages/zope.interface/src/zope/interface/tests/test_verify.py
22
4553
############################################################################## # # Copyright (c) 2001, 2002 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## """Interface Verify tests """ import doctest import unittest from zope.interface import Interface, implements, classImplements, Attribute from zope.interface.verify import verifyClass, verifyObject from zope.interface.exceptions import DoesNotImplement, BrokenImplementation from zope.interface.exceptions import BrokenMethodImplementation class Test(unittest.TestCase): def testNotImplemented(self): class C(object): pass class I(Interface): pass self.assertRaises(DoesNotImplement, verifyClass, I, C) classImplements(C, I) verifyClass(I, C) def testMissingAttr(self): class I(Interface): def f(): pass class C(object): implements(I) self.assertRaises(BrokenImplementation, verifyClass, I, C) C.f=lambda self: None verifyClass(I, C) def testMissingAttr_with_Extended_Interface(self): class II(Interface): def f(): pass class I(II): pass class C(object): implements(I) self.assertRaises(BrokenImplementation, verifyClass, I, C) C.f=lambda self: None verifyClass(I, C) def testWrongArgs(self): class I(Interface): def f(a): pass class C(object): def f(self, b): pass implements(I) # We no longer require names to match. 
#self.assertRaises(BrokenMethodImplementation, verifyClass, I, C) C.f=lambda self, a: None verifyClass(I, C) C.f=lambda self, **kw: None self.assertRaises(BrokenMethodImplementation, verifyClass, I, C) C.f=lambda self, a, *args: None verifyClass(I, C) C.f=lambda self, a, *args, **kw: None verifyClass(I, C) C.f=lambda self, *args: None verifyClass(I, C) def testExtraArgs(self): class I(Interface): def f(a): pass class C(object): def f(self, a, b): pass implements(I) self.assertRaises(BrokenMethodImplementation, verifyClass, I, C) C.f=lambda self, a: None verifyClass(I, C) C.f=lambda self, a, b=None: None verifyClass(I, C) def testNoVar(self): class I(Interface): def f(a, *args): pass class C(object): def f(self, a): pass implements(I) self.assertRaises(BrokenMethodImplementation, verifyClass, I, C) C.f=lambda self, a, *foo: None verifyClass(I, C) def testNoKW(self): class I(Interface): def f(a, **args): pass class C(object): def f(self, a): pass implements(I) self.assertRaises(BrokenMethodImplementation, verifyClass, I, C) C.f=lambda self, a, **foo: None verifyClass(I, C) def testModule(self): from zope.interface.tests.ifoo import IFoo from zope.interface.tests import dummy verifyObject(IFoo, dummy) def testMethodForAttr(self): class IFoo(Interface): foo = Attribute("The foo Attribute") class Foo: implements(IFoo) def foo(self): pass verifyClass(IFoo, Foo) def testNonMethodForMethod(self): class IBar(Interface): def foo(): pass class Bar: implements(IBar) foo = 1 self.assertRaises(BrokenMethodImplementation, verifyClass, IBar, Bar) def test_suite(): loader=unittest.TestLoader() return unittest.TestSuite(( doctest.DocFileSuite( '../verify.txt', optionflags=doctest.NORMALIZE_WHITESPACE), loader.loadTestsFromTestCase(Test), )) if __name__=='__main__': unittest.TextTestRunner().run(test_suite())
agpl-3.0
ballotify/django-backend
ballotify/apps/questions/migrations/0001_initial.py
1
2184
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import model_utils.fields import django.utils.timezone from django.conf import settings import uuid class Migration(migrations.Migration): dependencies = [ ('streams', '0001_initial'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Choice', fields=[ ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)), ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)), ('id', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)), ('title', models.CharField(max_length=255)), ], options={ 'ordering': ('created',), }, ), migrations.CreateModel( name='Question', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)), ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)), ('title', models.CharField(max_length=255)), ('slug', models.SlugField(unique=True, max_length=255)), ('stream', models.ForeignKey(related_name='questions', blank=True, to='streams.Stream', null=True)), ('user', models.ForeignKey(related_name='questions', to=settings.AUTH_USER_MODEL)), ], options={ 'ordering': ('-created',), }, ), migrations.AddField( model_name='choice', name='question', field=models.ForeignKey(related_name='choices', to='questions.Question'), ), ]
agpl-3.0
dhananjay92/servo
tests/wpt/web-platform-tests/referrer-policy/generic/tools/spec_validator.py
326
6673
#!/usr/bin/env python import json, sys from common_paths import * def assert_non_empty_string(obj, field): assert field in obj, 'Missing field "%s"' % field assert isinstance(obj[field], basestring), \ 'Field "%s" must be a string' % field assert len(obj[field]) > 0, 'Field "%s" must not be empty' % field def assert_non_empty_list(obj, field): assert isinstance(obj[field], list), \ '%s must be a list' % field assert len(obj[field]) > 0, \ '%s list must not be empty' % field def assert_non_empty_dict(obj, field): assert isinstance(obj[field], dict), \ '%s must be a dict' % field assert len(obj[field]) > 0, \ '%s dict must not be empty' % field def assert_contains(obj, field): assert field in obj, 'Must contain field "%s"' % field def assert_value_from(obj, field, items): assert obj[field] in items, \ 'Field "%s" must be from: %s' % (field, str(items)) def assert_atom_or_list_items_from(obj, field, items): if isinstance(obj[field], basestring) or isinstance(obj[field], int): assert_value_from(obj, field, items) return assert_non_empty_list(obj, field) for allowed_value in obj[field]: assert allowed_value != '*', "Wildcard is not supported for lists!" assert allowed_value in items, \ 'Field "%s" must be from: %s' % (field, str(items)) def assert_contains_only_fields(obj, expected_fields): for expected_field in expected_fields: assert_contains(obj, expected_field) for actual_field in obj: assert actual_field in expected_fields, \ 'Unexpected field "%s".' % actual_field def assert_value_unique_in(value, used_values): assert value not in used_values, 'Duplicate value "%s"!' % str(value) used_values[value] = True def validate(spec_json, details): """ Validates the json specification for generating tests. 
""" details['object'] = spec_json assert_contains_only_fields(spec_json, ["specification", "referrer_policy_schema", "test_expansion_schema", "subresource_path", "excluded_tests"]) assert_non_empty_list(spec_json, "specification") assert_non_empty_list(spec_json, "referrer_policy_schema") assert_non_empty_dict(spec_json, "test_expansion_schema") assert_non_empty_list(spec_json, "excluded_tests") specification = spec_json['specification'] referrer_policy_schema = spec_json['referrer_policy_schema'] test_expansion_schema = spec_json['test_expansion_schema'] excluded_tests = spec_json['excluded_tests'] subresource_path = spec_json['subresource_path'] valid_test_expansion_fields = ['name'] + test_expansion_schema.keys() # Validate each single spec. for spec in specification: details['object'] = spec # Validate required fields for a single spec. assert_contains_only_fields(spec, ['name', 'title', 'description', 'referrer_policy', 'specification_url', 'test_expansion']) assert_non_empty_string(spec, 'name') assert_non_empty_string(spec, 'title') assert_non_empty_string(spec, 'description') assert_non_empty_string(spec, 'specification_url') assert_value_from(spec, 'referrer_policy', referrer_policy_schema) assert_non_empty_list(spec, 'test_expansion') # Validate spec's test expansion. used_spec_names = {} for spec_exp in spec['test_expansion']: details['object'] = spec_exp assert_non_empty_string(spec_exp, 'name') # The name is unique in same expansion group. assert_value_unique_in((spec_exp['expansion'], spec_exp['name']), used_spec_names) assert_contains_only_fields(spec_exp, valid_test_expansion_fields) for artifact in test_expansion_schema: details['test_expansion_field'] = artifact assert_atom_or_list_items_from( spec_exp, artifact, ['*'] + test_expansion_schema[artifact]) del details['test_expansion_field'] # Validate the test_expansion schema members. 
details['object'] = test_expansion_schema assert_contains_only_fields(test_expansion_schema, ['expansion', 'delivery_method', 'redirection', 'origin', 'source_protocol', 'target_protocol', 'subresource', 'referrer_url']) # Validate excluded tests. details['object'] = excluded_tests for excluded_test_expansion in excluded_tests: assert_contains_only_fields(excluded_test_expansion, valid_test_expansion_fields) details['object'] = excluded_test_expansion for artifact in test_expansion_schema: details['test_expansion_field'] = artifact assert_atom_or_list_items_from( excluded_test_expansion, artifact, ['*'] + test_expansion_schema[artifact]) del details['test_expansion_field'] # Validate subresource paths. details['object'] = subresource_path assert_contains_only_fields(subresource_path, test_expansion_schema['subresource']); for subresource in subresource_path: local_rel_path = "." + subresource_path[subresource] full_path = os.path.join(test_root_directory, local_rel_path) assert os.path.isfile(full_path), "%s is not an existing file" % path del details['object'] def assert_valid_spec_json(spec_json): error_details = {} try: validate(spec_json, error_details) except AssertionError, err: print 'ERROR:', err.message print json.dumps(error_details, indent=4) sys.exit(1) def main(): spec_json = load_spec_json(); assert_valid_spec_json(spec_json) print "Spec JSON is valid." if __name__ == '__main__': main()
mpl-2.0
thomasgilgenast/gilgistatus-nonrel
django/templatetags/cache.py
309
2406
from django.template import Library, Node, TemplateSyntaxError, Variable, VariableDoesNotExist
from django.template import resolve_variable
from django.core.cache import cache
from django.utils.encoding import force_unicode
from django.utils.http import urlquote
from django.utils.hashcompat import md5_constructor

register = Library()


class CacheNode(Node):
    """Node that caches the rendered output of its child nodelist.

    The cache key combines the fragment name with an MD5 digest of the
    resolved vary-on values, so each distinct combination of vary-on
    arguments gets its own cache entry.
    """

    def __init__(self, nodelist, expire_time_var, fragment_name, vary_on):
        self.nodelist = nodelist
        # Wrapped in Variable so the timeout may be a template variable;
        # it is resolved lazily at render time.
        self.expire_time_var = Variable(expire_time_var)
        self.fragment_name = fragment_name
        self.vary_on = vary_on

    def render(self, context):
        """Return the cached fragment, rendering and storing it on a miss.

        Raises TemplateSyntaxError if the timeout variable is undefined or
        does not resolve to an integer.
        """
        try:
            expire_time = self.expire_time_var.resolve(context)
        except VariableDoesNotExist:
            raise TemplateSyntaxError('"cache" tag got an unknown variable: %r' % self.expire_time_var.var)
        try:
            expire_time = int(expire_time)
        except (ValueError, TypeError):
            raise TemplateSyntaxError('"cache" tag got a non-integer timeout value: %r' % expire_time)
        # Build a unicode key for this fragment and all vary-on's.
        args = md5_constructor(u':'.join([urlquote(resolve_variable(var, context)) for var in self.vary_on]))
        cache_key = 'template.cache.%s.%s' % (self.fragment_name, args.hexdigest())
        value = cache.get(cache_key)
        if value is None:
            value = self.nodelist.render(context)
            cache.set(cache_key, value, expire_time)
        return value


def do_cache(parser, token):
    """
    This will cache the contents of a template fragment for a given amount
    of time.

    Usage::

        {% load cache %}
        {% cache [expire_time] [fragment_name] %}
            .. some expensive processing ..
        {% endcache %}

    This tag also supports varying by a list of arguments::

        {% load cache %}
        {% cache [expire_time] [fragment_name] [var1] [var2] .. %}
            .. some expensive processing ..
        {% endcache %}

    Each unique set of arguments will result in a unique cache entry.
    """
    nodelist = parser.parse(('endcache',))
    parser.delete_first_token()
    tokens = token.contents.split()
    if len(tokens) < 3:
        # Use %s here: the tag name is already a plain string, and the old
        # "'%r'" form wrapped it in a second, spurious pair of quotes in the
        # error message (e.g. ''cache'' instead of 'cache').
        raise TemplateSyntaxError(u"'%s' tag requires at least 2 arguments." % tokens[0])
    return CacheNode(nodelist, tokens[1], tokens[2], tokens[3:])

register.tag('cache', do_cache)
bsd-3-clause
trabucayre/gnuradio
gnuradio-runtime/python/gnuradio/ctrlport/monitor.py
6
1776
#!/usr/bin/env python
#
# Copyright 2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#

import sys, subprocess, re, signal, time, atexit, os
from gnuradio import gr


class monitor(object):
    """Launch and manage an external ControlPort monitoring tool.

    On construction, enables the ControlPort/PerfCounters export
    preferences; start() spawns the monitor process pointed at the first
    registered ControlPort endpoint.  The child process is terminated at
    interpreter exit via the atexit hook registered in __init__.
    """

    def __init__(self, tool="gr-ctrlport-monitor"):
        print("ControlPort Monitor running.")
        self.started = False
        self.tool = tool
        atexit.register(self.shutdown)

        # setup export prefs
        gr.prefs().singleton().set_bool("ControlPort", "on", True)
        gr.prefs().singleton().set_bool("PerfCounters", "on", True)
        gr.prefs().singleton().set_bool("PerfCounters", "export", True)
        if(tool == "gr-perf-monitorx"):
            gr.prefs().singleton().set_bool("ControlPort", "edges_list", True)

    def start(self):
        """Spawn the monitor subprocess against the first ControlPort endpoint.

        Best-effort: any failure (no endpoint registered, regex mismatch,
        tool not installed) is reported and leaves self.proc as None.
        """
        try:
            print("monitor::endpoints() = %s" % (gr.rpcmanager_get().endpoints()))
            ep = gr.rpcmanager_get().endpoints()[0]
            cmd = [self.tool,
                   re.search(r"-h (\S+|\d+\.\d+\.\d+\.\d+)", ep).group(1),
                   re.search(r"-p (\d+)", ep).group(1)]
            print("running: %s" % (str(cmd)))
            self.proc = subprocess.Popen(cmd)
            self.started = True
        except Exception:
            # Catch Exception rather than using a bare except so that
            # KeyboardInterrupt/SystemExit still propagate; everything else
            # keeps the original best-effort "report and continue" behavior.
            self.proc = None
            print("Failed to connect to ControlPort. Please make sure that "
                  "you have Thrift installed and check your firewall rules.")

    def stop(self):
        """Terminate the child process if it is still running."""
        if(self.proc):
            # poll() refreshes returncode; the old "self.proc.returncode ==
            # None" check read a stale attribute (returncode is only set by
            # poll()/wait()), so an already-exited child looked alive and a
            # live one was compared with "==" instead of "is".
            if(self.proc.poll() is None):
                print("\tcalling stop on shutdown")
                self.proc.terminate()
        else:
            print("\tno proc to shut down, exiting")

    def shutdown(self):
        """atexit hook: stop the monitor tool if it was ever started."""
        print("ctrlport monitor received shutdown signal")
        if(self.started):
            self.stop()
gpl-3.0
nw328/bioCode
diveintopython-5.4/py/soundextest.py
4
1122
"""Unit test for soundex.py This program is part of "Dive Into Python", a free Python book for experienced programmers. Visit http://diveintopython.org/ for the latest version. """ __author__ = "Mark Pilgrim (mark@diveintopython.org)" __version__ = "$Revision: 1.1 $" __date__ = "$Date: 2004/05/06 17:18:17 $" __copyright__ = "Copyright (c) 2004 Mark Pilgrim" __license__ = "Python" import soundex import unittest class KnownValues(unittest.TestCase): knownValues = (('', '0000'), ('Woo', 'W000'), ('Pilgrim', 'P426'), ('Radiohead', 'R330'), ('Flingjingwaller', 'F452'), ('Euler', 'E460'), ('Ellery', 'E460'), ('Gauss', 'G200'), ('Ghosh', 'G200'), ('Hilbert', 'H416'), ('Heilbronn', 'H416'), ('Knuth', 'K530'), ('Kant', 'K530'), ('Lukasiewicz', 'L222'), ('Lissajous', 'L222') ) def testKnownValues(self): """soundex should give known result with known input""" for name, result in self.knownValues: self.assertEqual(soundex.soundex(name), result) if __name__ == "__main__": unittest.main()
apache-2.0
rahushen/ansible
lib/ansible/modules/database/proxysql/proxysql_mysql_users.py
42
16189
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type DOCUMENTATION = ''' --- module: proxysql_mysql_users version_added: "2.3" author: "Ben Mildren (@bmildren)" short_description: Adds or removes mysql users from proxysql admin interface. description: - The M(proxysql_mysql_users) module adds or removes mysql users using the proxysql admin interface. options: username: description: - Name of the user connecting to the mysqld or ProxySQL instance. required: True password: description: - Password of the user connecting to the mysqld or ProxySQL instance. active: description: - A user with I(active) set to C(False) will be tracked in the database, but will be never loaded in the in-memory data structures. If omitted the proxysql database default for I(active) is C(True). use_ssl: description: - If I(use_ssl) is set to C(True), connections by this user will be made using SSL connections. If omitted the proxysql database default for I(use_ssl) is C(False). default_hostgroup: description: - If there is no matching rule for the queries sent by this user, the traffic it generates is sent to the specified hostgroup. If omitted the proxysql database default for I(use_ssl) is 0. default_schema: description: - The schema to which the connection should change to by default. transaction_persistent: description: - If this is set for the user with which the MySQL client is connecting to ProxySQL (thus a "frontend" user), transactions started within a hostgroup will remain within that hostgroup regardless of any other rules. If omitted the proxysql database default for I(transaction_persistent) is C(False). 
fast_forward: description: - If I(fast_forward) is set to C(True), I(fast_forward) will bypass the query processing layer (rewriting, caching) and pass through the query directly as is to the backend server. If omitted the proxysql database default for I(fast_forward) is C(False). backend: description: - If I(backend) is set to C(True), this (username, password) pair is used for authenticating to the ProxySQL instance. default: True frontend: description: - If I(frontend) is set to C(True), this (username, password) pair is used for authenticating to the mysqld servers against any hostgroup. default: True max_connections: description: - The maximum number of connections ProxySQL will open to the backend for this user. If omitted the proxysql database default for I(max_connections) is 10000. state: description: - When C(present) - adds the user, when C(absent) - removes the user. choices: [ "present", "absent" ] default: present extends_documentation_fragment: - proxysql.managing_config - proxysql.connectivity ''' EXAMPLES = ''' --- # This example adds a user, it saves the mysql user config to disk, but # avoids loading the mysql user config to runtime (this might be because # several users are being added and the user wants to push the config to # runtime in a single batch using the M(proxysql_manage_config) module). It # uses supplied credentials to connect to the proxysql admin interface. - proxysql_mysql_users: login_user: 'admin' login_password: 'admin' username: 'productiondba' state: present load_to_runtime: False # This example removes a user, saves the mysql user config to disk, and # dynamically loads the mysql user config to runtime. It uses credentials # in a supplied config file to connect to the proxysql admin interface. 
- proxysql_mysql_users: config_file: '~/proxysql.cnf' username: 'mysqlboy' state: absent ''' RETURN = ''' stdout: description: The mysql user modified or removed from proxysql returned: On create/update will return the newly modified user, on delete it will return the deleted record. type: dict sample: changed: true msg: Added user to mysql_users state: present user: active: 1 backend: 1 default_hostgroup: 1 default_schema: null fast_forward: 0 frontend: 1 max_connections: 10000 password: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER schema_locked: 0 transaction_persistent: 0 use_ssl: 0 username: guest_ro username: guest_ro ''' ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'community'} from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.mysql import mysql_connect from ansible.module_utils.six import iteritems from ansible.module_utils._text import to_native try: import MySQLdb import MySQLdb.cursors except ImportError: MYSQLDB_FOUND = False else: MYSQLDB_FOUND = True # =========================================== # proxysql module specific support methods. 
# def perform_checks(module): if module.params["login_port"] < 0 \ or module.params["login_port"] > 65535: module.fail_json( msg="login_port must be a valid unix port number (0-65535)" ) if not MYSQLDB_FOUND: module.fail_json( msg="the python mysqldb module is required" ) def save_config_to_disk(cursor): cursor.execute("SAVE MYSQL USERS TO DISK") return True def load_config_to_runtime(cursor): cursor.execute("LOAD MYSQL USERS TO RUNTIME") return True class ProxySQLUser(object): def __init__(self, module): self.state = module.params["state"] self.save_to_disk = module.params["save_to_disk"] self.load_to_runtime = module.params["load_to_runtime"] self.username = module.params["username"] self.backend = module.params["backend"] self.frontend = module.params["frontend"] config_data_keys = ["password", "active", "use_ssl", "default_hostgroup", "default_schema", "transaction_persistent", "fast_forward", "max_connections"] self.config_data = dict((k, module.params[k]) for k in config_data_keys) def check_user_config_exists(self, cursor): query_string = \ """SELECT count(*) AS `user_count` FROM mysql_users WHERE username = %s AND backend = %s AND frontend = %s""" query_data = \ [self.username, self.backend, self.frontend] cursor.execute(query_string, query_data) check_count = cursor.fetchone() return (int(check_count['user_count']) > 0) def check_user_privs(self, cursor): query_string = \ """SELECT count(*) AS `user_count` FROM mysql_users WHERE username = %s AND backend = %s AND frontend = %s""" query_data = \ [self.username, self.backend, self.frontend] for col, val in iteritems(self.config_data): if val is not None: query_data.append(val) query_string += "\n AND " + col + " = %s" cursor.execute(query_string, query_data) check_count = cursor.fetchone() return (int(check_count['user_count']) > 0) def get_user_config(self, cursor): query_string = \ """SELECT * FROM mysql_users WHERE username = %s AND backend = %s AND frontend = %s""" query_data = \ [self.username, 
self.backend, self.frontend] cursor.execute(query_string, query_data) user = cursor.fetchone() return user def create_user_config(self, cursor): query_string = \ """INSERT INTO mysql_users ( username, backend, frontend""" cols = 3 query_data = \ [self.username, self.backend, self.frontend] for col, val in iteritems(self.config_data): if val is not None: cols += 1 query_data.append(val) query_string += ",\n" + col query_string += \ (")\n" + "VALUES (" + "%s ," * cols) query_string = query_string[:-2] query_string += ")" cursor.execute(query_string, query_data) return True def update_user_config(self, cursor): query_string = """UPDATE mysql_users""" cols = 0 query_data = [] for col, val in iteritems(self.config_data): if val is not None: cols += 1 query_data.append(val) if cols == 1: query_string += "\nSET " + col + "= %s," else: query_string += "\n " + col + " = %s," query_string = query_string[:-1] query_string += ("\nWHERE username = %s\n AND backend = %s" + "\n AND frontend = %s") query_data.append(self.username) query_data.append(self.backend) query_data.append(self.frontend) cursor.execute(query_string, query_data) return True def delete_user_config(self, cursor): query_string = \ """DELETE FROM mysql_users WHERE username = %s AND backend = %s AND frontend = %s""" query_data = \ [self.username, self.backend, self.frontend] cursor.execute(query_string, query_data) return True def manage_config(self, cursor, state): if state: if self.save_to_disk: save_config_to_disk(cursor) if self.load_to_runtime: load_config_to_runtime(cursor) def create_user(self, check_mode, result, cursor): if not check_mode: result['changed'] = \ self.create_user_config(cursor) result['msg'] = "Added user to mysql_users" result['user'] = \ self.get_user_config(cursor) self.manage_config(cursor, result['changed']) else: result['changed'] = True result['msg'] = ("User would have been added to" + " mysql_users, however check_mode" + " is enabled.") def update_user(self, check_mode, result, 
cursor): if not check_mode: result['changed'] = \ self.update_user_config(cursor) result['msg'] = "Updated user in mysql_users" result['user'] = \ self.get_user_config(cursor) self.manage_config(cursor, result['changed']) else: result['changed'] = True result['msg'] = ("User would have been updated in" + " mysql_users, however check_mode" + " is enabled.") def delete_user(self, check_mode, result, cursor): if not check_mode: result['user'] = \ self.get_user_config(cursor) result['changed'] = \ self.delete_user_config(cursor) result['msg'] = "Deleted user from mysql_users" self.manage_config(cursor, result['changed']) else: result['changed'] = True result['msg'] = ("User would have been deleted from" + " mysql_users, however check_mode is" + " enabled.") # =========================================== # Module execution. # def main(): module = AnsibleModule( argument_spec=dict( login_user=dict(default=None, type='str'), login_password=dict(default=None, no_log=True, type='str'), login_host=dict(default="127.0.0.1"), login_unix_socket=dict(default=None), login_port=dict(default=6032, type='int'), config_file=dict(default='', type='path'), username=dict(required=True, type='str'), password=dict(no_log=True, type='str'), active=dict(type='bool'), use_ssl=dict(type='bool'), default_hostgroup=dict(type='int'), default_schema=dict(type='str'), transaction_persistent=dict(type='bool'), fast_forward=dict(type='bool'), backend=dict(default=True, type='bool'), frontend=dict(default=True, type='bool'), max_connections=dict(type='int'), state=dict(default='present', choices=['present', 'absent']), save_to_disk=dict(default=True, type='bool'), load_to_runtime=dict(default=True, type='bool') ), supports_check_mode=True ) perform_checks(module) login_user = module.params["login_user"] login_password = module.params["login_password"] config_file = module.params["config_file"] cursor = None try: cursor = mysql_connect(module, login_user, login_password, config_file, 
cursor_class=MySQLdb.cursors.DictCursor) except MySQLdb.Error as e: module.fail_json( msg="unable to connect to ProxySQL Admin Module.. %s" % to_native(e) ) proxysql_user = ProxySQLUser(module) result = {} result['state'] = proxysql_user.state if proxysql_user.username: result['username'] = proxysql_user.username if proxysql_user.state == "present": try: if not proxysql_user.check_user_privs(cursor): if not proxysql_user.check_user_config_exists(cursor): proxysql_user.create_user(module.check_mode, result, cursor) else: proxysql_user.update_user(module.check_mode, result, cursor) else: result['changed'] = False result['msg'] = ("The user already exists in mysql_users" + " and doesn't need to be updated.") result['user'] = \ proxysql_user.get_user_config(cursor) except MySQLdb.Error as e: module.fail_json( msg="unable to modify user.. %s" % to_native(e) ) elif proxysql_user.state == "absent": try: if proxysql_user.check_user_config_exists(cursor): proxysql_user.delete_user(module.check_mode, result, cursor) else: result['changed'] = False result['msg'] = ("The user is already absent from the" + " mysql_users memory configuration") except MySQLdb.Error as e: module.fail_json( msg="unable to remove user.. %s" % to_native(e) ) module.exit_json(**result) if __name__ == '__main__': main()
gpl-3.0
vicnet/weboob
modules/billetreduc/test.py
2
1622
# -*- coding: utf-8 -*-

# Copyright(C) 2017 Vincent A
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.

from __future__ import unicode_literals

from weboob.capabilities.base import empty
from weboob.capabilities.calendar import Query
from weboob.tools.test import BackendTest


class BilletreducTest(BackendTest):
    MODULE = 'billetreduc'

    def test_basic_search(self):
        """Search events in Paris and sanity-check the first ten results."""
        query = Query()
        query.city = 'paris'
        seen = 0
        for event in self.backend.search_events(query):
            # Every returned event must carry the core calendar fields.
            assert event.summary
            assert event.description
            assert event.start_date
            assert event.end_date
            assert event.start_date <= event.end_date
            assert event.city
            assert event.location
            assert not empty(event.price)
            assert event.category
            seen += 1
            if seen >= 10:
                break
        else:
            # Fewer than ten events came back before the iterator ran dry.
            assert False, 'not enough events'
lgpl-3.0
tuxfux-hlp-notes/python-batches
archieves/batch-57/modules/sheets/lib/python2.7/site-packages/setuptools/archive_util.py
320
6613
"""Utilities for extracting common archive formats""" import zipfile import tarfile import os import shutil import posixpath import contextlib from distutils.errors import DistutilsError from pkg_resources import ensure_directory, ContextualZipFile __all__ = [ "unpack_archive", "unpack_zipfile", "unpack_tarfile", "default_filter", "UnrecognizedFormat", "extraction_drivers", "unpack_directory", ] class UnrecognizedFormat(DistutilsError): """Couldn't recognize the archive type""" def default_filter(src, dst): """The default progress/filter callback; returns True for all files""" return dst def unpack_archive(filename, extract_dir, progress_filter=default_filter, drivers=None): """Unpack `filename` to `extract_dir`, or raise ``UnrecognizedFormat`` `progress_filter` is a function taking two arguments: a source path internal to the archive ('/'-separated), and a filesystem path where it will be extracted. The callback must return the desired extract path (which may be the same as the one passed in), or else ``None`` to skip that file or directory. The callback can thus be used to report on the progress of the extraction, as well as to filter the items extracted or alter their extraction paths. `drivers`, if supplied, must be a non-empty sequence of functions with the same signature as this function (minus the `drivers` argument), that raise ``UnrecognizedFormat`` if they do not support extracting the designated archive type. The `drivers` are tried in sequence until one is found that does not raise an error, or until all are exhausted (in which case ``UnrecognizedFormat`` is raised). If you do not supply a sequence of drivers, the module's ``extraction_drivers`` constant will be used, which means that ``unpack_zipfile`` and ``unpack_tarfile`` will be tried, in that order. 
""" for driver in drivers or extraction_drivers: try: driver(filename, extract_dir, progress_filter) except UnrecognizedFormat: continue else: return else: raise UnrecognizedFormat( "Not a recognized archive type: %s" % filename ) def unpack_directory(filename, extract_dir, progress_filter=default_filter): """"Unpack" a directory, using the same interface as for archives Raises ``UnrecognizedFormat`` if `filename` is not a directory """ if not os.path.isdir(filename): raise UnrecognizedFormat("%s is not a directory" % filename) paths = { filename: ('', extract_dir), } for base, dirs, files in os.walk(filename): src, dst = paths[base] for d in dirs: paths[os.path.join(base, d)] = src + d + '/', os.path.join(dst, d) for f in files: target = os.path.join(dst, f) target = progress_filter(src + f, target) if not target: # skip non-files continue ensure_directory(target) f = os.path.join(base, f) shutil.copyfile(f, target) shutil.copystat(f, target) def unpack_zipfile(filename, extract_dir, progress_filter=default_filter): """Unpack zip `filename` to `extract_dir` Raises ``UnrecognizedFormat`` if `filename` is not a zipfile (as determined by ``zipfile.is_zipfile()``). See ``unpack_archive()`` for an explanation of the `progress_filter` argument. """ if not zipfile.is_zipfile(filename): raise UnrecognizedFormat("%s is not a zip file" % (filename,)) with ContextualZipFile(filename) as z: for info in z.infolist(): name = info.filename # don't extract absolute paths or ones with .. in them if name.startswith('/') or '..' 
in name.split('/'): continue target = os.path.join(extract_dir, *name.split('/')) target = progress_filter(name, target) if not target: continue if name.endswith('/'): # directory ensure_directory(target) else: # file ensure_directory(target) data = z.read(info.filename) with open(target, 'wb') as f: f.write(data) unix_attributes = info.external_attr >> 16 if unix_attributes: os.chmod(target, unix_attributes) def unpack_tarfile(filename, extract_dir, progress_filter=default_filter): """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir` Raises ``UnrecognizedFormat`` if `filename` is not a tarfile (as determined by ``tarfile.open()``). See ``unpack_archive()`` for an explanation of the `progress_filter` argument. """ try: tarobj = tarfile.open(filename) except tarfile.TarError: raise UnrecognizedFormat( "%s is not a compressed or uncompressed tar file" % (filename,) ) with contextlib.closing(tarobj): # don't do any chowning! tarobj.chown = lambda *args: None for member in tarobj: name = member.name # don't extract absolute paths or ones with .. in them if not name.startswith('/') and '..' not in name.split('/'): prelim_dst = os.path.join(extract_dir, *name.split('/')) # resolve any links and to extract the link targets as normal # files while member is not None and (member.islnk() or member.issym()): linkpath = member.linkname if member.issym(): base = posixpath.dirname(member.name) linkpath = posixpath.join(base, linkpath) linkpath = posixpath.normpath(linkpath) member = tarobj._getmember(linkpath) if member is not None and (member.isfile() or member.isdir()): final_dst = progress_filter(name, prelim_dst) if final_dst: if final_dst.endswith(os.sep): final_dst = final_dst[:-1] try: # XXX Ugh tarobj._extract_member(member, final_dst) except tarfile.ExtractError: # chown/chmod/mkfifo/mknode/makedev failed pass return True extraction_drivers = unpack_directory, unpack_zipfile, unpack_tarfile
gpl-3.0
hurrinico/stock-logistics-workflow
__unported__/stock_move_backdating/__init__.py
13
1117
# -*- coding: utf-8 -*- ############################################################################## # # Copyright (C) 2012 BREMSKERL-REIBBELAGWERKE EMMERLING GmbH & Co. KG # Author Marco Dieckhoff # Copyright (C) 2013 Agile Business Group sagl (<http://www.agilebg.com>) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import stock import wizard import account
agpl-3.0
huang4fstudio/django
django/conf/locale/fi/formats.py
504
1390
# -*- encoding: utf-8 -*- # This file is distributed under the same license as the Django package. # from __future__ import unicode_literals # The *_FORMAT strings use the Django date format syntax, # see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = 'j. E Y' TIME_FORMAT = 'G.i' DATETIME_FORMAT = r'j. E Y \k\e\l\l\o G.i' YEAR_MONTH_FORMAT = 'F Y' MONTH_DAY_FORMAT = 'j. F' SHORT_DATE_FORMAT = 'j.n.Y' SHORT_DATETIME_FORMAT = 'j.n.Y G.i' FIRST_DAY_OF_WEEK = 1 # Monday # The *_INPUT_FORMATS strings use the Python strftime format syntax, # see http://docs.python.org/library/datetime.html#strftime-strptime-behavior DATE_INPUT_FORMATS = [ '%d.%m.%Y', # '20.3.2014' '%d.%m.%y', # '20.3.14' ] DATETIME_INPUT_FORMATS = [ '%d.%m.%Y %H.%M.%S', # '20.3.2014 14.30.59' '%d.%m.%Y %H.%M.%S.%f', # '20.3.2014 14.30.59.000200' '%d.%m.%Y %H.%M', # '20.3.2014 14.30' '%d.%m.%Y', # '20.3.2014' '%d.%m.%y %H.%M.%S', # '20.3.14 14.30.59' '%d.%m.%y %H.%M.%S.%f', # '20.3.14 14.30.59.000200' '%d.%m.%y %H.%M', # '20.3.14 14.30' '%d.%m.%y', # '20.3.14' ] TIME_INPUT_FORMATS = [ '%H.%M.%S', # '14.30.59' '%H.%M.%S.%f', # '14.30.59.000200' '%H.%M', # '14.30' ] DECIMAL_SEPARATOR = ',' THOUSAND_SEPARATOR = '\xa0' # Non-breaking space NUMBER_GROUPING = 3
bsd-3-clause
ryfeus/lambda-packs
Keras_tensorflow_nightly/source2.7/numpy/testing/nose_tools/noseclasses.py
8
14599
# These classes implement a doctest runner plugin for nose, a "known failure" # error class, and a customized TestProgram for NumPy. # Because this module imports nose directly, it should not # be used except by nosetester.py to avoid a general NumPy # dependency on nose. from __future__ import division, absolute_import, print_function import os import sys import doctest import inspect import numpy import nose from nose.plugins import doctests as npd from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin from nose.plugins.base import Plugin from nose.util import src from .nosetester import get_package_name from .utils import KnownFailureException, KnownFailureTest # Some of the classes in this module begin with 'Numpy' to clearly distinguish # them from the plethora of very similar names from nose/unittest/doctest #----------------------------------------------------------------------------- # Modified version of the one in the stdlib, that fixes a python bug (doctests # not found in extension modules, http://bugs.python.org/issue3158) class NumpyDocTestFinder(doctest.DocTestFinder): def _from_module(self, module, object): """ Return true if the given object is defined in the given module. 
""" if module is None: return True elif inspect.isfunction(object): return module.__dict__ is object.__globals__ elif inspect.isbuiltin(object): return module.__name__ == object.__module__ elif inspect.isclass(object): return module.__name__ == object.__module__ elif inspect.ismethod(object): # This one may be a bug in cython that fails to correctly set the # __module__ attribute of methods, but since the same error is easy # to make by extension code writers, having this safety in place # isn't such a bad idea return module.__name__ == object.__self__.__class__.__module__ elif inspect.getmodule(object) is not None: return module is inspect.getmodule(object) elif hasattr(object, '__module__'): return module.__name__ == object.__module__ elif isinstance(object, property): return True # [XX] no way not be sure. else: raise ValueError("object must be a class or function") def _find(self, tests, obj, name, module, source_lines, globs, seen): """ Find tests for the given object and any contained objects, and add them to `tests`. """ doctest.DocTestFinder._find(self, tests, obj, name, module, source_lines, globs, seen) # Below we re-run pieces of the above method with manual modifications, # because the original code is buggy and fails to correctly identify # doctests in extension modules. # Local shorthands from inspect import ( isroutine, isclass, ismodule, isfunction, ismethod ) # Look for tests in a module's contained objects. if ismodule(obj) and self._recurse: for valname, val in obj.__dict__.items(): valname1 = '%s.%s' % (name, valname) if ( (isroutine(val) or isclass(val)) and self._from_module(module, val)): self._find(tests, val, valname1, module, source_lines, globs, seen) # Look for tests in a class's contained objects. if isclass(obj) and self._recurse: for valname, val in obj.__dict__.items(): # Special handling for staticmethod/classmethod. 
# --- continuation of NumpyDocTestFinder._find() ---
# NOTE(review): this chunk begins mid-method; the enclosing ``_find`` and its
# class-attribute loop start above this chunk — indentation below is
# reconstructed to match, confirm against the full file.
            # Unwrap staticmethod/classmethod so the underlying function
            # object can be inspected for doctests.
            if isinstance(val, staticmethod):
                val = getattr(obj, valname)
            if isinstance(val, classmethod):
                val = getattr(obj, valname).__func__

            # Recurse to methods, properties, and nested classes.
            if ((isfunction(val) or isclass(val) or
                 ismethod(val) or isinstance(val, property)) and
                    self._from_module(module, val)):
                valname = '%s.%s' % (name, valname)
                self._find(tests, val, valname, module,
                           source_lines, globs, seen)


# second-chance checker; if the default comparison doesn't
# pass, then see if the expected output string contains flags that
# tell us to ignore the output
class NumpyOutputChecker(doctest.OutputChecker):
    def check_output(self, want, got, optionflags):
        """Return True when `got` acceptably matches `want`.

        Falls back on the stock doctest comparison first; on failure it
        applies NumPy-specific leniency before re-comparing.
        """
        ret = doctest.OutputChecker.check_output(self, want, got,
                                                 optionflags)
        if not ret:
            # A "#random" marker in the expected output means the example
            # is non-deterministic: accept any actual output.
            if "#random" in want:
                return True

            # it would be useful to normalize endianness so that
            # bigendian machines don't fail all the tests (and there are
            # actually some bigendian examples in the doctests). Let's try
            # making them all little endian
            got = got.replace("'>", "'<")
            want = want.replace("'>", "'<")

            # try to normalize out 32 and 64 bit default int sizes
            for sz in [4, 8]:
                got = got.replace("'<i%d'" % sz, "int")
                want = want.replace("'<i%d'" % sz, "int")

            ret = doctest.OutputChecker.check_output(self, want, got,
                                                     optionflags)

        return ret


# Subclass nose.plugins.doctests.DocTestCase to work around a bug in
# its constructor that blocks non-default arguments from being passed
# down into doctest.DocTestCase
class NumpyDocTestCase(npd.DocTestCase):
    def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
                 checker=None, obj=None, result_var='_'):
        self._result_var = result_var
        self._nose_obj = obj
        # Call doctest.DocTestCase.__init__ directly (bypassing
        # npd.DocTestCase.__init__) so the extra keyword arguments
        # actually reach the doctest base class.
        doctest.DocTestCase.__init__(self, test,
                                     optionflags=optionflags,
                                     setUp=setUp, tearDown=tearDown,
                                     checker=checker)


# Print options captured at import time; restored after each doctest
# by NumpyDoctest.afterContext() below.
print_state = numpy.get_printoptions()


class NumpyDoctest(npd.Doctest):
    """Nose doctest plugin with NumPy-aware defaults and output checking."""

    name = 'numpydoctest'   # call nosetests with --with-numpydoctest
    score = 1000    # load late, after doctest builtin

    # always use whitespace and ellipsis options for doctests
    doctest_optflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS

    # files that should be ignored for doctests
    doctest_ignore = ['generate_numpy_api.py',
                      'setup.py']

    # Custom classes; class variables to allow subclassing
    doctest_case_class = NumpyDocTestCase
    out_check_class = NumpyOutputChecker
    test_finder_class = NumpyDocTestFinder

    # Don't use the standard doctest option handler; hard-code the option values
    def options(self, parser, env=os.environ):
        Plugin.options(self, parser, env)
        # Test doctests in 'test' files / directories. Standard plugin default
        # is False
        self.doctest_tests = True
        # Variable name; if defined, doctest results stored in this variable in
        # the top-level namespace.  None is the standard default
        self.doctest_result_var = None

    def configure(self, options, config):
        # parent method sets enabled flag from command line --with-numpydoctest
        Plugin.configure(self, options, config)
        self.finder = self.test_finder_class()
        self.parser = doctest.DocTestParser()
        if self.enabled:
            # Pull standard doctest out of plugin list; there's no reason to run
            # both.  In practice the Unplugger plugin above would cover us when
            # run from a standard numpy.test() call; this is just in case
            # someone wants to run our plugin outside the numpy.test() machinery
            config.plugins.plugins = [p for p in config.plugins.plugins
                                      if p.name != 'doctest']

    def set_test_context(self, test):
        """ Configure `test` object to set test context

        We set the numpy / scipy standard doctest namespace

        Parameters
        ----------
        test : test object
            with ``globs`` dictionary defining namespace

        Returns
        -------
        None

        Notes
        -----
        `test` object modified in place
        """
        # set the namespace for tests
        pkg_name = get_package_name(os.path.dirname(test.filename))

        # Each doctest should execute in an environment equivalent to
        # starting Python and executing "import numpy as np", and,
        # for SciPy packages, an additional import of the local
        # package (so that scipy.linalg.basic.py's doctests have an
        # implicit "from scipy import linalg" as well.
        #
        # Note: __file__ allows the doctest in NoseTester to run
        # without producing an error
        test.globs = {'__builtins__':__builtins__,
                      '__file__':'__main__',
                      '__name__':'__main__',
                      'np':numpy}
        # add appropriate scipy import for SciPy tests
        if 'scipy' in pkg_name:
            p = pkg_name.split('.')
            p2 = p[-1]
            test.globs[p2] = __import__(pkg_name, test.globs, {}, [p2])

    # Override test loading to customize test context (with set_test_context
    # method), set standard docstring options, and install our own test output
    # checker
    def loadTestsFromModule(self, module):
        if not self.matches(module.__name__):
            npd.log.debug("Doctest doesn't want module %s", module)
            return
        try:
            tests = self.finder.find(module)
        except AttributeError:
            # nose allows module.__test__ = False; doctest does not and
            # throws AttributeError
            return
        if not tests:
            return
        tests.sort()
        module_file = src(module.__file__)
        for test in tests:
            if not test.examples:
                continue
            if not test.filename:
                test.filename = module_file
            # Set test namespace; test altered in place
            self.set_test_context(test)
            yield self.doctest_case_class(test,
                                          optionflags=self.doctest_optflags,
                                          checker=self.out_check_class(),
                                          result_var=self.doctest_result_var)

    # Add an afterContext method to nose.plugins.doctests.Doctest in order
    # to restore print options to the original state after each doctest
    def afterContext(self):
        numpy.set_printoptions(**print_state)

    # Ignore NumPy-specific build files that shouldn't be searched for tests
    def wantFile(self, file):
        bn = os.path.basename(file)
        if bn in self.doctest_ignore:
            return False
        return npd.Doctest.wantFile(self, file)


class Unplugger(object):
    """ Nose plugin to remove named plugin late in loading

    By default it removes the "doctest" plugin.
    """
    name = 'unplugger'
    enabled = True  # always enabled
    score = 4000  # load late in order to be after builtins

    def __init__(self, to_unplug='doctest'):
        self.to_unplug = to_unplug

    def options(self, parser, env):
        # No command-line options; plugin is unconditionally enabled.
        pass

    def configure(self, options, config):
        # Pull named plugin out of plugins list
        config.plugins.plugins = [p for p in config.plugins.plugins
                                  if p.name != self.to_unplug]


class KnownFailurePlugin(ErrorClassPlugin):
    '''Plugin that installs a KNOWNFAIL error class for the
    KnownFailureClass exception.  When KnownFailure is raised,
    the exception will be logged in the knownfail attribute of the
    result, 'K' or 'KNOWNFAIL' (verbose) will be output, and the
    exception will not be counted as an error or failure.'''
    enabled = True
    knownfail = ErrorClass(KnownFailureException,
                           label='KNOWNFAIL',
                           isfailure=False)

    def options(self, parser, env=os.environ):
        env_opt = 'NOSE_WITHOUT_KNOWNFAIL'
        parser.add_option('--no-knownfail', action='store_true',
                          dest='noKnownFail', default=env.get(env_opt, False),
                          help='Disable special handling of KnownFailure '
                               'exceptions')

    def configure(self, options, conf):
        if not self.can_configure:
            return
        self.conf = conf
        disable = getattr(options, 'noKnownFail', False)
        if disable:
            self.enabled = False

KnownFailure = KnownFailurePlugin   # backwards compat


class FPUModeCheckPlugin(Plugin):
    """
    Plugin that checks the FPU mode before and after each test,
    raising failures if the test changed the mode.
    """

    def prepareTestCase(self, test):
        # Imported lazily so the plugin can load even where the compiled
        # test helper module is unavailable.
        from numpy.core.multiarray_tests import get_fpu_mode

        def run(result):
            old_mode = get_fpu_mode()
            test.test(result)
            new_mode = get_fpu_mode()

            if old_mode != new_mode:
                # raise/except so sys.exc_info() carries a real traceback
                # for the failure report.
                try:
                    raise AssertionError(
                        "FPU mode changed from {0:#x} to {1:#x} during the "
                        "test".format(old_mode, new_mode))
                except AssertionError:
                    result.addFailure(test, sys.exc_info())

        return run


# Class allows us to save the results of the tests in runTests - see runTests
# method docstring for details
class NumpyTestProgram(nose.core.TestProgram):
    def runTests(self):
        """Run Tests. Returns true on success, false on failure, and
        sets self.success to the same value.

        Because nose currently discards the test result object, but we need
        to return it to the user, override TestProgram.runTests to retain
        the result
        """
        if self.testRunner is None:
            self.testRunner = nose.core.TextTestRunner(stream=self.config.stream,
                                                       verbosity=self.config.verbosity,
                                                       config=self.config)
        # Give plugins a chance to substitute their own runner.
        plug_runner = self.config.plugins.prepareTestRunner(self.testRunner)
        if plug_runner is not None:
            self.testRunner = plug_runner
        # Keep the result object (nose normally discards it).
        self.result = self.testRunner.run(self.test)
        self.success = self.result.wasSuccessful()
        return self.success
mit
arifsetiawan/edx-platform
lms/djangoapps/courseware/management/commands/tests/test_dump_course.py
40
9067
# coding=utf-8
"""Tests for Django management commands"""
# NOTE(review): Python 2 module (`unicode`, `except X, e`, `itervalues`).
import json
from nose.plugins.attrib import attr
from path import path
import shutil
from StringIO import StringIO
import tarfile
from tempfile import mkdtemp

import factory

from django.conf import settings
from django.core.management import call_command

from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, mixed_store_config
from xmodule.modulestore.tests.django_utils import (
    TEST_DATA_MONGO_MODULESTORE, TEST_DATA_SPLIT_MODULESTORE
)
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.xml_importer import import_course_from_xml

DATA_DIR = settings.COMMON_TEST_DATA_ROOT
# XML course fixtures imported in setUp, and their course-id -> store mapping.
XML_COURSE_DIRS = ['toy', 'simple', 'open_ended']
MAPPINGS = {
    'edX/toy/2012_Fall': 'xml',
    'edX/simple/2012_Fall': 'xml',
    'edX/open_ended/2012_Fall': 'xml',
}

TEST_DATA_MIXED_XML_MODULESTORE = mixed_store_config(
    DATA_DIR, MAPPINGS, include_xml=True, xml_source_dirs=XML_COURSE_DIRS,
)


@attr('shard_1')
class CommandsTestBase(ModuleStoreTestCase):
    """
    Base class for testing different django commands.

    Must be subclassed using override_settings set to the
    modulestore to be tested.
    """
    # Not collected directly; concrete subclasses set __test__ = True.
    __test__ = False
    url_name = '2012_Fall'

    def setUp(self):
        super(CommandsTestBase, self).setUp()
        self.test_course_key = modulestore().make_course_key("edX", "simple", "2012_Fall")
        self.loaded_courses = self.load_courses()

    def load_courses(self):
        """Load test courses and return list of ids"""
        store = modulestore()

        # Add a course with a unicode name.
        unique_org = factory.Sequence(lambda n: u'ëḋẌ.%d' % n)
        CourseFactory.create(
            org=unique_org,
            course=u'śíḿṕĺé',
            display_name=u'2012_Fáĺĺ',
            modulestore=store
        )

        courses = store.get_courses()
        # NOTE: if xml store owns these, it won't import them into mongo
        if self.test_course_key not in [c.id for c in courses]:
            import_course_from_xml(
                store, ModuleStoreEnum.UserID.mgmt_command, DATA_DIR,
                XML_COURSE_DIRS, create_if_not_present=True
            )

        return [course.id for course in store.get_courses()]

    def call_command(self, name, *args, **kwargs):
        """Call management command and return output"""
        out = StringIO()  # To Capture the output of the command
        call_command(name, *args, stdout=out, **kwargs)
        out.seek(0)
        return out.read()

    def test_dump_course_ids(self):
        """dump_course_ids should emit exactly the loaded course ids."""
        kwargs = {'modulestore': 'default'}
        output = self.call_command('dump_course_ids', **kwargs)
        dumped_courses = output.decode('utf-8').strip().split('\n')
        course_ids = {unicode(course_id) for course_id in self.loaded_courses}
        dumped_ids = set(dumped_courses)
        self.assertEqual(course_ids, dumped_ids)

    def test_correct_course_structure_metadata(self):
        """dump_course_structure should produce a non-empty JSON dump (regression: TypeError)."""
        course_id = unicode(modulestore().make_course_key('edX', 'open_ended', '2012_Fall'))
        args = [course_id]
        kwargs = {'modulestore': 'default'}

        try:
            output = self.call_command('dump_course_structure', *args, **kwargs)
        except TypeError, exception:
            self.fail(exception)

        dump = json.loads(output)
        self.assertGreater(len(dump.values()), 0)

    def test_dump_course_structure(self):
        """Check shape and a few known elements of the non-inherited structure dump."""
        args = [unicode(self.test_course_key)]
        kwargs = {'modulestore': 'default'}
        output = self.call_command('dump_course_structure', *args, **kwargs)

        dump = json.loads(output)

        # check that all elements in the course structure have metadata,
        # but not inherited metadata:
        for element in dump.itervalues():
            self.assertIn('metadata', element)
            self.assertIn('children', element)
            self.assertIn('category', element)
            self.assertNotIn('inherited_metadata', element)

        # Check a few elements in the course dump
        test_course_key = self.test_course_key
        parent_id = unicode(test_course_key.make_usage_key('chapter', 'Overview'))
        self.assertEqual(dump[parent_id]['category'], 'chapter')
        self.assertEqual(len(dump[parent_id]['children']), 3)

        child_id = dump[parent_id]['children'][1]
        self.assertEqual(dump[child_id]['category'], 'videosequence')
        self.assertEqual(len(dump[child_id]['children']), 2)

        video_id = unicode(test_course_key.make_usage_key('video', 'Welcome'))
        self.assertEqual(dump[video_id]['category'], 'video')
        self.assertItemsEqual(
            dump[video_id]['metadata'].keys(),
            ['download_video', 'youtube_id_0_75', 'youtube_id_1_0', 'youtube_id_1_25', 'youtube_id_1_5']
        )
        self.assertIn('youtube_id_1_0', dump[video_id]['metadata'])

        # Check if there are the right number of elements
        self.assertEqual(len(dump), 16)

    def test_dump_inherited_course_structure(self):
        """With inherited=True (no defaults): inherited_metadata present, defaults omitted."""
        args = [unicode(self.test_course_key)]
        kwargs = {'modulestore': 'default', 'inherited': True}
        output = self.call_command('dump_course_structure', *args, **kwargs)
        dump = json.loads(output)
        # check that all elements in the course structure have inherited metadata,
        # and that it contains a particular value as well:
        for element in dump.itervalues():
            self.assertIn('metadata', element)
            self.assertIn('children', element)
            self.assertIn('category', element)
            self.assertIn('inherited_metadata', element)
            self.assertIsNone(element['inherited_metadata']['ispublic'])
            # ... but does not contain inherited metadata containing a default value:
            self.assertNotIn('due', element['inherited_metadata'])

    def test_dump_inherited_course_structure_with_defaults(self):
        """With inherited=True and inherited_defaults=True: default values included too."""
        args = [unicode(self.test_course_key)]
        kwargs = {'modulestore': 'default', 'inherited': True, 'inherited_defaults': True}
        output = self.call_command('dump_course_structure', *args, **kwargs)
        dump = json.loads(output)
        # check that all elements in the course structure have inherited metadata,
        # and that it contains a particular value as well:
        for element in dump.itervalues():
            self.assertIn('metadata', element)
            self.assertIn('children', element)
            self.assertIn('category', element)
            self.assertIn('inherited_metadata', element)
            self.assertIsNone(element['inherited_metadata']['ispublic'])
            # ... and contains inherited metadata containing a default value:
            self.assertIsNone(element['inherited_metadata']['due'])

    def test_export_course(self):
        """export_course to a temp file yields a readable tarball."""
        tmp_dir = path(mkdtemp())
        self.addCleanup(shutil.rmtree, tmp_dir)
        filename = tmp_dir / 'test.tar.gz'
        self.run_export_course(filename)

        with tarfile.open(filename) as tar_file:
            self.check_export_file(tar_file)

    def test_export_course_stdout(self):
        """export_course with '-' writes the tarball to stdout."""
        output = self.run_export_course('-')
        with tarfile.open(fileobj=StringIO(output)) as tar_file:
            self.check_export_file(tar_file)

    def run_export_course(self, filename):  # pylint: disable=missing-docstring
        args = [unicode(self.test_course_key), filename]
        kwargs = {'modulestore': 'default'}
        return self.call_command('export_course', *args, **kwargs)

    def check_export_file(self, tar_file):  # pylint: disable=missing-docstring
        names = tar_file.getnames()

        # Check if some of the files are present.
        # The rest of the code should be covered by the tests for
        # xmodule.modulestore.xml_exporter, used by the dump_course command
        assert_in = self.assertIn
        assert_in('edX-simple-2012_Fall', names)
        assert_in('edX-simple-2012_Fall/policies/{}/policy.json'.format(self.url_name), names)
        assert_in('edX-simple-2012_Fall/html/toylab.html', names)
        assert_in('edX-simple-2012_Fall/videosequence/A_simple_sequence.xml', names)
        assert_in('edX-simple-2012_Fall/sequential/Lecture_2.xml', names)


class CommandsXMLTestCase(CommandsTestBase):
    """
    Test case for management commands with the xml modulestore present.
    """
    MODULESTORE = TEST_DATA_MIXED_XML_MODULESTORE
    __test__ = True


class CommandsMongoTestCase(CommandsTestBase):
    """
    Test case for management commands using the mixed mongo modulestore
    with old mongo as the default.
    """
    MODULESTORE = TEST_DATA_MONGO_MODULESTORE
    __test__ = True


class CommandSplitMongoTestCase(CommandsTestBase):
    """
    Test case for management commands using the mixed mongo modulestore
    with split as the default.
    """
    MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
    __test__ = True
    url_name = 'course'
agpl-3.0
colinligertwood/odoo
addons/hr_timesheet_sheet/hr_timesheet_sheet.py
27
33412
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
from pytz import timezone
import pytz

from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.translate import _


class hr_timesheet_sheet(osv.osv):
    """Employee timesheet: groups attendances and analytic timesheet lines
    over a date range and tracks their approval workflow."""
    _name = "hr_timesheet_sheet.sheet"
    _inherit = "mail.thread"
    _table = 'hr_timesheet_sheet_sheet'
    _order = "id desc"
    _description = "Timesheet"

    def _total(self, cr, uid, ids, name, args, context=None):
        """ Compute the attendances, analytic lines timesheets and differences
            between them for all the days of a timesheet and the current day.

        Returns a dict {sheet_id: {'total_attendance', 'total_timesheet',
        'total_difference'}} summed over the sheet's day periods.
        """
        res = {}
        for sheet in self.browse(cr, uid, ids, context=context or {}):
            res.setdefault(sheet.id, {
                'total_attendance': 0.0,
                'total_timesheet': 0.0,
                'total_difference': 0.0,
            })
            for period in sheet.period_ids:
                res[sheet.id]['total_attendance'] += period.total_attendance
                res[sheet.id]['total_timesheet'] += period.total_timesheet
                res[sheet.id]['total_difference'] += period.total_attendance - period.total_timesheet
        return res

    def check_employee_attendance_state(self, cr, uid, sheet_id, context=None):
        """Raise unless the sheet has as many sign-ins as sign-outs."""
        ids_signin = self.pool.get('hr.attendance').search(cr, uid, [('sheet_id', '=', sheet_id), ('action', '=', 'sign_in')])
        ids_signout = self.pool.get('hr.attendance').search(cr, uid, [('sheet_id', '=', sheet_id), ('action', '=', 'sign_out')])

        if len(ids_signin) != len(ids_signout):
            # FIX: wrap the title in _() for translation, consistent with
            # every other except_osv raised in this class.
            raise osv.except_osv(_('Warning!'), _('The timesheet cannot be validated as it does not contain an equal number of sign ins and sign outs.'))
        return True

    def copy(self, cr, uid, ids, *args, **argv):
        # Duplicating a timesheet is deliberately forbidden.
        raise osv.except_osv(_('Error!'), _('You cannot duplicate a timesheet.'))

    def create(self, cr, uid, vals, context=None):
        """Create a sheet, validating the employee's user/product/journal links."""
        if 'employee_id' in vals:
            if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).user_id:
                raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must link him/her to a user.'))
            if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).product_id:
                raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must link the employee to a product, like \'Consultant\'.'))
            if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).journal_id:
                raise osv.except_osv(_('Configuration Error!'), _('In order to create a timesheet for this employee, you must assign an analytic journal to the employee, like \'Timesheet Journal\'.'))
        if vals.get('attendances_ids'):
            # If attendances, we sort them by date asc before writing them,
            # to satisfy the alternance constraint
            vals['attendances_ids'] = self.sort_attendances(cr, uid, vals['attendances_ids'], context=context)
        return super(hr_timesheet_sheet, self).create(cr, uid, vals, context=context)

    def write(self, cr, uid, ids, vals, context=None):
        """Write, re-validating employee links, overlap, and sign-in/out alternation."""
        if 'employee_id' in vals:
            new_user_id = self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).user_id.id or False
            if not new_user_id:
                raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must link him/her to a user.'))
            if not self._sheet_date(cr, uid, ids, forced_user_id=new_user_id, context=context):
                raise osv.except_osv(_('Error!'), _('You cannot have 2 timesheets that overlap!\nYou should use the menu \'My Timesheet\' to avoid this problem.'))
            if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).product_id:
                raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must link the employee to a product.'))
            if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).journal_id:
                raise osv.except_osv(_('Configuration Error!'), _('In order to create a timesheet for this employee, you must assign an analytic journal to the employee, like \'Timesheet Journal\'.'))
        if vals.get('attendances_ids'):
            # If attendances, we sort them by date asc before writing them,
            # to satisfy the alternance constraint
            # In addition to the date order, deleting attendances are done
            # before inserting attendances
            vals['attendances_ids'] = self.sort_attendances(cr, uid, vals['attendances_ids'], context=context)
        res = super(hr_timesheet_sheet, self).write(cr, uid, ids, vals, context=context)
        if vals.get('attendances_ids'):
            for timesheet in self.browse(cr, uid, ids):
                if not self.pool['hr.attendance']._altern_si_so(cr, uid, [att.id for att in timesheet.attendances_ids]):
                    raise osv.except_osv(_('Warning !'), _('Error ! Sign in (resp. Sign out) must follow Sign out (resp. Sign in)'))
        return res

    def sort_attendances(self, cr, uid, attendance_tuples, context=None):
        """Order one2many command tuples so deletions ((2,id)/(3,id)/unlink-all)
        come first, then creations/updates ((0,vals)/(1,id,vals)/(4,id)) by
        attendance date ascending."""
        date_attendances = []
        for att_tuple in attendance_tuples:
            if att_tuple[0] in [0, 1, 4]:
                # create/update/link commands: sort after deletions, by date
                if att_tuple[0] in [0, 1]:
                    name = att_tuple[2]['name']
                else:
                    name = self.pool['hr.attendance'].browse(cr, uid, att_tuple[1]).name
                date_attendances.append((1, name, att_tuple))
            elif att_tuple[0] in [2, 3]:
                # delete/unlink commands: process first
                date_attendances.append((0, self.pool['hr.attendance'].browse(cr, uid, att_tuple[1]).name, att_tuple))
            else:
                date_attendances.append((0, False, att_tuple))
        date_attendances.sort()
        return [att[2] for att in date_attendances]

    def button_confirm(self, cr, uid, ids, context=None):
        """Confirm the sheet if the attendance/timesheet gap is within the
        company tolerance; subscribe the manager to the thread."""
        for sheet in self.browse(cr, uid, ids, context=context):
            if sheet.employee_id and sheet.employee_id.parent_id and sheet.employee_id.parent_id.user_id:
                self.message_subscribe_users(cr, uid, [sheet.id], user_ids=[sheet.employee_id.parent_id.user_id.id], context=context)
            self.check_employee_attendance_state(cr, uid, sheet.id, context=context)
            di = sheet.user_id.company_id.timesheet_max_difference
            if (abs(sheet.total_difference) < di) or not di:
                self.signal_confirm(cr, uid, [sheet.id])
            else:
                raise osv.except_osv(_('Warning!'), _('Please verify that the total difference of the sheet is lower than %.2f.') % (di,))
        return True

    def attendance_action_change(self, cr, uid, ids, context=None):
        """Toggle sign-in/sign-out for the (distinct) employees of the sheets."""
        hr_employee = self.pool.get('hr.employee')
        employee_ids = []
        for sheet in self.browse(cr, uid, ids, context=context):
            if sheet.employee_id.id not in employee_ids:
                employee_ids.append(sheet.employee_id.id)
        return hr_employee.attendance_action_change(cr, uid, employee_ids, context=context)

    def _count_all(self, cr, uid, ids, field_name, arg, context=None):
        """Function field: count timesheet lines and attendances per sheet."""
        Timesheet = self.pool['hr.analytic.timesheet']
        Attendance = self.pool['hr.attendance']
        return {
            sheet_id: {
                'timesheet_activity_count': Timesheet.search_count(cr, uid, [('sheet_id', '=', sheet_id)], context=context),
                # FIX: field name was misspelled 'sheed_id', so the domain
                # never matched and attendance_count was always 0.  The
                # relation field on hr.attendance is 'sheet_id' (see the
                # 'attendances_ids' one2many below).
                'attendance_count': Attendance.search_count(cr, uid, [('sheet_id', '=', sheet_id)], context=context)
            }
            for sheet_id in ids
        }

    _columns = {
        'name': fields.char('Note', size=64, select=1,
                            states={'confirm': [('readonly', True)], 'done': [('readonly', True)]}),
        'employee_id': fields.many2one('hr.employee', 'Employee', required=True),
        'user_id': fields.related('employee_id', 'user_id', type="many2one", relation="res.users", store=True, string="User", required=False, readonly=True),
        'date_from': fields.date('Date from', required=True, select=1, readonly=True, states={'new': [('readonly', False)]}),
        'date_to': fields.date('Date to', required=True, select=1, readonly=True, states={'new': [('readonly', False)]}),
        'timesheet_ids': fields.one2many('hr.analytic.timesheet', 'sheet_id',
                                         'Timesheet lines',
                                         readonly=True, states={
                                             'draft': [('readonly', False)],
                                             'new': [('readonly', False)]}),
        'attendances_ids': fields.one2many('hr.attendance', 'sheet_id', 'Attendances'),
        'state': fields.selection([
            ('new', 'New'),
            ('draft', 'Open'),
            ('confirm', 'Waiting Approval'),
            ('done', 'Approved')], 'Status', select=True, required=True, readonly=True,
            help=' * The \'Draft\' status is used when a user is encoding a new and unconfirmed timesheet. \
                \n* The \'Confirmed\' status is used for to confirm the timesheet by user. \
                \n* The \'Done\' status is used when users timesheet is accepted by his/her senior.'),
        'state_attendance': fields.related('employee_id', 'state', type='selection', selection=[('absent', 'Absent'), ('present', 'Present')], string='Current Status', readonly=True),
        'total_attendance': fields.function(_total, method=True, string='Total Attendance', multi="_total"),
        'total_timesheet': fields.function(_total, method=True, string='Total Timesheet', multi="_total"),
        'total_difference': fields.function(_total, method=True, string='Difference', multi="_total"),
        'period_ids': fields.one2many('hr_timesheet_sheet.sheet.day', 'sheet_id', 'Period', readonly=True),
        'account_ids': fields.one2many('hr_timesheet_sheet.sheet.account', 'sheet_id', 'Analytic accounts', readonly=True),
        'company_id': fields.many2one('res.company', 'Company'),
        'department_id': fields.many2one('hr.department', 'Department'),
        'timesheet_activity_count': fields.function(_count_all, type='integer', string='Timesheet Activities', multi=True),
        'attendance_count': fields.function(_count_all, type='integer', string="Attendances", multi=True),
    }

    def _default_date_from(self, cr, uid, context=None):
        """Start of the current period per the company's timesheet_range."""
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        r = user.company_id and user.company_id.timesheet_range or 'month'
        if r == 'month':
            return time.strftime('%Y-%m-01')
        elif r == 'week':
            # Monday of the current week.
            return (datetime.today() + relativedelta(weekday=0, days=-6)).strftime('%Y-%m-%d')
        elif r == 'year':
            return time.strftime('%Y-01-01')
        return time.strftime('%Y-%m-%d')

    def _default_date_to(self, cr, uid, context=None):
        """End of the current period per the company's timesheet_range."""
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        r = user.company_id and user.company_id.timesheet_range or 'month'
        if r == 'month':
            # Last day of the current month.
            return (datetime.today() + relativedelta(months=+1, day=1, days=-1)).strftime('%Y-%m-%d')
        elif r == 'week':
            # Sunday of the current week.
            return (datetime.today() + relativedelta(weekday=6)).strftime('%Y-%m-%d')
        elif r == 'year':
            return time.strftime('%Y-12-31')
        return time.strftime('%Y-%m-%d')

    def _default_employee(self, cr, uid, context=None):
        """Employee record linked to the current user, if any."""
        emp_ids = self.pool.get('hr.employee').search(cr, uid, [('user_id', '=', uid)], context=context)
        return emp_ids and emp_ids[0] or False

    _defaults = {
        'date_from': _default_date_from,
        'date_to': _default_date_to,
        'state': 'new',
        'employee_id': _default_employee,
        'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'hr_timesheet_sheet.sheet', context=c)
    }

    def _sheet_date(self, cr, uid, ids, forced_user_id=False, context=None):
        """Constraint helper: False when another sheet of the same user
        overlaps [date_from, date_to]."""
        for sheet in self.browse(cr, uid, ids, context=context):
            new_user_id = forced_user_id or sheet.user_id and sheet.user_id.id
            if new_user_id:
                cr.execute('SELECT id \
                    FROM hr_timesheet_sheet_sheet \
                    WHERE (date_from <= %s and %s <= date_to) \
                        AND user_id=%s \
                        AND id <> %s', (sheet.date_to, sheet.date_from, new_user_id, sheet.id))
                if cr.fetchall():
                    return False
        return True

    _constraints = [
        (_sheet_date, 'You cannot have 2 timesheets that overlap!\nPlease use the menu \'My Current Timesheet\' to avoid this problem.', ['date_from', 'date_to']),
    ]

    def action_set_to_draft(self, cr, uid, ids, *args):
        """Reset sheets to draft and restart their workflow."""
        self.write(cr, uid, ids, {'state': 'draft'})
        self.create_workflow(cr, uid, ids)
        return True

    def name_get(self, cr, uid, ids, context=None):
        """Display sheets as 'Week NN' based on date_from."""
        if not ids:
            return []
        if isinstance(ids, (long, int)):
            ids = [ids]
        return [(r['id'], _('Week ') + datetime.strptime(r['date_from'], '%Y-%m-%d').strftime('%U'))
                for r in self.read(cr, uid, ids, ['date_from'],
                                   context=context, load='_classic_write')]

    def unlink(self, cr, uid, ids, context=None):
        """Forbid deleting confirmed/approved sheets or sheets with attendance."""
        sheets = self.read(cr, uid, ids, ['state', 'total_attendance'], context=context)
        for sheet in sheets:
            if sheet['state'] in ('confirm', 'done'):
                raise osv.except_osv(_('Invalid Action!'), _('You cannot delete a timesheet which is already confirmed.'))
            elif sheet['total_attendance'] != 0.00:  # was deprecated '<>'
                raise osv.except_osv(_('Invalid Action!'), _('You cannot delete a timesheet which have attendance entries.'))
        return super(hr_timesheet_sheet, self).unlink(cr, uid, ids, context=context)

    def onchange_employee_id(self, cr, uid, ids, employee_id, context=None):
        """Propagate the employee's department and user onto the sheet form."""
        department_id = False
        user_id = False
        if employee_id:
            empl_id = self.pool.get('hr.employee').browse(cr, uid, employee_id, context=context)
            department_id = empl_id.department_id.id
            user_id = empl_id.user_id.id
        return {'value': {'department_id': department_id, 'user_id': user_id, }}

    # ------------------------------------------------
    # OpenChatter methods and notifications
    # ------------------------------------------------

    def _needaction_domain_get(self, cr, uid, context=None):
        """Sheets awaiting approval by the current user (as manager)."""
        emp_obj = self.pool.get('hr.employee')
        empids = emp_obj.search(cr, uid, [('parent_id.user_id', '=', uid)], context=context)
        if not empids:
            return False
        dom = ['&', ('state', '=', 'confirm'), ('employee_id', 'in', empids)]
        return dom


class account_analytic_line(osv.osv):
    _inherit = "account.analytic.line"

    def _get_default_date(self, cr, uid, context=None):
        """Default line date, clamped into the timesheet's period when set."""
        if context is None:
            context = {}
        # get the default date (should be: today)
        res = super(account_analytic_line, self)._get_default_date(cr, uid, context=context)
        # if we got the dates from and to from the timesheet and if the
        # default date is in between, we use the default but if the default
        # isn't included in those dates, we use the date start of the
        # timesheet as default
        if context.get('timesheet_date_from') and context.get('timesheet_date_to'):
            if context['timesheet_date_from'] <= res <= context['timesheet_date_to']:
                return res
            return context.get('timesheet_date_from')
        # if we don't get the dates from the timesheet, we return the default
        # value from super()
        return res


class account_analytic_account(osv.osv):
    _inherit = "account.analytic.account"

    def name_create(self, cr, uid, name, context=None):
        if context is None:
            context = {}
        group_template_required = self.pool['res.users'].has_group(cr, uid, 'account_analytic_analysis.group_template_required')
        if not context.get('default_use_timesheets') or group_template_required:
            return
            # NOTE(review): the source chunk is truncated at this point; the
            # original method presumably continues past this chunk boundary —
            # confirm against the full file before relying on this body.
super(account_analytic_account, self).name_create(cr, uid, name, context=context) rec_id = self.create(cr, uid, {self._rec_name: name}, context) return self.name_get(cr, uid, [rec_id], context)[0] class hr_timesheet_line(osv.osv): _inherit = "hr.analytic.timesheet" def _sheet(self, cursor, user, ids, name, args, context=None): sheet_obj = self.pool.get('hr_timesheet_sheet.sheet') res = {}.fromkeys(ids, False) for ts_line in self.browse(cursor, user, ids, context=context): sheet_ids = sheet_obj.search(cursor, user, [('date_to', '>=', ts_line.date), ('date_from', '<=', ts_line.date), ('employee_id.user_id', '=', ts_line.user_id.id)], context=context) if sheet_ids: # [0] because only one sheet possible for an employee between 2 dates res[ts_line.id] = sheet_obj.name_get(cursor, user, sheet_ids, context=context)[0] return res def _get_hr_timesheet_sheet(self, cr, uid, ids, context=None): ts_line_ids = [] for ts in self.browse(cr, uid, ids, context=context): cr.execute(""" SELECT l.id FROM hr_analytic_timesheet l INNER JOIN account_analytic_line al ON (l.line_id = al.id) WHERE %(date_to)s >= al.date AND %(date_from)s <= al.date AND %(user_id)s = al.user_id GROUP BY l.id""", {'date_from': ts.date_from, 'date_to': ts.date_to, 'user_id': ts.employee_id.user_id.id,}) ts_line_ids.extend([row[0] for row in cr.fetchall()]) return ts_line_ids def _get_account_analytic_line(self, cr, uid, ids, context=None): ts_line_ids = self.pool.get('hr.analytic.timesheet').search(cr, uid, [('line_id', 'in', ids)]) return ts_line_ids _columns = { 'sheet_id': fields.function(_sheet, string='Sheet', select="1", type='many2one', relation='hr_timesheet_sheet.sheet', ondelete="cascade", store={ 'hr_timesheet_sheet.sheet': (_get_hr_timesheet_sheet, ['employee_id', 'date_from', 'date_to'], 10), 'account.analytic.line': (_get_account_analytic_line, ['user_id', 'date'], 10), 'hr.analytic.timesheet': (lambda self,cr,uid,ids,context=None: ids, None, 10), }, ), } def _check_sheet_state(self, cr, uid, 
ids, context=None): if context is None: context = {} for timesheet_line in self.browse(cr, uid, ids, context=context): if timesheet_line.sheet_id and timesheet_line.sheet_id.state not in ('draft', 'new'): return False return True _constraints = [ (_check_sheet_state, 'You cannot modify an entry in a Confirmed/Done timesheet !', ['state']), ] def unlink(self, cr, uid, ids, *args, **kwargs): if isinstance(ids, (int, long)): ids = [ids] self._check(cr, uid, ids) return super(hr_timesheet_line,self).unlink(cr, uid, ids,*args, **kwargs) def _check(self, cr, uid, ids): for att in self.browse(cr, uid, ids): if att.sheet_id and att.sheet_id.state not in ('draft', 'new'): raise osv.except_osv(_('Error!'), _('You cannot modify an entry in a confirmed timesheet.')) return True def multi_on_change_account_id(self, cr, uid, ids, account_ids, context=None): return dict([(el, self.on_change_account_id(cr, uid, ids, el, context.get('user_id', uid))) for el in account_ids]) class hr_attendance(osv.osv): _inherit = "hr.attendance" def _get_default_date(self, cr, uid, context=None): if context is None: context = {} if 'name' in context: return context['name'] + time.strftime(' %H:%M:%S') return time.strftime('%Y-%m-%d %H:%M:%S') def _get_hr_timesheet_sheet(self, cr, uid, ids, context=None): attendance_ids = [] for ts in self.browse(cr, uid, ids, context=context): cr.execute(""" SELECT a.id FROM hr_attendance a INNER JOIN hr_employee e INNER JOIN resource_resource r ON (e.resource_id = r.id) ON (a.employee_id = e.id) WHERE %(date_to)s >= date_trunc('day', a.name) AND %(date_from)s <= a.name AND %(user_id)s = r.user_id GROUP BY a.id""", {'date_from': ts.date_from, 'date_to': ts.date_to, 'user_id': ts.employee_id.user_id.id,}) attendance_ids.extend([row[0] for row in cr.fetchall()]) return attendance_ids def _get_attendance_employee_tz(self, cr, uid, employee_id, date, context=None): """ Simulate timesheet in employee timezone Return the attendance date in string format in the employee 
tz converted from utc timezone as we consider date of employee timesheet is in employee timezone """ employee_obj = self.pool['hr.employee'] tz = False if employee_id: employee = employee_obj.browse(cr, uid, employee_id, context=context) tz = employee.user_id.partner_id.tz if not date: date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT) att_tz = timezone(tz or 'utc') attendance_dt = datetime.strptime(date, DEFAULT_SERVER_DATETIME_FORMAT) att_tz_dt = pytz.utc.localize(attendance_dt) att_tz_dt = att_tz_dt.astimezone(att_tz) # We take only the date omiting the hours as we compare with timesheet # date_from which is a date format thus using hours would lead to # be out of scope of timesheet att_tz_date_str = datetime.strftime(att_tz_dt, DEFAULT_SERVER_DATE_FORMAT) return att_tz_date_str def _get_current_sheet(self, cr, uid, employee_id, date=False, context=None): sheet_obj = self.pool['hr_timesheet_sheet.sheet'] if not date: date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT) att_tz_date_str = self._get_attendance_employee_tz( cr, uid, employee_id, date=date, context=context) sheet_ids = sheet_obj.search(cr, uid, [('date_from', '<=', att_tz_date_str), ('date_to', '>=', att_tz_date_str), ('employee_id', '=', employee_id)], limit=1, context=context) return sheet_ids and sheet_ids[0] or False def _sheet(self, cursor, user, ids, name, args, context=None): res = {}.fromkeys(ids, False) for attendance in self.browse(cursor, user, ids, context=context): res[attendance.id] = self._get_current_sheet( cursor, user, attendance.employee_id.id, attendance.name, context=context) return res _columns = { 'sheet_id': fields.function(_sheet, string='Sheet', type='many2one', relation='hr_timesheet_sheet.sheet', store={ 'hr_timesheet_sheet.sheet': (_get_hr_timesheet_sheet, ['employee_id', 'date_from', 'date_to'], 10), 'hr.attendance': (lambda self,cr,uid,ids,context=None: ids, ['employee_id', 'name', 'day'], 10), }, ) } _defaults = { 'name': _get_default_date, } def create(self, cr, 
uid, vals, context=None): if context is None: context = {} sheet_id = context.get('sheet_id') or self._get_current_sheet(cr, uid, vals.get('employee_id'), vals.get('name'), context=context) if sheet_id: att_tz_date_str = self._get_attendance_employee_tz( cr, uid, vals.get('employee_id'), date=vals.get('name'), context=context) ts = self.pool.get('hr_timesheet_sheet.sheet').browse(cr, uid, sheet_id, context=context) if ts.state not in ('draft', 'new'): raise osv.except_osv(_('Error!'), _('You can not enter an attendance in a submitted timesheet. Ask your manager to reset it before adding attendance.')) elif ts.date_from > att_tz_date_str or ts.date_to < att_tz_date_str: raise osv.except_osv(_('User Error!'), _('You can not enter an attendance date outside the current timesheet dates.')) return super(hr_attendance,self).create(cr, uid, vals, context=context) def unlink(self, cr, uid, ids, *args, **kwargs): if isinstance(ids, (int, long)): ids = [ids] self._check(cr, uid, ids) return super(hr_attendance,self).unlink(cr, uid, ids,*args, **kwargs) def write(self, cr, uid, ids, vals, context=None): if context is None: context = {} if isinstance(ids, (int, long)): ids = [ids] self._check(cr, uid, ids) res = super(hr_attendance,self).write(cr, uid, ids, vals, context=context) if 'sheet_id' in context: for attendance in self.browse(cr, uid, ids, context=context): if context['sheet_id'] != attendance.sheet_id.id: raise osv.except_osv(_('User Error!'), _('You cannot enter an attendance ' \ 'date outside the current timesheet dates.')) return res def _check(self, cr, uid, ids): for att in self.browse(cr, uid, ids): if att.sheet_id and att.sheet_id.state not in ('draft', 'new'): raise osv.except_osv(_('Error!'), _('You cannot modify an entry in a confirmed timesheet')) return True class hr_timesheet_sheet_sheet_day(osv.osv): _name = "hr_timesheet_sheet.sheet.day" _description = "Timesheets by Period" _auto = False _order='name' _columns = { 'name': fields.date('Date', 
readonly=True), 'sheet_id': fields.many2one('hr_timesheet_sheet.sheet', 'Sheet', readonly=True, select="1"), 'total_timesheet': fields.float('Total Timesheet', readonly=True), 'total_attendance': fields.float('Attendance', readonly=True), 'total_difference': fields.float('Difference', readonly=True), } def init(self, cr): cr.execute("""create or replace view hr_timesheet_sheet_sheet_day as SELECT id, name, sheet_id, total_timesheet, total_attendance, cast(round(cast(total_attendance - total_timesheet as Numeric),2) as Double Precision) AS total_difference FROM (( SELECT MAX(id) as id, name, sheet_id, SUM(total_timesheet) as total_timesheet, CASE WHEN SUM(total_attendance) < 0 THEN (SUM(total_attendance) + CASE WHEN current_date <> name THEN 1440 ELSE (EXTRACT(hour FROM current_time AT TIME ZONE 'UTC') * 60) + EXTRACT(minute FROM current_time AT TIME ZONE 'UTC') END ) ELSE SUM(total_attendance) END /60 as total_attendance FROM (( select min(hrt.id) as id, l.date::date as name, s.id as sheet_id, sum(l.unit_amount) as total_timesheet, 0.0 as total_attendance from hr_analytic_timesheet hrt JOIN account_analytic_line l ON l.id = hrt.line_id LEFT JOIN hr_timesheet_sheet_sheet s ON s.id = hrt.sheet_id group by l.date::date, s.id ) union ( select -min(a.id) as id, a.name::date as name, s.id as sheet_id, 0.0 as total_timesheet, SUM(((EXTRACT(hour FROM a.name) * 60) + EXTRACT(minute FROM a.name)) * (CASE WHEN a.action = 'sign_in' THEN -1 ELSE 1 END)) as total_attendance from hr_attendance a LEFT JOIN hr_timesheet_sheet_sheet s ON s.id = a.sheet_id WHERE action in ('sign_in', 'sign_out') group by a.name::date, s.id )) AS foo GROUP BY name, sheet_id )) AS bar""") class hr_timesheet_sheet_sheet_account(osv.osv): _name = "hr_timesheet_sheet.sheet.account" _description = "Timesheets by Period" _auto = False _order='name' _columns = { 'name': fields.many2one('account.analytic.account', 'Project / Analytic Account', readonly=True), 'sheet_id': 
fields.many2one('hr_timesheet_sheet.sheet', 'Sheet', readonly=True), 'total': fields.float('Total Time', digits=(16,2), readonly=True), 'invoice_rate': fields.many2one('hr_timesheet_invoice.factor', 'Invoice rate', readonly=True), } def init(self, cr): cr.execute("""create or replace view hr_timesheet_sheet_sheet_account as ( select min(hrt.id) as id, l.account_id as name, s.id as sheet_id, sum(l.unit_amount) as total, l.to_invoice as invoice_rate from hr_analytic_timesheet hrt left join (account_analytic_line l LEFT JOIN hr_timesheet_sheet_sheet s ON (s.date_to >= l.date AND s.date_from <= l.date AND s.user_id = l.user_id)) on (l.id = hrt.line_id) group by l.account_id, s.id, l.to_invoice )""") class res_company(osv.osv): _inherit = 'res.company' _columns = { 'timesheet_range': fields.selection( [('day','Day'),('week','Week'),('month','Month')], 'Timesheet range', help="Periodicity on which you validate your timesheets."), 'timesheet_max_difference': fields.float('Timesheet allowed difference(Hours)', help="Allowed difference in hours between the sign in/out and the timesheet " \ "computation for one sheet. Set this to 0 if you do not want any control."), } _defaults = { 'timesheet_range': lambda *args: 'week', 'timesheet_max_difference': lambda *args: 0.0 } class hr_employee(osv.osv): ''' Employee ''' _inherit = 'hr.employee' _description = 'Employee' def _timesheet_count(self, cr, uid, ids, field_name, arg, context=None): Sheet = self.pool['hr_timesheet_sheet.sheet'] return { employee_id: Sheet.search_count(cr,uid, [('employee_id', '=', employee_id)], context=context) for employee_id in ids } _columns = { 'timesheet_count': fields.function(_timesheet_count, type='integer', string='Timesheets'), } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
Mailea/detailed-skyrim-calculator
skycalc/widgets.py
2
15672
"""Contains templates for most GUI components.""" import tkinter as tk # different kinds of text and labels class BreadcrumbLabel(tk.Label): """Display title of current stage. Attributes: parent (Frame): parent frame text (str): label text/image file name i (int): position/index """ def __init__(self, parent, text, i): tk.Label.__init__(self, parent, bg=parent.cget("bg"), borderwidth=0) self.__i = i # index/id self.__empty = ImageImporter.load("bread/labels/empty") self.__filled = ImageImporter.load("bread/labels/" + text) def refresh(self, n=0): if self.__i == n: self.config(image=self.__filled) else: self.config(image=self.__empty) class Message(tk.Label): """Standard message with 2 modes (normal and error). Attributes: parent (Frame): frame that contains this text text_ (str): displayed text """ def __init__(self, parent, text_): tk.Label.__init__(self, parent, bg=parent.cget("bg"), font=("Helvetica", 10), wraplength=500) self.show_normal(text_) def show_error(self, new_text): self.config(text=new_text, fg=Colors.ERROR) def show_normal(self, new_text): self.config(text=new_text, fg=Colors.DARK) class TableEntry(tk.Label): """Text, usually a table entry. Attributes: parent (Frame): frame that contains this text text_ (str): displayed text highlighted (boolean): text will be bigger and brighter if True """ def __init__(self, parent, text_, highlighted=False): tk.Label.__init__(self, parent, bg=parent.cget("bg"), text=text_) if highlighted: self.config(fg=Colors.TEXT, font=("Helvetica", 11)) else: self.config(fg=Colors.LIGHT, font=("Helvetica", 10)) # classic buttons class ImageButton(tk.Button): """Button with an image. 
Attributes: parent (Frame): frame that contains this button label (str): default text / name command_: what happens when button is active """ def __init__(self, parent, label, command_): tk.Button.__init__(self, parent, command=command_, activebackground=parent.cget("bg"), bg=parent.cget("bg"), borderwidth=0, cursor="hand2", relief="flat") self.__image = ImageImporter.load("button/" + label) self.config(image=self.__image) class NavButton(tk.Button): """Standard layout for 'previous'- or 'next'-buttons. Attributes: parent (Frame): frame that contains this button label (str): default text alt_label (str): optional, alternative text """ def __init__(self, parent, label, alt_label=None): tk.Button.__init__(self, parent, activebackground=parent.cget("bg"), bg=parent.cget("bg"), borderwidth=0, cursor="hand2", relief="flat") self.__image = ImageImporter.load("nav/" + label) self.__alt_image = None if alt_label is not None: self.__alt_image = ImageImporter.load("nav/" + alt_label) self.show_default() def show_default(self): self.config(image=self.__image) def set_command(self, command_): self.config(command=command_) self.bind("<Return>", command_) def show_alternative(self): if self.__alt_image is not None: self.config(image=self.__alt_image) class TabButton(tk.Button): """Tab index button. 
For results view Attributes: parent (Frame): container frame text_ (str): displayed text tab (Frame): frame that will be shown when the button is clicked marker: corresponding tab marker """ def __init__(self, parent, text_, tab, buttons, marker=None): tk.Button.__init__(self, parent, activebackground=parent.cget("bg"), bg=parent.cget("bg"), borderwidth=0, cursor="hand2", relief="flat") self.__parent = parent self.__tab = tab self.__buttons = buttons self.__marker = marker self.__deselected = ImageImporter.load("tab/names/" + text_) self.__selected = ImageImporter.load( "tab/names/" + text_ + "_SELECTED") self.config(command=lambda: self.__on_call()) self.deselect() def deselect(self): self.config(image=self.__deselected) def select(self): for button in self.__buttons: button.deselect() self.config(image=self.__selected) if self.__marker is not None: self.__marker.select() def __on_call(self): self.__tab.tkraise() self.select() class ToggleButton(tk.Button): """'Toggle button' used for sorting method selection. Attributes: parent (Frame): container frame text_ (str): button text, usually 'alphabetically', 'by category', ... """ def __init__(self, parent, text_, command_): tk.Button.__init__(self, parent, text=text_, command=command_, activebackground=parent.cget("bg"), activeforeground=Colors.DARK, bg=parent.cget("bg"), borderwidth=0, cursor="hand2", fg=Colors.DARK, font="-size 10", padx=14, relief="flat" ) self.bind("<Return>", command_) def change_text(self, new_text): self.config(text=new_text) # getting user input class Selectable(tk.Button): """Selectable text, can be toggled (selected - deselected). 
Attributes: parent (Frame): container frame text_ (str): displayed text """ def __init__(self, parent, text_): tk.Button.__init__(self, parent, text=text_, activebackground=parent.cget("bg"), activeforeground=Colors.MEDIUM, bg=parent.cget("bg"), borderwidth=0, compound="center", cursor="hand2", font="-size 11", relief="flat" ) self.__text = text_ self.__normal_img = ImageImporter.load("selectable/empty") self.__selected_img = ImageImporter.load("selectable/SELECTED") self.config(command=lambda: self.select()) self.bind("<Return>", lambda x: self.select()) self.mark_unselected() def get_label(self): return self.__text def mark_selected(self): self.config(image=self.__selected_img, fg=Colors.LIGHT) def mark_unselected(self): self.config(image=self.__normal_img, fg=Colors.WHITE) def select(self): self.mark_selected() class MultiSelectable(Selectable): """Selectable; more than one element can be selected. Attributes: parent (Frame): container frame text_ (str): displayed text """ def __init__(self, parent, text_): Selectable.__init__(self, parent, text_) self.__selected = False def is_selected(self): return self.__selected def select(self): if self.__selected: self.mark_unselected() else: self.mark_selected() self.__selected = not self.__selected class Option(Selectable): """Selectable; only one element can be selected at a time. Attributes: parent (Frame): container frame text_ (str): displayed text provider: object that offers this option """ def __init__(self, parent, text_, provider): Selectable.__init__(self, parent, text_) self.__object = provider def select(self): self.__object.select(self.get_label()) class BigField(tk.Frame): """Big input field with text. Perfect for character level input. 
Attributes: parent (Frame): container frame name_ (str): bg image file name """ def __init__(self, parent, name_): tk.Frame.__init__(self, parent, bg=parent.cget("bg")) self.__selected_bg = ImageImporter.load("bigfield/SELECTED_" + name_) self.__error_bg = ImageImporter.load("bigfield/ERROR_" + name_) self.__background_label = tk.Label(self, bg=self.cget("bg")) self.__background_label.grid(row=0, column=0) self.__entry = tk.Entry(self, bg=Colors.SHADOW, borderwidth=0, fg=Colors.TEXT, font="-size 38", insertwidth=2, justify="center", relief="flat", width=3 ) self.__entry.grid(row=0, column=0) self.mark_valid() def get_input(self): return self.__entry.get() def mark_invalid(self): self.__background_label.config(image=self.__error_bg) self.__entry.config(insertbackground=Colors.ERROR) self.set_focus() def mark_valid(self): self.__background_label.config(image=self.__selected_bg) self.__entry.config(insertbackground=Colors.MEDIUM) def set_focus(self): self.__entry.focus_set() class SmallField(tk.Frame): """Small input field with text. Made for skill level input. 
Attributes: parent (Frame): container frame name_ (str): field name """ def __init__(self, parent, name_): tk.Frame.__init__(self, parent, bg=parent.cget("bg")) self.__name = name_ file_name = name_.replace(" ", "_") # no whitespace in file names self.__selected_bg = ImageImporter.load("smallfield/" + file_name) self.__error_bg = ImageImporter.load("smallfield/ERROR_" + file_name) self.__background_label = tk.Label(self, bg=self.cget("bg")) self.__background_label.grid(row=0, column=0) self.__entry = tk.Entry(self, bg=Colors.SHADOW, borderwidth=0, fg=Colors.TEXT, font="-size 24", insertwidth=2, justify="center", relief="flat", width=3 ) self.__entry.grid(row=0, column=0, pady=24, sticky="s") self.mark_valid() def get_input(self): return self.__entry.get() def get_label(self): return self.__name def mark_invalid(self): self.__background_label.config(image=self.__error_bg) self.__entry.config(insertbackground=Colors.ERROR) self.set_focus() def mark_valid(self): self.__background_label.config(image=self.__selected_bg) self.__entry.config(insertbackground=Colors.MEDIUM) def set_focus(self): self.__entry.focus_set() # visualisation class BreadcrumbMarker(tk.Label): """Breadcrumb button displaying status of a step. Attributes: parent (Frame): frame that contains this image i (int): position/index """ def __init__(self, parent, i): tk.Label.__init__(self, parent, bg=parent.cget("bg"), borderwidth=0) self.__i = i # index/id self.__old = ImageImporter.load("bread/OLD") self.__now = ImageImporter.load("bread/NOW") self.__new = ImageImporter.load("bread/NEW") self.refresh() def refresh(self, n=0): if self.__i < n: self.config(image=self.__old) elif self.__i == n: self.config(image=self.__now) else: self.config(image=self.__new) # TODO: make more reusable class ResultTable(tk.Frame): """Displays result data (returned by calculator-functions) in a table. 
Attributes: parent (tk.Frame): container data (dict): displayed result data """ def __init__(self, parent, data): tk.Frame.__init__(self, parent, bg=parent.cget("bg")) headlines = ["SKILL", "CURRENT", "GOAL", "TRAIN", "LEGENDARY"] for i in range(len(headlines)): Image(self, "headlines/" + headlines[i]).grid(row=0, column=i, pady=15) sorted_relevant_skills = sorted(skill for skill in data.keys() if data[skill]["Times Leveled"] != 0) for i in range(len(sorted_relevant_skills)): skill = sorted_relevant_skills[i] entry = data[skill] TableEntry(self, skill, True).grid(row=i + 1, column=0, pady=7) TableEntry(self, entry["Start Level"]).grid(row=i + 1, column=1) TableEntry(self, entry["Final Level"]).grid(row=i + 1, column=2) TableEntry(self, str(entry["Times Leveled"]) + "x", True).grid( row=i + 1, column=3) TableEntry(self, str(entry["Times Legendary"]) + "x").grid( row=i + 1, column=4) class TabMarker(tk.Label): """Display if a tab is selected. Can be controlled by a TabButton. Attributes: parent (Frame): container frame markers (list): list of all markers """ def __init__(self, parent, markers): tk.Label.__init__(self, parent, bg=parent.cget("bg"), borderwidth=0) self.__markers = markers self.__deselected = ImageImporter.load("tab/markers/deselected") self.__selected = ImageImporter.load("tab/markers/selected") self.deselect() def deselect(self): self.config(image=self.__deselected) def select(self): for marker in self.__markers: marker.deselect() self.config(image=self.__selected) # other class Colors: """Some predefined colors.""" BG = "#1A1816" SHADOW = "#12110F" TEXT = "#C0BFBF" ERROR = "#F22613" WHITE = "#EFEFEF" BLACK = "#080706" LIGHT = "#D0B180" MEDIUM = "#937E62" DARK = "#584D45" DARKER = "#2F2924" class Image(tk.Label): """Display a .png-image imported by the ImageImporter. 
Attributes: parent (Frame): parent frame name_ (str): image file name """ def __init__(self, parent, name_): tk.Label.__init__(self, parent, bg=parent.cget("bg"), borderwidth=0) self.__image = ImageImporter.load(name_) self.config(image=self.__image) class ImageImporter: """Import a .png-image from /res.""" @staticmethod def load(image): from PIL import Image, ImageTk return ImageTk.PhotoImage(Image.open("res/" + image + ".png")) if __name__ == "__main__": import sys import inspect print(__doc__, "\n") for name, obj in inspect.getmembers(sys.modules[__name__]): if inspect.isclass(obj): print(obj.__name__, "\n", obj.__doc__, "\n\n")
mit
ValeriaVG/eselcss
node_modules/node-gyp/gyp/pylib/gyp/generator/dump_dependency_json.py
1534
3426
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import collections import os import gyp import gyp.common import gyp.msvs_emulation import json import sys generator_supports_multiple_toolsets = True generator_wants_static_library_dependencies_adjusted = False generator_filelist_paths = { } generator_default_variables = { } for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR', 'LIB_DIR', 'SHARED_LIB_DIR']: # Some gyp steps fail if these are empty(!). generator_default_variables[dirname] = 'dir' for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME', 'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT', 'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX', 'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX', 'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX', 'CONFIGURATION_NAME']: generator_default_variables[unused] = '' def CalculateVariables(default_variables, params): generator_flags = params.get('generator_flags', {}) for key, val in generator_flags.items(): default_variables.setdefault(key, val) default_variables.setdefault('OS', gyp.common.GetFlavor(params)) flavor = gyp.common.GetFlavor(params) if flavor =='win': # Copy additional generator configuration data from VS, which is shared # by the Windows Ninja generator. 
import gyp.generator.msvs as msvs_generator generator_additional_non_configuration_keys = getattr(msvs_generator, 'generator_additional_non_configuration_keys', []) generator_additional_path_sections = getattr(msvs_generator, 'generator_additional_path_sections', []) gyp.msvs_emulation.CalculateCommonVariables(default_variables, params) def CalculateGeneratorInputInfo(params): """Calculate the generator specific info that gets fed to input (called by gyp).""" generator_flags = params.get('generator_flags', {}) if generator_flags.get('adjust_static_libraries', False): global generator_wants_static_library_dependencies_adjusted generator_wants_static_library_dependencies_adjusted = True toplevel = params['options'].toplevel_dir generator_dir = os.path.relpath(params['options'].generator_output or '.') # output_dir: relative path from generator_dir to the build directory. output_dir = generator_flags.get('output_dir', 'out') qualified_out_dir = os.path.normpath(os.path.join( toplevel, generator_dir, output_dir, 'gypfiles')) global generator_filelist_paths generator_filelist_paths = { 'toplevel': toplevel, 'qualified_out_dir': qualified_out_dir, } def GenerateOutput(target_list, target_dicts, data, params): # Map of target -> list of targets it depends on. edges = {} # Queue of targets to visit. targets_to_visit = target_list[:] while len(targets_to_visit) > 0: target = targets_to_visit.pop() if target in edges: continue edges[target] = [] for dep in target_dicts[target].get('dependencies', []): edges[target].append(dep) targets_to_visit.append(dep) try: filepath = params['generator_flags']['output_dir'] except KeyError: filepath = '.' filename = os.path.join(filepath, 'dump.json') f = open(filename, 'w') json.dump(edges, f) f.close() print 'Wrote json to %s.' % filename
mpl-2.0
nanditav/15712-TensorFlow
tensorflow/python/kernel_tests/matrix_solve_ls_op_test.py
12
8164
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tensorflow.ops.math_ops.matrix_solve.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf def BatchMatMul(a, b): # A numpy implementation of tf.batch_matmul(). if a.ndim < 3: return np.dot(a, b) # Get the number of matrices. n = np.prod(a.shape[:-2]) assert n == np.prod(b.shape[:-2]) a_flat = np.reshape(a, tuple([n]) + a.shape[-2:]) b_flat = np.reshape(b, tuple([n]) + b.shape[-2:]) c_flat_shape = [n, a.shape[-2], b.shape[-1]] c_flat = np.empty(c_flat_shape) for i in range(n): c_flat[i, :, :] = np.dot(a_flat[i, :, :], b_flat[i, :, :]) return np.reshape(c_flat, a.shape[:-1] + b_flat.shape[-1:]) def BatchRegularizedLeastSquares(matrices, rhss, l2_regularization=0.0): # A numpy implementation of regularized least squares solver using # the normal equations. 
matrix_dims = matrices.shape matrices_transposed = np.swapaxes(matrices, -2, -1) rows = matrix_dims[-2] cols = matrix_dims[-1] if rows >= cols: preconditioner = l2_regularization * np.identity(cols) gramian = BatchMatMul(matrices_transposed, matrices) + preconditioner inverse = np.linalg.inv(gramian) left_pseudo_inverse = BatchMatMul(inverse, matrices_transposed) return BatchMatMul(left_pseudo_inverse, rhss) else: preconditioner = l2_regularization * np.identity(rows) gramian = BatchMatMul(matrices, matrices_transposed) + preconditioner inverse = np.linalg.inv(gramian) right_pseudo_inverse = BatchMatMul(matrices_transposed, inverse) return BatchMatMul(right_pseudo_inverse, rhss) class MatrixSolveLsOpTest(tf.test.TestCase): def _verifySolve(self, x, y): for np_type in [np.float32, np.float64]: a = x.astype(np_type) b = y.astype(np_type) np_ans, _, _, _ = np.linalg.lstsq(a, b) for fast in [True, False]: with self.test_session(): tf_ans = tf.matrix_solve_ls(a, b, fast=fast) ans = tf_ans.eval() self.assertEqual(np_ans.shape, tf_ans.get_shape()) self.assertEqual(np_ans.shape, ans.shape) # Check residual norm. tf_r = b - BatchMatMul(a, ans) tf_r_norm = np.sum(tf_r * tf_r) np_r = b - BatchMatMul(a, np_ans) np_r_norm = np.sum(np_r * np_r) self.assertAllClose(np_r_norm, tf_r_norm) # Check solution. if fast or a.shape[0] >= a.shape[1]: # We skip this test for the underdetermined case when using the # slow path, because Eigen does not return a minimum norm solution. # TODO(rmlarsen): Enable this check for all paths if/when we fix # Eigen's solver. self.assertAllClose(np_ans, ans, atol=1e-5, rtol=1e-5) def _verifySolveBatch(self, x, y): # Since numpy.linalg.lsqr does not support batch solves, as opposed # to numpy.linalg.solve, we just perform this test for a fixed batch size # of 2x3. 
for np_type in [np.float32, np.float64]: a = np.tile(x.astype(np_type), [2, 3, 1, 1]) b = np.tile(y.astype(np_type), [2, 3, 1, 1]) np_ans = np.empty([2, 3, a.shape[-1], b.shape[-1]]) for dim1 in range(2): for dim2 in range(3): np_ans[dim1, dim2, :, :], _, _, _ = np.linalg.lstsq( a[dim1, dim2, :, :], b[dim1, dim2, :, :]) for fast in [True, False]: with self.test_session(): tf_ans = tf.matrix_solve_ls(a, b, fast=fast).eval() self.assertEqual(np_ans.shape, tf_ans.shape) # Check residual norm. tf_r = b - BatchMatMul(a, tf_ans) tf_r_norm = np.sum(tf_r * tf_r) np_r = b - BatchMatMul(a, np_ans) np_r_norm = np.sum(np_r * np_r) self.assertAllClose(np_r_norm, tf_r_norm) # Check solution. if fast or a.shape[-2] >= a.shape[-1]: # We skip this test for the underdetermined case when using the # slow path, because Eigen does not return a minimum norm solution. # TODO(rmlarsen): Enable this check for all paths if/when we fix # Eigen's solver. self.assertAllClose(np_ans, tf_ans, atol=1e-5, rtol=1e-5) def _verifyRegularized(self, x, y, l2_regularizer): for np_type in [np.float32, np.float64]: # Test with a single matrix. a = x.astype(np_type) b = y.astype(np_type) np_ans = BatchRegularizedLeastSquares(a, b, l2_regularizer) with self.test_session(): # Test matrix_solve_ls on regular matrices tf_ans = tf.matrix_solve_ls( a, b, l2_regularizer=l2_regularizer, fast=True).eval() self.assertAllClose(np_ans, tf_ans, atol=1e-5, rtol=1e-5) # Test with a 2x3 batch of matrices. a = np.tile(x.astype(np_type), [2, 3, 1, 1]) b = np.tile(y.astype(np_type), [2, 3, 1, 1]) np_ans = BatchRegularizedLeastSquares(a, b, l2_regularizer) with self.test_session(): tf_ans = tf.matrix_solve_ls( a, b, l2_regularizer=l2_regularizer, fast=True).eval() self.assertAllClose(np_ans, tf_ans, atol=1e-5, rtol=1e-5) def testSquare(self): # 2x2 matrices, 2x3 right-hand sides. 
matrix = np.array([[1., 2.], [3., 4.]]) rhs = np.array([[1., 0., 1.], [0., 1., 1.]]) self._verifySolve(matrix, rhs) self._verifySolveBatch(matrix, rhs) self._verifyRegularized(matrix, rhs, l2_regularizer=0.1) def testOverdetermined(self): # 2x2 matrices, 2x3 right-hand sides. matrix = np.array([[1., 2.], [3., 4.], [5., 6.]]) rhs = np.array([[1., 0., 1.], [0., 1., 1.], [1., 1., 0.]]) self._verifySolve(matrix, rhs) self._verifySolveBatch(matrix, rhs) self._verifyRegularized(matrix, rhs, l2_regularizer=0.1) def testUnderdetermined(self): # 2x2 matrices, 2x3 right-hand sides. matrix = np.array([[1., 2., 3], [4., 5., 6.]]) rhs = np.array([[1., 0., 1.], [0., 1., 1.]]) self._verifySolve(matrix, rhs) self._verifySolveBatch(matrix, rhs) self._verifyRegularized(matrix, rhs, l2_regularizer=0.1) def testWrongDimensions(self): # The matrix and right-hand sides should have the same number of rows. with self.test_session(): matrix = tf.constant([[1., 0.], [0., 1.]]) rhs = tf.constant([[1., 0.]]) with self.assertRaises(ValueError): tf.matrix_solve_ls(matrix, rhs) def testEmpty(self): full = np.array([[1., 2.], [3., 4.], [5., 6.]]) empty0 = np.empty([3, 0]) empty1 = np.empty([0, 2]) for fast in [True, False]: with self.test_session(): tf_ans = tf.matrix_solve_ls(empty0, empty0, fast=fast).eval() self.assertEqual(tf_ans.shape, (0, 0)) tf_ans = tf.matrix_solve_ls(empty0, full, fast=fast).eval() self.assertEqual(tf_ans.shape, (0, 2)) tf_ans = tf.matrix_solve_ls(full, empty0, fast=fast).eval() self.assertEqual(tf_ans.shape, (2, 0)) tf_ans = tf.matrix_solve_ls(empty1, empty1, fast=fast).eval() self.assertEqual(tf_ans.shape, (2, 2)) def testBatchResultSize(self): # 3x3x3 matrices, 3x3x1 right-hand sides. matrix = np.array([1., 2., 3., 4., 5., 6., 7., 8., 9.] * 3).reshape(3, 3, 3) rhs = np.array([1., 2., 3.] 
* 3).reshape(3, 3, 1) answer = tf.matrix_solve(matrix, rhs) ls_answer = tf.matrix_solve_ls(matrix, rhs) self.assertEqual(ls_answer.get_shape(), [3, 3, 1]) self.assertEqual(answer.get_shape(), [3, 3, 1]) if __name__ == "__main__": tf.test.main()
apache-2.0
iuliat/nova
nova/virt/ironic/ironic_states.py
36
4259
# Copyright (c) 2012 NTT DOCOMO, INC. # Copyright 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Mapping of bare metal node states. Setting the node `power_state` is handled by the conductor's power synchronization thread. Based on the power state retrieved from the driver for the node, the state is set to POWER_ON or POWER_OFF, accordingly. Should this fail, the `power_state` value is left unchanged, and the node is placed into maintenance mode. The `power_state` can also be set manually via the API. A failure to change the state leaves the current state unchanged. The node is NOT placed into maintenance mode in this case. """ ##################### # Provisioning states ##################### NOSTATE = None """ No state information. This state is used with power_state to represent a lack of knowledge of power state, and in target_*_state fields when there is no target. Prior to the Kilo release, Ironic set node.provision_state to NOSTATE when the node was available for provisioning. During Kilo cycle, this was changed to the AVAILABLE state. """ MANAGEABLE = 'manageable' """ Node is in a manageable state. This state indicates that Ironic has verified, at least once, that it had sufficient information to manage the hardware. While in this state, the node is not available for provisioning (it must be in the AVAILABLE state for that). """ AVAILABLE = 'available' """ Node is available for use and scheduling. 
This state is replacing the NOSTATE state used prior to Kilo. """ ACTIVE = 'active' """ Node is successfully deployed and associated with an instance. """ DEPLOYWAIT = 'wait call-back' """ Node is waiting to be deployed. This will be the node `provision_state` while the node is waiting for the driver to finish deployment. """ DEPLOYING = 'deploying' """ Node is ready to receive a deploy request, or is currently being deployed. A node will have its `provision_state` set to DEPLOYING briefly before it receives its initial deploy request. It will also move to this state from DEPLOYWAIT after the callback is triggered and deployment is continued (disk partitioning and image copying). """ DEPLOYFAIL = 'deploy failed' """ Node deployment failed. """ DEPLOYDONE = 'deploy complete' """ Node was successfully deployed. This is mainly a target provision state used during deployment. A successfully deployed node should go to ACTIVE status. """ DELETING = 'deleting' """ Node is actively being torn down. """ DELETED = 'deleted' """ Node tear down was successful. In Juno, target_provision_state was set to this value during node tear down. In Kilo, this will be a transitory value of provision_state, and never represented in target_provision_state. """ CLEANING = 'cleaning' """ Node is being automatically cleaned to prepare it for provisioning. """ CLEANFAIL = 'clean failed' """ Node failed cleaning. This requires operator intervention to resolve. """ ERROR = 'error' """ An error occurred during node processing. The `last_error` attribute of the node details should contain an error message. """ REBUILD = 'rebuild' """ Node is to be rebuilt. This is not used as a state, but rather as a "verb" when changing the node's provision_state via the REST API. """ INSPECTING = 'inspecting' """ Node is under inspection. This is the provision state used when inspection is started. A successfully inspected node shall transition to MANAGEABLE status. 
""" INSPECTFAIL = 'inspect failed' """ Node inspection failed. """ ############## # Power states ############## POWER_ON = 'power on' """ Node is powered on. """ POWER_OFF = 'power off' """ Node is powered off. """ REBOOT = 'rebooting' """ Node is rebooting. """
apache-2.0
CatsAndDogsbvba/odoo
addons/website_sale/models/payment.py
46
2553
# -*- coding: utf-8 -*- import logging from openerp import SUPERUSER_ID from openerp.osv import orm, fields from openerp.tools import float_compare _logger = logging.getLogger(__name__) class PaymentTransaction(orm.Model): _inherit = 'payment.transaction' _columns = { # link with the sale order 'sale_order_id': fields.many2one('sale.order', 'Sale Order'), } def form_feedback(self, cr, uid, data, acquirer_name, context=None): """ Override to confirm the sale order, if defined, and if the transaction is done. """ tx = None res = super(PaymentTransaction, self).form_feedback(cr, uid, data, acquirer_name, context=context) # fetch the tx, check its state, confirm the potential SO try: tx_find_method_name = '_%s_form_get_tx_from_data' % acquirer_name if hasattr(self, tx_find_method_name): tx = getattr(self, tx_find_method_name)(cr, uid, data, context=context) _logger.info('<%s> transaction processed: tx ref:%s, tx amount: %s', acquirer_name, tx.reference if tx else 'n/a', tx.amount if tx else 'n/a') if tx and tx.sale_order_id: # verify SO/TX match, excluding tx.fees which are currently not included in SO amount_matches = (tx.sale_order_id.state in ['draft', 'sent'] and float_compare(tx.amount, tx.sale_order_id.amount_total, 2) == 0) if amount_matches: if tx.state == 'done': _logger.info('<%s> transaction completed, confirming order %s (ID %s)', acquirer_name, tx.sale_order_id.name, tx.sale_order_id.id) self.pool['sale.order'].action_button_confirm(cr, SUPERUSER_ID, [tx.sale_order_id.id], context=dict(context, send_email=True)) elif tx.state != 'cancel' and tx.sale_order_id.state == 'draft': _logger.info('<%s> transaction pending, sending quote email for order %s (ID %s)', acquirer_name, tx.sale_order_id.name, tx.sale_order_id.id) self.pool['sale.order'].force_quotation_send(cr, SUPERUSER_ID, [tx.sale_order_id.id], context=context) else: _logger.warning('<%s> transaction MISMATCH for order %s (ID %s)', acquirer_name, tx.sale_order_id.name, tx.sale_order_id.id) except 
Exception: _logger.exception('Fail to confirm the order or send the confirmation email%s', tx and ' for the transaction %s' % tx.reference or '') return res
agpl-3.0
FlorianLudwig/odoo
addons/subscription/__openerp__.py
261
1885
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Recurring Documents', 'version': '1.0', 'category': 'Tools', 'description': """ Create recurring documents. =========================== This module allows to create new documents and add subscriptions on that document. e.g. To have an invoice generated automatically periodically: ------------------------------------------------------------- * Define a document type based on Invoice object * Define a subscription whose source document is the document defined as above. Specify the interval information and partner to be invoice. """, 'author': 'OpenERP SA', 'depends': ['base'], 'data': ['security/subcription_security.xml', 'security/ir.model.access.csv', 'subscription_view.xml'], 'demo': ['subscription_demo.xml',], 'installable': True, 'auto_install': False, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
gauribhoite/personfinder
env/google_appengine/lib/grizzled/grizzled/test/TestProxy.py
19
2316
# Nose program for testing grizzled.proxy class. from __future__ import absolute_import # --------------------------------------------------------------------------- # Imports # --------------------------------------------------------------------------- from grizzled.proxy import Forwarder import tempfile from grizzled.file import unlink_quietly from .test_helpers import exception_expected # --------------------------------------------------------------------------- # Globals # --------------------------------------------------------------------------- # --------------------------------------------------------------------------- # Classes # --------------------------------------------------------------------------- class ForwardToFile(Forwarder): def __init__(self, file, *exceptions): Forwarder.__init__(self, file, exceptions) class TestProxyPackage(object): def test_forward_all(self): path = self._create_file() try: with open(path) as f: contents = ''.join(f.readlines()) with open(path) as f: fwd = ForwardToFile(f) contents2 = ''.join(fwd.readlines()) assert contents2 == contents finally: unlink_quietly(path) def test_forward_all_but_name(self): path = self._create_file() try: with exception_expected(AttributeError): with open(path) as f: fwd = ForwardToFile(f, 'name', 'foo') fwd.name finally: unlink_quietly(path) def test_forward_all_but_name_mode(self): path = self._create_file() try: with open(path) as f: fwd = ForwardToFile(f, 'name', 'mode') fwd.closed # should not fail with exception_expected(AttributeError): fwd.name with exception_expected(AttributeError): fwd.mode finally: unlink_quietly(path) def _create_file(self): temp = tempfile.NamedTemporaryFile(prefix="fwdtest", delete=False) temp.write(', '.join([str(x) for x in range(1, 81)])) temp.write(', '.join([str(x) for x in range(1, 21)])) temp.close return temp.name
apache-2.0
gauribhoite/personfinder
env/google_appengine/lib/setuptools-0.6c11/setuptools/command/rotate.py
167
2021
import distutils, os from setuptools import Command from distutils.util import convert_path from distutils import log from distutils.errors import * class rotate(Command): """Delete older distributions""" description = "delete older distributions, keeping N newest files" user_options = [ ('match=', 'm', "patterns to match (required)"), ('dist-dir=', 'd', "directory where the distributions are"), ('keep=', 'k', "number of matching distributions to keep"), ] boolean_options = [] def initialize_options(self): self.match = None self.dist_dir = None self.keep = None def finalize_options(self): if self.match is None: raise DistutilsOptionError( "Must specify one or more (comma-separated) match patterns " "(e.g. '.zip' or '.egg')" ) if self.keep is None: raise DistutilsOptionError("Must specify number of files to keep") try: self.keep = int(self.keep) except ValueError: raise DistutilsOptionError("--keep must be an integer") if isinstance(self.match, basestring): self.match = [ convert_path(p.strip()) for p in self.match.split(',') ] self.set_undefined_options('bdist',('dist_dir', 'dist_dir')) def run(self): self.run_command("egg_info") from glob import glob for pattern in self.match: pattern = self.distribution.get_name()+'*'+pattern files = glob(os.path.join(self.dist_dir,pattern)) files = [(os.path.getmtime(f),f) for f in files] files.sort() files.reverse() log.info("%d file(s) matching %s", len(files), pattern) files = files[self.keep:] for (t,f) in files: log.info("Deleting %s", f) if not self.dry_run: os.unlink(f)
apache-2.0
3quarterstack/simple_blog
django/views/decorators/clickjacking.py
550
1759
from functools import wraps from django.utils.decorators import available_attrs def xframe_options_deny(view_func): """ Modifies a view function so its response has the X-Frame-Options HTTP header set to 'DENY' as long as the response doesn't already have that header set. e.g. @xframe_options_deny def some_view(request): ... """ def wrapped_view(*args, **kwargs): resp = view_func(*args, **kwargs) if resp.get('X-Frame-Options', None) is None: resp['X-Frame-Options'] = 'DENY' return resp return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view) def xframe_options_sameorigin(view_func): """ Modifies a view function so its response has the X-Frame-Options HTTP header set to 'SAMEORIGIN' as long as the response doesn't already have that header set. e.g. @xframe_options_sameorigin def some_view(request): ... """ def wrapped_view(*args, **kwargs): resp = view_func(*args, **kwargs) if resp.get('X-Frame-Options', None) is None: resp['X-Frame-Options'] = 'SAMEORIGIN' return resp return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view) def xframe_options_exempt(view_func): """ Modifies a view function by setting a response variable that instructs XFrameOptionsMiddleware to NOT set the X-Frame-Options HTTP header. e.g. @xframe_options_exempt def some_view(request): ... """ def wrapped_view(*args, **kwargs): resp = view_func(*args, **kwargs) resp.xframe_options_exempt = True return resp return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view)
mit
naveentata/coala-bears
bears/go/GoErrCheckBear.py
21
2453
from coalib.bearlib.abstractions.Linter import linter from dependency_management.requirements.GoRequirement import GoRequirement from coalib.settings.Setting import typed_list @linter(executable='errcheck', output_format='regex', output_regex=r'[^:]+:(?P<line>\d+):' r'(?P<column>\d+)\s*(?P<message>.*)', result_message='This function call has an unchecked error.') class GoErrCheckBear: """ Checks the code for all function calls that have unchecked errors. GoErrCheckBear runs ``errcheck`` over each file to find such functions. For more information on the analysis visit <https://github.com/kisielk/errcheck>. """ LANGUAGES = {'Go'} REQUIREMENTS = {GoRequirement( package='github.com/kisielk/errcheck', flag='-u')} AUTHORS = {'The coala developers'} AUTHORS_EMAILS = {'coala-devel@googlegroups.com'} LICENSE = 'AGPL-3.0' ASCIINEMA_URL = 'https://asciinema.org/a/46834' CAN_DETECT = {'Syntax'} @staticmethod def create_arguments(filename, file, config_file, ignore: typed_list(str)=[], ignorepkg: typed_list(str)=[], asserts: bool=False, blank: bool=False): """ Bear configuration arguments. :param ignore: Comma-separated list of pairs of the form package:regex. For each package, the regex describes which functions to ignore within that package. The package may be omitted to have the regex apply to all packages. :param ignorepkg: Takes a comma-separated list of package import paths to ignore. :param asserts: Enables checking for ignored type assertion results. :param blank: Enables checking for assignments of errors to the blank identifier. """ args = () if ignore: args += ('-ignore', ','.join(part.strip() for part in ignore)) if ignorepkg: args += ('-ignorepkg', ','.join(part.strip() for part in ignorepkg)) if blank: args += ('-blank',) if asserts: args += ('-asserts',) return args + (filename,)
agpl-3.0
pasiegel/SickGear
lib/chardet/jpcntx.py
3
19947
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Communicator client code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### from .compat import wrap_ord # This is hiragana 2-char sequence table, the number in each cell represents its frequency category jp2CharContext = ( (0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1), (2,4,0,4,0,3,0,4,0,3,4,4,4,2,4,3,3,4,3,2,3,3,4,2,3,3,3,2,4,1,4,3,3,1,5,4,3,4,3,4,3,5,3,0,3,5,4,2,0,3,1,0,3,3,0,3,3,0,1,1,0,4,3,0,3,3,0,4,0,2,0,3,5,5,5,5,4,0,4,1,0,3,4), (0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2), (0,4,0,5,0,5,0,4,0,4,5,4,4,3,5,3,5,1,5,3,4,3,4,4,3,4,3,3,4,3,5,4,4,3,5,5,3,5,5,5,3,5,5,3,4,5,5,3,1,3,2,0,3,4,0,4,2,0,4,2,1,5,3,2,3,5,0,4,0,2,0,5,4,4,5,4,5,0,4,0,0,4,4), 
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), (0,3,0,4,0,3,0,3,0,4,5,4,3,3,3,3,4,3,5,4,4,3,5,4,4,3,4,3,4,4,4,4,5,3,4,4,3,4,5,5,4,5,5,1,4,5,4,3,0,3,3,1,3,3,0,4,4,0,3,3,1,5,3,3,3,5,0,4,0,3,0,4,4,3,4,3,3,0,4,1,1,3,4), (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), (0,4,0,3,0,3,0,4,0,3,4,4,3,2,2,1,2,1,3,1,3,3,3,3,3,4,3,1,3,3,5,3,3,0,4,3,0,5,4,3,3,5,4,4,3,4,4,5,0,1,2,0,1,2,0,2,2,0,1,0,0,5,2,2,1,4,0,3,0,1,0,4,4,3,5,4,3,0,2,1,0,4,3), (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), (0,3,0,5,0,4,0,2,1,4,4,2,4,1,4,2,4,2,4,3,3,3,4,3,3,3,3,1,4,2,3,3,3,1,4,4,1,1,1,4,3,3,2,0,2,4,3,2,0,3,3,0,3,1,1,0,0,0,3,3,0,4,2,2,3,4,0,4,0,3,0,4,4,5,3,4,4,0,3,0,0,1,4), (1,4,0,4,0,4,0,4,0,3,5,4,4,3,4,3,5,4,3,3,4,3,5,4,4,4,4,3,4,2,4,3,3,1,5,4,3,2,4,5,4,5,5,4,4,5,4,4,0,3,2,2,3,3,0,4,3,1,3,2,1,4,3,3,4,5,0,3,0,2,0,4,5,5,4,5,4,0,4,0,0,5,4), (0,5,0,5,0,4,0,3,0,4,4,3,4,3,3,3,4,0,4,4,4,3,4,3,4,3,3,1,4,2,4,3,4,0,5,4,1,4,5,4,4,5,3,2,4,3,4,3,2,4,1,3,3,3,2,3,2,0,4,3,3,4,3,3,3,4,0,4,0,3,0,4,5,4,4,4,3,0,4,1,0,1,3), (0,3,1,4,0,3,0,2,0,3,4,4,3,1,4,2,3,3,4,3,4,3,4,3,4,4,3,2,3,1,5,4,4,1,4,4,3,5,4,4,3,5,5,4,3,4,4,3,1,2,3,1,2,2,0,3,2,0,3,1,0,5,3,3,3,4,3,3,3,3,4,4,4,4,5,4,2,0,3,3,2,4,3), (0,2,0,3,0,1,0,1,0,0,3,2,0,0,2,0,1,0,2,1,3,3,3,1,2,3,1,0,1,0,4,2,1,1,3,3,0,4,3,3,1,4,3,3,0,3,3,2,0,0,0,0,1,0,0,2,0,0,0,0,0,4,1,0,2,3,2,2,2,1,3,3,3,4,4,3,2,0,3,1,0,3,3), (0,4,0,4,0,3,0,3,0,4,4,4,3,3,3,3,3,3,4,3,4,2,4,3,4,3,3,2,4,3,4,5,4,1,4,5,3,5,4,5,3,5,4,0,3,5,5,3,1,3,3,2,2,3,0,3,4,1,3,3,2,4,3,3,3,4,0,4,0,3,0,4,5,4,4,5,3,0,4,1,0,3,4), 
(0,2,0,3,0,3,0,0,0,2,2,2,1,0,1,0,0,0,3,0,3,0,3,0,1,3,1,0,3,1,3,3,3,1,3,3,3,0,1,3,1,3,4,0,0,3,1,1,0,3,2,0,0,0,0,1,3,0,1,0,0,3,3,2,0,3,0,0,0,0,0,3,4,3,4,3,3,0,3,0,0,2,3), (2,3,0,3,0,2,0,1,0,3,3,4,3,1,3,1,1,1,3,1,4,3,4,3,3,3,0,0,3,1,5,4,3,1,4,3,2,5,5,4,4,4,4,3,3,4,4,4,0,2,1,1,3,2,0,1,2,0,0,1,0,4,1,3,3,3,0,3,0,1,0,4,4,4,5,5,3,0,2,0,0,4,4), (0,2,0,1,0,3,1,3,0,2,3,3,3,0,3,1,0,0,3,0,3,2,3,1,3,2,1,1,0,0,4,2,1,0,2,3,1,4,3,2,0,4,4,3,1,3,1,3,0,1,0,0,1,0,0,0,1,0,0,0,0,4,1,1,1,2,0,3,0,0,0,3,4,2,4,3,2,0,1,0,0,3,3), (0,1,0,4,0,5,0,4,0,2,4,4,2,3,3,2,3,3,5,3,3,3,4,3,4,2,3,0,4,3,3,3,4,1,4,3,2,1,5,5,3,4,5,1,3,5,4,2,0,3,3,0,1,3,0,4,2,0,1,3,1,4,3,3,3,3,0,3,0,1,0,3,4,4,4,5,5,0,3,0,1,4,5), (0,2,0,3,0,3,0,0,0,2,3,1,3,0,4,0,1,1,3,0,3,4,3,2,3,1,0,3,3,2,3,1,3,0,2,3,0,2,1,4,1,2,2,0,0,3,3,0,0,2,0,0,0,1,0,0,0,0,2,2,0,3,2,1,3,3,0,2,0,2,0,0,3,3,1,2,4,0,3,0,2,2,3), (2,4,0,5,0,4,0,4,0,2,4,4,4,3,4,3,3,3,1,2,4,3,4,3,4,4,5,0,3,3,3,3,2,0,4,3,1,4,3,4,1,4,4,3,3,4,4,3,1,2,3,0,4,2,0,4,1,0,3,3,0,4,3,3,3,4,0,4,0,2,0,3,5,3,4,5,2,0,3,0,0,4,5), (0,3,0,4,0,1,0,1,0,1,3,2,2,1,3,0,3,0,2,0,2,0,3,0,2,0,0,0,1,0,1,1,0,0,3,1,0,0,0,4,0,3,1,0,2,1,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,4,2,2,3,1,0,3,0,0,0,1,4,4,4,3,0,0,4,0,0,1,4), (1,4,1,5,0,3,0,3,0,4,5,4,4,3,5,3,3,4,4,3,4,1,3,3,3,3,2,1,4,1,5,4,3,1,4,4,3,5,4,4,3,5,4,3,3,4,4,4,0,3,3,1,2,3,0,3,1,0,3,3,0,5,4,4,4,4,4,4,3,3,5,4,4,3,3,5,4,0,3,2,0,4,4), (0,2,0,3,0,1,0,0,0,1,3,3,3,2,4,1,3,0,3,1,3,0,2,2,1,1,0,0,2,0,4,3,1,0,4,3,0,4,4,4,1,4,3,1,1,3,3,1,0,2,0,0,1,3,0,0,0,0,2,0,0,4,3,2,4,3,5,4,3,3,3,4,3,3,4,3,3,0,2,1,0,3,3), (0,2,0,4,0,3,0,2,0,2,5,5,3,4,4,4,4,1,4,3,3,0,4,3,4,3,1,3,3,2,4,3,0,3,4,3,0,3,4,4,2,4,4,0,4,5,3,3,2,2,1,1,1,2,0,1,5,0,3,3,2,4,3,3,3,4,0,3,0,2,0,4,4,3,5,5,0,0,3,0,2,3,3), (0,3,0,4,0,3,0,1,0,3,4,3,3,1,3,3,3,0,3,1,3,0,4,3,3,1,1,0,3,0,3,3,0,0,4,4,0,1,5,4,3,3,5,0,3,3,4,3,0,2,0,1,1,1,0,1,3,0,1,2,1,3,3,2,3,3,0,3,0,1,0,1,3,3,4,4,1,0,1,2,2,1,3), 
(0,1,0,4,0,4,0,3,0,1,3,3,3,2,3,1,1,0,3,0,3,3,4,3,2,4,2,0,1,0,4,3,2,0,4,3,0,5,3,3,2,4,4,4,3,3,3,4,0,1,3,0,0,1,0,0,1,0,0,0,0,4,2,3,3,3,0,3,0,0,0,4,4,4,5,3,2,0,3,3,0,3,5), (0,2,0,3,0,0,0,3,0,1,3,0,2,0,0,0,1,0,3,1,1,3,3,0,0,3,0,0,3,0,2,3,1,0,3,1,0,3,3,2,0,4,2,2,0,2,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,2,1,2,0,1,0,1,0,0,0,1,3,1,2,0,0,0,1,0,0,1,4), (0,3,0,3,0,5,0,1,0,2,4,3,1,3,3,2,1,1,5,2,1,0,5,1,2,0,0,0,3,3,2,2,3,2,4,3,0,0,3,3,1,3,3,0,2,5,3,4,0,3,3,0,1,2,0,2,2,0,3,2,0,2,2,3,3,3,0,2,0,1,0,3,4,4,2,5,4,0,3,0,0,3,5), (0,3,0,3,0,3,0,1,0,3,3,3,3,0,3,0,2,0,2,1,1,0,2,0,1,0,0,0,2,1,0,0,1,0,3,2,0,0,3,3,1,2,3,1,0,3,3,0,0,1,0,0,0,0,0,2,0,0,0,0,0,2,3,1,2,3,0,3,0,1,0,3,2,1,0,4,3,0,1,1,0,3,3), (0,4,0,5,0,3,0,3,0,4,5,5,4,3,5,3,4,3,5,3,3,2,5,3,4,4,4,3,4,3,4,5,5,3,4,4,3,4,4,5,4,4,4,3,4,5,5,4,2,3,4,2,3,4,0,3,3,1,4,3,2,4,3,3,5,5,0,3,0,3,0,5,5,5,5,4,4,0,4,0,1,4,4), (0,4,0,4,0,3,0,3,0,3,5,4,4,2,3,2,5,1,3,2,5,1,4,2,3,2,3,3,4,3,3,3,3,2,5,4,1,3,3,5,3,4,4,0,4,4,3,1,1,3,1,0,2,3,0,2,3,0,3,0,0,4,3,1,3,4,0,3,0,2,0,4,4,4,3,4,5,0,4,0,0,3,4), (0,3,0,3,0,3,1,2,0,3,4,4,3,3,3,0,2,2,4,3,3,1,3,3,3,1,1,0,3,1,4,3,2,3,4,4,2,4,4,4,3,4,4,3,2,4,4,3,1,3,3,1,3,3,0,4,1,0,2,2,1,4,3,2,3,3,5,4,3,3,5,4,4,3,3,0,4,0,3,2,2,4,4), (0,2,0,1,0,0,0,0,0,1,2,1,3,0,0,0,0,0,2,0,1,2,1,0,0,1,0,0,0,0,3,0,0,1,0,1,1,3,1,0,0,0,1,1,0,1,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,1,2,2,0,3,4,0,0,0,1,1,0,0,1,0,0,0,0,0,1,1), (0,1,0,0,0,1,0,0,0,0,4,0,4,1,4,0,3,0,4,0,3,0,4,0,3,0,3,0,4,1,5,1,4,0,0,3,0,5,0,5,2,0,1,0,0,0,2,1,4,0,1,3,0,0,3,0,0,3,1,1,4,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0), (1,4,0,5,0,3,0,2,0,3,5,4,4,3,4,3,5,3,4,3,3,0,4,3,3,3,3,3,3,2,4,4,3,1,3,4,4,5,4,4,3,4,4,1,3,5,4,3,3,3,1,2,2,3,3,1,3,1,3,3,3,5,3,3,4,5,0,3,0,3,0,3,4,3,4,4,3,0,3,0,2,4,3), (0,1,0,4,0,0,0,0,0,1,4,0,4,1,4,2,4,0,3,0,1,0,1,0,0,0,0,0,2,0,3,1,1,1,0,3,0,0,0,1,2,1,0,0,1,1,1,1,0,1,0,0,0,1,0,0,3,0,0,0,0,3,2,0,2,2,0,1,0,0,0,2,3,2,3,3,0,0,0,0,2,1,0), 
(0,5,1,5,0,3,0,3,0,5,4,4,5,1,5,3,3,0,4,3,4,3,5,3,4,3,3,2,4,3,4,3,3,0,3,3,1,4,4,3,4,4,4,3,4,5,5,3,2,3,1,1,3,3,1,3,1,1,3,3,2,4,5,3,3,5,0,4,0,3,0,4,4,3,5,3,3,0,3,4,0,4,3), (0,5,0,5,0,3,0,2,0,4,4,3,5,2,4,3,3,3,4,4,4,3,5,3,5,3,3,1,4,0,4,3,3,0,3,3,0,4,4,4,4,5,4,3,3,5,5,3,2,3,1,2,3,2,0,1,0,0,3,2,2,4,4,3,1,5,0,4,0,3,0,4,3,1,3,2,1,0,3,3,0,3,3), (0,4,0,5,0,5,0,4,0,4,5,5,5,3,4,3,3,2,5,4,4,3,5,3,5,3,4,0,4,3,4,4,3,2,4,4,3,4,5,4,4,5,5,0,3,5,5,4,1,3,3,2,3,3,1,3,1,0,4,3,1,4,4,3,4,5,0,4,0,2,0,4,3,4,4,3,3,0,4,0,0,5,5), (0,4,0,4,0,5,0,1,1,3,3,4,4,3,4,1,3,0,5,1,3,0,3,1,3,1,1,0,3,0,3,3,4,0,4,3,0,4,4,4,3,4,4,0,3,5,4,1,0,3,0,0,2,3,0,3,1,0,3,1,0,3,2,1,3,5,0,3,0,1,0,3,2,3,3,4,4,0,2,2,0,4,4), (2,4,0,5,0,4,0,3,0,4,5,5,4,3,5,3,5,3,5,3,5,2,5,3,4,3,3,4,3,4,5,3,2,1,5,4,3,2,3,4,5,3,4,1,2,5,4,3,0,3,3,0,3,2,0,2,3,0,4,1,0,3,4,3,3,5,0,3,0,1,0,4,5,5,5,4,3,0,4,2,0,3,5), (0,5,0,4,0,4,0,2,0,5,4,3,4,3,4,3,3,3,4,3,4,2,5,3,5,3,4,1,4,3,4,4,4,0,3,5,0,4,4,4,4,5,3,1,3,4,5,3,3,3,3,3,3,3,0,2,2,0,3,3,2,4,3,3,3,5,3,4,1,3,3,5,3,2,0,0,0,0,4,3,1,3,3), (0,1,0,3,0,3,0,1,0,1,3,3,3,2,3,3,3,0,3,0,0,0,3,1,3,0,0,0,2,2,2,3,0,0,3,2,0,1,2,4,1,3,3,0,0,3,3,3,0,1,0,0,2,1,0,0,3,0,3,1,0,3,0,0,1,3,0,2,0,1,0,3,3,1,3,3,0,0,1,1,0,3,3), (0,2,0,3,0,2,1,4,0,2,2,3,1,1,3,1,1,0,2,0,3,1,2,3,1,3,0,0,1,0,4,3,2,3,3,3,1,4,2,3,3,3,3,1,0,3,1,4,0,1,1,0,1,2,0,1,1,0,1,1,0,3,1,3,2,2,0,1,0,0,0,2,3,3,3,1,0,0,0,0,0,2,3), (0,5,0,4,0,5,0,2,0,4,5,5,3,3,4,3,3,1,5,4,4,2,4,4,4,3,4,2,4,3,5,5,4,3,3,4,3,3,5,5,4,5,5,1,3,4,5,3,1,4,3,1,3,3,0,3,3,1,4,3,1,4,5,3,3,5,0,4,0,3,0,5,3,3,1,4,3,0,4,0,1,5,3), (0,5,0,5,0,4,0,2,0,4,4,3,4,3,3,3,3,3,5,4,4,4,4,4,4,5,3,3,5,2,4,4,4,3,4,4,3,3,4,4,5,5,3,3,4,3,4,3,3,4,3,3,3,3,1,2,2,1,4,3,3,5,4,4,3,4,0,4,0,3,0,4,4,4,4,4,1,0,4,2,0,2,4), (0,4,0,4,0,3,0,1,0,3,5,2,3,0,3,0,2,1,4,2,3,3,4,1,4,3,3,2,4,1,3,3,3,0,3,3,0,0,3,3,3,5,3,3,3,3,3,2,0,2,0,0,2,0,0,2,0,0,1,0,0,3,1,2,2,3,0,3,0,2,0,4,4,3,3,4,1,0,3,0,0,2,4), 
(0,0,0,4,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,1,0,2,0,1,0,0,0,0,0,3,1,3,0,3,2,0,0,0,1,0,3,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,0,2,0,0,0,0,0,0,2), (0,2,1,3,0,2,0,2,0,3,3,3,3,1,3,1,3,3,3,3,3,3,4,2,2,1,2,1,4,0,4,3,1,3,3,3,2,4,3,5,4,3,3,3,3,3,3,3,0,1,3,0,2,0,0,1,0,0,1,0,0,4,2,0,2,3,0,3,3,0,3,3,4,2,3,1,4,0,1,2,0,2,3), (0,3,0,3,0,1,0,3,0,2,3,3,3,0,3,1,2,0,3,3,2,3,3,2,3,2,3,1,3,0,4,3,2,0,3,3,1,4,3,3,2,3,4,3,1,3,3,1,1,0,1,1,0,1,0,1,0,1,0,0,0,4,1,1,0,3,0,3,1,0,2,3,3,3,3,3,1,0,0,2,0,3,3), (0,0,0,0,0,0,0,0,0,0,3,0,2,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,3,0,3,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,2,0,2,3,0,0,0,0,0,0,0,0,3), (0,2,0,3,1,3,0,3,0,2,3,3,3,1,3,1,3,1,3,1,3,3,3,1,3,0,2,3,1,1,4,3,3,2,3,3,1,2,2,4,1,3,3,0,1,4,2,3,0,1,3,0,3,0,0,1,3,0,2,0,0,3,3,2,1,3,0,3,0,2,0,3,4,4,4,3,1,0,3,0,0,3,3), (0,2,0,1,0,2,0,0,0,1,3,2,2,1,3,0,1,1,3,0,3,2,3,1,2,0,2,0,1,1,3,3,3,0,3,3,1,1,2,3,2,3,3,1,2,3,2,0,0,1,0,0,0,0,0,0,3,0,1,0,0,2,1,2,1,3,0,3,0,0,0,3,4,4,4,3,2,0,2,0,0,2,4), (0,0,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,3,1,0,0,0,0,0,0,0,3), (0,3,0,3,0,2,0,3,0,3,3,3,2,3,2,2,2,0,3,1,3,3,3,2,3,3,0,0,3,0,3,2,2,0,2,3,1,4,3,4,3,3,2,3,1,5,4,4,0,3,1,2,1,3,0,3,1,1,2,0,2,3,1,3,1,3,0,3,0,1,0,3,3,4,4,2,1,0,2,1,0,2,4), (0,1,0,3,0,1,0,2,0,1,4,2,5,1,4,0,2,0,2,1,3,1,4,0,2,1,0,0,2,1,4,1,1,0,3,3,0,5,1,3,2,3,3,1,0,3,2,3,0,1,0,0,0,0,0,0,1,0,0,0,0,4,0,1,0,3,0,2,0,1,0,3,3,3,4,3,3,0,0,0,0,2,3), (0,0,0,1,0,0,0,0,0,0,2,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,1,0,0,1,0,0,0,0,0,3), (0,1,0,3,0,4,0,3,0,2,4,3,1,0,3,2,2,1,3,1,2,2,3,1,1,1,2,1,3,0,1,2,0,1,3,2,1,3,0,5,5,1,0,0,1,3,2,1,0,3,0,0,1,0,0,0,0,0,3,4,0,1,1,1,3,2,0,2,0,1,0,2,3,3,1,2,3,0,1,0,1,0,4), 
(0,0,0,1,0,3,0,3,0,2,2,1,0,0,4,0,3,0,3,1,3,0,3,0,3,0,1,0,3,0,3,1,3,0,3,3,0,0,1,2,1,1,1,0,1,2,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,2,2,1,2,0,0,2,0,0,0,0,2,3,3,3,3,0,0,0,0,1,4), (0,0,0,3,0,3,0,0,0,0,3,1,1,0,3,0,1,0,2,0,1,0,0,0,0,0,0,0,1,0,3,0,2,0,2,3,0,0,2,2,3,1,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,2,3), (2,4,0,5,0,5,0,4,0,3,4,3,3,3,4,3,3,3,4,3,4,4,5,4,5,5,5,2,3,0,5,5,4,1,5,4,3,1,5,4,3,4,4,3,3,4,3,3,0,3,2,0,2,3,0,3,0,0,3,3,0,5,3,2,3,3,0,3,0,3,0,3,4,5,4,5,3,0,4,3,0,3,4), (0,3,0,3,0,3,0,3,0,3,3,4,3,2,3,2,3,0,4,3,3,3,3,3,3,3,3,0,3,2,4,3,3,1,3,4,3,4,4,4,3,4,4,3,2,4,4,1,0,2,0,0,1,1,0,2,0,0,3,1,0,5,3,2,1,3,0,3,0,1,2,4,3,2,4,3,3,0,3,2,0,4,4), (0,3,0,3,0,1,0,0,0,1,4,3,3,2,3,1,3,1,4,2,3,2,4,2,3,4,3,0,2,2,3,3,3,0,3,3,3,0,3,4,1,3,3,0,3,4,3,3,0,1,1,0,1,0,0,0,4,0,3,0,0,3,1,2,1,3,0,4,0,1,0,4,3,3,4,3,3,0,2,0,0,3,3), (0,3,0,4,0,1,0,3,0,3,4,3,3,0,3,3,3,1,3,1,3,3,4,3,3,3,0,0,3,1,5,3,3,1,3,3,2,5,4,3,3,4,5,3,2,5,3,4,0,1,0,0,0,0,0,2,0,0,1,1,0,4,2,2,1,3,0,3,0,2,0,4,4,3,5,3,2,0,1,1,0,3,4), (0,5,0,4,0,5,0,2,0,4,4,3,3,2,3,3,3,1,4,3,4,1,5,3,4,3,4,0,4,2,4,3,4,1,5,4,0,4,4,4,4,5,4,1,3,5,4,2,1,4,1,1,3,2,0,3,1,0,3,2,1,4,3,3,3,4,0,4,0,3,0,4,4,4,3,3,3,0,4,2,0,3,4), (1,4,0,4,0,3,0,1,0,3,3,3,1,1,3,3,2,2,3,3,1,0,3,2,2,1,2,0,3,1,2,1,2,0,3,2,0,2,2,3,3,4,3,0,3,3,1,2,0,1,1,3,1,2,0,0,3,0,1,1,0,3,2,2,3,3,0,3,0,0,0,2,3,3,4,3,3,0,1,0,0,1,4), (0,4,0,4,0,4,0,0,0,3,4,4,3,1,4,2,3,2,3,3,3,1,4,3,4,0,3,0,4,2,3,3,2,2,5,4,2,1,3,4,3,4,3,1,3,3,4,2,0,2,1,0,3,3,0,0,2,0,3,1,0,4,4,3,4,3,0,4,0,1,0,2,4,4,4,4,4,0,3,2,0,3,3), (0,0,0,1,0,4,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,3,2,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2), (0,2,0,3,0,4,0,4,0,1,3,3,3,0,4,0,2,1,2,1,1,1,2,0,3,1,1,0,1,0,3,1,0,0,3,3,2,0,1,1,0,0,0,0,0,1,0,2,0,2,2,0,3,1,0,0,1,0,1,1,0,1,2,0,3,0,0,0,0,1,0,0,3,3,4,3,1,0,1,0,3,0,2), 
(0,0,0,3,0,5,0,0,0,0,1,0,2,0,3,1,0,1,3,0,0,0,2,0,0,0,1,0,0,0,1,1,0,0,4,0,0,0,2,3,0,1,4,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,1,0,0,0,0,0,0,0,2,0,0,3,0,0,0,0,0,3), (0,2,0,5,0,5,0,1,0,2,4,3,3,2,5,1,3,2,3,3,3,0,4,1,2,0,3,0,4,0,2,2,1,1,5,3,0,0,1,4,2,3,2,0,3,3,3,2,0,2,4,1,1,2,0,1,1,0,3,1,0,1,3,1,2,3,0,2,0,0,0,1,3,5,4,4,4,0,3,0,0,1,3), (0,4,0,5,0,4,0,4,0,4,5,4,3,3,4,3,3,3,4,3,4,4,5,3,4,5,4,2,4,2,3,4,3,1,4,4,1,3,5,4,4,5,5,4,4,5,5,5,2,3,3,1,4,3,1,3,3,0,3,3,1,4,3,4,4,4,0,3,0,4,0,3,3,4,4,5,0,0,4,3,0,4,5), (0,4,0,4,0,3,0,3,0,3,4,4,4,3,3,2,4,3,4,3,4,3,5,3,4,3,2,1,4,2,4,4,3,1,3,4,2,4,5,5,3,4,5,4,1,5,4,3,0,3,2,2,3,2,1,3,1,0,3,3,3,5,3,3,3,5,4,4,2,3,3,4,3,3,3,2,1,0,3,2,1,4,3), (0,4,0,5,0,4,0,3,0,3,5,5,3,2,4,3,4,0,5,4,4,1,4,4,4,3,3,3,4,3,5,5,2,3,3,4,1,2,5,5,3,5,5,2,3,5,5,4,0,3,2,0,3,3,1,1,5,1,4,1,0,4,3,2,3,5,0,4,0,3,0,5,4,3,4,3,0,0,4,1,0,4,4), (1,3,0,4,0,2,0,2,0,2,5,5,3,3,3,3,3,0,4,2,3,4,4,4,3,4,0,0,3,4,5,4,3,3,3,3,2,5,5,4,5,5,5,4,3,5,5,5,1,3,1,0,1,0,0,3,2,0,4,2,0,5,2,3,2,4,1,3,0,3,0,4,5,4,5,4,3,0,4,2,0,5,4), (0,3,0,4,0,5,0,3,0,3,4,4,3,2,3,2,3,3,3,3,3,2,4,3,3,2,2,0,3,3,3,3,3,1,3,3,3,0,4,4,3,4,4,1,1,4,4,2,0,3,1,0,1,1,0,4,1,0,2,3,1,3,3,1,3,4,0,3,0,1,0,3,1,3,0,0,1,0,2,0,0,4,4), (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), (0,3,0,3,0,2,0,3,0,1,5,4,3,3,3,1,4,2,1,2,3,4,4,2,4,4,5,0,3,1,4,3,4,0,4,3,3,3,2,3,2,5,3,4,3,2,2,3,0,0,3,0,2,1,0,1,2,0,0,0,0,2,1,1,3,1,0,2,0,4,0,3,4,4,4,5,2,0,2,0,0,1,3), (0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,1,0,0,1,1,0,0,0,4,2,1,1,0,1,0,3,2,0,0,3,1,1,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,1,0,0,0,2,0,0,0,1,4,0,4,2,1,0,0,0,0,0,1), (0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,1,0,1,0,0,0,0,3,1,0,0,0,2,0,2,1,0,0,1,2,1,0,1,1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,1,3,1,0,0,0,0,0,1,0,0,2,1,0,0,0,0,0,0,0,0,2), 
(0,4,0,4,0,4,0,3,0,4,4,3,4,2,4,3,2,0,4,4,4,3,5,3,5,3,3,2,4,2,4,3,4,3,1,4,0,2,3,4,4,4,3,3,3,4,4,4,3,4,1,3,4,3,2,1,2,1,3,3,3,4,4,3,3,5,0,4,0,3,0,4,3,3,3,2,1,0,3,0,0,3,3), (0,4,0,3,0,3,0,3,0,3,5,5,3,3,3,3,4,3,4,3,3,3,4,4,4,3,3,3,3,4,3,5,3,3,1,3,2,4,5,5,5,5,4,3,4,5,5,3,2,2,3,3,3,3,2,3,3,1,2,3,2,4,3,3,3,4,0,4,0,2,0,4,3,2,2,1,2,0,3,0,0,4,1), ) class JapaneseContextAnalysis(object): NUM_OF_CATEGORY = 6 DONT_KNOW = -1 ENOUGH_REL_THRESHOLD = 100 MAX_REL_THRESHOLD = 1000 MINIMUM_DATA_THRESHOLD = 4 def __init__(self): self._total_rel = None self._rel_sample = None self._need_to_skip_char_num = None self._last_char_order = None self._done = None self.reset() def reset(self): self._total_rel = 0 # total sequence received # category counters, each interger counts sequence in its category self._rel_sample = [0] * self.NUM_OF_CATEGORY # if last byte in current buffer is not the last byte of a character, # we need to know how many bytes to skip in next buffer self._need_to_skip_char_num = 0 self._last_char_order = -1 # The order of previous char # If this flag is set to True, detection is done and conclusion has # been made self._done = False def feed(self, byte_str, num_bytes): if self._done: return # The buffer we got is byte oriented, and a character may span in more than one # buffers. In case the last one or two byte in last buffer is not # complete, we record how many byte needed to complete that character # and skip these bytes here. We can choose to record those bytes as # well and analyse the character once it is complete, but since a # character will not make much difference, by simply skipping # this character will simply our logic and improve performance. 
i = self._need_to_skip_char_num while i < num_bytes: order, char_len = self.get_order(byte_str[i:i + 2]) i += char_len if i > num_bytes: self._need_to_skip_char_num = i - num_bytes self._last_char_order = -1 else: if (order != -1) and (self._last_char_order != -1): self._total_rel += 1 if self._total_rel > self.MAX_REL_THRESHOLD: self._done = True break self._rel_sample[jp2CharContext[self._last_char_order][order]] += 1 self._last_char_order = order def got_enough_data(self): return self._total_rel > self.ENOUGH_REL_THRESHOLD def get_confidence(self): # This is just one way to calculate confidence. It works well for me. if self._total_rel > self.MINIMUM_DATA_THRESHOLD: return (self._total_rel - self._rel_sample[0]) / self._total_rel else: return self.DONT_KNOW def get_order(self, byte_str): return -1, 1 class SJISContextAnalysis(JapaneseContextAnalysis): def __init__(self): super(SJISContextAnalysis, self).__init__() self._charset_name = "SHIFT_JIS" @property def charset_name(self): return self._charset_name def get_order(self, byte_str): if not byte_str: return -1, 1 # find out current char's byte length first_char = wrap_ord(byte_str[0]) if (0x81 <= first_char <= 0x9F) or (0xE0 <= first_char <= 0xFC): char_len = 2 if (first_char == 0x87) or (0xFA <= first_char <= 0xFC): self._charset_name = "CP932" else: char_len = 1 # return its order if it is hiragana if len(byte_str) > 1: second_char = wrap_ord(byte_str[1]) if (first_char == 202) and (0x9F <= second_char <= 0xF1): return second_char - 0x9F, char_len return -1, char_len class EUCJPContextAnalysis(JapaneseContextAnalysis): def get_order(self, byte_str): if not byte_str: return -1, 1 # find out current char's byte length first_char = wrap_ord(byte_str[0]) if (first_char == 0x8E) or (0xA1 <= first_char <= 0xFE): char_len = 2 elif first_char == 0x8F: char_len = 3 else: char_len = 1 # return its order if it is hiragana if len(byte_str) > 1: second_char = wrap_ord(byte_str[1]) if (first_char == 0xA4) and (0xA1 <= 
second_char <= 0xF3): return second_char - 0xA1, char_len return -1, char_len
gpl-3.0
AndroidSecurityTools/Mobile-Security-Framework-MobSF
DynamicAnalyzer/tools/pyWebProxy/proxy.py
32
19839
#!/usr/bin/env python ''' owtf is an OWASP+PTES-focused try to unite great tools & facilitate pentesting Copyright (c) 2013, Abraham Aranguren <name.surname@gmail.com> http://7-a.org All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright owner nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# Inbound Proxy Module developed by Bharadwaj Machiraju (blog.tunnelshade.in) # as a part of Google Summer of Code 2013 ''' import tornado.httpserver import tornado.ioloop import tornado.iostream import tornado.web import tornado.httpclient import tornado.escape import tornado.httputil import tornado.options import tornado.template import tornado.websocket import tornado.gen import socket import ssl import os import datetime import uuid import re,sys from multiprocessing import Process, Value, Lock from socket_wrapper import wrap_socket LOG='' #This function create logs def Logz(request,response,log): TRAFFIC='' rdat='' dat=response.request.body if response.request.body else '' TRAFFIC+= "\n\nREQUEST: " + str(response.request.method)+ " " + str(response.request.url) + '\n' for header, value in list(response.headers.items()): TRAFFIC+= header + ": " + value +"\n" TRAFFIC+= "\n\n" + str(dat) + "\n" TRAFFIC+= "\n\nRESPONSE: " +str(response.code) + " " + str(response.reason) + "\n" for header, value in list(response.headers.items()): TRAFFIC+= header + ": " + value + "\n" if "content-type" in header.lower(): if re.findall("json|xml|application\/javascript",value.lower()): rdat=request.response_buffer else: rdat='' TRAFFIC+= "\n\n" +str(rdat) + "\n" #print TRAFFIC with open(log,'a') as f: f.write(TRAFFIC) class ProxyHandler(tornado.web.RequestHandler): """ This RequestHandler processes all the requests that the application received """ SUPPORTED_METHODS = ['GET', 'POST', 'CONNECT', 'HEAD', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'] def __new__(cls, application, request, **kwargs): # http://stackoverflow.com/questions/3209233/how-to-replace-an-instance-in-init-with-a-different-object # Based on upgrade header, websocket request handler must be used try: if request.headers['Upgrade'].lower() == 'websocket': return CustomWebSocketHandler(application, request, **kwargs) except KeyError: pass return tornado.web.RequestHandler.__new__(cls, application, request, **kwargs) def 
set_default_headers(self): # This is automatically called by Tornado :P # XD Using this to remove "Server" header set by tornado del self._headers["Server"] def set_status(self, status_code, reason=None): """ Sets the status code for our response. Overriding is done so as to handle unknown response codes gracefully. """ self._status_code = status_code if reason is not None: self._reason = tornado.escape.native_str(reason) else: try: self._reason = tornado.httputil.responses[status_code] except KeyError: self._reason = tornado.escape.native_str("Server Not Found") # This function writes a new response & caches it def finish_response(self, response): Logz(self.request,response,LOG) self.set_status(response.code) for header, value in list(response.headers.items()): if header == "Set-Cookie": self.add_header(header, value) else: if header not in restricted_response_headers: self.set_header(header, value) self.finish() # This function is a callback when a small chunk is received def handle_data_chunk(self, data): if data: self.write(data) self.request.response_buffer += data @tornado.web.asynchronous @tornado.gen.coroutine def get(self): """ * This function handles all requests except the connect request. * Once ssl stream is formed between browser and proxy, the requests are then processed by this function """ # The flow starts here self.request.response_buffer = '' # The requests that come through ssl streams are relative requests, so transparent # proxying is required. 
The following snippet decides the url that should be passed # to the async client if self.request.uri.startswith(self.request.protocol,0): # Normal Proxy Request self.request.url = self.request.uri else: # Transparent Proxy Request self.request.url = self.request.protocol + "://" + self.request.host if self.request.uri != '/': # Add uri only if needed self.request.url += self.request.uri # Request header cleaning for header in restricted_request_headers: try: del self.request.headers[header] except: continue # httprequest object is created and then passed to async client with a callback request = tornado.httpclient.HTTPRequest( url=self.request.url, method=self.request.method, body=self.request.body if self.request.body else None, headers=self.request.headers, follow_redirects=False, use_gzip=True, streaming_callback=self.handle_data_chunk, header_callback=None, proxy_host=self.application.outbound_ip, proxy_port=self.application.outbound_port, proxy_username=self.application.outbound_username, proxy_password=self.application.outbound_password, allow_nonstandard_methods=True, validate_cert=False) response = yield tornado.gen.Task(self.application.async_client.fetch, request) self.finish_response(response) # The following 5 methods can be handled through the above implementation @tornado.web.asynchronous def post(self): return self.get() @tornado.web.asynchronous def head(self): return self.get() @tornado.web.asynchronous def put(self): return self.get() @tornado.web.asynchronous def delete(self): return self.get() @tornado.web.asynchronous def options(self): return self.get() @tornado.web.asynchronous def trace(self): return self.get() @tornado.web.asynchronous def connect(self): """ This function gets called when a connect request is received. 
* The host and port are obtained from the request uri * A socket is created, wrapped in ssl and then added to SSLIOStream * This stream is used to connect to speak to the remote host on given port * If the server speaks ssl on that port, callback start_tunnel is called * An OK response is written back to client * The client side socket is wrapped in ssl * If the wrapping is successful, a new SSLIOStream is made using that socket * The stream is added back to the server for monitoring """ host, port = self.request.uri.split(':') def start_tunnel(): try: base=os.path.dirname(os.path.realpath(__file__)) ca_crt=os.path.join(base,"ca.crt") ca_key=os.path.join(base,"ca.key") self.request.connection.stream.write(b"HTTP/1.1 200 Connection established\r\n\r\n") wrap_socket( self.request.connection.stream.socket, host, ca_crt, ca_key, "mobsec-yso", "logs", success=ssl_success ) except tornado.iostream.StreamClosedError: pass def ssl_success(client_socket): client = tornado.iostream.SSLIOStream(client_socket) server.handle_stream(client, self.application.inbound_ip) # Tiny Hack to satisfy proxychains CONNECT request to HTTP port. # HTTPS fail check has to be improvised def ssl_fail(): self.request.connection.stream.write(b"HTTP/1.1 200 Connection established\r\n\r\n") server.handle_stream(self.request.connection.stream, self.application.inbound_ip) ###### # Hacking to be done here, so as to check for ssl using proxy and auth try: s = ssl.wrap_socket(socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)) upstream = tornado.iostream.SSLIOStream(s) #start_tunnel() upstream.set_close_callback(ssl_fail) upstream.connect((host, int(port)), start_tunnel) except Exception: self.finish() class CustomWebSocketHandler(tornado.websocket.WebSocketHandler): """ * See docs XD * This class is used for handling websocket traffic. 
* Object of this class replaces the main request handler for a request with header => "Upgrade: websocket" * wss:// - CONNECT request is handled by main handler """ def upstream_connect(self, io_loop=None, callback=None): """ Implemented as a custom alternative to tornado.websocket.websocket_connect """ # io_loop is needed, how else will it work with tornado :P if io_loop is None: io_loop = tornado.ioloop.IOLoop.current() # During secure communication, we get relative URI, so make them absolute if self.request.uri.startswith(self.request.protocol,0): # Normal Proxy Request self.request.url = self.request.uri else: # Transparent Proxy Request self.request.url = self.request.protocol + "://" + self.request.host + self.request.uri # WebSocketClientConnection expects ws:// & wss:// self.request.url = self.request.url.replace("http", "ws", 1) # Have to add cookies and stuff request_headers = tornado.httputil.HTTPHeaders() for name, value in self.request.headers.iteritems(): if name not in restricted_request_headers: request_headers.add(name, value) # Build a custom request request = tornado.httpclient.HTTPRequest( url=self.request.url, headers=request_headers, proxy_host=self.application.outbound_ip, proxy_port=self.application.outbound_port, proxy_username=self.application.outbound_username, proxy_password=self.application.outbound_password ) self.upstream_connection = CustomWebSocketClientConnection(io_loop, request) if callback is not None: io_loop.add_future(self.upstream_connection.connect_future, callback) return self.upstream_connection.connect_future # This returns a future def _execute(self, transforms, *args, **kwargs): """ Overriding of a method of WebSocketHandler """ def start_tunnel(future): """ A callback which is called when connection to url is successful """ self.upstream = future.result() # We need upstream to write further messages self.handshake_request = self.upstream_connection.request # HTTPRequest needed for caching :P 
self.handshake_request.response_buffer = "" # Needed for websocket data & compliance with cache_handler stuff self.handshake_request.version = "HTTP/1.1" # Tiny hack to protect caching (But according to websocket standards) self.handshake_request.body = self.handshake_request.body or "" # I dont know why a None is coming :P tornado.websocket.WebSocketHandler._execute(self, transforms, *args, **kwargs) # The regular procedures are to be done # We try to connect to provided URL & then we proceed with connection on client side. self.upstream = self.upstream_connect(callback=start_tunnel) def store_upstream_data(self, message): """ Save websocket data sent from client to server, i.e add it to HTTPRequest.response_buffer with direction (>>) """ try: # Cannot write binary content as a string, so catch it self.handshake_request.response_buffer += (">>> %s\r\n"%(message)) except TypeError: self.handshake_request.response_buffer += (">>> May be binary\r\n") def store_downstream_data(self, message): """ Save websocket data sent from client to server, i.e add it to HTTPRequest.response_buffer with direction (<<) """ try: # Cannot write binary content as a string, so catch it self.handshake_request.response_buffer += ("<<< %s\r\n"%(message)) except TypeError: self.handshake_request.response_buffer += ("<<< May be binary\r\n") def on_message(self, message): """ Everytime a message is received from client side, this instance method is called """ self.upstream.write_message(message) # The obtained message is written to upstream self.store_upstream_data(message) # The following check ensures that if a callback is added for reading message from upstream, another one is not added if not self.upstream.read_future: self.upstream.read_message(callback=self.on_response) # A callback is added to read the data when upstream responds def on_response(self, message): """ A callback when a message is recieved from upstream *** Here message is a future """ # The following check ensures that if 
a callback is added for reading message from upstream, another one is not added if not self.upstream.read_future: self.upstream.read_message(callback=self.on_response) if self.ws_connection: # Check if connection still exists if message.result(): # Check if it is not NULL ( Indirect checking of upstream connection ) self.write_message(message.result()) # Write obtained message to client self.store_downstream_data(message.result()) else: self.close() def on_close(self): """ Called when websocket is closed. So handshake request-response pair along with websocket data as response body is saved """ # Required for cache_handler self.handshake_response = tornado.httpclient.HTTPResponse( self.handshake_request, self.upstream_connection.code, headers=self.upstream_connection.headers, request_time=0 ) # Close fd descriptor class CustomWebSocketClientConnection(tornado.websocket.WebSocketClientConnection): # Had to extract response code, so it is necessary to override def _handle_1xx(self, code): self.code = code super(CustomWebSocketClientConnection, self)._handle_1xx(code) # The tornado application, which is used to pass variables to request handler application = tornado.web.Application(handlers=[ (r'.*', ProxyHandler) ], debug=False, gzip=True, ) application.async_client = tornado.httpclient.AsyncHTTPClient() instances = "1" # SSL MiTM # SSL certs, keys and other settings (os.path.expanduser because they are stored in users home directory ~/.owtf/proxy ) application.outbound_ip = None application.outbound_port = None application.outbound_username = None application.outbound_password = None application.inbound_ip="0.0.0.0" #try: # Ensure CA.crt and Key exist #assert os.path.exists(application.ca_cert) #assert os.path.exists(application.ca_key) #except AssertionError: #print ("Files required for SSL MiTM are missing. 
Please run the install script") # Server has to be global, because it is used inside request handler to attach sockets for monitoring global server server = tornado.httpserver.HTTPServer(application) server = server # Header filters # Restricted headers are picked from framework/config/framework_config.cfg # These headers are removed from the response obtained from webserver, before sending it to browser global restricted_response_headers rresh=["Content-Length","Content-Encoding","Etag","Transfer-Encoding","Connection","Vary","Accept-Ranges","Pragma"] restricted_response_headers = rresh # These headers are removed from request obtained from browser, before sending it to webserver global restricted_request_headers rreqh=["Connection","Pragma","Cache-Control","If-Modified-Since"] restricted_request_headers = rreqh # "0" equals the number of cores present in a machine if len(sys.argv)==4: LOG=sys.argv[3] try: server.bind(sys.argv[2], address=sys.argv[1]) # Useful for using custom loggers because of relative paths in secure requests # http://www.joet3ch.com/blog/2011/09/08/alternative-tornado-logging/ #ornado.options.parse_command_line(args=["dummy_arg","--log_file_prefix="+application.Core.DB.Config.Get("PROXY_LOG"),"--logging=info"]) tornado.options.parse_command_line(args=["dummy_arg","--log_file_prefix=logs/proxy.log","--logging=info"]) # To run any number of instances server.start(int(1)) tornado.ioloop.IOLoop.instance().start() except Exception as e: print "[WebProxy Error] "+str(e) else: print "proxy.py <IP> <PORT> <LOGFILE>"
gpl-3.0
Teagan42/home-assistant
script/hassfest/dependencies.py
1
7786
"""Validate dependencies.""" import ast from pathlib import Path from typing import Dict, Set from homeassistant.requirements import DISCOVERY_INTEGRATIONS from .model import Integration class ImportCollector(ast.NodeVisitor): """Collect all integrations referenced.""" def __init__(self, integration: Integration): """Initialize the import collector.""" self.integration = integration self.referenced: Dict[Path, Set[str]] = {} # Current file or dir we're inspecting self._cur_fil_dir = None def collect(self) -> None: """Collect imports from a source file.""" for fil in self.integration.path.glob("**/*.py"): if not fil.is_file(): continue self._cur_fil_dir = fil.relative_to(self.integration.path) self.referenced[self._cur_fil_dir] = set() self.visit(ast.parse(fil.read_text())) self._cur_fil_dir = None def _add_reference(self, reference_domain: str): """Add a reference.""" self.referenced[self._cur_fil_dir].add(reference_domain) def visit_ImportFrom(self, node): """Visit ImportFrom node.""" if node.module is None: return if node.module.startswith("homeassistant.components."): # from homeassistant.components.alexa.smart_home import EVENT_ALEXA_SMART_HOME # from homeassistant.components.logbook import bla self._add_reference(node.module.split(".")[2]) elif node.module == "homeassistant.components": # from homeassistant.components import sun for name_node in node.names: self._add_reference(name_node.name) def visit_Import(self, node): """Visit Import node.""" # import homeassistant.components.hue as hue for name_node in node.names: if name_node.name.startswith("homeassistant.components."): self._add_reference(name_node.name.split(".")[2]) def visit_Attribute(self, node): """Visit Attribute node.""" # hass.components.hue.async_create() # Name(id=hass) # .Attribute(attr=hue) # .Attribute(attr=async_create) # self.hass.components.hue.async_create() # Name(id=self) # .Attribute(attr=hass) # .Attribute(attr=hue) # .Attribute(attr=async_create) if ( isinstance(node.value, 
ast.Attribute) and node.value.attr == "components" and ( ( isinstance(node.value.value, ast.Name) and node.value.value.id == "hass" ) or ( isinstance(node.value.value, ast.Attribute) and node.value.value.attr == "hass" ) ) ): self._add_reference(node.attr) else: # Have it visit other kids self.generic_visit(node) ALLOWED_USED_COMPONENTS = { # This component will always be set up "persistent_notification", # These allow to register things without being set up "conversation", "frontend", "hassio", "system_health", "websocket_api", "automation", "device_automation", "zone", "homeassistant", "system_log", "person", # Discovery "discovery", # Other "mjpeg", # base class, has no reqs or component to load. "stream", # Stream cannot install on all systems, can be imported without reqs. } IGNORE_VIOLATIONS = [ # Has same requirement, gets defaults. ("sql", "recorder"), # Sharing a base class ("openalpr_cloud", "openalpr_local"), ("lutron_caseta", "lutron"), ("ffmpeg_noise", "ffmpeg_motion"), # Demo ("demo", "manual"), ("demo", "openalpr_local"), # This should become a helper method that integrations can submit data to ("websocket_api", "lovelace"), # Expose HA to external systems "homekit", "alexa", "google_assistant", "emulated_hue", "prometheus", "conversation", "logbook", "mobile_app", # These should be extracted to external package "pvoutput", "dwd_weather_warnings", # Should be rewritten to use own data fetcher "scrape", ] def calc_allowed_references(integration: Integration) -> Set[str]: """Return a set of allowed references.""" allowed_references = ( ALLOWED_USED_COMPONENTS | set(integration.manifest["dependencies"]) | set(integration.manifest.get("after_dependencies", [])) ) # Discovery requirements are ok if referenced in manifest for check_domain, to_check in DISCOVERY_INTEGRATIONS.items(): if any(check in integration.manifest for check in to_check): allowed_references.add(check_domain) return allowed_references def find_non_referenced_integrations( integrations: 
Dict[str, Integration], integration: Integration, references: Dict[Path, Set[str]], ): """Find intergrations that are not allowed to be referenced.""" allowed_references = calc_allowed_references(integration) referenced = set() for path, refs in references.items(): if len(path.parts) == 1: # climate.py is stored as climate cur_fil_dir = path.stem else: # climate/__init__.py is stored as climate cur_fil_dir = path.parts[0] is_platform_other_integration = cur_fil_dir in integrations for ref in refs: # We are always allowed to import from ourselves if ref == integration.domain: continue # These references are approved based on the manifest if ref in allowed_references: continue # Some violations are whitelisted if (integration.domain, ref) in IGNORE_VIOLATIONS: continue # If it's a platform for another integration, the other integration is ok if is_platform_other_integration and cur_fil_dir == ref: continue # These have a platform specified in this integration if not is_platform_other_integration and ( (integration.path / f"{ref}.py").is_file() # Platform dir or (integration.path / ref).is_dir() ): continue referenced.add(ref) return referenced def validate_dependencies( integrations: Dict[str, Integration], integration: Integration ): """Validate all dependencies.""" # Some integrations are allowed to have violations. 
if integration.domain in IGNORE_VIOLATIONS: return # Find usage of hass.components collector = ImportCollector(integration) collector.collect() for domain in sorted( find_non_referenced_integrations( integrations, integration, collector.referenced ) ): integration.add_error( "dependencies", f"Using component {domain} but it's not in 'dependencies' " "or 'after_dependencies'", ) def validate(integrations: Dict[str, Integration], config): """Handle dependencies for integrations.""" # check for non-existing dependencies for integration in integrations.values(): if not integration.manifest: continue validate_dependencies(integrations, integration) # check that all referenced dependencies exist for dep in integration.manifest["dependencies"]: if dep not in integrations: integration.add_error( "dependencies", f"Dependency {dep} does not exist" )
apache-2.0
kevintaw/django
django/db/backends/oracle/utils.py
539
1252
import datetime from django.utils.encoding import force_bytes, force_text from .base import Database # Check whether cx_Oracle was compiled with the WITH_UNICODE option if cx_Oracle is pre-5.1. This will # also be True for cx_Oracle 5.1 and in Python 3.0. See #19606 if int(Database.version.split('.', 1)[0]) >= 5 and \ (int(Database.version.split('.', 2)[1]) >= 1 or not hasattr(Database, 'UNICODE')): convert_unicode = force_text else: convert_unicode = force_bytes class InsertIdVar(object): """ A late-binding cursor variable that can be passed to Cursor.execute as a parameter, in order to receive the id of the row created by an insert statement. """ def bind_parameter(self, cursor): param = cursor.cursor.var(Database.NUMBER) cursor._insert_id_var = param return param class Oracle_datetime(datetime.datetime): """ A datetime object, with an additional class attribute to tell cx_Oracle to save the microseconds too. """ input_size = Database.TIMESTAMP @classmethod def from_datetime(cls, dt): return Oracle_datetime( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.microsecond, )
bsd-3-clause