repo_name
stringlengths
6
100
path
stringlengths
4
294
copies
stringlengths
1
5
size
stringlengths
4
6
content
stringlengths
606
896k
license
stringclasses
15 values
jazztpt/edx-platform
common/djangoapps/enrollment/api.py
56
15361
""" Enrollment API for creating, updating, and deleting enrollments. Also provides access to enrollment information at a course level, such as available course modes. """ from django.utils import importlib import logging from django.conf import settings from django.core.cache import cache from enrollment import errors log = logging.getLogger(__name__) DEFAULT_DATA_API = 'enrollment.data' def get_enrollments(user_id): """Retrieves all the courses a user is enrolled in. Takes a user and retrieves all relative enrollments. Includes information regarding how the user is enrolled in the the course. Args: user_id (str): The username of the user we want to retrieve course enrollment information for. Returns: A list of enrollment information for the given user. Examples: >>> get_enrollments("Bob") [ { "created": "2014-10-20T20:18:00Z", "mode": "honor", "is_active": True, "user": "Bob", "course": { "course_id": "edX/DemoX/2014T2", "enrollment_end": "2014-12-20T20:18:00Z", "enrollment_start": "2014-10-15T20:18:00Z", "course_start": "2015-02-03T00:00:00Z", "course_end": "2015-05-06T00:00:00Z", "course_modes": [ { "slug": "honor", "name": "Honor Code Certificate", "min_price": 0, "suggested_prices": "", "currency": "usd", "expiration_datetime": null, "description": null, "sku": null } ], "invite_only": False } }, { "created": "2014-10-25T20:18:00Z", "mode": "verified", "is_active": True, "user": "Bob", "course": { "course_id": "edX/edX-Insider/2014T2", "enrollment_end": "2014-12-20T20:18:00Z", "enrollment_start": "2014-10-15T20:18:00Z", "course_start": "2015-02-03T00:00:00Z", "course_end": "2015-05-06T00:00:00Z", "course_modes": [ { "slug": "honor", "name": "Honor Code Certificate", "min_price": 0, "suggested_prices": "", "currency": "usd", "expiration_datetime": null, "description": null, "sku": null } ], "invite_only": True } } ] """ return _data_api().get_course_enrollments(user_id) def get_enrollment(user_id, course_id): """Retrieves all enrollment information for the user 
in respect to a specific course. Gets all the course enrollment information specific to a user in a course. Args: user_id (str): The user to get course enrollment information for. course_id (str): The course to get enrollment information for. Returns: A serializable dictionary of the course enrollment. Example: >>> get_enrollment("Bob", "edX/DemoX/2014T2") { "created": "2014-10-20T20:18:00Z", "mode": "honor", "is_active": True, "user": "Bob", "course": { "course_id": "edX/DemoX/2014T2", "enrollment_end": "2014-12-20T20:18:00Z", "enrollment_start": "2014-10-15T20:18:00Z", "course_start": "2015-02-03T00:00:00Z", "course_end": "2015-05-06T00:00:00Z", "course_modes": [ { "slug": "honor", "name": "Honor Code Certificate", "min_price": 0, "suggested_prices": "", "currency": "usd", "expiration_datetime": null, "description": null, "sku": null } ], "invite_only": False } } """ return _data_api().get_course_enrollment(user_id, course_id) def add_enrollment(user_id, course_id, mode='honor', is_active=True): """Enrolls a user in a course. Enrolls a user in a course. If the mode is not specified, this will default to 'honor'. Arguments: user_id (str): The user to enroll. course_id (str): The course to enroll the user in. Keyword Arguments: mode (str): Optional argument for the type of enrollment to create. Ex. 'audit', 'honor', 'verified', 'professional'. If not specified, this defaults to 'honor'. is_active (boolean): Optional argument for making the new enrollment inactive. If not specified, is_active defaults to True. Returns: A serializable dictionary of the new course enrollment. 
Example: >>> add_enrollment("Bob", "edX/DemoX/2014T2", mode="audit") { "created": "2014-10-20T20:18:00Z", "mode": "honor", "is_active": True, "user": "Bob", "course": { "course_id": "edX/DemoX/2014T2", "enrollment_end": "2014-12-20T20:18:00Z", "enrollment_start": "2014-10-15T20:18:00Z", "course_start": "2015-02-03T00:00:00Z", "course_end": "2015-05-06T00:00:00Z", "course_modes": [ { "slug": "honor", "name": "Honor Code Certificate", "min_price": 0, "suggested_prices": "", "currency": "usd", "expiration_datetime": null, "description": null, "sku": null } ], "invite_only": False } } """ _validate_course_mode(course_id, mode, is_active=is_active) return _data_api().create_course_enrollment(user_id, course_id, mode, is_active) def update_enrollment(user_id, course_id, mode=None, is_active=None, enrollment_attributes=None): """Updates the course mode for the enrolled user. Update a course enrollment for the given user and course. Arguments: user_id (str): The user associated with the updated enrollment. course_id (str): The course associated with the updated enrollment. Keyword Arguments: mode (str): The new course mode for this enrollment. is_active (bool): Sets whether the enrollment is active or not. enrollment_attributes (list): Attributes to be set the enrollment. Returns: A serializable dictionary representing the updated enrollment. 
Example: >>> update_enrollment("Bob", "edX/DemoX/2014T2", "honor") { "created": "2014-10-20T20:18:00Z", "mode": "honor", "is_active": True, "user": "Bob", "course": { "course_id": "edX/DemoX/2014T2", "enrollment_end": "2014-12-20T20:18:00Z", "enrollment_start": "2014-10-15T20:18:00Z", "course_start": "2015-02-03T00:00:00Z", "course_end": "2015-05-06T00:00:00Z", "course_modes": [ { "slug": "honor", "name": "Honor Code Certificate", "min_price": 0, "suggested_prices": "", "currency": "usd", "expiration_datetime": null, "description": null, "sku": null } ], "invite_only": False } } """ if mode is not None: _validate_course_mode(course_id, mode, is_active=is_active) enrollment = _data_api().update_course_enrollment(user_id, course_id, mode=mode, is_active=is_active) if enrollment is None: msg = u"Course Enrollment not found for user {user} in course {course}".format(user=user_id, course=course_id) log.warn(msg) raise errors.EnrollmentNotFoundError(msg) else: if enrollment_attributes is not None: set_enrollment_attributes(user_id, course_id, enrollment_attributes) return enrollment def get_course_enrollment_details(course_id, include_expired=False): """Get the course modes for course. Also get enrollment start and end date, invite only, etc. Given a course_id, return a serializable dictionary of properties describing course enrollment information. Args: course_id (str): The Course to get enrollment information for. include_expired (bool): Boolean denoting whether expired course modes should be included in the returned JSON data. Returns: A serializable dictionary of course enrollment information. 
Example: >>> get_course_enrollment_details("edX/DemoX/2014T2") { "course_id": "edX/DemoX/2014T2", "enrollment_end": "2014-12-20T20:18:00Z", "enrollment_start": "2014-10-15T20:18:00Z", "course_start": "2015-02-03T00:00:00Z", "course_end": "2015-05-06T00:00:00Z", "course_modes": [ { "slug": "honor", "name": "Honor Code Certificate", "min_price": 0, "suggested_prices": "", "currency": "usd", "expiration_datetime": null, "description": null, "sku": null } ], "invite_only": False } """ cache_key = u'enrollment.course.details.{course_id}.{include_expired}'.format( course_id=course_id, include_expired=include_expired ) cached_enrollment_data = None try: cached_enrollment_data = cache.get(cache_key) except Exception: # The cache backend could raise an exception (for example, memcache keys that contain spaces) log.exception(u"Error occurred while retrieving course enrollment details from the cache") if cached_enrollment_data: log.info(u"Get enrollment data for course %s (cached)", course_id) return cached_enrollment_data course_enrollment_details = _data_api().get_course_enrollment_info(course_id, include_expired) try: cache_time_out = getattr(settings, 'ENROLLMENT_COURSE_DETAILS_CACHE_TIMEOUT', 60) cache.set(cache_key, course_enrollment_details, cache_time_out) except Exception: # Catch any unexpected errors during caching. log.exception(u"Error occurred while caching course enrollment details for course %s", course_id) raise errors.CourseEnrollmentError(u"An unexpected error occurred while retrieving course enrollment details.") log.info(u"Get enrollment data for course %s", course_id) return course_enrollment_details def set_enrollment_attributes(user_id, course_id, attributes): """Set enrollment attributes for the enrollment of given user in the course provided. Args: course_id (str): The Course to set enrollment attributes for. user_id (str): The User to set enrollment attributes for. attributes (list): Attributes to be set. 
Example: >>>set_enrollment_attributes( "Bob", "course-v1-edX-DemoX-1T2015", [ { "namespace": "credit", "name": "provider_id", "value": "hogwarts", }, ] ) """ _data_api().add_or_update_enrollment_attr(user_id, course_id, attributes) def get_enrollment_attributes(user_id, course_id): """Retrieve enrollment attributes for given user for provided course. Args: user_id: The User to get enrollment attributes for course_id (str): The Course to get enrollment attributes for. Example: >>>get_enrollment_attributes("Bob", "course-v1-edX-DemoX-1T2015") [ { "namespace": "credit", "name": "provider_id", "value": "hogwarts", }, ] Returns: list """ return _data_api().get_enrollment_attributes(user_id, course_id) def _validate_course_mode(course_id, mode, is_active=None): """Checks to see if the specified course mode is valid for the course. If the requested course mode is not available for the course, raise an error with corresponding course enrollment information. 'honor' is special cased. If there are no course modes configured, and the specified mode is 'honor', return true, allowing the enrollment to be 'honor' even if the mode is not explicitly set for the course. Arguments: course_id (str): The course to check against for available course modes. mode (str): The slug for the course mode specified in the enrollment. Keyword Arguments: is_active (bool): Whether the enrollment is to be activated or deactivated. Returns: None Raises: CourseModeNotFound: raised if the course mode is not found. """ # If the client has requested an enrollment deactivation, we want to include expired modes # in the set of available modes. This allows us to unenroll users from expired modes. 
include_expired = not is_active if is_active is not None else False course_enrollment_info = _data_api().get_course_enrollment_info(course_id, include_expired=include_expired) course_modes = course_enrollment_info["course_modes"] available_modes = [m['slug'] for m in course_modes] if mode not in available_modes: msg = ( u"Specified course mode '{mode}' unavailable for course {course_id}. " u"Available modes were: {available}" ).format( mode=mode, course_id=course_id, available=", ".join(available_modes) ) log.warn(msg) raise errors.CourseModeNotFoundError(msg, course_enrollment_info) def _data_api(): """Returns a Data API. This relies on Django settings to find the appropriate data API. """ # We retrieve the settings in-line here (rather than using the # top-level constant), so that @override_settings will work # in the test suite. api_path = getattr(settings, "ENROLLMENT_DATA_API", DEFAULT_DATA_API) try: return importlib.import_module(api_path) except (ImportError, ValueError): log.exception(u"Could not load module at '{path}'".format(path=api_path)) raise errors.EnrollmentApiLoadError(api_path)
agpl-3.0
wRieDen/imapx210-nb-linux-kernel
tools/perf/scripts/python/syscall-counts.py
944
1429
# system call counts # (c) 2010, Tom Zanussi <tzanussi@gmail.com> # Licensed under the terms of the GNU GPL License version 2 # # Displays system-wide system call totals, broken down by syscall. # If a [comm] arg is specified, only syscalls called by [comm] are displayed. import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * usage = "perf trace -s syscall-counts.py [comm]\n"; for_comm = None if len(sys.argv) > 2: sys.exit(usage) if len(sys.argv) > 1: for_comm = sys.argv[1] syscalls = autodict() def trace_begin(): pass def trace_end(): print_syscall_totals() def raw_syscalls__sys_enter(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, id, args): if for_comm is not None: if common_comm != for_comm: return try: syscalls[id] += 1 except TypeError: syscalls[id] = 1 def print_syscall_totals(): if for_comm is not None: print "\nsyscall events for %s:\n\n" % (for_comm), else: print "\nsyscall events:\n\n", print "%-40s %10s\n" % ("event", "count"), print "%-40s %10s\n" % ("----------------------------------------", \ "-----------"), for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \ reverse = True): print "%-40d %10d\n" % (id, val),
gpl-2.0
cjaymes/pyscap
src/scap/model/oval_5/sc/OvalSystemCharacteristicsElement.py
1
1356
# Copyright 2016 Casey Jaymes # This file is part of PySCAP. # # PySCAP is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # PySCAP is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with PySCAP. If not, see <http://www.gnu.org/licenses/>. import logging from scap.Model import Model logger = logging.getLogger(__name__) class OvalDefinitionsElement(Model): MODEL_MAP = { 'tag_name' : 'oval_system_characteristics', 'elements': [ {'tag_name': 'generator', 'class': 'scap.model.oval_5.GeneratorType'}, {'tag_name': 'system_info', 'class': 'SystemInfoType'}, {'tag_name': '_collected_objects', 'class': 'CollectedObjectsType', 'min': 0, 'max': 1}, {'tag_name': 'system_data', 'class': 'SystemDataType', 'min': 0, 'max': 1}, {'xmlns': 'http://www.w3.org/2000/09/xmldsig#', 'tag_name': 'Signature', 'min': 0, 'max': 1}, ], }
gpl-3.0
rananda/cfme_tests
fixtures/pytest_store.py
2
6554
"""Storage for pytest objects during test runs The objects in the module will change during the course of a test run, so they have been stashed into the 'store' namespace Usage: # as pytest.store import pytest pytest.store.config, pytest.store.pluginmanager, pytest.store.session # imported directly (store is pytest.store) from fixtures.pytest_store import store store.config, store.pluginmanager, store.session The availability of these objects varies during a test run, but all should be available in the collection and testing phases of a test run. """ import fauxfactory import os import sys import pytest # NOQA: import to trigger initial pluginmanager from _pytest.terminal import TerminalReporter from cached_property import cached_property from py.io import TerminalWriter from utils import diaper, property_or_none class FlexibleTerminalReporter(TerminalReporter): """A TerminalReporter stand-in that pretends to work even without a py.test config.""" def __init__(self, config=None, file=None): if config: # If we have a config, nothing more needs to be done return TerminalReporter.__init__(self, config, file) # Without a config, pretend to be a TerminalReporter # hook-related functions (logreport, collection, etc) will be outrigt broken, # but the line writers should still be usable if file is None: file = sys.stdout self._tw = self.writer = TerminalWriter(file) self.hasmarkup = self._tw.hasmarkup self.reportchars = '' self.currentfspath = None class Store(object): """pytest object store If a property isn't available for any reason (including being accessed outside of a pytest run), it will be None. 
""" @property def current_appliance(self): # layz import due to loops and loops and loops from utils import appliance return appliance.current_appliance def __init__(self): #: The py.test config instance, None if not in py.test self.config = None #: The current py.test session, None if not in a py.test session self.session = None #: Parallelizer role, None if not running a parallelized session self.parallelizer_role = None # Stash of the "real" terminal reporter once we get it, # so we don't have to keep going through pluginmanager self._terminalreporter = None #: hack variable until we get a more sustainable solution self.ssh_clients_to_close = [] @property def has_config(self): return self.config is not None @property def base_url(self): """ If there is a current appliance the base url of that appliance is returned else, the base_url from the config is returned.""" return self.current_appliance.url @property def in_pytest_session(self): return self.session is not None @property_or_none def fixturemanager(self): # "publicize" the fixturemanager return self.session._fixturemanager @property_or_none def capturemanager(self): return self.pluginmanager.getplugin('capturemanager') @property_or_none def pluginmanager(self): # Expose this directly on the store for convenience in getting/setting plugins return self.config.pluginmanager @property_or_none def terminalreporter(self): if self._terminalreporter is not None: return self._terminalreporter if self.pluginmanager is not None: reporter = self.pluginmanager.getplugin('terminalreporter') if reporter and isinstance(reporter, TerminalReporter): self._terminalreporter = reporter return reporter return FlexibleTerminalReporter(self.config) @property_or_none def terminaldistreporter(self): if self.pluginmanager is not None: reporter = self.pluginmanager.getplugin('terminaldistreporter') if reporter: return reporter @property_or_none def parallel_session(self): return self.pluginmanager.getplugin('parallel_session') 
@property_or_none def slave_manager(self): return self.pluginmanager.getplugin('slave_manager') @cached_property def my_ip_address(self): try: # Check the environment first return os.environ['CFME_MY_IP_ADDRESS'] except KeyError: # Fall back to having an appliance tell us what it thinks our IP # address is return self.current_appliance.ssh_client.client_address() def write_line(self, line, **kwargs): return write_line(line, **kwargs) store = Store() def pytest_namespace(): # Expose the pytest store as pytest.store return {'store': store} def pytest_configure(config): store.config = config def pytest_sessionstart(session): store.session = session # populate my_ip_address if it hasn't been done yet store.my_ip_address def write_line(line, **kwargs): """A write-line helper that should *always* write a line to the terminal It knows all of py.tests dirty tricks, including ones that we made, and works around them. Args: **kwargs: Normal kwargs for pytest line formatting, stripped from slave messages """ if store.slave_manager: # We're a pytest slave! Write out the vnc info through the slave manager store.slave_manager.message(line, **kwargs) else: # If py.test is supressing stdout/err, turn that off for a moment with diaper: store.capturemanager.suspendcapture() # terminal reporter knows whether or not to write a newline based on currentfspath # so stash it, then use rewrite to blow away the line that printed the current # test name, then clear currentfspath so the test name is reprinted with the # write_ensure_prefix call. shenanigans! cfp = store.terminalreporter.currentfspath # carriage return, write spaces for the whole line, carriage return, write the new line store.terminalreporter.line('\r' + ' ' * store.terminalreporter._tw.fullwidth + '\r' + line, **kwargs) store.terminalreporter.currentfspath = fauxfactory.gen_alphanumeric(8) store.terminalreporter.write_ensure_prefix(cfp) # resume capturing with diaper: store.capturemanager.resumecapture()
gpl-2.0
ucloud/uai-sdk
examples/tensorflow/inference/im2txt/code/ops/inputs.py
7
7468
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Input ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf def parse_sequence_example(serialized, image_feature, caption_feature): """Parses a tensorflow.SequenceExample into an image and caption. Args: serialized: A scalar string Tensor; a single serialized SequenceExample. image_feature: Name of SequenceExample context feature containing image data. caption_feature: Name of SequenceExample feature list containing integer captions. Returns: encoded_image: A scalar string Tensor containing a JPEG encoded image. caption: A 1-D uint64 Tensor with dynamically specified length. """ context, sequence = tf.parse_single_sequence_example( serialized, context_features={ image_feature: tf.FixedLenFeature([], dtype=tf.string) }, sequence_features={ caption_feature: tf.FixedLenSequenceFeature([], dtype=tf.int64), }) encoded_image = context[image_feature] caption = sequence[caption_feature] return encoded_image, caption def prefetch_input_data(reader, file_pattern, is_training, batch_size, values_per_shard, input_queue_capacity_factor=16, num_reader_threads=1, shard_queue_name="filename_queue", value_queue_name="input_queue"): """Prefetches string values from disk into an input queue. 
In training the capacity of the queue is important because a larger queue means better mixing of training examples between shards. The minimum number of values kept in the queue is values_per_shard * input_queue_capacity_factor, where input_queue_memory factor should be chosen to trade-off better mixing with memory usage. Args: reader: Instance of tf.ReaderBase. file_pattern: Comma-separated list of file patterns (e.g. /tmp/train_data-?????-of-00100). is_training: Boolean; whether prefetching for training or eval. batch_size: Model batch size used to determine queue capacity. values_per_shard: Approximate number of values per shard. input_queue_capacity_factor: Minimum number of values to keep in the queue in multiples of values_per_shard. See comments above. num_reader_threads: Number of reader threads to fill the queue. shard_queue_name: Name for the shards filename queue. value_queue_name: Name for the values input queue. Returns: A Queue containing prefetched string values. """ data_files = [] for pattern in file_pattern.split(","): data_files.extend(tf.gfile.Glob(pattern)) if not data_files: tf.logging.fatal("Found no input files matching %s", file_pattern) else: tf.logging.info("Prefetching values from %d files matching %s", len(data_files), file_pattern) if is_training: filename_queue = tf.train.string_input_producer( data_files, shuffle=True, capacity=16, name=shard_queue_name) min_queue_examples = values_per_shard * input_queue_capacity_factor capacity = min_queue_examples + 100 * batch_size values_queue = tf.RandomShuffleQueue( capacity=capacity, min_after_dequeue=min_queue_examples, dtypes=[tf.string], name="random_" + value_queue_name) else: filename_queue = tf.train.string_input_producer( data_files, shuffle=False, capacity=1, name=shard_queue_name) capacity = values_per_shard + 3 * batch_size values_queue = tf.FIFOQueue( capacity=capacity, dtypes=[tf.string], name="fifo_" + value_queue_name) enqueue_ops = [] for _ in range(num_reader_threads): _, 
value = reader.read(filename_queue) enqueue_ops.append(values_queue.enqueue([value])) tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner( values_queue, enqueue_ops)) tf.summary.scalar( "queue/%s/fraction_of_%d_full" % (values_queue.name, capacity), tf.cast(values_queue.size(), tf.float32) * (1. / capacity)) return values_queue def batch_with_dynamic_pad(images_and_captions, batch_size, queue_capacity, add_summaries=True): """Batches input images and captions. This function splits the caption into an input sequence and a target sequence, where the target sequence is the input sequence right-shifted by 1. Input and target sequences are batched and padded up to the maximum length of sequences in the batch. A mask is created to distinguish real words from padding words. Example: Actual captions in the batch ('-' denotes padded character): [ [ 1 2 3 4 5 ], [ 1 2 3 4 - ], [ 1 2 3 - - ], ] input_seqs: [ [ 1 2 3 4 ], [ 1 2 3 - ], [ 1 2 - - ], ] target_seqs: [ [ 2 3 4 5 ], [ 2 3 4 - ], [ 2 3 - - ], ] mask: [ [ 1 1 1 1 ], [ 1 1 1 0 ], [ 1 1 0 0 ], ] Args: images_and_captions: A list of pairs [image, caption], where image is a Tensor of shape [height, width, channels] and caption is a 1-D Tensor of any length. Each pair will be processed and added to the queue in a separate thread. batch_size: Batch size. queue_capacity: Queue capacity. add_summaries: If true, add caption length summaries. Returns: images: A Tensor of shape [batch_size, height, width, channels]. input_seqs: An int32 Tensor of shape [batch_size, padded_length]. target_seqs: An int32 Tensor of shape [batch_size, padded_length]. mask: An int32 0/1 Tensor of shape [batch_size, padded_length]. 
""" enqueue_list = [] for image, caption in images_and_captions: caption_length = tf.shape(caption)[0] input_length = tf.expand_dims(tf.subtract(caption_length, 1), 0) input_seq = tf.slice(caption, [0], input_length) target_seq = tf.slice(caption, [1], input_length) indicator = tf.ones(input_length, dtype=tf.int32) enqueue_list.append([image, input_seq, target_seq, indicator]) images, input_seqs, target_seqs, mask = tf.train.batch_join( enqueue_list, batch_size=batch_size, capacity=queue_capacity, dynamic_pad=True, name="batch_and_pad") if add_summaries: lengths = tf.add(tf.reduce_sum(mask, 1), 1) tf.summary.scalar("caption_length/batch_min", tf.reduce_min(lengths)) tf.summary.scalar("caption_length/batch_max", tf.reduce_max(lengths)) tf.summary.scalar("caption_length/batch_mean", tf.reduce_mean(lengths)) return images, input_seqs, target_seqs, mask
apache-2.0
guiquanz/node-gyp
gyp/pylib/gyp/xml_fix.py
2767
2174
# Copyright (c) 2011 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Applies a fix to CR LF TAB handling in xml.dom. Fixes this: http://code.google.com/p/chromium/issues/detail?id=76293 Working around this: http://bugs.python.org/issue5752 TODO(bradnelson): Consider dropping this when we drop XP support. """ import xml.dom.minidom def _Replacement_write_data(writer, data, is_attrib=False): """Writes datachars to writer.""" data = data.replace("&", "&amp;").replace("<", "&lt;") data = data.replace("\"", "&quot;").replace(">", "&gt;") if is_attrib: data = data.replace( "\r", "&#xD;").replace( "\n", "&#xA;").replace( "\t", "&#x9;") writer.write(data) def _Replacement_writexml(self, writer, indent="", addindent="", newl=""): # indent = current indentation # addindent = indentation to add to higher levels # newl = newline string writer.write(indent+"<" + self.tagName) attrs = self._get_attributes() a_names = attrs.keys() a_names.sort() for a_name in a_names: writer.write(" %s=\"" % a_name) _Replacement_write_data(writer, attrs[a_name].value, is_attrib=True) writer.write("\"") if self.childNodes: writer.write(">%s" % newl) for node in self.childNodes: node.writexml(writer, indent + addindent, addindent, newl) writer.write("%s</%s>%s" % (indent, self.tagName, newl)) else: writer.write("/>%s" % newl) class XmlFix(object): """Object to manage temporary patching of xml.dom.minidom.""" def __init__(self): # Preserve current xml.dom.minidom functions. self.write_data = xml.dom.minidom._write_data self.writexml = xml.dom.minidom.Element.writexml # Inject replacement versions of a function and a method. 
xml.dom.minidom._write_data = _Replacement_write_data xml.dom.minidom.Element.writexml = _Replacement_writexml def Cleanup(self): if self.write_data: xml.dom.minidom._write_data = self.write_data xml.dom.minidom.Element.writexml = self.writexml self.write_data = None def __del__(self): self.Cleanup()
mit
tylerpedley/Tetris
gtest/test/gtest_output_test.py
1733
12005
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Tests the text output of Google C++ Testing Framework.

SYNOPSIS
       gtest_output_test.py --build_dir=BUILD/DIR --gengolden
         # where BUILD/DIR contains the built gtest_output_test_ file.
       gtest_output_test.py --gengolden
       gtest_output_test.py
"""

__author__ = 'wan@google.com (Zhanyong Wan)'

import os
import re
import sys

import gtest_test_utils


# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
CATCH_EXCEPTIONS_ENV_VAR_NAME = 'GTEST_CATCH_EXCEPTIONS'

IS_WINDOWS = os.name == 'nt'

# TODO(vladl@google.com): remove the _lin suffix.
GOLDEN_NAME = 'gtest_output_test_golden_lin.txt'

PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_output_test_')

# At least one command we exercise must not have the
# --gtest_internal_skip_environment_and_ad_hoc_tests flag.
COMMAND_LIST_TESTS = ({}, [PROGRAM_PATH, '--gtest_list_tests'])
COMMAND_WITH_COLOR = ({}, [PROGRAM_PATH, '--gtest_color=yes'])
COMMAND_WITH_TIME = ({}, [PROGRAM_PATH,
                          '--gtest_print_time',
                          '--gtest_internal_skip_environment_and_ad_hoc_tests',
                          '--gtest_filter=FatalFailureTest.*:LoggingTest.*'])
COMMAND_WITH_DISABLED = (
    {}, [PROGRAM_PATH,
         '--gtest_also_run_disabled_tests',
         '--gtest_internal_skip_environment_and_ad_hoc_tests',
         '--gtest_filter=*DISABLED_*'])
COMMAND_WITH_SHARDING = (
    {'GTEST_SHARD_INDEX': '1', 'GTEST_TOTAL_SHARDS': '2'},
    [PROGRAM_PATH,
     '--gtest_internal_skip_environment_and_ad_hoc_tests',
     '--gtest_filter=PassingTest.*'])

GOLDEN_PATH = os.path.join(gtest_test_utils.GetSourceDir(), GOLDEN_NAME)


def ToUnixLineEnding(s):
  """Changes all Windows/Mac line endings in s to UNIX line endings."""

  return s.replace('\r\n', '\n').replace('\r', '\n')


def RemoveLocations(test_output):
  """Removes all file location info from a Google Test program's output.

  Args:
       test_output:  the output of a Google Test program.

  Returns:
       output with all file location info (in the form of
       'DIRECTORY/FILE_NAME:LINE_NUMBER: 'or
       'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
       'FILE_NAME:#: '.
  """

  return re.sub(r'.*[/\\](.+)(\:\d+|\(\d+\))\: ', r'\1:#: ', test_output)


def RemoveStackTraceDetails(output):
  """Removes all stack traces from a Google Test program's output."""

  # *? means "find the shortest string that matches".
  return re.sub(r'Stack trace:(.|\n)*?\n\n',
                'Stack trace: (omitted)\n\n', output)


def RemoveStackTraces(output):
  """Removes all traces of stack traces from a Google Test program's output."""

  # *? means "find the shortest string that matches".
  return re.sub(r'Stack trace:(.|\n)*?\n\n', '', output)


def RemoveTime(output):
  """Removes all time information from a Google Test program's output."""

  return re.sub(r'\(\d+ ms', '(? ms', output)


def RemoveTypeInfoDetails(test_output):
  """Removes compiler-specific type info from Google Test program's output.

  Args:
       test_output:  the output of a Google Test program.

  Returns:
       output with type information normalized to canonical form.
  """

  # some compilers output the name of type 'unsigned int' as 'unsigned'
  return re.sub(r'unsigned int', 'unsigned', test_output)


def NormalizeToCurrentPlatform(test_output):
  """Normalizes platform specific output details for easier comparison."""

  if IS_WINDOWS:
    # Removes the color information that is not present on Windows.
    test_output = re.sub('\x1b\\[(0;3\d)?m', '', test_output)
    # Changes failure message headers into the Windows format.
    test_output = re.sub(r': Failure\n', r': error: ', test_output)
    # Changes file(line_number) to file:line_number.
    test_output = re.sub(r'((\w|\.)+)\((\d+)\):', r'\1:\3:', test_output)

  return test_output


def RemoveTestCounts(output):
  """Removes test counts from a Google Test program's output."""

  output = re.sub(r'\d+ tests?, listed below',
                  '? tests, listed below', output)
  output = re.sub(r'\d+ FAILED TESTS',
                  '? FAILED TESTS', output)
  output = re.sub(r'\d+ tests? from \d+ test cases?',
                  '? tests from ? test cases', output)
  output = re.sub(r'\d+ tests? from ([a-zA-Z_])',
                  r'? tests from \1', output)
  return re.sub(r'\d+ tests?\.', '? tests.', output)


def RemoveMatchingTests(test_output, pattern):
  """Removes output of specified tests from a Google Test program's output.

  This function strips not only the beginning and the end of a test but also
  all output in between.

  Args:
    test_output:       A string containing the test output.
    pattern:           A regex string that matches names of test cases or
                       tests to remove.

  Returns:
    Contents of test_output with tests whose names match pattern removed.
  """

  test_output = re.sub(
      r'.*\[ RUN      \] .*%s(.|\n)*?\[(  FAILED  |       OK )\] .*%s.*\n' % (
          pattern, pattern),
      '',
      test_output)
  return re.sub(r'.*%s.*\n' % pattern, '', test_output)


def NormalizeOutput(output):
  """Normalizes output (the output of gtest_output_test_.exe)."""

  output = ToUnixLineEnding(output)
  output = RemoveLocations(output)
  output = RemoveStackTraceDetails(output)
  output = RemoveTime(output)
  return output


def GetShellCommandOutput(env_cmd):
  """Runs a command in a sub-process, and returns its output in a string.

  Args:
    env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
             environment variables to set, and element 1 is a string with
             the command and any flags.

  Returns:
    A string with the command's combined standard and diagnostic output.
  """

  # Spawns cmd in a sub-process, and gets its standard I/O file objects.
  # Set and save the environment properly.
  environ = os.environ.copy()
  environ.update(env_cmd[0])
  p = gtest_test_utils.Subprocess(env_cmd[1], env=environ)

  return p.output


def GetCommandOutput(env_cmd):
  """Runs a command and returns its output with all file location
  info stripped off.

  Args:
    env_cmd:  The shell command. A 2-tuple where element 0 is a dict of extra
              environment variables to set, and element 1 is a string with
              the command and any flags.
  """

  # Disables exception pop-ups on Windows.
  environ, cmdline = env_cmd
  environ = dict(environ)  # Ensures we are modifying a copy.
  environ[CATCH_EXCEPTIONS_ENV_VAR_NAME] = '1'
  return NormalizeOutput(GetShellCommandOutput((environ, cmdline)))


def GetOutputOfAllCommands():
  """Returns concatenated output from several representative commands."""

  return (GetCommandOutput(COMMAND_WITH_COLOR) +
          GetCommandOutput(COMMAND_WITH_TIME) +
          GetCommandOutput(COMMAND_WITH_DISABLED) +
          GetCommandOutput(COMMAND_WITH_SHARDING))


test_list = GetShellCommandOutput(COMMAND_LIST_TESTS)
SUPPORTS_DEATH_TESTS = 'DeathTest' in test_list
SUPPORTS_TYPED_TESTS = 'TypedTest' in test_list
SUPPORTS_THREADS = 'ExpectFailureWithThreadsTest' in test_list
SUPPORTS_STACK_TRACES = False

CAN_GENERATE_GOLDEN_FILE = (SUPPORTS_DEATH_TESTS and
                            SUPPORTS_TYPED_TESTS and
                            SUPPORTS_THREADS)


class GTestOutputTest(gtest_test_utils.TestCase):

  def RemoveUnsupportedTests(self, test_output):
    if not SUPPORTS_DEATH_TESTS:
      test_output = RemoveMatchingTests(test_output, 'DeathTest')
    if not SUPPORTS_TYPED_TESTS:
      test_output = RemoveMatchingTests(test_output, 'TypedTest')
      test_output = RemoveMatchingTests(test_output, 'TypedDeathTest')
      test_output = RemoveMatchingTests(test_output, 'TypeParamDeathTest')
    if not SUPPORTS_THREADS:
      test_output = RemoveMatchingTests(test_output,
                                        'ExpectFailureWithThreadsTest')
      test_output = RemoveMatchingTests(test_output,
                                        'ScopedFakeTestPartResultReporterTest')
      test_output = RemoveMatchingTests(test_output,
                                        'WorksConcurrently')
    if not SUPPORTS_STACK_TRACES:
      test_output = RemoveStackTraces(test_output)

    return test_output

  def testOutput(self):
    output = GetOutputOfAllCommands()

    # Use a context manager so the golden file is closed even if an
    # assertion below fails (the previous version could leak the handle).
    with open(GOLDEN_PATH, 'rb') as golden_file:
      # A mis-configured source control system can cause \r appear in EOL
      # sequences when we read the golden file irrespective of an operating
      # system used. Therefore, we need to strip those \r's from newlines
      # unconditionally.
      golden = ToUnixLineEnding(golden_file.read())

    # We want the test to pass regardless of certain features being
    # supported or not.

    # We still have to remove type name specifics in all cases.
    normalized_actual = RemoveTypeInfoDetails(output)
    normalized_golden = RemoveTypeInfoDetails(golden)

    if CAN_GENERATE_GOLDEN_FILE:
      self.assertEqual(normalized_golden, normalized_actual)
    else:
      normalized_actual = NormalizeToCurrentPlatform(
          RemoveTestCounts(normalized_actual))
      normalized_golden = NormalizeToCurrentPlatform(
          RemoveTestCounts(self.RemoveUnsupportedTests(normalized_golden)))

      # This code is very handy when debugging golden file differences:
      if os.getenv('DEBUG_GTEST_OUTPUT_TEST'):
        # Close the debug dumps explicitly via 'with' -- the previous
        # version left both file objects open.
        with open(os.path.join(
            gtest_test_utils.GetSourceDir(),
            '_gtest_output_test_normalized_actual.txt'), 'wb') as f:
          f.write(normalized_actual)
        with open(os.path.join(
            gtest_test_utils.GetSourceDir(),
            '_gtest_output_test_normalized_golden.txt'), 'wb') as f:
          f.write(normalized_golden)

      self.assertEqual(normalized_golden, normalized_actual)


if __name__ == '__main__':
  if sys.argv[1:] == [GENGOLDEN_FLAG]:
    if CAN_GENERATE_GOLDEN_FILE:
      output = GetOutputOfAllCommands()
      with open(GOLDEN_PATH, 'wb') as golden_file:
        golden_file.write(output)
    else:
      message = (
          """Unable to write a golden file when compiled in an environment
that does not support all the required features (death tests, typed tests,
and multiple threads).  Please generate the golden file using a binary built
with those features enabled.""")

      sys.stderr.write(message)
      sys.exit(1)
  else:
    gtest_test_utils.Main()
gpl-3.0
ninegrid/dotfiles-vim
bundle/vim-python-mode/pymode/libs/pylama/config.py
13
6016
""" Parse arguments from command line and configuration files. """ import fnmatch import sys import os from re import compile as re import logging from argparse import ArgumentParser from . import __version__ from .libs.inirama import Namespace from .lint.extensions import LINTERS # Setup a logger LOGGER = logging.getLogger('pylama') LOGGER.propagate = False STREAM = logging.StreamHandler(sys.stdout) LOGGER.addHandler(STREAM) #: A default checkers DEFAULT_LINTERS = 'pep8', 'pyflakes', 'mccabe' CURDIR = os.getcwd() CONFIG_FILES = [ os.path.join(CURDIR, basename) for basename in ('pylama.ini', 'setup.cfg', 'tox.ini', 'pytest.ini') ] class _Default(object): def __init__(self, value=None): self.value = value def __str__(self): return str(self.value) def __repr__(self): return "<_Default [%s]>" % self.value def split_csp_str(s): """ Split commaseparated string. :returns: list of splitted values """ if isinstance(s, (list, tuple)): return s return list(set(i for i in s.strip().split(',') if i)) def parse_linters(linters): """ Initialize choosen linters. :returns: list of inited linters """ result = list() for name in split_csp_str(linters): linter = LINTERS.get(name) if linter: result.append((name, linter)) else: logging.warn("Linter `%s` not found.", name) return result PARSER = ArgumentParser(description="Code audit tool for python.") PARSER.add_argument( "path", nargs='?', default=_Default(CURDIR), help="Path on file or directory for code check.") PARSER.add_argument( "--verbose", "-v", action='store_true', help="Verbose mode.") PARSER.add_argument('--version', action='version', version='%(prog)s ' + __version__) PARSER.add_argument( "--format", "-f", default=_Default('pep8'), choices=['pep8', 'pylint'], help="Choose errors format (pep8, pylint).") PARSER.add_argument( "--select", "-s", default=_Default(''), type=split_csp_str, help="Select errors and warnings. 
(comma-separated list)") PARSER.add_argument( "--linters", "-l", default=_Default(','.join(DEFAULT_LINTERS)), type=parse_linters, help=( "Select linters. (comma-separated). Choices are %s." % ','.join(s for s in LINTERS.keys()) )) PARSER.add_argument( "--ignore", "-i", default=_Default(''), type=split_csp_str, help="Ignore errors and warnings. (comma-separated)") PARSER.add_argument( "--skip", default=_Default(''), type=lambda s: [re(fnmatch.translate(p)) for p in s.split(',') if p], help="Skip files by masks (comma-separated, Ex. */messages.py)") PARSER.add_argument("--report", "-r", help="Send report to file [REPORT]") PARSER.add_argument( "--hook", action="store_true", help="Install Git (Mercurial) hook.") PARSER.add_argument( "--async", action="store_true", help="Enable async mode. Usefull for checking a lot of files. " "Dont supported with pylint.") PARSER.add_argument( "--options", "-o", default="", help="Select configuration file. By default is '<CURDIR>/pylama.ini'") PARSER.add_argument( "--force", "-F", action='store_true', default=_Default(False), help="Force code checking (if linter doesnt allow)") ACTIONS = dict((a.dest, a) for a in PARSER._actions) def parse_options(args=None, config=True, **overrides): # noqa """ Parse options from command line and configuration files. 
:return argparse.Namespace: """ if args is None: args = [] # Parse args from command string options = PARSER.parse_args(args) options.file_params = dict() options.linters_params = dict() # Override options for k, v in overrides.items(): passed_value = getattr(options, k, _Default()) if isinstance(passed_value, _Default): setattr(options, k, _Default(v)) # Compile options from ini if config: cfg = get_config(str(options.options)) for k, v in cfg.default.items(): LOGGER.info('Find option %s (%s)', k, v) passed_value = getattr(options, k, _Default()) if isinstance(passed_value, _Default): setattr(options, k, _Default(v)) # Parse file related options for name, opts in cfg.sections.items(): if not name.startswith('pylama'): continue if name == cfg.default_section: continue name = name[7:] if name in LINTERS: options.linters_params[name] = dict(opts) continue mask = re(fnmatch.translate(name)) options.file_params[mask] = dict(opts) # Postprocess options opts = dict(options.__dict__.items()) for name, value in opts.items(): if isinstance(value, _Default): setattr(options, name, process_value(name, value.value)) return options def process_value(name, value): """ Compile option value. """ action = ACTIONS.get(name) if not action: return value if callable(action.type): return action.type(value) if action.const: return bool(int(value)) return value def get_config(ini_path=None): """ Load configuration from INI. :return Namespace: """ config = Namespace() config.default_section = 'pylama' if not ini_path: for path in CONFIG_FILES: if os.path.isfile(path) and os.access(path, os.R_OK): config.read(path) else: config.read(ini_path) return config def setup_logger(options): """ Setup logger with options. """ LOGGER.setLevel(logging.INFO if options.verbose else logging.WARN) if options.report: LOGGER.removeHandler(STREAM) LOGGER.addHandler(logging.FileHandler(options.report, mode='w')) LOGGER.info('Try to read configuration from: ' + options.options) # pylama:ignore=W0212
unlicense
HandyMenny/android_kernel_sony_u8500
tools/perf/scripts/python/failed-syscalls-by-pid.py
11180
2058
# failed system call counts, by pid # (c) 2010, Tom Zanussi <tzanussi@gmail.com> # Licensed under the terms of the GNU GPL License version 2 # # Displays system-wide failed system call totals, broken down by pid. # If a [comm] arg is specified, only syscalls called by [comm] are displayed. import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import * usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n"; for_comm = None for_pid = None if len(sys.argv) > 2: sys.exit(usage) if len(sys.argv) > 1: try: for_pid = int(sys.argv[1]) except: for_comm = sys.argv[1] syscalls = autodict() def trace_begin(): print "Press control+C to stop and show the summary" def trace_end(): print_error_totals() def raw_syscalls__sys_exit(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, id, ret): if (for_comm and common_comm != for_comm) or \ (for_pid and common_pid != for_pid ): return if ret < 0: try: syscalls[common_comm][common_pid][id][ret] += 1 except TypeError: syscalls[common_comm][common_pid][id][ret] = 1 def print_error_totals(): if for_comm is not None: print "\nsyscall errors for %s:\n\n" % (for_comm), else: print "\nsyscall errors:\n\n", print "%-30s %10s\n" % ("comm [pid]", "count"), print "%-30s %10s\n" % ("------------------------------", \ "----------"), comm_keys = syscalls.keys() for comm in comm_keys: pid_keys = syscalls[comm].keys() for pid in pid_keys: print "\n%s [%d]\n" % (comm, pid), id_keys = syscalls[comm][pid].keys() for id in id_keys: print " syscall: %-16s\n" % syscall_name(id), ret_keys = syscalls[comm][pid][id].keys() for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True): print " err = %-20s %10d\n" % (strerror(ret), val),
gpl-2.0
vrjuggler/maestro
playpen/cluster_options/ClusterEditorBase.py
2
4323
# Maestro is Copyright (C) 2006-2008 by Infiscape Corporation
#
# Original Author: Aron Bierbaum
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.

# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'playpen/cluster_options/ClusterEditorBase.ui'
#
# WARNING! All changes made in this file will be lost!
#
# NOTE(review): this file is pyuic4 output -- make changes in the .ui file
# and regenerate; do not hand-edit the widget setup below.

from PyQt4 import QtCore, QtGui

class Ui_ClusterEditor(object):
    # Builds the editor widget tree: a tree/table splitter on top,
    # Add/Remove/Save buttons in the middle, and a launch frame below.
    def setupUi(self, ClusterEditor):
        ClusterEditor.setObjectName("ClusterEditor")
        ClusterEditor.resize(QtCore.QSize(QtCore.QRect(0,0,554,415).size()).expandedTo(ClusterEditor.minimumSizeHint()))

        self.vboxlayout = QtGui.QVBoxLayout(ClusterEditor)
        self.vboxlayout.setMargin(9)
        self.vboxlayout.setSpacing(6)
        self.vboxlayout.setObjectName("vboxlayout")

        # Horizontal splitter: cluster tree on the left, detail table on
        # the right.
        self.mSplitter = QtGui.QSplitter(ClusterEditor)
        self.mSplitter.setOrientation(QtCore.Qt.Horizontal)
        self.mSplitter.setObjectName("mSplitter")

        self.mTreeView = QtGui.QTreeView(self.mSplitter)
        self.mTreeView.setObjectName("mTreeView")

        self.mTableView = QtGui.QTableView(self.mSplitter)
        self.mTableView.setObjectName("mTableView")
        self.vboxlayout.addWidget(self.mSplitter)

        # Button row, right-aligned by a leading expanding spacer.
        self.hboxlayout = QtGui.QHBoxLayout()
        self.hboxlayout.setMargin(0)
        self.hboxlayout.setSpacing(6)
        self.hboxlayout.setObjectName("hboxlayout")

        spacerItem = QtGui.QSpacerItem(40,20,QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Minimum)
        self.hboxlayout.addItem(spacerItem)

        self.mAddBtn = QtGui.QPushButton(ClusterEditor)
        self.mAddBtn.setObjectName("mAddBtn")
        self.hboxlayout.addWidget(self.mAddBtn)

        self.mRemoveBtn = QtGui.QPushButton(ClusterEditor)
        self.mRemoveBtn.setObjectName("mRemoveBtn")
        self.hboxlayout.addWidget(self.mRemoveBtn)

        self.mSaveBtn = QtGui.QPushButton(ClusterEditor)
        self.mSaveBtn.setObjectName("mSaveBtn")
        self.hboxlayout.addWidget(self.mSaveBtn)
        self.vboxlayout.addLayout(self.hboxlayout)

        # Launch frame holding the application selection combo box.
        self.mLaunchFrame = QtGui.QFrame(ClusterEditor)
        self.mLaunchFrame.setFrameShape(QtGui.QFrame.StyledPanel)
        self.mLaunchFrame.setFrameShadow(QtGui.QFrame.Raised)
        self.mLaunchFrame.setObjectName("mLaunchFrame")

        self.vboxlayout1 = QtGui.QVBoxLayout(self.mLaunchFrame)
        self.vboxlayout1.setMargin(9)
        self.vboxlayout1.setSpacing(6)
        self.vboxlayout1.setObjectName("vboxlayout1")

        self.mAppComboBox = QtGui.QComboBox(self.mLaunchFrame)
        self.mAppComboBox.setObjectName("mAppComboBox")
        self.vboxlayout1.addWidget(self.mAppComboBox)

        spacerItem1 = QtGui.QSpacerItem(20,40,QtGui.QSizePolicy.Minimum,QtGui.QSizePolicy.Expanding)
        self.vboxlayout1.addItem(spacerItem1)
        self.vboxlayout.addWidget(self.mLaunchFrame)

        self.retranslateUi(ClusterEditor)
        QtCore.QMetaObject.connectSlotsByName(ClusterEditor)

    # Applies (re)translated user-visible strings to the widgets.
    def retranslateUi(self, ClusterEditor):
        ClusterEditor.setWindowTitle(QtGui.QApplication.translate("ClusterEditor", "Cluster Editor", None, QtGui.QApplication.UnicodeUTF8))
        self.mAddBtn.setText(QtGui.QApplication.translate("ClusterEditor", "&Add", None, QtGui.QApplication.UnicodeUTF8))
        self.mRemoveBtn.setText(QtGui.QApplication.translate("ClusterEditor", "&Remove", None, QtGui.QApplication.UnicodeUTF8))
        self.mSaveBtn.setText(QtGui.QApplication.translate("ClusterEditor", "&Save", None, QtGui.QApplication.UnicodeUTF8))


if __name__ == "__main__":
    # Stand-alone preview of the generated form.
    import sys
    app = QtGui.QApplication(sys.argv)
    ClusterEditor = QtGui.QWidget()
    ui = Ui_ClusterEditor()
    ui.setupUi(ClusterEditor)
    ClusterEditor.show()
    sys.exit(app.exec_())
gpl-2.0
DepthDeluxe/ansible
lib/ansible/modules/messaging/rabbitmq_plugin.py
69
4777
#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2013, Chatham Financial <oss@chathamfinancial.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: rabbitmq_plugin
short_description: Adds or removes plugins to RabbitMQ
description:
  - Enables or disables RabbitMQ plugins
version_added: "1.1"
author: '"Chris Hoffman (@chrishoffman)"'
options:
  names:
    description:
      - Comma-separated list of plugin names
    required: true
    default: null
    aliases: [name]
  new_only:
    description:
      - Only enable missing plugins
      - Does not disable plugins that are not in the names list
    required: false
    default: "no"
    choices: [ "yes", "no" ]
  state:
    description:
      - Specify if plugins are to be enabled or disabled
    required: false
    default: enabled
    choices: [enabled, disabled]
  prefix:
    description:
      - Specify a custom install prefix to a Rabbit
    required: false
    version_added: "1.3"
    default: null
'''

EXAMPLES = '''
# Enables the rabbitmq_management plugin
- rabbitmq_plugin:
    names: rabbitmq_management
    state: enabled
'''

import os


class RabbitMqPlugins(object):
    """Thin wrapper around the rabbitmq-plugins command-line tool."""

    def __init__(self, module):
        # Resolve the rabbitmq-plugins binary: under a custom prefix if
        # given (bin/ or sbin/), otherwise from the system PATH.
        self.module = module

        if module.params['prefix']:
            if os.path.isdir(os.path.join(module.params['prefix'], 'bin')):
                bin_path = os.path.join(module.params['prefix'], 'bin')
            elif os.path.isdir(os.path.join(module.params['prefix'], 'sbin')):
                bin_path = os.path.join(module.params['prefix'], 'sbin')
            else:
                # No such path exists.
                raise Exception("No binary folder in prefix %s" %
                                module.params['prefix'])

            self._rabbitmq_plugins = bin_path + "/rabbitmq-plugins"

        else:
            self._rabbitmq_plugins = module.get_bin_path('rabbitmq-plugins', True)

    def _exec(self, args, run_in_check_mode=False):
        # Run rabbitmq-plugins with the given args and return its stdout
        # lines.  Mutating commands are skipped in check mode unless
        # run_in_check_mode is set (used for read-only 'list').
        if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
            cmd = [self._rabbitmq_plugins]
            rc, out, err = self.module.run_command(cmd + args, check_rc=True)
            return out.splitlines()
        return list()

    def get_all(self):
        """Return the list of explicitly enabled plugins (-E -m output)."""
        list_output = self._exec(['list', '-E', '-m'], True)
        plugins = []
        for plugin in list_output:
            # Stop at the first empty line in the minimal listing.
            if not plugin:
                break
            plugins.append(plugin)

        return plugins

    def enable(self, name):
        """Enable a single plugin by name."""
        self._exec(['enable', name])

    def disable(self, name):
        """Disable a single plugin by name."""
        self._exec(['disable', name])


def main():
    """Module entry point: reconcile enabled plugins with the names list."""
    arg_spec = dict(
        names=dict(required=True, aliases=['name']),
        new_only=dict(default='no', type='bool'),
        state=dict(default='enabled', choices=['enabled', 'disabled']),
        prefix=dict(required=False, default=None)
    )
    module = AnsibleModule(
        argument_spec=arg_spec,
        supports_check_mode=True
    )

    names = module.params['names'].split(',')
    new_only = module.params['new_only']
    state = module.params['state']

    rabbitmq_plugins = RabbitMqPlugins(module)

    enabled_plugins = rabbitmq_plugins.get_all()

    enabled = []
    disabled = []
    if state == 'enabled':
        # Unless new_only is set, disable everything not in the names
        # list, then enable whatever is missing.
        if not new_only:
            for plugin in enabled_plugins:
                if plugin not in names:
                    rabbitmq_plugins.disable(plugin)
                    disabled.append(plugin)

        for name in names:
            if name not in enabled_plugins:
                rabbitmq_plugins.enable(name)
                enabled.append(name)
    else:
        # state == 'disabled': disable only the named plugins.
        for plugin in enabled_plugins:
            if plugin in names:
                rabbitmq_plugins.disable(plugin)
                disabled.append(plugin)

    changed = len(enabled) > 0 or len(disabled) > 0
    module.exit_json(changed=changed, enabled=enabled, disabled=disabled)

# import module snippets
from ansible.module_utils.basic import *

if __name__ == '__main__':
    main()
gpl-3.0
degustaf/github3.py
github3/notifications.py
10
5135
# -*- coding: utf-8 -*-
"""
github3.notifications
=====================

This module contains the classes relating to notifications.

See also: http://developer.github.com/v3/activity/notifications/
"""
from __future__ import unicode_literals

from json import dumps
from .models import GitHubCore


class Thread(GitHubCore):

    """The :class:`Thread <Thread>` object wraps notification threads.

    This contains information about the repository generating the
    notification, the subject, and the reason.

    Two thread instances can be checked like so::

        t1 == t2
        t1 != t2

    And is equivalent to::

        t1.id == t2.id
        t1.id != t2.id

    See also:
    http://developer.github.com/v3/activity/notifications/#view-a-single-thread
    """

    def _update_attributes(self, notif):
        """Populate attributes from the notification JSON payload."""
        self._api = notif.get('url')
        #: Comment responsible for the notification
        self.comment = notif.get('comment', {})
        #: Thread information
        self.thread = notif.get('thread', {})
        # Imported here to avoid a circular import with github3.repos.
        from .repos import Repository
        #: Repository the comment was made on
        self.repository = Repository(notif.get('repository', {}), self)
        #: When the thread was last updated
        self.updated_at = self._strptime(notif.get('updated_at'))
        #: Id of the thread
        self.id = notif.get('id')
        #: Dictionary of urls for the thread
        self.urls = notif.get('urls')
        #: datetime object representing the last time the user read the thread
        self.last_read_at = self._strptime(notif.get('last_read_at'))
        #: The reason you're receiving the notification
        self.reason = notif.get('reason')
        #: Subject of the Notification, e.g., which issue/pull/diff is this in
        #: relation to. This is a dictionary
        self.subject = notif.get('subject')
        #: Whether the thread is currently unread
        self.unread = notif.get('unread')

    def _repr(self):
        """Return the debug representation, keyed by the subject title."""
        return '<Thread [{0}]>'.format(self.subject.get('title'))

    def delete_subscription(self):
        """Delete subscription for this thread.

        :returns: bool
        """
        url = self._build_url('subscription', base_url=self._api)
        return self._boolean(self._delete(url), 204, 404)

    def is_unread(self):
        """Tells you if the thread is unread or not."""
        return self.unread

    def mark(self):
        """Mark the thread as read.

        :returns: bool
        """
        return self._boolean(self._patch(self._api), 205, 404)

    def set_subscription(self, subscribed, ignored):
        """Set the user's subscription for this thread

        :param bool subscribed: (required), determines if notifications should
            be received from this thread.
        :param bool ignored: (required), determines if notifications should be
            ignored from this thread.
        :returns: :class:`Subscription <Subscription>`
        """
        url = self._build_url('subscription', base_url=self._api)
        sub = {'subscribed': subscribed, 'ignored': ignored}
        json = self._json(self._put(url, data=dumps(sub)), 200)
        return self._instance_or_null(Subscription, json)

    def subscription(self):
        """Checks the status of the user's subscription to this thread.

        :returns: :class:`Subscription <Subscription>`
        """
        url = self._build_url('subscription', base_url=self._api)
        json = self._json(self._get(url), 200)
        return self._instance_or_null(Subscription, json)


class Subscription(GitHubCore):

    """This object wraps thread and repository subscription information.

    See also:
    developer.github.com/v3/activity/notifications/#get-a-thread-subscription
    """

    def _update_attributes(self, sub):
        """Populate attributes from the subscription JSON payload."""
        self._api = sub.get('url')
        #: reason user is subscribed to this thread/repository
        self.reason = sub.get('reason')
        #: datetime representation of when the subscription was created
        self.created_at = self._strptime(sub.get('created_at'))
        #: API url of the thread if it exists
        self.thread_url = sub.get('thread_url')
        #: API url of the repository if it exists
        self.repository_url = sub.get('repository_url')
        #: Whether notifications from this thread/repository are ignored
        self.ignored = sub.get('ignored', False)
        #: Whether the user is subscribed to this thread/repository
        self.subscribed = sub.get('subscribed', False)

    def _repr(self):
        """Return the debug representation, keyed by subscription state."""
        return '<Subscription [{0}]>'.format(self.subscribed)

    def delete(self):
        """Delete this subscription.

        :returns: bool
        """
        return self._boolean(self._delete(self._api), 204, 404)

    def is_ignored(self):
        """Tells you if notifications from this subscription are ignored."""
        return self.ignored

    def is_subscribed(self):
        """Tells you if the user is subscribed."""
        return self.subscribed

    def set(self, subscribed, ignored):
        """Set the user's subscription for this subscription

        :param bool subscribed: (required), determines if notifications should
            be received from this thread.
        :param bool ignored: (required), determines if notifications should be
            ignored from this thread.
        """
        sub = {'subscribed': subscribed, 'ignored': ignored}
        json = self._json(self._put(self._api, data=dumps(sub)), 200)
        self._update_attributes(json)
bsd-3-clause
aequitas/home-assistant
homeassistant/components/teksavvy/sensor.py
2
6043
"""Support for TekSavvy Bandwidth Monitor.""" from datetime import timedelta import logging import async_timeout import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import ( CONF_API_KEY, CONF_MONITORED_VARIABLES, CONF_NAME) from homeassistant.helpers.aiohttp_client import async_get_clientsession import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import Entity from homeassistant.util import Throttle _LOGGER = logging.getLogger(__name__) DEFAULT_NAME = 'TekSavvy' CONF_TOTAL_BANDWIDTH = 'total_bandwidth' GIGABYTES = 'GB' # type: str PERCENT = '%' # type: str MIN_TIME_BETWEEN_UPDATES = timedelta(hours=1) REQUEST_TIMEOUT = 5 # seconds SENSOR_TYPES = { 'usage': ['Usage Ratio', PERCENT, 'mdi:percent'], 'usage_gb': ['Usage', GIGABYTES, 'mdi:download'], 'limit': ['Data limit', GIGABYTES, 'mdi:download'], 'onpeak_download': ['On Peak Download', GIGABYTES, 'mdi:download'], 'onpeak_upload': ['On Peak Upload', GIGABYTES, 'mdi:upload'], 'onpeak_total': ['On Peak Total', GIGABYTES, 'mdi:download'], 'offpeak_download': ['Off Peak download', GIGABYTES, 'mdi:download'], 'offpeak_upload': ['Off Peak Upload', GIGABYTES, 'mdi:upload'], 'offpeak_total': ['Off Peak Total', GIGABYTES, 'mdi:download'], 'onpeak_remaining': ['Remaining', GIGABYTES, 'mdi:download'] } API_HA_MAP = ( ('OnPeakDownload', 'onpeak_download'), ('OnPeakUpload', 'onpeak_upload'), ('OffPeakDownload', 'offpeak_download'), ('OffPeakUpload', 'offpeak_upload')) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_MONITORED_VARIABLES): vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]), vol.Required(CONF_API_KEY): cv.string, vol.Required(CONF_TOTAL_BANDWIDTH): cv.positive_int, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, }) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up the sensor platform.""" websession = async_get_clientsession(hass) apikey = 
config.get(CONF_API_KEY) bandwidthcap = config.get(CONF_TOTAL_BANDWIDTH) ts_data = TekSavvyData(hass.loop, websession, apikey, bandwidthcap) ret = await ts_data.async_update() if ret is False: _LOGGER.error("Invalid Teksavvy API key: %s", apikey) return name = config.get(CONF_NAME) sensors = [] for variable in config[CONF_MONITORED_VARIABLES]: sensors.append(TekSavvySensor(ts_data, variable, name)) async_add_entities(sensors, True) class TekSavvySensor(Entity): """Representation of TekSavvy Bandwidth sensor.""" def __init__(self, teksavvydata, sensor_type, name): """Initialize the sensor.""" self.client_name = name self.type = sensor_type self._name = SENSOR_TYPES[sensor_type][0] self._unit_of_measurement = SENSOR_TYPES[sensor_type][1] self._icon = SENSOR_TYPES[sensor_type][2] self.teksavvydata = teksavvydata self._state = None @property def name(self): """Return the name of the sensor.""" return '{} {}'.format(self.client_name, self._name) @property def state(self): """Return the state of the sensor.""" return self._state @property def unit_of_measurement(self): """Return the unit of measurement of this entity, if any.""" return self._unit_of_measurement @property def icon(self): """Icon to use in the frontend, if any.""" return self._icon async def async_update(self): """Get the latest data from TekSavvy and update the state.""" await self.teksavvydata.async_update() if self.type in self.teksavvydata.data: self._state = round(self.teksavvydata.data[self.type], 2) class TekSavvyData: """Get data from TekSavvy API.""" def __init__(self, loop, websession, api_key, bandwidth_cap): """Initialize the data object.""" self.loop = loop self.websession = websession self.api_key = api_key self.bandwidth_cap = bandwidth_cap # Set unlimited users to infinite, otherwise the cap. 
self.data = {"limit": self.bandwidth_cap} if self.bandwidth_cap > 0 \ else {"limit": float('inf')} @Throttle(MIN_TIME_BETWEEN_UPDATES) async def async_update(self): """Get the TekSavvy bandwidth data from the web service.""" headers = {"TekSavvy-APIKey": self.api_key} _LOGGER.debug("Updating TekSavvy data") url = "https://api.teksavvy.com/"\ "web/Usage/UsageSummaryRecords?$filter=IsCurrent%20eq%20true" with async_timeout.timeout(REQUEST_TIMEOUT): req = await self.websession.get(url, headers=headers) if req.status != 200: _LOGGER.error("Request failed with status: %u", req.status) return False try: data = await req.json() for (api, ha_name) in API_HA_MAP: self.data[ha_name] = float(data["value"][0][api]) on_peak_download = self.data["onpeak_download"] on_peak_upload = self.data["onpeak_upload"] off_peak_download = self.data["offpeak_download"] off_peak_upload = self.data["offpeak_upload"] limit = self.data["limit"] # Support "unlimited" users if self.bandwidth_cap > 0: self.data["usage"] = 100*on_peak_download/self.bandwidth_cap else: self.data["usage"] = 0 self.data["usage_gb"] = on_peak_download self.data["onpeak_total"] = on_peak_download + on_peak_upload self.data["offpeak_total"] =\ off_peak_download + off_peak_upload self.data["onpeak_remaining"] = limit - on_peak_download return True except ValueError: _LOGGER.error("JSON Decode Failed") return False
apache-2.0
mplewis/platformio
platformio/builder/scripts/timsp430.py
6
2409
# Copyright (C) Ivan Kravets <me@ikravets.com> # See LICENSE for details. """ Builder for Texas Instruments MSP430 Ultra-Low Power 16-bit microcontrollers """ from os.path import join from platform import system from SCons.Script import (COMMAND_LINE_TARGETS, AlwaysBuild, Builder, Default, DefaultEnvironment) env = DefaultEnvironment() env.Replace( AR="msp430-ar", AS="msp430-as", CC="msp430-gcc", CXX="msp430-g++", OBJCOPY="msp430-objcopy", RANLIB="msp430-ranlib", SIZETOOL="msp430-size", ARFLAGS=["rcs"], ASPPFLAGS=["-x", "assembler-with-cpp"], CCFLAGS=[ "-g", # include debugging info (so errors include line numbers) "-Os", # optimize for size # "-Wall", # show warnings "-ffunction-sections", # place each function in its own section "-fdata-sections", "-MMD", # output dependancy info "-mmcu=$BOARD_MCU" ], CPPDEFINES=[ "F_CPU=$BOARD_F_CPU" ], LINK="$CC", LINKFLAGS=[ "-Os", "-mmcu=$BOARD_MCU", "-Wl,-gc-sections,-u,main" ], LIBS=["m"], SIZEPRINTCMD='"$SIZETOOL" -B -d $SOURCES', UPLOADER=join("$PIOPACKAGES_DIR", "tool-mspdebug", "mspdebug"), UPLOADERFLAGS=[ "$UPLOAD_PROTOCOL" if system() != "Windows" else "tilib", "--force-reset" ], UPLOADCMD='$UPLOADER $UPLOADERFLAGS "prog $SOURCES"', PROGNAME="firmware", PROGSUFFIX=".elf" ) env.Append( BUILDERS=dict( ElfToHex=Builder( action=" ".join([ "$OBJCOPY", "-O", "ihex", "-R", ".eeprom", "$SOURCES", "$TARGET"]), suffix=".hex" ) ) ) # # Target: Build executable and linkable firmware # target_elf = env.BuildProgram() # # Target: Build the .hex # if "uploadlazy" in COMMAND_LINE_TARGETS: target_firm = join("$BUILD_DIR", "firmware.hex") else: target_firm = env.ElfToHex(join("$BUILD_DIR", "firmware"), target_elf) # # Target: Print binary size # target_size = env.Alias("size", target_elf, "$SIZEPRINTCMD") AlwaysBuild(target_size) # # Target: Upload firmware # upload = env.Alias(["upload", "uploadlazy"], target_firm, "$UPLOADCMD") AlwaysBuild(upload) # # Target: Define targets # Default([target_firm, target_size])
mit
nikste/tensorflow
tensorflow/python/debug/examples/debug_errors.py
150
2655
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example of debugging TensorFlow runtime errors using tfdbg."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import sys

import numpy as np
import tensorflow as tf

from tensorflow.python import debug as tf_debug


def main(_):
  """Build a small graph and run it so that the selected error fires.

  The error type is chosen with --error; with --debug the session is wrapped
  in the tfdbg CLI so the failure can be inspected interactively.
  """
  sess = tf.Session()

  # Construct the TensorFlow network.
  ph_float = tf.placeholder(tf.float32, name="ph_float")
  # x is the transpose of whatever is fed for ph_float; a (3, 1) feed makes
  # the matmul below valid, a (1, 3) feed triggers the shape mismatch.
  x = tf.transpose(ph_float, name="x")
  v = tf.Variable(np.array([[-2.0], [-3.0], [6.0]], dtype=np.float32), name="v")
  m = tf.constant(
      np.array([[0.0, 1.0, 2.0], [-4.0, -1.0, 0.0]]),
      dtype=tf.float32,
      name="m")
  y = tf.matmul(m, x, name="y")
  # z reads v, which is deliberately never initialized in this script —
  # running it raises FailedPreconditionError (the "uninitialized_variable"
  # case below).
  z = tf.matmul(m, v, name="z")

  if FLAGS.debug:
    sess = tf_debug.LocalCLIDebugWrapperSession(sess, ui_type=FLAGS.ui_type)

  if FLAGS.error == "shape_mismatch":
    # (2, 3) x (1, 3): incompatible inner dimensions -> runtime error.
    print(sess.run(y, feed_dict={ph_float: np.array([[0.0], [1.0], [2.0]])}))
  elif FLAGS.error == "uninitialized_variable":
    print(sess.run(z))
  elif FLAGS.error == "no_error":
    # (1, 3) transposed to (3, 1) matches m's columns -> succeeds.
    print(sess.run(y, feed_dict={ph_float: np.array([[0.0, 1.0, 2.0]])}))
  else:
    raise ValueError("Unrecognized error type: " + FLAGS.error)


if __name__ == "__main__":
  parser = argparse.ArgumentParser()
  # Register a "bool" type so "--debug true/false" parses as a boolean.
  parser.register("type", "bool", lambda v: v.lower() == "true")
  parser.add_argument(
      "--error",
      type=str,
      default="shape_mismatch",
      help="""\
Type of the error to generate (shape_mismatch | uninitialized_variable |
no_error).\
""")
  parser.add_argument(
      "--ui_type",
      type=str,
      default="curses",
      help="Command-line user interface type (curses | readline)")
  parser.add_argument(
      "--debug",
      type="bool",
      nargs="?",
      const=True,
      default=False,
      help="Use debugger to track down bad values during training")
  # Unrecognized flags are forwarded to tf.app.run untouched.
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
apache-2.0
zhongyi-zhang/azure-quickstart-templates
bosh-cf-crossregion/scripts/setup_env.py
121
9192
#!/usr/bin/env python
# NOTE: Python 2 script (uses a print statement in main()).

import json
import netaddr
import os
import random
import re
import requests
import sys

from azure.storage.blob import AppendBlobService
from azure.storage.table import TableService
import azure.mgmt.network
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.network import NetworkManagementClient, NetworkManagementClientConfiguration


def prepare_storage(settings):
    """Create the 'bosh'/'stemcell' containers and 'stemcells' tables in the
    four storage accounts named in *settings* (default + premium, primary +
    secondary)."""
    default_storage_account_name = settings["DEFAULT_STORAGE_ACCOUNT_NAME"]
    storage_access_key = settings["STORAGE_ACCESS_KEY"]
    endpoint_suffix = settings["SERVICE_HOST_BASE"]
    blob_service = AppendBlobService(account_name=default_storage_account_name,
                                     account_key=storage_access_key,
                                     endpoint_suffix=endpoint_suffix)
    blob_service.create_container('bosh')
    # Stemcell blobs must be publicly readable so BOSH VMs can fetch them.
    blob_service.create_container(
        container_name='stemcell',
        public_access='blob'
    )

    # Prepare the table for storing meta datas of storage account and stemcells
    table_service = TableService(account_name=default_storage_account_name,
                                 account_key=storage_access_key,
                                 endpoint_suffix=endpoint_suffix)
    table_service.create_table('stemcells')

    # For secondary
    default_storage_account_name_secondary = settings["DEFAULT_STORAGE_ACCOUNT_NAME_SECONDARY"]
    default_storage_access_key_secondary = settings["DEFAULT_STORAGE_ACCESS_KEY_SECONDARY"]
    endpoint_suffix = settings["SERVICE_HOST_BASE"]
    blob_service = AppendBlobService(account_name=default_storage_account_name_secondary,
                                     account_key=default_storage_access_key_secondary,
                                     endpoint_suffix=endpoint_suffix)
    blob_service.create_container('bosh')
    blob_service.create_container(
        container_name='stemcell',
        public_access='blob'
    )

    # Prepare the table for storing meta datas of storage account and stemcells
    table_service = TableService(account_name=default_storage_account_name_secondary,
                                 account_key=default_storage_access_key_secondary,
                                 endpoint_suffix=endpoint_suffix)
    table_service.create_table('stemcells')

    # Prepare primary premium storage account
    storage_account_name_primary = settings["STORAGE_ACCOUNT_NAME_PRIMARY"]
    storage_access_key_primary = settings["STORAGE_ACCESS_KEY_PRIMARY"]
    endpoint_suffix = settings["SERVICE_HOST_BASE"]
    blob_service = AppendBlobService(account_name=storage_account_name_primary,
                                     account_key=storage_access_key_primary,
                                     endpoint_suffix=endpoint_suffix)
    blob_service.create_container('bosh')
    blob_service.create_container('stemcell')

    # Prepare secondary premium storage account
    storage_account_name_secondary = settings["STORAGE_ACCOUNT_NAME_SECONDARY"]
    storage_access_key_secondary = settings["STORAGE_ACCESS_KEY_SECONDARY"]
    endpoint_suffix = settings["SERVICE_HOST_BASE"]
    blob_service = AppendBlobService(account_name=storage_account_name_secondary,
                                     account_key=storage_access_key_secondary,
                                     endpoint_suffix=endpoint_suffix)
    blob_service.create_container('bosh')
    blob_service.create_container('stemcell')


def render_bosh_manifest(settings):
    """Fill in every REPLACE_WITH_* placeholder in bosh.yml (in place) and
    return the BOSH director IP derived from the BOSH subnet range."""
    with open('bosh.pub', 'r') as tmpfile:
        ssh_public_key = tmpfile.read()

    ip = netaddr.IPNetwork(settings['SUBNET_ADDRESS_RANGE_FOR_BOSH'])
    # Convention used throughout this script: .1 is the gateway, .4 the
    # first usable service address in the subnet.
    gateway_ip = str(ip[1])
    bosh_director_ip = str(ip[4])

    # Render the manifest for bosh-init
    bosh_template = 'bosh.yml'
    if os.path.exists(bosh_template):
        with open(bosh_template, 'r') as tmpfile:
            contents = tmpfile.read()
        keys = [
            "SUBNET_ADDRESS_RANGE_FOR_BOSH",
            "VNET_NAME",
            "SUBNET_NAME_FOR_BOSH",
            "SUBSCRIPTION_ID",
            "DEFAULT_STORAGE_ACCOUNT_NAME",
            "RESOURCE_GROUP_NAME",
            "KEEP_UNREACHABLE_VMS",
            "TENANT_ID",
            "CLIENT_ID",
            "CLIENT_SECRET",
            "BOSH_PUBLIC_IP",
            "NSG_NAME_FOR_BOSH",
            "BOSH_RELEASE_URL",
            "BOSH_RELEASE_SHA1",
            "BOSH_AZURE_CPI_RELEASE_URL",
            "BOSH_AZURE_CPI_RELEASE_SHA1",
            "STEMCELL_URL",
            "STEMCELL_SHA1",
            "ENVIRONMENT"
        ]
        for k in keys:
            v = settings[k]
            # re.escape so placeholder text is matched literally.
            contents = re.compile(re.escape("REPLACE_WITH_{0}".format(k))).sub(str(v), contents)
        contents = re.compile(re.escape("REPLACE_WITH_SSH_PUBLIC_KEY")).sub(ssh_public_key, contents)
        contents = re.compile(re.escape("REPLACE_WITH_GATEWAY_IP")).sub(gateway_ip, contents)
        contents = re.compile(re.escape("REPLACE_WITH_BOSH_DIRECTOR_IP")).sub(bosh_director_ip, contents)
        with open(bosh_template, 'w') as tmpfile:
            tmpfile.write(contents)

    return bosh_director_ip


def get_cloud_foundry_configuration(scenario, settings):
    """Build the placeholder->value map used to render the Cloud Foundry
    manifest.  *scenario* is currently unused by the body — presumably kept
    for future multi-scenario support; confirm before removing."""
    config = {}
    for key in ["SUBNET_ADDRESS_RANGE_FOR_CLOUD_FOUNDRY", "VNET_NAME", "VNET_NAME_SECONDARY", "SUBNET_NAME_FOR_CLOUD_FOUNDRY", "CLOUD_FOUNDRY_PUBLIC_IP", "NSG_NAME_FOR_CLOUD_FOUNDRY"]:
        config[key] = settings[key]

    with open('cloudfoundry.cert', 'r') as tmpfile:
        ssl_cert = tmpfile.read()
    with open('cloudfoundry.key', 'r') as tmpfile:
        ssl_key = tmpfile.read()
    ssl_cert_and_key = "{0}{1}".format(ssl_cert, ssl_key)
    # Indent every line by 8 spaces so the PEM block nests correctly in YAML.
    indentation = " " * 8
    ssl_cert_and_key = ("\n"+indentation).join([line for line in ssl_cert_and_key.split('\n')])
    config["SSL_CERT_AND_KEY"] = ssl_cert_and_key

    ip = netaddr.IPNetwork(settings['SUBNET_ADDRESS_RANGE_FOR_CLOUD_FOUNDRY'])
    config["GATEWAY_IP"] = str(ip[1])
    config["RESERVED_IP_FROM"] = str(ip[2])
    config["RESERVED_IP_TO"] = str(ip[3])
    config["CLOUD_FOUNDRY_INTERNAL_IP"] = str(ip[4])
    # config["SYSTEM_DOMAIN"] = "{0}.xip.io".format(settings["CLOUD_FOUNDRY_PUBLIC_IP"])
    # Get and replace SYSTEM_DOMAIN from parameter json, e.g custom domain that is mapped to Traffic Manager
    config["SYSTEM_DOMAIN"] = settings["CUSTOM_SYSTEM_DOMAIN"]
    # Get and replace for REPLACE_WITH_EXTERNAL_DATABASE_ENDPOINT from parameter json, e.g dxmariadblb.northeurope.cloudapp.azure.com
    config["EXTERNAL_DATABASE_ENDPOINT"] = settings["EXTERNAL_DATABASE_ENDPOINT"]
    # Get and replace REPLACE_WITH_EXTERNAL_NFS_ENDPOINT with external NFS cluster from parameter json
    config["EXTERNAL_NFS_ENDPOINT"] = settings["EXTERNAL_NFS_ENDPOINT"]
    # Get and replace REPLACE_WITH_STORAGE_ACCOUNT_NAME_SECONDARY
    config["STORAGE_ACCOUNT_NAME_SECONDARY"] = settings["STORAGE_ACCOUNT_NAME_SECONDARY"]
    # Get and replace REPLACE_WITH_CLOUD_FOUNDRY_PUBLIC_IP_SECONDARY
    config["CLOUD_FOUNDRY_PUBLIC_IP_SECONDARY"] = settings["CLOUD_FOUNDRY_PUBLIC_IP_SECONDARY"]
    # Get and replace parameters related to storage account
    # (STORAGE_ACCOUNT_NAME_SECONDARY is assigned twice; harmless but
    # redundant — kept as-is in this documentation pass.)
    config["STORAGE_ACCOUNT_NAME_PRIMARY"] = settings["STORAGE_ACCOUNT_NAME_PRIMARY"]
    config["STORAGE_ACCOUNT_NAME_SECONDARY"] = settings["STORAGE_ACCOUNT_NAME_SECONDARY"]
    # Fixed component IPs carved out of the CF subnet.
    config["STATIC_IP_FROM"] = str(ip[4])
    config["STATIC_IP_TO"] = str(ip[100])
    config["HAPROXY_IP"] = str(ip[4])
    config["POSTGRES_IP"] = str(ip[11])
    config["ROUTER_IP"] = str(ip[12])
    config["NATS_IP"] = str(ip[13])
    config["ETCD_IP"] = str(ip[14])
    config["NFS_IP"] = str(ip[15])
    config["CONSUL_IP"] = str(ip[16])

    return config


def render_cloud_foundry_manifest(settings):
    """Substitute REPLACE_WITH_* placeholders in each scenario manifest
    (currently only cross.yml), rewriting the file in place."""
    for scenario in ["cross"]:
        cloudfoundry_template = "{0}.yml".format(scenario)
        if os.path.exists(cloudfoundry_template):
            with open(cloudfoundry_template, 'r') as tmpfile:
                contents = tmpfile.read()
            config = get_cloud_foundry_configuration(scenario, settings)
            for key in config:
                value = config[key]
                contents = re.compile(re.escape("REPLACE_WITH_{0}".format(key))).sub(value, contents)
            with open(cloudfoundry_template, 'w') as tmpfile:
                tmpfile.write(contents)


def render_cloud_foundry_deployment_cmd(settings):
    """Substitute the release/stemcell URLs into deploy_cloudfoundry.sh,
    rewriting the script in place."""
    cloudfoundry_deployment_cmd = "deploy_cloudfoundry.sh"
    if os.path.exists(cloudfoundry_deployment_cmd):
        with open(cloudfoundry_deployment_cmd, 'r') as tmpfile:
            contents = tmpfile.read()
        keys = ["CF_RELEASE_URL", "STEMCELL_URL"]
        for key in keys:
            value = settings[key]
            contents = re.compile(re.escape("REPLACE_WITH_{0}".format(key))).sub(value, contents)
        with open(cloudfoundry_deployment_cmd, 'w') as tmpfile:
            tmpfile.write(contents)


def get_settings():
    """Load the VM extension's public settings JSON (argv[4]) and merge in
    the service-principal credentials passed as argv[1..3]."""
    settings = dict()
    config_file = sys.argv[4]
    with open(config_file) as f:
        settings = json.load(f)["runtimeSettings"][0]["handlerSettings"]["publicSettings"]
    settings['TENANT_ID'] = sys.argv[1]
    settings['CLIENT_ID'] = sys.argv[2]
    settings['CLIENT_SECRET'] = sys.argv[3]
    return settings


def main():
    """Entry point: dump settings, prepare storage, render all templates."""
    settings = get_settings()
    # Persist the merged settings (includes the client secret) for later
    # steps in the deployment.
    with open('settings', "w") as tmpfile:
        tmpfile.write(json.dumps(settings, indent=4, sort_keys=True))

    prepare_storage(settings)

    bosh_director_ip = render_bosh_manifest(settings)
    print bosh_director_ip

    render_cloud_foundry_manifest(settings)
    render_cloud_foundry_deployment_cmd(settings)


if __name__ == "__main__":
    main()
mit
firebase/grpc
examples/python/cancellation/client.py
13
3496
# Copyright 2019 the gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example of cancelling requests in gRPC."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import logging
import signal
import sys

import grpc

from examples.python.cancellation import hash_name_pb2
from examples.python.cancellation import hash_name_pb2_grpc

_DESCRIPTION = "A client for finding hashes similar to names."

_LOGGER = logging.getLogger(__name__)


def run_unary_client(server_target, name, ideal_distance):
    """Issue a single Find RPC and print its result.

    A SIGINT handler cancels the in-flight future and exits, demonstrating
    client-side cancellation of a unary call.
    """
    with grpc.insecure_channel(server_target) as channel:
        stub = hash_name_pb2_grpc.HashFinderStub(channel)
        # wait_for_ready=True blocks until the server is reachable instead
        # of failing fast.
        future = stub.Find.future(hash_name_pb2.HashNameRequest(
            desired_name=name,
            ideal_hamming_distance=ideal_distance),
                                  wait_for_ready=True)

        def cancel_request(unused_signum, unused_frame):
            # Ctrl-C: cancel the pending RPC before exiting.
            future.cancel()
            sys.exit(0)

        signal.signal(signal.SIGINT, cancel_request)

        result = future.result()
        print(result)


def run_streaming_client(server_target, name, ideal_distance,
                         interesting_distance):
    """Issue a server-streaming FindRange RPC and print each result.

    A SIGINT handler cancels the response stream and exits, demonstrating
    cancellation of a streaming call.
    """
    with grpc.insecure_channel(server_target) as channel:
        stub = hash_name_pb2_grpc.HashFinderStub(channel)
        result_generator = stub.FindRange(hash_name_pb2.HashNameRequest(
            desired_name=name,
            ideal_hamming_distance=ideal_distance,
            interesting_hamming_distance=interesting_distance),
                                          wait_for_ready=True)

        def cancel_request(unused_signum, unused_frame):
            result_generator.cancel()
            sys.exit(0)

        signal.signal(signal.SIGINT, cancel_request)
        for result in result_generator:
            print(result)


def main():
    """Parse command-line flags and dispatch to the unary or streaming client."""
    parser = argparse.ArgumentParser(description=_DESCRIPTION)
    parser.add_argument("name", type=str, help='The desired name.')
    parser.add_argument("--ideal-distance",
                        default=0,
                        nargs='?',
                        type=int,
                        help="The desired Hamming distance.")
    parser.add_argument('--server',
                        default='localhost:50051',
                        type=str,
                        nargs='?',
                        help='The host-port pair at which to reach the server.')
    parser.add_argument(
        '--show-inferior',
        default=None,
        type=int,
        nargs='?',
        help='Also show candidates with a Hamming distance less than this value.'
    )

    args = parser.parse_args()
    # --show-inferior selects the streaming variant; otherwise a single
    # unary call is made.
    if args.show_inferior is not None:
        run_streaming_client(args.server, args.name, args.ideal_distance,
                             args.show_inferior)
    else:
        run_unary_client(args.server, args.name, args.ideal_distance)


if __name__ == "__main__":
    logging.basicConfig()
    main()
apache-2.0
mtnbikenc/ansible-modules-extras
cloud/profitbricks/profitbricks_nic.py
132
8769
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = '''
---
module: profitbricks_nic
short_description: Create or Remove a NIC.
description:
     - This module allows you to create or remove a NIC. This module has a dependency on profitbricks >= 1.0.0
version_added: "2.0"
options:
  datacenter:
    description:
      - The datacenter in which to operate.
    required: true
  server:
    description:
      - The server name or ID.
    required: true
  name:
    description:
      - The name or ID of the NIC. This is only required on deletes, but not on create.
    required: true
  lan:
    description:
      - The LAN to place the NIC on. You can pass a LAN that doesn't exist and it will be created. Required on create.
    required: true
  subscription_user:
    description:
      - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
    required: false
  subscription_password:
    description:
      - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
    required: false
  wait:
    description:
      - wait for the operation to complete before returning
    required: false
    default: "yes"
    choices: [ "yes", "no" ]
  wait_timeout:
    description:
      - how long before wait gives up, in seconds
    default: 600
  state:
    description:
      - Indicate desired state of the resource
    required: false
    default: 'present'
    choices: ["present", "absent"]

requirements: [ "profitbricks" ]
author: Matt Baldwin (baldwin@stackpointcloud.com)
'''

EXAMPLES = '''

# Create a NIC
- profitbricks_nic:
    datacenter: Tardis One
    server: node002
    lan: 2
    wait_timeout: 500
    state: present

# Remove a NIC
- profitbricks_nic:
    datacenter: Tardis One
    server: node002
    name: 7341c2454f
    wait_timeout: 500
    state: absent

'''

import re
import uuid
import time

HAS_PB_SDK = True

try:
    from profitbricks.client import ProfitBricksService, NIC
except ImportError:
    HAS_PB_SDK = False

# Matches ProfitBricks resource UUIDs (8-4-4-4-12 hex groups, case-insensitive).
uuid_match = re.compile(
    '[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)


def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
    """Poll a ProfitBricks async request until it is DONE.

    Raises an Exception if the request reports FAILED or if wait_timeout
    seconds elapse first.  A falsy promise (no request issued) returns
    immediately.
    """
    if not promise:
        return
    wait_timeout = time.time() + wait_timeout
    while wait_timeout > time.time():
        time.sleep(5)
        operation_result = profitbricks.get_request(
            request_id=promise['requestId'],
            status=True)

        if operation_result['metadata']['status'] == "DONE":
            return
        elif operation_result['metadata']['status'] == "FAILED":
            raise Exception(
                'Request failed to complete ' + msg + ' "' + str(
                    promise['requestId']) + '" to complete.')

    raise Exception(
        'Timed out waiting for async operation ' + msg + ' "' + str(
            promise['requestId']
        ) + '" to complete.')


def create_nic(module, profitbricks):
    """
    Creates a NIC.

    module : AnsibleModule object
    profitbricks: authenticated profitbricks object.

    Returns:
        The create-NIC API response if the NIC creates, fails the module otherwise.
    """
    datacenter = module.params.get('datacenter')
    server = module.params.get('server')
    lan = module.params.get('lan')
    name = module.params.get('name')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')

    # Locate UUID for Datacenter when a name was given instead of an ID.
    if not (uuid_match.match(datacenter)):
        datacenter_list = profitbricks.list_datacenters()
        for d in datacenter_list['items']:
            dc = profitbricks.get_datacenter(d['id'])
            if datacenter == dc['properties']['name']:
                datacenter = d['id']
                break

    # Locate UUID for Server when a name was given instead of an ID.
    if not (uuid_match.match(server)):
        server_list = profitbricks.list_servers(datacenter)
        for s in server_list['items']:
            if server == s['properties']['name']:
                server = s['id']
                break

    try:
        n = NIC(
            name=name,
            lan=lan
        )

        nic_response = profitbricks.create_nic(datacenter, server, n)

        if wait:
            _wait_for_completion(profitbricks, nic_response,
                                 wait_timeout, "create_nic")

        return nic_response

    except Exception as e:
        module.fail_json(msg="failed to create the NIC: %s" % str(e))


def delete_nic(module, profitbricks):
    """
    Removes a NIC

    module : AnsibleModule object
    profitbricks: authenticated profitbricks object.

    Returns:
        The delete-NIC API response if the NIC was removed, False if the
        server or NIC could not be found.
    """
    datacenter = module.params.get('datacenter')
    server = module.params.get('server')
    name = module.params.get('name')

    # Locate UUID for Datacenter when a name was given instead of an ID.
    if not (uuid_match.match(datacenter)):
        datacenter_list = profitbricks.list_datacenters()
        for d in datacenter_list['items']:
            dc = profitbricks.get_datacenter(d['id'])
            if datacenter == dc['properties']['name']:
                datacenter = d['id']
                break

    # Locate UUID for Server.
    # BUGFIX: previously server_found started False and was only set inside
    # the name-lookup branch, so passing the server as a UUID always hit
    # "return False" and nothing was deleted.  A UUID now counts as found.
    server_found = True
    if not (uuid_match.match(server)):
        server_found = False
        server_list = profitbricks.list_servers(datacenter)
        for s in server_list['items']:
            if server == s['properties']['name']:
                server_found = True
                server = s['id']
                break

    if not server_found:
        return False

    # Locate UUID for NIC (same fix: a NIC given as a UUID is accepted as-is).
    nic_found = True
    if not (uuid_match.match(name)):
        nic_found = False
        nic_list = profitbricks.list_nics(datacenter, server)
        for n in nic_list['items']:
            if name == n['properties']['name']:
                nic_found = True
                name = n['id']
                break

    if not nic_found:
        return False

    try:
        nic_response = profitbricks.delete_nic(datacenter, server, name)
        return nic_response
    except Exception as e:
        module.fail_json(msg="failed to remove the NIC: %s" % str(e))


def main():
    """Ansible entry point: validate parameters and dispatch on state."""
    module = AnsibleModule(
        argument_spec=dict(
            datacenter=dict(),
            server=dict(),
            # Default NIC name: a random 10-char hex-ish token, so creates
            # without an explicit name still get a unique one.
            name=dict(default=str(uuid.uuid4()).replace('-','')[:10]),
            lan=dict(),
            subscription_user=dict(),
            subscription_password=dict(),
            wait=dict(type='bool', default=True),
            wait_timeout=dict(type='int', default=600),
            state=dict(default='present'),
        )
    )

    if not HAS_PB_SDK:
        module.fail_json(msg='profitbricks required for this module')

    if not module.params.get('subscription_user'):
        module.fail_json(msg='subscription_user parameter is required')
    if not module.params.get('subscription_password'):
        module.fail_json(msg='subscription_password parameter is required')
    if not module.params.get('datacenter'):
        module.fail_json(msg='datacenter parameter is required')
    if not module.params.get('server'):
        module.fail_json(msg='server parameter is required')

    subscription_user = module.params.get('subscription_user')
    subscription_password = module.params.get('subscription_password')

    profitbricks = ProfitBricksService(
        username=subscription_user,
        password=subscription_password)

    state = module.params.get('state')

    if state == 'absent':
        if not module.params.get('name'):
            module.fail_json(msg='name parameter is required')

        try:
            (changed) = delete_nic(module, profitbricks)
            module.exit_json(changed=changed)
        except Exception as e:
            module.fail_json(msg='failed to set nic state: %s' % str(e))

    elif state == 'present':
        if not module.params.get('lan'):
            module.fail_json(msg='lan parameter is required')

        try:
            (nic_dict) = create_nic(module, profitbricks)
            module.exit_json(nics=nic_dict)
        except Exception as e:
            module.fail_json(msg='failed to set nic state: %s' % str(e))

from ansible.module_utils.basic import *

main()
gpl-3.0
jeffery9/mixprint_addons
analytic/__openerp__.py
112
1877
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name' : 'Analytic Accounting', 'version': '1.1', 'author' : 'OpenERP SA', 'website' : 'http://www.openerp.com', 'category': 'Hidden/Dependency', 'depends' : ['base', 'decimal_precision', 'mail'], 'description': """ Module for defining analytic accounting object. =============================================== In OpenERP, analytic accounts are linked to general accounts but are treated totally independently. So, you can enter various different analytic operations that have no counterpart in the general financial accounts. """, 'data': [ 'security/analytic_security.xml', 'security/ir.model.access.csv', 'analytic_sequence.xml', 'analytic_view.xml', 'analytic_data.xml', ], 'demo': [], 'installable': True, 'auto_install': False, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
yinquan529/platform-external-chromium_org
native_client_sdk/src/build_tools/dsc_info.py
70
1816
#!/usr/bin/env python # Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Extracts information from a library.dsc file.""" import optparse import os import sys import parse_dsc def Error(msg): print >> sys.stderr, 'dsc_info: %s' % msg sys.exit(1) def FindTarget(tree, target_name): targets = tree['TARGETS'] for target in targets: if target['NAME'] == target_name: return target Error('Target %s not found' % target_name) def GetSources(lib_dir, tree, target_name): result = [] target = FindTarget(tree, target_name) for filename in target['SOURCES']: result.append('/'.join([lib_dir, filename])) return result def DoMain(argv): "Entry point for gyp's pymod_do_main command." parser = optparse.OptionParser(usage='%prog: [OPTIONS] TARGET') # Give a clearer error message when this is used as a module. parser.prog = 'dsc_info' parser.add_option('-s', '--sources', help='Print a list of source files for the target', action='store_true', default=False) parser.add_option('-l', '--libdir', help='Directory where the library.dsc file is located', metavar='DIR') options, args = parser.parse_args(argv) if len(args) != 1: parser.error('Expecting exactly one argument.') target = args[0] libdir = options.libdir or '' tree = parse_dsc.LoadProject(os.path.join(libdir, 'library.dsc')) if options.sources: return '\n'.join(GetSources(libdir, tree, target)) parser.error('No action specified') def main(argv): print DoMain(argv[1:]) if __name__ == '__main__': try: sys.exit(main(sys.argv)) except KeyboardInterrupt: Error('interrupted')
bsd-3-clause
Jayflux/servo
tests/wpt/web-platform-tests/tools/pywebsocket/test/test_util.py
449
7538
#!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


"""Tests for util module."""

# NOTE: Python 2 code — uses print statements, xrange, byte-oriented str
# literals, and the deprecated TestCase.failUnless alias.

import os
import random
import sys
import unittest

import set_sys_path  # Update sys.path to locate mod_pywebsocket module.

from mod_pywebsocket import util


_TEST_DATA_DIR = os.path.join(os.path.split(__file__)[0], 'testdata')


class UtilTest(unittest.TestCase):
    """A unittest for util module."""

    def test_get_stack_trace(self):
        # Outside an exception handler there is no active traceback.
        self.assertEqual('None\n', util.get_stack_trace())
        try:
            a = 1 / 0  # Intentionally raise exception.
        except Exception:
            trace = util.get_stack_trace()
            self.failUnless(trace.startswith('Traceback'))
            self.failUnless(trace.find('ZeroDivisionError') != -1)

    def test_prepend_message_to_exception(self):
        exc = Exception('World')
        self.assertEqual('World', str(exc))
        util.prepend_message_to_exception('Hello ', exc)
        self.assertEqual('Hello World', str(exc))

    def test_get_script_interp(self):
        # With a cygwin bin path the shebang's interpreter is rebased there.
        cygwin_path = 'c:\\cygwin\\bin'
        cygwin_perl = os.path.join(cygwin_path, 'perl')
        self.assertEqual(None, util.get_script_interp(
            os.path.join(_TEST_DATA_DIR, 'README')))
        self.assertEqual(None, util.get_script_interp(
            os.path.join(_TEST_DATA_DIR, 'README'), cygwin_path))
        self.assertEqual('/usr/bin/perl -wT', util.get_script_interp(
            os.path.join(_TEST_DATA_DIR, 'hello.pl')))
        self.assertEqual(cygwin_perl + ' -wT', util.get_script_interp(
            os.path.join(_TEST_DATA_DIR, 'hello.pl'), cygwin_path))

    def test_hexify(self):
        self.assertEqual('61 7a 41 5a 30 39 20 09 0d 0a 00 ff',
                         util.hexify('azAZ09 \t\r\n\x00\xff'))


class RepeatedXorMaskerTest(unittest.TestCase):
    """A unittest for RepeatedXorMasker class."""

    def test_mask(self):
        # Sample input e6,97,a5 is U+65e5 in UTF-8
        masker = util.RepeatedXorMasker('\xff\xff\xff\xff')
        result = masker.mask('\xe6\x97\xa5')
        self.assertEqual('\x19\x68\x5a', result)

        # XOR with an all-zero mask is the identity.
        masker = util.RepeatedXorMasker('\x00\x00\x00\x00')
        result = masker.mask('\xe6\x97\xa5')
        self.assertEqual('\xe6\x97\xa5', result)

        # XOR of the input with itself yields zeros.
        masker = util.RepeatedXorMasker('\xe6\x97\xa5\x20')
        result = masker.mask('\xe6\x97\xa5')
        self.assertEqual('\x00\x00\x00', result)

    def test_mask_twice(self):
        # The mask offset must persist across calls.
        masker = util.RepeatedXorMasker('\x00\x7f\xff\x20')
        # mask[0], mask[1], ... will be used.
        result = masker.mask('\x00\x00\x00\x00\x00')
        self.assertEqual('\x00\x7f\xff\x20\x00', result)
        # mask[2], mask[0], ... will be used for the next call.
        result = masker.mask('\x00\x00\x00\x00\x00')
        self.assertEqual('\x7f\xff\x20\x00\x7f', result)

    def test_mask_large_data(self):
        # Large buffers must match a byte-by-byte reference XOR.
        masker = util.RepeatedXorMasker('mASk')
        original = ''.join([chr(i % 256) for i in xrange(1000)])
        result = masker.mask(original)
        expected = ''.join(
            [chr((i % 256) ^ ord('mASk'[i % 4])) for i in xrange(1000)])
        self.assertEqual(expected, result)

        masker = util.RepeatedXorMasker('MaSk')
        first_part = 'The WebSocket Protocol enables two-way communication.'
        result = masker.mask(first_part)
        self.assertEqual(
            '\x19\t6K\x1a\x0418"\x028\x0e9A\x03\x19"\x15<\x08"\rs\x0e#'
            '\x001\x07(\x12s\x1f:\x0e~\x1c,\x18s\x08"\x0c>\x1e#\x080\n9'
            '\x08<\x05c',
            result)
        # The second chunk continues from the mask offset left by the first.
        second_part = 'It has two parts: a handshake and the data transfer.'
        result = masker.mask(second_part)
        self.assertEqual(
            "('K%\x00 K9\x16<K=\x00!\x1f>[s\nm\t2\x05)\x12;\n&\x04s\n#"
            "\x05s\x1f%\x04s\x0f,\x152K9\x132\x05>\x076\x19c",
            result)


def get_random_section(source, min_num_chunks):
    # Split source into at least min_num_chunks randomly sized pieces whose
    # concatenation is source.  (Python 2: "/" is integer division here.)
    chunks = []
    bytes_chunked = 0
    while bytes_chunked < len(source):
        chunk_size = random.randint(
            1,
            min(len(source) / min_num_chunks, len(source) - bytes_chunked))
        chunk = source[bytes_chunked:bytes_chunked + chunk_size]
        chunks.append(chunk)
        bytes_chunked += chunk_size
    return chunks


class InflaterDeflaterTest(unittest.TestCase):
    """A unittest for _Inflater and _Deflater class."""

    def test_inflate_deflate_default(self):
        # Different window bits must produce different streams that both
        # round-trip back to the input.
        input = b'hello' + '-' * 30000 + b'hello'
        inflater15 = util._Inflater(15)
        deflater15 = util._Deflater(15)
        inflater8 = util._Inflater(8)
        deflater8 = util._Deflater(8)

        compressed15 = deflater15.compress_and_finish(input)
        compressed8 = deflater8.compress_and_finish(input)

        inflater15.append(compressed15)
        inflater8.append(compressed8)

        self.assertNotEqual(compressed15, compressed8)
        self.assertEqual(input, inflater15.decompress(-1))
        self.assertEqual(input, inflater8.decompress(-1))

    def test_random_section(self):
        # Seeded, so the chunking is random-looking but reproducible.
        random.seed(a=0)
        source = ''.join(
            [chr(random.randint(0, 255)) for i in xrange(100 * 1024)])

        chunked_input = get_random_section(source, 10)
        print "Input chunk sizes: %r" % [len(c) for c in chunked_input]

        deflater = util._Deflater(15)
        compressed = []
        for chunk in chunked_input:
            compressed.append(deflater.compress(chunk))
        compressed.append(deflater.compress_and_finish(''))

        # Decompress in differently sized chunks than were compressed.
        chunked_expectation = get_random_section(source, 10)
        print ("Expectation chunk sizes: %r" %
               [len(c) for c in chunked_expectation])

        inflater = util._Inflater(15)
        inflater.append(''.join(compressed))
        for chunk in chunked_expectation:
            decompressed = inflater.decompress(len(chunk))
            self.assertEqual(chunk, decompressed)

        self.assertEqual('', inflater.decompress(-1))


if __name__ == '__main__':
    unittest.main()


# vi:sts=4 sw=4 et
mpl-2.0
rmfitzpatrick/ansible
lib/ansible/utils/module_docs_fragments/purestorage.py
71
1297
# # (c) 2017, Simon Dodsley <simon@purestorage.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. class ModuleDocFragment(object): # Standard Pure Storage documentation fragment DOCUMENTATION = ''' options: fa_url: description: - FlashArray management IPv4 address or Hostname. required: true api_token: description: - FlashArray API token for admin privilaged user. required: true notes: - This module requires purestorage python library - You must set C(PUREFA_URL) and C(PUREFA_API) environment variables if I(url) and I(api_token) arguments are not passed to the module directly requirements: - "python >= 2.7" - purestorage '''
gpl-3.0
adafruit/micropython
tests/basics/exceptpoly2.py
63
1789
try: raise MemoryError except Exception: print("Caught MemoryError via Exception") try: raise MemoryError except MemoryError: print("Caught MemoryError") try: raise NameError except Exception: print("Caught NameError via Exception") try: raise NameError except NameError: print("Caught NameError") try: raise NotImplementedError except RuntimeError: print("Caught NotImplementedError via RuntimeError") try: raise NotImplementedError except NotImplementedError: print("Caught NotImplementedError") try: raise OSError except Exception: print("Caught OSError via Exception") try: raise OSError except OSError: print("Caught OSError") try: raise OverflowError except ArithmeticError: print("Caught OverflowError via ArithmeticError") try: raise OverflowError except OverflowError: print("Caught OverflowError") try: raise RuntimeError except Exception: print("Caught RuntimeError via Exception") try: raise RuntimeError except RuntimeError: print("Caught RuntimeError") try: raise SyntaxError except Exception: print("Caught SyntaxError via Exception") try: raise SyntaxError except SyntaxError: print("Caught SyntaxError") try: raise TypeError except Exception: print("Caught TypeError via Exception") try: raise TypeError except TypeError: print("Caught TypeError") try: raise ValueError except Exception: print("Caught ValueError via Exception") try: raise ValueError except ValueError: print("Caught ValueError") try: raise ZeroDivisionError except ArithmeticError: print("Caught ZeroDivisionError via ArithmeticError") try: raise ZeroDivisionError except ZeroDivisionError: print("Caught ZeroDivisionError")
mit
ravsa/python-for-android
src/buildlib/jinja2.egg/jinja2/parser.py
637
35186
# -*- coding: utf-8 -*- """ jinja2.parser ~~~~~~~~~~~~~ Implements the template parser. :copyright: (c) 2010 by the Jinja Team. :license: BSD, see LICENSE for more details. """ from jinja2 import nodes from jinja2.exceptions import TemplateSyntaxError, TemplateAssertionError from jinja2.lexer import describe_token, describe_token_expr from jinja2._compat import next, imap #: statements that callinto _statement_keywords = frozenset(['for', 'if', 'block', 'extends', 'print', 'macro', 'include', 'from', 'import', 'set']) _compare_operators = frozenset(['eq', 'ne', 'lt', 'lteq', 'gt', 'gteq']) class Parser(object): """This is the central parsing class Jinja2 uses. It's passed to extensions and can be used to parse expressions or statements. """ def __init__(self, environment, source, name=None, filename=None, state=None): self.environment = environment self.stream = environment._tokenize(source, name, filename, state) self.name = name self.filename = filename self.closed = False self.extensions = {} for extension in environment.iter_extensions(): for tag in extension.tags: self.extensions[tag] = extension.parse self._last_identifier = 0 self._tag_stack = [] self._end_token_stack = [] def fail(self, msg, lineno=None, exc=TemplateSyntaxError): """Convenience method that raises `exc` with the message, passed line number or last line number as well as the current name and filename. """ if lineno is None: lineno = self.stream.current.lineno raise exc(msg, lineno, self.name, self.filename) def _fail_ut_eof(self, name, end_token_stack, lineno): expected = [] for exprs in end_token_stack: expected.extend(imap(describe_token_expr, exprs)) if end_token_stack: currently_looking = ' or '.join( "'%s'" % describe_token_expr(expr) for expr in end_token_stack[-1]) else: currently_looking = None if name is None: message = ['Unexpected end of template.'] else: message = ['Encountered unknown tag \'%s\'.' 
% name] if currently_looking: if name is not None and name in expected: message.append('You probably made a nesting mistake. Jinja ' 'is expecting this tag, but currently looking ' 'for %s.' % currently_looking) else: message.append('Jinja was looking for the following tags: ' '%s.' % currently_looking) if self._tag_stack: message.append('The innermost block that needs to be ' 'closed is \'%s\'.' % self._tag_stack[-1]) self.fail(' '.join(message), lineno) def fail_unknown_tag(self, name, lineno=None): """Called if the parser encounters an unknown tag. Tries to fail with a human readable error message that could help to identify the problem. """ return self._fail_ut_eof(name, self._end_token_stack, lineno) def fail_eof(self, end_tokens=None, lineno=None): """Like fail_unknown_tag but for end of template situations.""" stack = list(self._end_token_stack) if end_tokens is not None: stack.append(end_tokens) return self._fail_ut_eof(None, stack, lineno) def is_tuple_end(self, extra_end_rules=None): """Are we at the end of a tuple?""" if self.stream.current.type in ('variable_end', 'block_end', 'rparen'): return True elif extra_end_rules is not None: return self.stream.current.test_any(extra_end_rules) return False def free_identifier(self, lineno=None): """Return a new free identifier as :class:`~jinja2.nodes.InternalName`.""" self._last_identifier += 1 rv = object.__new__(nodes.InternalName) nodes.Node.__init__(rv, 'fi%d' % self._last_identifier, lineno=lineno) return rv def parse_statement(self): """Parse a single statement.""" token = self.stream.current if token.type != 'name': self.fail('tag name expected', token.lineno) self._tag_stack.append(token.value) pop_tag = True try: if token.value in _statement_keywords: return getattr(self, 'parse_' + self.stream.current.value)() if token.value == 'call': return self.parse_call_block() if token.value == 'filter': return self.parse_filter_block() ext = self.extensions.get(token.value) if ext is not None: return ext(self) 
# did not work out, remove the token we pushed by accident # from the stack so that the unknown tag fail function can # produce a proper error message. self._tag_stack.pop() pop_tag = False self.fail_unknown_tag(token.value, token.lineno) finally: if pop_tag: self._tag_stack.pop() def parse_statements(self, end_tokens, drop_needle=False): """Parse multiple statements into a list until one of the end tokens is reached. This is used to parse the body of statements as it also parses template data if appropriate. The parser checks first if the current token is a colon and skips it if there is one. Then it checks for the block end and parses until if one of the `end_tokens` is reached. Per default the active token in the stream at the end of the call is the matched end token. If this is not wanted `drop_needle` can be set to `True` and the end token is removed. """ # the first token may be a colon for python compatibility self.stream.skip_if('colon') # in the future it would be possible to add whole code sections # by adding some sort of end of statement token and parsing those here. 
self.stream.expect('block_end') result = self.subparse(end_tokens) # we reached the end of the template too early, the subparser # does not check for this, so we do that now if self.stream.current.type == 'eof': self.fail_eof(end_tokens) if drop_needle: next(self.stream) return result def parse_set(self): """Parse an assign statement.""" lineno = next(self.stream).lineno target = self.parse_assign_target() self.stream.expect('assign') expr = self.parse_tuple() return nodes.Assign(target, expr, lineno=lineno) def parse_for(self): """Parse a for loop.""" lineno = self.stream.expect('name:for').lineno target = self.parse_assign_target(extra_end_rules=('name:in',)) self.stream.expect('name:in') iter = self.parse_tuple(with_condexpr=False, extra_end_rules=('name:recursive',)) test = None if self.stream.skip_if('name:if'): test = self.parse_expression() recursive = self.stream.skip_if('name:recursive') body = self.parse_statements(('name:endfor', 'name:else')) if next(self.stream).value == 'endfor': else_ = [] else: else_ = self.parse_statements(('name:endfor',), drop_needle=True) return nodes.For(target, iter, body, else_, test, recursive, lineno=lineno) def parse_if(self): """Parse an if construct.""" node = result = nodes.If(lineno=self.stream.expect('name:if').lineno) while 1: node.test = self.parse_tuple(with_condexpr=False) node.body = self.parse_statements(('name:elif', 'name:else', 'name:endif')) token = next(self.stream) if token.test('name:elif'): new_node = nodes.If(lineno=self.stream.current.lineno) node.else_ = [new_node] node = new_node continue elif token.test('name:else'): node.else_ = self.parse_statements(('name:endif',), drop_needle=True) else: node.else_ = [] break return result def parse_block(self): node = nodes.Block(lineno=next(self.stream).lineno) node.name = self.stream.expect('name').value node.scoped = self.stream.skip_if('name:scoped') # common problem people encounter when switching from django # to jinja. 
we do not support hyphens in block names, so let's # raise a nicer error message in that case. if self.stream.current.type == 'sub': self.fail('Block names in Jinja have to be valid Python ' 'identifiers and may not contain hyphens, use an ' 'underscore instead.') node.body = self.parse_statements(('name:endblock',), drop_needle=True) self.stream.skip_if('name:' + node.name) return node def parse_extends(self): node = nodes.Extends(lineno=next(self.stream).lineno) node.template = self.parse_expression() return node def parse_import_context(self, node, default): if self.stream.current.test_any('name:with', 'name:without') and \ self.stream.look().test('name:context'): node.with_context = next(self.stream).value == 'with' self.stream.skip() else: node.with_context = default return node def parse_include(self): node = nodes.Include(lineno=next(self.stream).lineno) node.template = self.parse_expression() if self.stream.current.test('name:ignore') and \ self.stream.look().test('name:missing'): node.ignore_missing = True self.stream.skip(2) else: node.ignore_missing = False return self.parse_import_context(node, True) def parse_import(self): node = nodes.Import(lineno=next(self.stream).lineno) node.template = self.parse_expression() self.stream.expect('name:as') node.target = self.parse_assign_target(name_only=True).name return self.parse_import_context(node, False) def parse_from(self): node = nodes.FromImport(lineno=next(self.stream).lineno) node.template = self.parse_expression() self.stream.expect('name:import') node.names = [] def parse_context(): if self.stream.current.value in ('with', 'without') and \ self.stream.look().test('name:context'): node.with_context = next(self.stream).value == 'with' self.stream.skip() return True return False while 1: if node.names: self.stream.expect('comma') if self.stream.current.type == 'name': if parse_context(): break target = self.parse_assign_target(name_only=True) if target.name.startswith('_'): self.fail('names starting with 
an underline can not ' 'be imported', target.lineno, exc=TemplateAssertionError) if self.stream.skip_if('name:as'): alias = self.parse_assign_target(name_only=True) node.names.append((target.name, alias.name)) else: node.names.append(target.name) if parse_context() or self.stream.current.type != 'comma': break else: break if not hasattr(node, 'with_context'): node.with_context = False self.stream.skip_if('comma') return node def parse_signature(self, node): node.args = args = [] node.defaults = defaults = [] self.stream.expect('lparen') while self.stream.current.type != 'rparen': if args: self.stream.expect('comma') arg = self.parse_assign_target(name_only=True) arg.set_ctx('param') if self.stream.skip_if('assign'): defaults.append(self.parse_expression()) args.append(arg) self.stream.expect('rparen') def parse_call_block(self): node = nodes.CallBlock(lineno=next(self.stream).lineno) if self.stream.current.type == 'lparen': self.parse_signature(node) else: node.args = [] node.defaults = [] node.call = self.parse_expression() if not isinstance(node.call, nodes.Call): self.fail('expected call', node.lineno) node.body = self.parse_statements(('name:endcall',), drop_needle=True) return node def parse_filter_block(self): node = nodes.FilterBlock(lineno=next(self.stream).lineno) node.filter = self.parse_filter(None, start_inline=True) node.body = self.parse_statements(('name:endfilter',), drop_needle=True) return node def parse_macro(self): node = nodes.Macro(lineno=next(self.stream).lineno) node.name = self.parse_assign_target(name_only=True).name self.parse_signature(node) node.body = self.parse_statements(('name:endmacro',), drop_needle=True) return node def parse_print(self): node = nodes.Output(lineno=next(self.stream).lineno) node.nodes = [] while self.stream.current.type != 'block_end': if node.nodes: self.stream.expect('comma') node.nodes.append(self.parse_expression()) return node def parse_assign_target(self, with_tuple=True, name_only=False, 
extra_end_rules=None): """Parse an assignment target. As Jinja2 allows assignments to tuples, this function can parse all allowed assignment targets. Per default assignments to tuples are parsed, that can be disable however by setting `with_tuple` to `False`. If only assignments to names are wanted `name_only` can be set to `True`. The `extra_end_rules` parameter is forwarded to the tuple parsing function. """ if name_only: token = self.stream.expect('name') target = nodes.Name(token.value, 'store', lineno=token.lineno) else: if with_tuple: target = self.parse_tuple(simplified=True, extra_end_rules=extra_end_rules) else: target = self.parse_primary() target.set_ctx('store') if not target.can_assign(): self.fail('can\'t assign to %r' % target.__class__. __name__.lower(), target.lineno) return target def parse_expression(self, with_condexpr=True): """Parse an expression. Per default all expressions are parsed, if the optional `with_condexpr` parameter is set to `False` conditional expressions are not parsed. 
""" if with_condexpr: return self.parse_condexpr() return self.parse_or() def parse_condexpr(self): lineno = self.stream.current.lineno expr1 = self.parse_or() while self.stream.skip_if('name:if'): expr2 = self.parse_or() if self.stream.skip_if('name:else'): expr3 = self.parse_condexpr() else: expr3 = None expr1 = nodes.CondExpr(expr2, expr1, expr3, lineno=lineno) lineno = self.stream.current.lineno return expr1 def parse_or(self): lineno = self.stream.current.lineno left = self.parse_and() while self.stream.skip_if('name:or'): right = self.parse_and() left = nodes.Or(left, right, lineno=lineno) lineno = self.stream.current.lineno return left def parse_and(self): lineno = self.stream.current.lineno left = self.parse_not() while self.stream.skip_if('name:and'): right = self.parse_not() left = nodes.And(left, right, lineno=lineno) lineno = self.stream.current.lineno return left def parse_not(self): if self.stream.current.test('name:not'): lineno = next(self.stream).lineno return nodes.Not(self.parse_not(), lineno=lineno) return self.parse_compare() def parse_compare(self): lineno = self.stream.current.lineno expr = self.parse_add() ops = [] while 1: token_type = self.stream.current.type if token_type in _compare_operators: next(self.stream) ops.append(nodes.Operand(token_type, self.parse_add())) elif self.stream.skip_if('name:in'): ops.append(nodes.Operand('in', self.parse_add())) elif self.stream.current.test('name:not') and \ self.stream.look().test('name:in'): self.stream.skip(2) ops.append(nodes.Operand('notin', self.parse_add())) else: break lineno = self.stream.current.lineno if not ops: return expr return nodes.Compare(expr, ops, lineno=lineno) def parse_add(self): lineno = self.stream.current.lineno left = self.parse_sub() while self.stream.current.type == 'add': next(self.stream) right = self.parse_sub() left = nodes.Add(left, right, lineno=lineno) lineno = self.stream.current.lineno return left def parse_sub(self): lineno = self.stream.current.lineno left = 
self.parse_concat() while self.stream.current.type == 'sub': next(self.stream) right = self.parse_concat() left = nodes.Sub(left, right, lineno=lineno) lineno = self.stream.current.lineno return left def parse_concat(self): lineno = self.stream.current.lineno args = [self.parse_mul()] while self.stream.current.type == 'tilde': next(self.stream) args.append(self.parse_mul()) if len(args) == 1: return args[0] return nodes.Concat(args, lineno=lineno) def parse_mul(self): lineno = self.stream.current.lineno left = self.parse_div() while self.stream.current.type == 'mul': next(self.stream) right = self.parse_div() left = nodes.Mul(left, right, lineno=lineno) lineno = self.stream.current.lineno return left def parse_div(self): lineno = self.stream.current.lineno left = self.parse_floordiv() while self.stream.current.type == 'div': next(self.stream) right = self.parse_floordiv() left = nodes.Div(left, right, lineno=lineno) lineno = self.stream.current.lineno return left def parse_floordiv(self): lineno = self.stream.current.lineno left = self.parse_mod() while self.stream.current.type == 'floordiv': next(self.stream) right = self.parse_mod() left = nodes.FloorDiv(left, right, lineno=lineno) lineno = self.stream.current.lineno return left def parse_mod(self): lineno = self.stream.current.lineno left = self.parse_pow() while self.stream.current.type == 'mod': next(self.stream) right = self.parse_pow() left = nodes.Mod(left, right, lineno=lineno) lineno = self.stream.current.lineno return left def parse_pow(self): lineno = self.stream.current.lineno left = self.parse_unary() while self.stream.current.type == 'pow': next(self.stream) right = self.parse_unary() left = nodes.Pow(left, right, lineno=lineno) lineno = self.stream.current.lineno return left def parse_unary(self, with_filter=True): token_type = self.stream.current.type lineno = self.stream.current.lineno if token_type == 'sub': next(self.stream) node = nodes.Neg(self.parse_unary(False), lineno=lineno) elif 
token_type == 'add': next(self.stream) node = nodes.Pos(self.parse_unary(False), lineno=lineno) else: node = self.parse_primary() node = self.parse_postfix(node) if with_filter: node = self.parse_filter_expr(node) return node def parse_primary(self): token = self.stream.current if token.type == 'name': if token.value in ('true', 'false', 'True', 'False'): node = nodes.Const(token.value in ('true', 'True'), lineno=token.lineno) elif token.value in ('none', 'None'): node = nodes.Const(None, lineno=token.lineno) else: node = nodes.Name(token.value, 'load', lineno=token.lineno) next(self.stream) elif token.type == 'string': next(self.stream) buf = [token.value] lineno = token.lineno while self.stream.current.type == 'string': buf.append(self.stream.current.value) next(self.stream) node = nodes.Const(''.join(buf), lineno=lineno) elif token.type in ('integer', 'float'): next(self.stream) node = nodes.Const(token.value, lineno=token.lineno) elif token.type == 'lparen': next(self.stream) node = self.parse_tuple(explicit_parentheses=True) self.stream.expect('rparen') elif token.type == 'lbracket': node = self.parse_list() elif token.type == 'lbrace': node = self.parse_dict() else: self.fail("unexpected '%s'" % describe_token(token), token.lineno) return node def parse_tuple(self, simplified=False, with_condexpr=True, extra_end_rules=None, explicit_parentheses=False): """Works like `parse_expression` but if multiple expressions are delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created. This method could also return a regular expression instead of a tuple if no commas where found. The default parsing mode is a full tuple. If `simplified` is `True` only names and literals are parsed. The `no_condexpr` parameter is forwarded to :meth:`parse_expression`. Because tuples do not require delimiters and may end in a bogus comma an extra hint is needed that marks the end of a tuple. For example for loops support tuples between `for` and `in`. 
In that case the `extra_end_rules` is set to ``['name:in']``. `explicit_parentheses` is true if the parsing was triggered by an expression in parentheses. This is used to figure out if an empty tuple is a valid expression or not. """ lineno = self.stream.current.lineno if simplified: parse = self.parse_primary elif with_condexpr: parse = self.parse_expression else: parse = lambda: self.parse_expression(with_condexpr=False) args = [] is_tuple = False while 1: if args: self.stream.expect('comma') if self.is_tuple_end(extra_end_rules): break args.append(parse()) if self.stream.current.type == 'comma': is_tuple = True else: break lineno = self.stream.current.lineno if not is_tuple: if args: return args[0] # if we don't have explicit parentheses, an empty tuple is # not a valid expression. This would mean nothing (literally # nothing) in the spot of an expression would be an empty # tuple. if not explicit_parentheses: self.fail('Expected an expression, got \'%s\'' % describe_token(self.stream.current)) return nodes.Tuple(args, 'load', lineno=lineno) def parse_list(self): token = self.stream.expect('lbracket') items = [] while self.stream.current.type != 'rbracket': if items: self.stream.expect('comma') if self.stream.current.type == 'rbracket': break items.append(self.parse_expression()) self.stream.expect('rbracket') return nodes.List(items, lineno=token.lineno) def parse_dict(self): token = self.stream.expect('lbrace') items = [] while self.stream.current.type != 'rbrace': if items: self.stream.expect('comma') if self.stream.current.type == 'rbrace': break key = self.parse_expression() self.stream.expect('colon') value = self.parse_expression() items.append(nodes.Pair(key, value, lineno=key.lineno)) self.stream.expect('rbrace') return nodes.Dict(items, lineno=token.lineno) def parse_postfix(self, node): while 1: token_type = self.stream.current.type if token_type == 'dot' or token_type == 'lbracket': node = self.parse_subscript(node) # calls are valid both after 
postfix expressions (getattr # and getitem) as well as filters and tests elif token_type == 'lparen': node = self.parse_call(node) else: break return node def parse_filter_expr(self, node): while 1: token_type = self.stream.current.type if token_type == 'pipe': node = self.parse_filter(node) elif token_type == 'name' and self.stream.current.value == 'is': node = self.parse_test(node) # calls are valid both after postfix expressions (getattr # and getitem) as well as filters and tests elif token_type == 'lparen': node = self.parse_call(node) else: break return node def parse_subscript(self, node): token = next(self.stream) if token.type == 'dot': attr_token = self.stream.current next(self.stream) if attr_token.type == 'name': return nodes.Getattr(node, attr_token.value, 'load', lineno=token.lineno) elif attr_token.type != 'integer': self.fail('expected name or number', attr_token.lineno) arg = nodes.Const(attr_token.value, lineno=attr_token.lineno) return nodes.Getitem(node, arg, 'load', lineno=token.lineno) if token.type == 'lbracket': args = [] while self.stream.current.type != 'rbracket': if args: self.stream.expect('comma') args.append(self.parse_subscribed()) self.stream.expect('rbracket') if len(args) == 1: arg = args[0] else: arg = nodes.Tuple(args, 'load', lineno=token.lineno) return nodes.Getitem(node, arg, 'load', lineno=token.lineno) self.fail('expected subscript expression', self.lineno) def parse_subscribed(self): lineno = self.stream.current.lineno if self.stream.current.type == 'colon': next(self.stream) args = [None] else: node = self.parse_expression() if self.stream.current.type != 'colon': return node next(self.stream) args = [node] if self.stream.current.type == 'colon': args.append(None) elif self.stream.current.type not in ('rbracket', 'comma'): args.append(self.parse_expression()) else: args.append(None) if self.stream.current.type == 'colon': next(self.stream) if self.stream.current.type not in ('rbracket', 'comma'): 
args.append(self.parse_expression()) else: args.append(None) else: args.append(None) return nodes.Slice(lineno=lineno, *args) def parse_call(self, node): token = self.stream.expect('lparen') args = [] kwargs = [] dyn_args = dyn_kwargs = None require_comma = False def ensure(expr): if not expr: self.fail('invalid syntax for function call expression', token.lineno) while self.stream.current.type != 'rparen': if require_comma: self.stream.expect('comma') # support for trailing comma if self.stream.current.type == 'rparen': break if self.stream.current.type == 'mul': ensure(dyn_args is None and dyn_kwargs is None) next(self.stream) dyn_args = self.parse_expression() elif self.stream.current.type == 'pow': ensure(dyn_kwargs is None) next(self.stream) dyn_kwargs = self.parse_expression() else: ensure(dyn_args is None and dyn_kwargs is None) if self.stream.current.type == 'name' and \ self.stream.look().type == 'assign': key = self.stream.current.value self.stream.skip(2) value = self.parse_expression() kwargs.append(nodes.Keyword(key, value, lineno=value.lineno)) else: ensure(not kwargs) args.append(self.parse_expression()) require_comma = True self.stream.expect('rparen') if node is None: return args, kwargs, dyn_args, dyn_kwargs return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno) def parse_filter(self, node, start_inline=False): while self.stream.current.type == 'pipe' or start_inline: if not start_inline: next(self.stream) token = self.stream.expect('name') name = token.value while self.stream.current.type == 'dot': next(self.stream) name += '.' 
+ self.stream.expect('name').value if self.stream.current.type == 'lparen': args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None) else: args = [] kwargs = [] dyn_args = dyn_kwargs = None node = nodes.Filter(node, name, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno) start_inline = False return node def parse_test(self, node): token = next(self.stream) if self.stream.current.test('name:not'): next(self.stream) negated = True else: negated = False name = self.stream.expect('name').value while self.stream.current.type == 'dot': next(self.stream) name += '.' + self.stream.expect('name').value dyn_args = dyn_kwargs = None kwargs = [] if self.stream.current.type == 'lparen': args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None) elif self.stream.current.type in ('name', 'string', 'integer', 'float', 'lparen', 'lbracket', 'lbrace') and not \ self.stream.current.test_any('name:else', 'name:or', 'name:and'): if self.stream.current.test('name:is'): self.fail('You cannot chain multiple tests with is') args = [self.parse_expression()] else: args = [] node = nodes.Test(node, name, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno) if negated: node = nodes.Not(node, lineno=token.lineno) return node def subparse(self, end_tokens=None): body = [] data_buffer = [] add_data = data_buffer.append if end_tokens is not None: self._end_token_stack.append(end_tokens) def flush_data(): if data_buffer: lineno = data_buffer[0].lineno body.append(nodes.Output(data_buffer[:], lineno=lineno)) del data_buffer[:] try: while self.stream: token = self.stream.current if token.type == 'data': if token.value: add_data(nodes.TemplateData(token.value, lineno=token.lineno)) next(self.stream) elif token.type == 'variable_begin': next(self.stream) add_data(self.parse_tuple(with_condexpr=True)) self.stream.expect('variable_end') elif token.type == 'block_begin': flush_data() next(self.stream) if end_tokens is not None and \ self.stream.current.test_any(*end_tokens): return body rv = 
self.parse_statement() if isinstance(rv, list): body.extend(rv) else: body.append(rv) self.stream.expect('block_end') else: raise AssertionError('internal parsing error') flush_data() finally: if end_tokens is not None: self._end_token_stack.pop() return body def parse(self): """Parse the whole template into a `Template` node.""" result = nodes.Template(self.subparse(), lineno=1) result.set_environment(self.environment) return result
mit
qedsoftware/commcare-hq
corehq/apps/userreports/tests/test_report_charts.py
1
5758
from django.test import SimpleTestCase from corehq.apps.userreports.models import ReportConfiguration from corehq.apps.userreports.exceptions import BadSpecError from corehq.apps.userreports.reports.factory import ChartFactory from corehq.apps.userreports.reports.specs import PieChartSpec, MultibarChartSpec, MultibarAggregateChartSpec, \ GraphDisplayColumn class ChartTestCase(SimpleTestCase): def test_no_type(self): with self.assertRaises(BadSpecError): ChartFactory.from_spec({ "title": "Chart Title", "aggregation_column": "agg_col", "value_column": "count", }) def test_bad_type(self): with self.assertRaises(BadSpecError): ChartFactory.from_spec({ "type": "invalid_type", "title": "Chart Title", "aggregation_column": "agg_col", "value_column": "count", }) class PieChartTestCase(SimpleTestCase): def test_make_pie_chart(self): chart = ChartFactory.from_spec({ "type": "pie", "title": "Chart Title", "aggregation_column": "agg_col", "value_column": "count", }) self.assertEqual(PieChartSpec, type(chart)) def test_missing_value(self): with self.assertRaises(BadSpecError): ChartFactory.from_spec({ "type": "pie", "title": "Chart Title", "aggregation_column": "agg_col", }) class MultibarTestCase(SimpleTestCase): def test_make_multibar_chart(self): chart = ChartFactory.from_spec({ "type": "multibar", "title": "Property Matches by clinic", "x_axis_column": "clinic", "y_axis_columns": [ { "column_id": "property_no", "display": "No" }, { "column_id": "property_yes", "display": "Yes" } ], }) self.assertEqual(MultibarChartSpec, type(chart)) def test_make_multibar_chart_legacy_columns(self): chart = ChartFactory.from_spec({ "type": "multibar", "title": "Property Matches by clinic", "x_axis_column": "clinic", "y_axis_columns": [ "property_no", {"column_id": "property_yes"} ], }) self.assertEqual(MultibarChartSpec, type(chart)) col_0, col_1 = chart.y_axis_columns self.assertTrue(isinstance(col_0, GraphDisplayColumn)) self.assertEqual('property_no', col_0.display) 
self.assertEqual('property_no', col_0.column_id) self.assertTrue(isinstance(col_1, GraphDisplayColumn)) self.assertEqual('property_yes', col_1.display) self.assertEqual('property_yes', col_1.column_id) def test_make_multibar_chart_bad_columns(self): for test_case in [None, 5, [], {}, {"display": "missing column id"}]: with self.assertRaises(BadSpecError): ChartFactory.from_spec({ "type": "multibar", "title": "Property Matches by clinic", "x_axis_column": "clinic", "y_axis_columns": [test_case], }) def test_missing_x_axis(self): with self.assertRaises(BadSpecError): ChartFactory.from_spec({ "type": "multibar", "title": "Property Matches by clinic", "y_axis_columns": [ "property_no", "property_yes" ], }) class MultibarAggregateTestCase(SimpleTestCase): def test_make_multibar_chart(self): chart = ChartFactory.from_spec({ "type": "multibar-aggregate", "title": "Applicants by type and location", "primary_aggregation": "remote", "secondary_aggregation": "applicant_type", "value_column": "count", }) self.assertEqual(MultibarAggregateChartSpec, type(chart)) def test_missing_value(self): with self.assertRaises(BadSpecError): ChartFactory.from_spec({ "type": "multibar-aggregate", "title": "Applicants by type and location", "primary_aggregation": "remote", "secondary_aggregation": "applicant_type", }) def test_missing_primary(self): with self.assertRaises(BadSpecError): ChartFactory.from_spec({ "type": "multibar-aggregate", "title": "Applicants by type and location", "secondary_aggregation": "applicant_type", "value_column": "count", }) def test_missing_secondary(self): with self.assertRaises(BadSpecError): ChartFactory.from_spec({ "type": "multibar-aggregate", "title": "Applicants by type and location", "primary_aggregation": "remote", "value_column": "count", }) class ChartJsonTest(SimpleTestCase): def test_charts_to_json(self): # this tests a regression - namely that calling to_json on a chart config # when accessed via a report would crash. 
report = ReportConfiguration(configured_charts=[ dict( type=u'pie', value_column=u'count', aggregation_column=u'remote', title=u'Remote status' ) ]) chart = report.charts[0] chart.to_json() # this is the line that used to crash
bsd-3-clause
PRIMEDesigner15/PRIMEDesigner15
dependencies/Lib/weakref.py
769
11495
"""Weak reference support for Python. This module is an implementation of PEP 205: http://www.python.org/dev/peps/pep-0205/ """ # Naming convention: Variables named "wr" are weak reference objects; # they are called this instead of "ref" to avoid name collisions with # the module-global ref() function imported from _weakref. from _weakref import ( getweakrefcount, getweakrefs, ref, proxy, CallableProxyType, ProxyType, ReferenceType) from _weakrefset import WeakSet, _IterationGuard import collections # Import after _weakref to avoid circular import. ProxyTypes = (ProxyType, CallableProxyType) __all__ = ["ref", "proxy", "getweakrefcount", "getweakrefs", "WeakKeyDictionary", "ReferenceType", "ProxyType", "CallableProxyType", "ProxyTypes", "WeakValueDictionary", "WeakSet"] class WeakValueDictionary(collections.MutableMapping): """Mapping class that references values weakly. Entries in the dictionary will be discarded when no strong reference to the value exists anymore """ # We inherit the constructor without worrying about the input # dictionary; since it uses our .update() method, we get the right # checks (if the other dictionary is a WeakValueDictionary, # objects are unwrapped on the way out, and we always wrap on the # way in). def __init__(self, *args, **kw): def remove(wr, selfref=ref(self)): self = selfref() if self is not None: if self._iterating: self._pending_removals.append(wr.key) else: del self.data[wr.key] self._remove = remove # A list of keys to be removed self._pending_removals = [] self._iterating = set() self.data = d = {} self.update(*args, **kw) def _commit_removals(self): l = self._pending_removals d = self.data # We shouldn't encounter any KeyError, because this method should # always be called *before* mutating the dict. 
while l: del d[l.pop()] def __getitem__(self, key): o = self.data[key]() if o is None: raise KeyError(key) else: return o def __delitem__(self, key): if self._pending_removals: self._commit_removals() del self.data[key] def __len__(self): return len(self.data) - len(self._pending_removals) def __contains__(self, key): try: o = self.data[key]() except KeyError: return False return o is not None def __repr__(self): return "<WeakValueDictionary at %s>" % id(self) def __setitem__(self, key, value): if self._pending_removals: self._commit_removals() self.data[key] = KeyedRef(value, self._remove, key) def copy(self): new = WeakValueDictionary() for key, wr in self.data.items(): o = wr() if o is not None: new[key] = o return new __copy__ = copy def __deepcopy__(self, memo): from copy import deepcopy new = self.__class__() for key, wr in self.data.items(): o = wr() if o is not None: new[deepcopy(key, memo)] = o return new def get(self, key, default=None): try: wr = self.data[key] except KeyError: return default else: o = wr() if o is None: # This should only happen return default else: return o def items(self): with _IterationGuard(self): for k, wr in self.data.items(): v = wr() if v is not None: yield k, v def keys(self): with _IterationGuard(self): for k, wr in self.data.items(): if wr() is not None: yield k __iter__ = keys def itervaluerefs(self): """Return an iterator that yields the weak references to the values. The references are not guaranteed to be 'live' at the time they are used, so the result of calling the references needs to be checked before being used. This can be used to avoid creating references that will cause the garbage collector to keep the values around longer than needed. 
""" with _IterationGuard(self): for wr in self.data.values(): yield wr def values(self): with _IterationGuard(self): for wr in self.data.values(): obj = wr() if obj is not None: yield obj def popitem(self): if self._pending_removals: self._commit_removals() while True: key, wr = self.data.popitem() o = wr() if o is not None: return key, o def pop(self, key, *args): if self._pending_removals: self._commit_removals() try: o = self.data.pop(key)() except KeyError: if args: return args[0] raise if o is None: raise KeyError(key) else: return o def setdefault(self, key, default=None): try: wr = self.data[key] except KeyError: if self._pending_removals: self._commit_removals() self.data[key] = KeyedRef(default, self._remove, key) return default else: return wr() def update(self, dict=None, **kwargs): if self._pending_removals: self._commit_removals() d = self.data if dict is not None: if not hasattr(dict, "items"): dict = type({})(dict) for key, o in dict.items(): d[key] = KeyedRef(o, self._remove, key) if len(kwargs): self.update(kwargs) def valuerefs(self): """Return a list of weak references to the values. The references are not guaranteed to be 'live' at the time they are used, so the result of calling the references needs to be checked before being used. This can be used to avoid creating references that will cause the garbage collector to keep the values around longer than needed. """ return list(self.data.values()) class KeyedRef(ref): """Specialized reference that includes a key corresponding to the value. This is used in the WeakValueDictionary to avoid having to create a function object for each key stored in the mapping. A shared callback object can use the 'key' attribute of a KeyedRef instead of getting a reference to the key from an enclosing scope. 
""" __slots__ = "key", def __new__(type, ob, callback, key): self = ref.__new__(type, ob, callback) self.key = key return self def __init__(self, ob, callback, key): super().__init__(ob, callback) class WeakKeyDictionary(collections.MutableMapping): """ Mapping class that references keys weakly. Entries in the dictionary will be discarded when there is no longer a strong reference to the key. This can be used to associate additional data with an object owned by other parts of an application without adding attributes to those objects. This can be especially useful with objects that override attribute accesses. """ def __init__(self, dict=None): self.data = {} def remove(k, selfref=ref(self)): self = selfref() if self is not None: if self._iterating: self._pending_removals.append(k) else: del self.data[k] self._remove = remove # A list of dead weakrefs (keys to be removed) self._pending_removals = [] self._iterating = set() if dict is not None: self.update(dict) def _commit_removals(self): # NOTE: We don't need to call this method before mutating the dict, # because a dead weakref never compares equal to a live weakref, # even if they happened to refer to equal objects. # However, it means keys may already have been removed. 
l = self._pending_removals d = self.data while l: try: del d[l.pop()] except KeyError: pass def __delitem__(self, key): del self.data[ref(key)] def __getitem__(self, key): return self.data[ref(key)] def __len__(self): return len(self.data) - len(self._pending_removals) def __repr__(self): return "<WeakKeyDictionary at %s>" % id(self) def __setitem__(self, key, value): self.data[ref(key, self._remove)] = value def copy(self): new = WeakKeyDictionary() for key, value in self.data.items(): o = key() if o is not None: new[o] = value return new __copy__ = copy def __deepcopy__(self, memo): from copy import deepcopy new = self.__class__() for key, value in self.data.items(): o = key() if o is not None: new[o] = deepcopy(value, memo) return new def get(self, key, default=None): return self.data.get(ref(key),default) def __contains__(self, key): try: wr = ref(key) except TypeError: return False return wr in self.data def items(self): with _IterationGuard(self): for wr, value in self.data.items(): key = wr() if key is not None: yield key, value def keys(self): with _IterationGuard(self): for wr in self.data: obj = wr() if obj is not None: yield obj __iter__ = keys def values(self): with _IterationGuard(self): for wr, value in self.data.items(): if wr() is not None: yield value def keyrefs(self): """Return a list of weak references to the keys. The references are not guaranteed to be 'live' at the time they are used, so the result of calling the references needs to be checked before being used. This can be used to avoid creating references that will cause the garbage collector to keep the keys around longer than needed. 
""" return list(self.data) def popitem(self): while True: key, value = self.data.popitem() o = key() if o is not None: return o, value def pop(self, key, *args): return self.data.pop(ref(key), *args) def setdefault(self, key, default=None): return self.data.setdefault(ref(key, self._remove),default) def update(self, dict=None, **kwargs): d = self.data if dict is not None: if not hasattr(dict, "items"): dict = type({})(dict) for key, value in dict.items(): d[ref(key, self._remove)] = value if len(kwargs): self.update(kwargs)
bsd-3-clause
tectronics/mavrec
smra/smra_portal/rfc3339.py
2
10277
#!/usr/bin/env python # # Copyright (c) 2009, 2010, Henry Precheur <henry@precheur.org> # # Permission to use, copy, modify, and/or distribute this software for any # purpose with or without fee is hereby granted, provided that the above # copyright notice and this permission notice appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH # REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND # FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, # INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM # LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR # OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR # PERFORMANCE OF THIS SOFTWARE. # '''Formats dates according to the :RFC:`3339`. Report bugs & problems on BitBucket_ .. _BitBucket: https://bitbucket.org/henry/clan.cx/issues ''' __author__ = 'Henry Precheur <henry@precheur.org>' __license__ = 'ISCL' __version__ = '5.1' __all__ = ('rfc3339', ) import datetime import time import unittest def _timezone(utc_offset): ''' Return a string representing the timezone offset. >>> _timezone(0) '+00:00' >>> _timezone(3600) '+01:00' >>> _timezone(-28800) '-08:00' >>> _timezone(-1800) '-00:30' ''' # Python's division uses floor(), not round() like in other languages: # -1 / 2 == -1 and not -1 / 2 == 0 # That's why we use abs(utc_offset). hours = abs(utc_offset) // 3600 minutes = abs(utc_offset) % 3600 // 60 return '%c%02d:%02d' % ('-' if utc_offset < 0 else '+', hours, minutes) def _timedelta_to_seconds(timedelta): ''' >>> _timedelta_to_seconds(datetime.timedelta(hours=3)) 10800 >>> _timedelta_to_seconds(datetime.timedelta(hours=3, minutes=15)) 11700 ''' return (timedelta.days * 86400 + timedelta.seconds + timedelta.microseconds // 1000) def _utc_offset(date, use_system_timezone): ''' Return the UTC offset of `date`. 
If `date` does not have any `tzinfo`, use the timezone informations stored locally on the system. >>> if time.localtime().tm_isdst: ... system_timezone = -time.altzone ... else: ... system_timezone = -time.timezone >>> _utc_offset(datetime.datetime.now(), True) == system_timezone True >>> _utc_offset(datetime.datetime.now(), False) 0 ''' if isinstance(date, datetime.datetime) and date.tzinfo is not None: return _timedelta_to_seconds(date.dst() or date.utcoffset()) elif use_system_timezone: if date.year < 1970: # We use 1972 because 1970 doesn't have a leap day (feb 29) t = time.mktime(date.replace(year=1972).timetuple()) else: t = time.mktime(date.timetuple()) if time.localtime(t).tm_isdst: # pragma: no cover return -time.altzone else: return -time.timezone else: return 0 def _string(d, timezone): return ('%04d-%02d-%02dT%02d:%02d:%02d%s' % (d.year, d.month, d.day, d.hour, d.minute, d.second, timezone)) def rfc3339(date, utc=False, use_system_timezone=True): ''' Return a string formatted according to the :RFC:`3339`. If called with `utc=True`, it normalizes `date` to the UTC date. If `date` does not have any timezone information, uses the local timezone:: >>> d = datetime.datetime(2008, 4, 2, 20) >>> rfc3339(d, utc=True, use_system_timezone=False) '2008-04-02T20:00:00Z' >>> rfc3339(d) # doctest: +ELLIPSIS '2008-04-02T20:00:00...' If called with `user_system_timezone=False` don't use the local timezone if `date` does not have timezone informations and consider the offset to UTC to be zero:: >>> rfc3339(d, use_system_timezone=False) '2008-04-02T20:00:00+00:00' `date` must be a `datetime.datetime`, `datetime.date` or a timestamp as returned by `time.time()`:: >>> rfc3339(0, utc=True, use_system_timezone=False) '1970-01-01T00:00:00Z' >>> rfc3339(datetime.date(2008, 9, 6), utc=True, ... use_system_timezone=False) '2008-09-06T00:00:00Z' >>> rfc3339(datetime.date(2008, 9, 6), ... 
use_system_timezone=False) '2008-09-06T00:00:00+00:00' >>> rfc3339('foo bar') Traceback (most recent call last): ... TypeError: Expected timestamp or date object. Got <type 'str'>. For dates before January 1st 1970, the timezones will be the ones used in 1970. It might not be accurate, but on most sytem there is no timezone information before 1970. ''' # Try to convert timestamp to datetime try: if use_system_timezone: date = datetime.datetime.fromtimestamp(date) else: date = datetime.datetime.utcfromtimestamp(date) except TypeError: pass if not isinstance(date, datetime.date): raise TypeError('Expected timestamp or date object. Got %r.' % type(date)) if not isinstance(date, datetime.datetime): date = datetime.datetime(*date.timetuple()[:3]) utc_offset = _utc_offset(date, use_system_timezone) if utc: return _string(date + datetime.timedelta(seconds=utc_offset), 'Z') else: return _string(date, _timezone(utc_offset)) class LocalTimeTestCase(unittest.TestCase): ''' Test the use of the timezone saved locally. Since it is hard to test using doctest. 
''' def setUp(self): local_utcoffset = _utc_offset(datetime.datetime.now(), True) self.local_utcoffset = datetime.timedelta(seconds=local_utcoffset) self.local_timezone = _timezone(local_utcoffset) def test_datetime(self): d = datetime.datetime.now() self.assertEqual(rfc3339(d), d.strftime('%Y-%m-%dT%H:%M:%S') + self.local_timezone) def test_datetime_timezone(self): class FixedNoDst(datetime.tzinfo): 'A timezone info with fixed offset, not DST' def utcoffset(self, dt): return datetime.timedelta(hours=2, minutes=30) def dst(self, dt): return None fixed_no_dst = FixedNoDst() class Fixed(FixedNoDst): 'A timezone info with DST' def dst(self, dt): return datetime.timedelta(hours=3, minutes=15) fixed = Fixed() d = datetime.datetime.now().replace(tzinfo=fixed_no_dst) timezone = _timezone(_timedelta_to_seconds(fixed_no_dst.\ utcoffset(None))) self.assertEqual(rfc3339(d), d.strftime('%Y-%m-%dT%H:%M:%S') + timezone) d = datetime.datetime.now().replace(tzinfo=fixed) timezone = _timezone(_timedelta_to_seconds(fixed.dst(None))) self.assertEqual(rfc3339(d), d.strftime('%Y-%m-%dT%H:%M:%S') + timezone) def test_datetime_utc(self): d = datetime.datetime.now() d_utc = d + self.local_utcoffset self.assertEqual(rfc3339(d, utc=True), d_utc.strftime('%Y-%m-%dT%H:%M:%SZ')) def test_date(self): d = datetime.date.today() self.assertEqual(rfc3339(d), d.strftime('%Y-%m-%dT%H:%M:%S') + self.local_timezone) def test_date_utc(self): d = datetime.date.today() # Convert `date` to `datetime`, since `date` ignores seconds and hours # in timedeltas: # >>> datetime.date(2008, 9, 7) + datetime.timedelta(hours=23) # datetime.date(2008, 9, 7) d_utc = datetime.datetime(*d.timetuple()[:3]) + self.local_utcoffset self.assertEqual(rfc3339(d, utc=True), d_utc.strftime('%Y-%m-%dT%H:%M:%SZ')) def test_timestamp(self): d = time.time() self.assertEqual(rfc3339(d), datetime.datetime.fromtimestamp(d).\ strftime('%Y-%m-%dT%H:%M:%S') + self.local_timezone) def test_timestamp_utc(self): d = time.time() d_utc = 
datetime.datetime.utcfromtimestamp(d) + self.local_utcoffset self.assertEqual(rfc3339(d), (d_utc.strftime('%Y-%m-%dT%H:%M:%S') + self.local_timezone)) def test_before_1970(self): d = datetime.date(1885, 01, 04) self.assertEqual(rfc3339(d), '1885-01-04T00:00:00' + self.local_timezone) self.assertEqual(rfc3339(d, utc=True, use_system_timezone=False), '1885-01-04T00:00:00Z') def test_1920(self): d = datetime.date(1920, 02, 29) self.assertEqual(rfc3339(d, utc=False, use_system_timezone=True), '1920-02-29T00:00:00' + self.local_timezone) # If these tests start failing it probably means there was a policy change # for the Pacific time zone. # See http://en.wikipedia.org/wiki/Pacific_Time_Zone. if 'PST' in time.tzname: def testPDTChange(self): '''Test Daylight saving change''' # PDT switch happens at 2AM on March 14, 2010 # 1:59AM PST self.assertEqual(rfc3339(datetime.datetime(2010, 3, 14, 1, 59)), '2010-03-14T01:59:00-08:00') # 3AM PDT self.assertEqual(rfc3339(datetime.datetime(2010, 3, 14, 3, 0)), '2010-03-14T03:00:00-07:00') def testPSTChange(self): '''Test Standard time change''' # PST switch happens at 2AM on November 6, 2010 # 0:59AM PDT self.assertEqual(rfc3339(datetime.datetime(2010, 11, 7, 0, 59)), '2010-11-07T00:59:00-07:00') # 1:00AM PST # There's no way to have 1:00AM PST without a proper tzinfo self.assertEqual(rfc3339(datetime.datetime(2010, 11, 7, 1, 0)), '2010-11-07T01:00:00-07:00') if __name__ == '__main__': # pragma: no cover import doctest doctest.testmod() unittest.main()
bsd-3-clause
simudream/django-rest-framework
rest_framework/authtoken/serializers.py
80
1032
from django.contrib.auth import authenticate from django.utils.translation import ugettext_lazy as _ from rest_framework import exceptions, serializers class AuthTokenSerializer(serializers.Serializer): username = serializers.CharField() password = serializers.CharField(style={'input_type': 'password'}) def validate(self, attrs): username = attrs.get('username') password = attrs.get('password') if username and password: user = authenticate(username=username, password=password) if user: if not user.is_active: msg = _('User account is disabled.') raise exceptions.ValidationError(msg) else: msg = _('Unable to log in with provided credentials.') raise exceptions.ValidationError(msg) else: msg = _('Must include "username" and "password".') raise exceptions.ValidationError(msg) attrs['user'] = user return attrs
bsd-2-clause
zbqf109/goodo
openerp/addons/decimal_precision/decimal_precision.py
47
2671
# -*- encoding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. import openerp from openerp import SUPERUSER_ID from openerp import tools from openerp.osv import orm, fields from openerp.modules.registry import RegistryManager class decimal_precision(orm.Model): _name = 'decimal.precision' _columns = { 'name': fields.char('Usage', select=True, required=True), 'digits': fields.integer('Digits', required=True), } _defaults = { 'digits': 2, } _sql_constraints = [ ('name_uniq', 'unique (name)', """Only one value can be defined for each given usage!"""), ] @tools.ormcache('application') def precision_get(self, cr, uid, application): cr.execute('select digits from decimal_precision where name=%s', (application,)) res = cr.fetchone() return res[0] if res else 2 def clear_cache(self, cr): """ Deprecated, use `clear_caches` instead. """ self.clear_caches() def create(self, cr, uid, data, context=None): res = super(decimal_precision, self).create(cr, uid, data, context=context) self.clear_caches() return res def unlink(self, cr, uid, ids, context=None): res = super(decimal_precision, self).unlink(cr, uid, ids, context=context) self.clear_caches() return res def write(self, cr, uid, ids, data, *args, **argv): res = super(decimal_precision, self).write(cr, uid, ids, data, *args, **argv) self.clear_caches() return res def get_precision(application): def change_digit(cr): decimal_precision = openerp.registry(cr.dbname)['decimal.precision'] res = decimal_precision.precision_get(cr, SUPERUSER_ID, application) return (16, res) return change_digit class DecimalPrecisionFloat(orm.AbstractModel): """ Override qweb.field.float to add a `decimal_precision` domain option and use that instead of the column's own value if it is specified """ _inherit = 'ir.qweb.field.float' def precision(self, cr, uid, field, options=None, context=None): dp = options and options.get('decimal_precision') if dp: return self.pool['decimal.precision'].precision_get( cr, 
uid, dp) return super(DecimalPrecisionFloat, self).precision( cr, uid, field, options=options, context=context) class DecimalPrecisionTestModel(orm.Model): _name = 'decimal.precision.test' _columns = { 'float': fields.float(), 'float_2': fields.float(digits=(16, 2)), 'float_4': fields.float(digits=(16, 4)), }
gpl-3.0
ehashman/oh-mainline
mysite/search/migrations/0005_add_original_bug_link.py
17
2325
# This file is part of OpenHatch. # Copyright (C) 2009 OpenHatch, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from south.db import db from django.db import models from mysite.search.models import * class Migration: def forwards(self, orm): "Write your forwards migration here" def backwards(self, orm): "Write your backwards migration here" models = { 'search.project': { 'icon_url': ('models.URLField', [], {'max_length': '200'}), 'id': ('models.AutoField', [], {'primary_key': 'True'}), 'language': ('models.CharField', [], {'max_length': '200'}), 'name': ('models.CharField', [], {'max_length': '200'}) }, 'search.bug': { 'canonical_bug_link': ('models.URLField', [], {'max_length': '200'}), 'date_reported': ('models.DateTimeField', [], {}), 'description': ('models.TextField', [], {}), 'id': ('models.AutoField', [], {'primary_key': 'True'}), 'importance': ('models.CharField', [], {'max_length': '200'}), 'last_polled': ('models.DateTimeField', [], {}), 'last_touched': ('models.DateTimeField', [], {}), 'people_involved': ('models.IntegerField', [], {}), 'project': ('models.ForeignKey', ['Project'], {}), 'status': ('models.CharField', [], {'max_length': '200'}), 'submitter_realname': ('models.CharField', [], {'max_length': '200'}), 'submitter_username': ('models.CharField', [], {'max_length': '200'}), 'title': ('models.CharField', [], {'max_length': '200'}) } } 
complete_apps = ['search']
agpl-3.0
mshafiq9/django
tests/utils_tests/test_baseconv.py
326
1787
from unittest import TestCase from django.utils.baseconv import ( BaseConverter, base2, base16, base36, base56, base62, base64, ) from django.utils.six.moves import range class TestBaseConv(TestCase): def test_baseconv(self): nums = [-10 ** 10, 10 ** 10] + list(range(-100, 100)) for converter in [base2, base16, base36, base56, base62, base64]: for i in nums: self.assertEqual(i, converter.decode(converter.encode(i))) def test_base11(self): base11 = BaseConverter('0123456789-', sign='$') self.assertEqual(base11.encode(1234), '-22') self.assertEqual(base11.decode('-22'), 1234) self.assertEqual(base11.encode(-1234), '$-22') self.assertEqual(base11.decode('$-22'), -1234) def test_base20(self): base20 = BaseConverter('0123456789abcdefghij') self.assertEqual(base20.encode(1234), '31e') self.assertEqual(base20.decode('31e'), 1234) self.assertEqual(base20.encode(-1234), '-31e') self.assertEqual(base20.decode('-31e'), -1234) def test_base64(self): self.assertEqual(base64.encode(1234), 'JI') self.assertEqual(base64.decode('JI'), 1234) self.assertEqual(base64.encode(-1234), '$JI') self.assertEqual(base64.decode('$JI'), -1234) def test_base7(self): base7 = BaseConverter('cjdhel3', sign='g') self.assertEqual(base7.encode(1234), 'hejd') self.assertEqual(base7.decode('hejd'), 1234) self.assertEqual(base7.encode(-1234), 'ghejd') self.assertEqual(base7.decode('ghejd'), -1234) def test_exception(self): self.assertRaises(ValueError, BaseConverter, 'abc', sign='a') self.assertIsInstance(BaseConverter('abc', sign='d'), BaseConverter)
bsd-3-clause
postla/e2-gui
lib/python/Components/Converter/TemplatedMultiContent.py
80
2879
from Components.Converter.StringList import StringList class TemplatedMultiContent(StringList): """Turns a python tuple list into a multi-content list which can be used in a listbox renderer.""" def __init__(self, args): StringList.__init__(self, args) from enigma import eListboxPythonMultiContent, gFont, RT_HALIGN_LEFT, RT_HALIGN_CENTER, RT_HALIGN_RIGHT, RT_VALIGN_TOP, RT_VALIGN_CENTER, RT_VALIGN_BOTTOM, RT_WRAP from Components.MultiContent import MultiContentEntryText, MultiContentEntryPixmap, MultiContentEntryPixmapAlphaTest, MultiContentEntryPixmapAlphaBlend, MultiContentTemplateColor, MultiContentEntryProgress l = locals() del l["self"] # cleanup locals a bit del l["args"] self.active_style = None self.template = eval(args, {}, l) assert "fonts" in self.template assert "itemHeight" in self.template assert "template" in self.template or "templates" in self.template assert "template" in self.template or "default" in self.template["templates"] # we need to have a default template if not "template" in self.template: # default template can be ["template"] or ["templates"]["default"] self.template["template"] = self.template["templates"]["default"][1] self.template["itemHeight"] = self.template["template"][0] def changed(self, what): if not self.content: from enigma import eListboxPythonMultiContent self.content = eListboxPythonMultiContent() # also setup fonts (also given by source) index = 0 for f in self.template["fonts"]: self.content.setFont(index, f) index += 1 # if only template changed, don't reload list if what[0] == self.CHANGED_SPECIFIC and what[1] == "style": pass elif self.source: self.content.setList(self.source.list) self.setTemplate() self.downstream_elements.changed(what) def setTemplate(self): if self.source: style = self.source.style if style == self.active_style: return # if skin defined "templates", that means that it defines multiple styles in a dict. 
template should still be a default templates = self.template.get("templates") template = self.template.get("template") itemheight = self.template["itemHeight"] selectionEnabled = self.template.get("selectionEnabled", True) scrollbarMode = self.template.get("scrollbarMode", "showOnDemand") if templates and style and style in templates: # if we have a custom style defined in the source, and different templates in the skin, look it up template = templates[style][1] itemheight = templates[style][0] if len(templates[style]) > 2: selectionEnabled = templates[style][2] if len(templates[style]) > 3: scrollbarMode = templates[style][3] self.content.setTemplate(template) self.content.setItemHeight(itemheight) self.selectionEnabled = selectionEnabled self.scrollbarMode = scrollbarMode self.active_style = style
gpl-2.0
imsparsh/python-for-android
python-modules/twisted/twisted/names/srvconnect.py
52
6386
# -*- test-case-name: twisted.names.test.test_srvconnect -*- # Copyright (c) 2001-2009 Twisted Matrix Laboratories. # See LICENSE for details. import random from zope.interface import implements from twisted.internet import error, interfaces from twisted.names import client, dns from twisted.names.error import DNSNameError from twisted.python.compat import reduce class _SRVConnector_ClientFactoryWrapper: def __init__(self, connector, wrappedFactory): self.__connector = connector self.__wrappedFactory = wrappedFactory def startedConnecting(self, connector): self.__wrappedFactory.startedConnecting(self.__connector) def clientConnectionFailed(self, connector, reason): self.__connector.connectionFailed(reason) def clientConnectionLost(self, connector, reason): self.__connector.connectionLost(reason) def __getattr__(self, key): return getattr(self.__wrappedFactory, key) class SRVConnector: """A connector that looks up DNS SRV records. See RFC2782.""" implements(interfaces.IConnector) stopAfterDNS=0 def __init__(self, reactor, service, domain, factory, protocol='tcp', connectFuncName='connectTCP', connectFuncArgs=(), connectFuncKwArgs={}, ): self.reactor = reactor self.service = service self.domain = domain self.factory = factory self.protocol = protocol self.connectFuncName = connectFuncName self.connectFuncArgs = connectFuncArgs self.connectFuncKwArgs = connectFuncKwArgs self.connector = None self.servers = None self.orderedServers = None # list of servers already used in this round def connect(self): """Start connection to remote server.""" self.factory.doStart() self.factory.startedConnecting(self) if not self.servers: if self.domain is None: self.connectionFailed(error.DNSLookupError("Domain is not defined.")) return d = client.lookupService('_%s._%s.%s' % (self.service, self.protocol, self.domain)) d.addCallbacks(self._cbGotServers, self._ebGotServers) d.addCallback(lambda x, self=self: self._reallyConnect()) d.addErrback(self.connectionFailed) elif self.connector 
is None: self._reallyConnect() else: self.connector.connect() def _ebGotServers(self, failure): failure.trap(DNSNameError) # Some DNS servers reply with NXDOMAIN when in fact there are # just no SRV records for that domain. Act as if we just got an # empty response and use fallback. self.servers = [] self.orderedServers = [] def _cbGotServers(self, (answers, auth, add)): if len(answers) == 1 and answers[0].type == dns.SRV \ and answers[0].payload \ and answers[0].payload.target == dns.Name('.'): # decidedly not available raise error.DNSLookupError("Service %s not available for domain %s." % (repr(self.service), repr(self.domain))) self.servers = [] self.orderedServers = [] for a in answers: if a.type != dns.SRV or not a.payload: continue self.orderedServers.append((a.payload.priority, a.payload.weight, str(a.payload.target), a.payload.port)) def _serverCmp(self, a, b): if a[0]!=b[0]: return cmp(a[0], b[0]) else: return cmp(a[1], b[1]) def pickServer(self): assert self.servers is not None assert self.orderedServers is not None if not self.servers and not self.orderedServers: # no SRV record, fall back.. return self.domain, self.service if not self.servers and self.orderedServers: # start new round self.servers = self.orderedServers self.orderedServers = [] assert self.servers self.servers.sort(self._serverCmp) minPriority=self.servers[0][0] weightIndex = zip(xrange(len(self.servers)), [x[1] for x in self.servers if x[0]==minPriority]) weightSum = reduce(lambda x, y: (None, x[1]+y[1]), weightIndex, (None, 0))[1] rand = random.randint(0, weightSum) for index, weight in weightIndex: weightSum -= weight if weightSum <= 0: chosen = self.servers[index] del self.servers[index] self.orderedServers.append(chosen) p, w, host, port = chosen return host, port raise RuntimeError, 'Impossible %s pickServer result.' 
% self.__class__.__name__ def _reallyConnect(self): if self.stopAfterDNS: self.stopAfterDNS=0 return self.host, self.port = self.pickServer() assert self.host is not None, 'Must have a host to connect to.' assert self.port is not None, 'Must have a port to connect to.' connectFunc = getattr(self.reactor, self.connectFuncName) self.connector=connectFunc( self.host, self.port, _SRVConnector_ClientFactoryWrapper(self, self.factory), *self.connectFuncArgs, **self.connectFuncKwArgs) def stopConnecting(self): """Stop attempting to connect.""" if self.connector: self.connector.stopConnecting() else: self.stopAfterDNS=1 def disconnect(self): """Disconnect whatever our are state is.""" if self.connector is not None: self.connector.disconnect() else: self.stopConnecting() def getDestination(self): assert self.connector return self.connector.getDestination() def connectionFailed(self, reason): self.factory.clientConnectionFailed(self, reason) self.factory.doStop() def connectionLost(self, reason): self.factory.clientConnectionLost(self, reason) self.factory.doStop()
apache-2.0
rrampage/rethinkdb
test/rql_test/connections/http_support/werkzeug/contrib/fixers.py
464
9949
# -*- coding: utf-8 -*- """ werkzeug.contrib.fixers ~~~~~~~~~~~~~~~~~~~~~~~ .. versionadded:: 0.5 This module includes various helpers that fix bugs in web servers. They may be necessary for some versions of a buggy web server but not others. We try to stay updated with the status of the bugs as good as possible but you have to make sure whether they fix the problem you encounter. If you notice bugs in webservers not fixed in this module consider contributing a patch. :copyright: Copyright 2009 by the Werkzeug Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ try: from urllib import unquote except ImportError: from urllib.parse import unquote from werkzeug.http import parse_options_header, parse_cache_control_header, \ parse_set_header from werkzeug.useragents import UserAgent from werkzeug.datastructures import Headers, ResponseCacheControl class CGIRootFix(object): """Wrap the application in this middleware if you are using FastCGI or CGI and you have problems with your app root being set to the cgi script's path instead of the path users are going to visit .. versionchanged:: 0.9 Added `app_root` parameter and renamed from `LighttpdCGIRootFix`. :param app: the WSGI application :param app_root: Defaulting to ``'/'``, you can set this to something else if your app is mounted somewhere else. """ def __init__(self, app, app_root='/'): self.app = app self.app_root = app_root def __call__(self, environ, start_response): # only set PATH_INFO for older versions of Lighty or if no # server software is provided. That's because the test was # added in newer Werkzeug versions and we don't want to break # people's code if they are using this fixer in a test that # does not set the SERVER_SOFTWARE key. 
if 'SERVER_SOFTWARE' not in environ or \ environ['SERVER_SOFTWARE'] < 'lighttpd/1.4.28': environ['PATH_INFO'] = environ.get('SCRIPT_NAME', '') + \ environ.get('PATH_INFO', '') environ['SCRIPT_NAME'] = self.app_root.strip('/') return self.app(environ, start_response) # backwards compatibility LighttpdCGIRootFix = CGIRootFix class PathInfoFromRequestUriFix(object): """On windows environment variables are limited to the system charset which makes it impossible to store the `PATH_INFO` variable in the environment without loss of information on some systems. This is for example a problem for CGI scripts on a Windows Apache. This fixer works by recreating the `PATH_INFO` from `REQUEST_URI`, `REQUEST_URL`, or `UNENCODED_URL` (whatever is available). Thus the fix can only be applied if the webserver supports either of these variables. :param app: the WSGI application """ def __init__(self, app): self.app = app def __call__(self, environ, start_response): for key in 'REQUEST_URL', 'REQUEST_URI', 'UNENCODED_URL': if key not in environ: continue request_uri = unquote(environ[key]) script_name = unquote(environ.get('SCRIPT_NAME', '')) if request_uri.startswith(script_name): environ['PATH_INFO'] = request_uri[len(script_name):] \ .split('?', 1)[0] break return self.app(environ, start_response) class ProxyFix(object): """This middleware can be applied to add HTTP proxy support to an application that was not designed with HTTP proxies in mind. It sets `REMOTE_ADDR`, `HTTP_HOST` from `X-Forwarded` headers. If you have more than one proxy server in front of your app, set `num_proxies` accordingly. Do not use this middleware in non-proxy setups for security reasons. The original values of `REMOTE_ADDR` and `HTTP_HOST` are stored in the WSGI environment as `werkzeug.proxy_fix.orig_remote_addr` and `werkzeug.proxy_fix.orig_http_host`. :param app: the WSGI application :param num_proxies: the number of proxy servers in front of the app. 
""" def __init__(self, app, num_proxies=1): self.app = app self.num_proxies = num_proxies def get_remote_addr(self, forwarded_for): """Selects the new remote addr from the given list of ips in X-Forwarded-For. By default it picks the one that the `num_proxies` proxy server provides. Before 0.9 it would always pick the first. .. versionadded:: 0.8 """ if len(forwarded_for) >= self.num_proxies: return forwarded_for[-1 * self.num_proxies] def __call__(self, environ, start_response): getter = environ.get forwarded_proto = getter('HTTP_X_FORWARDED_PROTO', '') forwarded_for = getter('HTTP_X_FORWARDED_FOR', '').split(',') forwarded_host = getter('HTTP_X_FORWARDED_HOST', '') environ.update({ 'werkzeug.proxy_fix.orig_wsgi_url_scheme': getter('wsgi.url_scheme'), 'werkzeug.proxy_fix.orig_remote_addr': getter('REMOTE_ADDR'), 'werkzeug.proxy_fix.orig_http_host': getter('HTTP_HOST') }) forwarded_for = [x for x in [x.strip() for x in forwarded_for] if x] remote_addr = self.get_remote_addr(forwarded_for) if remote_addr is not None: environ['REMOTE_ADDR'] = remote_addr if forwarded_host: environ['HTTP_HOST'] = forwarded_host if forwarded_proto: environ['wsgi.url_scheme'] = forwarded_proto return self.app(environ, start_response) class HeaderRewriterFix(object): """This middleware can remove response headers and add others. This is for example useful to remove the `Date` header from responses if you are using a server that adds that header, no matter if it's present or not or to add `X-Powered-By` headers:: app = HeaderRewriterFix(app, remove_headers=['Date'], add_headers=[('X-Powered-By', 'WSGI')]) :param app: the WSGI application :param remove_headers: a sequence of header keys that should be removed. :param add_headers: a sequence of ``(key, value)`` tuples that should be added. 
""" def __init__(self, app, remove_headers=None, add_headers=None): self.app = app self.remove_headers = set(x.lower() for x in (remove_headers or ())) self.add_headers = list(add_headers or ()) def __call__(self, environ, start_response): def rewriting_start_response(status, headers, exc_info=None): new_headers = [] for key, value in headers: if key.lower() not in self.remove_headers: new_headers.append((key, value)) new_headers += self.add_headers return start_response(status, new_headers, exc_info) return self.app(environ, rewriting_start_response) class InternetExplorerFix(object): """This middleware fixes a couple of bugs with Microsoft Internet Explorer. Currently the following fixes are applied: - removing of `Vary` headers for unsupported mimetypes which causes troubles with caching. Can be disabled by passing ``fix_vary=False`` to the constructor. see: http://support.microsoft.com/kb/824847/en-us - removes offending headers to work around caching bugs in Internet Explorer if `Content-Disposition` is set. Can be disabled by passing ``fix_attach=False`` to the constructor. If it does not detect affected Internet Explorer versions it won't touch the request / response. """ # This code was inspired by Django fixers for the same bugs. 
The # fix_vary and fix_attach fixers were originally implemented in Django # by Michael Axiak and is available as part of the Django project: # http://code.djangoproject.com/ticket/4148 def __init__(self, app, fix_vary=True, fix_attach=True): self.app = app self.fix_vary = fix_vary self.fix_attach = fix_attach def fix_headers(self, environ, headers, status=None): if self.fix_vary: header = headers.get('content-type', '') mimetype, options = parse_options_header(header) if mimetype not in ('text/html', 'text/plain', 'text/sgml'): headers.pop('vary', None) if self.fix_attach and 'content-disposition' in headers: pragma = parse_set_header(headers.get('pragma', '')) pragma.discard('no-cache') header = pragma.to_header() if not header: headers.pop('pragma', '') else: headers['Pragma'] = header header = headers.get('cache-control', '') if header: cc = parse_cache_control_header(header, cls=ResponseCacheControl) cc.no_cache = None cc.no_store = False header = cc.to_header() if not header: headers.pop('cache-control', '') else: headers['Cache-Control'] = header def run_fixed(self, environ, start_response): def fixing_start_response(status, headers, exc_info=None): headers = Headers(headers) self.fix_headers(environ, headers, status) return start_response(status, headers.to_wsgi_list(), exc_info) return self.app(environ, fixing_start_response) def __call__(self, environ, start_response): ua = UserAgent(environ) if ua.browser != 'msie': return self.app(environ, start_response) return self.run_fixed(environ, start_response)
agpl-3.0
Jenselme/AutobahnPython
examples/twisted/websocket/broadcast/server.py
3
3885
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################

import sys

from twisted.internet import reactor
from twisted.python import log
from twisted.web.server import Site
from twisted.web.static import File

from autobahn.twisted.websocket import WebSocketServerFactory, \
    WebSocketServerProtocol, \
    listenWS


class BroadcastServerProtocol(WebSocketServerProtocol):
    """Per-connection protocol: registers itself with the factory on open,
    forwards every text frame for broadcasting, and unregisters on close."""

    def onOpen(self):
        self.factory.register(self)

    def onMessage(self, payload, isBinary):
        # Binary frames are ignored; text frames are tagged with the sender.
        if isBinary:
            return
        msg = "{} from {}".format(payload.decode('utf8'), self.peer)
        self.factory.broadcast(msg)

    def connectionLost(self, reason):
        WebSocketServerProtocol.connectionLost(self, reason)
        self.factory.unregister(self)


class BroadcastServerFactory(WebSocketServerFactory):

    """
    Simple broadcast server broadcasting any message it receives to all
    currently connected clients.
    """

    def __init__(self, url):
        WebSocketServerFactory.__init__(self, url)
        self.clients = []
        self.tickcount = 0
        self.tick()

    def tick(self):
        # Heartbeat: broadcast a running counter once per second, forever.
        self.tickcount += 1
        self.broadcast("tick %d from server" % self.tickcount)
        reactor.callLater(1, self.tick)

    def register(self, client):
        # Idempotent: a client already on the roster is not re-added.
        if client in self.clients:
            return
        print("registered client {}".format(client.peer))
        self.clients.append(client)

    def unregister(self, client):
        # Idempotent: unknown clients are silently ignored.
        if client not in self.clients:
            return
        print("unregistered client {}".format(client.peer))
        self.clients.remove(client)

    def broadcast(self, msg):
        print("broadcasting message '{}' ..".format(msg))
        encoded = msg.encode('utf8')
        for client in self.clients:
            client.sendMessage(encoded)
            print("message sent to {}".format(client.peer))


class BroadcastPreparedServerFactory(BroadcastServerFactory):

    """
    Functionally same as above, but optimized broadcast using prepareMessage
    and sendPreparedMessage.
    """

    def broadcast(self, msg):
        print("broadcasting prepared message '{}' ..".format(msg))
        prepared = self.prepareMessage(msg)
        for client in self.clients:
            client.sendPreparedMessage(prepared)
            print("prepared message sent to {}".format(client.peer))


if __name__ == '__main__':

    log.startLogging(sys.stdout)

    ServerFactory = BroadcastServerFactory
    # ServerFactory = BroadcastPreparedServerFactory

    factory = ServerFactory(u"ws://127.0.0.1:9000")
    factory.protocol = BroadcastServerProtocol
    listenWS(factory)

    # Also serve the current directory over plain HTTP on port 8080.
    webdir = File(".")
    web = Site(webdir)
    reactor.listenTCP(8080, web)

    reactor.run()
mit
acsone/stock-logistics-warehouse
stock_account_quant_merge/tests/test_merge.py
3
4743
# -*- coding: utf-8 -*- # © 2016 Eficent Business and IT Consulting Services S.L. # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). from openerp.addons.stock.tests.common import TestStockCommon class TestMerge(TestStockCommon): """Test the potential quantity on a product with a multi-line BoM""" def setUp(self): super(TestMerge, self).setUp() loc_supplier_id = self.env.ref('stock.stock_location_suppliers') self.loc_stock = self.env.ref('stock.stock_location_stock') self.loc_scrap = self.env.ref('stock.stock_location_scrapped') self.product = self.env.ref('product.product_product_36') # Zero out the inventory of the product inventory = self.env['stock.inventory'].create( {'name': 'Remove product for test', 'location_id': self.loc_stock.id, 'filter': 'product', 'product_id': self.product.id}) inventory.prepare_inventory() inventory.reset_real_qty() inventory.action_done() self.picking_obj = self.env['stock.picking'] move_obj = self.env['stock.move'] self.picking_type = self.env.ref('stock.picking_type_in') # Change the cost method to 'Real Price' self.product.cost_method = 'real' self.picking_1 = self.picking_obj.create( {'picking_type_id': self.picking_type.id}) move_obj.create({'name': '/', 'picking_id': self.picking_1.id, 'product_uom': self.product.uom_id.id, 'location_id': loc_supplier_id.id, 'location_dest_id': self.loc_stock.id, 'product_id': self.product.id, 'price_unit': 10, 'product_uom_qty': 10}) self.picking_1.action_confirm() self._process_picking(self.picking_1) self.picking_2 = self.picking_obj.create( {'picking_type_id': self.picking_type.id}) move_obj.create({'name': '/', 'picking_id': self.picking_2.id, 'product_uom': self.product.uom_id.id, 'location_id': loc_supplier_id.id, 'location_dest_id': self.loc_stock.id, 'product_id': self.product.id, 'price_unit': 20, 'product_uom_qty': 10}) self.picking_2.action_confirm() self._process_picking(self.picking_2) def _process_picking(self, picking): """ Receive the picking """ 
wiz_detail_obj = self.env['stock.transfer_details'] wiz_detail = wiz_detail_obj.with_context( active_model='stock.picking', active_ids=[picking.id], active_id=picking.id).create({'picking_id': picking.id}) wiz_detail.item_ids[0].quantity = 10 wiz_detail.do_detailed_transfer() def test_merge(self): quant_obj = self.env['stock.quant'] domain = [('location_id', '=', self.loc_stock.id), ('product_id', '=', self.product.id)] quants = quant_obj.search(domain) self.assertEqual(len(quants), 2, "There should be 2 quants") # Make a reservation to split the quants move_1 = self.env['stock.move'].create( {'name': 'Test move', 'product_id': self.product.id, 'location_id': self.loc_stock.id, 'location_dest_id': self.loc_scrap.id, 'product_uom_qty': 15.0, 'product_uom': self.product.uom_id.id}) move_1.action_confirm() move_1.action_assign() # Make a reservation to split the quants move_2 = self.env['stock.move'].create( {'name': 'Test move', 'product_id': self.product.id, 'location_id': self.loc_stock.id, 'location_dest_id': self.loc_scrap.id, 'product_uom_qty': 3.0, 'product_uom': self.product.uom_id.id}) move_2.action_confirm() move_2.action_assign() quants = quant_obj.search(domain) self.assertEqual(len(quants), 4, "There should be 4 quants") # Cancel the second move : the quants with unit cost 20 should be # merged back together move_2.action_cancel() quants = quant_obj.search(domain) self.assertEqual(len(quants), 3, "There should be 3 quants") # Cancel the first move : the quants with unit cost 20 should be # merged back together move_1.action_cancel() quants = quant_obj.search(domain) self.assertEqual(len(quants), 2, "There should be 2 quants")
agpl-3.0
acemod/ace3-misc
tools/setup.py
2
3202
#!/usr/bin/env python3

#######################
#  ACEX Setup Script  #
#######################

import os
import sys
import shutil
import platform
import subprocess
import winreg

######## GLOBALS #########
MAINDIR = "z"
PROJECTDIR = "acex"
##########################


def main():
    """Create the ACEX development links.

    Creates two junction links pointing at the ACEX project folder: one
    under P:\\ and one under the Arma 3 installation directory.

    Returns 0 on success, or a positive error code identifying the step
    that failed (1=registry, 2=no ACE3 env, 3=user abort, 4/5=link exists,
    6=link creation failed).
    """
    FULLDIR = "{}\\{}".format(MAINDIR, PROJECTDIR)

    print("""
######################################
# ACEX Development Environment Setup #
######################################

This script will create your ACEX dev environment for you.

Before you run this, you should already have:
- A properly setup ACE3 Development Environment

If you have not done those things yet, please abort this script in the next step and do so first.

This script will create two hard links on your system, both pointing to your ACEX project folder:
[Arma 3 installation directory]\\{} => ACEX project folder
P:\\{} => ACEX project folder
""".format(FULLDIR, FULLDIR))

    print("\n")

    # Locate the Arma 3 installation via the Windows registry.
    try:
        reg = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
        key = winreg.OpenKey(reg, r"SOFTWARE\Wow6432Node\bohemia interactive\arma 3")
        # NOTE(review): EnumValue(key, 1) assumes the install path is the
        # second value under the key -- confirm against a real registry.
        armapath = winreg.EnumValue(key, 1)[1]
    except OSError:
        print("Failed to determine Arma 3 Path.")
        return 1

    # A present ACE3 environment is a prerequisite for this setup.
    if not os.path.exists("P:\\z\\ace"):
        print("No ACE3 Development Environment detected.")
        return 2

    # Project root = parent of the directory containing this script.
    scriptpath = os.path.realpath(__file__)
    projectpath = os.path.dirname(os.path.dirname(scriptpath))

    print("# Detected Paths:")
    print("  Arma Path: {}".format(armapath))
    print("  Project Path: {}".format(projectpath))

    repl = input("\nAre these correct? (y/n): ")
    if repl.lower() != "y":
        return 3

    print("\n# Creating links ...")

    if os.path.exists("P:\\{}\\{}".format(MAINDIR, PROJECTDIR)):
        print("Link on P: already exists. Please finish the setup manually.")
        return 4

    if os.path.exists(os.path.join(armapath, MAINDIR, PROJECTDIR)):
        print("Link in Arma directory already exists. Please finish the setup manually.")
        return 5

    try:
        if not os.path.exists("P:\\{}".format(MAINDIR)):
            os.mkdir("P:\\{}".format(MAINDIR))
        if not os.path.exists(os.path.join(armapath, MAINDIR)):
            os.mkdir(os.path.join(armapath, MAINDIR))
        subprocess.call(["cmd", "/c", "mklink", "/J",
                         "P:\\{}\\{}".format(MAINDIR, PROJECTDIR), projectpath])
        subprocess.call(["cmd", "/c", "mklink", "/J",
                         os.path.join(armapath, MAINDIR, PROJECTDIR), projectpath])
    except OSError:
        # BUG FIX: the original body started with a bare `raise`, which made
        # the message and `return 6` below unreachable -- the script crashed
        # with a traceback instead of reporting the advertised error code.
        print("Something went wrong during the link creation. Please finish the setup manually.")
        return 6

    print("# Links created successfully.")
    return 0


if __name__ == "__main__":
    exitcode = main()
    if exitcode > 0:
        print("\nSomething went wrong during the setup. Make sure you run this script as administrator. If these issues persist, please follow the instructions on the ACE3 wiki to perform the setup manually.")
    else:
        print("\nSetup successfully completed.")
    input("\nPress enter to exit ...")
    sys.exit(exitcode)
gpl-2.0
ruleant/weblate
manage.py
2
1084
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2014 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#

import os
import sys

if __name__ == "__main__":
    # Point Django at Weblate's settings and mark management-command context
    # before any Django machinery is imported.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "weblate.settings")
    os.environ["DJANGO_IS_MANAGEMENT_COMMAND"] = "1"

    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
gpl-3.0
miniconfig/home-assistant
homeassistant/components/device_tracker/bt_home_hub_5.py
13
4161
""" Support for BT Home Hub 5. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/device_tracker.bt_home_hub_5/ """ import logging import re import threading from datetime import timedelta import xml.etree.ElementTree as ET import json from urllib.parse import unquote import requests import voluptuous as vol import homeassistant.helpers.config_validation as cv from homeassistant.components.device_tracker import ( DOMAIN, PLATFORM_SCHEMA, DeviceScanner) from homeassistant.const import CONF_HOST from homeassistant.util import Throttle # Return cached results if last scan was less then this time ago. MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10) _LOGGER = logging.getLogger(__name__) _MAC_REGEX = re.compile(r'(([0-9A-Fa-f]{1,2}\:){5}[0-9A-Fa-f]{1,2})') PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_HOST): cv.string }) # pylint: disable=unused-argument def get_scanner(hass, config): """Return a BT Home Hub 5 scanner if successful.""" scanner = BTHomeHub5DeviceScanner(config[DOMAIN]) return scanner if scanner.success_init else None class BTHomeHub5DeviceScanner(DeviceScanner): """This class queries a BT Home Hub 5.""" def __init__(self, config): """Initialise the scanner.""" _LOGGER.info('Initialising BT Home Hub 5') self.host = config.get(CONF_HOST, '192.168.1.254') self.lock = threading.Lock() self.last_results = {} self.url = 'http://{}/nonAuth/home_status.xml'.format(self.host) # Test the router is accessible data = _get_homehub_data(self.url) self.success_init = data is not None def scan_devices(self): """Scan for new devices and return a list with found device IDs.""" self._update_info() return (device for device in self.last_results) def get_device_name(self, device): """Return the name of the given device or None if we don't know.""" with self.lock: # If not initialised and not already scanned and not found. 
if device not in self.last_results: self._update_info() if not self.last_results: return None return self.last_results.get(device) @Throttle(MIN_TIME_BETWEEN_SCANS) def _update_info(self): """Ensure the information from the BT Home Hub 5 is up to date. Return boolean if scanning successful. """ if not self.success_init: return False with self.lock: _LOGGER.info('Scanning') data = _get_homehub_data(self.url) if not data: _LOGGER.warning('Error scanning devices') return False self.last_results = data return True def _get_homehub_data(url): """Retrieve data from BT Home Hub 5 and return parsed result.""" try: response = requests.get(url, timeout=5) except requests.exceptions.Timeout: _LOGGER.exception("Connection to the router timed out") return if response.status_code == 200: return _parse_homehub_response(response.text) else: _LOGGER.error("Invalid response from Home Hub: %s", response) def _parse_homehub_response(data_str): """Parse the BT Home Hub 5 data format.""" root = ET.fromstring(data_str) dirty_json = root.find('known_device_list').get('value') # Normalise the JavaScript data to JSON. clean_json = unquote(dirty_json.replace('\'', '\"') .replace('{', '{\"') .replace(':\"', '\":\"') .replace('\",', '\",\"')) known_devices = [x for x in json.loads(clean_json) if x] devices = {} for device in known_devices: name = device.get('name') mac = device.get('mac') if _MAC_REGEX.match(mac) or ',' in mac: for mac_addr in mac.split(','): if _MAC_REGEX.match(mac_addr): devices[mac_addr] = name else: devices[mac] = name return devices
mit
dankcoin/dankcoin
qa/rpc-tests/bipdersig.py
1
3247
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

#
# Test the BIP66 changeover logic
#

from test_framework.test_framework import DankcoinTestFramework
from test_framework.util import *


class BIP66Test(DankcoinTestFramework):
    """Walk the chain through the BIP66 (strict DER) version-3 switchover.

    Node 1 mines version=2 blocks, node 2 mines version=3 blocks; once 950
    of the last 1000 blocks are version 3, version-2 blocks must be
    rejected.
    """

    def __init__(self):
        super().__init__()
        self.num_nodes = 3
        self.setup_clean_chain = False

    def setup_network(self):
        # Node 0 is the observer; node 1 is pinned to the old block version
        # (2) and node 2 to the new one (3).
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir, []))
        self.nodes.append(start_node(1, self.options.tmpdir, ["-blockversion=2"]))
        self.nodes.append(start_node(2, self.options.tmpdir, ["-blockversion=3"]))
        connect_nodes(self.nodes[1], 0)
        connect_nodes(self.nodes[2], 0)
        self.is_network_split = False
        self.sync_all()

    def _assert_height(self, height, message):
        """Sync all nodes, then fail with *message* unless node 0's chain
        tip sits at *height*. Factors out the sync/check boilerplate that
        was repeated after every mining step."""
        self.sync_all()
        if self.nodes[0].getblockcount() != height:
            raise AssertionError(message)

    def run_test(self):
        cnt = self.nodes[0].getblockcount()

        # Mine some old-version blocks
        self.nodes[1].generate(100)
        self._assert_height(cnt + 100, "Failed to mine 100 version=2 blocks")

        # Mine 750 new-version blocks
        for i in range(15):
            self.nodes[2].generate(50)
        self._assert_height(cnt + 850, "Failed to mine 750 version=3 blocks")

        # TODO: check that new DERSIG rules are not enforced

        # Mine 1 new-version block
        self.nodes[2].generate(1)
        self._assert_height(cnt + 851, "Failed to mine a version=3 blocks")

        # TODO: check that new DERSIG rules are enforced

        # Mine 198 new-version blocks
        for i in range(2):
            self.nodes[2].generate(99)
        self._assert_height(cnt + 1049, "Failed to mine 198 version=3 blocks")

        # Mine 1 old-version block; still accepted at this point.
        self.nodes[1].generate(1)
        self._assert_height(
            cnt + 1050,
            "Failed to mine a version=2 block after 949 version=3 blocks")

        # Mine 1 new-version block
        self.nodes[2].generate(1)
        self._assert_height(cnt + 1051, "Failed to mine a version=3 block")

        # Mine 1 old-version block; this must now be rejected outright.
        try:
            self.nodes[1].generate(1)
            raise AssertionError("Succeeded to mine a version=2 block after 950 version=3 blocks")
        except JSONRPCException:
            pass
        self._assert_height(
            cnt + 1051,
            "Accepted a version=2 block after 950 version=3 blocks")

        # Mine 1 new-version block
        self.nodes[2].generate(1)
        self._assert_height(cnt + 1052, "Failed to mine a version=3 block")


if __name__ == '__main__':
    BIP66Test().main()
mit
jeeftor/alfredToday
src/lib/ntlm3/U32.py
4
3800
# This file is part of 'NTLM Authorization Proxy Server' http://sourceforge.net/projects/ntlmaps/
# Copyright 2001 Dmitry A. Rozmanov <dima@xenon.spb.ru>
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/> or <http://www.gnu.org/licenses/lgpl.txt>.
from __future__ import division

# Integer types a U32 may be built from. Replaces six.integer_types so the
# module no longer needs the third-party `six` package.
try:
    _INT_TYPES = (int, long)  # noqa: F821 -- Python 2 long
except NameError:
    _INT_TYPES = (int,)

# Constant offset added to every stored value: it keeps self.v positive and
# above the 32-bit range; norm() masks it back off.
C = 0x1000000000


def norm(n):
    """Return the low 32 bits of *n*."""
    return n & 0xFFFFFFFF


class U32:
    """Unsigned 32-bit integer with C-style wrap-around arithmetic.

    All operators return new U32 instances. The payload is kept in
    ``self.v`` as ``C + <32-bit value>``; since every instance carries the
    same C offset, ``self.v`` can be compared directly.
    """

    # C + 32-bit payload (class-level default; instances overwrite it).
    v = 0

    def __init__(self, value=0):
        """Build from an int, or from a bytes/str object (first byte used)."""
        if not isinstance(value, _INT_TYPES):
            # Equivalent of six.byte2int(): indexing bytes yields an int on
            # Python 3 and a one-character str on Python 2.
            value = value[0]
            if not isinstance(value, _INT_TYPES):
                value = ord(value)
        self.v = C + norm(abs(int(value)))

    def set(self, value=0):
        """Re-initialise in place from an integer."""
        self.v = C + norm(abs(int(value)))

    def __repr__(self):
        return hex(norm(self.v))

    def __long__(self):
        return int(norm(self.v))

    def __int__(self):
        return int(norm(self.v))

    def __chr__(self):
        return chr(norm(self.v))

    def __add__(self, b):
        r = U32()
        r.v = C + norm(self.v + b.v)
        return r

    def __sub__(self, b):
        r = U32()
        if self.v < b.v:
            # Borrow: wrap around modulo 2**32.
            r.v = C + norm(0x100000000 - (b.v - self.v))
        else:
            r.v = C + norm(self.v - b.v)
        return r

    def __mul__(self, b):
        r = U32()
        r.v = C + norm(self.v * b.v)
        return r

    def __div__(self, b):
        # Python 2 classic-division hook; floor division of the payloads.
        r = U32()
        r.v = C + (norm(self.v) // norm(b.v))
        return r

    def __truediv__(self, b):
        # BUG FIX: this used float '/' division, so the stored value carried
        # a fractional part and any later bitwise op on it raised TypeError
        # on Python 3. Unsigned integer division is floor division.
        r = U32()
        r.v = C + (norm(self.v) // norm(b.v))
        return r

    def __mod__(self, b):
        r = U32()
        r.v = C + (norm(self.v) % norm(b.v))
        return r

    def __neg__(self):
        # BUG FIX: this used to return an unchanged copy (identical to
        # __pos__). Unary minus on an unsigned int is two's-complement
        # negation, i.e. 0 - x modulo 2**32.
        return U32(0) - self

    def __pos__(self):
        return U32(self.v)

    def __abs__(self):
        # Unsigned values are their own absolute value.
        return U32(self.v)

    def __invert__(self):
        r = U32()
        r.v = C + norm(~self.v)
        return r

    def __lshift__(self, b):
        r = U32()
        r.v = C + norm(self.v << b)
        return r

    def __rshift__(self, b):
        r = U32()
        r.v = C + (norm(self.v) >> b)
        return r

    def __and__(self, b):
        r = U32()
        r.v = C + norm(self.v & b.v)
        return r

    def __or__(self, b):
        r = U32()
        r.v = C + norm(self.v | b.v)
        return r

    def __xor__(self, b):
        r = U32()
        r.v = C + norm(self.v ^ b.v)
        return r

    def __not__(self):
        return U32(not norm(self.v))

    def truth(self):
        return norm(self.v)

    def __cmp__(self, b):
        # Python 2 fallback comparison; superseded by the rich comparisons.
        if norm(self.v) > norm(b.v):
            return 1
        elif norm(self.v) < norm(b.v):
            return -1
        else:
            return 0

    def __lt__(self, other):
        return self.v < other.v

    def __gt__(self, other):
        return self.v > other.v

    def __eq__(self, other):
        return self.v == other.v

    def __le__(self, other):
        return self.v <= other.v

    def __ge__(self, other):
        return self.v >= other.v

    def __ne__(self, other):
        return self.v != other.v

    def __bool__(self):
        # BUG FIX (Python 3): only __nonzero__ was defined, so U32(0) was
        # truthy under Python 3.
        return bool(norm(self.v))

    __nonzero__ = __bool__  # Python 2 name for the same hook
mit
tareqalayan/ansible
lib/ansible/modules/inventory/add_host.py
66
2701
# -*- mode: python -*-

# Copyright: Ansible Team
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

# Module metadata consumed by ansible-doc and the Ansible release tooling.
# This module has no Python body: add_host is implemented as an action
# plugin in Ansible core; this file only carries documentation.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['stableinterface'],
                    'supported_by': 'core'}

# Raw string so the literal backslash in "with\_" below survives untouched.
DOCUMENTATION = r'''
---
module: add_host
short_description: add a host (and alternatively a group) to the ansible-playbook in-memory inventory
description:
- Use variables to create new hosts and groups in inventory for use in later plays of the same playbook.
  Takes variables so you can define the new hosts more fully.
- This module is also supported for Windows targets.
version_added: "0.9"
options:
  name:
    aliases: [ 'hostname', 'host' ]
    description:
    - The hostname/ip of the host to add to the inventory, can include a colon and a port number.
    required: true
  groups:
    aliases: [ 'groupname', 'group' ]
    description:
    - The groups to add the hostname to, comma separated.
    required: false
notes:
- This module bypasses the play host loop and only runs once for all the hosts in the play, if you need it
  to iterate use a with\_ directive.
- Windows targets are supported by this module.
- The alias 'host' of the parameter 'name' is only available on >=2.4
- Since Ansible version 2.4, the ``inventory_dir`` variable is now set to ``None`` instead of the 'global inventory source',
  because you can now have multiple sources. An example was added that shows how to partially restore the previous behaviour.
author:
- "Ansible Core Team"
- "Seth Vidal"
'''

EXAMPLES = '''
- name: add host to group 'just_created' with variable foo=42
  add_host:
    name: "{{ ip_from_ec2 }}"
    groups: just_created
    foo: 42

- name: add host to multiple groups
  add_host:
    hostname: "{{ new_ip }}"
    groups:
    - group1
    - group2

- name: add a host with a non-standard port local to your machines
  add_host:
    name: "{{ new_ip }}:{{ new_port }}"

- name: add a host alias that we reach through a tunnel (Ansible <= 1.9)
  add_host:
    hostname: "{{ new_ip }}"
    ansible_ssh_host: "{{ inventory_hostname }}"
    ansible_ssh_port: "{{ new_port }}"

- name: add a host alias that we reach through a tunnel (Ansible >= 2.0)
  add_host:
    hostname: "{{ new_ip }}"
    ansible_host: "{{ inventory_hostname }}"
    ansible_port: "{{ new_port }}"

- name: Ensure inventory vars are set to the same value as the inventory_hostname has (close to pre 2.4 behaviour)
  add_host:
    hostname: charlie
    inventory_dir: "{{inventory_dir}}"
'''
gpl-3.0
pshen/ansible
lib/ansible/modules/packaging/language/maven_artifact.py
63
16002
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2014, Chris Schmidt <chris.schmidt () contrastsecurity.com>
#
# Built using https://github.com/hamnis/useful-scripts/blob/master/python/download-maven-artifact
# as a reference and starting point.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software.  If not, see <http://www.gnu.org/licenses/>.

ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: maven_artifact
short_description: Downloads an Artifact from a Maven Repository
version_added: "2.0"
description:
    - Downloads an artifact from a maven repository given the maven coordinates provided to the module. Can retrieve
    - snapshots or release versions of the artifact and will resolve the latest available version if one is not
    - available.
author: "Chris Schmidt (@chrisisbeef)"
requirements:
    - "python >= 2.6"
    - lxml
    - boto if using a S3 repository (s3://...)
options:
    group_id:
        description:
            - The Maven groupId coordinate
        required: true
    artifact_id:
        description:
            - The maven artifactId coordinate
        required: true
    version:
        description:
            - The maven version coordinate
        required: false
        default: latest
    classifier:
        description:
            - The maven classifier coordinate
        required: false
        default: null
    extension:
        description:
            - The maven type/extension coordinate
        required: false
        default: jar
    repository_url:
        description:
            - The URL of the Maven Repository to download from.
            - Use s3://... if the repository is hosted on Amazon S3, added in version 2.2.
        required: false
        default: http://repo1.maven.org/maven2
    username:
        description:
            - The username to authenticate as to the Maven Repository. Use AWS secret key of the repository is hosted on S3
        required: false
        default: null
        aliases: [ "aws_secret_key" ]
    password:
        description:
            - The password to authenticate with to the Maven Repository. Use AWS secret access key of the repository is hosted on S3
        required: false
        default: null
        aliases: [ "aws_secret_access_key" ]
    dest:
        description:
            - The path where the artifact should be written to
        required: true
        default: false
    state:
        description:
            - The desired state of the artifact
        required: true
        default: present
        choices: [present,absent]
    timeout:
        description:
            - Specifies a timeout in seconds for the connection attempt
        required: false
        default: 10
        version_added: "2.3"
    validate_certs:
        description:
            - If C(no), SSL certificates will not be validated. This should only be
              set to C(no) when no other option exists.
        required: false
        default: 'yes'
        choices: ['yes', 'no']
        version_added: "1.9.3"
'''

EXAMPLES = '''
# Download the latest version of the JUnit framework artifact from Maven Central
- maven_artifact:
    group_id: junit
    artifact_id: junit
    dest: /tmp/junit-latest.jar

# Download JUnit 4.11 from Maven Central
- maven_artifact:
    group_id: junit
    artifact_id: junit
    version: 4.11
    dest: /tmp/junit-4.11.jar

# Download an artifact from a private repository requiring authentication
- maven_artifact:
    group_id: com.company
    artifact_id: library-name
    repository_url: 'https://repo.company.com/maven'
    username: user
    password: pass
    dest: /tmp/library-name-latest.jar

# Download a WAR File to the Tomcat webapps directory to be deployed
- maven_artifact:
    group_id: com.company
    artifact_id: web-app
    extension: war
    repository_url: 'https://repo.company.com/maven'
    dest: /var/lib/tomcat7/webapps/web-app.war
'''

from lxml import etree
import os
import hashlib
import sys
import posixpath

# BUGFIX: the module previously did ``import urlparse`` and then called
# ``urlparse(url)`` — calling the *module* object raises TypeError on the
# very first request.  Import the function itself (Python 2/3 compatible).
try:
    from urllib.parse import urlparse
except ImportError:
    from urlparse import urlparse

from ansible.module_utils.basic import *
from ansible.module_utils.urls import *

try:
    import boto3
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False


class Artifact(object):
    """Value object for a set of Maven coordinates (GAV + classifier/extension)."""

    def __init__(self, group_id, artifact_id, version, classifier=None, extension='jar'):
        if not group_id:
            raise ValueError("group_id must be set")
        if not artifact_id:
            raise ValueError("artifact_id must be set")

        self.group_id = group_id
        self.artifact_id = artifact_id
        self.version = version
        self.classifier = classifier

        # Fall back to the Maven default packaging when extension is empty/None.
        if not extension:
            self.extension = "jar"
        else:
            self.extension = extension

    def is_snapshot(self):
        """Return True for -SNAPSHOT versions (resolved via maven-metadata.xml)."""
        return self.version and self.version.endswith("SNAPSHOT")

    def path(self, with_version=True):
        """Repository-relative path for this artifact (group dots become slashes)."""
        base = posixpath.join(self.group_id.replace(".", "/"), self.artifact_id)
        if with_version and self.version:
            return posixpath.join(base, self.version)
        return base

    def _generate_filename(self):
        # Standard Maven naming: artifactId[-classifier].extension
        if not self.classifier:
            return self.artifact_id + "." + self.extension
        return self.artifact_id + "-" + self.classifier + "." + self.extension

    def get_filename(self, filename=None):
        """Return a destination filename, generating one inside *filename* if it is a directory."""
        if not filename:
            filename = self._generate_filename()
        elif os.path.isdir(filename):
            filename = os.path.join(filename, self._generate_filename())
        return filename

    def __str__(self):
        # Mirrors Maven's canonical coordinate string, omitting defaulted parts.
        if self.classifier:
            return "%s:%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.classifier, self.version)
        elif self.extension != "jar":
            return "%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.version)
        else:
            return "%s:%s:%s" % (self.group_id, self.artifact_id, self.version)

    @staticmethod
    def parse(input):
        """Parse 'group:artifact[:extension[:classifier]]:version' into an Artifact, or None."""
        parts = input.split(":")
        if len(parts) >= 3:
            g = parts[0]
            a = parts[1]
            v = parts[len(parts) - 1]
            t = None
            c = None
            if len(parts) == 4:
                t = parts[2]
            if len(parts) == 5:
                t = parts[2]
                c = parts[3]
            return Artifact(g, a, v, c, t)
        else:
            return None


class MavenDownloader:
    """Resolves Maven coordinates against a repository and downloads artifacts."""

    def __init__(self, module, base="http://repo1.maven.org/maven2"):
        self.module = module
        if base.endswith("/"):
            base = base.rstrip("/")
        self.base = base
        self.user_agent = "Maven Artifact Downloader/1.0"

    def _find_latest_version_available(self, artifact):
        """Read maven-metadata.xml and return the last listed version, if any."""
        path = "/%s/maven-metadata.xml" % (artifact.path(False))
        xml = self._request(self.base + path,
                            "Failed to download maven-metadata.xml",
                            lambda r: etree.parse(r))
        v = xml.xpath("/metadata/versioning/versions/version[last()]/text()")
        if v:
            return v[0]

    def find_uri_for_artifact(self, artifact):
        """Resolve the concrete download URI, expanding 'latest' and SNAPSHOT versions."""
        if artifact.version == "latest":
            artifact.version = self._find_latest_version_available(artifact)

        if artifact.is_snapshot():
            # Snapshots need the timestamped build coordinates from metadata.
            path = "/%s/maven-metadata.xml" % (artifact.path())
            xml = self._request(self.base + path,
                                "Failed to download maven-metadata.xml",
                                lambda r: etree.parse(r))
            timestamp = xml.xpath("/metadata/versioning/snapshot/timestamp/text()")[0]
            buildNumber = xml.xpath("/metadata/versioning/snapshot/buildNumber/text()")[0]
            # Prefer an exact classifier+extension match when one is listed.
            for snapshotArtifact in xml.xpath("/metadata/versioning/snapshotVersions/snapshotVersion"):
                if (len(snapshotArtifact.xpath("classifier/text()")) > 0 and
                        snapshotArtifact.xpath("classifier/text()")[0] == artifact.classifier and
                        len(snapshotArtifact.xpath("extension/text()")) > 0 and
                        snapshotArtifact.xpath("extension/text()")[0] == artifact.extension):
                    return self._uri_for_artifact(artifact, snapshotArtifact.xpath("value/text()")[0])
            return self._uri_for_artifact(artifact, artifact.version.replace("SNAPSHOT", timestamp + "-" + buildNumber))

        return self._uri_for_artifact(artifact, artifact.version)

    def _uri_for_artifact(self, artifact, version=None):
        if artifact.is_snapshot() and not version:
            raise ValueError("Expected uniqueversion for snapshot artifact " + str(artifact))
        elif not artifact.is_snapshot():
            version = artifact.version
        if artifact.classifier:
            return posixpath.join(self.base, artifact.path(),
                                  artifact.artifact_id + "-" + version + "-" + artifact.classifier + "." + artifact.extension)

        return posixpath.join(self.base, artifact.path(),
                              artifact.artifact_id + "-" + version + "." + artifact.extension)

    def _request(self, url, failmsg, f):
        """Fetch *url* (pre-signing s3:// URLs via boto3) and apply *f* to the response."""
        url_to_use = url
        parsed_url = urlparse(url)
        if parsed_url.scheme == 's3':
            # BUGFIX: removed a redundant second urlparse(url) call here.
            bucket_name = parsed_url.netloc
            key_name = parsed_url.path[1:]
            client = boto3.client('s3',
                                  aws_access_key_id=self.module.params.get('username', ''),
                                  aws_secret_access_key=self.module.params.get('password', ''))
            url_to_use = client.generate_presigned_url('get_object',
                                                       Params={'Bucket': bucket_name, 'Key': key_name},
                                                       ExpiresIn=10)

        req_timeout = self.module.params.get('timeout')

        # Hack to add parameters in the way that fetch_url expects
        self.module.params['url_username'] = self.module.params.get('username', '')
        self.module.params['url_password'] = self.module.params.get('password', '')
        self.module.params['http_agent'] = self.module.params.get('user_agent', None)

        response, info = fetch_url(self.module, url_to_use, timeout=req_timeout)
        if info['status'] != 200:
            # BUGFIX: the message previously read "...because of <msg>for URL..."
            # (missing space).
            raise ValueError(failmsg + " because of " + info['msg'] + " for URL " + url_to_use)
        else:
            return f(response)

    def download(self, artifact, filename=None):
        """Download *artifact* to *filename*; skip when the local MD5 already matches."""
        filename = artifact.get_filename(filename)
        if not artifact.version or artifact.version == "latest":
            artifact = Artifact(artifact.group_id, artifact.artifact_id,
                                self._find_latest_version_available(artifact),
                                artifact.classifier, artifact.extension)

        url = self.find_uri_for_artifact(artifact)
        if not self.verify_md5(filename, url + ".md5"):
            response = self._request(url, "Failed to download artifact " + str(artifact), lambda r: r)
            if response:
                # BUGFIX: artifacts are binary; write in 'wb' (was 'w', which
                # corrupts jars on Windows and fails with bytes on Python 3).
                # The with-block also guarantees the handle is closed on error.
                with open(filename, 'wb') as f:
                    self._write_chunks(response, f, report_hook=self.chunk_report)
                return True
            else:
                return False
        else:
            return True

    def chunk_report(self, bytes_so_far, chunk_size, total_size):
        """Print a carriage-return progress line while downloading."""
        percent = float(bytes_so_far) / total_size
        percent = round(percent * 100, 2)
        sys.stdout.write("Downloaded %d of %d bytes (%0.2f%%)\r" %
                         (bytes_so_far, total_size, percent))

        if bytes_so_far >= total_size:
            sys.stdout.write('\n')

    def _write_chunks(self, response, file, chunk_size=8192, report_hook=None):
        """Stream *response* into *file* in chunks; returns bytes written."""
        # NOTE(review): getheader() is the Python 2 API; assumes a
        # Content-Length header is always present — TODO confirm.
        total_size = response.info().getheader('Content-Length').strip()
        total_size = int(total_size)
        bytes_so_far = 0

        while 1:
            chunk = response.read(chunk_size)
            bytes_so_far += len(chunk)

            if not chunk:
                break

            file.write(chunk)
            if report_hook:
                report_hook(bytes_so_far, chunk_size, total_size)

        return bytes_so_far

    def verify_md5(self, file, remote_md5):
        """Return True when the local file exists and matches the repository's .md5."""
        if not os.path.exists(file):
            return False
        else:
            local_md5 = self._local_md5(file)
            remote = self._request(remote_md5, "Failed to download MD5", lambda r: r.read())
            return local_md5 == remote

    def _local_md5(self, file):
        """MD5 hex digest of *file*, read in 8 KiB chunks."""
        md5 = hashlib.md5()
        # BUGFIX: sentinel must be b'' for a file opened in 'rb'
        # (the old '' sentinel never matches under Python 3, looping forever);
        # b'' == '' on Python 2, so this is backward-compatible.
        with open(file, 'rb') as f:
            for chunk in iter(lambda: f.read(8192), b''):
                md5.update(chunk)
        return md5.hexdigest()


def main():
    module = AnsibleModule(
        argument_spec=dict(
            group_id=dict(default=None),
            artifact_id=dict(default=None),
            version=dict(default="latest"),
            classifier=dict(default=None),
            extension=dict(default='jar'),
            repository_url=dict(default=None),
            username=dict(default=None, aliases=['aws_secret_key']),
            password=dict(default=None, no_log=True, aliases=['aws_secret_access_key']),
            state=dict(default="present", choices=["present", "absent"]),  # TODO - Implement a "latest" state
            timeout=dict(default=10, type='int'),
            dest=dict(type="path", default=None),
            validate_certs=dict(required=False, default=True, type='bool'),
        )
    )

    repository_url = module.params["repository_url"]
    if not repository_url:
        repository_url = "http://repo1.maven.org/maven2"
    try:
        parsed_url = urlparse(repository_url)
    except AttributeError as e:
        module.fail_json(msg='url parsing went wrong %s' % e)

    if parsed_url.scheme == 's3' and not HAS_BOTO:
        module.fail_json(msg='boto3 required for this module, when using s3:// repository URLs')

    group_id = module.params["group_id"]
    artifact_id = module.params["artifact_id"]
    version = module.params["version"]
    classifier = module.params["classifier"]
    extension = module.params["extension"]
    repository_username = module.params["username"]
    repository_password = module.params["password"]
    state = module.params["state"]
    dest = module.params["dest"]

    downloader = MavenDownloader(module, repository_url)

    try:
        artifact = Artifact(group_id, artifact_id, version, classifier, extension)
    except ValueError as e:
        module.fail_json(msg=e.args[0])

    prev_state = "absent"
    if os.path.isdir(dest):
        # NOTE(review): this generated name ignores the classifier; presumably
        # intentional for directory destinations — TODO confirm.
        dest = posixpath.join(dest, artifact_id + "-" + version + "." + extension)
    if os.path.lexists(dest) and downloader.verify_md5(dest, downloader.find_uri_for_artifact(artifact) + '.md5'):
        prev_state = "present"
    else:
        path = os.path.dirname(dest)
        if not os.path.exists(path):
            os.makedirs(path)

    if prev_state == "present":
        module.exit_json(dest=dest, state=state, changed=False)

    try:
        if downloader.download(artifact, dest):
            module.exit_json(state=state, dest=dest, group_id=group_id, artifact_id=artifact_id,
                             version=version, classifier=classifier, extension=extension,
                             repository_url=repository_url, changed=True)
        else:
            module.fail_json(msg="Unable to download the artifact")
    except ValueError as e:
        module.fail_json(msg=e.args[0])


if __name__ == '__main__':
    main()
gpl-3.0
qedsoftware/commcare-hq
custom/apps/crs_reports/fields.py
2
1360
from corehq.apps.reports.dont_use.fields import ReportSelectField
from corehq.apps.reports.filters.users import SelectCaseOwnerFilter
from django.utils.translation import ugettext_noop
from django.utils.translation import ugettext as _
from corehq.apps.es import CaseES


def _get_blocks(domain):
    """Return the distinct block names seen on pregnant_mother/baby cases in *domain*."""
    es_query = CaseES('report_cases')
    es_query = es_query.domain(domain)
    es_query = es_query.case_type(['pregnant_mother', 'baby'])
    es_query = es_query.size(0)  # aggregation only; no case hits needed
    es_query = es_query.terms_aggregation('block.#value', 'block')
    return es_query.run().aggregations.block.keys


class SelectBlockField(ReportSelectField):
    """Report dropdown listing every block present in the domain's case data."""
    slug = "block"
    name = ugettext_noop("Name of the Block")
    cssId = "opened_closed"
    cssClasses = "span3"

    def update_params(self):
        # Preserve the currently selected block across page loads.
        selected_block = self.request.GET.get(self.slug, '')
        self.selected = selected_block
        self.options = [
            {'val': block_name, 'text': "%s" % block_name}
            for block_name in _get_blocks(self.domain)
        ]
        self.default_option = _("Select Block")


class SelectSubCenterField(ReportSelectField):
    """Static sub-center dropdown; options are populated elsewhere (currently empty)."""
    slug = "sub_center"
    name = ugettext_noop("Sub Center")
    cssId = "opened_closed"
    cssClasses = "span3"
    default_option = "Select Sub Center"
    options = []


class SelectASHAField(SelectCaseOwnerFilter):
    """Case-owner filter relabelled for selecting an ASHA worker."""
    name = ugettext_noop("ASHA")
    default_option = ugettext_noop("Type ASHA name")
bsd-3-clause
shssoichiro/servo
tests/wpt/css-tests/css-text-decor-3_dev/xhtml1print/support/generate-text-emphasis-style-property-tests.py
841
3434
#!/usr/bin/env python
# - * - coding: UTF-8 - * -

"""
Generator for text-emphasis-style-property-011 ~ 020, covering every
text-emphasis-style keyword value (except ``none`` and ``<string>``)
in horizontal writing mode.  Writes the test and reference HTML files
into the current directory and prints a Mozilla reftest.list manifest
for them on stdout.
"""

from __future__ import unicode_literals

TEST_FILE = 'text-emphasis-style-property-{:03}{}.html'
TEST_TEMPLATE = '''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Test: text-emphasis-style: {title}</title>
<link rel="author" title="Xidorn Quan" href="https://www.upsuper.org">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<link rel="help" href="https://drafts.csswg.org/css-text-decor-3/#text-emphasis-style-property">
<meta name="assert" content="'text-emphasis-style: {value}' produces {code} as emphasis marks.">
<link rel="match" href="text-emphasis-style-property-{index:03}-ref.html">
<p>Pass if there is a '{char}' above every character below:</p>
<div style="line-height: 5; text-emphasis-style: {value}">試験テスト</div>
'''

REF_FILE = 'text-emphasis-style-property-{:03}-ref.html'
REF_TEMPLATE = '''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Reference: text-emphasis-style: {0}</title>
<link rel="author" title="Xidorn Quan" href="https://www.upsuper.org">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<style> rt {{ font-variant-east-asian: inherit; }} </style>
<p>Pass if there is a '{1}' above every character below:</p>
<div style="line-height: 5;"><ruby>試<rt>{1}</rt>験<rt>{1}</rt>テ<rt>{1}</rt>ス<rt>{1}</rt>ト<rt>{1}</rt></ruby></div>
'''

# (shape keyword, filled code point, open code point)
DATA_SET = [
    ('dot', 0x2022, 0x25e6),
    ('circle', 0x25cf, 0x25cb),
    ('double-circle', 0x25c9, 0x25ce),
    ('triangle', 0x25b2, 0x25b3),
    ('sesame', 0xfe45, 0xfe46),
]

SUFFIXES = ['', 'a', 'b', 'c', 'd', 'e']


def get_html_entity(code):
    """Return the numeric HTML character reference for *code* (zero-padded hex)."""
    return '&#x{:04X};'.format(code)


def write_file(filename, content):
    """Write *content* to *filename*, encoded as UTF-8."""
    with open(filename, 'wb') as handle:
        handle.write(content.encode('UTF-8'))


def write_test_file(idx, suffix, style, code, name=None):
    """Emit one test file and print its reftest manifest line."""
    display_name = name if name else style
    filename = TEST_FILE.format(idx, suffix)
    content = TEST_TEMPLATE.format(index=idx,
                                   value=style,
                                   char=get_html_entity(code),
                                   code='U+{:04X}'.format(code),
                                   title=display_name)
    write_file(filename, content)
    print("== {} {}".format(filename, REF_FILE.format(idx)))


# Test numbering starts at 011; write_files pre-increments before use.
idx = 10


def write_files(style, code):
    """Emit the reference plus every test variant for one fill/shape pair."""
    global idx
    idx += 1
    fill, shape = style
    basic_style = "{} {}".format(fill, shape)
    write_file(REF_FILE.format(idx),
               REF_TEMPLATE.format(basic_style, get_html_entity(code)))
    suffixes = iter(SUFFIXES)
    # Canonical order, then reversed keyword order (must render identically).
    write_test_file(idx, next(suffixes), basic_style, code)
    write_test_file(idx, next(suffixes), "{} {}".format(shape, fill), code)
    if fill == 'filled':
        # 'filled' is the default fill, so the shape keyword alone matches.
        write_test_file(idx, next(suffixes), shape, code)
    if shape == 'circle':
        # 'circle' is the default shape in horizontal mode.
        write_test_file(idx, next(suffixes), fill, code, fill + ', horizontal')


print("# START tests from {}".format(__file__))
# All 'filled' variants first (column 1 of DATA_SET), then all 'open'
# variants (column 2), matching the assigned test numbering.
for fill_index, fill_keyword in enumerate(('filled', 'open')):
    for entry in DATA_SET:
        write_files((fill_keyword, entry[0]), entry[1 + fill_index])
print("# END tests from {}".format(__file__))
mpl-2.0
sayanchowdhury/fedmsg_meta_fedora_infrastructure
fedmsg_meta_fedora_infrastructure/autocloud.py
1
2497
# This file is part of fedmsg.
# Copyright (C) 2015 Sayan Chowdhury.
#
# fedmsg is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# fedmsg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with fedmsg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors:  Sayan Chowdhury <sayanchowdhury@fedoraproject.org>
from fedmsg_meta_fedora_infrastructure import BaseProcessor


class AutoCloudProcessor(BaseProcessor):
    """fedmsg metadata processor for autocloud (Fedora Cloud image testing) messages."""

    __name__ = "autocloud"
    __description__ = "Automated Fedora Cloud Image Testing service"
    __link__ = "https://github.com/kushaldas/autocloud"
    __docs__ = "https://github.com/kushaldas/autocloud"
    __icon__ = "https://apps.fedoraproject.org/img/icons/fedimg.png"
    __obj__ = "Cloud Image Test"

    def subtitle(self, msg, **config):
        """Return a human-readable one-liner describing the image-test status.

        NOTE(review): if the topic matches but ``status`` is none of the
        five known values, ``tmpl`` is unbound and this raises
        UnboundLocalError — presumably autocloud only ever emits these
        statuses; confirm before hardening.
        """
        image_name = msg['msg']['image_name']
        status = msg['msg']['status']
        if 'autocloud.image' in msg['topic']:
            if status == "queued":
                tmpl = self._("{image_name} is {status} for testing")
            if status == "running":
                tmpl = self._("The tests for the {image_name} has "
                              "started {status}")
            if status == "aborted":
                tmpl = self._("The tests for the {image_name} has "
                              "been {status}")
            if status == "failed":
                tmpl = self._("The tests for the {image_name} {status}")
            if status == "success":
                tmpl = self._("The tests for {image_name} was {status}")
            return tmpl.format(image_name=image_name, status=status)

    # BUGFIX: ``link`` was defined twice; the first definition (returning
    # the static ``self.__link__``) was dead code, silently shadowed by
    # this one.  The surviving behavior — linking to the tested image —
    # is unchanged.
    def link(self, msg, **config):
        """Link to the image under test."""
        image_url = msg['msg']['image_url']
        return image_url

    def secondary_icon(self, msg, **config):
        """Use the service icon as the secondary icon as well."""
        return self.__icon__

    def objects(self, msg, **config):
        """Return the affected object path(s), keyed by test status."""
        status = msg['msg']['status']
        return set(['autocloud/image/' + status])
lgpl-2.1
chenxulong/quanteco
quantecon/quad.py
7
30082
""" Filename: quad.py Authors: Chase Coleman, Spencer Lyon Date: 2014-07-01 Defining various quadrature routines. Based on the quadrature routines found in the CompEcon toolbox by Miranda and Fackler. References ---------- Miranda, Mario J, and Paul L Fackler. Applied Computational Economics and Finance, MIT Press, 2002. """ from __future__ import division import math import numpy as np import scipy.linalg as la from scipy.special import gammaln import sympy as sym from .ce_util import ckron, gridmake __all__ = ['qnwcheb', 'qnwequi', 'qnwlege', 'qnwnorm', 'qnwlogn', 'qnwsimp', 'qnwtrap', 'qnwunif', 'quadrect', 'qnwbeta', 'qnwgamma'] # ------------------ # # Exported Functions # # ------------------ # def qnwcheb(n, a=1, b=1): """ Computes multivariate Guass-Checbychev quadrature nodes and weights. Parameters ---------- n : int or array_like(float) A length-d iterable of the number of nodes in each dimension a : scalar or array_like(float) A length-d iterable of lower endpoints. If a scalar is given, that constant is repeated d times, where d is the number of dimensions b : scalar or array_like(float) A length-d iterable of upper endpoints. If a scalar is given, that constant is repeated d times, where d is the number of dimensions Returns ------- nodes : np.ndarray(dtype=float) Quadrature nodes weights : np.ndarray(dtype=float) Weights for quadrature nodes Notes ----- Based of original function ``qnwcheb`` in CompEcon toolbox by Miranda and Fackler References ---------- Miranda, Mario J, and Paul L Fackler. Applied Computational Economics and Finance, MIT Press, 2002. """ return _make_multidim_func(_qnwcheb1, n, a, b) def qnwequi(n, a, b, kind="N", equidist_pp=None): """ Generates equidistributed sequences with property that averages value of integrable function evaluated over the sequence converges to the integral as n goes to infinity. Parameters ---------- n : int Number of sequence points a : scalar or array_like(float) A length-d iterable of lower endpoints. 
If a scalar is given, that constant is repeated d times, where d is the number of dimensions b : scalar or array_like(float) A length-d iterable of upper endpoints. If a scalar is given, that constant is repeated d times, where d is the number of dimensions kind : string, optional(default="N") One of the following: - N - Neiderreiter (default) - W - Weyl - H - Haber - R - pseudo Random equidist_pp : array_like, optional(default=None) TODO: I don't know what this does Returns ------- nodes : np.ndarray(dtype=float) Quadrature nodes weights : np.ndarray(dtype=float) Weights for quadrature nodes Notes ----- Based of original function ``qnwequi`` in CompEcon toolbox by Miranda and Fackler References ---------- Miranda, Mario J, and Paul L Fackler. Applied Computational Economics and Finance, MIT Press, 2002. """ if equidist_pp is None: equidist_pp = np.sqrt(np.array(list(sym.primerange(0, 7920)))) n, a, b = list(map(np.atleast_1d, list(map(np.asarray, [n, a, b])))) d = max(list(map(len, [n, a, b]))) n = np.prod(n) if a.size == 1: a = np.repeat(a, d) if b.size == 1: b = np.repeat(b, d) i = np.arange(1, n + 1) if kind.upper() == "N": # Neiderreiter j = 2.0 ** (np.arange(1, d+1) / (d+1)) nodes = np.outer(i, j) nodes = (nodes - np.fix(nodes)).squeeze() elif kind.upper() == "W": # Weyl j = equidist_pp[:d] nodes = np.outer(i, j) nodes = (nodes - np.fix(nodes)).squeeze() elif kind.upper() == "H": # Haber j = equidist_pp[:d] nodes = np.outer(i * (i+1) / 2, j) nodes = (nodes - np.fix(nodes)).squeeze() elif kind.upper() == "R": # pseudo-random nodes = np.random.rand(n, d).squeeze() else: raise ValueError("Unknown sequence requested") # compute nodes and weights r = b - a nodes = a + nodes * r weights = (np.prod(r) / n) * np.ones(n) return nodes, weights def qnwlege(n, a, b): """ Computes multivariate Guass-Legendre quadrature nodes and weights. 
Parameters ---------- n : int or array_like(float) A length-d iterable of the number of nodes in each dimension a : scalar or array_like(float) A length-d iterable of lower endpoints. If a scalar is given, that constant is repeated d times, where d is the number of dimensions b : scalar or array_like(float) A length-d iterable of upper endpoints. If a scalar is given, that constant is repeated d times, where d is the number of dimensions Returns ------- nodes : np.ndarray(dtype=float) Quadrature nodes weights : np.ndarray(dtype=float) Weights for quadrature nodes Notes ----- Based of original function ``qnwlege`` in CompEcon toolbox by Miranda and Fackler References ---------- Miranda, Mario J, and Paul L Fackler. Applied Computational Economics and Finance, MIT Press, 2002. """ return _make_multidim_func(_qnwlege1, n, a, b) def qnwnorm(n, mu=None, sig2=None, usesqrtm=False): """ Computes nodes and weights for multivariate normal distribution Parameters ---------- n : int or array_like(float) A length-d iterable of the number of nodes in each dimension mu : scalar or array_like(float), optional(default=zeros(d)) The means of each dimension of the random variable. If a scalar is given, that constant is repeated d times, where d is the number of dimensions sig2 : array_like(float), optional(default=eye(d)) A d x d array representing the variance-covariance matrix of the multivariate normal distribution. Returns ------- nodes : np.ndarray(dtype=float) Quadrature nodes weights : np.ndarray(dtype=float) Weights for quadrature nodes Notes ----- Based of original function ``qnwnorm`` in CompEcon toolbox by Miranda and Fackler References ---------- Miranda, Mario J, and Paul L Fackler. Applied Computational Economics and Finance, MIT Press, 2002. 
""" n = np.asarray(n) d = n.size if mu is None: mu = np.zeros(d) else: mu = np.asarray(mu) if sig2 is None: sig2 = np.eye(d) else: sig2 = np.asarray(sig2).reshape(d, d) if all([x.size == 1 for x in [n, mu, sig2]]): nodes, weights = _qnwnorm1(n) else: nodes = [] weights = [] for i in range(d): _1d = _qnwnorm1(n[i]) nodes.append(_1d[0]) weights.append(_1d[1]) nodes = gridmake(*nodes) weights = ckron(*weights[::-1]) if usesqrtm: new_sig2 = la.sqrtm(sig2) else: # cholesky new_sig2 = la.cholesky(sig2) if d > 1: nodes = nodes.dot(new_sig2) + mu # Broadcast ok else: # nodes.dot(sig) will not be aligned in scalar case. nodes = nodes * new_sig2 + mu return nodes.squeeze(), weights def qnwlogn(n, mu=None, sig2=None): """ Computes nodes and weights for multivariate lognormal distribution Parameters ---------- n : int or array_like(float) A length-d iterable of the number of nodes in each dimension mu : scalar or array_like(float), optional(default=zeros(d)) The means of each dimension of the random variable. If a scalar is given, that constant is repeated d times, where d is the number of dimensions sig2 : array_like(float), optional(default=eye(d)) A d x d array representing the variance-covariance matrix of the multivariate normal distribution. Returns ------- nodes : np.ndarray(dtype=float) Quadrature nodes weights : np.ndarray(dtype=float) Weights for quadrature nodes Notes ----- Based of original function ``qnwlogn`` in CompEcon toolbox by Miranda and Fackler References ---------- Miranda, Mario J, and Paul L Fackler. Applied Computational Economics and Finance, MIT Press, 2002. """ nodes, weights = qnwnorm(n, mu, sig2) return np.exp(nodes), weights def qnwsimp(n, a, b): """ Computes multivariate Simpson quadrature nodes and weights. Parameters ---------- n : int or array_like(float) A length-d iterable of the number of nodes in each dimension a : scalar or array_like(float) A length-d iterable of lower endpoints. 
If a scalar is given, that constant is repeated d times, where d is the number of dimensions b : scalar or array_like(float) A length-d iterable of upper endpoints. If a scalar is given, that constant is repeated d times, where d is the number of dimensions Returns ------- nodes : np.ndarray(dtype=float) Quadrature nodes weights : np.ndarray(dtype=float) Weights for quadrature nodes Notes ----- Based of original function ``qnwsimp`` in CompEcon toolbox by Miranda and Fackler References ---------- Miranda, Mario J, and Paul L Fackler. Applied Computational Economics and Finance, MIT Press, 2002. """ return _make_multidim_func(_qnwsimp1, n, a, b) def qnwtrap(n, a, b): """ Computes multivariate trapezoid rule quadrature nodes and weights. Parameters ---------- n : int or array_like(float) A length-d iterable of the number of nodes in each dimension a : scalar or array_like(float) A length-d iterable of lower endpoints. If a scalar is given, that constant is repeated d times, where d is the number of dimensions b : scalar or array_like(float) A length-d iterable of upper endpoints. If a scalar is given, that constant is repeated d times, where d is the number of dimensions Returns ------- nodes : np.ndarray(dtype=float) Quadrature nodes weights : np.ndarray(dtype=float) Weights for quadrature nodes Notes ----- Based of original function ``qnwtrap`` in CompEcon toolbox by Miranda and Fackler References ---------- Miranda, Mario J, and Paul L Fackler. Applied Computational Economics and Finance, MIT Press, 2002. """ return _make_multidim_func(_qnwtrap1, n, a, b) def qnwunif(n, a, b): """ Computes quadrature nodes and weights for multivariate uniform distribution Parameters ---------- n : int or array_like(float) A length-d iterable of the number of nodes in each dimension a : scalar or array_like(float) A length-d iterable of lower endpoints. 
If a scalar is given, that constant is repeated d times, where d is the number of dimensions b : scalar or array_like(float) A length-d iterable of upper endpoints. If a scalar is given, that constant is repeated d times, where d is the number of dimensions Returns ------- nodes : np.ndarray(dtype=float) Quadrature nodes weights : np.ndarray(dtype=float) Weights for quadrature nodes Notes ----- Based of original function ``qnwunif`` in CompEcon toolbox by Miranda and Fackler References ---------- Miranda, Mario J, and Paul L Fackler. Applied Computational Economics and Finance, MIT Press, 2002. """ n, a, b = list(map(np.asarray, [n, a, b])) nodes, weights = qnwlege(n, a, b) weights = weights / np.prod(b - a) return nodes, weights def quadrect(f, n, a, b, kind='lege', *args, **kwargs): """ Integrate the d-dimensional function f on a rectangle with lower and upper bound for dimension i defined by a[i] and b[i], respectively; using n[i] points. Parameters ---------- f : function The function to integrate over. This should be a function that accepts as its first argument a matrix representing points along each dimension (each dimension is a column). Other arguments that need to be passed to the function are caught by *args and **kwargs n : int or array_like(float) A length-d iterable of the number of nodes in each dimension a : scalar or array_like(float) A length-d iterable of lower endpoints. If a scalar is given, that constant is repeated d times, where d is the number of dimensions b : scalar or array_like(float) A length-d iterable of upper endpoints. If a scalar is given, that constant is repeated d times, where d is the number of dimensions kind : string, optional(default='lege') Specifies which type of integration to perform. 
Valid values are: lege - Gauss-Legendre cheb - Gauss-Chebyshev trap - trapezoid rule simp - Simpson rule N - Neiderreiter equidistributed sequence W - Weyl equidistributed sequence H - Haber equidistributed sequence R - Monte Carlo *args, **kwargs : Other arguments passed to the function f Returns ------- out : scalar (float) The value of the integral on the region [a, b] Notes ----- Based of original function ``quadrect`` in CompEcon toolbox by Miranda and Fackler References ---------- Miranda, Mario J, and Paul L Fackler. Applied Computational Economics and Finance, MIT Press, 2002. """ if kind.lower() == "lege": nodes, weights = qnwlege(n, a, b) elif kind.lower() == "cheb": nodes, weights = qnwcheb(n, a, b) elif kind.lower() == "trap": nodes, weights = qnwtrap(n, a, b) elif kind.lower() == "simp": nodes, weights = qnwsimp(n, a, b) else: nodes, weights = qnwequi(n, a, b, kind) out = weights.dot(f(nodes, *args, **kwargs)) return out def qnwbeta(n, a=1.0, b=1.0): """ Computes nodes and weights for beta distribution Parameters ---------- n : int or array_like(float) A length-d iterable of the number of nodes in each dimension a : scalar or array_like(float), optional(default=1.0) A length-d b : array_like(float), optional(default=1.0) A d x d array representing the variance-covariance matrix of the multivariate normal distribution. Returns ------- nodes : np.ndarray(dtype=float) Quadrature nodes weights : np.ndarray(dtype=float) Weights for quadrature nodes Notes ----- Based of original function ``qnwbeta`` in CompEcon toolbox by Miranda and Fackler References ---------- Miranda, Mario J, and Paul L Fackler. Applied Computational Economics and Finance, MIT Press, 2002. 
""" return _make_multidim_func(_qnwbeta1, n, a, b) def qnwgamma(n, a=None): """ Computes nodes and weights for gamma distribution Parameters ---------- n : int or array_like(float) A length-d iterable of the number of nodes in each dimension mu : scalar or array_like(float), optional(default=zeros(d)) The means of each dimension of the random variable. If a scalar is given, that constant is repeated d times, where d is the number of dimensions sig2 : array_like(float), optional(default=eye(d)) A d x d array representing the variance-covariance matrix of the multivariate normal distribution. Returns ------- nodes : np.ndarray(dtype=float) Quadrature nodes weights : np.ndarray(dtype=float) Weights for quadrature nodes Notes ----- Based of original function ``qnwgamma`` in CompEcon toolbox by Miranda and Fackler References ---------- Miranda, Mario J, and Paul L Fackler. Applied Computational Economics and Finance, MIT Press, 2002. """ return _make_multidim_func(_qnwgamma1, n, a) # ------------------ # # Internal Functions # # ------------------ # def _make_multidim_func(one_d_func, n, *args): """ A helper function to cut down on code repetition. Almost all of the code in qnwcheb, qnwlege, qnwsimp, qnwtrap is just dealing various forms of input arguments and then shelling out to the corresponding 1d version of the function. This routine does all the argument checking and passes things through the appropriate 1d function before using a tensor product to combine weights and nodes. Parameters ---------- one_d_func : function The 1d function to be called along each dimension n : int or array_like(float) A length-d iterable of the number of nodes in each dimension args : These are the arguments to various qnw____ functions. For the majority of the functions this is just a and b, but some differ. 
Returns ------- func : function The multi-dimensional version of the parameter ``one_d_func`` """ args = list(args) n = np.asarray(n) args = list(map(np.asarray, args)) if all([x.size == 1 for x in [n] + args]): return one_d_func(n, *args) d = n.size for i in range(len(args)): if args[i].size == 1: args[i] = np.repeat(args[i], d) nodes = [] weights = [] for i in range(d): ai = [x[i] for x in args] _1d = one_d_func(n[i], *ai) nodes.append(_1d[0]) weights.append(_1d[1]) weights = ckron(*weights[::-1]) # reverse ordered tensor product nodes = gridmake(*nodes) return nodes, weights def _qnwcheb1(n, a, b): """ Compute univariate Guass-Checbychev quadrature nodes and weights Parameters ---------- n : int The number of nodes a : int The lower endpoint b : int The upper endpoint Returns ------- nodes : np.ndarray(dtype=float) An n element array of nodes nodes : np.ndarray(dtype=float) An n element array of weights Notes ----- Based of original function ``qnwcheb1`` in CompEcon toolbox by Miranda and Fackler References ---------- Miranda, Mario J, and Paul L Fackler. Applied Computational Economics and Finance, MIT Press, 2002. 
""" nodes = (b+a)/2 - (b-a)/2 * np.cos(np.pi/n * np.linspace(0.5, n-0.5, n)) # Create temporary arrays to be used in computing weights t1 = np.arange(1, n+1) - 0.5 t2 = np.arange(0.0, n, 2) t3 = np.concatenate([np.array([1.0]), -2.0/(np.arange(1.0, n-1, 2)*np.arange(3.0, n+1, 2))]) # compute weights and return weights = ((b-a)/n)*np.cos(np.pi/n*np.outer(t1, t2)).dot(t3) return nodes, weights def _qnwlege1(n, a, b): """ Compute univariate Guass-Legendre quadrature nodes and weights Parameters ---------- n : int The number of nodes a : int The lower endpoint b : int The upper endpoint Returns ------- nodes : np.ndarray(dtype=float) An n element array of nodes nodes : np.ndarray(dtype=float) An n element array of weights Notes ----- Based of original function ``qnwlege1`` in CompEcon toolbox by Miranda and Fackler References ---------- Miranda, Mario J, and Paul L Fackler. Applied Computational Economics and Finance, MIT Press, 2002. """ # import ipdb; ipdb.set_trace() maxit = 100 m = np.fix((n + 1) / 2.0).astype(int) xm = 0.5 * (b + a) xl = 0.5 * (b - a) nodes = np.zeros(n) weights = nodes.copy() i = np.arange(m, dtype='int') z = np.cos(np.pi * ((i + 1.0) - 0.25) / (n + 0.5)) for its in range(maxit): p1 = 1.0 p2 = 0.0 for j in range(1, n+1): p3 = p2 p2 = p1 p1 = ((2 * j - 1) * z * p2 - (j - 1) * p3) / j pp = n * (z * p1 - p2)/(z * z - 1.0) z1 = z.copy() z = z1 - p1/pp if all(np.abs(z - z1) < 1e-14): break if its == maxit - 1: raise ValueError("Maximum iterations in _qnwlege1") nodes[i] = xm - xl * z nodes[- i - 1] = xm + xl * z weights[i] = 2 * xl / ((1 - z * z) * pp * pp) weights[- i - 1] = weights[i] return nodes, weights def _qnwnorm1(n): """ Compute nodes and weights for quadrature of univariate standard normal distribution Parameters ---------- n : int The number of nodes Returns ------- nodes : np.ndarray(dtype=float) An n element array of nodes nodes : np.ndarray(dtype=float) An n element array of weights Notes ----- Based of original function ``qnwnorm1`` in 
CompEcon toolbox by Miranda and Fackler References ---------- Miranda, Mario J, and Paul L Fackler. Applied Computational Economics and Finance, MIT Press, 2002. """ maxit = 100 pim4 = 1 / np.pi**(0.25) m = np.fix((n + 1) / 2).astype(int) nodes = np.zeros(n) weights = np.zeros(n) for i in range(m): if i == 0: z = np.sqrt(2*n+1) - 1.85575 * ((2 * n + 1)**(-1 / 6.1)) elif i == 1: z = z - 1.14 * (n ** 0.426) / z elif i == 2: z = 1.86 * z + 0.86 * nodes[0] elif i == 3: z = 1.91 * z + 0.91 * nodes[1] else: z = 2 * z + nodes[i-2] its = 0 while its < maxit: its += 1 p1 = pim4 p2 = 0 for j in range(1, n+1): p3 = p2 p2 = p1 p1 = z * math.sqrt(2.0/j) * p2 - math.sqrt((j - 1.0) / j) * p3 pp = math.sqrt(2 * n) * p2 z1 = z z = z1 - p1/pp if abs(z - z1) < 1e-14: break if its == maxit: raise ValueError("Failed to converge in _qnwnorm1") nodes[n - 1 - i] = z nodes[i] = -z weights[i] = 2 / (pp*pp) weights[n - 1 - i] = weights[i] weights /= math.sqrt(math.pi) nodes = nodes * math.sqrt(2.0) return nodes, weights def _qnwsimp1(n, a, b): """ Compute univariate Simpson quadrature nodes and weights Parameters ---------- n : int The number of nodes a : int The lower endpoint b : int The upper endpoint Returns ------- nodes : np.ndarray(dtype=float) An n element array of nodes nodes : np.ndarray(dtype=float) An n element array of weights Notes ----- Based of original function ``qnwsimp1`` in CompEcon toolbox by Miranda and Fackler References ---------- Miranda, Mario J, and Paul L Fackler. Applied Computational Economics and Finance, MIT Press, 2002. """ if n % 2 == 0: print("WARNING qnwsimp: n must be an odd integer. 
Increasing by 1") n += 1 nodes = np.linspace(a, b, n) dx = nodes[1] - nodes[0] weights = np.tile([2.0, 4.0], (n + 1.0) / 2.0) weights = weights[:n] weights[0] = weights[-1] = 1 weights = (dx / 3.0) * weights return nodes, weights def _qnwtrap1(n, a, b): """ Compute univariate trapezoid rule quadrature nodes and weights Parameters ---------- n : int The number of nodes a : int The lower endpoint b : int The upper endpoint Returns ------- nodes : np.ndarray(dtype=float) An n element array of nodes nodes : np.ndarray(dtype=float) An n element array of weights Notes ----- Based of original function ``qnwtrap1`` in CompEcon toolbox by Miranda and Fackler References ---------- Miranda, Mario J, and Paul L Fackler. Applied Computational Economics and Finance, MIT Press, 2002. """ if n < 1: raise ValueError("n must be at least one") nodes = np.linspace(a, b, n) dx = nodes[1] - nodes[0] weights = dx * np.ones(n) weights[0] *= 0.5 weights[-1] *= 0.5 return nodes, weights def _qnwbeta1(n, a=1.0, b=1.0): """ Computes nodes and weights for quadrature on the beta distribution. Default is a=b=1 which is just a uniform distribution NOTE: For now I am just following compecon; would be much better to find a different way since I don't know what they are doing. Parameters ---------- n : scalar : int The number of quadrature points a : scalar : float, optional(default=1) First Beta distribution parameter b : scalar : float, optional(default=1) Second Beta distribution parameter Returns ------- nodes : np.ndarray(dtype=float, ndim=1) The quadrature points weights : np.ndarray(dtype=float, ndim=1) The quadrature weights that correspond to nodes Notes ----- Based of original function ``_qnwbeta1`` in CompEcon toolbox by Miranda and Fackler References ---------- Miranda, Mario J, and Paul L Fackler. Applied Computational Economics and Finance, MIT Press, 2002. 
""" # We subtract one and write a + 1 where we actually want a, and a # where we want a - 1 a = a - 1 b = b - 1 maxiter = 25 # Allocate empty space nodes = np.zeros(n) weights = np.zeros(n) # Find "reasonable" starting values. Why these numbers? for i in range(n): if i == 0: an = a/n bn = b/n r1 = (1+a) * (2.78/(4+n*n) + .768*an/n) r2 = 1 + 1.48*an + .96*bn + .452*an*an + .83*an*bn z = 1 - r1/r2 elif i == 1: r1 = (4.1+a) / ((1+a)*(1+0.156*a)) r2 = 1 + 0.06 * (n-8) * (1+0.12*a)/n r3 = 1 + 0.012*b * (1+0.25*abs(a))/n z = z - (1-z) * r1 * r2 * r3 elif i == 2: r1 = (1.67+0.28*a)/(1+0.37*a) r2 = 1+0.22*(n-8)/n r3 = 1+8*b/((6.28+b)*n*n) z = z-(nodes[0]-z)*r1*r2*r3 elif i == n - 2: r1 = (1+0.235*b)/(0.766+0.119*b) r2 = 1/(1+0.639*(n-4)/(1+0.71*(n-4))) r3 = 1/(1+20*a/((7.5+a)*n*n)) z = z+(z-nodes[-4])*r1*r2*r3 elif i == n - 1: r1 = (1+0.37*b) / (1.67+0.28*b) r2 = 1 / (1+0.22*(n-8)/n) r3 = 1 / (1+8*a/((6.28+a)*n*n)) z = z+(z-nodes[-3])*r1*r2*r3 else: z = 3*nodes[i-1] - 3*nodes[i-2] + nodes[i-3] ab = a+b # Root finding its = 0 z1 = -100 while abs(z - z1) > 1e-10 and its < maxiter: temp = 2 + ab p1 = (a-b + temp*z)/2 p2 = 1 for j in range(2, n+1): p3 = p2 p2 = p1 temp = 2*j + ab aa = 2*j * (j+ab)*(temp-2) bb = (temp-1) * (a*a - b*b + temp*(temp-2) * z) c = 2 * (j - 1 + a) * (j - 1 + b) * temp p1 = (bb*p2 - c*p3)/aa pp = (n*(a-b-temp*z) * p1 + 2*(n+a)*(n+b)*p2)/(temp*(1 - z*z)) z1 = z z = z1 - p1/pp if abs(z - z1) < 1e-12: break its += 1 if its == maxiter: raise ValueError("Max Iteration reached. Failed to converge") nodes[i] = z weights[i] = temp/(pp*p2) nodes = (1-nodes)/2 weights = weights * math.exp(gammaln(a+n) + gammaln(b+n) - gammaln(n+1) - gammaln(n+ab+1)) weights = weights / (2*math.exp(gammaln(a+1) + gammaln(b+1) - gammaln(ab+2))) return nodes, weights def _qnwgamma1(n, a=None): """ Insert docs. Default is a=0 NOTE: For now I am just following compecon; would be much better to find a different way since I don't know what they are doing. 
Parameters ---------- n : scalar : int The number of quadrature points a : scalar : float Gamma distribution parameter Returns ------- nodes : np.ndarray(dtype=float, ndim=1) The quadrature points weights : np.ndarray(dtype=float, ndim=1) The quadrature weights that correspond to nodes Notes ----- Based of original function ``qnwgamma1`` in CompEcon toolbox by Miranda and Fackler References ---------- Miranda, Mario J, and Paul L Fackler. Applied Computational Economics and Finance, MIT Press, 2002. """ if a is None: a = 0 else: a -= 1 maxit = 10 factor = -math.exp(gammaln(a+n) - gammaln(n) - gammaln(a+1)) nodes = np.zeros(n) weights = np.zeros(n) # Create nodes for i in range(n): # Reasonable starting values if i == 0: z = (1+a) * (3+0.92*a) / (1 + 2.4*n + 1.8*a) elif i == 1: z = z + (15 + 6.25*a) / (1 + 0.9*a + 2.5*n) else: j = i-1 z = z + ((1 + 2.55*j) / (1.9*j) + 1.26*j*a / (1 + 3.5*j)) * \ (z - nodes[j-1]) / (1 + 0.3*a) # root finding iterations its = 0 z1 = -10000 while abs(z - z1) > 1e-10 and its < maxit: p1 = 1.0 p2 = 0.0 for j in range(1, n+1): p3 = p2 p2 = p1 p1 = ((2*j - 1 + a - z)*p2 - (j - 1 + a)*p3) / j pp = (n*p1 - (n+a)*p2) / z z1 = z z = z1 - p1/pp its += 1 if its == maxit: raise ValueError('Failure to converge') nodes[i] = z weights[i] = factor / (pp*n*p2) return nodes, weights
bsd-3-clause
CYBAI/servo
tests/wpt/web-platform-tests/tools/third_party/aioquic/docs/conf.py
17
5850
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # aioquic documentation build configuration file, created by # sphinx-quickstart on Thu Feb 8 17:22:14 2018. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('..')) class MockBuffer: Buffer = None BufferReadError = None BufferWriteError = None class MockCrypto: AEAD = None CryptoError = ValueError HeaderProtection = None class MockPylsqpack: Decoder = None Encoder = None StreamBlocked = None sys.modules.update({ "aioquic._buffer": MockBuffer(), "aioquic._crypto": MockCrypto(), "pylsqpack": MockPylsqpack(), }) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx_autodoc_typehints', 'sphinxcontrib.asyncio', ] intersphinx_mapping = { 'cryptography': ('https://cryptography.io/en/latest', None), 'python': ('https://docs.python.org/3', None), } # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. 
master_doc = 'index' # General information about the project. project = 'aioquic' copyright = u'2019, Jeremy Lainé' author = u'Jeremy Lainé' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '' # The full version, including alpha/beta/rc tags. release = '' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'alabaster' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = { 'description': 'A library for QUIC in Python.', 'github_button': True, 'github_user': 'aiortc', 'github_repo': 'aioquic', } # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
html_static_path = ['_static'] # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # This is required for the alabaster theme # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars html_sidebars = { '**': [ 'about.html', 'navigation.html', 'relations.html', 'searchbox.html', ] } # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. htmlhelp_basename = 'aioquicdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'aioquic.tex', 'aioquic Documentation', author, 'manual'), ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'aioquic', 'aioquic Documentation', [author], 1) ] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'aioquic', 'aioquic Documentation', author, 'aioquic', 'One line description of project.', 'Miscellaneous'), ]
mpl-2.0
scroggo/skia
tools/skp/page_sets/skia_ebay_desktop.py
9
1442
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # pylint: disable=W0401,W0614 from telemetry.page import page as page_module from telemetry.page import page_set as page_set_module class SkiaBuildbotDesktopPage(page_module.Page): def __init__(self, url, page_set): super(SkiaBuildbotDesktopPage, self).__init__( url=url, page_set=page_set, credentials_path='data/credentials.json') self.user_agent_type = 'desktop' self.archive_data_file = 'data/skia_ebay_desktop.json' def RunNavigateSteps(self, action_runner): action_runner.NavigateToPage(self) action_runner.Wait(5) class SkiaEbayDesktopPageSet(page_set_module.PageSet): """ Pages designed to represent the median, not highly optimized web """ def __init__(self): super(SkiaEbayDesktopPageSet, self).__init__( user_agent_type='desktop', archive_data_file='data/skia_ebay_desktop.json') urls_list = [ # Why: #1 commerce website by time spent by users in US. ('http://www.ebay.com/ctg/Harry-Potter-and-Deathly-Hallows-Year-7-J-K-' 'Rowling-2007-Cassette-Unabridged-/123341182?_dmpt=US_Childrens_Books' '&_pcategid=279&_pcatid=4&_refkw=harry+potter+and+the+deathly+hallows'), ] for url in urls_list: self.AddUserStory(SkiaBuildbotDesktopPage(url, self))
bsd-3-clause
codingdojotw/CppMiniTutorial
third_party/gmock-1.7.0/gtest/test/gtest_filter_unittest.py
2826
21261
#!/usr/bin/env python # # Copyright 2005 Google Inc. All Rights Reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Unit test for Google Test test filters. A user can specify which test(s) in a Google Test program to run via either the GTEST_FILTER environment variable or the --gtest_filter flag. This script tests such functionality by invoking gtest_filter_unittest_ (a program written with Google Test) with different environments and command line flags. Note that test sharding may also influence which tests are filtered. Therefore, we test that here also. 
""" __author__ = 'wan@google.com (Zhanyong Wan)' import os import re import sets import sys import gtest_test_utils # Constants. # Checks if this platform can pass empty environment variables to child # processes. We set an env variable to an empty string and invoke a python # script in a subprocess to print whether the variable is STILL in # os.environ. We then use 'eval' to parse the child's output so that an # exception is thrown if the input is anything other than 'True' nor 'False'. os.environ['EMPTY_VAR'] = '' child = gtest_test_utils.Subprocess( [sys.executable, '-c', 'import os; print \'EMPTY_VAR\' in os.environ']) CAN_PASS_EMPTY_ENV = eval(child.output) # Check if this platform can unset environment variables in child processes. # We set an env variable to a non-empty string, unset it, and invoke # a python script in a subprocess to print whether the variable # is NO LONGER in os.environ. # We use 'eval' to parse the child's output so that an exception # is thrown if the input is neither 'True' nor 'False'. os.environ['UNSET_VAR'] = 'X' del os.environ['UNSET_VAR'] child = gtest_test_utils.Subprocess( [sys.executable, '-c', 'import os; print \'UNSET_VAR\' not in os.environ']) CAN_UNSET_ENV = eval(child.output) # Checks if we should test with an empty filter. This doesn't # make sense on platforms that cannot pass empty env variables (Win32) # and on platforms that cannot unset variables (since we cannot tell # the difference between "" and NULL -- Borland and Solaris < 5.10) CAN_TEST_EMPTY_FILTER = (CAN_PASS_EMPTY_ENV and CAN_UNSET_ENV) # The environment variable for specifying the test filters. FILTER_ENV_VAR = 'GTEST_FILTER' # The environment variables for test sharding. TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS' SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX' SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE' # The command line flag for specifying the test filters. FILTER_FLAG = 'gtest_filter' # The command line flag for including disabled tests. 
ALSO_RUN_DISABED_TESTS_FLAG = 'gtest_also_run_disabled_tests' # Command to run the gtest_filter_unittest_ program. COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_filter_unittest_') # Regex for determining whether parameterized tests are enabled in the binary. PARAM_TEST_REGEX = re.compile(r'/ParamTest') # Regex for parsing test case names from Google Test's output. TEST_CASE_REGEX = re.compile(r'^\[\-+\] \d+ tests? from (\w+(/\w+)?)') # Regex for parsing test names from Google Test's output. TEST_REGEX = re.compile(r'^\[\s*RUN\s*\].*\.(\w+(/\w+)?)') # The command line flag to tell Google Test to output the list of tests it # will run. LIST_TESTS_FLAG = '--gtest_list_tests' # Indicates whether Google Test supports death tests. SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess( [COMMAND, LIST_TESTS_FLAG]).output # Full names of all tests in gtest_filter_unittests_. PARAM_TESTS = [ 'SeqP/ParamTest.TestX/0', 'SeqP/ParamTest.TestX/1', 'SeqP/ParamTest.TestY/0', 'SeqP/ParamTest.TestY/1', 'SeqQ/ParamTest.TestX/0', 'SeqQ/ParamTest.TestX/1', 'SeqQ/ParamTest.TestY/0', 'SeqQ/ParamTest.TestY/1', ] DISABLED_TESTS = [ 'BarTest.DISABLED_TestFour', 'BarTest.DISABLED_TestFive', 'BazTest.DISABLED_TestC', 'DISABLED_FoobarTest.Test1', 'DISABLED_FoobarTest.DISABLED_Test2', 'DISABLED_FoobarbazTest.TestA', ] if SUPPORTS_DEATH_TESTS: DEATH_TESTS = [ 'HasDeathTest.Test1', 'HasDeathTest.Test2', ] else: DEATH_TESTS = [] # All the non-disabled tests. ACTIVE_TESTS = [ 'FooTest.Abc', 'FooTest.Xyz', 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', 'BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS param_tests_present = None # Utilities. 
environ = os.environ.copy() def SetEnvVar(env_var, value): """Sets the env variable to 'value'; unsets it when 'value' is None.""" if value is not None: environ[env_var] = value elif env_var in environ: del environ[env_var] def RunAndReturnOutput(args = None): """Runs the test program and returns its output.""" return gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ).output def RunAndExtractTestList(args = None): """Runs the test program and returns its exit code and a list of tests run.""" p = gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ) tests_run = [] test_case = '' test = '' for line in p.output.split('\n'): match = TEST_CASE_REGEX.match(line) if match is not None: test_case = match.group(1) else: match = TEST_REGEX.match(line) if match is not None: test = match.group(1) tests_run.append(test_case + '.' + test) return (tests_run, p.exit_code) def InvokeWithModifiedEnv(extra_env, function, *args, **kwargs): """Runs the given function and arguments in a modified environment.""" try: original_env = environ.copy() environ.update(extra_env) return function(*args, **kwargs) finally: environ.clear() environ.update(original_env) def RunWithSharding(total_shards, shard_index, command): """Runs a test program shard and returns exit code and a list of tests run.""" extra_env = {SHARD_INDEX_ENV_VAR: str(shard_index), TOTAL_SHARDS_ENV_VAR: str(total_shards)} return InvokeWithModifiedEnv(extra_env, RunAndExtractTestList, command) # The unit test. class GTestFilterUnitTest(gtest_test_utils.TestCase): """Tests the env variable or the command line flag to filter tests.""" # Utilities. 
def AssertSetEqual(self, lhs, rhs): """Asserts that two sets are equal.""" for elem in lhs: self.assert_(elem in rhs, '%s in %s' % (elem, rhs)) for elem in rhs: self.assert_(elem in lhs, '%s in %s' % (elem, lhs)) def AssertPartitionIsValid(self, set_var, list_of_sets): """Asserts that list_of_sets is a valid partition of set_var.""" full_partition = [] for slice_var in list_of_sets: full_partition.extend(slice_var) self.assertEqual(len(set_var), len(full_partition)) self.assertEqual(sets.Set(set_var), sets.Set(full_partition)) def AdjustForParameterizedTests(self, tests_to_run): """Adjust tests_to_run in case value parameterized tests are disabled.""" global param_tests_present if not param_tests_present: return list(sets.Set(tests_to_run) - sets.Set(PARAM_TESTS)) else: return tests_to_run def RunAndVerify(self, gtest_filter, tests_to_run): """Checks that the binary runs correct set of tests for a given filter.""" tests_to_run = self.AdjustForParameterizedTests(tests_to_run) # First, tests using the environment variable. # Windows removes empty variables from the environment when passing it # to a new process. This means it is impossible to pass an empty filter # into a process using the environment variable. However, we can still # test the case when the variable is not supplied (i.e., gtest_filter is # None). # pylint: disable-msg=C6403 if CAN_TEST_EMPTY_FILTER or gtest_filter != '': SetEnvVar(FILTER_ENV_VAR, gtest_filter) tests_run = RunAndExtractTestList()[0] SetEnvVar(FILTER_ENV_VAR, None) self.AssertSetEqual(tests_run, tests_to_run) # pylint: enable-msg=C6403 # Next, tests using the command line flag. if gtest_filter is None: args = [] else: args = ['--%s=%s' % (FILTER_FLAG, gtest_filter)] tests_run = RunAndExtractTestList(args)[0] self.AssertSetEqual(tests_run, tests_to_run) def RunAndVerifyWithSharding(self, gtest_filter, total_shards, tests_to_run, args=None, check_exit_0=False): """Checks that binary runs correct tests for the given filter and shard. 
Runs all shards of gtest_filter_unittest_ with the given filter, and verifies that the right set of tests were run. The union of tests run on each shard should be identical to tests_to_run, without duplicates. Args: gtest_filter: A filter to apply to the tests. total_shards: A total number of shards to split test run into. tests_to_run: A set of tests expected to run. args : Arguments to pass to the to the test binary. check_exit_0: When set to a true value, make sure that all shards return 0. """ tests_to_run = self.AdjustForParameterizedTests(tests_to_run) # Windows removes empty variables from the environment when passing it # to a new process. This means it is impossible to pass an empty filter # into a process using the environment variable. However, we can still # test the case when the variable is not supplied (i.e., gtest_filter is # None). # pylint: disable-msg=C6403 if CAN_TEST_EMPTY_FILTER or gtest_filter != '': SetEnvVar(FILTER_ENV_VAR, gtest_filter) partition = [] for i in range(0, total_shards): (tests_run, exit_code) = RunWithSharding(total_shards, i, args) if check_exit_0: self.assertEqual(0, exit_code) partition.append(tests_run) self.AssertPartitionIsValid(tests_to_run, partition) SetEnvVar(FILTER_ENV_VAR, None) # pylint: enable-msg=C6403 def RunAndVerifyAllowingDisabled(self, gtest_filter, tests_to_run): """Checks that the binary runs correct set of tests for the given filter. Runs gtest_filter_unittest_ with the given filter, and enables disabled tests. Verifies that the right set of tests were run. Args: gtest_filter: A filter to apply to the tests. tests_to_run: A set of tests expected to run. """ tests_to_run = self.AdjustForParameterizedTests(tests_to_run) # Construct the command line. args = ['--%s' % ALSO_RUN_DISABED_TESTS_FLAG] if gtest_filter is not None: args.append('--%s=%s' % (FILTER_FLAG, gtest_filter)) tests_run = RunAndExtractTestList(args)[0] self.AssertSetEqual(tests_run, tests_to_run) def setUp(self): """Sets up test case. 
Determines whether value-parameterized tests are enabled in the binary and sets the flags accordingly. """ global param_tests_present if param_tests_present is None: param_tests_present = PARAM_TEST_REGEX.search( RunAndReturnOutput()) is not None def testDefaultBehavior(self): """Tests the behavior of not specifying the filter.""" self.RunAndVerify(None, ACTIVE_TESTS) def testDefaultBehaviorWithShards(self): """Tests the behavior without the filter, with sharding enabled.""" self.RunAndVerifyWithSharding(None, 1, ACTIVE_TESTS) self.RunAndVerifyWithSharding(None, 2, ACTIVE_TESTS) self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) - 1, ACTIVE_TESTS) self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS), ACTIVE_TESTS) self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) + 1, ACTIVE_TESTS) def testEmptyFilter(self): """Tests an empty filter.""" self.RunAndVerify('', []) self.RunAndVerifyWithSharding('', 1, []) self.RunAndVerifyWithSharding('', 2, []) def testBadFilter(self): """Tests a filter that matches nothing.""" self.RunAndVerify('BadFilter', []) self.RunAndVerifyAllowingDisabled('BadFilter', []) def testFullName(self): """Tests filtering by full name.""" self.RunAndVerify('FooTest.Xyz', ['FooTest.Xyz']) self.RunAndVerifyAllowingDisabled('FooTest.Xyz', ['FooTest.Xyz']) self.RunAndVerifyWithSharding('FooTest.Xyz', 5, ['FooTest.Xyz']) def testUniversalFilters(self): """Tests filters that match everything.""" self.RunAndVerify('*', ACTIVE_TESTS) self.RunAndVerify('*.*', ACTIVE_TESTS) self.RunAndVerifyWithSharding('*.*', len(ACTIVE_TESTS) - 3, ACTIVE_TESTS) self.RunAndVerifyAllowingDisabled('*', ACTIVE_TESTS + DISABLED_TESTS) self.RunAndVerifyAllowingDisabled('*.*', ACTIVE_TESTS + DISABLED_TESTS) def testFilterByTestCase(self): """Tests filtering by test case name.""" self.RunAndVerify('FooTest.*', ['FooTest.Abc', 'FooTest.Xyz']) BAZ_TESTS = ['BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB'] self.RunAndVerify('BazTest.*', BAZ_TESTS) 
self.RunAndVerifyAllowingDisabled('BazTest.*', BAZ_TESTS + ['BazTest.DISABLED_TestC']) def testFilterByTest(self): """Tests filtering by test name.""" self.RunAndVerify('*.TestOne', ['BarTest.TestOne', 'BazTest.TestOne']) def testFilterDisabledTests(self): """Select only the disabled tests to run.""" self.RunAndVerify('DISABLED_FoobarTest.Test1', []) self.RunAndVerifyAllowingDisabled('DISABLED_FoobarTest.Test1', ['DISABLED_FoobarTest.Test1']) self.RunAndVerify('*DISABLED_*', []) self.RunAndVerifyAllowingDisabled('*DISABLED_*', DISABLED_TESTS) self.RunAndVerify('*.DISABLED_*', []) self.RunAndVerifyAllowingDisabled('*.DISABLED_*', [ 'BarTest.DISABLED_TestFour', 'BarTest.DISABLED_TestFive', 'BazTest.DISABLED_TestC', 'DISABLED_FoobarTest.DISABLED_Test2', ]) self.RunAndVerify('DISABLED_*', []) self.RunAndVerifyAllowingDisabled('DISABLED_*', [ 'DISABLED_FoobarTest.Test1', 'DISABLED_FoobarTest.DISABLED_Test2', 'DISABLED_FoobarbazTest.TestA', ]) def testWildcardInTestCaseName(self): """Tests using wildcard in the test case name.""" self.RunAndVerify('*a*.*', [ 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', 'BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS) def testWildcardInTestName(self): """Tests using wildcard in the test name.""" self.RunAndVerify('*.*A*', ['FooTest.Abc', 'BazTest.TestA']) def testFilterWithoutDot(self): """Tests a filter that has no '.' 
in it.""" self.RunAndVerify('*z*', [ 'FooTest.Xyz', 'BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB', ]) def testTwoPatterns(self): """Tests filters that consist of two patterns.""" self.RunAndVerify('Foo*.*:*A*', [ 'FooTest.Abc', 'FooTest.Xyz', 'BazTest.TestA', ]) # An empty pattern + a non-empty one self.RunAndVerify(':*A*', ['FooTest.Abc', 'BazTest.TestA']) def testThreePatterns(self): """Tests filters that consist of three patterns.""" self.RunAndVerify('*oo*:*A*:*One', [ 'FooTest.Abc', 'FooTest.Xyz', 'BarTest.TestOne', 'BazTest.TestOne', 'BazTest.TestA', ]) # The 2nd pattern is empty. self.RunAndVerify('*oo*::*One', [ 'FooTest.Abc', 'FooTest.Xyz', 'BarTest.TestOne', 'BazTest.TestOne', ]) # The last 2 patterns are empty. self.RunAndVerify('*oo*::', [ 'FooTest.Abc', 'FooTest.Xyz', ]) def testNegativeFilters(self): self.RunAndVerify('*-BazTest.TestOne', [ 'FooTest.Abc', 'FooTest.Xyz', 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', 'BazTest.TestA', 'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS) self.RunAndVerify('*-FooTest.Abc:BazTest.*', [ 'FooTest.Xyz', 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', ] + DEATH_TESTS + PARAM_TESTS) self.RunAndVerify('BarTest.*-BarTest.TestOne', [ 'BarTest.TestTwo', 'BarTest.TestThree', ]) # Tests without leading '*'. self.RunAndVerify('-FooTest.Abc:FooTest.Xyz:BazTest.*', [ 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', ] + DEATH_TESTS + PARAM_TESTS) # Value parameterized tests. self.RunAndVerify('*/*', PARAM_TESTS) # Value parameterized tests filtering by the sequence name. self.RunAndVerify('SeqP/*', [ 'SeqP/ParamTest.TestX/0', 'SeqP/ParamTest.TestX/1', 'SeqP/ParamTest.TestY/0', 'SeqP/ParamTest.TestY/1', ]) # Value parameterized tests filtering by the test name. self.RunAndVerify('*/0', [ 'SeqP/ParamTest.TestX/0', 'SeqP/ParamTest.TestY/0', 'SeqQ/ParamTest.TestX/0', 'SeqQ/ParamTest.TestY/0', ]) def testFlagOverridesEnvVar(self): """Tests that the filter flag overrides the filtering env. 
variable.""" SetEnvVar(FILTER_ENV_VAR, 'Foo*') args = ['--%s=%s' % (FILTER_FLAG, '*One')] tests_run = RunAndExtractTestList(args)[0] SetEnvVar(FILTER_ENV_VAR, None) self.AssertSetEqual(tests_run, ['BarTest.TestOne', 'BazTest.TestOne']) def testShardStatusFileIsCreated(self): """Tests that the shard file is created if specified in the environment.""" shard_status_file = os.path.join(gtest_test_utils.GetTempDir(), 'shard_status_file') self.assert_(not os.path.exists(shard_status_file)) extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file} try: InvokeWithModifiedEnv(extra_env, RunAndReturnOutput) finally: self.assert_(os.path.exists(shard_status_file)) os.remove(shard_status_file) def testShardStatusFileIsCreatedWithListTests(self): """Tests that the shard file is created with the "list_tests" flag.""" shard_status_file = os.path.join(gtest_test_utils.GetTempDir(), 'shard_status_file2') self.assert_(not os.path.exists(shard_status_file)) extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file} try: output = InvokeWithModifiedEnv(extra_env, RunAndReturnOutput, [LIST_TESTS_FLAG]) finally: # This assertion ensures that Google Test enumerated the tests as # opposed to running them. 
self.assert_('[==========]' not in output, 'Unexpected output during test enumeration.\n' 'Please ensure that LIST_TESTS_FLAG is assigned the\n' 'correct flag value for listing Google Test tests.') self.assert_(os.path.exists(shard_status_file)) os.remove(shard_status_file) if SUPPORTS_DEATH_TESTS: def testShardingWorksWithDeathTests(self): """Tests integration with death tests and sharding.""" gtest_filter = 'HasDeathTest.*:SeqP/*' expected_tests = [ 'HasDeathTest.Test1', 'HasDeathTest.Test2', 'SeqP/ParamTest.TestX/0', 'SeqP/ParamTest.TestX/1', 'SeqP/ParamTest.TestY/0', 'SeqP/ParamTest.TestY/1', ] for flag in ['--gtest_death_test_style=threadsafe', '--gtest_death_test_style=fast']: self.RunAndVerifyWithSharding(gtest_filter, 3, expected_tests, check_exit_0=True, args=[flag]) self.RunAndVerifyWithSharding(gtest_filter, 5, expected_tests, check_exit_0=True, args=[flag]) if __name__ == '__main__': gtest_test_utils.Main()
mit
ChengeLi/VehicleTracking
utilities/embedding.py
1
3427
#### embedding from sklearn.decomposition import PCA from sklearn.manifold import TSNE, MDS from mpl_toolkits.mplot3d import Axes3D class embeddings(obj): def __init__(self, model,data): self.modelChoice = model self.data = data # self.data = FeatureMtx_norm def PCA_embedding(self,n_components): print 'PCA projecting...' self.pca = PCA(n_components= n_components,whiten=False) self.embedding_ = self.model.fit(data) # self.pca = PCAembedding(self.data,50) # FeatureAfterPCA = self.pca.transform(self.data) def TSNE_embedding(self,n_components): # tsne = TSNE(n_components=2, perplexity=30.0) tsne3 = TSNE(n_components=n_components, perplexity=30.0) # tsne_data = tsne.fit_transform(FeatureAfterPCA50) tsne3_data = tsne3.fit_transform(FeatureAfterPCA50) # pickle.dump(tsne_data,open(DataPathobj.DataPath+'/tsne_data.p','wb')) # tsne_data = pickle.load(open(DataPathobj.DataPath+'/tsne_data.p','rb')) self.embedding_ = tsne3_data def MDS_embedding(self,n_components): self.mds = MDS(n_components=n_components, max_iter=100, n_init=1) MDS_data = self.mds.fit_transform(FeatureAfterPCA50) def LLE_embedding(self): """locally linear embedding_""" # self.lle = sklearn.manifold.LocallyLinearEmbedding(n_neighbors=5, n_components=self.n_dimension, reg=0.001, eigen_solver='auto', tol=1e-06, max_iter=100, # method='standard', hessian_tol=0.0001, modified_tol=1e-12, neighbors_algorithm='auto', random_state=None) # self.embedding_ = self.lle.fit_transform(data_sampl_*feature_) """use DPGMM or Spectral labels""" sscfile = loadmat(DataPathobj.sscpath+'001.mat') labels_DPGMM = csr_matrix(sscfile['labels_DPGMM_upup'], shape=sscfile['labels_DPGMM_upup'].shape).toarray() labels_spectral = csr_matrix(sscfile['labels_spectral_upup'], shape=sscfile['labels_spectral_upup'].shape).toarray() trjID = csr_matrix(sscfile['trjID_upup'], shape=sscfile['trjID_upup'].shape).toarray() """use connected_components labels""" adjfile = loadmat(DataPathobj.adjpath+'20knn&thresh_Gaussian_diff_dir_001.mat') labels_CC = 
csr_matrix(adjfile['c_upup'], shape=adjfile['c_upup'].shape).toarray() """use fake ground truth labels""" arrange_index = pickle.load(open(DataPathobj.DataPath+'/arrange_index.p','rb')) # labels_fakeGT = labels_CC[arrange_index] labels_fakeGT = np.zeros_like(labels_CC) for ii in range(0,int(labels_fakeGT.shape[1]/20),1): labels_fakeGT[0,arrange_index[20*ii:min(20*(ii+1),labels_fakeGT.shape[1])]] = ii # labels_fakeGT[0,5*ii:min(5*(ii+1),labels_fakeGT.shape[1])] = ii def visEmbedding(self): # fig = plt.figure() # ax = fig.add_subplot(111, projection='3d') # labels = labels_DPGMM # labels = labels_spectral # labels = labels_CC labels = labels_fakeGT # data = MDS_data data = tsne_data clustered_color = np.array([np.random.randint(0,255) for _ in range(3*int(len(np.unique(labels))))]).reshape(len(np.unique(labels)),3) plt.figure() for ii in range(labels.shape[1]): plt.scatter(data[ii,0],data[ii,1],color=(clustered_color[int(labels[0,ii])].T/255.0)) plt.draw()
mit
rclmenezes/sqlalchemy
doc/build/builder/dialect_info.py
5
6366
import re from sphinx.util.compat import Directive from docutils import nodes class DialectDirective(Directive): has_content = True _dialects = {} def _parse_content(self): d = {} d['default'] = self.content[0] d['text'] = [] idx = 0 for line in self.content[1:]: idx += 1 m = re.match(r'\:(.+?)\: +(.+)', line) if m: attrname, value = m.group(1, 2) d[attrname] = value else: break d["text"] = self.content[idx + 1:] return d def _dbapi_node(self): dialect_name, dbapi_name = self.dialect_name.split("+") try: dialect_directive = self._dialects[dialect_name] except KeyError: raise Exception("No .. dialect:: %s directive has been established" % dialect_name) output = [] content = self._parse_content() parent_section_ref = self.state.parent.children[0]['ids'][0] self._append_dbapi_bullet(dialect_name, dbapi_name, content['name'], parent_section_ref) p = nodes.paragraph('', '', nodes.Text( "Support for the %s database via the %s driver." % ( dialect_directive.database_name, content['name'] ), "Support for the %s database via the %s driver." 
% ( dialect_directive.database_name, content['name'] ) ), ) self.state.nested_parse(content['text'], 0, p) output.append(p) if "url" in content or "driverurl" in content: sec = nodes.section( '', nodes.title("DBAPI", "DBAPI"), ids=["dialect-%s-%s-url" % (dialect_name, dbapi_name)] ) if "url" in content: text = "Documentation and download information (if applicable) "\ "for %s is available at:\n" % content["name"] uri = content['url'] sec.append( nodes.paragraph('', '', nodes.Text(text, text), nodes.reference('', '', nodes.Text(uri, uri), refuri=uri, ) ) ) if "driverurl" in content: text = "Drivers for this database are available at:\n" sec.append( nodes.paragraph('', '', nodes.Text(text, text), nodes.reference('', '', nodes.Text(content['driverurl'], content['driverurl']), refuri=content['driverurl'] ) ) ) output.append(sec) if "connectstring" in content: sec = nodes.section( '', nodes.title("Connecting", "Connecting"), nodes.paragraph('', '', nodes.Text("Connect String:", "Connect String:"), nodes.literal_block(content['connectstring'], content['connectstring']) ), ids=["dialect-%s-%s-connect" % (dialect_name, dbapi_name)] ) output.append(sec) return output def _dialect_node(self): self._dialects[self.dialect_name] = self content = self._parse_content() self.database_name = content['name'] self.bullets = nodes.bullet_list() text = "The following dialect/DBAPI options are available. "\ "Please refer to individual DBAPI sections for connect information." sec = nodes.section('', nodes.paragraph('', '', nodes.Text( "Support for the %s database." % content['name'], "Support for the %s database." 
% content['name'] ), ), nodes.title("DBAPI Support", "DBAPI Support"), nodes.paragraph('', '', nodes.Text(text, text), self.bullets ), ids=["dialect-%s" % self.dialect_name] ) return [sec] def _append_dbapi_bullet(self, dialect_name, dbapi_name, name, idname): env = self.state.document.settings.env dialect_directive = self._dialects[dialect_name] try: relative_uri = env.app.builder.get_relative_uri(dialect_directive.docname, self.docname) except: relative_uri = "" list_node = nodes.list_item('', nodes.paragraph('', '', nodes.reference('', '', nodes.Text(name, name), refdocname=self.docname, refuri= relative_uri + "#" + idname ), #nodes.Text(" ", " "), #nodes.reference('', '', # nodes.Text("(connectstring)", "(connectstring)"), # refdocname=self.docname, # refuri=env.app.builder.get_relative_uri( # dialect_directive.docname, self.docname) + ## "#" + ("dialect-%s-%s-connect" % # (dialect_name, dbapi_name)) # ) ) ) dialect_directive.bullets.append(list_node) def run(self): env = self.state.document.settings.env self.docname = env.docname self.dialect_name = dialect_name = self.content[0] has_dbapi = "+" in dialect_name if has_dbapi: return self._dbapi_node() else: return self._dialect_node() def setup(app): app.add_directive('dialect', DialectDirective)
mit
mattt416/neutron
neutron/tests/unit/plugins/ml2/drivers/base_type_tunnel.py
19
15909
# Copyright (c) 2014 OpenStack Foundation, all rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from six import moves import testtools from testtools import matchers from neutron.common import exceptions as exc from neutron.db import api as db from neutron.plugins.ml2 import driver_api as api from neutron.plugins.ml2.drivers import type_tunnel TUNNEL_IP_ONE = "10.10.10.10" TUNNEL_IP_TWO = "10.10.10.20" HOST_ONE = 'fake_host_one' HOST_TWO = 'fake_host_two' TUN_MIN = 100 TUN_MAX = 109 TUNNEL_RANGES = [(TUN_MIN, TUN_MAX)] UPDATED_TUNNEL_RANGES = [(TUN_MIN + 5, TUN_MAX + 5)] class TunnelTypeTestMixin(object): DRIVER_CLASS = None TYPE = None def setUp(self): super(TunnelTypeTestMixin, self).setUp() self.driver = self.DRIVER_CLASS() self.driver.tunnel_ranges = TUNNEL_RANGES self.driver.sync_allocations() self.session = db.get_session() def test_tunnel_type(self): self.assertEqual(self.TYPE, self.driver.get_type()) def test_validate_provider_segment(self): segment = {api.NETWORK_TYPE: self.TYPE, api.PHYSICAL_NETWORK: 'phys_net', api.SEGMENTATION_ID: None} with testtools.ExpectedException(exc.InvalidInput): self.driver.validate_provider_segment(segment) segment[api.PHYSICAL_NETWORK] = None self.driver.validate_provider_segment(segment) segment[api.SEGMENTATION_ID] = 1 self.driver.validate_provider_segment(segment) def test_sync_tunnel_allocations(self): self.assertIsNone( self.driver.get_allocation(self.session, (TUN_MIN - 1))) self.assertFalse( 
self.driver.get_allocation(self.session, (TUN_MIN)).allocated) self.assertFalse( self.driver.get_allocation(self.session, (TUN_MIN + 1)).allocated) self.assertFalse( self.driver.get_allocation(self.session, (TUN_MAX - 1)).allocated) self.assertFalse( self.driver.get_allocation(self.session, (TUN_MAX)).allocated) self.assertIsNone( self.driver.get_allocation(self.session, (TUN_MAX + 1))) self.driver.tunnel_ranges = UPDATED_TUNNEL_RANGES self.driver.sync_allocations() self.assertIsNone( self.driver.get_allocation(self.session, (TUN_MIN + 5 - 1))) self.assertFalse( self.driver.get_allocation(self.session, (TUN_MIN + 5)).allocated) self.assertFalse( self.driver.get_allocation(self.session, (TUN_MIN + 5 + 1)).allocated) self.assertFalse( self.driver.get_allocation(self.session, (TUN_MAX + 5 - 1)).allocated) self.assertFalse( self.driver.get_allocation(self.session, (TUN_MAX + 5)).allocated) self.assertIsNone( self.driver.get_allocation(self.session, (TUN_MAX + 5 + 1))) def _test_sync_allocations_and_allocated(self, tunnel_id): segment = {api.NETWORK_TYPE: self.TYPE, api.PHYSICAL_NETWORK: None, api.SEGMENTATION_ID: tunnel_id} self.driver.reserve_provider_segment(self.session, segment) self.driver.tunnel_ranges = UPDATED_TUNNEL_RANGES self.driver.sync_allocations() self.assertTrue( self.driver.get_allocation(self.session, tunnel_id).allocated) def test_sync_allocations_and_allocated_in_initial_range(self): self._test_sync_allocations_and_allocated(TUN_MIN + 2) def test_sync_allocations_and_allocated_in_final_range(self): self._test_sync_allocations_and_allocated(TUN_MAX + 2) def test_sync_allocations_no_op(self): def verify_no_chunk(iterable, chunk_size): # no segment removed/added self.assertEqual(0, len(list(iterable))) return [] with mock.patch.object( type_tunnel, 'chunks', side_effect=verify_no_chunk) as chunks: self.driver.sync_allocations() self.assertEqual(2, len(chunks.mock_calls)) def test_partial_segment_is_partial_segment(self): segment = {api.NETWORK_TYPE: 
self.TYPE, api.PHYSICAL_NETWORK: None, api.SEGMENTATION_ID: None} self.assertTrue(self.driver.is_partial_segment(segment)) def test_specific_segment_is_not_partial_segment(self): segment = {api.NETWORK_TYPE: self.TYPE, api.PHYSICAL_NETWORK: None, api.SEGMENTATION_ID: 101} self.assertFalse(self.driver.is_partial_segment(segment)) def test_reserve_provider_segment_full_specs(self): segment = {api.NETWORK_TYPE: self.TYPE, api.PHYSICAL_NETWORK: None, api.SEGMENTATION_ID: 101} observed = self.driver.reserve_provider_segment(self.session, segment) alloc = self.driver.get_allocation(self.session, observed[api.SEGMENTATION_ID]) self.assertTrue(alloc.allocated) with testtools.ExpectedException(exc.TunnelIdInUse): self.driver.reserve_provider_segment(self.session, segment) self.driver.release_segment(self.session, segment) alloc = self.driver.get_allocation(self.session, observed[api.SEGMENTATION_ID]) self.assertFalse(alloc.allocated) segment[api.SEGMENTATION_ID] = 1000 observed = self.driver.reserve_provider_segment(self.session, segment) alloc = self.driver.get_allocation(self.session, observed[api.SEGMENTATION_ID]) self.assertTrue(alloc.allocated) self.driver.release_segment(self.session, segment) alloc = self.driver.get_allocation(self.session, observed[api.SEGMENTATION_ID]) self.assertIsNone(alloc) def test_reserve_provider_segment(self): tunnel_ids = set() specs = {api.NETWORK_TYPE: self.TYPE, api.PHYSICAL_NETWORK: 'None', api.SEGMENTATION_ID: None} for x in moves.range(TUN_MIN, TUN_MAX + 1): segment = self.driver.reserve_provider_segment(self.session, specs) self.assertEqual(self.TYPE, segment[api.NETWORK_TYPE]) self.assertThat(segment[api.SEGMENTATION_ID], matchers.GreaterThan(TUN_MIN - 1)) self.assertThat(segment[api.SEGMENTATION_ID], matchers.LessThan(TUN_MAX + 1)) tunnel_ids.add(segment[api.SEGMENTATION_ID]) with testtools.ExpectedException(exc.NoNetworkAvailable): segment = self.driver.reserve_provider_segment(self.session, specs) segment = {api.NETWORK_TYPE: 
self.TYPE, api.PHYSICAL_NETWORK: 'None', api.SEGMENTATION_ID: tunnel_ids.pop()} self.driver.release_segment(self.session, segment) segment = self.driver.reserve_provider_segment(self.session, specs) self.assertThat(segment[api.SEGMENTATION_ID], matchers.GreaterThan(TUN_MIN - 1)) self.assertThat(segment[api.SEGMENTATION_ID], matchers.LessThan(TUN_MAX + 1)) tunnel_ids.add(segment[api.SEGMENTATION_ID]) for tunnel_id in tunnel_ids: segment[api.SEGMENTATION_ID] = tunnel_id self.driver.release_segment(self.session, segment) def test_allocate_tenant_segment(self): tunnel_ids = set() for x in moves.range(TUN_MIN, TUN_MAX + 1): segment = self.driver.allocate_tenant_segment(self.session) self.assertThat(segment[api.SEGMENTATION_ID], matchers.GreaterThan(TUN_MIN - 1)) self.assertThat(segment[api.SEGMENTATION_ID], matchers.LessThan(TUN_MAX + 1)) tunnel_ids.add(segment[api.SEGMENTATION_ID]) segment = self.driver.allocate_tenant_segment(self.session) self.assertIsNone(segment) segment = {api.NETWORK_TYPE: self.TYPE, api.PHYSICAL_NETWORK: 'None', api.SEGMENTATION_ID: tunnel_ids.pop()} self.driver.release_segment(self.session, segment) segment = self.driver.allocate_tenant_segment(self.session) self.assertThat(segment[api.SEGMENTATION_ID], matchers.GreaterThan(TUN_MIN - 1)) self.assertThat(segment[api.SEGMENTATION_ID], matchers.LessThan(TUN_MAX + 1)) tunnel_ids.add(segment[api.SEGMENTATION_ID]) for tunnel_id in tunnel_ids: segment[api.SEGMENTATION_ID] = tunnel_id self.driver.release_segment(self.session, segment) def add_endpoint(self, ip=TUNNEL_IP_ONE, host=HOST_ONE): return self.driver.add_endpoint(ip, host) def test_add_endpoint(self): endpoint = self.add_endpoint() self.assertEqual(TUNNEL_IP_ONE, endpoint.ip_address) self.assertEqual(HOST_ONE, endpoint.host) return endpoint def test_add_endpoint_for_existing_tunnel_ip(self): self.add_endpoint() with mock.patch.object(type_tunnel.LOG, 'warning') as log_warn: self.add_endpoint() log_warn.assert_called_once_with(mock.ANY, 
TUNNEL_IP_ONE) def test_get_endpoint_by_host(self): self.add_endpoint() host_endpoint = self.driver.get_endpoint_by_host(HOST_ONE) self.assertEqual(TUNNEL_IP_ONE, host_endpoint.ip_address) return host_endpoint def test_get_endpoint_by_host_for_not_existing_host(self): ip_endpoint = self.driver.get_endpoint_by_host(HOST_TWO) self.assertIsNone(ip_endpoint) def test_get_endpoint_by_ip(self): self.add_endpoint() ip_endpoint = self.driver.get_endpoint_by_ip(TUNNEL_IP_ONE) self.assertEqual(HOST_ONE, ip_endpoint.host) return ip_endpoint def test_get_endpoint_by_ip_for_not_existing_tunnel_ip(self): ip_endpoint = self.driver.get_endpoint_by_ip(TUNNEL_IP_TWO) self.assertIsNone(ip_endpoint) def test_delete_endpoint(self): self.add_endpoint() self.assertIsNone(self.driver.delete_endpoint(TUNNEL_IP_ONE)) # Get all the endpoints and verify its empty endpoints = self.driver.get_endpoints() self.assertNotIn(TUNNEL_IP_ONE, endpoints) class TunnelTypeMultiRangeTestMixin(object): DRIVER_CLASS = None TUN_MIN0 = 100 TUN_MAX0 = 101 TUN_MIN1 = 200 TUN_MAX1 = 201 TUNNEL_MULTI_RANGES = [(TUN_MIN0, TUN_MAX0), (TUN_MIN1, TUN_MAX1)] def setUp(self): super(TunnelTypeMultiRangeTestMixin, self).setUp() self.driver = self.DRIVER_CLASS() self.driver.tunnel_ranges = self.TUNNEL_MULTI_RANGES self.driver.sync_allocations() self.session = db.get_session() def test_release_segment(self): segments = [self.driver.allocate_tenant_segment(self.session) for i in range(4)] # Release them in random order. No special meaning. 
for i in (0, 2, 1, 3): self.driver.release_segment(self.session, segments[i]) for key in (self.TUN_MIN0, self.TUN_MAX0, self.TUN_MIN1, self.TUN_MAX1): alloc = self.driver.get_allocation(self.session, key) self.assertFalse(alloc.allocated) class TunnelRpcCallbackTestMixin(object): DRIVER_CLASS = None TYPE = None def setUp(self): super(TunnelRpcCallbackTestMixin, self).setUp() self.driver = self.DRIVER_CLASS() def _test_tunnel_sync(self, kwargs, delete_tunnel=False): with mock.patch.object(self.notifier, 'tunnel_update') as tunnel_update,\ mock.patch.object(self.notifier, 'tunnel_delete') as tunnel_delete: details = self.callbacks.tunnel_sync('fake_context', **kwargs) tunnels = details['tunnels'] for tunnel in tunnels: self.assertEqual(kwargs['tunnel_ip'], tunnel['ip_address']) self.assertEqual(kwargs['host'], tunnel['host']) self.assertTrue(tunnel_update.called) if delete_tunnel: self.assertTrue(tunnel_delete.called) else: self.assertFalse(tunnel_delete.called) def _test_tunnel_sync_raises(self, kwargs): with mock.patch.object(self.notifier, 'tunnel_update') as tunnel_update,\ mock.patch.object(self.notifier, 'tunnel_delete') as tunnel_delete: self.assertRaises(exc.InvalidInput, self.callbacks.tunnel_sync, 'fake_context', **kwargs) self.assertFalse(tunnel_update.called) self.assertFalse(tunnel_delete.called) def test_tunnel_sync_called_without_host_passed(self): kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'tunnel_type': self.TYPE, 'host': None} self._test_tunnel_sync(kwargs) def test_tunnel_sync_called_with_host_passed_for_existing_tunnel_ip(self): self.driver.add_endpoint(TUNNEL_IP_ONE, None) kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'tunnel_type': self.TYPE, 'host': HOST_ONE} self._test_tunnel_sync(kwargs) def test_tunnel_sync_called_with_host_passed(self): kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'tunnel_type': self.TYPE, 'host': HOST_ONE} self._test_tunnel_sync(kwargs) def test_tunnel_sync_called_for_existing_endpoint(self): self.driver.add_endpoint(TUNNEL_IP_ONE, HOST_ONE) 
kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'tunnel_type': self.TYPE, 'host': HOST_ONE} self._test_tunnel_sync(kwargs) def test_tunnel_sync_called_for_existing_host_with_tunnel_ip_changed(self): self.driver.add_endpoint(TUNNEL_IP_ONE, HOST_ONE) kwargs = {'tunnel_ip': TUNNEL_IP_TWO, 'tunnel_type': self.TYPE, 'host': HOST_ONE} self._test_tunnel_sync(kwargs, True) def test_tunnel_sync_called_with_used_tunnel_ip_case_one(self): self.driver.add_endpoint(TUNNEL_IP_ONE, HOST_ONE) kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'tunnel_type': self.TYPE, 'host': HOST_TWO} self._test_tunnel_sync_raises(kwargs) def test_tunnel_sync_called_with_used_tunnel_ip_case_two(self): self.driver.add_endpoint(TUNNEL_IP_ONE, None) self.driver.add_endpoint(TUNNEL_IP_TWO, HOST_TWO) kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'tunnel_type': self.TYPE, 'host': HOST_TWO} self._test_tunnel_sync_raises(kwargs) def test_tunnel_sync_called_without_tunnel_ip(self): kwargs = {'tunnel_type': self.TYPE, 'host': None} self._test_tunnel_sync_raises(kwargs) def test_tunnel_sync_called_without_tunnel_type(self): kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'host': None} self._test_tunnel_sync_raises(kwargs)
apache-2.0
UdjinM6/omim
data/benchmarks/tk_results_viewer.py
53
9617
from Tkinter import * import tkMessageBox import math import sys import copy import os import shutil import time def less_start_time(t1, t2): fmt = '%a_%b_%d_%H:%M:%S_%Y' return time.strptime(t1[0].replace('__','_'), fmt) < time.strptime(t2[0].replace('__','_'), fmt) class BenchmarkResultsFrame(Frame): def __init__(self, cfg_file, results_file, master=None): Frame.__init__(self, master) self.master.title("Benchmark Results Viewer") self.grid(padx=10, pady=10) self.createWidgets() self.resultsDir = os.path.dirname(results_file) self.readResults(cfg_file, results_file) self.current = None self.poll() def scale_level(self, r): dx = 360.0 / (r[2] - r[0]) dy = 360.0 / (r[3] - r[1]) v = (dx + dy) / 2.0 l = math.log(v) / math.log(2.0) + 1 return math.floor(max(0, min(l, 17)) + 0.5) def poll(self): now = self.resultsList.curselection() if now != self.current: self.onResultsChanged(now) self.current = now self.after(100, self.poll) def onResultsChanged(self, now): if len(now) == 0: return self.resultsView.delete(1.0, END) for i in now: start_time = self.resultsList.get(int(i)) for rev_name in self.rev.keys(): if start_time in self.rev[rev_name]: self.resultsView.insert(END, rev_name + "\n") self.resultsView.insert(END, "\t%s\n" % (start_time)) for bench_name in self.rev[rev_name][start_time].keys(): if bench_name not in self.bench_cfg: s = "\t\t%s [config info not found]\n" % (bench_name) self.resultsView.insert(END, s) else: cfg_info = self.bench_cfg[bench_name] if not cfg_info[0]: s = "\t\t%s [%s %s %s %s], endScale=%d\n" % \ (bench_name, cfg_info[1], cfg_info[2], cfg_info[3], cfg_info[4], cfg_info[5]) self.resultsView.insert(END, s) else: s = "\t\t%s endScale=%d\n" % \ (bench_name, cfg_info[1]) self.resultsView.insert(END, s) k = self.rev[rev_name][start_time][bench_name].keys() k.sort() for scale_level in k: s = "\t\t\t scale: %d, duration: %f\n" % \ (scale_level, self.rev[rev_name][start_time][bench_name][scale_level]) self.resultsView.insert(END, s) 
self.resultsView.insert(END, "-------------------------------------------------------------------\n") if self.hasTraceAttachement(start_time): self.resultsView.insert(END, "%s attached\n" % (self.traceAttachementFile(start_time))) def readResults(self, cfg_file, results_file): self.results_file = results_file f1 = open(cfg_file, "r") lns1 = f1.readlines() lns1 = [l.split(" ") for l in lns1] # reading benchmark configuration info self.bench_cfg = {} for l in lns1: c_name = l[1] is_country = (len(l) == 3) self.bench_cfg[l[1]] = [] self.bench_cfg[l[1]].append(is_country) if len(l) > 0: if not is_country: self.bench_cfg[c_name].append(float(l[2])) self.bench_cfg[c_name].append(float(l[3])) self.bench_cfg[c_name].append(float(l[4])) self.bench_cfg[c_name].append(float(l[5])) self.bench_cfg[c_name].append(int(l[6])) else: self.bench_cfg[c_name].append(int(l[2])) # reading results file f = open(results_file, "r") lns = f.readlines() lns = [l.strip().split(" ") for l in lns] self.rev = {} cur_start_time = None is_session = False self.start_time_list = [] self.completion_status = [] for l in lns: if l[0] == "START": if cur_start_time is not None: if is_session: # unfinished benchmark, mark as incomplete self.completion_status.append(0) else: # unknown status self.completion_status.append(2) cur_start_time = l[1].strip("\n") self.start_time_list.append(cur_start_time) is_session = True continue if l[0] == "END": if not is_session: raise "END without matching START" self.completion_status.append(1) cur_start_time = None is_session = False continue if len(l) < 9: "android benchmarks don't write first item" l = [" "] + l rev_name = l[1] start_time = l[2] bench_name = l[3] if cur_start_time != start_time: # checking whether new start_time breaks current session if is_session: self.completion_status.append(0) is_session = False else: if cur_start_time is not None: # unknown session type self.completion_status.append(2) cur_start_time = start_time 
self.start_time_list.append(cur_start_time) rect = [float(l[4]), float(l[5]), float(l[6]), float(l[7])] dur = float(l[8]) if rev_name not in self.rev: self.rev[rev_name] = {} if start_time not in self.rev[rev_name]: self.rev[rev_name][start_time] = {} if bench_name not in self.rev[rev_name][start_time]: self.rev[rev_name][start_time][bench_name] = {} scale = self.scale_level(rect) if scale not in self.rev[rev_name][start_time][bench_name]: self.rev[rev_name][start_time][bench_name][scale] = 0 self.rev[rev_name][start_time][bench_name][scale] += dur if cur_start_time is not None: if is_session: self.completion_status.append(0) else: self.completion_status.append(2) # sorting session list. latest results go first. if len(self.start_time_list) != len(self.completion_status): raise "something wrong with file parsing, list sizes don't match" self.start_time_pairs = [(self.start_time_list[i], self.completion_status[i]) for i in range(0, len(self.start_time_list))] self.start_time_pairs.sort(less_start_time) # updating resultList with names and completion status i = 0 for e in self.start_time_pairs: self.resultsList.insert(END, e[0]) if e[1] == 0: self.resultsList.itemconfig(i, fg="red") elif e[1] == 1: self.resultsList.itemconfig(i, fg="blue") elif e[1] == 2: self.resultsList.itemconfig(i, fg="green") i += 1 def hasTraceAttachement(self, start_time): return self.traceAttachementFile(start_time) is not None def traceAttachementName(self, start_time): return start_time.strip("\n").replace("_", "").replace(":", "").replace("-", "") def traceAttachementFile(self, start_time): trace_files = [t for t in os.listdir(os.path.join(os.curdir, self.resultsDir)) if t.endswith(".trace")] sst = self.traceAttachementName(start_time) for tf in trace_files: stf = tf[0:-6].replace("_", "").replace(":", "").replace("-", "") if stf == sst: return tf def deleteTraceAttachement(self, start_time): sst = self.traceAttachementName(start_time) if tkMessageBox.askokcancel("Profiler results found", 
"Delete " + self.traceAttachementFile(start_time)): shutil.rmtree(self.traceAttachementFile(start_time)) def removeRecord(self, event): idx = self.resultsList.nearest(event.y) start_time = self.resultsList.get(idx) if tkMessageBox.askokcancel("Are you sure?", "Delete results for " + start_time + " session?"): lns = open(self.results_file, "r").readlines() lns = [l for l in lns if l.find(start_time) == -1] open(self.results_file, "w").writelines(lns) self.resultsList.delete(idx) if self.hasTraceAttachement(start_time): self.deleteTraceAttachement(start_time) def createWidgets(self): self.resultsList = Listbox(self, width=30, height=60, selectmode=EXTENDED) self.resultsList.grid(row=0, column=0) self.resultsList.bind("<Double-Button-1>", self.removeRecord) self.resultsView = Text(self, width=80, height=60) self.resultsView.grid(row=0, column=1, columnspan=3) if __name__ == "__main__": if len(sys.argv) < 3: print "usage: python %s [config.info] [results.txt]" % sys.argv[0] root = BenchmarkResultsFrame(sys.argv[1], sys.argv[2]) root.mainloop()
apache-2.0
shamanu4/netmiko
netmiko/ssh_dispatcher.py
1
3921
"""Controls selection of proper class based on the device type.""" from __future__ import unicode_literals from netmiko.cisco import CiscoIosSSH from netmiko.cisco import CiscoIosTelnet from netmiko.cisco import CiscoAsaSSH from netmiko.cisco import CiscoNxosSSH from netmiko.cisco import CiscoXrSSH from netmiko.cisco import CiscoWlcSSH from netmiko.cisco import CiscoS300SSH from netmiko.eltex import EltexSSH from netmiko.arista import AristaSSH from netmiko.hp import HPProcurveSSH, HPComwareSSH from netmiko.huawei import HuaweiSSH from netmiko.f5 import F5LtmSSH from netmiko.juniper import JuniperSSH from netmiko.brocade import BrocadeNosSSH from netmiko.brocade import BrocadeNetironSSH from netmiko.brocade import BrocadeFastironSSH from netmiko.fortinet import FortinetSSH from netmiko.a10 import A10SSH from netmiko.avaya import AvayaVspSSH from netmiko.avaya import AvayaErsSSH from netmiko.linux import LinuxSSH from netmiko.ovs import OvsLinuxSSH from netmiko.enterasys import EnterasysSSH from netmiko.extreme import ExtremeSSH from netmiko.alcatel import AlcatelSrosSSH from netmiko.dell import DellForce10SSH from netmiko.paloalto import PaloAltoPanosSSH from netmiko.quanta import QuantaMeshSSH from netmiko.aruba import ArubaSSH from netmiko.vyos import VyOSSSH from netmiko.ubiquiti import UbiquitiEdgeSSH from netmiko.cisco import CiscoTpTcCeSSH from netmiko.edgecore import EdgeCoreTelnet # The keys of this dictionary are the supported device_types CLASS_MAPPER_SSH = { 'cisco_ios': CiscoIosSSH, 'cisco_xe': CiscoIosSSH, 'cisco_asa': CiscoAsaSSH, 'cisco_nxos': CiscoNxosSSH, 'cisco_xr': CiscoXrSSH, 'cisco_wlc': CiscoWlcSSH, 'cisco_s300': CiscoS300SSH, 'eltex': EltexSSH, 'arista_eos': AristaSSH, 'hp_procurve': HPProcurveSSH, 'hp_comware': HPComwareSSH, 'huawei': HuaweiSSH, 'f5_ltm': F5LtmSSH, 'juniper': JuniperSSH, 'juniper_junos': JuniperSSH, 'brocade_vdx': BrocadeNosSSH, 'brocade_nos': BrocadeNosSSH, 'brocade_fastiron': BrocadeFastironSSH, 'brocade_netiron': 
BrocadeNetironSSH, 'vyos': VyOSSSH, 'brocade_vyos': VyOSSSH, 'vyatta_vyos': VyOSSSH, 'a10': A10SSH, 'avaya_vsp': AvayaVspSSH, 'avaya_ers': AvayaErsSSH, 'linux': LinuxSSH, 'ovs_linux': OvsLinuxSSH, 'enterasys': EnterasysSSH, 'extreme': ExtremeSSH, 'alcatel_sros': AlcatelSrosSSH, 'fortinet': FortinetSSH, 'dell_force10': DellForce10SSH, 'paloalto_panos': PaloAltoPanosSSH, 'quanta_mesh': QuantaMeshSSH, 'aruba_os': ArubaSSH, 'ubiquiti_edge': UbiquitiEdgeSSH, 'cisco_tp': CiscoTpTcCeSSH, } # Also support keys that end in _ssh # Note: Protocol selection no longer rely on device_type but defined in connection class (e.g. CiscoTelnetConnection) new_mapper = {} for k, v in CLASS_MAPPER_SSH.items(): new_mapper[k] = v alt_key = k + u"_ssh" new_mapper[alt_key] = v CLASS_MAPPER = new_mapper # Add telnet drivers CLASS_MAPPER_TELNET = { 'cisco_ios_telnet': CiscoIosTelnet, 'edge_core_telnet': EdgeCoreTelnet, } CLASS_MAPPER.update(CLASS_MAPPER_TELNET) platforms = list(CLASS_MAPPER.keys()) platforms.sort() platforms_base = list(CLASS_MAPPER_SSH.keys()) + list(CLASS_MAPPER_TELNET.keys()) platforms_base.sort() platforms_str = u"\n".join(platforms_base) platforms_str = u"\n" + platforms_str def ConnectHandler(*args, **kwargs): """Factory function selects the proper class and creates object based on device_type.""" if kwargs['device_type'] not in platforms: raise ValueError('Unsupported device_type: ' 'currently supported platforms are: {0}'.format(platforms_str)) ConnectionClass = ssh_dispatcher(kwargs['device_type']) return ConnectionClass(*args, **kwargs) def ssh_dispatcher(device_type): """Select the class to be instantiated based on vendor/platform.""" return CLASS_MAPPER[device_type]
mit
BigBearGCU/googletest
test/gtest_xml_test_utils.py
1815
8876
#!/usr/bin/env python # # Copyright 2006, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Unit test utilities for gtest_xml_output""" __author__ = 'eefacm@gmail.com (Sean Mcafee)' import re from xml.dom import minidom, Node import gtest_test_utils GTEST_OUTPUT_FLAG = '--gtest_output' GTEST_DEFAULT_OUTPUT_FILE = 'test_detail.xml' class GTestXMLTestCase(gtest_test_utils.TestCase): """ Base class for tests of Google Test's XML output functionality. 
""" def AssertEquivalentNodes(self, expected_node, actual_node): """ Asserts that actual_node (a DOM node object) is equivalent to expected_node (another DOM node object), in that either both of them are CDATA nodes and have the same value, or both are DOM elements and actual_node meets all of the following conditions: * It has the same tag name as expected_node. * It has the same set of attributes as expected_node, each with the same value as the corresponding attribute of expected_node. Exceptions are any attribute named "time", which needs only be convertible to a floating-point number and any attribute named "type_param" which only has to be non-empty. * It has an equivalent set of child nodes (including elements and CDATA sections) as expected_node. Note that we ignore the order of the children as they are not guaranteed to be in any particular order. """ if expected_node.nodeType == Node.CDATA_SECTION_NODE: self.assertEquals(Node.CDATA_SECTION_NODE, actual_node.nodeType) self.assertEquals(expected_node.nodeValue, actual_node.nodeValue) return self.assertEquals(Node.ELEMENT_NODE, actual_node.nodeType) self.assertEquals(Node.ELEMENT_NODE, expected_node.nodeType) self.assertEquals(expected_node.tagName, actual_node.tagName) expected_attributes = expected_node.attributes actual_attributes = actual_node .attributes self.assertEquals( expected_attributes.length, actual_attributes.length, 'attribute numbers differ in element %s:\nExpected: %r\nActual: %r' % ( actual_node.tagName, expected_attributes.keys(), actual_attributes.keys())) for i in range(expected_attributes.length): expected_attr = expected_attributes.item(i) actual_attr = actual_attributes.get(expected_attr.name) self.assert_( actual_attr is not None, 'expected attribute %s not found in element %s' % (expected_attr.name, actual_node.tagName)) self.assertEquals( expected_attr.value, actual_attr.value, ' values of attribute %s in element %s differ: %s vs %s' % (expected_attr.name, actual_node.tagName, 
expected_attr.value, actual_attr.value)) expected_children = self._GetChildren(expected_node) actual_children = self._GetChildren(actual_node) self.assertEquals( len(expected_children), len(actual_children), 'number of child elements differ in element ' + actual_node.tagName) for child_id, child in expected_children.iteritems(): self.assert_(child_id in actual_children, '<%s> is not in <%s> (in element %s)' % (child_id, actual_children, actual_node.tagName)) self.AssertEquivalentNodes(child, actual_children[child_id]) identifying_attribute = { 'testsuites': 'name', 'testsuite': 'name', 'testcase': 'name', 'failure': 'message', } def _GetChildren(self, element): """ Fetches all of the child nodes of element, a DOM Element object. Returns them as the values of a dictionary keyed by the IDs of the children. For <testsuites>, <testsuite> and <testcase> elements, the ID is the value of their "name" attribute; for <failure> elements, it is the value of the "message" attribute; CDATA sections and non-whitespace text nodes are concatenated into a single CDATA section with ID "detail". An exception is raised if any element other than the above four is encountered, if two child elements with the same identifying attributes are encountered, or if any other type of node is encountered. 
""" children = {} for child in element.childNodes: if child.nodeType == Node.ELEMENT_NODE: self.assert_(child.tagName in self.identifying_attribute, 'Encountered unknown element <%s>' % child.tagName) childID = child.getAttribute(self.identifying_attribute[child.tagName]) self.assert_(childID not in children) children[childID] = child elif child.nodeType in [Node.TEXT_NODE, Node.CDATA_SECTION_NODE]: if 'detail' not in children: if (child.nodeType == Node.CDATA_SECTION_NODE or not child.nodeValue.isspace()): children['detail'] = child.ownerDocument.createCDATASection( child.nodeValue) else: children['detail'].nodeValue += child.nodeValue else: self.fail('Encountered unexpected node type %d' % child.nodeType) return children def NormalizeXml(self, element): """ Normalizes Google Test's XML output to eliminate references to transient information that may change from run to run. * The "time" attribute of <testsuites>, <testsuite> and <testcase> elements is replaced with a single asterisk, if it contains only digit characters. * The "timestamp" attribute of <testsuites> elements is replaced with a single asterisk, if it contains a valid ISO8601 datetime value. * The "type_param" attribute of <testcase> elements is replaced with a single asterisk (if it sn non-empty) as it is the type name returned by the compiler and is platform dependent. * The line info reported in the first line of the "message" attribute and CDATA section of <failure> elements is replaced with the file's basename and a single asterisk for the line number. * The directory names in file paths are removed. * The stack traces are removed. 
""" if element.tagName == 'testsuites': timestamp = element.getAttributeNode('timestamp') timestamp.value = re.sub(r'^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d$', '*', timestamp.value) if element.tagName in ('testsuites', 'testsuite', 'testcase'): time = element.getAttributeNode('time') time.value = re.sub(r'^\d+(\.\d+)?$', '*', time.value) type_param = element.getAttributeNode('type_param') if type_param and type_param.value: type_param.value = '*' elif element.tagName == 'failure': source_line_pat = r'^.*[/\\](.*:)\d+\n' # Replaces the source line information with a normalized form. message = element.getAttributeNode('message') message.value = re.sub(source_line_pat, '\\1*\n', message.value) for child in element.childNodes: if child.nodeType == Node.CDATA_SECTION_NODE: # Replaces the source line information with a normalized form. cdata = re.sub(source_line_pat, '\\1*\n', child.nodeValue) # Removes the actual stack trace. child.nodeValue = re.sub(r'\nStack trace:\n(.|\n)*', '', cdata) for child in element.childNodes: if child.nodeType == Node.ELEMENT_NODE: self.NormalizeXml(child)
bsd-3-clause
dati91/servo
tests/wpt/web-platform-tests/tools/third_party/more-itertools/more_itertools/tests/test_recipes.py
39
19558
"""Unit tests for the itertools-recipes port in ``more_itertools.recipes``.

``load_tests`` additionally pulls the module's doctests into the suite.
"""
from doctest import DocTestSuite
from unittest import TestCase

from itertools import combinations
from six.moves import range

import more_itertools as mi


def load_tests(loader, tests, ignore):
    # Add the doctests
    tests.addTests(DocTestSuite('more_itertools.recipes'))
    return tests


class AccumulateTests(TestCase):
    """Tests for ``accumulate()``"""

    def test_empty(self):
        """Test that an empty input returns an empty output"""
        self.assertEqual(list(mi.accumulate([])), [])

    def test_default(self):
        """Test accumulate with the default function (addition)"""
        self.assertEqual(list(mi.accumulate([1, 2, 3])), [1, 3, 6])

    def test_bogus_function(self):
        """Test accumulate with an invalid function"""
        with self.assertRaises(TypeError):
            list(mi.accumulate([1, 2, 3], func=lambda x: x))

    def test_custom_function(self):
        """Test accumulate with a custom function"""
        self.assertEqual(
            list(mi.accumulate((1, 2, 3, 2, 1), func=max)),
            [1, 2, 3, 3, 3]
        )


class TakeTests(TestCase):
    """Tests for ``take()``"""

    def test_simple_take(self):
        """Test basic usage"""
        t = mi.take(5, range(10))
        self.assertEqual(t, [0, 1, 2, 3, 4])

    def test_null_take(self):
        """Check the null case"""
        t = mi.take(0, range(10))
        self.assertEqual(t, [])

    def test_negative_take(self):
        """Make sure taking negative items results in a ValueError"""
        self.assertRaises(ValueError, lambda: mi.take(-3, range(10)))

    def test_take_too_much(self):
        """Taking more than an iterator has remaining should return what the
        iterator has remaining.

        """
        t = mi.take(10, range(5))
        self.assertEqual(t, [0, 1, 2, 3, 4])


class TabulateTests(TestCase):
    """Tests for ``tabulate()``"""

    def test_simple_tabulate(self):
        """Test the happy path"""
        t = mi.tabulate(lambda x: x)
        f = tuple([next(t) for _ in range(3)])
        self.assertEqual(f, (0, 1, 2))

    def test_count(self):
        """Ensure tabulate accepts specific count"""
        t = mi.tabulate(lambda x: 2 * x, -1)
        f = (next(t), next(t), next(t))
        self.assertEqual(f, (-2, 0, 2))


class TailTests(TestCase):
    """Tests for ``tail()``"""

    def test_greater(self):
        """Length of iterable is greater than requested tail"""
        self.assertEqual(list(mi.tail(3, 'ABCDEFG')), ['E', 'F', 'G'])

    def test_equal(self):
        """Length of iterable is equal to the requested tail"""
        self.assertEqual(
            list(mi.tail(7, 'ABCDEFG')),
            ['A', 'B', 'C', 'D', 'E', 'F', 'G']
        )

    def test_less(self):
        """Length of iterable is less than requested tail"""
        self.assertEqual(
            list(mi.tail(8, 'ABCDEFG')),
            ['A', 'B', 'C', 'D', 'E', 'F', 'G']
        )


class ConsumeTests(TestCase):
    """Tests for ``consume()``"""

    def test_sanity(self):
        """Test basic functionality"""
        r = (x for x in range(10))
        mi.consume(r, 3)
        self.assertEqual(3, next(r))

    def test_null_consume(self):
        """Check the null case"""
        r = (x for x in range(10))
        mi.consume(r, 0)
        self.assertEqual(0, next(r))

    def test_negative_consume(self):
        """Check that negative consumption throws an error"""
        r = (x for x in range(10))
        self.assertRaises(ValueError, lambda: mi.consume(r, -1))

    def test_total_consume(self):
        """Check that iterator is totally consumed by default"""
        r = (x for x in range(10))
        mi.consume(r)
        self.assertRaises(StopIteration, lambda: next(r))


class NthTests(TestCase):
    """Tests for ``nth()``"""

    def test_basic(self):
        """Make sure the nth item is returned"""
        l = range(10)
        for i, v in enumerate(l):
            self.assertEqual(mi.nth(l, i), v)

    def test_default(self):
        """Ensure a default value is returned when nth item not found"""
        l = range(3)
        self.assertEqual(mi.nth(l, 100, "zebra"), "zebra")

    def test_negative_item_raises(self):
        """Ensure asking for a negative item raises an exception"""
        self.assertRaises(ValueError, lambda: mi.nth(range(10), -3))


class AllEqualTests(TestCase):
    """Tests for ``all_equal()``"""

    def test_true(self):
        """Everything is equal"""
        self.assertTrue(mi.all_equal('aaaaaa'))
        self.assertTrue(mi.all_equal([0, 0, 0, 0]))

    def test_false(self):
        """Not everything is equal"""
        self.assertFalse(mi.all_equal('aaaaab'))
        self.assertFalse(mi.all_equal([0, 0, 0, 1]))

    def test_tricky(self):
        """Not everything is identical, but everything is equal"""
        items = [1, complex(1, 0), 1.0]
        self.assertTrue(mi.all_equal(items))

    def test_empty(self):
        """Return True if the iterable is empty"""
        self.assertTrue(mi.all_equal(''))
        self.assertTrue(mi.all_equal([]))

    def test_one(self):
        """Return True if the iterable is singular"""
        self.assertTrue(mi.all_equal('0'))
        self.assertTrue(mi.all_equal([0]))


class QuantifyTests(TestCase):
    """Tests for ``quantify()``"""

    def test_happy_path(self):
        """Make sure True count is returned"""
        q = [True, False, True]
        self.assertEqual(mi.quantify(q), 2)

    def test_custom_predicate(self):
        """Ensure non-default predicates return as expected"""
        q = range(10)
        self.assertEqual(mi.quantify(q, lambda x: x % 2 == 0), 5)


class PadnoneTests(TestCase):
    """Tests for ``padnone()``"""

    def test_happy_path(self):
        """wrapper iterator should return None indefinitely"""
        r = range(2)
        p = mi.padnone(r)
        self.assertEqual([0, 1, None, None], [next(p) for _ in range(4)])


class NcyclesTests(TestCase):
    """Tests for ``ncycles()``"""

    def test_happy_path(self):
        """cycle a sequence three times"""
        r = ["a", "b", "c"]
        n = mi.ncycles(r, 3)
        self.assertEqual(
            ["a", "b", "c", "a", "b", "c", "a", "b", "c"],
            list(n)
        )

    def test_null_case(self):
        """asking for 0 cycles should return an empty iterator"""
        n = mi.ncycles(range(100), 0)
        self.assertRaises(StopIteration, lambda: next(n))

    def test_pathalogical_case(self):
        """asking for negative cycles should return an empty iterator"""
        n = mi.ncycles(range(100), -10)
        self.assertRaises(StopIteration, lambda: next(n))


class DotproductTests(TestCase):
    """Tests for ``dotproduct()``"""

    def test_happy_path(self):
        """simple dotproduct example"""
        self.assertEqual(400, mi.dotproduct([10, 10], [20, 20]))


class FlattenTests(TestCase):
    """Tests for ``flatten()``"""

    def test_basic_usage(self):
        """ensure list of lists is flattened one level"""
        f = [[0, 1, 2], [3, 4, 5]]
        self.assertEqual(list(range(6)), list(mi.flatten(f)))

    def test_single_level(self):
        """ensure list of lists is flattened only one level"""
        f = [[0, [1, 2]], [[3, 4], 5]]
        self.assertEqual([0, [1, 2], [3, 4], 5], list(mi.flatten(f)))


class RepeatfuncTests(TestCase):
    """Tests for ``repeatfunc()``"""

    def test_simple_repeat(self):
        """test simple repeated functions"""
        r = mi.repeatfunc(lambda: 5)
        self.assertEqual([5, 5, 5, 5, 5], [next(r) for _ in range(5)])

    def test_finite_repeat(self):
        """ensure limited repeat when times is provided"""
        r = mi.repeatfunc(lambda: 5, times=5)
        self.assertEqual([5, 5, 5, 5, 5], list(r))

    def test_added_arguments(self):
        """ensure arguments are applied to the function"""
        r = mi.repeatfunc(lambda x: x, 2, 3)
        self.assertEqual([3, 3], list(r))

    def test_null_times(self):
        """repeat 0 should return an empty iterator"""
        r = mi.repeatfunc(range, 0, 3)
        self.assertRaises(StopIteration, lambda: next(r))


class PairwiseTests(TestCase):
    """Tests for ``pairwise()``"""

    def test_base_case(self):
        """ensure an iterable will return pairwise"""
        p = mi.pairwise([1, 2, 3])
        self.assertEqual([(1, 2), (2, 3)], list(p))

    def test_short_case(self):
        """ensure an empty iterator if there's not enough values to pair"""
        p = mi.pairwise("a")
        self.assertRaises(StopIteration, lambda: next(p))


class GrouperTests(TestCase):
    """Tests for ``grouper()``"""

    def test_even(self):
        """Test when group size divides evenly into the length of
        the iterable.

        """
        self.assertEqual(
            list(mi.grouper(3, 'ABCDEF')),
            [('A', 'B', 'C'), ('D', 'E', 'F')]
        )

    def test_odd(self):
        """Test when group size does not divide evenly into the length of the
        iterable.

        """
        self.assertEqual(
            list(mi.grouper(3, 'ABCDE')),
            [('A', 'B', 'C'), ('D', 'E', None)]
        )

    def test_fill_value(self):
        """Test that the fill value is used to pad the final group"""
        self.assertEqual(
            list(mi.grouper(3, 'ABCDE', 'x')),
            [('A', 'B', 'C'), ('D', 'E', 'x')]
        )


class RoundrobinTests(TestCase):
    """Tests for ``roundrobin()``"""

    def test_even_groups(self):
        """Ensure ordered output from evenly populated iterables"""
        self.assertEqual(
            list(mi.roundrobin('ABC', [1, 2, 3], range(3))),
            ['A', 1, 0, 'B', 2, 1, 'C', 3, 2]
        )

    def test_uneven_groups(self):
        """Ensure ordered output from unevenly populated iterables"""
        self.assertEqual(
            list(mi.roundrobin('ABCD', [1, 2], range(0))),
            ['A', 1, 'B', 2, 'C', 'D']
        )


class PartitionTests(TestCase):
    """Tests for ``partition()``"""

    def test_bool(self):
        """Test when pred() returns a boolean"""
        lesser, greater = mi.partition(lambda x: x > 5, range(10))
        self.assertEqual(list(lesser), [0, 1, 2, 3, 4, 5])
        self.assertEqual(list(greater), [6, 7, 8, 9])

    def test_arbitrary(self):
        """Test when pred() returns an integer"""
        divisibles, remainders = mi.partition(lambda x: x % 3, range(10))
        self.assertEqual(list(divisibles), [0, 3, 6, 9])
        self.assertEqual(list(remainders), [1, 2, 4, 5, 7, 8])


class PowersetTests(TestCase):
    """Tests for ``powerset()``"""

    def test_combinatorics(self):
        """Ensure a proper enumeration"""
        p = mi.powerset([1, 2, 3])
        self.assertEqual(
            list(p),
            [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
        )


class UniqueEverseenTests(TestCase):
    """Tests for ``unique_everseen()``"""

    def test_everseen(self):
        """ensure duplicate elements are ignored"""
        u = mi.unique_everseen('AAAABBBBCCDAABBB')
        self.assertEqual(
            ['A', 'B', 'C', 'D'],
            list(u)
        )

    def test_custom_key(self):
        """ensure the custom key comparison works"""
        u = mi.unique_everseen('aAbACCc', key=str.lower)
        self.assertEqual(list('abC'), list(u))

    def test_unhashable(self):
        """ensure things work for unhashable items"""
        iterable = ['a', [1, 2, 3], [1, 2, 3], 'a']
        u = mi.unique_everseen(iterable)
        self.assertEqual(list(u), ['a', [1, 2, 3]])

    def test_unhashable_key(self):
        """ensure things work for unhashable items with a custom key"""
        iterable = ['a', [1, 2, 3], [1, 2, 3], 'a']
        u = mi.unique_everseen(iterable, key=lambda x: x)
        self.assertEqual(list(u), ['a', [1, 2, 3]])


class UniqueJustseenTests(TestCase):
    """Tests for ``unique_justseen()``"""

    def test_justseen(self):
        """ensure only last item is remembered"""
        u = mi.unique_justseen('AAAABBBCCDABB')
        self.assertEqual(list('ABCDAB'), list(u))

    def test_custom_key(self):
        """ensure the custom key comparison works"""
        u = mi.unique_justseen('AABCcAD', str.lower)
        self.assertEqual(list('ABCAD'), list(u))


class IterExceptTests(TestCase):
    """Tests for ``iter_except()``"""

    def test_exact_exception(self):
        """ensure the exact specified exception is caught"""
        l = [1, 2, 3]
        i = mi.iter_except(l.pop, IndexError)
        self.assertEqual(list(i), [3, 2, 1])

    def test_generic_exception(self):
        """ensure the generic exception can be caught"""
        l = [1, 2]
        i = mi.iter_except(l.pop, Exception)
        self.assertEqual(list(i), [2, 1])

    def test_uncaught_exception_is_raised(self):
        """ensure a non-specified exception is raised"""
        l = [1, 2, 3]
        i = mi.iter_except(l.pop, KeyError)
        self.assertRaises(IndexError, lambda: list(i))

    def test_first(self):
        """ensure first is run before the function"""
        l = [1, 2, 3]
        f = lambda: 25
        i = mi.iter_except(l.pop, IndexError, f)
        self.assertEqual(list(i), [25, 3, 2, 1])


class FirstTrueTests(TestCase):
    """Tests for ``first_true()``"""

    def test_something_true(self):
        """Test with no keywords"""
        self.assertEqual(mi.first_true(range(10)), 1)

    def test_nothing_true(self):
        """Test default return value."""
        self.assertEqual(mi.first_true([0, 0, 0]), False)

    def test_default(self):
        """Test with a default keyword"""
        self.assertEqual(mi.first_true([0, 0, 0], default='!'), '!')

    def test_pred(self):
        """Test with a custom predicate"""
        self.assertEqual(
            mi.first_true([2, 4, 6], pred=lambda x: x % 3 == 0), 6
        )


class RandomProductTests(TestCase):
    """Tests for ``random_product()``

    Since random.choice() has different results with the same seed across
    python versions 2.x and 3.x, these tests use highly probable events to
    create predictable outcomes across platforms.
    """

    def test_simple_lists(self):
        """Ensure that one item is chosen from each list in each pair.
        Also ensure that each item from each list eventually appears in
        the chosen combinations.

        Odds are roughly 1 in 7.1 * 10e16 that one item from either list will
        not be chosen after 100 samplings of one item from each list. Just to
        be safe, better use a known random seed, too.

        """
        nums = [1, 2, 3]
        lets = ['a', 'b', 'c']
        n, m = zip(*[mi.random_product(nums, lets) for _ in range(100)])
        n, m = set(n), set(m)
        self.assertEqual(n, set(nums))
        self.assertEqual(m, set(lets))
        self.assertEqual(len(n), len(nums))
        self.assertEqual(len(m), len(lets))

    def test_list_with_repeat(self):
        """ensure multiple items are chosen, and that they appear to be chosen
        from one list then the next, in proper order.

        """
        nums = [1, 2, 3]
        lets = ['a', 'b', 'c']
        r = list(mi.random_product(nums, lets, repeat=100))
        self.assertEqual(2 * 100, len(r))
        n, m = set(r[::2]), set(r[1::2])
        self.assertEqual(n, set(nums))
        self.assertEqual(m, set(lets))
        self.assertEqual(len(n), len(nums))
        self.assertEqual(len(m), len(lets))


class RandomPermutationTests(TestCase):
    """Tests for ``random_permutation()``"""

    def test_full_permutation(self):
        """ensure every item from the iterable is returned in a new ordering

        15 elements have a 1 in 1.3 * 10e12 of appearing in sorted order, so
        we fix a seed value just to be sure.

        """
        i = range(15)
        r = mi.random_permutation(i)
        self.assertEqual(set(i), set(r))
        if i == r:
            raise AssertionError("Values were not permuted")

    def test_partial_permutation(self):
        """ensure all returned items are from the iterable, that the returned
        permutation is of the desired length, and that all items eventually
        get returned.

        Sampling 100 permutations of length 5 from a set of 15 leaves a
        (2/3)^100 chance that an item will not be chosen. Multiplied by 15
        items, there is a 1 in 2.6e16 chance that at least 1 item will not
        show up in the resulting output. Using a random seed will fix that.

        """
        items = range(15)
        item_set = set(items)
        all_items = set()
        for _ in range(100):
            permutation = mi.random_permutation(items, 5)
            self.assertEqual(len(permutation), 5)
            permutation_set = set(permutation)
            self.assertLessEqual(permutation_set, item_set)
            all_items |= permutation_set
        self.assertEqual(all_items, item_set)


class RandomCombinationTests(TestCase):
    """Tests for ``random_combination()``"""

    def test_psuedorandomness(self):
        """ensure different subsets of the iterable get returned over many
        samplings of random combinations"""
        items = range(15)
        all_items = set()
        for _ in range(50):
            combination = mi.random_combination(items, 5)
            all_items |= set(combination)
        self.assertEqual(all_items, set(items))

    def test_no_replacement(self):
        """ensure that elements are sampled without replacement"""
        items = range(15)
        for _ in range(50):
            combination = mi.random_combination(items, len(items))
            self.assertEqual(len(combination), len(set(combination)))
        self.assertRaises(
            ValueError, lambda: mi.random_combination(items, len(items) + 1)
        )


class RandomCombinationWithReplacementTests(TestCase):
    """Tests for ``random_combination_with_replacement()``"""

    def test_replacement(self):
        """ensure that elements are sampled with replacement"""
        items = range(5)
        combo = mi.random_combination_with_replacement(items, len(items) * 2)
        self.assertEqual(2 * len(items), len(combo))
        if len(set(combo)) == len(combo):
            raise AssertionError("Combination contained no duplicates")

    def test_pseudorandomness(self):
        """ensure different subsets of the iterable get returned over many
        samplings of random combinations"""
        items = range(15)
        all_items = set()
        for _ in range(50):
            combination = mi.random_combination_with_replacement(items, 5)
            all_items |= set(combination)
        self.assertEqual(all_items, set(items))


class NthCombinationTests(TestCase):
    """Tests for ``nth_combination()``"""

    def test_basic(self):
        # The nth combination must match itertools.combinations enumeration
        # order for every valid index.
        iterable = 'abcdefg'
        r = 4
        for index, expected in enumerate(combinations(iterable, r)):
            actual = mi.nth_combination(iterable, r, index)
            self.assertEqual(actual, expected)

    def test_long(self):
        # A large index that would be infeasible to reach by enumeration.
        actual = mi.nth_combination(range(180), 4, 2000000)
        expected = (2, 12, 35, 126)
        self.assertEqual(actual, expected)


class PrependTests(TestCase):
    """Tests for ``prepend()``"""

    def test_basic(self):
        value = 'a'
        iterator = iter('bcdefg')
        actual = list(mi.prepend(value, iterator))
        expected = list('abcdefg')
        self.assertEqual(actual, expected)

    def test_multiple(self):
        # prepend() adds its value as a single item, not item-by-item.
        value = 'ab'
        iterator = iter('cdefg')
        actual = tuple(mi.prepend(value, iterator))
        expected = ('ab',) + tuple('cdefg')
        self.assertEqual(actual, expected)
mpl-2.0
Juniper/contrail-dev-neutron
neutron/db/migration/alembic_migrations/versions/157a5d299379_ml2_binding_profile.py
11
1586
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ml2 binding:profile Revision ID: 157a5d299379 Revises: 50d5ba354c23 Create Date: 2014-02-13 23:48:25.147279 """ # revision identifiers, used by Alembic. revision = '157a5d299379' down_revision = '50d5ba354c23' # Change to ['*'] if this migration applies to all plugins migration_for_plugins = [ 'neutron.plugins.ml2.plugin.Ml2Plugin' ] from alembic import op import sqlalchemy as sa from neutron.db import migration def upgrade(active_plugins=None, options=None): if not migration.should_run(active_plugins, migration_for_plugins): return op.add_column('ml2_port_bindings', sa.Column('profile', sa.String(length=4095), nullable=False, server_default='')) def downgrade(active_plugins=None, options=None): if not migration.should_run(active_plugins, migration_for_plugins): return op.drop_column('ml2_port_bindings', 'profile')
apache-2.0
jashandeep-sohi/pyds
docs/conf.py
1
8535
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# pyds documentation build configuration file, created by
# sphinx-quickstart on Thu May 29 20:47:19 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys
import os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# The package lives one directory up from docs/, so make it importable for
# autodoc and for reading the version below.
sys.path.insert(0, os.path.abspath('..'))

from pyds import __version__

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.autosummary',
]

# Cross-reference targets in the CPython documentation.
intersphinx_mapping = {
    'python': ('https://docs.python.org/3', None),
}

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'pyds'
copyright = '2015, Jashandeep Sohi'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# The default language to highlight source code in.
highlight_language = 'python3'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
#html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'pydsdoc'


# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'pyds.tex', 'pyds Documentation',
     'Jashandeep Sohi', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'pyds', 'pyds Documentation',
     ['Jashandeep Sohi'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False


# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'pyds', 'pyds Documentation',
     'Jashandeep Sohi', 'pyds', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
gpl-3.0
bmotlaghFLT/FLT_PhantomJS
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/system/stack_utils.py
215
2734
# Copyright (C) 2011 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"""Simple routines for logging, obtaining thread stack information.""" import sys import traceback def log_thread_state(logger, name, thread_id, msg=''): """Log information about the given thread state.""" stack = _find_thread_stack(thread_id) assert(stack is not None) logger("") logger("%s (tid %d) %s" % (name, thread_id, msg)) _log_stack(logger, stack) logger("") def _find_thread_stack(thread_id): """Returns a stack object that can be used to dump a stack trace for the given thread id (or None if the id is not found).""" for tid, stack in sys._current_frames().items(): if tid == thread_id: return stack return None def _log_stack(logger, stack): """Log a stack trace to the logger callback.""" for filename, lineno, name, line in traceback.extract_stack(stack): logger('File: "%s", line %d, in %s' % (filename, lineno, name)) if line: logger(' %s' % line.strip()) def log_traceback(logger, tb): stack = traceback.extract_tb(tb) for frame_str in traceback.format_list(stack): for line in frame_str.split('\n'): if line: logger(" %s" % line)
bsd-3-clause
arista-eosplus/ansible
test/units/modules/network/nxos/test_nxos_ip_interface.py
19
3005
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import json

from ansible.compat.tests.mock import patch
from ansible.modules.network.nxos import nxos_ip_interface
from .nxos_module import TestNxosModule, load_fixture, set_module_args


class TestNxosIPInterfaceModule(TestNxosModule):
    """Unit tests for the nxos_ip_interface module.

    Device interaction is fully mocked: interface mode, "show" command
    output (served from the nxos_ip_interface.cfg fixture), and config
    pushes are all patched out in setUp().
    """

    module = nxos_ip_interface

    def setUp(self):
        # Patch the three device-facing helpers inside the module under
        # test; the started mocks are stopped again in tearDown().
        self.mock_get_interface_mode = patch(
            'ansible.modules.network.nxos.nxos_ip_interface.get_interface_mode')
        self.get_interface_mode = self.mock_get_interface_mode.start()

        self.mock_send_show_command = patch(
            'ansible.modules.network.nxos.nxos_ip_interface.send_show_command')
        self.send_show_command = self.mock_send_show_command.start()

        self.mock_load_config = patch('ansible.modules.network.nxos.nxos_ip_interface.load_config')
        self.load_config = self.mock_load_config.start()

    def tearDown(self):
        # Undo the patches started in setUp() so other tests are unaffected.
        self.mock_get_interface_mode.stop()
        self.mock_send_show_command.stop()
        self.mock_load_config.stop()

    def load_fixtures(self, commands=None):
        # Canned device state: a layer3 interface whose current config is
        # taken from the nxos_ip_interface.cfg fixture (1.1.1.1/8 on eth2/1).
        self.get_interface_mode.return_value = 'layer3'
        self.send_show_command.return_value = [load_fixture('nxos_ip_interface.cfg')]
        self.load_config.return_value = None

    def test_nxos_ip_interface_ip_present(self):
        # Changing the address removes the old one before adding the new one.
        set_module_args(dict(interface='eth2/1', addr='1.1.1.2', mask=8))
        result = self.execute_module(changed=True)
        self.assertEqual(result['commands'],
                         ['interface eth2/1',
                          'no ip address 1.1.1.1/8',
                          'interface eth2/1',
                          'ip address 1.1.1.2/8'])

    def test_nxos_ip_interface_ip_idempotent(self):
        # Re-applying the already-configured address issues no commands.
        set_module_args(dict(interface='eth2/1', addr='1.1.1.1', mask=8))
        result = self.execute_module(changed=False)
        self.assertEqual(result['commands'], [])

    def test_nxos_ip_interface_ip_absent(self):
        # state=absent removes the configured address.
        set_module_args(dict(interface='eth2/1', state='absent',
                             addr='1.1.1.1', mask=8))
        result = self.execute_module(changed=True)
        self.assertEqual(result['commands'],
                         ['interface eth2/1',
                          'no ip address 1.1.1.1/8'])
gpl-3.0
robhudson/kuma
vendor/packages/translate/storage/lisa.py
24
13208
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2006-2011 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.

"""Parent class for LISA standards (TMX, TBX, XLIFF)"""

try:
    from lxml import etree
    from translate.misc.xml_helpers import (getText, getXMLlang, getXMLspace,
                                            namespaced, setXMLlang,
                                            setXMLspace)
except ImportError:
    raise ImportError("lxml is not installed. It might be possible to continue without support for XML formats.")

from translate.lang import data
from translate.storage import base


class LISAunit(base.TranslationUnit):
    """
    A single unit in the file. Provisional work is done to make several
    languages possible.
    """

    #The name of the root element of this unit type:(termEntry, tu, trans-unit)
    rootNode = ""
    # The name of the per language element of this unit type:(termEntry, tu,
    # trans-unit)
    languageNode = ""
    #The name of the innermost element of this unit type:(term, seg)
    textNode = ""

    namespace = None
    _default_xml_space = "preserve"
    """The default handling of spacing in the absense of an xml:space
    attribute. This is mostly for correcting XLIFF behaviour."""

    def __init__(self, source, empty=False, **kwargs):
        """Constructs a unit containing the given source string"""
        self._rich_source = None
        self._rich_target = None
        if empty:
            # An "empty" unit carries no XML of its own yet; it is normally
            # populated later via createfromxmlElement().
            self._state_n = 0
            return
        self.xmlelement = etree.Element(self.namespaced(self.rootNode))
        #add descrip, note, etc.
        super(LISAunit, self).__init__(source)

    def __eq__(self, other):
        """Compares two units by the text of each per-language node."""
        if not isinstance(other, LISAunit):
            return super(LISAunit, self).__eq__(other)
        languageNodes = self.getlanguageNodes()
        otherlanguageNodes = other.getlanguageNodes()
        if len(languageNodes) != len(otherlanguageNodes):
            return False
        for i in range(len(languageNodes)):
            mytext = self.getNodeText(languageNodes[i], getXMLspace(self.xmlelement, self._default_xml_space))
            othertext = other.getNodeText(otherlanguageNodes[i], getXMLspace(self.xmlelement, self._default_xml_space))
            if mytext != othertext:
                #TODO:^ maybe we want to take children and notes into account
                return False
        return True

    def namespaced(self, name):
        """Returns name in Clark notation.

        For example ``namespaced("source")`` in an XLIFF document might
        return::

            {urn:oasis:names:tc:xliff:document:1.1}source

        This is needed throughout lxml.
        """
        return namespaced(self.namespace, name)

    def set_source_dom(self, dom_node):
        """Replaces (or adds) the first language node with *dom_node*."""
        languageNodes = self.getlanguageNodes()
        if len(languageNodes) > 0:
            self.xmlelement.replace(languageNodes[0], dom_node)
        else:
            self.xmlelement.append(dom_node)

    def get_source_dom(self):
        """Returns the DOM node holding the source text (first entry)."""
        return self.getlanguageNode(lang=None, index=0)
    source_dom = property(get_source_dom, set_source_dom)

    def setsource(self, text, sourcelang='en'):
        """Sets the source text, rebuilding its language node."""
        if self._rich_source is not None:
            # Invalidate the cached rich representation; it no longer
            # matches the plain text being set.
            self._rich_source = None
        text = data.forceunicode(text)
        self.source_dom = self.createlanguageNode(sourcelang, text, "source")

    def getsource(self):
        """Returns the source text (first entry)."""
        return self.getNodeText(self.source_dom, getXMLspace(self.xmlelement, self._default_xml_space))
    source = property(getsource, setsource)

    def set_target_dom(self, dom_node, append=False):
        """Inserts *dom_node* as the target (second) language node.

        With ``append=True`` the node is added at the end instead of
        replacing the existing target node.
        """
        languageNodes = self.getlanguageNodes()
        # A source node must already exist before a target can be placed.
        assert len(languageNodes) > 0
        if dom_node is not None:
            if append or len(languageNodes) == 0:
                self.xmlelement.append(dom_node)
            else:
                self.xmlelement.insert(1, dom_node)
        if not append and len(languageNodes) > 1:
            self.xmlelement.remove(languageNodes[1])

    def get_target_dom(self, lang=None):
        """Returns the target node for *lang*, or the second entry if no
        language is given."""
        if lang:
            return self.getlanguageNode(lang=lang)
        else:
            return self.getlanguageNode(lang=None, index=1)
    target_dom = property(get_target_dom)

    def settarget(self, text, lang='xx', append=False):
        """Sets the "target" string (second language), or alternatively
        appends to the list"""
        #XXX: we really need the language - can't really be optional, and we
        # need to propagate it
        if self._rich_target is not None:
            self._rich_target = None
        text = data.forceunicode(text)
        # Firstly deal with reinitialising to None or setting to identical
        # string
        if self.gettarget() == text:
            return
        languageNode = self.get_target_dom(None)
        if text is not None:
            if languageNode is None:
                languageNode = self.createlanguageNode(lang, text, "target")
                self.set_target_dom(languageNode, append)
            else:
                if self.textNode:
                    # Write into the innermost text element (e.g. <seg>)
                    # rather than the language node itself.
                    terms = languageNode.iter(self.namespaced(self.textNode))
                    try:
                        languageNode = next(terms)
                    except StopIteration:
                        pass
                languageNode.text = text
        else:
            self.set_target_dom(None, False)

    def gettarget(self, lang=None):
        """retrieves the "target" text (second entry), or the entry in the
        specified language, if it exists"""
        return self.getNodeText(self.get_target_dom(lang), getXMLspace(self.xmlelement, self._default_xml_space))
    target = property(gettarget, settarget)

    def createlanguageNode(self, lang, text, purpose=None):
        """Returns a xml Element setup with given parameters to represent a
        single language entry. Has to be overridden."""
        return None

    def createPHnodes(self, parent, text):
        """Create the text node in parent containing all the ph tags"""
        # NOTE(review): _getPhMatches is not defined in this module as seen
        # here -- presumably provided elsewhere; verify before relying on it.
        matches = _getPhMatches(text)
        if not matches:
            parent.text = text
            return
        # Now we know there will definitely be some ph tags
        start = matches[0].start()
        pretext = text[:start]
        if pretext:
            parent.text = pretext
        lasttag = parent
        for i, m in enumerate(matches):
            #pretext
            pretext = text[start:m.start()]
            # this will never happen with the first ph tag
            if pretext:
                lasttag.tail = pretext
            #ph node
            phnode = etree.SubElement(parent, self.namespaced("ph"))
            phnode.set("id", str(i + 1))
            phnode.text = m.group()
            lasttag = phnode
            start = m.end()
        #post text
        if text[start:]:
            lasttag.tail = text[start:]

    def getlanguageNodes(self):
        """Returns a list of all nodes that contain per language information.
        """
        return list(self.xmlelement.iterchildren(self.namespaced(self.languageNode)))

    def getlanguageNode(self, lang=None, index=None):
        """Retrieves a :attr:`languageNode` either by language or by index."""
        if lang is None and index is None:
            raise KeyError("No criteria for languageNode given")
        languageNodes = self.getlanguageNodes()
        if lang:
            for node in languageNodes:
                if getXMLlang(node) == lang:
                    return node
        else:
            # have to use index
            if index >= len(languageNodes):
                return None
            else:
                return languageNodes[index]
        return None

    def getNodeText(self, languageNode, xml_space="preserve"):
        """Retrieves the term from the given :attr:`languageNode`."""
        if languageNode is None:
            return None
        if self.textNode:
            terms = languageNode.iterdescendants(self.namespaced(self.textNode))
            if terms is None:
                return None
            try:
                return getText(next(terms), xml_space)
            except StopIteration:
                # didn't have the structure we expected
                return None
        else:
            return getText(languageNode, xml_space)

    def __str__(self):
        """Serialises the unit's XML element."""
        return etree.tostring(self.xmlelement, pretty_print=True, encoding='utf-8')

    def _set_property(self, name, value):
        """Sets attribute *name* on the unit's XML element."""
        self.xmlelement.attrib[name] = value

    xid = property(lambda self: self.xmlelement.attrib[self.namespaced('xid')],
                   lambda self, value: self._set_property(self.namespaced('xid'), value))

    rid = property(lambda self: self.xmlelement.attrib[self.namespaced('rid')],
                   lambda self, value: self._set_property(self.namespaced('rid'), value))

    @classmethod
    def createfromxmlElement(cls, element):
        """Alternate constructor: wraps an existing XML element as a unit."""
        term = cls(None, empty=True)
        term.xmlelement = element
        return term


class LISAfile(base.TranslationStore):
    """A class representing a file store for one of the LISA file formats."""

    UnitClass = LISAunit
    #The root node of the XML document:
    rootNode = ""
    #The root node of the content section:
    bodyNode = ""
    #The XML skeleton to use for empty construction:
    XMLskeleton = ""

    namespace = None

    def __init__(self, inputfile=None, sourcelanguage='en',
                 targetlanguage=None, unitclass=None):
        super(LISAfile, self).__init__(unitclass=unitclass)
        if inputfile is not None:
            self.parse(inputfile)
            assert self.document.getroot().tag == self.namespaced(self.rootNode)
        else:
            # We strip out newlines to ensure that spaces in the skeleton
            # doesn't interfere with the the pretty printing of lxml
            self.parse(self.XMLskeleton.replace("\n", ""))
            self.setsourcelanguage(sourcelanguage)
            self.settargetlanguage(targetlanguage)
            self.addheader()
        self._encoding = "UTF-8"

    def addheader(self):
        """Method to be overridden to initialise headers, etc."""
        pass

    def namespaced(self, name):
        """Returns name in Clark notation.

        For example ``namespaced("source")`` in an XLIFF document might
        return::

            {urn:oasis:names:tc:xliff:document:1.1}source

        This is needed throughout lxml.
        """
        return namespaced(self.namespace, name)

    def initbody(self):
        """Initialises self.body so it never needs to be retrieved from the
        XML again."""
        self.namespace = self.document.getroot().nsmap.get(None, None)
        self.body = self.document.find('//%s' % self.namespaced(self.bodyNode))

    def addsourceunit(self, source):
        """Adds and returns a new unit with the given string as first entry."""
        newunit = self.UnitClass(source)
        self.addunit(newunit)
        return newunit

    def addunit(self, unit, new=True):
        """Registers *unit* with the store; appends its XML to the body when
        *new* (i.e. not already present in the parsed document)."""
        unit.namespace = self.namespace
        super(LISAfile, self).addunit(unit)
        if new:
            self.body.append(unit.xmlelement)

    def __str__(self):
        """Converts to a string containing the file's XML"""
        return etree.tostring(self.document, pretty_print=True,
                              xml_declaration=True, encoding='utf-8')

    def parse(self, xml):
        """Populates this object from the given xml string"""
        if not hasattr(self, 'filename'):
            self.filename = getattr(xml, 'name', '')
        if hasattr(xml, "read"):
            xml.seek(0)
            posrc = xml.read()
            xml = posrc
        # Keep CDATA sections intact instead of letting lxml fold them into
        # plain text.
        parser = etree.XMLParser(strip_cdata=False)
        self.document = etree.fromstring(xml, parser).getroottree()
        self._encoding = self.document.docinfo.encoding
        self.initbody()
        assert self.document.getroot().tag == self.namespaced(self.rootNode)
        for entry in self.document.getroot().iterdescendants(self.namespaced(self.UnitClass.rootNode)):
            term = self.UnitClass.createfromxmlElement(entry)
            self.addunit(term, new=False)
mpl-2.0
ProfessionalIT/professionalit-webiste
sdk/google_appengine/lib/django-1.4/django/utils/dates.py
488
2237
"Commonly-used date structures"

from django.utils.translation import ugettext_lazy as _, pgettext_lazy

# Translated full weekday names, keyed by date.weekday() index (Monday == 0).
WEEKDAYS = {
    0: _('Monday'), 1: _('Tuesday'), 2: _('Wednesday'), 3: _('Thursday'),
    4: _('Friday'), 5: _('Saturday'), 6: _('Sunday')
}
# Translated three-letter weekday abbreviations, same keying as WEEKDAYS.
WEEKDAYS_ABBR = {
    0: _('Mon'), 1: _('Tue'), 2: _('Wed'), 3: _('Thu'),
    4: _('Fri'), 5: _('Sat'), 6: _('Sun')
}
# Reverse lookup: untranslated lowercase weekday name -> weekday() index.
WEEKDAYS_REV = {
    'monday': 0, 'tuesday': 1, 'wednesday': 2, 'thursday': 3,
    'friday': 4, 'saturday': 5, 'sunday': 6
}
# Translated full month names, keyed by month number (January == 1).
MONTHS = {
    1: _('January'), 2: _('February'), 3: _('March'), 4: _('April'),
    5: _('May'), 6: _('June'), 7: _('July'), 8: _('August'),
    9: _('September'), 10: _('October'), 11: _('November'), 12: _('December')
}
# Translated lowercase three-letter month abbreviations, keyed by month number.
MONTHS_3 = {
    1: _('jan'), 2: _('feb'), 3: _('mar'), 4: _('apr'),
    5: _('may'), 6: _('jun'), 7: _('jul'), 8: _('aug'),
    9: _('sep'), 10: _('oct'), 11: _('nov'), 12: _('dec')
}
# Reverse lookup: untranslated three-letter abbreviation -> month number.
MONTHS_3_REV = {
    'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5, 'jun': 6,
    'jul': 7, 'aug': 8, 'sep': 9, 'oct': 10, 'nov': 11, 'dec': 12
}
MONTHS_AP = {  # month names in Associated Press style
    1: pgettext_lazy('abbrev. month', 'Jan.'),
    2: pgettext_lazy('abbrev. month', 'Feb.'),
    3: pgettext_lazy('abbrev. month', 'March'),
    4: pgettext_lazy('abbrev. month', 'April'),
    5: pgettext_lazy('abbrev. month', 'May'),
    6: pgettext_lazy('abbrev. month', 'June'),
    7: pgettext_lazy('abbrev. month', 'July'),
    8: pgettext_lazy('abbrev. month', 'Aug.'),
    9: pgettext_lazy('abbrev. month', 'Sept.'),
    10: pgettext_lazy('abbrev. month', 'Oct.'),
    11: pgettext_lazy('abbrev. month', 'Nov.'),
    12: pgettext_lazy('abbrev. month', 'Dec.')
}
MONTHS_ALT = {  # required for long date representation by some locales
    1: pgettext_lazy('alt. month', 'January'),
    2: pgettext_lazy('alt. month', 'February'),
    3: pgettext_lazy('alt. month', 'March'),
    4: pgettext_lazy('alt. month', 'April'),
    5: pgettext_lazy('alt. month', 'May'),
    6: pgettext_lazy('alt. month', 'June'),
    7: pgettext_lazy('alt. month', 'July'),
    8: pgettext_lazy('alt. month', 'August'),
    9: pgettext_lazy('alt. month', 'September'),
    10: pgettext_lazy('alt. month', 'October'),
    11: pgettext_lazy('alt. month', 'November'),
    12: pgettext_lazy('alt. month', 'December')
}
lgpl-3.0
JShadowMan/packages
python/zdl/error_logger/error_logger/utils/daemon.py
2
2948
#!/usr/bin/env python
#
# Copyright (C) 2017 DL
#
import os
import sys
import atexit
import signal

from error_logger.utils import (
    logger, exceptions
)


class Daemon(object):
    """Classic double-fork UNIX daemon.

    Detaches the current process from its controlling terminal, redirects
    the standard streams, writes a pid file and installs signal handlers.
    In debug mode (``debug=True``) daemonization is skipped entirely.
    """

    def __init__(self, debug=False, pid_file=None, stdin='/dev/null',
                 stdout='/dev/null', stderr='/dev/null'):
        """
        :param debug: when True, run_forever() is a no-op (stay foreground)
        :param pid_file: path of the pid file; defaults to
            /tmp/websocket_server.pid
        :param stdin/stdout/stderr: paths the standard streams are
            redirected to after daemonization
        """
        self._debug = debug
        self._stdin = stdin  # type: str
        self._stdout = stdout  # type: str
        self._stderr = stderr  # type: str

        if pid_file is None:
            pid_file = '/tmp/websocket_server.pid'
        self._pid_file = os.path.abspath(pid_file)  # type: str

    def run_forever(self):
        """Daemonize the current process.

        :raises exceptions.DeamonError: if a pid file already exists
            (another instance is presumably running).
        """
        if self._debug:
            logger.warning('Debugger is active!')
            return

        if os.path.isfile(self._pid_file):
            raise exceptions.DeamonError(
                'pid file already exists, server running?')
        self._start_deamon()

    def stop(self):
        # TODO: not implemented; shutdown currently happens via signals
        # (see _signal_handler).
        pass

    def _start_deamon(self):
        """Perform the double fork, redirect streams and write the pid file.

        :raises exceptions.DeamonError: on platforms without fork()
        :raises exceptions.FatalError: if either fork fails
        """
        if not self._debug and (os.name == 'nt' or not hasattr(os, 'fork')):
            raise exceptions.DeamonError('Windows does not support fork')

        # double fork create a deamon
        try:
            pid = os.fork()  # fork #1
            if pid > 0:
                # parent exits; use sys.exit() rather than the site-provided
                # exit() builtin, which is not guaranteed to be available
                sys.exit(0)
        except OSError as e:
            raise exceptions.FatalError(
                'Fork #1 error occurs, reason({})'.format(e))

        # detach from the parent environment: new session, neutral cwd,
        # permissive umask
        os.chdir('/')
        os.setsid()
        os.umask(0)

        try:
            pid = os.fork()  # fork #2
            if pid > 0:
                # intermediate parent exits
                sys.exit(0)
        except OSError as e:
            raise exceptions.FatalError(
                'Fork #2 error occurs, reason({})'.format(e))

        # redirect all std file descriptor
        sys.stdout.flush()
        sys.stderr.flush()
        _stdin = open(self._stdin, 'r')
        _stdout = open(self._stdout, 'a')
        # if require non-buffer, open mode muse be `b`
        _stderr = open(self._stderr, 'wb+', buffering=0)
        os.dup2(_stdin.fileno(), sys.stdin.fileno())
        os.dup2(_stdout.fileno(), sys.stdout.fileno())
        os.dup2(_stderr.fileno(), sys.stderr.fileno())

        # set signal handler
        signal.signal(signal.SIGTERM, self._signal_handler)
        signal.signal(signal.SIGILL, self._signal_handler)
        signal.signal(signal.SIGINT, signal.SIG_IGN)

        # register function at exit
        atexit.register(self._remove_pid_file)
        with open(self._pid_file, 'w') as fd:
            fd.write('{pid}\n'.format(pid=os.getpid()))
        logger.info('Daemon has been started')

    def _signal_handler(self, signum, frame):
        """Log the received signal, clean up the pid file and exit."""
        logger.info('Daemon receive an exit signal({}: {})'.format(
            signum, frame))
        self._remove_pid_file()
        sys.exit(0)

    def _remove_pid_file(self):
        """Remove the pid file if it exists (idempotent; also runs atexit)."""
        logger.info('Daemon has exited')
        if os.path.exists(self._pid_file):
            os.remove(self._pid_file)
mit
simdugas/childcare
languages/id.py
148
11233
# coding: utf8 { '!langcode!': 'id', '!langname!': 'Indonesian', '%d days ago': '%d hari yang lalu', '%d hours ago': '%d jam yang lalu', '%d minutes ago': '%d menit yang lalu', '%d months ago': '%d bulan yang lalu', '%d seconds ago': '%d detik yang lalu', '%d seconds from now': '%d detik dari sekarang', '%d weeks ago': '%d minggu yang lalu', '%d years ago': '%d tahun yang lalu', '%s %%{row} deleted': '%s %%{row} dihapus', '%s %%{row} updated': '%s %%{row} diperbarui', '%s selected': '%s dipilih', '%Y-%m-%d': '%d-%m-%Y', '%Y-%m-%d %H:%M:%S': '%d-%m-%Y %H:%M:%S', '(requires internet access, experimental)': '(membutuhkan akses internet, eksperimental)', '(something like "it-it")': '(sesuatu seperti "it-it")', '1 day ago': '1 hari yang lalu', '1 hour ago': '1 jam yang lalu', '1 minute ago': '1 menit yang lalu', '1 month ago': '1 bulan yang lalu', '1 second ago': '1 detik yang lalu', '1 week ago': '1 minggu yang lalu', '1 year ago': '1 tahun yang lalu', '< Previous': '< Sebelumnya', 'About': 'Tentang', 'About application': 'Tentang Aplikasi', 'Add': 'Tambah', 'Additional code for your application': 'Tambahan kode untuk aplikasi Anda', 'Address': 'Alamat', 'Admin language': 'Bahasa Admin', 'administrative interface': 'antarmuka administrative', 'Administrator Password:': 'Administrator Kata Sandi:', 'Ajax Recipes': 'Resep Ajax', 'An error occured, please %s the page': 'Terjadi kesalahan, silakan %s halaman', 'And': 'Dan', 'and rename it:': 'dan memberi nama baru itu:', 'Answer': 'Jawaban', 'appadmin is disabled because insecure channel': 'AppAdmin dinonaktifkan karena kanal tidak aman', 'application "%s" uninstalled': 'applikasi "%s" dihapus', 'application compiled': 'aplikasi dikompilasi', 'Application name:': 'Nama Applikasi:', 'are not used yet': 'tidak digunakan lagi', 'Are you sure you want to delete this object?': 'Apakah Anda yakin ingin menghapus ini?', 'Are you sure you want to uninstall application "%s"?': 'Apakah Anda yakin ingin menghapus aplikasi "%s"?', 
'Available Databases and Tables': 'Database dan Tabel yang tersedia', 'Back': 'Kembali', 'Buy this book': 'Beli buku ini', 'cache, errors and sessions cleaned': 'cache, kesalahan dan sesi dibersihkan', 'can be a git repo': 'bisa menjadi repo git', 'Cancel': 'Batalkan', 'Cannot be empty': 'Tidak boleh kosong', 'Change admin password': 'Ubah kata sandi admin', 'Change password': 'Ubah kata sandi', 'Check for upgrades': 'Periksa upgrade', 'Check to delete': 'Centang untuk menghapus', 'Checking for upgrades...': 'Memeriksa untuk upgrade...', 'Clean': 'Bersih', 'Clear': 'Hapus', 'Clear CACHE?': 'Hapus CACHE?', 'Clear DISK': 'Hapus DISK', 'Clear RAM': 'Hapus RAM', 'Click row to expand traceback': 'Klik baris untuk memperluas traceback', 'Close': 'Tutup', 'collapse/expand all': 'kempis / memperluas semua', 'Community': 'Komunitas', 'Compile': 'Kompilasi', 'compiled application removed': 'aplikasi yang dikompilasi dihapus', 'Components and Plugins': 'Komponen dan Plugin', 'contains': 'mengandung', 'Controllers': 'Kontrolir', 'controllers': 'kontrolir', 'Copyright': 'Hak Cipta', 'Count': 'Hitung', 'Create': 'Buat', 'create file with filename:': 'buat file dengan nama:', 'created by': 'dibuat oleh', 'CSV (hidden cols)': 'CSV (kolom tersembunyi)', 'currently running': 'sedang berjalan', 'data uploaded': 'data diunggah', 'Database %s select': 'Memilih Database %s', 'database administration': 'administrasi database', 'defines tables': 'mendefinisikan tabel', 'Delete': 'Hapus', 'delete all checked': 'menghapus semua yang di centang', 'Delete this file (you will be asked to confirm deletion)': 'Hapus file ini (Anda akan diminta untuk mengkonfirmasi penghapusan)', 'Delete:': 'Hapus:', 'Description': 'Keterangan', 'design': 'disain', 'direction: ltr': 'petunjuk: ltr', 'Disk Cleared': 'Disk Dihapus', 'Documentation': 'Dokumentasi', "Don't know what to do?": 'Tidak tahu apa yang harus dilakukan?', 'done!': 'selesai!', 'Download': 'Unduh', 'Download .w2p': 'Unduh .w2p', 'download 
layouts': 'unduh layouts', 'download plugins': 'unduh plugins', 'Duration': 'Durasi', 'Edit': 'Mengedit', 'Edit application': 'Mengedit Aplikasi', 'Email sent': 'Email dikirim', 'enter a valid email address': 'masukkan alamat email yang benar', 'enter a valid URL': 'masukkan URL yang benar', 'enter a value': 'masukkan data', 'Error': 'Kesalahan', 'Error logs for "%(app)s"': 'Catatan kesalahan untuk "%(app)s"', 'Errors': 'Kesalahan', 'export as csv file': 'ekspor sebagai file csv', 'Export:': 'Ekspor:', 'exposes': 'menghadapkan', 'extends': 'meluaskan', 'filter': 'menyaring', 'First Name': 'Nama Depan', 'Forgot username?': 'Lupa nama pengguna?', 'Free Applications': 'Aplikasi Gratis', 'Gender': 'Jenis Kelamin', 'Group %(group_id)s created': 'Grup %(group_id)s dibuat', 'Group uniquely assigned to user %(id)s': 'Grup unik yang diberikan kepada pengguna %(id)s', 'Groups': 'Grup', 'Guest': 'Tamu', 'Hello World': 'Halo Dunia', 'Help': 'Bantuan', 'Home': 'Halaman Utama', 'How did you get here?': 'Bagaimana kamu bisa di sini?', 'Image': 'Gambar', 'import': 'impor', 'Import/Export': 'Impor/Ekspor', 'includes': 'termasuk', 'Install': 'Memasang', 'Installation': 'Instalasi', 'Installed applications': 'Aplikasi yang diinstal', 'Introduction': 'Pengenalan', 'Invalid email': 'Email tidak benar', 'Language': 'Bahasa', 'languages': 'bahasa', 'Languages': 'Bahasa', 'Last Name': 'Nama Belakang', 'License for': 'Lisensi untuk', 'loading...': 'sedang memuat...', 'Logged in': 'Masuk', 'Logged out': 'Keluar', 'Login': 'Masuk', 'Login to the Administrative Interface': 'Masuk ke antarmuka Administrasi', 'Logout': 'Keluar', 'Lost Password': 'Lupa Kata Sandi', 'Lost password?': 'Lupa kata sandi?', 'Maintenance': 'Pemeliharaan', 'Manage': 'Mengelola', 'Manage Cache': 'Mengelola Cache', 'models': 'model', 'Models': 'Model', 'Modules': 'Modul', 'modules': 'modul', 'My Sites': 'Situs Saya', 'New': 'Baru', 'new application "%s" created': 'aplikasi baru "%s" dibuat', 'New password': 'Kata sandi 
baru', 'New simple application': 'Aplikasi baru sederhana', 'News': 'Berita', 'next 100 rows': '100 baris berikutnya', 'Next >': 'Berikutnya >', 'Next Page': 'Halaman Berikutnya', 'No databases in this application': 'Tidak ada database dalam aplikasi ini', 'No ticket_storage.txt found under /private folder': 'Tidak ditemukan ticket_storage.txt dalam folder /private', 'not a Zip Code': 'bukan Kode Pos', 'Note': 'Catatan', 'Old password': 'Kata sandi lama', 'Online examples': 'Contoh Online', 'Or': 'Atau', 'or alternatively': 'atau alternatif', 'Or Get from URL:': 'Atau Dapatkan dari URL:', 'or import from csv file': 'atau impor dari file csv', 'Other Plugins': 'Plugin Lainnya', 'Other Recipes': 'Resep Lainnya', 'Overview': 'Ikhtisar', 'Overwrite installed app': 'Ikhtisar app yang terinstall', 'Pack all': 'Pak semua', 'Pack compiled': 'Pak yang telah dikompilasi', 'Pack custom': 'Pak secara kustomisasi', 'Password': 'Kata sandi', 'Password changed': 'Kata sandi berubah', "Password fields don't match": 'Kata sandi tidak sama', 'please input your password again': 'silahkan masukan kata sandi anda lagi', 'plugins': 'plugin', 'Plugins': 'Plugin', 'Plural-Forms:': 'Bentuk-Jamak:', 'Powered by': 'Didukung oleh', 'Preface': 'Pendahuluan', 'previous 100 rows': '100 baris sebelumnya', 'Previous Page': 'Halaman Sebelumnya', 'private files': 'file pribadi', 'Private files': 'File pribadi', 'Profile': 'Profil', 'Profile updated': 'Profil diperbarui', 'Project Progress': 'Perkembangan Proyek', 'Quick Examples': 'Contoh Cepat', 'Ram Cleared': 'Ram Dihapus', 'Recipes': 'Resep', 'Register': 'Daftar', 'Registration successful': 'Pendaftaran berhasil', 'reload': 'memuat kembali', 'Reload routes': 'Memuat rute kembali', 'Remember me (for 30 days)': 'Ingat saya (selama 30 hari)', 'Remove compiled': 'Hapus Kompilasi', 'Request reset password': 'Meminta reset kata sandi', 'Rows in Table': 'Baris dalam Tabel', 'Rows selected': 'Baris dipilih', "Run tests in this file (to run all files, you 
may also use the button labelled 'test')": "Jalankan tes di file ini (untuk menjalankan semua file, Anda juga dapat menggunakan tombol berlabel 'test')", 'Running on %s': 'Berjalan di %s', 'Save model as...': 'Simpan model sebagai ...', 'Save profile': 'Simpan profil', 'Search': 'Cari', 'Select Files to Package': 'Pilih Berkas untuk Paket', 'Send Email': 'Kirim Email', 'Service': 'Layanan', 'Site': 'Situs', 'Size of cache:': 'Ukuran cache:', 'starts with': 'dimulai dengan', 'static': 'statis', 'Static': 'Statis', 'Statistics': 'Statistik', 'Support': 'Mendukung', 'Table': 'Tabel', 'test': 'tes', 'The application logic, each URL path is mapped in one exposed function in the controller': 'Logika aplikasi, setiap jalur URL dipetakan dalam satu fungsi terpapar di kontrolir', 'The data representation, define database tables and sets': 'Representasi data, mendefinisikan tabel database dan set', 'There are no plugins': 'Tidak ada plugin', 'There are no private files': 'Tidak ada file pribadi', 'These files are not served, they are only available from within your app': 'File-file ini tidak dilayani, mereka hanya tersedia dari dalam aplikasi Anda', 'These files are served without processing, your images go here': 'File-file ini disajikan tanpa pengolahan, gambar Anda di sini', 'This App': 'App Ini', 'Time in Cache (h:m:s)': 'Waktu di Cache (h: m: s)', 'To create a plugin, name a file/folder plugin_[name]': 'Untuk membuat sebuah plugin, nama file / folder plugin_ [nama]', 'too short': 'terlalu pendek', 'Translation strings for the application': 'Terjemahan string untuk aplikasi', 'Try the mobile interface': 'Coba antarmuka ponsel', 'Unable to download because:': 'Tidak dapat mengunduh karena:', 'unable to parse csv file': 'tidak mampu mengurai file csv', 'update all languages': 'memperbarui semua bahasa', 'Update:': 'Perbarui:', 'Upload': 'Unggah', 'Upload a package:': 'Unggah sebuah paket:', 'Upload and install packed application': 'Upload dan pasang aplikasi yang dikemas', 
'upload file:': 'unggah file:', 'upload plugin file:': 'unggah file plugin:', 'User %(id)s Logged-in': 'Pengguna %(id)s Masuk', 'User %(id)s Logged-out': 'Pengguna %(id)s Keluar', 'User %(id)s Password changed': 'Pengguna %(id)s Kata Sandi berubah', 'User %(id)s Password reset': 'Pengguna %(id)s Kata Sandi telah direset', 'User %(id)s Profile updated': 'Pengguna %(id)s Profil diperbarui', 'User %(id)s Registered': 'Pengguna %(id)s Terdaftar', 'value already in database or empty': 'data sudah ada dalam database atau kosong', 'value not allowed': 'data tidak benar', 'value not in database': 'data tidak ada dalam database', 'Verify Password': 'Verifikasi Kata Sandi', 'Version': 'Versi', 'View': 'Lihat', 'Views': 'Lihat', 'views': 'lihat', 'Web Framework': 'Kerangka Web', 'web2py is up to date': 'web2py terbaru', 'web2py Recent Tweets': 'Tweet web2py terbaru', 'Website': 'Situs Web', 'Welcome': 'Selamat Datang', 'Welcome to web2py!': 'Selamat Datang di web2py!', 'You are successfully running web2py': 'Anda berhasil menjalankan web2py', 'You can modify this application and adapt it to your needs': 'Anda dapat memodifikasi aplikasi ini dan menyesuaikan dengan kebutuhan Anda', 'You visited the url %s': 'Anda mengunjungi url %s', }
gpl-2.0
ProjectSWGCore/NGECore2
scripts/object/tangible/wearables/ring/item_band_set_smuggler_dps_01_01.py
2
1144
import sys def setup(core, object): object.setAttachment('radial_filename', 'ring/unity') object.setAttachment('objType', 'ring') object.setStfFilename('static_item_n') object.setStfName('item_band_set_smuggler_dps_01_01') object.setDetailFilename('static_item_d') object.setDetailName('item_band_set_smuggler_dps_01_01') object.setStringAttribute('class_required', 'Smuggler') object.setIntAttribute('required_combat_level', 85) object.setIntAttribute('cat_skill_mod_bonus.@stat_n:expertise_action_line_sm_dm', 2) object.setIntAttribute('cat_skill_mod_bonus.@stat_n:fast_attack_line_sm_dm', 1) object.setIntAttribute('cat_skill_mod_bonus.@stat_n:expertise_freeshot_sm_dm', 2) object.setIntAttribute('cat_skill_mod_bonus.@stat_n:expertise_cooldown_line_sm_dm', 2) object.setStringAttribute('@set_bonus:piece_bonus_count_3', '@set_bonus:set_bonus_smuggler_dps_1') object.setStringAttribute('@set_bonus:piece_bonus_count_4', '@set_bonus:set_bonus_smuggler_dps_2') object.setStringAttribute('@set_bonus:piece_bonus_count_5', '@set_bonus:set_bonus_smuggler_dps_3') object.setAttachment('setBonus', 'set_bonus_smuggler_dps') return
lgpl-3.0
sogelink/ansible
lib/ansible/modules/database/postgresql/postgresql_privs.py
26
25234
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'community'} DOCUMENTATION = """ --- module: postgresql_privs version_added: "1.2" short_description: Grant or revoke privileges on PostgreSQL database objects. description: - Grant or revoke privileges on PostgreSQL database objects. - This module is basically a wrapper around most of the functionality of PostgreSQL's GRANT and REVOKE statements with detection of changes (GRANT/REVOKE I(privs) ON I(type) I(objs) TO/FROM I(roles)) options: database: description: - Name of database to connect to. - 'Alias: I(db)' required: yes state: description: - If C(present), the specified privileges are granted, if C(absent) they are revoked. required: no default: present choices: [present, absent] privs: description: - Comma separated list of privileges to grant/revoke. - 'Alias: I(priv)' required: no type: description: - Type of database object to set privileges on. required: no default: table choices: [table, sequence, function, database, schema, language, tablespace, group] objs: description: - Comma separated list of database objects to set privileges on. - If I(type) is C(table) or C(sequence), the special value C(ALL_IN_SCHEMA) can be provided instead to specify all database objects of type I(type) in the schema specified via I(schema). (This also works with PostgreSQL < 9.0.) - If I(type) is C(database), this parameter can be omitted, in which case privileges are set for the database specified via I(database). 
- 'If I(type) is I(function), colons (":") in object names will be replaced with commas (needed to specify function signatures, see examples)' - 'Alias: I(obj)' required: no schema: description: - Schema that contains the database objects specified via I(objs). - May only be provided if I(type) is C(table), C(sequence) or C(function). Defaults to C(public) in these cases. required: no roles: description: - Comma separated list of role (user/group) names to set permissions for. - The special value C(PUBLIC) can be provided instead to set permissions for the implicitly defined PUBLIC group. - 'Alias: I(role)' required: yes grant_option: description: - Whether C(role) may grant/revoke the specified privileges/group memberships to others. - Set to C(no) to revoke GRANT OPTION, leave unspecified to make no changes. - I(grant_option) only has an effect if I(state) is C(present). - 'Alias: I(admin_option)' required: no choices: ['yes', 'no'] host: description: - Database host address. If unspecified, connect via Unix socket. - 'Alias: I(login_host)' default: null required: no port: description: - Database port to connect to. required: no default: 5432 unix_socket: description: - Path to a Unix domain socket for local connections. - 'Alias: I(login_unix_socket)' required: false default: null login: description: - The username to authenticate with. - 'Alias: I(login_user)' default: postgres password: description: - The password to authenticate with. - 'Alias: I(login_password))' default: null required: no ssl_mode: description: - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server. - See https://www.postgresql.org/docs/current/static/libpq-ssl.html for more information on the modes. - Default of C(prefer) matches libpq default. 
required: false default: prefer choices: [disable, allow, prefer, require, verify-ca, verify-full] version_added: '2.3' ssl_rootcert: description: - Specifies the name of a file containing SSL certificate authority (CA) certificate(s). If the file exists, the server's certificate will be verified to be signed by one of these authorities. required: false default: null version_added: '2.3' notes: - Default authentication assumes that postgresql_privs is run by the C(postgres) user on the remote host. (Ansible's C(user) or C(sudo-user)). - This module requires Python package I(psycopg2) to be installed on the remote host. In the default case of the remote host also being the PostgreSQL server, PostgreSQL has to be installed there as well, obviously. For Debian/Ubuntu-based systems, install packages I(postgresql) and I(python-psycopg2). - Parameters that accept comma separated lists (I(privs), I(objs), I(roles)) have singular alias names (I(priv), I(obj), I(role)). - To revoke only C(GRANT OPTION) for a specific object, set I(state) to C(present) and I(grant_option) to C(no) (see examples). - Note that when revoking privileges from a role R, this role may still have access via privileges granted to any role R is a member of including C(PUBLIC). - Note that when revoking privileges from a role R, you do so as the user specified via I(login). If R has been granted the same privileges by another user also, R can still access database objects via these privileges. - When revoking privileges, C(RESTRICT) is assumed (see PostgreSQL docs). - The ssl_rootcert parameter requires at least Postgres version 8.4 and I(psycopg2) version 2.4.3. 
requirements: [psycopg2] author: "Bernhard Weitzhofer (@b6d)" """ EXAMPLES = """ # On database "library": # GRANT SELECT, INSERT, UPDATE ON TABLE public.books, public.authors # TO librarian, reader WITH GRANT OPTION - postgresql_privs: database: library state: present privs: SELECT,INSERT,UPDATE type: table objs: books,authors schema: public roles: librarian,reader grant_option: yes # Same as above leveraging default values: - postgresql_privs: db: library privs: SELECT,INSERT,UPDATE objs: books,authors roles: librarian,reader grant_option: yes # REVOKE GRANT OPTION FOR INSERT ON TABLE books FROM reader # Note that role "reader" will be *granted* INSERT privilege itself if this # isn't already the case (since state: present). - postgresql_privs: db: library state: present priv: INSERT obj: books role: reader grant_option: no # REVOKE INSERT, UPDATE ON ALL TABLES IN SCHEMA public FROM reader # "public" is the default schema. This also works for PostgreSQL 8.x. - postgresql_privs: db: library state: absent privs: INSERT,UPDATE objs: ALL_IN_SCHEMA role: reader # GRANT ALL PRIVILEGES ON SCHEMA public, math TO librarian - postgresql_privs: db: library privs: ALL type: schema objs: public,math role: librarian # GRANT ALL PRIVILEGES ON FUNCTION math.add(int, int) TO librarian, reader # Note the separation of arguments with colons. - postgresql_privs: db: library privs: ALL type: function obj: add(int:int) schema: math roles: librarian,reader # GRANT librarian, reader TO alice, bob WITH ADMIN OPTION # Note that group role memberships apply cluster-wide and therefore are not # restricted to database "library" here. 
- postgresql_privs: db: library type: group objs: librarian,reader roles: alice,bob admin_option: yes # GRANT ALL PRIVILEGES ON DATABASE library TO librarian # Note that here "db: postgres" specifies the database to connect to, not the # database to grant privileges on (which is specified via the "objs" param) - postgresql_privs: db: postgres privs: ALL type: database obj: library role: librarian # GRANT ALL PRIVILEGES ON DATABASE library TO librarian # If objs is omitted for type "database", it defaults to the database # to which the connection is established - postgresql_privs: db: library privs: ALL type: database role: librarian """ import traceback try: import psycopg2 import psycopg2.extensions except ImportError: psycopg2 = None # import module snippets from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.database import pg_quote_identifier from ansible.module_utils._text import to_native, to_text VALID_PRIVS = frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'CREATE', 'CONNECT', 'TEMPORARY', 'TEMP', 'EXECUTE', 'USAGE', 'ALL', 'USAGE')) class Error(Exception): pass # We don't have functools.partial in Python < 2.5 def partial(f, *args, **kwargs): """Partial function application""" def g(*g_args, **g_kwargs): new_kwargs = kwargs.copy() new_kwargs.update(g_kwargs) return f(*(args + g_args), **g_kwargs) g.f = f g.args = args g.kwargs = kwargs return g class Connection(object): """Wrapper around a psycopg2 connection with some convenience methods""" def __init__(self, params): self.database = params.database # To use defaults values, keyword arguments must be absent, so # check which values are empty and don't include in the **kw # dictionary params_map = { "host":"host", "login":"user", "password":"password", "port":"port", "database": "database", "ssl_mode":"sslmode", "ssl_rootcert":"sslrootcert" } kw = dict( (params_map[k], getattr(params, k)) for k in params_map if getattr(params, k) != '' and 
getattr(params, k) is not None ) # If a unix_socket is specified, incorporate it here. is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost" if is_localhost and params.unix_socket != "": kw["host"] = params.unix_socket sslrootcert = params.ssl_rootcert if psycopg2.__version__ < '2.4.3' and sslrootcert is not None: raise ValueError('psycopg2 must be at least 2.4.3 in order to user the ssl_rootcert parameter') self.connection = psycopg2.connect(**kw) self.cursor = self.connection.cursor() def commit(self): self.connection.commit() def rollback(self): self.connection.rollback() @property def encoding(self): """Connection encoding in Python-compatible form""" return psycopg2.extensions.encodings[self.connection.encoding] ### Methods for querying database objects # PostgreSQL < 9.0 doesn't support "ALL TABLES IN SCHEMA schema"-like # phrases in GRANT or REVOKE statements, therefore alternative methods are # provided here. def schema_exists(self, schema): query = """SELECT count(*) FROM pg_catalog.pg_namespace WHERE nspname = %s""" self.cursor.execute(query, (schema,)) return self.cursor.fetchone()[0] > 0 def get_all_tables_in_schema(self, schema): if not self.schema_exists(schema): raise Error('Schema "%s" does not exist.' % schema) query = """SELECT relname FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE nspname = %s AND relkind in ('r', 'v')""" self.cursor.execute(query, (schema,)) return [t[0] for t in self.cursor.fetchall()] def get_all_sequences_in_schema(self, schema): if not self.schema_exists(schema): raise Error('Schema "%s" does not exist.' 
% schema) query = """SELECT relname FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE nspname = %s AND relkind = 'S'""" self.cursor.execute(query, (schema,)) return [t[0] for t in self.cursor.fetchall()] ### Methods for getting access control lists and group membership info # To determine whether anything has changed after granting/revoking # privileges, we compare the access control lists of the specified database # objects before and afterwards. Python's list/string comparison should # suffice for change detection, we should not actually have to parse ACLs. # The same should apply to group membership information. def get_table_acls(self, schema, tables): query = """SELECT relacl FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE nspname = %s AND relkind = 'r' AND relname = ANY (%s) ORDER BY relname""" self.cursor.execute(query, (schema, tables)) return [t[0] for t in self.cursor.fetchall()] def get_sequence_acls(self, schema, sequences): query = """SELECT relacl FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE nspname = %s AND relkind = 'S' AND relname = ANY (%s) ORDER BY relname""" self.cursor.execute(query, (schema, sequences)) return [t[0] for t in self.cursor.fetchall()] def get_function_acls(self, schema, function_signatures): funcnames = [f.split('(', 1)[0] for f in function_signatures] query = """SELECT proacl FROM pg_catalog.pg_proc p JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace WHERE nspname = %s AND proname = ANY (%s) ORDER BY proname, proargtypes""" self.cursor.execute(query, (schema, funcnames)) return [t[0] for t in self.cursor.fetchall()] def get_schema_acls(self, schemas): query = """SELECT nspacl FROM pg_catalog.pg_namespace WHERE nspname = ANY (%s) ORDER BY nspname""" self.cursor.execute(query, (schemas,)) return [t[0] for t in self.cursor.fetchall()] def get_language_acls(self, languages): query = """SELECT lanacl 
FROM pg_catalog.pg_language WHERE lanname = ANY (%s) ORDER BY lanname""" self.cursor.execute(query, (languages,)) return [t[0] for t in self.cursor.fetchall()] def get_tablespace_acls(self, tablespaces): query = """SELECT spcacl FROM pg_catalog.pg_tablespace WHERE spcname = ANY (%s) ORDER BY spcname""" self.cursor.execute(query, (tablespaces,)) return [t[0] for t in self.cursor.fetchall()] def get_database_acls(self, databases): query = """SELECT datacl FROM pg_catalog.pg_database WHERE datname = ANY (%s) ORDER BY datname""" self.cursor.execute(query, (databases,)) return [t[0] for t in self.cursor.fetchall()] def get_group_memberships(self, groups): query = """SELECT roleid, grantor, member, admin_option FROM pg_catalog.pg_auth_members am JOIN pg_catalog.pg_roles r ON r.oid = am.roleid WHERE r.rolname = ANY(%s) ORDER BY roleid, grantor, member""" self.cursor.execute(query, (groups,)) return self.cursor.fetchall() ### Manipulating privileges def manipulate_privs(self, obj_type, privs, objs, roles, state, grant_option, schema_qualifier=None): """Manipulate database object privileges. :param obj_type: Type of database object to grant/revoke privileges for. :param privs: Either a list of privileges to grant/revoke or None if type is "group". :param objs: List of database objects to grant/revoke privileges for. :param roles: Either a list of role names or "PUBLIC" for the implicitly defined "PUBLIC" group :param state: "present" to grant privileges, "absent" to revoke. :param grant_option: Only for state "present": If True, set grant/admin option. If False, revoke it. If None, don't change grant option. :param schema_qualifier: Some object types ("TABLE", "SEQUENCE", "FUNCTION") must be qualified by schema. Ignored for other Types. 
""" # get_status: function to get current status if obj_type == 'table': get_status = partial(self.get_table_acls, schema_qualifier) elif obj_type == 'sequence': get_status = partial(self.get_sequence_acls, schema_qualifier) elif obj_type == 'function': get_status = partial(self.get_function_acls, schema_qualifier) elif obj_type == 'schema': get_status = self.get_schema_acls elif obj_type == 'language': get_status = self.get_language_acls elif obj_type == 'tablespace': get_status = self.get_tablespace_acls elif obj_type == 'database': get_status = self.get_database_acls elif obj_type == 'group': get_status = self.get_group_memberships else: raise Error('Unsupported database object type "%s".' % obj_type) # Return False (nothing has changed) if there are no objs to work on. if not objs: return False # obj_ids: quoted db object identifiers (sometimes schema-qualified) if obj_type == 'function': obj_ids = [] for obj in objs: try: f, args = obj.split('(', 1) except: raise Error('Illegal function signature: "%s".' 
% obj) obj_ids.append('"%s"."%s"(%s' % (schema_qualifier, f, args)) elif obj_type in ['table', 'sequence']: obj_ids = ['"%s"."%s"' % (schema_qualifier, o) for o in objs] else: obj_ids = ['"%s"' % o for o in objs] # set_what: SQL-fragment specifying what to set for the target roles: # Either group membership or privileges on objects of a certain type if obj_type == 'group': set_what = ','.join(pg_quote_identifier(i, 'role') for i in obj_ids) else: # function types are already quoted above if obj_type != 'function': obj_ids = [pg_quote_identifier(i, 'table') for i in obj_ids] # Note: obj_type has been checked against a set of string literals # and privs was escaped when it was parsed set_what = '%s ON %s %s' % (','.join(privs), obj_type, ','.join(obj_ids)) # for_whom: SQL-fragment specifying for whom to set the above if roles == 'PUBLIC': for_whom = 'PUBLIC' else: for_whom = ','.join(pg_quote_identifier(r, 'role') for r in roles) status_before = get_status(objs) if state == 'present': if grant_option: if obj_type == 'group': query = 'GRANT %s TO %s WITH ADMIN OPTION' else: query = 'GRANT %s TO %s WITH GRANT OPTION' else: query = 'GRANT %s TO %s' self.cursor.execute(query % (set_what, for_whom)) # Only revoke GRANT/ADMIN OPTION if grant_option actually is False. 
if grant_option is False: if obj_type == 'group': query = 'REVOKE ADMIN OPTION FOR %s FROM %s' else: query = 'REVOKE GRANT OPTION FOR %s FROM %s' self.cursor.execute(query % (set_what, for_whom)) else: query = 'REVOKE %s FROM %s' self.cursor.execute(query % (set_what, for_whom)) status_after = get_status(objs) return status_before != status_after def main(): module = AnsibleModule( argument_spec = dict( database=dict(required=True, aliases=['db']), state=dict(default='present', choices=['present', 'absent']), privs=dict(required=False, aliases=['priv']), type=dict(default='table', choices=['table', 'sequence', 'function', 'database', 'schema', 'language', 'tablespace', 'group']), objs=dict(required=False, aliases=['obj']), schema=dict(required=False), roles=dict(required=True, aliases=['role']), grant_option=dict(required=False, type='bool', aliases=['admin_option']), host=dict(default='', aliases=['login_host']), port=dict(type='int', default=5432), unix_socket=dict(default='', aliases=['login_unix_socket']), login=dict(default='postgres', aliases=['login_user']), password=dict(default='', aliases=['login_password'], no_log=True), ssl_mode=dict(default="prefer", choices=['disable', 'allow', 'prefer', 'require', 'verify-ca', 'verify-full']), ssl_rootcert=dict(default=None) ), supports_check_mode = True ) # Create type object as namespace for module params p = type('Params', (), module.params) # param "schema": default, allowed depends on param "type" if p.type in ['table', 'sequence', 'function']: p.schema = p.schema or 'public' elif p.schema: module.fail_json(msg='Argument "schema" is not allowed ' 'for type "%s".' % p.type) # param "objs": default, required depends on param "type" if p.type == 'database': p.objs = p.objs or p.database elif not p.objs: module.fail_json(msg='Argument "objs" is required ' 'for type "%s".' 
% p.type) # param "privs": allowed, required depends on param "type" if p.type == 'group': if p.privs: module.fail_json(msg='Argument "privs" is not allowed ' 'for type "group".') elif not p.privs: module.fail_json(msg='Argument "privs" is required ' 'for type "%s".' % p.type) # Connect to Database if not psycopg2: module.fail_json(msg='Python module "psycopg2" must be installed.') try: conn = Connection(p) except psycopg2.Error as e: module.fail_json(msg='Could not connect to database: %s' % to_native(e), exception=traceback.format_exc()) except TypeError as e: if 'sslrootcert' in e.args[0]: module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert') module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc()) except ValueError as e: # We raise this when the psycopg library is too old module.fail_json(msg=to_native(e)) try: # privs if p.privs: privs = frozenset(pr.upper() for pr in p.privs.split(',')) if not privs.issubset(VALID_PRIVS): module.fail_json(msg='Invalid privileges specified: %s' % privs.difference(VALID_PRIVS)) else: privs = None # objs: if p.type == 'table' and p.objs == 'ALL_IN_SCHEMA': objs = conn.get_all_tables_in_schema(p.schema) elif p.type == 'sequence' and p.objs == 'ALL_IN_SCHEMA': objs = conn.get_all_sequences_in_schema(p.schema) else: objs = p.objs.split(',') # function signatures are encoded using ':' to separate args if p.type == 'function': objs = [obj.replace(':', ',') for obj in objs] # roles if p.roles == 'PUBLIC': roles = 'PUBLIC' else: roles = p.roles.split(',') changed = conn.manipulate_privs( obj_type = p.type, privs = privs, objs = objs, roles = roles, state = p.state, grant_option = p.grant_option, schema_qualifier=p.schema ) except Error as e: conn.rollback() module.fail_json(msg=e.message, exception=traceback.format_exc()) except psycopg2.Error as e: conn.rollback() # psycopg2 errors come in connection encoding msg = 
to_text(e.message(encoding=conn.encoding)) module.fail_json(msg=msg) if module.check_mode: conn.rollback() else: conn.commit() module.exit_json(changed=changed) if __name__ == '__main__': main()
gpl-3.0
modesttree/Projeny
Source/prj/main/PrjRunner.py
1
7435
import sys import os import webbrowser from mtm.util.Assert import * import mtm.util.MiscUtil as MiscUtil import mtm.util.PlatformUtil as PlatformUtil from mtm.util.Platforms import Platforms from mtm.util.CommonSettings import ConfigFileName import mtm.ioc.Container as Container from mtm.ioc.Inject import Inject from mtm.ioc.Inject import InjectMany from mtm.ioc.Inject import InjectOptional import mtm.ioc.IocAssertions as Assertions from prj.main.ProjenyConstants import ProjectConfigFileName class PrjRunner: _scriptRunner = Inject('ScriptRunner') _config = Inject('Config') _packageMgr = Inject('PackageManager') _projectConfigChanger = Inject('ProjectConfigChanger') _unityHelper = Inject('UnityHelper') _varMgr = Inject('VarManager') _log = Inject('Logger') _mainConfig = InjectOptional('MainConfigPath', None) _sys = Inject('SystemHelper') _vsSolutionHelper = Inject('VisualStudioHelper') _projVsHelper = Inject('ProjenyVisualStudioHelper') _releaseSourceManager = Inject('ReleaseSourceManager') def run(self, args): self._args = self._processArgs(args) success = self._scriptRunner.runWrapper(self._runInternal) self._onBuildComplete(success) def _onBuildComplete(self, success): if not success: sys.exit(1) def _processArgs(self, args): if args.buildFullProject or args.buildFull: args.updateLinks = True args.updateUnitySolution = True args.updateCustomSolution = True args.buildCustomSolution = True if args.buildFull: args.buildPrebuild = True if not args.project: args.project = self._config.tryGetString(None, 'DefaultProject') if args.project and not self._packageMgr.projectExists(args.project) and not args.createProject: args.project = self._packageMgr.getProjectFromAlias(args.project) if not args.project and self._varMgr.hasKey('UnityProjectsDir'): allProjects = self._packageMgr.getAllProjectNames() # If there's only one project, then just always assume they are operating on that if len(allProjects) == 1: args.project = allProjects[0] return args def _runPreBuild(self): 
if self._args.deleteProject: if not self._args.suppressPrompts: if not MiscUtil.confirmChoice("Are you sure you want to delete project '{0}'? (y/n) \nNote that this will only delete your unity project settings and the {1} for this project. \nThe rest of the content for your project will remain in the UnityPackages folder ".format(self._args.project, ProjectConfigFileName)): assertThat(False, "User aborted operation") self._packageMgr.deleteProject(self._args.project) if self._args.createProject: self._packageMgr.createProject(self._args.project, self._platform) if self._args.projectAddPackageAssets: self._projectConfigChanger.addPackage(self._args.project, self._args.projectAddPackageAssets, True) if self._args.projectAddPackagePlugins: self._projectConfigChanger.addPackage(self._args.project, self._args.projectAddPackagePlugins, False) if self._args.openDocumentation: self._openDocumentation() if self._args.clearProjectGeneratedFiles: self._packageMgr.clearProjectGeneratedFiles(self._args.project) if self._args.clearAllProjectGeneratedFiles: self._packageMgr.clearAllProjectGeneratedFiles() if self._args.deleteAllLinks: self._packageMgr.deleteAllLinks() if self._args.buildPrebuild: self.buildPrebuildProjects() if self._args.init: self._packageMgr.updateLinksForAllProjects() if self._args.initLinks: self._packageMgr.checkProjectInitialized(self._args.project, self._platform) if self._args.updateLinks: self._packageMgr.updateProjectJunctions(self._args.project, self._platform) if self._args.updateUnitySolution: self._projVsHelper.updateUnitySolution(self._args.project, self._platform) if self._args.updateCustomSolution: self._projVsHelper.updateCustomSolution(self._args.project, self._platform) def buildPrebuildProjects(self, config = None): solutionPath = self._config.tryGetString(None, 'Prebuild', 'SolutionPath') if solutionPath != None: with self._log.heading('Building {0}'.format(os.path.basename(self._varMgr.expandPath(solutionPath)))): if config == None: config 
= self._config.tryGetString('Debug', 'Prebuild', 'SolutionConfig') self._vsSolutionHelper.buildVisualStudioProject(solutionPath, config) def _openDocumentation(self): webbrowser.open('https://github.com/modesttree/ModestUnityPackageManager') def _runBuild(self): if self._args.buildCustomSolution: self._projVsHelper.buildCustomSolution(self._args.project, self._platform) def _runPostBuild(self): if self._args.listReleases: self._releaseSourceManager.listAllReleases() if self._args.listProjects: self._packageMgr.listAllProjects() if self._args.listPackages: self._packageMgr.listAllPackages(self._args.project) if self._args.openUnity: self._packageMgr.checkProjectInitialized(self._args.project, self._platform) self._unityHelper.openUnity(self._args.project, self._platform) if self._args.openCustomSolution: self._projVsHelper.openCustomSolution(self._args.project, self._platform) if self._args.editProjectYaml: self._editProjectYaml() def _editProjectYaml(self): assertThat(self._args.project) schemaPath = self._varMgr.expandPath('[UnityProjectsDir]/{0}/{1}'.format(self._args.project, ProjectConfigFileName)) os.startfile(schemaPath) def _initialize(self): self._platform = PlatformUtil.fromPlatformArgName(self._args.platform) if self._args.project and self._platform: self._packageMgr.setPathsForProjectPlatform(self._args.project, self._platform) def _runInternal(self): self._log.debug("Started Prj with arguments: {0}".format(" ".join(sys.argv[1:]))) self._initialize() self._validateRequest() self._runPreBuild() self._runBuild() self._runPostBuild() def _argsRequiresProject(self): return self._args.updateLinks or self._args.updateUnitySolution \ or self._args.updateCustomSolution or self._args.buildCustomSolution \ or self._args.clearProjectGeneratedFiles or self._args.buildFull \ or self._args.openUnity or self._args.openCustomSolution \ or self._args.editProjectYaml or self._args.createProject \ or self._args.projectAddPackageAssets or self._args.projectAddPackagePlugins 
\ or self._args.deleteProject or self._args.listPackages def _validateRequest(self): if self._argsRequiresProject() and not self._args.project: assertThat(False, "Cannot execute the given arguments without a project specified, or a default project defined in the {0} file", ConfigFileName)
mit
Quikling/gpdb
gpMgmt/bin/gppylib/system/configurationImplGpdb.py
6
17891
#!/usr/bin/env python # # Copyright (c) Greenplum Inc 2010. All Rights Reserved. # Copyright (c) EMC/Greenplum Inc 2011. All Rights Reserved. # """ This file defines the interface that can be used to fetch and update system configuration information. """ import os, copy from gppylib.gplog import * from gppylib.utils import checkNotNone from gppylib.system.configurationInterface import * from gppylib.system.ComputeCatalogUpdate import ComputeCatalogUpdate from gppylib.gparray import GpArray, GpDB, InvalidSegmentConfiguration from gppylib import gparray from gppylib.db import dbconn from gppylib.commands.gp import get_local_db_mode logger = get_default_logger() class GpConfigurationProviderUsingGpdbCatalog(GpConfigurationProvider) : """ An implementation of GpConfigurationProvider will provide functionality to fetch and update gpdb system configuration information (as stored in the database) Note that the client of this is assuming that the database data is not changed by another party between the time segment data is loaded and when it is updated """ def __init__(self): self.__masterDbUrl = None def initializeProvider( self, masterPort ) : """ Initialize the provider to get information from the given master db, if it chooses to get its data from the database returns self """ checkNotNone("masterPort", masterPort) dbUrl = dbconn.DbURL(port=masterPort, dbname='template1') self.__masterDbUrl = dbUrl return self def loadSystemConfig( self, useUtilityMode, verbose=True ) : """ Load all segment information from the configuration source. 
Returns a new GpArray object """ # ensure initializeProvider() was called checkNotNone("masterDbUrl", self.__masterDbUrl) if verbose : logger.info("Obtaining Segment details from master...") array = GpArray.initFromCatalog(self.__masterDbUrl, useUtilityMode) if get_local_db_mode(array.master.getSegmentDataDirectory()) != 'UTILITY': logger.debug("Validating configuration...") if not array.is_array_valid(): raise InvalidSegmentConfiguration(array) return array def sendPgElogFromMaster( self, msg, sendAlerts): """ Send a message from the master database using select pg_elog ... """ # ensure initializeProvider() was called checkNotNone("masterDbUrl", self.__masterDbUrl) conn = None try: conn = dbconn.connect(self.__masterDbUrl, utility=True) dbconn.execSQL(conn, "SELECT GP_ELOG(" + self.__toSqlCharValue(msg) + "," + ("true" if sendAlerts else "false") + ")") finally: if conn: conn.close() def updateSystemConfig( self, gpArray, textForConfigTable, dbIdToForceMirrorRemoveAdd, useUtilityMode, allowPrimary) : """ Update the configuration for the given segments in the underlying configuration store to match the current values Also resets any dirty bits on saved/updated objects @param textForConfigTable label to be used when adding to segment configuration history @param dbIdToForceMirrorRemoveAdd a map of dbid -> True for mirrors for which we should force updating the mirror @param useUtilityMode True if the operations we're doing are expected to run via utility moed @param allowPrimary True if caller authorizes add/remove primary operations (e.g. 
gpexpand) """ # ensure initializeProvider() was called checkNotNone("masterDbUrl", self.__masterDbUrl) logger.debug("Validating configuration changes...") if not gpArray.is_array_valid(): logger.critical("Configuration is invalid") raise InvalidSegmentConfiguration(gpArray) conn = dbconn.connect(self.__masterDbUrl, useUtilityMode, allowSystemTableMods='dml') dbconn.execSQL(conn, "BEGIN") # compute what needs to be updated update = ComputeCatalogUpdate(gpArray, dbIdToForceMirrorRemoveAdd, useUtilityMode, allowPrimary) update.validate() # put the mirrors in a map by content id so we can update them later mirror_map = {} for seg in update.mirror_to_add: mirror_map[ seg.getSegmentContentId() ] = seg # reset dbId of new primary and mirror segments to -1 # before invoking the operations which will assign them new ids for seg in update.primary_to_add: seg.setSegmentDbId(-1) for seg in update.mirror_to_add: seg.setSegmentDbId(-1) # remove mirror segments (e.g. for gpexpand rollback) for seg in update.mirror_to_remove: self.__updateSystemConfigRemoveMirror(conn, seg, textForConfigTable) # remove primary segments (e.g for gpexpand rollback) for seg in update.primary_to_remove: self.__updateSystemConfigRemovePrimary(conn, seg, textForConfigTable) # add new primary segments for seg in update.primary_to_add: self.__updateSystemConfigAddPrimary(conn, gpArray, seg, textForConfigTable, mirror_map) # add new mirror segments for seg in update.mirror_to_add: self.__updateSystemConfigAddMirror(conn, gpArray, seg, textForConfigTable) # remove and add mirror segments necessitated by catalog attribute update for seg in update.mirror_to_remove_and_add: self.__updateSystemConfigRemoveAddMirror(conn, gpArray, seg, textForConfigTable) # apply updates to existing segments for seg in update.segment_to_update: originalSeg = update.dbsegmap.get(seg.getSegmentDbId()) self.__updateSystemConfigUpdateSegment(conn, gpArray, seg, originalSeg, textForConfigTable) # apply update to fault strategy if 
gpArray.getStrategyAsLoadedFromDb() != gpArray.getFaultStrategy(): self.__updateSystemConfigFaultStrategy(conn, gpArray) # commit changes logger.debug("Committing configuration table changes") dbconn.execSQL(conn, "COMMIT") conn.close() gpArray.setStrategyAsLoadedFromDb( [gpArray.getFaultStrategy()]) gpArray.setSegmentsAsLoadedFromDb([seg.copy() for seg in gpArray.getDbList()]) def __updateSystemConfigRemoveMirror(self, conn, seg, textForConfigTable): """ Remove a mirror segment currently in gp_segment_configuration but not present in the goal configuration and record our action in gp_configuration_history. """ dbId = seg.getSegmentDbId() self.__callSegmentRemoveMirror(conn, seg) self.__insertConfigHistory(conn, dbId, "%s: removed mirror segment configuration" % textForConfigTable) def __updateSystemConfigRemovePrimary(self, conn, seg, textForConfigTable): """ Remove a primary segment currently in gp_segment_configuration but not present in the goal configuration and record our action in gp_configuration_history. """ dbId = seg.getSegmentDbId() self.__callSegmentRemove(conn, seg) self.__insertConfigHistory(conn, dbId, "%s: removed primary segment configuration" % textForConfigTable) def __updateSystemConfigAddPrimary(self, conn, gpArray, seg, textForConfigTable, mirror_map): """ Add a primary segment specified in our goal configuration but which is missing from the current gp_segment_configuration table and record our action in gp_configuration_history. """ # lookup the mirror (if any) so that we may correct its content id mirrorseg = mirror_map.get( seg.getSegmentContentId() ) # add the new segment dbId = self.__callSegmentAdd(conn, gpArray, seg) # update the segment mode, status and replication port self.__updateSegmentModeStatus(conn, seg) if gpArray.get_mirroring_enabled() == True: self.__updateSegmentReplicationPort(conn, seg) # get the newly added segment's content id # MPP-12393 et al WARNING: there is an unusual side effect going on here. 
# Although gp_add_segment() executed by __callSegmentAdd() above returns # the dbId of the new row in gp_segment_configuration, the following # select from gp_segment_configuration can return 0 rows if the updates # done by __updateSegmentModeStatus() and/or __updateSegmentReplicationPort() # are not done first. Don't change the order of these operations unless you # understand why gp_add_segment() behaves as it does. sql = "select content from pg_catalog.gp_segment_configuration where dbId = %s" % self.__toSqlIntValue(seg.getSegmentDbId()) logger.debug(sql) sqlResult = self.__fetchSingleOutputRow(conn, sql) contentId = int(sqlResult[0]) # Set the new content id for the primary as well the mirror if present. seg.setSegmentContentId(contentId) if mirrorseg is not None: mirrorseg.setSegmentContentId(contentId) self.__insertConfigHistory(conn, dbId, "%s: inserted primary segment configuration with contentid %s" % (textForConfigTable, contentId)) def __updateSystemConfigAddMirror(self, conn, gpArray, seg, textForConfigTable): """ Add a mirror segment specified in our goal configuration but which is missing from the current gp_segment_configuration table and record our action in gp_configuration_history. """ dbId = self.__callSegmentAddMirror(conn, gpArray, seg) self.__updateSegmentModeStatus(conn, seg) self.__insertConfigHistory(conn, dbId, "%s: inserted mirror segment configuration" % textForConfigTable) def __updateSystemConfigRemoveAddMirror(self, conn, gpArray, seg, textForConfigTable): """ We've been asked to update the mirror in a manner that require it to be removed and then re-added. Perform the tasks and record our action in gp_configuration_history. 
""" origDbId = seg.getSegmentDbId() self.__callSegmentRemoveMirror(conn, seg) dbId = self.__callSegmentAddMirror(conn, gpArray, seg) # now update mode/status since this is not done by gp_add_segment_mirror self.__updateSegmentModeStatus(conn, seg) self.__insertConfigHistory(conn, seg.getSegmentDbId(), "%s: inserted segment configuration for full recovery or original dbid %s" \ % (textForConfigTable, origDbId)) def __updateSystemConfigUpdateSegment(self, conn, gpArray, seg, originalSeg, textForConfigTable): # update mode and status # when adding a mirror, the replication port may change as well # what = "%s: segment mode and status" self.__updateSegmentModeStatus(conn, seg) if seg.getSegmentReplicationPort() != originalSeg.getSegmentReplicationPort(): what = "%s: segment mode, status, and replication port" self.__updateSegmentReplicationPort(conn, seg) self.__insertConfigHistory(conn, seg.getSegmentDbId(), what % textForConfigTable) def __updateSystemConfigFaultStrategy(self, conn, gpArray): """ Update the fault strategy. """ fs = gpArray.getFaultStrategy() sql = "UPDATE gp_fault_strategy\n SET fault_strategy = " + self.__toSqlCharValue(fs) + "\n" logger.debug(sql) dbconn.executeUpdateOrInsert(conn, sql, 1) def __callSegmentRemoveMirror(self, conn, seg): """ Call gp_remove_segment_mirror() to remove the mirror. """ sql = "SELECT gp_remove_segment_mirror(%s::int2)" % (self.__toSqlIntValue(seg.getSegmentContentId())) logger.debug(sql) result = self.__fetchSingleOutputRow(conn, sql) assert result[0] # must return True def __callSegmentRemove(self, conn, seg): """ Call gp_remove_segment() to remove the primary. """ sql = "SELECT gp_remove_segment(%s::int2)" % (self.__toSqlIntValue(seg.getSegmentDbId())) logger.debug(sql) result = self.__fetchSingleOutputRow(conn, sql) assert result[0] def __callSegmentAdd(self, conn, gpArray, seg): """ Call gp_add_segment() to add the primary. Return the new segment's dbid. 
""" logger.debug('callSegmentAdd %s' % repr(seg)) filespaceMapStr = self.__toSqlFilespaceMapStr(gpArray, seg) sql = "SELECT gp_add_segment(%s, %s, %s, %s)" \ % ( self.__toSqlTextValue(seg.getSegmentHostName()), self.__toSqlTextValue(seg.getSegmentAddress()), self.__toSqlIntValue(seg.getSegmentPort()), self.__toSqlTextValue(filespaceMapStr) ) logger.debug(sql) sqlResult = self.__fetchSingleOutputRow(conn, sql) dbId = int(sqlResult[0]) seg.setSegmentDbId(dbId) return dbId def __callSegmentAddMirror(self, conn, gpArray, seg): """ Call gp_add_segment_mirror() to add the mirror. Return the new segment's dbid. """ logger.debug('callSegmentAddMirror %s' % repr(seg)) filespaceMapStr = self.__toSqlFilespaceMapStr(gpArray, seg) sql = "SELECT gp_add_segment_mirror(%s::int2, %s, %s, %s, %s, %s)" \ % ( self.__toSqlIntValue(seg.getSegmentContentId()), self.__toSqlTextValue(seg.getSegmentHostName()), self.__toSqlTextValue(seg.getSegmentAddress()), self.__toSqlIntValue(seg.getSegmentPort()), self.__toSqlIntValue(seg.getSegmentReplicationPort()), self.__toSqlTextValue(filespaceMapStr) ) logger.debug(sql) sqlResult = self.__fetchSingleOutputRow(conn, sql) dbId = int(sqlResult[0]) seg.setSegmentDbId(dbId) return dbId def __updateSegmentReplicationPort(self, conn, seg): # run an update sql = "UPDATE pg_catalog.gp_segment_configuration\n" + \ " SET\n" + \ " replication_port = " + self.__toSqlIntValue(seg.getSegmentReplicationPort()) + "\n" \ "WHERE dbid = " + self.__toSqlIntValue(seg.getSegmentDbId()) logger.debug(sql) dbconn.executeUpdateOrInsert(conn, sql, 1) def __updateSegmentModeStatus(self, conn, seg): # run an update sql = "UPDATE pg_catalog.gp_segment_configuration\n" + \ " SET\n" + \ " mode = " + self.__toSqlCharValue(seg.getSegmentMode()) + ",\n" \ " status = " + self.__toSqlCharValue(seg.getSegmentStatus()) + "\n" \ "WHERE dbid = " + self.__toSqlIntValue(seg.getSegmentDbId()) logger.debug(sql) dbconn.executeUpdateOrInsert(conn, sql, 1) def __fetchSingleOutputRow(self, conn, 
sql, retry=False): """ Execute specified SQL command and return what we expect to be a single row. Raise an exception when more or fewer than one row is seen and when more than one row is seen display up to 10 rows as logger warnings. """ cursor = dbconn.execSQL(conn, sql) numrows = cursor.rowcount numshown = 0 res = None for row in cursor: if numrows != 1: # # if we got back more than one row # we print a few of the rows first # instead of immediately raising an exception # numshown += 1 if numshown > 10: break logger.warning('>>> %s' % row) else: assert res is None res = row assert res is not None cursor.close() if numrows != 1: raise Exception("SQL returned %d rows, not 1 as expected:\n%s" % (numrows, sql)) return res def __insertConfigHistory(self, conn, dbId, msg ): # now update change history sql = "INSERT INTO gp_configuration_history (time, dbid, \"desc\") VALUES(\n" \ "now(),\n " + \ self.__toSqlIntValue(dbId) + ",\n " + \ self.__toSqlCharValue(msg) + "\n)" logger.debug(sql) dbconn.executeUpdateOrInsert(conn, sql, 1) def __toSqlFilespaceMapStr(self, gpArray, seg): """ Return a string representation of the filespace map suitable for inclusion into the call to gp_add_segment_mirror(). """ filespaceArrayString = [] for fs in gpArray.getFilespaces(): path = seg.getSegmentFilespaces()[ fs.getOid() ] filespaceArrayString.append("{%s,%s}" % \ (self.__toSqlArrayStringValue(fs.getName()), \ self.__toSqlArrayStringValue(path))) filespaceMapStr = "{" + ",".join(filespaceArrayString) + "}" return filespaceMapStr def __toSqlIntValue(self, val): if val is None: return "null" return str(val) def __toSqlArrayStringValue(self, val): if val is None: return "null" return '"' + val.replace('"','\\"').replace('\\','\\\\') + '"' def __toSqlCharValue(self, val): return self.__toSqlTextValue(val) def __toSqlTextValue(self, val): if val is None: return "null" return "'" + val.replace("'","''").replace('\\','\\\\') + "'"
apache-2.0
iqas/e2gui
lib/python/Plugins/Extensions/MiniTV/plugin.py
29
2039
from Plugins.Plugin import PluginDescriptor from Components.PluginComponent import plugins from Components.config import config, ConfigSubsection, ConfigSelection from enigma import eDBoxLCD config.plugins.minitv = ConfigSubsection() config.plugins.minitv.enable = ConfigSelection(default = "disable", choices = [ ("enable", "enable"), ("disable", "disable")]) class MiniTV: def __init__(self): config.plugins.minitv.enable.addNotifier(self.miniTVChanged, initial_call = True) config.misc.standbyCounter.addNotifier(self.standbyCounterChanged, initial_call = False) def getExtensionName(self): if config.plugins.minitv.enable.value == "enable": return _("Disable MiniTV") return _("Enable MiniTV") def showMiniTV(self): old_value = config.plugins.minitv.enable.value config.plugins.minitv.enable.value = (old_value == "enable") and "disable" or "enable" config.plugins.minitv.enable.save() def miniTVChanged(self, configElement): self.setMiniTV(configElement.value) def setMiniTV(self, value): cur_value = open("/proc/stb/lcd/live_enable", "r").read().strip() if cur_value != value: open("/proc/stb/lcd/live_enable", "w").write(value) def standbyCounterChanged(self, configElement): from Screens.Standby import inStandby if self.leaveStandby not in inStandby.onClose: inStandby.onClose.append(self.leaveStandby) self.setMiniTV("disable") def leaveStandby(self): self.setMiniTV(config.plugins.minitv.enable.value) minitv_instance = MiniTV() def addExtentions(infobarExtensions): infobarExtensions.addExtension((minitv_instance.getExtensionName, minitv_instance.showMiniTV, lambda: True), None) def autoStart(reason, **kwargs): if reason == 1: minitv_instance.setMiniTV("standby") def Plugins(**kwargs): list = [] list.append( PluginDescriptor(name="MiniTV", description="MiniTV", where = [PluginDescriptor.WHERE_EXTENSIONSINGLE], fnc = addExtentions)) list.append( PluginDescriptor( where = [PluginDescriptor.WHERE_AUTOSTART], fnc = autoStart)) return list
gpl-2.0
ChanderG/scipy
scipy/io/harwell_boeing/hb.py
83
18473
""" Implementation of Harwell-Boeing read/write. At the moment not the full Harwell-Boeing format is supported. Supported features are: - assembled, non-symmetric, real matrices - integer for pointer/indices - exponential format for float values, and int format """ from __future__ import division, print_function, absolute_import # TODO: # - Add more support (symmetric/complex matrices, non-assembled matrices ?) # XXX: reading is reasonably efficient (>= 85 % is in numpy.fromstring), but # takes a lot of memory. Being faster would require compiled code. # write is not efficient. Although not a terribly exciting task, # having reusable facilities to efficiently read/write fortran-formatted files # would be useful outside this module. import warnings import numpy as np from scipy.sparse import csc_matrix from scipy.io.harwell_boeing._fortran_format_parser import \ FortranFormatParser, IntFormat, ExpFormat from scipy._lib.six import string_types __all__ = ["MalformedHeader", "hb_read", "hb_write", "HBInfo", "HBFile", "HBMatrixType"] class MalformedHeader(Exception): pass class LineOverflow(Warning): pass def _nbytes_full(fmt, nlines): """Return the number of bytes to read to get every full lines for the given parsed fortran format.""" return (fmt.repeat * fmt.width + 1) * (nlines - 1) class HBInfo(object): @classmethod def from_data(cls, m, title="Default title", key="0", mxtype=None, fmt=None): """Create a HBInfo instance from an existing sparse matrix. 
Parameters ---------- m : sparse matrix the HBInfo instance will derive its parameters from m title : str Title to put in the HB header key : str Key mxtype : HBMatrixType type of the input matrix fmt : dict not implemented Returns ------- hb_info : HBInfo instance """ pointer = m.indptr indices = m.indices values = m.data nrows, ncols = m.shape nnon_zeros = m.nnz if fmt is None: # +1 because HB use one-based indexing (Fortran), and we will write # the indices /pointer as such pointer_fmt = IntFormat.from_number(np.max(pointer+1)) indices_fmt = IntFormat.from_number(np.max(indices+1)) if values.dtype.kind in np.typecodes["AllFloat"]: values_fmt = ExpFormat.from_number(-np.max(np.abs(values))) elif values.dtype.kind in np.typecodes["AllInteger"]: values_fmt = IntFormat.from_number(-np.max(np.abs(values))) else: raise NotImplementedError("type %s not implemented yet" % values.dtype.kind) else: raise NotImplementedError("fmt argument not supported yet.") if mxtype is None: if not np.isrealobj(values): raise ValueError("Complex values not supported yet") if values.dtype.kind in np.typecodes["AllInteger"]: tp = "integer" elif values.dtype.kind in np.typecodes["AllFloat"]: tp = "real" else: raise NotImplementedError("type %s for values not implemented" % values.dtype) mxtype = HBMatrixType(tp, "unsymmetric", "assembled") else: raise ValueError("mxtype argument not handled yet.") def _nlines(fmt, size): nlines = size // fmt.repeat if nlines * fmt.repeat != size: nlines += 1 return nlines pointer_nlines = _nlines(pointer_fmt, pointer.size) indices_nlines = _nlines(indices_fmt, indices.size) values_nlines = _nlines(values_fmt, values.size) total_nlines = pointer_nlines + indices_nlines + values_nlines return cls(title, key, total_nlines, pointer_nlines, indices_nlines, values_nlines, mxtype, nrows, ncols, nnon_zeros, pointer_fmt.fortran_format, indices_fmt.fortran_format, values_fmt.fortran_format) @classmethod def from_file(cls, fid): """Create a HBInfo instance from a 
file object containg a matrix in the HB format. Parameters ---------- fid : file-like matrix File or file-like object containing a matrix in the HB format. Returns ------- hb_info : HBInfo instance """ # First line line = fid.readline().strip("\n") if not len(line) > 72: raise ValueError("Expected at least 72 characters for first line, " "got: \n%s" % line) title = line[:72] key = line[72:] # Second line line = fid.readline().strip("\n") if not len(line.rstrip()) >= 56: raise ValueError("Expected at least 56 characters for second line, " "got: \n%s" % line) total_nlines = _expect_int(line[:14]) pointer_nlines = _expect_int(line[14:28]) indices_nlines = _expect_int(line[28:42]) values_nlines = _expect_int(line[42:56]) rhs_nlines = line[56:72].strip() if rhs_nlines == '': rhs_nlines = 0 else: rhs_nlines = _expect_int(rhs_nlines) if not rhs_nlines == 0: raise ValueError("Only files without right hand side supported for " "now.") # Third line line = fid.readline().strip("\n") if not len(line) >= 70: raise ValueError("Expected at least 72 character for third line, got:\n" "%s" % line) mxtype_s = line[:3].upper() if not len(mxtype_s) == 3: raise ValueError("mxtype expected to be 3 characters long") mxtype = HBMatrixType.from_fortran(mxtype_s) if mxtype.value_type not in ["real", "integer"]: raise ValueError("Only real or integer matrices supported for " "now (detected %s)" % mxtype) if not mxtype.structure == "unsymmetric": raise ValueError("Only unsymmetric matrices supported for " "now (detected %s)" % mxtype) if not mxtype.storage == "assembled": raise ValueError("Only assembled matrices supported for now") if not line[3:14] == " " * 11: raise ValueError("Malformed data for third line: %s" % line) nrows = _expect_int(line[14:28]) ncols = _expect_int(line[28:42]) nnon_zeros = _expect_int(line[42:56]) nelementals = _expect_int(line[56:70]) if not nelementals == 0: raise ValueError("Unexpected value %d for nltvl (last entry of line 3)" % nelementals) # Fourth line line = 
fid.readline().strip("\n") ct = line.split() if not len(ct) == 3: raise ValueError("Expected 3 formats, got %s" % ct) return cls(title, key, total_nlines, pointer_nlines, indices_nlines, values_nlines, mxtype, nrows, ncols, nnon_zeros, ct[0], ct[1], ct[2], rhs_nlines, nelementals) def __init__(self, title, key, total_nlines, pointer_nlines, indices_nlines, values_nlines, mxtype, nrows, ncols, nnon_zeros, pointer_format_str, indices_format_str, values_format_str, right_hand_sides_nlines=0, nelementals=0): """Do not use this directly, but the class ctrs (from_* functions).""" self.title = title self.key = key if title is None: title = "No Title" if len(title) > 72: raise ValueError("title cannot be > 72 characters") if key is None: key = "|No Key" if len(key) > 8: warnings.warn("key is > 8 characters (key is %s)" % key, LineOverflow) self.total_nlines = total_nlines self.pointer_nlines = pointer_nlines self.indices_nlines = indices_nlines self.values_nlines = values_nlines parser = FortranFormatParser() pointer_format = parser.parse(pointer_format_str) if not isinstance(pointer_format, IntFormat): raise ValueError("Expected int format for pointer format, got %s" % pointer_format) indices_format = parser.parse(indices_format_str) if not isinstance(indices_format, IntFormat): raise ValueError("Expected int format for indices format, got %s" % indices_format) values_format = parser.parse(values_format_str) if isinstance(values_format, ExpFormat): if mxtype.value_type not in ["real", "complex"]: raise ValueError("Inconsistency between matrix type %s and " "value type %s" % (mxtype, values_format)) values_dtype = np.float64 elif isinstance(values_format, IntFormat): if mxtype.value_type not in ["integer"]: raise ValueError("Inconsistency between matrix type %s and " "value type %s" % (mxtype, values_format)) # XXX: fortran int -> dtype association ? 
values_dtype = int else: raise ValueError("Unsupported format for values %r" % (values_format,)) self.pointer_format = pointer_format self.indices_format = indices_format self.values_format = values_format self.pointer_dtype = np.int32 self.indices_dtype = np.int32 self.values_dtype = values_dtype self.pointer_nlines = pointer_nlines self.pointer_nbytes_full = _nbytes_full(pointer_format, pointer_nlines) self.indices_nlines = indices_nlines self.indices_nbytes_full = _nbytes_full(indices_format, indices_nlines) self.values_nlines = values_nlines self.values_nbytes_full = _nbytes_full(values_format, values_nlines) self.nrows = nrows self.ncols = ncols self.nnon_zeros = nnon_zeros self.nelementals = nelementals self.mxtype = mxtype def dump(self): """Gives the header corresponding to this instance as a string.""" header = [self.title.ljust(72) + self.key.ljust(8)] header.append("%14d%14d%14d%14d" % (self.total_nlines, self.pointer_nlines, self.indices_nlines, self.values_nlines)) header.append("%14s%14d%14d%14d%14d" % (self.mxtype.fortran_format.ljust(14), self.nrows, self.ncols, self.nnon_zeros, 0)) pffmt = self.pointer_format.fortran_format iffmt = self.indices_format.fortran_format vffmt = self.values_format.fortran_format header.append("%16s%16s%20s" % (pffmt.ljust(16), iffmt.ljust(16), vffmt.ljust(20))) return "\n".join(header) def _expect_int(value, msg=None): try: return int(value) except ValueError: if msg is None: msg = "Expected an int, got %s" raise ValueError(msg % value) def _read_hb_data(content, header): # XXX: look at a way to reduce memory here (big string creation) ptr_string = "".join([content.read(header.pointer_nbytes_full), content.readline()]) ptr = np.fromstring(ptr_string, dtype=int, sep=' ') ind_string = "".join([content.read(header.indices_nbytes_full), content.readline()]) ind = np.fromstring(ind_string, dtype=int, sep=' ') val_string = "".join([content.read(header.values_nbytes_full), content.readline()]) val = np.fromstring(val_string, 
dtype=header.values_dtype, sep=' ') try: return csc_matrix((val, ind-1, ptr-1), shape=(header.nrows, header.ncols)) except ValueError as e: raise e def _write_data(m, fid, header): def write_array(f, ar, nlines, fmt): # ar_nlines is the number of full lines, n is the number of items per # line, ffmt the fortran format pyfmt = fmt.python_format pyfmt_full = pyfmt * fmt.repeat # for each array to write, we first write the full lines, and special # case for partial line full = ar[:(nlines - 1) * fmt.repeat] for row in full.reshape((nlines-1, fmt.repeat)): f.write(pyfmt_full % tuple(row) + "\n") nremain = ar.size - full.size if nremain > 0: f.write((pyfmt * nremain) % tuple(ar[ar.size - nremain:]) + "\n") fid.write(header.dump()) fid.write("\n") # +1 is for fortran one-based indexing write_array(fid, m.indptr+1, header.pointer_nlines, header.pointer_format) write_array(fid, m.indices+1, header.indices_nlines, header.indices_format) write_array(fid, m.data, header.values_nlines, header.values_format) class HBMatrixType(object): """Class to hold the matrix type.""" # q2f* translates qualified names to fortran character _q2f_type = { "real": "R", "complex": "C", "pattern": "P", "integer": "I", } _q2f_structure = { "symmetric": "S", "unsymmetric": "U", "hermitian": "H", "skewsymmetric": "Z", "rectangular": "R" } _q2f_storage = { "assembled": "A", "elemental": "E", } _f2q_type = dict([(j, i) for i, j in _q2f_type.items()]) _f2q_structure = dict([(j, i) for i, j in _q2f_structure.items()]) _f2q_storage = dict([(j, i) for i, j in _q2f_storage.items()]) @classmethod def from_fortran(cls, fmt): if not len(fmt) == 3: raise ValueError("Fortran format for matrix type should be 3 " "characters long") try: value_type = cls._f2q_type[fmt[0]] structure = cls._f2q_structure[fmt[1]] storage = cls._f2q_storage[fmt[2]] return cls(value_type, structure, storage) except KeyError: raise ValueError("Unrecognized format %s" % fmt) def __init__(self, value_type, structure, storage="assembled"): 
self.value_type = value_type self.structure = structure self.storage = storage if value_type not in self._q2f_type: raise ValueError("Unrecognized type %s" % value_type) if structure not in self._q2f_structure: raise ValueError("Unrecognized structure %s" % structure) if storage not in self._q2f_storage: raise ValueError("Unrecognized storage %s" % storage) @property def fortran_format(self): return self._q2f_type[self.value_type] + \ self._q2f_structure[self.structure] + \ self._q2f_storage[self.storage] def __repr__(self): return "HBMatrixType(%s, %s, %s)" % \ (self.value_type, self.structure, self.storage) class HBFile(object): def __init__(self, file, hb_info=None): """Create a HBFile instance. Parameters ---------- file : file-object StringIO work as well hb_info : HBInfo, optional Should be given as an argument for writing, in which case the file should be writable. """ self._fid = file if hb_info is None: self._hb_info = HBInfo.from_file(file) else: #raise IOError("file %s is not writable, and hb_info " # "was given." % file) self._hb_info = hb_info @property def title(self): return self._hb_info.title @property def key(self): return self._hb_info.key @property def type(self): return self._hb_info.mxtype.value_type @property def structure(self): return self._hb_info.mxtype.structure @property def storage(self): return self._hb_info.mxtype.storage def read_matrix(self): return _read_hb_data(self._fid, self._hb_info) def write_matrix(self, m): return _write_data(m, self._fid, self._hb_info) def hb_read(file): """Read HB-format file. Parameters ---------- file : str-like or file-like If a string-like object, file is the name of the file to read. If a file-like object, the data are read from it. Returns ------- data : scipy.sparse.csc_matrix instance The data read from the HB file as a sparse matrix. Notes ----- At the moment not the full Harwell-Boeing format is supported. 
Supported features are: - assembled, non-symmetric, real matrices - integer for pointer/indices - exponential format for float values, and int format """ def _get_matrix(fid): hb = HBFile(fid) return hb.read_matrix() if isinstance(file, string_types): fid = open(file) try: return _get_matrix(fid) finally: fid.close() else: return _get_matrix(file) def hb_write(file, m, hb_info=None): """Write HB-format file. Parameters ---------- file : str-like or file-like if a string-like object, file is the name of the file to read. If a file-like object, the data are read from it. m : sparse-matrix the sparse matrix to write hb_info : HBInfo contains the meta-data for write Returns ------- None Notes ----- At the moment not the full Harwell-Boeing format is supported. Supported features are: - assembled, non-symmetric, real matrices - integer for pointer/indices - exponential format for float values, and int format """ if hb_info is None: hb_info = HBInfo.from_data(m) def _set_matrix(fid): hb = HBFile(fid, hb_info) return hb.write_matrix(m) if isinstance(file, string_types): fid = open(file, "w") try: return _set_matrix(fid) finally: fid.close() else: return _set_matrix(file)
bsd-3-clause
berkerpeksag/astor
astor/code_gen.py
1
31784
# -*- coding: utf-8 -*- """ Part of the astor library for Python AST manipulation. License: 3-clause BSD Copyright (c) 2008 Armin Ronacher Copyright (c) 2012-2017 Patrick Maupin Copyright (c) 2013-2017 Berker Peksag This module converts an AST into Python source code. Before being version-controlled as part of astor, this code came from here (in 2012): https://gist.github.com/1250562 """ import ast import inspect import math import sys from .op_util import get_op_symbol, get_op_precedence, Precedence from .node_util import ExplicitNodeVisitor from .string_repr import pretty_string from .source_repr import pretty_source def to_source(node, indent_with=' ' * 4, add_line_information=False, pretty_string=pretty_string, pretty_source=pretty_source, source_generator_class=None): """This function can convert a node tree back into python sourcecode. This is useful for debugging purposes, especially if you're dealing with custom asts not generated by python itself. It could be that the sourcecode is evaluable when the AST itself is not compilable / evaluable. The reason for this is that the AST contains some more data than regular sourcecode does, which is dropped during conversion. Each level of indentation is replaced with `indent_with`. Per default this parameter is equal to four spaces as suggested by PEP 8, but it might be adjusted to match the application's styleguide. If `add_line_information` is set to `True` comments for the line numbers of the nodes are added to the output. This can be used to spot wrong line number information of statement nodes. `source_generator_class` defaults to `SourceGenerator`, and specifies the class that will be instantiated and used to generate the source code. 
""" if source_generator_class is None: source_generator_class = SourceGenerator elif not inspect.isclass(source_generator_class): raise TypeError('source_generator_class should be a class') elif not issubclass(source_generator_class, SourceGenerator): raise TypeError('source_generator_class should be a subclass of SourceGenerator') generator = source_generator_class( indent_with, add_line_information, pretty_string) generator.visit(node) generator.result.append('\n') if set(generator.result[0]) == set('\n'): generator.result[0] = '' return pretty_source(generator.result) def precedence_setter(AST=ast.AST, get_op_precedence=get_op_precedence, isinstance=isinstance, list=list): """ This only uses a closure for performance reasons, to reduce the number of attribute lookups. (set_precedence is called a lot of times.) """ def set_precedence(value, *nodes): """Set the precedence (of the parent) into the children. """ if isinstance(value, AST): value = get_op_precedence(value) for node in nodes: if isinstance(node, AST): node._pp = value elif isinstance(node, list): set_precedence(value, *node) else: assert node is None, node return set_precedence set_precedence = precedence_setter() class Delimit(object): """A context manager that can add enclosing delimiters around the output of a SourceGenerator method. By default, the parentheses are added, but the enclosed code may set discard=True to get rid of them. """ discard = False def __init__(self, tree, *args): """ use write instead of using result directly for initial data, because it may flush preceding data into result. 
""" delimiters = '()' node = None op = None for arg in args: if isinstance(arg, ast.AST): if node is None: node = arg else: op = arg else: delimiters = arg tree.write(delimiters[0]) result = self.result = tree.result self.index = len(result) self.closing = delimiters[1] if node is not None: self.p = p = get_op_precedence(op or node) self.pp = pp = tree.get__pp(node) self.discard = p >= pp def __enter__(self): return self def __exit__(self, *exc_info): result = self.result start = self.index - 1 if self.discard: result[start] = '' else: result.append(self.closing) class SourceGenerator(ExplicitNodeVisitor): """This visitor is able to transform a well formed syntax tree into Python sourcecode. For more details have a look at the docstring of the `to_source` function. """ using_unicode_literals = False def __init__(self, indent_with, add_line_information=False, pretty_string=pretty_string, # constants len=len, isinstance=isinstance, callable=callable): self.result = [] self.indent_with = indent_with self.add_line_information = add_line_information self.indentation = 0 # Current indentation level self.new_lines = 0 # Number of lines to insert before next code self.colinfo = 0, 0 # index in result of string containing linefeed, and # position of last linefeed in that string self.pretty_string = pretty_string AST = ast.AST visit = self.visit result = self.result append = result.append def write(*params): """ self.write is a closure for performance (to reduce the number of attribute lookups). """ for item in params: if isinstance(item, AST): visit(item) elif callable(item): item() else: if self.new_lines: append('\n' * self.new_lines) self.colinfo = len(result), 0 append(self.indent_with * self.indentation) self.new_lines = 0 if item: append(item) self.write = write def __getattr__(self, name, defaults=dict(keywords=(), _pp=Precedence.highest).get): """ Get an attribute of the node. 
like dict.get (returns None if doesn't exist) """ if not name.startswith('get_'): raise AttributeError geta = getattr shortname = name[4:] default = defaults(shortname) def getter(node): return geta(node, shortname, default) setattr(self, name, getter) return getter def delimit(self, *args): return Delimit(self, *args) def conditional_write(self, *stuff): if stuff[-1] is not None: self.write(*stuff) # Inform the caller that we wrote return True def newline(self, node=None, extra=0): self.new_lines = max(self.new_lines, 1 + extra) if node is not None and self.add_line_information: self.write('# line: %s' % node.lineno) self.new_lines = 1 def body(self, statements): self.indentation += 1 self.write(*statements) self.indentation -= 1 def else_body(self, elsewhat): if elsewhat: self.write(self.newline, 'else:') self.body(elsewhat) def body_or_else(self, node): self.body(node.body) self.else_body(node.orelse) def visit_arguments(self, node): want_comma = [] def write_comma(): if want_comma: self.write(', ') else: want_comma.append(True) def loop_args(args, defaults): set_precedence(Precedence.Comma, defaults) padding = [None] * (len(args) - len(defaults)) for arg, default in zip(args, padding + defaults): self.write(write_comma, arg) self.conditional_write('=', default) posonlyargs = getattr(node, 'posonlyargs', []) offset = 0 if posonlyargs: offset += len(node.defaults) - len(node.args) loop_args(posonlyargs, node.defaults[:offset]) self.write(write_comma, '/') loop_args(node.args, node.defaults[offset:]) self.conditional_write(write_comma, '*', node.vararg) kwonlyargs = self.get_kwonlyargs(node) if kwonlyargs: if node.vararg is None: self.write(write_comma, '*') loop_args(kwonlyargs, node.kw_defaults) self.conditional_write(write_comma, '**', node.kwarg) def statement(self, node, *params, **kw): self.newline(node) self.write(*params) def decorators(self, node, extra): self.newline(extra=extra) for decorator in node.decorator_list: self.statement(decorator, '@', 
decorator) def comma_list(self, items, trailing=False): set_precedence(Precedence.Comma, *items) for idx, item in enumerate(items): self.write(', ' if idx else '', item) self.write(',' if trailing else '') # Statements def visit_Assign(self, node): set_precedence(node, node.value, *node.targets) self.newline(node) for target in node.targets: self.write(target, ' = ') self.visit(node.value) def visit_AugAssign(self, node): set_precedence(node, node.value, node.target) self.statement(node, node.target, get_op_symbol(node.op, ' %s= '), node.value) def visit_AnnAssign(self, node): set_precedence(node, node.target, node.annotation) set_precedence(Precedence.Comma, node.value) need_parens = isinstance(node.target, ast.Name) and not node.simple begin = '(' if need_parens else '' end = ')' if need_parens else '' self.statement(node, begin, node.target, end, ': ', node.annotation) self.conditional_write(' = ', node.value) def visit_ImportFrom(self, node): self.statement(node, 'from ', node.level * '.', node.module or '', ' import ') self.comma_list(node.names) # Goofy stuff for Python 2.7 _pyio module if node.module == '__future__' and 'unicode_literals' in ( x.name for x in node.names): self.using_unicode_literals = True def visit_Import(self, node): self.statement(node, 'import ') self.comma_list(node.names) def visit_Expr(self, node): set_precedence(node, node.value) self.statement(node) self.generic_visit(node) def visit_FunctionDef(self, node, is_async=False): prefix = 'async ' if is_async else '' self.decorators(node, 1 if self.indentation else 2) self.statement(node, '%sdef %s' % (prefix, node.name), '(') self.visit_arguments(node.args) self.write(')') self.conditional_write(' -> ', self.get_returns(node)) self.write(':') self.body(node.body) if not self.indentation: self.newline(extra=2) # introduced in Python 3.5 def visit_AsyncFunctionDef(self, node): self.visit_FunctionDef(node, is_async=True) def visit_ClassDef(self, node): have_args = [] def paren_or_comma(): 
if have_args: self.write(', ') else: have_args.append(True) self.write('(') self.decorators(node, 2) self.statement(node, 'class %s' % node.name) for base in node.bases: self.write(paren_or_comma, base) # keywords not available in early version for keyword in self.get_keywords(node): self.write(paren_or_comma, keyword.arg or '', '=' if keyword.arg else '**', keyword.value) self.conditional_write(paren_or_comma, '*', self.get_starargs(node)) self.conditional_write(paren_or_comma, '**', self.get_kwargs(node)) self.write(have_args and '):' or ':') self.body(node.body) if not self.indentation: self.newline(extra=2) def visit_If(self, node): set_precedence(node, node.test) self.statement(node, 'if ', node.test, ':') self.body(node.body) while True: else_ = node.orelse if len(else_) == 1 and isinstance(else_[0], ast.If): node = else_[0] set_precedence(node, node.test) self.write(self.newline, 'elif ', node.test, ':') self.body(node.body) else: self.else_body(else_) break def visit_For(self, node, is_async=False): set_precedence(node, node.target) prefix = 'async ' if is_async else '' self.statement(node, '%sfor ' % prefix, node.target, ' in ', node.iter, ':') self.body_or_else(node) # introduced in Python 3.5 def visit_AsyncFor(self, node): self.visit_For(node, is_async=True) def visit_While(self, node): set_precedence(node, node.test) self.statement(node, 'while ', node.test, ':') self.body_or_else(node) def visit_With(self, node, is_async=False): prefix = 'async ' if is_async else '' self.statement(node, '%swith ' % prefix) if hasattr(node, "context_expr"): # Python < 3.3 self.visit_withitem(node) else: # Python >= 3.3 self.comma_list(node.items) self.write(':') self.body(node.body) # new for Python 3.5 def visit_AsyncWith(self, node): self.visit_With(node, is_async=True) # new for Python 3.3 def visit_withitem(self, node): self.write(node.context_expr) self.conditional_write(' as ', node.optional_vars) # deprecated in Python 3.8 def visit_NameConstant(self, node): 
self.write(repr(node.value)) def visit_Pass(self, node): self.statement(node, 'pass') def visit_Print(self, node): # XXX: python 2.6 only self.statement(node, 'print ') values = node.values if node.dest is not None: self.write(' >> ') values = [node.dest] + node.values self.comma_list(values, not node.nl) def visit_Delete(self, node): self.statement(node, 'del ') self.comma_list(node.targets) def visit_TryExcept(self, node): self.statement(node, 'try:') self.body(node.body) self.write(*node.handlers) self.else_body(node.orelse) # new for Python 3.3 def visit_Try(self, node): self.statement(node, 'try:') self.body(node.body) self.write(*node.handlers) self.else_body(node.orelse) if node.finalbody: self.statement(node, 'finally:') self.body(node.finalbody) def visit_ExceptHandler(self, node): self.statement(node, 'except') if self.conditional_write(' ', node.type): self.conditional_write(' as ', node.name) self.write(':') self.body(node.body) def visit_TryFinally(self, node): self.statement(node, 'try:') self.body(node.body) self.statement(node, 'finally:') self.body(node.finalbody) def visit_Exec(self, node): dicts = node.globals, node.locals dicts = dicts[::-1] if dicts[0] is None else dicts self.statement(node, 'exec ', node.body) self.conditional_write(' in ', dicts[0]) self.conditional_write(', ', dicts[1]) def visit_Assert(self, node): set_precedence(node, node.test, node.msg) self.statement(node, 'assert ', node.test) self.conditional_write(', ', node.msg) def visit_Global(self, node): self.statement(node, 'global ', ', '.join(node.names)) def visit_Nonlocal(self, node): self.statement(node, 'nonlocal ', ', '.join(node.names)) def visit_Return(self, node): set_precedence(node, node.value) self.statement(node, 'return') self.conditional_write(' ', node.value) def visit_Break(self, node): self.statement(node, 'break') def visit_Continue(self, node): self.statement(node, 'continue') def visit_Raise(self, node): # XXX: Python 2.6 / 3.0 compatibility 
self.statement(node, 'raise') if self.conditional_write(' ', self.get_exc(node)): self.conditional_write(' from ', node.cause) elif self.conditional_write(' ', self.get_type(node)): set_precedence(node, node.inst) self.conditional_write(', ', node.inst) self.conditional_write(', ', node.tback) # Expressions def visit_Attribute(self, node): self.write(node.value, '.', node.attr) def visit_Call(self, node, len=len): write = self.write want_comma = [] def write_comma(): if want_comma: write(', ') else: want_comma.append(True) args = node.args keywords = node.keywords starargs = self.get_starargs(node) kwargs = self.get_kwargs(node) numargs = len(args) + len(keywords) numargs += starargs is not None numargs += kwargs is not None p = Precedence.Comma if numargs > 1 else Precedence.call_one_arg set_precedence(p, *args) self.visit(node.func) write('(') for arg in args: write(write_comma, arg) set_precedence(Precedence.Comma, *(x.value for x in keywords)) for keyword in keywords: # a keyword.arg of None indicates dictionary unpacking # (Python >= 3.5) arg = keyword.arg or '' write(write_comma, arg, '=' if arg else '**', keyword.value) # 3.5 no longer has these self.conditional_write(write_comma, '*', starargs) self.conditional_write(write_comma, '**', kwargs) write(')') def visit_Name(self, node): self.write(node.id) # ast.Constant is new in Python 3.6 and it replaces ast.Bytes, # ast.Ellipsis, ast.NameConstant, ast.Num, ast.Str in Python 3.8 def visit_Constant(self, node): value = node.value if isinstance(value, (int, float, complex)): with self.delimit(node): self._handle_numeric_constant(value) elif isinstance(value, str): self._handle_string_constant(node, node.value) elif value is Ellipsis: self.write('...') else: self.write(repr(value)) def visit_JoinedStr(self, node): self._handle_string_constant(node, None, is_joined=True) def _handle_string_constant(self, node, value, is_joined=False): # embedded is used to control when we might want # to use a triple-quoted 
string. We determine # if we are in an assignment and/or in an expression precedence = self.get__pp(node) embedded = ((precedence > Precedence.Expr) + (precedence >= Precedence.Assign)) # Flush any pending newlines, because we're about # to severely abuse the result list. self.write('') result = self.result # Calculate the string representing the line # we are working on, up to but not including # the string we are adding. res_index, str_index = self.colinfo current_line = self.result[res_index:] if str_index: current_line[0] = current_line[0][str_index:] current_line = ''.join(current_line) has_ast_constant = sys.version_info >= (3, 6) if is_joined: # Handle new f-strings. This is a bit complicated, because # the tree can contain subnodes that recurse back to JoinedStr # subnodes... def recurse(node): for value in node.values: if isinstance(value, ast.Str): # Double up braces to escape them. self.write(value.s.replace('{', '{{').replace('}', '}}')) elif isinstance(value, ast.FormattedValue): with self.delimit('{}'): set_precedence(value, value.value) self.visit(value.value) if value.conversion != -1: self.write('!%s' % chr(value.conversion)) if value.format_spec is not None: self.write(':') recurse(value.format_spec) elif has_ast_constant and isinstance(value, ast.Constant): self.write(value.value) else: kind = type(value).__name__ assert False, 'Invalid node %s inside JoinedStr' % kind index = len(result) recurse(node) # Flush trailing newlines (so that they are part of mystr) self.write('') mystr = ''.join(result[index:]) del result[index:] self.colinfo = res_index, str_index # Put it back like we found it uni_lit = False # No formatted byte strings else: assert value is not None, "Node value cannot be None" mystr = value uni_lit = self.using_unicode_literals mystr = self.pretty_string(mystr, embedded, current_line, uni_lit) if is_joined: mystr = 'f' + mystr elif getattr(node, 'kind', False): # Constant.kind is a Python 3.8 addition. 
mystr = node.kind + mystr self.write(mystr) lf = mystr.rfind('\n') + 1 if lf: self.colinfo = len(result) - 1, lf # deprecated in Python 3.8 def visit_Str(self, node): self._handle_string_constant(node, node.s) # deprecated in Python 3.8 def visit_Bytes(self, node): self.write(repr(node.s)) def _handle_numeric_constant(self, value): x = value def part(p, imaginary): # Represent infinity as 1e1000 and NaN as 1e1000-1e1000. s = 'j' if imaginary else '' try: if math.isinf(p): if p < 0: return '-1e1000' + s return '1e1000' + s if math.isnan(p): return '(1e1000%s-1e1000%s)' % (s, s) except OverflowError: # math.isinf will raise this when given an integer # that's too large to convert to a float. pass return repr(p) + s real = part(x.real if isinstance(x, complex) else x, imaginary=False) if isinstance(x, complex): imag = part(x.imag, imaginary=True) if x.real == 0: s = imag elif x.imag == 0: s = '(%s+0j)' % real else: # x has nonzero real and imaginary parts. s = '(%s%s%s)' % (real, ['+', ''][imag.startswith('-')], imag) else: s = real self.write(s) def visit_Num(self, node, # constants new=sys.version_info >= (3, 0)): with self.delimit(node) as delimiters: self._handle_numeric_constant(node.n) # We can leave the delimiters handling in visit_Num # since this is meant to handle a Python 2.x specific # issue and ast.Constant exists only in 3.6+ # The Python 2.x compiler merges a unary minus # with a number. This is a premature optimization # that we deal with here... 
if not new and delimiters.discard: if not isinstance(node.n, complex) and node.n < 0: pow_lhs = Precedence.Pow + 1 delimiters.discard = delimiters.pp != pow_lhs else: op = self.get__p_op(node) delimiters.discard = not isinstance(op, ast.USub) def visit_Tuple(self, node): with self.delimit(node) as delimiters: # Two things are special about tuples: # 1) We cannot discard the enclosing parentheses if empty # 2) We need the trailing comma if only one item elts = node.elts delimiters.discard = delimiters.discard and elts self.comma_list(elts, len(elts) == 1) def visit_List(self, node): with self.delimit('[]'): self.comma_list(node.elts) def visit_Set(self, node): if node.elts: with self.delimit('{}'): self.comma_list(node.elts) else: # If we tried to use "{}" to represent an empty set, it would be # interpreted as an empty dictionary. We can't use "set()" either # because the name "set" might be rebound. self.write('{1}.__class__()') def visit_Dict(self, node): set_precedence(Precedence.Comma, *node.values) with self.delimit('{}'): for idx, (key, value) in enumerate(zip(node.keys, node.values)): self.write(', ' if idx else '', key if key else '', ': ' if key else '**', value) def visit_BinOp(self, node): op, left, right = node.op, node.left, node.right with self.delimit(node, op) as delimiters: ispow = isinstance(op, ast.Pow) p = delimiters.p set_precedence((Precedence.Pow + 1) if ispow else p, left) set_precedence(Precedence.PowRHS if ispow else (p + 1), right) self.write(left, get_op_symbol(op, ' %s '), right) def visit_BoolOp(self, node): with self.delimit(node, node.op) as delimiters: op = get_op_symbol(node.op, ' %s ') set_precedence(delimiters.p + 1, *node.values) for idx, value in enumerate(node.values): self.write(idx and op or '', value) def visit_Compare(self, node): with self.delimit(node, node.ops[0]) as delimiters: set_precedence(delimiters.p + 1, node.left, *node.comparators) self.visit(node.left) for op, right in zip(node.ops, node.comparators): 
self.write(get_op_symbol(op, ' %s '), right) # assignment expressions; new for Python 3.8 def visit_NamedExpr(self, node): with self.delimit(node) as delimiters: p = delimiters.p set_precedence(p, node.target) set_precedence(p + 1, node.value) # Python is picky about delimiters for assignment # expressions: it requires at least one pair in any # statement that uses an assignment expression, even # when not necessary according to the precedence # rules. We address this with the kludge of forcing a # pair of parentheses around every assignment # expression. delimiters.discard = False self.write(node.target, ' := ', node.value) def visit_UnaryOp(self, node): with self.delimit(node, node.op) as delimiters: set_precedence(delimiters.p, node.operand) # In Python 2.x, a unary negative of a literal # number is merged into the number itself. This # bit of ugliness means it is useful to know # what the parent operation was... node.operand._p_op = node.op sym = get_op_symbol(node.op) self.write(sym, ' ' if sym.isalpha() else '', node.operand) def visit_Subscript(self, node): set_precedence(node, node.slice) self.write(node.value, '[', node.slice, ']') def visit_Slice(self, node): set_precedence(node, node.lower, node.upper, node.step) self.conditional_write(node.lower) self.write(':') self.conditional_write(node.upper) if node.step is not None: self.write(':') if not (isinstance(node.step, ast.Name) and node.step.id == 'None'): self.visit(node.step) def visit_Index(self, node): with self.delimit(node) as delimiters: set_precedence(delimiters.p, node.value) self.visit(node.value) def visit_ExtSlice(self, node): dims = node.dims set_precedence(node, *dims) self.comma_list(dims, len(dims) == 1) def visit_Yield(self, node): with self.delimit(node): set_precedence(get_op_precedence(node) + 1, node.value) self.write('yield') self.conditional_write(' ', node.value) # new for Python 3.3 def visit_YieldFrom(self, node): with self.delimit(node): self.write('yield from ', node.value) # 
new for Python 3.5 def visit_Await(self, node): with self.delimit(node): self.write('await ', node.value) def visit_Lambda(self, node): with self.delimit(node) as delimiters: set_precedence(delimiters.p, node.body) self.write('lambda ') self.visit_arguments(node.args) self.write(': ', node.body) def visit_Ellipsis(self, node): self.write('...') def visit_ListComp(self, node): with self.delimit('[]'): self.write(node.elt, *node.generators) def visit_GeneratorExp(self, node): with self.delimit(node) as delimiters: if delimiters.pp == Precedence.call_one_arg: delimiters.discard = True set_precedence(Precedence.Comma, node.elt) self.write(node.elt, *node.generators) def visit_SetComp(self, node): with self.delimit('{}'): self.write(node.elt, *node.generators) def visit_DictComp(self, node): with self.delimit('{}'): self.write(node.key, ': ', node.value, *node.generators) def visit_IfExp(self, node): with self.delimit(node) as delimiters: set_precedence(delimiters.p + 1, node.body, node.test) set_precedence(delimiters.p, node.orelse) self.write(node.body, ' if ', node.test, ' else ', node.orelse) def visit_Starred(self, node): self.write('*', node.value) def visit_Repr(self, node): # XXX: python 2.6 only with self.delimit('``'): self.visit(node.value) def visit_Module(self, node): self.write(*node.body) visit_Interactive = visit_Module def visit_Expression(self, node): self.visit(node.body) # Helper Nodes def visit_arg(self, node): self.write(node.arg) self.conditional_write(': ', node.annotation) def visit_alias(self, node): self.write(node.name) self.conditional_write(' as ', node.asname) def visit_comprehension(self, node): set_precedence(node, node.iter, *node.ifs) set_precedence(Precedence.comprehension_target, node.target) stmt = ' async for ' if self.get_is_async(node) else ' for ' self.write(stmt, node.target, ' in ', node.iter) for if_ in node.ifs: self.write(' if ', if_)
bsd-3-clause
pashinin/fabric
fabric/contrib/console.py
67
1175
""" Console/terminal user interface functionality. """ from fabric.api import prompt def confirm(question, default=True): """ Ask user a yes/no question and return their response as True or False. ``question`` should be a simple, grammatically complete question such as "Do you wish to continue?", and will have a string similar to " [Y/n] " appended automatically. This function will *not* append a question mark for you. By default, when the user presses Enter without typing anything, "yes" is assumed. This can be changed by specifying ``default=False``. """ # Set up suffix if default: suffix = "Y/n" else: suffix = "y/N" # Loop till we get something we like while True: response = prompt("%s [%s] " % (question, suffix)).lower() # Default if not response: return default # Yes if response in ['y', 'yes']: return True # No if response in ['n', 'no']: return False # Didn't get empty, yes or no, so complain and loop print("I didn't understand you. Please specify '(y)es' or '(n)o'.")
bsd-2-clause
seims/SEIMS
preprocess/import_parameters.py
2
8592
#! /usr/bin/env python # coding=utf-8 # @Import model calibrateion parameters # Author: Junzhi Liu # Revised: Liang-Jun Zhu # import sqlite3 from struct import pack from gridfs import * from pymongo import MongoClient from pymongo.errors import ConnectionFailure from config import * from gen_subbasins import ImportSubbasinStatistics from util import * def ImportParameters(sqlite_file, db): # delete if existed, create if not existed cList = db.collection_names() if not StringInList(DB_TAB_PARAMETERS.upper(), cList): db.create_collection(DB_TAB_PARAMETERS.upper()) else: db.drop_collection(DB_TAB_PARAMETERS.upper()) # read sqlite database conn = sqlite3.connect(sqlite_file) c = conn.cursor() # get all the tablename c.execute("select name from sqlite_master where type='table' order by name;") tablelist = c.fetchall() # Find parameter table list excluding "XXLookup" tablelist = [item[0].encode("ascii") for item in tablelist if ( item[0].lower().find("lookup") < 0)] # print tablelist field_list = [PARAM_FLD_NAME.upper(), PARAM_FLD_DESC.upper(), PARAM_FLD_UNIT.upper(), PARAM_FLD_MODS.upper(), PARAM_FLD_VALUE.upper(), PARAM_FLD_IMPACT.upper(), PARAM_FLD_CHANGE.upper(), PARAM_FLD_MAX.upper(), PARAM_FLD_MIN.upper(), PARAM_FLD_USE.upper()] for tablename in tablelist: # print tablename str_sql = "select * from %s;" % (tablename,) cur = c.execute(str_sql) records = cur.fetchall() for items in records: dic = {} dic[Tag_DT_Type.upper()] = tablename for i in range(len(items)): if (type(items[i]) == type('a') or type(items[i]) == type(u'a')): dic[field_list[i].upper()] = items[i].encode('ascii') else: dic[field_list[i].upper()] = items[i] curfilter = {PARAM_FLD_NAME.upper(): dic[ PARAM_FLD_NAME.upper()], Tag_DT_Type.upper(): tablename} db[DB_TAB_PARAMETERS.upper()].find_one_and_replace( curfilter, dic, upsert=True) db[DB_TAB_PARAMETERS.upper()].create_index(PARAM_FLD_NAME.upper()) c.close() conn.close() print 'Model parameter tables are imported.' 
def ImportLookupTables(sqlite_file, db): ''' :param sqlite_file: SQLite database file contains lookup tables :param db: MongoDB Client :return: None ''' # read sqlite database conn = sqlite3.connect(sqlite_file) c = conn.cursor() # get all the tablename c.execute("select name from sqlite_master where type='table' order by name;") tablelist = c.fetchall() # Find parameter table list excluding "XXLookup" tablelist = [item[0].encode("ascii") for item in tablelist if ( item[0].lower().find("lookup") >= 0)] # print tablelist for tablename in tablelist: # print tablename str_sql = "select * from %s;" % (tablename,) cur = c.execute(str_sql) records = cur.fetchall() itemValues = [] for items in records: itemValue = [] for i in range(len(items)): if isNumericValue(items[i]): itemValue.append(float(items[i])) itemValues.append(itemValue) nRow = len(itemValues) # print itemValues if (nRow >= 1): nCol = len(itemValues[0]) for i in range(nRow): if (nCol != len(itemValues[i])): raise ValueError( "Please check %s to make sure each item has the same numeric dimension." % tablename) else: itemValues[i].insert(0, nCol) # import to mongoDB as GridFS spatial = GridFS(db, DB_TAB_SPATIAL.upper()) # delete if the tablename file existed already. if (spatial.exists(filename=tablename.upper())): x = spatial.get_version(filename=tablename.upper()) spatial.delete(x._id) metadic = {META_LOOKUP_ITEM_COUNT.upper(): nRow, META_LOOKUP_FIELD_COUNT.upper(): nCol} curLookupGridFS = spatial.new_file(filename=tablename.upper(), metadata=metadic) header = [nRow] fmt = '%df' % (1) s = pack(fmt, *header) curLookupGridFS.write(s) fmt = '%df' % (nCol + 1) for i in range(nRow): s = pack(fmt, *itemValues[i]) curLookupGridFS.write(s) curLookupGridFS.close() c.close() conn.close() print 'Lookup tables are imported.' 
def ImportModelConfiguration(db): ''' Import Configuration information of SEIMS, i.e., file.in and file.out :return: ''' fileIn = MODEL_DIR + os.sep + FILE_IN fileOut = MODEL_DIR + os.sep + FILE_OUT # create if collection not existed cList = db.collection_names() conf_tabs = [DB_TAB_FILE_IN.upper(), DB_TAB_FILE_OUT.upper()] for item in conf_tabs: if not StringInList(item, cList): db.create_collection(item) else: db.drop_collection(item) fileInItems = ReadDataItemsFromTxt(fileIn) fileOutItems = ReadDataItemsFromTxt(fileOut) for item in fileInItems: fileInDict = {} values = SplitStr(StripStr(item[0]), ['|']) if len(values) != 2: raise ValueError("One item should only have one Tag and one value string, split by '|'") fileInDict[FLD_CONF_TAG] = values[0] fileInDict[FLD_CONF_VALUE] = values[1] db[DB_TAB_FILE_IN.upper()].find_one_and_replace( fileInDict, fileInDict, upsert=True) outFieldArray = fileOutItems[0] outDataArray = fileOutItems[1:] # print outDataArray for item in outDataArray: fileOutDict = {} for i in range(len(outFieldArray)): if StringMatch(FLD_CONF_MODCLS, outFieldArray[i]): fileOutDict[FLD_CONF_MODCLS] = item[i] elif StringMatch(FLD_CONF_OUTPUTID, outFieldArray[i]): fileOutDict[FLD_CONF_OUTPUTID] = item[i] elif StringMatch(FLD_CONF_DESC, outFieldArray[i]): fileOutDict[FLD_CONF_DESC] = item[i] elif StringMatch(FLD_CONF_UNIT, outFieldArray[i]): fileOutDict[FLD_CONF_UNIT] = item[i] elif StringMatch(FLD_CONF_TYPE, outFieldArray[i]): fileOutDict[FLD_CONF_TYPE] = item[i] elif StringMatch(FLD_CONF_STIME, outFieldArray[i]): fileOutDict[FLD_CONF_STIME] = item[i] elif StringMatch(FLD_CONF_ETIME, outFieldArray[i]): fileOutDict[FLD_CONF_ETIME] = item[i] elif StringMatch(FLD_CONF_INTERVAL, outFieldArray[i]): fileOutDict[FLD_CONF_INTERVAL] = item[i] elif StringMatch(FLD_CONF_INTERVALUNIT, outFieldArray[i]): fileOutDict[FLD_CONF_INTERVALUNIT] = item[i] elif StringMatch(FLD_CONF_FILENAME, outFieldArray[i]): fileOutDict[FLD_CONF_FILENAME] = item[i] elif 
StringMatch(FLD_CONF_USE, outFieldArray[i]): fileOutDict[FLD_CONF_USE] = item[i] elif StringMatch(FLD_CONF_SUBBSN, outFieldArray[i]): fileOutDict[FLD_CONF_SUBBSN] = item[i] if fileOutDict.keys() is []: raise ValueError( "There are not any valid output item stored in file.out!") curFileter = {FLD_CONF_MODCLS: fileOutDict[FLD_CONF_MODCLS], FLD_CONF_OUTPUTID: fileOutDict[FLD_CONF_OUTPUTID], FLD_CONF_STIME: fileOutDict[FLD_CONF_STIME], FLD_CONF_ETIME: fileOutDict[FLD_CONF_ETIME]} db[DB_TAB_FILE_OUT].find_one_and_replace( curFileter, fileOutDict, upsert=True) print 'Model configuration tables are imported.' if __name__ == "__main__": # Load Configuration file LoadConfiguration(GetINIfile()) import sys try: conn = MongoClient(HOSTNAME, PORT) except ConnectionFailure, e: sys.stderr.write("Could not connect to MongoDB: %s" % e) sys.exit(1) db = conn[SpatialDBName] from txt2db3 import reConstructSQLiteDB reConstructSQLiteDB() ImportParameters(TXT_DB_DIR + os.sep + sqliteFile, db) # IMPORT LOOKUP TABLES AS GRIDFS, DT_Array2D ImportLookupTables(TXT_DB_DIR + os.sep + sqliteFile, db) ImportModelConfiguration(db) ImportSubbasinStatistics()
gpl-2.0
ShashaQin/erpnext
erpnext/controllers/tests/test_recurring_document.py
31
4665
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt

from __future__ import unicode_literals
import frappe
import frappe.permissions
from erpnext.controllers.recurring_document import date_field_map
from frappe.utils import get_first_day, get_last_day, add_to_date, nowdate, getdate, add_days


def test_recurring_document(obj, test_records):
	"""Exercise recurring-document creation for every recurrence type.

	``obj`` is the calling unittest TestCase (used for its assert helpers)
	and ``test_records`` provides the fixture the base document is copied
	from. Seven documents are submitted, covering monthly / quarterly /
	yearly recurrence with and without first/last-day periods, each checked
	via ``_test_recurring_document``.
	"""
	frappe.db.set_value("Print Settings", "Print Settings", "send_print_as_pdf", 1)
	today = nowdate()
	base_doc = frappe.copy_doc(test_records[0])

	# Base recurrence setup shared by all documents below; the period is the
	# current calendar month and the repeat day is today's day-of-month.
	base_doc.update({
		"is_recurring": 1,
		"submit_on_create": 1,
		"recurring_type": "Monthly",
		"notification_email_address": "test@example.com, test1@example.com, test2@example.com",
		"repeat_on_day_of_month": getdate(today).day,
		"due_date": None,
		"from_date": get_first_day(today),
		"to_date": get_last_day(today)
	})

	# The doctype-specific date field (e.g. posting_date) is looked up from
	# the shared map so this helper works for several doctypes.
	date_field = date_field_map[base_doc.doctype]
	base_doc.set(date_field, today)

	if base_doc.doctype == "Sales Order":
		base_doc.set("delivery_date", add_days(today, 15))

	# monthly
	doc1 = frappe.copy_doc(base_doc)
	doc1.insert()
	doc1.submit()
	_test_recurring_document(obj, doc1, date_field, True)

	# monthly without a first and last day period
	# (skipped when today is the 1st, since the period would coincide)
	if getdate(today).day != 1:
		doc2 = frappe.copy_doc(base_doc)
		doc2.update({
			"from_date": today,
			"to_date": add_to_date(today, days=30)
		})
		doc2.insert()
		doc2.submit()
		_test_recurring_document(obj, doc2, date_field, False)

	# quarterly
	doc3 = frappe.copy_doc(base_doc)
	doc3.update({
		"recurring_type": "Quarterly",
		"from_date": get_first_day(today),
		"to_date": get_last_day(add_to_date(today, months=3))
	})
	doc3.insert()
	doc3.submit()
	_test_recurring_document(obj, doc3, date_field, True)

	# quarterly without a first and last day period
	doc4 = frappe.copy_doc(base_doc)
	doc4.update({
		"recurring_type": "Quarterly",
		"from_date": today,
		"to_date": add_to_date(today, months=3)
	})
	doc4.insert()
	doc4.submit()
	_test_recurring_document(obj, doc4, date_field, False)

	# yearly
	doc5 = frappe.copy_doc(base_doc)
	doc5.update({
		"recurring_type": "Yearly",
		"from_date": get_first_day(today),
		"to_date": get_last_day(add_to_date(today, years=1))
	})
	doc5.insert()
	doc5.submit()
	_test_recurring_document(obj, doc5, date_field, True)

	# yearly without a first and last day period
	doc6 = frappe.copy_doc(base_doc)
	doc6.update({
		"recurring_type": "Yearly",
		"from_date": today,
		"to_date": add_to_date(today, years=1)
	})
	doc6.insert()
	doc6.submit()
	_test_recurring_document(obj, doc6, date_field, False)

	# change date field but keep recurring day to be today
	doc7 = frappe.copy_doc(base_doc)
	doc7.update({
		date_field: today,
	})
	doc7.insert()
	doc7.submit()

	# setting so that _test function works
	# doc7.set(date_field, today)
	_test_recurring_document(obj, doc7, date_field, True)


def _test_recurring_document(obj, base_doc, date_field, first_and_last_day):
	"""Trigger the recurrence engine and verify the generated document.

	Runs ``manage_recurring_documents`` one period at a time (5 repetitions,
	or 1 for yearly) and asserts: the document count grows by one, the
	recurrence fields are copied, and the new period dates are shifted by
	``no_of_months`` (snapped to month boundaries when
	``first_and_last_day`` is True).
	"""
	from frappe.utils import add_months, get_last_day
	from erpnext.controllers.recurring_document import manage_recurring_documents, \
		get_next_date

	no_of_months = ({"Monthly": 1, "Quarterly": 3, "Yearly": 12})[base_doc.recurring_type]

	def _test(i):
		# Before triggering, exactly i+1 documents share this recurring_id.
		obj.assertEquals(i+1, frappe.db.sql("""select count(*) from `tab%s` where recurring_id=%s and (docstatus=1 or docstatus=0)""" % (base_doc.doctype, '%s'),
			(base_doc.recurring_id))[0][0])

		next_date = get_next_date(base_doc.get(date_field), no_of_months,
			base_doc.repeat_on_day_of_month)

		manage_recurring_documents(base_doc.doctype, next_date=next_date, commit=False)

		recurred_documents = frappe.db.sql("""select name from `tab%s` where recurring_id=%s and (docstatus=1 or docstatus=0) order by name desc""" % (base_doc.doctype, '%s'),
			(base_doc.recurring_id))

		# Exactly one new document must have been generated.
		obj.assertEquals(i+2, len(recurred_documents))

		new_doc = frappe.get_doc(base_doc.doctype, recurred_documents[0][0])

		# Recurrence settings must be carried over unchanged.
		for fieldname in ["is_recurring", "recurring_type",
			"repeat_on_day_of_month", "notification_email_address"]:
			obj.assertEquals(base_doc.get(fieldname), new_doc.get(fieldname))

		obj.assertEquals(new_doc.get(date_field), getdate(next_date))

		obj.assertEquals(new_doc.from_date, getdate(add_months(base_doc.from_date, no_of_months)))

		if first_and_last_day:
			obj.assertEquals(new_doc.to_date,
				getdate(get_last_day(add_months(base_doc.to_date, no_of_months))))
		else:
			obj.assertEquals(new_doc.to_date, getdate(add_months(base_doc.to_date, no_of_months)))

		return new_doc

	# if yearly, test 1 repetition, else test 5 repetitions
	count = 1 if (no_of_months == 12) else 5
	for i in xrange(count):
		# Each iteration chains off the document generated by the last.
		base_doc = _test(i)
agpl-3.0
att-comdev/armada
armada/tests/unit/utils/test_source.py
1
9206
# Copyright 2017 The Armada Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import socket
import shutil

import fixtures
import mock
import testtools

from armada.exceptions import source_exceptions
from armada.tests.unit import base
from armada.tests import test_utils
from armada.utils import source


def is_connected():
    """Verifies whether network connectivity is up.

    :returns: True if connected else False.
    """
    try:
        host = socket.gethostbyname("www.github.com")
        socket.create_connection((host, 80), 2)
        return True
    except (socket.error, socket.herror, socket.timeout):
        pass
    return False


class GitTestCase(base.ArmadaTestCase):
    """Tests for armada.utils.source: git clone, tarball download/extract
    and source-directory cleanup. Network-dependent tests are skipped when
    offline via ``is_connected``.
    """

    def _validate_git_clone(self, repo_dir, expected_ref=None):
        # Helper: assert ``repo_dir`` is a cloned armada Git repository and
        # schedule its removal once the test finishes.
        self.assertTrue(os.path.isdir(repo_dir))
        self.addCleanup(shutil.rmtree, repo_dir)
        self.assertIn('armada', repo_dir)
        # Assert that the directory is a Git repo.
        self.assertTrue(os.path.isdir(os.path.join(repo_dir, '.git')))
        if expected_ref:
            # Assert the FETCH_HEAD is at the expected ref.
            with open(os.path.join(repo_dir, '.git', 'FETCH_HEAD'), 'r') \
                    as git_file:
                self.assertIn(expected_ref, git_file.read())

    @testtools.skipUnless(
        is_connected(), 'git clone requires network connectivity.')
    def test_git_clone_good_url(self):
        # Cloning a valid repository URL yields a valid working directory.
        url = 'http://github.com/att-comdev/armada'
        git_dir = source.git_clone(url)
        self._validate_git_clone(git_dir)

    @testtools.skipUnless(
        is_connected(), 'git clone requires network connectivity.')
    def test_git_clone_commit(self):
        # Cloning at a specific commit SHA also succeeds.
        url = 'http://github.com/att-comdev/armada'
        commit = 'cba78d1d03e4910f6ab1691bae633c5bddce893d'
        git_dir = source.git_clone(url, commit)
        self._validate_git_clone(git_dir)

    @testtools.skipUnless(
        is_connected(), 'git clone requires network connectivity.')
    def test_git_clone_ref(self):
        # Cloning a Gerrit-style change ref leaves FETCH_HEAD at that ref.
        ref = 'refs/changes/54/457754/73'
        git_dir = source.git_clone(
            'https://github.com/openstack/openstack-helm', ref)
        self._validate_git_clone(git_dir, ref)

    @test_utils.attr(type=['negative'])
    @testtools.skipUnless(
        is_connected(), 'git clone requires network connectivity.')
    def test_git_clone_empty_url(self):
        # An empty URL must raise GitException.
        url = ''
        # error_re = '%s is not a valid git repository.' % url
        self.assertRaises(source_exceptions.GitException,
                          source.git_clone, url)

    @test_utils.attr(type=['negative'])
    @testtools.skipUnless(
        is_connected(), 'git clone requires network connectivity.')
    def test_git_clone_bad_url(self):
        # A nonexistent repository must raise GitException.
        url = 'http://github.com/dummy/armada'
        self.assertRaises(source_exceptions.GitException,
                          source.git_clone, url)

    # TODO need to design a positive proxy test,
    # difficult to achieve behind a corporate proxy
    @test_utils.attr(type=['negative'])
    @testtools.skipUnless(
        is_connected(), 'git clone requires network connectivity.')
    def test_git_clone_fake_proxy(self):
        # An unreachable proxy server must raise GitProxyException.
        url = 'http://github.com/att-comdev/armada'
        proxy_url = test_utils.rand_name(
            'not.a.proxy.that.works.and.never.will',
            prefix='http://') + ":8080"
        self.assertRaises(
            source_exceptions.GitProxyException,
            source.git_clone,
            url,
            proxy_server=proxy_url)

    @mock.patch('armada.utils.source.tempfile')
    @mock.patch('armada.utils.source.requests')
    def test_tarball_download(self, mock_requests, mock_temp):
        # download_tarball fetches the URL (verify=False) and writes the
        # response body to the mkstemp-created path.
        url = 'http://localhost:8879/charts/mariadb-0.1.0.tgz'
        mock_temp.mkstemp.return_value = (None, '/tmp/armada')
        mock_response = mock.Mock()
        mock_response.content = 'some string'
        mock_requests.get.return_value = mock_response

        mock_open = mock.mock_open()
        with mock.patch.object(source, 'open', mock_open, create=True):
            source.download_tarball(url)

        mock_temp.mkstemp.assert_called_once()
        mock_requests.get.assert_called_once_with(url, verify=False)
        mock_open.assert_called_once_with('/tmp/armada', 'wb')
        mock_open().write.assert_called_once_with(
            mock_requests.get(url).content)

    @mock.patch('armada.utils.source.tempfile')
    @mock.patch('armada.utils.source.os.path')
    @mock.patch('armada.utils.source.tarfile')
    def test_tarball_extract(self, mock_tarfile, mock_path, mock_temp):
        # extract_tarball opens the archive and extracts it into a fresh
        # mkdtemp directory.
        mock_path.exists.return_value = True
        mock_temp.mkdtemp.return_value = '/tmp/armada'
        mock_opened_file = mock.Mock()
        mock_tarfile.open.return_value = mock_opened_file

        path = '/tmp/mariadb-0.1.0.tgz'
        source.extract_tarball(path)

        mock_path.exists.assert_called_once()
        mock_temp.mkdtemp.assert_called_once()
        mock_tarfile.open.assert_called_once_with(path)
        mock_opened_file.extractall.assert_called_once_with('/tmp/armada')

    @test_utils.attr(type=['negative'])
    @mock.patch('armada.utils.source.os.path')
    @mock.patch('armada.utils.source.tarfile')
    def test_tarball_extract_bad_path(self, mock_tarfile, mock_path):
        # A nonexistent archive path raises InvalidPathException and never
        # touches tarfile.
        mock_path.exists.return_value = False
        path = '/tmp/armada'
        self.assertRaises(source_exceptions.InvalidPathException,
                          source.extract_tarball, path)

        mock_tarfile.open.assert_not_called()
        mock_tarfile.extractall.assert_not_called()

    @testtools.skipUnless(
        is_connected(), 'git clone requires network connectivity.')
    @mock.patch.object(source, 'LOG')
    def test_source_cleanup(self, mock_log):
        # Cleaning up a real clone completes without logging a warning.
        url = 'http://github.com/att-comdev/armada'
        git_path = source.git_clone(url)
        source.source_cleanup(git_path)
        mock_log.warning.assert_not_called()

    @test_utils.attr(type=['negative'])
    @mock.patch.object(source, 'LOG')
    @mock.patch('armada.utils.source.shutil')
    def test_source_cleanup_fake_git_path(self, mock_shutil, mock_log):
        # Verify that passing in a fake path does nothing but log a warning.
        # Don't want anyone using the function to delete random directories.
        fake_path = self.useFixture(fixtures.TempDir()).path
        self.addCleanup(shutil.rmtree, fake_path)
        source.source_cleanup(fake_path)
        mock_shutil.rmtree.assert_not_called()
        self.assertTrue(mock_log.warning.called)
        actual_call = mock_log.warning.mock_calls[0][1][:2]
        self.assertEqual(
            ('%s is not a valid git repository. Details: %s', fake_path),
            actual_call)

    @test_utils.attr(type=['negative'])
    @mock.patch.object(source, 'LOG')
    @mock.patch('armada.utils.source.shutil')
    @mock.patch('armada.utils.source.os.path')
    def test_source_cleanup_missing_git_path(self, mock_path, mock_shutil,
                                             mock_log):
        # Verify that passing in a missing path does nothing but log a
        # warning.
        mock_path.exists.return_value = False
        path = 'armada'
        source.source_cleanup(path)
        mock_shutil.rmtree.assert_not_called()
        self.assertTrue(mock_log.warning.called)
        actual_call = mock_log.warning.mock_calls[0][1]
        self.assertEqual(
            ('Could not delete the path %s. Is it a git repository?', path),
            actual_call)

    @testtools.skipUnless(
        is_connected(), 'git clone requires network connectivity.')
    @test_utils.attr(type=['negative'])
    @mock.patch.object(source, 'os')
    def test_git_clone_ssh_auth_method_fails_auth(self, mock_os):
        # SSH auth with an unknown user fails with GitAuthException
        # (os.path.exists mocked True so the key check passes).
        mock_os.path.exists.return_value = True
        fake_user = test_utils.rand_name('fake_user')
        url = ('ssh://%s@review.gerrithub.io:29418/att-comdev/armada'
               % fake_user)
        self.assertRaises(
            source_exceptions.GitAuthException,
            source.git_clone,
            url,
            ref='refs/changes/17/388517/5',
            auth_method='SSH')

    @testtools.skipUnless(
        is_connected(), 'git clone requires network connectivity.')
    @test_utils.attr(type=['negative'])
    @mock.patch.object(source, 'os')
    def test_git_clone_ssh_auth_method_missing_ssh_key(self, mock_os):
        # SSH auth with no key on disk fails with GitSSHException
        # (os.path.exists mocked False so the key lookup fails).
        mock_os.path.exists.return_value = False
        fake_user = test_utils.rand_name('fake_user')
        url = ('ssh://%s@review.gerrithub.io:29418/att-comdev/armada'
               % fake_user)
        self.assertRaises(
            source_exceptions.GitSSHException,
            source.git_clone,
            url,
            ref='refs/changes/17/388517/5',
            auth_method='SSH')
apache-2.0
Tony-Zhang03/kafka
system_test/utils/testcase_env.py
71
7198
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

#!/usr/bin/env python

# ===================================
# testcase_env.py
# ===================================

import json
import os
import sys
import thread

import system_test_utils


class TestcaseEnv():
    """Per-testcase environment/state container for the Kafka system tests.

    Holds process-id bookkeeping for every launched entity (zookeeper,
    brokers, consumers, producers, ...), testcase configuration, and the
    results dictionary that is appended to the shared SystemTestEnv.
    """

    def __init__(self, systemTestEnv, classInstance):
        # The shared SystemTestEnv this testcase registers its results with;
        # classInstance is the test module instance (its class name is
        # recorded in the results).
        self.systemTestEnv = systemTestEnv

        # ================================
        # Generic testcase environment
        # ================================

        # dictionary of entity_id to ppid for Zookeeper entities
        # key: entity_id
        # val: ppid of Zookeeper associated to that entity_id
        # { 0: 12345, 1: 12389, ... }
        self.entityZkParentPidDict = {}

        # dictionary of entity_id to ppid for broker entities
        # key: entity_id
        # val: ppid of broker associated to that entity_id
        # { 0: 12345, 1: 12389, ... }
        self.entityBrokerParentPidDict = {}

        # dictionary of entity_id to ppid for mirror-maker entities
        # key: entity_id
        # val: ppid of broker associated to that entity_id
        # { 0: 12345, 1: 12389, ... }
        self.entityMirrorMakerParentPidDict = {}

        # dictionary of entity_id to ppid for console-consumer entities
        # key: entity_id
        # val: ppid of console consumer associated to that entity_id
        # { 0: 12345, 1: 12389, ... }
        self.entityConsoleConsumerParentPidDict = {}

        # dictionary of entity_id to ppid for migration tool entities
        # key: entity_id
        # val: ppid of broker associated to that entity_id
        # { 0: 12345, 1: 12389, ... }
        self.entityMigrationToolParentPidDict = {}

        # dictionary of entity_id to list of JMX ppid
        # key: entity_id
        # val: list of JMX ppid associated to that entity_id
        # { 1: [1234, 1235, 1236], 2: [2234, 2235, 2236], ... }
        self.entityJmxParentPidDict = {}

        # dictionary of hostname-topic-ppid for consumer
        # key: hostname
        # val: dict of topic-ppid
        # { host1: { test1 : 12345 }, host1: { test2 : 12389 }, ... }
        self.consumerHostParentPidDict = {}

        # dictionary of hostname-topic-ppid for producer
        # key: hostname
        # val: dict of topic-ppid
        # { host1: { test1 : 12345 }, host1: { test2 : 12389 }, ... }
        self.producerHostParentPidDict = {}

        # list of testcase configs
        self.testcaseConfigsList = []

        # dictionary to keep track of testcase arguments such as replica_factor, num_partition
        self.testcaseArgumentsDict = {}

        # gather the test case related info and add to an SystemTestEnv object
        self.testcaseResultsDict = {}
        self.testcaseResultsDict["_test_class_name"] = classInstance.__class__.__name__
        self.testcaseResultsDict["_test_case_name"] = ""
        self.validationStatusDict = {}
        self.testcaseResultsDict["validation_status"] = self.validationStatusDict
        self.systemTestEnv.systemTestResultsList.append(self.testcaseResultsDict)

        # FIXME: in a distributed environement, kafkaBaseDir could be different in individual host
        # => TBD
        self.kafkaBaseDir = ""

        self.systemTestBaseDir = systemTestEnv.SYSTEM_TEST_BASE_DIR

        # to be initialized in the Test Module
        self.testSuiteBaseDir = ""
        self.testCaseBaseDir = ""
        self.testCaseLogsDir = ""
        self.testCaseDashboardsDir = ""
        self.testcasePropJsonPathName = ""
        self.testcaseNonEntityDataDict = {}

        # ================================
        # dictionary to keep track of
        # user-defined environment variables
        # ================================
        # LEADER_ELECTION_COMPLETED_MSG = "completed the leader state transition"
        # REGX_LEADER_ELECTION_PATTERN = "\[(.*?)\] .* Broker (.*?) " + \
        #                                LEADER_ELECTION_COMPLETED_MSG + \
        #                                " for topic (.*?) partition (.*?) \(.*"
        # zkConnectStr = ""
        # consumerLogPathName = ""
        # consumerConfigPathName = ""
        # producerLogPathName = ""
        # producerConfigPathName = ""
        self.userDefinedEnvVarDict = {}

        # Lock object for producer threads synchronization
        self.lock = thread.allocate_lock()

        # count of producer threads currently alive (guarded by self.lock)
        self.numProducerThreadsRunning = 0

        # to be used when validating data match - these variables will be
        # updated by kafka_system_test_utils.start_producer_in_thread
        self.producerTopicsString = ""
        self.consumerTopicsString = ""

    def initWithKnownTestCasePathName(self, testCasePathName):
        # Derive per-testcase paths and load the testcase's properties JSON
        # (arguments + description) from the given testcase directory.
        testcaseDirName = os.path.basename(testCasePathName)
        self.testcaseResultsDict["_test_case_name"] = testcaseDirName
        self.testCaseBaseDir = testCasePathName
        self.testCaseLogsDir = self.testCaseBaseDir + "/logs"
        self.testCaseDashboardsDir = self.testCaseBaseDir + "/dashboards"

        # find testcase properties json file
        self.testcasePropJsonPathName = system_test_utils.get_testcase_prop_json_pathname(testCasePathName)

        # get the dictionary that contains the testcase arguments and description
        self.testcaseNonEntityDataDict = system_test_utils.get_json_dict_data(self.testcasePropJsonPathName)

    def printTestCaseDescription(self, testcaseDirName):
        # Pretty-print the testcase description steps and arguments to
        # stdout, and record each argument in the results dictionary.
        testcaseDescription = ""
        for k, v in self.testcaseNonEntityDataDict.items():
            if (k == "description"):
                testcaseDescription = v

        print "\n"
        print "======================================================================================="
        print "Test Case Name :", testcaseDirName
        print "======================================================================================="
        print "Description :"
        for step in sorted(testcaseDescription.iterkeys()):
            print "    ", step, ":", testcaseDescription[step]
        print "======================================================================================="
        print "Test Case Args :"
        for k, v in self.testcaseArgumentsDict.items():
            print "    ", k, " : ", v
            self.testcaseResultsDict["arg : " + k] = v
        print "======================================================================================="
apache-2.0
LiamBao/Spiders
LivePlatformIndex/douyuOnlineCount.py
1
5918
# -*- coding: utf-8 -*-
##__author__ =='liam'
# python3.52
#
# Scrapes Douyu author pages for recorded-video stats (online count, fans,
# category, room name) and writes one xlsx row per video.
import re,time,random
import sys  # FIX: main() calls sys.exit() but sys was never imported (NameError on dialog cancel)
import requests
import tkinter as tk
from tkinter import filedialog
import xlsxwriter as wx
from lxml import etree

# Pool of desktop browser User-Agent strings; one is picked at random per run.
USER_AGENTS = [
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
    "Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
    "Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
    "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
    "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
    "Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
    "Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
    "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
    "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
]

# Request headers shared by every fetch.  NOTE: the User-Agent is chosen once
# at import time, so all requests in a run use the same UA.
Headers = {
    # "Accept": "*/*",
    "Accept-Encoding": "gzip, deflate, br",
    "Accept-Language": "en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4,zh-TW;q=0.2",
    "Connection": "keep-alive",
    "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
    "User-Agent": random.choice(USER_AGENTS)
}


def getExcel(data):
    """Write the collected rows to a timestamped xlsx file.

    data: list of 8-element rows matching ``title`` below.
    Errors are printed, not raised (best-effort output).
    NOTE: the header row is only emitted when data is non-empty.
    """
    try:
        title = ['DateTime', 'roomId', 'userName', 'onlineNum', 'fansNum', 'cateName', 'roomName', 'url']
        file_name = 'Output_Douyu' + str((time.time() * 1000))[10:]
        workbook = wx.Workbook(file_name + '.xlsx')
        worksheet = workbook.add_worksheet('info')
        for i in range(len(data)):
            for j in range(len(title)):
                if i == 0:
                    worksheet.write(i, j, title[j])
                # data rows start at row 1, below the header
                worksheet.write(i + 1, j, data[i][j])
        workbook.close()
        print('\n File ' + file_name + ' Done!')
    except Exception as err:
        print(err)


def douyu_get_live_status(url):
    """Fetch one author page and hand each recorded video to get_Each_Video.

    FIX: the original used the ``(value, default)[cond]`` tuple idiom for the
    fallbacks; a tuple evaluates BOTH elements eagerly, so on a missing element
    the ``[0]`` lookup raised IndexError before the default could apply and
    the whole page was skipped.  Replaced with real conditionals so the
    intended defaults actually take effect.
    """
    try:
        urlPage = requests.get(url, headers=Headers, timeout=20)
        if urlPage:
            urlPage = urlPage.text
            xml = etree.HTML(urlPage)
            # follower count; 0 when the element is absent
            nodes = xml.xpath('.//b[@id="J-subscriber-total"]')
            fansNum = nodes[0].xpath('string(.)').strip() if nodes else 0
            # category labels, with the "all tags" / "replay collection" entries stripped
            cats = xml.xpath('.//div[@class="filter-label-list"]/ul/li/a')
            if cats:
                cateName = str([cat.xpath('string(.)').strip() for cat in cats]).replace('\'全部标签\',', '').replace(', \'直播回放合辑\'', '')
            else:
                cateName = ''
            # streamer display name
            nodes = xml.xpath('.//strong[@class="name-text"]')
            userName = nodes[0].xpath('string(.)').strip() if nodes else ''
            allVideo = xml.xpath('.//div[@class = "list clearfix"]//a[@class="list-item"]')
            if allVideo:
                for video in allVideo:
                    get_Each_Video(url, fansNum, cateName, userName, video)
    except Exception as err:
        print(err)


def get_Each_Video(url, fansNum, cateName, userName, xml):
    """Extract one video list-item and append a row to the global LiveData.

    Same eager-tuple bug fixed here: ``.group(1)`` used to be evaluated even
    when ``re.search`` returned None, raising AttributeError instead of using
    the documented defaults ('2017' / '').
    """
    global LiveData
    nodes = xml.xpath('.//span[@class="item-amount fr"]/b')
    onlineNum = nodes[0].xpath('string(.)').strip() if nodes else 0
    nodes = xml.xpath('.//strong[@class="list-title"]')
    # '' (not 0) on a missing title so the date regex below still gets a string
    roomName = nodes[0].xpath('string(.)').strip() if nodes else ''
    # date embedded in the room name; greedy prefix keeps the LAST date, as before
    m = re.search(r'.*(\d{4}-\d{1,2}-\d{1,2}).*', roomName)
    videoDate = m.group(1) if m else '2017'
    m = re.search(r'.*?author/(.*)', url)
    roomId = m.group(1) if m else ''
    Info = [videoDate, roomId, userName, onlineNum, fansNum, cateName, roomName, url]
    LiveData.append(Info)


def main():
    """Ask for a file of Douyu URLs, scrape each, and dump results to xlsx."""
    print('*' * 40)
    print('## Python 3.52')
    print('## Author Liam')
    print('## Date 02/28/2017')
    print('## Douyu Index')
    print('*' * 40)
    print('\r\n请选择账户信息文件')
    dialog = tk.Tk()
    dialog.withdraw()
    filename = filedialog.askopenfilename()
    if filename is None or filename == '':
        sys.exit(0)
    # filename = './test.txt'
    print(filename)
    f = open(filename, 'rb')
    task_lines = [i for i in f.readlines()]
    f.close()
    global LiveData
    LiveData = []
    count = 0
    try:
        for line in task_lines:
            count += 1
            line = str(line, encoding='utf-8')
            line = line.strip()
            # only lines that look like Douyu URLs are processed
            if not line or not re.search('.*?douyu.*?', line):
                continue
            douyu_get_live_status(line)
            # waitTime = random.uniform(2, 4)
            # time.sleep(waitTime)
        getExcel(LiveData)
    except Exception as err:
        print(err)
    finally:
        print("Done")


if __name__ == '__main__':
    main()
gpl-3.0
Chilledheart/chromium
tools/telemetry/third_party/gsutilz/third_party/boto/boto/contrib/ymlmessage.py
70
1880
# Copyright (c) 2006,2007 Chris Moyer # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. """ This module was contributed by Chris Moyer. It provides a subclass of the SQS Message class that supports YAML as the body of the message. This module requires the yaml module. """ from boto.sqs.message import Message import yaml class YAMLMessage(Message): """ The YAMLMessage class provides a YAML compatible message. Encoding and decoding are handled automaticaly. Access this message data like such: m.data = [ 1, 2, 3] m.data[0] # Returns 1 This depends on the PyYAML package """ def __init__(self, queue=None, body='', xml_attrs=None): self.data = None super(YAMLMessage, self).__init__(queue, body) def set_body(self, body): self.data = yaml.load(body) def get_body(self): return yaml.dump(self.data)
bsd-3-clause
etos/django
tests/template_tests/filter_tests/test_rjust.py
521
1030
from django.template.defaultfilters import rjust
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe

from ..utils import setup


class RjustTests(SimpleTestCase):
    """Template-level tests for the ``rjust`` filter.

    ``self.engine`` is injected by the ``setup`` decorator from ..utils —
    it registers the given template under the test's name.
    """

    @setup({'rjust01': '{% autoescape off %}.{{ a|rjust:"5" }}. .{{ b|rjust:"5" }}.{% endautoescape %}'})
    def test_rjust01(self):
        # With autoescape off, plain and mark_safe strings render identically.
        output = self.engine.render_to_string('rjust01', {"a": "a&b", "b": mark_safe("a&b")})
        self.assertEqual(output, ".  a&b. .  a&b.")

    @setup({'rjust02': '.{{ a|rjust:"5" }}. .{{ b|rjust:"5" }}.'})
    def test_rjust02(self):
        # With autoescape on, the unsafe value is escaped AFTER padding
        # (padding is computed on the 3-char original, not the escaped form),
        # while the mark_safe value passes through unescaped.
        output = self.engine.render_to_string('rjust02', {"a": "a&b", "b": mark_safe("a&b")})
        self.assertEqual(output, ".  a&amp;b. .  a&b.")


class FunctionTests(SimpleTestCase):
    """Direct tests for the ``rjust`` filter function."""

    def test_rjust(self):
        # Pads on the left up to the requested width.
        self.assertEqual(rjust('test', 10), '      test')

    def test_less_than_string_length(self):
        # A width smaller than the string leaves it unchanged.
        self.assertEqual(rjust('test', 3), 'test')

    def test_non_string_input(self):
        # Non-string input is coerced to str before padding.
        self.assertEqual(rjust(123, 4), ' 123')
bsd-3-clause
charbeljc/OCB
addons/hr_timesheet_sheet/report/hr_timesheet_report.py
194
4092
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp import tools
from openerp.osv import fields,osv


class hr_timesheet_report(osv.osv):
    """Extends the hr.timesheet.report SQL view with timesheet-sheet data.

    The parent model builds its view from _select()/_from()/_group_by()
    fragments; this subclass appends sheet-level columns (dates, state,
    department) and per-sheet totals computed in a CTE.
    """
    _inherit = "hr.timesheet.report"
    _columns = {
        'to_invoice': fields.many2one('hr_timesheet_invoice.factor', 'Type of Invoicing', readonly=True),
        'nbr': fields.integer('# Nbr Timesheet', readonly=True),
        'total_diff': fields.float('# Total Diff', readonly=True),
        'total_timesheet': fields.float('# Total Timesheet', readonly=True),
        'total_attendance': fields.float('# Total Attendance', readonly=True),
        'department_id': fields.many2one('hr.department', 'Department', readonly=True),
        'date_from': fields.date('Date from', readonly=True,),
        'date_to': fields.date('Date to', readonly=True),
        'state': fields.selection([
            ('new', 'New'),
            ('draft', 'Draft'),
            ('confirm', 'Confirmed'),
            ('done', 'Done')], 'Status', readonly=True),
    }

    def _select(self):
        # Prepend a CTE ("totals") with per-sheet/per-day totals, each divided
        # by the number of analytic lines on that day so that the later join
        # (one row per analytic line) does not multiply the totals.
        # coalesce(...,1) guards against division by zero/NULL when a day has
        # no analytic lines.
        return """
            WITH totals AS (
                SELECT
                    d.sheet_id,
                    d.name as date,
                    sum(total_difference) / coalesce(sum(j.count),1) as total_diff,
                    sum(total_timesheet) / coalesce(sum(j.count),1) as total_timesheet,
                    sum(total_attendance) / coalesce(sum(j.count),1) as total_attendance
                FROM hr_timesheet_sheet_sheet_day d
                    left join (
                        SELECT
                            h.sheet_id,
                            a.date,
                            count(*)
                        FROM account_analytic_line a
                            inner join hr_analytic_timesheet h ON (h.line_id=a.id)
                        GROUP BY h.sheet_id, a.date
                    ) j ON (d.sheet_id = j.sheet_id AND d.name = j.date)
                GROUP BY d.sheet_id, d.name
            )
        """ + super(hr_timesheet_report, self)._select() + """,
                htss.name,
                htss.date_from,
                htss.date_to,
                count(*) as nbr,
                sum(t.total_diff) as total_diff,
                sum(t.total_timesheet) as total_timesheet,
                sum(t.total_attendance) as total_attendance,
                aal.to_invoice,
                htss.department_id,
                htss.state"""

    def _from(self):
        # Join the sheet itself (htss) and the per-day totals CTE (t) onto the
        # parent FROM clause; aliases hat/aal come from the parent query.
        return super(hr_timesheet_report, self)._from() + "left join hr_timesheet_sheet_sheet as htss ON (hat.sheet_id=htss.id) left join totals as t on (t.sheet_id = hat.sheet_id and t.date = aal.date)"

    def _group_by(self):
        # Extend the parent GROUP BY with every non-aggregated column added
        # in _select() above.
        return super(hr_timesheet_report, self)._group_by() + """,
                htss.date_from,
                htss.date_to,
                aal.unit_amount,
                aal.amount,
                aal.to_invoice,
                htss.name,
                htss.state,
                htss.id,
                htss.department_id"""

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
dgwakeman/mne-python
mne/io/tree.py
11
4708
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#          Matti Hamalainen <msh@nmr.mgh.harvard.edu>
#
# License: BSD (3-clause)

import struct

import numpy as np

from .constants import FIFF
from .tag import Tag
from .tag import read_tag
from .write import write_id, start_block, end_block, _write
from ..utils import logger, verbose


def dir_tree_find(tree, kind):
    """Find nodes of the given kind from a directory tree structure

    Parameters
    ----------
    tree : dict
        Directory tree.
    kind : int
        Kind to find.

    Returns
    -------
    nodes : list
        List of matching nodes.
    """
    nodes = []

    if isinstance(tree, list):
        # A list of trees: search each one.
        for t in tree:
            nodes += dir_tree_find(t, kind)
    else:
        #   Am I desirable myself?
        if tree['block'] == kind:
            nodes.append(tree)

        #   Search the subtrees
        for child in tree['children']:
            nodes += dir_tree_find(child, kind)
    return nodes


@verbose
def make_dir_tree(fid, directory, start=0, indent=0, verbose=None):
    """Create the directory tree structure.

    Recursively walks the flat tag ``directory`` starting at index ``start``
    and builds a nested dict per block with keys: block, id, parent_id,
    nent, nchild, directory, children.  Returns ``(tree, last)`` where
    ``last`` is the index just past this block.
    """
    # Tag kind codes used by the directory structure.
    FIFF_BLOCK_START = 104
    FIFF_BLOCK_END = 105
    FIFF_FILE_ID = 100
    FIFF_BLOCK_ID = 103
    FIFF_PARENT_BLOCK_ID = 110

    if directory[start].kind == FIFF_BLOCK_START:
        tag = read_tag(fid, directory[start].pos)
        block = tag.data
    else:
        # Top-level call that does not start on a block-start tag.
        block = 0

    logger.debug('    ' * indent + 'start { %d' % block)

    this = start

    tree = dict()
    tree['block'] = block
    tree['id'] = None
    tree['parent_id'] = None
    tree['nent'] = 0
    tree['nchild'] = 0
    tree['directory'] = directory[this]
    tree['children'] = []

    while this < len(directory):
        if directory[this].kind == FIFF_BLOCK_START:
            if this != start:
                # Nested block: recurse; the recursion consumes entries up to
                # and including its matching block-end tag.
                child, this = make_dir_tree(fid, directory, this, indent + 1)
                tree['nchild'] += 1
                tree['children'].append(child)
        elif directory[this].kind == FIFF_BLOCK_END:
            # NOTE(review): reads the START tag's data (both start and end
            # tags carry the block kind) to decide whether this end tag
            # closes the current block — confirm against upstream if edited.
            tag = read_tag(fid, directory[start].pos)
            if tag.data == block:
                break
        else:
            # Plain entry belonging to this block.
            tree['nent'] += 1
            if tree['nent'] == 1:
                tree['directory'] = list()
            tree['directory'].append(directory[this])

            #  Add the id information if available
            if block == 0:
                if directory[this].kind == FIFF_FILE_ID:
                    tag = read_tag(fid, directory[this].pos)
                    tree['id'] = tag.data
            else:
                if directory[this].kind == FIFF_BLOCK_ID:
                    tag = read_tag(fid, directory[this].pos)
                    tree['id'] = tag.data
                elif directory[this].kind == FIFF_PARENT_BLOCK_ID:
                    tag = read_tag(fid, directory[this].pos)
                    tree['parent_id'] = tag.data

        this += 1

    # Eliminate the empty directory
    if tree['nent'] == 0:
        tree['directory'] = None

    logger.debug('    ' * (indent + 1) + 'block = %d nent = %d nchild = %d'
                 % (tree['block'], tree['nent'], tree['nchild']))
    logger.debug('    ' * indent + 'end } %d' % block)
    last = this
    return tree, last


###############################################################################
# Writing

def copy_tree(fidin, in_id, nodes, fidout):
    """Copies directory subtrees from fidin to fidout"""

    if len(nodes) <= 0:
        return

    if not isinstance(nodes, list):
        nodes = [nodes]

    for node in nodes:
        start_block(fidout, node['block'])
        if node['id'] is not None:
            if in_id is not None:
                write_id(fidout, FIFF.FIFF_PARENT_FILE_ID, in_id)

            write_id(fidout, FIFF.FIFF_BLOCK_ID, in_id)
            write_id(fidout, FIFF.FIFF_PARENT_BLOCK_ID, node['id'])

        if node['directory'] is not None:
            for d in node['directory']:
                #   Do not copy these tags
                if d.kind == FIFF.FIFF_BLOCK_ID or \
                        d.kind == FIFF.FIFF_PARENT_BLOCK_ID or \
                        d.kind == FIFF.FIFF_PARENT_FILE_ID:
                    continue

                #   Read and write tags, pass data through transparently
                fidin.seek(d.pos, 0)

                s = fidin.read(4 * 4)
                tag = Tag(*struct.unpack(">iIii", s))
                # np.frombuffer replaces the deprecated np.fromstring for
                # binary data; the resulting (read-only) view is only read
                # by _write below, so zero-copy is safe here.
                tag.data = np.frombuffer(fidin.read(tag.size), dtype='>B')

                _write(fidout, tag.data, tag.kind, 1, tag.type, '>B')

        for child in node['children']:
            copy_tree(fidin, in_id, child, fidout)

        end_block(fidout, node['block'])
mrpau/kolibri
kolibri/core/content/serializers.py
1
12418
from django.db.models import Manager
from django.db.models import Sum
from django.db.models.query import RawQuerySet
from le_utils.constants import content_kinds
from rest_framework import serializers

from kolibri.core.content.models import ChannelMetadata
from kolibri.core.content.models import ContentNode
from kolibri.core.content.models import File
from kolibri.core.content.models import Language
from kolibri.core.fields import create_timezonestamp


class DynamicFieldsModelSerializer(serializers.ModelSerializer):
    """ModelSerializer that can restrict its output to a requested field set.

    When the request carries ``?fields=a,b,c`` only those serializer fields
    are kept; everything else is dropped from the output.
    """

    def __init__(self, *args, **kwargs):
        # Instantiate the superclass normally
        super(DynamicFieldsModelSerializer, self).__init__(*args, **kwargs)

        # enable dynamic fields specification!
        if "request" in self.context and self.context["request"].GET.get(
            "fields", None
        ):
            fields = self.context["request"].GET["fields"].split(",")
            # Drop any fields that are not specified in the `fields` argument.
            allowed = set(fields)
            existing = set(self.fields.keys())
            for field_name in existing - allowed:
                self.fields.pop(field_name)


class ChannelMetadataSerializer(serializers.ModelSerializer):
    """Serializes a channel plus a few attributes pulled from its root node."""

    root = serializers.PrimaryKeyRelatedField(read_only=True)
    lang_code = serializers.SerializerMethodField()
    lang_name = serializers.SerializerMethodField()
    available = serializers.SerializerMethodField()
    num_coach_contents = serializers.IntegerField(source="root.num_coach_contents")

    def get_lang_code(self, instance):
        # Language lives on the root content node; may be unset.
        if instance.root.lang is None:
            return None

        return instance.root.lang.lang_code

    def get_lang_name(self, instance):
        if instance.root.lang is None:
            return None

        return instance.root.lang.lang_name

    def get_available(self, instance):
        # A channel is "available" iff its root node is.
        return instance.root.available

    class Meta:
        model = ChannelMetadata
        fields = (
            "author",
            "description",
            "tagline",
            "id",
            "last_updated",
            "lang_code",
            "lang_name",
            "name",
            "root",
            "thumbnail",
            "version",
            "available",
            "num_coach_contents",
            "public",
        )


class PublicChannelSerializer(serializers.ModelSerializer):
    """Channel serialization for the public (cross-device) API surface."""

    included_languages = serializers.SerializerMethodField()
    matching_tokens = serializers.SerializerMethodField("match_tokens")
    language = serializers.SerializerMethodField()
    icon_encoding = serializers.SerializerMethodField()
    last_published = serializers.SerializerMethodField()

    def get_language(self, instance):
        if instance.root.lang is None:
            return None

        return instance.root.lang.lang_code

    def get_icon_encoding(self, instance):
        # The thumbnail field already holds the encoded icon data.
        return instance.thumbnail

    def get_included_languages(self, instance):
        return list(instance.included_languages.all().values_list("id", flat=True))

    def get_last_published(self, instance):
        # Serialized timestamp of the last update, or None if never updated.
        return (
            None
            if not instance.last_updated
            else create_timezonestamp(instance.last_updated)
        )

    def match_tokens(self, channel):
        # Placeholder: no token matching implemented here.
        return []

    class Meta:
        model = ChannelMetadata
        fields = (
            "id",
            "name",
            "language",
            "included_languages",
            "description",
            "tagline",
            "total_resource_count",
            "version",
            "published_size",
            "last_published",
            "icon_encoding",
            "matching_tokens",
            "public",
        )


class LowerCaseField(serializers.CharField):
    """CharField that lower-cases its output representation."""

    def to_representation(self, obj):
        return super(LowerCaseField, self).to_representation(obj).lower()


class LanguageSerializer(serializers.ModelSerializer):
    # Language identifiers are normalized to lower case on output.
    id = LowerCaseField(max_length=14)
    lang_code = LowerCaseField(max_length=3)
    lang_subcode = LowerCaseField(max_length=10)

    class Meta:
        model = Language
        fields = ("id", "lang_code", "lang_subcode", "lang_name", "lang_direction")


class FileSerializer(serializers.ModelSerializer):
    """Serializes a content File, delegating URL/size/extension to the model."""

    checksum = serializers.CharField(source="local_file_id")
    storage_url = serializers.SerializerMethodField()
    download_url = serializers.SerializerMethodField()
    extension = serializers.SerializerMethodField()
    file_size = serializers.SerializerMethodField()
    lang = LanguageSerializer()
    available = serializers.BooleanField(source="local_file.available")

    def get_storage_url(self, target_node):
        return target_node.get_storage_url()

    def get_download_url(self, target_node):
        return target_node.get_download_url()

    def get_extension(self, target_node):
        return target_node.get_extension()

    def get_file_size(self, target_node):
        return target_node.get_file_size()

    class Meta:
        model = File
        fields = (
            "storage_url",
            "id",
            "priority",
            "available",
            "file_size",
            "extension",
            "checksum",
            "preset",
            "lang",
            "supplementary",
            "thumbnail",
            "download_url",
        )


def get_summary_logs(content_ids, user):
    """Return the user's ContentSummaryLogs for the given content ids."""
    from kolibri.core.logger.models import ContentSummaryLog

    if not content_ids:
        return ContentSummaryLog.objects.none()
    # get all summary logs for the current user that correspond to the descendant content nodes
    return ContentSummaryLog.objects.filter(user=user, content_id__in=content_ids)


def get_topic_progress_fraction(topic, user):
    """Mean progress (0..1, 4 d.p.) over a topic's non-topic descendants."""
    leaf_ids = (
        topic.get_descendants(include_self=False)
        .order_by()
        .exclude(kind=content_kinds.TOPIC)
        .values_list("content_id", flat=True)
    )
    return round(
        (
            get_summary_logs(leaf_ids, user).aggregate(Sum("progress"))["progress__sum"]
            or 0
        )
        # "or 1" avoids division by zero for topics with no leaves
        / (len(leaf_ids) or 1),
        4,
    )


def get_content_progress_fraction(content, user):
    """Progress for a single content node, or None if never started."""
    from kolibri.core.logger.models import ContentSummaryLog

    try:
        # add up all the progress for the logs, and divide by the total number of content nodes to get overall progress
        overall_progress = ContentSummaryLog.objects.get(
            user=user, content_id=content.content_id
        ).progress
    except ContentSummaryLog.DoesNotExist:
        return None
    return round(overall_progress, 4)


def get_topic_and_content_progress_fraction(node, user):
    # Dispatch on node kind: topics aggregate over descendants.
    if node.kind == content_kinds.TOPIC:
        return get_topic_progress_fraction(node, user)
    else:
        return get_content_progress_fraction(node, user)


def get_topic_and_content_progress_fractions(nodes, user):
    """Map content_id -> progress for a queryset of nodes (topics averaged)."""
    leaf_ids = (
        nodes.get_descendants(include_self=True)
        .order_by()
        .exclude(available=False)
        .exclude(kind=content_kinds.TOPIC)
        .values_list("content_id", flat=True)
    )

    leaf_node_logs = get_summary_logs(leaf_ids, user)

    overall_progress = {}

    for log in leaf_node_logs.values("content_id", "progress"):
        overall_progress[log["content_id"]] = round(log["progress"], 4)

    for node in nodes:
        if node.kind == content_kinds.TOPIC:
            topic_leaf_ids = (
                node.get_descendants(include_self=True)
                .order_by()
                .exclude(available=False)
                .exclude(kind=content_kinds.TOPIC)
                .values_list("content_id", flat=True)
            )
            # Topic progress = mean of its (available, non-topic) leaves;
            # leaves without a log count as 0.
            overall_progress[node.content_id] = (
                round(
                    sum(overall_progress.get(leaf_id, 0) for leaf_id in topic_leaf_ids)
                    / len(topic_leaf_ids),
                    4,
                )
                if topic_leaf_ids
                else 0.0
            )

    return overall_progress


def get_content_progress_fractions(nodes, user):
    """Map content_id -> progress for leaf nodes only (no topic aggregation)."""
    if isinstance(nodes, RawQuerySet) or isinstance(nodes, list):
        leaf_ids = [datum.content_id for datum in nodes]
    else:
        leaf_ids = nodes.exclude(kind=content_kinds.TOPIC).values_list(
            "content_id", flat=True
        )

    summary_logs = get_summary_logs(leaf_ids, user)

    # make a lookup dict for all logs to allow mapping from content_id to current progress
    overall_progress = {
        log["content_id"]: round(log["progress"], 4)
        for log in summary_logs.values("content_id", "progress")
    }
    return overall_progress


class ContentNodeGranularSerializer(serializers.ModelSerializer):
    """Node serialization for import/export management.

    The context's ``channel_stats`` is None when exporting (report only what
    is on this device) and a per-node stats dict when importing (report what
    is available remotely).
    """

    num_coach_contents = serializers.SerializerMethodField()
    coach_content = serializers.SerializerMethodField()
    total_resources = serializers.SerializerMethodField()
    importable = serializers.SerializerMethodField()

    class Meta:
        model = ContentNode
        fields = (
            "id",
            "available",
            "coach_content",
            "importable",
            "kind",
            "num_coach_contents",
            "on_device_resources",
            "title",
            "total_resources",
        )

    @property
    def channel_stats(self):
        return self.context["channel_stats"]

    def get_total_resources(self, instance):
        # channel_stats is None for export
        if self.channel_stats is None:
            return instance.on_device_resources
        return self.channel_stats.get(instance.id, {"total_resources": 0})[
            "total_resources"
        ]

    def get_num_coach_contents(self, instance):
        # If for exporting, only show what is available on server. For importing,
        # show all of the coach contents in the topic.
        if self.channel_stats is None:
            return instance.num_coach_contents
        return self.channel_stats.get(instance.id, {"num_coach_contents": 0})[
            "num_coach_contents"
        ]

    def get_coach_content(self, instance):
        # If for exporting, only show what is on server. For importing,
        # show all of the coach contents in the topic.
        if self.channel_stats is None:
            return instance.coach_content
        return self.channel_stats.get(instance.id, {"coach_content": False})[
            "coach_content"
        ]

    def get_importable(self, instance):
        # If for export, just return None
        if self.channel_stats is None:
            return None
        # A node is importable iff the remote stats know about it.
        return instance.id in self.channel_stats


class ContentNodeProgressListSerializer(serializers.ListSerializer):
    """Batch progress serialization: one progress query for the whole list."""

    def to_representation(self, data):

        if not data:
            return data

        if (
            "request" not in self.context
            or not self.context["request"].user.is_facility_user
        ):
            # Anonymous/non-facility users have no logs: report zero progress.
            progress_dict = {}
        else:
            user = self.context["request"].user
            # Don't annotate topic progress as too expensive
            progress_dict = get_topic_and_content_progress_fractions(data, user)

        # Dealing with nested relationships, data can be a Manager,
        # so, first get a queryset from the Manager if needed
        iterable = data.all() if isinstance(data, Manager) else data

        return [
            self.child.to_representation(
                item,
                progress_fraction=progress_dict.get(item.content_id, 0.0),
                annotate_progress_fraction=False,
            )
            for item in iterable
        ]


class ContentNodeProgressSerializer(serializers.Serializer):
    """Serializes a node to ``{id, progress_fraction}`` for the current user."""

    def to_representation(
        self, instance, progress_fraction=None, annotate_progress_fraction=True
    ):
        if progress_fraction is None and annotate_progress_fraction:
            if (
                "request" not in self.context
                or not self.context["request"].user.is_facility_user
            ):
                # Don't try to annotate for a non facility user
                progress_fraction = 0
            else:
                user = self.context["request"].user
                progress_fraction = (
                    get_topic_and_content_progress_fraction(instance, user) or 0.0
                )
        return {"id": instance.id, "progress_fraction": progress_fraction}

    class Meta:
        # List rendering goes through the batch serializer above.
        list_serializer_class = ContentNodeProgressListSerializer
mit
mj-5/volatility
volatility/plugins/mac/session_hash_table.py
45
2308
# Volatility # Copyright (C) 2007-2013 Volatility Foundation # # This file is part of Volatility. # # Volatility is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # Volatility is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Volatility. If not, see <http://www.gnu.org/licenses/>. # """ @author: Andrew Case @license: GNU General Public License 2.0 @contact: atcuno@gmail.com @organization: """ import volatility.plugins.mac.pslist as pslist import volatility.obj as obj import volatility.plugins.mac.common as common class mac_list_sessions(pslist.mac_pslist): """ Enumerates sessions """ def calculate(self): common.set_plugin_members(self) shash_addr = self.addr_space.profile.get_symbol("_sesshash") shash = obj.Object("unsigned long", offset = shash_addr, vm = self.addr_space) shashtbl_addr = self.addr_space.profile.get_symbol("_sesshashtbl") shashtbl_ptr = obj.Object("Pointer", offset = shashtbl_addr, vm = self.addr_space) shash_array = obj.Object(theType = "Array", targetType = "sesshashhead", count = shash + 1, vm = self.addr_space, offset = shashtbl_ptr) for sess in shash_array: s = sess.lh_first while s: yield s s = s.s_hash.le_next def render_text(self, outfd, data): self.table_header(outfd, [("Leader (Pid)", "8"), ("Leader (Name)", "20"), ("Login Name", "25")]) for sess in data: if sess.s_leader: pid = sess.s_leader.p_pid pname = sess.s_leader.p_comm else: pid = -1 pname = "<INVALID LEADER>" self.table_row(outfd, pid, pname, sess.s_login)
gpl-2.0
ZENGXH/scikit-learn
sklearn/utils/tests/test_estimator_checks.py
202
3757
import scipy.sparse as sp
import numpy as np
import sys

from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.testing import assert_raises_regex, assert_true
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.estimator_checks import check_estimators_unfitted
from sklearn.linear_model import LogisticRegression
from sklearn.utils.validation import check_X_y, check_array


class CorrectNotFittedError(ValueError):
    """Exception class to raise if estimator is used before fitting.

    Like NotFittedError, it inherits from ValueError, but not from
    AttributeError. Used for testing only.
    """


class BaseBadClassifier(BaseEstimator, ClassifierMixin):
    # Deliberately broken: fit() does no input validation at all.
    def fit(self, X, y):
        return self

    def predict(self, X):
        return np.ones(X.shape[0])


class NoCheckinPredict(BaseBadClassifier):
    # Validates in fit() but (deliberately) not in predict().
    def fit(self, X, y):
        X, y = check_X_y(X, y)
        return self


class NoSparseClassifier(BaseBadClassifier):
    # Deliberately fails on sparse input with an unhelpful error message.
    def fit(self, X, y):
        X, y = check_X_y(X, y, accept_sparse=['csr', 'csc'])
        if sp.issparse(X):
            raise ValueError("Nonsensical Error")
        return self

    def predict(self, X):
        X = check_array(X)
        return np.ones(X.shape[0])


class CorrectNotFittedErrorClassifier(BaseBadClassifier):
    # Well-behaved: raises a ValueError subclass when predict() is called
    # before fit().
    def fit(self, X, y):
        X, y = check_X_y(X, y)
        self.coef_ = np.ones(X.shape[1])
        return self

    def predict(self, X):
        if not hasattr(self, 'coef_'):
            raise CorrectNotFittedError("estimator is not fitted yet")
        X = check_array(X)
        return np.ones(X.shape[0])


def test_check_estimator():
    # tests that the estimator actually fails on "bad" estimators.
    # not a complete test of all checks, which are very extensive.

    # check that we have a set_params and can clone
    msg = "it does not implement a 'get_params' methods"
    assert_raises_regex(TypeError, msg, check_estimator, object)
    # check that we have a fit method
    msg = "object has no attribute 'fit'"
    assert_raises_regex(AttributeError, msg, check_estimator, BaseEstimator)
    # check that fit does input validation
    msg = "TypeError not raised by fit"
    assert_raises_regex(AssertionError, msg, check_estimator,
                        BaseBadClassifier)
    # check that predict does input validation (doesn't accept dicts in input)
    msg = "Estimator doesn't check for NaN and inf in predict"
    assert_raises_regex(AssertionError, msg, check_estimator, NoCheckinPredict)
    # check for sparse matrix input handling
    msg = "Estimator type doesn't seem to fail gracefully on sparse data"
    # the check for sparse input handling prints to the stdout,
    # instead of raising an error, so as not to remove the original traceback.
    # that means we need to jump through some hoops to catch it.
    old_stdout = sys.stdout
    string_buffer = StringIO()
    sys.stdout = string_buffer
    try:
        check_estimator(NoSparseClassifier)
    except:
        # NOTE: bare except is deliberate here — any failure mode of the
        # check is acceptable; only the printed message below is asserted.
        pass
    finally:
        # always restore stdout, even if the check raised
        sys.stdout = old_stdout
    assert_true(msg in string_buffer.getvalue())

    # doesn't error on actual estimator
    check_estimator(LogisticRegression)


def test_check_estimators_unfitted():
    # check that a ValueError/AttributeError is raised when calling predict
    # on an unfitted estimator
    msg = "AttributeError or ValueError not raised by predict"
    assert_raises_regex(AssertionError, msg, check_estimators_unfitted,
                        "estimator", NoSparseClassifier)

    # check that CorrectNotFittedError inherit from either ValueError
    # or AttributeError
    check_estimators_unfitted("estimator", CorrectNotFittedErrorClassifier)
wbvalid/python2
getVduNormal.py
1
1309
#!/usr/bin/python import re import time import sys import os work_path = '/home/vpu_data' def to_unix(date): if len(date) < 14: date = date + '0' * (14 - len(date)) return int(time.mktime(time.strptime(date, "%Y%m%d%H%M%S"))) def convert(file_name): hex_num = re.findall(r'/(\w{16})_', file_name) bin_num = bin(int(hex_num[0], 16)) time_stamp = int(bin_num[2:33], 2) return time_stamp def search_all(path): result = [] dir_list = os.listdir(path) for dirs in dir_list: if os.path.isdir(dirs): for items in os.popen("find ./%s/normal/%s -name '*_*'" % (dirs, sys.argv[3])).read().split('\n'): if items != '': result.append(items) return result if __name__ == "__main__": try: start_time = to_unix(sys.argv[1]) end_time = to_unix(sys.argv[2]) os.chdir(work_path) except IndexError: print "Usage:\n ./getVpuNormal.py 201704270910 201704271110 2017-04-27" os._exit(1) print "Searching for normal models in %s" % os.getcwd() all_searched = search_all(work_path) count = 0 for i in all_searched: ts = convert(i) if ts > start_time and ts < end_time: count += 1 print "total number: {}".format(count)
unlicense
huseyinbiyik/plugin.program.ump
lib/providers/audio_link_youtube.py
2
3032
# -*- coding: utf-8 -*-
# UMP audio provider: resolves a track (or playlist of tracks) to YouTube
# video ids by scraping YouTube search results and matching artist/title
# and duration.  Python 2 module; `ump` is injected into globals() by run().
import json
import re
import urlparse
import unidecode

domain = "http://www.youtube.com/"
encoding = "utf-8"
tunnel = ["cookie"]
# duration tolerances (seconds): exact-enough match vs. fallback candidate
timetol = 5
maxttol = 15
# words that usually indicate a non-studio upload; a result containing one of
# these is skipped unless the word also appears in the requested title
filters = ["sheetmusic", "canli", "live", "karaoke", "cover", "concert", "konser", "remix", "reaction", "parody", "meet", "version", "tutorial"]


def add(hash, i, artist, title, mname, parts):
    """Register a found video: queue it (playlist mode) or emit a mirror now."""
    ump.add_log("youtube found track: %s - %s" % (i["artist"], i["title"]))
    part = {"url_provider_name": "youtube", "url_provider_hash": hash, "referer": domain, "partname": "%s - %s" % (artist, title), "info": i}
    if "playlist" in ump.args:
        parts.append(part)
    else:
        ump.add_mirror([part], mname, wait=0.2, missing="ignore")


def run(ump):
    """Entry point called by UMP: search YouTube for each requested track.

    For every track: scrape the search-results page, score each hit by
    (a) normalized artist/title containment and (b) duration distance.
    A hit within ``timetol`` seconds is accepted immediately; hits within
    ``maxttol`` are kept as candidates and the closest one is used if no
    exact match is found.
    """
    # the module-level helpers (add) reach ump through globals()
    globals()['ump'] = ump
    parts = []
    ids = []      # video ids already used, to avoid duplicates across tracks
    tracks = []   # NOTE(review): tracks/old_* below appear unused — confirm
    old_artist = ""
    old_artalbum = ""
    old_tralbum = ""
    if ump.defs.MT_ALBUM == ump.info["mediatype"]:
        playlist = ump.args["playlist"]
        mname = ump.args.get("mname", "Grooveshark Playlist")
    else:
        # single track: wrap it in a one-element playlist
        playlist = [{"info": ump.info, "art": ump.art}]
        mname = "%s - %s" % (ump.info["artist"], ump.info["title"])
    for item in playlist:
        i = item["info"]
        candidates = {}  # duration-distance -> video id, for the fallback pick
        q = '%s %s' % (i["artist"], i["title"])
        # the PREF cookie requests 50 results with duration info
        page = ump.get_page(domain + "results", encoding, query={"search_query": q}, header={"Cookie": "PREF=f1=50000000&f5=30"})
        ump.add_log("youtube is searching track: %s - %s" % (i["artist"], i["title"]))
        match = False
        # each tuple: (watch link, result title, duration text, trailing html)
        for res in re.findall('<h3 class="yt-lockup-title\s*"><a href="(.*?)".*?title="(.*?)".*?</a><span class="accessible-description".*?>(.*?)</span></h3>(.*?)</div></li>', page):
            link, ftitle, times, rest = res
            times = re.findall("([0-9]*?)\:([0-9]*?)\.", times)
            try:
                idur = int(i.get("duration", 0))
                dur = int(times[0][0]) * 60 + int(times[0][1])
            except:
                # missing/unparsable duration disables duration matching below
                idur = 0
                dur = 0
            # video id from the ?v= query parameter
            hash = urlparse.parse_qs(urlparse.urlparse(link).query).get("v", [None])[0]
            if not hash or hash in ids:
                continue
            if dur > 0 and idur > 0:
                # normalize to lowercase ASCII with no spaces for containment tests
                fmtitle = unidecode.unidecode(ftitle).lower().replace(" ", "")
                filtered = False
                artist = unidecode.unidecode(i["artist"]).lower().replace(" ", "")
                title = unidecode.unidecode(i["title"]).lower().replace(" ", "")
                for filter in filters:
                    if filter in fmtitle and not filter in title:
                        filtered = True
                        break
                if filtered:
                    continue
                frest = unidecode.unidecode(rest).lower().replace(" ", "")
                if (artist in fmtitle or artist in frest) and title in fmtitle:
                    if abs(dur - idur) <= timetol:
                        match = True
                    elif abs(dur - idur) <= maxttol:
                        candidates[abs(dur - idur)] = hash
            else:
                # no duration available: fall back to "Artist - Title" parsing
                title = ftitle.split("-")
                if not len(title) == 2: continue
                artist, title = title
                match = ump.is_same(artist, i["artist"]) and ump.is_same(title, i["title"])
            if match:
                ids.append(hash)
                add(hash, i, artist, title, ftitle, parts)
                if "playlist" in ump.args:
                    # playlist mode: first good hit per track is enough
                    break
        if not match and len(candidates):
            # no exact match: use the candidate with the smallest duration gap
            hash = candidates[sorted(candidates)[0]]
            ids.append(hash)
            add(hash, i, artist, title, mname, parts)
    if len(parts):
        if len(parts) == 1:
            ump.add_mirror(parts, mname)
        elif len(parts) > 1:
            ump.add_mirror(parts, "%s [TRACKS:%d]" % (mname, len(parts)), wait=0.2, missing="ignore")
gpl-2.0
malayaleecoder/servo
tests/wpt/css-tests/tools/html5lib/html5lib/treewalkers/pulldom.py
1729
2302
from __future__ import absolute_import, division, unicode_literals from xml.dom.pulldom import START_ELEMENT, END_ELEMENT, \ COMMENT, IGNORABLE_WHITESPACE, CHARACTERS from . import _base from ..constants import voidElements class TreeWalker(_base.TreeWalker): def __iter__(self): ignore_until = None previous = None for event in self.tree: if previous is not None and \ (ignore_until is None or previous[1] is ignore_until): if previous[1] is ignore_until: ignore_until = None for token in self.tokens(previous, event): yield token if token["type"] == "EmptyTag": ignore_until = previous[1] previous = event if ignore_until is None or previous[1] is ignore_until: for token in self.tokens(previous, None): yield token elif ignore_until is not None: raise ValueError("Illformed DOM event stream: void element without END_ELEMENT") def tokens(self, event, next): type, node = event if type == START_ELEMENT: name = node.nodeName namespace = node.namespaceURI attrs = {} for attr in list(node.attributes.keys()): attr = node.getAttributeNode(attr) attrs[(attr.namespaceURI, attr.localName)] = attr.value if name in voidElements: for token in self.emptyTag(namespace, name, attrs, not next or next[1] is not node): yield token else: yield self.startTag(namespace, name, attrs) elif type == END_ELEMENT: name = node.nodeName namespace = node.namespaceURI if name not in voidElements: yield self.endTag(namespace, name) elif type == COMMENT: yield self.comment(node.nodeValue) elif type in (IGNORABLE_WHITESPACE, CHARACTERS): for token in self.text(node.nodeValue): yield token else: yield self.unknown(type)
mpl-2.0
alxgu/ansible
lib/ansible/plugins/callback/logdna.py
52
6805
# (c) 2018, Samir Musali <samir.musali@logdna.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = ''' callback: logdna callback_type: aggregate short_description: Sends playbook logs to LogDNA description: - This callback will report logs from playbook actions, tasks, and events to LogDNA (https://app.logdna.com) version_added: "2.7" requirements: - LogDNA Python Library (https://github.com/logdna/python) - whitelisting in configuration options: conf_key: required: True description: LogDNA Ingestion Key type: string env: - name: LOGDNA_INGESTION_KEY ini: - section: callback_logdna key: conf_key plugin_ignore_errors: required: False description: Whether to ignore errors on failing or not type: boolean env: - name: ANSIBLE_IGNORE_ERRORS ini: - section: callback_logdna key: plugin_ignore_errors default: False conf_hostname: required: False description: Alternative Host Name; the current host name by default type: string env: - name: LOGDNA_HOSTNAME ini: - section: callback_logdna key: conf_hostname conf_tags: required: False description: Tags type: string env: - name: LOGDNA_TAGS ini: - section: callback_logdna key: conf_tags default: ansible ''' import logging import json import socket from uuid import getnode from ansible.plugins.callback import CallbackBase from ansible.parsing.ajson import AnsibleJSONEncoder try: from logdna import LogDNAHandler HAS_LOGDNA = True except ImportError: HAS_LOGDNA = False # Getting MAC Address of system: def get_mac(): mac = "%012x" % getnode() return ":".join(map(lambda index: mac[index:index + 2], range(int(len(mac) / 2)))) # Getting hostname of system: def get_hostname(): return str(socket.gethostname()).split('.local')[0] # Getting IP of system: def get_ip(): try: return socket.gethostbyname(get_hostname()) except Exception: s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) try: 
s.connect(('10.255.255.255', 1)) IP = s.getsockname()[0] except Exception: IP = '127.0.0.1' finally: s.close() return IP # Is it JSON? def isJSONable(obj): try: json.dumps(obj, sort_keys=True, cls=AnsibleJSONEncoder) return True except Exception: return False # LogDNA Callback Module: class CallbackModule(CallbackBase): CALLBACK_VERSION = 0.1 CALLBACK_TYPE = 'aggregate' CALLBACK_NAME = 'logdna' CALLBACK_NEEDS_WHITELIST = True def __init__(self, display=None): super(CallbackModule, self).__init__(display=display) self.disabled = True self.playbook_name = None self.playbook = None self.conf_key = None self.plugin_ignore_errors = None self.conf_hostname = None self.conf_tags = None def set_options(self, task_keys=None, var_options=None, direct=None): super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct) self.conf_key = self.get_option('conf_key') self.plugin_ignore_errors = self.get_option('plugin_ignore_errors') self.conf_hostname = self.get_option('conf_hostname') self.conf_tags = self.get_option('conf_tags') self.mac = get_mac() self.ip = get_ip() if self.conf_hostname is None: self.conf_hostname = get_hostname() self.conf_tags = self.conf_tags.split(',') if HAS_LOGDNA: self.log = logging.getLogger('logdna') self.log.setLevel(logging.INFO) self.options = {'hostname': self.conf_hostname, 'mac': self.mac, 'index_meta': True} self.log.addHandler(LogDNAHandler(self.conf_key, self.options)) self.disabled = False else: self.disabled = True self._display.warning('WARNING:\nPlease, install LogDNA Python Package: `pip install logdna`') def metaIndexing(self, meta): invalidKeys = [] ninvalidKeys = 0 for key, value in meta.items(): if not isJSONable(value): invalidKeys.append(key) ninvalidKeys += 1 if ninvalidKeys > 0: for key in invalidKeys: del meta[key] meta['__errors'] = 'These keys have been sanitized: ' + ', '.join(invalidKeys) return meta def sanitizeJSON(self, data): try: return json.loads(json.dumps(data, sort_keys=True, 
cls=AnsibleJSONEncoder)) except Exception: return {'warnings': ['JSON Formatting Issue', json.dumps(data, sort_keys=True, cls=AnsibleJSONEncoder)]} def flush(self, log, options): if HAS_LOGDNA: self.log.info(json.dumps(log), options) def sendLog(self, host, category, logdata): options = {'app': 'ansible', 'meta': {'playbook': self.playbook_name, 'host': host, 'category': category}} logdata['info'].pop('invocation', None) warnings = logdata['info'].pop('warnings', None) if warnings is not None: self.flush({'warn': warnings}, options) self.flush(logdata, options) def v2_playbook_on_start(self, playbook): self.playbook = playbook self.playbook_name = playbook._file_name def v2_playbook_on_stats(self, stats): result = dict() for host in stats.processed.keys(): result[host] = stats.summarize(host) self.sendLog(self.conf_hostname, 'STATS', {'info': self.sanitizeJSON(result)}) def runner_on_failed(self, host, res, ignore_errors=False): if self.plugin_ignore_errors: ignore_errors = self.plugin_ignore_errors self.sendLog(host, 'FAILED', {'info': self.sanitizeJSON(res), 'ignore_errors': ignore_errors}) def runner_on_ok(self, host, res): self.sendLog(host, 'OK', {'info': self.sanitizeJSON(res)}) def runner_on_unreachable(self, host, res): self.sendLog(host, 'UNREACHABLE', {'info': self.sanitizeJSON(res)}) def runner_on_async_failed(self, host, res, jid): self.sendLog(host, 'ASYNC_FAILED', {'info': self.sanitizeJSON(res), 'job_id': jid}) def runner_on_async_ok(self, host, res, jid): self.sendLog(host, 'ASYNC_OK', {'info': self.sanitizeJSON(res), 'job_id': jid})
gpl-3.0
erwilan/ansible
lib/ansible/plugins/lookup/passwordstore.py
60
7765
# (c) 2017, Patrick Deelman <patrick@patrickdeelman.nl> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import subprocess import time from distutils import util from ansible.errors import AnsibleError from ansible.plugins.lookup import LookupBase # backhacked check_output with input for python 2.7 # http://stackoverflow.com/questions/10103551/passing-data-to-subprocess-check-output def check_output2(*popenargs, **kwargs): if 'stdout' in kwargs: raise ValueError('stdout argument not allowed, it will be overridden.') if 'stderr' in kwargs: raise ValueError('stderr argument not allowed, it will be overridden.') if 'input' in kwargs: if 'stdin' in kwargs: raise ValueError('stdin and input arguments may not both be used.') inputdata = kwargs['input'] del kwargs['input'] kwargs['stdin'] = subprocess.PIPE else: inputdata = None process = subprocess.Popen(*popenargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs) try: out,err = process.communicate(inputdata) except: process.kill() process.wait() raise retcode = process.poll() if retcode: cmd = kwargs.get("args") if cmd is None: cmd = popenargs[0] raise subprocess.CalledProcessError(retcode, cmd, out+err) return out class LookupModule(LookupBase): def parse_params(self, term): # I went with the "traditional" param followed 
with space separated KV pairs. # Waiting for final implementation of lookup parameter parsing. # See: https://github.com/ansible/ansible/issues/12255 params = term.split() if len(params) > 0: # the first param is the pass-name self.passname = params[0] # next parse the optional parameters in keyvalue pairs try: for param in params[1:]: name, value = param.split('=') assert(name in self.paramvals) self.paramvals[name] = value except (ValueError, AssertionError) as e: raise AnsibleError(e) # check and convert values try: for key in ['create', 'returnall', 'overwrite']: if not isinstance(self.paramvals[key], bool): self.paramvals[key] = util.strtobool(self.paramvals[key]) except (ValueError, AssertionError) as e: raise AnsibleError(e) if not isinstance(self.paramvals['length'], int): if self.paramvals['length'].isdigit(): self.paramvals['length'] = int(self.paramvals['length']) else: raise AnsibleError("{} is not a correct value for length".format(self.paramvals['length'])) # Set PASSWORD_STORE_DIR if directory is set if self.paramvals['directory']: if os.path.isdir(self.paramvals['directory']): os.environ['PASSWORD_STORE_DIR'] = self.paramvals['directory'] else: raise AnsibleError('Passwordstore directory \'{}\' does not exist'.format(self.paramvals['directory'])) def check_pass(self): try: self.passoutput = check_output2(["pass", self.passname]).splitlines() self.password = self.passoutput[0] self.passdict = {} for line in self.passoutput[1:]: if ":" in line: name, value = line.split(':', 1) self.passdict[name.strip()] = value.strip() except (subprocess.CalledProcessError) as e: if e.returncode == 1 and 'not in the password store' in e.output: # if pass returns 1 and return string contains 'is not in the password store.' # We need to determine if this is valid or Error. 
if not self.paramvals['create']: raise AnsibleError('passname: {} not found, use create=True'.format(self.passname)) else: return False else: raise AnsibleError(e) return True def get_newpass(self): if self.paramvals['userpass']: newpass = self.paramvals['userpass'] else: try: newpass = check_output2(['pwgen','-cns',str(self.paramvals['length']), '1']).rstrip() except (subprocess.CalledProcessError) as e: raise AnsibleError(e) return newpass def update_password(self): # generate new password, insert old lines from current result and return new password newpass = self.get_newpass() datetime= time.strftime("%d/%m/%Y %H:%M:%S") msg = newpass +'\n' + '\n'.join(self.passoutput[1:]) msg+="\nlookup_pass: old password was {} (Updated on {})\n".format(self.password, datetime) try: generate = check_output2(['pass','insert','-f','-m',self.passname], input=msg) except (subprocess.CalledProcessError) as e: raise AnsibleError(e) return newpass def generate_password(self): # generate new file and insert lookup_pass: Generated by Ansible on {date} # use pwgen to generate the password and insert values with pass -m newpass = self.get_newpass() datetime = time.strftime("%d/%m/%Y %H:%M:%S") msg = newpass + '\n' + "lookup_pass: First generated by ansible on {}\n".format(datetime) try: generate = check_output2(['pass','insert','-f','-m',self.passname], input=msg) except (subprocess.CalledProcessError) as e: raise AnsibleError(e) return newpass def get_passresult(self): if self.paramvals['returnall']: return os.linesep.join(self.passoutput) if self.paramvals['subkey'] == 'password': return self.password else: if self.paramvals['subkey'] in self.passdict: return self.passdict[self.paramvals['subkey']] else: return None def run(self, terms, variables, **kwargs): result = [] self.paramvals = { 'subkey':'password', 'directory':variables.get('passwordstore'), 'create':False, 'returnall': False, 'overwrite':False, 'userpass':'', 'length': 16} for term in terms: self.parse_params(term) # parse 
the input into paramvals if self.check_pass(): # password exists if self.paramvals['overwrite'] and self.paramvals['subkey'] == 'password': result.append(self.update_password()) else: result.append(self.get_passresult()) else: # password does not exist if self.paramvals['create']: result.append(self.generate_password()) return result
gpl-3.0
gmaxwell/bitcoin
test/functional/invalidblockrequest.py
19
4319
#!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test node responses to invalid blocks. In this test we connect to one node over p2p, and test block requests: 1) Valid blocks should be requested and become chain tip. 2) Invalid block with duplicated transaction should be re-requested. 3) Invalid block with bad coinbase value should be rejected and not re-requested. """ from test_framework.test_framework import ComparisonTestFramework from test_framework.util import * from test_framework.comptool import TestManager, TestInstance, RejectResult from test_framework.blocktools import * import copy import time # Use the ComparisonTestFramework with 1 node: only use --testbinary. class InvalidBlockRequestTest(ComparisonTestFramework): ''' Can either run this test as 1 node with expected answers, or two and compare them. Change the "outcome" variable from each TestInstance object to only do the comparison. ''' def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True def run_test(self): test = TestManager(self, self.options.tmpdir) test.add_all_connections(self.nodes) self.tip = None self.block_time = None NetworkThread().start() # Start up network handling in another thread test.run() def get_tests(self): if self.tip is None: self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0) self.block_time = int(time.time())+1 ''' Create a new block with an anyone-can-spend coinbase ''' height = 1 block = create_block(self.tip, create_coinbase(height), self.block_time) self.block_time += 1 block.solve() # Save the coinbase for later self.block1 = block self.tip = block.sha256 height += 1 yield TestInstance([[block, True]]) ''' Now we need that block to mature so we can spend the coinbase. 
''' test = TestInstance(sync_every_block=False) for i in range(100): block = create_block(self.tip, create_coinbase(height), self.block_time) block.solve() self.tip = block.sha256 self.block_time += 1 test.blocks_and_transactions.append([block, True]) height += 1 yield test ''' Now we use merkle-root malleability to generate an invalid block with same blockheader. Manufacture a block with 3 transactions (coinbase, spend of prior coinbase, spend of that spend). Duplicate the 3rd transaction to leave merkle root and blockheader unchanged but invalidate the block. ''' block2 = create_block(self.tip, create_coinbase(height), self.block_time) self.block_time += 1 # b'0x51' is OP_TRUE tx1 = create_transaction(self.block1.vtx[0], 0, b'\x51', 50 * COIN) tx2 = create_transaction(tx1, 0, b'\x51', 50 * COIN) block2.vtx.extend([tx1, tx2]) block2.hashMerkleRoot = block2.calc_merkle_root() block2.rehash() block2.solve() orig_hash = block2.sha256 block2_orig = copy.deepcopy(block2) # Mutate block 2 block2.vtx.append(tx2) assert_equal(block2.hashMerkleRoot, block2.calc_merkle_root()) assert_equal(orig_hash, block2.rehash()) assert(block2_orig.vtx != block2.vtx) self.tip = block2.sha256 yield TestInstance([[block2, RejectResult(16, b'bad-txns-duplicate')], [block2_orig, True]]) height += 1 ''' Make sure that a totally screwed up block is not valid. ''' block3 = create_block(self.tip, create_coinbase(height), self.block_time) self.block_time += 1 block3.vtx[0].vout[0].nValue = 100 * COIN # Too high! block3.vtx[0].sha256=None block3.vtx[0].calc_sha256() block3.hashMerkleRoot = block3.calc_merkle_root() block3.rehash() block3.solve() yield TestInstance([[block3, RejectResult(16, b'bad-cb-amount')]]) if __name__ == '__main__': InvalidBlockRequestTest().main()
mit
valkjsaaa/sl4a
python/src/Lib/filecmp.py
61
9470
"""Utilities for comparing files and directories. Classes: dircmp Functions: cmp(f1, f2, shallow=1) -> int cmpfiles(a, b, common) -> ([], [], []) """ import os import stat from itertools import ifilter, ifilterfalse, imap, izip __all__ = ["cmp","dircmp","cmpfiles"] _cache = {} BUFSIZE=8*1024 def cmp(f1, f2, shallow=1): """Compare two files. Arguments: f1 -- First file name f2 -- Second file name shallow -- Just check stat signature (do not read the files). defaults to 1. Return value: True if the files are the same, False otherwise. This function uses a cache for past comparisons and the results, with a cache invalidation mechanism relying on stale signatures. """ s1 = _sig(os.stat(f1)) s2 = _sig(os.stat(f2)) if s1[0] != stat.S_IFREG or s2[0] != stat.S_IFREG: return False if shallow and s1 == s2: return True if s1[1] != s2[1]: return False result = _cache.get((f1, f2)) if result and (s1, s2) == result[:2]: return result[2] outcome = _do_cmp(f1, f2) _cache[f1, f2] = s1, s2, outcome return outcome def _sig(st): return (stat.S_IFMT(st.st_mode), st.st_size, st.st_mtime) def _do_cmp(f1, f2): bufsize = BUFSIZE fp1 = open(f1, 'rb') fp2 = open(f2, 'rb') while True: b1 = fp1.read(bufsize) b2 = fp2.read(bufsize) if b1 != b2: return False if not b1: return True # Directory comparison class. # class dircmp: """A class that manages the comparison of 2 directories. dircmp(a,b,ignore=None,hide=None) A and B are directories. IGNORE is a list of names to ignore, defaults to ['RCS', 'CVS', 'tags']. HIDE is a list of names to hide, defaults to [os.curdir, os.pardir]. High level usage: x = dircmp(dir1, dir2) x.report() -> prints a report on the differences between dir1 and dir2 or x.report_partial_closure() -> prints report on differences between dir1 and dir2, and reports on common immediate subdirectories. x.report_full_closure() -> like report_partial_closure, but fully recursive. Attributes: left_list, right_list: The files in dir1 and dir2, filtered by hide and ignore. 
common: a list of names in both dir1 and dir2. left_only, right_only: names only in dir1, dir2. common_dirs: subdirectories in both dir1 and dir2. common_files: files in both dir1 and dir2. common_funny: names in both dir1 and dir2 where the type differs between dir1 and dir2, or the name is not stat-able. same_files: list of identical files. diff_files: list of filenames which differ. funny_files: list of files which could not be compared. subdirs: a dictionary of dircmp objects, keyed by names in common_dirs. """ def __init__(self, a, b, ignore=None, hide=None): # Initialize self.left = a self.right = b if hide is None: self.hide = [os.curdir, os.pardir] # Names never to be shown else: self.hide = hide if ignore is None: self.ignore = ['RCS', 'CVS', 'tags'] # Names ignored in comparison else: self.ignore = ignore def phase0(self): # Compare everything except common subdirectories self.left_list = _filter(os.listdir(self.left), self.hide+self.ignore) self.right_list = _filter(os.listdir(self.right), self.hide+self.ignore) self.left_list.sort() self.right_list.sort() def phase1(self): # Compute common names a = dict(izip(imap(os.path.normcase, self.left_list), self.left_list)) b = dict(izip(imap(os.path.normcase, self.right_list), self.right_list)) self.common = map(a.__getitem__, ifilter(b.__contains__, a)) self.left_only = map(a.__getitem__, ifilterfalse(b.__contains__, a)) self.right_only = map(b.__getitem__, ifilterfalse(a.__contains__, b)) def phase2(self): # Distinguish files, directories, funnies self.common_dirs = [] self.common_files = [] self.common_funny = [] for x in self.common: a_path = os.path.join(self.left, x) b_path = os.path.join(self.right, x) ok = 1 try: a_stat = os.stat(a_path) except os.error, why: # print 'Can\'t stat', a_path, ':', why[1] ok = 0 try: b_stat = os.stat(b_path) except os.error, why: # print 'Can\'t stat', b_path, ':', why[1] ok = 0 if ok: a_type = stat.S_IFMT(a_stat.st_mode) b_type = stat.S_IFMT(b_stat.st_mode) if a_type != 
b_type: self.common_funny.append(x) elif stat.S_ISDIR(a_type): self.common_dirs.append(x) elif stat.S_ISREG(a_type): self.common_files.append(x) else: self.common_funny.append(x) else: self.common_funny.append(x) def phase3(self): # Find out differences between common files xx = cmpfiles(self.left, self.right, self.common_files) self.same_files, self.diff_files, self.funny_files = xx def phase4(self): # Find out differences between common subdirectories # A new dircmp object is created for each common subdirectory, # these are stored in a dictionary indexed by filename. # The hide and ignore properties are inherited from the parent self.subdirs = {} for x in self.common_dirs: a_x = os.path.join(self.left, x) b_x = os.path.join(self.right, x) self.subdirs[x] = dircmp(a_x, b_x, self.ignore, self.hide) def phase4_closure(self): # Recursively call phase4() on subdirectories self.phase4() for sd in self.subdirs.itervalues(): sd.phase4_closure() def report(self): # Print a report on the differences between a and b # Output format is purposely lousy print 'diff', self.left, self.right if self.left_only: self.left_only.sort() print 'Only in', self.left, ':', self.left_only if self.right_only: self.right_only.sort() print 'Only in', self.right, ':', self.right_only if self.same_files: self.same_files.sort() print 'Identical files :', self.same_files if self.diff_files: self.diff_files.sort() print 'Differing files :', self.diff_files if self.funny_files: self.funny_files.sort() print 'Trouble with common files :', self.funny_files if self.common_dirs: self.common_dirs.sort() print 'Common subdirectories :', self.common_dirs if self.common_funny: self.common_funny.sort() print 'Common funny cases :', self.common_funny def report_partial_closure(self): # Print reports on self and on subdirs self.report() for sd in self.subdirs.itervalues(): print sd.report() def report_full_closure(self): # Report on self and subdirs recursively self.report() for sd in 
self.subdirs.itervalues(): print sd.report_full_closure() methodmap = dict(subdirs=phase4, same_files=phase3, diff_files=phase3, funny_files=phase3, common_dirs = phase2, common_files=phase2, common_funny=phase2, common=phase1, left_only=phase1, right_only=phase1, left_list=phase0, right_list=phase0) def __getattr__(self, attr): if attr not in self.methodmap: raise AttributeError, attr self.methodmap[attr](self) return getattr(self, attr) def cmpfiles(a, b, common, shallow=1): """Compare common files in two directories. a, b -- directory names common -- list of file names found in both directories shallow -- if true, do comparison based solely on stat() information Returns a tuple of three lists: files that compare equal files that are different filenames that aren't regular files. """ res = ([], [], []) for x in common: ax = os.path.join(a, x) bx = os.path.join(b, x) res[_cmp(ax, bx, shallow)].append(x) return res # Compare two files. # Return: # 0 for equal # 1 for different # 2 for funny cases (can't stat, etc.) # def _cmp(a, b, sh, abs=abs, cmp=cmp): try: return not abs(cmp(a, b, sh)) except os.error: return 2 # Return a copy with items that occur in skip removed. # def _filter(flist, skip): return list(ifilterfalse(skip.__contains__, flist)) # Demonstration and testing. # def demo(): import sys import getopt options, args = getopt.getopt(sys.argv[1:], 'r') if len(args) != 2: raise getopt.GetoptError('need exactly two args', None) dd = dircmp(args[0], args[1]) if ('-r', '') in options: dd.report_full_closure() else: dd.report() if __name__ == '__main__': demo()
apache-2.0
Backflipz/plugin.video.excubed
resources/lib/more_itertools/recipes.py
6
8915
"""Imported from the recipes section of the itertools documentation. All functions taken from the recipes section of the itertools library docs [1]_. Some backward-compatible usability improvements have been made. .. [1] http://docs.python.org/library/itertools.html#recipes """ from collections import deque from itertools import chain, combinations, count, cycle, groupby, ifilterfalse, imap, islice, izip, izip_longest, repeat, starmap, tee # Wrapping breaks 2to3. import operator from random import randrange, sample, choice __all__ = ['take', 'tabulate', 'consume', 'nth', 'quantify', 'padnone', 'ncycles', 'dotproduct', 'flatten', 'repeatfunc', 'pairwise', 'grouper', 'roundrobin', 'powerset', 'unique_everseen', 'unique_justseen', 'iter_except', 'random_product', 'random_permutation', 'random_combination', 'random_combination_with_replacement'] def take(n, iterable): """Return first n items of the iterable as a list >>> take(3, range(10)) [0, 1, 2] >>> take(5, range(3)) [0, 1, 2] Effectively a short replacement for ``next`` based iterator consumption when you want more than one item, but less than the whole iterator. """ return list(islice(iterable, n)) def tabulate(function, start=0): """Return an iterator mapping the function over linear input. The start argument will be increased by 1 each time the iterator is called and fed into the function. >>> t = tabulate(lambda x: x**2, -3) >>> take(3, t) [9, 4, 1] """ return imap(function, count(start)) def consume(iterator, n=None): """Advance the iterator n-steps ahead. If n is none, consume entirely. Efficiently exhausts an iterator without returning values. Defaults to consuming the whole iterator, but an optional second argument may be provided to limit consumption. 
>>> i = (x for x in range(10)) >>> next(i) 0 >>> consume(i, 3) >>> next(i) 4 >>> consume(i) >>> next(i) Traceback (most recent call last): File "<stdin>", line 1, in <module> StopIteration If the iterator has fewer items remaining than the provided limit, the whole iterator will be consumed. >>> i = (x for x in range(3)) >>> consume(i, 5) >>> next(i) Traceback (most recent call last): File "<stdin>", line 1, in <module> StopIteration """ # Use functions that consume iterators at C speed. if n is None: # feed the entire iterator into a zero-length deque deque(iterator, maxlen=0) else: # advance to the empty slice starting at position n next(islice(iterator, n, n), None) def nth(iterable, n, default=None): """Returns the nth item or a default value >>> l = range(10) >>> nth(l, 3) 3 >>> nth(l, 20, "zebra") 'zebra' """ return next(islice(iterable, n, None), default) def quantify(iterable, pred=bool): """Return the how many times the predicate is true >>> quantify([True, False, True]) 2 """ return sum(imap(pred, iterable)) def padnone(iterable): """Returns the sequence of elements and then returns None indefinitely. >>> take(5, padnone(range(3))) [0, 1, 2, None, None] Useful for emulating the behavior of the built-in map() function. """ return chain(iterable, repeat(None)) def ncycles(iterable, n): """Returns the sequence elements n times >>> list(ncycles(["a", "b"], 3)) ['a', 'b', 'a', 'b', 'a', 'b'] """ return chain.from_iterable(repeat(tuple(iterable), n)) def dotproduct(vec1, vec2): """Returns the dot product of the two iterables >>> dotproduct([10, 10], [20, 20]) 400 """ return sum(imap(operator.mul, vec1, vec2)) def flatten(listOfLists): """Return an iterator flattening one level of nesting in a list of lists >>> list(flatten([[0, 1], [2, 3]])) [0, 1, 2, 3] """ return chain.from_iterable(listOfLists) def repeatfunc(func, times=None, *args): """Repeat calls to func with specified arguments. 
>>> list(repeatfunc(lambda: 5, 3)) [5, 5, 5] >>> list(repeatfunc(lambda x: x ** 2, 3, 3)) [9, 9, 9] """ if times is None: return starmap(func, repeat(args)) return starmap(func, repeat(args, times)) def pairwise(iterable): """Returns an iterator of paired items, overlapping, from the original >>> take(4, pairwise(count())) [(0, 1), (1, 2), (2, 3), (3, 4)] """ a, b = tee(iterable) next(b, None) return izip(a, b) def grouper(n, iterable, fillvalue=None): """Collect data into fixed-length chunks or blocks >>> list(grouper(3, 'ABCDEFG', 'x')) [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')] """ args = [iter(iterable)] * n return izip_longest(fillvalue=fillvalue, *args) def roundrobin(*iterables): """Yields an item from each iterable, alternating between them >>> list(roundrobin('ABC', 'D', 'EF')) ['A', 'D', 'E', 'B', 'F', 'C'] """ # Recipe credited to George Sakkis pending = len(iterables) nexts = cycle(iter(it).next for it in iterables) while pending: try: for next in nexts: yield next() except StopIteration: pending -= 1 nexts = cycle(islice(nexts, pending)) def powerset(iterable): """Yields all possible subsets of the iterable >>> list(powerset([1,2,3])) [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)] """ s = list(iterable) return chain.from_iterable(combinations(s, r) for r in range(len(s)+1)) def unique_everseen(iterable, key=None): """Yield unique elements, preserving order. 
>>> list(unique_everseen('AAAABBBCCDAABBB')) ['A', 'B', 'C', 'D'] >>> list(unique_everseen('ABBCcAD', str.lower)) ['A', 'B', 'C', 'D'] """ seen = set() seen_add = seen.add if key is None: for element in ifilterfalse(seen.__contains__, iterable): seen_add(element) yield element else: for element in iterable: k = key(element) if k not in seen: seen_add(k) yield element def unique_justseen(iterable, key=None): """Yields elements in order, ignoring serial duplicates >>> list(unique_justseen('AAAABBBCCDAABBB')) ['A', 'B', 'C', 'D', 'A', 'B'] >>> list(unique_justseen('ABBCcAD', str.lower)) ['A', 'B', 'C', 'A', 'D'] """ return imap(next, imap(operator.itemgetter(1), groupby(iterable, key))) def iter_except(func, exception, first=None): """Yields results from a function repeatedly until an exception is raised. Converts a call-until-exception interface to an iterator interface. Like __builtin__.iter(func, sentinel) but uses an exception instead of a sentinel to end the loop. >>> l = range(3) >>> list(iter_except(l.pop, IndexError)) [2, 1, 0] """ try: if first is not None: yield first() while 1: yield func() except exception: pass def random_product(*args, **kwds): """Returns a random pairing of items from each iterable argument If `repeat` is provided as a kwarg, it's value will be used to indicate how many pairings should be chosen. >>> random_product(['a', 'b', 'c'], [1, 2], repeat=2) # doctest:+SKIP ('b', '2', 'c', '2') """ pools = map(tuple, args) * kwds.get('repeat', 1) return tuple(choice(pool) for pool in pools) def random_permutation(iterable, r=None): """Returns a random permutation. If r is provided, the permutation is truncated to length r. >>> random_permutation(range(5)) # doctest:+SKIP (3, 4, 0, 1, 2) """ pool = tuple(iterable) r = len(pool) if r is None else r return tuple(sample(pool, r)) def random_combination(iterable, r): """Returns a random combination of length r, chosen without replacement. 
>>> random_combination(range(5), 3) # doctest:+SKIP (2, 3, 4) """ pool = tuple(iterable) n = len(pool) indices = sorted(sample(xrange(n), r)) return tuple(pool[i] for i in indices) def random_combination_with_replacement(iterable, r): """Returns a random combination of length r, chosen with replacement. >>> random_combination_with_replacement(range(3), 5) # # doctest:+SKIP (0, 0, 1, 2, 2) """ pool = tuple(iterable) n = len(pool) indices = sorted(randrange(n) for i in xrange(r)) return tuple(pool[i] for i in indices)
gpl-2.0
fifengine/fifengine
engine/python/fife/extensions/pychan/widgets/radiobutton.py
1
4543
# -*- coding: utf-8 -*- # #################################################################### # Copyright (C) 2005-2019 by the FIFE team # http://www.fifengine.net # This file is part of FIFE. # # FIFE is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the # Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # #################################################################### from __future__ import absolute_import from fife import fifechan from fife.extensions.pychan.attrs import Attr, BoolAttr from .basictextwidget import BasicTextWidget from .common import text2gui class RadioButton(BasicTextWidget): """ A basic radiobutton (an exclusive checkbox). New Attributes ============== - marked: Boolean: Whether the checkbox is checked or not. - group: String: All RadioButtons with the same group name can only be checked exclusively. 
Data ==== The marked status can be read and set via L{distributeData} and L{collectData} """ ATTRIBUTES = BasicTextWidget.ATTRIBUTES + [ BoolAttr('marked'), Attr('group') ] DEFAULT_GROUP = "_no_group_" def __init__(self, parent = None, name = None, size = None, min_size = None, max_size = None, fixed_size = None, margins = None, padding = None, helptext = None, position = None, style = None, hexpand = None, vexpand = None, font = None, base_color = None, background_color = None, foreground_color = None, selection_color = None, border_color = None, outline_color = None, border_size = None, outline_size = None, position_technique = None, is_focusable = None, comment = None, text = None, group = None): self.real_widget = fifechan.RadioButton() self.group = self.DEFAULT_GROUP super(RadioButton,self).__init__(parent=parent, name=name, size=size, min_size=min_size, max_size=max_size, fixed_size=fixed_size, margins=margins, padding=padding, helptext=helptext, position=position, style=style, hexpand=hexpand, vexpand=vexpand, font=font, base_color=base_color, background_color=background_color, foreground_color=foreground_color, selection_color=selection_color, border_color=border_color, outline_color=outline_color, border_size=border_size, outline_size=outline_size, position_technique=position_technique, is_focusable=is_focusable, comment=comment, text=text) if group is not None: self.group = group # Prepare Data collection framework self.accepts_data = True self._realGetData = self._isMarked self._realSetData = self._setMarked # Initial data stuff inherited. 
def clone(self, prefix): rbuttonClone = RadioButton(None, self._createNameWithPrefix(prefix), self.size, self.min_size, self.max_size, self.fixed_size, self.margins, self.padding, self.helptext, self.position, self.style, self.hexpand, self.vexpand, self.font, self.base_color, self.background_color, self.foreground_color, self.selection_color, self.border_color, self.outline_color, self.border_size, self.outline_size, self.position_technique, self.is_focusable, self.comment, self.text, self.group) return rbuttonClone def _isMarked(self): return self.real_widget.isSelected() def _setMarked(self,mark): self.real_widget.setSelected(mark) marked = property(_isMarked,_setMarked) def _setGroup(self,group): self.real_widget.setGroup(group) def _getGroup(self): return self.real_widget.getGroup() group = property(_getGroup,_setGroup)
lgpl-2.1
asm666/sympy
bin/coverage_doctest.py
83
21410
#!/usr/bin/env python """ Program to test that all methods/functions have at least one example doctest. Also checks if docstrings are imported into Sphinx. For this to work, the Sphinx docs need to be built first. Use "cd doc; make html" to build the Sphinx docs. Usage: ./bin/coverage_doctest.py sympy/core or ./bin/coverage_doctest.py sympy/core/basic.py If no arguments are given, all files in sympy/ are checked. """ from __future__ import print_function import os import sys import inspect from argparse import ArgumentParser, RawDescriptionHelpFormatter try: from HTMLParser import HTMLParser except ImportError: # It's html.parser in Python 3 from html.parser import HTMLParser # Load color templates, used from sympy/utilities/runtests.py color_templates = ( ("Black", "0;30"), ("Red", "0;31"), ("Green", "0;32"), ("Brown", "0;33"), ("Blue", "0;34"), ("Purple", "0;35"), ("Cyan", "0;36"), ("LightGray", "0;37"), ("DarkGray", "1;30"), ("LightRed", "1;31"), ("LightGreen", "1;32"), ("Yellow", "1;33"), ("LightBlue", "1;34"), ("LightPurple", "1;35"), ("LightCyan", "1;36"), ("White", "1;37"), ) colors = {} for name, value in color_templates: colors[name] = value c_normal = '\033[0m' c_color = '\033[%sm' def print_header(name, underline=None, color=None): print() if color: print("%s%s%s" % (c_color % colors[color], name, c_normal)) else: print(name) if underline and not color: print(underline*len(name)) def print_coverage(module_path, c, c_md, c_mdt, c_idt, c_sph, f, f_md, f_mdt, f_idt, f_sph, score, total_doctests, total_members, sphinx_score, total_sphinx, verbose=False, no_color=False, sphinx=True): """ Prints details (depending on verbose) of a module """ doctest_color = "Brown" sphinx_color = "DarkGray" less_100_color = "Red" less_50_color = "LightRed" equal_100_color = "Green" big_header_color = "LightPurple" small_header_color = "Purple" if no_color: score_string = "Doctests: %s%% (%s of %s)" % (score, total_doctests, total_members) elif score < 100: if score < 50: 
score_string = "%sDoctests:%s %s%s%% (%s of %s)%s" % \ (c_color % colors[doctest_color], c_normal, c_color % colors[less_50_color], score, total_doctests, total_members, c_normal) else: score_string = "%sDoctests:%s %s%s%% (%s of %s)%s" % \ (c_color % colors[doctest_color], c_normal, c_color % colors[less_100_color], score, total_doctests, total_members, c_normal) else: score_string = "%sDoctests:%s %s%s%% (%s of %s)%s" % \ (c_color % colors[doctest_color], c_normal, c_color % colors[equal_100_color], score, total_doctests, total_members, c_normal) if sphinx: if no_color: sphinx_score_string = "Sphinx: %s%% (%s of %s)" % (sphinx_score, total_members - total_sphinx, total_members) elif sphinx_score < 100: if sphinx_score < 50: sphinx_score_string = "%sSphinx:%s %s%s%% (%s of %s)%s" % \ (c_color % colors[sphinx_color], c_normal, c_color % colors[less_50_color], sphinx_score, total_members - total_sphinx, total_members, c_normal) else: sphinx_score_string = "%sSphinx:%s %s%s%% (%s of %s)%s" % \ (c_color % colors[sphinx_color], c_normal, c_color % colors[less_100_color], sphinx_score, total_members - total_sphinx, total_members, c_normal) else: sphinx_score_string = "%sSphinx:%s %s%s%% (%s of %s)%s" % \ (c_color % colors[sphinx_color], c_normal, c_color % colors[equal_100_color], sphinx_score, total_members - total_sphinx, total_members, c_normal) if verbose: print('\n' + '-'*70) print(module_path) print('-'*70) else: if sphinx: print("%s: %s %s" % (module_path, score_string, sphinx_score_string)) else: print("%s: %s" % (module_path, score_string)) if verbose: print_header('CLASSES', '*', not no_color and big_header_color) if not c: print_header('No classes found!') else: if c_md: print_header('Missing docstrings', '-', not no_color and small_header_color) for md in c_md: print(' * ' + md) if c_mdt: print_header('Missing doctests', '-', not no_color and small_header_color) for md in c_mdt: print(' * ' + md) if c_idt: # Use "# indirect doctest" in the docstring to # 
supress this warning. print_header('Indirect doctests', '-', not no_color and small_header_color) for md in c_idt: print(' * ' + md) print('\n Use \"# indirect doctest\" in the docstring to supress this warning') if c_sph: print_header('Not imported into Sphinx', '-', not no_color and small_header_color) for md in c_sph: print(' * ' + md) print_header('FUNCTIONS', '*', not no_color and big_header_color) if not f: print_header('No functions found!') else: if f_md: print_header('Missing docstrings', '-', not no_color and small_header_color) for md in f_md: print(' * ' + md) if f_mdt: print_header('Missing doctests', '-', not no_color and small_header_color) for md in f_mdt: print(' * ' + md) if f_idt: print_header('Indirect doctests', '-', not no_color and small_header_color) for md in f_idt: print(' * ' + md) print('\n Use \"# indirect doctest\" in the docstring to supress this warning') if f_sph: print_header('Not imported into Sphinx', '-', not no_color and small_header_color) for md in f_sph: print(' * ' + md) if verbose: print('\n' + '-'*70) print(score_string) if sphinx: print(sphinx_score_string) print('-'*70) def _is_indirect(member, doc): """ Given string repr of doc and member checks if the member contains indirect documentation """ d = member in doc e = 'indirect doctest' in doc if not d and not e: return True else: return False def _get_arg_list(name, fobj): """ Given a function object, constructs a list of arguments and their defaults. 
Takes care of varargs and kwargs """ trunc = 20 # Sometimes argument length can be huge argspec = inspect.getargspec(fobj) arg_list = [] if argspec.args: for arg in argspec.args: arg_list.append(str(arg)) arg_list.reverse() # Now add the defaults if argspec.defaults: for i in range(len(argspec.defaults)): arg_list[i] = str(arg_list[i]) + '=' + str(argspec.defaults[-i]) # Get the list in right order arg_list.reverse() # Add var args if argspec.varargs: arg_list.append(argspec.varargs) if argspec.keywords: arg_list.append(argspec.keywords) # Truncate long arguments arg_list = [x[:trunc] for x in arg_list] # Construct the parameter string (enclosed in brackets) str_param = "%s(%s)" % (name, ', '.join(arg_list)) return str_param def get_mod_name(path, base): """ Gets a module name, given the path of file/dir and base dir of sympy """ rel_path = os.path.relpath(path, base) # Remove the file extension rel_path, ign = os.path.splitext(rel_path) # Replace separators by . for module path file_module = "" h, t = os.path.split(rel_path) while h or t: if t: file_module = t + '.' + file_module h, t = os.path.split(h) return file_module[:-1] class FindInSphinx(HTMLParser): is_imported = [] def handle_starttag(self, tag, attr): a = dict(attr) if tag == "div" and a.get('class', None) == "viewcode-block": self.is_imported.append(a['id']) def find_sphinx(name, mod_path, found={}): if mod_path in found: # Cache results return name in found[mod_path] doc_path = mod_path.split('.') doc_path[-1] += '.html' sphinx_path = os.path.join(sympy_top, 'doc', '_build', 'html', '_modules', *doc_path) if not os.path.exists(sphinx_path): return False with open(sphinx_path) as f: html_txt = f.read() p = FindInSphinx() p.feed(html_txt) found[mod_path] = p.is_imported return name in p.is_imported def process_function(name, c_name, b_obj, mod_path, f_sk, f_md, f_mdt, f_idt, f_has_doctest, sk_list, sph, sphinx=True): """ Processes a function to get information regarding documentation. 
It is assume that the function calling this subrouting has already verified that it is a valid module function. """ if name in sk_list: return False, False # We add in the end, as inspect.getsourcelines is slow add_md = False add_mdt = False add_idt = False in_sphinx = True f_doctest = False function = False if inspect.isclass(b_obj): obj = getattr(b_obj, name) obj_name = c_name + '.' + name else: obj = b_obj obj_name = name full_name = _get_arg_list(name, obj) if name.startswith('_'): f_sk.append(full_name) else: if not obj.__doc__: add_md = True elif not '>>>' in obj.__doc__: add_mdt = True elif _is_indirect(name, obj.__doc__): add_idt = True else: f_doctest = True function = True if sphinx: in_sphinx = find_sphinx(obj_name, mod_path) if add_md or add_mdt or add_idt or not in_sphinx: try: line_no = inspect.getsourcelines(obj)[1] except IOError: # Raised when source does not exist # which means the function is not there. return False, False full_name = "LINE %d: %s" % (line_no, full_name) if add_md: f_md.append(full_name) elif add_mdt: f_mdt.append(full_name) elif add_idt: f_idt.append(full_name) if not in_sphinx: sph.append(full_name) return f_doctest, function def process_class(c_name, obj, c_sk, c_md, c_mdt, c_idt, c_has_doctest, mod_path, sph, sphinx=True): """ Extracts information about the class regarding documentation. It is assumed that the function calling this subroutine has already checked that the class is valid. """ # Skip class case if c_name.startswith('_'): c_sk.append(c_name) return False, False, None c = False c_dt = False # Get the line number of class try: source, line_no = inspect.getsourcelines(obj) except IOError: # Raised when source does not exist # which means the class is not there. 
return False, False, None c = True full_name = "LINE %d: %s" % (line_no, c_name) if not obj.__doc__: c_md.append(full_name) elif not '>>>' in obj.__doc__: c_mdt.append(full_name) elif _is_indirect(c_name, obj.__doc__): c_idt.append(full_name) else: c_dt = True c_has_doctest.append(full_name) in_sphinx = False if sphinx: in_sphinx = find_sphinx(c_name, mod_path) if not in_sphinx: sph.append(full_name) return c_dt, c, source def coverage(module_path, verbose=False, no_color=False, sphinx=True): """ Given a module path, builds an index of all classes and functions contained. It then goes through each of the classes/functions to get the docstring and doctest coverage of the module. """ # Import the package and find members m = None try: __import__(module_path) m = sys.modules[module_path] except Exception as a: # Most likely cause, absence of __init__ print("%s could not be loaded due to %s." % (module_path, repr(a))) return 0, 0, 0 c_skipped = [] c_md = [] c_mdt = [] c_has_doctest = [] c_idt = [] classes = 0 c_doctests = 0 c_sph = [] f_skipped = [] f_md = [] f_mdt = [] f_has_doctest = [] f_idt = [] functions = 0 f_doctests = 0 f_sph = [] skip_members = ['__abstractmethods__'] # Get the list of members m_members = dir(m) for member in m_members: # Check for skipped functions first, they throw nasty errors # when combined with getattr if member in skip_members: continue # Identify if the member (class/def) a part of this module obj = getattr(m, member) obj_mod = inspect.getmodule(obj) # Function not a part of this module if not obj_mod or not obj_mod.__name__ == module_path: continue # If it's a function if inspect.isfunction(obj) or inspect.ismethod(obj): f_dt, f = process_function(member, '', obj, module_path, f_skipped, f_md, f_mdt, f_idt, f_has_doctest, skip_members, f_sph, sphinx=sphinx) if f: functions += 1 if f_dt: f_doctests += 1 # If it's a class, look at it's methods too elif inspect.isclass(obj): # Process the class first c_dt, c, source = 
process_class(member, obj, c_skipped, c_md, c_mdt, c_idt, c_has_doctest, module_path, c_sph, sphinx=sphinx) if not c: continue else: classes += 1 if c_dt: c_doctests += 1 # Iterate through it's members for f_name in obj.__dict__: if f_name in skip_members or f_name.startswith('_'): continue # Check if def funcname appears in source if not ("def " + f_name) in ' '.join(source): continue # Identify the module of the current class member f_obj = getattr(obj, f_name) obj_mod = inspect.getmodule(f_obj) # Function not a part of this module if not obj_mod or not obj_mod.__name__ == module_path: continue # If it's a function if inspect.isfunction(f_obj) or inspect.ismethod(f_obj): f_dt, f = process_function(f_name, member, obj, module_path, f_skipped, f_md, f_mdt, f_idt, f_has_doctest, skip_members, f_sph, sphinx=sphinx) if f: functions += 1 if f_dt: f_doctests += 1 # Evaluate the percent coverage total_doctests = c_doctests + f_doctests total_members = classes + functions if total_members: score = 100 * float(total_doctests) / (total_members) else: score = 100 score = int(score) if sphinx: total_sphinx = len(c_sph) + len(f_sph) if total_members: sphinx_score = 100 - 100 * float(total_sphinx) / total_members else: sphinx_score = 100 sphinx_score = int(sphinx_score) else: total_sphinx = 0 sphinx_score = 0 # Sort functions/classes by line number c_md = sorted(c_md, key=lambda x: int(x.split()[1][:-1])) c_mdt = sorted(c_mdt, key=lambda x: int(x.split()[1][:-1])) c_idt = sorted(c_idt, key=lambda x: int(x.split()[1][:-1])) f_md = sorted(f_md, key=lambda x: int(x.split()[1][:-1])) f_mdt = sorted(f_mdt, key=lambda x: int(x.split()[1][:-1])) f_idt = sorted(f_idt, key=lambda x: int(x.split()[1][:-1])) print_coverage(module_path, classes, c_md, c_mdt, c_idt, c_sph, functions, f_md, f_mdt, f_idt, f_sph, score, total_doctests, total_members, sphinx_score, total_sphinx, verbose=verbose, no_color=no_color, sphinx=sphinx) return total_doctests, total_sphinx, total_members def 
go(sympy_top, file, verbose=False, no_color=False, exact=True, sphinx=True): if os.path.isdir(file): doctests, total_sphinx, num_functions = 0, 0, 0 for F in os.listdir(file): _doctests, _total_sphinx, _num_functions = go(sympy_top, '%s/%s' % (file, F), verbose=verbose, no_color=no_color, exact=False, sphinx=sphinx) doctests += _doctests total_sphinx += _total_sphinx num_functions += _num_functions return doctests, total_sphinx, num_functions if (not (file.endswith('.py') or file.endswith('.pyx')) or file.endswith('__init__.py') or not exact and ('test_' in file or 'bench_' in file or any(name in file for name in skip_paths))): return 0, 0, 0 if not os.path.exists(file): print("File(%s does not exist." % file) sys.exit(1) # Relpath for constructing the module name return coverage(get_mod_name(file, sympy_top), verbose=verbose, no_color=no_color, sphinx=sphinx) if __name__ == "__main__": bintest_dir = os.path.abspath(os.path.dirname(__file__)) # bin/cover... sympy_top = os.path.split(bintest_dir)[0] # ../ sympy_dir = os.path.join(sympy_top, 'sympy') # ../sympy/ if os.path.isdir(sympy_dir): sys.path.insert(0, sympy_top) usage = "usage: ./bin/doctest_coverage.py PATHS" parser = ArgumentParser( description=__doc__, usage=usage, formatter_class=RawDescriptionHelpFormatter, ) parser.add_argument("path", nargs='*', default=[os.path.join(sympy_top, 'sympy')]) parser.add_argument("-v", "--verbose", action="store_true", dest="verbose", default=False) parser.add_argument("--no-colors", action="store_true", dest="no_color", help="use no colors", default=False) parser.add_argument("--no-sphinx", action="store_false", dest="sphinx", help="don't report Sphinx coverage", default=True) args = parser.parse_args() if args.sphinx and not os.path.exists(os.path.join(sympy_top, 'doc', '_build', 'html')): print(""" Cannot check Sphinx coverage without a documentation build. To build the docs, run "cd doc; make html". To skip checking Sphinx coverage, pass --no-sphinx. 
""") sys.exit(1) full_coverage = True for file in args.path: file = os.path.normpath(file) print('DOCTEST COVERAGE for %s' % (file)) print('='*70) print() doctests, total_sphinx, num_functions = go(sympy_top, file, verbose=args.verbose, no_color=args.no_color, sphinx=args.sphinx) if num_functions == 0: score = 100 sphinx_score = 100 else: score = 100 * float(doctests) / num_functions score = int(score) if doctests < num_functions: full_coverage = False if args.sphinx: sphinx_score = 100 - 100 * float(total_sphinx) / num_functions sphinx_score = int(sphinx_score) if total_sphinx > 0: full_coverage = False print() print('='*70) if args.no_color: print("TOTAL DOCTEST SCORE for %s: %s%% (%s of %s)" % \ (get_mod_name(file, sympy_top), score, doctests, num_functions)) elif score < 100: print("TOTAL DOCTEST SCORE for %s: %s%s%% (%s of %s)%s" % \ (get_mod_name(file, sympy_top), c_color % (colors["Red"]), score, doctests, num_functions, c_normal)) else: print("TOTAL DOCTEST SCORE for %s: %s%s%% (%s of %s)%s" % \ (get_mod_name(file, sympy_top), c_color % (colors["Green"]), score, doctests, num_functions, c_normal)) if args.sphinx: if args.no_color: print("TOTAL SPHINX SCORE for %s: %s%% (%s of %s)" % \ (get_mod_name(file, sympy_top), sphinx_score, num_functions - total_sphinx, num_functions)) elif sphinx_score < 100: print("TOTAL SPHINX SCORE for %s: %s%s%% (%s of %s)%s" % \ (get_mod_name(file, sympy_top), c_color % (colors["Red"]), sphinx_score, num_functions - total_sphinx, num_functions, c_normal)) else: print("TOTAL SPHINX SCORE for %s: %s%s%% (%s of %s)%s" % \ (get_mod_name(file, sympy_top), c_color % (colors["Green"]), sphinx_score, num_functions - total_sphinx, num_functions, c_normal)) print() sys.exit(not full_coverage)
bsd-3-clause
dya2/python-for-android
python3-alpha/python3-src/Lib/test/test_compileall.py
51
14735
import sys import compileall import imp import os import py_compile import shutil import struct import subprocess import tempfile import time import unittest import io from test import support, script_helper class CompileallTests(unittest.TestCase): def setUp(self): self.directory = tempfile.mkdtemp() self.source_path = os.path.join(self.directory, '_test.py') self.bc_path = imp.cache_from_source(self.source_path) with open(self.source_path, 'w') as file: file.write('x = 123\n') self.source_path2 = os.path.join(self.directory, '_test2.py') self.bc_path2 = imp.cache_from_source(self.source_path2) shutil.copyfile(self.source_path, self.source_path2) self.subdirectory = os.path.join(self.directory, '_subdir') os.mkdir(self.subdirectory) self.source_path3 = os.path.join(self.subdirectory, '_test3.py') shutil.copyfile(self.source_path, self.source_path3) def tearDown(self): shutil.rmtree(self.directory) def data(self): with open(self.bc_path, 'rb') as file: data = file.read(8) mtime = int(os.stat(self.source_path).st_mtime) compare = struct.pack('<4sl', imp.get_magic(), mtime) return data, compare def recreation_check(self, metadata): """Check that compileall recreates bytecode when the new metadata is used.""" if not hasattr(os, 'stat'): return py_compile.compile(self.source_path) self.assertEqual(*self.data()) with open(self.bc_path, 'rb') as file: bc = file.read()[len(metadata):] with open(self.bc_path, 'wb') as file: file.write(metadata) file.write(bc) self.assertNotEqual(*self.data()) compileall.compile_dir(self.directory, force=False, quiet=True) self.assertTrue(*self.data()) def test_mtime(self): # Test a change in mtime leads to a new .pyc. self.recreation_check(struct.pack('<4sl', imp.get_magic(), 1)) def test_magic_number(self): # Test a change in mtime leads to a new .pyc. 
self.recreation_check(b'\0\0\0\0') def test_compile_files(self): # Test compiling a single file, and complete directory for fn in (self.bc_path, self.bc_path2): try: os.unlink(fn) except: pass compileall.compile_file(self.source_path, force=False, quiet=True) self.assertTrue(os.path.isfile(self.bc_path) and not os.path.isfile(self.bc_path2)) os.unlink(self.bc_path) compileall.compile_dir(self.directory, force=False, quiet=True) self.assertTrue(os.path.isfile(self.bc_path) and os.path.isfile(self.bc_path2)) os.unlink(self.bc_path) os.unlink(self.bc_path2) def test_no_pycache_in_non_package(self): # Bug 8563 reported that __pycache__ directories got created by # compile_file() for non-.py files. data_dir = os.path.join(self.directory, 'data') data_file = os.path.join(data_dir, 'file') os.mkdir(data_dir) # touch data/file with open(data_file, 'w'): pass compileall.compile_file(data_file) self.assertFalse(os.path.exists(os.path.join(data_dir, '__pycache__'))) def test_optimize(self): # make sure compiling with different optimization settings than the # interpreter's creates the correct file names optimize = 1 if __debug__ else 0 compileall.compile_dir(self.directory, quiet=True, optimize=optimize) cached = imp.cache_from_source(self.source_path, debug_override=not optimize) self.assertTrue(os.path.isfile(cached)) cached2 = imp.cache_from_source(self.source_path2, debug_override=not optimize) self.assertTrue(os.path.isfile(cached2)) cached3 = imp.cache_from_source(self.source_path3, debug_override=not optimize) self.assertTrue(os.path.isfile(cached3)) class EncodingTest(unittest.TestCase): """Issue 6716: compileall should escape source code when printing errors to stdout.""" def setUp(self): self.directory = tempfile.mkdtemp() self.source_path = os.path.join(self.directory, '_test.py') with open(self.source_path, 'w', encoding='utf-8') as file: file.write('# -*- coding: utf-8 -*-\n') file.write('print u"\u20ac"\n') def tearDown(self): shutil.rmtree(self.directory) def 
test_error(self): try: orig_stdout = sys.stdout sys.stdout = io.TextIOWrapper(io.BytesIO(),encoding='ascii') compileall.compile_dir(self.directory) finally: sys.stdout = orig_stdout class CommandLineTests(unittest.TestCase): """Test compileall's CLI.""" def assertRunOK(self, *args, **env_vars): rc, out, err = script_helper.assert_python_ok( '-S', '-m', 'compileall', *args, **env_vars) self.assertEqual(b'', err) return out def assertRunNotOK(self, *args, **env_vars): rc, out, err = script_helper.assert_python_failure( '-S', '-m', 'compileall', *args, **env_vars) return rc, out, err def assertCompiled(self, fn): self.assertTrue(os.path.exists(imp.cache_from_source(fn))) def assertNotCompiled(self, fn): self.assertFalse(os.path.exists(imp.cache_from_source(fn))) def setUp(self): self.addCleanup(self._cleanup) self.directory = tempfile.mkdtemp() self.pkgdir = os.path.join(self.directory, 'foo') os.mkdir(self.pkgdir) self.pkgdir_cachedir = os.path.join(self.pkgdir, '__pycache__') # Create the __init__.py and a package module. self.initfn = script_helper.make_script(self.pkgdir, '__init__', '') self.barfn = script_helper.make_script(self.pkgdir, 'bar', '') def _cleanup(self): support.rmtree(self.directory) def test_no_args_compiles_path(self): # Note that -l is implied for the no args case. bazfn = script_helper.make_script(self.directory, 'baz', '') self.assertRunOK(PYTHONPATH=self.directory) self.assertCompiled(bazfn) self.assertNotCompiled(self.initfn) self.assertNotCompiled(self.barfn) # Ensure that the default behavior of compileall's CLI is to create # PEP 3147 pyc/pyo files. for name, ext, switch in [ ('normal', 'pyc', []), ('optimize', 'pyo', ['-O']), ('doubleoptimize', 'pyo', ['-OO']), ]: def f(self, ext=ext, switch=switch): script_helper.assert_python_ok(*(switch + ['-m', 'compileall', '-q', self.pkgdir])) # Verify the __pycache__ directory contents. 
self.assertTrue(os.path.exists(self.pkgdir_cachedir)) expected = sorted(base.format(imp.get_tag(), ext) for base in ('__init__.{}.{}', 'bar.{}.{}')) self.assertEqual(sorted(os.listdir(self.pkgdir_cachedir)), expected) # Make sure there are no .pyc files in the source directory. self.assertFalse([fn for fn in os.listdir(self.pkgdir) if fn.endswith(ext)]) locals()['test_pep3147_paths_' + name] = f def test_legacy_paths(self): # Ensure that with the proper switch, compileall leaves legacy # pyc/pyo files, and no __pycache__ directory. self.assertRunOK('-b', '-q', self.pkgdir) # Verify the __pycache__ directory contents. self.assertFalse(os.path.exists(self.pkgdir_cachedir)) expected = sorted(['__init__.py', '__init__.pyc', 'bar.py', 'bar.pyc']) self.assertEqual(sorted(os.listdir(self.pkgdir)), expected) def test_multiple_runs(self): # Bug 8527 reported that multiple calls produced empty # __pycache__/__pycache__ directories. self.assertRunOK('-q', self.pkgdir) # Verify the __pycache__ directory contents. self.assertTrue(os.path.exists(self.pkgdir_cachedir)) cachecachedir = os.path.join(self.pkgdir_cachedir, '__pycache__') self.assertFalse(os.path.exists(cachecachedir)) # Call compileall again. self.assertRunOK('-q', self.pkgdir) self.assertTrue(os.path.exists(self.pkgdir_cachedir)) self.assertFalse(os.path.exists(cachecachedir)) def test_force(self): self.assertRunOK('-q', self.pkgdir) pycpath = imp.cache_from_source(self.barfn) # set atime/mtime backward to avoid file timestamp resolution issues os.utime(pycpath, (time.time()-60,)*2) mtime = os.stat(pycpath).st_mtime # without force, no recompilation self.assertRunOK('-q', self.pkgdir) mtime2 = os.stat(pycpath).st_mtime self.assertEqual(mtime, mtime2) # now force it. 
self.assertRunOK('-q', '-f', self.pkgdir) mtime2 = os.stat(pycpath).st_mtime self.assertNotEqual(mtime, mtime2) def test_recursion_control(self): subpackage = os.path.join(self.pkgdir, 'spam') os.mkdir(subpackage) subinitfn = script_helper.make_script(subpackage, '__init__', '') hamfn = script_helper.make_script(subpackage, 'ham', '') self.assertRunOK('-q', '-l', self.pkgdir) self.assertNotCompiled(subinitfn) self.assertFalse(os.path.exists(os.path.join(subpackage, '__pycache__'))) self.assertRunOK('-q', self.pkgdir) self.assertCompiled(subinitfn) self.assertCompiled(hamfn) def test_quiet(self): noisy = self.assertRunOK(self.pkgdir) quiet = self.assertRunOK('-q', self.pkgdir) self.assertNotEqual(b'', noisy) self.assertEqual(b'', quiet) def test_regexp(self): self.assertRunOK('-q', '-x', r'ba[^\\/]*$', self.pkgdir) self.assertNotCompiled(self.barfn) self.assertCompiled(self.initfn) def test_multiple_dirs(self): pkgdir2 = os.path.join(self.directory, 'foo2') os.mkdir(pkgdir2) init2fn = script_helper.make_script(pkgdir2, '__init__', '') bar2fn = script_helper.make_script(pkgdir2, 'bar2', '') self.assertRunOK('-q', self.pkgdir, pkgdir2) self.assertCompiled(self.initfn) self.assertCompiled(self.barfn) self.assertCompiled(init2fn) self.assertCompiled(bar2fn) def test_d_takes_exactly_one_dir(self): rc, out, err = self.assertRunNotOK('-d', 'foo') self.assertEqual(out, b'') self.assertRegex(err, b'-d') rc, out, err = self.assertRunNotOK('-d', 'foo', 'bar') self.assertEqual(out, b'') self.assertRegex(err, b'-d') def test_d_compile_error(self): script_helper.make_script(self.pkgdir, 'crunchyfrog', 'bad(syntax') rc, out, err = self.assertRunNotOK('-q', '-d', 'dinsdale', self.pkgdir) self.assertRegex(out, b'File "dinsdale') def test_d_runtime_error(self): bazfn = script_helper.make_script(self.pkgdir, 'baz', 'raise Exception') self.assertRunOK('-q', '-d', 'dinsdale', self.pkgdir) fn = script_helper.make_script(self.pkgdir, 'bing', 'import baz') pyc = 
imp.cache_from_source(bazfn) os.rename(pyc, os.path.join(self.pkgdir, 'baz.pyc')) os.remove(bazfn) rc, out, err = script_helper.assert_python_failure(fn) self.assertRegex(err, b'File "dinsdale') def test_include_bad_file(self): rc, out, err = self.assertRunNotOK( '-i', os.path.join(self.directory, 'nosuchfile'), self.pkgdir) self.assertRegex(out, b'rror.*nosuchfile') self.assertNotRegex(err, b'Traceback') self.assertFalse(os.path.exists(imp.cache_from_source( self.pkgdir_cachedir))) def test_include_file_with_arg(self): f1 = script_helper.make_script(self.pkgdir, 'f1', '') f2 = script_helper.make_script(self.pkgdir, 'f2', '') f3 = script_helper.make_script(self.pkgdir, 'f3', '') f4 = script_helper.make_script(self.pkgdir, 'f4', '') with open(os.path.join(self.directory, 'l1'), 'w') as l1: l1.write(os.path.join(self.pkgdir, 'f1.py')+os.linesep) l1.write(os.path.join(self.pkgdir, 'f2.py')+os.linesep) self.assertRunOK('-i', os.path.join(self.directory, 'l1'), f4) self.assertCompiled(f1) self.assertCompiled(f2) self.assertNotCompiled(f3) self.assertCompiled(f4) def test_include_file_no_arg(self): f1 = script_helper.make_script(self.pkgdir, 'f1', '') f2 = script_helper.make_script(self.pkgdir, 'f2', '') f3 = script_helper.make_script(self.pkgdir, 'f3', '') f4 = script_helper.make_script(self.pkgdir, 'f4', '') with open(os.path.join(self.directory, 'l1'), 'w') as l1: l1.write(os.path.join(self.pkgdir, 'f2.py')+os.linesep) self.assertRunOK('-i', os.path.join(self.directory, 'l1')) self.assertNotCompiled(f1) self.assertCompiled(f2) self.assertNotCompiled(f3) self.assertNotCompiled(f4) def test_include_on_stdin(self): f1 = script_helper.make_script(self.pkgdir, 'f1', '') f2 = script_helper.make_script(self.pkgdir, 'f2', '') f3 = script_helper.make_script(self.pkgdir, 'f3', '') f4 = script_helper.make_script(self.pkgdir, 'f4', '') p = script_helper.spawn_python('-m', 'compileall', '-i', '-') p.stdin.write((f3+os.linesep).encode('ascii')) script_helper.kill_python(p) 
self.assertNotCompiled(f1) self.assertNotCompiled(f2) self.assertCompiled(f3) self.assertNotCompiled(f4) def test_compiles_as_much_as_possible(self): bingfn = script_helper.make_script(self.pkgdir, 'bing', 'syntax(error') rc, out, err = self.assertRunNotOK('nosuchfile', self.initfn, bingfn, self.barfn) self.assertRegex(out, b'rror') self.assertNotCompiled(bingfn) self.assertCompiled(self.initfn) self.assertCompiled(self.barfn) def test_invalid_arg_produces_message(self): out = self.assertRunOK('badfilename') self.assertRegex(out, b"Can't list 'badfilename'") def test_main(): support.run_unittest( CommandLineTests, CompileallTests, EncodingTest, ) if __name__ == "__main__": test_main()
apache-2.0
vjmac15/Lyilis
lib/pip/_vendor/distlib/metadata (VJ Washington's conflicted copy 2017-08-29).py
335
38833
# -*- coding: utf-8 -*- # # Copyright (C) 2012 The Python Software Foundation. # See LICENSE.txt and CONTRIBUTORS.txt. # """Implementation of the Metadata for Python packages PEPs. Supports all metadata formats (1.0, 1.1, 1.2, and 2.0 experimental). """ from __future__ import unicode_literals import codecs from email import message_from_file import json import logging import re from . import DistlibException, __version__ from .compat import StringIO, string_types, text_type from .markers import interpret from .util import extract_by_key, get_extras from .version import get_scheme, PEP440_VERSION_RE logger = logging.getLogger(__name__) class MetadataMissingError(DistlibException): """A required metadata is missing""" class MetadataConflictError(DistlibException): """Attempt to read or write metadata fields that are conflictual.""" class MetadataUnrecognizedVersionError(DistlibException): """Unknown metadata version number.""" class MetadataInvalidError(DistlibException): """A metadata value is invalid""" # public API of this module __all__ = ['Metadata', 'PKG_INFO_ENCODING', 'PKG_INFO_PREFERRED_VERSION'] # Encoding used for the PKG-INFO files PKG_INFO_ENCODING = 'utf-8' # preferred version. 
Hopefully will be changed # to 1.2 once PEP 345 is supported everywhere PKG_INFO_PREFERRED_VERSION = '1.1' _LINE_PREFIX_1_2 = re.compile('\n \|') _LINE_PREFIX_PRE_1_2 = re.compile('\n ') _241_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', 'Summary', 'Description', 'Keywords', 'Home-page', 'Author', 'Author-email', 'License') _314_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', 'Supported-Platform', 'Summary', 'Description', 'Keywords', 'Home-page', 'Author', 'Author-email', 'License', 'Classifier', 'Download-URL', 'Obsoletes', 'Provides', 'Requires') _314_MARKERS = ('Obsoletes', 'Provides', 'Requires', 'Classifier', 'Download-URL') _345_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', 'Supported-Platform', 'Summary', 'Description', 'Keywords', 'Home-page', 'Author', 'Author-email', 'Maintainer', 'Maintainer-email', 'License', 'Classifier', 'Download-URL', 'Obsoletes-Dist', 'Project-URL', 'Provides-Dist', 'Requires-Dist', 'Requires-Python', 'Requires-External') _345_MARKERS = ('Provides-Dist', 'Requires-Dist', 'Requires-Python', 'Obsoletes-Dist', 'Requires-External', 'Maintainer', 'Maintainer-email', 'Project-URL') _426_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', 'Supported-Platform', 'Summary', 'Description', 'Keywords', 'Home-page', 'Author', 'Author-email', 'Maintainer', 'Maintainer-email', 'License', 'Classifier', 'Download-URL', 'Obsoletes-Dist', 'Project-URL', 'Provides-Dist', 'Requires-Dist', 'Requires-Python', 'Requires-External', 'Private-Version', 'Obsoleted-By', 'Setup-Requires-Dist', 'Extension', 'Provides-Extra') _426_MARKERS = ('Private-Version', 'Provides-Extra', 'Obsoleted-By', 'Setup-Requires-Dist', 'Extension') _ALL_FIELDS = set() _ALL_FIELDS.update(_241_FIELDS) _ALL_FIELDS.update(_314_FIELDS) _ALL_FIELDS.update(_345_FIELDS) _ALL_FIELDS.update(_426_FIELDS) EXTRA_RE = re.compile(r'''extra\s*==\s*("([^"]+)"|'([^']+)')''') def _version2fieldlist(version): if version == '1.0': return _241_FIELDS 
elif version == '1.1': return _314_FIELDS elif version == '1.2': return _345_FIELDS elif version == '2.0': return _426_FIELDS raise MetadataUnrecognizedVersionError(version) def _best_version(fields): """Detect the best version depending on the fields used.""" def _has_marker(keys, markers): for marker in markers: if marker in keys: return True return False keys = [] for key, value in fields.items(): if value in ([], 'UNKNOWN', None): continue keys.append(key) possible_versions = ['1.0', '1.1', '1.2', '2.0'] # first let's try to see if a field is not part of one of the version for key in keys: if key not in _241_FIELDS and '1.0' in possible_versions: possible_versions.remove('1.0') if key not in _314_FIELDS and '1.1' in possible_versions: possible_versions.remove('1.1') if key not in _345_FIELDS and '1.2' in possible_versions: possible_versions.remove('1.2') if key not in _426_FIELDS and '2.0' in possible_versions: possible_versions.remove('2.0') # possible_version contains qualified versions if len(possible_versions) == 1: return possible_versions[0] # found ! 
elif len(possible_versions) == 0: raise MetadataConflictError('Unknown metadata set') # let's see if one unique marker is found is_1_1 = '1.1' in possible_versions and _has_marker(keys, _314_MARKERS) is_1_2 = '1.2' in possible_versions and _has_marker(keys, _345_MARKERS) is_2_0 = '2.0' in possible_versions and _has_marker(keys, _426_MARKERS) if int(is_1_1) + int(is_1_2) + int(is_2_0) > 1: raise MetadataConflictError('You used incompatible 1.1/1.2/2.0 fields') # we have the choice, 1.0, or 1.2, or 2.0 # - 1.0 has a broken Summary field but works with all tools # - 1.1 is to avoid # - 1.2 fixes Summary but has little adoption # - 2.0 adds more features and is very new if not is_1_1 and not is_1_2 and not is_2_0: # we couldn't find any specific marker if PKG_INFO_PREFERRED_VERSION in possible_versions: return PKG_INFO_PREFERRED_VERSION if is_1_1: return '1.1' if is_1_2: return '1.2' return '2.0' _ATTR2FIELD = { 'metadata_version': 'Metadata-Version', 'name': 'Name', 'version': 'Version', 'platform': 'Platform', 'supported_platform': 'Supported-Platform', 'summary': 'Summary', 'description': 'Description', 'keywords': 'Keywords', 'home_page': 'Home-page', 'author': 'Author', 'author_email': 'Author-email', 'maintainer': 'Maintainer', 'maintainer_email': 'Maintainer-email', 'license': 'License', 'classifier': 'Classifier', 'download_url': 'Download-URL', 'obsoletes_dist': 'Obsoletes-Dist', 'provides_dist': 'Provides-Dist', 'requires_dist': 'Requires-Dist', 'setup_requires_dist': 'Setup-Requires-Dist', 'requires_python': 'Requires-Python', 'requires_external': 'Requires-External', 'requires': 'Requires', 'provides': 'Provides', 'obsoletes': 'Obsoletes', 'project_url': 'Project-URL', 'private_version': 'Private-Version', 'obsoleted_by': 'Obsoleted-By', 'extension': 'Extension', 'provides_extra': 'Provides-Extra', } _PREDICATE_FIELDS = ('Requires-Dist', 'Obsoletes-Dist', 'Provides-Dist') _VERSIONS_FIELDS = ('Requires-Python',) _VERSION_FIELDS = ('Version',) _LISTFIELDS = 
('Platform', 'Classifier', 'Obsoletes', 'Requires', 'Provides', 'Obsoletes-Dist', 'Provides-Dist', 'Requires-Dist', 'Requires-External', 'Project-URL', 'Supported-Platform', 'Setup-Requires-Dist', 'Provides-Extra', 'Extension') _LISTTUPLEFIELDS = ('Project-URL',) _ELEMENTSFIELD = ('Keywords',) _UNICODEFIELDS = ('Author', 'Maintainer', 'Summary', 'Description') _MISSING = object() _FILESAFE = re.compile('[^A-Za-z0-9.]+') def _get_name_and_version(name, version, for_filename=False): """Return the distribution name with version. If for_filename is true, return a filename-escaped form.""" if for_filename: # For both name and version any runs of non-alphanumeric or '.' # characters are replaced with a single '-'. Additionally any # spaces in the version string become '.' name = _FILESAFE.sub('-', name) version = _FILESAFE.sub('-', version.replace(' ', '.')) return '%s-%s' % (name, version) class LegacyMetadata(object): """The legacy metadata of a release. Supports versions 1.0, 1.1 and 1.2 (auto-detected). 
You can instantiate the class with one of these arguments (or none): - *path*, the path to a metadata file - *fileobj* give a file-like object with metadata as content - *mapping* is a dict-like object - *scheme* is a version scheme name """ # TODO document the mapping API and UNKNOWN default key def __init__(self, path=None, fileobj=None, mapping=None, scheme='default'): if [path, fileobj, mapping].count(None) < 2: raise TypeError('path, fileobj and mapping are exclusive') self._fields = {} self.requires_files = [] self._dependencies = None self.scheme = scheme if path is not None: self.read(path) elif fileobj is not None: self.read_file(fileobj) elif mapping is not None: self.update(mapping) self.set_metadata_version() def set_metadata_version(self): self._fields['Metadata-Version'] = _best_version(self._fields) def _write_field(self, fileobj, name, value): fileobj.write('%s: %s\n' % (name, value)) def __getitem__(self, name): return self.get(name) def __setitem__(self, name, value): return self.set(name, value) def __delitem__(self, name): field_name = self._convert_name(name) try: del self._fields[field_name] except KeyError: raise KeyError(name) def __contains__(self, name): return (name in self._fields or self._convert_name(name) in self._fields) def _convert_name(self, name): if name in _ALL_FIELDS: return name name = name.replace('-', '_').lower() return _ATTR2FIELD.get(name, name) def _default_value(self, name): if name in _LISTFIELDS or name in _ELEMENTSFIELD: return [] return 'UNKNOWN' def _remove_line_prefix(self, value): if self.metadata_version in ('1.0', '1.1'): return _LINE_PREFIX_PRE_1_2.sub('\n', value) else: return _LINE_PREFIX_1_2.sub('\n', value) def __getattr__(self, name): if name in _ATTR2FIELD: return self[name] raise AttributeError(name) # # Public API # # dependencies = property(_get_dependencies, _set_dependencies) def get_fullname(self, filesafe=False): """Return the distribution name with version. 
If filesafe is true, return a filename-escaped form.""" return _get_name_and_version(self['Name'], self['Version'], filesafe) def is_field(self, name): """return True if name is a valid metadata key""" name = self._convert_name(name) return name in _ALL_FIELDS def is_multi_field(self, name): name = self._convert_name(name) return name in _LISTFIELDS def read(self, filepath): """Read the metadata values from a file path.""" fp = codecs.open(filepath, 'r', encoding='utf-8') try: self.read_file(fp) finally: fp.close() def read_file(self, fileob): """Read the metadata values from a file object.""" msg = message_from_file(fileob) self._fields['Metadata-Version'] = msg['metadata-version'] # When reading, get all the fields we can for field in _ALL_FIELDS: if field not in msg: continue if field in _LISTFIELDS: # we can have multiple lines values = msg.get_all(field) if field in _LISTTUPLEFIELDS and values is not None: values = [tuple(value.split(',')) for value in values] self.set(field, values) else: # single line value = msg[field] if value is not None and value != 'UNKNOWN': self.set(field, value) self.set_metadata_version() def write(self, filepath, skip_unknown=False): """Write the metadata fields to filepath.""" fp = codecs.open(filepath, 'w', encoding='utf-8') try: self.write_file(fp, skip_unknown) finally: fp.close() def write_file(self, fileobject, skip_unknown=False): """Write the PKG-INFO format data to a file object.""" self.set_metadata_version() for field in _version2fieldlist(self['Metadata-Version']): values = self.get(field) if skip_unknown and values in ('UNKNOWN', [], ['UNKNOWN']): continue if field in _ELEMENTSFIELD: self._write_field(fileobject, field, ','.join(values)) continue if field not in _LISTFIELDS: if field == 'Description': if self.metadata_version in ('1.0', '1.1'): values = values.replace('\n', '\n ') else: values = values.replace('\n', '\n |') values = [values] if field in _LISTTUPLEFIELDS: values = [','.join(value) for value in values] 
for value in values: self._write_field(fileobject, field, value) def update(self, other=None, **kwargs): """Set metadata values from the given iterable `other` and kwargs. Behavior is like `dict.update`: If `other` has a ``keys`` method, they are looped over and ``self[key]`` is assigned ``other[key]``. Else, ``other`` is an iterable of ``(key, value)`` iterables. Keys that don't match a metadata field or that have an empty value are dropped. """ def _set(key, value): if key in _ATTR2FIELD and value: self.set(self._convert_name(key), value) if not other: # other is None or empty container pass elif hasattr(other, 'keys'): for k in other.keys(): _set(k, other[k]) else: for k, v in other: _set(k, v) if kwargs: for k, v in kwargs.items(): _set(k, v) def set(self, name, value): """Control then set a metadata field.""" name = self._convert_name(name) if ((name in _ELEMENTSFIELD or name == 'Platform') and not isinstance(value, (list, tuple))): if isinstance(value, string_types): value = [v.strip() for v in value.split(',')] else: value = [] elif (name in _LISTFIELDS and not isinstance(value, (list, tuple))): if isinstance(value, string_types): value = [value] else: value = [] if logger.isEnabledFor(logging.WARNING): project_name = self['Name'] scheme = get_scheme(self.scheme) if name in _PREDICATE_FIELDS and value is not None: for v in value: # check that the values are valid if not scheme.is_valid_matcher(v.split(';')[0]): logger.warning( "'%s': '%s' is not valid (field '%s')", project_name, v, name) # FIXME this rejects UNKNOWN, is that right? 
elif name in _VERSIONS_FIELDS and value is not None: if not scheme.is_valid_constraint_list(value): logger.warning("'%s': '%s' is not a valid version (field '%s')", project_name, value, name) elif name in _VERSION_FIELDS and value is not None: if not scheme.is_valid_version(value): logger.warning("'%s': '%s' is not a valid version (field '%s')", project_name, value, name) if name in _UNICODEFIELDS: if name == 'Description': value = self._remove_line_prefix(value) self._fields[name] = value def get(self, name, default=_MISSING): """Get a metadata field.""" name = self._convert_name(name) if name not in self._fields: if default is _MISSING: default = self._default_value(name) return default if name in _UNICODEFIELDS: value = self._fields[name] return value elif name in _LISTFIELDS: value = self._fields[name] if value is None: return [] res = [] for val in value: if name not in _LISTTUPLEFIELDS: res.append(val) else: # That's for Project-URL res.append((val[0], val[1])) return res elif name in _ELEMENTSFIELD: value = self._fields[name] if isinstance(value, string_types): return value.split(',') return self._fields[name] def check(self, strict=False): """Check if the metadata is compliant. 
If strict is True then raise if no Name or Version are provided""" self.set_metadata_version() # XXX should check the versions (if the file was loaded) missing, warnings = [], [] for attr in ('Name', 'Version'): # required by PEP 345 if attr not in self: missing.append(attr) if strict and missing != []: msg = 'missing required metadata: %s' % ', '.join(missing) raise MetadataMissingError(msg) for attr in ('Home-page', 'Author'): if attr not in self: missing.append(attr) # checking metadata 1.2 (XXX needs to check 1.1, 1.0) if self['Metadata-Version'] != '1.2': return missing, warnings scheme = get_scheme(self.scheme) def are_valid_constraints(value): for v in value: if not scheme.is_valid_matcher(v.split(';')[0]): return False return True for fields, controller in ((_PREDICATE_FIELDS, are_valid_constraints), (_VERSIONS_FIELDS, scheme.is_valid_constraint_list), (_VERSION_FIELDS, scheme.is_valid_version)): for field in fields: value = self.get(field, None) if value is not None and not controller(value): warnings.append("Wrong value for '%s': %s" % (field, value)) return missing, warnings def todict(self, skip_missing=False): """Return fields as a dict. Field names will be converted to use the underscore-lowercase style instead of hyphen-mixed case (i.e. home_page instead of Home-page). 
""" self.set_metadata_version() mapping_1_0 = ( ('metadata_version', 'Metadata-Version'), ('name', 'Name'), ('version', 'Version'), ('summary', 'Summary'), ('home_page', 'Home-page'), ('author', 'Author'), ('author_email', 'Author-email'), ('license', 'License'), ('description', 'Description'), ('keywords', 'Keywords'), ('platform', 'Platform'), ('classifiers', 'Classifier'), ('download_url', 'Download-URL'), ) data = {} for key, field_name in mapping_1_0: if not skip_missing or field_name in self._fields: data[key] = self[field_name] if self['Metadata-Version'] == '1.2': mapping_1_2 = ( ('requires_dist', 'Requires-Dist'), ('requires_python', 'Requires-Python'), ('requires_external', 'Requires-External'), ('provides_dist', 'Provides-Dist'), ('obsoletes_dist', 'Obsoletes-Dist'), ('project_url', 'Project-URL'), ('maintainer', 'Maintainer'), ('maintainer_email', 'Maintainer-email'), ) for key, field_name in mapping_1_2: if not skip_missing or field_name in self._fields: if key != 'project_url': data[key] = self[field_name] else: data[key] = [','.join(u) for u in self[field_name]] elif self['Metadata-Version'] == '1.1': mapping_1_1 = ( ('provides', 'Provides'), ('requires', 'Requires'), ('obsoletes', 'Obsoletes'), ) for key, field_name in mapping_1_1: if not skip_missing or field_name in self._fields: data[key] = self[field_name] return data def add_requirements(self, requirements): if self['Metadata-Version'] == '1.1': # we can't have 1.1 metadata *and* Setuptools requires for field in ('Obsoletes', 'Requires', 'Provides'): if field in self: del self[field] self['Requires-Dist'] += requirements # Mapping API # TODO could add iter* variants def keys(self): return list(_version2fieldlist(self['Metadata-Version'])) def __iter__(self): for key in self.keys(): yield key def values(self): return [self[key] for key in self.keys()] def items(self): return [(key, self[key]) for key in self.keys()] def __repr__(self): return '<%s %s %s>' % (self.__class__.__name__, self.name, 
self.version) METADATA_FILENAME = 'pydist.json' WHEEL_METADATA_FILENAME = 'metadata.json' class Metadata(object): """ The metadata of a release. This implementation uses 2.0 (JSON) metadata where possible. If not possible, it wraps a LegacyMetadata instance which handles the key-value metadata format. """ METADATA_VERSION_MATCHER = re.compile('^\d+(\.\d+)*$') NAME_MATCHER = re.compile('^[0-9A-Z]([0-9A-Z_.-]*[0-9A-Z])?$', re.I) VERSION_MATCHER = PEP440_VERSION_RE SUMMARY_MATCHER = re.compile('.{1,2047}') METADATA_VERSION = '2.0' GENERATOR = 'distlib (%s)' % __version__ MANDATORY_KEYS = { 'name': (), 'version': (), 'summary': ('legacy',), } INDEX_KEYS = ('name version license summary description author ' 'author_email keywords platform home_page classifiers ' 'download_url') DEPENDENCY_KEYS = ('extras run_requires test_requires build_requires ' 'dev_requires provides meta_requires obsoleted_by ' 'supports_environments') SYNTAX_VALIDATORS = { 'metadata_version': (METADATA_VERSION_MATCHER, ()), 'name': (NAME_MATCHER, ('legacy',)), 'version': (VERSION_MATCHER, ('legacy',)), 'summary': (SUMMARY_MATCHER, ('legacy',)), } __slots__ = ('_legacy', '_data', 'scheme') def __init__(self, path=None, fileobj=None, mapping=None, scheme='default'): if [path, fileobj, mapping].count(None) < 2: raise TypeError('path, fileobj and mapping are exclusive') self._legacy = None self._data = None self.scheme = scheme #import pdb; pdb.set_trace() if mapping is not None: try: self._validate_mapping(mapping, scheme) self._data = mapping except MetadataUnrecognizedVersionError: self._legacy = LegacyMetadata(mapping=mapping, scheme=scheme) self.validate() else: data = None if path: with open(path, 'rb') as f: data = f.read() elif fileobj: data = fileobj.read() if data is None: # Initialised with no args - to be added self._data = { 'metadata_version': self.METADATA_VERSION, 'generator': self.GENERATOR, } else: if not isinstance(data, text_type): data = data.decode('utf-8') try: self._data = 
json.loads(data) self._validate_mapping(self._data, scheme) except ValueError: # Note: MetadataUnrecognizedVersionError does not # inherit from ValueError (it's a DistlibException, # which should not inherit from ValueError). # The ValueError comes from the json.load - if that # succeeds and we get a validation error, we want # that to propagate self._legacy = LegacyMetadata(fileobj=StringIO(data), scheme=scheme) self.validate() common_keys = set(('name', 'version', 'license', 'keywords', 'summary')) none_list = (None, list) none_dict = (None, dict) mapped_keys = { 'run_requires': ('Requires-Dist', list), 'build_requires': ('Setup-Requires-Dist', list), 'dev_requires': none_list, 'test_requires': none_list, 'meta_requires': none_list, 'extras': ('Provides-Extra', list), 'modules': none_list, 'namespaces': none_list, 'exports': none_dict, 'commands': none_dict, 'classifiers': ('Classifier', list), 'source_url': ('Download-URL', None), 'metadata_version': ('Metadata-Version', None), } del none_list, none_dict def __getattribute__(self, key): common = object.__getattribute__(self, 'common_keys') mapped = object.__getattribute__(self, 'mapped_keys') if key in mapped: lk, maker = mapped[key] if self._legacy: if lk is None: result = None if maker is None else maker() else: result = self._legacy.get(lk) else: value = None if maker is None else maker() if key not in ('commands', 'exports', 'modules', 'namespaces', 'classifiers'): result = self._data.get(key, value) else: # special cases for PEP 459 sentinel = object() result = sentinel d = self._data.get('extensions') if d: if key == 'commands': result = d.get('python.commands', value) elif key == 'classifiers': d = d.get('python.details') if d: result = d.get(key, value) else: d = d.get('python.exports') if not d: d = self._data.get('python.exports') if d: result = d.get(key, value) if result is sentinel: result = value elif key not in common: result = object.__getattribute__(self, key) elif self._legacy: result = 
self._legacy.get(key) else: result = self._data.get(key) return result def _validate_value(self, key, value, scheme=None): if key in self.SYNTAX_VALIDATORS: pattern, exclusions = self.SYNTAX_VALIDATORS[key] if (scheme or self.scheme) not in exclusions: m = pattern.match(value) if not m: raise MetadataInvalidError("'%s' is an invalid value for " "the '%s' property" % (value, key)) def __setattr__(self, key, value): self._validate_value(key, value) common = object.__getattribute__(self, 'common_keys') mapped = object.__getattribute__(self, 'mapped_keys') if key in mapped: lk, _ = mapped[key] if self._legacy: if lk is None: raise NotImplementedError self._legacy[lk] = value elif key not in ('commands', 'exports', 'modules', 'namespaces', 'classifiers'): self._data[key] = value else: # special cases for PEP 459 d = self._data.setdefault('extensions', {}) if key == 'commands': d['python.commands'] = value elif key == 'classifiers': d = d.setdefault('python.details', {}) d[key] = value else: d = d.setdefault('python.exports', {}) d[key] = value elif key not in common: object.__setattr__(self, key, value) else: if key == 'keywords': if isinstance(value, string_types): value = value.strip() if value: value = value.split() else: value = [] if self._legacy: self._legacy[key] = value else: self._data[key] = value @property def name_and_version(self): return _get_name_and_version(self.name, self.version, True) @property def provides(self): if self._legacy: result = self._legacy['Provides-Dist'] else: result = self._data.setdefault('provides', []) s = '%s (%s)' % (self.name, self.version) if s not in result: result.append(s) return result @provides.setter def provides(self, value): if self._legacy: self._legacy['Provides-Dist'] = value else: self._data['provides'] = value def get_requirements(self, reqts, extras=None, env=None): """ Base method to get dependencies, given a set of extras to satisfy and an optional environment context. 
:param reqts: A list of sometimes-wanted dependencies, perhaps dependent on extras and environment. :param extras: A list of optional components being requested. :param env: An optional environment for marker evaluation. """ if self._legacy: result = reqts else: result = [] extras = get_extras(extras or [], self.extras) for d in reqts: if 'extra' not in d and 'environment' not in d: # unconditional include = True else: if 'extra' not in d: # Not extra-dependent - only environment-dependent include = True else: include = d.get('extra') in extras if include: # Not excluded because of extras, check environment marker = d.get('environment') if marker: include = interpret(marker, env) if include: result.extend(d['requires']) for key in ('build', 'dev', 'test'): e = ':%s:' % key if e in extras: extras.remove(e) # A recursive call, but it should terminate since 'test' # has been removed from the extras reqts = self._data.get('%s_requires' % key, []) result.extend(self.get_requirements(reqts, extras=extras, env=env)) return result @property def dictionary(self): if self._legacy: return self._from_legacy() return self._data @property def dependencies(self): if self._legacy: raise NotImplementedError else: return extract_by_key(self._data, self.DEPENDENCY_KEYS) @dependencies.setter def dependencies(self, value): if self._legacy: raise NotImplementedError else: self._data.update(value) def _validate_mapping(self, mapping, scheme): if mapping.get('metadata_version') != self.METADATA_VERSION: raise MetadataUnrecognizedVersionError() missing = [] for key, exclusions in self.MANDATORY_KEYS.items(): if key not in mapping: if scheme not in exclusions: missing.append(key) if missing: msg = 'Missing metadata items: %s' % ', '.join(missing) raise MetadataMissingError(msg) for k, v in mapping.items(): self._validate_value(k, v, scheme) def validate(self): if self._legacy: missing, warnings = self._legacy.check(True) if missing or warnings: logger.warning('Metadata: missing: %s, 
warnings: %s', missing, warnings) else: self._validate_mapping(self._data, self.scheme) def todict(self): if self._legacy: return self._legacy.todict(True) else: result = extract_by_key(self._data, self.INDEX_KEYS) return result def _from_legacy(self): assert self._legacy and not self._data result = { 'metadata_version': self.METADATA_VERSION, 'generator': self.GENERATOR, } lmd = self._legacy.todict(True) # skip missing ones for k in ('name', 'version', 'license', 'summary', 'description', 'classifier'): if k in lmd: if k == 'classifier': nk = 'classifiers' else: nk = k result[nk] = lmd[k] kw = lmd.get('Keywords', []) if kw == ['']: kw = [] result['keywords'] = kw keys = (('requires_dist', 'run_requires'), ('setup_requires_dist', 'build_requires')) for ok, nk in keys: if ok in lmd and lmd[ok]: result[nk] = [{'requires': lmd[ok]}] result['provides'] = self.provides author = {} maintainer = {} return result LEGACY_MAPPING = { 'name': 'Name', 'version': 'Version', 'license': 'License', 'summary': 'Summary', 'description': 'Description', 'classifiers': 'Classifier', } def _to_legacy(self): def process_entries(entries): reqts = set() for e in entries: extra = e.get('extra') env = e.get('environment') rlist = e['requires'] for r in rlist: if not env and not extra: reqts.add(r) else: marker = '' if extra: marker = 'extra == "%s"' % extra if env: if marker: marker = '(%s) and %s' % (env, marker) else: marker = env reqts.add(';'.join((r, marker))) return reqts assert self._data and not self._legacy result = LegacyMetadata() nmd = self._data for nk, ok in self.LEGACY_MAPPING.items(): if nk in nmd: result[ok] = nmd[nk] r1 = process_entries(self.run_requires + self.meta_requires) r2 = process_entries(self.build_requires + self.dev_requires) if self.extras: result['Provides-Extra'] = sorted(self.extras) result['Requires-Dist'] = sorted(r1) result['Setup-Requires-Dist'] = sorted(r2) # TODO: other fields such as contacts return result def write(self, path=None, fileobj=None, 
legacy=False, skip_unknown=True): if [path, fileobj].count(None) != 1: raise ValueError('Exactly one of path and fileobj is needed') self.validate() if legacy: if self._legacy: legacy_md = self._legacy else: legacy_md = self._to_legacy() if path: legacy_md.write(path, skip_unknown=skip_unknown) else: legacy_md.write_file(fileobj, skip_unknown=skip_unknown) else: if self._legacy: d = self._from_legacy() else: d = self._data if fileobj: json.dump(d, fileobj, ensure_ascii=True, indent=2, sort_keys=True) else: with codecs.open(path, 'w', 'utf-8') as f: json.dump(d, f, ensure_ascii=True, indent=2, sort_keys=True) def add_requirements(self, requirements): if self._legacy: self._legacy.add_requirements(requirements) else: run_requires = self._data.setdefault('run_requires', []) always = None for entry in run_requires: if 'environment' not in entry and 'extra' not in entry: always = entry break if always is None: always = { 'requires': requirements } run_requires.insert(0, always) else: rset = set(always['requires']) | set(requirements) always['requires'] = sorted(rset) def __repr__(self): name = self.name or '(no name)' version = self.version or 'no version' return '<%s %s %s (%s)>' % (self.__class__.__name__, self.metadata_version, name, version)
gpl-3.0
RobCranfill/miditrans
changer.py
1
2952
#!/usr/bin/python3 # # Command-line app to write data for MPK mapper app # Note that we want Python 3, not 2! # robcranfill@gmail.com import sys SR18voiceNames_ = [ "kick", "snare_1", "snare_2", "highhat_1", "highhat_2", "highhat_3", "tom_1", "tom_2", "tom_3", "crash", "ride", "bell" ] # default assignment MPKmap_ = ["kick", "snare_1", "highhat_1", "highhat_3", "tom_1", "tom_3", "crash", "ride"] # show the choices def print_pads(): print("--- SR18 pads ---") i = 0 for v in SR18voiceNames_: i = i + 1 print("voice {0:2}: {1}".format(i, v)) def print_mapped_pads(): print("--- mapping ---") for i,v in enumerate(MPKmap_): print("pad {0}: {1}".format(i, v)) # return false iff we are done processing def process_command(): p = str(input("\nSet pad number: ")) if not p: return False v = input("To voice #? ") np = int(p) nv = int(v) voice = SR18voiceNames_[nv-1] print("voice {0} is '{1}'".format(nv, voice)) MPKmap_[np] = voice return True def output_pad_mapping_code(filehandle): filehandle.write("\n\n# Written by python front-end\n" \ "mapMPKPadsToSR18Pads = [\n" \ "\tChannelFilter(10) >> [\n" \ # "\t\t~KeyFilter(notes=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]),\n" \ "\t\t~KeyFilter(notes=[0,1,2,3,4,5,6,7]),\n" \ "\t\tKeyFilter(notes=[0]) >> pad_{0},\n" \ "\t\tKeyFilter(notes=[1]) >> pad_{1},\n" \ "\t\tKeyFilter(notes=[2]) >> pad_{2},\n" \ "\t\tKeyFilter(notes=[3]) >> pad_{3},\n" \ "\t\tKeyFilter(notes=[4]) >> pad_{4},\n" \ "\t\tKeyFilter(notes=[5]) >> pad_{5},\n" \ "\t\tKeyFilter(notes=[6]) >> pad_{6},\n" \ "\t\tKeyFilter(notes=[7]) >> pad_{7}\n" \ # " KeyFilter(notes=[8]) >> pad_snare_1,\n" \ # " KeyFilter(notes=[9]) >> pad_kick,\n" \ # " KeyFilter(notes=[10]) >> pad_highhat_1,\n" \ # " KeyFilter(notes=[11]) >> pad_highhat_3,\n" \ # " KeyFilter(notes=[12]) >> pad_tom_1,\n" \ # " KeyFilter(notes=[13]) >> pad_tom_3,\n" \ # " KeyFilter(notes=[14]) >> pad_ride,\n" \ # " KeyFilter(notes=[15]) >> pad_crash\n" \ "\t\t]\n" \ "\t]\n\n".format(MPKmap_[0], MPKmap_[1], MPKmap_[2], 
MPKmap_[3], MPKmap_[4], MPKmap_[5], MPKmap_[6], MPKmap_[7])) def output_mididings_code(): with open('midi_mapper.py', 'w') as outfile: with open("changer_1.py.part") as infile: outfile.write(infile.read()) output_pad_mapping_code(outfile) with open("changer_2.py.part") as infile: outfile.write(infile.read()) # start while (True): print_pads() print_mapped_pads() if not process_command(): print("gubbeye!") break output_mididings_code() # end
gpl-2.0
dagwieers/ansible-modules-extras
packaging/language/npm.py
73
8566
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2013, Chris Hoffman <christopher.hoffman@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = ''' --- module: npm short_description: Manage node.js packages with npm description: - Manage node.js packages with Node Package Manager (npm) version_added: 1.2 author: "Chris Hoffman (@chrishoffman)" options: name: description: - The name of a node.js library to install required: false path: description: - The base path where to install the node.js libraries required: false version: description: - The version to be installed required: false global: description: - Install the node.js library globally required: false default: no choices: [ "yes", "no" ] executable: description: - The executable location for npm. - This is useful if you are using a version manager, such as nvm required: false ignore_scripts: description: - Use the --ignore-scripts flag when installing. required: false choices: [ "yes", "no" ] default: no version_added: "1.8" production: description: - Install dependencies in production mode, excluding devDependencies required: false choices: [ "yes", "no" ] default: no registry: description: - The registry to install modules from. 
required: false version_added: "1.6" state: description: - The state of the node.js library required: false default: present choices: [ "present", "absent", "latest" ] ''' EXAMPLES = ''' description: Install "coffee-script" node.js package. - npm: name=coffee-script path=/app/location description: Install "coffee-script" node.js package on version 1.6.1. - npm: name=coffee-script version=1.6.1 path=/app/location description: Install "coffee-script" node.js package globally. - npm: name=coffee-script global=yes description: Remove the globally package "coffee-script". - npm: name=coffee-script global=yes state=absent description: Install "coffee-script" node.js package from custom registry. - npm: name=coffee-script registry=http://registry.mysite.com description: Install packages based on package.json. - npm: path=/app/location description: Update packages based on package.json to their latest version. - npm: path=/app/location state=latest description: Install packages based on package.json using the npm installed with nvm v0.10.1. 
- npm: path=/app/location executable=/opt/nvm/v0.10.1/bin/npm state=present ''' import os try: import json except ImportError: import simplejson as json class Npm(object): def __init__(self, module, **kwargs): self.module = module self.glbl = kwargs['glbl'] self.name = kwargs['name'] self.version = kwargs['version'] self.path = kwargs['path'] self.registry = kwargs['registry'] self.production = kwargs['production'] self.ignore_scripts = kwargs['ignore_scripts'] if kwargs['executable']: self.executable = kwargs['executable'].split(' ') else: self.executable = [module.get_bin_path('npm', True)] if kwargs['version']: self.name_version = self.name + '@' + self.version else: self.name_version = self.name def _exec(self, args, run_in_check_mode=False, check_rc=True): if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): cmd = self.executable + args if self.glbl: cmd.append('--global') if self.production: cmd.append('--production') if self.ignore_scripts: cmd.append('--ignore-scripts') if self.name: cmd.append(self.name_version) if self.registry: cmd.append('--registry') cmd.append(self.registry) #If path is specified, cd into that path and run the command. 
cwd = None if self.path: self.path = os.path.abspath(os.path.expanduser(self.path)) if not os.path.exists(self.path): os.makedirs(self.path) if not os.path.isdir(self.path): self.module.fail_json(msg="path %s is not a directory" % self.path) cwd = self.path rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd) return out return '' def list(self): cmd = ['list', '--json'] installed = list() missing = list() data = json.loads(self._exec(cmd, True, False)) if 'dependencies' in data: for dep in data['dependencies']: if 'missing' in data['dependencies'][dep] and data['dependencies'][dep]['missing']: missing.append(dep) elif 'invalid' in data['dependencies'][dep] and data['dependencies'][dep]['invalid']: missing.append(dep) else: installed.append(dep) if self.name and self.name not in installed: missing.append(self.name) #Named dependency not installed else: missing.append(self.name) return installed, missing def install(self): return self._exec(['install']) def update(self): return self._exec(['update']) def uninstall(self): return self._exec(['uninstall']) def list_outdated(self): outdated = list() data = self._exec(['outdated'], True, False) for dep in data.splitlines(): if dep: # node.js v0.10.22 changed the `npm outdated` module separator # from "@" to " ". Split on both for backwards compatibility. 
pkg, other = re.split('\s|@', dep, 1) outdated.append(pkg) return outdated def main(): arg_spec = dict( name=dict(default=None), path=dict(default=None), version=dict(default=None), production=dict(default='no', type='bool'), executable=dict(default=None), registry=dict(default=None), state=dict(default='present', choices=['present', 'absent', 'latest']), ignore_scripts=dict(default=False, type='bool'), ) arg_spec['global'] = dict(default='no', type='bool') module = AnsibleModule( argument_spec=arg_spec, supports_check_mode=True ) name = module.params['name'] path = module.params['path'] version = module.params['version'] glbl = module.params['global'] production = module.params['production'] executable = module.params['executable'] registry = module.params['registry'] state = module.params['state'] ignore_scripts = module.params['ignore_scripts'] if not path and not glbl: module.fail_json(msg='path must be specified when not using global') if state == 'absent' and not name: module.fail_json(msg='uninstalling a package is only available for named packages') npm = Npm(module, name=name, path=path, version=version, glbl=glbl, production=production, \ executable=executable, registry=registry, ignore_scripts=ignore_scripts) changed = False if state == 'present': installed, missing = npm.list() if len(missing): changed = True npm.install() elif state == 'latest': installed, missing = npm.list() outdated = npm.list_outdated() if len(missing) or len(outdated): changed = True npm.install() else: #absent installed, missing = npm.list() if name in installed: changed = True npm.uninstall() module.exit_json(changed=changed) # import module snippets from ansible.module_utils.basic import * main()
gpl-3.0
jstrobl/rts2
scripts/rts2saf/rts2saf_exclusive.py
3
6092
#!/usr/bin/env python
# (C) 2017, Markus Wildi, wildi.markus@bluewin.ch
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
#
# Or visit http://www.gnu.org/licenses/gpl.html.
#
'''
start rts2asaf exclusively (now)

e.g. in crontab (replace $HOME by full path)
rts2-scriptexec -d C0 -s ' exe $HOME/rts2/script/rts2saf/rts2saf_start.py '
rts2-scriptexec -d C0 -s ' exe $HOME/rts2/script/rts2saf/rts2saf_stop.py '
'''

__author__ = 'wildi.markus@bluewin.ch'

import os
import sys
import time
import logging
import argparse
import datetime

import rts2.scriptcomm

# Target ids that must never be interrupted (see the ToDo near the argument
# parser: "B2 >50000, 15 and ... 18").  This used to be only a commented-out
# ToDo, which made the membership test in initial_values() raise a NameError.
avoid_tar_ids = [15, 32]


class Script (rts2.scriptcomm.Rts2Comm):
    """Enable/disable the RTS2 selector so rts2saf can run exclusively."""

    def __init__(self, lg=None, tar_id=None):
        rts2.scriptcomm.Rts2Comm.__init__(self)
        self.lg = lg
        self.tar_id = tar_id

    def initial_values(self):
        """Inspect centrald/EXEC state and exit(1) if rts2saf must not start.

        Bails out on bad weather, a system that is off, an ongoing GLORIA
        teleoperation, a selected GRB target, or a target on the avoid list.
        """
        try:
            state = self.getState('centrald')
            # lower 6 bits: daytime/weather state; bit 31: system-off flag
            if (state & 0x3f) < 10 and not(state & 0x80000000):
                self.lg.info('good weather and on')
            else:
                self.lg.info('either bad weather or system not on, exiting')
                sys.exit(1)
        except Exception as ex:
            # fixed: ex was passed as a stray positional argument instead of
            # being formatted into the message
            self.lg.error('exception: {}'.format(ex))
            sys.exit(1)

        current = self.getValue('current', 'EXEC')
        current_name = self.getValue('current_name', 'EXEC')
        current_type = self.getValue('current_type', 'EXEC')
        current_id = self.getValue('current', 'EXEC')
        self.lg.info('initial_values: EXEC current: {}, current_name: {}, current_type: {}'.format(current, current_name, current_type))

        # check for GLORIA and GRB, and exit if so
        if 'GLORIA teleoperation' in current_name:
            # reserved observing time
            self.lg.info('initial_values: there is a ongoing GLORIA teleoperation, exiting')
            sys.exit(1)
        elif 'G' in current_type:
            # it is a GRB
            self.lg.info('initial_values: there is now a GRB target selected, exiting')
            sys.exit(1)
        elif int(current_id) in avoid_tar_ids:
            self.lg.info('found tar_id: {} in list of excluded targets, exiting'.format(current_id))
            sys.exit(1)
        else:
            self.lg.info('initial_values: neither GLORIA teleoperation, nor GRB target, nor target to avoid (avoid_tar_ids)')

        selector_next = self.getValueFloat('selector_next', 'EXEC')
        self.lg.info('initial_values: EXEC selector_next: {}'.format(selector_next))
        selector_enabled = self.getValueFloat('selector_enabled', 'SEL')
        self.lg.info('initial_values: SEL selector_enabled: {}'.format(selector_enabled))

    def start_rts2saf(self):
        """Disable the selector and force EXEC onto our target right now."""
        self.setValue('selector_enabled', '0', 'SEL')
        self.setValue('selector_next', '0', 'EXEC')
        self.sendCommand('now {}'.format(self.tar_id), 'EXEC')
        # ToDo ad hoc ok for B2: give EXEC time to settle before re-pointing
        time.sleep(20.)
        self.sendCommand('now 5', 'EXEC')
        self.lg.info('start_rts2saf: disabled SEL, EXEC and sent "now 5" to EXEC')

    def stop_rts2saf(self):
        """Re-enable the selector after an exclusive rts2saf run."""
        self.setValue('selector_enabled', '1', 'SEL')
        self.setValue('selector_next', '1', 'EXEC')
        self.lg.info('stop_rts2saf: enabled SEL')


if __name__ == '__main__':

    startTime = datetime.datetime.now()
    # since rts2 can not pass options, any option needs a decent default value
    script = os.path.basename(__file__)
    parser = argparse.ArgumentParser(prog=script, description='start rts2asaf exclusively, use sym links rts2saf_(start|stop).py in case of rts2-scriptexec')

    parser.add_argument('--level', dest='level', default='INFO', help=': %(default)s, debug level')
    parser.add_argument('--toconsole', dest='toconsole', action='store_true', default=False, help=': %(default)s, log to console')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--start', dest='start', action='store_true', default=False, help=': %(default)s, start rts2saf')
    group.add_argument('--stop', dest='stop', action='store_true', default=False, help=': %(default)s, stop rts2saf')
    parser.add_argument('--tar-id', dest='tar_id', action='store', default=539, help=': %(default)s, set mount to tar_id=xxx, see your postgres database')
    # ToDo
    # targets not to interrupt:
    # B2 >50000, 15 and ... 18

    args = parser.parse_args()

    if args.toconsole:
        args.level = 'DEBUG'

    # whether invoked as rts2saf_start.py or rts2saf_stop.py (sym links)
    script = os.path.split(sys.argv[0].replace('.py', ''))[-1]
    filename = '/tmp/rts2saf_exclusive.log'  # ToDo datetime, name of the script
    logformat = '%(asctime)s:%(name)s:%(levelname)s:%(message)s'
    logging.basicConfig(filename=filename, level=args.level.upper(), format=logformat)
    logger = logging.getLogger()

    if args.toconsole:
        # http://www.mglerner.com/blog/?p=8
        soh = logging.StreamHandler(sys.stdout)
        soh.setLevel(args.level)  # fixed: was called twice
        logger.addHandler(soh)

    sc = Script(lg=logger, tar_id=args.tar_id)
    logger.info('rts2saf_exclusiv: query initial state')
    sc.initial_values()
    if 'start' in script or args.start:
        logger.info('rts2saf_exclusiv: enable rts2saf')
        sc.start_rts2saf()
    elif 'stop' in script or args.stop:
        logger.info('rts2saf_exclusiv: disable rts2saf')
        sc.stop_rts2saf()
    else:
        # logger.warn is a deprecated alias; use warning
        logger.warning('no argument given, doing nothing')

    logger.info('rts2saf_exclusive: DONE')
lgpl-3.0