repo_name
stringlengths
5
100
path
stringlengths
4
375
copies
stringclasses
991 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
yiliaofan/brotli
setup.py
67
7796
import distutils from distutils.core import setup, Extension from distutils.command.build_ext import build_ext from distutils.cmd import Command import platform import os import re CURR_DIR = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) # when compiling for Windows Python 2.7, force distutils to use Visual Studio # 2010 instead of 2008, as the latter doesn't support c++0x if platform.system() == 'Windows': try: import distutils.msvc9compiler except distutils.errors.DistutilsPlatformError: pass # importing msvc9compiler raises when running under MinGW else: orig_find_vcvarsall = distutils.msvc9compiler.find_vcvarsall def patched_find_vcvarsall(version): return orig_find_vcvarsall(version if version != 9.0 else 10.0) distutils.msvc9compiler.find_vcvarsall = patched_find_vcvarsall def get_version(): """ Return BROTLI_VERSION string as defined in 'brotlimodule.cc' file. """ brotlimodule = os.path.join(CURR_DIR, 'python', 'brotlimodule.cc') with open(brotlimodule, 'r') as f: for line in f: m = re.match(r'#define\sBROTLI_VERSION\s"(.*)"', line) if m: return m.group(1) return "" class TestCommand(Command): """ Run all *_test.py scripts in 'tests' folder with the same Python interpreter used to run setup.py. 
""" user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): import sys, subprocess, glob test_dir = os.path.join(CURR_DIR, 'python', 'tests') os.chdir(test_dir) for test in glob.glob("*_test.py"): try: subprocess.check_call([sys.executable, test]) except subprocess.CalledProcessError: raise SystemExit(1) class BuildExt(build_ext): def get_source_files(self): filenames = build_ext.get_source_files(self) for ext in self.extensions: filenames.extend(ext.depends) return filenames def build_extension(self, ext): c_sources = [] cxx_sources = [] for source in ext.sources: if source.endswith(".c"): c_sources.append(source) else: cxx_sources.append(source) extra_args = ext.extra_compile_args or [] objects = [] for lang, sources in (("c", c_sources), ("c++", cxx_sources)): if lang == "c++": if platform.system() == "Darwin": extra_args.extend(["-stdlib=libc++", "-mmacosx-version-min=10.7"]) if self.compiler.compiler_type in ["unix", "cygwin", "mingw32"]: extra_args.append("-std=c++0x") elif self.compiler.compiler_type == "msvc": extra_args.append("/EHsc") macros = ext.define_macros[:] if platform.system() == "Darwin": macros.append(("OS_MACOSX", "1")) elif self.compiler.compiler_type == "mingw32": # On Windows Python 2.7, pyconfig.h defines "hypot" as "_hypot", # This clashes with GCC's cmath, and causes compilation errors when # building under MinGW: http://bugs.python.org/issue11566 macros.append(("_hypot", "hypot")) for undef in ext.undef_macros: macros.append((undef,)) objs = self.compiler.compile(sources, output_dir=self.build_temp, macros=macros, include_dirs=ext.include_dirs, debug=self.debug, extra_postargs=extra_args, depends=ext.depends) objects.extend(objs) self._built_objects = objects[:] if ext.extra_objects: objects.extend(ext.extra_objects) extra_args = ext.extra_link_args or [] # when using GCC on Windows, we statically link libgcc and libstdc++, # so that we don't need to package extra DLLs if 
self.compiler.compiler_type == "mingw32": extra_args.extend(['-static-libgcc', '-static-libstdc++']) ext_path = self.get_ext_fullpath(ext.name) # Detect target language, if not provided language = ext.language or self.compiler.detect_language(sources) self.compiler.link_shared_object( objects, ext_path, libraries=self.get_libraries(ext), library_dirs=ext.library_dirs, runtime_library_dirs=ext.runtime_library_dirs, extra_postargs=extra_args, export_symbols=self.get_export_symbols(ext), debug=self.debug, build_temp=self.build_temp, target_lang=language) brotli = Extension("brotli", sources=[ "python/brotlimodule.cc", "enc/backward_references.cc", "enc/block_splitter.cc", "enc/brotli_bit_stream.cc", "enc/encode.cc", "enc/entropy_encode.cc", "enc/histogram.cc", "enc/literal_cost.cc", "enc/metablock.cc", "enc/static_dict.cc", "enc/streams.cc", "dec/bit_reader.c", "dec/decode.c", "dec/huffman.c", "dec/streams.c", "dec/state.c", ], depends=[ "enc/backward_references.h", "enc/bit_cost.h", "enc/block_splitter.h", "enc/brotli_bit_stream.h", "enc/cluster.h", "enc/command.h", "enc/context.h", "enc/dictionary.h", "enc/dictionary_hash.h", "enc/encode.h", "enc/entropy_encode.h", "enc/fast_log.h", "enc/find_match_length.h", "enc/hash.h", "enc/histogram.h", "enc/literal_cost.h", "enc/metablock.h", "enc/port.h", "enc/prefix.h", "enc/ringbuffer.h", "enc/static_dict.h", "enc/static_dict_lut.h", "enc/streams.h", "enc/transform.h", "enc/write_bits.h", "dec/bit_reader.h", "dec/context.h", "dec/decode.h", "dec/dictionary.h", "dec/huffman.h", "dec/prefix.h", "dec/port.h", "dec/streams.h", "dec/transform.h", "dec/types.h", "dec/state.h", ], language="c++", ) setup( name="Brotli", version=get_version(), url="https://github.com/google/brotli", description="Python binding of the Brotli compression library", author="Khaled Hosny", author_email="khaledhosny@eglug.org", license="Apache 2.0", ext_modules=[brotli], cmdclass={ 'build_ext': BuildExt, 'test': TestCommand }, )
apache-2.0
mosesfistos1/beetbox
beetsplug/web/__init__.py
4
9737
# -*- coding: utf-8 -*- # This file is part of beets. # Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. """A Web interface to beets.""" from __future__ import division, absolute_import, print_function from beets.plugins import BeetsPlugin from beets import ui from beets import util import beets.library import flask from flask import g from werkzeug.routing import BaseConverter, PathConverter import os import json # Utilities. def _rep(obj, expand=False): """Get a flat -- i.e., JSON-ish -- representation of a beets Item or Album object. For Albums, `expand` dictates whether tracks are included. """ out = dict(obj) if isinstance(obj, beets.library.Item): del out['path'] # Get the size (in bytes) of the backing file. This is useful # for the Tomahawk resolver API. 
try: out['size'] = os.path.getsize(util.syspath(obj.path)) except OSError: out['size'] = 0 return out elif isinstance(obj, beets.library.Album): del out['artpath'] if expand: out['items'] = [_rep(item) for item in obj.items()] return out def json_generator(items, root, expand=False): """Generator that dumps list of beets Items or Albums as JSON :param root: root key for JSON :param items: list of :class:`Item` or :class:`Album` to dump :param expand: If true every :class:`Album` contains its items in the json representation :returns: generator that yields strings """ yield '{"%s":[' % root first = True for item in items: if first: first = False else: yield ',' yield json.dumps(_rep(item, expand=expand)) yield ']}' def is_expand(): """Returns whether the current request is for an expanded response.""" return flask.request.args.get('expand') is not None def resource(name): """Decorates a function to handle RESTful HTTP requests for a resource. """ def make_responder(retriever): def responder(ids): entities = [retriever(id) for id in ids] entities = [entity for entity in entities if entity] if len(entities) == 1: return flask.jsonify(_rep(entities[0], expand=is_expand())) elif entities: return app.response_class( json_generator(entities, root=name), mimetype='application/json' ) else: return flask.abort(404) responder.__name__ = 'get_{0}'.format(name) return responder return make_responder def resource_query(name): """Decorates a function to handle RESTful HTTP queries for resources. """ def make_responder(query_func): def responder(queries): return app.response_class( json_generator( query_func(queries), root='results', expand=is_expand() ), mimetype='application/json' ) responder.__name__ = 'query_{0}'.format(name) return responder return make_responder def resource_list(name): """Decorates a function to handle RESTful HTTP request for a list of resources. 
""" def make_responder(list_all): def responder(): return app.response_class( json_generator(list_all(), root=name, expand=is_expand()), mimetype='application/json' ) responder.__name__ = 'all_{0}'.format(name) return responder return make_responder def _get_unique_table_field_values(model, field, sort_field): """ retrieve all unique values belonging to a key from a model """ if field not in model.all_keys() or sort_field not in model.all_keys(): raise KeyError with g.lib.transaction() as tx: rows = tx.query('SELECT DISTINCT "{0}" FROM "{1}" ORDER BY "{2}"' .format(field, model._table, sort_field)) return [row[0] for row in rows] class IdListConverter(BaseConverter): """Converts comma separated lists of ids in urls to integer lists. """ def to_python(self, value): ids = [] for id in value.split(','): try: ids.append(int(id)) except ValueError: pass return ids def to_url(self, value): return ','.join(value) class QueryConverter(PathConverter): """Converts slash separated lists of queries in the url to string list. """ def to_python(self, value): return value.split('/') def to_url(self, value): return ','.join(value) # Flask setup. app = flask.Flask(__name__) app.url_map.converters['idlist'] = IdListConverter app.url_map.converters['query'] = QueryConverter @app.before_request def before_request(): g.lib = app.config['lib'] # Items. 
@app.route('/item/<idlist:ids>') @resource('items') def get_item(id): return g.lib.get_item(id) @app.route('/item/') @app.route('/item/query/') @resource_list('items') def all_items(): return g.lib.items() @app.route('/item/<int:item_id>/file') def item_file(item_id): item = g.lib.get_item(item_id) response = flask.send_file( util.py3_path(item.path), as_attachment=True, attachment_filename=os.path.basename(util.py3_path(item.path)), ) response.headers['Content-Length'] = os.path.getsize(item.path) return response @app.route('/item/query/<query:queries>') @resource_query('items') def item_query(queries): return g.lib.items(queries) @app.route('/item/values/<string:key>') def item_unique_field_values(key): sort_key = flask.request.args.get('sort_key', key) try: values = _get_unique_table_field_values(beets.library.Item, key, sort_key) except KeyError: return flask.abort(404) return flask.jsonify(values=values) # Albums. @app.route('/album/<idlist:ids>') @resource('albums') def get_album(id): return g.lib.get_album(id) @app.route('/album/') @app.route('/album/query/') @resource_list('albums') def all_albums(): return g.lib.albums() @app.route('/album/query/<query:queries>') @resource_query('albums') def album_query(queries): return g.lib.albums(queries) @app.route('/album/<int:album_id>/art') def album_art(album_id): album = g.lib.get_album(album_id) if album.artpath: return flask.send_file(album.artpath) else: return flask.abort(404) @app.route('/album/values/<string:key>') def album_unique_field_values(key): sort_key = flask.request.args.get('sort_key', key) try: values = _get_unique_table_field_values(beets.library.Album, key, sort_key) except KeyError: return flask.abort(404) return flask.jsonify(values=values) # Artists. @app.route('/artist/') def all_artists(): with g.lib.transaction() as tx: rows = tx.query("SELECT DISTINCT albumartist FROM albums") all_artists = [row[0] for row in rows] return flask.jsonify(artist_names=all_artists) # Library information. 
@app.route('/stats') def stats(): with g.lib.transaction() as tx: item_rows = tx.query("SELECT COUNT(*) FROM items") album_rows = tx.query("SELECT COUNT(*) FROM albums") return flask.jsonify({ 'items': item_rows[0][0], 'albums': album_rows[0][0], }) # UI. @app.route('/') def home(): return flask.render_template('index.html') # Plugin hook. class WebPlugin(BeetsPlugin): def __init__(self): super(WebPlugin, self).__init__() self.config.add({ 'host': u'127.0.0.1', 'port': 8337, 'cors': '', }) def commands(self): cmd = ui.Subcommand('web', help=u'start a Web interface') cmd.parser.add_option(u'-d', u'--debug', action='store_true', default=False, help=u'debug mode') def func(lib, opts, args): args = ui.decargs(args) if args: self.config['host'] = args.pop(0) if args: self.config['port'] = int(args.pop(0)) app.config['lib'] = lib # Normalizes json output app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False # Enable CORS if required. if self.config['cors']: self._log.info(u'Enabling CORS with origin: {0}', self.config['cors']) from flask.ext.cors import CORS app.config['CORS_ALLOW_HEADERS'] = "Content-Type" app.config['CORS_RESOURCES'] = { r"/*": {"origins": self.config['cors'].get(str)} } CORS(app) # Start the web application. app.run(host=self.config['host'].as_str(), port=self.config['port'].get(int), debug=opts.debug, threaded=True) cmd.func = func return [cmd]
mit
hernandito/SickRage
lib/markupsafe/_constants.py
1535
4795
# -*- coding: utf-8 -*- """ markupsafe._constants ~~~~~~~~~~~~~~~~~~~~~ Highlevel implementation of the Markup string. :copyright: (c) 2010 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ HTML_ENTITIES = { 'AElig': 198, 'Aacute': 193, 'Acirc': 194, 'Agrave': 192, 'Alpha': 913, 'Aring': 197, 'Atilde': 195, 'Auml': 196, 'Beta': 914, 'Ccedil': 199, 'Chi': 935, 'Dagger': 8225, 'Delta': 916, 'ETH': 208, 'Eacute': 201, 'Ecirc': 202, 'Egrave': 200, 'Epsilon': 917, 'Eta': 919, 'Euml': 203, 'Gamma': 915, 'Iacute': 205, 'Icirc': 206, 'Igrave': 204, 'Iota': 921, 'Iuml': 207, 'Kappa': 922, 'Lambda': 923, 'Mu': 924, 'Ntilde': 209, 'Nu': 925, 'OElig': 338, 'Oacute': 211, 'Ocirc': 212, 'Ograve': 210, 'Omega': 937, 'Omicron': 927, 'Oslash': 216, 'Otilde': 213, 'Ouml': 214, 'Phi': 934, 'Pi': 928, 'Prime': 8243, 'Psi': 936, 'Rho': 929, 'Scaron': 352, 'Sigma': 931, 'THORN': 222, 'Tau': 932, 'Theta': 920, 'Uacute': 218, 'Ucirc': 219, 'Ugrave': 217, 'Upsilon': 933, 'Uuml': 220, 'Xi': 926, 'Yacute': 221, 'Yuml': 376, 'Zeta': 918, 'aacute': 225, 'acirc': 226, 'acute': 180, 'aelig': 230, 'agrave': 224, 'alefsym': 8501, 'alpha': 945, 'amp': 38, 'and': 8743, 'ang': 8736, 'apos': 39, 'aring': 229, 'asymp': 8776, 'atilde': 227, 'auml': 228, 'bdquo': 8222, 'beta': 946, 'brvbar': 166, 'bull': 8226, 'cap': 8745, 'ccedil': 231, 'cedil': 184, 'cent': 162, 'chi': 967, 'circ': 710, 'clubs': 9827, 'cong': 8773, 'copy': 169, 'crarr': 8629, 'cup': 8746, 'curren': 164, 'dArr': 8659, 'dagger': 8224, 'darr': 8595, 'deg': 176, 'delta': 948, 'diams': 9830, 'divide': 247, 'eacute': 233, 'ecirc': 234, 'egrave': 232, 'empty': 8709, 'emsp': 8195, 'ensp': 8194, 'epsilon': 949, 'equiv': 8801, 'eta': 951, 'eth': 240, 'euml': 235, 'euro': 8364, 'exist': 8707, 'fnof': 402, 'forall': 8704, 'frac12': 189, 'frac14': 188, 'frac34': 190, 'frasl': 8260, 'gamma': 947, 'ge': 8805, 'gt': 62, 'hArr': 8660, 'harr': 8596, 'hearts': 9829, 'hellip': 8230, 'iacute': 237, 'icirc': 238, 'iexcl': 161, 'igrave': 
236, 'image': 8465, 'infin': 8734, 'int': 8747, 'iota': 953, 'iquest': 191, 'isin': 8712, 'iuml': 239, 'kappa': 954, 'lArr': 8656, 'lambda': 955, 'lang': 9001, 'laquo': 171, 'larr': 8592, 'lceil': 8968, 'ldquo': 8220, 'le': 8804, 'lfloor': 8970, 'lowast': 8727, 'loz': 9674, 'lrm': 8206, 'lsaquo': 8249, 'lsquo': 8216, 'lt': 60, 'macr': 175, 'mdash': 8212, 'micro': 181, 'middot': 183, 'minus': 8722, 'mu': 956, 'nabla': 8711, 'nbsp': 160, 'ndash': 8211, 'ne': 8800, 'ni': 8715, 'not': 172, 'notin': 8713, 'nsub': 8836, 'ntilde': 241, 'nu': 957, 'oacute': 243, 'ocirc': 244, 'oelig': 339, 'ograve': 242, 'oline': 8254, 'omega': 969, 'omicron': 959, 'oplus': 8853, 'or': 8744, 'ordf': 170, 'ordm': 186, 'oslash': 248, 'otilde': 245, 'otimes': 8855, 'ouml': 246, 'para': 182, 'part': 8706, 'permil': 8240, 'perp': 8869, 'phi': 966, 'pi': 960, 'piv': 982, 'plusmn': 177, 'pound': 163, 'prime': 8242, 'prod': 8719, 'prop': 8733, 'psi': 968, 'quot': 34, 'rArr': 8658, 'radic': 8730, 'rang': 9002, 'raquo': 187, 'rarr': 8594, 'rceil': 8969, 'rdquo': 8221, 'real': 8476, 'reg': 174, 'rfloor': 8971, 'rho': 961, 'rlm': 8207, 'rsaquo': 8250, 'rsquo': 8217, 'sbquo': 8218, 'scaron': 353, 'sdot': 8901, 'sect': 167, 'shy': 173, 'sigma': 963, 'sigmaf': 962, 'sim': 8764, 'spades': 9824, 'sub': 8834, 'sube': 8838, 'sum': 8721, 'sup': 8835, 'sup1': 185, 'sup2': 178, 'sup3': 179, 'supe': 8839, 'szlig': 223, 'tau': 964, 'there4': 8756, 'theta': 952, 'thetasym': 977, 'thinsp': 8201, 'thorn': 254, 'tilde': 732, 'times': 215, 'trade': 8482, 'uArr': 8657, 'uacute': 250, 'uarr': 8593, 'ucirc': 251, 'ugrave': 249, 'uml': 168, 'upsih': 978, 'upsilon': 965, 'uuml': 252, 'weierp': 8472, 'xi': 958, 'yacute': 253, 'yen': 165, 'yuml': 255, 'zeta': 950, 'zwj': 8205, 'zwnj': 8204 }
gpl-3.0
levixie/zulip
api/integrations/asana/zulip_asana_config.py
124
2157
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright © 2014 Zulip, Inc. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. ### REQUIRED CONFIGURATION ### # Change these values to your Asana credentials. ASANA_API_KEY = "0123456789abcdef0123456789abcdef" # Change these values to the credentials for your Asana bot. ZULIP_USER = "asana-bot@example.com" ZULIP_API_KEY = "0123456789abcdef0123456789abcdef" # The Zulip stream that will receive Asana task updates. ZULIP_STREAM_NAME = "asana" ### OPTIONAL CONFIGURATION ### # Set to None for logging to stdout when testing, and to a file for # logging in production. #LOG_FILE = "/var/tmp/zulip_asana.log" LOG_FILE = None # This file is used to resume this mirror in case the script shuts down. # It is required and needs to be writeable. RESUME_FILE = "/var/tmp/zulip_asana.state" # When initially started, how many hours of messages to include. 
ASANA_INITIAL_HISTORY_HOURS = 1 # Set this to your Zulip API server URI ZULIP_SITE = "https://api.zulip.com" # If properly installed, the Zulip API should be in your import # path, but if not, set a custom path below ZULIP_API_PATH = None
apache-2.0
maciekcc/tensorflow
tensorflow/tools/test/run_and_gather_logs.py
137
4918
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Test runner for TensorFlow tests.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import os import shlex from string import maketrans import sys import time from google.protobuf import json_format from google.protobuf import text_format from tensorflow.core.util import test_log_pb2 from tensorflow.python.platform import app from tensorflow.python.platform import gfile from tensorflow.python.platform import test from tensorflow.python.platform import tf_logging from tensorflow.tools.test import run_and_gather_logs_lib # pylint: disable=g-import-not-at-top # pylint: disable=g-bad-import-order # pylint: disable=unused-import # Note: cpuinfo and psutil are not installed for you in the TensorFlow # OSS tree. They are installable via pip. try: import cpuinfo import psutil except ImportError as e: tf_logging.error("\n\n\nERROR: Unable to import necessary library: {}. 
" "Issuing a soft exit.\n\n\n".format(e)) sys.exit(0) # pylint: enable=g-bad-import-order # pylint: enable=unused-import FLAGS = None def gather_build_configuration(): build_config = test_log_pb2.BuildConfiguration() build_config.mode = FLAGS.compilation_mode # Include all flags except includes cc_flags = [ flag for flag in shlex.split(FLAGS.cc_flags) if not flag.startswith("-i") ] build_config.cc_flags.extend(cc_flags) return build_config def main(unused_args): name = FLAGS.name test_name = FLAGS.test_name test_args = FLAGS.test_args benchmark_type = FLAGS.benchmark_type test_results, _ = run_and_gather_logs_lib.run_and_gather_logs( name, test_name=test_name, test_args=test_args, benchmark_type=benchmark_type) # Additional bits we receive from bazel test_results.build_configuration.CopyFrom(gather_build_configuration()) if not FLAGS.test_log_output_dir: print(text_format.MessageToString(test_results)) return if FLAGS.test_log_output_filename: file_name = FLAGS.test_log_output_filename else: file_name = (name.strip("/").translate(maketrans("/:", "__")) + time.strftime("%Y%m%d%H%M%S", time.gmtime())) if FLAGS.test_log_output_use_tmpdir: tmpdir = test.get_temp_dir() output_path = os.path.join(tmpdir, FLAGS.test_log_output_dir, file_name) else: output_path = os.path.join( os.path.abspath(FLAGS.test_log_output_dir), file_name) json_test_results = json_format.MessageToJson(test_results) gfile.GFile(output_path + ".json", "w").write(json_test_results) tf_logging.info("Test results written to: %s" % output_path) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.register( "type", "bool", lambda v: v.lower() in ("true", "t", "y", "yes")) parser.add_argument( "--name", type=str, default="", help="Benchmark target identifier.") parser.add_argument( "--test_name", type=str, default="", help="Test target to run.") parser.add_argument( "--benchmark_type", type=str, default="", help="BenchmarkType enum string (benchmark type).") parser.add_argument( 
"--test_args", type=str, default="", help="Test arguments, space separated.") parser.add_argument( "--test_log_output_use_tmpdir", type="bool", nargs="?", const=True, default=False, help="Store the log output into tmpdir?") parser.add_argument( "--compilation_mode", type=str, default="", help="Mode used during this build (e.g. opt, dbg).") parser.add_argument( "--cc_flags", type=str, default="", help="CC flags used during this build.") parser.add_argument( "--test_log_output_dir", type=str, default="", help="Directory to write benchmark results to.") parser.add_argument( "--test_log_output_filename", type=str, default="", help="Filename to output benchmark results to. If the filename is not " "specified, it will be automatically created based on --name " "and current time.") FLAGS, unparsed = parser.parse_known_args() app.run(main=main, argv=[sys.argv[0]] + unparsed)
apache-2.0
fossoult/odoo
addons/l10n_eu_service/__openerp__.py
254
3277
# -*- encoding: utf-8 -*- ############################################################################## # # Odoo, Open Source Business Applications # Copyright (C) 2015 Odoo S.A. <http://www.odoo.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'EU Mini One Stop Shop (MOSS)', 'version': '1.0', 'author': 'Odoo SA', 'website': 'http://www.odoo.com', 'category': '', 'description': """ EU Mini One Stop Shop (MOSS) VAT for telecommunications, broadcasting and electronic services ============================================================================================= As of January 1rst, 2015, telecommunications, broadcasting and electronic services sold within the European Union have to be always taxed in the country where the customer belongs. In order to simplify the application of this EU directive, the Mini One Stop Shop (MOSS) registration scheme allows businesses to make a unique tax declaration. This module makes it possible by helping with the creation of the required EU fiscal positions and taxes in order to automatically apply and record the required taxes. This module installs a wizard to help setup fiscal positions and taxes for selling electronic services inside EU. 
The wizard lets you select: - the EU countries to which you are selling these services - your national VAT tax for services, to be mapped to the target country's tax - optionally: a template fiscal position, in order to copy the account mapping. Should be your existing B2C Intra-EU fiscal position. (defaults to no account mapping) - optionally: an account to use for collecting the tax amounts (defaults to the account used by your national VAT tax for services) It creates the corresponding fiscal positions and taxes, automatically applicable for EU sales with a customer in the selected countries. The wizard can be run again for adding more countries. The wizard creates a separate Chart of Taxes for collecting the VAT amounts of the MOSS declaration, so extracting the MOSS data should be easy. Look for a Chart of Taxes named "EU MOSS VAT Chart" in the Taxes Report menu (Generic Accounting Report). References ++++++++++ - Directive 2008/8/EC - Council Implementing Regulation (EU) No 1042/2013 """, 'depends': ['account_accountant'], 'data': [ 'security/ir.model.access.csv', 'wizard/wizard.xml', 'wizard/l10n_eu_service.service_tax_rate.csv' ], 'test': [], 'demo': [], 'auto_install': False, 'installable': True, }
agpl-3.0
hefnrh/PyChat
client/rabin.py
1
5671
from math import * from random import * class rabin: def __init__(self): pass def get_big(self, bit): #get a big number x that x % 4 = 3 big = 1 for i in range(bit-3): temp = randrange(0,2) big = big * 2 + temp big = big * 4 + 3 return big def get_prime(self, bit): #get d-h safe-prime b = False while b == False: b = True p = self.get_big(bit-1)-2 for i in range(10): base = randrange(2,1000) x = self.mill(p,base) if x == 0: b = False break p = p*2 + 1 for i in range(10): base = randrange(2,1000) x = self.mill(p,base) if x == 0: b = False break return p def powmod(self, b, m, n): if m==0: return 1 ans = 1 tmod = b % n while m != 0: if m%2 : ans=(ans * tmod) % n tmod = (tmod * tmod) % n m /= 2 return ans def mill(self, n, base): #rabin_miller test m = n - 1 b,k = base, 0 while (m % 2) == 0: m = m / 2 k += 1 t = self.powmod(b, m, n) if (t%n) == 1 or (t%n) == n-1: return True for i in range(k-1): t = pow(t,2) if (t%n) == 1: return False; elif (t%n) == (n-1): return True; else: if (t%n) == 1: return True elif (t%n) == (n-1): return True return False def euclid(self,a,p): #calculate s1: (s1*a)%p = 1 s0,s1,t0,t1 = 1,0,0,1 r0,r1 = a,p if r1: q0 = r0 / r1 r2 = r0 - (q0 * r1) r0,r1 = r1,r2 while r1: q1 = r0 / r1 r2 = r0 - (q1 * r1) s2 = s0 - (q0 * s1) t2 = t0 - (q0 * t1) q0 = q1 r0,r1 = r1,r2 s0,s1 = s1,s2 t0,t1 = t1,t2 if s1 < 0: s1 = s1 + p return s1 def encode(self, m, n): #get encrypt every 5-bits i = 0 l = len(m) cipher = [] ci = 0 for i in range(l): temp = ord(m[i]) if (i%5) == 0: ci = temp if i == (l-1): cipher.append(self.powmod(ci,2,n)) elif (i%5) == 4: ci = ci * 256 ci = ci + temp #print ci cipher.append(self.powmod(ci,2,n)) ci = 0 else: ci = ci * 256 ci = ci + temp if i == (l-1): cipher.append(self.powmod(ci,2,n)) s="" for i in cipher: s+=str(i) s+=" " s= s[0:len(s)-1] return s def decode(self,s, p, q): m=[] for i in s.split(" "): m.append(long(i)) l = len(m) expressly = "" for i in range(l): #decode temp1 = self.powmod(m[i],(p+1)/4,p) temp2 = 
self.powmod(m[i],(q+1)/4,q) u = self.euclid(q,p) v = self.euclid(p,q) temp1 = (temp1*u*q + temp2*v*p + p*q) % (p*q) temp2 = (temp1*u*q - temp2*v*p + p*q) % (p*q) temp3 = (-temp1*u*q + temp2*v*p + p*q) % (p*q) temp4 = (-temp1*u*q - temp2*v*p + p*q) % (p*q) if temp1 < 1099511627776: temp = temp1 elif temp2 < 1099511627776: temp = temp2 elif temp3 < 1099511627776: temp = temp3 elif temp4 < 1099511627776: temp = temp4 #get decrypt tems = "" tem = 0 while(temp >= 4294967296): temp -= 4294967296 tem += 1 if(tem): tems += chr(tem) tem = 0 while(temp >= 16777216): temp -= 16777216 tem += 1 if(tem): tems += chr(tem) tem = 0 while(temp >= 65536): temp -= 65536 tem += 1 if(tem): tems += chr(tem) tem = 0 while(temp >= 256): temp -= 256 tem += 1 if(tem): tems += chr(tem) tems += chr(temp) expressly += tems return expressly def getrabinkey(self): b=False while b == False: b = True p = self.get_big(28) for i in range(10): base = randrange(2,1000) x = self.mill(p,base) if x == 0: b = False break self.p = p b=False while b == False: b = True q = self.get_big(28) for i in range(10): base = randrange(2,1000) x = self.mill(q,base) if x == 0: b = False break self.q = q self.n = p * q return (self.n, [self.p, self.q])
gpl-2.0
gnieboer/tensorflow
tensorflow/compiler/tests/variable_ops_test.py
30
3939
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for XLA JIT compiler.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.compiler.tests.xla_test import XLATestCase from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.platform import googletest from tensorflow.python.training.gradient_descent import GradientDescentOptimizer class VariableOpsTest(XLATestCase): """Test cases for resource variable operators.""" def testReadWrite(self): """Tests initialization, reading, and writing a resource variable.""" with self.test_session() as session: with self.test_scope(): with variable_scope.variable_scope("ascope", use_resource=True): x = variable_scope.get_variable( "x", shape=[], dtype=dtypes.float32, initializer=init_ops.constant_initializer(2)) a = x.read_value() with ops.control_dependencies([a]): b = state_ops.assign(x, 47) with ops.control_dependencies([b]): c = x.read_value() with ops.control_dependencies([c]): d = 
state_ops.assign_add(x, 3) with ops.control_dependencies([d]): e = x.read_value() session.run(variables.global_variables_initializer()) v1, v2, v3 = session.run([a, c, e]) self.assertAllClose(2.0, v1) self.assertAllClose(47.0, v2) self.assertAllClose(50.0, v3) def testTraining(self): """Tests a gradient descent step for a simple model.""" with self.test_session() as session: with self.test_scope(): with variable_scope.variable_scope("ascope", use_resource=True): w = variable_scope.get_variable( "w", shape=[4, 2], dtype=dtypes.float32, initializer=init_ops.constant_initializer( np.array([[1, 2], [3, 4], [5, 6], [7, 8]], dtype=np.float32))) b = variable_scope.get_variable( "b", shape=[2], dtype=dtypes.float32, initializer=init_ops.constant_initializer( np.array([2, 3], dtype=np.float32))) x = array_ops.placeholder(dtypes.float32, shape=[1, 4]) y = math_ops.matmul(x, w) + b loss = math_ops.reduce_sum(y) optimizer = GradientDescentOptimizer(0.1) train = optimizer.minimize(loss) session.run(variables.global_variables_initializer()) session.run(train, {x: np.array([[7, 3, 5, 9]], dtype=np.float32)}) vw, vb = session.run([w, b]) self.assertAllClose( np.array( [[0.3, 1.3], [2.7, 3.7], [4.5, 5.5], [6.1, 7.1]], dtype=np.float32), vw, rtol=1e-4) self.assertAllClose(np.array([1.9, 2.9], dtype=np.float32), vb, rtol=1e-4) if __name__ == "__main__": googletest.main()
apache-2.0
gimarg/dynamotors
node_modules/node-gyp/gyp/pylib/gyp/simple_copy.py
1869
1247
# Copyright 2014 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """A clone of the default copy.deepcopy that doesn't handle cyclic structures or complex types except for dicts and lists. This is because gyp copies so large structure that small copy overhead ends up taking seconds in a project the size of Chromium.""" class Error(Exception): pass __all__ = ["Error", "deepcopy"] def deepcopy(x): """Deep copy operation on gyp objects such as strings, ints, dicts and lists. More than twice as fast as copy.deepcopy but much less generic.""" try: return _deepcopy_dispatch[type(x)](x) except KeyError: raise Error('Unsupported type %s for deepcopy. Use copy.deepcopy ' + 'or expand simple_copy support.' % type(x)) _deepcopy_dispatch = d = {} def _deepcopy_atomic(x): return x for x in (type(None), int, long, float, bool, str, unicode, type): d[x] = _deepcopy_atomic def _deepcopy_list(x): return [deepcopy(a) for a in x] d[list] = _deepcopy_list def _deepcopy_dict(x): y = {} for key, value in x.iteritems(): y[deepcopy(key)] = deepcopy(value) return y d[dict] = _deepcopy_dict del d
gpl-2.0
irvingpop/digital-beer-menu
src/lib/flask_admin/tests/test_model.py
9
23799
import wtforms from nose.tools import eq_, ok_ from flask import Flask from werkzeug.wsgi import DispatcherMiddleware from werkzeug.test import Client from wtforms import fields from flask_admin import Admin, form from flask_admin._compat import iteritems, itervalues from flask_admin.model import base, filters from flask_admin.model.template import macro def wtforms2_and_up(func): """Decorator for skipping test if wtforms <2 """ if int(wtforms.__version__[0]) < 2: func.__test__ = False return func class Model(object): def __init__(self, id=None, c1=1, c2=2, c3=3): self.id = id self.col1 = c1 self.col2 = c2 self.col3 = c3 class Form(form.BaseForm): col1 = fields.StringField() col2 = fields.StringField() col3 = fields.StringField() class SimpleFilter(filters.BaseFilter): def apply(self, query): query._applied = True return query def operation(self): return 'test' class MockModelView(base.BaseModelView): def __init__(self, model, data=None, name=None, category=None, endpoint=None, url=None, **kwargs): # Allow to set any attributes from parameters for k, v in iteritems(kwargs): setattr(self, k, v) super(MockModelView, self).__init__(model, name, category, endpoint, url) self.created_models = [] self.updated_models = [] self.deleted_models = [] self.search_arguments = [] if data is None: self.all_models = {1: Model(1), 2: Model(2)} else: self.all_models = data self.last_id = len(self.all_models) + 1 # Scaffolding def get_pk_value(self, model): return model.id def scaffold_list_columns(self): columns = ['col1', 'col2', 'col3'] if self.column_exclude_list: return filter(lambda x: x not in self.column_exclude_list, columns) return columns def init_search(self): return bool(self.column_searchable_list) def scaffold_filters(self, name): return [SimpleFilter(name)] def scaffold_sortable_columns(self): return ['col1', 'col2', 'col3'] def scaffold_form(self): return Form # Data def get_list(self, page, sort_field, sort_desc, search, filters, page_size=None): 
self.search_arguments.append((page, sort_field, sort_desc, search, filters)) return len(self.all_models), itervalues(self.all_models) def get_one(self, id): return self.all_models.get(int(id)) def create_model(self, form): model = Model(self.last_id) self.last_id += 1 form.populate_obj(model) self.created_models.append(model) self.all_models[model.id] = model return True def update_model(self, form, model): form.populate_obj(model) self.updated_models.append(model) return True def delete_model(self, model): self.deleted_models.append(model) return True def setup(): app = Flask(__name__) app.config['CSRF_ENABLED'] = False app.secret_key = '1' admin = Admin(app) return app, admin def test_mockview(): app, admin = setup() view = MockModelView(Model) admin.add_view(view) eq_(view.model, Model) eq_(view.name, 'Model') eq_(view.endpoint, 'model') # Verify scaffolding eq_(view._sortable_columns, ['col1', 'col2', 'col3']) eq_(view._create_form_class, Form) eq_(view._edit_form_class, Form) eq_(view._search_supported, False) eq_(view._filters, None) client = app.test_client() # Make model view requests rv = client.get('/admin/model/') eq_(rv.status_code, 200) # Test model creation view rv = client.get('/admin/model/new/') eq_(rv.status_code, 200) rv = client.post('/admin/model/new/', data=dict(col1='test1', col2='test2', col3='test3')) eq_(rv.status_code, 302) eq_(len(view.created_models), 1) model = view.created_models.pop() eq_(model.id, 3) eq_(model.col1, 'test1') eq_(model.col2, 'test2') eq_(model.col3, 'test3') # Try model edit view rv = client.get('/admin/model/edit/?id=3') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test1' in data) rv = client.post('/admin/model/edit/?id=3', data=dict(col1='test!', col2='test@', col3='test#')) eq_(rv.status_code, 302) eq_(len(view.updated_models), 1) model = view.updated_models.pop() eq_(model.col1, 'test!') eq_(model.col2, 'test@') eq_(model.col3, 'test#') rv = client.get('/admin/model/edit/?id=4') 
eq_(rv.status_code, 302) # Attempt to delete model rv = client.post('/admin/model/delete/?id=3') eq_(rv.status_code, 302) eq_(rv.headers['location'], 'http://localhost/admin/model/') # Create a dispatched application to test that edit view's "save and # continue" functionality works when app is not located at root dummy_app = Flask('dummy_app') dispatched_app = DispatcherMiddleware(dummy_app, {'/dispatched': app}) dispatched_client = Client(dispatched_app) app_iter, status, headers = dispatched_client.post( '/dispatched/admin/model/edit/?id=3', data=dict(col1='another test!', col2='test@', col3='test#', _continue_editing='True')) eq_(status, '302 FOUND') eq_(headers['Location'], 'http://localhost/dispatched/admin/model/edit/?id=3') model = view.updated_models.pop() eq_(model.col1, 'another test!') def test_permissions(): app, admin = setup() view = MockModelView(Model) admin.add_view(view) client = app.test_client() view.can_create = False rv = client.get('/admin/model/new/') eq_(rv.status_code, 302) view.can_edit = False rv = client.get('/admin/model/edit/?id=1') eq_(rv.status_code, 302) view.can_delete = False rv = client.post('/admin/model/delete/?id=1') eq_(rv.status_code, 302) def test_templates(): app, admin = setup() view = MockModelView(Model) admin.add_view(view) client = app.test_client() view.list_template = 'mock.html' view.create_template = 'mock.html' view.edit_template = 'mock.html' rv = client.get('/admin/model/') eq_(rv.data, b'Success!') rv = client.get('/admin/model/new/') eq_(rv.data, b'Success!') rv = client.get('/admin/model/edit/?id=1') eq_(rv.data, b'Success!') def test_list_columns(): app, admin = setup() view = MockModelView(Model, column_list=['col1', 'col3'], column_labels=dict(col1='Column1')) admin.add_view(view) eq_(len(view._list_columns), 2) eq_(view._list_columns, [('col1', 'Column1'), ('col3', 'Col3')]) client = app.test_client() rv = client.get('/admin/model/') data = rv.data.decode('utf-8') ok_('Column1' in data) ok_('Col2' not 
in data) def test_exclude_columns(): app, admin = setup() view = MockModelView(Model, column_exclude_list=['col2']) admin.add_view(view) eq_(view._list_columns, [('col1', 'Col1'), ('col3', 'Col3')]) client = app.test_client() rv = client.get('/admin/model/') data = rv.data.decode('utf-8') ok_('Col1' in data) ok_('Col2' not in data) def test_sortable_columns(): app, admin = setup() view = MockModelView(Model, column_sortable_list=['col1', ('col2', 'test1')]) admin.add_view(view) eq_(view._sortable_columns, dict(col1='col1', col2='test1')) def test_column_searchable_list(): app, admin = setup() view = MockModelView(Model, column_searchable_list=['col1', 'col2']) admin.add_view(view) eq_(view._search_supported, True) # TODO: Make calls with search def test_column_filters(): app, admin = setup() view = MockModelView(Model, column_filters=['col1', 'col2']) admin.add_view(view) eq_(len(view._filters), 2) eq_(view._filters[0].name, 'col1') eq_(view._filters[1].name, 'col2') eq_([(f['index'], f['operation']) for f in view._filter_groups[u'col1']], [(0, 'test')]) eq_([(f['index'], f['operation']) for f in view._filter_groups[u'col2']], [(1, 'test')]) # TODO: Make calls with filters def test_filter_list_callable(): app, admin = setup() flt = SimpleFilter('test', options=lambda: [('1', 'Test 1'), ('2', 'Test 2')]) view = MockModelView(Model, column_filters=[flt]) admin.add_view(view) opts = flt.get_options(view) eq_(len(opts), 2) eq_(opts, [('1', 'Test 1'), ('2', 'Test 2')]) def test_form(): # TODO: form_columns # TODO: form_excluded_columns # TODO: form_args # TODO: form_widget_args pass @wtforms2_and_up def test_csrf(): class SecureModelView(MockModelView): form_base_class = form.SecureForm def scaffold_form(self): return form.SecureForm def get_csrf_token(data): data = data.split('name="csrf_token" type="hidden" value="')[1] token = data.split('"')[0] return token app, admin = setup() view = SecureModelView(Model, endpoint='secure') admin.add_view(view) client = 
app.test_client() ################ # create_view ################ rv = client.get('/admin/secure/new/') eq_(rv.status_code, 200) ok_(u'name="csrf_token"' in rv.data.decode('utf-8')) csrf_token = get_csrf_token(rv.data.decode('utf-8')) # Create without CSRF token rv = client.post('/admin/secure/new/', data=dict(name='test1')) eq_(rv.status_code, 200) # Create with CSRF token rv = client.post('/admin/secure/new/', data=dict(name='test1', csrf_token=csrf_token)) eq_(rv.status_code, 302) ############### # edit_view ############### rv = client.get('/admin/secure/edit/?url=%2Fadmin%2Fsecure%2F&id=1') eq_(rv.status_code, 200) ok_(u'name="csrf_token"' in rv.data.decode('utf-8')) csrf_token = get_csrf_token(rv.data.decode('utf-8')) # Edit without CSRF token rv = client.post('/admin/secure/edit/?url=%2Fadmin%2Fsecure%2F&id=1', data=dict(name='test1')) eq_(rv.status_code, 200) # Edit with CSRF token rv = client.post('/admin/secure/edit/?url=%2Fadmin%2Fsecure%2F&id=1', data=dict(name='test1', csrf_token=csrf_token)) eq_(rv.status_code, 302) ################ # delete_view ################ rv = client.get('/admin/secure/') eq_(rv.status_code, 200) ok_(u'name="csrf_token"' in rv.data.decode('utf-8')) csrf_token = get_csrf_token(rv.data.decode('utf-8')) # Delete without CSRF token, test validation errors rv = client.post('/admin/secure/delete/', data=dict(id="1", url="/admin/secure/"), follow_redirects=True) eq_(rv.status_code, 200) ok_(u'Record was successfully deleted.' not in rv.data.decode('utf-8')) ok_(u'Failed to delete record.' in rv.data.decode('utf-8')) # Delete with CSRF token rv = client.post('/admin/secure/delete/', data=dict(id="1", url="/admin/secure/", csrf_token=csrf_token), follow_redirects=True) eq_(rv.status_code, 200) ok_(u'Record was successfully deleted.' 
in rv.data.decode('utf-8')) def test_custom_form(): app, admin = setup() class TestForm(form.BaseForm): pass view = MockModelView(Model, form=TestForm) admin.add_view(view) eq_(view._create_form_class, TestForm) eq_(view._edit_form_class, TestForm) ok_(not hasattr(view._create_form_class, 'col1')) def test_modal_edit(): # bootstrap 2 - test edit_modal app_bs2 = Flask(__name__) admin_bs2 = Admin(app_bs2, template_mode="bootstrap2") edit_modal_on = MockModelView(Model, edit_modal=True, endpoint="edit_modal_on") edit_modal_off = MockModelView(Model, edit_modal=False, endpoint="edit_modal_off") create_modal_on = MockModelView(Model, create_modal=True, endpoint="create_modal_on") create_modal_off = MockModelView(Model, create_modal=False, endpoint="create_modal_off") admin_bs2.add_view(edit_modal_on) admin_bs2.add_view(edit_modal_off) admin_bs2.add_view(create_modal_on) admin_bs2.add_view(create_modal_off) client_bs2 = app_bs2.test_client() # bootstrap 2 - ensure modal window is added when edit_modal is enabled rv = client_bs2.get('/admin/edit_modal_on/') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('fa_modal_window' in data) # bootstrap 2 - test edit modal disabled rv = client_bs2.get('/admin/edit_modal_off/') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('fa_modal_window' not in data) # bootstrap 2 - ensure modal window is added when create_modal is enabled rv = client_bs2.get('/admin/create_modal_on/') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('fa_modal_window' in data) # bootstrap 2 - test create modal disabled rv = client_bs2.get('/admin/create_modal_off/') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('fa_modal_window' not in data) # bootstrap 3 app_bs3 = Flask(__name__) admin_bs3 = Admin(app_bs3, template_mode="bootstrap3") admin_bs3.add_view(edit_modal_on) admin_bs3.add_view(edit_modal_off) admin_bs3.add_view(create_modal_on) admin_bs3.add_view(create_modal_off) client_bs3 = app_bs3.test_client() # 
bootstrap 3 - ensure modal window is added when edit_modal is enabled rv = client_bs3.get('/admin/edit_modal_on/') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('fa_modal_window' in data) # bootstrap 3 - test modal disabled rv = client_bs3.get('/admin/edit_modal_off/') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('fa_modal_window' not in data) # bootstrap 3 - ensure modal window is added when edit_modal is enabled rv = client_bs3.get('/admin/create_modal_on/') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('fa_modal_window' in data) # bootstrap 3 - test modal disabled rv = client_bs3.get('/admin/create_modal_off/') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('fa_modal_window' not in data) def check_class_name(): class DummyView(MockModelView): pass view = DummyView(Model) eq_(view.name, 'Dummy View') def test_export_csv(): app, admin = setup() client = app.test_client() # test redirect when csv export is disabled view = MockModelView(Model, column_list=['col1', 'col2'], endpoint="test") admin.add_view(view) rv = client.get('/admin/test/export/csv/') eq_(rv.status_code, 302) # basic test of csv export with a few records view_data = { 1: Model(1, "col1_1", "col2_1"), 2: Model(2, "col1_2", "col2_2"), 3: Model(3, "col1_3", "col2_3"), } view = MockModelView(Model, view_data, can_export=True, column_list=['col1', 'col2']) admin.add_view(view) rv = client.get('/admin/model/export/csv/') data = rv.data.decode('utf-8') eq_(rv.mimetype, 'text/csv') eq_(rv.status_code, 200) ok_("Col1,Col2\r\n" "col1_1,col2_1\r\n" "col1_2,col2_2\r\n" "col1_3,col2_3\r\n" == data) # test explicit use of column_export_list view = MockModelView(Model, view_data, can_export=True, column_list=['col1', 'col2'], column_export_list=['id','col1','col2'], endpoint='exportinclusion') admin.add_view(view) rv = client.get('/admin/exportinclusion/export/csv/') data = rv.data.decode('utf-8') eq_(rv.mimetype, 'text/csv') eq_(rv.status_code, 200) 
ok_("Id,Col1,Col2\r\n" "1,col1_1,col2_1\r\n" "2,col1_2,col2_2\r\n" "3,col1_3,col2_3\r\n" == data) # test explicit use of column_export_exclude_list view = MockModelView(Model, view_data, can_export=True, column_list=['col1', 'col2'], column_export_exclude_list=['col2'], endpoint='exportexclusion') admin.add_view(view) rv = client.get('/admin/exportexclusion/export/csv/') data = rv.data.decode('utf-8') eq_(rv.mimetype, 'text/csv') eq_(rv.status_code, 200) ok_("Col1\r\n" "col1_1\r\n" "col1_2\r\n" "col1_3\r\n" == data) # test utf8 characters in csv export view_data[4] = Model(1, u'\u2013ut8_1\u2013', u'\u2013utf8_2\u2013') view = MockModelView(Model, view_data, can_export=True, column_list=['col1', 'col2'], endpoint="utf8") admin.add_view(view) rv = client.get('/admin/utf8/export/csv/') data = rv.data.decode('utf-8') eq_(rv.status_code, 200) ok_(u'\u2013ut8_1\u2013,\u2013utf8_2\u2013\r\n' in data) # test None type, integer type, column_labels, and column_formatters view_data = { 1: Model(1, "col1_1", 1), 2: Model(2, "col1_2", 2), 3: Model(3, None, 3), } view = MockModelView( Model, view_data, can_export=True, column_list=['col1', 'col2'], column_labels={'col1': 'Str Field', 'col2': 'Int Field'}, column_formatters=dict(col2=lambda v, c, m, p: m.col2*2), endpoint="types_and_formatters" ) admin.add_view(view) rv = client.get('/admin/types_and_formatters/export/csv/') data = rv.data.decode('utf-8') eq_(rv.status_code, 200) ok_("Str Field,Int Field\r\n" "col1_1,2\r\n" "col1_2,4\r\n" ",6\r\n" == data) # test column_formatters_export and column_formatters_export type_formatters = {type(None): lambda view, value: "null"} view = MockModelView( Model, view_data, can_export=True, column_list=['col1', 'col2'], column_formatters_export=dict(col2=lambda v, c, m, p: m.col2*3), column_formatters=dict(col2=lambda v, c, m, p: m.col2*2), # overridden column_type_formatters_export=type_formatters, endpoint="export_types_and_formatters" ) admin.add_view(view) rv = 
client.get('/admin/export_types_and_formatters/export/csv/') data = rv.data.decode('utf-8') eq_(rv.status_code, 200) ok_("Col1,Col2\r\n" "col1_1,3\r\n" "col1_2,6\r\n" "null,9\r\n" == data) # Macros are not implemented for csv export yet and will throw an error view = MockModelView( Model, can_export=True, column_list=['col1', 'col2'], column_formatters=dict(col1=macro('render_macro')), endpoint="macro_exception" ) admin.add_view(view) rv = client.get('/admin/macro_exception/export/csv/') data = rv.data.decode('utf-8') eq_(rv.status_code, 500) # We should be able to specify column_formatters_export # and not get an exception if a column_formatter is using a macro def export_formatter(v, c, m, p): return m.col1 if m else '' view = MockModelView( Model, view_data, can_export=True, column_list=['col1', 'col2'], column_formatters=dict(col1=macro('render_macro')), column_formatters_export=dict(col1=export_formatter), endpoint="macro_exception_formatter_override" ) admin.add_view(view) rv = client.get('/admin/macro_exception_formatter_override/export/csv/') data = rv.data.decode('utf-8') eq_(rv.status_code, 200) ok_("Col1,Col2\r\n" "col1_1,1\r\n" "col1_2,2\r\n" ",3\r\n" == data) # We should not get an exception if a column_formatter is # using a macro but it is on the column_export_exclude_list view = MockModelView( Model, view_data, can_export=True, column_list=['col1', 'col2'], column_formatters=dict(col1=macro('render_macro')), column_export_exclude_list=['col1'], endpoint="macro_exception_exclude_override" ) admin.add_view(view) rv = client.get('/admin/macro_exception_exclude_override/export/csv/') data = rv.data.decode('utf-8') eq_(rv.status_code, 200) ok_("Col2\r\n" "1\r\n" "2\r\n" "3\r\n" == data) # When we use column_export_list to hide the macro field # we should not get an exception view = MockModelView( Model, view_data, can_export=True, column_list=['col1', 'col2'], column_formatters=dict(col1=macro('render_macro')), column_export_list=['col2'], 
endpoint="macro_exception_list_override" ) admin.add_view(view) rv = client.get('/admin/macro_exception_list_override/export/csv/') data = rv.data.decode('utf-8') eq_(rv.status_code, 200) ok_("Col2\r\n" "1\r\n" "2\r\n" "3\r\n" == data) # If they define a macro on the column_formatters_export list # then raise an exception view = MockModelView( Model, view_data, can_export=True, column_list=['col1', 'col2'], column_formatters=dict(col1=macro('render_macro')), endpoint="macro_exception_macro_override" ) admin.add_view(view) rv = client.get('/admin/macro_exception_macro_override/export/csv/') data = rv.data.decode('utf-8') eq_(rv.status_code, 500) def test_list_row_actions(): app, admin = setup() client = app.test_client() from flask_admin.model import template # Test default actions view = MockModelView(Model, endpoint='test') admin.add_view(view) actions = view.get_list_row_actions() ok_(isinstance(actions[0], template.EditRowAction)) ok_(isinstance(actions[1], template.DeleteRowAction)) rv = client.get('/admin/test/') eq_(rv.status_code, 200) # Test default actions view = MockModelView(Model, endpoint='test1', can_edit=False, can_delete=False, can_view_details=True) admin.add_view(view) actions = view.get_list_row_actions() eq_(len(actions), 1) ok_(isinstance(actions[0], template.ViewRowAction)) rv = client.get('/admin/test1/') eq_(rv.status_code, 200) # Test popups view = MockModelView(Model, endpoint='test2', can_view_details=True, details_modal=True, edit_modal=True) admin.add_view(view) actions = view.get_list_row_actions() ok_(isinstance(actions[0], template.ViewPopupRowAction)) ok_(isinstance(actions[1], template.EditPopupRowAction)) ok_(isinstance(actions[2], template.DeleteRowAction)) rv = client.get('/admin/test2/') eq_(rv.status_code, 200) # Test custom views view = MockModelView(Model, endpoint='test3', column_extra_row_actions=[ template.LinkRowAction('glyphicon glyphicon-off', 'http://localhost/?id={row_id}'), template.EndpointLinkRowAction('glyphicon 
glyphicon-test', 'test1.index_view') ]) admin.add_view(view) actions = view.get_list_row_actions() ok_(isinstance(actions[0], template.EditRowAction)) ok_(isinstance(actions[1], template.DeleteRowAction)) ok_(isinstance(actions[2], template.LinkRowAction)) ok_(isinstance(actions[3], template.EndpointLinkRowAction)) rv = client.get('/admin/test3/') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('glyphicon-off' in data) ok_('http://localhost/?id=' in data) ok_('glyphicon-test' in data)
gpl-2.0
jcushman/pywb
pywb/manager/manager.py
1
16268
"""pywb collections manager.

Command-line utility for creating and maintaining the directory structure
that pywb expects for web archive collections: adding collections, copying
and indexing WARC/ARC files, managing templates and metadata.
"""

import os
import shutil
import sys
import logging
import heapq
import yaml
import re

from distutils.util import strtobool
from pkg_resources import resource_string
from argparse import ArgumentParser, RawTextHelpFormatter

from pywb.utils.loaders import load_yaml_config
from pywb.utils.timeutils import timestamp20_now

from pywb import DEFAULT_CONFIG


#=============================================================================
# to allow testing by mocking get_input
def get_input(msg):  #pragma: no cover
    return raw_input(msg)


#=============================================================================
class CollectionsManager(object):
    """ This utility is designed to
simplify the creation and management of web archive collections

It may be used via cmdline to setup and maintain the
directory structure expected by pywb
    """
    DEF_INDEX_FILE = 'index.cdxj'
    AUTO_INDEX_FILE = 'autoindex.cdxj'

    # collection names: word char first, then word chars or dashes
    COLL_RX = re.compile('^[\w][-\w]*$')

    def __init__(self, coll_name, colls_dir='collections', must_exist=True):
        """
        :param coll_name: collection to operate on ('' for no specific one)
        :param colls_dir: root dir holding all collections (relative to cwd)
        :param must_exist: if True, raise IOError when the collection
                           directory does not already exist
        """
        self.default_config = load_yaml_config(DEFAULT_CONFIG)

        if coll_name and not self.COLL_RX.match(coll_name):
            raise ValueError('Invalid Collection Name: ' + coll_name)

        self.colls_dir = os.path.join(os.getcwd(), colls_dir)

        self._set_coll_dirs(coll_name)

        if must_exist:
            self._assert_coll_exists()

    def _set_coll_dirs(self, coll_name):
        # (Re)compute all per-collection directory paths from the
        # default config's 'paths' section.
        self.coll_name = coll_name
        self.curr_coll_dir = os.path.join(self.colls_dir, coll_name)

        self.archive_dir = self._get_dir('archive_paths')

        self.indexes_dir = self._get_dir('index_paths')
        self.static_dir = self._get_dir('static_path')
        self.templates_dir = self._get_dir('templates_dir')

    def list_colls(self):
        """Print every subdirectory of the collections root."""
        print('Collections:')
        if not os.path.isdir(self.colls_dir):
            msg = ('"Collections" directory not found. ' +
                   'To create a new collection, run:\n\n{0} init <name>')
            raise IOError(msg.format(sys.argv[0]))
        for d in os.listdir(self.colls_dir):
            if os.path.isdir(os.path.join(self.colls_dir, d)):
                print('- ' + d)

    def _get_root_dir(self, name):
        # path configured relative to the current working dir (shared dirs)
        return os.path.join(os.getcwd(),
                            self.default_config['paths'][name])

    def _get_dir(self, name):
        # path configured relative to the current collection dir
        return os.path.join(self.curr_coll_dir,
                            self.default_config['paths'][name])

    def _create_dir(self, dirname):
        if not os.path.isdir(dirname):
            os.mkdir(dirname)

        logging.info('Created Directory: ' + dirname)

    def add_collection(self):
        """Create the full directory layout for a new collection."""
        os.makedirs(self.curr_coll_dir)
        logging.info('Created Directory: ' + self.curr_coll_dir)

        self._create_dir(self.archive_dir)
        self._create_dir(self.indexes_dir)
        self._create_dir(self.static_dir)
        self._create_dir(self.templates_dir)

        # shared (root-level) static and template dirs
        self._create_dir(self._get_root_dir('static_path'))
        self._create_dir(self._get_root_dir('templates_dir'))

    def _assert_coll_exists(self):
        if not os.path.isdir(self.curr_coll_dir):
            msg = ('Collection {0} does not exist. ' +
                   'To create a new collection, run\n\n{1} init {0}')
            raise IOError(msg.format(self.coll_name, sys.argv[0]))

    def add_warcs(self, warcs):
        """Copy the given WARC/ARC files into the archive dir and index them.

        :param warcs: list of paths to warc files to copy + index
        """
        if not os.path.isdir(self.archive_dir):
            raise IOError('Directory {0} does not exist'.
                          format(self.archive_dir))

        full_paths = []
        for filename in warcs:
            filename = os.path.abspath(filename)
            shutil.copy2(filename, self.archive_dir)
            # BUGFIX: os.path.join() discards archive_dir when the second
            # component is absolute, so joining the abspath 'filename'
            # yielded the *source* path, not the copy just placed in the
            # archive dir. Join the basename so the index refers to the
            # archived copy.
            full_paths.append(os.path.join(self.archive_dir,
                                           os.path.basename(filename)))
            logging.info('Copied ' + filename + ' to ' + self.archive_dir)

        self._index_merge_warcs(full_paths, self.DEF_INDEX_FILE)

    def reindex(self):
        """Rebuild the default index from everything in the archive dir."""
        cdx_file = os.path.join(self.indexes_dir, self.DEF_INDEX_FILE)
        logging.info('Indexing ' + self.archive_dir + ' to ' + cdx_file)
        self._cdx_index(cdx_file, [self.archive_dir])

    def _cdx_index(self, out, input_, rel_root=None):
        # import here to keep module import cheap for non-indexing commands
        from pywb.warc.cdxindexer import write_multi_cdx_index

        options = dict(append_post=True,
                       cdxj=True,
                       sort=True,
                       recurse=True,
                       rel_root=rel_root)

        write_multi_cdx_index(out, input_, **options)

    def index_merge(self, filelist, index_file):
        """Index the given warcs (which must already live inside the
        archive dir) and merge the results into ``index_file``."""
        wrongdir = 'Skipping {0}, must be in {1} archive directory'
        notfound = 'Skipping {0}, file not found'

        filtered_warcs = []

        # Check that warcs are actually in archive dir
        abs_archive_dir = os.path.abspath(self.archive_dir)

        for f in filelist:
            abs_filepath = os.path.abspath(f)
            prefix = os.path.commonprefix([abs_archive_dir, abs_filepath])

            if prefix != abs_archive_dir:
                raise IOError(wrongdir.format(abs_filepath, abs_archive_dir))
            elif not os.path.isfile(abs_filepath):
                raise IOError(notfound.format(f))
            else:
                filtered_warcs.append(abs_filepath)

        self._index_merge_warcs(filtered_warcs, index_file, abs_archive_dir)

    def _index_merge_warcs(self, new_warcs, index_file, rel_root=None):
        # Index new warcs into a temp file, then merge-sort it with any
        # existing index, dropping duplicate lines.
        cdx_file = os.path.join(self.indexes_dir, index_file)

        temp_file = cdx_file + '.tmp.' + timestamp20_now()
        self._cdx_index(temp_file, new_warcs, rel_root)

        # no existing file, so just make it the new file
        if not os.path.isfile(cdx_file):
            shutil.move(temp_file, cdx_file)
            return

        merged_file = temp_file + '.merged'

        last_line = None

        with open(cdx_file) as orig_index:
            with open(temp_file) as new_index:
                with open(merged_file, 'w+b') as merged:
                    # both inputs are sorted, so heapq.merge yields a
                    # globally sorted stream; skip consecutive duplicates
                    for line in heapq.merge(orig_index, new_index):
                        if last_line != line:
                            merged.write(line)
                            last_line = line

        shutil.move(merged_file, cdx_file)
        #os.rename(merged_file, cdx_file)
        os.remove(temp_file)

    def set_metadata(self, namevalue_pairs):
        """Set 'name=value' pairs into the collection's metadata.yaml."""
        metadata_yaml = os.path.join(self.curr_coll_dir, 'metadata.yaml')
        metadata = None
        if os.path.isfile(metadata_yaml):
            with open(metadata_yaml) as fh:
                metadata = yaml.safe_load(fh)

        if not metadata:
            metadata = {}

        msg = 'Metadata params must be in the form "name=value"'
        for pair in namevalue_pairs:
            v = pair.split('=', 1)
            if len(v) != 2:
                raise ValueError(msg)

            print('Set {0}={1}'.format(v[0], v[1]))
            metadata[v[0]] = v[1]

        with open(metadata_yaml, 'w+b') as fh:
            fh.write(yaml.dump(metadata, default_flow_style=False))

    def _load_templates_map(self):
        """Return (templates, shared_templates) dicts mapping template name
        to its path within the pywb package."""
        defaults = load_yaml_config(DEFAULT_CONFIG)

        temp_dir = defaults['paths']['templates_dir']

        # Coll Templates
        templates = defaults['paths']['template_files']

        # NOTE: template filenames are looked up at the *top level* of the
        # default config (defaults[name]), not under 'paths' -- presumably
        # matching pywb's default config layout; verify if config changes.
        for name, _ in templates.iteritems():
            templates[name] = os.path.join(temp_dir, defaults[name])

        # Shared Templates
        shared_templates = defaults['paths']['shared_template_files']

        for name, _ in shared_templates.iteritems():
            shared_templates[name] = os.path.join(temp_dir, defaults[name])

        return templates, shared_templates

    def list_templates(self):
        """Print all per-collection and shared template names/paths."""
        templates, shared_templates = self._load_templates_map()

        print('Shared Templates')
        for n, v in shared_templates.iteritems():
            print('- {0}: (pywb/{1})'.format(n, v))

        print('')

        print('Collection Templates')
        for n, v in templates.iteritems():
            print('- {0}: (pywb/{1})'.format(n, v))

    def _confirm_overwrite(self, full_path, msg):
        # Returns True if the file doesn't exist; otherwise prompts the
        # user and raises IOError unless they confirm.
        if not os.path.isfile(full_path):
            return True

        res = get_input(msg)
        try:
            res = strtobool(res)
        except ValueError:
            res = False

        if not res:
            raise IOError('Skipping, {0} already exists'.format(full_path))

    def _get_template_path(self, template_name, verb):
        # Resolve a template name to (full_path, package-relative filename),
        # checking per-collection templates first, then shared ones.
        templates, shared_templates = self._load_templates_map()

        try:
            filename = templates[template_name]
            if not self.coll_name:
                full_path = os.path.join(os.getcwd(), filename)
            else:
                full_path = os.path.join(self.templates_dir,
                                         os.path.basename(filename))

        except KeyError:
            try:
                filename = shared_templates[template_name]
                full_path = os.path.join(os.getcwd(), filename)
            except KeyError:
                msg = 'template name must be one of {0} or {1}'
                msg = msg.format(templates.keys(), shared_templates.keys())
                raise KeyError(msg)

        return full_path, filename

    def add_template(self, template_name, force=False):
        """Copy the default template of the given name into the collection,
        prompting before overwrite unless force=True."""
        full_path, filename = self._get_template_path(template_name, 'add')

        msg = ('Template file "{0}" ({1}) already exists. ' +
               'Overwrite with default template? (y/n) ')
        msg = msg.format(full_path, template_name)

        if not force:
            self._confirm_overwrite(full_path, msg)

        # read the default template bytes out of the installed pywb package
        data = resource_string('pywb', filename)
        with open(full_path, 'w+b') as fh:
            fh.write(data)

        full_path = os.path.abspath(full_path)
        msg = 'Copied default template "{0}" to "{1}"'
        print(msg.format(filename, full_path))

    def remove_template(self, template_name, force=False):
        """Delete a previously added template file, prompting unless
        force=True."""
        full_path, filename = self._get_template_path(template_name, 'remove')

        if not os.path.isfile(full_path):
            msg = 'Template "{0}" does not exist.'
            raise IOError(msg.format(full_path))

        msg = 'Delete template file "{0}" ({1})? (y/n) '
        msg = msg.format(full_path, template_name)

        if not force:
            self._confirm_overwrite(full_path, msg)

        os.remove(full_path)
        print('Removed template file "{0}"'.format(full_path))

    def migrate_cdxj(self, path, force=False):
        """Convert legacy cdx indexes under 'path' to cdxj, prompting for
        confirmation unless force=True."""
        from migrate import MigrateCDX

        migrate = MigrateCDX(path)
        count = migrate.count_cdx()
        if count == 0:
            print('Index files up-to-date, nothing to convert')
            return

        msg = 'Convert {0} index files? (y/n)'.format(count)
        if not force:
            res = get_input(msg)
            try:
                res = strtobool(res)
            except ValueError:
                res = False

            if not res:
                return

        migrate.convert_to_cdxj()

    def autoindex(self, do_loop=True):
        """Watch the archive dir (or all collections, if no collection was
        named) and auto-index any warc reported by the watcher."""
        from autoindex import CDXAutoIndexer

        if self.coll_name:
            any_coll = False
            path = self.archive_dir
        else:
            path = self.colls_dir
            any_coll = True

        def do_index(warc):
            if any_coll:
                # derive the collection name from the warc's path under
                # colls_dir, and switch to that collection if needed
                coll_name = warc.split(self.colls_dir + os.path.sep)
                coll_name = coll_name[-1].split(os.path.sep)[0]

                if coll_name != self.coll_name:
                    self._set_coll_dirs(coll_name)

            print('Auto-Indexing: ' + warc)
            self.index_merge([warc], self.AUTO_INDEX_FILE)
            print('Done.. Waiting for file updates')

        indexer = CDXAutoIndexer(do_index, path)
        indexer.start_watch()
        if do_loop:
            indexer.do_loop()


#=============================================================================
def main(args=None):
    """CLI entry point: build the subcommand parser and dispatch."""
    description = """
Create manage file based web archive collections
"""
    #format(os.path.basename(sys.argv[0]))

    logging.basicConfig(format='%(asctime)s: [%(levelname)s]: %(message)s',
                        level=logging.DEBUG)

    parser = ArgumentParser(description=description,
                            #epilog=epilog,
                            formatter_class=RawTextHelpFormatter)

    subparsers = parser.add_subparsers(dest='type')

    # Init Coll
    def do_init(r):
        m = CollectionsManager(r.coll_name, must_exist=False)
        m.add_collection()

    init_help = 'Init new collection, create all collection directories'
    init = subparsers.add_parser('init', help=init_help)
    init.add_argument('coll_name')
    init.set_defaults(func=do_init)

    # List Colls
    def do_list(r):
        m = CollectionsManager('', must_exist=False)
        m.list_colls()

    list_help = 'List Collections'
    listcmd = subparsers.add_parser('list', help=list_help)
    listcmd.set_defaults(func=do_list)

    # Add Warcs
    def do_add(r):
        m = CollectionsManager(r.coll_name)
        m.add_warcs(r.files)

    addwarc_help = 'Copy ARCS/WARCS to collection directory and reindex'
    addwarc = subparsers.add_parser('add', help=addwarc_help)
    addwarc.add_argument('coll_name')
    addwarc.add_argument('files', nargs='+')
    addwarc.set_defaults(func=do_add)

    # Reindex All
    def do_reindex(r):
        m = CollectionsManager(r.coll_name)
        m.reindex()

    reindex_help = 'Re-Index entire collection'
    reindex = subparsers.add_parser('reindex', help=reindex_help)
    reindex.add_argument('coll_name')
    reindex.set_defaults(func=do_reindex)

    # Index warcs
    def do_index(r):
        m = CollectionsManager(r.coll_name)
        m.index_merge(r.files, m.DEF_INDEX_FILE)

    indexwarcs_help = 'Index specified ARC/WARC files in the collection'
    indexwarcs = subparsers.add_parser('index', help=indexwarcs_help)
    indexwarcs.add_argument('coll_name')
    indexwarcs.add_argument('files', nargs='+')
    indexwarcs.set_defaults(func=do_index)

    # Set metadata
    def do_metadata(r):
        m = CollectionsManager(r.coll_name)
        m.set_metadata(r.set)

    metadata_help = 'Set Metadata'
    metadata = subparsers.add_parser('metadata', help=metadata_help)
    metadata.add_argument('coll_name')
    metadata.add_argument('--set', nargs='+')
    metadata.set_defaults(func=do_metadata)

    # Add default template
    def do_add_template(r):
        m = CollectionsManager(r.coll_name, must_exist=False)
        if r.add:
            m.add_template(r.add, r.force)
        elif r.remove:
            m.remove_template(r.remove, r.force)
        elif r.list:
            m.list_templates()

    template_help = 'Add default html template for customization'
    template = subparsers.add_parser('template', help=template_help)
    template.add_argument('coll_name', nargs='?', default='')
    template.add_argument('-f', '--force', action='store_true')
    template.add_argument('--add')
    template.add_argument('--remove')
    template.add_argument('--list', action='store_true')
    template.set_defaults(func=do_add_template)

    # Migrate CDX
    def do_migrate(r):
        m = CollectionsManager('', must_exist=False)
        m.migrate_cdxj(r.path, r.force)

    migrate_help = 'Convert any existing archive indexes to new json format'
    migrate = subparsers.add_parser('cdx-convert', help=migrate_help)
    migrate.add_argument('path', default='./', nargs='?')
    migrate.add_argument('-f', '--force', action='store_true')
    migrate.set_defaults(func=do_migrate)

    # Auto Index
    def do_autoindex(r):
        m = CollectionsManager(r.coll_name, must_exist=False)
        m.autoindex(True)

    # fixed typo in user-facing help: 'change' -> 'changed'
    autoindex_help = 'Automatically index any changed archive files'
    autoindex = subparsers.add_parser('autoindex', help=autoindex_help)
    autoindex.add_argument('coll_name', nargs='?', default='')
    autoindex.set_defaults(func=do_autoindex)

    r = parser.parse_args(args=args)
    r.func(r)


# special wrapper for cli to avoid printing stack trace
def main_wrap_exc():  #pragma: no cover
    try:
        main()
    except Exception as e:
        print('Error: ' + str(e))
        sys.exit(2)


if __name__ == "__main__":
    main_wrap_exc()
gpl-3.0
anpingli/openshift-ansible
roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
45
9996
"""Check for an aggregated logging Elasticsearch deployment"""

import json
import re

from openshift_checks import OpenShiftCheckException, OpenShiftCheckExceptionList
from openshift_checks.logging.logging import LoggingCheck


class Elasticsearch(LoggingCheck):
    """Check for an aggregated logging Elasticsearch deployment"""

    name = "elasticsearch"
    tags = ["health", "logging"]

    def run(self):
        """Check various things and gather errors. Returns: result as hash"""

        es_pods = self.get_pods_for_component("es")
        # check_elasticsearch raises OpenShiftCheckExceptionList on failure,
        # so reaching the return below means all sub-checks passed
        self.check_elasticsearch(es_pods)
        # TODO(lmeyer): run it all again for the ops cluster
        return {}

    def check_elasticsearch(self, es_pods):
        """Perform checks for Elasticsearch. Raises OpenShiftCheckExceptionList on any errors."""
        running_pods, errors = self.running_elasticsearch_pods(es_pods)
        pods_by_name = {
            pod['metadata']['name']: pod for pod in running_pods
            # Filter out pods that are not members of a DC
            if pod['metadata'].get('labels', {}).get('deploymentconfig')
        }
        if not pods_by_name:
            # nothing running, cannot run the rest of the check
            errors.append(OpenShiftCheckException(
                'NoRunningPods',
                'No logging Elasticsearch pods were found running, so no logs are being aggregated.'
            ))
            raise OpenShiftCheckExceptionList(errors)

        # Sub-checks each return a (possibly empty) list of exceptions;
        # accumulate them all and raise once at the end.
        errors += self.check_elasticsearch_masters(pods_by_name)
        errors += self.check_elasticsearch_node_list(pods_by_name)
        errors += self.check_es_cluster_health(pods_by_name)
        errors += self.check_elasticsearch_diskspace(pods_by_name)
        if errors:
            raise OpenShiftCheckExceptionList(errors)

    def running_elasticsearch_pods(self, es_pods):
        """Returns: list of running pods, list of errors about non-running pods"""
        not_running = self.not_running_pods(es_pods)
        running_pods = [pod for pod in es_pods if pod not in not_running]
        if not_running:
            return running_pods, [OpenShiftCheckException(
                'PodNotRunning',
                'The following Elasticsearch pods are defined but not running:\n'
                '{pods}'.format(pods=''.join(
                    "  {} ({})\n".format(pod['metadata']['name'], pod['spec'].get('host', 'None'))
                    for pod in not_running
                ))
            )]
        return running_pods, []

    @staticmethod
    def _build_es_curl_cmd(pod_name, url):
        # Builds an `oc exec` command that curls the ES REST API from inside
        # the pod, authenticating with the admin client cert/key mounted at
        # /etc/elasticsearch/secret/.
        base = "exec {name} -- curl -s --cert {base}cert --key {base}key --cacert {base}ca -XGET '{url}'"
        return base.format(base="/etc/elasticsearch/secret/admin-", name=pod_name, url=url)

    def check_elasticsearch_masters(self, pods_by_name):
        """Check that Elasticsearch masters are sane. Returns: list of errors"""
        es_master_names = set()
        errors = []
        for pod_name in pods_by_name.keys():
            # Compare what each ES node reports as master and compare for split brain
            get_master_cmd = self._build_es_curl_cmd(pod_name, "https://localhost:9200/_cat/master")
            master_name_str = self.exec_oc(get_master_cmd, [], save_as_name="get_master_names.json")
            # _cat/master output is space-separated; the master name is
            # expected in the second column (index 1)
            master_names = (master_name_str or '').split(' ')
            if len(master_names) > 1:
                es_master_names.add(master_names[1])
            else:
                errors.append(OpenShiftCheckException(
                    'NoMasterName',
                    'Elasticsearch {pod} gave unexpected response when asked master name:\n'
                    '  {response}'.format(pod=pod_name, response=master_name_str)
                ))

        if not es_master_names:
            errors.append(OpenShiftCheckException(
                'NoMasterFound',
                'No logging Elasticsearch masters were found.'
            ))
            return errors

        if len(es_master_names) > 1:
            errors.append(OpenShiftCheckException(
                'SplitBrainMasters',
                'Found multiple Elasticsearch masters according to the pods:\n'
                '{master_list}\n'
                'This implies that the masters have "split brain" and are not correctly\n'
                'replicating data for the logging cluster. Log loss is likely to occur.'
                .format(master_list='\n'.join('  ' + master for master in es_master_names))
            ))

        return errors

    def check_elasticsearch_node_list(self, pods_by_name):
        """Check that reported ES masters are accounted for by pods. Returns: list of errors"""

        if not pods_by_name:
            return [OpenShiftCheckException(
                'MissingComponentPods',
                'No logging Elasticsearch pods were found.'
            )]

        # get ES cluster nodes -- query any one pod; the _nodes API reports
        # cluster-wide membership
        node_cmd = self._build_es_curl_cmd(list(pods_by_name.keys())[0], 'https://localhost:9200/_nodes')
        cluster_node_data = self.exec_oc(node_cmd, [], save_as_name="get_es_nodes.json")
        try:
            cluster_nodes = json.loads(cluster_node_data)['nodes']
        except (ValueError, KeyError):
            return [OpenShiftCheckException(
                'MissingNodeList',
                'Failed to query Elasticsearch for the list of ES nodes. The output was:\n' +
                cluster_node_data
            )]

        # Try to match all ES-reported node hosts to known pods.
        errors = []
        for node in cluster_nodes.values():
            # Note that with 1.4/3.4 the pod IP may be used as the master name
            if not any(node['host'] in (pod_name, pod['status'].get('podIP'))
                       for pod_name, pod in pods_by_name.items()):
                errors.append(OpenShiftCheckException(
                    'EsPodNodeMismatch',
                    'The Elasticsearch cluster reports a member node "{node}"\n'
                    'that does not correspond to any known ES pod.'.format(node=node['host'])
                ))

        return errors

    def check_es_cluster_health(self, pods_by_name):
        """Exec into the elasticsearch pods and check the cluster health. Returns: list of errors"""
        errors = []
        for pod_name in pods_by_name.keys():
            cluster_health_cmd = self._build_es_curl_cmd(pod_name, 'https://localhost:9200/_cluster/health?pretty=true')
            cluster_health_data = self.exec_oc(cluster_health_cmd, [], save_as_name='get_es_health.json')
            try:
                health_res = json.loads(cluster_health_data)
                # treat an empty or status-less response the same as
                # unparseable JSON
                if not health_res or not health_res.get('status'):
                    raise ValueError()
            except ValueError:
                errors.append(OpenShiftCheckException(
                    'BadEsResponse',
                    'Could not retrieve cluster health status from logging ES pod "{pod}".\n'
                    'Response was:\n{output}'.format(pod=pod_name, output=cluster_health_data)
                ))
                continue

            # 'green' and 'yellow' are acceptable; anything else is an error
            if health_res['status'] not in ['green', 'yellow']:
                errors.append(OpenShiftCheckException(
                    'EsClusterHealthRed',
                    'Elasticsearch cluster health status is RED according to pod "{}"'.format(pod_name)
                ))

        return errors

    def check_elasticsearch_diskspace(self, pods_by_name):
        """
        Exec into an ES pod and query the diskspace on the persistent volume.
        Returns: list of errors
        """
        errors = []
        for pod_name in pods_by_name.keys():
            df_cmd = '-c elasticsearch exec {} -- df --output=ipcent,pcent /elasticsearch/persistent'.format(pod_name)
            disk_output = self.exec_oc(df_cmd, [], save_as_name='get_pv_diskspace.json')
            lines = disk_output.splitlines()
            # expecting one header looking like 'IUse% Use%' and one body line
            body_re = r'\s*(\d+)%?\s+(\d+)%?\s*$'
            if len(lines) != 2 or len(lines[0].split()) != 2 or not re.match(body_re, lines[1]):
                errors.append(OpenShiftCheckException(
                    'BadDfResponse',
                    'Could not retrieve storage usage from logging ES pod "{pod}".\n'
                    'Response to `df` command was:\n{output}'.format(pod=pod_name, output=disk_output)
                ))
                continue
            inode_pct, disk_pct = re.match(body_re, lines[1]).groups()

            # thresholds come from inventory vars as strings; compared as ints
            inode_pct_thresh = self.get_var('openshift_check_efk_es_inode_pct', default='90')
            if int(inode_pct) >= int(inode_pct_thresh):
                errors.append(OpenShiftCheckException(
                    'InodeUsageTooHigh',
                    'Inode percent usage on the storage volume for logging ES pod "{pod}"\n'
                    '  is {pct}, greater than threshold {limit}.\n'
                    '  Note: threshold can be specified in inventory with {param}'.format(
                        pod=pod_name,
                        pct=str(inode_pct),
                        limit=str(inode_pct_thresh),
                        param='openshift_check_efk_es_inode_pct',
                    )))
            disk_pct_thresh = self.get_var('openshift_check_efk_es_storage_pct', default='80')
            if int(disk_pct) >= int(disk_pct_thresh):
                errors.append(OpenShiftCheckException(
                    'DiskUsageTooHigh',
                    'Disk percent usage on the storage volume for logging ES pod "{pod}"\n'
                    '  is {pct}, greater than threshold {limit}.\n'
                    '  Note: threshold can be specified in inventory with {param}'.format(
                        pod=pod_name,
                        pct=str(disk_pct),
                        limit=str(disk_pct_thresh),
                        param='openshift_check_efk_es_storage_pct',
                    )))

        return errors
apache-2.0
Refefer/pylearn2
pylearn2/datasets/norb.py
44
14401
"""
An interface to the small NORB dataset. Unlike `./norb_small.py`, this reads
the original NORB file format, not the LISA lab's `.npy` version.

Currently only supports the Small NORB Dataset.

Download the dataset from
`here <http://www.cs.nyu.edu/~ylclab/data/norb-v1.0-small/>`_.

NORB dataset(s) by Fu Jie Huang and Yann LeCun.
"""

__authors__ = "Guillaume Desjardins and Matthew Koichi Grimes"
__copyright__ = "Copyright 2010-2014, Universite de Montreal"
__credits__ = __authors__.split(" and ")
__license__ = "3-clause BSD"
__maintainer__ = "Matthew Koichi Grimes"
__email__ = "mkg alum mit edu (@..)"


import bz2
import gzip
import logging
import os
import warnings
# DeprecationWarning is a builtin on py3; the 'exceptions' module only
# exists on py2, hence the guarded import
try:
    from exceptions import DeprecationWarning
except ImportError:
    pass
import numpy
import theano
from pylearn2.datasets import dense_design_matrix
from pylearn2.datasets.cache import datasetCache
from pylearn2.space import VectorSpace, Conv2DSpace, CompositeSpace
from pylearn2.datasets.new_norb import StereoViewConverter

logger = logging.getLogger(__name__)

warnings.warn("Using deprecated module pylearn2.datasets.norb. "
              "This will be replaced with pylearn2.datasets.new_norb in "
              "December 2014. Users are encouraged to switch to that "
              "module now.", DeprecationWarning)


class SmallNORB(dense_design_matrix.DenseDesignMatrix):
    """
    An interface to the small NORB dataset.

    If instantiated with default arguments, target labels are integers
    representing categories, which can be looked up using

      category_name = SmallNORB.get_category(label).

    If instantiated with multi_target=True, labels are vectors of indices
    representing:

      [ category, instance, elevation, azimuth, lighting ]

    Like with category, there are class methods that map these ints to their
    actual values, e.g:

      category = SmallNORB.get_category(label[0])
      elevation = SmallNORB.get_elevation_degrees(label[2])

    Parameters
    ----------
    which_set: str
        Must be 'train' or 'test'.
    multi_target: bool, optional
        If False, each label is an integer labeling the image catergory. If
        True, each label is a vector: [category, instance, lighting,
        elevation, azimuth]. All labels are given as integers. Use the
        categories, elevation_degrees, and azimuth_degrees arrays to map from
        these integers to actual values.
    """

    # Actual image shape may change, e.g. after being preprocessed by
    # datasets.preprocessing.Downsample
    original_image_shape = (96, 96)

    _categories = ['animal',  # four-legged animal
                   'human',  # human figure
                   'airplane',
                   'truck',
                   'car']

    @classmethod
    def get_category(cls, scalar_label):
        """
        Returns the category string corresponding to an integer category
        label.
        """
        return cls._categories[int(scalar_label)]

    @classmethod
    def get_elevation_degrees(cls, scalar_label):
        """
        Returns the elevation, in degrees, corresponding to an integer
        elevation label.
        """
        scalar_label = int(scalar_label)
        assert scalar_label >= 0
        assert scalar_label < 9
        # labels 0..8 map linearly onto 30..70 degrees in 5-degree steps
        return 30 + 5 * scalar_label

    @classmethod
    def get_azimuth_degrees(cls, scalar_label):
        """
        Returns the azimuth, in degrees, corresponding to an integer
        label.
        """
        scalar_label = int(scalar_label)
        assert scalar_label >= 0
        assert scalar_label <= 34
        # azimuth labels are the even ints 0..34; degrees = label * 10
        assert (scalar_label % 2) == 0
        return scalar_label * 10

    # Maps azimuth labels (ints) to their actual values, in degrees.
    # (18 values: 0, 20, ..., 340 -- consistent with num_labels_by_type)
    azimuth_degrees = numpy.arange(0, 341, 20)

    # Maps a label type to its index within a label vector.
    label_type_to_index = {'category': 0,
                           'instance': 1,
                           'elevation': 2,
                           'azimuth': 3,
                           'lighting': 4}

    # Number of labels, for each label type.
    num_labels_by_type = (len(_categories),
                          10,  # instances
                          9,  # elevations
                          18,  # azimuths
                          6)  # lighting

    # [mkg] Dropped support for the 'center' argument for now. In Pylearn 1,
    # it shifted the pixel values from [0:255] by subtracting 127.5. Seems
    # like a form of preprocessing, which might be better implemented
    # separately using the Preprocess class.
    def __init__(self, which_set, multi_target=False, stop=None):
        """
        parameters
        ----------

        which_set : str
            Must be 'train' or 'test'.

        multi_target : bool
            If False, each label is an integer labeling the image catergory.
            If True, each label is a vector: [category, instance, lighting,
            elevation, azimuth]. All labels are given as integers. Use the
            categories, elevation_degrees, and azimuth_degrees arrays to map
            from these integers to actual values.
        """
        assert which_set in ['train', 'test']

        self.which_set = which_set

        # 'stop' limits the number of examples loaded (prefix of the file)
        subtensor = slice(0, stop) if stop is not None else None

        X = SmallNORB.load(which_set, 'dat', subtensor=subtensor)

        # Casts to the GPU-supported float type, using theano._asarray(), a
        # safer alternative to numpy.asarray().
        #
        # TODO: move the dtype-casting to the view_converter's output space,
        # once dtypes-for-spaces is merged into master.
        X = theano._asarray(X, theano.config.floatX)

        # Formats data as rows in a matrix, for DenseDesignMatrix
        X = X.reshape(-1, 2 * numpy.prod(self.original_image_shape))

        # This is uint8
        y = SmallNORB.load(which_set, 'cat', subtensor=subtensor)
        if multi_target:
            y_extra = SmallNORB.load(which_set, 'info', subtensor=subtensor)
            y = numpy.hstack((y[:, numpy.newaxis], y_extra))

        datum_shape = ((2, ) +  # two stereo images
                       self.original_image_shape +
                       (1, ))  # one color channel

        # 's' is the stereo channel: 0 (left) or 1 (right)
        axes = ('b', 's', 0, 1, 'c')
        view_converter = StereoViewConverter(datum_shape, axes)

        super(SmallNORB, self).__init__(X=X,
                                        y=y,
                                        y_labels=numpy.max(y) + 1,
                                        view_converter=view_converter)

    @classmethod
    def load(cls, which_set, filetype, subtensor):
        """Reads and returns a single file as a numpy array."""

        assert which_set in ['train', 'test']
        assert filetype in ['dat', 'cat', 'info']

        def getPath(which_set):
            # Build the canonical small-NORB filename under
            # $PYLEARN2_DATA_PATH/norb_small/original
            dirname = os.path.join(os.getenv('PYLEARN2_DATA_PATH'),
                                   'norb_small/original')
            if which_set == 'train':
                instance_list = '46789'
            elif which_set == 'test':
                instance_list = '01235'

            filename = 'smallnorb-5x%sx9x18x6x2x96x96-%s-%s.mat' % \
                (instance_list, which_set + 'ing', filetype)

            return os.path.join(dirname, filename)

        def parseNORBFile(file_handle, subtensor=None, debug=False):
            """
            Load all or part of file 'file_handle' into a numpy ndarray

            .. todo::

                WRITEME properly

            :param file_handle: file from which to read file can be opended
              with open(), gzip.open() and bz2.BZ2File()
              @type file_handle: file-like object. Can be a gzip open file.

            :param subtensor: If subtensor is not None, it should be like the
              argument to numpy.ndarray.__getitem__.  The following two
              expressions should return equivalent ndarray objects, but the one
              on the left may be faster and more memory efficient if the
              underlying file f is big.

              read(file_handle, subtensor) <===> read(file_handle)[*subtensor]

              Support for subtensors is currently spotty, so check the code to
              see if your particular type of subtensor is supported.
            """

            def readNums(file_handle, num_type, count):
                """
                Reads 'count' values of dtype 'num_type' from the file and
                returns them as a numpy array.
                """
                num_bytes = count * numpy.dtype(num_type).itemsize
                string = file_handle.read(num_bytes)
                # NOTE(review): numpy.fromstring is deprecated in favor of
                # numpy.frombuffer in modern numpy
                return numpy.fromstring(string, dtype=num_type)

            def readHeader(file_handle, debug=False, from_gzip=None):
                """
                .. todo::

                    WRITEME properly

                :param file_handle: an open file handle.
                :type file_handle: a file or gzip.GzipFile object

                :param from_gzip: bool or None
                :type from_gzip: if None determine the type of file handle.

                :returns: data type, element size, rank, shape, size
                """

                if from_gzip is None:
                    from_gzip = isinstance(file_handle,
                                           (gzip.GzipFile, bz2.BZ2File))

                # NORB binary-matrix magic numbers -> (dtype, element size)
                key_to_type = {0x1E3D4C51: ('float32', 4),
                               # what is a packed matrix?
                               # 0x1E3D4C52: ('packed matrix', 0),
                               0x1E3D4C53: ('float64', 8),
                               0x1E3D4C54: ('int32', 4),
                               0x1E3D4C55: ('uint8', 1),
                               0x1E3D4C56: ('int16', 2)}

                type_key = readNums(file_handle, 'int32', 1)[0]
                elem_type, elem_size = key_to_type[type_key]
                if debug:
                    logger.debug("header's type key, type, type size: "
                                 "{0} {1} {2}".format(type_key, elem_type,
                                                      elem_size))
                if elem_type == 'packed matrix':
                    raise NotImplementedError('packed matrix not supported')

                num_dims = readNums(file_handle, 'int32', 1)[0]
                if debug:
                    logger.debug('# of dimensions, according to header: '
                                 '{0}'.format(num_dims))

                # the header always stores at least 3 dimension sizes, even
                # for lower-rank tensors; read them all, keep only num_dims
                if from_gzip:
                    shape = readNums(file_handle,
                                     'int32',
                                     max(num_dims, 3))[:num_dims]
                else:
                    shape = numpy.fromfile(file_handle,
                                           dtype='int32',
                                           count=max(num_dims, 3))[:num_dims]

                if debug:
                    logger.debug('Tensor shape, as listed in header: '
                                 '{0}'.format(shape))

                return elem_type, elem_size, shape

            elem_type, elem_size, shape = readHeader(file_handle, debug)
            beginning = file_handle.tell()

            num_elems = numpy.prod(shape)

            result = None
            if isinstance(file_handle, (gzip.GzipFile, bz2.BZ2File)):
                assert subtensor is None, \
                    "Subtensors on gzip files are not implemented."
                result = readNums(file_handle,
                                  elem_type,
                                  num_elems * elem_size).reshape(shape)
            elif subtensor is None:
                result = numpy.fromfile(file_handle,
                                        dtype=elem_type,
                                        count=num_elems).reshape(shape)
            elif isinstance(subtensor, slice):
                if subtensor.step not in (None, 1):
                    raise NotImplementedError('slice with step',
                                              subtensor.step)
                if subtensor.start not in (None, 0):
                    # seek directly to the first requested row
                    bytes_per_row = numpy.prod(shape[1:]) * elem_size
                    file_handle.seek(
                        beginning + subtensor.start * bytes_per_row)
                shape[0] = min(shape[0], subtensor.stop) - subtensor.start
                num_elems = numpy.prod(shape)
                result = numpy.fromfile(file_handle,
                                        dtype=elem_type,
                                        count=num_elems).reshape(shape)
            else:
                raise NotImplementedError('subtensor access not written yet:',
                                          subtensor)

            return result

        fname = getPath(which_set)
        fname = datasetCache.cache_file(fname)
        # NOTE(review): opens a binary file without 'rb'; equivalent on
        # py2/Unix, but would break on Windows or py3 -- confirm intended
        # platform before changing
        file_handle = open(fname)

        return parseNORBFile(file_handle, subtensor)

    def get_topological_view(self, mat=None, single_tensor=True):
        """
        .. todo::

            WRITEME
        """
        result = super(SmallNORB, self).get_topological_view(mat)

        if single_tensor:
            warnings.warn("The single_tensor argument is True by default to "
                          "maintain backwards compatibility. This argument "
                          "will be removed, and the behavior will become that "
                          "of single_tensor=False, as of August 2014.")
            axes = list(self.view_converter.axes)
            s_index = axes.index('s')
            assert axes.index('b') == 0
            num_image_pairs = result[0].shape[0]
            shape = (num_image_pairs, ) + self.view_converter.shape

            # inserts a singleton dimension where the 's' dimesion will be
            mono_shape = shape[:s_index] + (1, ) + shape[(s_index + 1):]

            for i, res in enumerate(result):
                logger.info("result {0} shape: {1}".format(i, str(res.shape)))

            # stack the left/right mono tensors along the stereo axis
            result = tuple(t.reshape(mono_shape) for t in result)
            result = numpy.concatenate(result, axis=s_index)
        else:
            warnings.warn("The single_tensor argument will be removed on "
                          "August 2014. The behavior will be the same as "
                          "single_tensor=False.")

        return result
bsd-3-clause
appop/bitcoin
qa/rpc-tests/invalidateblock.py
1
3083
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The nealcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the invalidateblock RPC."""

from test_framework.test_framework import nealcoinTestFramework
from test_framework.util import *


class InvalidateTest(nealcoinTestFramework):
    """Functional test: invalidating blocks forces reorgs to the best
    remaining valid chain, and never to a lower-work chain."""

    def __init__(self):
        super().__init__()
        # start each node from a fresh chain so block heights are predictable
        self.setup_clean_chain = True
        self.num_nodes = 3

    def setup_network(self):
        # start the three nodes deliberately unconnected; connections are
        # made inside run_test to control when reorgs happen
        self.nodes = []
        self.is_network_split = False
        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"]))
        self.nodes.append(start_node(1, self.options.tmpdir, ["-debug"]))
        self.nodes.append(start_node(2, self.options.tmpdir, ["-debug"]))

    def run_test(self):
        print("Make sure we repopulate setBlockIndexCandidates after InvalidateBlock:")
        print("Mine 4 blocks on Node 0")
        self.nodes[0].generate(4)
        assert(self.nodes[0].getblockcount() == 4)
        besthash = self.nodes[0].getbestblockhash()

        print("Mine competing 6 blocks on Node 1")
        self.nodes[1].generate(6)
        assert(self.nodes[1].getblockcount() == 6)

        # node 0 should reorg to node 1's longer (more-work) chain
        print("Connect nodes to force a reorg")
        connect_nodes_bi(self.nodes,0,1)
        sync_blocks(self.nodes[0:2])
        assert(self.nodes[0].getblockcount() == 6)
        badhash = self.nodes[1].getblockhash(2)

        # invalidating block 2 of the 6-block chain should make node 0 fall
        # back to its own original 4-block chain
        print("Invalidate block 2 on node 0 and verify we reorg to node 0's original chain")
        self.nodes[0].invalidateblock(badhash)
        newheight = self.nodes[0].getblockcount()
        newhash = self.nodes[0].getbestblockhash()
        if (newheight != 4 or newhash != besthash):
            raise AssertionError("Wrong tip for node0, hash %s, height %d"%(newhash,newheight))

        print("\nMake sure we won't reorg to a lower work chain:")
        connect_nodes_bi(self.nodes,1,2)
        print("Sync node 2 to node 1 so both have 6 blocks")
        sync_blocks(self.nodes[1:3])
        assert(self.nodes[2].getblockcount() == 6)
        print("Invalidate block 5 on node 1 so its tip is now at 4")
        self.nodes[1].invalidateblock(self.nodes[1].getblockhash(5))
        assert(self.nodes[1].getblockcount() == 4)
        print("Invalidate block 3 on node 2, so its tip is now 2")
        self.nodes[2].invalidateblock(self.nodes[2].getblockhash(3))
        assert(self.nodes[2].getblockcount() == 2)
        print("..and then mine a block")
        self.nodes[2].generate(1)
        print("Verify all nodes are at the right height")
        # fixed delay to let any (incorrect) reorg propagate before checking;
        # NOTE(review): a poll/wait helper would be faster and less flaky
        # than a hard-coded sleep
        time.sleep(5)
        for i in range(3):
            print(i,self.nodes[i].getblockcount())
        # node 2's 3-block chain has less work than node 1's 4 blocks, so
        # neither node 0 nor node 1 should have reorged down to it
        assert(self.nodes[2].getblockcount() == 3)
        assert(self.nodes[0].getblockcount() == 4)
        node1height = self.nodes[1].getblockcount()
        if node1height < 4:
            raise AssertionError("Node 1 reorged to a lower height: %d"%node1height)

if __name__ == '__main__':
    InvalidateTest().main()
mit
xzturn/tensorflow
tensorflow/python/platform/analytics.py
21
1090
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Analytics helpers library.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function def track_usage(tool_id, tags): """No usage tracking for external library. Args: tool_id: A string identifier for tool to be tracked. tags: list of string tags that will be added to the tracking. """ del tool_id, tags # Unused externally.
apache-2.0
alikins/ansible
lib/ansible/modules/cloud/cloudscale/cloudscale_floating_ip.py
49
9740
#!/usr/bin/python # -*- coding: utf-8 -*- # # (c) 2017, Gaudenz Steinlin <gaudenz.steinlin@cloudscale.ch> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: cloudscale_floating_ip short_description: Manages floating IPs on the cloudscale.ch IaaS service description: - Create, assign and delete floating IPs on the cloudscale.ch IaaS service. - All operations are performed using the cloudscale.ch public API v1. - "For details consult the full API documentation: U(https://www.cloudscale.ch/en/api/v1)." - A valid API token is required for all operations. You can create as many tokens as you like using the cloudscale.ch control panel at U(https://control.cloudscale.ch). notes: - Instead of the api_token parameter the CLOUDSCALE_API_TOKEN environment variable can be used. - To create a new floating IP at least the C(ip_version) and C(server) options are required. - Once a floating_ip is created all parameters except C(server) are read-only. - It's not possible to request a floating IP without associating it with a server at the same time. - This module requires the ipaddress python library. This library is included in Python since version 3.3. It is available as a module on PyPi for earlier versions. version_added: 2.5 author: "Gaudenz Steinlin (@gaudenz) <gaudenz.steinlin@cloudscale.ch>" options: state: description: - State of the floating IP. default: present choices: [ present, absent ] ip: description: - Floating IP address to change. - Required to assign the IP to a different server or if I(state) is absent. aliases: [ network ] ip_version: description: - IP protocol version of the floating IP. choices: [ 4, 6 ] server: description: - UUID of the server assigned to this floating IP. 
- Required unless I(state) is absent. prefix_length: description: - Only valid if I(ip_version) is 6. - Prefix length for the IPv6 network. Currently only a prefix of /56 can be requested. If no I(prefix_length) is present, a single address is created. choices: [ 56 ] reverse_ptr: description: - Reverse PTR entry for this address. - You cannot set a reverse PTR entry for IPv6 floating networks. Reverse PTR entries are only allowed for single addresses. api_token: description: - cloudscale.ch API token. - This can also be passed in the CLOUDSCALE_API_TOKEN environment variable. api_timeout: description: - Timeout in seconds for calls to the cloudscale.ch API. default: 30 ''' EXAMPLES = ''' # Request a new floating IP - name: Request a floating IP cloudscale_floating_ip: ip_version: 4 server: 47cec963-fcd2-482f-bdb6-24461b2d47b1 reverse_ptr: my-server.example.com api_token: xxxxxx register: floating_ip # Assign an existing floating IP to a different server - name: Move floating IP to backup server cloudscale_floating_ip: ip: 192.0.2.123 server: ea3b39a3-77a8-4d0b-881d-0bb00a1e7f48 api_token: xxxxxx # Request a new floating IPv6 network - name: Request a floating IP cloudscale_floating_ip: ip_version: 6 prefix_length: 56 server: 47cec963-fcd2-482f-bdb6-24461b2d47b1 api_token: xxxxxx register: floating_ip # Assign an existing floating network to a different server - name: Move floating IP to backup server cloudscale_floating_ip: ip: '{{ floating_ip.network | ip }}' server: ea3b39a3-77a8-4d0b-881d-0bb00a1e7f48 api_token: xxxxxx # Release a floating IP - name: Release floating IP cloudscale_floating_ip: ip: 192.0.2.123 state: absent api_token: xxxxxx ''' RETURN = ''' href: description: The API URL to get details about this floating IP. returned: success when state == present type: string sample: https://api.cloudscale.ch/v1/floating-ips/2001:db8::cafe network: description: The CIDR notation of the network that is routed to your server. 
returned: success when state == present type: string sample: 2001:db8::cafe/128 next_hop: description: Your floating IP is routed to this IP address. returned: success when state == present type: string sample: 2001:db8:dead:beef::42 reverse_ptr: description: The reverse pointer for this floating IP address. returned: success when state == present type: string sample: 185-98-122-176.cust.cloudscale.ch server: description: The floating IP is routed to this server. returned: success when state == present type: string sample: 47cec963-fcd2-482f-bdb6-24461b2d47b1 ip: description: The floating IP address or network. This is always present and used to identify floating IPs after creation. returned: success type: string sample: 185.98.122.176 state: description: The current status of the floating IP. returned: success type: string sample: present ''' import os try: from ipaddress import ip_network HAS_IPADDRESS = True except ImportError: HAS_IPADDRESS = False from ansible.module_utils.basic import AnsibleModule, env_fallback from ansible.module_utils.cloudscale import AnsibleCloudscaleBase, cloudscale_argument_spec class AnsibleCloudscaleFloatingIP(AnsibleCloudscaleBase): def __init__(self, module): super(AnsibleCloudscaleFloatingIP, self).__init__(module) # Initialize info dict # Set state to absent, will be updated by self.update_info() self.info = {'state': 'absent'} if self._module.params['ip']: self.update_info() @staticmethod def _resp2info(resp): # If the API response has some content, the floating IP must exist resp['state'] = 'present' # Add the IP address to the response, otherwise handling get's to complicated as this # has to be converted from the network all the time. 
resp['ip'] = str(ip_network(resp['network']).network_address) # Replace the server with just the UUID, the href to the server is useless and just makes # things more complicated resp['server'] = resp['server']['uuid'] return resp def update_info(self): resp = self._get('floating-ips/' + self._module.params['ip']) if resp: self.info = self._resp2info(resp) else: self.info = {'ip': self._module.params['ip'], 'state': 'absent'} def request_floating_ip(self): params = self._module.params # check for required parameters to request a floating IP missing_parameters = [] for p in ('ip_version', 'server'): if p not in params or not params[p]: missing_parameters.append(p) if len(missing_parameters) > 0: self._module.fail_json(msg='Missing required parameter(s) to request a floating IP: %s.' % ' '.join(missing_parameters)) data = {'ip_version': params['ip_version'], 'server': params['server']} if params['prefix_length']: data['prefix_length'] = params['prefix_length'] if params['reverse_ptr']: data['reverse_ptr'] = params['reverse_ptr'] self.info = self._resp2info(self._post('floating-ips', data)) def release_floating_ip(self): self._delete('floating-ips/%s' % self._module.params['ip']) self.info = {'ip': self.info['ip'], 'state': 'absent'} def update_floating_ip(self): params = self._module.params if 'server' not in params or not params['server']: self._module.fail_json(msg='Missing required parameter to update a floating IP: server.') self.info = self._resp2info(self._post('floating-ips/%s' % params['ip'], {'server': params['server']})) def main(): argument_spec = cloudscale_argument_spec() argument_spec.update(dict( state=dict(default='present', choices=('present', 'absent')), ip=dict(aliases=('network', )), ip_version=dict(choices=(4, 6), type='int'), server=dict(), prefix_length=dict(choices=(56,), type='int'), reverse_ptr=dict(), )) module = AnsibleModule( argument_spec=argument_spec, required_one_of=(('ip', 'ip_version'),), supports_check_mode=True, ) if not 
HAS_IPADDRESS: module.fail_json(msg='Could not import the python library ipaddress required by this module') target_state = module.params['state'] target_server = module.params['server'] floating_ip = AnsibleCloudscaleFloatingIP(module) current_state = floating_ip.info['state'] current_server = floating_ip.info['server'] if 'server' in floating_ip.info else None if module.check_mode: module.exit_json(changed=not target_state == current_state or (current_state == 'present' and current_server != target_server), **floating_ip.info) changed = False if current_state == 'absent' and target_state == 'present': floating_ip.request_floating_ip() changed = True elif current_state == 'present' and target_state == 'absent': floating_ip.release_floating_ip() changed = True elif current_state == 'present' and current_server != target_server: floating_ip.update_floating_ip() changed = True module.exit_json(changed=changed, **floating_ip.info) if __name__ == '__main__': main()
gpl-3.0
idjaw/keystone
keystone/contrib/endpoint_policy/backends/sql.py
11
1154
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging from oslo_log import versionutils from keystone.endpoint_policy.backends import sql LOG = logging.getLogger(__name__) _OLD = 'keystone.contrib.endpoint_policy.backends.sql.EndpointPolicy' _NEW = 'keystone.endpoint_policy.backends.sql.EndpointPolicy' class EndpointPolicy(sql.EndpointPolicy): @versionutils.deprecated(versionutils.deprecated.LIBERTY, in_favor_of=_NEW, remove_in=1, what=_OLD) def __init__(self, *args, **kwargs): super(EndpointPolicy, self).__init__(*args, **kwargs)
apache-2.0
kumajaya/android_kernel_samsung_lt02
tools/perf/python/twatch.py
7370
1334
#! /usr/bin/python # -*- python -*- # -*- coding: utf-8 -*- # twatch - Experimental use of the perf python interface # Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com> # # This application is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; version 2. # # This application is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. import perf def main(): cpus = perf.cpu_map() threads = perf.thread_map() evsel = perf.evsel(task = 1, comm = 1, mmap = 0, wakeup_events = 1, watermark = 1, sample_id_all = 1, sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID) evsel.open(cpus = cpus, threads = threads); evlist = perf.evlist(cpus, threads) evlist.add(evsel) evlist.mmap() while True: evlist.poll(timeout = -1) for cpu in cpus: event = evlist.read_on_cpu(cpu) if not event: continue print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu, event.sample_pid, event.sample_tid), print event if __name__ == '__main__': main()
gpl-2.0
autopulated/yotta
yotta/test/cli/test_shrinkwrap.py
3
5298
#!/usr/bin/env python # Copyright 2015 ARM Limited # # Licensed under the Apache License, Version 2.0 # See LICENSE file for details. # standard library modules, , , import unittest import os import copy # internal modules: from yotta.test.cli import cli from yotta.test.cli import util Test_Target = "x86-osx-native,*" Test_Target_Name = 'x86-osx-native' Test_Target_Old_Version = '0.0.7' Test_Shrinkwrap = { 'module.json':'''{ "name": "test-shrinkwrap", "version": "0.0.0", "description": "Test yotta shrinkwrap", "author": "James Crosby <james.crosby@arm.com>", "license": "Apache-2.0", "dependencies":{ "test-testing-dummy": "*" } }''', 'source/foo.c':'''#include "stdio.h" int foo(){ printf("foo!\\n"); return 7; }''', # test-testing-dummy v0.0.1 (a newer version is available from the registry) 'yotta_modules/test-testing-dummy/module.json':'''{ "name": "test-testing-dummy", "version": "0.0.1", "description": "Test yotta's compilation of tests.", "author": "James Crosby <james.crosby@arm.com>", "license": "Apache-2.0" } ''' } Test_Shrinkwrap_Missing_Dependency = { 'module.json':'''{ "name": "test-shrinkwrap", "version": "0.0.0", "description": "Test yotta shrinkwrap", "author": "James Crosby <james.crosby@arm.com>", "license": "Apache-2.0", "dependencies":{ "test-testing-dummy": "*" } }''', 'source/foo.c':'''#include "stdio.h" int foo(){ printf("foo!\\n"); return 7; }''' } Test_Existing_Shrinkwrap_Missing_Dependency = copy.copy(Test_Shrinkwrap_Missing_Dependency) Test_Existing_Shrinkwrap_Missing_Dependency['yotta-shrinkwrap.json'] = ''' { "modules": [ { "version": "0.0.0", "name": "test-shrinkwrap" }, { "version": "0.0.1", "name": "test-testing-dummy" } ], "targets": [ { "version": "%s", "name": "%s" } ] }''' % (Test_Target_Old_Version, Test_Target_Name) Test_Existing_Shrinkwrap = copy.copy(Test_Shrinkwrap) Test_Existing_Shrinkwrap['yotta_targets/inherits-from-test-target/target.json'] = '''{ "name": "inherits-from-test-target", "version": "1.0.0", "license": 
"Apache-2.0", "inherits": { "%s": "*" } }''' % Test_Target_Name Test_Existing_Shrinkwrap['yotta-shrinkwrap.json'] = ''' { "modules": [ { "version": "0.0.0", "name": "test-shrinkwrap" }, { "version": "0.0.1", "name": "test-testing-dummy" } ], "targets": [ { "version": "%s", "name": "%s" } ] }''' % (Test_Target_Old_Version, Test_Target_Name) class TestCLIShrinkwrap(unittest.TestCase): def testCreateShrinkwrap(self): test_dir = util.writeTestFiles(Test_Shrinkwrap, True) stdout, stderr, statuscode = cli.run(['-t', Test_Target, '--plain', 'shrinkwrap'], cwd=test_dir) self.assertEqual(statuscode, 0) self.assertTrue(os.path.exists(os.path.join(test_dir, 'yotta-shrinkwrap.json'))) util.rmRf(test_dir) def testMissingDependenciesShrinkwrap(self): test_dir = util.writeTestFiles(Test_Shrinkwrap_Missing_Dependency, True) stdout, stderr, statuscode = cli.run(['-t', Test_Target, '--plain', 'shrinkwrap'], cwd=test_dir) self.assertNotEqual(statuscode, 0) self.assertFalse(os.path.exists(os.path.join(test_dir, 'yotta-shrinkwrap.json'))) self.assertIn('is missing', stdout+stderr) util.rmRf(test_dir) def testInstallWithShrinkwrap(self): test_dir = util.writeTestFiles(Test_Existing_Shrinkwrap_Missing_Dependency, True) stdout, stderr, statuscode = cli.run(['-t', Test_Target, '--plain', 'install'], cwd=test_dir) self.assertEqual(statuscode, 0) stdout, stderr, statuscode = cli.run(['-t', Test_Target, '--plain', 'list'], cwd=test_dir) self.assertEqual(statuscode, 0) # as opposed to 0.0.2 which is the latest self.assertIn('test-testing-dummy 0.0.1', stdout+stderr) stdout, stderr, statuscode = cli.run(['-t', Test_Target, '--plain', 'target'], cwd=test_dir) self.assertEqual(statuscode, 0) self.assertIn('%s %s' % (Test_Target_Name, Test_Target_Old_Version), stdout+stderr) util.rmRf(test_dir) def testBaseTargetInstallWithShrinkwrap(self): test_dir = util.writeTestFiles(Test_Existing_Shrinkwrap, True) stdout, stderr, statuscode = cli.run(['-t', 'inherits-from-test-target', '--plain', 'install'], 
cwd=test_dir) self.assertEqual(statuscode, 0) stdout, stderr, statuscode = cli.run(['-t', 'inherits-from-test-target', '--plain', 'target'], cwd=test_dir) self.assertEqual(statuscode, 0) self.assertIn('%s %s' % (Test_Target_Name, Test_Target_Old_Version), stdout+stderr) util.rmRf(test_dir) def testUpdateWithShrinkwrap(self): test_dir = util.writeTestFiles(Test_Existing_Shrinkwrap, True) stdout, stderr, statuscode = cli.run(['-t', Test_Target, '--plain', 'update'], cwd=test_dir) self.assertEqual(statuscode, 0) stdout, stderr, statuscode = cli.run(['-t', Test_Target, '--plain', 'list'], cwd=test_dir) self.assertEqual(statuscode, 0) # as opposed to 0.0.2 which is the latest self.assertIn('test-testing-dummy 0.0.1', stdout+stderr) util.rmRf(test_dir)
apache-2.0
waynew/salmon
salmon/data/prototype/config/testing.py
2
1091
from config import settings from salmon import view from salmon.routing import Router from salmon.server import Relay import jinja2 import logging import logging.config import os logging.config.fileConfig("config/test_logging.conf") # the relay host to actually send the final message to (set debug=1 to see what # the relay is saying to the log server). settings.relay = Relay(host=settings.relay_config['host'], port=settings.relay_config['port'], debug=0) settings.receiver = None Router.defaults(**settings.router_defaults) Router.load(settings.handlers) Router.RELOAD=True Router.LOG_EXCEPTIONS=False view.LOADER = jinja2.Environment( loader=jinja2.PackageLoader(settings.template_config['dir'], settings.template_config['module'])) # if you have pyenchant and enchant installed then the template tests will do # spell checking for you, but you need to tell pyenchant where to find itself # if 'PYENCHANT_LIBRARY_PATH' not in os.environ: # os.environ['PYENCHANT_LIBRARY_PATH'] = '/opt/local/lib/libenchant.dylib'
gpl-3.0
pwmarcz/django
django/contrib/gis/geos/prototypes/errcheck.py
48
3013
""" Error checking functions for GEOS ctypes prototype functions. """ from ctypes import c_void_p, string_at from django.contrib.gis.geos.error import GEOSException from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc # Getting the `free` routine used to free the memory allocated for # string pointers returned by GEOS. free = GEOSFunc('GEOSFree') free.argtypes = [c_void_p] free.restype = None ### ctypes error checking routines ### def last_arg_byref(args): "Returns the last C argument's value by reference." return args[-1]._obj.value def check_dbl(result, func, cargs): "Checks the status code and returns the double value passed in by reference." # Checking the status code if result != 1: return None # Double passed in by reference, return its value. return last_arg_byref(cargs) def check_geom(result, func, cargs): "Error checking on routines that return Geometries." if not result: raise GEOSException('Error encountered checking Geometry returned from GEOS C function "%s".' % func.__name__) return result def check_minus_one(result, func, cargs): "Error checking on routines that should not return -1." if result == -1: raise GEOSException('Error encountered in GEOS C function "%s".' % func.__name__) else: return result def check_predicate(result, func, cargs): "Error checking for unary/binary predicate functions." val = ord(result) # getting the ordinal from the character if val == 1: return True elif val == 0: return False else: raise GEOSException('Error encountered on GEOS C predicate function "%s".' % func.__name__) def check_sized_string(result, func, cargs): """ Error checking for routines that return explicitly sized strings. This frees the memory allocated by GEOS at the result pointer. """ if not result: raise GEOSException('Invalid string pointer returned by GEOS C function "%s"' % func.__name__) # A c_size_t object is passed in by reference for the second # argument on these routines, and its needed to determine the # correct size. 
s = string_at(result, last_arg_byref(cargs)) # Freeing the memory allocated within GEOS free(result) return s def check_string(result, func, cargs): """ Error checking for routines that return strings. This frees the memory allocated by GEOS at the result pointer. """ if not result: raise GEOSException('Error encountered checking string return value in GEOS C function "%s".' % func.__name__) # Getting the string value at the pointer address. s = string_at(result) # Freeing the memory allocated within GEOS free(result) return s def check_zero(result, func, cargs): "Error checking on routines that should not return 0." if result == 0: raise GEOSException('Error encountered in GEOS C function "%s".' % func.__name__) else: return result
bsd-3-clause
pigeonflight/strider-plone
docker/appengine/lib/django-1.4/tests/regressiontests/utils/decorators.py
43
3919
from django.http import HttpResponse from django.template import Template, Context from django.template.response import TemplateResponse from django.test import TestCase, RequestFactory from django.utils.decorators import decorator_from_middleware class ProcessViewMiddleware(object): def process_view(self, request, view_func, view_args, view_kwargs): pass process_view_dec = decorator_from_middleware(ProcessViewMiddleware) @process_view_dec def process_view(request): return HttpResponse() class ClassProcessView(object): def __call__(self, request): return HttpResponse() class_process_view = process_view_dec(ClassProcessView()) class FullMiddleware(object): def process_request(self, request): request.process_request_reached = True def process_view(sef, request, view_func, view_args, view_kwargs): request.process_view_reached = True def process_template_response(self, request, response): request.process_template_response_reached = True return response def process_response(self, request, response): # This should never receive unrendered content. request.process_response_content = response.content request.process_response_reached = True return response full_dec = decorator_from_middleware(FullMiddleware) class DecoratorFromMiddlewareTests(TestCase): """ Tests for view decorators created using ``django.utils.decorators.decorator_from_middleware``. """ rf = RequestFactory() def test_process_view_middleware(self): """ Test a middleware that implements process_view. """ process_view(self.rf.get('/')) def test_callable_process_view_middleware(self): """ Test a middleware that implements process_view, operating on a callable class. 
""" class_process_view(self.rf.get('/')) def test_full_dec_normal(self): """ Test that all methods of middleware are called for normal HttpResponses """ @full_dec def normal_view(request): t = Template("Hello world") return HttpResponse(t.render(Context({}))) request = self.rf.get('/') response = normal_view(request) self.assertTrue(getattr(request, 'process_request_reached', False)) self.assertTrue(getattr(request, 'process_view_reached', False)) # process_template_response must not be called for HttpResponse self.assertFalse(getattr(request, 'process_template_response_reached', False)) self.assertTrue(getattr(request, 'process_response_reached', False)) def test_full_dec_templateresponse(self): """ Test that all methods of middleware are called for TemplateResponses in the right sequence. """ @full_dec def template_response_view(request): t = Template("Hello world") return TemplateResponse(request, t, {}) request = self.rf.get('/') response = template_response_view(request) self.assertTrue(getattr(request, 'process_request_reached', False)) self.assertTrue(getattr(request, 'process_view_reached', False)) self.assertTrue(getattr(request, 'process_template_response_reached', False)) # response must not be rendered yet. self.assertFalse(response._is_rendered) # process_response must not be called until after response is rendered, # otherwise some decorators like csrf_protect and gzip_page will not # work correctly. See #16004 self.assertFalse(getattr(request, 'process_response_reached', False)) response.render() self.assertTrue(getattr(request, 'process_response_reached', False)) # Check that process_response saw the rendered content self.assertEqual(request.process_response_content, "Hello world")
mit
gmacchi93/serverInfoParaguay
apps/venv/lib/python2.7/site-packages/pip/exceptions.py
398
1086
"""Exceptions used throughout package""" class PipError(Exception): """Base pip exception""" class InstallationError(PipError): """General exception during installation""" class UninstallationError(PipError): """General exception during uninstallation""" class DistributionNotFound(InstallationError): """Raised when a distribution cannot be found to satisfy a requirement""" class BestVersionAlreadyInstalled(PipError): """Raised when the most up-to-date version of a package is already installed. """ class BadCommand(PipError): """Raised when virtualenv or a command is not found""" class CommandError(PipError): """Raised when there is an error in command-line arguments""" class PreviousBuildDirError(PipError): """Raised when there's a previous conflicting build directory""" class HashMismatch(InstallationError): """Distribution file hash values don't match.""" class InvalidWheelFilename(InstallationError): """Invalid wheel filename.""" class UnsupportedWheel(InstallationError): """Unsupported wheel."""
apache-2.0
shishaochen/TensorFlow-0.8-Win
third_party/grpc/tools/buildgen/plugins/generate_vsprojects.py
59
3748
# Copyright 2015, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Buildgen vsprojects plugin. This parses the list of libraries, and generates globals "vsprojects" and "vsproject_dict", to be used by the visual studio generators. """ import hashlib import re def mako_plugin(dictionary): """The exported plugin code for generate_vsprojeccts We want to help the work of the visual studio generators. 
""" libs = dictionary.get('libs', []) targets = dictionary.get('targets', []) for lib in libs: lib['is_library'] = True for target in targets: target['is_library'] = False projects = [] projects.extend(libs) projects.extend(targets) for target in projects: if 'build' in target and target['build'] == 'test': default_test_dir = 'test' else: default_test_dir = '.' if 'vs_config_type' not in target: if 'build' in target and target['build'] == 'test': target['vs_config_type'] = 'Application' else: target['vs_config_type'] = 'StaticLibrary' if 'vs_packages' not in target: target['vs_packages'] = [] if 'vs_props' not in target: target['vs_props'] = [] target['vs_proj_dir'] = target.get('vs_proj_dir', default_test_dir) if target.get('vs_project_guid', None) is None and 'windows' in target.get('platforms', ['windows']): name = target['name'] guid = re.sub('(........)(....)(....)(....)(.*)', r'{\1-\2-\3-\4-\5}', hashlib.md5(name).hexdigest()) target['vs_project_guid'] = guid.upper() # Exclude projects without a visual project guid, such as the tests. projects = [project for project in projects if project.get('vs_project_guid', None)] projects = [project for project in projects if project['language'] != 'c++' or project['build'] == 'all' or project['build'] == 'protoc' or (project['language'] == 'c++' and (project['build'] == 'test' or project['build'] == 'private'))] project_dict = dict([(p['name'], p) for p in projects]) packages = dictionary.get('vspackages', []) packages_dict = dict([(p['name'], p) for p in packages]) dictionary['vsprojects'] = projects dictionary['vsproject_dict'] = project_dict dictionary['vspackages_dict'] = packages_dict
apache-2.0
mozilla/verbatim
vendor/lib/python/django/contrib/admin/actions.py
98
3205
""" Built-in, globally-available admin actions. """ from django.core.exceptions import PermissionDenied from django.contrib.admin import helpers from django.contrib.admin.util import get_deleted_objects, model_ngettext from django.db import router from django.template.response import TemplateResponse from django.utils.encoding import force_unicode from django.utils.translation import ugettext_lazy, ugettext as _ def delete_selected(modeladmin, request, queryset): """ Default action which deletes the selected objects. This action first displays a confirmation page whichs shows all the deleteable objects, or, if the user has no permission one of the related childs (foreignkeys), a "permission denied" message. Next, it delets all selected objects and redirects back to the change list. """ opts = modeladmin.model._meta app_label = opts.app_label # Check that the user has delete permission for the actual model if not modeladmin.has_delete_permission(request): raise PermissionDenied using = router.db_for_write(modeladmin.model) # Populate deletable_objects, a data structure of all related objects that # will also be deleted. deletable_objects, perms_needed, protected = get_deleted_objects( queryset, opts, request.user, modeladmin.admin_site, using) # The user has already confirmed the deletion. # Do the deletion and return a None to display the change list view again. if request.POST.get('post'): if perms_needed: raise PermissionDenied n = queryset.count() if n: for obj in queryset: obj_display = force_unicode(obj) modeladmin.log_deletion(request, obj, obj_display) queryset.delete() modeladmin.message_user(request, _("Successfully deleted %(count)d %(items)s.") % { "count": n, "items": model_ngettext(modeladmin.opts, n) }) # Return None to display the change list page again. 
return None if len(queryset) == 1: objects_name = force_unicode(opts.verbose_name) else: objects_name = force_unicode(opts.verbose_name_plural) if perms_needed or protected: title = _("Cannot delete %(name)s") % {"name": objects_name} else: title = _("Are you sure?") context = { "title": title, "objects_name": objects_name, "deletable_objects": [deletable_objects], 'queryset': queryset, "perms_lacking": perms_needed, "protected": protected, "opts": opts, "app_label": app_label, 'action_checkbox_name': helpers.ACTION_CHECKBOX_NAME, } # Display the confirmation page return TemplateResponse(request, modeladmin.delete_selected_confirmation_template or [ "admin/%s/%s/delete_selected_confirmation.html" % (app_label, opts.object_name.lower()), "admin/%s/delete_selected_confirmation.html" % app_label, "admin/delete_selected_confirmation.html" ], context, current_app=modeladmin.admin_site.name) delete_selected.short_description = ugettext_lazy("Delete selected %(verbose_name_plural)s")
gpl-2.0
breisfeld/avoplot
src/AvoPlot.py
3
1352
#!/usr/bin/python
#Copyright (C) Nial Peters 2013
#
#This file is part of AvoPlot.
#
#AvoPlot is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#AvoPlot is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with AvoPlot.  If not, see <http://www.gnu.org/licenses/>.
"""
This module contains the main script for running AvoPlot.
"""

import optparse

import avoplot
from avoplot.gui import main


def __parse_cmd_line():
    """
    Parse the command line input and return a tuple of (options, args).
    """
    parser = optparse.OptionParser("Usage: %prog [options]",
                                   version=avoplot.VERSION)
    return parser.parse_args()


if __name__ == '__main__':
    # Parse any command line args.
    cmd_options, cmd_args = __parse_cmd_line()

    # Create the wx application and hand control to its event loop.
    app = main.AvoPlotApp(cmd_options, cmd_args)
    app.MainLoop()
gpl-3.0
tashigaofei/BlogSpider
scrapy/contrib/downloadermiddleware/httpcache.py
33
4327
from email.utils import formatdate

from scrapy import signals
from scrapy.exceptions import NotConfigured, IgnoreRequest
from scrapy.utils.misc import load_object


class HttpCacheMiddleware(object):
    """Downloader middleware that serves and stores HTTP responses from a
    cache backend.

    A pluggable cache *policy* decides what is cacheable/fresh/valid and a
    pluggable *storage* backend persists the responses; both are loaded from
    the ``HTTPCACHE_POLICY`` and ``HTTPCACHE_STORAGE`` settings. Raises
    ``NotConfigured`` (disabling the middleware) unless ``HTTPCACHE_ENABLED``
    is set.
    """

    def __init__(self, settings, stats):
        if not settings.getbool('HTTPCACHE_ENABLED'):
            raise NotConfigured
        self.policy = load_object(settings['HTTPCACHE_POLICY'])(settings)
        self.storage = load_object(settings['HTTPCACHE_STORAGE'])(settings)
        # When True, requests missing from the cache are dropped instead of
        # being downloaded (see process_request).
        self.ignore_missing = settings.getbool('HTTPCACHE_IGNORE_MISSING')
        self.stats = stats

    @classmethod
    def from_crawler(cls, crawler):
        """Build the middleware from a crawler and hook the spider
        opened/closed signals to the storage backend's lifecycle."""
        o = cls(crawler.settings, crawler.stats)
        crawler.signals.connect(o.spider_opened, signal=signals.spider_opened)
        crawler.signals.connect(o.spider_closed, signal=signals.spider_closed)
        return o

    def spider_opened(self, spider):
        # Let the storage backend set up per-spider state.
        self.storage.open_spider(spider)

    def spider_closed(self, spider):
        # Let the storage backend tear down per-spider state.
        self.storage.close_spider(spider)

    def process_request(self, request, spider):
        """Serve a fresh cached response if one exists; otherwise let the
        request proceed to download (possibly flagged as uncacheable)."""
        # Skip uncacheable requests
        if not self.policy.should_cache_request(request):
            request.meta['_dont_cache'] = True  # flag as uncacheable
            return

        # Look for cached response and check if expired
        cachedresponse = self.storage.retrieve_response(spider, request)
        if cachedresponse is None:
            self.stats.inc_value('httpcache/miss', spider=spider)
            if self.ignore_missing:
                self.stats.inc_value('httpcache/ignore', spider=spider)
                raise IgnoreRequest("Ignored request not in cache: %s" % request)
            return  # first time request

        # Return cached response only if not expired
        cachedresponse.flags.append('cached')
        if self.policy.is_cached_response_fresh(cachedresponse, request):
            self.stats.inc_value('httpcache/hit', spider=spider)
            return cachedresponse

        # Keep a reference to cached response to avoid a second cache lookup on
        # process_response hook
        request.meta['cached_response'] = cachedresponse

    def process_response(self, request, response, spider):
        """Store downloaded responses and, for stale cache entries, decide
        between revalidating the cached copy and replacing it."""
        # Skip cached responses and uncacheable requests
        if 'cached' in response.flags or '_dont_cache' in request.meta:
            request.meta.pop('_dont_cache', None)
            return response

        # RFC2616 requires origin server to set Date header,
        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.18
        if 'Date' not in response.headers:
            response.headers['Date'] = formatdate(usegmt=1)

        # Do not validate first-hand responses
        cachedresponse = request.meta.pop('cached_response', None)
        if cachedresponse is None:
            self.stats.inc_value('httpcache/firsthand', spider=spider)
            self._cache_response(spider, response, request, cachedresponse)
            return response

        # A stale cached copy existed: ask the policy whether the new
        # response revalidates it (e.g. a 304) or supersedes it.
        if self.policy.is_cached_response_valid(cachedresponse, response, request):
            self.stats.inc_value('httpcache/revalidate', spider=spider)
            return cachedresponse

        self.stats.inc_value('httpcache/invalidate', spider=spider)
        self._cache_response(spider, response, request, cachedresponse)
        return response

    def _cache_response(self, spider, response, request, cachedresponse):
        # Persist the response if the policy allows it; count either way.
        if self.policy.should_cache_response(response, request):
            self.stats.inc_value('httpcache/store', spider=spider)
            self.storage.store_response(spider, request, response)
        else:
            self.stats.inc_value('httpcache/uncacheable', spider=spider)


from scrapy.contrib.httpcache import FilesystemCacheStorage as _FilesystemCacheStorage


class FilesystemCacheStorage(_FilesystemCacheStorage):
    """Deprecated import location; warns and delegates to
    ``scrapy.contrib.httpcache.FilesystemCacheStorage``."""

    def __init__(self, *args, **kwargs):
        import warnings
        from scrapy.exceptions import ScrapyDeprecationWarning
        warnings.warn('Importing FilesystemCacheStorage from '
                      'scrapy.contrib.downloadermiddlware.httpcache is '
                      'deprecated, use scrapy.contrib.httpcache instead.',
                      category=ScrapyDeprecationWarning, stacklevel=1)
        super(FilesystemCacheStorage, self).__init__(*args, **kwargs)
mit
BonexGu/Blik2D-SDK
Blik2D/addon/tensorflow-1.2.1_for_blik/tensorflow/tensorboard/plugins/base_plugin.py
27
2305
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorBoard Plugin abstract base class.

Every plugin in TensorBoard must extend and implement the abstract methods
of this base class.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from abc import ABCMeta
from abc import abstractmethod


class TBPlugin(object):
  """TensorBoard plugin interface; every plugin must extend this class."""

  __metaclass__ = ABCMeta

  # The plugin_name is also used as a prefix in the HTTP handlers the plugin
  # generates, e.g. `data/plugins/$PLUGIN_NAME/$HANDLER`. It must be unique
  # across registered plugins or the application raises ValueError when it
  # is constructed. Subclasses override this class attribute.
  plugin_name = None

  @abstractmethod
  def get_plugin_apps(self, multiplexer, logdir):
    """Return the WSGI applications this plugin implements.

    Each application is registered with the TensorBoard app and served
    under a prefix path that includes the plugin's name.

    Args:
      multiplexer: The event_multiplexer with underlying TB data.
      logdir: The logging directory TensorBoard was started with.

    Returns:
      A dict mapping route paths to WSGI applications.
    """
    raise NotImplementedError()

  @abstractmethod
  def is_active(self):
    """Report whether this plugin is active.

    A plugin may be inactive, for instance, if it lacks relevant data; the
    frontend may then avoid issuing requests to its routes.

    Returns:
      A boolean value: whether this plugin is active.
    """
    raise NotImplementedError()
mit
kmee/odoo-brazil-hr
l10n_br_hr_arquivos_governo/models/arquivo_grrf.py
2
9714
# -*- coding: utf-8 -*-
# (c) 2017 KMEE- Hendrix Costa <hendrix.costa@kmee.com.br>
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html

from .abstract_arquivos_governo import AbstractArquivosGoverno


class Grrf(AbstractArquivosGoverno):
    """Builder for the fixed-width GRRF remittance file (Brazilian FGTS
    rescission collection).

    Each ``_registro_XX`` method concatenates the fields of one record type
    into a single fixed-width line, padding/validating every field through
    ``self._validar`` (inherited from ``AbstractArquivosGoverno`` -- field
    widths and the 'N'/'A'/'AN'/'D'/'V' type codes are interpreted there;
    TODO confirm against the base class). Several responsible-party fields
    fall back to the corresponding company field when empty.
    """

    # Responsible-party information (record type 00)
    def _registro_00(self):
        registro_00 = self.tipo_de_registro_00
        registro_00 += str.ljust('', 51)
        registro_00 += self._validar(self.tipo_de_remessa, 1, 'N')
        # BUGFIX: the fallback previously read `self.self.tipo_de_inscricao_empresa`,
        # which raised AttributeError whenever tipo_inscricao_responsavel was empty.
        registro_00 += \
            self._validar(self.tipo_inscricao_responsavel, 1, 'N') \
            if self.tipo_inscricao_responsavel \
            else self._validar(self.tipo_de_inscricao_empresa, 1, 'N')
        registro_00 += self._validar(self.inscricao_do_responsavel, 14, 'N') \
            if self.inscricao_do_responsavel \
            else self._validar(self.inscricao_da_empresa, 14, 'N')
        registro_00 += self._validar(self.razao_social_responsavel, 30, 'AN') \
            if self.razao_social_responsavel \
            else self._validar(self.razao_social_empresa, 30, 'AN')
        registro_00 += self._validar(self.nome_do_contato_responsavel, 20, 'A')
        registro_00 += self._validar(self.endereco_responsavel, 50) \
            if self.endereco_responsavel \
            else self._validar(self.endereco_empresa, 50)
        registro_00 += self._validar(self.bairro_responsavel, 20) \
            if self.bairro_responsavel \
            else self._validar(self.bairro_empresa, 20)
        registro_00 += self._validar(self.cep_responsavel, 8, 'N') \
            if self.cep_responsavel \
            else self._validar(self.cep_empresa, 8, 'N')
        registro_00 += self._validar(self.cidade_responsavel, 20, 'AN') \
            if self.cidade_responsavel \
            else self._validar(self.cidade_empresa, 20, 'AN')
        registro_00 += \
            self._validar(self.unidade_federacao_responsavel, 2, 'A') \
            if self.unidade_federacao_responsavel \
            else self._validar(self.unidade_federacao_empresa, 2, 'A')
        registro_00 += \
            self._validar(self.telefone_contato_responsavel, 12, 'N')
        registro_00 += self._validar(self.endereco_internet_responsavel, 60)
        registro_00 += self._validar(self.data_recolhimento_grrf, 8, 'D')
        registro_00 += str.ljust('', 60)
        registro_00 += self.final_de_linha
        return registro_00

    # Company information (record type 10)
    def _registro_10(self):
        registro_10 = self.tipo_de_registro_10
        registro_10 += self._validar(self.tipo_de_inscricao_empresa, 1, 'N')
        registro_10 += self._validar(self.inscricao_da_empresa, 14, 'N')
        registro_10 += ''.rjust(36, '0')
        registro_10 += self._validar(self.razao_social_empresa, 40, 'AN')
        registro_10 += self._validar(self.endereco_empresa, 50, 'AN')
        registro_10 += self._validar(self.bairro_empresa, 20, 'AN')
        registro_10 += self._validar(self.cep_empresa, 8, 'N')
        registro_10 += self._validar(self.cidade_empresa, 20, 'AN')
        registro_10 += self._validar(self.unidade_federacao_empresa, 2, 'A')
        registro_10 += self._validar(self.telefone_empresa, 12, 'N')
        registro_10 += self._validar(self.CNAE_fiscal, 7, 'N')
        registro_10 += self._validar(self.simples, 1, 'N')
        registro_10 += self._validar(self.fpas, 3, 'N')
        registro_10 += str.ljust('', 143)
        registro_10 += self.final_de_linha
        return registro_10

    # Worker information (record type 40)
    def _registro_40(self):
        registro_40 = self.tipo_de_registro_40
        registro_40 += \
            self._validar(self.tipo_de_inscricao_trabalhador, 1, 'N') \
            if self.tipo_de_inscricao_trabalhador \
            else self._validar(self.tipo_de_inscricao_empresa, 1, 'N')
        registro_40 += self._validar(self.inscricao_do_trabalhador, 14, 'N') \
            if self.inscricao_do_trabalhador \
            else self._validar(self.inscricao_da_empresa, 14, 'N')
        registro_40 += self._validar(self.tipo_inscricao_tomador, 1, 'N')
        registro_40 += self._validar(self.inscricao_tomador, 14, 'N')
        registro_40 += self._validar(self.PIS_PASEP, 11, 'N')
        registro_40 += self._validar(self.data_admissao, 8, 'D')
        registro_40 += self._validar(self.categoria_trabalhador, 2, 'N')
        registro_40 += self._validar(self.nome_do_trabalhador, 70, 'A')
        registro_40 += self._validar(self.numero_ctps, 7, 'N')
        registro_40 += self._validar(self.serie_ctps, 5, 'N')
        registro_40 += self._validar(self.sexo, 1, 'N')
        registro_40 += self._validar(self.grau_de_instrucao, 2, 'N')
        registro_40 += self._validar(self.data_nascimento, 8, 'D')
        registro_40 += self._validar(self.qtd_horas_trabalhadas_semana, 2, 'N')
        registro_40 += self._validar(self.CBO, 6, 'AN')
        registro_40 += self._validar(self.data_opcao, 8, 'D')
        registro_40 += self._validar(self.codigo_da_movimentacao, 2, 'AN')
        registro_40 += self._validar(self.data_movimentacao, 8, 'D')
        registro_40 += self._validar(self.codigo_de_saque, 3, 'AN')
        registro_40 += self._validar(self.aviso_previo, 1, 'N')
        registro_40 += self._validar(self.data_inicio_aviso_previo, 8, 'D')
        registro_40 += self._validar(self.reposicao_de_vaga, 1, 'A')
        registro_40 += self._validar(self.data_homologacao_dissidio, 8, 'D')
        registro_40 += self._validar(self.valor_dissidio, 15, 'V')
        registro_40 += self._validar(self.remuneracao_mes_aterior, 15, 'V')
        registro_40 += self._validar(self.remuneracao_mes_rescisao, 15, 'V')
        registro_40 += self._validar(self.aviso_previo_indenizado, 15, 'V')
        registro_40 += \
            self._validar(self.indicativo_pensao_alimenticia, 1, 'A')
        registro_40 += \
            self._validar(self.percentual_pensao_alimenticia, 5, 'V')
        registro_40 += self._validar(self.valor_pensao_alimenticia, 15, 'V')
        registro_40 += self._validar(self.CPF, 11, 'N')
        registro_40 += self._validar(self.banco_conta_trabalhador, 3, 'N')
        registro_40 += self._validar(self.agencia_trabalhador, 4, 'N')
        registro_40 += self._validar(self.conta_trabalhador, 13, 'N')
        registro_40 += self._validar(self.saldo_para_fins_rescisorios, 15, 'N')
        registro_40 += str.ljust('', 39)
        registro_40 += self.final_de_linha
        return registro_40

    # Trailer record (record type 90)
    def _registro_90(self):
        registro_90 = self.tipo_de_registro_90
        registro_90 += self.marca_final_de_registro
        registro_90 += str.ljust('', 306)
        registro_90 += self.final_de_linha
        return registro_90

    def _gerar_grrf(self):
        """Concatenate the 00, 10, 40 and 90 records into the full file."""
        return \
            self._registro_00() + \
            self._registro_10() + \
            self._registro_40() + \
            self._registro_90()

    # Fields of record 00 -----------------------------------------------------
    tipo_de_registro_00 = u'00'        # always '00'
    tipo_de_remessa = u'2'             # 2 - GRRF | 4 - report movement
    tipo_inscricao_responsavel = u'1'  # 1 - CNPJ | 2 - CEI
    inscricao_do_responsavel = ''      # CNPJ | CEI
    razao_social_responsavel = ''
    nome_do_contato_responsavel = ''
    endereco_responsavel = ''
    bairro_responsavel = ''
    cep_responsavel = ''
    cidade_responsavel = ''
    unidade_federacao_responsavel = ''
    telefone_contato_responsavel = ''
    endereco_internet_responsavel = ''
    data_recolhimento_grrf = ''
    final_de_linha = u'*'
    # -------------------------------------------------------------------------

    # Fields of record 10 -----------------------------------------------------
    tipo_de_registro_10 = u'10'        # always '10'
    tipo_de_inscricao_empresa = u'1'
    inscricao_da_empresa = ''          # CNPJ | CEI
    razao_social_empresa = ''
    endereco_empresa = ''
    bairro_empresa = ''
    cep_empresa = ''
    cidade_empresa = ''
    unidade_federacao_empresa = ''
    telefone_empresa = ''
    CNAE_fiscal = ''
    simples = ''
    fpas = ''
    # -------------------------------------------------------------------------

    # Fields of record 40 -----------------------------------------------------
    tipo_de_registro_40 = u'40'        # always '40'
    tipo_de_inscricao_trabalhador = u'1'
    inscricao_do_trabalhador = ''
    tipo_inscricao_tomador = ''
    inscricao_tomador = ''
    PIS_PASEP = ''
    data_admissao = ''                 # DDMMAAAA
    categoria_trabalhador = u'01'
    nome_do_trabalhador = ''
    numero_ctps = ''
    serie_ctps = ''
    sexo = ''
    grau_de_instrucao = ''
    data_nascimento = ''
    qtd_horas_trabalhadas_semana = ''
    CBO = ''
    data_opcao = ''
    codigo_da_movimentacao = ''
    data_movimentacao = ''
    codigo_de_saque = ''
    aviso_previo = ''
    data_inicio_aviso_previo = ''
    reposicao_de_vaga = ''
    data_homologacao_dissidio = ''
    valor_dissidio = ''
    remuneracao_mes_aterior = ''
    remuneracao_mes_rescisao = ''
    aviso_previo_indenizado = ''
    indicativo_pensao_alimenticia = ''
    percentual_pensao_alimenticia = ''
    valor_pensao_alimenticia = ''
    CPF = ''
    banco_conta_trabalhador = ''
    agencia_trabalhador = ''
    conta_trabalhador = ''
    saldo_para_fins_rescisorios = ''
    # -------------------------------------------------------------------------

    # Fields of record 90 -----------------------------------------------------
    tipo_de_registro_90 = u'90'        # always '90'
    marca_final_de_registro = ''.rjust(51, '9')
    # -------------------------------------------------------------------------
agpl-3.0
nikhilnrng/algorithms-and-data-structures
data_structures/trees/binary_search_tree.py
1
5647
class BSTNode(object):
    """A single node of a binary search tree."""

    def __init__(self, data, left=None, right=None):
        self.data = data
        self.left = left
        self.right = right


class BinarySearchTree(object):
    """A binary search tree that stores unique values.

    Insert, delete and find run in O(log n) on average; the tree performs
    no rebalancing, so worst-case operations degrade to O(n).
    """

    def __init__(self):
        self.root = None

    def insert(self, data):
        """Insert ``data`` into the tree; duplicates are silently ignored.

        When the value is absent there is always a node with an empty child
        slot where the new node can be attached (otherwise the value would
        already be in the tree), so a single search locates the attachment
        point.
        """
        if self.root is None:
            # Empty tree: the new node becomes the root.
            self.root = BSTNode(data)
            return

        found, attach_at = self.find_node_and_parent(data)
        if found is not None:
            # Value already present -- unique-value tree, nothing to do.
            return

        # Hook the new node onto the appropriate (empty) side of the parent.
        if data < attach_at.data:
            assert attach_at.left is None
            attach_at.left = BSTNode(data)
        else:
            assert attach_at.right is None
            attach_at.right = BSTNode(data)

    def delete(self, key):
        """Remove ``key`` from the tree if it is present.

        Covers four cases: key absent (no-op), node with two children
        (replaced by its in-order successor), node with one child (spliced
        out), and leaf (unlinked).
        """
        target, parent = self.find_node_and_parent(key)
        if target is None:
            return  # key not in tree

        if target.left is not None and target.right is not None:
            # Two children: copy the in-order successor's value into the
            # target, then delete the successor node instead (it has at
            # most one child).
            successor, parent = self.find_min(target.right, target)
            target.data = successor.data
            target = successor

        if target.left is None and target.right is None:
            # Leaf: unlink it from its parent (or clear the root).
            if parent is None:
                self.root = None
            elif target.data < parent.data:
                parent.left = None
            else:
                parent.right = None
        else:
            # Exactly one child: splice that child into the target's place.
            child = target.left if target.left is not None else target.right
            if parent is None:
                self.root = child
            elif target.data < parent.data:
                parent.left = child
            else:
                parent.right = child

    def find(self, key):
        """Return the stored value equal to ``key``, or None if absent."""
        match = self.find_node_and_parent(key)[0]
        return match.data if match is not None else None

    def traverse(self):
        """Return a list of all stored values in ascending order."""
        ordered = []
        self.inorder_traversal(self.root, ordered)
        return ordered

    def inorder_traversal(self, node, alist):
        """Append the subtree rooted at ``node`` to ``alist`` in order."""
        if node is None:
            return
        self.inorder_traversal(node.left, alist)
        alist.append(node.data)
        self.inorder_traversal(node.right, alist)

    def find_min(self, node, parent):
        """Return (minimum node, its parent) for the subtree at ``node``.

        Returns None when ``node`` is None.
        """
        if node is None:
            return None
        while node.left is not None:
            node, parent = node.left, node
        return node, parent

    def find_node_and_parent(self, key):
        """Search for ``key``, returning (node, parent).

        If the key is absent, node is None and parent is the key's
        would-be parent (None for an empty tree).
        """
        parent, current = None, self.root
        while current is not None and current.data != key:
            parent = current
            current = parent.left if key < parent.data else parent.right
        # current is None (not found) or current.data == key.
        return current, parent
mit
chubbymaggie/simuvex
simuvex/vex/statements/storeg.py
1
1135
from . import SimIRStmt
from ... import s_options as o
from ...s_action_object import SimActionObject
from ...s_action import SimActionData


class SimIRStmt_StoreG(SimIRStmt):
    """Executes a VEX StoreG (guarded store) statement: writes ``data`` to
    ``addr`` only when ``guard`` evaluates to 1."""

    def _execute(self):
        # Translate the statement's address, data and guard IR expressions
        # into symbolic expressions in the current state.
        addr = self._translate_expr(self.stmt.addr)
        data = self._translate_expr(self.stmt.data)
        expr = data.expr.to_bv()
        guard = self._translate_expr(self.stmt.guard)

        if o.TRACK_MEMORY_ACTIONS in self.state.options:
            # Record a memory-write action (with register/temp dependencies
            # and the guard as its condition) for analysis bookkeeping.
            data_ao = SimActionObject(expr, reg_deps=data.reg_deps(), tmp_deps=data.tmp_deps())
            addr_ao = SimActionObject(addr.expr, reg_deps=addr.reg_deps(), tmp_deps=addr.tmp_deps())
            guard_ao = SimActionObject(guard.expr, reg_deps=guard.reg_deps(), tmp_deps=guard.tmp_deps())
            size_ao = SimActionObject(data.size_bits())
            a = SimActionData(self.state, self.state.memory.id, SimActionData.WRITE, addr=addr_ao, data=data_ao, condition=guard_ao, size=size_ao)
            self.actions.append(a)
        else:
            a = None

        # Conditional store: only performed where guard == 1, using the
        # statement's endianness. The action (if any) is attached.
        self.state.memory.store(addr.expr, expr, condition=guard.expr == 1, endness=self.stmt.end, action=a)
bsd-2-clause
tsteinholz/cldoc
setup.py
2
4372
#!/usr/bin/env python
# Setup script for cldoc: builds/installs the package and provides a custom
# `generate` command that compiles the bundled CoffeeScript/SASS web assets.

from setuptools import setup, Command

import subprocess, os, shutil, glob, sys

# CoffeeScript sources, concatenated in this order and compiled into a
# single html/javascript/cldoc.js (order matters for the compiler input).
coffee_files = [
    'cldoc.coffee',
    'page.coffee',
    'sidebar.coffee',
    'node.coffee',
    'type.coffee',
    'doc.coffee',
    'category.coffee',
    'enum.coffee',
    'templated.coffee',
    'struct.coffee',
    'structtemplate.coffee',
    'class.coffee',
    'classtemplate.coffee',
    'namespace.coffee',
    'typedef.coffee',
    'variable.coffee',
    'function.coffee',
    'functiontemplate.coffee',
    'field.coffee',
    'method.coffee',
    'methodtemplate.coffee',
    'constructor.coffee',
    'destructor.coffee',
    'base.coffee',
    'implements.coffee',
    'subclass.coffee',
    'implementedby.coffee',
    'templatetypeparameter.coffee',
    'coverage.coffee',
    'arguments.coffee',
    'report.coffee',
    'references.coffee',
    'union.coffee',
    'gobjectclass.coffee',
    'gobjectinterface.coffee',
    'gobjectboxed.coffee',
    'gobjectproperty.coffee',
]

class cldoc_generate(Command):
    """distutils command: generate css, js and html files from the web
    sources, copying the results into cldoc/data for packaging."""

    description = "generate css, js and html files"

    user_options = [
        ('coffee=', None, 'path to coffeescript compiler'),
        ('sass=', None, 'path to sass compiler'),
        ('inline=', None, 'path to inline')
    ]

    def initialize_options(self):
        # Defaults assume the tools are on PATH; 'inline' is a repo script.
        self.coffee = 'coffee'
        self.sass = 'sass'
        self.inline = 'scripts/inline'

    def finalize_options(self):
        pass

    def run_coffee(self):
        # Pipe all coffee sources through one compiler invocation (--stdio)
        # and write the combined output to html/javascript/cldoc.js.
        print('running {0}'.format(self.coffee))

        for d in ('html/javascript', 'cldoc/data/javascript'):
            try:
                os.makedirs(d)
            except:
                pass

        args = [self.coffee, '--bare', '--stdio', '--compile']

        try:
            sp = subprocess.Popen(args, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
        except Exception as e:
            sys.stderr.write("Failed to run coffee (please make sure it is installed)\n")
            sys.exit(1)

        # NOTE(review): writing str to a Popen pipe and reading stdout fully
        # before wait() is Python-2 style; presumably this script targets
        # Python 2 (see the file() call in run_inline) -- confirm.
        for f in coffee_files:
            with open(os.path.join('html', 'coffee', f)) as ff:
                sp.stdin.write(ff.read())

        sp.stdin.close()

        with open('html/javascript/cldoc.js', 'w') as out:
            out.write(sp.stdout.read())

        sp.wait()

        # Mirror the generated javascript into the package data directory.
        for js in glob.glob('html/javascript/*.js'):
            shutil.copyfile(js, 'cldoc/data/javascript/' + os.path.basename(js))

    def run_sass(self):
        # Compile the SCSS entry point to compressed CSS, then mirror the
        # generated stylesheets into the package data directory.
        print('running {0}'.format(self.sass))

        for d in ('html/styles', 'cldoc/data/styles'):
            try:
                os.makedirs(d)
            except:
                pass

        args = [self.sass, '--scss', '--line-numbers', '--no-cache', '--style', 'compressed']
        files = ['html/sass/cldoc.scss', 'html/styles/cldoc.css']

        subprocess.call(args + files)

        for css in glob.glob('html/styles/*.css'):
            shutil.copyfile(css, 'cldoc/data/styles/' + os.path.basename(css))

    def run_inline(self):
        # Inline html/index.html's assets into cldoc/data/index.html; with
        # an empty --inline option the file is copied verbatim instead.
        if self.inline == '':
            shutil.copyfile('html/index.html', 'cldoc/data/index.html')
            return

        print('running {0}'.format(self.inline))

        args = [self.inline, 'html/index.html']

        try:
            os.makedirs('cldoc/data')
        except:
            pass

        # NOTE(review): file() is a Python-2-only builtin (open() in py3).
        fout = file('cldoc/data/index.html', 'w')
        proc = subprocess.Popen(args, stdout=fout)
        proc.wait()

    def run(self):
        self.run_coffee()
        self.run_sass()
        self.run_inline()

cmdclass = {
    'generate': cldoc_generate
}

# Collect every file under cldoc/data (paths relative to the package root)
# so it ships as package_data.
datafiles = []
dataprefix = 'cldoc'

for dirpath, dirnames, filenames in os.walk(os.path.join(dataprefix, 'data')):
    datafiles += [os.path.join(dirpath[len(dataprefix)+1:], f) for f in filenames]

setup(name='cldoc',
      version='1.9',
      description='clang based documentation generator for C/C++',
      author='Jesse van den Kieboom',
      author_email='jessevdk@gmail.com',
      url='http://jessevdk.github.com/cldoc',
      license='GPLv2',
      keywords=['clang', 'c++', 'documentation'],
      packages=['cldoc', 'cldoc.clang', 'cldoc.nodes', 'cldoc.generators'],
      entry_points = {
          'console_scripts': [
              'cldoc = cldoc:run'
          ]
      },
      package_data={'cldoc': datafiles},
      cmdclass=cmdclass,
      install_requires=['pyparsing ==1.5.7'])

# vi:ts=4:et
gpl-2.0
fmichea/srddl
srddl/filetypes/pcap.py
1
1307
import srddl.data as sd import srddl.fields as sf import srddl.helpers as sh import srddl.models as sm class PcapFileHeader(sm.Struct): magic = sf.IntField('magic', size=4) version_major = sf.IntField('', size=2) version_minor = sf.IntField('', size=2) thiszone = sf.IntField('gmt to local correction', size=4) sigfigs = sf.IntField('accuracy of timestamps', size=4) snaplen = sf.IntField('max length saved portion for each pkt', size=4) linktype = sf.IntField('data link type (LINKTYPE_*)', size=4) class Timeval(sm.Struct): tv_sec = sf.IntField('', size=4) tv_usec = sf.IntField('', size=4) class PcapPkthdr(sm.Struct): ts = sf.SuperField(Timeval) caplen = sf.IntField('length of portion present', size=4) length = sf.IntField('length this packet (off wire)', size=4) class PcapPacket(sm.Struct): pkthdr = sf.SuperField(PcapPkthdr) payload = sf.ByteArrayField(lambda strct : strct.pkthdr.caplen) class Pcap(sm.FileType): '''Packet Capture File''' class Meta: author = '' author_email = '' extensions = '' def check(self, data): return data.unpack_from('4s', 0)[0] == bytes.fromhex('d4c3b2a1') def setup(self, data): header = data.map(0, PcapFileHeader) data.map_fill_array(header['size'], -1, PcapPacket)
bsd-3-clause
isc-projects/forge
tests/dhcpv6/kea_only/config_backend/test_cb_v6_options.py
1
13970
"""Kea database config backend commands hook testing""" import pytest import srv_msg from cb_model import setup_server_for_config_backend_cmds pytestmark = [pytest.mark.v6, pytest.mark.kea_only, pytest.mark.controlchannel, pytest.mark.hook, pytest.mark.config_backend, pytest.mark.cb_cmds] @pytest.fixture(autouse=True) def run_around_tests(): setup_server_for_config_backend_cmds(config_control={"config-fetch-wait-time": 1}, force_reload=False) cmd = dict(command="remote-server6-set", arguments={"remote": {"type": "mysql"}, "servers": [{"server-tag": "abc"}]}) srv_msg.send_ctrl_cmd(cmd, exp_result=0) def _get_server_config(reload_kea=False): if reload_kea: cmd = dict(command="config-backend-pull", arguments={}) srv_msg.send_ctrl_cmd(cmd, exp_result=0) cmd = dict(command="config-get", arguments={}) return srv_msg.send_ctrl_cmd(cmd, exp_result=0) def _subnet_set(): cmd = dict(command="remote-subnet6-set", arguments={"remote": {"type": "mysql"}, "server-tags": ["abc"], "subnets": [{"subnet": "2001:db8:1::/64", "id": 5, "interface": "$(SERVER_IFACE)", "shared-network-name": "", "pools": [ {"pool": "2001:db8:1::1-2001:db8:1::10"}]}]}) response = srv_msg.send_ctrl_cmd(cmd) assert response == {"arguments": {"subnets": [{"id": 5, "subnet": "2001:db8:1::/64"}]}, "result": 0, "text": "IPv6 subnet successfully set."} def _set_global_parameter(): cmd = dict(command="remote-global-parameter6-set", arguments={"remote": {"type": "mysql"}, "server-tags": ["abc"], "parameters": { "decline-probation-period": 123456}}) response = srv_msg.send_ctrl_cmd(cmd) assert response == {"arguments": {"count": 1, "parameters": {"decline-probation-period": 123456}}, "result": 0, "text": "1 DHCPv6 global parameter(s) successfully set."} def _set_global_option(channel='http'): cmd = dict(command="remote-option6-global-set", arguments={"remote": {"type": "mysql"}, "server-tags": ["abc"], "options": [{ "code": 7, "data": "123"}]}) response = srv_msg.send_ctrl_cmd(cmd, channel=channel) assert response 
== {"result": 0, "text": "DHCPv6 option successfully set.", "arguments": {"options": [{"code": 7, "space": "dhcp6"}]}} def _set_network(channel='http'): cmd = dict(command="remote-network6-set", arguments={"remote": {"type": "mysql"}, "server-tags": ["abc"], "shared-networks": [{"name": "floor13"}]}) response = srv_msg.send_ctrl_cmd(cmd, channel=channel) assert response == {"arguments": {"shared-networks": [{"name": "floor13"}]}, "result": 0, "text": "IPv6 shared network successfully set."} @pytest.mark.v6 def test_subnet_option(): _subnet_set() cmd = dict(command="remote-option6-subnet-set", arguments={"subnets": [{"id": 5}], "options": [{"always-send": False, "code": 23, "csv-format": True, "data": "2001:db8:1::1", "name": "dns-servers", "space": "dhcp6"}], "remote": {"type": "mysql"}}) srv_msg.send_ctrl_cmd(cmd, exp_result=0) srv_msg.forge_sleep(3, "seconds") cfg = _get_server_config() assert cfg["arguments"]["Dhcp6"]["subnet6"][0]["option-data"] == cmd["arguments"]["options"] cmd = dict(command="remote-option6-subnet-del", arguments={"subnets": [{"id": 5}], "options": [{"code": 23, "space": "dhcp6"}], "remote": {"type": "mysql"}}) srv_msg.send_ctrl_cmd(cmd, exp_result=0) srv_msg.forge_sleep(3, "seconds") cfg = _get_server_config() assert cfg["arguments"]["Dhcp6"]["subnet6"][0]["option-data"] == [] @pytest.mark.v6 def test_subnet_in_network_option(): _set_network() cmd = dict(command="remote-subnet6-set", arguments={"remote": {"type": "mysql"}, "server-tags": ["abc"], "subnets": [{"subnet": "2001:db8:1::/64", "id": 5, "interface": "$(SERVER_IFACE)", "shared-network-name": "floor13", "pools": [ {"pool": "2001:db8:1::1-2001:db8:1::10"}]}]}) srv_msg.send_ctrl_cmd(cmd, exp_result=0) cmd = dict(command="remote-option6-subnet-set", arguments={"subnets": [{"id": 5}], "options": [{"always-send": False, "code": 23, "csv-format": True, "data": "2001:db8:1::1", "name": "dns-servers", "space": "dhcp6"}], "remote": {"type": "mysql"}}) srv_msg.send_ctrl_cmd(cmd, exp_result=0) 
srv_msg.forge_sleep(2, "seconds") cfg = _get_server_config() assert cfg["arguments"]["Dhcp6"]["shared-networks"][0]["subnet6"][0]["option-data"] == cmd["arguments"]["options"] @pytest.mark.v6 def test_option_on_all_levels(): _set_network() cmd = dict(command="remote-subnet6-set", arguments={"remote": {"type": "mysql"}, "server-tags": ["abc"], "subnets": [{"subnet": "2001:db8:1::/64", "id": 5, "interface": "$(SERVER_IFACE)", "shared-network-name": "floor13", "pools": [{"pool": "2001:db8:1::1-2001:db8:1::10"}], "pd-pools": [{"delegated-len": 91, "prefix": "2001:db8:2::", "prefix-len": 90}]}]}) srv_msg.send_ctrl_cmd(cmd, exp_result=0) cmd_sub = dict(command="remote-option6-subnet-set", arguments={"subnets": [{"id": 5}], "options": [{"always-send": False, "code": 23, "csv-format": True, "name": "dns-servers", "space": "dhcp6", "data": "2001:db8:1::1"}], "remote": {"type": "mysql"}}) srv_msg.send_ctrl_cmd(cmd_sub, exp_result=0) cmd_pool = dict(command="remote-option6-pool-set", arguments={"pools": [{"pool": "2001:db8:1::1-2001:db8:1::10"}], "options": [{"always-send": False, "code": 23, "csv-format": True, "name": "dns-servers", "space": "dhcp6", "data": "2001:db8:1::2"}], "remote": {"type": "mysql"}}) srv_msg.send_ctrl_cmd(cmd_pool, exp_result=0) cmd_net = dict(command="remote-option6-network-set", arguments={"shared-networks": [{"name": "floor13"}], "options": [{"always-send": False, "code": 23, "csv-format": True, "name": "dns-servers", "space": "dhcp6", "data": "2001:db8:1::3"}], "remote": {"type": "mysql"}}) srv_msg.send_ctrl_cmd(cmd_net, exp_result=0) cmd_pd = dict(command="remote-option6-pd-pool-set", arguments={"pd-pools": [{"prefix": "2001:db8:2::", "prefix-len": 90}], "options": [{"always-send": False, "code": 23, "csv-format": True, "name": "dns-servers", "space": "dhcp6", "data": "2001:db8:1::3"}], "remote": {"type": "mysql"}}) srv_msg.send_ctrl_cmd(cmd_pd, exp_result=0) srv_msg.forge_sleep(2, "seconds") cfg = _get_server_config() assert 
cfg["arguments"]["Dhcp6"]["shared-networks"][0]["subnet6"][0]["option-data"] == \ cmd_sub["arguments"]["options"] assert cfg["arguments"]["Dhcp6"]["shared-networks"][0]["subnet6"][0]["pd-pools"][0]["option-data"] == \ cmd_pd["arguments"]["options"] assert cfg["arguments"]["Dhcp6"]["shared-networks"][0]["subnet6"][0]["pools"][0]["option-data"] == \ cmd_pool["arguments"]["options"] assert cfg["arguments"]["Dhcp6"]["shared-networks"][0]["option-data"] == cmd_net["arguments"]["options"] assert cfg["arguments"]["Dhcp6"]["option-data"] == [] @pytest.mark.v6 def test_network_option(): _set_network() cmd = dict(command="remote-option6-network-set", arguments={ "shared-networks": [{"name": "floor13"}], "options": [{"always-send": False, "code": 23, "csv-format": True, "data": "2001:db8:1::1", "name": "dns-servers", "space": "dhcp6"}], "remote": {"type": "mysql"}}) srv_msg.send_ctrl_cmd(cmd, exp_result=0) srv_msg.forge_sleep(2, "seconds") cfg = _get_server_config() assert cfg["arguments"]["Dhcp6"]["shared-networks"][0]["option-data"] == cmd["arguments"]["options"] cmd = dict(command="remote-option6-network-del", arguments={ "shared-networks": [{"name": "floor13"}], "options": [{"code": 23, "space": "dhcp6"}], "remote": {"type": "mysql"}}) srv_msg.send_ctrl_cmd(cmd, exp_result=0) srv_msg.forge_sleep(3, "seconds") cfg = _get_server_config() assert cfg["arguments"]["Dhcp6"]["shared-networks"][0]["option-data"] == [] @pytest.mark.v6 def test_pool_option(): _subnet_set() cmd = dict(command="remote-option6-pool-set", arguments={ "pools": [{"pool": "2001:db8:1::1-2001:db8:1::10"}], "options": [{"always-send": False, "code": 23, "csv-format": True, "data": "2001:db8:1::1", "name": "dns-servers", "space": "dhcp6"}], "remote": {"type": "mysql"}}) srv_msg.send_ctrl_cmd(cmd, exp_result=0) srv_msg.forge_sleep(3, "seconds") cfg = _get_server_config() assert cfg["arguments"]["Dhcp6"]["subnet6"][0]["pools"][0]["option-data"] == cmd["arguments"]["options"] cmd = 
dict(command="remote-option6-pool-del", arguments={ "pools": [{"pool": "2001:db8:1::1-2001:db8:1::10"}], "options": [{"code": 23, "space": "dhcp6"}], "remote": {"type": "mysql"}}) srv_msg.send_ctrl_cmd(cmd, exp_result=0) srv_msg.forge_sleep(4, "seconds") cfg = _get_server_config() assert cfg["arguments"]["Dhcp6"]["subnet6"][0]["pools"][0]["option-data"] == [] @pytest.mark.v6 def test_pd_pool_option(): cmd = dict(command="remote-subnet6-set", arguments={"remote": {"type": "mysql"}, "server-tags": ["abc"], "subnets": [{"subnet": "2001:db8:1::/64", "id": 5, "interface": "$(SERVER_IFACE)", "shared-network-name": "", "pd-pools": [{ "delegated-len": 91, "prefix": "2001:db8:2::", "prefix-len": 90}]}]}) srv_msg.send_ctrl_cmd(cmd) srv_msg.forge_sleep(2, "seconds") cmd = dict(command="remote-option6-pd-pool-set", arguments={ "pd-pools": [{"prefix": "2001:db8:2::", "prefix-len": 90}], "options": [{"always-send": False, "code": 23, "csv-format": True, "data": "2001:db8:1::1", "name": "dns-servers", "space": "dhcp6"}], "remote": {"type": "mysql"}}) srv_msg.send_ctrl_cmd(cmd, exp_result=0) srv_msg.forge_sleep(3, "seconds") cfg = _get_server_config() assert cfg["arguments"]["Dhcp6"]["subnet6"][0]["pd-pools"][0]["option-data"] == cmd["arguments"]["options"] cmd = dict(command="remote-option6-pd-pool-del", arguments={ "pd-pools": [{"prefix": "2001:db8:2::", "prefix-len": 90}], "options": [{"code": 23, "space": "dhcp6"}], "remote": {"type": "mysql"}}) srv_msg.send_ctrl_cmd(cmd, exp_result=0) srv_msg.forge_sleep(3, "seconds") cfg = _get_server_config() assert cfg["arguments"]["Dhcp6"]["subnet6"][0]["pd-pools"][0]["option-data"] == []
isc
joone/chromium-crosswalk
tools/findit/svn_repository_parser.py
74
9178
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import xml.dom.minidom as minidom
from xml.parsers.expat import ExpatError

import crash_utils
from repository_parser_interface import ParserInterface

# This number is 6 because each linediff page in src.chromium.org should
# contain the following tables: table with revision number, table with actual
# diff, table with dropdown menu, table with legend, a border table and a table
# containing page information.
NUM_TABLES_IN_LINEDIFF_PAGE = 6
# Each of the linediff info should contain 3 tds, one for changed line number,
# and two for line contents before/after.
NUM_TDS_IN_LINEDIFF_PAGE = 3


class SVNParser(ParserInterface):
  """Parser for SVN repository using chromium.org, for components in config.

  Attributes:
    url_map: A map from component to the urls, where urls are for changelog,
             revision, line diff and annotation.
  """

  def __init__(self, url_map):
    self.component_to_urls_map = url_map

  def ParseChangelog(self, component, range_start, range_end):
    """Parses the SVN changelog XML for a revision range.

    Args:
      component: Component name, used to look up the url templates.
      range_start: First revision of the range.
      range_end: Last revision of the range.

    Returns:
      A pair (revision_map, file_to_revision_map).  revision_map maps a
      revision number to a dict with 'author', 'message' and 'url';
      file_to_revision_map maps a file path to a list of
      (revision_number, file_change_type) pairs.  Both are empty on any
      failure (unknown component, fetch failure, XML parse error).
    """
    file_to_revision_map = {}
    revision_map = {}

    # Check if the current component is supported by reading the components
    # parsed from config file. If it is not, fail.
    url_map = self.component_to_urls_map.get(component)
    if not url_map:
      return (revision_map, file_to_revision_map)

    # Retrieve data from the url, return empty map if fails.
    revision_range_str = '%s:%s' % (range_start, range_end)
    url = url_map['changelog_url'] % revision_range_str
    response = crash_utils.GetDataFromURL(url)
    if not response:
      return (revision_map, file_to_revision_map)

    # Parse xml out of the returned string. If it fails, return empty map.
    try:
      xml_revisions = minidom.parseString(response)
    except ExpatError:
      return (revision_map, file_to_revision_map)

    # Iterate through the returned XML object.
    revisions = xml_revisions.getElementsByTagName('logentry')
    for revision in revisions:
      # Create new revision object for each of the revision.
      revision_object = {}

      # Set author of the CL.
      revision_object['author'] = revision.getElementsByTagName(
          'author')[0].firstChild.nodeValue

      # Get the revision number from xml.
      revision_number = int(revision.getAttribute('revision'))

      # Iterate through the changed paths in the CL.
      paths = revision.getElementsByTagName('paths')
      if paths:
        for changed_path in paths[0].getElementsByTagName('path'):
          # Get path and file change type from the xml.
          file_path = changed_path.firstChild.nodeValue
          file_change_type = changed_path.getAttribute('action')

          # Paths are reported repository-absolute; strip the /trunk/ prefix
          # so they match the source-tree-relative paths used elsewhere.
          if file_path.startswith('/trunk/'):
            file_path = file_path[len('/trunk/'):]

          # Add file to the map.
          if file_path not in file_to_revision_map:
            file_to_revision_map[file_path] = []
          file_to_revision_map[file_path].append(
              (revision_number, file_change_type))

      # Set commit message of the CL.
      revision_object['message'] = revision.getElementsByTagName('msg')[
          0].firstChild.nodeValue

      # Set url of this CL.
      revision_url = url_map['revision_url'] % revision_number
      revision_object['url'] = revision_url

      # Add this CL to the revision map.
      revision_map[revision_number] = revision_object

    return (revision_map, file_to_revision_map)

  def ParseLineDiff(self, path, component, file_change_type, revision_number):
    """Scrapes the ViewVC line-diff page for one file at one revision.

    Args:
      path: File path within the component.
      component: Component name, used to look up the url templates.
      file_change_type: SVN action for the file ('A' added, 'M' modified...).
      revision_number: Revision to diff against its predecessor.

    Returns:
      A tuple (url, changed_line_numbers, changed_line_contents).  On any
      scraping failure the revision page url is returned with empty lists;
      (None, None, None) if the component is unknown.
    """
    changed_line_numbers = []
    changed_line_contents = []

    url_map = self.component_to_urls_map.get(component)
    if not url_map:
      return (None, None, None)

    # If the file is added (not modified), treat it as if it is not changed.
    backup_url = url_map['revision_url'] % revision_number
    if file_change_type == 'A':
      return (backup_url, changed_line_numbers, changed_line_contents)

    # Retrieve data from the url. If no data is retrieved, return empty lists.
    url = url_map['diff_url'] % (path, revision_number - 1, revision_number,
                                 revision_number)
    data = crash_utils.GetDataFromURL(url)
    if not data:
      return (backup_url, changed_line_numbers, changed_line_contents)

    line_diff_html = minidom.parseString(data)
    tables = line_diff_html.getElementsByTagName('table')
    # If there are not NUM_TABLES tables in the html page, there should be an
    # error in the html page.
    if len(tables) != NUM_TABLES_IN_LINEDIFF_PAGE:
      return (backup_url, changed_line_numbers, changed_line_contents)

    # Diff content is in the second table. Each line of the diff content
    # is in <tr>.
    trs = tables[1].getElementsByTagName('tr')
    # ViewVC css classes are all prefixed with 'vc_diff_'; strip that prefix
    # before comparing class names below.
    prefix_len = len('vc_diff_')

    # Filter trs so that it only contains diff chunk with contents.
    filtered_trs = []
    for tr in trs:
      tr_class = tr.getAttribute('class')

      # Check for the classes of the <tr>s.
      if tr_class:
        tr_class = tr_class[prefix_len:]

        # Do not have to add header.
        if tr_class == 'header' or tr_class == 'chunk_header':
          continue

        # If the class of tr is empty, this page does not have any change.
        if tr_class == 'empty':
          return (backup_url, changed_line_numbers, changed_line_contents)

      filtered_trs.append(tr)

    # Iterate through filtered trs, and grab line diff information.
    for tr in filtered_trs:
      tds = tr.getElementsByTagName('td')

      # If there aren't 3 tds, this line should not contain line diff.
      if len(tds) != NUM_TDS_IN_LINEDIFF_PAGE:
        continue

      # If line number information is not in hyperlink, ignore this line.
      try:
        line_num = tds[0].getElementsByTagName('a')[0].firstChild.nodeValue
        left_diff_type = tds[1].getAttribute('class')[prefix_len:]
        right_diff_type = tds[2].getAttribute('class')[prefix_len:]
      except IndexError:
        continue

      # Treat the line as modified only if both left and right diff has type
      # changed or both have different change type, and if the change is not
      # deletion.
      if (left_diff_type != right_diff_type) or (
          left_diff_type == 'change' and right_diff_type == 'change'):

        # Check if the line content is not empty.
        try:
          new_line = tds[2].firstChild.nodeValue
        except AttributeError:
          new_line = ''

        if not (left_diff_type == 'remove' and right_diff_type == 'empty'):
          changed_line_numbers.append(int(line_num))
          changed_line_contents.append(new_line.strip())

    return (url, changed_line_numbers, changed_line_contents)

  def ParseBlameInfo(self, component, file_path, line, revision):
    """Scrapes the ViewVC annotate (blame) page for one line of a file.

    Args:
      component: Component name, used to look up the url templates.
      file_path: File to annotate.
      line: Line number to look up (used as index into the blame rows).
      revision: Revision to annotate at.

    Returns:
      A tuple (line_content, revision, author, revision_url, message), or
      None on any failure (unknown component, fetch error, exception page,
      unexpected page structure).
    """
    url_map = self.component_to_urls_map.get(component)
    if not url_map:
      return None

    # Retrieve blame data from url, return None if fails.
    url = url_map['blame_url'] % (file_path, revision, revision)
    data = crash_utils.GetDataFromURL(url)
    if not data:
      return None

    blame_html = minidom.parseString(data)

    title = blame_html.getElementsByTagName('title')
    # If the returned html page is an exception page, return None.
    if title[0].firstChild.nodeValue == 'ViewVC Exception':
      return None

    # Each of the blame result is in <tr>.
    blame_results = blame_html.getElementsByTagName('tr')
    try:
      blame_result = blame_results[line]
    except IndexError:
      return None

    # There must be 4 <td> for each <tr>. If not, this page is wrong.
    tds = blame_result.getElementsByTagName('td')
    if len(tds) != 4:
      return None

    # The third <td> has the line content, separated by <span>s. Combine
    # those to get a string of changed line. If it has nothing, the line
    # is empty.
    line_content = ''
    if tds[3].hasChildNodes():
      contents = tds[3].childNodes

      for content in contents:
        # Nodetype 3 means it is text node.
        if content.nodeType == minidom.Node.TEXT_NODE:
          line_content += content.nodeValue
        else:
          line_content += content.firstChild.nodeValue

      line_content = line_content.strip()

    # If the current line has the same author/revision as the previous lines,
    # the result is not shown. Propagate up until we find the line with info.
    # NOTE(review): if no earlier row has author info this walks past index 0
    # and wraps via negative indexing — confirm ViewVC always provides one.
    while not tds[1].firstChild:
      line -= 1
      blame_result = blame_results[line]
      tds = blame_result.getElementsByTagName('td')
    author = tds[1].firstChild.nodeValue

    # Revision can either be in hyperlink or plain text.
    try:
      revision = tds[2].getElementsByTagName('a')[0].firstChild.nodeValue
    except IndexError:
      revision = tds[2].firstChild.nodeValue

    (revision_info, _) = self.ParseChangelog(component, revision, revision)
    message = revision_info[int(revision)]['message']

    # Return the parsed information.
    revision_url = url_map['revision_url'] % int(revision)
    return (line_content, revision, author, revision_url, message)
bsd-3-clause
flyingrub/scdl
scdl/scdl.py
1
26778
#!/usr/bin/env python3 # -*- encoding: utf-8 -*- """scdl allows you to download music from Soundcloud Usage: scdl -l <track_url> [-a | -f | -C | -t | -p][-c | --force-metadata][-n <maxtracks>]\ [-o <offset>][--hidewarnings][--debug | --error][--path <path>][--addtofile][--addtimestamp] [--onlymp3][--hide-progress][--min-size <size>][--max-size <size>][--remove][--no-album-tag] [--no-playlist-folder][--download-archive <file>][--extract-artist][--flac] scdl me (-s | -a | -f | -t | -p | -m)[-c | --force-metadata][-n <maxtracks>]\ [-o <offset>][--hidewarnings][--debug | --error][--path <path>][--addtofile][--addtimestamp] [--onlymp3][--hide-progress][--min-size <size>][--max-size <size>][--remove] [--no-playlist-folder][--download-archive <file>][--extract-artist][--flac][--no-album-tag] scdl -h | --help scdl --version Options: -h --help Show this screen --version Show version me Use the user profile from the auth_token -l [url] URL can be track/playlist/user -n [maxtracks] Download the n last tracks of a playlist according to the creation date -s Download the stream of a user (token needed) -a Download all tracks of user (including reposts) -t Download all uploads of a user (no reposts) -f Download all favorites of a user -C Download all commented by a user -p Download all playlists of a user -m Download all liked and owned playlists of user -c Continue if a downloaded file already exists --force-metadata This will set metadata on already downloaded track -o [offset] Begin with a custom offset --addtimestamp Add track creation timestamp to filename, which allows for chronological sorting --addtofile Add artist to filename if missing --debug Set log level to DEBUG --download-archive [file] Keep track of track IDs in an archive file, and skip already-downloaded files --error Set log level to ERROR --extract-artist Set artist tag from title instead of username --hide-progress Hide the wget progress bar --hidewarnings Hide Warnings. 
(use with precaution) --max-size [max-size] Skip tracks larger than size (k/m/g) --min-size [min-size] Skip tracks smaller than size (k/m/g) --no-playlist-folder Download playlist tracks into main directory, instead of making a playlist subfolder --onlymp3 Download only the streamable mp3 file, even if track has a Downloadable file --path [path] Use a custom path for downloaded files --remove Remove any files not downloaded from execution --flac Convert original files to .flac --no-album-tag On some player track get the same cover art if from the same album, this prevent it """ import logging import os import signal import sys import time import warnings import math import shutil import requests import re import tempfile import codecs import shlex import shutil import configparser import mutagen from docopt import docopt from clint.textui import progress from scdl import __version__, CLIENT_ID, ALT_CLIENT_ID from scdl import client, utils from datetime import datetime import subprocess logging.basicConfig(level=logging.INFO, format='%(message)s') logging.getLogger('requests').setLevel(logging.WARNING) logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) logger.addFilter(utils.ColorizeFilter()) arguments = None token = '' path = '' offset = 1 url = { 'playlists-liked': ('https://api-v2.soundcloud.com/users/{0}/playlists' '/liked_and_owned?limit=200'), 'favorites': ('https://api-v2.soundcloud.com/users/{0}/track_likes?' 'limit=200'), 'commented': ('https://api-v2.soundcloud.com/users/{0}/comments'), 'tracks': ('https://api-v2.soundcloud.com/users/{0}/tracks?' 'limit=200'), 'all': ('https://api-v2.soundcloud.com/profile/soundcloud:users:{0}?' 'limit=200'), 'playlists': ('https://api-v2.soundcloud.com/users/{0}/playlists?' 
'limit=5'), 'resolve': ('https://api-v2.soundcloud.com/resolve?url={0}'), 'trackinfo': ('https://api-v2.soundcloud.com/tracks/{0}'), 'original_download' : ("https://api-v2.soundcloud.com/tracks/{0}/download"), 'user': ('https://api-v2.soundcloud.com/users/{0}'), 'me': ('https://api-v2.soundcloud.com/me?oauth_token={0}') } client = client.Client() fileToKeep = [] def main(): """ Main function, parses the URL from command line arguments """ signal.signal(signal.SIGINT, signal_handler) global offset global arguments # Parse argument arguments = docopt(__doc__, version=__version__) if arguments['--debug']: logger.level = logging.DEBUG elif arguments['--error']: logger.level = logging.ERROR # import conf file get_config() logger.info('Soundcloud Downloader') logger.debug(arguments) if arguments['-o'] is not None: try: offset = int(arguments['-o']) if offset < 0: raise except: logger.error('Offset should be a positive integer...') sys.exit(-1) logger.debug('offset: %d', offset) if arguments['--min-size'] is not None: try: arguments['--min-size'] = utils.size_in_bytes( arguments['--min-size'] ) except: logger.exception( 'Min size should be an integer with a possible unit suffix' ) sys.exit(-1) logger.debug('min-size: %d', arguments['--min-size']) if arguments['--max-size'] is not None: try: arguments['--max-size'] = utils.size_in_bytes( arguments['--max-size'] ) except: logger.error( 'Max size should be an integer with a possible unit suffix' ) sys.exit(-1) logger.debug('max-size: %d', arguments['--max-size']) if arguments['--hidewarnings']: warnings.filterwarnings('ignore') if arguments['--path'] is not None: if os.path.exists(arguments['--path']): os.chdir(arguments['--path']) else: logger.error('Invalid path in arguments...') sys.exit(-1) logger.debug('Downloading to ' + os.getcwd() + '...') if arguments['-l']: parse_url(arguments['-l']) elif arguments['me']: if arguments['-f']: download(who_am_i(), 'favorites', 'likes') if arguments['-C']: download(who_am_i(), 
'commented', 'commented tracks') elif arguments['-t']: download(who_am_i(), 'tracks', 'uploaded tracks') elif arguments['-a']: download(who_am_i(), 'all', 'tracks and reposts') elif arguments['-p']: download(who_am_i(), 'playlists', 'playlists') elif arguments['-m']: download(who_am_i(), 'playlists-liked', 'my and liked playlists') if arguments['--remove']: remove_files() def get_config(): """ Reads the music download filepath from scdl.cfg """ global token config = configparser.ConfigParser() if 'XDG_CONFIG_HOME' in os.environ: config_file = os.path.join( os.environ['XDG_CONFIG_HOME'], 'scdl', 'scdl.cfg', ) else: config_file = os.path.join( os.path.expanduser('~'), '.config', 'scdl', 'scdl.cfg', ) config.read(config_file, 'utf8') try: token = config['scdl']['auth_token'] path = config['scdl']['path'] except: logger.error('Are you sure scdl.cfg is in $HOME/.config/scdl/ ?') logger.error('Are both "auth_token" and "path" defined there?') sys.exit(-1) if os.path.exists(path): os.chdir(path) else: logger.error('Invalid path in scdl.cfg...') sys.exit(-1) def get_item(track_url, client_id=CLIENT_ID): """ Fetches metadata for a track or playlist """ try: item_url = url['resolve'].format(track_url) r = requests.get(item_url, params={'client_id': client_id}) logger.debug(r.url) if r.status_code == 403: return get_item(track_url, ALT_CLIENT_ID) item = r.json() no_tracks = item['kind'] == 'playlist' and not item['tracks'] if no_tracks and client_id != ALT_CLIENT_ID: return get_item(track_url, ALT_CLIENT_ID) except Exception: if client_id == ALT_CLIENT_ID: logger.error('Failed to get item...') return logger.error('Error resolving url, retrying...') time.sleep(5) try: return get_item(track_url, ALT_CLIENT_ID) except Exception as e: logger.error('Could not resolve url {0}'.format(track_url)) logger.exception(e) sys.exit(-1) return item def parse_url(track_url): """ Detects if a URL is a track or a playlist, and parses the track(s) to the track downloader """ global arguments 
item = get_item(track_url) logger.debug(item) if not item: return elif item['kind'] == 'track': logger.info('Found a track') download_track(item) elif item['kind'] == 'playlist': logger.info('Found a playlist') download_playlist(item) elif item['kind'] == 'user': logger.info('Found a user profile') if arguments['-f']: download(item, 'favorites', 'likes') elif arguments['-C']: download(item, 'commented', 'commented tracks') elif arguments['-t']: download(item, 'tracks', 'uploaded tracks') elif arguments['-a']: download(item, 'all', 'tracks and reposts') elif arguments['-p']: download(item, 'playlists', 'playlists') elif arguments['-m']: download(item, 'playlists-liked', 'my and liked playlists') else: logger.error('Please provide a download type...') else: logger.error('Unknown item type {0}'.format(item['kind'])) def who_am_i(): """ Display username from current token and check for validity """ me = url['me'].format(token) r = requests.get(me, params={'client_id': CLIENT_ID}) r.raise_for_status() current_user = r.json() logger.debug(me) logger.info('Hello {0}!'.format(current_user['username'])) return current_user def remove_files(): """ Removes any pre-existing tracks that were not just downloaded """ logger.info("Removing local track files that were not downloaded...") files = [f for f in os.listdir('.') if os.path.isfile(f)] for f in files: if f not in fileToKeep: os.remove(f) def get_track_info(track): """ Fetches track info from Soundcloud, given a track_id """ if 'media' in track: return track logger.info('Retrieving more info on the track') info_url = url["trackinfo"].format(track['id']) r = requests.get(info_url, params={'client_id': CLIENT_ID}, stream=True) item = r.json() logger.debug(item) return item def download(user, dl_type, name): """ Download user items of dl_type (ie. all, playlists, liked, commented, etc.) """ if not is_ffmpeg_available(): logger.error('ffmpeg is not available and download cannot continue. 
Please install ffmpeg and re-run the program.') return username = user['username'] user_id = user['id'] logger.info( 'Retrieving all {0} of user {1}...'.format(name, username) ) dl_url = url[dl_type].format(user_id) logger.debug(dl_url) resources = client.get_collection(dl_url, token) del resources[:offset - 1] logger.debug(resources) total = len(resources) logger.info('Retrieved {0} {1}'.format(total, name)) for counter, item in enumerate(resources, offset): try: logger.debug(item) logger.info('{0} n°{1} of {2}'.format( name.capitalize(), counter, total) ) if dl_type == 'all': item_name = item['type'].split('-')[0] # remove the '-repost' uri = item[item_name]['uri'] parse_url(uri) elif dl_type == 'playlists': download_playlist(item) elif dl_type == 'playlists-liked': parse_url(item['playlist']['uri']) elif dl_type == 'tracks': download_track(item) else: download_track(item['track']) except Exception as e: logger.exception(e) logger.info('Downloaded all {0} {1} of user {2}!'.format( total, name, username) ) def download_playlist(playlist): """ Downloads a playlist """ global arguments invalid_chars = '\/:*?|<>"' playlist_name = playlist['title'].encode('utf-8', 'ignore') playlist_name = playlist_name.decode('utf8') playlist_name = ''.join(c for c in playlist_name if c not in invalid_chars) if not arguments['--no-playlist-folder']: if not os.path.exists(playlist_name): os.makedirs(playlist_name) os.chdir(playlist_name) try: with codecs.open(playlist_name + '.m3u', 'w+', 'utf8') as playlist_file: playlist_file.write('#EXTM3U' + os.linesep) if arguments['-n']: # Order by creation date and get the n lasts tracks playlist['tracks'].sort(key=lambda track: track['created_at'], reverse=True) playlist['tracks'] = playlist['tracks'][:int(arguments['-n'])] else: del playlist['tracks'][:offset - 1] for counter, track_raw in enumerate(playlist['tracks'], offset): logger.debug(track_raw) logger.info('Track n°{0}'.format(counter)) playlist_info = {'title': playlist['title'], 
'file': playlist_file, 'tracknumber': counter} download_track(track_raw, playlist_info) finally: if not arguments['--no-playlist-folder']: os.chdir('..') def download_my_stream(): """ DONT WORK FOR NOW Download the stream of the current user """ # TODO # Use Token def try_utime(path, filetime): try: os.utime(path, (time.time(), filetime)) except: logger.error("Cannot update utime of file") def get_filename(track, original_filename=None): invalid_chars = '\/:*?|<>"' username = track['user']['username'] title = track['title'].encode('utf-8', 'ignore').decode('utf8') if arguments['--addtofile']: if username not in title and '-' not in title: title = '{0} - {1}'.format(username, title) logger.debug('Adding "{0}" to filename'.format(username)) if arguments['--addtimestamp']: # created_at sample: 2019-01-30T11:11:37Z ts = datetime \ .strptime(track['created_at'], "%Y-%m-%dT%H:%M:%SZ") \ .timestamp() title = str(int(ts)) + "_" + title ext = ".mp3" if original_filename is not None: original_filename.encode('utf-8', 'ignore').decode('utf8') ext = os.path.splitext(original_filename)[1] filename = title[:251] + ext.lower() filename = ''.join(c for c in filename if c not in invalid_chars) return filename def download_original_file(track, title): logger.info('Downloading the original file.') original_url = url['original_download'].format(track['id']) # Get the requests stream r = requests.get( original_url, params={'client_id': CLIENT_ID} ) r = requests.get(r.json()['redirectUri'], stream=True) if r.status_code == 401: logger.info('The original file has no download left.') return (None, False) if r.status_code == 404: logger.info('Could not get name from stream - using basic name') return (None, False) # Find filename d = r.headers.get('content-disposition') filename = re.findall("filename=(.+)", d)[0] filename = get_filename(track, filename) logger.debug("filename : {0}".format(filename)) # Skip if file ID or filename already exists if already_downloaded(track, title, 
filename): if arguments['--flac'] and can_convert(filename): filename = filename[:-4] + ".flac" return (filename, True) # Write file total_length = int(r.headers.get('content-length')) temp = tempfile.NamedTemporaryFile(delete=False) received = 0 with temp as f: for chunk in progress.bar( r.iter_content(chunk_size=1024), expected_size=(total_length / 1024) + 1, hide=True if arguments["--hide-progress"] else False ): if chunk: received += len(chunk) f.write(chunk) f.flush() if received != total_length: logger.error('connection closed prematurely, download incomplete') sys.exit(-1) shutil.move(temp.name, os.path.join(os.getcwd(), filename)) if arguments['--flac'] and can_convert(filename): logger.info('Converting to .flac...') newfilename = filename[:-4] + ".flac" commands = ['ffmpeg', '-i', filename, newfilename, '-loglevel', 'error'] logger.debug("Commands: {}".format(commands)) subprocess.call(commands) os.remove(filename) filename = newfilename return (filename, False) def get_track_m3u8(track): url = None for transcoding in track['media']['transcodings']: if transcoding['format']['protocol'] == 'hls' \ and transcoding['format']['mime_type'] == 'audio/mpeg': url = transcoding['url'] if url is not None: r = requests.get(url, params={'client_id': CLIENT_ID}) logger.debug(r.url) return r.json()['url'] def download_hls_mp3(track, title): filename = get_filename(track) logger.debug("filename : {0}".format(filename)) # Skip if file ID or filename already exists if already_downloaded(track, title, filename): return (filename, True) # Get the requests stream url = get_track_m3u8(track) filename_path = os.path.abspath(filename) subprocess.call(['ffmpeg', '-i', url, '-c', 'copy', filename_path, '-loglevel', 'fatal']) return (filename, False) def download_track(track, playlist_info=None): """ Downloads a track """ global arguments track = get_track_info(track) title = track['title'] title = title.encode('utf-8', 'ignore').decode('utf8') logger.info('Downloading 
{0}'.format(title)) # Not streamable if not track['streamable']: logger.error('{0} is not streamable...'.format(title)) return # Geoblocked track if track['policy'] == 'BLOCK': logger.error('{0} is not available in your location...\n'.format(title)) return # Downloadable track filename = None is_already_downloaded = False if track['downloadable'] and track['has_downloads_left'] and not arguments['--onlymp3']: filename, is_already_downloaded = download_original_file(track, title) if filename is None: filename, is_already_downloaded = download_hls_mp3(track, title) # Add the track to the generated m3u playlist file if playlist_info: duration = math.floor(track['duration'] / 1000) playlist_info['file'].write( '#EXTINF:{0},{1}{3}{2}{3}'.format( duration, title, filename, os.linesep ) ) if arguments['--remove']: fileToKeep.append(filename) record_download_archive(track) # Skip if file ID or filename already exists if is_already_downloaded and not arguments['--force-metadata']: logger.info('Track "{0}" already downloaded.'.format(title)) return # If file does not exist an error occurred if not os.path.isfile(filename): logger.error('An error occurred downloading {0}.\n'.format(filename)) logger.error('Exiting...') sys.exit(-1) # Try to set the metadata if filename.endswith('.mp3') or filename.endswith('.flac'): try: set_metadata(track, filename, playlist_info) except Exception as e: logger.error('Error trying to set the tags...') logger.debug(e) else: logger.error("This type of audio doesn't support tagging...") # Try to change the real creation date created_at = track['created_at'] timestamp = datetime.strptime(created_at, '%Y-%m-%dT%H:%M:%SZ') filetime = int(time.mktime(timestamp.timetuple())) try_utime(filename, filetime) logger.info('{0} Downloaded.\n'.format(filename)) def can_convert(filename): ext = os.path.splitext(filename)[1] return 'wav' in ext or 'aif' in ext def already_downloaded(track, title, filename): """ Returns True if the file has already been 
downloaded """ global arguments already_downloaded = False if os.path.isfile(filename): already_downloaded = True if arguments['--flac'] and can_convert(filename) \ and os.path.isfile(filename[:-4] + ".flac"): already_downloaded = True if arguments['--download-archive'] and in_download_archive(track): already_downloaded = True if arguments['--flac'] and can_convert(filename) and os.path.isfile(filename): already_downloaded = False if already_downloaded: if arguments['-c'] or arguments['--remove'] or arguments['--force-metadata']: return True else: logger.error('Track "{0}" already exists!'.format(title)) logger.error('Exiting... (run again with -c to continue)') sys.exit(-1) return False def in_download_archive(track): """ Returns True if a track_id exists in the download archive """ global arguments if not arguments['--download-archive']: return archive_filename = arguments.get('--download-archive') try: with open(archive_filename, 'a+', encoding='utf-8') as file: file.seek(0) track_id = '{0}'.format(track['id']) for line in file: if line.strip() == track_id: return True except IOError as ioe: logger.error('Error trying to read download archive...') logger.debug(ioe) return False def record_download_archive(track): """ Write the track_id in the download archive """ global arguments if not arguments['--download-archive']: return archive_filename = arguments.get('--download-archive') try: with open(archive_filename, 'a', encoding='utf-8') as file: file.write('{0}'.format(track['id']) + '\n') except IOError as ioe: logger.error('Error trying to write to download archive...') logger.debug(ioe) def set_metadata(track, filename, playlist_info=None): """ Sets the mp3 file metadata using the Python module Mutagen """ logger.info('Setting tags...') global arguments artwork_url = track['artwork_url'] user = track['user'] if not artwork_url: artwork_url = user['avatar_url'] artwork_url = artwork_url.replace('large', 't500x500') response = requests.get(artwork_url, 
stream=True) with tempfile.NamedTemporaryFile() as out_file: shutil.copyfileobj(response.raw, out_file) out_file.seek(0) track_created = track['created_at'] track_date = datetime.strptime(track_created, "%Y-%m-%dT%H:%M:%SZ") debug_extract_dates = '{0} {1}'.format(track_created, track_date) logger.debug('Extracting date: {0}'.format(debug_extract_dates)) track['date'] = track_date.strftime("%Y-%m-%d %H::%M::%S") track['artist'] = user['username'] if arguments['--extract-artist']: for dash in [' - ', ' − ', ' – ', ' — ', ' ― ']: if dash in track['title']: artist_title = track['title'].split(dash) track['artist'] = artist_title[0].strip() track['title'] = artist_title[1].strip() break audio = mutagen.File(filename, easy=True) audio['title'] = track['title'] audio['artist'] = track['artist'] if track['genre']: audio['genre'] = track['genre'] if track['permalink_url']: audio['website'] = track['permalink_url'] if track['date']: audio['date'] = track['date'] if playlist_info: if not arguments['--no-album-tag']: audio['album'] = playlist_info['title'] audio['tracknumber'] = str(playlist_info['tracknumber']) audio.save() a = mutagen.File(filename) if track['description']: if a.__class__ == mutagen.flac.FLAC: a['description'] = track['description'] elif a.__class__ == mutagen.mp3.MP3: a['COMM'] = mutagen.id3.COMM( encoding=3, lang=u'ENG', text=track['description'] ) if artwork_url: if a.__class__ == mutagen.flac.FLAC: p = mutagen.flac.Picture() p.data = out_file.read() p.width = 500 p.height = 500 p.type = mutagen.id3.PictureType.COVER_FRONT a.add_picture(p) elif a.__class__ == mutagen.mp3.MP3: a['APIC'] = mutagen.id3.APIC( encoding=3, mime='image/jpeg', type=3, desc='Cover', data=out_file.read() ) a.save() def signal_handler(signal, frame): """ Handle keyboard interrupt """ logger.info('\nGood bye!') sys.exit(0) def is_ffmpeg_available(): """ Returns true if ffmpeg is available in the operating system """ return shutil.which('ffmpeg') is not None if __name__ == '__main__': 
main()
gpl-2.0
CrankWheel/grit-i18n
grit/gather/rc.py
62
11190
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. '''Support for gathering resources from RC files. ''' import re from grit import exception from grit import lazy_re from grit import tclib from grit.gather import regexp # Find portions that need unescaping in resource strings. We need to be # careful that a \\n is matched _first_ as a \\ rather than matching as # a \ followed by a \n. # TODO(joi) Handle ampersands if we decide to change them into <ph> # TODO(joi) May need to handle other control characters than \n _NEED_UNESCAPE = lazy_re.compile(r'""|\\\\|\\n|\\t') # Find portions that need escaping to encode string as a resource string. _NEED_ESCAPE = lazy_re.compile(r'"|\n|\t|\\|\&nbsp\;') # How to escape certain characters _ESCAPE_CHARS = { '"' : '""', '\n' : '\\n', '\t' : '\\t', '\\' : '\\\\', '&nbsp;' : ' ' } # How to unescape certain strings _UNESCAPE_CHARS = dict([[value, key] for key, value in _ESCAPE_CHARS.items()]) class Section(regexp.RegexpGatherer): '''A section from a resource file.''' @staticmethod def Escape(text): '''Returns a version of 'text' with characters escaped that need to be for inclusion in a resource section.''' def Replace(match): return _ESCAPE_CHARS[match.group()] return _NEED_ESCAPE.sub(Replace, text) @staticmethod def UnEscape(text): '''Returns a version of 'text' with escaped characters unescaped.''' def Replace(match): return _UNESCAPE_CHARS[match.group()] return _NEED_UNESCAPE.sub(Replace, text) def _RegExpParse(self, rexp, text_to_parse): '''Overrides _RegExpParse to add shortcut group handling. Otherwise the same. 
''' super(Section, self)._RegExpParse(rexp, text_to_parse) if not self.is_skeleton and len(self.GetTextualIds()) > 0: group_name = self.GetTextualIds()[0] for c in self.GetCliques(): c.AddToShortcutGroup(group_name) def ReadSection(self): rc_text = self._LoadInputFile() out = '' begin_count = 0 assert self.extkey first_line_re = re.compile(r'\s*' + self.extkey + r'\b') for line in rc_text.splitlines(True): if out or first_line_re.match(line): out += line # we stop once we reach the END for the outermost block. begin_count_was = begin_count if len(out) > 0 and line.strip() == 'BEGIN': begin_count += 1 elif len(out) > 0 and line.strip() == 'END': begin_count -= 1 if begin_count_was == 1 and begin_count == 0: break if len(out) == 0: raise exception.SectionNotFound('%s in file %s' % (self.extkey, self.rc_file)) self.text_ = out.strip() class Dialog(Section): '''A resource section that contains a dialog resource.''' # A typical dialog resource section looks like this: # # IDD_ABOUTBOX DIALOGEX 22, 17, 230, 75 # STYLE DS_SETFONT | DS_MODALFRAME | WS_CAPTION | WS_SYSMENU # CAPTION "About" # FONT 8, "System", 0, 0, 0x0 # BEGIN # ICON IDI_KLONK,IDC_MYICON,14,9,20,20 # LTEXT "klonk Version ""yibbee"" 1.0",IDC_STATIC,49,10,119,8, # SS_NOPREFIX # LTEXT "Copyright (C) 2005",IDC_STATIC,49,20,119,8 # DEFPUSHBUTTON "OK",IDOK,195,6,30,11,WS_GROUP # CONTROL "Jack ""Black"" Daniels",IDC_RADIO1,"Button", # BS_AUTORADIOBUTTON,46,51,84,10 # END # We are using a sorted set of keys, and we assume that the # group name used for descriptions (type) will come after the "text" # group in alphabetical order. We also assume that there cannot be # more than one description per regular expression match. # If that's not the case some descriptions will be clobbered. dialog_re_ = lazy_re.compile(''' # The dialog's ID in the first line (?P<id1>[A-Z0-9_]+)\s+DIALOG(EX)? 
| # The caption of the dialog (?P<type1>CAPTION)\s+"(?P<text1>.*?([^"]|""))"\s | # Lines for controls that have text and an ID \s+(?P<type2>[A-Z]+)\s+"(?P<text2>.*?([^"]|"")?)"\s*,\s*(?P<id2>[A-Z0-9_]+)\s*, | # Lines for controls that have text only \s+(?P<type3>[A-Z]+)\s+"(?P<text3>.*?([^"]|"")?)"\s*, | # Lines for controls that reference other resources \s+[A-Z]+\s+[A-Z0-9_]+\s*,\s*(?P<id3>[A-Z0-9_]*[A-Z][A-Z0-9_]*) | # This matches "NOT SOME_STYLE" so that it gets consumed and doesn't get # matched by the next option (controls that have only an ID and then just # numbers) \s+NOT\s+[A-Z][A-Z0-9_]+ | # Lines for controls that have only an ID and then just numbers \s+[A-Z]+\s+(?P<id4>[A-Z0-9_]*[A-Z][A-Z0-9_]*)\s*, ''', re.MULTILINE | re.VERBOSE) def Parse(self): '''Knows how to parse dialog resource sections.''' self.ReadSection() self._RegExpParse(self.dialog_re_, self.text_) class Menu(Section): '''A resource section that contains a menu resource.''' # A typical menu resource section looks something like this: # # IDC_KLONK MENU # BEGIN # POPUP "&File" # BEGIN # MENUITEM "E&xit", IDM_EXIT # MENUITEM "This be ""Klonk"" me like", ID_FILE_THISBE # POPUP "gonk" # BEGIN # MENUITEM "Klonk && is ""good""", ID_GONK_KLONKIS # END # END # POPUP "&Help" # BEGIN # MENUITEM "&About ...", IDM_ABOUT # END # END # Description used for the messages generated for menus, to explain to # the translators how to handle them. MENU_MESSAGE_DESCRIPTION = ( 'This message represents a menu. Each of the items appears in sequence ' '(some possibly within sub-menus) in the menu. The XX01XX placeholders ' 'serve to separate items. Each item contains an & (ampersand) character ' 'in front of the keystroke that should be used as a shortcut for that item ' 'in the menu. Please make sure that no two items in the same menu share ' 'the same shortcut.' 
) # A dandy regexp to suck all the IDs and translateables out of a menu # resource menu_re_ = lazy_re.compile(''' # Match the MENU ID on the first line ^(?P<id1>[A-Z0-9_]+)\s+MENU | # Match the translateable caption for a popup menu POPUP\s+"(?P<text1>.*?([^"]|""))"\s | # Match the caption & ID of a MENUITEM MENUITEM\s+"(?P<text2>.*?([^"]|""))"\s*,\s*(?P<id2>[A-Z0-9_]+) ''', re.MULTILINE | re.VERBOSE) def Parse(self): '''Knows how to parse menu resource sections. Because it is important that menu shortcuts are unique within the menu, we return each menu as a single message with placeholders to break up the different menu items, rather than return a single message per menu item. we also add an automatic description with instructions for the translators.''' self.ReadSection() self.single_message_ = tclib.Message(description=self.MENU_MESSAGE_DESCRIPTION) self._RegExpParse(self.menu_re_, self.text_) class Version(Section): '''A resource section that contains a VERSIONINFO resource.''' # A typical version info resource can look like this: # # VS_VERSION_INFO VERSIONINFO # FILEVERSION 1,0,0,1 # PRODUCTVERSION 1,0,0,1 # FILEFLAGSMASK 0x3fL # #ifdef _DEBUG # FILEFLAGS 0x1L # #else # FILEFLAGS 0x0L # #endif # FILEOS 0x4L # FILETYPE 0x2L # FILESUBTYPE 0x0L # BEGIN # BLOCK "StringFileInfo" # BEGIN # BLOCK "040904e4" # BEGIN # VALUE "CompanyName", "TODO: <Company name>" # VALUE "FileDescription", "TODO: <File description>" # VALUE "FileVersion", "1.0.0.1" # VALUE "LegalCopyright", "TODO: (c) <Company name>. All rights reserved." # VALUE "InternalName", "res_format_test.dll" # VALUE "OriginalFilename", "res_format_test.dll" # VALUE "ProductName", "TODO: <Product name>" # VALUE "ProductVersion", "1.0.0.1" # END # END # BLOCK "VarFileInfo" # BEGIN # VALUE "Translation", 0x409, 1252 # END # END # # # In addition to the above fields, VALUE fields named "Comments" and # "LegalTrademarks" may also be translateable. 
version_re_ = lazy_re.compile(''' # Match the ID on the first line ^(?P<id1>[A-Z0-9_]+)\s+VERSIONINFO | # Match all potentially translateable VALUE sections \s+VALUE\s+" ( CompanyName|FileDescription|LegalCopyright| ProductName|Comments|LegalTrademarks )",\s+"(?P<text1>.*?([^"]|""))"\s ''', re.MULTILINE | re.VERBOSE) def Parse(self): '''Knows how to parse VERSIONINFO resource sections.''' self.ReadSection() self._RegExpParse(self.version_re_, self.text_) # TODO(joi) May need to override the Translate() method to change the # "Translation" VALUE block to indicate the correct language code. class RCData(Section): '''A resource section that contains some data .''' # A typical rcdataresource section looks like this: # # IDR_BLAH RCDATA { 1, 2, 3, 4 } dialog_re_ = lazy_re.compile(''' ^(?P<id1>[A-Z0-9_]+)\s+RCDATA\s+(DISCARDABLE)?\s+\{.*?\} ''', re.MULTILINE | re.VERBOSE | re.DOTALL) def Parse(self): '''Implementation for resource types w/braces (not BEGIN/END) ''' rc_text = self._LoadInputFile() out = '' begin_count = 0 openbrace_count = 0 assert self.extkey first_line_re = re.compile(r'\s*' + self.extkey + r'\b') for line in rc_text.splitlines(True): if out or first_line_re.match(line): out += line # We stop once the braces balance (could happen in one line). begin_count_was = begin_count if len(out) > 0: openbrace_count += line.count('{') begin_count += line.count('{') begin_count -= line.count('}') if ((begin_count_was == 1 and begin_count == 0) or (openbrace_count > 0 and begin_count == 0)): break if len(out) == 0: raise exception.SectionNotFound('%s in file %s' % (self.extkey, self.rc_file)) self.text_ = out self._RegExpParse(self.dialog_re_, out) class Accelerators(Section): '''An ACCELERATORS table. 
''' # A typical ACCELERATORS section looks like this: # # IDR_ACCELERATOR1 ACCELERATORS # BEGIN # "^C", ID_ACCELERATOR32770, ASCII, NOINVERT # "^V", ID_ACCELERATOR32771, ASCII, NOINVERT # VK_INSERT, ID_ACCELERATOR32772, VIRTKEY, CONTROL, NOINVERT # END accelerators_re_ = lazy_re.compile(''' # Match the ID on the first line ^(?P<id1>[A-Z0-9_]+)\s+ACCELERATORS\s+ | # Match accelerators specified as VK_XXX \s+VK_[A-Z0-9_]+,\s*(?P<id2>[A-Z0-9_]+)\s*, | # Match accelerators specified as e.g. "^C" \s+"[^"]*",\s+(?P<id3>[A-Z0-9_]+)\s*, ''', re.MULTILINE | re.VERBOSE) def Parse(self): '''Knows how to parse ACCELERATORS resource sections.''' self.ReadSection() self._RegExpParse(self.accelerators_re_, self.text_)
bsd-2-clause
ravibhure/ansible
lib/ansible/modules/cloud/cloudstack/cs_loadbalancer_rule_member.py
49
9858
#!/usr/bin/python # -*- coding: utf-8 -*- # # (c) 2015, Darren Worrall <darren@iweb.co.uk> # (c) 2015, René Moser <mail@renemoser.net> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: cs_loadbalancer_rule_member short_description: Manages load balancer rule members on Apache CloudStack based clouds. description: - Add and remove load balancer rule members. version_added: '2.0' author: - "Darren Worrall (@dazworrall)" - "René Moser (@resmo)" options: name: description: - The name of the load balancer rule. required: true ip_address: description: - Public IP address from where the network traffic will be load balanced from. - Only needed to find the rule if C(name) is not unique. required: false default: null aliases: [ 'public_ip' ] vms: description: - List of VMs to assign to or remove from the rule. required: true aliases: [ 'vm' ] state: description: - Should the VMs be present or absent from the rule. required: false default: 'present' choices: [ 'present', 'absent' ] project: description: - Name of the project the firewall rule is related to. required: false default: null domain: description: - Domain the rule is related to. required: false default: null account: description: - Account the rule is related to. 
required: false default: null zone: description: - Name of the zone in which the rule should be located. - If not set, default zone is used. required: false default: null extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' # Add VMs to an existing load balancer - local_action: module: cs_loadbalancer_rule_member name: balance_http vms: - web01 - web02 # Remove a VM from an existing load balancer - local_action: module: cs_loadbalancer_rule_member name: balance_http vms: - web01 - web02 state: absent # Rolling upgrade of hosts - hosts: webservers serial: 1 pre_tasks: - name: Remove from load balancer local_action: module: cs_loadbalancer_rule_member name: balance_http vm: "{{ ansible_hostname }}" state: absent tasks: # Perform update post_tasks: - name: Add to load balancer local_action: module: cs_loadbalancer_rule_member name: balance_http vm: "{{ ansible_hostname }}" state: present ''' RETURN = ''' --- id: description: UUID of the rule. returned: success type: string sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f zone: description: Name of zone the rule is related to. returned: success type: string sample: ch-gva-2 project: description: Name of project the rule is related to. returned: success type: string sample: Production account: description: Account the rule is related to. returned: success type: string sample: example account domain: description: Domain the rule is related to. returned: success type: string sample: example domain algorithm: description: Load balancer algorithm used. returned: success type: string sample: "source" cidr: description: CIDR to forward traffic from. returned: success type: string sample: "" name: description: Name of the rule. returned: success type: string sample: "http-lb" description: description: Description of the rule. returned: success type: string sample: "http load balancer rule" protocol: description: Protocol of the rule. returned: success type: string sample: "tcp" public_port: description: Public port. 
returned: success type: string sample: 80 private_port: description: Private IP address. returned: success type: string sample: 80 public_ip: description: Public IP address. returned: success type: string sample: "1.2.3.4" vms: description: Rule members. returned: success type: list sample: '[ "web01", "web02" ]' tags: description: List of resource tags associated with the rule. returned: success type: dict sample: '[ { "key": "foo", "value": "bar" } ]' state: description: State of the rule. returned: success type: string sample: "Add" ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.cloudstack import ( AnsibleCloudStack, cs_argument_spec, cs_required_together, ) class AnsibleCloudStackLBRuleMember(AnsibleCloudStack): def __init__(self, module): super(AnsibleCloudStackLBRuleMember, self).__init__(module) self.returns = { 'publicip': 'public_ip', 'algorithm': 'algorithm', 'cidrlist': 'cidr', 'protocol': 'protocol', } # these values will be casted to int self.returns_to_int = { 'publicport': 'public_port', 'privateport': 'private_port', } def get_rule(self): args = self._get_common_args() args.update({ 'name': self.module.params.get('name'), 'zoneid': self.get_zone(key='id') if self.module.params.get('zone') else None, }) if self.module.params.get('ip_address'): args['publicipid'] = self.get_ip_address(key='id') rules = self.query_api('listLoadBalancerRules', **args) if rules: if len(rules['loadbalancerrule']) > 1: self.module.fail_json(msg="More than one rule having name %s. Please pass 'ip_address' as well." 
% args['name']) return rules['loadbalancerrule'][0] return None def _get_common_args(self): return { 'account': self.get_account(key='name'), 'domainid': self.get_domain(key='id'), 'projectid': self.get_project(key='id'), } def _get_members_of_rule(self, rule): res = self.query_api('listLoadBalancerRuleInstances', id=rule['id']) return res.get('loadbalancerruleinstance', []) def _ensure_members(self, operation): if operation not in ['add', 'remove']: self.module.fail_json(msg="Bad operation: %s" % operation) rule = self.get_rule() if not rule: self.module.fail_json(msg="Unknown rule: %s" % self.module.params.get('name')) existing = {} for vm in self._get_members_of_rule(rule=rule): existing[vm['name']] = vm['id'] wanted_names = self.module.params.get('vms') if operation == 'add': cs_func = self.cs.assignToLoadBalancerRule to_change = set(wanted_names) - set(existing.keys()) else: cs_func = self.cs.removeFromLoadBalancerRule to_change = set(wanted_names) & set(existing.keys()) if not to_change: return rule args = self._get_common_args() vms = self.query_api('listVirtualMachines', **args) to_change_ids = [] for name in to_change: for vm in vms.get('virtualmachine', []): if vm['name'] == name: to_change_ids.append(vm['id']) break else: self.module.fail_json(msg="Unknown VM: %s" % name) if to_change_ids: self.result['changed'] = True if to_change_ids and not self.module.check_mode: res = cs_func( id=rule['id'], virtualmachineids=to_change_ids, ) poll_async = self.module.params.get('poll_async') if poll_async: self.poll_job(res) rule = self.get_rule() return rule def add_members(self): return self._ensure_members('add') def remove_members(self): return self._ensure_members('remove') def get_result(self, rule): super(AnsibleCloudStackLBRuleMember, self).get_result(rule) if rule: self.result['vms'] = [] for vm in self._get_members_of_rule(rule=rule): self.result['vms'].append(vm['name']) return self.result def main(): argument_spec = cs_argument_spec() 
argument_spec.update(dict( name=dict(required=True), ip_address=dict(aliases=['public_ip']), vms=dict(required=True, aliases=['vm'], type='list'), state=dict(choices=['present', 'absent'], default='present'), zone=dict(), domain=dict(), project=dict(), account=dict(), poll_async=dict(type='bool', default=True), )) module = AnsibleModule( argument_spec=argument_spec, required_together=cs_required_together(), supports_check_mode=True ) acs_lb_rule_member = AnsibleCloudStackLBRuleMember(module) state = module.params.get('state') if state in ['absent']: rule = acs_lb_rule_member.remove_members() else: rule = acs_lb_rule_member.add_members() result = acs_lb_rule_member.get_result(rule) module.exit_json(**result) if __name__ == '__main__': main()
gpl-3.0
paweljasinski/ironpython3
Tests/interop/net/loadorder/t3h.py
3
1606
##################################################################################### # # Copyright (c) Microsoft Corporation. All rights reserved. # # This source code is subject to terms and conditions of the Apache License, Version 2.0. A # copy of the license can be found in the License.html file at the root of this distribution. If # you cannot locate the Apache License, Version 2.0, please send an email to # ironpy@microsoft.com. By using this source code in any fashion, you are agreeing to be bound # by the terms of the Apache License, Version 2.0. # # You must not remove this notice, or any other, from this software. # # ##################################################################################### from iptest.assert_util import * add_clr_assemblies("loadorder_3") # namespace First { # public class Generic1<K, V> { # public static string Flag = typeof(Generic1<,>).FullName; # } # } import First from First import * AreEqual(First.Generic1[str, str].Flag, "First.Generic1`2") add_clr_assemblies("loadorder_3h") # namespace First { # public class Generic1<T> { # public static string Flag = typeof(Generic1<>).FullName; # } # } AreEqual(First.Generic1[str, str].Flag, "First.Generic1`2") AreEqual(First.Generic1[int].Flag, "First.Generic1`1") AssertError(ValueError, lambda: Generic1[int]) # !!! AreEqual(Generic1[str, str].Flag, "First.Generic1`2") from First import * AreEqual(Generic1[str, str].Flag, "First.Generic1`2") AreEqual(Generic1[int].Flag, "First.Generic1`1")
apache-2.0
CiscoUcs/Ironic
build/lib/ironic/openstack/common/periodic_task.py
5
8319
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import logging import random import time from oslo_config import cfg import six from ironic.openstack.common._i18n import _, _LE, _LI periodic_opts = [ cfg.BoolOpt('run_external_periodic_tasks', default=True, help='Some periodic tasks can be run in a separate process. ' 'Should we run them here?'), ] CONF = cfg.CONF CONF.register_opts(periodic_opts) LOG = logging.getLogger(__name__) DEFAULT_INTERVAL = 60.0 def list_opts(): """Entry point for oslo-config-generator.""" return [(None, copy.deepcopy(periodic_opts))] class InvalidPeriodicTaskArg(Exception): message = _("Unexpected argument for periodic task creation: %(arg)s.") def periodic_task(*args, **kwargs): """Decorator to indicate that a method is a periodic task. This decorator can be used in two ways: 1. Without arguments '@periodic_task', this will be run on the default interval of 60 seconds. 2. With arguments: @periodic_task(spacing=N [, run_immediately=[True|False]] [, name=[None|"string"]) this will be run on approximately every N seconds. If this number is negative the periodic task will be disabled. If the run_immediately argument is provided and has a value of 'True', the first run of the task will be shortly after task scheduler starts. If run_immediately is omitted or set to 'False', the first time the task runs will be approximately N seconds after the task scheduler starts. If name is not provided, __name__ of function is used. 
""" def decorator(f): # Test for old style invocation if 'ticks_between_runs' in kwargs: raise InvalidPeriodicTaskArg(arg='ticks_between_runs') # Control if run at all f._periodic_task = True f._periodic_external_ok = kwargs.pop('external_process_ok', False) if f._periodic_external_ok and not CONF.run_external_periodic_tasks: f._periodic_enabled = False else: f._periodic_enabled = kwargs.pop('enabled', True) f._periodic_name = kwargs.pop('name', f.__name__) # Control frequency f._periodic_spacing = kwargs.pop('spacing', 0) f._periodic_immediate = kwargs.pop('run_immediately', False) if f._periodic_immediate: f._periodic_last_run = None else: f._periodic_last_run = time.time() return f # NOTE(sirp): The `if` is necessary to allow the decorator to be used with # and without parenthesis. # # In the 'with-parenthesis' case (with kwargs present), this function needs # to return a decorator function since the interpreter will invoke it like: # # periodic_task(*args, **kwargs)(f) # # In the 'without-parenthesis' case, the original function will be passed # in as the first argument, like: # # periodic_task(f) if kwargs: return decorator else: return decorator(args[0]) class _PeriodicTasksMeta(type): def _add_periodic_task(cls, task): """Add a periodic task to the list of periodic tasks. The task should already be decorated by @periodic_task. :return: whether task was actually enabled """ name = task._periodic_name if task._periodic_spacing < 0: LOG.info(_LI('Skipping periodic task %(task)s because ' 'its interval is negative'), {'task': name}) return False if not task._periodic_enabled: LOG.info(_LI('Skipping periodic task %(task)s because ' 'it is disabled'), {'task': name}) return False # A periodic spacing of zero indicates that this task should # be run on the default interval to avoid running too # frequently. 
if task._periodic_spacing == 0: task._periodic_spacing = DEFAULT_INTERVAL cls._periodic_tasks.append((name, task)) cls._periodic_spacing[name] = task._periodic_spacing return True def __init__(cls, names, bases, dict_): """Metaclass that allows us to collect decorated periodic tasks.""" super(_PeriodicTasksMeta, cls).__init__(names, bases, dict_) # NOTE(sirp): if the attribute is not present then we must be the base # class, so, go ahead an initialize it. If the attribute is present, # then we're a subclass so make a copy of it so we don't step on our # parent's toes. try: cls._periodic_tasks = cls._periodic_tasks[:] except AttributeError: cls._periodic_tasks = [] try: cls._periodic_spacing = cls._periodic_spacing.copy() except AttributeError: cls._periodic_spacing = {} for value in cls.__dict__.values(): if getattr(value, '_periodic_task', False): cls._add_periodic_task(value) def _nearest_boundary(last_run, spacing): """Find nearest boundary which is in the past, which is a multiple of the spacing with the last run as an offset. Eg if last run was 10 and spacing was 7, the new last run could be: 17, 24, 31, 38... 0% to 5% of the spacing value will be added to this value to ensure tasks do not synchronize. This jitter is rounded to the nearest second, this means that spacings smaller than 20 seconds will not have jitter. """ current_time = time.time() if last_run is None: return current_time delta = current_time - last_run offset = delta % spacing # Add up to 5% jitter jitter = int(spacing * (random.random() / 20)) return current_time - offset + jitter @six.add_metaclass(_PeriodicTasksMeta) class PeriodicTasks(object): def __init__(self): super(PeriodicTasks, self).__init__() self._periodic_last_run = {} for name, task in self._periodic_tasks: self._periodic_last_run[name] = task._periodic_last_run def add_periodic_task(self, task): """Add a periodic task to the list of periodic tasks. The task should already be decorated by @periodic_task. 
""" if self.__class__._add_periodic_task(task): self._periodic_last_run[task._periodic_name] = ( task._periodic_last_run) def run_periodic_tasks(self, context, raise_on_error=False): """Tasks to be run at a periodic interval.""" idle_for = DEFAULT_INTERVAL for task_name, task in self._periodic_tasks: full_task_name = '.'.join([self.__class__.__name__, task_name]) spacing = self._periodic_spacing[task_name] last_run = self._periodic_last_run[task_name] # Check if due, if not skip idle_for = min(idle_for, spacing) if last_run is not None: delta = last_run + spacing - time.time() if delta > 0: idle_for = min(idle_for, delta) continue LOG.debug("Running periodic task %(full_task_name)s", {"full_task_name": full_task_name}) self._periodic_last_run[task_name] = _nearest_boundary( last_run, spacing) try: task(self, context) except Exception as e: if raise_on_error: raise LOG.exception(_LE("Error during %(full_task_name)s: %(e)s"), {"full_task_name": full_task_name, "e": e}) time.sleep(0) return idle_for
apache-2.0
bdh1011/wau
venv/lib/python2.7/site-packages/twisted/conch/scripts/tkconch.py
10
22833
# -*- test-case-name: twisted.conch.test.test_scripts -*- # Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Implementation module for the `tkconch` command. """ import Tkinter, tkFileDialog, tkMessageBox from twisted.conch import error from twisted.conch.ui import tkvt100 from twisted.conch.ssh import transport, userauth, connection, common, keys from twisted.conch.ssh import session, forwarding, channel from twisted.conch.client.default import isInKnownHosts from twisted.internet import reactor, defer, protocol, tksupport from twisted.python import usage, log import os, sys, getpass, struct, base64, signal class TkConchMenu(Tkinter.Frame): def __init__(self, *args, **params): ## Standard heading: initialization apply(Tkinter.Frame.__init__, (self,) + args, params) self.master.title('TkConch') self.localRemoteVar = Tkinter.StringVar() self.localRemoteVar.set('local') Tkinter.Label(self, anchor='w', justify='left', text='Hostname').grid(column=1, row=1, sticky='w') self.host = Tkinter.Entry(self) self.host.grid(column=2, columnspan=2, row=1, sticky='nesw') Tkinter.Label(self, anchor='w', justify='left', text='Port').grid(column=1, row=2, sticky='w') self.port = Tkinter.Entry(self) self.port.grid(column=2, columnspan=2, row=2, sticky='nesw') Tkinter.Label(self, anchor='w', justify='left', text='Username').grid(column=1, row=3, sticky='w') self.user = Tkinter.Entry(self) self.user.grid(column=2, columnspan=2, row=3, sticky='nesw') Tkinter.Label(self, anchor='w', justify='left', text='Command').grid(column=1, row=4, sticky='w') self.command = Tkinter.Entry(self) self.command.grid(column=2, columnspan=2, row=4, sticky='nesw') Tkinter.Label(self, anchor='w', justify='left', text='Identity').grid(column=1, row=5, sticky='w') self.identity = Tkinter.Entry(self) self.identity.grid(column=2, row=5, sticky='nesw') Tkinter.Button(self, command=self.getIdentityFile, text='Browse').grid(column=3, row=5, sticky='nesw') Tkinter.Label(self, text='Port 
Forwarding').grid(column=1, row=6, sticky='w') self.forwards = Tkinter.Listbox(self, height=0, width=0) self.forwards.grid(column=2, columnspan=2, row=6, sticky='nesw') Tkinter.Button(self, text='Add', command=self.addForward).grid(column=1, row=7) Tkinter.Button(self, text='Remove', command=self.removeForward).grid(column=1, row=8) self.forwardPort = Tkinter.Entry(self) self.forwardPort.grid(column=2, row=7, sticky='nesw') Tkinter.Label(self, text='Port').grid(column=3, row=7, sticky='nesw') self.forwardHost = Tkinter.Entry(self) self.forwardHost.grid(column=2, row=8, sticky='nesw') Tkinter.Label(self, text='Host').grid(column=3, row=8, sticky='nesw') self.localForward = Tkinter.Radiobutton(self, text='Local', variable=self.localRemoteVar, value='local') self.localForward.grid(column=2, row=9) self.remoteForward = Tkinter.Radiobutton(self, text='Remote', variable=self.localRemoteVar, value='remote') self.remoteForward.grid(column=3, row=9) Tkinter.Label(self, text='Advanced Options').grid(column=1, columnspan=3, row=10, sticky='nesw') Tkinter.Label(self, anchor='w', justify='left', text='Cipher').grid(column=1, row=11, sticky='w') self.cipher = Tkinter.Entry(self, name='cipher') self.cipher.grid(column=2, columnspan=2, row=11, sticky='nesw') Tkinter.Label(self, anchor='w', justify='left', text='MAC').grid(column=1, row=12, sticky='w') self.mac = Tkinter.Entry(self, name='mac') self.mac.grid(column=2, columnspan=2, row=12, sticky='nesw') Tkinter.Label(self, anchor='w', justify='left', text='Escape Char').grid(column=1, row=13, sticky='w') self.escape = Tkinter.Entry(self, name='escape') self.escape.grid(column=2, columnspan=2, row=13, sticky='nesw') Tkinter.Button(self, text='Connect!', command=self.doConnect).grid(column=1, columnspan=3, row=14, sticky='nesw') # Resize behavior(s) self.grid_rowconfigure(6, weight=1, minsize=64) self.grid_columnconfigure(2, weight=1, minsize=2) self.master.protocol("WM_DELETE_WINDOW", sys.exit) def getIdentityFile(self): r = 
tkFileDialog.askopenfilename() if r: self.identity.delete(0, Tkinter.END) self.identity.insert(Tkinter.END, r) def addForward(self): port = self.forwardPort.get() self.forwardPort.delete(0, Tkinter.END) host = self.forwardHost.get() self.forwardHost.delete(0, Tkinter.END) if self.localRemoteVar.get() == 'local': self.forwards.insert(Tkinter.END, 'L:%s:%s' % (port, host)) else: self.forwards.insert(Tkinter.END, 'R:%s:%s' % (port, host)) def removeForward(self): cur = self.forwards.curselection() if cur: self.forwards.remove(cur[0]) def doConnect(self): finished = 1 options['host'] = self.host.get() options['port'] = self.port.get() options['user'] = self.user.get() options['command'] = self.command.get() cipher = self.cipher.get() mac = self.mac.get() escape = self.escape.get() if cipher: if cipher in SSHClientTransport.supportedCiphers: SSHClientTransport.supportedCiphers = [cipher] else: tkMessageBox.showerror('TkConch', 'Bad cipher.') finished = 0 if mac: if mac in SSHClientTransport.supportedMACs: SSHClientTransport.supportedMACs = [mac] elif finished: tkMessageBox.showerror('TkConch', 'Bad MAC.') finished = 0 if escape: if escape == 'none': options['escape'] = None elif escape[0] == '^' and len(escape) == 2: options['escape'] = chr(ord(escape[1])-64) elif len(escape) == 1: options['escape'] = escape elif finished: tkMessageBox.showerror('TkConch', "Bad escape character '%s'." 
% escape) finished = 0 if self.identity.get(): options.identitys.append(self.identity.get()) for line in self.forwards.get(0,Tkinter.END): if line[0]=='L': options.opt_localforward(line[2:]) else: options.opt_remoteforward(line[2:]) if '@' in options['host']: options['user'], options['host'] = options['host'].split('@',1) if (not options['host'] or not options['user']) and finished: tkMessageBox.showerror('TkConch', 'Missing host or username.') finished = 0 if finished: self.master.quit() self.master.destroy() if options['log']: realout = sys.stdout log.startLogging(sys.stderr) sys.stdout = realout else: log.discardLogs() log.deferr = handleError # HACK if not options.identitys: options.identitys = ['~/.ssh/id_rsa', '~/.ssh/id_dsa'] host = options['host'] port = int(options['port'] or 22) log.msg((host,port)) reactor.connectTCP(host, port, SSHClientFactory()) frame.master.deiconify() frame.master.title('%s@%s - TkConch' % (options['user'], options['host'])) else: self.focus() class GeneralOptions(usage.Options): synopsis = """Usage: tkconch [options] host [command] """ optParameters = [['user', 'l', None, 'Log in using this user name.'], ['identity', 'i', '~/.ssh/identity', 'Identity for public key authentication'], ['escape', 'e', '~', "Set escape character; ``none'' = disable"], ['cipher', 'c', None, 'Select encryption algorithm.'], ['macs', 'm', None, 'Specify MAC algorithms for protocol version 2.'], ['port', 'p', None, 'Connect to this port. 
Server must be on the same port.'], ['localforward', 'L', None, 'listen-port:host:port Forward local port to remote address'], ['remoteforward', 'R', None, 'listen-port:host:port Forward remote port to local address'], ] optFlags = [['tty', 't', 'Tty; allocate a tty even if command is given.'], ['notty', 'T', 'Do not allocate a tty.'], ['version', 'V', 'Display version number only.'], ['compress', 'C', 'Enable compression.'], ['noshell', 'N', 'Do not execute a shell or command.'], ['subsystem', 's', 'Invoke command (mandatory) as SSH2 subsystem.'], ['log', 'v', 'Log to stderr'], ['ansilog', 'a', 'Print the received data to stdout']] _ciphers = transport.SSHClientTransport.supportedCiphers _macs = transport.SSHClientTransport.supportedMACs compData = usage.Completions( mutuallyExclusive=[("tty", "notty")], optActions={ "cipher": usage.CompleteList(_ciphers), "macs": usage.CompleteList(_macs), "localforward": usage.Completer(descr="listen-port:host:port"), "remoteforward": usage.Completer(descr="listen-port:host:port")}, extraActions=[usage.CompleteUserAtHost(), usage.Completer(descr="command"), usage.Completer(descr="argument", repeat=True)] ) identitys = [] localForwards = [] remoteForwards = [] def opt_identity(self, i): self.identitys.append(i) def opt_localforward(self, f): localPort, remoteHost, remotePort = f.split(':') # doesn't do v6 yet localPort = int(localPort) remotePort = int(remotePort) self.localForwards.append((localPort, (remoteHost, remotePort))) def opt_remoteforward(self, f): remotePort, connHost, connPort = f.split(':') # doesn't do v6 yet remotePort = int(remotePort) connPort = int(connPort) self.remoteForwards.append((remotePort, (connHost, connPort))) def opt_compress(self): SSHClientTransport.supportedCompressions[0:1] = ['zlib'] def parseArgs(self, *args): if args: self['host'] = args[0] self['command'] = ' '.join(args[1:]) else: self['host'] = '' self['command'] = '' # Rest of code in "run" options = None menu = None exitStatus = 0 frame = 
None def deferredAskFrame(question, echo): if frame.callback: raise ValueError("can't ask 2 questions at once!") d = defer.Deferred() resp = [] def gotChar(ch, resp=resp): if not ch: return if ch=='\x03': # C-c reactor.stop() if ch=='\r': frame.write('\r\n') stresp = ''.join(resp) del resp frame.callback = None d.callback(stresp) return elif 32 <= ord(ch) < 127: resp.append(ch) if echo: frame.write(ch) elif ord(ch) == 8 and resp: # BS if echo: frame.write('\x08 \x08') resp.pop() frame.callback = gotChar frame.write(question) frame.canvas.focus_force() return d def run(): global menu, options, frame args = sys.argv[1:] if '-l' in args: # cvs is an idiot i = args.index('-l') args = args[i:i+2]+args del args[i+2:i+4] for arg in args[:]: try: i = args.index(arg) if arg[:2] == '-o' and args[i+1][0]!='-': args[i:i+2] = [] # suck on it scp except ValueError: pass root = Tkinter.Tk() root.withdraw() top = Tkinter.Toplevel() menu = TkConchMenu(top) menu.pack(side=Tkinter.TOP, fill=Tkinter.BOTH, expand=1) options = GeneralOptions() try: options.parseOptions(args) except usage.UsageError, u: print 'ERROR: %s' % u options.opt_help() sys.exit(1) for k,v in options.items(): if v and hasattr(menu, k): getattr(menu,k).insert(Tkinter.END, v) for (p, (rh, rp)) in options.localForwards: menu.forwards.insert(Tkinter.END, 'L:%s:%s:%s' % (p, rh, rp)) options.localForwards = [] for (p, (rh, rp)) in options.remoteForwards: menu.forwards.insert(Tkinter.END, 'R:%s:%s:%s' % (p, rh, rp)) options.remoteForwards = [] frame = tkvt100.VT100Frame(root, callback=None) root.geometry('%dx%d'%(tkvt100.fontWidth*frame.width+3, tkvt100.fontHeight*frame.height+3)) frame.pack(side = Tkinter.TOP) tksupport.install(root) root.withdraw() if (options['host'] and options['user']) or '@' in options['host']: menu.doConnect() else: top.mainloop() reactor.run() sys.exit(exitStatus) def handleError(): from twisted.python import failure global exitStatus exitStatus = 2 log.err(failure.Failure()) reactor.stop() raise 
class SSHClientFactory(protocol.ClientFactory): noisy = 1 def stopFactory(self): reactor.stop() def buildProtocol(self, addr): return SSHClientTransport() def clientConnectionFailed(self, connector, reason): tkMessageBox.showwarning('TkConch','Connection Failed, Reason:\n %s: %s' % (reason.type, reason.value)) class SSHClientTransport(transport.SSHClientTransport): def receiveError(self, code, desc): global exitStatus exitStatus = 'conch:\tRemote side disconnected with error code %i\nconch:\treason: %s' % (code, desc) def sendDisconnect(self, code, reason): global exitStatus exitStatus = 'conch:\tSending disconnect with error code %i\nconch:\treason: %s' % (code, reason) transport.SSHClientTransport.sendDisconnect(self, code, reason) def receiveDebug(self, alwaysDisplay, message, lang): global options if alwaysDisplay or options['log']: log.msg('Received Debug Message: %s' % message) def verifyHostKey(self, pubKey, fingerprint): #d = defer.Deferred() #d.addCallback(lambda x:defer.succeed(1)) #d.callback(2) #return d goodKey = isInKnownHosts(options['host'], pubKey, {'known-hosts': None}) if goodKey == 1: # good key return defer.succeed(1) elif goodKey == 2: # AAHHHHH changed return defer.fail(error.ConchError('bad host key')) else: if options['host'] == self.transport.getPeer()[1]: host = options['host'] khHost = options['host'] else: host = '%s (%s)' % (options['host'], self.transport.getPeer()[1]) khHost = '%s,%s' % (options['host'], self.transport.getPeer()[1]) keyType = common.getNS(pubKey)[0] ques = """The authenticity of host '%s' can't be established.\r %s key fingerprint is %s.""" % (host, {'ssh-dss':'DSA', 'ssh-rsa':'RSA'}[keyType], fingerprint) ques+='\r\nAre you sure you want to continue connecting (yes/no)? 
' return deferredAskFrame(ques, 1).addCallback(self._cbVerifyHostKey, pubKey, khHost, keyType) def _cbVerifyHostKey(self, ans, pubKey, khHost, keyType): if ans.lower() not in ('yes', 'no'): return deferredAskFrame("Please type 'yes' or 'no': ",1).addCallback(self._cbVerifyHostKey, pubKey, khHost, keyType) if ans.lower() == 'no': frame.write('Host key verification failed.\r\n') raise error.ConchError('bad host key') try: frame.write("Warning: Permanently added '%s' (%s) to the list of known hosts.\r\n" % (khHost, {'ssh-dss':'DSA', 'ssh-rsa':'RSA'}[keyType])) known_hosts = open(os.path.expanduser('~/.ssh/known_hosts'), 'a') encodedKey = base64.encodestring(pubKey).replace('\n', '') known_hosts.write('\n%s %s %s' % (khHost, keyType, encodedKey)) known_hosts.close() except: log.deferr() raise error.ConchError def connectionSecure(self): if options['user']: user = options['user'] else: user = getpass.getuser() self.requestService(SSHUserAuthClient(user, SSHConnection())) class SSHUserAuthClient(userauth.SSHUserAuthClient): usedFiles = [] def getPassword(self, prompt = None): if not prompt: prompt = "%s@%s's password: " % (self.user, options['host']) return deferredAskFrame(prompt,0) def getPublicKey(self): files = [x for x in options.identitys if x not in self.usedFiles] if not files: return None file = files[0] log.msg(file) self.usedFiles.append(file) file = os.path.expanduser(file) file += '.pub' if not os.path.exists(file): return try: return keys.Key.fromFile(file).blob() except: return self.getPublicKey() # try again def getPrivateKey(self): file = os.path.expanduser(self.usedFiles[-1]) if not os.path.exists(file): return None try: return defer.succeed(keys.Key.fromFile(file).keyObject) except keys.BadKeyError, e: if e.args[0] == 'encrypted key with no password': prompt = "Enter passphrase for key '%s': " % \ self.usedFiles[-1] return deferredAskFrame(prompt, 0).addCallback(self._cbGetPrivateKey, 0) def _cbGetPrivateKey(self, ans, count): file = 
os.path.expanduser(self.usedFiles[-1]) try: return keys.Key.fromFile(file, password = ans).keyObject except keys.BadKeyError: if count == 2: raise prompt = "Enter passphrase for key '%s': " % \ self.usedFiles[-1] return deferredAskFrame(prompt, 0).addCallback(self._cbGetPrivateKey, count+1) class SSHConnection(connection.SSHConnection): def serviceStarted(self): if not options['noshell']: self.openChannel(SSHSession()) if options.localForwards: for localPort, hostport in options.localForwards: reactor.listenTCP(localPort, forwarding.SSHListenForwardingFactory(self, hostport, forwarding.SSHListenClientForwardingChannel)) if options.remoteForwards: for remotePort, hostport in options.remoteForwards: log.msg('asking for remote forwarding for %s:%s' % (remotePort, hostport)) data = forwarding.packGlobal_tcpip_forward( ('0.0.0.0', remotePort)) self.sendGlobalRequest('tcpip-forward', data) self.remoteForwards[remotePort] = hostport class SSHSession(channel.SSHChannel): name = 'session' def channelOpen(self, foo): #global globalSession #globalSession = self # turn off local echo self.escapeMode = 1 c = session.SSHSessionClient() if options['escape']: c.dataReceived = self.handleInput else: c.dataReceived = self.write c.connectionLost = self.sendEOF frame.callback = c.dataReceived frame.canvas.focus_force() if options['subsystem']: self.conn.sendRequest(self, 'subsystem', \ common.NS(options['command'])) elif options['command']: if options['tty']: term = os.environ.get('TERM', 'xterm') #winsz = fcntl.ioctl(fd, tty.TIOCGWINSZ, '12345678') winSize = (25,80,0,0) #struct.unpack('4H', winsz) ptyReqData = session.packRequest_pty_req(term, winSize, '') self.conn.sendRequest(self, 'pty-req', ptyReqData) self.conn.sendRequest(self, 'exec', \ common.NS(options['command'])) else: if not options['notty']: term = os.environ.get('TERM', 'xterm') #winsz = fcntl.ioctl(fd, tty.TIOCGWINSZ, '12345678') winSize = (25,80,0,0) #struct.unpack('4H', winsz) ptyReqData = 
session.packRequest_pty_req(term, winSize, '') self.conn.sendRequest(self, 'pty-req', ptyReqData) self.conn.sendRequest(self, 'shell', '') self.conn.transport.transport.setTcpNoDelay(1) def handleInput(self, char): #log.msg('handling %s' % repr(char)) if char in ('\n', '\r'): self.escapeMode = 1 self.write(char) elif self.escapeMode == 1 and char == options['escape']: self.escapeMode = 2 elif self.escapeMode == 2: self.escapeMode = 1 # so we can chain escapes together if char == '.': # disconnect log.msg('disconnecting from escape') reactor.stop() return elif char == '\x1a': # ^Z, suspend # following line courtesy of Erwin@freenode os.kill(os.getpid(), signal.SIGSTOP) return elif char == 'R': # rekey connection log.msg('rekeying connection') self.conn.transport.sendKexInit() return self.write('~' + char) else: self.escapeMode = 0 self.write(char) def dataReceived(self, data): if options['ansilog']: print repr(data) frame.write(data) def extReceived(self, t, data): if t==connection.EXTENDED_DATA_STDERR: log.msg('got %s stderr data' % len(data)) sys.stderr.write(data) sys.stderr.flush() def eofReceived(self): log.msg('got eof') sys.stdin.close() def closed(self): log.msg('closed %s' % self) if len(self.conn.channels) == 1: # just us left reactor.stop() def request_exit_status(self, data): global exitStatus exitStatus = int(struct.unpack('>L', data)[0]) log.msg('exit status: %s' % exitStatus) def sendEOF(self): self.conn.sendEOF(self) if __name__=="__main__": run()
mit
txemi/ansible
lib/ansible/utils/module_docs_fragments/cnos.py
77
3621
# Copyright (C) 2017 Lenovo, Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # class ModuleDocFragment(object): # Standard CNOS documentation fragment DOCUMENTATION = ''' options: outputfile: description: - This specifies the file path where the output of each command execution is saved. Each command that is specified in the merged template file and each response from the device are saved here. Usually the location is the results folder, but you can choose another location based on your write permission. required: true default: Null version_added: 2.3 host: description: - This is the variable used to search the hosts file at /etc/ansible/hosts and identify the IP address of the device on which the template is going to be applied. Usually the Ansible keyword {{ inventory_hostname }} is specified in the playbook as an abstraction of the group of network elements that need to be configured. required: true default: Null version_added: 2.3 username: description: - Configures the username used to authenticate the connection to the remote device. The value of the username parameter is used to authenticate the SSH session. While generally the value should come from the inventory file, you can also specify it as a variable. This parameter is optional. If it is not specified, no default value will be used. 
required: true default: Null version_added: 2.3 password: description: - Configures the password used to authenticate the connection to the remote device. The value of the password parameter is used to authenticate the SSH session. While generally the value should come from the inventory file, you can also specify it as a variable. This parameter is optional. If it is not specified, no default value will be used. required: true default: Null version_added: 2.3 enablePassword: description: - Configures the password used to enter Global Configuration command mode on the switch. If the switch does not request this password, the parameter is ignored.While generally the value should come from the inventory file, you can also specify it as a variable. This parameter is optional. If it is not specified, no default value will be used. required: false default: Null version_added: 2.3 deviceType: description: - This specifies the type of device where the method is executed. required: Yes default: null choices: [g8272_cnos,g8296_cnos,g8332_cnos] version_added: 2.3 '''
gpl-3.0
andreymaznyak/ontologydatabase
vendor/sonata-project/admin-bundle/Resources/doc/conf.py
77
7893
# -*- coding: utf-8 -*- # # IoC documentation build configuration file, created by # sphinx-quickstart on Fri Mar 29 01:43:00 2013. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sensio.sphinx.refinclude', 'sensio.sphinx.configurationblock', 'sensio.sphinx.phpcode'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Sonata ~ AdminBundle' copyright = u'2010-2014, Thomas Rabaix' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. #version = '0.0.1' # The full version, including alpha/beta/rc tags. #release = '0.0.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
#language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- import sphinx_rtd_theme # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. 
#html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. 
htmlhelp_basename = 'doc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). #latex_documents = [ # ('index', 'PythonElement.tex', u'Python Documentation', # u'Thomas Rabaix', 'manual'), #] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples #(source start file, name, description, authors, manual section). #man_pages = [ # ('index', 'ioc', u'IoC Documentation', # [u'Thomas Rabaix'], 1) #] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) #texinfo_documents = [ # ('index', 'IoC', u'IoC Documentation', # u'Thomas Rabaix', 'IoC', 'One line description of project.', # 'Miscellaneous'), #] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. 
#texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote'
mit
111t8e/h2o-2
py/testdir_multi_jvm/test_rf_hhp_2a_fvec.py
9
2453
import unittest, time, sys sys.path.extend(['.','..','../..','py']) import h2o, h2o_cmd, h2o_import as h2i, h2o_exec, h2o_rf class Basic(unittest.TestCase): def tearDown(self): h2o.check_sandbox_for_errors() @classmethod def setUpClass(cls): h2o.init(3) @classmethod def tearDownClass(cls): h2o.tear_down_cloud() def test_rf_hhp_2a_fvec(self): csvFilenameList = { 'hhp.cut3.214.data.gz', } for csvFilename in csvFilenameList: csvPathname = csvFilename print "RF start on ", csvPathname dataKeyTrain = 'rTrain.hex' start = time.time() parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, hex_key=dataKeyTrain, schema='put', timeoutSecs=120) inspect = h2o_cmd.runInspect(key=parseResult['destination_key']) print "\n" + csvPathname, \ " numRows:", "{:,}".format(inspect['numRows']), \ " numCols:", "{:,}".format(inspect['numCols']) numCols = inspect['numCols'] # we want the last col. Should be values 0 to 14. 14 most rare # from the cut3 set # 84777 0 # 13392 1 # 6546 2 # 5716 3 # 4210 4 # 3168 5 # 2009 6 # 1744 7 # 1287 8 # 1150 9 # 1133 10 # 780 11 # 806 12 # 700 13 # 345 14 # 3488 15 execExpr = "%s[,%s] = %s[,%s]==14" % (dataKeyTrain, numCols, dataKeyTrain, numCols) h2o_exec.exec_expr(None, execExpr, resultKey=dataKeyTrain, timeoutSecs=30) inspect = h2o_cmd.runInspect(key=dataKeyTrain) h2o_cmd.infoFromInspect(inspect, "going into RF") execResult = {'destination_key': dataKeyTrain} kwargs = { 'ntrees': 2, 'max_depth': 20, 'nbins': 50, } rfView = h2o_cmd.runRF(parseResult=execResult, timeoutSecs=900, retryDelaySecs=300, **kwargs) print "RF end on ", csvPathname, 'took', time.time() - start, 'seconds' (error, classErrorPctList, totalScores) = h2o_rf.simpleCheckRFView(rfv=rfView) if __name__ == '__main__': h2o.unit_main()
apache-2.0
rahushen/ansible
lib/ansible/modules/packaging/os/svr4pkg.py
14
7700
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2012, Boyd Adamson <boyd () boydadamson.com> # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: svr4pkg short_description: Manage Solaris SVR4 packages description: - Manages SVR4 packages on Solaris 10 and 11. - These were the native packages on Solaris <= 10 and are available as a legacy feature in Solaris 11. - Note that this is a very basic packaging system. It will not enforce dependencies on install or remove. version_added: "0.9" author: "Boyd Adamson (@brontitall)" options: name: description: - Package name, e.g. C(SUNWcsr) required: true state: description: - Whether to install (C(present)), or remove (C(absent)) a package. - If the package is to be installed, then I(src) is required. - The SVR4 package system doesn't provide an upgrade operation. You need to uninstall the old, then install the new package. required: true choices: ["present", "absent"] src: description: - Specifies the location to install the package from. Required when C(state=present). - "Can be any path acceptable to the C(pkgadd) command's C(-d) option. e.g.: C(somefile.pkg), C(/dir/with/pkgs), C(http:/server/mypkgs.pkg)." - If using a file or directory, they must already be accessible by the host. See the M(copy) module for a way to get them there. proxy: description: - HTTP[s] proxy to be used if C(src) is a URL. response_file: description: - Specifies the location of a response file to be used if package expects input on install. (added in Ansible 1.4) required: false zone: description: - Whether to install the package only in the current zone, or install it into all zones. - The installation into all zones works only if you are working with the global zone. 
required: false default: "all" choices: ["current", "all"] version_added: "1.6" category: description: - Install/Remove category instead of a single package. required: false choices: ["true", "false"] version_added: "1.6" ''' EXAMPLES = ''' # Install a package from an already copied file - svr4pkg: name: CSWcommon src: /tmp/cswpkgs.pkg state: present # Install a package directly from an http site - svr4pkg: name: CSWpkgutil src: 'http://get.opencsw.org/now' state: present zone: current # Install a package with a response file - svr4pkg: name: CSWggrep src: /tmp/third-party.pkg response_file: /tmp/ggrep.response state: present # Ensure that a package is not installed. - svr4pkg: name: SUNWgnome-sound-recorder state: absent # Ensure that a category is not installed. - svr4pkg: name: FIREFOX state: absent category: true ''' import os import tempfile from ansible.module_utils.basic import AnsibleModule def package_installed(module, name, category): cmd = [module.get_bin_path('pkginfo', True)] cmd.append('-q') if category: cmd.append('-c') cmd.append(name) rc, out, err = module.run_command(' '.join(cmd)) if rc == 0: return True else: return False def create_admin_file(): (desc, filename) = tempfile.mkstemp(prefix='ansible_svr4pkg', text=True) fullauto = ''' mail= instance=unique partial=nocheck runlevel=quit idepend=nocheck rdepend=nocheck space=quit setuid=nocheck conflict=nocheck action=nocheck networktimeout=60 networkretries=3 authentication=quit keystore=/var/sadm/security proxy= basedir=default ''' os.write(desc, fullauto) os.close(desc) return filename def run_command(module, cmd): progname = cmd[0] cmd[0] = module.get_bin_path(progname, True) return module.run_command(cmd) def package_install(module, name, src, proxy, response_file, zone, category): adminfile = create_admin_file() cmd = ['pkgadd', '-n'] if zone == 'current': cmd += ['-G'] cmd += ['-a', adminfile, '-d', src] if proxy is not None: cmd += ['-x', proxy] if response_file is not None: cmd += ['-r', 
response_file] if category: cmd += ['-Y'] cmd.append(name) (rc, out, err) = run_command(module, cmd) os.unlink(adminfile) return (rc, out, err) def package_uninstall(module, name, src, category): adminfile = create_admin_file() if category: cmd = ['pkgrm', '-na', adminfile, '-Y', name] else: cmd = ['pkgrm', '-na', adminfile, name] (rc, out, err) = run_command(module, cmd) os.unlink(adminfile) return (rc, out, err) def main(): module = AnsibleModule( argument_spec=dict( name=dict(required=True), state=dict(required=True, choices=['present', 'absent']), src=dict(default=None), proxy=dict(default=None), response_file=dict(default=None), zone=dict(required=False, default='all', choices=['current', 'all']), category=dict(default=False, type='bool') ), supports_check_mode=True ) state = module.params['state'] name = module.params['name'] src = module.params['src'] proxy = module.params['proxy'] response_file = module.params['response_file'] zone = module.params['zone'] category = module.params['category'] rc = None out = '' err = '' result = {} result['name'] = name result['state'] = state if state == 'present': if src is None: module.fail_json(name=name, msg="src is required when state=present") if not package_installed(module, name, category): if module.check_mode: module.exit_json(changed=True) (rc, out, err) = package_install(module, name, src, proxy, response_file, zone, category) # Stdout is normally empty but for some packages can be # very long and is not often useful if len(out) > 75: out = out[:75] + '...' elif state == 'absent': if package_installed(module, name, category): if module.check_mode: module.exit_json(changed=True) (rc, out, err) = package_uninstall(module, name, src, category) out = out[:75] # Returncodes as per pkgadd(1m) # 0 Successful completion # 1 Fatal error. # 2 Warning. # 3 Interruption. # 4 Administration. # 5 Administration. Interaction is required. Do not use pkgadd -n. # 10 Reboot after installation of all packages. 
# 20 Reboot after installation of this package. # 99 (observed) pkgadd: ERROR: could not process datastream from </tmp/pkgutil.pkg> if rc in (0, 2, 3, 10, 20): result['changed'] = True # no install nor uninstall, or failed else: result['changed'] = False # rc will be none when the package already was installed and no action took place # Only return failed=False when the returncode is known to be good as there may be more # undocumented failure return codes if rc not in (None, 0, 2, 10, 20): result['failed'] = True else: result['failed'] = False if out: result['stdout'] = out if err: result['stderr'] = err module.exit_json(**result) if __name__ == '__main__': main()
gpl-3.0
ofekd/servo
tests/wpt/web-platform-tests/tools/html5lib/html5lib/tests/performance/concatenation.py
451
1145
from __future__ import absolute_import, division, unicode_literals def f1(): x = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ" y = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ" z = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ" x += y + z def f2(): x = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ" y = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ" z = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ" x = x + y + z def f3(): x = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ" y = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ" z = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ" x = "".join((x, y, z)) def f4(): x = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ" y = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ" z = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ" x = "%s%s%s" % (x, y, z) import timeit for x in range(4): statement = "f%s" % (x + 1) t = timeit.Timer(statement, "from __main__ import " + statement) r = t.repeat(3, 1000000) print(r, min(r))
mpl-2.0
ExPHAT/binding-of-isaac
Gurdy.py
1
1437
# Gurdy.py # Aaron Taylor # Moose Abumeeiz # # This is the first boss, Gurdy. He just sits there and takes your bullets # from pygame import * from const import * from func import * from Animation import * from Enemy import * class Gurdy(Enemy): x = 6 y = 3 health = 100 hurtDistance = 2 def __init__(self, textures, sounds): self.body = textures["bosses"]["gurdy"].subsurface(0, 0, 284, 320) self.head = textures["bosses"]["gurdy"].subsurface(0, 768, 84, 104) self.sounds = sounds self.tearTextures = textures["tears"] self.tearSounds = sounds["tear"] self.textures = textures self.lastShot = -1 def die(self): self.dead = True def render(self, surface, time, character, nodes, paths, bounds, obsticals): # Draw body surface.blit(self.body, (GRIDX+GRATIO*self.x-284/2, GRIDY+GRATIO*self.y-320/2)) # Blit head surface.blit(self.head, (GRIDX+GRATIO*self.x-284/4 + 30, GRIDY+GRATIO*self.y-320/4 - 40)) if time-self.lastShot >= .4: self.lastShot = time dx = character.x-(GRIDX+GRATIO*self.x) dy = character.y-(GRIDY+GRATIO*self.y) dist = sqrt(dx**2+dy**2) self.tears.append(Tear((dx/dist, dy/dist), ((GRIDX+GRATIO*self.x),(GRIDY+GRATIO*self.y)), (0,0), 1, 1, 1, False, self.tearTextures, self.tearSounds)) for tear in self.tears[:]: if not tear.render(surface, time, bounds, obsticals): self.tears.remove(tear) self.checkHurt(character, time) return not self.dead
mit
Vignesh2208/Awlsim
awlsim/core/systemblocks/system_sfc_m1.py
2
1118
# -*- coding: utf-8 -*- # # AWL simulator - SFCs # # Copyright 2012-2015 Michael Buesch <m@bues.ch> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # from __future__ import division, absolute_import, print_function, unicode_literals from awlsim.common.compat import * from awlsim.core.systemblocks.systemblocks import * from awlsim.core.util import * class SFCm1(SFC): name = (-1, "__SFC_NOP", None) def run(self): s = self.cpu.statusWord s.BIE = 1
gpl-2.0
nicko96/Chrome-Infra
appengine/findit/handlers/failure_log.py
1
1213
# Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from base_handler import BaseHandler from base_handler import Permission from waterfall import buildbot from waterfall import masters from model.wf_step import WfStep class FailureLog(BaseHandler): PERMISSION_LEVEL = Permission.CORP_USER def HandleGet(self): """Fetch the log of a failed step as a JSON result.""" url = self.request.get('url', '') step_info = buildbot.ParseStepUrl(url) if not step_info: return BaseHandler.CreateError( 'Url "%s" is not pointing to a step.' % url, 501) master_name, builder_name, build_number, step_name = step_info step = WfStep.Get(master_name, builder_name, build_number, step_name) if not step: return BaseHandler.CreateError('No failure log available.', 404) data = { 'master_name': master_name, 'builder_name': builder_name, 'build_number': build_number, 'step_name': step_name, 'step_logs': step.log_data, } return {'template': 'failure_log.html', 'data': data}
bsd-3-clause
pyrocko/kite
src/deramp.py
1
2008
import numpy as num from pyrocko.guts import Bool from .plugin import PluginConfig, Plugin class DerampConfig(PluginConfig): demean = Bool.T(optional=True, default=True) class Deramp(Plugin): def __init__(self, scene, config=None): self.scene = scene self.config = config or DerampConfig() self._log = scene._log.getChild('Deramp') def get_ramp_coefficients(self, displacement): '''Fit plane through the displacement data. :returns: Mean of the displacement and slopes in easting coefficients of the fitted plane. The array hold ``[offset_e, offset_n, slope_e, slope_n]``. :rtype: :class:`numpy.ndarray` ''' scene = self.scene msk = num.isfinite(displacement) displacement = displacement[msk] coords = scene.frame.coordinates[msk.flatten()] # Add ones for the offset coords = num.hstack(( num.ones_like(coords), coords)) coeffs, res, _, _ = num.linalg.lstsq( coords, displacement, rcond=None) return coeffs def set_demean(self, demean): assert isinstance(demean, bool) self.config.demean = demean self.update() def apply(self, displacement): '''Fit a plane onto the displacement data and substract it :param demean: Demean the displacement :type demean: bool :param inplace: Replace data of the scene (default: True) :type inplace: bool :return: ``None`` if ``inplace=True`` else a new Scene :rtype: ``None`` or :class:`~kite.Scene` ''' self._log.debug('De-ramping scene') coeffs = self.get_ramp_coefficients(displacement) coords = self.scene.frame.coordinates ramp = coeffs[2:] * coords if self.config.demean: ramp += coeffs[:2] ramp = ramp.sum(axis=1).reshape(displacement.shape) displacement -= ramp return displacement
gpl-3.0
sarvex/tensorflow
tensorflow/python/training/slot_creator.py
9
10200
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Standard functions for creating slots.

A slot is a `Variable` created with the same first m-dimension as a primary
variable or `Tensor`. A slot is always scoped in the namespace of the primary
object and typically has the same device and type.

Slots are typically used as accumulators to track values associated with
the primary object:

```python
# Optimizers can create a slot for each variable to track accumulators
accumulators = {var : create_zeros_slot(var, "momentum") for var in vs}
for var in vs:
  apply_momentum(var, accumulators[var], lr, grad, momentum_tensor)

# Slots can also be used for moving averages
mavg = create_slot(var, var.initialized_value(), "exponential_moving_avg")
update_mavg = mavg.assign_sub((mavg - var) * (1 - decay))
```
"""
# pylint: disable=g-bad-name

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.compiler.xla.experimental.xla_sharding import xla_sharding
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables


def _create_slot_var(primary,
                     val,
                     scope,
                     validate_shape,
                     shape,
                     dtype,
                     *,
                     copy_xla_sharding=False):
  """Helper function for creating a slot variable."""

  # TODO(lukaszkaiser): Consider allowing partitioners to be set in the current
  # scope.
  # Temporarily disable partitioning so the slot itself is created whole;
  # the saved partitioner is restored after get_variable below.
  current_partitioner = variable_scope.get_variable_scope().partitioner
  variable_scope.get_variable_scope().set_partitioner(None)
  # When init from val instead of callable initializer, the shape is expected to
  # be None, not <unknown> or any fully defined shape.
  shape = shape if callable(val) else None
  # Mirror the primary's variable kind: resource var -> resource slot,
  # ref var -> ref slot, otherwise let get_variable decide.
  if resource_variable_ops.is_resource_variable(primary):
    use_resource = True
  elif isinstance(primary, variables.RefVariable):
    use_resource = False
  else:
    use_resource = None
  slot = variable_scope.get_variable(
      scope,
      initializer=val,
      trainable=False,
      use_resource=use_resource,
      shape=shape,
      dtype=dtype,
      validate_shape=validate_shape)
  variable_scope.get_variable_scope().set_partitioner(current_partitioner)

  # pylint: disable=protected-access
  if isinstance(primary, variables.Variable) and primary._save_slice_info:
    # Primary is a partitioned variable, so we need to also indicate that
    # the slot is a partitioned variable.  Slots have the same partitioning
    # as their primaries.
    # For examples when using AdamOptimizer in linear model, slot.name
    # here can be "linear//weights/Adam:0", while primary.op.name is
    # "linear//weight". We want to get 'Adam' as real_slot_name, so we
    # remove "'linear//weight' + '/'" and ':0'.
    real_slot_name = slot.name[len(primary.op.name + "/"):-2]
    slice_info = primary._save_slice_info
    # support slot's shape not same as primary's shape
    # example: primary's shape = [10, 20, 30], slot's shape =
    # None, [], [10], [10, 20] or [10, 20, 30] is allowed
    # slot's shape = None or [10, 20, 30], set slot's slice_info same as primary
    # slot's shape = [], don't set slot's slice_info
    # slot's shape = [10] or [10, 20], set slot's slice_info according to ndims
    n = slot.shape.ndims
    if n is None or n > 0:
      slot._set_save_slice_info(
          variables.Variable.SaveSliceInfo(
              slice_info.full_name + "/" + real_slot_name,
              slice_info.full_shape[:n], slice_info.var_offset[:n],
              slice_info.var_shape[:n]))
  # pylint: enable=protected-access

  # Copy XLA sharding attributes from primary.
  if copy_xla_sharding:
    slot = xla_sharding.copy_sharding(primary, slot, use_sharding_op=False)
  return slot


def create_slot(primary,
                val,
                name,
                colocate_with_primary=True,
                *,
                copy_xla_sharding=False):
  """Create a slot initialized to the given value.

  The type of the slot is determined by the given value.

  Args:
    primary: The primary `Variable` or `Tensor`.
    val: A `Tensor` specifying the initial value of the slot.
    name: Name to use for the slot variable.
    colocate_with_primary: Boolean.  If True the slot is located
      on the same device as `primary`.
    copy_xla_sharding: Boolean. If True also copies XLA sharding
      from primary.

  Returns:
    A `Variable` object.
  """
  # Scope the slot name in the namespace of the primary variable.
  # Set primary's name + '/' + name as default name, so the scope name of
  # optimizer can be shared when reuse is True. Meanwhile when reuse is False
  # and the same name has been previously used, the scope name will add '_N'
  # as suffix for unique identifications.
  validate_shape = val.get_shape().is_fully_defined()
  if isinstance(primary, variables.Variable):
    prefix = primary._shared_name  # pylint: disable=protected-access
  else:
    prefix = primary.op.name
  with variable_scope.variable_scope(None, prefix + "/" + name):
    if colocate_with_primary:
      distribution_strategy = distribution_strategy_context.get_strategy()
      with distribution_strategy.extended.colocate_vars_with(primary):
        return _create_slot_var(
            primary,
            val,
            "",
            validate_shape,
            None,
            None,
            copy_xla_sharding=copy_xla_sharding)
    else:
      return _create_slot_var(
          primary,
          val,
          "",
          validate_shape,
          None,
          None,
          copy_xla_sharding=copy_xla_sharding)


def create_slot_with_initializer(primary,
                                 initializer,
                                 shape,
                                 dtype,
                                 name,
                                 colocate_with_primary=True,
                                 *,
                                 copy_xla_sharding=False):
  """Creates a slot initialized using an `Initializer`.

  The type of the slot is determined by the given value.

  Args:
    primary: The primary `Variable` or `Tensor`.
    initializer: An `Initializer`.  The initial value of the slot.
    shape: Shape of the initial value of the slot.
    dtype: Type of the value of the slot.
    name: Name to use for the slot variable.
    colocate_with_primary: Boolean.  If True the slot is located
      on the same device as `primary`.
    copy_xla_sharding: Boolean. If True also copies XLA sharding
      from primary.

  Returns:
    A `Variable` object.
  """
  # Scope the slot name in the namespace of the primary variable.
  # Set "primary.op.name + '/' + name" as default name, so the scope name of
  # optimizer can be shared when reuse is True. Meanwhile when reuse is False
  # and the same name has been previously used, the scope name will add '_N'
  # as suffix for unique identifications.
  validate_shape = shape.is_fully_defined()
  if isinstance(primary, variables.Variable):
    prefix = primary._shared_name  # pylint: disable=protected-access
  else:
    prefix = primary.op.name
  with variable_scope.variable_scope(None, prefix + "/" + name):
    if colocate_with_primary:
      distribution_strategy = distribution_strategy_context.get_strategy()
      with distribution_strategy.extended.colocate_vars_with(primary):
        return _create_slot_var(
            primary,
            initializer,
            "",
            validate_shape,
            shape,
            dtype,
            copy_xla_sharding=copy_xla_sharding)
    else:
      return _create_slot_var(
          primary,
          initializer,
          "",
          validate_shape,
          shape,
          dtype,
          copy_xla_sharding=copy_xla_sharding)


def create_zeros_slot(primary,
                      name,
                      dtype=None,
                      colocate_with_primary=True,
                      *,
                      copy_xla_sharding=False):
  """Create a slot initialized to 0 with same shape as the primary object.

  Args:
    primary: The primary `Variable` or `Tensor`.
    name: Name to use for the slot variable.
    dtype: Type of the slot variable.  Defaults to the type of `primary`.
    colocate_with_primary: Boolean.  If True the slot is located
      on the same device as `primary`.
    copy_xla_sharding: Boolean. If True also copies XLA sharding
      from primary.

  Returns:
    A `Variable` object.
  """
  if dtype is None:
    dtype = primary.dtype
  slot_shape = primary.get_shape()
  if slot_shape.is_fully_defined():
    # Static shape known: use a zeros initializer so no init tensor is built.
    initializer = init_ops.zeros_initializer()
    return create_slot_with_initializer(
        primary,
        initializer,
        slot_shape,
        dtype,
        name,
        colocate_with_primary=colocate_with_primary,
        copy_xla_sharding=copy_xla_sharding)
  else:
    # Dynamic shape: materialize a zeros tensor at runtime instead.
    if isinstance(primary, variables.Variable):
      slot_shape = array_ops.shape(primary.initialized_value())
    else:
      slot_shape = array_ops.shape(primary)
    val = array_ops.zeros(slot_shape, dtype=dtype)
    return create_slot(
        primary,
        val,
        name,
        colocate_with_primary=colocate_with_primary,
        copy_xla_sharding=copy_xla_sharding)
apache-2.0
fake-name/ReadableWebProxy
alembic/versions/2019-09-14_5552dfae2cb0_add_hash_function_and_dependent_pgcrpyto.py
1
1402
"""Add has function and dependent pgcrpyto Revision ID: 5552dfae2cb0 Revises: c225ea8fbf5e Create Date: 2019-09-14 06:19:36.520447 """ # revision identifiers, used by Alembic. revision = '5552dfae2cb0' down_revision = 'c225ea8fbf5e' branch_labels = None depends_on = None from alembic import op import sqlalchemy as sa from sqlalchemy_utils.types import TSVectorType from sqlalchemy_searchable import make_searchable import sqlalchemy_utils # Patch in knowledge of the citext type, so it reflects properly. from sqlalchemy.dialects.postgresql.base import ischema_names import citext import queue import datetime from sqlalchemy.dialects.postgresql import ENUM from sqlalchemy.dialects.postgresql import JSON from sqlalchemy.dialects.postgresql import JSONB from sqlalchemy.dialects.postgresql import TSVECTOR ischema_names['citext'] = citext.CIText # We use a UUID column because it's a performant in-table 8-byte storage mechanism with nice printing facilities. # SHA-1 has a 160 bit output, so we need to truncate the input SQL_FUNC = ''' CREATE OR REPLACE FUNCTION sha_row_hash(text) returns uuid AS $$ SELECT substring(encode(digest($1, 'sha1'), 'hex') from 0 for 33)::uuid; $$ LANGUAGE SQL STRICT IMMUTABLE; ''' def upgrade(): op.execute("""CREATE EXTENSION IF NOT EXISTS pgcrypto""") op.execute(SQL_FUNC) def downgrade(): op.execute("""DROP FUNCTION sha1;""") pass
bsd-3-clause
AngelkPetkov/titanium_mobile
support/common/markdown/etree_loader.py
136
1287
from markdown import message, CRITICAL import sys ## Import def importETree(): """Import the best implementation of ElementTree, return a module object.""" etree_in_c = None try: # Is it Python 2.5+ with C implemenation of ElementTree installed? import xml.etree.cElementTree as etree_in_c except ImportError: try: # Is it Python 2.5+ with Python implementation of ElementTree? import xml.etree.ElementTree as etree except ImportError: try: # An earlier version of Python with cElementTree installed? import cElementTree as etree_in_c except ImportError: try: # An earlier version of Python with Python ElementTree? import elementtree.ElementTree as etree except ImportError: message(CRITICAL, "Failed to import ElementTree") sys.exit(1) if etree_in_c and etree_in_c.VERSION < "1.0": message(CRITICAL, "For cElementTree version 1.0 or higher is required.") sys.exit(1) elif etree_in_c : return etree_in_c elif etree.VERSION < "1.1": message(CRITICAL, "For ElementTree version 1.1 or higher is required") sys.exit(1) else : return etree
apache-2.0
BruceDai/testkit-lite
testkitlite/engines/bdd.py
4
5938
#!/usr/bin/python
#
# Copyright (C) 2012 Intel Corporation
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# Authors:
#           Chengtao,Liu  <chengtaox.liu@intel.com>

""" The implementation of bdd test engine"""
# NOTE: Python 2 source (uses `except Exception, e`).

import os
import time
import sys
import threading
import uuid
from testkitlite.util.log import LOGGER
from testkitlite.util.result import TestSetResut
from testkitlite.util import tr_utils
import subprocess

# Result status strings and defaults shared by the executor.
STR_PASS = 'PASS'
STR_FAIL = 'FAIL'
STR_BLOCK = 'BLOCK'
DEFAULT_TIMEOUT = 90
EXISTS = os.path.exists


def _bdd_test_exec(test_session, cases, result_obj, session_dir):
    """function for running bdd tests

    Runs each case's feature file through `behave`, polls the subprocess
    until exit or per-case timeout, records PASS/FAIL/BLOCK in the case
    dict, then publishes all results on result_obj and marks it finished.
    Runs on a worker thread (see TestWorker.run_test).
    """
    result_obj.set_status(0)
    result_list = []
    for i_case in cases['cases']:
        i_case_timeout = i_case.get('timeout', DEFAULT_TIMEOUT)
        try:
            case_entry = i_case['entry']
            if not EXISTS(case_entry):
                # Missing feature file: block the case, keep going.
                i_case['result'] = STR_BLOCK
                i_case['stdout'] = "[Message]No such file or dirctory: %s" % case_entry
                result_list.append(i_case)
                continue
            case_id = i_case['case_id']
            tmp_result_dir = "%s/%s" % (session_dir, case_id)
            os.makedirs(tmp_result_dir)
            # behave writes junit XML into the per-case result directory.
            popen_args = "behave %s --junit --junit-directory %s" % (case_entry, tmp_result_dir)
            i_case_proc = subprocess.Popen(args=popen_args, shell=True)
            i_case_pre_time = time.time()
            while True:
                i_case_exit_code = i_case_proc.poll()
                i_case_elapsed_time = time.time() - i_case_pre_time
                if i_case_exit_code == None:
                    # Still running: enforce the per-case timeout.
                    if i_case_elapsed_time >= i_case_timeout:
                        tr_utils.KillAllProcesses(ppid=i_case_proc.pid)
                        i_case['result'] = STR_BLOCK
                        i_case['stdout'] = "[Message]Timeout"
                        LOGGER.debug("Run %s timeout" % case_id)
                        break
                elif str(i_case_exit_code) == str(i_case['expected_result']):
                    i_case['result'] = STR_PASS
                    i_case['stdout'] = tmp_result_dir
                    break
                else:
                    i_case['result'] = STR_FAIL
                    i_case['stdout'] = tmp_result_dir
                    break
                # NOTE(review): sleep placement reconstructed from a flattened
                # source; presumably throttles the poll loop — confirm.
                time.sleep(1)
        except KeyError:
            i_case['result'] = STR_BLOCK
            i_case['stdout'] = "[Message]No 'bdd_test_script_entry' node."
            LOGGER.error(
                "Run %s: failed: No 'bdd_test_script_entry' node, exit from executer" % case_id)
        except Exception, e:
            i_case['result'] = STR_BLOCK
            i_case['stdout'] = "[Message]%s" % e
            LOGGER.error(
                "Run %s: failed: %s, exit from executer" % (case_id, e))
        result_list.append(i_case)
    result_obj.extend_result(result_list)
    result_obj.set_status(1)


class TestWorker(object):
    """Test executor for testkit-lite

    Drives one test session: init_test -> run_test (spawns the executor
    thread) -> get_test_status polling -> get_test_result -> finalize_test.
    """

    def __init__(self, conn):
        super(TestWorker, self).__init__()
        self.conn = conn
        self.server_url = None
        self.result_obj = None
        self.session_dir = None
        self.opts = dict({'block_size': 300,
                          'test_type': None,
                          'auto_iu': False,
                          'fuzzy_match': False,
                          'self_exec': False,
                          'self_repeat': False,
                          'debug_mode': False
                          })

    def init_test(self, params):
        """init the test envrionment; returns a fresh session id (uuid1)."""
        self.session_dir = params.get('session_dir', '')
        self.opts['testset_name'] = params.get('testset-name', '')
        self.opts['testsuite_name'] = params.get('testsuite-name', '')
        self.opts['debug_log_base'] = params.get("debug-log-base", '')
        return str(uuid.uuid1())

    def run_test(self, sessionid, test_set):
        """ process the execution for a test set

        Starts the debug trace (unless disabled via the `disabledlog`
        environment variable) and launches _bdd_test_exec on a thread.
        Returns False for a missing session id, True once started.
        """
        if sessionid is None:
            return False
        disabledlog = os.environ.get("disabledlog", "")
        # start debug trace thread
        if len(disabledlog) > 0:
            pass
        else:
            self.conn.start_debug(self.opts['debug_log_base'])
            time.sleep(1)
        self.result_obj = TestSetResut(
            self.opts['testsuite_name'], self.opts['testset_name'])
        self.opts['async_th'] = threading.Thread(
            target=_bdd_test_exec,
            args=(sessionid, test_set, self.result_obj, self.session_dir)
        )
        self.opts['async_th'].start()
        return True

    def get_test_status(self, sessionid):
        """poll the test task status

        Returns {'msg': [], 'finished': '0'|'1'} or None without a session.
        """
        if sessionid is None:
            return None
        result = {}
        result["msg"] = []
        result["finished"] = str(self.result_obj.get_status())
        return result

    def get_test_result(self, sessionid):
        """get the test result for a test set """
        result = {}
        if sessionid is None:
            return result
        result = self.result_obj.get_result()
        return result

    def finalize_test(self, sessionid):
        """clear the test stub and related resources"""
        if sessionid is None:
            return False
        if self.result_obj is not None:
            self.result_obj.set_status(1)
        # stop debug thread
        self.conn.stop_debug()
        return True
gpl-2.0
GoSteven/PythonPractice
basic/solution/mimic.py
208
2995
#!/usr/bin/env python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/

"""Mimic pyquick exercise -- optional extra exercise.

Read in the file specified on the command line and split it on whitespace
to obtain all the words in the file.  Build a "mimic" dict that maps each
word to the list of all the words that immediately follow it (duplicates
included).  The empty string is treated as the word that comes before the
first word in the file.  With that dict it is easy to emit random text that
mimics the original: print a word, look up the words that may come next,
pick one at random, and repeat.
"""

import random
import sys


def mimic_dict(filename):
  """Returns mimic dict mapping each word to list of words which follow it.

  Args:
    filename: path of the text file to read.

  Returns:
    dict mapping word -> list of follower words; the key '' maps to the
    words that can start the text (i.e. the first word of the file).
  """
  follow = {}
  # Read the whole file at once; splitting one big string is simpler than
  # accumulating line by line.
  with open(filename, 'r') as f:
    words = f.read().split()
  prev = ''
  for word in words:
    # setdefault covers both the "first follower" and "append" cases.
    follow.setdefault(prev, []).append(word)
    prev = word
  return follow


def print_mimic(mimic_dict, word):
  """Given mimic dict and start word, prints 200 random words.

  Args:
    mimic_dict: dict produced by mimic_dict().
    word: word to start from (typically '').
  """
  for _ in range(200):
    print(word, end=' ')
    nexts = mimic_dict.get(word)  # Returns None if not found
    if not nexts:
      nexts = mimic_dict['']      # Fall back to '' to keep things moving
    word = random.choice(nexts)


def main():
  """Parse argv, build the mimic dict and print mimicked text."""
  if len(sys.argv) != 2:
    print('usage: ./mimic.py file-to-read')
    sys.exit(1)

  # Renamed local (was 'dict') to avoid shadowing the builtin.
  word_dict = mimic_dict(sys.argv[1])
  print_mimic(word_dict, '')


if __name__ == '__main__':
  main()
apache-2.0
sgraham/nope
chrome/common/extensions/docs/server2/object_store_creator_test.py
94
1899
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import unittest

from environment import GetAppVersion
from test_object_store import TestObjectStore
from object_store_creator import ObjectStoreCreator


class _FooClass(object):
  """Dummy class; only its name matters, for deriving store namespaces."""
  def __init__(self):
    pass


class ObjectStoreCreatorTest(unittest.TestCase):
  """Checks the namespace strings and flags produced by ObjectStoreCreator."""

  def setUp(self):
    self._store_creator = ObjectStoreCreator(start_empty=False,
                                             store_type=TestObjectStore,
                                             disable_wrappers=True)

  def testVanilla(self):
    store = self._store_creator.Create(_FooClass)
    expected_namespace = 'class=_FooClass&app_version=%s' % GetAppVersion()
    self.assertEqual(expected_namespace, store.namespace)
    self.assertFalse(store.start_empty)

  def testWithCategory(self):
    store = self._store_creator.Create(_FooClass, category='hi')
    expected_namespace = (
        'class=_FooClass&category=hi&app_version=%s' % GetAppVersion())
    self.assertEqual(expected_namespace, store.namespace)
    self.assertFalse(store.start_empty)

  def testWithoutAppVersion(self):
    # Omitting the app version drops it from the namespace entirely.
    store = self._store_creator.Create(_FooClass, app_version=None)
    self.assertEqual('class=_FooClass', store.namespace)
    self.assertFalse(store.start_empty)

  def testStartConfiguration(self):
    empty_store = self._store_creator.Create(_FooClass, start_empty=True)
    self.assertTrue(empty_store.start_empty)
    populated_store = self._store_creator.Create(_FooClass, start_empty=False)
    self.assertFalse(populated_store.start_empty)
    # Constructing a creator with no arguments at all is rejected.
    self.assertRaises(ValueError, ObjectStoreCreator)

  def testIllegalCharacters(self):
    # '&' and '=' are namespace delimiters, so they may not appear in values.
    self.assertRaises(ValueError,
                      self._store_creator.Create, _FooClass, app_version='1&2')
    self.assertRaises(ValueError,
                      self._store_creator.Create, _FooClass, category='a=&b')


if __name__ == '__main__':
  unittest.main()
bsd-3-clause
ryfeus/lambda-packs
Tensorflow_Pandas_Numpy/source3.6/numpy/polynomial/__init__.py
11
1150
""" A sub-package for efficiently dealing with polynomials. Within the documentation for this sub-package, a "finite power series," i.e., a polynomial (also referred to simply as a "series") is represented by a 1-D numpy array of the polynomial's coefficients, ordered from lowest order term to highest. For example, array([1,2,3]) represents ``P_0 + 2*P_1 + 3*P_2``, where P_n is the n-th order basis polynomial applicable to the specific module in question, e.g., `polynomial` (which "wraps" the "standard" basis) or `chebyshev`. For optimal performance, all operations on polynomials, including evaluation at an argument, are implemented as operations on the coefficients. Additional (module-specific) information can be found in the docstring for the module of interest. """ from __future__ import division, absolute_import, print_function from .polynomial import Polynomial from .chebyshev import Chebyshev from .legendre import Legendre from .hermite import Hermite from .hermite_e import HermiteE from .laguerre import Laguerre from numpy.testing._private.pytesttester import PytestTester test = PytestTester(__name__) del PytestTester
mit
haukurk/qlivestats
qlivestats/core/config.py
1
1253
import os

import yaml


class BaseConfigError(Exception):
    """Root of the configuration error hierarchy."""
    pass


class ConfigParsingError(BaseConfigError):
    """Raised when the YAML configuration file cannot be parsed."""
    pass


class ConfigReadError(BaseConfigError):
    """Raised when the configuration file cannot be found or read."""
    pass


class BaseHolder(object):
    """Loads a YAML configuration file and holds the parsed mapping.

    Attributes:
        name: lowercased class name of the concrete holder.
        config: parsed YAML document, or None if parsing failed.
        filename: path of the loaded configuration file.
    """

    def __init__(self, configfile="/etc/qlivestats.yaml"):
        self.name = self.__class__.__name__.lower()
        self.config = None
        try:
            if not os.path.isfile(configfile):
                raise ConfigReadError("Cannot find the configuration file.")
            # Was yaml.load(file(...)): Python 2-only builtin, leaked the
            # file handle, and yaml.load can instantiate arbitrary objects.
            # safe_load restricts input to plain YAML types; configs using
            # custom !tags would need a dedicated Loader instead.
            with open(configfile) as fh:
                self.config = yaml.safe_load(fh)
        except yaml.YAMLError as exc:
            self.config = None
            if hasattr(exc, 'problem_mark'):
                # Report the 1-based line/column where parsing broke.
                mark = exc.problem_mark
                error_message = "Error position: (%s:%s)" % (mark.line + 1,
                                                             mark.column + 1)
                raise ConfigParsingError(error_message)
            raise ConfigParsingError("Unknown Error")
        self.filename = configfile

    def __repr__(self):
        # NOTE(review): len() assumes self.config is a loaded mapping; this
        # raises TypeError if construction failed and config is still None.
        return "%s (sections: %s)" % (self.__class__.__name__,
                                      len(self.config))

    def __str__(self):
        return yaml.dump(self.config)


class YAMLConfig(BaseHolder):
    """Concrete holder exposing qlivestats-specific accessors."""

    def get_filename(self):
        """Return the path of the loaded configuration file."""
        return self.filename

    def get_broker(self):
        """Return the livestatus broker address from the configuration."""
        return self.config["livestatus"]["broker"]
mit
naritta/numpy
numpy/f2py/cb_rules.py
23
20754
#!/usr/bin/env python """ Build call-back mechanism for f2py2e. Copyright 2000 Pearu Peterson all rights reserved, Pearu Peterson <pearu@ioc.ee> Permission to use, modify, and distribute this software is given under the terms of the NumPy License. NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. $Date: 2005/07/20 11:27:58 $ Pearu Peterson """ from __future__ import division, absolute_import, print_function import pprint import sys from . import __version__ from .auxfuncs import * from . import cfuncs f2py_version = __version__.version errmess=sys.stderr.write outmess=sys.stdout.write show=pprint.pprint ################## Rules for callback function ############## cb_routine_rules={ 'cbtypedefs':'typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);', 'body':""" #begintitle# PyObject *#name#_capi = NULL;/*was Py_None*/ PyTupleObject *#name#_args_capi = NULL; int #name#_nofargs = 0; jmp_buf #name#_jmpbuf; /*typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);*/ #static# #rctype# #callbackname# (#optargs##args##strarglens##noargs#) { \tPyTupleObject *capi_arglist = #name#_args_capi; \tPyObject *capi_return = NULL; \tPyObject *capi_tmp = NULL; \tint capi_j,capi_i = 0; \tint capi_longjmp_ok = 1; #decl# #ifdef F2PY_REPORT_ATEXIT f2py_cb_start_clock(); #endif \tCFUNCSMESS(\"cb:Call-back function #name# (maxnofargs=#maxnofargs#(-#nofoptargs#))\\n\"); \tCFUNCSMESSPY(\"cb:#name#_capi=\",#name#_capi); \tif (#name#_capi==NULL) { \t\tcapi_longjmp_ok = 0; \t\t#name#_capi = PyObject_GetAttrString(#modulename#_module,\"#argname#\"); \t} \tif (#name#_capi==NULL) { \t\tPyErr_SetString(#modulename#_error,\"cb: Callback #argname# not defined (as an argument or module #modulename# attribute).\\n\"); \t\tgoto capi_fail; \t} \tif (F2PyCapsule_Check(#name#_capi)) { \t#name#_typedef #name#_cptr; \t#name#_cptr = F2PyCapsule_AsVoidPtr(#name#_capi); \t#returncptr#(*#name#_cptr)(#optargs_nm##args_nm##strarglens_nm#); \t#return# 
\t} \tif (capi_arglist==NULL) { \t\tcapi_longjmp_ok = 0; \t\tcapi_tmp = PyObject_GetAttrString(#modulename#_module,\"#argname#_extra_args\"); \t\tif (capi_tmp) { \t\t\tcapi_arglist = (PyTupleObject *)PySequence_Tuple(capi_tmp); \t\t\tif (capi_arglist==NULL) { \t\t\t\tPyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#argname#_extra_args to tuple.\\n\"); \t\t\t\tgoto capi_fail; \t\t\t} \t\t} else { \t\t\tPyErr_Clear(); \t\t\tcapi_arglist = (PyTupleObject *)Py_BuildValue(\"()\"); \t\t} \t} \tif (capi_arglist == NULL) { \t\tPyErr_SetString(#modulename#_error,\"Callback #argname# argument list is not set.\\n\"); \t\tgoto capi_fail; \t} #setdims# #pyobjfrom# \tCFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist); \tCFUNCSMESS(\"cb:Call-back calling Python function #argname#.\\n\"); #ifdef F2PY_REPORT_ATEXIT f2py_cb_start_call_clock(); #endif \tcapi_return = PyObject_CallObject(#name#_capi,(PyObject *)capi_arglist); #ifdef F2PY_REPORT_ATEXIT f2py_cb_stop_call_clock(); #endif \tCFUNCSMESSPY(\"cb:capi_return=\",capi_return); \tif (capi_return == NULL) { \t\tfprintf(stderr,\"capi_return is NULL\\n\"); \t\tgoto capi_fail; \t} \tif (capi_return == Py_None) { \t\tPy_DECREF(capi_return); \t\tcapi_return = Py_BuildValue(\"()\"); \t} \telse if (!PyTuple_Check(capi_return)) { \t\tcapi_return = Py_BuildValue(\"(N)\",capi_return); \t} \tcapi_j = PyTuple_Size(capi_return); \tcapi_i = 0; #frompyobj# \tCFUNCSMESS(\"cb:#name#:successful\\n\"); \tPy_DECREF(capi_return); #ifdef F2PY_REPORT_ATEXIT f2py_cb_stop_clock(); #endif \tgoto capi_return_pt; capi_fail: \tfprintf(stderr,\"Call-back #name# failed.\\n\"); \tPy_XDECREF(capi_return); \tif (capi_longjmp_ok) \t\tlongjmp(#name#_jmpbuf,-1); capi_return_pt: \t; #return# } #endtitle# """, 'need':['setjmp.h', 'CFUNCSMESS'], 'maxnofargs':'#maxnofargs#', 'nofoptargs':'#nofoptargs#', 'docstr':"""\ \tdef #argname#(#docsignature#): return #docreturn#\\n\\ #docstrsigns#""", 'latexdocstr':""" {{}\\verb@def #argname#(#latexdocsignature#): 
return #docreturn#@{}} #routnote# #latexdocstrsigns#""", 'docstrshort':'def #argname#(#docsignature#): return #docreturn#' } cb_rout_rules=[ {# Init 'separatorsfor': {'decl': '\n', 'args': ',', 'optargs': '', 'pyobjfrom': '\n', 'freemem': '\n', 'args_td': ',', 'optargs_td': '', 'args_nm': ',', 'optargs_nm': '', 'frompyobj': '\n', 'setdims': '\n', 'docstrsigns': '\\n"\n"', 'latexdocstrsigns': '\n', 'latexdocstrreq': '\n', 'latexdocstropt': '\n', 'latexdocstrout': '\n', 'latexdocstrcbs': '\n', }, 'decl': '/*decl*/', 'pyobjfrom': '/*pyobjfrom*/', 'frompyobj': '/*frompyobj*/', 'args': [], 'optargs': '', 'return': '', 'strarglens': '', 'freemem': '/*freemem*/', 'args_td': [], 'optargs_td': '', 'strarglens_td': '', 'args_nm': [], 'optargs_nm': '', 'strarglens_nm': '', 'noargs': '', 'setdims': '/*setdims*/', 'docstrsigns': '', 'latexdocstrsigns': '', 'docstrreq': '\tRequired arguments:', 'docstropt': '\tOptional arguments:', 'docstrout': '\tReturn objects:', 'docstrcbs': '\tCall-back functions:', 'docreturn': '', 'docsign': '', 'docsignopt': '', 'latexdocstrreq': '\\noindent Required arguments:', 'latexdocstropt': '\\noindent Optional arguments:', 'latexdocstrout': '\\noindent Return objects:', 'latexdocstrcbs': '\\noindent Call-back functions:', 'routnote': {hasnote:'--- #note#',l_not(hasnote):''}, }, { # Function 'decl':'\t#ctype# return_value;', 'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting return_value->");'}, '\tif (capi_j>capi_i)\n\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#,"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n");', {debugcapi:'\tfprintf(stderr,"#showvalueformat#.\\n",return_value);'} ], 'need':['#ctype#_from_pyobj', {debugcapi:'CFUNCSMESS'}, 'GETSCALARFROMPYTUPLE'], 'return':'\treturn return_value;', '_check':l_and(isfunction, l_not(isstringfunction), l_not(iscomplexfunction)) }, {# String function 
'pyobjfrom':{debugcapi:'\tfprintf(stderr,"debug-capi:cb:#name#:%d:\\n",return_value_len);'}, 'args':'#ctype# return_value,int return_value_len', 'args_nm':'return_value,&return_value_len', 'args_td':'#ctype# ,int', 'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting return_value->\\"");'}, """\tif (capi_j>capi_i) \t\tGETSTRFROMPYTUPLE(capi_return,capi_i++,return_value,return_value_len);""", {debugcapi:'\tfprintf(stderr,"#showvalueformat#\\".\\n",return_value);'} ], 'need':['#ctype#_from_pyobj', {debugcapi:'CFUNCSMESS'}, 'string.h', 'GETSTRFROMPYTUPLE'], 'return':'return;', '_check':isstringfunction }, {# Complex function 'optargs':""" #ifndef F2PY_CB_RETURNCOMPLEX #ctype# *return_value #endif """, 'optargs_nm':""" #ifndef F2PY_CB_RETURNCOMPLEX return_value #endif """, 'optargs_td':""" #ifndef F2PY_CB_RETURNCOMPLEX #ctype# * #endif """, 'decl':""" #ifdef F2PY_CB_RETURNCOMPLEX \t#ctype# return_value; #endif """, 'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting return_value->");'}, """\ \tif (capi_j>capi_i) #ifdef F2PY_CB_RETURNCOMPLEX \t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#,\"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n\"); #else \t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,return_value,#ctype#,\"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n\"); #endif """, {debugcapi:""" #ifdef F2PY_CB_RETURNCOMPLEX \tfprintf(stderr,\"#showvalueformat#.\\n\",(return_value).r,(return_value).i); #else \tfprintf(stderr,\"#showvalueformat#.\\n\",(*return_value).r,(*return_value).i); #endif """} ], 'return':""" #ifdef F2PY_CB_RETURNCOMPLEX \treturn return_value; #else \treturn; #endif """, 'need':['#ctype#_from_pyobj', {debugcapi:'CFUNCSMESS'}, 'string.h', 'GETSCALARFROMPYTUPLE', '#ctype#'], '_check':iscomplexfunction }, {'docstrout':'\t\t#pydocsignout#', 'latexdocstrout':['\\item[]{{}\\verb@#pydocsignout#@{}}', {hasnote:'--- #note#'}], 
'docreturn':'#rname#,', '_check':isfunction}, {'_check':issubroutine,'return':'return;'} ] cb_arg_rules=[ { # Doc 'docstropt':{l_and(isoptional, isintent_nothide):'\t\t#pydocsign#'}, 'docstrreq':{l_and(isrequired, isintent_nothide):'\t\t#pydocsign#'}, 'docstrout':{isintent_out:'\t\t#pydocsignout#'}, 'latexdocstropt':{l_and(isoptional, isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}', {hasnote:'--- #note#'}]}, 'latexdocstrreq':{l_and(isrequired, isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}', {hasnote:'--- #note#'}]}, 'latexdocstrout':{isintent_out:['\\item[]{{}\\verb@#pydocsignout#@{}}', {l_and(hasnote, isintent_hide):'--- #note#', l_and(hasnote, isintent_nothide):'--- See above.'}]}, 'docsign':{l_and(isrequired, isintent_nothide):'#varname#,'}, 'docsignopt':{l_and(isoptional, isintent_nothide):'#varname#,'}, 'depend':'' }, { 'args': { l_and (isscalar, isintent_c):'#ctype# #varname_i#', l_and (isscalar, l_not(isintent_c)):'#ctype# *#varname_i#_cb_capi', isarray:'#ctype# *#varname_i#', isstring:'#ctype# #varname_i#' }, 'args_nm': { l_and (isscalar, isintent_c):'#varname_i#', l_and (isscalar, l_not(isintent_c)):'#varname_i#_cb_capi', isarray:'#varname_i#', isstring:'#varname_i#' }, 'args_td': { l_and (isscalar, isintent_c):'#ctype#', l_and (isscalar, l_not(isintent_c)):'#ctype# *', isarray:'#ctype# *', isstring:'#ctype#' }, 'strarglens': {isstring:',int #varname_i#_cb_len'}, # untested with multiple args 'strarglens_td': {isstring:',int'}, # untested with multiple args 'strarglens_nm': {isstring:',#varname_i#_cb_len'}, # untested with multiple args }, { # Scalars 'decl':{l_not(isintent_c):'\t#ctype# #varname_i#=(*#varname_i#_cb_capi);'}, 'error': {l_and(isintent_c, isintent_out, throw_error('intent(c,out) is forbidden for callback scalar arguments')):\ ''}, 'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting #varname#->");'}, {isintent_out:'\tif (capi_j>capi_i)\n\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,#varname_i#_cb_capi,#ctype#,"#ctype#_from_pyobj 
failed in converting argument #varname# of call-back function #name# to C #ctype#\\n");'}, {l_and(debugcapi, l_and(l_not(iscomplex), isintent_c)):'\tfprintf(stderr,"#showvalueformat#.\\n",#varname_i#);'}, {l_and(debugcapi, l_and(l_not(iscomplex), l_not(isintent_c))):'\tfprintf(stderr,"#showvalueformat#.\\n",*#varname_i#_cb_capi);'}, {l_and(debugcapi, l_and(iscomplex, isintent_c)):'\tfprintf(stderr,"#showvalueformat#.\\n",(#varname_i#).r,(#varname_i#).i);'}, {l_and(debugcapi, l_and(iscomplex, l_not(isintent_c))):'\tfprintf(stderr,"#showvalueformat#.\\n",(*#varname_i#_cb_capi).r,(*#varname_i#_cb_capi).i);'}, ], 'need':[{isintent_out:['#ctype#_from_pyobj', 'GETSCALARFROMPYTUPLE']}, {debugcapi:'CFUNCSMESS'}], '_check':isscalar }, { 'pyobjfrom':[{isintent_in:"""\ \tif (#name#_nofargs>capi_i) \t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyobj_from_#ctype#1(#varname_i#))) \t\t\tgoto capi_fail;"""}, {isintent_inout:"""\ \tif (#name#_nofargs>capi_i) \t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyarr_from_p_#ctype#1(#varname_i#_cb_capi))) \t\t\tgoto capi_fail;"""}], 'need':[{isintent_in:'pyobj_from_#ctype#1'}, {isintent_inout:'pyarr_from_p_#ctype#1'}, {iscomplex:'#ctype#'}], '_check':l_and(isscalar, isintent_nothide), '_optional':'' }, {# String 'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting #varname#->\\"");'}, """\tif (capi_j>capi_i) \t\tGETSTRFROMPYTUPLE(capi_return,capi_i++,#varname_i#,#varname_i#_cb_len);""", {debugcapi:'\tfprintf(stderr,"#showvalueformat#\\":%d:.\\n",#varname_i#,#varname_i#_cb_len);'}, ], 'need':['#ctype#', 'GETSTRFROMPYTUPLE', {debugcapi:'CFUNCSMESS'}, 'string.h'], '_check':l_and(isstring, isintent_out) }, { 'pyobjfrom':[{debugcapi:'\tfprintf(stderr,"debug-capi:cb:#varname#=\\"#showvalueformat#\\":%d:\\n",#varname_i#,#varname_i#_cb_len);'}, {isintent_in:"""\ \tif (#name#_nofargs>capi_i) \t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyobj_from_#ctype#1size(#varname_i#,#varname_i#_cb_len))) \t\t\tgoto 
capi_fail;"""}, {isintent_inout:"""\ \tif (#name#_nofargs>capi_i) { \t\tint #varname_i#_cb_dims[] = {#varname_i#_cb_len}; \t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyarr_from_p_#ctype#1(#varname_i#,#varname_i#_cb_dims))) \t\t\tgoto capi_fail; \t}"""}], 'need':[{isintent_in:'pyobj_from_#ctype#1size'}, {isintent_inout:'pyarr_from_p_#ctype#1'}], '_check':l_and(isstring, isintent_nothide), '_optional':'' }, # Array ... { 'decl':'\tnpy_intp #varname_i#_Dims[#rank#] = {#rank*[-1]#};', 'setdims':'\t#cbsetdims#;', '_check':isarray, '_depend':'' }, { 'pyobjfrom': [{debugcapi:'\tfprintf(stderr,"debug-capi:cb:#varname#\\n");'}, {isintent_c: """\ \tif (#name#_nofargs>capi_i) { \t\tPyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,#rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,0,NPY_ARRAY_CARRAY,NULL); /*XXX: Hmm, what will destroy this array??? */ """, l_not(isintent_c): """\ \tif (#name#_nofargs>capi_i) { \t\tPyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,#rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,0,NPY_ARRAY_FARRAY,NULL); /*XXX: Hmm, what will destroy this array??? 
*/ """, }, """ \t\tif (tmp_arr==NULL) \t\t\tgoto capi_fail; \t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,(PyObject *)tmp_arr)) \t\t\tgoto capi_fail; }"""], '_check': l_and(isarray, isintent_nothide, l_or(isintent_in, isintent_inout)), '_optional': '', }, { 'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting #varname#->");'}, """\tif (capi_j>capi_i) { \t\tPyArrayObject *rv_cb_arr = NULL; \t\tif ((capi_tmp = PyTuple_GetItem(capi_return,capi_i++))==NULL) goto capi_fail; \t\trv_cb_arr = array_from_pyobj(#atype#,#varname_i#_Dims,#rank#,F2PY_INTENT_IN""", {isintent_c:'|F2PY_INTENT_C'}, """,capi_tmp); \t\tif (rv_cb_arr == NULL) { \t\t\tfprintf(stderr,\"rv_cb_arr is NULL\\n\"); \t\t\tgoto capi_fail; \t\t} \t\tMEMCOPY(#varname_i#,PyArray_DATA(rv_cb_arr),PyArray_NBYTES(rv_cb_arr)); \t\tif (capi_tmp != (PyObject *)rv_cb_arr) { \t\t\tPy_DECREF(rv_cb_arr); \t\t} \t}""", {debugcapi:'\tfprintf(stderr,"<-.\\n");'}, ], 'need':['MEMCOPY', {iscomplexarray:'#ctype#'}], '_check':l_and(isarray, isintent_out) }, { 'docreturn':'#varname#,', '_check':isintent_out } ] ################## Build call-back module ############# cb_map={} def buildcallbacks(m): global cb_map cb_map[m['name']]=[] for bi in m['body']: if bi['block']=='interface': for b in bi['body']: if b: buildcallback(b, m['name']) else: errmess('warning: empty body for %s\n' % (m['name'])) def buildcallback(rout, um): global cb_map from . 
import capi_maps outmess('\tConstructing call-back function "cb_%s_in_%s"\n'%(rout['name'], um)) args, depargs=getargs(rout) capi_maps.depargs=depargs var=rout['vars'] vrd=capi_maps.cb_routsign2map(rout, um) rd=dictappend({}, vrd) cb_map[um].append([rout['name'], rd['name']]) for r in cb_rout_rules: if ('_check' in r and r['_check'](rout)) or ('_check' not in r): ar=applyrules(r, vrd, rout) rd=dictappend(rd, ar) savevrd={} for i, a in enumerate(args): vrd=capi_maps.cb_sign2map(a, var[a], index=i) savevrd[a]=vrd for r in cb_arg_rules: if '_depend' in r: continue if '_optional' in r and isoptional(var[a]): continue if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): ar=applyrules(r, vrd, var[a]) rd=dictappend(rd, ar) if '_break' in r: break for a in args: vrd=savevrd[a] for r in cb_arg_rules: if '_depend' in r: continue if ('_optional' not in r) or ('_optional' in r and isrequired(var[a])): continue if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): ar=applyrules(r, vrd, var[a]) rd=dictappend(rd, ar) if '_break' in r: break for a in depargs: vrd=savevrd[a] for r in cb_arg_rules: if '_depend' not in r: continue if '_optional' in r: continue if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): ar=applyrules(r, vrd, var[a]) rd=dictappend(rd, ar) if '_break' in r: break if 'args' in rd and 'optargs' in rd: if isinstance(rd['optargs'], list): rd['optargs']=rd['optargs']+[""" #ifndef F2PY_CB_RETURNCOMPLEX , #endif """] rd['optargs_nm']=rd['optargs_nm']+[""" #ifndef F2PY_CB_RETURNCOMPLEX , #endif """] rd['optargs_td']=rd['optargs_td']+[""" #ifndef F2PY_CB_RETURNCOMPLEX , #endif """] if isinstance(rd['docreturn'], list): rd['docreturn']=stripcomma(replace('#docreturn#', {'docreturn':rd['docreturn']})) optargs=stripcomma(replace('#docsignopt#', {'docsignopt':rd['docsignopt']} )) if optargs=='': rd['docsignature']=stripcomma(replace('#docsign#', {'docsign':rd['docsign']})) else: rd['docsignature']=replace('#docsign#[#docsignopt#]', 
{'docsign': rd['docsign'], 'docsignopt': optargs, }) rd['latexdocsignature']=rd['docsignature'].replace('_', '\\_') rd['latexdocsignature']=rd['latexdocsignature'].replace(',', ', ') rd['docstrsigns']=[] rd['latexdocstrsigns']=[] for k in ['docstrreq', 'docstropt', 'docstrout', 'docstrcbs']: if k in rd and isinstance(rd[k], list): rd['docstrsigns']=rd['docstrsigns']+rd[k] k='latex'+k if k in rd and isinstance(rd[k], list): rd['latexdocstrsigns']=rd['latexdocstrsigns']+rd[k][0:1]+\ ['\\begin{description}']+rd[k][1:]+\ ['\\end{description}'] if 'args' not in rd: rd['args']='' rd['args_td']='' rd['args_nm']='' if not (rd.get('args') or rd.get('optargs') or rd.get('strarglens')): rd['noargs'] = 'void' ar=applyrules(cb_routine_rules, rd) cfuncs.callbacks[rd['name']]=ar['body'] if isinstance(ar['need'], str): ar['need']=[ar['need']] if 'need' in rd: for t in cfuncs.typedefs.keys(): if t in rd['need']: ar['need'].append(t) cfuncs.typedefs_generated[rd['name']+'_typedef'] = ar['cbtypedefs'] ar['need'].append(rd['name']+'_typedef') cfuncs.needs[rd['name']]=ar['need'] capi_maps.lcb2_map[rd['name']]={'maxnofargs':ar['maxnofargs'], 'nofoptargs':ar['nofoptargs'], 'docstr':ar['docstr'], 'latexdocstr':ar['latexdocstr'], 'argname':rd['argname'] } outmess('\t %s\n'%(ar['docstrshort'])) #print ar['body'] return ################## Build call-back function #############
bsd-3-clause
RaspberryCoin/RaspberryCoinCore
qa/rpc-tests/forknotify.py
15
2204
#!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Test -alertnotify # from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * class ForkNotifyTest(BitcoinTestFramework): def __init__(self): super().__init__() self.num_nodes = 2 self.setup_clean_chain = False alert_filename = None # Set by setup_network def setup_network(self): self.nodes = [] self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt") with open(self.alert_filename, 'w') as f: pass # Just open then close to create zero-length file self.nodes.append(start_node(0, self.options.tmpdir, ["-blockversion=2", "-alertnotify=echo %s >> \"" + self.alert_filename + "\""])) # Node1 mines block.version=211 blocks self.nodes.append(start_node(1, self.options.tmpdir, ["-blockversion=211"])) connect_nodes(self.nodes[1], 0) self.is_network_split = False self.sync_all() def run_test(self): # Mine 51 up-version blocks self.nodes[1].generate(51) self.sync_all() # -alertnotify should trigger on the 51'st, # but mine and sync another to give # -alertnotify time to write self.nodes[1].generate(1) self.sync_all() with open(self.alert_filename, 'r') as f: alert_text = f.read() if len(alert_text) == 0: raise AssertionError("-alertnotify did not warn of up-version blocks") # Mine more up-version blocks, should not get more alerts: self.nodes[1].generate(1) self.sync_all() self.nodes[1].generate(1) self.sync_all() with open(self.alert_filename, 'r') as f: alert_text2 = f.read() if alert_text != alert_text2: raise AssertionError("-alertnotify excessive warning of up-version blocks") if __name__ == '__main__': ForkNotifyTest().main()
mit
dsoft20/romrenamer
rm.py
1
3004
# Replace game name with the rom name, github: https://github.com/dsoft20
# This script takes the gamelist.xml generated by the Universal XML-Scraper
# and replaces the scraped name field (<name>) with the rom name:
#   <path>./Game name (Europe).zip</path>
#   <name>Game name</name>
# becomes
#   <path>./Game name (Europe).zip</path>
#   <name>Game name (Europe)</name>
#
# WINDOWS:  place the file on \\raspberry\configs\all\emulationstation\gamelists\
#           run the script via python rm.py
# RETROPIE: place the file in /home/pi/.emulationstation/gamelists/
#           run the script via python rm.py

import sys, os, time, io


def parse(directory):
    """Rewrite gamelist.xml inside *directory* so each <name> matches its rom.

    Reads ".//<directory>gamelist.xml", writes a ".bck" backup, then for every
    <path> line replaces the line that follows it with a <name> derived from
    the rom file name.  Prints progress and returns None.
    """
    # Strings stripped from the rom file name to obtain the display name;
    # please add unlisted rom extensions here (e.g. ".gbc").
    toreplace = ["./", ".zip", "<path>", "</path>"]

    gamelist_path = ".//" + directory + "gamelist.xml"
    if not os.path.exists(gamelist_path):
        # Same message as before; nothing to do for this directory.
        print("FILE DOES NOT EXISTS")
        return

    print("READING gamelist.xml")
    with io.open(gamelist_path, "r", encoding='utf-8') as infile:
        entries = infile.read().splitlines()

    # Make a backup before touching anything.
    with io.open(gamelist_path + ".bck", "w", encoding='utf-8') as backup:
        for entry in entries:
            backup.write(entry + "\n")

    for index, entry in enumerate(entries):
        # Only a <path> line that still has a following line can be handled;
        # the bounds check fixes an IndexError when <path> was the last line.
        if "<path>" not in entry or index + 1 >= len(entries):
            continue
        print("Found:" + entry)
        # Strip surrounding whitespace, then remove the markup and extension.
        name = entry.strip()
        for token in toreplace:
            name = name.replace(token, "")
        name = "\t\t<name>" + name + "</name>"
        # The scraper always emits <name> directly after <path>; overwrite it.
        entries[index + 1] = name
        print("Name edited: " + name)

    # Write the updated gamelist.xml to disk.
    print("writing updated gamelist.xml")
    with io.open(gamelist_path, "w", encoding='utf-8') as outfile:
        for entry in entries:
            outfile.write(entry + "\n")


def main():
    """Fix the local gamelist.xml, or every known system subdirectory."""
    # Systems list, please add unlisted systems.
    systems = ["amstradcpc", "atari2600", "atari7800", "atarilynx", "coleco",
               "fba", "fbs", "fds", "gamegear", "gb", "gba", "gbc", "genesis",
               "mame-libretro", "mame-mame4all", "mastersystem", "megadrive",
               "msx", "n64", "neogeo", "nes", "ngp", "ngpc", "pcengine", "psx",
               "sega32x", "segacd", "sg-1000", "snes", "vectrex", "wonderswan",
               "wonderswancolor", "zxspectrum"]
    # If gamelist.xml exists in the current directory parse & edit only that,
    # otherwise search the system directories and parse their gamelist.xml.
    if os.path.exists(".//gamelist.xml"):
        print("Parsing local gamelist.xml")
        parse(".//")
    else:
        for system in systems:
            print("Parsing " + system)
            parse(".//" + system + "//")
    print("DONE!")


if __name__ == "__main__":
    main()
mit
jcpowermac/ansible
test/runner/lib/powershell_import_analysis.py
102
2276
"""Analyze powershell import statements.""" from __future__ import absolute_import, print_function import os import re from lib.util import ( display, ) def get_powershell_module_utils_imports(powershell_targets): """Return a dictionary of module_utils names mapped to sets of powershell file paths. :type powershell_targets: list[TestTarget] :rtype: dict[str, set[str]] """ module_utils = enumerate_module_utils() imports_by_target_path = {} for target in powershell_targets: imports_by_target_path[target.path] = extract_powershell_module_utils_imports(target.path, module_utils) imports = dict([(module_util, set()) for module_util in module_utils]) for target_path in imports_by_target_path: for module_util in imports_by_target_path[target_path]: imports[module_util].add(target_path) for module_util in sorted(imports): if not imports[module_util]: display.warning('No imports found which use the "%s" module_util.' % module_util) return imports def enumerate_module_utils(): """Return a list of available module_utils imports. :rtype: set[str] """ return set(os.path.splitext(p)[0] for p in os.listdir('lib/ansible/module_utils/powershell') if os.path.splitext(p)[1] == '.psm1') def extract_powershell_module_utils_imports(path, module_utils): """Return a list of module_utils imports found in the specified source file. :type path: str :type module_utils: set[str] :rtype: set[str] """ imports = set() with open(path, 'r') as module_fd: code = module_fd.read() if '# POWERSHELL_COMMON' in code: imports.add('Ansible.ModuleUtils.Legacy') lines = code.splitlines() line_number = 0 for line in lines: line_number += 1 match = re.search(r'(?i)^#\s*requires\s+-module(?:s?)\s*(Ansible\.ModuleUtils\..+)', line) if not match: continue import_name = match.group(1) if import_name in module_utils: imports.add(import_name) else: display.warning('%s:%d Invalid module_utils import: %s' % (path, line_number, import_name)) return imports
gpl-3.0
resmo/ansible
test/units/modules/network/fortios/test_fortios_user_security_exempt_list.py
21
7891
# Copyright 2019 Fortinet, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <https://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import json import pytest from mock import ANY from ansible.module_utils.network.fortios.fortios import FortiOSHandler try: from ansible.modules.network.fortios import fortios_user_security_exempt_list except ImportError: pytest.skip("Could not load required modules for testing", allow_module_level=True) @pytest.fixture(autouse=True) def connection_mock(mocker): connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_user_security_exempt_list.Connection') return connection_class_mock fos_instance = FortiOSHandler(connection_mock) def test_user_security_exempt_list_creation(mocker): schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema') set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200} set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result) input_data = { 'username': 'admin', 'state': 'present', 'user_security_exempt_list': { 'description': 'test_value_3', 'name': 'default_name_4', }, 'vdom': 'root'} is_error, changed, response = fortios_user_security_exempt_list.fortios_user(input_data, fos_instance) 
expected_data = { 'description': 'test_value_3', 'name': 'default_name_4', } set_method_mock.assert_called_with('user', 'security-exempt-list', data=expected_data, vdom='root') schema_method_mock.assert_not_called() assert not is_error assert changed assert response['status'] == 'success' assert response['http_status'] == 200 def test_user_security_exempt_list_creation_fails(mocker): schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema') set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500} set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result) input_data = { 'username': 'admin', 'state': 'present', 'user_security_exempt_list': { 'description': 'test_value_3', 'name': 'default_name_4', }, 'vdom': 'root'} is_error, changed, response = fortios_user_security_exempt_list.fortios_user(input_data, fos_instance) expected_data = { 'description': 'test_value_3', 'name': 'default_name_4', } set_method_mock.assert_called_with('user', 'security-exempt-list', data=expected_data, vdom='root') schema_method_mock.assert_not_called() assert is_error assert not changed assert response['status'] == 'error' assert response['http_status'] == 500 def test_user_security_exempt_list_removal(mocker): schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema') delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200} delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result) input_data = { 'username': 'admin', 'state': 'absent', 'user_security_exempt_list': { 'description': 'test_value_3', 'name': 'default_name_4', }, 'vdom': 'root'} is_error, changed, response = fortios_user_security_exempt_list.fortios_user(input_data, fos_instance) delete_method_mock.assert_called_with('user', 
'security-exempt-list', mkey=ANY, vdom='root') schema_method_mock.assert_not_called() assert not is_error assert changed assert response['status'] == 'success' assert response['http_status'] == 200 def test_user_security_exempt_list_deletion_fails(mocker): schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema') delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500} delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result) input_data = { 'username': 'admin', 'state': 'absent', 'user_security_exempt_list': { 'description': 'test_value_3', 'name': 'default_name_4', }, 'vdom': 'root'} is_error, changed, response = fortios_user_security_exempt_list.fortios_user(input_data, fos_instance) delete_method_mock.assert_called_with('user', 'security-exempt-list', mkey=ANY, vdom='root') schema_method_mock.assert_not_called() assert is_error assert not changed assert response['status'] == 'error' assert response['http_status'] == 500 def test_user_security_exempt_list_idempotent(mocker): schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema') set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404} set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result) input_data = { 'username': 'admin', 'state': 'present', 'user_security_exempt_list': { 'description': 'test_value_3', 'name': 'default_name_4', }, 'vdom': 'root'} is_error, changed, response = fortios_user_security_exempt_list.fortios_user(input_data, fos_instance) expected_data = { 'description': 'test_value_3', 'name': 'default_name_4', } set_method_mock.assert_called_with('user', 'security-exempt-list', data=expected_data, vdom='root') schema_method_mock.assert_not_called() assert not is_error assert not changed assert 
response['status'] == 'error' assert response['http_status'] == 404 def test_user_security_exempt_list_filter_foreign_attributes(mocker): schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema') set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200} set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result) input_data = { 'username': 'admin', 'state': 'present', 'user_security_exempt_list': { 'random_attribute_not_valid': 'tag', 'description': 'test_value_3', 'name': 'default_name_4', }, 'vdom': 'root'} is_error, changed, response = fortios_user_security_exempt_list.fortios_user(input_data, fos_instance) expected_data = { 'description': 'test_value_3', 'name': 'default_name_4', } set_method_mock.assert_called_with('user', 'security-exempt-list', data=expected_data, vdom='root') schema_method_mock.assert_not_called() assert not is_error assert changed assert response['status'] == 'success' assert response['http_status'] == 200
gpl-3.0
kaniini/aerospike-py
aerospike_py/message.py
1
10055
import asyncio from collections import namedtuple import struct from aerospike_py.connection import Connection, ASConnectionError from aerospike_py.result_code import ASMSGProtocolException class ASIOException(Exception): pass class InvalidMessageException(Exception): pass # --- all messages --- AerospikeOuterHeader = namedtuple('AerospikeOuterHeader', ['version', 'msg_type', 'sz']) AerospikeOuterHeaderStruct = struct.Struct('>Q') def pack_outer_header(header: AerospikeOuterHeader) -> bytes: size = (header.sz) | ((header.version & 0xFF) << 56) | ((header.msg_type & 0xFF) << 48) return AerospikeOuterHeaderStruct.pack(size) def unpack_outer_header(data: bytes) -> AerospikeOuterHeader: header_uint64 = AerospikeOuterHeaderStruct.unpack(data)[0] size = header_uint64 & 0xFFFFFFFFFFFF msg_type = data[1] msg_proto = data[0] return AerospikeOuterHeader(msg_proto, msg_type, size) def pack_message(envelope: bytes, msg_type: int) -> bytes: size = len(envelope) return pack_outer_header(AerospikeOuterHeader(2, msg_type, size)) + envelope def unpack_message(envelope: bytes, whole_message: bool = False) -> (AerospikeOuterHeader, bytes): if len(envelope) < 8: raise InvalidMessageException('message length %d is too short' % len(envelope)) header = unpack_outer_header(envelope[0:8]) if header.version != 2: raise InvalidMessageException('protocol version %d is not supported' % header.version) if header.msg_type not in (1, 3): raise InvalidMessageException('message type %d is not supported' % header.msg_type) if whole_message and header.sz != len(envelope[8:]): raise InvalidMessageException('message payload is less than the specified length (%d < %d).' 
% (len(envelope[8:]), header.sz)) return (header, envelope[8:]) # --- AS_MSG (type 3) messages --- AS_INFO1_READ = (1 << 0) AS_INFO1_GET_ALL = (1 << 1) AS_INFO1_BATCH = (1 << 3) AS_INFO1_NOBINDATA = (1 << 5) AS_INFO1_CONSISTENCY_ALL = (1 << 6) AS_INFO2_WRITE = (1 << 0) AS_INFO2_DELETE = (1 << 1) AS_INFO2_GENERATION = (1 << 2) AS_INFO2_GENERATION_GT = (1 << 3) AS_INFO2_GENERATION_DUP = (1 << 4) AS_INFO2_CREATE_ONLY = (1 << 5) AS_INFO2_CREATE_BIN_ONLY = (1 << 6) AS_INFO3_LAST = (1 << 0) AS_INFO3_COMMIT_MASTER = (1 << 1) AS_INFO3_UPDATE_ONLY = (1 << 3) AS_INFO3_CREATE_OR_REPLACE = (1 << 4) AS_INFO3_REPLACE_ONLY = (1 << 5) AerospikeASMSGHeader = namedtuple('AerospikeASMSGHeader', [ 'header_sz', 'info1', 'info2', 'info3', 'result_code', 'generation', 'record_ttl', 'transaction_ttl', 'n_fields', 'n_ops' ]) AerospikeASMSGHeaderStruct = struct.Struct('>BBBBxBIIIHH') def pack_asmsg_header(info1: int, info2: int, info3: int, generation: int, record_ttl: int, transaction_ttl: int, n_fields: int, n_ops: int) -> bytes: header = AerospikeASMSGHeader(22, info1, info2, info3, 0, generation, record_ttl, transaction_ttl, n_fields, n_ops) return AerospikeASMSGHeaderStruct.pack(*header) def unpack_asmsg_header(header: bytes) -> AerospikeASMSGHeader: if len(header) < 22: raise InvalidMessageException('AS_MSG header is not 22 bytes long') return AerospikeASMSGHeader(*AerospikeASMSGHeaderStruct.unpack(header)) # Aerospike fields are actually locators, i.e. they describe how to locate data rows/documents/whatever. 
AerospikeASMSGFieldHeader = namedtuple('AerospikeASMSGFieldHeader', ['size', 'field_type']) AerospikeASMSGFieldHeaderStruct = struct.Struct('>IB') AS_MSG_FIELD_TYPE_NAMESPACE = 0 AS_MSG_FIELD_TYPE_SET = 1 AS_MSG_FIELD_TYPE_KEY = 2 AS_MSG_FIELD_TYPE_BIN = 3 AS_MSG_FIELD_TYPE_DIGEST_RIPE = 4 AS_MSG_FIELD_TYPE_DIGEST_RIPE_ARRAY = 6 AS_MSG_FIELD_TYPE_TRID = 7 AS_MSG_FIELD_TYPE_SCAN_OPTIONS = 8 AS_MSG_PARTICLE_TYPE_NULL = 0 AS_MSG_PARTICLE_TYPE_INTEGER = 1 AS_MSG_PARTICLE_TYPE_DOUBLE = 2 AS_MSG_PARTICLE_TYPE_STRING = 3 AS_MSG_PARTICLE_TYPE_BLOB = 4 AS_MSG_PARTICLE_TYPE_PYTHON_BLOB = 9 AS_MSG_PARTICLE_TYPE_MAP = 19 AS_MSG_PARTICLE_TYPE_LIST = 20 AS_MSG_PARTICLE_TYPE_GEOJSON = 23 _decoders = { AS_MSG_PARTICLE_TYPE_NULL: lambda x: None, AS_MSG_PARTICLE_TYPE_INTEGER: lambda x: struct.unpack('>Q', x[0:8])[0], AS_MSG_PARTICLE_TYPE_DOUBLE: lambda x: struct.unpack('>d', x[0:8])[0], AS_MSG_PARTICLE_TYPE_STRING: lambda x: x.decode('UTF-8').strip('\x00'), AS_MSG_PARTICLE_TYPE_BLOB: lambda x: x, } NoneType = type(None) _encoders = { NoneType: lambda x: (b'', AS_MSG_PARTICLE_TYPE_NULL), int: lambda x: (struct.pack('>Q', x), AS_MSG_PARTICLE_TYPE_INTEGER), float: lambda x: (struct.pack('>d', x), AS_MSG_PARTICLE_TYPE_DOUBLE), str: lambda x: (x.encode('UTF-8') + b'\x00', AS_MSG_PARTICLE_TYPE_STRING), bytes: lambda x: (x, AS_MSG_PARTICLE_TYPE_BLOB), } def encode_payload(payload): encoder = _encoders.get(type(payload), lambda x: (b'', AS_MSG_PARTICLE_TYPE_NULL)) return encoder(payload) def decode_payload(ptype, payload): decoder = _decoders.get(ptype, lambda x: x) return decoder(payload) def pack_asmsg_field(data: bytes, field_type: int) -> bytes: header = AerospikeASMSGFieldHeader(len(data) + 1, field_type) return AerospikeASMSGFieldHeaderStruct.pack(*header) + data def unpack_asmsg_field(data: bytes) -> (AerospikeASMSGFieldHeader, bytes): header = AerospikeASMSGFieldHeader(*AerospikeASMSGFieldHeaderStruct.unpack(data[0:5])) return (header, data[5:]) # Aerospike operations describe what 
to do to the located record(s). AerospikeASMSGOperationHeader = namedtuple('AerospikeASMSGOperationHeader', [ 'size', 'op', 'bin_data_type', 'bin_version', 'bin_name_length' ]) AerospikeASMSGOperationHeaderStruct = struct.Struct('>IBBBB') AS_MSG_OP_READ = 1 AS_MSG_OP_WRITE = 2 AS_MSG_OP_INCR = 5 AS_MSG_OP_APPEND = 9 AS_MSG_OP_PREPEND = 10 AS_MSG_OP_TOUCH = 11 AS_MSG_OP_MC_INCR = 129 AS_MSG_OP_MC_APPEND = 130 AS_MSG_OP_MC_PREPEND = 131 AS_MSG_OP_MC_TOUCH = 132 def pack_asmsg_operation(op: int, bin_data_type: int, bin_name: str, bin_data: bytes) -> bytes: bin_name_enc = bin_name.encode('UTF-8') header = AerospikeASMSGOperationHeader(len(bin_name_enc) + len(bin_data) + 4, op, bin_data_type, 0, len(bin_name_enc)) return AerospikeASMSGOperationHeaderStruct.pack(*header) + bin_name_enc + bin_data def unpack_asmsg_operation(data: bytes) -> (AerospikeASMSGOperationHeader, str, bytes): header = AerospikeASMSGOperationHeader(*AerospikeASMSGOperationHeaderStruct.unpack(data[0:8])) if len(data) == 8: return header, None, None bin_name = data[8:8 + header.bin_name_length].decode('UTF-8') return (header, bin_name, data[8 + header.bin_name_length:]) def pack_asmsg(info1: int, info2: int, info3: int, generation: int, record_ttl: int, transaction_ttl: int, fields: list, ops: list) -> bytes: asmsg_hdr = pack_asmsg_header(info1, info2, info3, generation, record_ttl, transaction_ttl, len(fields), len(ops)) return asmsg_hdr + b''.join(fields) + b''.join(ops) def unpack_asmsg(data: bytes) -> (AerospikeASMSGHeader, list, list): asmsg_hdr = unpack_asmsg_header(data[0:22]) # next comes fields: pos = 22 fields = [] for i in range(asmsg_hdr.n_fields): f_hdr, _ = unpack_asmsg_field(data[pos:(pos + 5)]) f_hdr, payload = unpack_asmsg_field(data[pos:(pos + 5 + f_hdr.size)]) fields += [(f_hdr, payload)] pos += (4 + f_hdr.size) ops = [] for i in range(asmsg_hdr.n_ops): o_hdr, _, _ = unpack_asmsg_operation(data[pos:(pos + 8)]) o_hdr, bin_name, bin_payload = unpack_asmsg_operation(data[pos:(pos + 5 
+ o_hdr.size)]) ops += [(o_hdr, bin_name, bin_payload)] pos += (4 + o_hdr.size) return asmsg_hdr, fields, ops, data[pos:] @asyncio.coroutine def submit_message(conn: Connection, data: bytes) -> (AerospikeOuterHeader, AerospikeASMSGHeader, list, list): ohdr = AerospikeOuterHeader(2, 3, len(data)) buf = pack_outer_header(ohdr) + data yield from conn.open_connection() try: try: yield from conn.write(buf) except ASConnectionError as e: raise ASIOException('write: %r' % e) hdr_payload = yield from conn.read(8) if not hdr_payload: raise ASIOException('read') header, _ = unpack_message(hdr_payload) data = hdr_payload data += yield from conn.read(header.sz) header, payload = unpack_message(data) asmsg_header, asmsg_fields, asmsg_ops, _ = unpack_asmsg(payload) if asmsg_header.result_code != 0: raise ASMSGProtocolException(asmsg_header.result_code) return header, asmsg_header, asmsg_fields, asmsg_ops finally: conn.close_connection() @asyncio.coroutine def submit_multi_message(conn: Connection, data: bytes) -> list: ohdr = AerospikeOuterHeader(2, 3, len(data)) buf = pack_outer_header(ohdr) + data yield from conn.open_connection() try: try: yield from conn.write(buf) except ASConnectionError as e: raise ASIOException('write: %r' % e) not_last = True messages = [] while not_last: hdr_payload = yield from conn.read(8) if not hdr_payload: raise ASIOException('read') header, _ = unpack_message(hdr_payload) data = hdr_payload data += yield from conn.read(header.sz) if len(data) != 8 + header.sz: raise ASIOException('read') header, payload = unpack_message(data) while payload: asmsg_header, asmsg_fields, asmsg_ops, payload = unpack_asmsg(payload) messages += [(header, asmsg_header, asmsg_fields, asmsg_ops)] if asmsg_header.result_code not in (0, 2): raise ASMSGProtocolException(asmsg_header.result_code) if (asmsg_header.info3 & AS_INFO3_LAST) == AS_INFO3_LAST: not_last = False continue return messages finally: conn.close_connection()
isc
yzl0083/orange
docs/reference/rst/code/freeviz-pca.py
6
1686
# Description: FreeViz projector # Category: projection # Uses: zoo # Referenced: Orange.projection.linear # Classes: Orange.projection.linear.FreeViz, Orange.projection.linear.Projector import Orange import numpy as np tab = Orange.data.Table('titanic') ind = Orange.data.sample.SubsetIndices2(p0=0.99)(tab) train, test = tab.select(ind, 0), tab.select(ind, 1) freeviz = Orange.projection.linear.FreeViz() freeviz.graph.set_data(train) freeviz.show_all_attributes() def mirror(tab): a = tab.to_numpy("a")[0] rotate = np.diagflat([-1 if val<0 else 1 for val in a[0]]) a = np.dot(a, rotate) return Orange.data.Table(Orange.data.Domain(tab.domain.features), a) print "PCA" freeviz.find_projection(Orange.projection.linear.DR_PCA, set_anchors=True) projector = freeviz() for e, projected in zip(test, mirror(projector(test))): print e, projected print "SPCA" freeviz.find_projection(Orange.projection.linear.DR_SPCA, set_anchors=True) projector = freeviz() for e, projected in zip(test, mirror(projector(test))): print e, projected print "SPCA w/out generalization" freeviz.use_generalized_eigenvectors = False freeviz.find_projection(Orange.projection.linear.DR_SPCA, set_anchors=True) projector = freeviz() for e, projected in zip(test, mirror(projector(test))): print e, projected print "PCA with 2 attributes" freeviz.graph.anchor_data = [(0,0, a.name) for a in freeviz.graph.data_domain .attributes[:2]] freeviz.find_projection(Orange.projection.linear.DR_PCA, set_anchors=True) projector = freeviz() for e, projected in zip(test, mirror(projector(test))): print e, projected
gpl-3.0
pombredanne/similarityPy
tests/algorihtm_tests/mean_test.py
2
1357
from unittest import TestCase from similarityPy.algorithms.mean import Mean from tests import test_logger __author__ = 'cenk' class MeanTest(TestCase): def setUp(self): pass def test_algorithm_with_list(self): test_logger.debug("MeanTest - test_algorithm_with_list Starts") mean = Mean() data_list = [1, 2, 3, 4, 5] self.assertEquals(3, mean.calculate(data_list)) data_list = [1, 2, 3, 4] self.assertEquals(2.5, mean.calculate(data_list)) data_list = [] with self.assertRaises(ZeroDivisionError) as context: mean.calculate(data_list) self.assertEqual("integer division or modulo by zero", context.exception.message) test_logger.debug("MeanTest - test_algorithm_with_list Ends") def test_algorithm_with_tuple(self): test_logger.debug("MeanTest - test_algorithm_with_tuple Starts") mean = Mean() data_list = [("a", 1), ("b", 2), ("c", 3), ( "d", 4), ("e", 5)] self.assertEquals(3, mean.calculate(data_list, is_tuple=True, index=1)) data_list = [("a", "a", 1), ("b", "b", 2), ("c", "c", 3), ("d", "d", 4), ("e", "e", 5)] self.assertEquals(3.0, mean.calculate(data_list, is_tuple=True, index=2)) test_logger.debug("MeanTest - test_algorithm_with_tuple Ends")
mit
curiousbadger/pyWitnessSolver
src/lib/Partition.py
1
10104
''' Created on Mar 9, 2016 @author: charper ''' from collections import Counter import logging from lib import lib_dbg_filehandler, lib_consolehandler, lib_inf_filehandler module_logger=logging.getLogger(__name__) module_logger.addHandler(lib_inf_filehandler) module_logger.addHandler(lib_dbg_filehandler) module_logger.addHandler(lib_consolehandler) linf, ldbg, ldbg2 = module_logger.info, module_logger.debug, module_logger.debug ldbg('init:'+__name__) from lib.Graph import Graph from lib.Geometry import MultiBlock, Point from lib.util import UniqueColorGenerator class Partition(Graph): def travel_partition(self, n): '''Travel the partition to discover all Nodes. TODO: accumulate some simple rule info as we go?''' if n.key() in self: return self[n.key()]=n for e in n.traversable_edges(): if e.nodes not in self.edges: self.edges[e.nodes]=e self.travel_partition(e.traverse_from_node(n)) def get_rule_shapes(self): if self.rule_shapes is not None and len(self.rule_shapes)==0: raise Exception('ruleshapes') if self.rule_shapes is None: self.rule_shapes=[] self.rule_shape_nodes for n in self.values(): cur_shape=n.rule_shape if cur_shape: self.total_rule_shape_points+=len(cur_shape) self.rule_shapes.append(n.rule_shape) return self.rule_shapes def can_be_composed_of(self,rule_shapes, partition_multiblock, depth_counter): cur_multiblock=rule_shapes[depth_counter] t='\t'*depth_counter #ldbg2(t,'partition_multiblock', partition_multiblock) #ldbg2(t,'cur_multiblock', cur_multiblock) found_solution=False for cur_shape in cur_multiblock.rotations: #ldbg2(' cur_shape',cur_shape) # Is the shape bigger than the partition? 
if not partition_multiblock.could_contain(cur_shape): #ldbg2(' %s can''t contain %s', partition_multiblock, cur_shape) continue # Put the shape in the lower-left corner cur_shape.set_offset(partition_multiblock.offset_point()) #ldbg2(' cur_shape.offset_point', cur_shape.offset_point()) max_shift_point=partition_multiblock.upper_right()-cur_shape.upper_right() #ldbg2(' max_shift_point', max_shift_point) # TODO: Change to MultiBlock yield? for y in range(max_shift_point.y+1): for x in range(max_shift_point.x+1): # Shift all the shape's Points cur_shape.set_offset(Point((x,y))) abs_points=cur_shape.get_absolute_point_set() #ldbg2(' abs_points', abs_points) '''There are now several possibilities: 1. The shifted points completely cover the partition points We're done no matter what. If the last shape has been placed then this is a solution. If not, then we ran out of space. 2. The shifted points are a proper subset of the partition points. This is fine, pass the remaining points back to see if further shapes can fill them 3. The shifted points are not a subset of the partition points, ie. some lie outside the partition points. Invalid, discard and move on ''' # If any point in cur_shape lies outside the partition points, this is not a solution # TODO: Change to yield? No reason to check all points, just need first violation outside_points=abs_points - partition_multiblock if outside_points: #ldbg2(' !!outside_points', outside_points) continue # Return all the points in the partition that are left remaining_partition_points=partition_multiblock - abs_points #ldbg2(' remaining_partition_points', remaining_partition_points) # Haven't filled all our points yet... 
if remaining_partition_points: if depth_counter==len(rule_shapes)-1: #ldbg2( ' still points') raise Exception('still points') else: #ldbg2(' keep on truckin...') new_partition=MultiBlock(remaining_partition_points,auto_shift_Q1=False) #ldbg2(' new_partition', new_partition) found_solution=self.can_be_composed_of(rule_shapes, new_partition, depth_counter+1) # Filled all our points, are we at the end? else: if depth_counter==len(rule_shapes)-1: found_solution=True else: # Should not happen till we start implementing SubtractionSquare raise Exception('Filled too soon') return False if found_solution: #ldbg2('\t'*(depth_counter)+'FOUND SOLUTION!!', cur_shape) sol_pts=cur_shape.get_absolute_point_set() self.solution_shapes.append(sol_pts) return True return False def has_shape_violation(self): if self.shape_violation is not None: raise Exception('Violation already checked') return self.shape_violation self.solution_shapes=[] rule_shapes=self.get_rule_shapes() #ldbg2('rule_shapes', rule_shapes) if self.total_rule_shape_points != len(self): self.shape_violation=True return True partition_multiblock=MultiBlock(self.keys(),name='partition_multiblock',auto_shift_Q1=False) self.shape_violation = not self.can_be_composed_of(rule_shapes,partition_multiblock, 0) return self.shape_violation def has_color_violation(self): if self.color_violation is not None: raise Exception('Violation already checked') return self.color_violation distinct_rule_colors = Counter([n.rule_color for n in self.values() if n.rule_color]) #ldbg2('distinct_rule_colors', distinct_rule_colors) # Only 1 rule_color allowed per Partition self.color_violation = len(distinct_rule_colors)>1 return self.color_violation def has_sun_violation(self): '''TODO: This logic is far from complete, but works for simple puzzles The rule suns will actually count other objects with the same color as a "buddy", so far I've seen "Rule Shapes" and normal "Rule Colors" count. 
''' if self.sun_violation is not None: raise Exception('Violation already checked') return self.sun_violation rule_sun_colors = Counter([n.sun_color for n in self.values() if n.sun_color]) # Every sun needs a buddy... self.sun_violation = any(cnt != 2 for cnt in rule_sun_colors.values()) return self.sun_violation def __init__(self, first_node): ''' ''' Graph.__init__(self) self.travel_partition(first_node) self.shape_violation=None self.rule_shapes=None self.solution_shapes=[] self.total_rule_shape_points=0 self.color_violation=None self.sun_violation=None def solution_shape_to_squares(self): '''Yield the set of Squares corresponding to each "Solution Shape". ''' for solution_shape in self.solution_shapes: squares=set([self[key] for key in solution_shape]) yield squares def solution_shapes_to_edges(self): '''Yield sets of Edges for each Solution Shape. A "Solution Shape" is a set of Points that correspond to a sub-Partition of this Partition. Note that these Points are not necessarily all neighbors, but return the Edges corresponding to those that are. # TODO: Also yield Square with Rule Shape corresponding to Solution Shape? ''' # Create a temporary set of all Edges in this Partition, # (Actually Edge keys, which is a frozenset of two Squares, # but the squares are what we want anyways) edges_to_find=set(self.edges) for squares in self.solution_shape_to_squares(): ldbg('Looking for Edges in solution shape:'+' '.join(s.sym for s in squares)) # Find all edges that are in this shape -> The Edge's Squares are both in this # Solution Shape edges_in_shape=set([e for e in edges_to_find if e.issubset(squares)]) # Finished looping over edges_to_find, remove what we found edges_to_find = edges_to_find - edges_in_shape yield edges_in_shape if not edges_to_find: # Will only happen if Partition is composed of 1 Rule Shape ldbg(' !!!found all edges!!!') break if __name__=='__main__': exit(0)
gpl-3.0
xiaoxiaoyao/MyApp
PythonApplication1/自己的小练习/Sankey桑基图.py
2
4119
from pyecharts.charts import Sankey from pyecharts import options as opts linkes=[ {'source':'03万','target':'续费后03万','value':20}, {'source':'03万','target':'续费后05万','value':70}, {'source':'03万','target':'续费后10万','value':25}, {'source':'03万','target':'续费后15万','value':12}, {'source':'03万','target':'续费后25万','value':10}, {'source':'03万','target':'续费后50万','value':4}, {'source':'03万','target':'续费后包年','value':2}, {'source':'05万','target':'续费后03万','value':6}, {'source':'05万','target':'续费后05万','value':139}, {'source':'05万','target':'续费后10万','value':157}, {'source':'05万','target':'续费后15万','value':91}, {'source':'05万','target':'续费后25万','value':48}, {'source':'05万','target':'续费后50万','value':6}, {'source':'05万','target':'续费后包年','value':8}, {'source':'05万','target':'续费后单价极低','value':1}, {'source':'10万','target':'续费后03万','value':3}, {'source':'10万','target':'续费后05万','value':5}, {'source':'10万','target':'续费后10万','value':22}, {'source':'10万','target':'续费后15万','value':73}, {'source':'10万','target':'续费后25万','value':39}, {'source':'10万','target':'续费后50万','value':21}, {'source':'10万','target':'续费后包年','value':5}, {'source':'10万','target':'续费后单价极低','value':2}, {'source':'15万','target':'续费后03万','value':3}, {'source':'15万','target':'续费后05万','value':3}, {'source':'15万','target':'续费后10万','value':2}, {'source':'15万','target':'续费后15万','value':56}, {'source':'15万','target':'续费后25万','value':73}, {'source':'15万','target':'续费后50万','value':46}, {'source':'15万','target':'续费后包年','value':12}, {'source':'15万','target':'续费后单价极低','value':7}, {'source':'25万','target':'续费后10万','value':1}, {'source':'25万','target':'续费后25万','value':49}, {'source':'25万','target':'续费后50万','value':46}, {'source':'25万','target':'续费后包年','value':40}, {'source':'25万','target':'续费后单价极低','value':25}, {'source':'50万','target':'续费后25万','value':2}, {'source':'50万','target':'续费后50万','value':17}, {'source':'50万','target':'续费后包年','value':7}, {'source':'50万','target':'续费后单价极低','value':11}, {'source':'包年','target':'续费后03万','value':1}, 
{'source':'包年','target':'续费后05万','value':7}, {'source':'包年','target':'续费后10万','value':2}, {'source':'包年','target':'续费后15万','value':6}, {'source':'包年','target':'续费后25万','value':5}, {'source':'包年','target':'续费后50万','value':5}, {'source':'包年','target':'续费后包年','value':109}, {'source':'包年','target':'续费后单价极低','value':15}, {'source':'单价极低','target':'续费后包年','value':2}, {'source':'单价极低','target':'续费后单价极低','value':4}, ] nodes=[ {'name':'03万'}, {'name':'05万'}, {'name':'10万'}, {'name':'15万'}, {'name':'25万'}, {'name':'50万'}, {'name':'包年'}, {'name':'单价极低'}, {'name':'续费后03万'}, {'name':'续费后05万'}, {'name':'续费后10万'}, {'name':'续费后15万'}, {'name':'续费后25万'}, {'name':'续费后50万'}, {'name':'续费后包年'}, {'name':'续费后单价极低'}, ] pic = ( Sankey() .add('', #图例名称 nodes, #传入节点数据 linkes, #传入边和流量数据 #设置透明度、弯曲度、颜色 linestyle_opt=opts.LineStyleOpts(opacity = 0.3, curve = 0.5, color = "source"), #标签显示位置 label_opts=opts.LabelOpts(position="right"), #节点之前的距离 node_gap = 30, # 鼠标 hover 到节点或边上,相邻接的节点和边高亮的交互,默认关闭,可手动开启 focus_node_adjacency="allEdges" ) .set_global_opts(title_opts=opts.TitleOpts(title = '企业版合同续签客户流向')) ) #输出文件 pic.render(r'C:\Users\YAO\AppData\Local\Temp\test.html')
unlicense
KhronosGroup/COLLADA-CTS
StandardDataSets/collada/library_effects/effect/profile_COMMON/technique/lambert/transparent/effect_lambert_transparent_texture_aone/effect_lambert_transparent_texture_aone.py
2
4099
# Copyright (c) 2012 The Khronos Group Inc. # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to # the following conditions: # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Materials. # THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. # See Core.Logic.FJudgementContext for the information # of the 'context' parameter. # This sample judging object does the following: # # JudgeBaseline: just verifies that the standard steps did not crash. # JudgeSuperior: also verifies that the validation steps are not in error. # JudgeExemplary: same as intermediate badge. # We import an assistant script that includes the common verifications # methods. The assistant buffers its checks, so that running them again # does not incurs an unnecessary performance hint. 
from StandardDataSets.scripts import JudgeAssistant # Please feed your node list here: tagLst = ['library_effects', 'effect', 'profile_COMMON', 'technique', 'lambert'] attrName = '' attrVal = '' dataToCheck = '' class SimpleJudgingObject: def __init__(self, _tagLst, _attrName, _attrVal, _data): self.tagList = _tagLst self.attrName = _attrName self.attrVal = _attrVal self.dataToCheck = _data self.status_baseline = False self.status_superior = False self.status_exemplary = False self.__assistant = JudgeAssistant.JudgeAssistant() def JudgeBaseline(self, context): # No step should not crash self.__assistant.CheckCrashes(context) # Import/export/validate must exist and pass, while Render must only exist. self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"], ["Render"]) if (self.__assistant.GetResults() == False): self.status_baseline = False return False # Compare the rendered images between import and export # Then compare images against reference test # Last, check for preservation of element if ( self.__assistant.CompareRenderedImages(context) ): if ( self.__assistant.CompareImagesAgainst(context, "effect_lambert_transparent_a_one") ): self.__assistant.ElementPreserved(context, self.tagList) self.status_baseline = self.__assistant.DeferJudgement(context) return self.status_baseline # To pass intermediate you need to pass basic, this object could also include additional # tests that were specific to the intermediate badge. def JudgeSuperior(self, context): self.status_superior = self.status_baseline return self.status_superior # To pass advanced you need to pass intermediate, this object could also include additional # tests that were specific to the advanced badge def JudgeExemplary(self, context): self.status_exemplary = self.status_superior return self.status_exemplary # This is where all the work occurs: "judgingObject" is an absolutely necessary token. # The dynamic loader looks very specifically for a class instance named "judgingObject". 
# judgingObject = SimpleJudgingObject(tagLst, attrName, attrVal, dataToCheck);
mit
masia02/chainer
tests/cupy_tests/manipulation_tests/test_split.py
15
1695
import unittest

from cupy import testing


@testing.gpu
class TestSplit(unittest.TestCase):
    """Round-trip checks for cupy's split family against numpy.

    Each test splits an arange-shaped array, re-joins the pieces with the
    matching stack/concatenate call, and relies on the
    ``numpy_cupy_array_equal`` decorator to compare both backends.
    """

    _multiprocess_can_split_ = True

    @testing.numpy_cupy_array_equal()
    def test_array_spliti1(self, xp):
        # Uneven split along axis 1, re-joined along axis 1.
        arr = testing.shaped_arange((3, 11), xp)
        pieces = xp.array_split(arr, 4, 1)
        return xp.concatenate(pieces, 1)

    @testing.numpy_cupy_array_equal()
    def test_array_spliti2(self, xp):
        # Same split, but re-joined using the negative-axis spelling.
        arr = testing.shaped_arange((3, 11), xp)
        pieces = xp.array_split(arr, 4, 1)
        return xp.concatenate(pieces, -1)

    @testing.numpy_cupy_array_equal()
    def test_dsplit(self, xp):
        # Depth-wise split of a 3-D array, restored with dstack.
        arr = testing.shaped_arange((3, 3, 12), xp)
        pieces = xp.dsplit(arr, 4)
        return xp.dstack(pieces)

    @testing.numpy_cupy_array_equal()
    def test_hsplit_vectors(self, xp):
        # hsplit on a 1-D array splits along its only axis.
        vec = testing.shaped_arange((12,), xp)
        pieces = xp.hsplit(vec, 4)
        return xp.hstack(pieces)

    @testing.numpy_cupy_array_equal()
    def test_hsplit(self, xp):
        arr = testing.shaped_arange((3, 12), xp)
        pieces = xp.hsplit(arr, 4)
        return xp.hstack(pieces)

    @testing.numpy_cupy_array_equal()
    def test_split_by_sections1(self, xp):
        # Split at explicit indices along axis 1.
        arr = testing.shaped_arange((3, 11), xp)
        pieces = xp.split(arr, (2, 4, 9), 1)
        return xp.concatenate(pieces, 1)

    @testing.numpy_cupy_array_equal()
    def test_split_by_sections2(self, xp):
        arr = testing.shaped_arange((3, 11), xp)
        pieces = xp.split(arr, (2, 4, 9), 1)
        return xp.concatenate(pieces, -1)

    @testing.numpy_cupy_array_equal()
    def test_vsplit(self, xp):
        arr = testing.shaped_arange((12, 3), xp)
        pieces = xp.vsplit(arr, 4)
        return xp.vstack(pieces)
mit
daevaorn/sentry
src/sentry/quotas/redis.py
1
2795
""" sentry.quotas.redis ~~~~~~~~~~~~~~~~~~~ :copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from __future__ import absolute_import from time import time from sentry.exceptions import InvalidConfiguration from sentry.quotas.base import NotRateLimited, Quota, RateLimited from sentry.utils.redis import get_cluster_from_options, load_script is_rate_limited = load_script('quotas/is_rate_limited.lua') class RedisQuota(Quota): #: The ``grace`` period allows accomodating for clock drift in TTL #: calculation since the clock on the Redis instance used to store quota #: metrics may not be in sync with the computer running this code. grace = 60 def __init__(self, **options): self.cluster, options = get_cluster_from_options(self, options) super(RedisQuota, self).__init__(**options) self.namespace = 'quota' def validate(self): try: with self.cluster.all() as client: client.ping() except Exception as e: raise InvalidConfiguration(unicode(e)) def get_quotas(self, project): return ( ('p:{}'.format(project.id), self.get_project_quota(project), 60), ('o:{}'.format(project.organization.id), self.get_organization_quota(project.organization), 60), ) def get_redis_key(self, key, timestamp, interval): return '{}:{}:{}'.format(self.namespace, key, int(timestamp // interval)) def is_rate_limited(self, project): timestamp = time() quotas = filter( lambda (key, limit, interval): limit and limit > 0, # a zero limit means "no limit", not "reject all" self.get_quotas(project), ) # If there are no quotas to actually check, skip the trip to the database. 
if not quotas: return NotRateLimited def get_next_period_start(interval): """Return the timestamp when the next rate limit period begins for an interval.""" return ((timestamp // interval) + 1) * interval keys = [] args = [] for key, limit, interval in quotas: keys.append(self.get_redis_key(key, timestamp, interval)) expiry = get_next_period_start(interval) + self.grace args.extend((limit, int(expiry))) client = self.cluster.get_local_client_for_key(str(project.organization.pk)) rejections = is_rate_limited(client, keys, args) if any(rejections): delay = max(get_next_period_start(interval) - timestamp for (key, limit, interval), rejected in zip(quotas, rejections) if rejected) return RateLimited(retry_after=delay) else: return NotRateLimited
bsd-3-clause
Kronuz/pyScss
scss/extension/extra.py
5
16464
"""Functions new to the pyScss library.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import logging import os.path import random import six from six.moves import xrange from scss import config from scss.errors import SassMissingDependency from scss.extension import Extension from scss.namespace import Namespace from scss.types import Color, Number, String, List from scss.util import escape, make_data_url, make_filename_hash try: from PIL import Image, ImageDraw except ImportError: try: import Image import ImageDraw except ImportError: Image = None ImageDraw = None log = logging.getLogger(__name__) class ExtraExtension(Extension): """Extra functions unique to the pyScss library.""" name = 'extra' namespace = Namespace() # Alias to make the below declarations less noisy ns = ExtraExtension.namespace # ------------------------------------------------------------------------------ # Image stuff def _image_noise(pixdata, size, density=None, intensity=None, color=None, opacity=None, monochrome=None, background=None): if not density: density = [0.8] elif not isinstance(density, (tuple, list)): density = [density] if not intensity: intensity = [0.5] elif not isinstance(intensity, (tuple, list)): intensity = [intensity] if not color: color = [(0, 0, 0, 0)] elif not isinstance(color, (tuple, list)) or not isinstance(color[0], (tuple, list)): color = [color] if not opacity: opacity = [0.2] elif not isinstance(opacity, (tuple, list)): opacity = [opacity] if not monochrome: monochrome = [False] elif not isinstance(monochrome, (tuple, list)): monochrome = [monochrome] pixels = {} if background: for y in xrange(size): for x in xrange(size): ca = float(background[3]) pixels[(x, y)] = (background[0] * ca, background[1] * ca, background[2] * ca, ca) loops = max(map(len, (density, intensity, color, opacity, monochrome))) for l in range(loops): _density = density[l % 
len(density)] _intensity = intensity[l % len(intensity)] _color = color[l % len(color)] _opacity = opacity[l % len(opacity)] _monochrome = monochrome[l % len(monochrome)] _intensity = 1 - _intensity if _intensity < 0.5: cx = 255 * _intensity cm = cx else: cx = 255 * (1 - _intensity) cm = 255 * _intensity xa = int(cm - cx) xb = int(cm + cx) if xa > 0: xa &= 255 else: xa = 0 if xb > 0: xb &= 255 else: xb = 0 r, g, b, a = _color for i in xrange(int(round(_density * size ** 2))): x = random.randint(1, size) y = random.randint(1, size) cc = random.randint(xa, xb) cr = (cc) * (1 - a) + a * r cg = (cc if _monochrome else random.randint(xa, xb)) * (1 - a) + a * g cb = (cc if _monochrome else random.randint(xa, xb)) * (1 - a) + a * b ca = random.random() * _opacity ica = 1 - ca pos = (x - 1, y - 1) dst = pixels.get(pos, (0, 0, 0, 0)) src = (cr * ca, cg * ca, cb * ca, ca) pixels[pos] = (src[0] + dst[0] * ica, src[1] + dst[1] * ica, src[2] + dst[2] * ica, src[3] + dst[3] * ica) for pos, col in pixels.items(): ca = col[3] if ca: pixdata[pos] = tuple(int(round(c)) for c in (col[0] / ca, col[1] / ca, col[2] / ca, ca * 255)) def _image_brushed(pixdata, size, density=None, intensity=None, color=None, opacity=None, monochrome=None, direction=None, spread=None, background=None): if not density: density = [0.8] elif not isinstance(density, (tuple, list)): density = [density] if not intensity: intensity = [0.5] elif not isinstance(intensity, (tuple, list)): intensity = [intensity] if not color: color = [(0, 0, 0, 0)] elif not isinstance(color, (tuple, list)) or not isinstance(color[0], (tuple, list)): color = [color] if not opacity: opacity = [0.2] elif not isinstance(opacity, (tuple, list)): opacity = [opacity] if not monochrome: monochrome = [False] elif not isinstance(monochrome, (tuple, list)): monochrome = [monochrome] if not direction: direction = [0] elif not isinstance(direction, (tuple, list)): direction = [direction] if not spread: spread = [0] elif not isinstance(spread, 
(tuple, list)): spread = [spread] def ppgen(d): if d is None: return d = d % 4 if d == 0: pp = lambda x, y, o: ((x - o) % size, y) elif d == 1: pp = lambda x, y, o: ((x - o) % size, (y + x - o) % size) elif d == 2: pp = lambda x, y, o: (y, (x - o) % size) else: pp = lambda x, y, o: ((x - o) % size, (y - x - o) % size) return pp pixels = {} if background: for y in xrange(size): for x in xrange(size): ca = float(background[3]) pixels[(x, y)] = (background[0] * ca, background[1] * ca, background[2] * ca, ca) loops = max(map(len, (density, intensity, color, opacity, monochrome, direction, spread))) for l in range(loops): _density = density[l % len(density)] _intensity = intensity[l % len(intensity)] _color = color[l % len(color)] _opacity = opacity[l % len(opacity)] _monochrome = monochrome[l % len(monochrome)] _direction = direction[l % len(direction)] _spread = spread[l % len(spread)] _intensity = 1 - _intensity if _intensity < 0.5: cx = 255 * _intensity cm = cx else: cx = 255 * (1 - _intensity) cm = 255 * _intensity xa = int(cm - cx) xb = int(cm + cx) if xa > 0: xa &= 255 else: xa = 0 if xb > 0: xb &= 255 else: xb = 0 r, g, b, a = _color pp = ppgen(_direction) if pp: for y in xrange(size): if _spread and (y + (l % 2)) % _spread: continue o = random.randint(1, size) cc = random.randint(xa, xb) cr = (cc) * (1 - a) + a * r cg = (cc if _monochrome else random.randint(xa, xb)) * (1 - a) + a * g cb = (cc if _monochrome else random.randint(xa, xb)) * (1 - a) + a * b da = random.randint(0, 255) * _opacity ip = round((size / 2.0 * _density) / int(1 / _density)) iq = round((size / 2.0 * (1 - _density)) / int(1 / _density)) if ip: i = da / ip aa = 0 else: i = 0 aa = da d = 0 p = ip for x in xrange(size): if d == 0: if p > 0: p -= 1 aa += i else: d = 1 q = iq elif d == 1: if q > 0: q -= 1 else: d = 2 p = ip elif d == 2: if p > 0: p -= 1 aa -= i else: d = 3 q = iq elif d == 3: if q > 0: q -= 1 else: d = 0 p = ip if aa > 0: ca = aa / 255.0 else: ca = 0.0 ica = 1 - ca pos = pp(x, 
y, o) dst = pixels.get(pos, (0, 0, 0, 0)) src = (cr * ca, cg * ca, cb * ca, ca) pixels[pos] = (src[0] + dst[0] * ica, src[1] + dst[1] * ica, src[2] + dst[2] * ica, src[3] + dst[3] * ica) for pos, col in pixels.items(): ca = col[3] if ca: pixdata[pos] = tuple(int(round(c)) for c in (col[0] / ca, col[1] / ca, col[2] / ca, ca * 255)) @ns.declare def background_noise(density=None, opacity=None, size=None, monochrome=False, intensity=(), color=None, background=None, inline=False): if not Image: raise SassMissingDependency('PIL', 'image manipulation') density = [Number(v).value for v in List.from_maybe(density)] intensity = [Number(v).value for v in List.from_maybe(intensity)] color = [Color(v).value for v in List.from_maybe(color) if v] opacity = [Number(v).value for v in List.from_maybe(opacity)] size = int(Number(size).value) if size else 0 if size < 1 or size > 512: size = 200 monochrome = bool(monochrome) background = Color(background).value if background else None new_image = Image.new( mode='RGBA', size=(size, size) ) pixdata = new_image.load() _image_noise(pixdata, size, density, intensity, color, opacity, monochrome) if not inline: key = (size, density, intensity, color, opacity, monochrome) asset_file = 'noise-%s%sx%s' % ('mono-' if monochrome else '', size, size) # asset_file += '-[%s][%s]' % ('-'.join(to_str(s).replace('.', '_') for s in density or []), '-'.join(to_str(s).replace('.', '_') for s in opacity or [])) asset_file += '-' + make_filename_hash(key) asset_file += '.png' asset_path = os.path.join(config.ASSETS_ROOT or os.path.join(config.STATIC_ROOT, 'assets'), asset_file) try: new_image.save(asset_path) except IOError: log.exception("Error while saving image") inline = True # Retry inline version url = '%s%s' % (config.ASSETS_URL, asset_file) if inline: output = six.BytesIO() new_image.save(output, format='PNG') contents = output.getvalue() output.close() url = make_data_url('image/png', contents) inline = 'url("%s")' % escape(url) return 
String.unquoted(inline) @ns.declare def background_brushed(density=None, intensity=None, color=None, opacity=None, size=None, monochrome=False, direction=(), spread=(), background=None, inline=False): if not Image: raise SassMissingDependency('PIL', 'image manipulation') density = [Number(v).value for v in List.from_maybe(density)] intensity = [Number(v).value for v in List.from_maybe(intensity)] color = [Color(v).value for v in List.from_maybe(color) if v] opacity = [Number(v).value for v in List.from_maybe(opacity)] size = int(Number(size).value) if size else -1 if size < 0 or size > 512: size = 200 monochrome = bool(monochrome) direction = [Number(v).value for v in List.from_maybe(direction)] spread = [Number(v).value for v in List.from_maybe(spread)] background = Color(background).value if background else None new_image = Image.new( mode='RGBA', size=(size, size) ) pixdata = new_image.load() _image_brushed(pixdata, size, density, intensity, color, opacity, monochrome, direction, spread, background) if not inline: key = (size, density, intensity, color, opacity, monochrome, direction, spread, background) asset_file = 'brushed-%s%sx%s' % ('mono-' if monochrome else '', size, size) # asset_file += '-[%s][%s][%s]' % ('-'.join(to_str(s).replace('.', '_') for s in density or []), '-'.join(to_str(s).replace('.', '_') for s in opacity or []), '-'.join(to_str(s).replace('.', '_') for s in direction or [])) asset_file += '-' + make_filename_hash(key) asset_file += '.png' asset_path = os.path.join(config.ASSETS_ROOT or os.path.join(config.STATIC_ROOT, 'assets'), asset_file) try: new_image.save(asset_path) except IOError: log.exception("Error while saving image") inline = True # Retry inline version url = '%s%s' % (config.ASSETS_URL, asset_file) if inline: output = six.BytesIO() new_image.save(output, format='PNG') contents = output.getvalue() output.close() url = make_data_url('image/png', contents) inline = 'url("%s")' % escape(url) return String.unquoted(inline) 
@ns.declare def grid_image(left_gutter, width, right_gutter, height, columns=1, grid_color=None, baseline_color=None, background_color=None, inline=False): if not Image: raise SassMissingDependency('PIL', 'image manipulation') if grid_color is None: grid_color = (120, 170, 250, 15) else: c = Color(grid_color).value grid_color = (c[0], c[1], c[2], int(c[3] * 255.0)) if baseline_color is None: baseline_color = (120, 170, 250, 30) else: c = Color(baseline_color).value baseline_color = (c[0], c[1], c[2], int(c[3] * 255.0)) if background_color is None: background_color = (0, 0, 0, 0) else: c = Color(background_color).value background_color = (c[0], c[1], c[2], int(c[3] * 255.0)) _height = int(height) if height >= 1 else int(height * 1000.0) _width = int(width) if width >= 1 else int(width * 1000.0) _left_gutter = int(left_gutter) if left_gutter >= 1 else int(left_gutter * 1000.0) _right_gutter = int(right_gutter) if right_gutter >= 1 else int(right_gutter * 1000.0) if _height <= 0 or _width <= 0 or _left_gutter <= 0 or _right_gutter <= 0: raise ValueError _full_width = (_left_gutter + _width + _right_gutter) new_image = Image.new( mode='RGBA', size=(_full_width * int(columns), _height), color=background_color ) draw = ImageDraw.Draw(new_image) for i in range(int(columns)): draw.rectangle((i * _full_width + _left_gutter, 0, i * _full_width + _left_gutter + _width - 1, _height - 1), fill=grid_color) if _height > 1: draw.rectangle((0, _height - 1, _full_width * int(columns) - 1, _height - 1), fill=baseline_color) if not inline: grid_name = 'grid_' if left_gutter: grid_name += str(int(left_gutter)) + '+' grid_name += str(int(width)) if right_gutter: grid_name += '+' + str(int(right_gutter)) if height and height > 1: grid_name += 'x' + str(int(height)) key = (columns, grid_color, baseline_color, background_color) key = grid_name + '-' + make_filename_hash(key) asset_file = key + '.png' asset_path = os.path.join(config.ASSETS_ROOT or os.path.join(config.STATIC_ROOT, 
'assets'), asset_file) try: new_image.save(asset_path) except IOError: log.exception("Error while saving image") inline = True # Retry inline version url = '%s%s' % (config.ASSETS_URL, asset_file) if inline: output = six.BytesIO() new_image.save(output, format='PNG') contents = output.getvalue() output.close() url = make_data_url('image/png', contents) inline = 'url("%s")' % escape(url) return String.unquoted(inline) @ns.declare def image_color(color, width=1, height=1): if not Image: raise SassMissingDependency('PIL', 'image manipulation') w = int(Number(width).value) h = int(Number(height).value) if w <= 0 or h <= 0: raise ValueError new_image = Image.new( mode='RGB' if color.alpha == 1 else 'RGBA', size=(w, h), color=color.rgba255, ) output = six.BytesIO() new_image.save(output, format='PNG') contents = output.getvalue() output.close() url = make_data_url('image/png', contents) inline = 'url("%s")' % escape(url) return String.unquoted(inline)
mit
HiroIshikawa/21playground
microblog/flask/lib/python3.5/site-packages/pbr/pbr_json.py
41
1228
# Copyright 2011 OpenStack LLC.
# Copyright 2012-2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import json

from pbr import git


def write_pbr_json(cmd, basename, filename):
    """Write git-derived build metadata as JSON via the distutils command.

    Skips entirely when the distribution does not opt into pbr or when no
    git checkout is detectable; otherwise records the short SHA and the
    release flag (only when a SHA is available) into ``filename``.
    """
    # Only act for distributions that explicitly enable pbr.
    if not getattr(cmd.distribution, 'pbr', None):
        return
    git_dir = git._run_git_functions()
    if not git_dir:
        # Not building from a git checkout; nothing to record.
        return
    sha = git.get_git_short_sha(git_dir)
    released = git.get_is_release(git_dir)
    metadata = {}
    if sha is not None:
        metadata['git_version'] = sha
        metadata['is_release'] = released
    cmd.write_file('pbr', filename, json.dumps(metadata))
mit
iblacksand/SimpleDiveMeets
node_modules/dmg-builder/vendor/ds_store/store.py
11
45209
# -*- coding: utf-8 -*- from __future__ import unicode_literals from __future__ import print_function from __future__ import division import binascii import struct import biplist import mac_alias try: next except NameError: next = lambda x: x.next() try: unicode except NameError: unicode = str from . import buddy class ILocCodec(object): @staticmethod def encode(point): return struct.pack(b'>IIII', point[0], point[1], 0xffffffff, 0xffff0000) @staticmethod def decode(bytesData): if isinstance(bytesData, bytearray): x, y = struct.unpack_from(b'>II', bytes(bytesData[:8])) else: x, y = struct.unpack(b'>II', bytesData[:8]) return (x, y) class PlistCodec(object): @staticmethod def encode(plist): return biplist.writePlistToString(plist) @staticmethod def decode(bytes): return biplist.readPlistFromString(bytes) class BookmarkCodec(object): @staticmethod def encode(bmk): return bmk.to_bytes() @staticmethod def decode(bytes): return mac_alias.Bookmark.from_bytes(bytes) # This list tells the code how to decode particular kinds of entry in the # .DS_Store file. This is really a convenience, and we currently only # support a tiny subset of the possible entry types. codecs = { b'Iloc': ILocCodec, b'bwsp': PlistCodec, b'lsvp': PlistCodec, b'lsvP': PlistCodec, b'icvp': PlistCodec, b'pBBk': BookmarkCodec } class DSStoreEntry(object): """Holds the data from an entry in a ``.DS_Store`` file. Note that this is not meant to represent the entry itself---i.e. if you change the type or value, your changes will *not* be reflected in the underlying file. If you want to make a change, you should either use the :class:`DSStore` object's :meth:`DSStore.insert` method (which will replace a key if it already exists), or the mapping access mode for :class:`DSStore` (often simpler anyway). 
""" def __init__(self, filename, code, typecode, value=None): if str != bytes and type(filename) == bytes: filename = filename.decode('utf-8') if not isinstance(code, bytes): code = code.encode('latin_1') self.filename = filename self.code = code self.type = typecode self.value = value @classmethod def read(cls, block): """Read a ``.DS_Store`` entry from the containing Block""" # First read the filename nlen = block.read(b'>I')[0] filename = block.read(2 * nlen).decode('utf-16be') # Next, read the code and type code, typecode = block.read(b'>4s4s') # Finally, read the data if typecode == b'bool': value = block.read(b'>?')[0] elif typecode == b'long' or typecode == b'shor': value = block.read(b'>I')[0] elif typecode == b'blob': vlen = block.read(b'>I')[0] value = block.read(vlen) codec = codecs.get(code, None) if codec: value = codec.decode(value) typecode = codec elif typecode == b'ustr': vlen = block.read(b'>I')[0] value = block.read(2 * vlen).decode('utf-16be') elif typecode == b'type': value = block.read(b'>4s')[0] elif typecode == b'comp' or typecode == b'dutc': value = block.read(b'>Q')[0] else: raise ValueError('Unknown type code "%s"' % typecode) return DSStoreEntry(filename, code, typecode, value) def __lt__(self, other): if not isinstance(other, DSStoreEntry): raise TypeError('Can only compare against other DSStoreEntry objects') sfl = self.filename.lower() ofl = other.filename.lower() return (sfl < ofl or (self.filename == other.filename and self.code < other.code)) def __le__(self, other): if not isinstance(other, DSStoreEntry): raise TypeError('Can only compare against other DSStoreEntry objects') sfl = self.filename.lower() ofl = other.filename.lower() return (sfl < ofl or (sfl == ofl and self.code <= other.code)) def __eq__(self, other): if not isinstance(other, DSStoreEntry): raise TypeError('Can only compare against other DSStoreEntry objects') sfl = self.filename.lower() ofl = other.filename.lower() return (sfl == ofl and self.code == other.code) 
def __ne__(self, other): if not isinstance(other, DSStoreEntry): raise TypeError('Can only compare against other DSStoreEntry objects') sfl = self.filename.lower() ofl = other.filename.lower() return (sfl != ofl or self.code != other.code) def __gt__(self, other): if not isinstance(other, DSStoreEntry): raise TypeError('Can only compare against other DSStoreEntry objects') sfl = self.filename.lower() ofl = other.filename.lower() selfCode = self.code if str != bytes and type(selfCode) is bytes: selfCode = selfCode.decode('utf-8') otherCode = other.code if str != bytes and type(otherCode) is bytes: otherCode = otherCode.decode('utf-8') return (sfl > ofl or (sfl == ofl and selfCode > otherCode)) def __ge__(self, other): if not isinstance(other, DSStoreEntry): raise TypeError('Can only compare against other DSStoreEntry objects') sfl = self.filename.lower() ofl = other.filename.lower() return (sfl > ofl or (sfl == ofl and self.code >= other.code)) def __cmp__(self, other): if not isinstance(other, DSStoreEntry): raise TypeError('Can only compare against other DSStoreEntry objects') r = cmp(self.filename.lower(), other.filename.lower()) if r: return r return cmp(self.code, other.code) def byte_length(self): """Compute the length of this entry, in bytes""" utf16 = self.filename.encode('utf-16be') l = 4 + len(utf16) + 8 if isinstance(self.type, unicode): entry_type = self.type.encode('latin_1') value = self.value elif isinstance(self.type, (bytes, str)): entry_type = self.type value = self.value else: entry_type = b'blob' value = self.type.encode(self.value) if entry_type == b'bool': l += 1 elif entry_type == b'long' or entry_type == b'shor': l += 4 elif entry_type == b'blob': l += 4 + len(value) elif entry_type == b'ustr': utf16 = value.encode('utf-16be') l += 4 + len(utf16) elif entry_type == b'type': l += 4 elif entry_type == b'comp' or entry_type == b'dutc': l += 8 else: raise ValueError('Unknown type code "%s"' % entry_type) return l def write(self, block, 
insert=False): """Write this entry to the specified Block""" if insert: w = block.insert else: w = block.write if isinstance(self.type, unicode): entry_type = self.type.encode('latin_1') value = self.value elif isinstance(self.type, (bytes, str)): entry_type = self.type value = self.value else: entry_type = b'blob' value = self.type.encode(self.value) utf16 = self.filename.encode('utf-16be') w(b'>I', len(utf16) // 2) w(utf16) w(b'>4s4s', self.code, entry_type) if entry_type == b'bool': w(b'>?', value) elif entry_type == b'long' or entry_type == b'shor': w(b'>I', value) elif entry_type == b'blob': w(b'>I', len(value)) w(value) elif entry_type == b'ustr': utf16 = value.encode('utf-16be') w(b'>I', len(utf16) // 2) w(utf16) elif entry_type == b'type': if isinstance(value, unicode): value = value.encode('latin_1') w(b'>4s', value) elif entry_type == b'comp' or entry_type == b'dutc': w(b'>Q', value) else: raise ValueError('Unknown type code "%s"' % entry_type) def __repr__(self): return '<%s %s>' % (self.filename, self.code) class DSStore(object): """Python interface to a ``.DS_Store`` file. Works by manipulating the file on the disk---so this code will work with ``.DS_Store`` files for *very* large directories. A :class:`DSStore` object can be used as if it was a mapping, e.g.:: d['foobar.dat']['Iloc'] will fetch the "Iloc" record for "foobar.dat", or raise :class:`KeyError` if there is no such record. If used in this manner, the :class:`DSStore` object will return (type, value) tuples, unless the type is "blob" and the module knows how to decode it. Currently, we know how to decode "Iloc", "bwsp", "lsvp", "lsvP" and "icvp" blobs. "Iloc" decodes to an (x, y) tuple, while the others are all decoded using ``biplist``. 
Assignment also works, e.g.:: d['foobar.dat']['note'] = ('ustr', u'Hello World!') as does deletion with ``del``:: del d['foobar.dat']['note'] This is usually going to be the most convenient interface, though occasionally (for instance when creating a new ``.DS_Store`` file) you may wish to drop down to using :class:`DSStoreEntry` objects directly.""" def __init__(self, store): self._store = store self._superblk = self._store['DSDB'] with self._get_block(self._superblk) as s: self._rootnode, self._levels, self._records, \ self._nodes, self._page_size = s.read(b'>IIIII') self._min_usage = 2 * self._page_size // 3 self._dirty = False @classmethod def open(cls, file_or_name, mode='r+', initial_entries=None): """Open a ``.DS_Store`` file; pass either a Python file object, or a filename in the ``file_or_name`` argument and a file access mode in the ``mode`` argument. If you are creating a new file using the "w" or "w+" modes, you may also specify a list of entries with which to initialise the file.""" store = buddy.Allocator.open(file_or_name, mode) if mode == 'w' or mode == 'w+': superblk = store.allocate(20) store['DSDB'] = superblk page_size = 4096 if not initial_entries: root = store.allocate(page_size) with store.get_block(root) as rootblk: rootblk.zero_fill() with store.get_block(superblk) as s: s.write(b'>IIIII', root, 0, 0, 1, page_size) else: # Make sure they're in sorted order initial_entries = list(initial_entries) initial_entries.sort() # Construct the tree current_level = initial_entries next_level = [] levels = [] ptr_size = 0 node_count = 0 while True: total = 8 nodes = [] node = [] for e in current_level: new_total = total + ptr_size + e.byte_length() if new_total > page_size: nodes.append(node) next_level.append(e) total = 8 node = [] else: total = new_total node.append(e) if node: nodes.append(node) node_count += len(nodes) levels.append(nodes) if len(nodes) == 1: break current_level = next_level next_level = [] ptr_size = 4 # Allocate nodes ptrs = 
[store.allocate(page_size) for n in range(node_count)] # Generate nodes pointers = [] prev_pointers = None for level in levels: ppndx = 0 lptrs = ptrs[-len(level):] del ptrs[-len(level):] for node in level: ndx = lptrs.pop(0) if prev_pointers is None: with store.get_block(ndx) as block: block.write(b'>II', 0, len(node)) for e in node: e.write(block) else: next_node = prev_pointers[ppndx + len(node)] node_ptrs = prev_pointers[ppndx:ppndx+len(node)] with store.get_block(ndx) as block: block.write(b'>II', next_node, len(node)) for ptr, e in zip(node_ptrs, node): block.write(b'>I', ptr) e.write(block) pointers.append(ndx) prev_pointers = pointers pointers = [] root = prev_pointers[0] with store.get_block(superblk) as s: s.write(b'>IIIII', root, len(levels), len(initial_entries), node_count, page_size) return DSStore(store) def _get_block(self, number): return self._store.get_block(number) def flush(self): """Flush any dirty data back to the file.""" if self._dirty: self._dirty = False with self._get_block(self._superblk) as s: s.write(b'>IIIII', self._rootnode, self._levels, self._records, self._nodes, self._page_size) self._store.flush() def close(self): """Flush dirty data and close the underlying file.""" self.flush() self._store.close() def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.close() # Internal B-Tree nodes look like this: # # [ next | count | (ptr0 | rec0) | (ptr1 | rec1) ... (ptrN | recN) ] # Leaf nodes look like this: # # [ 0 | count | rec0 | rec1 ... 
recN ] # Iterate over the tree, starting at `node' def _traverse(self, node): if node is None: node = self._rootnode with self._get_block(node) as block: next_node, count = block.read(b'>II') if next_node: for n in range(count): ptr = block.read(b'>I')[0] for e in self._traverse(ptr): yield e e = DSStoreEntry.read(block) yield e for e in self._traverse(next_node): yield e else: for n in range(count): e = DSStoreEntry.read(block) yield e # Display the data in `node' def _dump_node(self, node): with self._get_block(node) as block: next_node, count = block.read(b'>II') print('next: %u\ncount: %u\n' % (next_node, count)) for n in range(count): if next_node: ptr = block.read(b'>I')[0] print('%8u ' % ptr, end=' ') else: print(' ', end=' ') e = DSStoreEntry.read(block) print(e, ' (%u)' % e.byte_length()) print('used: %u' % block.tell()) # Display the data in the super block def _dump_super(self): print('root: %u\nlevels: %u\nrecords: %u\nnodes: %u\npage-size: %u' \ % (self._rootnode, self._levels, self._records, self._nodes, self._page_size)) # Splits entries across two blocks, returning one pivot # # Tries to balance the block usage across the two as best it can def _split2(self, blocks, entries, pointers, before, internal): left_block = blocks[0] right_block = blocks[1] count = len(entries) # Find the feasible splits best_split = None best_diff = None total = before[count] if 8 + total <= self._page_size: # We can use a *single* node for this best_split = count else: # Split into two nodes for n in range(1, count - 1): left_size = 8 + before[n] right_size = 8 + total - before[n + 1] if left_size > self._page_size: break if right_size > self._page_size: continue diff = abs(left_size - right_size) if best_split is None or diff < best_diff: best_split = n best_diff = diff if best_split is None: return None # Write the nodes left_block.seek(0) if internal: next_node = pointers[best_split] else: next_node = 0 left_block.write(b'>II', next_node, best_split) for n in 
range(best_split): if internal: left_block.write(b'>I', pointers[n]) entries[n].write(left_block) left_block.zero_fill() if best_split == count: return [] right_block.seek(0) if internal: next_node = pointers[count] else: next_node = 0 right_block.write(b'>II', next_node, count - best_split - 1) for n in range(best_split + 1, count): if internal: right_block.write(b'>I', pointers[n]) entries[n].write(right_block) right_block.zero_fill() pivot = entries[best_split] return [pivot] def _split(self, node, entry, right_ptr=0): self._nodes += 1 self._dirty = True new_right = self._store.allocate(self._page_size) with self._get_block(node) as block, \ self._get_block(new_right) as right_block: # First, measure and extract all the elements entry_size = entry.byte_length() entry_pos = None next_node, count = block.read(b'>II') if next_node: entry_size += 4 pointers = [] entries = [] before = [] total = 0 for n in range(count): pos = block.tell() if next_node: ptr = block.read(b'>I')[0] pointers.append(ptr) e = DSStoreEntry.read(block) if e > entry: entry_pos = n entries.append(entry) pointers.append(right_ptr) before.append(total) total += entry_size entries.append(e) before.append(total) total += block.tell() - pos before.append(total) if next_node: pointers.append(next_node) pivot = self._split2([block, right_block], entries, pointers, before, bool(next_node))[0] self._records += 1 self._nodes += 1 self._dirty = True return (pivot, new_right) # Allocate a new root node containing the element `pivot' and the pointers # `left' and `right' def _new_root(self, left, pivot, right): new_root = self._store.allocate(self._page_size) with self._get_block(new_root) as block: block.write(b'>III', right, 1, left) pivot.write(block) self._rootnode = new_root self._levels += 1 self._nodes += 1 self._dirty = True # Insert an entry into an inner node; `path' is the path from the root # to `node', not including `node' itself. 
`right_ptr' is the new node # pointer (inserted to the RIGHT of `entry') def _insert_inner(self, path, node, entry, right_ptr): with self._get_block(node) as block: next_node, count = block.read(b'>II') insert_pos = None insert_ndx = None n = 0 while n < count: pos = block.tell() ptr = block.read(b'>I')[0] e = DSStoreEntry.read(block) if e == entry: if n == count - 1: right_ptr = next_node next_node = ptr block_seek(pos) else: right_ptr = block.read(b'>I')[0] block.seek(pos + 4) insert_pos = pos insert_ndx = n block.delete(e.byte_length() + 4) count -= 1 self._records += 1 self._dirty = True continue elif insert_pos is None and e > entry: insert_pos = pos insert_ndx = n n += 1 if insert_pos is None: insert_pos = block.tell() insert_ndx = count remaining = self._page_size - block.tell() if remaining < entry.byte_length() + 4: pivot, new_right = self._split(node, entry, right_ptr) if path: self._insert_inner(path[:-1], path[-1], pivot, new_right) else: self._new_root(node, pivot, new_right) else: if insert_ndx == count: block.seek(insert_pos) block.write(b'>I', next_node) entry.write(block) next_node = right_ptr else: block.seek(insert_pos + 4) entry.write(block, True) block.insert('>I', right_ptr) block.seek(0) count += 1 block.write(b'>II', next_node, count) self._records += 1 self._dirty = True # Insert `entry' into the leaf node `node' def _insert_leaf(self, path, node, entry): with self._get_block(node) as block: next_node, count = block.read(b'>II') insert_pos = None insert_ndx = None n = 0 while n < count: pos = block.tell() e = DSStoreEntry.read(block) if e == entry: insert_pos = pos insert_ndx = n block.seek(pos) block.delete(e.byte_length()) count -= 1 self._records += 1 self._dirty = True continue elif insert_pos is None and e > entry: insert_pos = pos insert_ndx = n n += 1 if insert_pos is None: insert_pos = block.tell() insert_ndx = count remaining = self._page_size - block.tell() if remaining < entry.byte_length(): pivot, new_right = self._split(node, 
entry) if path: self._insert_inner(path[:-1], path[-1], pivot, new_right) else: self._new_root(node, pivot, new_right) else: block.seek(insert_pos) entry.write(block, True) block.seek(0) count += 1 block.write(b'>II', next_node, count) self._records += 1 self._dirty = True def insert(self, entry): """Insert ``entry`` (which should be a :class:`DSStoreEntry`) into the B-Tree.""" path = [] node = self._rootnode while True: with self._get_block(node) as block: next_node, count = block.read(b'>II') if next_node: for n in range(count): ptr = block.read(b'>I')[0] e = DSStoreEntry.read(block) if entry < e: next_node = ptr break elif entry == e: # If we find an existing entry the same, replace it self._insert_inner(path, node, entry, None) return path.append(node) node = next_node else: self._insert_leaf(path, node, entry) return # Return usage information for the specified `node' def _block_usage(self, node): with self._get_block(node) as block: next_node, count = block.read(b'>II') for n in range(count): if next_node: ptr = block.read(b'>I')[0] e = DSStoreEntry.read(block) used = block.tell() return (count, used) # Splits entries across three blocks, returning two pivots def _split3(self, blocks, entries, pointers, before, internal): count = len(entries) # Find the feasible splits best_split = None best_diff = None total = before[count] for n in range(1, count - 3): left_size = 8 + before[n] remaining = 16 + total - before[n + 1] if left_size > self._page_size: break if remaining > 2 * self._page_size: continue for m in range(n + 2, count - 1): mid_size = 8 + before[m] - before[n + 1] right_size = 8 + total - before[m + 1] if mid_size > self._page_size: break if right_size > self._page_size: continue diff = abs(left_size - mid_size) * abs(right_size - mid_size) if best_split is None or diff < best_diff: best_split = (n, m, count) best_diff = diff if best_split is None: return None # Write the nodes prev_split = -1 for block, split in zip(blocks, best_split): 
block.seek(0) if internal: next_node = pointers[split] else: next_node = 0 block.write(b'>II', next_node, split) for n in range(prev_split + 1, split): if internal: block.write(b'>I', pointers[n]) entries[n].write(block) block.zero_fill() prev_split = split return (entries[best_split[0]], entries[best_split[1]]) # Extract all of the entries from the specified list of `blocks', # separating them by the specified `pivots'. Also computes the # amount of space used before each entry. def _extract(self, blocks, pivots): pointers = [] entries = [] before = [] total = 0 ppivots = pivots + [None] for b,p in zip(blocks, ppivots): b.seek(0) next_node, count = b.read(b'>II') for n in range(count): pos = b.tell() if next_node: ptr = b.read(b'>I')[0] pointers.append(ptr) e = DSStoreEntry.read(b) entries.append(e) before.append(total) total += b.tell() - pos if next_node: pointers.append(next_node) if p: entries.append(p) before.append(total) total += p.byte_length() if next_node: total += 4 before.append(total) return (entries, pointers, before) # Rebalance the specified `node', whose path from the root is `path'. 
def _rebalance(self, path, node): # Can't rebalance the root if not path: return with self._get_block(node) as block: next_node, count = block.read(b'>II') with self._get_block(path[-1]) as parent: # Find the left and right siblings and respective pivots parent_next, parent_count = parent.read(b'>II') left_pos = None left_node = None left_pivot = None node_pos = None right_pos = None right_node = None right_pivot = None prev_e = prev_ptr = prev_pos = None for n in range(parent_count): pos = parent.tell() ptr = parent.read(b'>I')[0] e = DSStoreEntry.read(parent) if ptr == node: node_pos = pos right_pivot = e left_pos = prev_pos left_pivot = prev_e left_node = prev_ptr elif prev_ptr == node: right_node = ptr right_pos = pos break prev_e = e prev_ptr = ptr prev_pos = pos if parent_next == node: node_pos = parent.tell() left_pos = prev_pos left_pivot = prev_e left_node = prev_ptr elif right_node is None: right_node = parent_next right_pos = parent.tell() parent_used = parent.tell() if left_node and right_node: with self._get_block(left_node) as left, \ self._get_block(right_node) as right: blocks = [left, block, right] pivots = [left_pivot, right_pivot] entries, pointers, before = self._extract(blocks, pivots) # If there's a chance that we could use two pages instead # of three, go for it pivots = self._split2(blocks, entries, pointers, before, bool(next_node)) if pivots is None: ptrs = [left_node, node, right_node] pivots = self._split3(blocks, entries, pointers, before, bool(next_node)) else: if pivots: ptrs = [left_node, node] else: ptrs = [left_node] self._store.release(node) self._nodes -= 1 node = left_node self._store.release(right_node) self._nodes -= 1 self._dirty = True # Remove the pivots from the parent with self._get_block(path[-1]) as parent: if right_node == parent_next: parent.seek(left_pos) parent.delete(right_pos - left_pos) parent_next = left_node else: parent.seek(left_pos + 4) parent.delete(right_pos - left_pos) parent.seek(0) parent_count -= 2 
parent.write(b'>II', parent_next, parent_count) self._records -= 2 # Replace with those in pivots for e,rp in zip(pivots, ptrs[1:]): self._insert_inner(path[:-1], path[-1], e, rp) elif left_node: with self._get_block(left_node) as left: blocks = [left, block] pivots = [left_pivot] entries, pointers, before = self._extract(blocks, pivots) pivots = self._split2(blocks, entries, pointers, before, bool(next_node)) # Remove the pivot from the parent with self._get_block(path[-1]) as parent: if node == parent_next: parent.seek(left_pos) parent.delete(node_pos - left_pos) parent_next = left_node else: parent.seek(left_pos + 4) parent.delete(node_pos - left_pos) parent.seek(0) parent_count -= 1 parent.write(b'>II', parent_next, parent_count) self._records -= 1 # Replace the pivot if pivots: self._insert_inner(path[:-1], path[-1], pivots[0], node) elif right_node: with self._get_block(right_node) as right: blocks = [block, right] pivots = [right_pivot] entries, pointers, before = self._extract(blocks, pivots) pivots = self._split2(blocks, entries, pointers, before, bool(next_node)) # Remove the pivot from the parent with self._get_block(path[-1]) as parent: if right_node == parent_next: parent.seek(pos) parent.delete(right_pos - node_pos) parent_next = node else: parent.seek(pos + 4) parent.delete(right_pos - node_pos) parent.seek(0) parent_count -= 1 parent.write(b'>II', parent_next, parent_count) self._records -= 1 # Replace the pivot if pivots: self._insert_inner(path[:-1], path[-1], pivots[0], right_node) if not path and not parent_count: self._store.release(path[-1]) self._nodes -= 1 self._dirty = True self._rootnode = node else: count, used = self._block_usage(path[-1]) if used < self._page_size // 2: self._rebalance(path[:-1], path[-1]) # Delete from the leaf node `node'. `filename_lc' has already been # lower-cased. 
def _delete_leaf(self, node, filename_lc, code): found = False with self._get_block(node) as block: next_node, count = block.read(b'>II') for n in range(count): pos = block.tell() e = DSStoreEntry.read(block) if e.filename.lower() == filename_lc \ and (code is None or e.code == code): block.seek(pos) block.delete(e.byte_length()) found = True # This does not affect the loop; THIS IS NOT A BUG count -= 1 self._records -= 1 self._dirty = True if found: used = block.tell() block.seek(0) block.write(b'>II', next_node, count) return used < self._page_size // 2 else: return False # Remove the largest entry from the subtree starting at `node' (with # path from root `path'). Returns a tuple (rebalance, entry) where # rebalance is either None if no rebalancing is required, or a # (path, node) tuple giving the details of the node to rebalance. def _take_largest(self, path, node): path = list(path) rebalance = None while True: with self._get_block(node) as block: next_node, count = block.read(b'>II') if next_node: path.append(node) node = next_node continue for n in range(count): pos = block.tell() e = DSStoreEntry.read(block) count -= 1 block.seek(0) block.write(b'>II', next_node, count) if pos < self._page_size // 2: rebalance = (path, node) break return rebalance, e # Delete an entry from an inner node, `node' def _delete_inner(self, path, node, filename_lc, code): rebalance = False with self._get_block(node) as block: next_node, count = block.read(b'>II') for n in range(count): pos = block.tell() ptr = block.read(b'>I')[0] e = DSStoreEntry.read(block) if e.filename.lower() == filename_lc \ and (code is None or e.code == code): # Take the largest from the left subtree rebalance, largest = self._take_largest(path, ptr) # Delete this entry if n == count - 1: right_ptr = next_node next_node = ptr block.seek(pos) else: right_ptr = block.read(b'>I')[0] block.seek(pos + 4) block.delete(e.byte_length() + 4) count -= 1 block.seek(0) block.write(b'>II', next_node, count) 
self._records -= 1 self._dirty = True break # Replace the pivot value self._insert_inner(path, node, largest, right_ptr) # Rebalance from the node we stole from if rebalance: self._rebalance(rebalance[0], rebalance[1]) return True return False def delete(self, filename, code): """Delete an item, identified by ``filename`` and ``code`` from the B-Tree.""" if isinstance(filename, DSStoreEntry): code = filename.code filename = filename.filename # If we're deleting *every* node for "filename", we must recurse if code is None: ###TODO: Fix this so we can do bulk deletes raise ValueError('You must delete items individually. Sorry') # Otherwise, we're deleting *one* specific node filename_lc = filename.lower() path = [] node = self._rootnode while True: with self._get_block(node) as block: next_node, count = block.read(b'>II') if next_node: for n in range(count): ptr = block.read(b'>I')[0] e = DSStoreEntry.read(block) e_lc = e.filename.lower() if filename_lc < e_lc \ or (filename_lc == e_lc and code < e.code): next_node = ptr break elif filename_lc == e_lc and code == e.code: self._delete_inner(path, node, filename_lc, code) return path.append(node) node = next_node else: if self._delete_leaf(node, filename_lc, code): self._rebalance(path, node) return # Find implementation def _find(self, node, filename_lc, code=None): if not isinstance(code, bytes): code = code.encode('latin_1') with self._get_block(node) as block: next_node, count = block.read(b'>II') if next_node: for n in range(count): ptr = block.read(b'>I')[0] e = DSStoreEntry.read(block) if filename_lc < e.filename.lower(): for e in self._find(ptr, filename_lc, code): yield e return elif filename_lc == e.filename.lower(): if code is None or (code and code < e.code): for e in self._find(ptr, filename_lc, code): yield e if code is None or code == e.code: yield e elif code < e.code: return for e in self._find(next_node, filename_lc, code): yield e else: for n in range(count): e = DSStoreEntry.read(block) if 
filename_lc == e.filename.lower(): if code is None or code == e.code: yield e elif code < e.code: return def find(self, filename, code=None): """Returns a generator that will iterate over matching entries in the B-Tree.""" if isinstance(filename, DSStoreEntry): code = filename.code filename = filename.filename filename_lc = filename.lower() return self._find(self._rootnode, filename_lc, code) def __len__(self): return self._records def __iter__(self): return self._traverse(self._rootnode) class Partial(object): """This is used to implement indexing.""" def __init__(self, store, filename): self._store = store self._filename = filename def __getitem__(self, code): if code is None: raise KeyError('no such key - [%s][None]' % self._filename) if not isinstance(code, bytes): code = code.encode('latin_1') try: item = next(self._store.find(self._filename, code)) except StopIteration: raise KeyError('no such key - [%s][%s]' % (self._filename, code)) if not isinstance(item.type, (bytes, str, unicode)): return item.value return (item.type, item.value) def __setitem__(self, code, value): if code is None: raise KeyError('bad key - [%s][None]' % self._filename) if not isinstance(code, bytes): code = code.encode('latin_1') codec = codecs.get(code, None) if codec: entry_type = codec entry_value = value else: entry_type = value[0] entry_value = value[1] self._store.insert(DSStoreEntry(self._filename, code, entry_type, entry_value)) def __delitem__(self, code): if code is None: raise KeyError('no such key - [%s][None]' % self._filename) self._store.delete(self._filename, code) def __iter__(self): for item in self._store.find(self._filename): yield item def __getitem__(self, filename): return self.Partial(self, filename)
gpl-3.0
KWierso/treeherder
treeherder/etl/perf.py
1
9481
import copy import logging from hashlib import sha1 from typing import List import simplejson as json from treeherder.log_parser.utils import validate_perf_data from treeherder.model.models import OptionCollection from treeherder.perf.models import (PerformanceDatum, PerformanceFramework, PerformanceSignature) from treeherder.perf.tasks import generate_alerts logger = logging.getLogger(__name__) def _get_application_name(validated_perf_datum: dict): try: return validated_perf_datum['application']['name'] except KeyError: return None def _get_signature_hash(signature_properties): signature_prop_values = list(signature_properties.keys()) str_values = [] for value in signature_properties.values(): if not isinstance(value, str): str_values.append(json.dumps(value, sort_keys=True)) else: str_values.append(value) signature_prop_values.extend(str_values) sha = sha1() sha.update(''.join(map(str, sorted(signature_prop_values))).encode('utf-8')) return sha.hexdigest() def _order_and_concat(words: List) -> str: return ' '.join(sorted(words)) def _create_or_update_signature(repository, signature_hash, framework, defaults): signature, created = PerformanceSignature.objects.get_or_create( repository=repository, signature_hash=signature_hash, framework=framework, defaults=defaults) if not created: if signature.last_updated > defaults['last_updated']: defaults['last_updated'] = signature.last_updated signature, _ = PerformanceSignature.objects.update_or_create( repository=repository, signature_hash=signature_hash, framework=framework, defaults=defaults) return signature def _load_perf_datum(job, perf_datum): validate_perf_data(perf_datum) extra_properties = {} reference_data = { 'option_collection_hash': job.signature.option_collection_hash, 'machine_platform': job.signature.machine_platform } option_collection = OptionCollection.objects.get( option_collection_hash=job.signature.option_collection_hash) try: framework = PerformanceFramework.objects.get( 
name=perf_datum['framework']['name']) except PerformanceFramework.DoesNotExist: logger.warning("Performance framework %s does not exist, skipping " "load of performance artifacts", perf_datum['framework']['name']) return if not framework.enabled: logger.info("Performance framework %s is not enabled, skipping", perf_datum['framework']['name']) return for suite in perf_datum['suites']: suite_extra_properties = copy.copy(extra_properties) ordered_tags = _order_and_concat(suite.get('tags', [])) suite_extra_options = '' if suite.get('extraOptions'): suite_extra_properties = { 'test_options': sorted(suite['extraOptions']) } suite_extra_options = _order_and_concat(suite['extraOptions']) summary_signature_hash = None # if we have a summary value, create or get its signature by all its subtest # properties. if suite.get('value') is not None: # summary series summary_properties = { 'suite': suite['name'] } summary_properties.update(reference_data) summary_properties.update(suite_extra_properties) summary_signature_hash = _get_signature_hash( summary_properties) signature = _create_or_update_signature( job.repository, summary_signature_hash, framework, { 'test': '', 'suite': suite['name'], 'suite_public_name': suite.get('publicName'), 'option_collection': option_collection, 'platform': job.machine_platform, 'tags': ordered_tags, 'extra_options': suite_extra_options, 'measurement_unit': suite.get('unit'), 'application': _get_application_name(perf_datum), 'lower_is_better': suite.get('lowerIsBetter', True), 'has_subtests': True, # these properties below can be either True, False, or null # (None). Null indicates no preference has been set. 
'should_alert': suite.get('shouldAlert'), 'alert_change_type': PerformanceSignature._get_alert_change_type( suite.get('alertChangeType')), 'alert_threshold': suite.get('alertThreshold'), 'min_back_window': suite.get('minBackWindow'), 'max_back_window': suite.get('maxBackWindow'), 'fore_window': suite.get('foreWindow'), 'last_updated': job.push.time }) (_, datum_created) = PerformanceDatum.objects.get_or_create( repository=job.repository, job=job, push=job.push, signature=signature, push_timestamp=job.push.time, defaults={'value': suite['value']}) if signature.should_alert is not False and datum_created and \ job.repository.performance_alerts_enabled: generate_alerts.apply_async(args=[signature.id], queue='generate_perf_alerts') for subtest in suite['subtests']: subtest_properties = { 'suite': suite['name'], 'test': subtest['name'] } subtest_properties.update(reference_data) subtest_properties.update(suite_extra_properties) summary_signature = None if summary_signature_hash is not None: subtest_properties.update({'parent_signature': summary_signature_hash}) summary_signature = PerformanceSignature.objects.get( repository=job.repository, framework=framework, signature_hash=summary_signature_hash) subtest_signature_hash = _get_signature_hash(subtest_properties) value = list(subtest['value'] for subtest in suite['subtests'] if subtest['name'] == subtest_properties['test']) signature = _create_or_update_signature( job.repository, subtest_signature_hash, framework, { 'test': subtest_properties['test'], 'suite': suite['name'], 'test_public_name': subtest.get('publicName'), 'suite_public_name': suite.get('publicName'), 'option_collection': option_collection, 'platform': job.machine_platform, 'tags': ordered_tags, 'extra_options': suite_extra_options, 'measurement_unit': subtest.get('unit'), 'application': _get_application_name(perf_datum), 'lower_is_better': subtest.get('lowerIsBetter', True), 'has_subtests': False, # these properties below can be either True, False, or # 
null (None). Null indicates no preference has been # set. 'should_alert': subtest.get('shouldAlert'), 'alert_change_type': PerformanceSignature._get_alert_change_type( subtest.get('alertChangeType')), 'alert_threshold': subtest.get('alertThreshold'), 'min_back_window': subtest.get('minBackWindow'), 'max_back_window': subtest.get('maxBackWindow'), 'fore_window': subtest.get('foreWindow'), 'parent_signature': summary_signature, 'last_updated': job.push.time }) (_, datum_created) = PerformanceDatum.objects.get_or_create( repository=job.repository, job=job, push=job.push, signature=signature, push_timestamp=job.push.time, defaults={'value': value[0]}) # by default if there is no summary, we should schedule a # generate alerts task for the subtest, since we have new data # (this can be over-ridden by the optional "should alert" # property) if ((signature.should_alert or (signature.should_alert is None and suite.get('value') is None)) and datum_created and job.repository.performance_alerts_enabled): generate_alerts.apply_async(args=[signature.id], queue='generate_perf_alerts') def store_performance_artifact(job, artifact): blob = json.loads(artifact['blob']) performance_data = blob['performance_data'] if isinstance(performance_data, list): for perfdatum in performance_data: _load_perf_datum(job, perfdatum) else: _load_perf_datum(job, performance_data)
mpl-2.0
dendisuhubdy/tensorflow
tensorflow/python/ops/image_grad_test.py
18
15052
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Python ops defined in image_grad.py.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import constant_op from tensorflow.python.ops import gradient_checker from tensorflow.python.ops import gradients_impl from tensorflow.python.ops import image_ops from tensorflow.python.platform import test class ResizeNearestNeighborOpTest(test.TestCase): TYPES = [np.float32, np.float64] def testShapeIsCorrectAfterOp(self): in_shape = [1, 2, 2, 1] out_shape = [1, 4, 6, 1] for nptype in self.TYPES: x = np.arange(0, 4).reshape(in_shape).astype(nptype) with self.test_session(use_gpu=True) as sess: input_tensor = constant_op.constant(x, shape=in_shape) resize_out = image_ops.resize_nearest_neighbor(input_tensor, out_shape[1:3]) self.assertEqual(out_shape, list(resize_out.get_shape())) resize_out = sess.run(resize_out) self.assertEqual(out_shape, list(resize_out.shape)) def testGradFromResizeToLargerInBothDims(self): in_shape = [1, 2, 3, 1] out_shape = [1, 4, 6, 1] for nptype in self.TYPES: x = np.arange(0, 6).reshape(in_shape).astype(nptype) with self.test_session(use_gpu=True): input_tensor = constant_op.constant(x, shape=in_shape) resize_out = 
image_ops.resize_nearest_neighbor(input_tensor, out_shape[1:3]) err = gradient_checker.compute_gradient_error( input_tensor, in_shape, resize_out, out_shape, x_init_value=x) self.assertLess(err, 1e-3) def testGradFromResizeToSmallerInBothDims(self): in_shape = [1, 4, 6, 1] out_shape = [1, 2, 3, 1] for nptype in self.TYPES: x = np.arange(0, 24).reshape(in_shape).astype(nptype) with self.test_session(use_gpu=True): input_tensor = constant_op.constant(x, shape=in_shape) resize_out = image_ops.resize_nearest_neighbor(input_tensor, out_shape[1:3]) err = gradient_checker.compute_gradient_error( input_tensor, in_shape, resize_out, out_shape, x_init_value=x) self.assertLess(err, 1e-3) def testCompareGpuVsCpu(self): in_shape = [1, 4, 6, 3] out_shape = [1, 8, 16, 3] for nptype in self.TYPES: x = np.arange(0, np.prod(in_shape)).reshape(in_shape).astype(nptype) for align_corners in [True, False]: with self.test_session(use_gpu=False): input_tensor = constant_op.constant(x, shape=in_shape) resize_out = image_ops.resize_nearest_neighbor( input_tensor, out_shape[1:3], align_corners=align_corners) grad_cpu = gradient_checker.compute_gradient( input_tensor, in_shape, resize_out, out_shape, x_init_value=x) with self.test_session(use_gpu=True): input_tensor = constant_op.constant(x, shape=in_shape) resize_out = image_ops.resize_nearest_neighbor( input_tensor, out_shape[1:3], align_corners=align_corners) grad_gpu = gradient_checker.compute_gradient( input_tensor, in_shape, resize_out, out_shape, x_init_value=x) self.assertAllClose(grad_cpu, grad_gpu, rtol=1e-5, atol=1e-5) class ResizeBilinearOpTest(test.TestCase): def testShapeIsCorrectAfterOp(self): in_shape = [1, 2, 2, 1] out_shape = [1, 4, 6, 1] x = np.arange(0, 4).reshape(in_shape).astype(np.float32) with self.test_session() as sess: input_tensor = constant_op.constant(x, shape=in_shape) resize_out = image_ops.resize_bilinear(input_tensor, out_shape[1:3]) self.assertEqual(out_shape, list(resize_out.get_shape())) resize_out = 
sess.run(resize_out) self.assertEqual(out_shape, list(resize_out.shape)) def testGradFromResizeToLargerInBothDims(self): in_shape = [1, 2, 3, 1] out_shape = [1, 4, 6, 1] x = np.arange(0, 6).reshape(in_shape).astype(np.float32) with self.test_session(): input_tensor = constant_op.constant(x, shape=in_shape) resize_out = image_ops.resize_bilinear(input_tensor, out_shape[1:3]) err = gradient_checker.compute_gradient_error( input_tensor, in_shape, resize_out, out_shape, x_init_value=x) self.assertLess(err, 1e-3) def testGradFromResizeToSmallerInBothDims(self): in_shape = [1, 4, 6, 1] out_shape = [1, 2, 3, 1] x = np.arange(0, 24).reshape(in_shape).astype(np.float32) with self.test_session(): input_tensor = constant_op.constant(x, shape=in_shape) resize_out = image_ops.resize_bilinear(input_tensor, out_shape[1:3]) err = gradient_checker.compute_gradient_error( input_tensor, in_shape, resize_out, out_shape, x_init_value=x) self.assertLess(err, 1e-3) def testCompareGpuVsCpu(self): in_shape = [2, 4, 6, 3] out_shape = [2, 8, 16, 3] size = np.prod(in_shape) x = 1.0 / size * np.arange(0, size).reshape(in_shape).astype(np.float32) for align_corners in [True, False]: grad = {} for use_gpu in [False, True]: with self.test_session(use_gpu=use_gpu): input_tensor = constant_op.constant(x, shape=in_shape) resized_tensor = image_ops.resize_bilinear( input_tensor, out_shape[1:3], align_corners=align_corners) grad[use_gpu] = gradient_checker.compute_gradient( input_tensor, in_shape, resized_tensor, out_shape, x_init_value=x) self.assertAllClose(grad[False], grad[True], rtol=1e-4, atol=1e-4) def testTypes(self): in_shape = [1, 4, 6, 1] out_shape = [1, 2, 3, 1] x = np.arange(0, 24).reshape(in_shape) with self.test_session() as sess: for dtype in [np.float16, np.float32, np.float64]: input_tensor = constant_op.constant(x.astype(dtype), shape=in_shape) resize_out = image_ops.resize_bilinear(input_tensor, out_shape[1:3]) grad = sess.run(gradients_impl.gradients(resize_out, input_tensor))[0] 
self.assertAllEqual(in_shape, grad.shape) # Not using gradient_checker.compute_gradient as I didn't work out # the changes required to compensate for the lower precision of # float16 when computing the numeric jacobian. # Instead, we just test the theoretical jacobian. self.assertAllEqual([[[[1.], [0.], [1.], [0.], [1.], [0.]], [[0.], [ 0. ], [0.], [0.], [0.], [0.]], [[1.], [0.], [1.], [0.], [1.], [0.]], [[0.], [0.], [0.], [0.], [0.], [0.]]]], grad) class ResizeBicubicOpTest(test.TestCase): def testShapeIsCorrectAfterOp(self): in_shape = [1, 2, 2, 1] out_shape = [1, 4, 6, 1] x = np.arange(0, 4).reshape(in_shape).astype(np.float32) for align_corners in [True, False]: with self.test_session() as sess: input_tensor = constant_op.constant(x, shape=in_shape) resize_out = image_ops.resize_bicubic(input_tensor, out_shape[1:3], align_corners=align_corners) self.assertEqual(out_shape, list(resize_out.get_shape())) resize_out = sess.run(resize_out) self.assertEqual(out_shape, list(resize_out.shape)) def testGradFromResizeToLargerInBothDims(self): in_shape = [1, 2, 3, 1] out_shape = [1, 4, 6, 1] x = np.arange(0, 6).reshape(in_shape).astype(np.float32) for align_corners in [True, False]: with self.test_session(): input_tensor = constant_op.constant(x, shape=in_shape) resize_out = image_ops.resize_bicubic(input_tensor, out_shape[1:3], align_corners=align_corners) err = gradient_checker.compute_gradient_error( input_tensor, in_shape, resize_out, out_shape, x_init_value=x) self.assertLess(err, 1e-3) def testGradFromResizeToSmallerInBothDims(self): in_shape = [1, 4, 6, 1] out_shape = [1, 2, 3, 1] x = np.arange(0, 24).reshape(in_shape).astype(np.float32) for align_corners in [True, False]: with self.test_session(): input_tensor = constant_op.constant(x, shape=in_shape) resize_out = image_ops.resize_bicubic(input_tensor, out_shape[1:3], align_corners=align_corners) err = gradient_checker.compute_gradient_error( input_tensor, in_shape, resize_out, out_shape, x_init_value=x) 
self.assertLess(err, 1e-3) def testGradOnUnsupportedType(self): in_shape = [1, 4, 6, 1] out_shape = [1, 2, 3, 1] x = np.arange(0, 24).reshape(in_shape).astype(np.uint8) with self.test_session(): input_tensor = constant_op.constant(x, shape=in_shape) resize_out = image_ops.resize_bicubic(input_tensor, out_shape[1:3]) grad = gradients_impl.gradients(input_tensor, [resize_out]) self.assertEqual([None], grad) class CropAndResizeOpTest(test.TestCase): def testShapeIsCorrectAfterOp(self): batch = 2 image_height = 3 image_width = 4 crop_height = 4 crop_width = 5 depth = 2 num_boxes = 2 image_shape = [batch, image_height, image_width, depth] crop_size = [crop_height, crop_width] crops_shape = [num_boxes, crop_height, crop_width, depth] image = np.arange(0, batch * image_height * image_width * depth).reshape(image_shape).astype(np.float32) boxes = np.array([[0, 0, 1, 1], [.1, .2, .7, .8]], dtype=np.float32) box_ind = np.array([0, 1], dtype=np.int32) with self.test_session(use_gpu=True) as sess: crops = image_ops.crop_and_resize( constant_op.constant( image, shape=image_shape), constant_op.constant( boxes, shape=[num_boxes, 4]), constant_op.constant( box_ind, shape=[num_boxes]), constant_op.constant( crop_size, shape=[2])) self.assertEqual(crops_shape, list(crops.get_shape())) crops = sess.run(crops) self.assertEqual(crops_shape, list(crops.shape)) def _randomUniformAvoidAnchors(self, low, high, anchors, radius, num_samples): """Generate samples that are far enough from a set of anchor points. We generate uniform samples in [low, high], then reject those that are less than radius away from any point in anchors. We stop after we have accepted num_samples samples. Args: low: The lower end of the interval. high: The upper end of the interval. anchors: A list of length num_crops with anchor points to avoid. radius: Distance threshold for the samples from the anchors. num_samples: How many samples to produce. 
Returns: samples: A list of length num_samples with the accepted samples. """ self.assertTrue(low < high) self.assertTrue(radius >= 0) num_anchors = len(anchors) # Make sure that at least half of the interval is not forbidden. self.assertTrue(2 * radius * num_anchors < 0.5 * (high - low)) anchors = np.reshape(anchors, num_anchors) samples = [] while len(samples) < num_samples: sample = np.random.uniform(low, high) if np.all(np.fabs(sample - anchors) > radius): samples.append(sample) return samples def testGradRandomBoxes(self): """Test that the gradient is correct for randomly generated boxes. The mapping is piecewise differentiable with respect to the box coordinates. The points where the function is not differentiable are those which are mapped to image pixels, i.e., the normalized y coordinates in np.linspace(0, 1, image_height) and normalized x coordinates in np.linspace(0, 1, image_width). Make sure that the box coordinates are sufficiently far away from those rectangular grid centers that are points of discontinuity, so that the finite difference Jacobian is close to the computed one. """ np.random.seed(1) # Make it reproducible. delta = 1e-3 radius = 2 * delta low, high = -0.5, 1.5 # Also covers the case of extrapolation. 
image_height = 4 for image_width in range(1, 3): for crop_height in range(1, 3): for crop_width in range(2, 4): for depth in range(1, 3): for num_boxes in range(1, 3): batch = num_boxes image_shape = [batch, image_height, image_width, depth] crop_size = [crop_height, crop_width] crops_shape = [num_boxes, crop_height, crop_width, depth] boxes_shape = [num_boxes, 4] image = np.arange(0, batch * image_height * image_width * depth).reshape(image_shape).astype(np.float32) boxes = [] for _ in range(num_boxes): # pylint: disable=unbalanced-tuple-unpacking y1, y2 = self._randomUniformAvoidAnchors( low, high, np.linspace(0, 1, image_height), radius, 2) x1, x2 = self._randomUniformAvoidAnchors( low, high, np.linspace(0, 1, image_width), radius, 2) # pylint: enable=unbalanced-tuple-unpacking boxes.append([y1, x1, y2, x2]) boxes = np.array(boxes, dtype=np.float32) box_ind = np.arange(batch, dtype=np.int32) with self.test_session(use_gpu=True): image_tensor = constant_op.constant(image, shape=image_shape) boxes_tensor = constant_op.constant(boxes, shape=[num_boxes, 4]) box_ind_tensor = constant_op.constant( box_ind, shape=[num_boxes]) crops = image_ops.crop_and_resize( image_tensor, boxes_tensor, box_ind_tensor, constant_op.constant( crop_size, shape=[2])) err = gradient_checker.compute_gradient_error( [image_tensor, boxes_tensor], [image_shape, boxes_shape], crops, crops_shape, delta=delta, x_init_value=[image, boxes]) self.assertLess(err, 2e-3) if __name__ == "__main__": test.main()
apache-2.0
meghana1995/sympy
sympy/ntheory/generate.py
58
17478
""" Generating and counting primes. """ from __future__ import print_function, division import random from bisect import bisect # Using arrays for sieving instead of lists greatly reduces # memory consumption from array import array as _array from .primetest import isprime from sympy.core.compatibility import as_int, range def _arange(a, b): ar = _array('l', [0]*(b - a)) for i, e in enumerate(range(a, b)): ar[i] = e return ar class Sieve: """An infinite list of prime numbers, implemented as a dynamically growing sieve of Eratosthenes. When a lookup is requested involving an odd number that has not been sieved, the sieve is automatically extended up to that number. >>> from sympy import sieve >>> from array import array # this line and next for doctest only >>> sieve._list = array('l', [2, 3, 5, 7, 11, 13]) >>> 25 in sieve False >>> sieve._list array('l', [2, 3, 5, 7, 11, 13, 17, 19, 23]) """ # data shared (and updated) by all Sieve instances _list = _array('l', [2, 3, 5, 7, 11, 13]) def __repr__(self): return "<Sieve with %i primes sieved: 2, 3, 5, ... %i, %i>" % \ (len(self._list), self._list[-2], self._list[-1]) def extend(self, n): """Grow the sieve to cover all primes <= n (a real number). Examples ======== >>> from sympy import sieve >>> from array import array # this line and next for doctest only >>> sieve._list = array('l', [2, 3, 5, 7, 11, 13]) >>> sieve.extend(30) >>> sieve[10] == 29 True """ n = int(n) if n <= self._list[-1]: return # We need to sieve against all bases up to sqrt(n). # This is a recursive call that will do nothing if there are enough # known bases already. 
maxbase = int(n**0.5) + 1 self.extend(maxbase) # Create a new sieve starting from sqrt(n) begin = self._list[-1] + 1 newsieve = _arange(begin, n + 1) # Now eliminate all multiples of primes in [2, sqrt(n)] for p in self.primerange(2, maxbase): # Start counting at a multiple of p, offsetting # the index to account for the new sieve's base index startindex = (-begin) % p for i in range(startindex, len(newsieve), p): newsieve[i] = 0 # Merge the sieves self._list += _array('l', [x for x in newsieve if x]) def extend_to_no(self, i): """Extend to include the ith prime number. i must be an integer. The list is extended by 50% if it is too short, so it is likely that it will be longer than requested. Examples ======== >>> from sympy import sieve >>> from array import array # this line and next for doctest only >>> sieve._list = array('l', [2, 3, 5, 7, 11, 13]) >>> sieve.extend_to_no(9) >>> sieve._list array('l', [2, 3, 5, 7, 11, 13, 17, 19, 23]) """ i = as_int(i) while len(self._list) < i: self.extend(int(self._list[-1] * 1.5)) def primerange(self, a, b): """Generate all prime numbers in the range [a, b). Examples ======== >>> from sympy import sieve >>> print([i for i in sieve.primerange(7, 18)]) [7, 11, 13, 17] """ from sympy.functions.elementary.integers import ceiling # wrapping ceiling in int will raise an error if there was a problem # determining whether the expression was exactly an integer or not a = max(2, int(ceiling(a))) b = int(ceiling(b)) if a >= b: return self.extend(b) i = self.search(a)[1] maxi = len(self._list) + 1 while i < maxi: p = self._list[i - 1] if p < b: yield p i += 1 else: return def search(self, n): """Return the indices i, j of the primes that bound n. If n is prime then i == j. Although n can be an expression, if ceiling cannot convert it to an integer then an n error will be raised. 
Examples ======== >>> from sympy import sieve >>> sieve.search(25) (9, 10) >>> sieve.search(23) (9, 9) """ from sympy.functions.elementary.integers import ceiling # wrapping ceiling in int will raise an error if there was a problem # determining whether the expression was exactly an integer or not test = int(ceiling(n)) n = int(n) if n < 2: raise ValueError("n should be >= 2 but got: %s" % n) if n > self._list[-1]: self.extend(n) b = bisect(self._list, n) if self._list[b - 1] == test: return b, b else: return b, b + 1 def __contains__(self, n): try: n = as_int(n) assert n >= 2 except (ValueError, AssertionError): return False if n % 2 == 0: return n == 2 a, b = self.search(n) return a == b def __getitem__(self, n): """Return the nth prime number""" if isinstance(n, slice): self.extend_to_no(n.stop) return self._list[n.start - 1:n.stop - 1:n.step] else: n = as_int(n) self.extend_to_no(n) return self._list[n - 1] # Generate a global object for repeated use in trial division etc sieve = Sieve() def prime(nth): """ Return the nth prime, with the primes indexed as prime(1) = 2, prime(2) = 3, etc.... The nth prime is approximately n*log(n) and can never be larger than 2**n. References ========== - http://primes.utm.edu/glossary/xpage/BertrandsPostulate.html Examples ======== >>> from sympy import prime >>> prime(10) 29 >>> prime(1) 2 See Also ======== sympy.ntheory.primetest.isprime : Test if n is prime primerange : Generate all primes in a given range primepi : Return the number of primes less than or equal to n """ n = as_int(nth) if n < 1: raise ValueError("nth must be a positive integer; prime(1) == 2") return sieve[n] def primepi(n): """ Return the value of the prime counting function pi(n) = the number of prime numbers less than or equal to n. 
Examples ======== >>> from sympy import primepi >>> primepi(25) 9 See Also ======== sympy.ntheory.primetest.isprime : Test if n is prime primerange : Generate all primes in a given range prime : Return the nth prime """ n = int(n) if n < 2: return 0 else: return sieve.search(n)[0] def nextprime(n, ith=1): """ Return the ith prime greater than n. i must be an integer. Notes ===== Potential primes are located at 6*j +/- 1. This property is used during searching. >>> from sympy import nextprime >>> [(i, nextprime(i)) for i in range(10, 15)] [(10, 11), (11, 13), (12, 13), (13, 17), (14, 17)] >>> nextprime(2, ith=2) # the 2nd prime after 2 5 See Also ======== prevprime : Return the largest prime smaller than n primerange : Generate all primes in a given range """ n = int(n) i = as_int(ith) if i > 1: pr = n j = 1 while 1: pr = nextprime(pr) j += 1 if j > i: break return pr if n < 2: return 2 if n < 7: return {2: 3, 3: 5, 4: 5, 5: 7, 6: 7}[n] nn = 6*(n//6) if nn == n: n += 1 if isprime(n): return n n += 4 elif n - nn == 5: n += 2 if isprime(n): return n n += 4 else: n = nn + 5 while 1: if isprime(n): return n n += 2 if isprime(n): return n n += 4 def prevprime(n): """ Return the largest prime smaller than n. Notes ===== Potential primes are located at 6*j +/- 1. This property is used during searching. 
>>> from sympy import prevprime >>> [(i, prevprime(i)) for i in range(10, 15)] [(10, 7), (11, 7), (12, 11), (13, 11), (14, 13)] See Also ======== nextprime : Return the ith prime greater than n primerange : Generates all primes in a given range """ from sympy.functions.elementary.integers import ceiling # wrapping ceiling in int will raise an error if there was a problem # determining whether the expression was exactly an integer or not n = int(ceiling(n)) if n < 3: raise ValueError("no preceding primes") if n < 8: return {3: 2, 4: 3, 5: 3, 6: 5, 7: 5}[n] nn = 6*(n//6) if n - nn <= 1: n = nn - 1 if isprime(n): return n n -= 4 else: n = nn + 1 while 1: if isprime(n): return n n -= 2 if isprime(n): return n n -= 4 def primerange(a, b): """ Generate a list of all prime numbers in the range [a, b). If the range exists in the default sieve, the values will be returned from there; otherwise values will be returned but will not modify the sieve. Notes ===== Some famous conjectures about the occurence of primes in a given range are [1]: - Twin primes: though often not, the following will give 2 primes an infinite number of times: primerange(6*n - 1, 6*n + 2) - Legendre's: the following always yields at least one prime primerange(n**2, (n+1)**2+1) - Bertrand's (proven): there is always a prime in the range primerange(n, 2*n) - Brocard's: there are at least four primes in the range primerange(prime(n)**2, prime(n+1)**2) The average gap between primes is log(n) [2]; the gap between primes can be arbitrarily large since sequences of composite numbers are arbitrarily large, e.g. the numbers in the sequence n! + 2, n! + 3 ... n! + n are all composite. References ========== 1. http://en.wikipedia.org/wiki/Prime_number 2. 
http://primes.utm.edu/notes/gaps.html Examples ======== >>> from sympy import primerange, sieve >>> print([i for i in primerange(1, 30)]) [2, 3, 5, 7, 11, 13, 17, 19, 23, 29] The Sieve method, primerange, is generally faster but it will occupy more memory as the sieve stores values. The default instance of Sieve, named sieve, can be used: >>> list(sieve.primerange(1, 30)) [2, 3, 5, 7, 11, 13, 17, 19, 23, 29] See Also ======== nextprime : Return the ith prime greater than n prevprime : Return the largest prime smaller than n randprime : Returns a random prime in a given range primorial : Returns the product of primes based on condition Sieve.primerange : return range from already computed primes or extend the sieve to contain the requested range. """ from sympy.functions.elementary.integers import ceiling # if we already have the range, return it if b <= sieve._list[-1]: for i in sieve.primerange(a, b): yield i return # otherwise compute, without storing, the desired range if a >= b: return # wrapping ceiling in int will raise an error if there was a problem # determining whether the expression was exactly an integer or not a = int(ceiling(a)) - 1 b = int(ceiling(b)) while 1: a = nextprime(a) if a < b: yield a else: return def randprime(a, b): """ Return a random prime number in the range [a, b). Bertrand's postulate assures that randprime(a, 2*a) will always succeed for a > 1. 
References ========== - http://en.wikipedia.org/wiki/Bertrand's_postulate Examples ======== >>> from sympy import randprime, isprime >>> randprime(1, 30) #doctest: +SKIP 13 >>> isprime(randprime(1, 30)) True See Also ======== primerange : Generate all primes in a given range """ if a >= b: return a, b = map(int, (a, b)) n = random.randint(a - 1, b) p = nextprime(n) if p >= b: p = prevprime(b) if p < a: raise ValueError("no primes exist in the specified range") return p def primorial(n, nth=True): """ Returns the product of the first n primes (default) or the primes less than or equal to n (when ``nth=False``). >>> from sympy.ntheory.generate import primorial, randprime, primerange >>> from sympy import factorint, Mul, primefactors, sqrt >>> primorial(4) # the first 4 primes are 2, 3, 5, 7 210 >>> primorial(4, nth=False) # primes <= 4 are 2 and 3 6 >>> primorial(1) 2 >>> primorial(1, nth=False) 1 >>> primorial(sqrt(101), nth=False) 210 One can argue that the primes are infinite since if you take a set of primes and multiply them together (e.g. the primorial) and then add or subtract 1, the result cannot be divided by any of the original factors, hence either 1 or more new primes must divide this product of primes. 
In this case, the number itself is a new prime: >>> factorint(primorial(4) + 1) {211: 1} In this case two new primes are the factors: >>> factorint(primorial(4) - 1) {11: 1, 19: 1} Here, some primes smaller and larger than the primes multiplied together are obtained: >>> p = list(primerange(10, 20)) >>> sorted(set(primefactors(Mul(*p) + 1)).difference(set(p))) [2, 5, 31, 149] See Also ======== primerange : Generate all primes in a given range """ if nth: n = as_int(n) else: n = int(n) if n < 1: raise ValueError("primorial argument must be >= 1") p = 1 if nth: for i in range(1, n + 1): p *= prime(i) else: for i in primerange(2, n + 1): p *= i return p def cycle_length(f, x0, nmax=None, values=False): """For a given iterated sequence, return a generator that gives the length of the iterated cycle (lambda) and the length of terms before the cycle begins (mu); if ``values`` is True then the terms of the sequence will be returned instead. The sequence is started with value ``x0``. Note: more than the first lambda + mu terms may be returned and this is the cost of cycle detection with Brent's method; there are, however, generally less terms calculated than would have been calculated if the proper ending point were determined, e.g. by using Floyd's method. >>> from sympy.ntheory.generate import cycle_length This will yield successive values of i <-- func(i): >>> def iter(func, i): ... while 1: ... ii = func(i) ... yield ii ... i = ii ... A function is defined: >>> func = lambda i: (i**2 + 1) % 51 and given a seed of 4 and the mu and lambda terms calculated: >>> next(cycle_length(func, 4)) (6, 2) We can see what is meant by looking at the output: >>> n = cycle_length(func, 4, values=True) >>> list(ni for ni in n) [17, 35, 2, 5, 26, 14, 44, 50, 2, 5, 26, 14] There are 6 repeating values after the first 2. 
If a sequence is suspected of being longer than you might wish, ``nmax`` can be used to exit early (and mu will be returned as None): >>> next(cycle_length(func, 4, nmax = 4)) (4, None) >>> [ni for ni in cycle_length(func, 4, nmax = 4, values=True)] [17, 35, 2, 5] Code modified from: http://en.wikipedia.org/wiki/Cycle_detection. """ nmax = int(nmax or 0) # main phase: search successive powers of two power = lam = 1 tortoise, hare = x0, f(x0) # f(x0) is the element/node next to x0. i = 0 while tortoise != hare and (not nmax or i < nmax): i += 1 if power == lam: # time to start a new power of two? tortoise = hare power *= 2 lam = 0 if values: yield hare hare = f(hare) lam += 1 if nmax and i == nmax: if values: return else: yield nmax, None return if not values: # Find the position of the first repetition of length lambda mu = 0 tortoise = hare = x0 for i in range(lam): hare = f(hare) while tortoise != hare: tortoise = f(tortoise) hare = f(hare) mu += 1 if mu: mu -= 1 yield lam, mu
bsd-3-clause
skycucumber/xuemc
python/venv/lib/python2.7/site-packages/whoosh/codec/plaintext.py
30
14257
# Copyright 2012 Matt Chaput. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO # EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, # OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # The views and conclusions contained in the software and documentation are # those of the authors and should not be interpreted as representing official # policies, either expressed or implied, of Matt Chaput. 
from ast import literal_eval from whoosh.compat import b, bytes_type, text_type, integer_types, PY3 from whoosh.compat import iteritems, dumps, loads, xrange from whoosh.codec import base from whoosh.matching import ListMatcher from whoosh.reading import TermInfo, TermNotFound if not PY3: class memoryview: pass _reprable = (bytes_type, text_type, integer_types, float) # Mixin classes for producing and consuming the simple text format class LineWriter(object): def _print_line(self, indent, command, **kwargs): self._dbfile.write(b(" ") * indent) self._dbfile.write(command.encode("latin1")) for k, v in iteritems(kwargs): if isinstance(v, memoryview): v = bytes(v) if v is not None and not isinstance(v, _reprable): raise TypeError(type(v)) self._dbfile.write(("\t%s=%r" % (k, v)).encode("latin1")) self._dbfile.write(b("\n")) class LineReader(object): def __init__(self, dbfile): self._dbfile = dbfile def _reset(self): self._dbfile.seek(0) def _find_line(self, indent, command, **kwargs): for largs in self._find_lines(indent, command, **kwargs): return largs def _find_lines(self, indent, command, **kwargs): while True: line = self._dbfile.readline() if not line: return c = self._parse_line(line) if c is None: return lindent, lcommand, largs = c if lindent == indent and lcommand == command: matched = True if kwargs: for k in kwargs: if kwargs[k] != largs.get(k): matched = False break if matched: yield largs elif lindent < indent: return def _parse_line(self, line): line = line.decode("latin1") line = line.rstrip() l = len(line) line = line.lstrip() if not line or line.startswith("#"): return None indent = (l - len(line)) // 2 parts = line.split("\t") command = parts[0] args = {} for i in xrange(1, len(parts)): n, v = parts[i].split("=") args[n] = literal_eval(v) return (indent, command, args) def _find_root(self, command): self._reset() c = self._find_line(0, command) if c is None: raise Exception("No root section %r" % (command,)) # Codec class class 
PlainTextCodec(base.Codec): length_stats = False def per_document_writer(self, storage, segment): return PlainPerDocWriter(storage, segment) def field_writer(self, storage, segment): return PlainFieldWriter(storage, segment) def per_document_reader(self, storage, segment): return PlainPerDocReader(storage, segment) def terms_reader(self, storage, segment): return PlainTermsReader(storage, segment) def new_segment(self, storage, indexname): return PlainSegment(indexname) class PlainPerDocWriter(base.PerDocumentWriter, LineWriter): def __init__(self, storage, segment): self._dbfile = storage.create_file(segment.make_filename(".dcs")) self._print_line(0, "DOCS") self.is_closed = False def start_doc(self, docnum): self._print_line(1, "DOC", dn=docnum) def add_field(self, fieldname, fieldobj, value, length): if value is not None: value = dumps(value, -1) self._print_line(2, "DOCFIELD", fn=fieldname, v=value, len=length) def add_column_value(self, fieldname, columnobj, value): self._print_line(2, "COLVAL", fn=fieldname, v=value) def add_vector_items(self, fieldname, fieldobj, items): self._print_line(2, "VECTOR", fn=fieldname) for text, weight, vbytes in items: self._print_line(3, "VPOST", t=text, w=weight, v=vbytes) def finish_doc(self): pass def close(self): self._dbfile.close() self.is_closed = True class PlainPerDocReader(base.PerDocumentReader, LineReader): def __init__(self, storage, segment): self._dbfile = storage.open_file(segment.make_filename(".dcs")) self._segment = segment self.is_closed = False def doc_count(self): return self._segment.doc_count() def doc_count_all(self): return self._segment.doc_count() def has_deletions(self): return False def is_deleted(self, docnum): return False def deleted_docs(self): return frozenset() def _find_doc(self, docnum): self._find_root("DOCS") c = self._find_line(1, "DOC") while c is not None: dn = c["dn"] if dn == docnum: return True elif dn > docnum: return False c = self._find_line(1, "DOC") return False def 
_iter_docs(self): self._find_root("DOCS") c = self._find_line(1, "DOC") while c is not None: yield c["dn"] c = self._find_line(1, "DOC") def _iter_docfields(self, fieldname): for _ in self._iter_docs(): for c in self._find_lines(2, "DOCFIELD", fn=fieldname): yield c def _iter_lengths(self, fieldname): return (c.get("len", 0) for c in self._iter_docfields(fieldname)) def doc_field_length(self, docnum, fieldname, default=0): for dn in self._iter_docs(): if dn == docnum: c = self._find_line(2, "DOCFIELD", fn=fieldname) if c is not None: return c.get("len", default) elif dn > docnum: break return default def _column_values(self, fieldname): for i, docnum in enumerate(self._iter_docs()): if i != docnum: raise Exception("Missing column value for field %r doc %d?" % (fieldname, i)) c = self._find_line(2, "COLVAL", fn=fieldname) if c is None: raise Exception("Missing column value for field %r doc %d?" % (fieldname, docnum)) yield c.get("v") def has_column(self, fieldname): for _ in self._column_values(fieldname): return True return False def column_reader(self, fieldname, column): return list(self._column_values(fieldname)) def field_length(self, fieldname): return sum(self._iter_lengths(fieldname)) def min_field_length(self, fieldname): return min(self._iter_lengths(fieldname)) def max_field_length(self, fieldname): return max(self._iter_lengths(fieldname)) def has_vector(self, docnum, fieldname): if self._find_doc(docnum): if self._find_line(2, "VECTOR"): return True return False def vector(self, docnum, fieldname, format_): if not self._find_doc(docnum): raise Exception if not self._find_line(2, "VECTOR"): raise Exception ids = [] weights = [] values = [] c = self._find_line(3, "VPOST") while c is not None: ids.append(c["t"]) weights.append(c["w"]) values.append(c["v"]) c = self._find_line(3, "VPOST") return ListMatcher(ids, weights, values, format_,) def _read_stored_fields(self): sfs = {} c = self._find_line(2, "DOCFIELD") while c is not None: v = c.get("v") if v is 
not None: v = loads(v) sfs[c["fn"]] = v c = self._find_line(2, "DOCFIELD") return sfs def stored_fields(self, docnum): if not self._find_doc(docnum): raise Exception return self._read_stored_fields() def iter_docs(self): return enumerate(self.all_stored_fields()) def all_stored_fields(self): for _ in self._iter_docs(): yield self._read_stored_fields() def close(self): self._dbfile.close() self.is_closed = True class PlainFieldWriter(base.FieldWriter, LineWriter): def __init__(self, storage, segment): self._dbfile = storage.create_file(segment.make_filename(".trm")) self._print_line(0, "TERMS") @property def is_closed(self): return self._dbfile.is_closed def start_field(self, fieldname, fieldobj): self._fieldobj = fieldobj self._print_line(1, "TERMFIELD", fn=fieldname) def start_term(self, btext): self._terminfo = TermInfo() self._print_line(2, "BTEXT", t=btext) def add(self, docnum, weight, vbytes, length): self._terminfo.add_posting(docnum, weight, length) self._print_line(3, "POST", dn=docnum, w=weight, v=vbytes) def finish_term(self): ti = self._terminfo self._print_line(3, "TERMINFO", df=ti.doc_frequency(), weight=ti.weight(), minlength=ti.min_length(), maxlength=ti.max_length(), maxweight=ti.max_weight(), minid=ti.min_id(), maxid=ti.max_id()) def add_spell_word(self, fieldname, text): self._print_line(2, "SPELL", fn=fieldname, t=text) def close(self): self._dbfile.close() class PlainTermsReader(base.TermsReader, LineReader): def __init__(self, storage, segment): self._dbfile = storage.open_file(segment.make_filename(".trm")) self._segment = segment self.is_closed = False def _find_field(self, fieldname): self._find_root("TERMS") if self._find_line(1, "TERMFIELD", fn=fieldname) is None: raise TermNotFound("No field %r" % fieldname) def _iter_fields(self): self._find_root() c = self._find_line(1, "TERMFIELD") while c is not None: yield c["fn"] c = self._find_line(1, "TERMFIELD") def _iter_btexts(self): c = self._find_line(2, "BTEXT") while c is not None: yield 
c["t"] c = self._find_line(2, "BTEXT") def _find_term(self, fieldname, btext): self._find_field(fieldname) for t in self._iter_btexts(): if t == btext: return True elif t > btext: break return False def _find_terminfo(self): c = self._find_line(3, "TERMINFO") return TermInfo(**c) def __contains__(self, term): fieldname, btext = term return self._find_term(fieldname, btext) def indexed_field_names(self): return self._iter_fields() def terms(self): for fieldname in self._iter_fields(): for btext in self._iter_btexts(): yield (fieldname, btext) def terms_from(self, fieldname, prefix): self._find_field(fieldname) for btext in self._iter_btexts(): if btext < prefix: continue yield (fieldname, btext) def items(self): for fieldname, btext in self.terms(): yield (fieldname, btext), self._find_terminfo() def items_from(self, fieldname, prefix): for fieldname, btext in self.terms_from(fieldname, prefix): yield (fieldname, btext), self._find_terminfo() def term_info(self, fieldname, btext): if not self._find_term(fieldname, btext): raise TermNotFound((fieldname, btext)) return self._find_terminfo() def matcher(self, fieldname, btext, format_, scorer=None): if not self._find_term(fieldname, btext): raise TermNotFound((fieldname, btext)) ids = [] weights = [] values = [] c = self._find_line(3, "POST") while c is not None: ids.append(c["dn"]) weights.append(c["w"]) values.append(c["v"]) c = self._find_line(3, "POST") return ListMatcher(ids, weights, values, format_, scorer=scorer) def close(self): self._dbfile.close() self.is_closed = True class PlainSegment(base.Segment): def __init__(self, indexname): base.Segment.__init__(self, indexname) self._doccount = 0 def codec(self): return PlainTextCodec() def set_doc_count(self, doccount): self._doccount = doccount def doc_count(self): return self._doccount def should_assemble(self): return False
gpl-2.0
faulkner/swampdragon
tests/test_base_model_router_delete.py
13
1163
from swampdragon.route_handler import BaseModelRouter, SUCCESS from swampdragon.serializers.model_serializer import ModelSerializer from swampdragon.testing.dragon_testcase import DragonTestCase from .models import TwoFieldModel class Serializer(ModelSerializer): class Meta: update_fields = ('text', 'number') model = TwoFieldModel class Router(BaseModelRouter): model = TwoFieldModel serializer_class = Serializer def get_object(self, **kwargs): return self.model.objects.get(pk=kwargs['id']) class TestBaseModelRouter(DragonTestCase): def setUp(self): self.router = Router(self.connection) self.obj = self.router.model.objects.create(text='text', number=1) def test_successful_delete(self): data = {'id': self.obj.pk} self.router.delete(**data) self.assertFalse(self.router.model.objects.exists()) def test_deleted(self): data = {'id': self.obj.pk} self.router.delete(**data) actual = self.connection.last_message self.assertEqual(actual['context']['state'], SUCCESS) self.assertEqual(actual['data']['id'], data['id'])
bsd-3-clause
moutai/scikit-learn
sklearn/covariance/__init__.py
389
1157
""" The :mod:`sklearn.covariance` module includes methods and algorithms to robustly estimate the covariance of features given a set of points. The precision matrix defined as the inverse of the covariance is also estimated. Covariance estimation is closely related to the theory of Gaussian Graphical Models. """ from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance, \ log_likelihood from .shrunk_covariance_ import shrunk_covariance, ShrunkCovariance, \ ledoit_wolf, ledoit_wolf_shrinkage, \ LedoitWolf, oas, OAS from .robust_covariance import fast_mcd, MinCovDet from .graph_lasso_ import graph_lasso, GraphLasso, GraphLassoCV from .outlier_detection import EllipticEnvelope __all__ = ['EllipticEnvelope', 'EmpiricalCovariance', 'GraphLasso', 'GraphLassoCV', 'LedoitWolf', 'MinCovDet', 'OAS', 'ShrunkCovariance', 'empirical_covariance', 'fast_mcd', 'graph_lasso', 'ledoit_wolf', 'ledoit_wolf_shrinkage', 'log_likelihood', 'oas', 'shrunk_covariance']
bsd-3-clause
openplans/shareabouts-api
src/sa_api_v2/south_migrations/0033_create_clients_for_existing_api_keys.py
2
9942
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models


class Migration(DataMigration):
    """Data migration: attach a newly created Client to every ApiKey
    that does not yet reference one (one Client per key owner)."""

    depends_on = (
        ("apikey", "0002_auto__add_field_apikey_client"),
    )

    def forwards(self, orm):
        """Create a Client (owned by the key's user) for each clientless ApiKey."""
        # Filter at the database level rather than fetching every key and
        # testing ``key.client is None`` in Python.
        for key in orm['apikey.ApiKey'].objects.filter(client__isnull=True):
            client = orm.Client.objects.create(owner=key.user)
            client.keys.add(key)

    def backwards(self, orm):
        """Intentionally a no-op.

        The clients created by forwards() are left in place: forwards() is
        idempotent (it only touches keys without a client), so re-applying
        after a rollback is safe and no destructive reverse step is needed.
        """

    models = {
        'apikey.apikey': {
            'Meta': {'object_name': 'ApiKey'},
            'client': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'keys'", 'null': 'True', 'to': "orm['sa_api_v2.Client']"}),
            'datasets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'api_keys'", 'blank': 'True', 'to': "orm['sa_api_v2.DataSet']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
            'last_used': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'logged_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'api_keys'", 'to': "orm['auth.User']"})
        },
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'sa_api_v2.action': {
            'Meta': {'ordering': "['-created_datetime']", 'object_name': 'Action', 'db_table': "'sa_api_activity'"},
            'action': ('django.db.models.fields.CharField', [], {'default': "'create'", 'max_length': '16'}),
            'created_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'thing': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actions'", 'db_column': "'data_id'", 'to': "orm['sa_api_v2.SubmittedThing']"}),
            'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        'sa_api_v2.attachment': {
            'Meta': {'object_name': 'Attachment', 'db_table': "'sa_api_attachment'"},
            'created_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'thing': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attachments'", 'to': "orm['sa_api_v2.SubmittedThing']"}),
            'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        'sa_api_v2.client': {
            'Meta': {'object_name': 'Client', 'db_table': "'sa_api_client'"},
            'created_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'data': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'clients'", 'to': "orm['auth.User']"}),
            'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        'sa_api_v2.dataset': {
            'Meta': {'unique_together': "(('owner', 'slug'),)", 'object_name': 'DataSet', 'db_table': "'sa_api_dataset'"},
            'display_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datasets'", 'to': "orm['auth.User']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'default': "u''", 'max_length': '128'})
        },
        'sa_api_v2.place': {
            'Meta': {'object_name': 'Place', 'db_table': "'sa_api_place'", '_ormbases': ['sa_api_v2.SubmittedThing']},
            'geometry': ('django.contrib.gis.db.models.fields.GeometryField', [], {}),
            'submittedthing_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['sa_api_v2.SubmittedThing']", 'unique': 'True', 'primary_key': 'True'})
        },
        'sa_api_v2.submission': {
            'Meta': {'object_name': 'Submission', 'db_table': "'sa_api_submission'", '_ormbases': ['sa_api_v2.SubmittedThing']},
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'to': "orm['sa_api_v2.SubmissionSet']"}),
            'submittedthing_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['sa_api_v2.SubmittedThing']", 'unique': 'True', 'primary_key': 'True'})
        },
        'sa_api_v2.submissionset': {
            'Meta': {'unique_together': "(('place', 'name'),)", 'object_name': 'SubmissionSet', 'db_table': "'sa_api_submissionset'"},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'place': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'submission_sets'", 'to': "orm['sa_api_v2.Place']"})
        },
        'sa_api_v2.submittedthing': {
            'Meta': {'object_name': 'SubmittedThing', 'db_table': "'sa_api_submittedthing'"},
            'created_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'data': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
            'dataset': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'things'", 'blank': 'True', 'to': "orm['sa_api_v2.DataSet']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'submitter': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'things'", 'null': 'True', 'to': "orm['auth.User']"}),
            'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
        }
    }

    complete_apps = ['sa_api_v2']
    symmetrical = True
gpl-3.0
flacjacket/pywayland
example/surface.py
1
5108
# Copyright 2015 Sean Vig # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import, print_function import mmap import os import sys this_file = os.path.abspath(__file__) this_dir = os.path.split(this_file)[0] root_dir = os.path.split(this_dir)[0] pywayland_dir = os.path.join(root_dir, "pywayland") if os.path.exists(pywayland_dir): sys.path.append(root_dir) from pywayland.client import Display # noqa: E402 from pywayland.protocol.wayland import WlCompositor, WlShell, WlShm # noqa: E402 from pywayland.utils import AnonymousFile # noqa: E402 WIDTH = 480 HEIGHT = 256 MARGIN = 10 class Window(object): def __init__(self): self.buffer = None self.compositor = None self.shell = None self.shm = None self.shm_data = None self.surface = None self.line_pos = MARGIN self.line_speed = +1 def shell_surface_ping_handler(shell_surface, serial): shell_surface.pong(serial) print("pinged/ponged") def shm_format_handler(shm, format_): if format_ == WlShm.format.argb8888.value: s = "ARGB8888" elif format_ == WlShm.format.xrgb8888.value: s = "XRGB8888" elif format_ == WlShm.format.rgb565.value: s = "RGB565" else: s = "other format" print("Possible shmem format: {}".format(s)) def registry_global_handler(registry, id_, interface, version): window = registry.user_data if interface == "wl_compositor": print("got compositor") window.compositor = registry.bind(id_, WlCompositor, version) elif interface == "wl_shell": print("got shell") window.shell = registry.bind(id_, WlShell, 
version) elif interface == "wl_shm": print("got shm") window.shm = registry.bind(id_, WlShm, version) window.shm.dispatcher["format"] = shm_format_handler def registry_global_remover(registry, id_): print("got a registry losing event for {}".format(id)) def create_buffer(window): stride = WIDTH * 4 size = stride * HEIGHT with AnonymousFile(size) as fd: window.shm_data = mmap.mmap( fd, size, prot=mmap.PROT_READ | mmap.PROT_WRITE, flags=mmap.MAP_SHARED ) pool = window.shm.create_pool(fd, size) buff = pool.create_buffer(0, WIDTH, HEIGHT, stride, WlShm.format.argb8888.value) pool.destroy() return buff def create_window(window): window.buffer = create_buffer(window) window.surface.attach(window.buffer, 0, 0) window.surface.commit() def redraw(callback, time, destroy_callback=True): window = callback.user_data if destroy_callback: callback._destroy() paint(window) window.surface.damage(0, 0, WIDTH, HEIGHT) callback = window.surface.frame() callback.dispatcher["done"] = redraw callback.user_data = window window.surface.attach(window.buffer, 0, 0) window.surface.commit() def paint(window): mm = window.shm_data # clear mm.seek(0) mm.write(b"\xff" * 4 * WIDTH * HEIGHT) # draw progressing line mm.seek((window.line_pos * WIDTH + MARGIN) * 4) mm.write(b"\x00\x00\x00\xff" * (WIDTH - 2 * MARGIN)) window.line_pos += window.line_speed # maybe reverse direction of progression if window.line_pos >= HEIGHT - MARGIN or window.line_pos <= MARGIN: window.line_speed = -window.line_speed def main(): window = Window() display = Display() display.connect() print("connected to display") registry = display.get_registry() registry.dispatcher["global"] = registry_global_handler registry.dispatcher["global_remove"] = registry_global_remover registry.user_data = window display.dispatch(block=True) display.roundtrip() if window.compositor is None: raise RuntimeError("no compositor found") elif window.shell is None: raise RuntimeError("no shell found") elif window.shm is None: raise RuntimeError("no 
shm found") window.surface = window.compositor.create_surface() shell_surface = window.shell.get_shell_surface(window.surface) shell_surface.set_toplevel() shell_surface.dispatcher["ping"] = shell_surface_ping_handler frame_callback = window.surface.frame() frame_callback.dispatcher["done"] = redraw frame_callback.user_data = window create_window(window) redraw(frame_callback, 0, destroy_callback=False) while display.dispatch(block=True) != -1: pass import time time.sleep(1) display.disconnect() if __name__ == "__main__": main()
apache-2.0
ChemiKhazi/Sprytile
rx/backpressure/pausable.py
2
1729
from rx.core import Observable, ObservableBase, Disposable
from rx.internal import extensionmethod
from rx.disposables import CompositeDisposable
from rx.subjects import Subject


class PausableObservable(ObservableBase):
    """An observable gated by a boolean sequence: True lets values flow,
    False suspends them until the next True."""

    def __init__(self, source, pauser=None):
        self.source = source
        self.controller = Subject()
        # Merge an external pauser (anything subscribable) with the
        # internal controller driven by pause()/resume().
        self.pauser = (
            self.controller.merge(pauser)
            if pauser and hasattr(pauser, "subscribe")
            else self.controller
        )
        super(PausableObservable, self).__init__()

    def _subscribe_core(self, observer):
        published = self.source.publish()
        downstream = published.subscribe(observer)
        # Single-element list so the closure below can rebind the slot.
        active = [Disposable.empty()]

        def toggle(is_running):
            # True -> connect the published source; False -> drop the
            # current connection and park an empty disposable in its place.
            if not is_running:
                active[0].dispose()
                active[0] = Disposable.empty()
            else:
                active[0] = published.connect()

        toggler = self.pauser.distinct_until_changed().subscribe(toggle)
        return CompositeDisposable(downstream, active[0], toggler)

    def pause(self):
        """Suspend emission of values from the source."""
        self.controller.on_next(False)

    def resume(self):
        """Resume emission of values from the source."""
        self.controller.on_next(True)


@extensionmethod(Observable)
def pausable(self, pauser):
    """Pauses the underlying observable sequence based upon the observable
    sequence which yields True/False.

    Example:
    pauser = rx.Subject()
    source = rx.Observable.interval(100).pausable(pauser)

    Keyword parameters:
    pauser -- {Observable} The observable sequence used to pause the
        underlying sequence.

    Returns the observable {Observable} sequence which is paused based upon
    the pauser.
    """
    return PausableObservable(self, pauser)
mit
beck/django
django/apps/config.py
131
8077
import os
from importlib import import_module

from django.core.exceptions import AppRegistryNotReady, ImproperlyConfigured
from django.utils._os import upath
from django.utils.module_loading import module_has_submodule

# Conventional name of the submodule that holds an app's models.
MODELS_MODULE_NAME = 'models'


class AppConfig(object):
    """
    Class representing a Django application and its configuration.
    """

    def __init__(self, app_name, app_module):
        # Full Python path to the application eg. 'django.contrib.admin'.
        self.name = app_name

        # Root module for the application eg. <module 'django.contrib.admin'
        # from 'django/contrib/admin/__init__.pyc'>.
        self.module = app_module

        # The following attributes could be defined at the class level in a
        # subclass, hence the test-and-set pattern.

        # Last component of the Python path to the application eg. 'admin'.
        # This value must be unique across a Django project.
        if not hasattr(self, 'label'):
            self.label = app_name.rpartition(".")[2]

        # Human-readable name for the application eg. "Admin".
        if not hasattr(self, 'verbose_name'):
            self.verbose_name = self.label.title()

        # Filesystem path to the application directory eg.
        # u'/usr/lib/python2.7/dist-packages/django/contrib/admin'. Unicode on
        # Python 2 and a str on Python 3.
        if not hasattr(self, 'path'):
            self.path = self._path_from_module(app_module)

        # Module containing models eg. <module 'django.contrib.admin.models'
        # from 'django/contrib/admin/models.pyc'>. Set by import_models().
        # None if the application doesn't have a models module.
        self.models_module = None

        # Mapping of lower case model names to model classes. Initially set to
        # None to prevent accidental access before import_models() runs.
        self.models = None

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.label)

    def _path_from_module(self, module):
        """Attempt to determine app's filesystem path from its module."""
        # See #21874 for extended discussion of the behavior of this method in
        # various cases.
        # Convert paths to list because Python 3's _NamespacePath does not
        # support indexing.
        paths = list(getattr(module, '__path__', []))
        if len(paths) != 1:
            filename = getattr(module, '__file__', None)
            if filename is not None:
                paths = [os.path.dirname(filename)]
        if len(paths) > 1:
            raise ImproperlyConfigured(
                "The app module %r has multiple filesystem locations (%r); "
                "you must configure this app with an AppConfig subclass "
                "with a 'path' class attribute." % (module, paths))
        elif not paths:
            raise ImproperlyConfigured(
                "The app module %r has no filesystem location, "
                "you must configure this app with an AppConfig subclass "
                "with a 'path' class attribute." % (module,))
        return upath(paths[0])

    @classmethod
    def create(cls, entry):
        """
        Factory that creates an app config from an entry in INSTALLED_APPS.

        ``entry`` may be either the dotted path of an app module (possibly
        declaring ``default_app_config``) or the dotted path of an AppConfig
        subclass; this method resolves both forms.
        """
        try:
            # If import_module succeeds, entry is a path to an app module,
            # which may specify an app config class with default_app_config.
            # Otherwise, entry is a path to an app config class or an error.
            module = import_module(entry)

        except ImportError:
            # Track that importing as an app module failed. If importing as an
            # app config class fails too, we'll trigger the ImportError again.
            module = None

            mod_path, _, cls_name = entry.rpartition('.')

            # Raise the original exception when entry cannot be a path to an
            # app config class.
            if not mod_path:
                raise

        else:
            try:
                # If this works, the app module specifies an app config class.
                entry = module.default_app_config
            except AttributeError:
                # Otherwise, it simply uses the default app config class.
                return cls(entry, module)
            else:
                mod_path, _, cls_name = entry.rpartition('.')

        # If we're reaching this point, we must attempt to load the app config
        # class located at <mod_path>.<cls_name>
        mod = import_module(mod_path)
        try:
            cls = getattr(mod, cls_name)
        except AttributeError:
            if module is None:
                # If importing as an app module failed, that error probably
                # contains the most informative traceback. Trigger it again.
                import_module(entry)
            else:
                raise

        # Check for obvious errors. (This check prevents duck typing, but
        # it could be removed if it became a problem in practice.)
        if not issubclass(cls, AppConfig):
            raise ImproperlyConfigured(
                "'%s' isn't a subclass of AppConfig." % entry)

        # Obtain app name here rather than in AppClass.__init__ to keep
        # all error checking for entries in INSTALLED_APPS in one place.
        try:
            app_name = cls.name
        except AttributeError:
            raise ImproperlyConfigured(
                "'%s' must supply a name attribute." % entry)

        # Ensure app_name points to a valid module.
        app_module = import_module(app_name)

        # Entry is a path to an app config class.
        return cls(app_name, app_module)

    def check_models_ready(self):
        """
        Raises an exception if models haven't been imported yet.
        """
        if self.models is None:
            raise AppRegistryNotReady(
                "Models for app '%s' haven't been imported yet." % self.label)

    def get_model(self, model_name):
        """
        Returns the model with the given case-insensitive model_name.

        Raises LookupError if no model exists with this name.
        """
        self.check_models_ready()
        try:
            return self.models[model_name.lower()]
        except KeyError:
            raise LookupError(
                "App '%s' doesn't have a '%s' model." % (self.label, model_name))

    def get_models(self, include_auto_created=False,
                   include_deferred=False, include_swapped=False):
        """
        Returns an iterable of models.

        By default, the following models aren't included:

        - auto-created models for many-to-many relations without
          an explicit intermediate table,
        - models created to satisfy deferred attribute queries,
        - models that have been swapped out.

        Set the corresponding keyword argument to True to include such models.
        Keyword arguments aren't documented; they're a private API.
        """
        self.check_models_ready()
        # Generator: filter the registered models lazily.
        for model in self.models.values():
            if model._deferred and not include_deferred:
                continue
            if model._meta.auto_created and not include_auto_created:
                continue
            if model._meta.swapped and not include_swapped:
                continue
            yield model

    def import_models(self, all_models):
        # Dictionary of models for this app, primarily maintained in the
        # 'all_models' attribute of the Apps this AppConfig is attached to.
        # Injected as a parameter because it gets populated when models are
        # imported, which might happen before populate() imports models.
        self.models = all_models

        if module_has_submodule(self.module, MODELS_MODULE_NAME):
            models_module_name = '%s.%s' % (self.name, MODELS_MODULE_NAME)
            self.models_module = import_module(models_module_name)

    def ready(self):
        """
        Override this method in subclasses to run code when Django starts.
        """
bsd-3-clause
mfasDa/raadev
analysis/base/DataSet.py
1
10982
#************************************************************************** #* Copyright(c) 1998-2014, ALICE Experiment at CERN, All rights reserved. * #* * #* Author: The ALICE Off-line Project. * #* Contributors are mentioned in the code where appropriate. * #* * #* Permission to use, copy, modify and distribute this software and its * #* documentation strictly for non-commercial purposes is hereby granted * #* without fee, provided that the above copyright notice appears in all * #* copies and that both the copyright notice and this permission notice * #* appear in the supporting documentation. The authors make no claims * #* about the suitability of this software for any purpose. It is * #* provided "as is" without express or implied warranty. * #************************************************************************** """ Module for trigger class dependent data set, consiting of - cluster containers - track containers - jet containers (tracks found in jets) :organization: ALICE Collaboration :copyright: :author: Markus Fasel :contact: markus.fasel@cern.ch :organization: Lawrence Berkeley National Laboratory """ from ROOT import TIter, TList from copy import copy,deepcopy from base.MergeException import MergeException from base.struct.JetContainer import JetContainer from base.struct.DataContainers import TrackContainer, ClusterContainer class DataSet(object): """ Data set for a given trigger class. 
A data set contains a set of cluster containers and an set of track containers """ class ContentException(Exception): """ Exception indicating that a certain key is not existing in the container """ def __init__(self, searchkey, container): """ Constructor :param searchkey: key raising the exception :type searchkey: str :param container: Name of the container raising the exception :type container: str """ self.__searchkey = searchkey self.__container = container def __str__(self): """ Create string representation of the container """ return "%s already present in container %s" %(self.__searchkey, self.__container) def __init__(self): """ Constructor """ self.__trackContainers = {} self.__clusterContainers = {} self.__jetContainer = JetContainer() def __copy__(self): """ shallow copy constructor """ print "Simple copy called from %s" %(self.__class__) newobject = DataSet() for name,tc in self.__trackContainers.iteritems(): newobject.AddTrackContainer(name, copy(tc)) for name,cc in self.__clusterContainers.iteritems(): newobject.AddClusterContainer(name, copy(cc)) return newobject def __deepcopy__(self, memo): """ deep copy constructor """ print "deep copy called from %s" %(self.__class__) newobject = DataSet() for name,tc in self.__trackContainers.iteritems(): newobject.AddTrackContainer(name, deepcopy(tc, memo)) for name,cc in self.__clusterContainers.iteritems(): newobject.AddClusterContainer(name, deepcopy(cc, memo)) return newobject def AddTrackContainer(self, name, data): """ Add a new track container to the dataset :param name: name of the track container :type name: str :param data: track container to be added :type data: TrackContainer """ if name in self.__trackContainers.keys(): raise DataSet.ContentException(name, "TrackContainer") self.__trackContainers[name] = data def AddJetSpectrum(self, spectrum, jetpt, isMCkine): """ Add pt spectrum of tracks in jets to the dataset :param spectrum: pt spectrum :type spectrum: JetTHnSparse :param jetpt: min pt of 
jets :type jetpt: float :param isMCkine: If true MC kine is used :type isMCkine: bool """ self.__jetContainer.SetJetPtHist(jetpt, spectrum, isMCkine) def AddEventHistForJets(self, hist): """ Add event hist to the jet pt container :param hist: event histogram :type hist: TH1 """ self.__jetContainer.SetEventHist(hist) def AddClusterContainer(self, name, data): """ Add a new cluster container to the dataset :param name: name of the cluster container :type name: str :param data: cluster container to be added :type data: ClusterContainer """ if name in self.__clusterContainers.keys(): raise DataSet.ContentException(name, "ClusterContainer") self.__clusterContainers[name] = data def FindTrackContainer(self, name): """ Find a track container within the dataset :param name: name of the track container :type name: str :return: The track container (None if not found) :rtype: TrackContainer """ if not name in self.__trackContainers.keys(): return None return self.__trackContainers[name] def FindClusterContainer(self, name): """ Find a cluster container within the dataset :param name: name of the cluster container :type name: str :return: The cluster container (None if not found) :rtype: TrackContainer """ if not name in self.__clusterContainers: return None return self.__clusterContainers[name] def GetJetContainer(self): """ Return the jet container :return: the jet container :rtype: JetContainer """ return self.__jetContainer def GetListOfTrackContainers(self): """ Get a list of track container names :return: list of container names :rtype: list """ return self.__trackContainers.keys() def GetListOfClusterContainers(self): """ Get a list of cluster container names :return: list of container names :rtype: list """ return self.__clusterContainers.keys() def Add(self, other): """ Add other data set to this one :param other: data set to be added to this one :type other: DataSet """ if not isinstance(other, DataSet): raise MergeException("Incompatible types: this(Dataset), 
other(%s)" %(str(other.__class__))) nfailure = 0 for cont in self.GetListOfTrackContainers(): othercont = other.FindTrackContainer(cont) if othercont: self.__trackContainers[cont].Add(othercont) else: nfailure += 1 for cont in self.GetListOfClusterContainers(): othercont = other.FindClusterContainer(cont) if othercont: self.__clusterContainers[cont].Add(othercont) else: nfailure += 1 if nfailure > 0: raise MergeException("Several containers have not been found inside the other datase") def Scale(self, scalefactor): """ Scale all track or cluster containers with the underlying scale factor :param scalefactor: Scale factor applied :type scalefactor: float """ for cont in self.__trackContainers.values(): cont.Scale(scalefactor) for cont in self.__clusterContainers.values(): cont.Scale(scalefactor) def GetRootPrimitive(self, listname): """ Make root primitives (for root IO) """ result = TList() result.SetName(listname) tracklist = TList() tracklist.SetName("trackContainers") for name,tc in self.__trackContainers.iteritems(): tracklist.Add(tc.GetRootPrimitive("trackcontainer_%s" %(name))) clusterlist = TList() clusterlist.SetName("clusterContainers") for name,cc in self.__clusterContainers.iteritems(): clusterlist.Add(cc.GetRootPrimitive("clustercontainer_%s" %(name))) result.Add(tracklist) result.Add(clusterlist) return result @staticmethod def BuildFromRootPrimitive(rootprimitive): """ Build dataset from a root primitive :param rootprimitive: List of root primitive objects :type rootprimitive: TList :return: Reconstructed dataset :rtype: DataSet """ result = DataSet() trackContainers = rootprimitive.FindObject("trackContainers") clusterContainers = rootprimitive.FindObject("clusterContainers") trackIter = TIter(trackContainers) clusterIter = TIter(clusterContainers) currententry = trackIter.Next() while currententry: entryname = currententry.GetName() if "trackcontainer" in entryname: contname = entryname.replace("trackcontainer_","") 
result.AddTrackContainer(contname, TrackContainer.BuildFromRootPrimitive(currententry)) currententry = trackIter.Next() currententry = clusterIter.Next() while currententry: entryname = currententry.GetName() if "clustercontainer" in entryname: contname = entryname.replace("clustercontainer_","") result.AddClusterContainer(contname, ClusterContainer.BuildFromRootPrimitive(currententry)) currententry = clusterIter.Next() return result def Print(self): """ Print content of the data set """ print "Dataset content:" print "============================================" print " Track Containers:" for cont in self.__trackContainers.keys(): print " %s" %(cont) print " Cluster Containers:" for cont in self.__clusterContainers.keys(): print " %s" %(cont) print "--------------------------------------------" print "Status of the different containers:" for contname, container in self.__trackContainers.iteritems(): print " %s:" %(contname) container.Print() for contname, container in self.__clusterContainers.iteritems(): print " %s:" %(cont) container.Print()
gpl-3.0
chronicwaffle/PokemonGo-DesktopMap
app/pywin/Lib/lib-tk/test/test_ttk/test_extensions.py
38
10397
import sys import unittest import Tkinter as tkinter import ttk from test.test_support import requires, run_unittest, swap_attr from test_ttk.support import AbstractTkTest, destroy_default_root requires('gui') class LabeledScaleTest(AbstractTkTest, unittest.TestCase): def tearDown(self): self.root.update_idletasks() super(LabeledScaleTest, self).tearDown() def test_widget_destroy(self): # automatically created variable x = ttk.LabeledScale(self.root) var = x._variable._name x.destroy() self.assertRaises(tkinter.TclError, x.tk.globalgetvar, var) # manually created variable myvar = tkinter.DoubleVar(self.root) name = myvar._name x = ttk.LabeledScale(self.root, variable=myvar) x.destroy() if self.wantobjects: self.assertEqual(x.tk.globalgetvar(name), myvar.get()) else: self.assertEqual(float(x.tk.globalgetvar(name)), myvar.get()) del myvar self.assertRaises(tkinter.TclError, x.tk.globalgetvar, name) # checking that the tracing callback is properly removed myvar = tkinter.IntVar(self.root) # LabeledScale will start tracing myvar x = ttk.LabeledScale(self.root, variable=myvar) x.destroy() # Unless the tracing callback was removed, creating a new # LabeledScale with the same var will cause an error now. This # happens because the variable will be set to (possibly) a new # value which causes the tracing callback to be called and then # it tries calling instance attributes not yet defined. 
ttk.LabeledScale(self.root, variable=myvar) if hasattr(sys, 'last_type'): self.assertNotEqual(sys.last_type, tkinter.TclError) def test_initialization_no_master(self): # no master passing with swap_attr(tkinter, '_default_root', None), \ swap_attr(tkinter, '_support_default_root', True): try: x = ttk.LabeledScale() self.assertIsNotNone(tkinter._default_root) self.assertEqual(x.master, tkinter._default_root) self.assertEqual(x.tk, tkinter._default_root.tk) x.destroy() finally: destroy_default_root() def test_initialization(self): # master passing master = tkinter.Frame(self.root) x = ttk.LabeledScale(master) self.assertEqual(x.master, master) x.destroy() # variable initialization/passing passed_expected = (('0', 0), (0, 0), (10, 10), (-1, -1), (sys.maxint + 1, sys.maxint + 1)) if self.wantobjects: passed_expected += ((2.5, 2),) for pair in passed_expected: x = ttk.LabeledScale(self.root, from_=pair[0]) self.assertEqual(x.value, pair[1]) x.destroy() x = ttk.LabeledScale(self.root, from_='2.5') self.assertRaises(ValueError, x._variable.get) x.destroy() x = ttk.LabeledScale(self.root, from_=None) self.assertRaises(ValueError, x._variable.get) x.destroy() # variable should have its default value set to the from_ value myvar = tkinter.DoubleVar(self.root, value=20) x = ttk.LabeledScale(self.root, variable=myvar) self.assertEqual(x.value, 0) x.destroy() # check that it is really using a DoubleVar x = ttk.LabeledScale(self.root, variable=myvar, from_=0.5) self.assertEqual(x.value, 0.5) self.assertEqual(x._variable._name, myvar._name) x.destroy() # widget positionment def check_positions(scale, scale_pos, label, label_pos): self.assertEqual(scale.pack_info()['side'], scale_pos) self.assertEqual(label.place_info()['anchor'], label_pos) x = ttk.LabeledScale(self.root, compound='top') check_positions(x.scale, 'bottom', x.label, 'n') x.destroy() x = ttk.LabeledScale(self.root, compound='bottom') check_positions(x.scale, 'top', x.label, 's') x.destroy() # invert default 
positions x = ttk.LabeledScale(self.root, compound='unknown') check_positions(x.scale, 'top', x.label, 's') x.destroy() x = ttk.LabeledScale(self.root) # take default positions check_positions(x.scale, 'bottom', x.label, 'n') x.destroy() # extra, and invalid, kwargs self.assertRaises(tkinter.TclError, ttk.LabeledScale, master, a='b') def test_horizontal_range(self): lscale = ttk.LabeledScale(self.root, from_=0, to=10) lscale.pack() lscale.wait_visibility() lscale.update() linfo_1 = lscale.label.place_info() prev_xcoord = lscale.scale.coords()[0] self.assertEqual(prev_xcoord, int(linfo_1['x'])) # change range to: from -5 to 5. This should change the x coord of # the scale widget, since 0 is at the middle of the new # range. lscale.scale.configure(from_=-5, to=5) # The following update is needed since the test doesn't use mainloop, # at the same time this shouldn't affect test outcome lscale.update() curr_xcoord = lscale.scale.coords()[0] self.assertNotEqual(prev_xcoord, curr_xcoord) # the label widget should have been repositioned too linfo_2 = lscale.label.place_info() self.assertEqual(lscale.label['text'], 0 if self.wantobjects else '0') self.assertEqual(curr_xcoord, int(linfo_2['x'])) # change the range back lscale.scale.configure(from_=0, to=10) self.assertNotEqual(prev_xcoord, curr_xcoord) self.assertEqual(prev_xcoord, int(linfo_1['x'])) lscale.destroy() def test_variable_change(self): x = ttk.LabeledScale(self.root) x.pack() x.wait_visibility() x.update() curr_xcoord = x.scale.coords()[0] newval = x.value + 1 x.value = newval # The following update is needed since the test doesn't use mainloop, # at the same time this shouldn't affect test outcome x.update() self.assertEqual(x.label['text'], newval if self.wantobjects else str(newval)) self.assertGreater(x.scale.coords()[0], curr_xcoord) self.assertEqual(x.scale.coords()[0], int(x.label.place_info()['x'])) # value outside range if self.wantobjects: conv = lambda x: x else: conv = int x.value = 
conv(x.scale['to']) + 1 # no changes shouldn't happen x.update() self.assertEqual(conv(x.label['text']), newval) self.assertEqual(x.scale.coords()[0], int(x.label.place_info()['x'])) x.destroy() def test_resize(self): x = ttk.LabeledScale(self.root) x.pack(expand=True, fill='both') x.wait_visibility() x.update() width, height = x.master.winfo_width(), x.master.winfo_height() width_new, height_new = width * 2, height * 2 x.value = 3 x.update() x.master.wm_geometry("%dx%d" % (width_new, height_new)) self.assertEqual(int(x.label.place_info()['x']), x.scale.coords()[0]) # Reset geometry x.master.wm_geometry("%dx%d" % (width, height)) x.destroy() class OptionMenuTest(AbstractTkTest, unittest.TestCase): def setUp(self): super(OptionMenuTest, self).setUp() self.textvar = tkinter.StringVar(self.root) def tearDown(self): del self.textvar super(OptionMenuTest, self).tearDown() def test_widget_destroy(self): var = tkinter.StringVar(self.root) optmenu = ttk.OptionMenu(self.root, var) name = var._name optmenu.update_idletasks() optmenu.destroy() self.assertEqual(optmenu.tk.globalgetvar(name), var.get()) del var self.assertRaises(tkinter.TclError, optmenu.tk.globalgetvar, name) def test_initialization(self): self.assertRaises(tkinter.TclError, ttk.OptionMenu, self.root, self.textvar, invalid='thing') optmenu = ttk.OptionMenu(self.root, self.textvar, 'b', 'a', 'b') self.assertEqual(optmenu._variable.get(), 'b') self.assertTrue(optmenu['menu']) self.assertTrue(optmenu['textvariable']) optmenu.destroy() def test_menu(self): items = ('a', 'b', 'c') default = 'a' optmenu = ttk.OptionMenu(self.root, self.textvar, default, *items) found_default = False for i in range(len(items)): value = optmenu['menu'].entrycget(i, 'value') self.assertEqual(value, items[i]) if value == default: found_default = True self.assertTrue(found_default) optmenu.destroy() # default shouldn't be in menu if it is not part of values default = 'd' optmenu = ttk.OptionMenu(self.root, self.textvar, default, *items) 
curr = None i = 0 while True: last, curr = curr, optmenu['menu'].entryconfigure(i, 'value') if last == curr: # no more menu entries break self.assertNotEqual(curr, default) i += 1 self.assertEqual(i, len(items)) # check that variable is updated correctly optmenu.pack() optmenu.wait_visibility() optmenu['menu'].invoke(0) self.assertEqual(optmenu._variable.get(), items[0]) # changing to an invalid index shouldn't change the variable self.assertRaises(tkinter.TclError, optmenu['menu'].invoke, -1) self.assertEqual(optmenu._variable.get(), items[0]) optmenu.destroy() # specifying a callback success = [] def cb_test(item): self.assertEqual(item, items[1]) success.append(True) optmenu = ttk.OptionMenu(self.root, self.textvar, 'a', command=cb_test, *items) optmenu['menu'].invoke(1) if not success: self.fail("Menu callback not invoked") optmenu.destroy() tests_gui = (LabeledScaleTest, OptionMenuTest) if __name__ == "__main__": run_unittest(*tests_gui)
mit
blackzw/openwrt_sdk_dev1
staging_dir/target-mips_r2_uClibc-0.9.33.2/usr/lib/python2.7/xml/sax/_exceptions.py
250
4785
"""Different kinds of SAX Exceptions""" import sys if sys.platform[:4] == "java": from java.lang import Exception del sys # ===== SAXEXCEPTION ===== class SAXException(Exception): """Encapsulate an XML error or warning. This class can contain basic error or warning information from either the XML parser or the application: you can subclass it to provide additional functionality, or to add localization. Note that although you will receive a SAXException as the argument to the handlers in the ErrorHandler interface, you are not actually required to throw the exception; instead, you can simply read the information in it.""" def __init__(self, msg, exception=None): """Creates an exception. The message is required, but the exception is optional.""" self._msg = msg self._exception = exception Exception.__init__(self, msg) def getMessage(self): "Return a message for this exception." return self._msg def getException(self): "Return the embedded exception, or None if there was none." return self._exception def __str__(self): "Create a string representation of the exception." return self._msg def __getitem__(self, ix): """Avoids weird error messages if someone does exception[ix] by mistake, since Exception has __getitem__ defined.""" raise AttributeError("__getitem__") # ===== SAXPARSEEXCEPTION ===== class SAXParseException(SAXException): """Encapsulate an XML parse error or warning. This exception will include information for locating the error in the original XML document. Note that although the application will receive a SAXParseException as the argument to the handlers in the ErrorHandler interface, the application is not actually required to throw the exception; instead, it can simply read the information in it and take a different action. Since this exception is a subclass of SAXException, it inherits the ability to wrap another exception.""" def __init__(self, msg, exception, locator): "Creates the exception. The exception parameter is allowed to be None." 
SAXException.__init__(self, msg, exception) self._locator = locator # We need to cache this stuff at construction time. # If this exception is thrown, the objects through which we must # traverse to get this information may be deleted by the time # it gets caught. self._systemId = self._locator.getSystemId() self._colnum = self._locator.getColumnNumber() self._linenum = self._locator.getLineNumber() def getColumnNumber(self): """The column number of the end of the text where the exception occurred.""" return self._colnum def getLineNumber(self): "The line number of the end of the text where the exception occurred." return self._linenum def getPublicId(self): "Get the public identifier of the entity where the exception occurred." return self._locator.getPublicId() def getSystemId(self): "Get the system identifier of the entity where the exception occurred." return self._systemId def __str__(self): "Create a string representation of the exception." sysid = self.getSystemId() if sysid is None: sysid = "<unknown>" linenum = self.getLineNumber() if linenum is None: linenum = "?" colnum = self.getColumnNumber() if colnum is None: colnum = "?" return "%s:%s:%s: %s" % (sysid, linenum, colnum, self._msg) # ===== SAXNOTRECOGNIZEDEXCEPTION ===== class SAXNotRecognizedException(SAXException): """Exception class for an unrecognized identifier. An XMLReader will raise this exception when it is confronted with an unrecognized feature or property. SAX applications and extensions may use this class for similar purposes.""" # ===== SAXNOTSUPPORTEDEXCEPTION ===== class SAXNotSupportedException(SAXException): """Exception class for an unsupported operation. An XMLReader will raise this exception when a service it cannot perform is requested (specifically setting a state or value). 
SAX applications and extensions may use this class for similar purposes.""" # ===== SAXNOTSUPPORTEDEXCEPTION ===== class SAXReaderNotAvailable(SAXNotSupportedException): """Exception class for a missing driver. An XMLReader module (driver) should raise this exception when it is first imported, e.g. when a support module cannot be imported. It also may be raised during parsing, e.g. if executing an external program is not permitted."""
gpl-2.0
cloudsigma/cloud-init
cloudinit/sources/helpers/openstack.py
1
15113
# vi: ts=4 expandtab # # Copyright (C) 2012 Canonical Ltd. # Copyright (C) 2012 Yahoo! Inc. # # Author: Scott Moser <scott.moser@canonical.com> # Author: Joshua Harlow <harlowja@yahoo-inc.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import abc import base64 import copy import os from cloudinit import ec2_utils from cloudinit import log as logging from cloudinit import sources from cloudinit import url_helper from cloudinit import util # For reference: http://tinyurl.com/laora4c LOG = logging.getLogger(__name__) FILES_V1 = { # Path <-> (metadata key name, translator function, default value) 'etc/network/interfaces': ('network_config', lambda x: x, ''), 'meta.js': ('meta_js', util.load_json, {}), "root/.ssh/authorized_keys": ('authorized_keys', lambda x: x, ''), } KEY_COPIES = ( # Cloud-init metadata names <-> (metadata key, is required) ('local-hostname', 'hostname', False), ('instance-id', 'uuid', True), ) OS_VERSIONS = ( '2012-08-10', # folsom '2013-04-04', # grizzly '2013-10-17', # havana ) OS_LATEST = 'latest' class NonReadable(IOError): pass class BrokenMetadata(IOError): pass class SourceMixin(object): def _ec2_name_to_device(self, name): if not self.ec2_metadata: return None bdm = self.ec2_metadata.get('block-device-mapping', {}) for (ent_name, device) in bdm.items(): if name == ent_name: return device return None def get_public_ssh_keys(self): name = "public_keys" if self.version == 1: name = "public-keys" return 
sources.normalize_pubkey_data(self.metadata.get(name)) def _os_name_to_device(self, name): device = None try: criteria = 'LABEL=%s' % (name) if name == 'swap': criteria = 'TYPE=%s' % (name) dev_entries = util.find_devs_with(criteria) if dev_entries: device = dev_entries[0] except util.ProcessExecutionError: pass return device def _validate_device_name(self, device): if not device: return None if not device.startswith("/"): device = "/dev/%s" % device if os.path.exists(device): return device # Durn, try adjusting the mapping remapped = self._remap_device(os.path.basename(device)) if remapped: LOG.debug("Remapped device name %s => %s", device, remapped) return remapped return None def device_name_to_device(self, name): # Translate a 'name' to a 'physical' device if not name: return None # Try the ec2 mapping first names = [name] if name == 'root': names.insert(0, 'ami') if name == 'ami': names.append('root') device = None LOG.debug("Using ec2 style lookup to find device %s", names) for n in names: device = self._ec2_name_to_device(n) device = self._validate_device_name(device) if device: break # Try the openstack way second if not device: LOG.debug("Using openstack style lookup to find device %s", names) for n in names: device = self._os_name_to_device(n) device = self._validate_device_name(device) if device: break # Ok give up... 
if not device: return None else: LOG.debug("Mapped %s to device %s", name, device) return device class BaseReader(object): __metaclass__ = abc.ABCMeta def __init__(self, base_path): self.base_path = base_path @abc.abstractmethod def _path_join(self, base, *add_ons): pass @abc.abstractmethod def _path_exists(self, path): pass @abc.abstractmethod def _path_read(self, path): pass @abc.abstractmethod def _read_ec2_metadata(self): pass def _read_content_path(self, item): path = item.get('content_path', '').lstrip("/") path_pieces = path.split("/") valid_pieces = [p for p in path_pieces if len(p)] if not valid_pieces: raise BrokenMetadata("Item %s has no valid content path" % (item)) path = self._path_join(self.base_path, "openstack", *path_pieces) return self._path_read(path) def _find_working_version(self, version): search_versions = [version] + list(OS_VERSIONS) for potential_version in search_versions: if not potential_version: continue path = self._path_join(self.base_path, "openstack", potential_version) if self._path_exists(path): if potential_version != version: LOG.warn("Version '%s' not available, attempting to use" " version '%s' instead", version, potential_version) return potential_version LOG.warn("Version '%s' not available, attempting to use '%s'" " instead", version, OS_LATEST) return OS_LATEST def read_v2(self, version=None): """Reads a version 2 formatted location. Return a dict with metadata, userdata, ec2-metadata, dsmode, network_config, files and version (2). If not a valid location, raise a NonReadable exception. """ def datafiles(version): files = {} files['metadata'] = ( # File path to read self._path_join("openstack", version, 'meta_data.json'), # Is it required? 
True, # Translator function (applied after loading) util.load_json, ) files['userdata'] = ( self._path_join("openstack", version, 'user_data'), False, lambda x: x, ) files['vendordata'] = ( self._path_join("openstack", version, 'vendor_data.json'), False, util.load_json, ) return files version = self._find_working_version(version) results = { 'userdata': '', 'version': 2, } data = datafiles(version) for (name, (path, required, translator)) in data.iteritems(): path = self._path_join(self.base_path, path) data = None found = False if self._path_exists(path): try: data = self._path_read(path) except IOError: raise NonReadable("Failed to read: %s" % path) found = True else: if required: raise NonReadable("Missing mandatory path: %s" % path) if found and translator: try: data = translator(data) except Exception as e: raise BrokenMetadata("Failed to process " "path %s: %s" % (path, e)) if found: results[name] = data metadata = results['metadata'] if 'random_seed' in metadata: random_seed = metadata['random_seed'] try: metadata['random_seed'] = base64.b64decode(random_seed) except (ValueError, TypeError) as e: raise BrokenMetadata("Badly formatted metadata" " random_seed entry: %s" % e) # load any files that were provided files = {} metadata_files = metadata.get('files', []) for item in metadata_files: if 'path' not in item: continue path = item['path'] try: files[path] = self._read_content_path(item) except Exception as e: raise BrokenMetadata("Failed to read provided " "file %s: %s" % (path, e)) results['files'] = files # The 'network_config' item in metadata is a content pointer # to the network config that should be applied. It is just a # ubuntu/debian '/etc/network/interfaces' file. 
net_item = metadata.get("network_config", None) if net_item: try: results['network_config'] = self._read_content_path(net_item) except IOError as e: raise BrokenMetadata("Failed to read network" " configuration: %s" % (e)) # To openstack, user can specify meta ('nova boot --meta=key=value') # and those will appear under metadata['meta']. # if they specify 'dsmode' they're indicating the mode that they intend # for this datasource to operate in. try: results['dsmode'] = metadata['meta']['dsmode'] except KeyError: pass # Read any ec2-metadata (if applicable) results['ec2-metadata'] = self._read_ec2_metadata() # Perform some misc. metadata key renames... for (target_key, source_key, is_required) in KEY_COPIES: if is_required and source_key not in metadata: raise BrokenMetadata("No '%s' entry in metadata" % source_key) if source_key in metadata: metadata[target_key] = metadata.get(source_key) return results class ConfigDriveReader(BaseReader): def __init__(self, base_path): super(ConfigDriveReader, self).__init__(base_path) def _path_join(self, base, *add_ons): components = [base] + list(add_ons) return os.path.join(*components) def _path_exists(self, path): return os.path.exists(path) def _path_read(self, path): return util.load_file(path) def _read_ec2_metadata(self): path = self._path_join(self.base_path, 'ec2', 'latest', 'meta-data.json') if not self._path_exists(path): return {} else: try: return util.load_json(self._path_read(path)) except Exception as e: raise BrokenMetadata("Failed to process " "path %s: %s" % (path, e)) def read_v1(self): """Reads a version 1 formatted location. Return a dict with metadata, userdata, dsmode, files and version (1). If not a valid path, raise a NonReadable exception. 
""" found = {} for name in FILES_V1.keys(): path = self._path_join(self.base_path, name) if self._path_exists(path): found[name] = path if len(found) == 0: raise NonReadable("%s: no files found" % (self.base_path)) md = {} for (name, (key, translator, default)) in FILES_V1.iteritems(): if name in found: path = found[name] try: contents = self._path_read(path) except IOError: raise BrokenMetadata("Failed to read: %s" % path) try: md[key] = translator(contents) except Exception as e: raise BrokenMetadata("Failed to process " "path %s: %s" % (path, e)) else: md[key] = copy.deepcopy(default) keydata = md['authorized_keys'] meta_js = md['meta_js'] # keydata in meta_js is preferred over "injected" keydata = meta_js.get('public-keys', keydata) if keydata: lines = keydata.splitlines() md['public-keys'] = [l for l in lines if len(l) and not l.startswith("#")] # config-drive-v1 has no way for openstack to provide the instance-id # so we copy that into metadata from the user input if 'instance-id' in meta_js: md['instance-id'] = meta_js['instance-id'] results = { 'version': 1, 'metadata': md, } # allow the user to specify 'dsmode' in a meta tag if 'dsmode' in meta_js: results['dsmode'] = meta_js['dsmode'] # config-drive-v1 has no way of specifying user-data, so the user has # to cheat and stuff it in a meta tag also. results['userdata'] = meta_js.get('user-data', '') # this implementation does not support files other than # network/interfaces and authorized_keys... 
results['files'] = {} return results class MetadataReader(BaseReader): def __init__(self, base_url, ssl_details=None, timeout=5, retries=5): super(MetadataReader, self).__init__(base_url) self.ssl_details = ssl_details self.timeout = float(timeout) self.retries = int(retries) def _path_read(self, path): response = url_helper.readurl(path, retries=self.retries, ssl_details=self.ssl_details, timeout=self.timeout) return response.contents def _path_exists(self, path): def should_retry_cb(request, cause): try: code = int(cause.code) if code >= 400: return False except (TypeError, ValueError): # Older versions of requests didn't have a code. pass return True try: response = url_helper.readurl(path, retries=self.retries, ssl_details=self.ssl_details, timeout=self.timeout, exception_cb=should_retry_cb) return response.ok() except IOError: return False def _path_join(self, base, *add_ons): return url_helper.combine_url(base, *add_ons) def _read_ec2_metadata(self): return ec2_utils.get_instance_metadata(ssl_details=self.ssl_details, timeout=self.timeout, retries=self.retries)
gpl-3.0
smoitra87/gerbil
deepnet/sparse_coder.py
13
9682
from neuralnet import * from sparse_code_layer import * import scipy.linalg class SparseCoder(NeuralNet): def SetLayerAndEdgeClass(self): self.LayerClass = SparseCodeLayer self.EdgeClass = Edge def Show(self): encoder = self.encoder.params['weight'].asarray() decoder = self.decoder.params['weight'].asarray() recon = self.input_layer.approximator.asarray() recep_field = self.encoder.proto.receptive_field_width rows = self.encoder.proto.display_rows cols = self.encoder.proto.display_cols visualize.display_wsorted(encoder, recep_field, rows, cols, 1, title='encoder') visualize.display_wsorted(decoder.T, recep_field, rows, cols, 2, title='decoder') visualize.display_w(recon[:, :100], recep_field, 10, 10, 3, title='reconstructions') visualize.display_hidden(self.code_layer.state.asarray(), 4, 'code distribution', prob=False) def Sort(self): assert len(self.layer) == 2 assert len(self.edge) == 2 if self.layer[0].is_input: self.input_layer = self.layer[0] self.code_layer = self.layer[1] else: self.input_layer = self.layer[1] self.code_layer = self.layer[0] if self.edge[0].node1 == self.input_layer: self.encoder = self.edge[0] self.decoder = self.edge[1] else: self.encoder = self.edge[1] self.decoder = self.edge[0] return [self.input_layer, self.code_layer] def SolveForZ(self): """Solve for z in (alpha + beta.wd.wd^T)z = wd . x + alpha z_est - gamma exactly. Output goes in z. temp is a matrix to store wd^Twd. """ input_layer = self.input_layer code_layer = self.code_layer z = code_layer.state wd = self.decoder.params['weight'] hyp = code_layer.hyperparams alpha = hyp.sc_alpha beta = hyp.sc_beta gamma = hyp.sc_gamma temp = code_layer.m_by_m temp2 = code_layer.deriv eye_m_by_m = code_layer.eye_m_by_m cm.dot(wd, wd.T, target=temp) temp.mult(beta) temp.add(alpha) z_est.mult(alpha, target=temp2) temp2.add_dot(wd, x, mult=beta) temp2.subtract(gamma) # Copy matrices to cpu. 
A = temp.asarray() B = temp2.asarray() # Solve AZ = B Z = scipy.linalg.solve(A, B, overwrite_a=True, overwrite_b=True) # Copy result back to gpu. z.overwrite(Z) def IterateForZ(self, train=False): """Solve for z in (alpha + beta.wd.wd^T)z = wd . x + alpha z_est - gamma using gradient descent. Output goes in z. temp is a matrix to store wd^Twd. """ input_layer = self.input_layer code_layer = self.code_layer epsilon = 0.01 steps = 20 z = code_layer.state wd = self.decoder.params['weight'] hyp = code_layer.hyperparams alpha = hyp.sc_alpha beta = hyp.sc_beta gamma = hyp.sc_gamma temp = code_layer.m_by_m temp2 = code_layer.deriv temp3 = code_layer.temp3 # This is bad! use better names. grad = code_layer.grad z_est = code_layer.approximator avg_models = hyp.dropout and (not hyp.dropout or not train) cm.dot(wd, wd.T, target=temp) temp.mult(beta) if avg_models: temp.mult((1.0 - hyp.dropout_prob)**2) temp.mult_diagonal(1. / (1.0 - hyp.dropout_prob)) temp.add_diagonal(alpha) z_est.mult(alpha, target=temp2) if avg_models: temp2.add_dot(wd, input_layer.state, mult=beta * (1.0 - hyp.dropout_prob)) #temp2.add_dot(wd, input_layer.state, mult=beta) elif hyp.dropout: temp2.add_dot(wd, input_layer.state, mult=beta) temp2.mult(code_layer.mask) else: temp2.add_dot(wd, input_layer.state, mult=beta) z.assign(z_est) #pdb.set_trace() for i in range(steps): cm.dot(temp, z, target=grad) grad.subtract(temp2) z.sign(target=temp3) grad.add_mult(temp3, alpha=gamma) if hyp.dropout and train: #code_layer.mask.fill_with_rand() #code_layer.mask.greater_than(hyp.dropout_prob) grad.mult(code_layer.mask) z.add_mult(grad, alpha=-epsilon) #pdb.set_trace() def ForwardPropagate(self, train=False, method='iter'): """Loads input and computes the sparse code for it.""" input_layer = self.input_layer code_layer = self.code_layer # Load data into state. input_layer.GetData() # Run it through the encoder. 
inputs = input_layer.state we = self.encoder.params['weight'] be = code_layer.params['bias'] scale = code_layer.params['scale'] hyp = code_layer.hyperparams code_approx = code_layer.approximator cm.dot(we.T, inputs, target=code_approx) code_approx.add_col_vec(be) code_layer.ApplyActivation(code_approx) code_approx.mult_by_col(scale) if hyp.dropout and train: code_layer.mask.fill_with_rand() code_layer.mask.greater_than(hyp.dropout_prob) code_approx.mult(code_layer.mask) # Infer z. if train: if method == 'iter': self.IterateForZ(train=train) elif method == 'exact': self.SolveForZ() else: if method == 'iter': self.IterateForZ(train=train) #code_layer.state.assign(code_approx) def GetLoss(self, train=False): """Computes loss and its derivatives.""" input_layer = self.input_layer code_layer = self.code_layer # Decode z. hyp = code_layer.hyperparams wd = self.decoder.params['weight'] bd = input_layer.params['bias'] z = code_layer.state input_recon = input_layer.approximator cm.dot(wd.T, z, target=input_recon) input_recon.add_col_vec(bd) # Compute loss function. code_approx = code_layer.approximator alpha = hyp.sc_alpha gamma = hyp.sc_gamma beta = hyp.sc_beta input_recon.subtract(input_layer.state, target=input_layer.deriv) # input reconstruction residual. code_approx.subtract(z, target=code_layer.deriv) # code construction residual. cm.abs(z, target=code_layer.temp) # L1 norm of code. code_layer.temp.sum(axis=1, target=code_layer.dimsize) code_layer.dimsize.sum(axis=0, target=code_layer.unitcell) loss1 = 0.5 * beta * input_layer.deriv.euclid_norm()**2 loss2 = 0.5 * alpha * code_layer.deriv.euclid_norm()**2 loss3 = gamma * code_layer.unitcell.euclid_norm() loss4 = loss1 + loss2 + loss3 err = [] for l in [loss1, loss2, loss3, loss4]: perf = deepnet_pb2.Metrics() perf.MergeFrom(code_layer.proto.performance_stats) perf.count = self.batchsize perf.error = l err.append(perf) return err def UpdateParameters(self, step): """Update the encoder and decoder weigths and biases. 
Args: step: Time step of training. """ numcases = self.batchsize code_layer = self.code_layer input_layer = self.input_layer encoder = self.encoder decoder = self.decoder wd = decoder.params['weight'] bd = input_layer.params['bias'] z = code_layer.state inputs = input_layer.state we = encoder.params['weight'] be = code_layer.params['bias'] scale = code_layer.params['scale'] code_approx = code_layer.approximator hyp = code_layer.hyperparams alpha = hyp.sc_alpha beta = hyp.sc_beta gamma = hyp.sc_gamma enc_hyp = encoder.hyperparams dec_hyp = decoder.hyperparams # Derivatives for decoder weights. deriv = input_layer.deriv momentum, epsilon = decoder.GetMomentumAndEpsilon(step) wd_delta = self.decoder.grad_weight wd_delta.mult(momentum) wd_delta.add_dot(z, deriv.T, beta / numcases) if dec_hyp.apply_l2_decay: wd_delta.add_mult(wd, alpha=dec_hyp.l2_decay) # Derivatives for decoder bias. momentum, epsilon = input_layer.GetMomentumAndEpsilon(step) bd_delta = input_layer.grad_bias bd_delta.mult(momentum) bd_delta.add_sums(deriv, axis=1, mult=beta / numcases) # Derivatives for scale. deriv = code_layer.deriv code_approx.div_by_col(scale) scale_delta = code_layer.grad_scale scale_delta.mult(momentum) temp = code_layer.temp3 code_approx.mult(deriv, target=temp) scale_delta.add_sums(temp, axis=1, mult=alpha / numcases) # Derivatives for encoder weights. code_layer.deriv.mult_by_col(scale) code_layer.ComputeDeriv(code_approx) # backprop through non-linearity. deriv = code_layer.deriv momentum, epsilon = encoder.GetMomentumAndEpsilon(step) we_delta = self.encoder.grad_weight we_delta.mult(momentum) we_delta.add_dot(inputs, deriv.T, alpha / numcases) if enc_hyp.apply_l2_decay: we_delta.add_mult(we, alpha=enc_hyp.l2_decay) # Derivatives for encoder bias. momentum, epsilon = code_layer.GetMomentumAndEpsilon(step) be_delta = code_layer.grad_bias be_delta.mult(momentum) be_delta.add_sums(deriv, axis=1, mult=alpha / numcases) # Apply the updates. 
scale.add_mult(scale_delta, -epsilon) bd.add_mult(bd_delta, -epsilon) wd.add_mult(wd_delta, -epsilon) be.add_mult(be_delta, -epsilon) we.add_mult(we_delta, -epsilon) if dec_hyp.apply_weight_norm: wd.norm_limit(dec_hyp.weight_norm, axis=0) if enc_hyp.apply_weight_norm: we.norm_limit(enc_hyp.weight_norm, axis=0) def EvaluateOneBatch(self): """Evaluate on one mini-batch. Args: step: Training step. """ self.ForwardPropagate() return self.GetLoss() def TrainOneBatch(self, step): """Train using one mini-batch. Args: step: Training step. """ """ if step > self.code_layer.hyperparams.switch_on_sc_alpha_after: self.code_layer.hyperparams.sc_alpha = 1.0 """ self.ForwardPropagate(train=True) losses = self.GetLoss(train=True) self.UpdateParameters(step) return losses
bsd-3-clause
theshadowx/enigma2
lib/python/Screens/HelpMenu.py
13
1630
from Screen import Screen from Components.Pixmap import Pixmap, MovingPixmap from Components.Label import Label from Components.ActionMap import ActionMap from Components.HelpMenuList import HelpMenuList from Screens.Rc import Rc class HelpMenu(Screen, Rc): def __init__(self, session, list): Screen.__init__(self, session) self.onSelChanged = [ ] self["list"] = HelpMenuList(list, self.close) self["list"].onSelChanged.append(self.SelectionChanged) Rc.__init__(self) self["long_key"] = Label("") self["actions"] = ActionMap(["WizardActions"], { "ok": self["list"].ok, "back": self.close, }, -1) self.onLayoutFinish.append(self.SelectionChanged) def SelectionChanged(self): self.clearSelectedKeys() selection = self["list"].getCurrent() if selection: selection = selection[3] #arrow = self["arrowup"] print "selection:", selection longText = "" if selection and len(selection) > 1: if selection[1] == "SHIFT": self.selectKey("SHIFT") elif selection[1] == "long": longText = _("Long key press") self["long_key"].setText(longText) self.selectKey(selection[0]) #if selection is None: print "select arrow" # arrow.moveTo(selection[1], selection[2], 1) # arrow.startMoving() # arrow.show() class HelpableScreen: def __init__(self): self["helpActions"] = ActionMap( [ "HelpActions" ], { "displayHelp": self.showHelp, }) def showHelp(self): self.session.openWithCallback(self.callHelpAction, HelpMenu, self.helpList) def callHelpAction(self, *args): if args: (actionmap, context, action) = args actionmap.action(context, action)
gpl-2.0
NERC-CEH/jules-jasmin
job_runner/job_runner/config/environment.py
1
1725
"""Pylons environment configuration""" import os from mako.lookup import TemplateLookup from pylons.configuration import PylonsConfig from pylons.error import handle_mako_error import job_runner.lib.app_globals as app_globals import job_runner.lib.helpers from job_runner.config.routing import make_map def load_environment(global_conf, app_conf): """Configure the Pylons environment via the ``pylons.config`` object """ config = PylonsConfig() # Pylons paths root = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) paths = dict(root=root, controllers=os.path.join(root, 'controllers'), static_files=os.path.join(root, 'public'), templates=[os.path.join(root, 'templates')]) # Initialize config with the basic options config.init_app(global_conf, app_conf, package='job_runner', paths=paths) config['routes.map'] = make_map(config) config['pylons.app_globals'] = app_globals.Globals(config) config['pylons.h'] = job_runner.lib.helpers # Setup cache object as early as possible import pylons pylons.cache._push_object(config['pylons.app_globals'].cache) # Create the Mako TemplateLookup, with the default auto-escaping config['pylons.app_globals'].mako_lookup = TemplateLookup( directories=paths['templates'], error_handler=handle_mako_error, module_directory=os.path.join(app_conf['cache_dir'], 'templates'), input_encoding='utf-8', default_filters=['escape'], imports=['from markupsafe import escape']) # CONFIGURATION OPTIONS HERE (note: all config options will override # any Pylons config options) return config
gpl-2.0
neerajvashistha/pa-dude
lib/python2.7/site-packages/docx/opc/part.py
13
8157
# encoding: utf-8 """ Open Packaging Convention (OPC) objects related to package parts. """ from __future__ import ( absolute_import, division, print_function, unicode_literals ) from .compat import cls_method_fn from .oxml import serialize_part_xml from ..oxml import parse_xml from .packuri import PackURI from .rel import Relationships from .shared import lazyproperty class Part(object): """ Base class for package parts. Provides common properties and methods, but intended to be subclassed in client code to implement specific part behaviors. """ def __init__(self, partname, content_type, blob=None, package=None): super(Part, self).__init__() self._partname = partname self._content_type = content_type self._blob = blob self._package = package def after_unmarshal(self): """ Entry point for post-unmarshaling processing, for example to parse the part XML. May be overridden by subclasses without forwarding call to super. """ # don't place any code here, just catch call if not overridden by # subclass pass def before_marshal(self): """ Entry point for pre-serialization processing, for example to finalize part naming if necessary. May be overridden by subclasses without forwarding call to super. """ # don't place any code here, just catch call if not overridden by # subclass pass @property def blob(self): """ Contents of this package part as a sequence of bytes. May be text or binary. Intended to be overridden by subclasses. Default behavior is to return load blob. """ return self._blob @property def content_type(self): """ Content type of this part. """ return self._content_type def drop_rel(self, rId): """ Remove the relationship identified by *rId* if its reference count is less than 2. Relationships with a reference count of 0 are implicit relationships. 
""" if self._rel_ref_count(rId) < 2: del self.rels[rId] @classmethod def load(cls, partname, content_type, blob, package): return cls(partname, content_type, blob, package) def load_rel(self, reltype, target, rId, is_external=False): """ Return newly added |_Relationship| instance of *reltype* between this part and *target* with key *rId*. Target mode is set to ``RTM.EXTERNAL`` if *is_external* is |True|. Intended for use during load from a serialized package, where the rId is well-known. Other methods exist for adding a new relationship to a part when manipulating a part. """ return self.rels.add_relationship(reltype, target, rId, is_external) @property def package(self): """ |OpcPackage| instance this part belongs to. """ return self._package @property def partname(self): """ |PackURI| instance holding partname of this part, e.g. '/ppt/slides/slide1.xml' """ return self._partname @partname.setter def partname(self, partname): if not isinstance(partname, PackURI): tmpl = "partname must be instance of PackURI, got '%s'" raise TypeError(tmpl % type(partname).__name__) self._partname = partname def part_related_by(self, reltype): """ Return part to which this part has a relationship of *reltype*. Raises |KeyError| if no such relationship is found and |ValueError| if more than one such relationship is found. Provides ability to resolve implicitly related part, such as Slide -> SlideLayout. """ return self.rels.part_with_reltype(reltype) def relate_to(self, target, reltype, is_external=False): """ Return rId key of relationship of *reltype* to *target*, from an existing relationship if there is one, otherwise a newly created one. """ if is_external: return self.rels.get_or_add_ext_rel(reltype, target) else: rel = self.rels.get_or_add(reltype, target) return rel.rId @property def related_parts(self): """ Dictionary mapping related parts by rId, so child objects can resolve explicit relationships present in the part XML, e.g. sldIdLst to a specific |Slide| instance. 
""" return self.rels.related_parts @lazyproperty def rels(self): """ |Relationships| instance holding the relationships for this part. """ return Relationships(self._partname.baseURI) def target_ref(self, rId): """ Return URL contained in target ref of relationship identified by *rId*. """ rel = self.rels[rId] return rel.target_ref def _rel_ref_count(self, rId): """ Return the count of references in this part's XML to the relationship identified by *rId*. """ rIds = self._element.xpath('//@r:id') return len([_rId for _rId in rIds if _rId == rId]) class PartFactory(object): """ Provides a way for client code to specify a subclass of |Part| to be constructed by |Unmarshaller| based on its content type and/or a custom callable. Setting ``PartFactory.part_class_selector`` to a callable object will cause that object to be called with the parameters ``content_type, reltype``, once for each part in the package. If the callable returns an object, it is used as the class for that part. If it returns |None|, part class selection falls back to the content type map defined in ``PartFactory.part_type_for``. If no class is returned from either of these, the class contained in ``PartFactory.default_part_type`` is used to construct the part, which is by default ``opc.package.Part``. """ part_class_selector = None part_type_for = {} default_part_type = Part def __new__(cls, partname, content_type, reltype, blob, package): PartClass = None if cls.part_class_selector is not None: part_class_selector = cls_method_fn(cls, 'part_class_selector') PartClass = part_class_selector(content_type, reltype) if PartClass is None: PartClass = cls._part_cls_for(content_type) return PartClass.load(partname, content_type, blob, package) @classmethod def _part_cls_for(cls, content_type): """ Return the custom part class registered for *content_type*, or the default part class if no custom class is registered for *content_type*. 
""" if content_type in cls.part_type_for: return cls.part_type_for[content_type] return cls.default_part_type class XmlPart(Part): """ Base class for package parts containing an XML payload, which is most of them. Provides additional methods to the |Part| base class that take care of parsing and reserializing the XML payload and managing relationships to other parts. """ def __init__(self, partname, content_type, element, package): super(XmlPart, self).__init__( partname, content_type, package=package ) self._element = element @property def blob(self): return serialize_part_xml(self._element) @property def element(self): """ The root XML element of this XML part. """ return self._element @classmethod def load(cls, partname, content_type, blob, package): element = parse_xml(blob) return cls(partname, content_type, element, package) @property def part(self): """ Part of the parent protocol, "children" of the document will not know the part that contains them so must ask their parent object. That chain of delegation ends here for child objects. """ return self
mit
wnoc-drexel/gem5-stable
src/mem/AddrMapper.py
69
3530
# Copyright (c) 2012 ARM Limited # All rights reserved. # # The license below extends only to copyright in the software and shall # not be construed as granting a license to any other intellectual # property including but not limited to intellectual property relating # to a hardware implementation of the functionality of the software # licensed hereunder. You may use the software subject to the license # terms below provided that you ensure that this notice is replicated # unmodified and in its entirety in all distributions of the software, # modified or unmodified, in source code or in binary form. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Authors: Andreas Hansson from m5.params import * from MemObject import MemObject # An address mapper changes the packet addresses in going from the # slave port side of the mapper to the master port side. When the # slave port is queried for the address ranges, it also performs the # necessary range updates. Note that snoop requests that travel from # the master port (i.e. the memory side) to the slave port are # currently not modified. class AddrMapper(MemObject): type = 'AddrMapper' cxx_header = 'mem/addr_mapper.hh' abstract = True # one port in each direction master = MasterPort("Master port") slave = SlavePort("Slave port") # Range address mapper that maps a set of original ranges to a set of # remapped ranges, where a specific range is of the same size # (original and remapped), only with an offset. class RangeAddrMapper(AddrMapper): type = 'RangeAddrMapper' cxx_header = 'mem/addr_mapper.hh' # These two vectors should be the exact same length and each range # should be the exact same size. Each range in original_ranges is # mapped to the corresponding element in the remapped_ranges. Note # that the same range can occur multiple times in the remapped # ranges for address aliasing. original_ranges = VectorParam.AddrRange( "Ranges of memory that should me remapped") remapped_ranges = VectorParam.AddrRange( "Ranges of memory that are being mapped to")
bsd-3-clause
joausaga/participa
cparte/test.py
1
9070
from django.test import TestCase from cparte.models import Channel import channel_middleware import ConfigParser import json import re import tweepy class TwitterTestCase(TestCase): fixtures = ['cparte.json'] url = "https://twitter.com/" config = ConfigParser.ConfigParser() config.read('cparte/config') def setUp(self): auth = tweepy.OAuthHandler(self.config.get('twitter_api','consumer_key'), self.config.get('twitter_api','consumer_secret')) auth.set_access_token(self.config.get('twitter_api','token'), self.config.get('twitter_api','token_secret')) api = tweepy.API(auth) api.retry_count = 2 api.retry_delay = 5 self.testing_posts = self.get_array_testing_tweets() tweet_ids = self.get_id_testing_tweets() statuses = api.statuses_lookup(tweet_ids) self.save_testing_statuses(statuses) channel = Channel.objects.get(name="twitter") session_info = channel_middleware.get_session_info([1]) channel.connect("", json.dumps(session_info)) self.limit_incorrect_inputs = self.config.getint('app', 'limit_wrong_input') self.limit_incorrect_requests = self.config.getint('app', 'limit_wrong_request') # Add here id of testing tweets def get_array_testing_tweets(self): return [{'id': "513014488925077505", 'type': 'correct_post_new_user_new_challenge', 'status': ''}, {'id': "513014347379908608", 'type': 'incorrect_post_new_user_new_challenge', 'status': ''}, {'id': "509884504974950401", 'type': 'correct_post_existing_user_new_challenge', 'status': ''}, {'id': "513022343044542464", 'type': 'incorrect_post_existing_user_new_challenge', 'status': ''}, {'id': "509053831166980096", 'type': 'correct_post_existing_user_answered_challenge', 'status': ''}, {'id': "513020781341573120", 'type': 'incorrect_post_existing_user_answered_challenge', 'status': ''}] def get_id_testing_tweets(self): tweet_ids = [] for tweet in self.testing_posts: tweet_ids.append(tweet['id']) return tweet_ids def save_testing_statuses(self, statuses): for status in statuses: for tweet in self.testing_posts: if tweet['id'] == 
status.id_str: tweet['status'] = status def to_dict(self, status): try: if status.retweeted_status: retweet = self.get_tweet_dict(status.retweeted_status) else: retweet = None except AttributeError: retweet = None status_dict = self.get_tweet_dict(status) status_dict["org_post"] = retweet return status_dict def get_tweet_dict(self, status): author = status.author # Extract tweet source source = re.sub("(<[a|A][^>]*>|</[a|A]>)", "", status.source) # Source is equal to Twitter for Websites if the tweet was posted through twitter social sharing button if source == "Twitter for Websites": through_sharing_button = True else: through_sharing_button = False return {"id": status.id_str, "text": status.text, "parent_id": status.in_reply_to_status_id_str, "datetime": status.created_at, "url": self.build_url_post(status), "votes": 0, "re_posts": status.retweet_count, "bookmarks": status.favorite_count, "hashtags": self.build_hashtags_array(status), "source": source, "sharing_post": through_sharing_button, "author": {"id": author.id_str, "name": author.name, "screen_name": author.screen_name, "print_name": "@" + author.screen_name, "url": self.url + author.screen_name, "description": author.description, "language": author.lang, "posts_count": author.statuses_count, "friends": author.friends_count, "followers": author.followers_count, "groups": author.listed_count}, "channel": "twitter" } def build_url_post(self, status): return self.url + status.author.screen_name + "/status/" + status.id_str def build_hashtags_array(self, status): hashtags = [] for hashtag in status.entities['hashtags']: hashtags.append(hashtag['text'].lower().strip()) return hashtags class TestAppBehavior(TwitterTestCase): def test_manage_post_new_user_correct_answer_to_new_challenge(self): # Input: post from a new author and containing a correct answer # Output: a message asking for extra info correct_post_new_user_new_challenge = None for testing_post in self.testing_posts: if testing_post['type'] == 
"correct_post_new_user_new_challenge": correct_post_new_user_new_challenge = self.to_dict(testing_post['status']) output = channel_middleware.process_post(correct_post_new_user_new_challenge, "twitter") self.assertNotEqual(output.category, None) self.assertEqual(output.category, "request_author_extrainfo") def test_manage_post_new_user_incorrect_answer_to_new_challenge(self): # Input: post from a new author and containing an unexpected answer # Output: a message notifying the author that his/her contribution is in an incorrect format incorrect_post_new_user_new_challenge = None for testing_post in self.testing_posts: if testing_post['type'] == "incorrect_post_new_user_new_challenge": incorrect_post_new_user_new_challenge = self.to_dict(testing_post['status']) output = channel_middleware.process_post(incorrect_post_new_user_new_challenge, "twitter") self.assertNotEqual(output.category, None) self.assertEqual(output.category, "incorrect_answer") def test_manage_post_existing_user_correct_answer_to_new_challenge(self): # Input: post from an existing author and containing an expected answer # Output: a message thanking the author for his/her contribution correct_post_existing_user_new_challenge = None for testing_post in self.testing_posts: if testing_post['type'] == "correct_post_existing_user_new_challenge": correct_post_existing_user_new_challenge = self.to_dict(testing_post['status']) output = channel_middleware.process_post(correct_post_existing_user_new_challenge, "twitter") self.assertNotEqual(output.category, None) self.assertEqual(output.category, "thanks_contribution") def test_manage_post_existing_user_correct_answer_to_previously_answered_challenge(self): # Input: post from an existing author and containing an expected answer to a previously answered challenge # Output: a message asking the author to change his/her previous contribution correct_post_existing_user_answered_challenge = None for testing_post in self.testing_posts: if testing_post['type'] == 
"correct_post_existing_user_answered_challenge": correct_post_existing_user_answered_challenge = self.to_dict(testing_post['status']) output = channel_middleware.process_post(correct_post_existing_user_answered_challenge, "twitter") self.assertNotEqual(output.category, None) self.assertEqual(output.category, "ask_change_contribution") def test_manage_post_existing_user_incorrect_answer_to_new_challenge(self): # Input: post from an existing author and containing an unexpected answer # Output: a message notifying the author that his/her contribution is in an incorrect format incorrect_post_existing_user_new_challenge = None for testing_post in self.testing_posts: if testing_post['type'] == "incorrect_post_existing_user_new_challenge": incorrect_post_existing_user_new_challenge = self.to_dict(testing_post['status']) output = channel_middleware.process_post(incorrect_post_existing_user_new_challenge, "twitter") self.assertNotEqual(output.category, None) self.assertEqual(output.category, "incorrect_answer") def test_manage_post_existing_user_incorrect_answer_to_previously_answered_challenge(self): # Input: post from an existing author and containing an expected answer # Output: a message notifying the author that his/her contribution is in an incorrect format incorrect_post_existing_user_answered_challenge = None for testing_post in self.testing_posts: if testing_post['type'] == "incorrect_post_existing_user_answered_challenge": incorrect_post_existing_user_answered_challenge = self.to_dict(testing_post['status']) output = channel_middleware.process_post(incorrect_post_existing_user_answered_challenge, "twitter") self.assertNotEqual(output.category, None) self.assertEqual(output.category, "incorrect_answer")
mit
deltreey/ansible
lib/ansible/utils/unicode.py
88
11283
# (c) 2012-2014, Toshio Kuraotmi <a.badger@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type from six import string_types, text_type, binary_type, PY3 # to_bytes and to_unicode were written by Toshio Kuratomi for the # python-kitchen library https://pypi.python.org/pypi/kitchen # They are licensed in kitchen under the terms of the GPLv2+ # They were copied and modified for use in ansible by Toshio in Jan 2015 # (simply removing the deprecated features) #: Aliases for the utf-8 codec _UTF8_ALIASES = frozenset(('utf-8', 'UTF-8', 'utf8', 'UTF8', 'utf_8', 'UTF_8', 'utf', 'UTF', 'u8', 'U8')) #: Aliases for the latin-1 codec _LATIN1_ALIASES = frozenset(('latin-1', 'LATIN-1', 'latin1', 'LATIN1', 'latin', 'LATIN', 'l1', 'L1', 'cp819', 'CP819', '8859', 'iso8859-1', 'ISO8859-1', 'iso-8859-1', 'ISO-8859-1')) # EXCEPTION_CONVERTERS is defined below due to using to_unicode if PY3: basestring = (str, bytes) def to_unicode(obj, encoding='utf-8', errors='replace', nonstring=None): '''Convert an object into a :class:`unicode` string :arg obj: Object to convert to a :class:`unicode` string. This should normally be a byte :class:`str` :kwarg encoding: What encoding to try converting the byte :class:`str` as. 
Defaults to :term:`utf-8` :kwarg errors: If errors are found while decoding, perform this action. Defaults to ``replace`` which replaces the invalid bytes with a character that means the bytes were unable to be decoded. Other values are the same as the error handling schemes in the `codec base classes <http://docs.python.org/library/codecs.html#codec-base-classes>`_. For instance ``strict`` which raises an exception and ``ignore`` which simply omits the non-decodable characters. :kwarg nonstring: How to treat nonstring values. Possible values are: :simplerepr: Attempt to call the object's "simple representation" method and return that value. Python-2.3+ has two methods that try to return a simple representation: :meth:`object.__unicode__` and :meth:`object.__str__`. We first try to get a usable value from :meth:`object.__unicode__`. If that fails we try the same with :meth:`object.__str__`. :empty: Return an empty :class:`unicode` string :strict: Raise a :exc:`TypeError` :passthru: Return the object unchanged :repr: Attempt to return a :class:`unicode` string of the repr of the object Default is ``simplerepr`` :raises TypeError: if :attr:`nonstring` is ``strict`` and a non-:class:`basestring` object is passed in or if :attr:`nonstring` is set to an unknown value :raises UnicodeDecodeError: if :attr:`errors` is ``strict`` and :attr:`obj` is not decodable using the given encoding :returns: :class:`unicode` string or the original object depending on the value of :attr:`nonstring`. Usually this should be used on a byte :class:`str` but it can take both byte :class:`str` and :class:`unicode` strings intelligently. Nonstring objects are handled in different ways depending on the setting of the :attr:`nonstring` parameter. The default values of this function are set so as to always return a :class:`unicode` string and never raise an error when converting from a byte :class:`str` to a :class:`unicode` string. 
However, when you do not pass validly encoded text (or a nonstring object), you may end up with output that you don't expect. Be sure you understand the requirements of your data, not just ignore errors by passing it through this function. ''' # Could use isbasestring/isunicode here but we want this code to be as # fast as possible if isinstance(obj, basestring): if isinstance(obj, text_type): return obj if encoding in _UTF8_ALIASES: return text_type(obj, 'utf-8', errors) if encoding in _LATIN1_ALIASES: return text_type(obj, 'latin-1', errors) return obj.decode(encoding, errors) if not nonstring: nonstring = 'simplerepr' if nonstring == 'empty': return u'' elif nonstring == 'passthru': return obj elif nonstring == 'simplerepr': try: simple = obj.__unicode__() except (AttributeError, UnicodeError): simple = None if not simple: try: simple = text_type(obj) except UnicodeError: try: simple = obj.__str__() except (UnicodeError, AttributeError): simple = u'' if isinstance(simple, binary_type): return text_type(simple, encoding, errors) return simple elif nonstring in ('repr', 'strict'): obj_repr = repr(obj) if isinstance(obj_repr, binary_type): obj_repr = text_type(obj_repr, encoding, errors) if nonstring == 'repr': return obj_repr raise TypeError('to_unicode was given "%(obj)s" which is neither' ' a byte string (str) or a unicode string' % {'obj': obj_repr.encode(encoding, 'replace')}) raise TypeError('nonstring value, %(param)s, is not set to a valid' ' action' % {'param': nonstring}) def to_bytes(obj, encoding='utf-8', errors='replace', nonstring=None): '''Convert an object into a byte :class:`str` :arg obj: Object to convert to a byte :class:`str`. This should normally be a :class:`unicode` string. :kwarg encoding: Encoding to use to convert the :class:`unicode` string into a byte :class:`str`. Defaults to :term:`utf-8`. :kwarg errors: If errors are found while encoding, perform this action. 
Defaults to ``replace`` which replaces the invalid bytes with a character that means the bytes were unable to be encoded. Other values are the same as the error handling schemes in the `codec base classes <http://docs.python.org/library/codecs.html#codec-base-classes>`_. For instance ``strict`` which raises an exception and ``ignore`` which simply omits the non-encodable characters. :kwarg nonstring: How to treat nonstring values. Possible values are: :simplerepr: Attempt to call the object's "simple representation" method and return that value. Python-2.3+ has two methods that try to return a simple representation: :meth:`object.__unicode__` and :meth:`object.__str__`. We first try to get a usable value from :meth:`object.__str__`. If that fails we try the same with :meth:`object.__unicode__`. :empty: Return an empty byte :class:`str` :strict: Raise a :exc:`TypeError` :passthru: Return the object unchanged :repr: Attempt to return a byte :class:`str` of the :func:`repr` of the object Default is ``simplerepr``. :raises TypeError: if :attr:`nonstring` is ``strict`` and a non-:class:`basestring` object is passed in or if :attr:`nonstring` is set to an unknown value. :raises UnicodeEncodeError: if :attr:`errors` is ``strict`` and all of the bytes of :attr:`obj` are unable to be encoded using :attr:`encoding`. :returns: byte :class:`str` or the original object depending on the value of :attr:`nonstring`. .. warning:: If you pass a byte :class:`str` into this function the byte :class:`str` is returned unmodified. It is **not** re-encoded with the specified :attr:`encoding`. The easiest way to achieve that is:: to_bytes(to_unicode(text), encoding='utf-8') The initial :func:`to_unicode` call will ensure text is a :class:`unicode` string. Then, :func:`to_bytes` will turn that into a byte :class:`str` with the specified encoding. Usually, this should be used on a :class:`unicode` string but it can take either a byte :class:`str` or a :class:`unicode` string intelligently. 
Nonstring objects are handled in different ways depending on the setting of the :attr:`nonstring` parameter. The default values of this function are set so as to always return a byte :class:`str` and never raise an error when converting from unicode to bytes. However, when you do not pass an encoding that can validly encode the object (or a non-string object), you may end up with output that you don't expect. Be sure you understand the requirements of your data, not just ignore errors by passing it through this function. ''' # Could use isbasestring, isbytestring here but we want this to be as fast # as possible if isinstance(obj, basestring): if isinstance(obj, binary_type): return obj return obj.encode(encoding, errors) if not nonstring: nonstring = 'simplerepr' if nonstring == 'empty': return b'' elif nonstring == 'passthru': return obj elif nonstring == 'simplerepr': try: simple = binary_type(obj) except UnicodeError: try: simple = obj.__str__() except (AttributeError, UnicodeError): simple = None if not simple: try: simple = obj.__unicode__() except (AttributeError, UnicodeError): simple = b'' if isinstance(simple, text_type): simple = simple.encode(encoding, 'replace') return simple elif nonstring in ('repr', 'strict'): try: obj_repr = obj.__repr__() except (AttributeError, UnicodeError): obj_repr = b'' if isinstance(obj_repr, text_type): obj_repr = obj_repr.encode(encoding, errors) else: obj_repr = binary_type(obj_repr) if nonstring == 'repr': return obj_repr raise TypeError('to_bytes was given "%(obj)s" which is neither' ' a unicode string or a byte string (str)' % {'obj': obj_repr}) raise TypeError('nonstring value, %(param)s, is not set to a valid' ' action' % {'param': nonstring}) # force the return value of a function to be unicode. Use with partial to # ensure that a filter will return unicode values. def unicode_wrap(func, *args, **kwargs): return to_unicode(func(*args, **kwargs), nonstring='passthru')
gpl-3.0
DevOps4Networks/ansible
lib/ansible/plugins/lookup/shelvefile.py
132
2906
# (c) 2015, Alejandro Guirao <lekumberri@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) __metaclass__ = type import shelve import os from ansible.errors import AnsibleError from ansible.plugins.lookup import LookupBase class LookupModule(LookupBase): def read_shelve(self, shelve_filename, key): """ Read the value of "key" from a shelve file """ d = shelve.open(shelve_filename) res = d.get(key, None) d.close() return res def run(self, terms, variables=None, **kwargs): if not isinstance(terms, list): terms = [ terms ] ret = [] for term in terms: playbook_path = None relative_path = None paramvals = {"file": None, "key": None} params = term.split() try: for param in params: name, value = param.split('=') assert(name in paramvals) paramvals[name] = value except (ValueError, AssertionError) as e: # In case "file" or "key" are not present raise AnsibleError(e) file = paramvals['file'] key = paramvals['key'] basedir_path = self._loader.path_dwim(file) # Search also in the role/files directory and in the playbook directory if 'role_path' in variables: relative_path = self._loader.path_dwim_relative(variables['role_path'], 'files', file) if 'playbook_dir' in variables: playbook_path = self._loader.path_dwim_relative(variables['playbook_dir'],'files', file) for path in (basedir_path, relative_path, 
playbook_path): if path and os.path.exists(path): res = self.read_shelve(path, key) if res is None: raise AnsibleError("Key %s not found in shelve file %s" % (key, file)) # Convert the value read to string ret.append(str(res)) break else: raise AnsibleError("Could not locate shelve file in lookup: %s" % file) return ret
gpl-3.0
PySyncer/shows_sync
shows_sync/providers/output/myanimelist.py
1
2643
from lxml import etree
import time
import requests
import constants as CONSTANTS
import urllib
import logging
from pymal import Mal
import sys


class MyAnimeList(object):
    """Output provider that marks watched episodes on MyAnimeList (MAL)."""

    def __init__(self, username, password):
        self.username = username
        self.password = password
        self.mal = Mal.Mal(username, password)

    def update(self, episodes):
        """Mark the latest watched episode of each show/season on MAL.

        :param episodes: mapping of shows; each show carries a
            ``['tvdb']['seasons']`` mapping whose episode lists end with the
            most recently watched episode.
        """
        for show_key, show in episodes.items():
            for season_key, season in show['tvdb']['seasons'].items():
                title = show['tvdb']['original_title']
                last_episode = season['episodes'][-1]
                try:
                    # Try to get ID of show on MAL
                    anime = self.mal.anime.search(title)[0]
                    anime_id = anime['id']
                except Exception:
                    # A bare 'except:' would also swallow KeyboardInterrupt
                    # and SystemExit; Exception is the right scope here.
                    logging.warning('Cannot find show {0}.'.format(title))
                    break
                # If the total number of episodes is smaller than the absolute
                # number of the episode, MAL probably lists another show
                # corresponding to the season, so search for that entry.
                if int(anime['episodes']) < last_episode['absoluteNumber']:
                    seasonal_title = title + ' ' + season_key
                    try:
                        # Search show corresponding to the season
                        anime = self.mal.anime.search(seasonal_title)[0]
                        anime_id = anime['id']
                    except Exception:
                        logging.warning('Cannot find show {0}.'.format(seasonal_title))
                        break
                    # Mark as watched with the season-relative episode number
                    r = self.mark_as_watched(anime_id, last_episode['episode'])
                    self.watch_log(r, anime['title'], last_episode['episode'])
                else:
                    # Mark as watched with the absolute episode number
                    r = self.mark_as_watched(anime_id, last_episode['absoluteNumber'])
                    self.watch_log(r, anime['title'], last_episode['absoluteNumber'])

    def watch_log(self, r, title, episode):
        """Log the outcome string ``r`` of a mark-as-watched call."""
        if r in ('Updated', 'Created'):
            logging.info('{0} {1}, episode {2}.'.format(r, title, episode))
        else:
            logging.warning('Cannot update {0}, episode {1} with message : {2}.'.format(title, episode, r))

    def mark_as_watched(self, id, episode):
        """Add (or update) the watched-episode count for show ``id`` on MAL.

        :return: the MAL response message ('Updated', 'Created', or an error).
        """
        r = self.mal.anime.add(id, str(episode))
        if 'is already in the list' in r:
            # The show exists in the list: switch from 'add' to 'update'.
            logging.debug(r)
            r = self.mal.anime.update(id, str(episode))
        return r

    def reduce(self, history):
        # TODO
        pass
mit
soarpenguin/ansible
lib/ansible/plugins/action/fetch.py
42
9898
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import base64 from ansible.errors import AnsibleError from ansible.module_utils._text import to_bytes from ansible.module_utils.six import string_types from ansible.module_utils.parsing.convert_bool import boolean from ansible.plugins.action import ActionBase from ansible.utils.hashing import checksum, checksum_s, md5, secure_hash from ansible.utils.path import makedirs_safe try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() class ActionModule(ActionBase): def run(self, tmp=None, task_vars=None): ''' handler for fetch operations ''' if task_vars is None: task_vars = dict() result = super(ActionModule, self).run(tmp, task_vars) if self._play_context.check_mode: result['skipped'] = True result['msg'] = 'check mode not (yet) supported for this module' return result source = self._task.args.get('src', None) dest = self._task.args.get('dest', None) flat = boolean(self._task.args.get('flat'), strict=False) fail_on_missing = boolean(self._task.args.get('fail_on_missing'), strict=False) validate_checksum = boolean(self._task.args.get('validate_checksum', self._task.args.get('validate_md5', True)), strict=False) # 
validate source and dest are strings FIXME: use basic.py and module specs if not isinstance(source, string_types): result['msg'] = "Invalid type supplied for source option, it must be a string" if not isinstance(dest, string_types): result['msg'] = "Invalid type supplied for dest option, it must be a string" # validate_md5 is the deprecated way to specify validate_checksum if 'validate_md5' in self._task.args and 'validate_checksum' in self._task.args: result['msg'] = "validate_checksum and validate_md5 cannot both be specified" if 'validate_md5' in self._task.args: display.deprecated('Use validate_checksum instead of validate_md5', version='2.8') if source is None or dest is None: result['msg'] = "src and dest are required" if result.get('msg'): result['failed'] = True return result source = self._connection._shell.join_path(source) source = self._remote_expand_user(source) remote_checksum = None if not self._play_context.become: # calculate checksum for the remote file, don't bother if using become as slurp will be used # Force remote_checksum to follow symlinks because fetch always follows symlinks remote_checksum = self._remote_checksum(source, all_vars=task_vars, follow=True) # use slurp if permissions are lacking or privilege escalation is needed remote_data = None if remote_checksum in ('1', '2', None): slurpres = self._execute_module(module_name='slurp', module_args=dict(src=source), task_vars=task_vars, tmp=tmp) if slurpres.get('failed'): if not fail_on_missing and (slurpres.get('msg').startswith('file not found') or remote_checksum == '1'): result['msg'] = "the remote file does not exist, not transferring, ignored" result['file'] = source result['changed'] = False else: result.update(slurpres) return result else: if slurpres['encoding'] == 'base64': remote_data = base64.b64decode(slurpres['content']) if remote_data is not None: remote_checksum = checksum_s(remote_data) # the source path may have been expanded on the # target system, so we compare it here 
and use the # expanded version if it's different remote_source = slurpres.get('source') if remote_source and remote_source != source: source = remote_source # calculate the destination name if os.path.sep not in self._connection._shell.join_path('a', ''): source = self._connection._shell._unquote(source) source_local = source.replace('\\', '/') else: source_local = source dest = os.path.expanduser(dest) if flat: if os.path.isdir(to_bytes(dest, errors='surrogate_or_strict')) and not dest.endswith(os.sep): result['msg'] = "dest is an existing directory, use a trailing slash if you want to fetch src into that directory" result['file'] = dest result['failed'] = True return result if dest.endswith(os.sep): # if the path ends with "/", we'll use the source filename as the # destination filename base = os.path.basename(source_local) dest = os.path.join(dest, base) if not dest.startswith("/"): # if dest does not start with "/", we'll assume a relative path dest = self._loader.path_dwim(dest) else: # files are saved in dest dir, with a subdir for each host, then the filename if 'inventory_hostname' in task_vars: target_name = task_vars['inventory_hostname'] else: target_name = self._play_context.remote_addr dest = "%s/%s/%s" % (self._loader.path_dwim(dest), target_name, source_local) dest = dest.replace("//", "/") if remote_checksum in ('0', '1', '2', '3', '4', '5'): result['changed'] = False result['file'] = source if remote_checksum == '0': result['msg'] = "unable to calculate the checksum of the remote file" elif remote_checksum == '1': result['msg'] = "the remote file does not exist" elif remote_checksum == '2': result['msg'] = "no read permission on remote file" elif remote_checksum == '3': result['msg'] = "remote file is a directory, fetch cannot work on directories" elif remote_checksum == '4': result['msg'] = "python isn't present on the system. 
Unable to compute checksum" elif remote_checksum == '5': result['msg'] = "stdlib json or simplejson was not found on the remote machine. Only the raw module can work without those installed" # Historically, these don't fail because you may want to transfer # a log file that possibly MAY exist but keep going to fetch other # log files. Today, this is better achieved by adding # ignore_errors or failed_when to the task. Control the behaviour # via fail_when_missing if fail_on_missing: result['failed'] = True del result['changed'] else: result['msg'] += ", not transferring, ignored" return result # calculate checksum for the local file local_checksum = checksum(dest) if remote_checksum != local_checksum: # create the containing directories, if needed makedirs_safe(os.path.dirname(dest)) # fetch the file and check for changes if remote_data is None: self._connection.fetch_file(source, dest) else: try: f = open(to_bytes(dest, errors='surrogate_or_strict'), 'wb') f.write(remote_data) f.close() except (IOError, OSError) as e: raise AnsibleError("Failed to fetch the file: %s" % e) new_checksum = secure_hash(dest) # For backwards compatibility. We'll return None on FIPS enabled systems try: new_md5 = md5(dest) except ValueError: new_md5 = None if validate_checksum and new_checksum != remote_checksum: result.update(dict(failed=True, md5sum=new_md5, msg="checksum mismatch", file=source, dest=dest, remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum)) else: result.update({'changed': True, 'md5sum': new_md5, 'dest': dest, 'remote_md5sum': None, 'checksum': new_checksum, 'remote_checksum': remote_checksum}) else: # For backwards compatibility. We'll return None on FIPS enabled systems try: local_md5 = md5(dest) except ValueError: local_md5 = None result.update(dict(changed=False, md5sum=local_md5, file=source, dest=dest, checksum=local_checksum)) return result
gpl-3.0
eyalfa/spark
python/pyspark/ml/wrapper.py
24
12950
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from abc import ABCMeta, abstractmethod import sys if sys.version >= '3': xrange = range from pyspark import SparkContext from pyspark.sql import DataFrame from pyspark.ml import Estimator, Transformer, Model from pyspark.ml.param import Params from pyspark.ml.util import _jvm from pyspark.ml.common import inherit_doc, _java2py, _py2java class JavaWrapper(object): """ Wrapper class for a Java companion object """ def __init__(self, java_obj=None): super(JavaWrapper, self).__init__() self._java_obj = java_obj def __del__(self): if SparkContext._active_spark_context and self._java_obj is not None: SparkContext._active_spark_context._gateway.detach(self._java_obj) @classmethod def _create_from_java_class(cls, java_class, *args): """ Construct this object from given Java classname and arguments """ java_obj = JavaWrapper._new_java_obj(java_class, *args) return cls(java_obj) def _call_java(self, name, *args): m = getattr(self._java_obj, name) sc = SparkContext._active_spark_context java_args = [_py2java(sc, arg) for arg in args] return _java2py(sc, m(*java_args)) @staticmethod def _new_java_obj(java_class, *args): """ Returns a new Java object. 
""" sc = SparkContext._active_spark_context java_obj = _jvm() for name in java_class.split("."): java_obj = getattr(java_obj, name) java_args = [_py2java(sc, arg) for arg in args] return java_obj(*java_args) @staticmethod def _new_java_array(pylist, java_class): """ Create a Java array of given java_class type. Useful for calling a method with a Scala Array from Python with Py4J. :param pylist: Python list to convert to a Java Array. :param java_class: Java class to specify the type of Array. Should be in the form of sc._gateway.jvm.* (sc is a valid Spark Context). :return: Java Array of converted pylist. Example primitive Java classes: - basestring -> sc._gateway.jvm.java.lang.String - int -> sc._gateway.jvm.java.lang.Integer - float -> sc._gateway.jvm.java.lang.Double - bool -> sc._gateway.jvm.java.lang.Boolean """ sc = SparkContext._active_spark_context java_array = sc._gateway.new_array(java_class, len(pylist)) for i in xrange(len(pylist)): java_array[i] = pylist[i] return java_array @inherit_doc class JavaParams(JavaWrapper, Params): """ Utility class to help create wrapper classes from Java/Scala implementations of pipeline components. """ #: The param values in the Java object should be #: synced with the Python wrapper in fit/transform/evaluate/copy. __metaclass__ = ABCMeta def _make_java_param_pair(self, param, value): """ Makes a Java param pair. """ sc = SparkContext._active_spark_context param = self._resolveParam(param) java_param = self._java_obj.getParam(param.name) java_value = _py2java(sc, value) return java_param.w(java_value) def _transfer_params_to_java(self): """ Transforms the embedded params to the companion Java object. 
""" pair_defaults = [] for param in self.params: if self.isSet(param): pair = self._make_java_param_pair(param, self._paramMap[param]) self._java_obj.set(pair) if self.hasDefault(param): pair = self._make_java_param_pair(param, self._defaultParamMap[param]) pair_defaults.append(pair) if len(pair_defaults) > 0: sc = SparkContext._active_spark_context pair_defaults_seq = sc._jvm.PythonUtils.toSeq(pair_defaults) self._java_obj.setDefault(pair_defaults_seq) def _transfer_param_map_to_java(self, pyParamMap): """ Transforms a Python ParamMap into a Java ParamMap. """ paramMap = JavaWrapper._new_java_obj("org.apache.spark.ml.param.ParamMap") for param in self.params: if param in pyParamMap: pair = self._make_java_param_pair(param, pyParamMap[param]) paramMap.put([pair]) return paramMap def _create_params_from_java(self): """ SPARK-10931: Temporary fix to create params that are defined in the Java obj but not here """ java_params = list(self._java_obj.params()) from pyspark.ml.param import Param for java_param in java_params: java_param_name = java_param.name() if not hasattr(self, java_param_name): param = Param(self, java_param_name, java_param.doc()) setattr(param, "created_from_java_param", True) setattr(self, java_param_name, param) self._params = None # need to reset so self.params will discover new params def _transfer_params_from_java(self): """ Transforms the embedded params from the companion Java object. """ sc = SparkContext._active_spark_context for param in self.params: if self._java_obj.hasParam(param.name): java_param = self._java_obj.getParam(param.name) # SPARK-14931: Only check set params back to avoid default params mismatch. 
if self._java_obj.isSet(java_param): value = _java2py(sc, self._java_obj.getOrDefault(java_param)) self._set(**{param.name: value}) # SPARK-10931: Temporary fix for params that have a default in Java if self._java_obj.hasDefault(java_param) and not self.isDefined(param): value = _java2py(sc, self._java_obj.getDefault(java_param)).get() self._setDefault(**{param.name: value}) def _transfer_param_map_from_java(self, javaParamMap): """ Transforms a Java ParamMap into a Python ParamMap. """ sc = SparkContext._active_spark_context paramMap = dict() for pair in javaParamMap.toList(): param = pair.param() if self.hasParam(str(param.name())): paramMap[self.getParam(param.name())] = _java2py(sc, pair.value()) return paramMap @staticmethod def _empty_java_param_map(): """ Returns an empty Java ParamMap reference. """ return _jvm().org.apache.spark.ml.param.ParamMap() def _to_java(self): """ Transfer this instance's Params to the wrapped Java object, and return the Java object. Used for ML persistence. Meta-algorithms such as Pipeline should override this method. :return: Java object equivalent to this instance. """ self._transfer_params_to_java() return self._java_obj @staticmethod def _from_java(java_stage): """ Given a Java object, create and return a Python wrapper of it. Used for ML persistence. Meta-algorithms such as Pipeline should override this method as a classmethod. """ def __get_class(clazz): """ Loads Python class from its name. """ parts = clazz.split('.') module = ".".join(parts[:-1]) m = __import__(module) for comp in parts[1:]: m = getattr(m, comp) return m stage_name = java_stage.getClass().getName().replace("org.apache.spark", "pyspark") # Generate a default new instance from the stage_name class. py_type = __get_class(stage_name) if issubclass(py_type, JavaParams): # Load information from java_stage to the instance. 
py_stage = py_type() py_stage._java_obj = java_stage # SPARK-10931: Temporary fix so that persisted models would own params from Estimator if issubclass(py_type, JavaModel): py_stage._create_params_from_java() py_stage._resetUid(java_stage.uid()) py_stage._transfer_params_from_java() elif hasattr(py_type, "_from_java"): py_stage = py_type._from_java(java_stage) else: raise NotImplementedError("This Java stage cannot be loaded into Python currently: %r" % stage_name) return py_stage def copy(self, extra=None): """ Creates a copy of this instance with the same uid and some extra params. This implementation first calls Params.copy and then make a copy of the companion Java pipeline component with extra params. So both the Python wrapper and the Java pipeline component get copied. :param extra: Extra parameters to copy to the new instance :return: Copy of this instance """ if extra is None: extra = dict() that = super(JavaParams, self).copy(extra) if self._java_obj is not None: that._java_obj = self._java_obj.copy(self._empty_java_param_map()) that._transfer_params_to_java() return that @inherit_doc class JavaEstimator(JavaParams, Estimator): """ Base class for :py:class:`Estimator`s that wrap Java/Scala implementations. """ __metaclass__ = ABCMeta @abstractmethod def _create_model(self, java_model): """ Creates a model from the input Java model reference. """ raise NotImplementedError() def _fit_java(self, dataset): """ Fits a Java model to the input dataset. 
:param dataset: input dataset, which is an instance of :py:class:`pyspark.sql.DataFrame` :param params: additional params (overwriting embedded values) :return: fitted Java model """ self._transfer_params_to_java() return self._java_obj.fit(dataset._jdf) def _fit(self, dataset): java_model = self._fit_java(dataset) model = self._create_model(java_model) return self._copyValues(model) @inherit_doc class JavaTransformer(JavaParams, Transformer): """ Base class for :py:class:`Transformer`s that wrap Java/Scala implementations. Subclasses should ensure they have the transformer Java object available as _java_obj. """ __metaclass__ = ABCMeta def _transform(self, dataset): self._transfer_params_to_java() return DataFrame(self._java_obj.transform(dataset._jdf), dataset.sql_ctx) @inherit_doc class JavaModel(JavaTransformer, Model): """ Base class for :py:class:`Model`s that wrap Java/Scala implementations. Subclasses should inherit this class before param mix-ins, because this sets the UID from the Java model. """ __metaclass__ = ABCMeta def __init__(self, java_model=None): """ Initialize this instance with a Java model object. Subclasses should call this constructor, initialize params, and then call _transfer_params_from_java. This instance can be instantiated without specifying java_model, it will be assigned after that, but this scenario only used by :py:class:`JavaMLReader` to load models. This is a bit of a hack, but it is easiest since a proper fix would require MLReader (in pyspark.ml.util) to depend on these wrappers, but these wrappers depend on pyspark.ml.util (both directly and via other ML classes). """ super(JavaModel, self).__init__(java_model) if java_model is not None: # SPARK-10931: This is a temporary fix to allow models to own params # from estimators. Eventually, these params should be in models through # using common base classes between estimators and models. self._create_params_from_java() self._resetUid(java_model.uid())
apache-2.0
quoclieu/codebrew17-starving
env/lib/python3.5/site-packages/Crypto/Math/Numbers.py
10
5358
# ===================================================================
#
# Copyright (c) 2014, Legrandin <helderijs@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in
#    the documentation and/or other materials provided with the
#    distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ===================================================================

"""Fast, arbitrary precision integers.

:undocumented: __package__
"""

__all__ = ["Integer"]

from Crypto.Util.py3compat import *
from Crypto import Random

try:
    from Crypto.Math._Numbers_gmp import Integer
    from Crypto.Math._Numbers_gmp import implementation as _implementation
except (ImportError, OSError):
    from Crypto.Math._Numbers_int import Integer
    _implementation = {}


def _random(**kwargs):
    """Generate a random natural integer of a certain size.

    :Keywords:
      exact_bits : positive integer
        The length in bits of the resulting random Integer number.
        The number is guaranteed to fulfil the relation:

            2^bits > result >= 2^(bits - 1)

      max_bits : positive integer
        The maximum length in bits of the resulting random Integer number.
        The number is guaranteed to fulfil the relation:

            2^bits > result >= 0

      randfunc : callable
        A function that returns a random byte string. The length of the
        byte string is passed as parameter. Optional.
        If not provided (or ``None``), randomness is read from the system RNG.

    :Return: a Integer object
    """

    exact_bits = kwargs.pop("exact_bits", None)
    max_bits = kwargs.pop("max_bits", None)
    randfunc = kwargs.pop("randfunc", None)

    if randfunc is None:
        randfunc = Random.new().read

    if exact_bits is None and max_bits is None:
        raise ValueError("Either 'exact_bits' or 'max_bits' must be specified")

    if exact_bits is not None and max_bits is not None:
        raise ValueError("'exact_bits' and 'max_bits' are mutually exclusive")

    bits = exact_bits or max_bits
    bytes_needed = ((bits - 1) // 8) + 1
    # Number of bits of the most significant byte that actually carry data.
    significant_bits_msb = 8 - (bytes_needed * 8 - bits)
    msb = bord(randfunc(1)[0])
    if exact_bits is not None:
        # Force the top bit so the result has exactly 'exact_bits' bits.
        msb |= 1 << (significant_bits_msb - 1)
    # Clear any bits above the requested width.
    msb &= (1 << significant_bits_msb) - 1

    return Integer.from_bytes(bchr(msb) + randfunc(bytes_needed - 1))


def _random_range(**kwargs):
    """Generate a random integer within a given internal.

    :Keywords:
      min_inclusive : integer
        The lower end of the interval (inclusive).
      max_inclusive : integer
        The higher end of the interval (inclusive).
      max_exclusive : integer
        The higher end of the interval (exclusive).
      randfunc : callable
        A function that returns a random byte string. The length of the
        byte string is passed as parameter. Optional.
        If not provided (or ``None``), randomness is read from the system RNG.

    :Returns:
        An Integer randomly taken in the given interval.
    """

    min_inclusive = kwargs.pop("min_inclusive", None)
    max_inclusive = kwargs.pop("max_inclusive", None)
    max_exclusive = kwargs.pop("max_exclusive", None)
    randfunc = kwargs.pop("randfunc", None)

    if kwargs:
        # Bug fix: 'str(kwargs.keys)' rendered the bound-method repr
        # ("<built-in method keys ...>") instead of the offending names.
        raise ValueError("Unknown keywords: " + str(list(kwargs.keys())))
    if None not in (max_inclusive, max_exclusive):
        raise ValueError("max_inclusive and max_exclusive cannot be both"
                         " specified")
    if max_exclusive is not None:
        max_inclusive = max_exclusive - 1
    if None in (min_inclusive, max_inclusive):
        raise ValueError("Missing keyword to identify the interval")

    if randfunc is None:
        randfunc = Random.new().read

    # Rejection sampling: draw numbers of the right bit length until one
    # falls inside the (shifted-to-zero) interval.
    norm_maximum = max_inclusive - min_inclusive
    bits_needed = Integer(norm_maximum).size_in_bits()

    norm_candidate = -1
    while not 0 <= norm_candidate <= norm_maximum:
        norm_candidate = _random(
                            max_bits=bits_needed,
                            randfunc=randfunc
                            )
    return norm_candidate + min_inclusive


Integer.random = staticmethod(_random)
Integer.random_range = staticmethod(_random_range)
mit