repo_name
stringlengths
6
100
path
stringlengths
4
294
copies
stringclasses
981 values
size
stringlengths
4
6
content
stringlengths
606
896k
license
stringclasses
15 values
jounex/hue
desktop/core/ext-py/Django-1.6.10/tests/tablespaces/models.py
150
1819
from django.db import models # Since the test database doesn't have tablespaces, it's impossible for Django # to create the tables for models where db_tablespace is set. To avoid this # problem, we mark the models as unmanaged, and temporarily revert them to # managed during each test. We also set them to use the same tables as the # "reference" models to avoid errors when other tests run 'syncdb' # (proxy_models_inheritance does). class ScientistRef(models.Model): name = models.CharField(max_length=50) class ArticleRef(models.Model): title = models.CharField(max_length=50, unique=True) code = models.CharField(max_length=50, unique=True) authors = models.ManyToManyField(ScientistRef, related_name='articles_written_set') reviewers = models.ManyToManyField(ScientistRef, related_name='articles_reviewed_set') class Scientist(models.Model): name = models.CharField(max_length=50) class Meta: db_table = 'tablespaces_scientistref' db_tablespace = 'tbl_tbsp' managed = False class Article(models.Model): title = models.CharField(max_length=50, unique=True) code = models.CharField(max_length=50, unique=True, db_tablespace='idx_tbsp') authors = models.ManyToManyField(Scientist, related_name='articles_written_set') reviewers = models.ManyToManyField(Scientist, related_name='articles_reviewed_set', db_tablespace='idx_tbsp') class Meta: db_table = 'tablespaces_articleref' db_tablespace = 'tbl_tbsp' managed = False # Also set the tables for automatically created models Authors = Article._meta.get_field('authors').rel.through Authors._meta.db_table = 'tablespaces_articleref_authors' Reviewers = Article._meta.get_field('reviewers').rel.through Reviewers._meta.db_table = 'tablespaces_articleref_reviewers'
apache-2.0
seanli9jan/tensorflow
tensorflow/python/tools/freeze_graph.py
12
18668
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""Converts checkpoint variables into Const ops in a standalone GraphDef file. This script is designed to take a GraphDef proto, a SaverDef proto, and a set of variable values stored in a checkpoint file, and output a GraphDef with all of the variable ops converted into const ops containing the values of the variables. It's useful to do this when we need to load a single file in C++, especially in environments like mobile or embedded where we may not have access to the RestoreTensor ops and file loading calls that they rely on. An example of command-line usage is: bazel build tensorflow/python/tools:freeze_graph && \ bazel-bin/tensorflow/python/tools/freeze_graph \ --input_graph=some_graph_def.pb \ --input_checkpoint=model.ckpt-8361242 \ --output_graph=/tmp/frozen_graph.pb --output_node_names=softmax You can also look at freeze_graph_test.py for an example of how to use it. 
""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import re import sys from google.protobuf import text_format from tensorflow.core.framework import graph_pb2 from tensorflow.core.protobuf import saver_pb2 from tensorflow.core.protobuf.meta_graph_pb2 import MetaGraphDef from tensorflow.python import pywrap_tensorflow from tensorflow.python.client import session from tensorflow.python.framework import graph_util from tensorflow.python.framework import importer from tensorflow.python.platform import app from tensorflow.python.platform import gfile from tensorflow.python.saved_model import loader from tensorflow.python.saved_model import tag_constants from tensorflow.python.tools import saved_model_utils from tensorflow.python.training import checkpoint_management from tensorflow.python.training import saver as saver_lib def _has_no_variables(sess): """Determines if the graph has any variables. Args: sess: TensorFlow Session. Returns: Bool. """ for op in sess.graph.get_operations(): if op.type.startswith("Variable") or op.type.endswith("VariableOp"): return False return True def freeze_graph_with_def_protos(input_graph_def, input_saver_def, input_checkpoint, output_node_names, restore_op_name, filename_tensor_name, output_graph, clear_devices, initializer_nodes, variable_names_whitelist="", variable_names_blacklist="", input_meta_graph_def=None, input_saved_model_dir=None, saved_model_tags=None, checkpoint_version=saver_pb2.SaverDef.V2): """Converts all variables in a graph and checkpoint into constants. Args: input_graph_def: A `GraphDef`. input_saver_def: A `SaverDef` (optional). input_checkpoint: The prefix of a V1 or V2 checkpoint, with V2 taking priority. Typically the result of `Saver.save()` or that of `tf.train.latest_checkpoint()`, regardless of sharded/non-sharded or V1/V2. output_node_names: The name(s) of the output nodes, comma separated. restore_op_name: Unused. 
filename_tensor_name: Unused. output_graph: String where to write the frozen `GraphDef`. clear_devices: A Bool whether to remove device specifications. initializer_nodes: Comma separated string of initializer nodes to run before freezing. variable_names_whitelist: The set of variable names to convert (optional, by default, all variables are converted). variable_names_blacklist: The set of variable names to omit converting to constants (optional). input_meta_graph_def: A `MetaGraphDef` (optional), input_saved_model_dir: Path to the dir with TensorFlow 'SavedModel' file and variables (optional). saved_model_tags: Group of comma separated tag(s) of the MetaGraphDef to load, in string format (optional). checkpoint_version: Tensorflow variable file format (saver_pb2.SaverDef.V1 or saver_pb2.SaverDef.V2) Returns: Location of the output_graph_def. """ del restore_op_name, filename_tensor_name # Unused by updated loading code. # 'input_checkpoint' may be a prefix if we're using Saver V2 format if (not input_saved_model_dir and not checkpoint_management.checkpoint_exists(input_checkpoint)): print("Input checkpoint '" + input_checkpoint + "' doesn't exist!") return -1 if not output_node_names: print("You need to supply the name of a node to --output_node_names.") return -1 # Remove all the explicit device specifications for this node. This helps to # make the graph more portable. 
if clear_devices: if input_meta_graph_def: for node in input_meta_graph_def.graph_def.node: node.device = "" elif input_graph_def: for node in input_graph_def.node: node.device = "" if input_graph_def: _ = importer.import_graph_def(input_graph_def, name="") with session.Session() as sess: if input_saver_def: saver = saver_lib.Saver( saver_def=input_saver_def, write_version=checkpoint_version) saver.restore(sess, input_checkpoint) elif input_meta_graph_def: restorer = saver_lib.import_meta_graph( input_meta_graph_def, clear_devices=True) restorer.restore(sess, input_checkpoint) if initializer_nodes: sess.run(initializer_nodes.replace(" ", "").split(",")) elif input_saved_model_dir: if saved_model_tags is None: saved_model_tags = [] loader.load(sess, saved_model_tags, input_saved_model_dir) else: var_list = {} reader = pywrap_tensorflow.NewCheckpointReader(input_checkpoint) var_to_shape_map = reader.get_variable_to_shape_map() # List of all partition variables. Because the condition is heuristic # based, the list could include false positives. all_parition_variable_names = [ tensor.name.split(":")[0] for op in sess.graph.get_operations() for tensor in op.values() if re.search(r"/part_\d+/", tensor.name) ] has_partition_var = False for key in var_to_shape_map: try: tensor = sess.graph.get_tensor_by_name(key + ":0") if any(key in name for name in all_parition_variable_names): has_partition_var = True except KeyError: # This tensor doesn't exist in the graph (for example it's # 'global_step' or a similar housekeeping element) so skip it. continue var_list[key] = tensor try: saver = saver_lib.Saver( var_list=var_list, write_version=checkpoint_version) except TypeError as e: # `var_list` is required to be a map of variable names to Variable # tensors. Partition variables are Identity tensors that cannot be # handled by Saver. if has_partition_var: print("Models containing partition variables cannot be converted " "from checkpoint files. 
Please pass in a SavedModel using " "the flag --input_saved_model_dir.") return -1 # Models that have been frozen previously do not contain Variables. elif _has_no_variables(sess): print("No variables were found in this model. It is likely the model " "was frozen previously. You cannot freeze a graph twice.") return 0 else: raise e saver.restore(sess, input_checkpoint) if initializer_nodes: sess.run(initializer_nodes.replace(" ", "").split(",")) variable_names_whitelist = ( variable_names_whitelist.replace(" ", "").split(",") if variable_names_whitelist else None) variable_names_blacklist = ( variable_names_blacklist.replace(" ", "").split(",") if variable_names_blacklist else None) if input_meta_graph_def: output_graph_def = graph_util.convert_variables_to_constants( sess, input_meta_graph_def.graph_def, output_node_names.replace(" ", "").split(","), variable_names_whitelist=variable_names_whitelist, variable_names_blacklist=variable_names_blacklist) else: output_graph_def = graph_util.convert_variables_to_constants( sess, input_graph_def, output_node_names.replace(" ", "").split(","), variable_names_whitelist=variable_names_whitelist, variable_names_blacklist=variable_names_blacklist) # Write GraphDef to file if output path has been given. 
if output_graph: with gfile.GFile(output_graph, "wb") as f: f.write(output_graph_def.SerializeToString()) return output_graph_def def _parse_input_graph_proto(input_graph, input_binary): """Parser input tensorflow graph into GraphDef proto.""" if not gfile.Exists(input_graph): print("Input graph file '" + input_graph + "' does not exist!") return -1 input_graph_def = graph_pb2.GraphDef() mode = "rb" if input_binary else "r" with gfile.FastGFile(input_graph, mode) as f: if input_binary: input_graph_def.ParseFromString(f.read()) else: text_format.Merge(f.read(), input_graph_def) return input_graph_def def _parse_input_meta_graph_proto(input_graph, input_binary): """Parser input tensorflow graph into MetaGraphDef proto.""" if not gfile.Exists(input_graph): print("Input meta graph file '" + input_graph + "' does not exist!") return -1 input_meta_graph_def = MetaGraphDef() mode = "rb" if input_binary else "r" with gfile.FastGFile(input_graph, mode) as f: if input_binary: input_meta_graph_def.ParseFromString(f.read()) else: text_format.Merge(f.read(), input_meta_graph_def) print("Loaded meta graph file '" + input_graph) return input_meta_graph_def def _parse_input_saver_proto(input_saver, input_binary): """Parser input tensorflow Saver into SaverDef proto.""" if not gfile.Exists(input_saver): print("Input saver file '" + input_saver + "' does not exist!") return -1 mode = "rb" if input_binary else "r" with gfile.FastGFile(input_saver, mode) as f: saver_def = saver_pb2.SaverDef() if input_binary: saver_def.ParseFromString(f.read()) else: text_format.Merge(f.read(), saver_def) return saver_def def freeze_graph(input_graph, input_saver, input_binary, input_checkpoint, output_node_names, restore_op_name, filename_tensor_name, output_graph, clear_devices, initializer_nodes, variable_names_whitelist="", variable_names_blacklist="", input_meta_graph=None, input_saved_model_dir=None, saved_model_tags=tag_constants.SERVING, checkpoint_version=saver_pb2.SaverDef.V2): """Converts 
all variables in a graph and checkpoint into constants. Args: input_graph: A `GraphDef` file to load. input_saver: A TensorFlow Saver file. input_binary: A Bool. True means input_graph is .pb, False indicates .pbtxt. input_checkpoint: The prefix of a V1 or V2 checkpoint, with V2 taking priority. Typically the result of `Saver.save()` or that of `tf.train.latest_checkpoint()`, regardless of sharded/non-sharded or V1/V2. output_node_names: The name(s) of the output nodes, comma separated. restore_op_name: Unused. filename_tensor_name: Unused. output_graph: String where to write the frozen `GraphDef`. clear_devices: A Bool whether to remove device specifications. initializer_nodes: Comma separated list of initializer nodes to run before freezing. variable_names_whitelist: The set of variable names to convert (optional, by default, all variables are converted), variable_names_blacklist: The set of variable names to omit converting to constants (optional). input_meta_graph: A `MetaGraphDef` file to load (optional). input_saved_model_dir: Path to the dir with TensorFlow 'SavedModel' file and variables (optional). saved_model_tags: Group of comma separated tag(s) of the MetaGraphDef to load, in string format. checkpoint_version: Tensorflow variable file format (saver_pb2.SaverDef.V1 or saver_pb2.SaverDef.V2). Returns: String that is the location of frozen GraphDef. 
""" input_graph_def = None if input_saved_model_dir: input_graph_def = saved_model_utils.get_meta_graph_def( input_saved_model_dir, saved_model_tags).graph_def elif input_graph: input_graph_def = _parse_input_graph_proto(input_graph, input_binary) input_meta_graph_def = None if input_meta_graph: input_meta_graph_def = _parse_input_meta_graph_proto( input_meta_graph, input_binary) input_saver_def = None if input_saver: input_saver_def = _parse_input_saver_proto(input_saver, input_binary) freeze_graph_with_def_protos( input_graph_def, input_saver_def, input_checkpoint, output_node_names, restore_op_name, filename_tensor_name, output_graph, clear_devices, initializer_nodes, variable_names_whitelist, variable_names_blacklist, input_meta_graph_def, input_saved_model_dir, saved_model_tags.replace(" ", "").split(","), checkpoint_version=checkpoint_version) def main(unused_args, flags): if flags.checkpoint_version == 1: checkpoint_version = saver_pb2.SaverDef.V1 elif flags.checkpoint_version == 2: checkpoint_version = saver_pb2.SaverDef.V2 else: print("Invalid checkpoint version (must be '1' or '2'): %d" % flags.checkpoint_version) return -1 freeze_graph(flags.input_graph, flags.input_saver, flags.input_binary, flags.input_checkpoint, flags.output_node_names, flags.restore_op_name, flags.filename_tensor_name, flags.output_graph, flags.clear_devices, flags.initializer_nodes, flags.variable_names_whitelist, flags.variable_names_blacklist, flags.input_meta_graph, flags.input_saved_model_dir, flags.saved_model_tags, checkpoint_version) def run_main(): parser = argparse.ArgumentParser() parser.register("type", "bool", lambda v: v.lower() == "true") parser.add_argument( "--input_graph", type=str, default="", help="TensorFlow \'GraphDef\' file to load.") parser.add_argument( "--input_saver", type=str, default="", help="TensorFlow saver file to load.") parser.add_argument( "--input_checkpoint", type=str, default="", help="TensorFlow variables file to load.") parser.add_argument( 
"--checkpoint_version", type=int, default=2, help="Tensorflow variable file format") parser.add_argument( "--output_graph", type=str, default="", help="Output \'GraphDef\' file name.") parser.add_argument( "--input_binary", nargs="?", const=True, type="bool", default=False, help="Whether the input files are in binary format.") parser.add_argument( "--output_node_names", type=str, default="", help="The name of the output nodes, comma separated.") parser.add_argument( "--restore_op_name", type=str, default="save/restore_all", help="""\ The name of the master restore operator. Deprecated, unused by updated \ loading code. """) parser.add_argument( "--filename_tensor_name", type=str, default="save/Const:0", help="""\ The name of the tensor holding the save path. Deprecated, unused by \ updated loading code. """) parser.add_argument( "--clear_devices", nargs="?", const=True, type="bool", default=True, help="Whether to remove device specifications.") parser.add_argument( "--initializer_nodes", type=str, default="", help="Comma separated list of initializer nodes to run before freezing.") parser.add_argument( "--variable_names_whitelist", type=str, default="", help="""\ Comma separated list of variables to convert to constants. If specified, \ only those variables will be converted to constants.\ """) parser.add_argument( "--variable_names_blacklist", type=str, default="", help="""\ Comma separated list of variables to skip converting to constants.\ """) parser.add_argument( "--input_meta_graph", type=str, default="", help="TensorFlow \'MetaGraphDef\' file to load.") parser.add_argument( "--input_saved_model_dir", type=str, default="", help="Path to the dir with TensorFlow \'SavedModel\' file and variables.") parser.add_argument( "--saved_model_tags", type=str, default="serve", help="""\ Group of tag(s) of the MetaGraphDef to load, in string format,\ separated by \',\'. 
For tag-set contains multiple tags, all tags \ must be passed in.\ """) flags, unparsed = parser.parse_known_args() my_main = lambda unused_args: main(unused_args, flags) app.run(main=my_main, argv=[sys.argv[0]] + unparsed) if __name__ == '__main__': run_main()
apache-2.0
ChadKillingsworth/closure-library
closure/bin/scopify.py
329
6785
#!/usr/bin/python # # Copyright 2010 The Closure Library Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Automatically converts codebases over to goog.scope. Usage: cd path/to/my/dir; ../../../../javascript/closure/bin/scopify.py Scans every file in this directory, recursively. Looks for existing goog.scope calls, and goog.require'd symbols. If it makes sense to generate a goog.scope call for the file, then we will do so, and try to auto-generate some aliases based on the goog.require'd symbols. Known Issues: When a file is goog.scope'd, the file contents will be indented +2. This may put some lines over 80 chars. These will need to be fixed manually. We will only try to create aliases for capitalized names. We do not check to see if those names will conflict with any existing locals. This creates merge conflicts for every line of every outstanding change. If you intend to run this on your codebase, make sure your team members know. Better yet, send them this script so that they can scopify their outstanding changes and "accept theirs". When an alias is "captured", it can no longer be stubbed out for testing. Run your tests. """ __author__ = 'nicksantos@google.com (Nick Santos)' import os.path import re import sys REQUIRES_RE = re.compile(r"goog.require\('([^']*)'\)") # Edit this manually if you want something to "always" be aliased. # TODO(nicksantos): Add a flag for this. 
DEFAULT_ALIASES = {} def Transform(lines): """Converts the contents of a file into javascript that uses goog.scope. Arguments: lines: A list of strings, corresponding to each line of the file. Returns: A new list of strings, or None if the file was not modified. """ requires = [] # Do an initial scan to be sure that this file can be processed. for line in lines: # Skip this file if it has already been scopified. if line.find('goog.scope') != -1: return None # If there are any global vars or functions, then we also have # to skip the whole file. We might be able to deal with this # more elegantly. if line.find('var ') == 0 or line.find('function ') == 0: return None for match in REQUIRES_RE.finditer(line): requires.append(match.group(1)) if len(requires) == 0: return None # Backwards-sort the requires, so that when one is a substring of another, # we match the longer one first. for val in DEFAULT_ALIASES.values(): if requires.count(val) == 0: requires.append(val) requires.sort() requires.reverse() # Generate a map of requires to their aliases aliases_to_globals = DEFAULT_ALIASES.copy() for req in requires: index = req.rfind('.') if index == -1: alias = req else: alias = req[(index + 1):] # Don't scopify lowercase namespaces, because they may conflict with # local variables. if alias[0].isupper(): aliases_to_globals[alias] = req aliases_to_matchers = {} globals_to_aliases = {} for alias, symbol in aliases_to_globals.items(): globals_to_aliases[symbol] = alias aliases_to_matchers[alias] = re.compile('\\b%s\\b' % symbol) # Insert a goog.scope that aliases all required symbols. 
result = [] START = 0 SEEN_REQUIRES = 1 IN_SCOPE = 2 mode = START aliases_used = set() insertion_index = None num_blank_lines = 0 for line in lines: if mode == START: result.append(line) if re.search(REQUIRES_RE, line): mode = SEEN_REQUIRES elif mode == SEEN_REQUIRES: if (line and not re.search(REQUIRES_RE, line) and not line.isspace()): # There should be two blank lines before goog.scope result += ['\n'] * 2 result.append('goog.scope(function() {\n') insertion_index = len(result) result += ['\n'] * num_blank_lines mode = IN_SCOPE elif line.isspace(): # Keep track of the number of blank lines before each block of code so # that we can move them after the goog.scope line if necessary. num_blank_lines += 1 else: # Print the blank lines we saw before this code block result += ['\n'] * num_blank_lines num_blank_lines = 0 result.append(line) if mode == IN_SCOPE: for symbol in requires: if not symbol in globals_to_aliases: continue alias = globals_to_aliases[symbol] matcher = aliases_to_matchers[alias] for match in matcher.finditer(line): # Check to make sure we're not in a string. # We do this by being as conservative as possible: # if there are any quote or double quote characters # before the symbol on this line, then bail out. before_symbol = line[:match.start(0)] if before_symbol.count('"') > 0 or before_symbol.count("'") > 0: continue line = line.replace(match.group(0), alias) aliases_used.add(alias) if line.isspace(): # Truncate all-whitespace lines result.append('\n') else: result.append(line) if len(aliases_used): aliases_used = [alias for alias in aliases_used] aliases_used.sort() aliases_used.reverse() for alias in aliases_used: symbol = aliases_to_globals[alias] result.insert(insertion_index, 'var %s = %s;\n' % (alias, symbol)) result.append('}); // goog.scope\n') return result else: return None def TransformFileAt(path): """Converts a file into javascript that uses goog.scope. Arguments: path: A path to a file. 
""" f = open(path) lines = Transform(f.readlines()) if lines: f = open(path, 'w') for l in lines: f.write(l) f.close() if __name__ == '__main__': args = sys.argv[1:] if not len(args): args = '.' for file_name in args: if os.path.isdir(file_name): for root, dirs, files in os.walk(file_name): for name in files: if name.endswith('.js') and \ not os.path.islink(os.path.join(root, name)): TransformFileAt(os.path.join(root, name)) else: if file_name.endswith('.js') and \ not os.path.islink(file_name): TransformFileAt(file_name)
apache-2.0
jelugbo/ddi
lms/djangoapps/shoppingcart/migrations/0011_auto__add_invoice__add_field_courseregistrationcode_invoice.py
13
14604
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'Invoice' db.create_table('shoppingcart_invoice', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('total_amount', self.gf('django.db.models.fields.FloatField')()), ('purchaser_name', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)), ('purchaser_contact', self.gf('django.db.models.fields.CharField')(max_length=255)), ('purchaser_email', self.gf('django.db.models.fields.CharField')(max_length=255)), ('tax_id', self.gf('django.db.models.fields.CharField')(max_length=64, null=True)), ('reference', self.gf('django.db.models.fields.CharField')(max_length=255, null=True)), )) db.send_create_signal('shoppingcart', ['Invoice']) # Adding field 'CourseRegistrationCode.invoice' db.add_column('shoppingcart_courseregistrationcode', 'invoice', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['shoppingcart.Invoice'], null=True), keep_default=False) def backwards(self, orm): # Deleting model 'Invoice' db.delete_table('shoppingcart_invoice') # Deleting field 'CourseRegistrationCode.invoice' db.delete_column('shoppingcart_courseregistrationcode', 'invoice_id') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': 
('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': 
('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'shoppingcart.certificateitem': { 'Meta': {'object_name': 'CertificateItem', '_ormbases': ['shoppingcart.OrderItem']}, 'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']"}), 'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '128', 'db_index': 'True'}), 'mode': ('django.db.models.fields.SlugField', [], {'max_length': '50'}), 'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'}) }, 'shoppingcart.coupon': { 'Meta': {'object_name': 'Coupon'}, 'code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}), 'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 8, 6, 0, 0)'}), 'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'percentage_discount': ('django.db.models.fields.IntegerField', [], {'default': '0'}) }, 'shoppingcart.couponredemption': { 'Meta': {'object_name': 'CouponRedemption'}, 'coupon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Coupon']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'shoppingcart.courseregistrationcode': { 'Meta': {'object_name': 'CourseRegistrationCode'}, 'code': 
('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}), 'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 8, 6, 0, 0)'}), 'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_by_user'", 'to': "orm['auth.User']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Invoice']", 'null': 'True'}), 'transaction_group_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}) }, 'shoppingcart.invoice': { 'Meta': {'object_name': 'Invoice'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'purchaser_contact': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'purchaser_email': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'purchaser_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'reference': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}), 'tax_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'total_amount': ('django.db.models.fields.FloatField', [], {}) }, 'shoppingcart.order': { 'Meta': {'object_name': 'Order'}, 'bill_to_cardtype': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}), 'bill_to_ccnum': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}), 'bill_to_city': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}), 'bill_to_country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}), 'bill_to_first': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 
'True'}), 'bill_to_last': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}), 'bill_to_postalcode': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}), 'bill_to_state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}), 'bill_to_street1': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}), 'bill_to_street2': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}), 'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'processor_reply_dump': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'purchase_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'refunded_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'shoppingcart.orderitem': { 'Meta': {'object_name': 'OrderItem'}, 'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}), 'fulfilled_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'line_desc': ('django.db.models.fields.CharField', [], {'default': "'Misc. 
Item'", 'max_length': '1024'}), 'list_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '30', 'decimal_places': '2'}), 'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}), 'qty': ('django.db.models.fields.IntegerField', [], {'default': '1'}), 'refund_requested_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), 'report_comments': ('django.db.models.fields.TextField', [], {'default': "''"}), 'service_fee': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}), 'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32', 'db_index': 'True'}), 'unit_cost': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'shoppingcart.paidcourseregistration': { 'Meta': {'object_name': 'PaidCourseRegistration', '_ormbases': ['shoppingcart.OrderItem']}, 'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '128', 'db_index': 'True'}), 'mode': ('django.db.models.fields.SlugField', [], {'default': "'honor'", 'max_length': '50'}), 'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'}) }, 'shoppingcart.paidcourseregistrationannotation': { 'Meta': {'object_name': 'PaidCourseRegistrationAnnotation'}, 'annotation': ('django.db.models.fields.TextField', [], {'null': 'True'}), 'course_id': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, 'shoppingcart.registrationcoderedemption': { 'Meta': {'object_name': 'RegistrationCodeRedemption'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 
'True'}), 'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}), 'redeemed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 8, 6, 0, 0)', 'null': 'True'}), 'redeemed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'registration_code': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.CourseRegistrationCode']"}) }, 'student.courseenrollment': { 'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'}, 'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) } } complete_apps = ['shoppingcart']
agpl-3.0
durandj/ynot-django
ynot/django/themes/templatetags/breadcrumbs.py
1
2421
from django import template as django_template
from django.template import defaulttags as django_defaulttags
from django.utils import encoding as django_encoding


# pylint: disable=invalid-name, too-few-public-methods
register = django_template.Library()


# pylint: disable=unused-argument
@register.tag
def breadcrumb(parser, token):
    """
    Render breadcrumbs in the form of:

        {% breadcrumb 'Breadcrumb title' url %}
    """
    return BreadcrumbNode(token.split_contents()[1:])
# pylint: enable=unused-argument


@register.tag
def breadcrumb_url(parser, token):
    """
    Render breadcrumbs in the form of:

        {% breadcrumb 'Breadcrumb title' url args %}
    """
    contents = token.split_contents()
    if len(contents) == 2:
        return breadcrumb(parser, token)  # Shortcut to normal breadcrumbs

    title = contents.pop(1)
    token.contents = ' '.join(contents)
    url = django_defaulttags.url(parser, token)

    return UrlBreadcrumbNode(title, url)


class BreadcrumbNode(django_template.Node):
    """Renders one breadcrumb from a literal title or a context variable."""

    def __init__(self, args):
        self.args = [django_template.Variable(arg) for arg in args]

    def render(self, context):
        title = self.args[0].var
        if title.find('\'') == -1 and title.find('\"') == -1:
            # Unquoted token: treat it as a template variable.  Fall back to
            # an empty title when it cannot be resolved (narrowed from a bare
            # except to match the URL branch below).
            try:
                title = self.args[0].resolve(context)
            except django_template.VariableDoesNotExist:
                title = ''
        else:
            # Quoted token: strip the quotes and use the literal text.
            title = django_encoding.smart_unicode(title.strip('\'').strip('\"'))

        url = None
        if len(self.args) > 1:
            try:
                url = self.args[1].resolve(context)
            except django_template.VariableDoesNotExist:
                url = None

        return render_breadcrumb(title, url = url)


class UrlBreadcrumbNode(django_template.Node):
    """Renders one breadcrumb whose URL comes from a compiled {% url %} node."""

    def __init__(self, title, url_node):
        self.title = django_template.Variable(title)
        self.url_node = url_node

    def render(self, context):
        title = self.title.var
        if title.find('\'') == -1 and title.find('\"') == -1:
            # Unquoted token: resolve as a variable, empty title on failure.
            try:
                title = self.title.resolve(context)
            except django_template.VariableDoesNotExist:
                title = ''
        else:
            title = django_encoding.smart_unicode(title.strip('\'').strip('\"'))

        url = self.url_node.render(context)
        return render_breadcrumb(title, url = url)


def render_breadcrumb(title, url = None):
    """
    Build the HTML fragment for a single breadcrumb.

    *title* is the display text; when *url* is given the title is wrapped in
    a link.  Returns the markup as a string.
    """
    if url:
        breadcrumb_node = '<a href="{url}">{title}</a>'.format(
            title = title,
            url = url,
        )
    else:
        breadcrumb_node = '<span>{title}</span>'.format(title = title)

    breadcrumb_node = '<span class="ynot-breadcrumb">{}</span>'.format(breadcrumb_node)

    # BUG FIX: this previously returned `breadcrumb` -- the module-level tag
    # *function* -- instead of the rendered markup, so templates displayed
    # the function repr rather than the breadcrumb HTML.
    return breadcrumb_node
mit
chrismeyersfsu/ansible-modules-core
network/iosxr/iosxr_facts.py
19
13332
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
#

DOCUMENTATION = """
---
module: iosxr_facts
version_added: "2.2"
author: "Peter Sprygada (@privateip)"
short_description: Collect facts from remote devices running IOS-XR
description:
  - Collects a base set of device facts from a remote device that
    is running iosxr.  This module prepends all of the
    base network fact keys with C(ansible_net_<fact>).  The facts
    module will always collect a base set of facts from the device
    and can enable or disable collection of additional facts.
extends_documentation_fragment: iosxr
options:
  gather_subset:
    description:
      - When supplied, this argument will restrict the facts collected
        to a given subset.  Possible values for this argument include
        all, hardware, config, and interfaces.  Can specify a list of
        values to include a larger subset.  Values can also be used
        with an initial C(M(!)) to specify that a specific subset should
        not be collected.
    required: false
    default: '!config'
"""

EXAMPLES = """
# Collect all facts from the device
- iosxr_facts:
    gather_subset: all

# Collect only the config and default facts
- iosxr_facts:
    gather_subset:
      - config

# Do not collect hardware facts
- iosxr_facts:
    gather_subset:
      - "!hardware"
"""

RETURN = """
ansible_net_gather_subset:
  description: The list of fact subsets collected from the device
  returned: always
  type: list

# default
ansible_net_version:
  description: The operating system version running on the remote device
  returned: always
  type: str
ansible_net_hostname:
  description: The configured hostname of the device
  returned: always
  type: string
ansible_net_image:
  description: The image file the device is running
  returned: always
  type: string

# hardware
ansible_net_filesystems:
  description: All file system names available on the device
  returned: when hardware is configured
  type: list
ansible_net_memfree_mb:
  description: The available free memory on the remote device in Mb
  returned: when hardware is configured
  type: int
ansible_net_memtotal_mb:
  description: The total memory on the remote device in Mb
  returned: when hardware is configured
  type: int

# config
ansible_net_config:
  description: The current active config from the device
  returned: when config is configured
  type: str

# interfaces
ansible_net_all_ipv4_addresses:
  description: All IPv4 addresses configured on the device
  returned: when interfaces is configured
  type: list
ansible_net_all_ipv6_addresses:
  description: All IPv6 addresses configured on the device
  returned: when interfaces is configured
  type: list
ansible_net_interfaces:
  description: A hash of all interfaces running on the system
  returned: when interfaces is configured
  type: dict
ansible_net_neighbors:
  description: The list of LLDP neighbors from the remote device
  returned: when interfaces is configured
  type: dict
"""

import re

import ansible.module_utils.iosxr
from ansible.module_utils.netcli import CommandRunner, AddCommandError
from ansible.module_utils.network import NetworkModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import zip


def add_command(runner, command):
    """Register *command* with *runner*, silently ignoring duplicates."""
    try:
        runner.add_command(command)
    except AddCommandError:
        # AddCommandError is raised for any issue adding a command to
        # the runner.  Silently ignore the exception in this case
        pass


class FactsBase(object):
    """Base class for fact collectors.

    Subclasses register the CLI commands they need in commands() and
    extract values from the captured output in populate().
    """

    def __init__(self, runner):
        self.runner = runner
        self.facts = dict()
        self.commands()

    def commands(self):
        raise NotImplementedError


class Default(FactsBase):
    """Collects the base facts: version, image file and hostname."""

    def commands(self):
        add_command(self.runner, 'show version brief')

    def populate(self):
        data = self.runner.get_command('show version brief')

        self.facts['version'] = self.parse_version(data)
        self.facts['image'] = self.parse_image(data)
        self.facts['hostname'] = self.parse_hostname(data)

    def parse_version(self, data):
        match = re.search(r'Version (\S+)$', data, re.M)
        if match:
            return match.group(1)

    def parse_hostname(self, data):
        match = re.search(r'^(.+) uptime', data, re.M)
        if match:
            return match.group(1)

    def parse_image(self, data):
        match = re.search(r'image file is "(.+)"', data)
        if match:
            return match.group(1)


class Hardware(FactsBase):
    """Collects filesystem names and memory totals."""

    def commands(self):
        add_command(self.runner, 'dir /all | include Directory')
        add_command(self.runner, 'show memory summary')

    def populate(self):
        data = self.runner.get_command('dir /all | include Directory')
        self.facts['filesystems'] = self.parse_filesystems(data)

        data = self.runner.get_command('show memory summary')
        match = re.search(r'Physical Memory (\d+)M total \((\d+)', data)
        if match:
            # BUG FIX: the original used match[0] / match[1].  match[0] is
            # the *entire* matched text (not the first capture group), and
            # subscripting match objects is only supported on Python >= 3.6,
            # so this either raised or stored the wrong value.  Use the
            # capture groups explicitly.
            self.facts['memtotal_mb'] = int(match.group(1))
            self.facts['memfree_mb'] = int(match.group(2))

    def parse_filesystems(self, data):
        return re.findall(r'^Directory of (\S+)', data, re.M)


class Config(FactsBase):
    """Collects the device running configuration."""

    def commands(self):
        add_command(self.runner, 'show running-config')

    def populate(self):
        self.facts['config'] = self.runner.get_command('show running-config')


class Interfaces(FactsBase):
    """Collects per-interface details, IP addresses and LLDP neighbors."""

    def commands(self):
        add_command(self.runner, 'show interfaces')
        add_command(self.runner, 'show ipv6 interface')
        add_command(self.runner, 'show lldp')
        add_command(self.runner, 'show lldp neighbors detail')

    def populate(self):
        self.facts['all_ipv4_addresses'] = list()
        self.facts['all_ipv6_addresses'] = list()

        data = self.runner.get_command('show interfaces')
        interfaces = self.parse_interfaces(data)
        self.facts['interfaces'] = self.populate_interfaces(interfaces)

        data = self.runner.get_command('show ipv6 interface')
        if len(data) > 0:
            data = self.parse_interfaces(data)
            self.populate_ipv6_interfaces(data)

        # Neighbor details are only available when LLDP is running.
        if 'LLDP is not enabled' not in self.runner.get_command('show lldp'):
            neighbors = self.runner.get_command('show lldp neighbors detail')
            self.facts['neighbors'] = self.parse_neighbors(neighbors)

    def populate_interfaces(self, interfaces):
        facts = dict()
        for key, value in iteritems(interfaces):
            intf = dict()
            intf['description'] = self.parse_description(value)
            intf['macaddress'] = self.parse_macaddress(value)

            # Parse once and reuse; the original ran the same regex twice.
            ipv4 = self.parse_ipv4(value)
            intf['ipv4'] = ipv4
            if ipv4:
                self.add_ip_address(ipv4['address'], 'ipv4')

            intf['mtu'] = self.parse_mtu(value)
            intf['bandwidth'] = self.parse_bandwidth(value)
            intf['duplex'] = self.parse_duplex(value)
            intf['lineprotocol'] = self.parse_lineprotocol(value)
            intf['operstatus'] = self.parse_operstatus(value)
            intf['type'] = self.parse_type(value)

            facts[key] = intf
        return facts

    def populate_ipv6_interfaces(self, data):
        for key, value in iteritems(data):
            self.facts['interfaces'][key]['ipv6'] = list()
            addresses = re.findall(r'\s+(.+), subnet', value, re.M)
            subnets = re.findall(r', subnet is (.+)$', value, re.M)
            for addr, subnet in zip(addresses, subnets):
                ipv6 = dict(address=addr.strip(), subnet=subnet.strip())
                self.add_ip_address(addr.strip(), 'ipv6')
                self.facts['interfaces'][key]['ipv6'].append(ipv6)

    def add_ip_address(self, address, family):
        if family == 'ipv4':
            self.facts['all_ipv4_addresses'].append(address)
        else:
            self.facts['all_ipv6_addresses'].append(address)

    def parse_neighbors(self, neighbors):
        facts = dict()
        # Each LLDP neighbor entry is separated by a long dashed line.
        nbors = neighbors.split('------------------------------------------------')
        for entry in nbors[1:]:
            if entry == '':
                continue
            intf = self.parse_lldp_intf(entry)
            if intf not in facts:
                facts[intf] = list()
            fact = dict()
            fact['host'] = self.parse_lldp_host(entry)
            fact['port'] = self.parse_lldp_port(entry)
            facts[intf].append(fact)
        return facts

    def parse_interfaces(self, data):
        parsed = dict()
        key = ''
        for line in data.split('\n'):
            if len(line) == 0:
                continue
            elif line[0] == ' ':
                # Indented continuation line: append to the current interface.
                parsed[key] += '\n%s' % line
            else:
                match = re.match(r'^(\S+)', line)
                if match:
                    key = match.group(1)
                    parsed[key] = line
        return parsed

    def parse_description(self, data):
        match = re.search(r'Description: (.+)$', data, re.M)
        if match:
            return match.group(1)

    def parse_macaddress(self, data):
        match = re.search(r'address is (\S+)', data)
        if match:
            return match.group(1)

    def parse_ipv4(self, data):
        match = re.search(r'Internet address is (\S+)/(\d+)', data)
        if match:
            addr = match.group(1)
            masklen = int(match.group(2))
            return dict(address=addr, masklen=masklen)

    def parse_mtu(self, data):
        match = re.search(r'MTU (\d+)', data)
        if match:
            return int(match.group(1))

    def parse_bandwidth(self, data):
        match = re.search(r'BW (\d+)', data)
        if match:
            return int(match.group(1))

    def parse_duplex(self, data):
        match = re.search(r'(\w+) Duplex', data, re.M)
        if match:
            return match.group(1)

    def parse_type(self, data):
        match = re.search(r'Hardware is (.+),', data, re.M)
        if match:
            return match.group(1)

    def parse_lineprotocol(self, data):
        match = re.search(r'line protocol is (.+)\s+?$', data, re.M)
        if match:
            return match.group(1)

    def parse_operstatus(self, data):
        match = re.search(r'^(?:.+) is (.+),', data, re.M)
        if match:
            return match.group(1)

    def parse_lldp_intf(self, data):
        match = re.search(r'^Local Interface: (.+)$', data, re.M)
        if match:
            return match.group(1)

    def parse_lldp_host(self, data):
        match = re.search(r'System Name: (.+)$', data, re.M)
        if match:
            return match.group(1)

    def parse_lldp_port(self, data):
        match = re.search(r'Port id: (.+)$', data, re.M)
        if match:
            return match.group(1)


FACT_SUBSETS = dict(
    default=Default,
    hardware=Hardware,
    interfaces=Interfaces,
    config=Config,
)

VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())


def main():
    """Entry point: resolve the requested subsets, run the collectors and
    return the gathered facts keyed as ansible_net_<fact>."""
    spec = dict(
        gather_subset=dict(default=['!config'], type='list')
    )

    module = NetworkModule(argument_spec=spec, supports_check_mode=True)

    gather_subset = module.params['gather_subset']

    runable_subsets = set()
    exclude_subsets = set()

    for subset in gather_subset:
        if subset == 'all':
            runable_subsets.update(VALID_SUBSETS)
            continue

        if subset.startswith('!'):
            subset = subset[1:]
            if subset == 'all':
                exclude_subsets.update(VALID_SUBSETS)
                continue
            exclude = True
        else:
            exclude = False

        if subset not in VALID_SUBSETS:
            module.fail_json(msg='Bad subset')

        if exclude:
            exclude_subsets.add(subset)
        else:
            runable_subsets.add(subset)

    if not runable_subsets:
        runable_subsets.update(VALID_SUBSETS)

    # The default facts are always collected.
    runable_subsets.difference_update(exclude_subsets)
    runable_subsets.add('default')

    facts = dict()
    facts['gather_subset'] = list(runable_subsets)

    runner = CommandRunner(module)

    instances = list()
    for key in runable_subsets:
        instances.append(FACT_SUBSETS[key](runner))

    runner.run()

    try:
        for inst in instances:
            inst.populate()
            facts.update(inst.facts)
    except Exception:
        # Best-effort diagnostics: dump the raw command output on failure.
        module.exit_json(out=module.from_json(runner.items))

    ansible_facts = dict()
    for key, value in iteritems(facts):
        key = 'ansible_net_%s' % key
        ansible_facts[key] = value

    module.exit_json(ansible_facts=ansible_facts)


if __name__ == '__main__':
    main()
gpl-3.0
dimkal/mne-python
tutorials/plot_cluster_stats_spatio_temporal_2samp.py
15
4321
"""
.. _tut_stats_cluster_source_2samp:

=========================================================================
2 samples permutation test on source data with spatio-temporal clustering
=========================================================================

Tests if the source space data are significantly different between
2 groups of subjects (simulated here using one subject's data).
The multiple comparisons problem is addressed with a cluster-level
permutation test across space and time.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#          Eric Larson <larson.eric.d@gmail.com>
# License: BSD (3-clause)

import os.path as op
import numpy as np
from scipy import stats as stats

import mne
from mne import spatial_tris_connectivity, grade_to_tris
from mne.stats import spatio_temporal_cluster_test, summarize_clusters_stc
from mne.datasets import sample

print(__doc__)

###############################################################################
# Set parameters
data_path = sample.data_path()
stc_fname = data_path + '/MEG/sample/sample_audvis-meg-lh.stc'
subjects_dir = data_path + '/subjects'

# Load stc to in common cortical space (fsaverage)
stc = mne.read_source_estimate(stc_fname)
# Downsample to 50 Hz to keep the permutation cluster test tractable.
stc.resample(50)

stc = mne.morph_data('sample', 'fsaverage', stc, grade=5, smooth=20,
                     subjects_dir=subjects_dir)
n_vertices_fsave, n_times = stc.data.shape
tstep = stc.tstep

n_subjects1, n_subjects2 = 7, 9
print('Simulating data for %d and %d subjects.' % (n_subjects1, n_subjects2))

# Let's make sure our results replicate, so set the seed.
np.random.seed(0)
# Simulate each subject as random noise (scaled by 10) around the single
# real subject's activation pattern.
X1 = np.random.randn(n_vertices_fsave, n_times, n_subjects1) * 10
X2 = np.random.randn(n_vertices_fsave, n_times, n_subjects2) * 10
X1[:, :, :] += stc.data[:, :, np.newaxis]
# make the activity bigger for the second set of subjects
X2[:, :, :] += 3 * stc.data[:, :, np.newaxis]

# We want to compare the overall activity levels for each subject
X1 = np.abs(X1)  # only magnitude
X2 = np.abs(X2)  # only magnitude

###############################################################################
# Compute statistic

# To use an algorithm optimized for spatio-temporal clustering, we
# just pass the spatial connectivity matrix (instead of spatio-temporal)
print('Computing connectivity.')
connectivity = spatial_tris_connectivity(grade_to_tris(5))

# Note that X needs to be a list of multi-dimensional array of shape
# samples (subjects_k) x time x space, so we permute dimensions
X1 = np.transpose(X1, [2, 1, 0])
X2 = np.transpose(X2, [2, 1, 0])
X = [X1, X2]

# Now let's actually do the clustering. This can take a long time...
# Here we set the threshold quite high to reduce computation.
p_threshold = 0.0001
# NOTE(review): the F distribution is one-tailed; dividing p_threshold by 2
# here mirrors the two-tailed t-test convention -- confirm this is intended.
f_threshold = stats.distributions.f.ppf(1. - p_threshold / 2.,
                                        n_subjects1 - 1, n_subjects2 - 1)
print('Clustering.')
# `clu` keeps the full 4-tuple for summarize_clusters_stc below.
T_obs, clusters, cluster_p_values, H0 = clu =\
    spatio_temporal_cluster_test(X, connectivity=connectivity, n_jobs=2,
                                 threshold=f_threshold)
# Now select the clusters that are sig. at p < 0.05 (note that this value
# is multiple-comparisons corrected).
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]

###############################################################################
# Visualize the clusters

print('Visualizing clusters.')

# Now let's build a convenient representation of each cluster, where each
# cluster becomes a "time point" in the SourceEstimate
# fsaverage at source grade 5 has 10242 vertices per hemisphere (lh, rh).
fsave_vertices = [np.arange(10242), np.arange(10242)]
stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
                                             vertices=fsave_vertices,
                                             subject='fsaverage')

# Let's actually plot the first "time point" in the SourceEstimate, which
# shows all the clusters, weighted by duration
subjects_dir = op.join(data_path, 'subjects')
# blue blobs are for condition A != condition B
brain = stc_all_cluster_vis.plot('fsaverage', hemi='both', colormap='mne',
                                 subjects_dir=subjects_dir,
                                 time_label='Duration significant (ms)')
brain.set_data_time_index(0)
brain.show_view('lateral')
brain.save_image('clusters.png')
bsd-3-clause
cle1109/scot
doc/sphinxext/inheritance_diagram.py
4
13650
"""
Defines a docutils directive for inserting inheritance diagrams.

Provide the directive with one or more classes or modules (separated
by whitespace).  For modules, all of the classes in that module will
be used.

Example::

   Given the following classes:

   class A: pass
   class B(A): pass
   class C(A): pass
   class D(B, C): pass
   class E(B): pass

   .. inheritance-diagram: D E

   Produces a graph like the following:

               A
              / \
             B   C
            / \ /
           E   D

The graph is inserted as a PNG+image map into HTML and a PDF in
LaTeX.
"""

import inspect
import os
import re
import subprocess
try:
    from hashlib import md5
except ImportError:
    from md5 import md5

from docutils.nodes import Body, Element
from docutils.parsers.rst import directives
from sphinx.roles import xfileref_role


def my_import(name):
    """Module importer - taken from the python documentation.

    This function allows importing names with dots in them."""
    mod = __import__(name)
    components = name.split('.')
    for comp in components[1:]:
        mod = getattr(mod, comp)
    return mod


class DotException(Exception):
    # Raised for any failure while locating or running graphviz 'dot'.
    pass


class InheritanceGraph(object):
    """
    Given a list of classes, determines the set of classes that
    they inherit from all the way to the root "object", and then
    is able to generate a graphviz dot graph from them.
    """
    def __init__(self, class_names, show_builtins=False):
        """
        *class_names* is a list of child classes to show bases from.

        If *show_builtins* is True, then Python builtins will be shown
        in the graph.
        """
        self.class_names = class_names
        self.classes = self._import_classes(class_names)
        self.all_classes = self._all_classes(self.classes)
        if len(self.all_classes) == 0:
            raise ValueError("No classes found for inheritance diagram")
        self.show_builtins = show_builtins

    # Matches an optional dotted module path followed by a class name.
    py_sig_re = re.compile(r'''^([\w.]*\.)?    # class names
                           (\w+)  \s* $        # optionally arguments
                           ''', re.VERBOSE)

    def _import_class_or_module(self, name):
        """
        Import a class using its fully-qualified *name*.
        """
        try:
            path, base = self.py_sig_re.match(name).groups()
        except:
            # NOTE(review): bare except -- presumably guarding against a
            # failed match (AttributeError on None); consider narrowing.
            raise ValueError(
                "Invalid class or module '%s' specified for inheritance diagram" % name)
        fullname = (path or '') + base
        path = (path and path.rstrip('.'))
        if not path:
            path = base
        try:
            module = __import__(path, None, None, [])
            # We must do an import of the fully qualified name.  Otherwise if a
            # subpackage 'a.b' is requested where 'import a' does NOT provide
            # 'a.b' automatically, then 'a.b' will not be found below.  This
            # second call will force the equivalent of 'import a.b' to happen
            # after the top-level import above.
            my_import(fullname)
        except ImportError:
            raise ValueError(
                "Could not import class or module '%s' specified for inheritance diagram" % name)

        try:
            todoc = module
            for comp in fullname.split('.')[1:]:
                todoc = getattr(todoc, comp)
        except AttributeError:
            raise ValueError(
                "Could not find class or module '%s' specified for inheritance diagram" % name)

        # If a class, just return it
        if inspect.isclass(todoc):
            return [todoc]
        elif inspect.ismodule(todoc):
            # A module: collect every class *defined in* that module
            # (classes merely imported into it are skipped).
            classes = []
            for cls in todoc.__dict__.values():
                if inspect.isclass(cls) and cls.__module__ == todoc.__name__:
                    classes.append(cls)
            return classes
        raise ValueError(
            "'%s' does not resolve to a class or module" % name)

    def _import_classes(self, class_names):
        """
        Import a list of classes.
        """
        classes = []
        for name in class_names:
            classes.extend(self._import_class_or_module(name))
        return classes

    def _all_classes(self, classes):
        """
        Return a list of all classes that are ancestors of *classes*.
        """
        # Dict used as an ordered-ish set of visited classes.
        all_classes = {}

        def recurse(cls):
            all_classes[cls] = None
            for c in cls.__bases__:
                if c not in all_classes:
                    recurse(c)

        for cls in classes:
            recurse(cls)

        return all_classes.keys()

    def class_name(self, cls, parts=0):
        """
        Given a class object, return a fully-qualified name.  This
        works for things I've tested in matplotlib so far, but may not
        be completely general.
        """
        module = cls.__module__
        if module == '__builtin__':
            fullname = cls.__name__
        else:
            fullname = "%s.%s" % (module, cls.__name__)
        if parts == 0:
            return fullname
        # Keep only the trailing *parts* dotted components.
        name_parts = fullname.split('.')
        return '.'.join(name_parts[-parts:])

    def get_all_class_names(self):
        """
        Get all of the class names involved in the graph.
        """
        return [self.class_name(x) for x in self.all_classes]

    # These are the default options for graphviz
    default_graph_options = {
        "rankdir": "LR",
        "size": '"8.0, 12.0"'
    }
    default_node_options = {
        "shape": "box",
        "fontsize": 10,
        "height": 0.25,
        "fontname": "Vera Sans, DejaVu Sans, Liberation Sans, Arial, Helvetica, sans",
        "style": '"setlinewidth(0.5)"'
    }
    default_edge_options = {
        "arrowsize": 0.5,
        "style": '"setlinewidth(0.5)"'
    }

    def _format_node_options(self, options):
        # Render a dict as comma-separated key=value graphviz attributes.
        return ','.join(["%s=%s" % x for x in options.items()])

    def _format_graph_options(self, options):
        # Render a dict as "key=value;" graphviz statements, one per line.
        return ''.join(["%s=%s;\n" % x for x in options.items()])

    def generate_dot(self, fd, name, parts=0, urls={},
                     graph_options={}, node_options={},
                     edge_options={}):
        """
        Generate a graphviz dot graph from the classes that
        were passed in to __init__.

        *fd* is a Python file-like object to write to.

        *name* is the name of the graph

        *urls* is a dictionary mapping class names to http urls

        *graph_options*, *node_options*, *edge_options* are
        dictionaries containing key/value pairs to pass on as graphviz
        properties.
        """
        # NOTE(review): the mutable default arguments ({}) are never mutated
        # in this method (only read or copied), so they are safe here -- but
        # the pattern is fragile; consider None defaults.
        g_options = self.default_graph_options.copy()
        g_options.update(graph_options)
        n_options = self.default_node_options.copy()
        n_options.update(node_options)
        e_options = self.default_edge_options.copy()
        e_options.update(edge_options)

        fd.write('digraph %s {\n' % name)
        fd.write(self._format_graph_options(g_options))

        for cls in self.all_classes:
            if not self.show_builtins and cls in __builtins__.values():
                continue

            name = self.class_name(cls, parts)

            # Write the node
            this_node_options = n_options.copy()
            url = urls.get(self.class_name(cls))
            if url is not None:
                this_node_options['URL'] = '"%s"' % url
            fd.write('  "%s" [%s];\n' %
                     (name, self._format_node_options(this_node_options)))

            # Write the edges
            for base in cls.__bases__:
                if not self.show_builtins and base in __builtins__.values():
                    continue

                base_name = self.class_name(base, parts)
                fd.write('  "%s" -> "%s" [%s];\n' %
                         (base_name, name,
                          self._format_node_options(e_options)))
        fd.write('}\n')

    def run_dot(self, args, name, parts=0, urls={},
                graph_options={}, node_options={}, edge_options={}):
        """
        Run graphviz 'dot' over this graph, returning whatever 'dot'
        writes to stdout.

        *args* will be passed along as commandline arguments.

        *name* is the name of the graph

        *urls* is a dictionary mapping class names to http urls

        Raises DotException for any of the many os and
        installation-related errors that may occur.
        """
        try:
            dot = subprocess.Popen(['dot'] + list(args),
                                   stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                   close_fds=True)
        except OSError:
            raise DotException("Could not execute 'dot'.  Are you sure you have 'graphviz' installed?")
        except ValueError:
            raise DotException("'dot' called with invalid arguments")
        except:
            # NOTE(review): bare except converts any other failure into a
            # DotException; intentional, but it also masks KeyboardInterrupt.
            raise DotException("Unexpected error calling 'dot'")

        self.generate_dot(dot.stdin, name, parts, urls, graph_options,
                          node_options, edge_options)
        dot.stdin.close()
        result = dot.stdout.read()
        returncode = dot.wait()
        if returncode != 0:
            raise DotException("'dot' returned the errorcode %d" % returncode)
        return result


class inheritance_diagram(Body, Element):
    """
    A docutils node to use as a placeholder for the inheritance
    diagram.
    """
    pass


def inheritance_diagram_directive(name, arguments, options, content, lineno,
                                  content_offset, block_text, state,
                                  state_machine):
    """
    Run when the inheritance_diagram directive is first encountered.
    """
    node = inheritance_diagram()

    class_names = arguments

    # Create a graph starting with the list of classes
    graph = InheritanceGraph(class_names)

    # Create xref nodes for each target of the graph's image map and
    # add them to the doc tree so that Sphinx can resolve the
    # references to real URLs later.  These nodes will eventually be
    # removed from the doctree after we're done with them.
    for name in graph.get_all_class_names():
        refnodes, x = xfileref_role(
            'class', ':class:`%s`' % name, name, 0, state)
        node.extend(refnodes)
    # Store the graph object so we can use it to generate the
    # dot file later
    node['graph'] = graph
    # Store the original content for use as a hash
    node['parts'] = options.get('parts', 0)
    node['content'] = " ".join(class_names)
    return [node]


def get_graph_hash(node):
    # Short, stable hash used to name the generated image files.
    # NOTE(review): hashing a str works on Python 2 only; under Python 3
    # md5() requires bytes (.encode()) -- confirm the target interpreter.
    return md5(node['content'] + str(node['parts'])).hexdigest()[-10:]


def html_output_graph(self, node):
    """
    Output the graph for HTML.  This will insert a PNG with clickable
    image map.
    """
    graph = node['graph']
    parts = node['parts']

    graph_hash = get_graph_hash(node)
    name = "inheritance%s" % graph_hash
    path = '_images'
    dest_path = os.path.join(setup.app.builder.outdir, path)
    if not os.path.exists(dest_path):
        os.makedirs(dest_path)
    png_path = os.path.join(dest_path, name + ".png")

    path = setup.app.builder.imgpath

    # Create a mapping from fully-qualified class names to URLs.
    urls = {}
    for child in node:
        if child.get('refuri') is not None:
            urls[child['reftitle']] = child.get('refuri')
        elif child.get('refid') is not None:
            urls[child['reftitle']] = '#' + child.get('refid')

    # These arguments to dot will save a PNG file to disk and write
    # an HTML image map to stdout.
    image_map = graph.run_dot(['-Tpng', '-o%s' % png_path, '-Tcmapx'],
                              name, parts, urls)
    return ('<img src="%s/%s.png" usemap="#%s" class="inheritance"/>%s' %
            (path, name, name, image_map))


def latex_output_graph(self, node):
    """
    Output the graph for LaTeX.  This will insert a PDF.
    """
    graph = node['graph']
    parts = node['parts']

    graph_hash = get_graph_hash(node)
    name = "inheritance%s" % graph_hash
    dest_path = os.path.abspath(os.path.join(setup.app.builder.outdir, '_images'))
    if not os.path.exists(dest_path):
        os.makedirs(dest_path)
    pdf_path = os.path.abspath(os.path.join(dest_path, name + ".pdf"))

    graph.run_dot(['-Tpdf', '-o%s' % pdf_path],
                  name, parts, graph_options={'size': '"6.0,6.0"'})
    return '\n\\includegraphics{%s}\n\n' % pdf_path


def visit_inheritance_diagram(inner_func):
    """
    This is just a wrapper around html/latex_output_graph to make it
    easier to handle errors and insert warnings.
    """
    def visitor(self, node):
        try:
            content = inner_func(self, node)
        except DotException as e:
            # Insert the exception as a warning in the document
            warning = self.document.reporter.warning(str(e), line=node.line)
            warning.parent = node
            node.children = [warning]
        else:
            # NOTE(review): `source` is assigned but never used -- dead code.
            source = self.document.attributes['source']
            self.body.append(content)
            node.children = []
    return visitor


def do_nothing(self, node):
    # No departure action is needed for the placeholder node.
    pass


def setup(app):
    # Sphinx extension entry point: stash the app for the output helpers,
    # then register the placeholder node and the directive.
    setup.app = app
    setup.confdir = app.confdir
    app.add_node(
        inheritance_diagram,
        latex=(visit_inheritance_diagram(latex_output_graph), do_nothing),
        html=(visit_inheritance_diagram(html_output_graph), do_nothing))
    app.add_directive(
        'inheritance-diagram', inheritance_diagram_directive,
        False, (1, 100, 0), parts = directives.nonnegative_int)
mit
sahmed95/sympy
sympy/physics/mechanics/tests/test_kane2.py
58
18815
from sympy.core.compatibility import range from sympy import cos, Matrix, simplify, sin, solve, tan, pi from sympy import symbols, trigsimp, zeros from sympy.physics.mechanics import (cross, dot, dynamicsymbols, KanesMethod, inertia, inertia_of_point_mass, Point, ReferenceFrame, RigidBody) def test_aux_dep(): # This test is about rolling disc dynamics, comparing the results found # with KanesMethod to those found when deriving the equations "manually" # with SymPy. # The terms Fr, Fr*, and Fr*_steady are all compared between the two # methods. Here, Fr*_steady refers to the generalized inertia forces for an # equilibrium configuration. # Note: comparing to the test of test_rolling_disc() in test_kane.py, this # test also tests auxiliary speeds and configuration and motion constraints #, seen in the generalized dependent coordinates q[3], and depend speeds # u[3], u[4] and u[5]. # First, mannual derivation of Fr, Fr_star, Fr_star_steady. # Symbols for time and constant parameters. # Symbols for contact forces: Fx, Fy, Fz. 
t, r, m, g, I, J = symbols('t r m g I J') Fx, Fy, Fz = symbols('Fx Fy Fz') # Configuration variables and their time derivatives: # q[0] -- yaw # q[1] -- lean # q[2] -- spin # q[3] -- dot(-r*B.z, A.z) -- distance from ground plane to disc center in # A.z direction # Generalized speeds and their time derivatives: # u[0] -- disc angular velocity component, disc fixed x direction # u[1] -- disc angular velocity component, disc fixed y direction # u[2] -- disc angular velocity component, disc fixed z direction # u[3] -- disc velocity component, A.x direction # u[4] -- disc velocity component, A.y direction # u[5] -- disc velocity component, A.z direction # Auxiliary generalized speeds: # ua[0] -- contact point auxiliary generalized speed, A.x direction # ua[1] -- contact point auxiliary generalized speed, A.y direction # ua[2] -- contact point auxiliary generalized speed, A.z direction q = dynamicsymbols('q:4') qd = [qi.diff(t) for qi in q] u = dynamicsymbols('u:6') ud = [ui.diff(t) for ui in u] ud_zero = dict(zip(ud, [0.]*len(ud))) ua = dynamicsymbols('ua:3') ua_zero = dict(zip(ua, [0.]*len(ua))) # Reference frames: # Yaw intermediate frame: A. # Lean intermediate frame: B. # Disc fixed frame: C. N = ReferenceFrame('N') A = N.orientnew('A', 'Axis', [q[0], N.z]) B = A.orientnew('B', 'Axis', [q[1], A.x]) C = B.orientnew('C', 'Axis', [q[2], B.y]) # Angular velocity and angular acceleration of disc fixed frame # u[0], u[1] and u[2] are generalized independent speeds. C.set_ang_vel(N, u[0]*B.x + u[1]*B.y + u[2]*B.z) C.set_ang_acc(N, C.ang_vel_in(N).diff(t, B) + cross(B.ang_vel_in(N), C.ang_vel_in(N))) # Velocity and acceleration of points: # Disc-ground contact point: P. # Center of disc: O, defined from point P with depend coordinate: q[3] # u[3], u[4] and u[5] are generalized dependent speeds. 
P = Point('P') P.set_vel(N, ua[0]*A.x + ua[1]*A.y + ua[2]*A.z) O = P.locatenew('O', q[3]*A.z + r*sin(q[1])*A.y) O.set_vel(N, u[3]*A.x + u[4]*A.y + u[5]*A.z) O.set_acc(N, O.vel(N).diff(t, A) + cross(A.ang_vel_in(N), O.vel(N))) # Kinematic differential equations: # Two equalities: one is w_c_n_qd = C.ang_vel_in(N) in three coordinates # directions of B, for qd0, qd1 and qd2. # the other is v_o_n_qd = O.vel(N) in A.z direction for qd3. # Then, solve for dq/dt's in terms of u's: qd_kd. w_c_n_qd = qd[0]*A.z + qd[1]*B.x + qd[2]*B.y v_o_n_qd = O.pos_from(P).diff(t, A) + cross(A.ang_vel_in(N), O.pos_from(P)) kindiffs = Matrix([dot(w_c_n_qd - C.ang_vel_in(N), uv) for uv in B] + [dot(v_o_n_qd - O.vel(N), A.z)]) qd_kd = solve(kindiffs, qd) # Values of generalized speeds during a steady turn for later substitution # into the Fr_star_steady. steady_conditions = solve(kindiffs.subs({qd[1] : 0, qd[3] : 0}), u) steady_conditions.update({qd[1] : 0, qd[3] : 0}) # Partial angular velocities and velocities. partial_w_C = [C.ang_vel_in(N).diff(ui, N) for ui in u + ua] partial_v_O = [O.vel(N).diff(ui, N) for ui in u + ua] partial_v_P = [P.vel(N).diff(ui, N) for ui in u + ua] # Configuration constraint: f_c, the projection of radius r in A.z direction # is q[3]. # Velocity constraints: f_v, for u3, u4 and u5. # Acceleration constraints: f_a. f_c = Matrix([dot(-r*B.z, A.z) - q[3]]) f_v = Matrix([dot(O.vel(N) - (P.vel(N) + cross(C.ang_vel_in(N), O.pos_from(P))), ai).expand() for ai in A]) v_o_n = cross(C.ang_vel_in(N), O.pos_from(P)) a_o_n = v_o_n.diff(t, A) + cross(A.ang_vel_in(N), v_o_n) f_a = Matrix([dot(O.acc(N) - a_o_n, ai) for ai in A]) # Solve for constraint equations in the form of # u_dependent = A_rs * [u_i; u_aux]. # First, obtain constraint coefficient matrix: M_v * [u; ua] = 0; # Second, taking u[0], u[1], u[2] as independent, # taking u[3], u[4], u[5] as dependent, # rearranging the matrix of M_v to be A_rs for u_dependent. 
# Third, u_aux ==0 for u_dep, and resulting dictionary of u_dep_dict. M_v = zeros(3, 9) for i in range(3): for j, ui in enumerate(u + ua): M_v[i, j] = f_v[i].diff(ui) M_v_i = M_v[:, :3] M_v_d = M_v[:, 3:6] M_v_aux = M_v[:, 6:] M_v_i_aux = M_v_i.row_join(M_v_aux) A_rs = - M_v_d.inv() * M_v_i_aux u_dep = A_rs[:, :3] * Matrix(u[:3]) u_dep_dict = dict(zip(u[3:], u_dep)) # Active forces: F_O acting on point O; F_P acting on point P. # Generalized active forces (unconstrained): Fr_u = F_point * pv_point. F_O = m*g*A.z F_P = Fx * A.x + Fy * A.y + Fz * A.z Fr_u = Matrix([dot(F_O, pv_o) + dot(F_P, pv_p) for pv_o, pv_p in zip(partial_v_O, partial_v_P)]) # Inertia force: R_star_O. # Inertia of disc: I_C_O, where J is a inertia component about principal axis. # Inertia torque: T_star_C. # Generalized inertia forces (unconstrained): Fr_star_u. R_star_O = -m*O.acc(N) I_C_O = inertia(B, I, J, I) T_star_C = -(dot(I_C_O, C.ang_acc_in(N)) \ + cross(C.ang_vel_in(N), dot(I_C_O, C.ang_vel_in(N)))) Fr_star_u = Matrix([dot(R_star_O, pv) + dot(T_star_C, pav) for pv, pav in zip(partial_v_O, partial_w_C)]) # Form nonholonomic Fr: Fr_c, and nonholonomic Fr_star: Fr_star_c. # Also, nonholonomic Fr_star in steady turning condition: Fr_star_steady. Fr_c = Fr_u[:3, :].col_join(Fr_u[6:, :]) + A_rs.T * Fr_u[3:6, :] Fr_star_c = Fr_star_u[:3, :].col_join(Fr_star_u[6:, :])\ + A_rs.T * Fr_star_u[3:6, :] Fr_star_steady = Fr_star_c.subs(ud_zero).subs(u_dep_dict)\ .subs(steady_conditions).subs({q[3]: -r*cos(q[1])}).expand() # Second, using KaneMethod in mechanics for fr, frstar and frstar_steady. # Rigid Bodies: disc, with inertia I_C_O. iner_tuple = (I_C_O, O) disc = RigidBody('disc', O, C, m, iner_tuple) bodyList = [disc] # Generalized forces: Gravity: F_o; Auxiliary forces: F_p. F_o = (O, F_O) F_p = (P, F_P) forceList = [F_o, F_p] # KanesMethod. 
kane = KanesMethod( N, q_ind= q[:3], u_ind= u[:3], kd_eqs=kindiffs, q_dependent=q[3:], configuration_constraints = f_c, u_dependent=u[3:], velocity_constraints= f_v, u_auxiliary=ua ) # fr, frstar, frstar_steady and kdd(kinematic differential equations). (fr, frstar)= kane.kanes_equations(forceList, bodyList) frstar_steady = frstar.subs(ud_zero).subs(u_dep_dict).subs(steady_conditions)\ .subs({q[3]: -r*cos(q[1])}).expand() kdd = kane.kindiffdict() assert Matrix(Fr_c).expand() == fr.expand() assert Matrix(Fr_star_c.subs(kdd)).expand() == frstar.expand() assert (simplify(Matrix(Fr_star_steady).expand()) == simplify(frstar_steady.expand())) def test_non_central_inertia(): # This tests that the calculation of Fr* does not depend the point # about which the inertia of a rigid body is defined. This test solves # exercises 8.12, 8.17 from Kane 1985. # Declare symbols q1, q2, q3 = dynamicsymbols('q1:4') q1d, q2d, q3d = dynamicsymbols('q1:4', level=1) u1, u2, u3, u4, u5 = dynamicsymbols('u1:6') u_prime, R, M, g, e, f, theta = symbols('u\' R, M, g, e, f, theta') a, b, mA, mB, IA, J, K, t = symbols('a b mA mB IA J K t') Q1, Q2, Q3 = symbols('Q1, Q2 Q3') IA22, IA23, IA33 = symbols('IA22 IA23 IA33') # Reference Frames F = ReferenceFrame('F') P = F.orientnew('P', 'axis', [-theta, F.y]) A = P.orientnew('A', 'axis', [q1, P.x]) A.set_ang_vel(F, u1*A.x + u3*A.z) # define frames for wheels B = A.orientnew('B', 'axis', [q2, A.z]) C = A.orientnew('C', 'axis', [q3, A.z]) B.set_ang_vel(A, u4 * A.z) C.set_ang_vel(A, u5 * A.z) # define points D, S*, Q on frame A and their velocities pD = Point('D') pD.set_vel(A, 0) # u3 will not change v_D_F since wheels are still assumed to roll without slip. 
pD.set_vel(F, u2 * A.y) pS_star = pD.locatenew('S*', e*A.y) pQ = pD.locatenew('Q', f*A.y - R*A.x) for p in [pS_star, pQ]: p.v2pt_theory(pD, F, A) # masscenters of bodies A, B, C pA_star = pD.locatenew('A*', a*A.y) pB_star = pD.locatenew('B*', b*A.z) pC_star = pD.locatenew('C*', -b*A.z) for p in [pA_star, pB_star, pC_star]: p.v2pt_theory(pD, F, A) # points of B, C touching the plane P pB_hat = pB_star.locatenew('B^', -R*A.x) pC_hat = pC_star.locatenew('C^', -R*A.x) pB_hat.v2pt_theory(pB_star, F, B) pC_hat.v2pt_theory(pC_star, F, C) # the velocities of B^, C^ are zero since B, C are assumed to roll without slip kde = [q1d - u1, q2d - u4, q3d - u5] vc = [dot(p.vel(F), A.y) for p in [pB_hat, pC_hat]] # inertias of bodies A, B, C # IA22, IA23, IA33 are not specified in the problem statement, but are # necessary to define an inertia object. Although the values of # IA22, IA23, IA33 are not known in terms of the variables given in the # problem statement, they do not appear in the general inertia terms. inertia_A = inertia(A, IA, IA22, IA33, 0, IA23, 0) inertia_B = inertia(B, K, K, J) inertia_C = inertia(C, K, K, J) # define the rigid bodies A, B, C rbA = RigidBody('rbA', pA_star, A, mA, (inertia_A, pA_star)) rbB = RigidBody('rbB', pB_star, B, mB, (inertia_B, pB_star)) rbC = RigidBody('rbC', pC_star, C, mB, (inertia_C, pC_star)) km = KanesMethod(F, q_ind=[q1, q2, q3], u_ind=[u1, u2], kd_eqs=kde, u_dependent=[u4, u5], velocity_constraints=vc, u_auxiliary=[u3]) forces = [(pS_star, -M*g*F.x), (pQ, Q1*A.x + Q2*A.y + Q3*A.z)] bodies = [rbA, rbB, rbC] fr, fr_star = km.kanes_equations(forces, bodies) vc_map = solve(vc, [u4, u5]) # KanesMethod returns the negative of Fr, Fr* as defined in Kane1985. 
fr_star_expected = Matrix([ -(IA + 2*J*b**2/R**2 + 2*K + mA*a**2 + 2*mB*b**2) * u1.diff(t) - mA*a*u1*u2, -(mA + 2*mB +2*J/R**2) * u2.diff(t) + mA*a*u1**2, 0]) assert (trigsimp(fr_star.subs(vc_map).subs(u3, 0)).doit().expand() == fr_star_expected.expand()) # define inertias of rigid bodies A, B, C about point D # I_S/O = I_S/S* + I_S*/O bodies2 = [] for rb, I_star in zip([rbA, rbB, rbC], [inertia_A, inertia_B, inertia_C]): I = I_star + inertia_of_point_mass(rb.mass, rb.masscenter.pos_from(pD), rb.frame) bodies2.append(RigidBody('', rb.masscenter, rb.frame, rb.mass, (I, pD))) fr2, fr_star2 = km.kanes_equations(forces, bodies2) assert (trigsimp(fr_star2.subs(vc_map).subs(u3, 0)).doit().expand() == fr_star_expected.expand()) def test_sub_qdot(): # This test solves exercises 8.12, 8.17 from Kane 1985 and defines # some velocities in terms of q, qdot. ## --- Declare symbols --- q1, q2, q3 = dynamicsymbols('q1:4') q1d, q2d, q3d = dynamicsymbols('q1:4', level=1) u1, u2, u3 = dynamicsymbols('u1:4') u_prime, R, M, g, e, f, theta = symbols('u\' R, M, g, e, f, theta') a, b, mA, mB, IA, J, K, t = symbols('a b mA mB IA J K t') IA22, IA23, IA33 = symbols('IA22 IA23 IA33') Q1, Q2, Q3 = symbols('Q1 Q2 Q3') # --- Reference Frames --- F = ReferenceFrame('F') P = F.orientnew('P', 'axis', [-theta, F.y]) A = P.orientnew('A', 'axis', [q1, P.x]) A.set_ang_vel(F, u1*A.x + u3*A.z) # define frames for wheels B = A.orientnew('B', 'axis', [q2, A.z]) C = A.orientnew('C', 'axis', [q3, A.z]) ## --- define points D, S*, Q on frame A and their velocities --- pD = Point('D') pD.set_vel(A, 0) # u3 will not change v_D_F since wheels are still assumed to roll w/o slip pD.set_vel(F, u2 * A.y) pS_star = pD.locatenew('S*', e*A.y) pQ = pD.locatenew('Q', f*A.y - R*A.x) # masscenters of bodies A, B, C pA_star = pD.locatenew('A*', a*A.y) pB_star = pD.locatenew('B*', b*A.z) pC_star = pD.locatenew('C*', -b*A.z) for p in [pS_star, pQ, pA_star, pB_star, pC_star]: p.v2pt_theory(pD, F, A) # points of B, C touching 
the plane P pB_hat = pB_star.locatenew('B^', -R*A.x) pC_hat = pC_star.locatenew('C^', -R*A.x) pB_hat.v2pt_theory(pB_star, F, B) pC_hat.v2pt_theory(pC_star, F, C) # --- relate qdot, u --- # the velocities of B^, C^ are zero since B, C are assumed to roll w/o slip kde = [dot(p.vel(F), A.y) for p in [pB_hat, pC_hat]] kde += [u1 - q1d] kde_map = solve(kde, [q1d, q2d, q3d]) for k, v in list(kde_map.items()): kde_map[k.diff(t)] = v.diff(t) # inertias of bodies A, B, C # IA22, IA23, IA33 are not specified in the problem statement, but are # necessary to define an inertia object. Although the values of # IA22, IA23, IA33 are not known in terms of the variables given in the # problem statement, they do not appear in the general inertia terms. inertia_A = inertia(A, IA, IA22, IA33, 0, IA23, 0) inertia_B = inertia(B, K, K, J) inertia_C = inertia(C, K, K, J) # define the rigid bodies A, B, C rbA = RigidBody('rbA', pA_star, A, mA, (inertia_A, pA_star)) rbB = RigidBody('rbB', pB_star, B, mB, (inertia_B, pB_star)) rbC = RigidBody('rbC', pC_star, C, mB, (inertia_C, pC_star)) ## --- use kanes method --- km = KanesMethod(F, [q1, q2, q3], [u1, u2], kd_eqs=kde, u_auxiliary=[u3]) forces = [(pS_star, -M*g*F.x), (pQ, Q1*A.x + Q2*A.y + Q3*A.z)] bodies = [rbA, rbB, rbC] # Q2 = -u_prime * u2 * Q1 / sqrt(u2**2 + f**2 * u1**2) # -u_prime * R * u2 / sqrt(u2**2 + f**2 * u1**2) = R / Q1 * Q2 fr_expected = Matrix([ f*Q3 + M*g*e*sin(theta)*cos(q1), Q2 + M*g*sin(theta)*sin(q1), e*M*g*cos(theta) - Q1*f - Q2*R]) #Q1 * (f - u_prime * R * u2 / sqrt(u2**2 + f**2 * u1**2)))]) fr_star_expected = Matrix([ -(IA + 2*J*b**2/R**2 + 2*K + mA*a**2 + 2*mB*b**2) * u1.diff(t) - mA*a*u1*u2, -(mA + 2*mB +2*J/R**2) * u2.diff(t) + mA*a*u1**2, 0]) fr, fr_star = km.kanes_equations(forces, bodies) assert (fr.expand() == fr_expected.expand()) assert (trigsimp(fr_star).expand() == fr_star_expected.expand()) def test_sub_qdot2(): # This test solves exercises 8.3 from Kane 1985 and defines # all velocities in terms of q, 
qdot. We check that the generalized active # forces are correctly computed if u terms are only defined in the # kinematic differential equations. # # This functionality was added in PR 8948. Without qdot/u substitution, the # KanesMethod constructor will fail during the constraint initialization as # the B matrix will be poorly formed and inversion of the dependent part # will fail. g, m, Px, Py, Pz, R, t = symbols('g m Px Py Pz R t') q = dynamicsymbols('q:5') qd = dynamicsymbols('q:5', level=1) u = dynamicsymbols('u:5') ## Define inertial, intermediate, and rigid body reference frames A = ReferenceFrame('A') B_prime = A.orientnew('B_prime', 'Axis', [q[0], A.z]) B = B_prime.orientnew('B', 'Axis', [pi/2 - q[1], B_prime.x]) C = B.orientnew('C', 'Axis', [q[2], B.z]) ## Define points of interest and their velocities pO = Point('O') pO.set_vel(A, 0) # R is the point in plane H that comes into contact with disk C. pR = pO.locatenew('R', q[3]*A.x + q[4]*A.y) pR.set_vel(A, pR.pos_from(pO).diff(t, A)) pR.set_vel(B, 0) # C^ is the point in disk C that comes into contact with plane H. pC_hat = pR.locatenew('C^', 0) pC_hat.set_vel(C, 0) # C* is the point at the center of disk C. 
pCs = pC_hat.locatenew('C*', R*B.y) pCs.set_vel(C, 0) pCs.set_vel(B, 0) # calculate velocites of points C* and C^ in frame A pCs.v2pt_theory(pR, A, B) # points C* and R are fixed in frame B pC_hat.v2pt_theory(pCs, A, C) # points C* and C^ are fixed in frame C ## Define forces on each point of the system R_C_hat = Px*A.x + Py*A.y + Pz*A.z R_Cs = -m*g*A.z forces = [(pC_hat, R_C_hat), (pCs, R_Cs)] ## Define kinematic differential equations # let ui = omega_C_A & bi (i = 1, 2, 3) # u4 = qd4, u5 = qd5 u_expr = [C.ang_vel_in(A) & uv for uv in B] u_expr += qd[3:] kde = [ui - e for ui, e in zip(u, u_expr)] km1 = KanesMethod(A, q, u, kde) fr1, _ = km1.kanes_equations(forces, []) ## Calculate generalized active forces if we impose the condition that the # disk C is rolling without slipping u_indep = u[:3] u_dep = list(set(u) - set(u_indep)) vc = [pC_hat.vel(A) & uv for uv in [A.x, A.y]] km2 = KanesMethod(A, q, u_indep, kde, u_dependent=u_dep, velocity_constraints=vc) fr2, _ = km2.kanes_equations(forces, []) fr1_expected = Matrix([ -R*g*m*sin(q[1]), -R*(Px*cos(q[0]) + Py*sin(q[0]))*tan(q[1]), R*(Px*cos(q[0]) + Py*sin(q[0])), Px, Py]) fr2_expected = Matrix([ -R*g*m*sin(q[1]), 0, 0]) assert (trigsimp(fr1.expand()) == trigsimp(fr1_expected.expand())) assert (trigsimp(fr2.expand()) == trigsimp(fr2_expected.expand()))
bsd-3-clause
kongseokhwan/kulcloud-iitp-neutron
neutron/db/migration/alembic_migrations/versions/1421183d533f_nsx_dhcp_metadata.py
15
1701
# Copyright 2014 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """NSX DHCP/metadata support Revision ID: 1421183d533f Revises: 50e86cb2637a Create Date: 2013-10-11 14:33:37.303215 """ revision = '1421183d533f' down_revision = '50e86cb2637a' from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'lsn', sa.Column('net_id', sa.String(length=36), nullable=False), sa.Column('lsn_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('lsn_id')) op.create_table( 'lsn_port', sa.Column('lsn_port_id', sa.String(length=36), nullable=False), sa.Column('lsn_id', sa.String(length=36), nullable=False), sa.Column('sub_id', sa.String(length=36), nullable=False, unique=True), sa.Column('mac_addr', sa.String(length=32), nullable=False, unique=True), sa.ForeignKeyConstraint(['lsn_id'], ['lsn.lsn_id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('lsn_port_id'))
apache-2.0
Tranzystorek/servo
tests/wpt/harness/wptrunner/update/state.py
196
4417
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. import os import cPickle as pickle here = os.path.abspath(os.path.split(__file__)[0]) class State(object): filename = os.path.join(here, ".wpt-update.lock") def __new__(cls, logger): rv = cls.load(logger) if rv is not None: logger.debug("Existing state found") return rv logger.debug("No existing state found") return object.__new__(cls, logger) def __init__(self, logger): """Object containing state variables created when running Steps. On write the state is serialized to disk, such that it can be restored in the event that the program is interrupted before all steps are complete. Note that this only works well if the values are immutable; mutating an existing value will not cause the data to be serialized. Variables are set and get as attributes e.g. state_obj.spam = "eggs". :param parent: Parent State object or None if this is the root object. 
""" if hasattr(self, "_data"): return self._data = [{}] self._logger = logger self._index = 0 def __getstate__(self): rv = self.__dict__.copy() del rv["_logger"] return rv @classmethod def load(cls, logger): """Load saved state from a file""" try: with open(cls.filename) as f: try: rv = pickle.load(f) logger.debug("Loading data %r" % (rv._data,)) rv._logger = logger rv._index = 0 return rv except EOFError: logger.warning("Found empty state file") except IOError: logger.debug("IOError loading stored state") def push(self, init_values): """Push a new clean state dictionary :param init_values: List of variable names in the current state dict to copy into the new state dict.""" return StateContext(self, init_values) def save(self): """Write the state to disk""" with open(self.filename, "w") as f: pickle.dump(self, f) def is_empty(self): return len(self._data) == 1 and self._data[0] == {} def clear(self): """Remove all state and delete the stored copy.""" try: os.unlink(self.filename) except OSError: pass self._data = [{}] def __setattr__(self, key, value): if key.startswith("_"): object.__setattr__(self, key, value) else: self._data[self._index][key] = value self.save() def __getattr__(self, key): if key.startswith("_"): raise AttributeError try: return self._data[self._index][key] except KeyError: raise AttributeError def __contains__(self, key): return key in self._data[self._index] def update(self, items): """Add a dictionary of {name: value} pairs to the state""" self._data[self._index].update(items) self.save() def keys(self): return self._data[self._index].keys() class StateContext(object): def __init__(self, state, init_values): self.state = state self.init_values = init_values def __enter__(self): if len(self.state._data) == self.state._index + 1: # This is the case where there is no stored state new_state = {} for key in self.init_values: new_state[key] = self.state._data[self.state._index][key] self.state._data.append(new_state) self.state._index += 1 
self.state._logger.debug("Incremented index to %s" % self.state._index) def __exit__(self, *args, **kwargs): if len(self.state._data) > 1: assert self.state._index == len(self.state._data) - 1 self.state._data.pop() self.state._index -= 1 self.state._logger.debug("Decremented index to %s" % self.state._index) assert self.state._index >= 0 else: raise ValueError("Tried to pop the top state")
mpl-2.0
Septima/qgis-wedgebuffer
src/WedgeBuffer/test/test_translations.py
116
1741
# coding=utf-8 """Safe Translations Test. .. note:: This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. """ from utilities import get_qgis_app __author__ = 'ismailsunni@yahoo.co.id' __date__ = '12/10/2011' __copyright__ = ('Copyright 2012, Australia Indonesia Facility for ' 'Disaster Reduction') import unittest import os from PyQt4.QtCore import QCoreApplication, QTranslator QGIS_APP = get_qgis_app() class SafeTranslationsTest(unittest.TestCase): """Test translations work.""" def setUp(self): """Runs before each test.""" if 'LANG' in os.environ.iterkeys(): os.environ.__delitem__('LANG') def tearDown(self): """Runs after each test.""" if 'LANG' in os.environ.iterkeys(): os.environ.__delitem__('LANG') def test_qgis_translations(self): """Test that translations work.""" parent_path = os.path.join(__file__, os.path.pardir, os.path.pardir) dir_path = os.path.abspath(parent_path) file_path = os.path.join( dir_path, 'i18n', 'af.qm') translator = QTranslator() translator.load(file_path) QCoreApplication.installTranslator(translator) expected_message = 'Goeie more' real_message = QCoreApplication.translate("@default", 'Good morning') self.assertEqual(real_message, expected_message) if __name__ == "__main__": suite = unittest.makeSuite(SafeTranslationsTest) runner = unittest.TextTestRunner(verbosity=2) runner.run(suite)
gpl-2.0
edx/edx-enterprise
enterprise/migrations/0111_pendingenterprisecustomeradminuser.py
1
3009
# Generated by Django 2.2.15 on 2020-09-09 14:31 import simple_history.models import django.db.models.deletion import django.utils.timezone from django.conf import settings from django.db import migrations, models import model_utils.fields class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('enterprise', '0110_add_default_contract_discount'), ] operations = [ migrations.CreateModel( name='HistoricalPendingEnterpriseCustomerAdminUser', fields=[ ('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')), ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')), ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')), ('user_email', models.EmailField(max_length=254)), ('history_id', models.AutoField(primary_key=True, serialize=False)), ('history_date', models.DateTimeField()), ('history_change_reason', models.CharField(max_length=100, null=True)), ('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)), ('enterprise_customer', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='enterprise.EnterpriseCustomer')), ('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)), ], options={ 'get_latest_by': 'history_date', 'verbose_name': 'historical pending enterprise customer admin user', 'ordering': ('-history_date', '-history_id'), }, bases=(simple_history.models.HistoricalChanges, models.Model), ), migrations.CreateModel( name='PendingEnterpriseCustomerAdminUser', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', 
model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')), ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')), ('user_email', models.EmailField(max_length=254)), ('enterprise_customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='enterprise.EnterpriseCustomer')), ], options={ 'unique_together': {('enterprise_customer', 'user_email')}, 'ordering': ['created'], }, ), ]
agpl-3.0
openatv/enigma2
tools/svg2skin.py
98
2458
#!/usr/bin/python # don't expect too much. # this is a really simple&stupid svg parser, which will use rectangles # and text fields to produce <widget> snippets for a skin. # use object "id" fields for source names if you want. # extracting font information is buggy. # if you want text fields, please use flow text regions, instead of simple # text. otherwise, width and height are unknown. # # tested only with a single inkscape-generated SVG. import sys from xml.sax import make_parser from xml.sax.handler import ContentHandler def getattrs(attrs, *a): res = [] for x in a: res.append(float(attrs[x])) return res def parsedict(attrs): if not attrs: return [] d = attrs.split(';') r = { } for x in d: (key, val) = x.split(':') r[key] = val return r def px(x): return int(float(x[:-2]) + .5) def contains(box_o, box_i): return box_o[0] <= box_i[0] and box_o[1] <= box_i[1] and box_o[2] >= box_i[2] and box_o[3] >= box_i[3] class parseXML(ContentHandler): def __init__(self): self.isPointsElement, self.isReboundsElement = 0, 0 self.bbox = None self.find_bbox = False self.flow = None def startElement(self, name, attrs): if self.find_bbox: if name != "rect": return box = getattrs(attrs, "x", "y", "width", "height") if not self.bbox or contains(box, self.bbox): self.bbox = box return if name == "rect": (x, y, width, height) = getattrs(attrs, "x", "y", "width", "height") x -= self.bbox[0] y -= self.bbox[1] id = attrs["id"] if self.flow: id = self.flow self.flow = None styles = parsedict(attrs.get("style", "")) elif name == "text": (x, y) = getattrs(attrs, "x", "y") x -= self.bbox[0] y -= self.bbox[1] width, height = 0, 0 styles = parsedict(attrs["style"]) id = attrs["id"] elif name == "flowRoot": self.flow = attrs["id"] return else: return if "font-size" in styles: font = ' font="Regular;%d"' % px(styles["font-size"]) else: font = "" print """\t\t<widget source="%s" render="Label" position="%d,%d" size="%d,%d" %s />""" % (id, x, y, width, height, font) parser = make_parser() 
contentHandler = parseXML() parser.setContentHandler(contentHandler) contentHandler.find_bbox = True parser.parse(sys.argv[1]) bboxi = tuple([int(x) for x in contentHandler.bbox]) contentHandler.find_bbox = False print '\t<screen name="" position="%d,%d" size="%d,%d" title="">' % bboxi parser.parse(sys.argv[1]) print '\t</screen>'
gpl-2.0
hnakamur/ansible
v1/ansible/runner/lookup_plugins/url.py
86
1588
# (c) 2015, Brian Coca <bcoca@ansible.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from ansible import utils import urllib2 class LookupModule(object): def __init__(self, basedir=None, **kwargs): self.basedir = basedir def run(self, terms, inject=None, **kwargs): terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject) if isinstance(terms, basestring): terms = [ terms ] ret = [] for term in terms: try: r = urllib2.Request(term) response = urllib2.urlopen(r) except URLError, e: utils.warnings("Failed lookup url for %s : %s" % (term, str(e))) continue except HTTPError, e: utils.warnings("Received HTTP error for %s : %s" % (term, str(e))) continue for line in response.read().splitlines(): ret.append(line) return ret
gpl-3.0
KaranToor/MA450
google-cloud-sdk/lib/googlecloudsdk/third_party/appengine/api/yaml_builder.py
7
14943
# # Copyright 2007 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyYAML event builder handler Receives events from YAML listener and forwards them to a builder object so that it can construct a properly structured object. """ # WARNING: This file is externally viewable by our users. All comments from # this file will be stripped. The docstrings will NOT. Do not put sensitive # information in docstrings. If you must communicate internal information in # this source file, please place them in comments only. from googlecloudsdk.third_party.appengine.api import yaml_errors from googlecloudsdk.third_party.appengine.api import yaml_listener import yaml # Token constants used by handler for keeping track of handler state. _TOKEN_DOCUMENT = 'document' _TOKEN_SEQUENCE = 'sequence' _TOKEN_MAPPING = 'mapping' _TOKEN_KEY = 'key' _TOKEN_VALUES = frozenset(( _TOKEN_DOCUMENT, _TOKEN_SEQUENCE, _TOKEN_MAPPING, _TOKEN_KEY)) class Builder(object): """Interface for building documents and type from YAML events. Implement this interface to create a new builder. Builders are passed to the BuilderHandler and used as a factory and assembler for creating concrete representations of YAML files. """ def BuildDocument(self): """Build new document. The object built by this method becomes the top level entity that the builder handler constructs. The actual type is determined by the sub-class of the Builder class and can essentially be any type at all. 
This method is always called when the parser encounters the start of a new document. Returns: New object instance representing concrete document which is returned to user via BuilderHandler.GetResults(). """ def InitializeDocument(self, document, value): """Initialize document with value from top level of document. This method is called when the root document element is encountered at the top level of a YAML document. It should get called immediately after BuildDocument. Receiving the None value indicates the empty document. Args: document: Document as constructed in BuildDocument. value: Scalar value to initialize the document with. """ def BuildMapping(self, top_value): """Build a new mapping representation. Called when StartMapping event received. Type of object is determined by Builder sub-class. Args: top_value: Object which will be new mappings parant. Will be object returned from previous call to BuildMapping or BuildSequence. Returns: Instance of new object that represents a mapping type in target model. """ def EndMapping(self, top_value, mapping): """Previously constructed mapping scope is at an end. Called when the end of a mapping block is encountered. Useful for additional clean up or end of scope validation. Args: top_value: Value which is parent of the mapping. mapping: Mapping which is at the end of its scope. """ def BuildSequence(self, top_value): """Build a new sequence representation. Called when StartSequence event received. Type of object is determined by Builder sub-class. Args: top_value: Object which will be new sequences parant. Will be object returned from previous call to BuildMapping or BuildSequence. Returns: Instance of new object that represents a sequence type in target model. """ def EndSequence(self, top_value, sequence): """Previously constructed sequence scope is at an end. Called when the end of a sequence block is encountered. Useful for additional clean up or end of scope validation. 
Args: top_value: Value which is parent of the sequence. sequence: Sequence which is at the end of its scope. """ def MapTo(self, subject, key, value): """Map value to a mapping representation. Implementation is defined by sub-class of Builder. Args: subject: Object that represents mapping. Value returned from BuildMapping. key: Key used to map value to subject. Can be any scalar value. value: Value which is mapped to subject. Can be any kind of value. """ def AppendTo(self, subject, value): """Append value to a sequence representation. Implementation is defined by sub-class of Builder. Args: subject: Object that represents sequence. Value returned from BuildSequence value: Value to be appended to subject. Can be any kind of value. """ class BuilderHandler(yaml_listener.EventHandler): """PyYAML event handler used to build objects. Maintains state information as it receives parse events so that object nesting is maintained. Uses provided builder object to construct and assemble objects as it goes. As it receives events from the YAML parser, it builds a stack of data representing structural tokens. As the scope of documents, mappings and sequences end, those token, value pairs are popped from the top of the stack so that the original scope can resume processing. A special case is made for the _KEY token. It represents a temporary value which only occurs inside mappings. It is immediately popped off the stack when it's associated value is encountered in the parse stream. It is necessary to do this because the YAML parser does not combine key and value information in to a single event. """ def __init__(self, builder): """Initialization for builder handler. Args: builder: Instance of Builder class. Raises: ListenerConfigurationError when builder is not a Builder class. 
""" if not isinstance(builder, Builder): raise yaml_errors.ListenerConfigurationError( 'Must provide builder of type yaml_listener.Builder') self._builder = builder self._stack = None self._top = None self._results = [] def _Push(self, token, value): """Push values to stack at start of nesting. When a new object scope is beginning, will push the token (type of scope) along with the new objects value, the latter of which is provided through the various build methods of the builder. Args: token: Token indicating the type of scope which is being created; must belong to _TOKEN_VALUES. value: Value to associate with given token. Construction of value is determined by the builder provided to this handler at construction. """ # _top is an easy to use reference to the top of the handler stack. self._top = (token, value) self._stack.append(self._top) def _Pop(self): """Pop values from stack at end of nesting. Called to indicate the end of a nested scope. Returns: Previously pushed value at the top of the stack. """ assert self._stack != [] and self._stack is not None token, value = self._stack.pop() # Restore _top variable with previous values. if self._stack: self._top = self._stack[-1] else: self._top = None return value def _HandleAnchor(self, event): """Handle anchor attached to event. Currently will raise an error if anchor is used. Anchors are used to define a document wide tag to a given value (scalar, mapping or sequence). Args: event: Event which may have anchor property set. Raises: NotImplementedError if event attempts to use an anchor. """ # TODO(user): Implement anchors and aliases. # If there is an anchor raise an error. 
if hasattr(event, 'anchor') and event.anchor is not None: raise NotImplementedError('Anchors not supported in this handler') def _HandleValue(self, value): """Handle given value based on state of parser This method handles the various values that are created by the builder at the beginning of scope events (such as mappings and sequences) or when a scalar value is received. Method is called when handler receives a parser, MappingStart or SequenceStart. Args: value: Value received as scalar value or newly constructed mapping or sequence instance. Raises: InternalError if the building process encounters an unexpected token. This is an indication of an implementation error in BuilderHandler. """ token, top_value = self._top # If the last token was a key, it means that it is necessary # to insert the value in to a map. if token == _TOKEN_KEY: # Fetch the key (removing from the stack) key = self._Pop() # New values at top of stack mapping_token, mapping = self._top assert _TOKEN_MAPPING == mapping_token # Forward to builder for assembly self._builder.MapTo(mapping, key, value) # Parent object for new value is a mapping. It means that # this value that is passed in is a scalar and should # get placed on the stack as the key for the next value # from the parser. elif token == _TOKEN_MAPPING: self._Push(_TOKEN_KEY, value) # Parent is a sequence object. Append value to sequence. elif token == _TOKEN_SEQUENCE: self._builder.AppendTo(top_value, value) # Events received at the document level are sent to the # builder to initialize the actual document. elif token == _TOKEN_DOCUMENT: self._builder.InitializeDocument(top_value, value) else: raise yaml_errors.InternalError('Unrecognized builder token:\n%s' % token) def StreamStart(self, event, loader): """Initializes internal state of handler Args: event: Ignored. 
""" assert self._stack is None self._stack = [] self._top = None self._results = [] def StreamEnd(self, event, loader): """Cleans up internal state of handler after parsing Args: event: Ignored. """ assert self._stack == [] and self._top is None self._stack = None def DocumentStart(self, event, loader): """Build new document. Pushes new document on to stack. Args: event: Ignored. """ assert self._stack == [] self._Push(_TOKEN_DOCUMENT, self._builder.BuildDocument()) def DocumentEnd(self, event, loader): """End of document. Args: event: Ignored. """ assert self._top[0] == _TOKEN_DOCUMENT self._results.append(self._Pop()) def Alias(self, event, loader): """Not implemented yet. Args: event: Ignored. """ raise NotImplementedError('References not supported in this handler') def Scalar(self, event, loader): """Handle scalar value Since scalars are simple values that are passed directly in by the parser, handle like any value with no additional processing. Of course, key values will be handles specially. A key value is recognized when the top token is _TOKEN_MAPPING. Args: event: Event containing scalar value. """ self._HandleAnchor(event) if event.tag is None and self._top[0] != _TOKEN_MAPPING: # Try to calculate what tag should be. Might be an implicit # type based on regex or path. try: tag = loader.resolve(yaml.nodes.ScalarNode, event.value, event.implicit) except IndexError: # This exception might be thrown by PyYAML versions previous to # 3.05. In this event, set default mapping. tag = loader.DEFAULT_SCALAR_TAG else: tag = event.tag if tag is None: value = event.value else: # Do conversion of value to properly inferred type. node = yaml.nodes.ScalarNode(tag, event.value, event.start_mark, event.end_mark, event.style) value = loader.construct_object(node) self._HandleValue(value) def SequenceStart(self, event, loader): """Start of sequence scope Create a new sequence from the builder and then handle in the context of its parent. 
Args: event: SequenceStartEvent generated by loader. loader: Loader that generated event. """ self._HandleAnchor(event) token, parent = self._top # If token is on stack, need to look one below it for real parent. if token == _TOKEN_KEY: token, parent = self._stack[-2] sequence = self._builder.BuildSequence(parent) self._HandleValue(sequence) self._Push(_TOKEN_SEQUENCE, sequence) def SequenceEnd(self, event, loader): """End of sequence. Args: event: Ignored loader: Ignored. """ assert self._top[0] == _TOKEN_SEQUENCE end_object = self._Pop() top_value = self._top[1] self._builder.EndSequence(top_value, end_object) def MappingStart(self, event, loader): """Start of mapping scope. Create a mapping from builder and then handle in the context of its parent. Args: event: MappingStartEvent generated by loader. loader: Loader that generated event. """ self._HandleAnchor(event) token, parent = self._top # If token is on stack, need to look one below it for real parent. # A KEY indicates that the parser is processing a mapping. Since # it is on the stack and will be removed by the _HandleValue it # is necessary to look for the enclosing mapping object below the # key on the stack. if token == _TOKEN_KEY: token, parent = self._stack[-2] mapping = self._builder.BuildMapping(parent) self._HandleValue(mapping) self._Push(_TOKEN_MAPPING, mapping) def MappingEnd(self, event, loader): """End of mapping Args: event: Ignored. loader: Ignored. """ assert self._top[0] == _TOKEN_MAPPING end_object = self._Pop() top_value = self._top[1] self._builder.EndMapping(top_value, end_object) def GetResults(self): """Get results of document stream processing. This method can be invoked after fully parsing the entire YAML file to retrieve constructed contents of YAML file. Called after EndStream. Returns: A tuple of all document objects that were parsed from YAML stream. Raises: InternalError if the builder stack is not empty by the end of parsing. 
""" if self._stack is not None: raise yaml_errors.InternalError('Builder stack is not empty.') return tuple(self._results)
apache-2.0
eino-makitalo/odoo
addons/account/wizard/account_period_close.py
341
2646
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, osv from openerp.tools.translate import _ class account_period_close(osv.osv_memory): """ close period """ _name = "account.period.close" _description = "period close" _columns = { 'sure': fields.boolean('Check this box'), } def data_save(self, cr, uid, ids, context=None): """ This function close period @param cr: the current row, from the database cursor, @param uid: the current user’s ID for security checks, @param ids: account period close’s ID or list of IDs """ journal_period_pool = self.pool.get('account.journal.period') period_pool = self.pool.get('account.period') account_move_obj = self.pool.get('account.move') mode = 'done' for form in self.read(cr, uid, ids, context=context): if form['sure']: for id in context['active_ids']: account_move_ids = account_move_obj.search(cr, uid, [('period_id', '=', id), ('state', '=', "draft")], context=context) if account_move_ids: raise osv.except_osv(_('Invalid Action!'), _('In order to close a period, you must first post related journal entries.')) cr.execute('update 
account_journal_period set state=%s where period_id=%s', (mode, id)) cr.execute('update account_period set state=%s where id=%s', (mode, id)) self.invalidate_cache(cr, uid, context=context) return {'type': 'ir.actions.act_window_close'} # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
Sorsly/subtle
google-cloud-sdk/lib/third_party/docker/docker/utils/types.py
43
2304
import six class LogConfigTypesEnum(object): _values = ( 'json-file', 'syslog', 'journald', 'gelf', 'fluentd', 'none' ) JSON, SYSLOG, JOURNALD, GELF, FLUENTD, NONE = _values class DictType(dict): def __init__(self, init): for k, v in six.iteritems(init): self[k] = v class LogConfig(DictType): types = LogConfigTypesEnum def __init__(self, **kwargs): log_driver_type = kwargs.get('type', kwargs.get('Type')) config = kwargs.get('config', kwargs.get('Config')) or {} if config and not isinstance(config, dict): raise ValueError("LogConfig.config must be a dictionary") super(LogConfig, self).__init__({ 'Type': log_driver_type, 'Config': config }) @property def type(self): return self['Type'] @type.setter def type(self, value): self['Type'] = value @property def config(self): return self['Config'] def set_config_value(self, key, value): self.config[key] = value def unset_config(self, key): if key in self.config: del self.config[key] class Ulimit(DictType): def __init__(self, **kwargs): name = kwargs.get('name', kwargs.get('Name')) soft = kwargs.get('soft', kwargs.get('Soft')) hard = kwargs.get('hard', kwargs.get('Hard')) if not isinstance(name, six.string_types): raise ValueError("Ulimit.name must be a string") if soft and not isinstance(soft, int): raise ValueError("Ulimit.soft must be an integer") if hard and not isinstance(hard, int): raise ValueError("Ulimit.hard must be an integer") super(Ulimit, self).__init__({ 'Name': name, 'Soft': soft, 'Hard': hard }) @property def name(self): return self['Name'] @name.setter def name(self, value): self['Name'] = value @property def soft(self): return self.get('Soft') @soft.setter def soft(self, value): self['Soft'] = value @property def hard(self): return self.get('Hard') @hard.setter def hard(self, value): self['Hard'] = value
mit
blueshed/blueshed-micro
blueshed/micro/utils/executor.py
1
2437
from blueshed.micro.utils import resources from tornado.concurrent import Future from tornado.ioloop import IOLoop from tornado.autoreload import add_reload_hook from functools import wraps import logging import os import inspect from concurrent.futures.process import ProcessPoolExecutor LOGGER = logging.getLogger(__name__) _pool_ = None def pool_init(pool): global _pool_ _pool_ = pool def pool_init_processes(pool_size, debug=False): micro_pool = ProcessPoolExecutor(pool_size) pool_init(micro_pool) if debug is True: add_reload_hook(micro_pool.shutdown) logging.info("pool intialized with %s processes", pool_size) return micro_pool def global_pool(): global _pool_ return _pool_ def register_pool(name, pool): resources.set_resource(name, pool) def has_micro_context(f): for k, v in inspect.signature(f).parameters.items(): if v.annotation == 'micro_context': return k def run_in_pool(_pid, _f, _has_context, context, *args, **kwargs): # globals from the parent process in the # IOLoop so clear them. 
subprocess = os.getpid() != _pid if subprocess and IOLoop.current(False): LOGGER.debug("clearing tornado globals") IOLoop.clear_current() IOLoop.clear_instance() LOGGER.debug("running %s %s", os.getpid(), context) if _has_context: kwargs[_has_context] = context result = _f(*args, **kwargs) if not subprocess: return result if isinstance(result, Future): LOGGER.debug('running up tornado to complete') def done(*args, **kwargs): LOGGER.debug('stopping tornado') IOLoop.current().stop() result.add_done_callback(done) IOLoop.current().start() result = result.result() return context, result def pool(_f, resource_name=None): has_context = has_micro_context(_f) @wraps(_f) def call(_f, context, *args, **kwargs): global _pool_ if resource_name: pool = resources.get_resource(resource_name) elif _pool_: pool = _pool_ if pool: result = pool.submit(run_in_pool, os.getpid(), _f, has_context, context, *args, **kwargs) else: if has_context: kwargs[has_context] = context result = _f(*args, **kwargs) return result return call
mit
mischief/systemd
.ycm_extra_conf.py
129
1551
import itertools import os import subprocess def GetFlagsFromMakefile(varname): return subprocess.check_output([ "make", "-s", "print-%s" % varname]).decode().split() def Flatten(lists): return list(itertools.chain.from_iterable(lists)) def DirectoryOfThisScript(): return os.path.dirname(os.path.abspath(__file__)) def MakeRelativePathsInFlagsAbsolute(flags, working_directory): if not working_directory: return flags new_flags = [] make_next_absolute = False path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ] for flag in flags: new_flag = flag if make_next_absolute: make_next_absolute = False if not flag.startswith('/'): new_flag = os.path.join(working_directory, flag) for path_flag in path_flags: if flag == path_flag: make_next_absolute = True break if flag.startswith(path_flag): path = flag[ len(path_flag): ] new_flag = path_flag + os.path.join(working_directory, path) break if new_flag: new_flags.append(new_flag) return new_flags def FlagsForFile(filename): relative_to = DirectoryOfThisScript() return { 'flags': MakeRelativePathsInFlagsAbsolute(flags, relative_to), 'do_cache': True } flags = Flatten(map(GetFlagsFromMakefile, [ 'AM_CPPFLAGS', 'CPPFLAGS', 'AM_CFLAGS', 'CFLAGS', ])) # these flags cause crashes in libclang, so remove them flags.remove('-Wlogical-op') flags.remove('-Wsuggest-attribute=noreturn') flags.remove('-Wdate-time') # vim: set et ts=2 sw=2:
gpl-2.0
edry/edx-platform
common/djangoapps/third_party_auth/tests/test_views.py
42
5678
""" Test the views served by third_party_auth. """ # pylint: disable=no-member import ddt from lxml import etree from onelogin.saml2.errors import OneLogin_Saml2_Error import unittest from .testutil import AUTH_FEATURE_ENABLED, SAMLTestCase # Define some XML namespaces: from third_party_auth.tasks import SAML_XML_NS XMLDSIG_XML_NS = 'http://www.w3.org/2000/09/xmldsig#' @unittest.skipUnless(AUTH_FEATURE_ENABLED, 'third_party_auth not enabled') @ddt.ddt class SAMLMetadataTest(SAMLTestCase): """ Test the SAML metadata view """ METADATA_URL = '/auth/saml/metadata.xml' def test_saml_disabled(self): """ When SAML is not enabled, the metadata view should return 404 """ self.enable_saml(enabled=False) response = self.client.get(self.METADATA_URL) self.assertEqual(response.status_code, 404) def test_metadata(self): self.enable_saml() doc = self._fetch_metadata() # Check the ACS URL: acs_node = doc.find(".//{}".format(etree.QName(SAML_XML_NS, 'AssertionConsumerService'))) self.assertIsNotNone(acs_node) self.assertEqual(acs_node.attrib['Location'], 'http://example.none/auth/complete/tpa-saml/') def test_default_contact_info(self): self.enable_saml() self.check_metadata_contacts( xml=self._fetch_metadata(), tech_name="edX Support", tech_email="technical@example.com", support_name="edX Support", support_email="technical@example.com" ) def test_custom_contact_info(self): self.enable_saml( other_config_str=( '{' '"TECHNICAL_CONTACT": {"givenName": "Jane Tech", "emailAddress": "jane@example.com"},' '"SUPPORT_CONTACT": {"givenName": "Joe Support", "emailAddress": "joe@example.com"}' '}' ) ) self.check_metadata_contacts( xml=self._fetch_metadata(), tech_name="Jane Tech", tech_email="jane@example.com", support_name="Joe Support", support_email="joe@example.com" ) @ddt.data( # Test two slightly different key pair export formats ('saml_key', 'MIICsDCCAhmgAw'), ('saml_key_alt', 'MIICWDCCAcGgAw'), ) @ddt.unpack def test_signed_metadata(self, key_name, pub_key_starts_with): 
self.enable_saml( private_key=self._get_private_key(key_name), public_key=self._get_public_key(key_name), other_config_str='{"SECURITY_CONFIG": {"signMetadata": true} }', ) self._validate_signed_metadata(pub_key_starts_with=pub_key_starts_with) def test_secure_key_configuration(self): """ Test that the SAML private key can be stored in Django settings and not the DB """ self.enable_saml( public_key='', private_key='', other_config_str='{"SECURITY_CONFIG": {"signMetadata": true} }', ) with self.assertRaises(OneLogin_Saml2_Error): self._fetch_metadata() # OneLogin_Saml2_Error: Cannot sign metadata: missing SP private key. with self.settings( SOCIAL_AUTH_SAML_SP_PRIVATE_KEY=self._get_private_key('saml_key'), SOCIAL_AUTH_SAML_SP_PUBLIC_CERT=self._get_public_key('saml_key'), ): self._validate_signed_metadata() def _validate_signed_metadata(self, pub_key_starts_with='MIICsDCCAhmgAw'): """ Fetch the SAML metadata and do some validation """ doc = self._fetch_metadata() sig_node = doc.find(".//{}".format(etree.QName(XMLDSIG_XML_NS, 'SignatureValue'))) self.assertIsNotNone(sig_node) # Check that the right public key was used: pub_key_node = doc.find(".//{}".format(etree.QName(XMLDSIG_XML_NS, 'X509Certificate'))) self.assertIsNotNone(pub_key_node) self.assertIn(pub_key_starts_with, pub_key_node.text) def _fetch_metadata(self): """ Fetch and parse the metadata XML at self.METADATA_URL """ response = self.client.get(self.METADATA_URL) self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-Type'], 'text/xml') # The result should be valid XML: try: metadata_doc = etree.fromstring(response.content) except etree.LxmlError: self.fail('SAML metadata must be valid XML') self.assertEqual(metadata_doc.tag, etree.QName(SAML_XML_NS, 'EntityDescriptor')) return metadata_doc def check_metadata_contacts(self, xml, tech_name, tech_email, support_name, support_email): """ Validate that the contact info in the metadata has the expected values """ technical_node = 
xml.find(".//{}[@contactType='technical']".format(etree.QName(SAML_XML_NS, 'ContactPerson'))) self.assertIsNotNone(technical_node) tech_name_node = technical_node.find(etree.QName(SAML_XML_NS, 'GivenName')) self.assertEqual(tech_name_node.text, tech_name) tech_email_node = technical_node.find(etree.QName(SAML_XML_NS, 'EmailAddress')) self.assertEqual(tech_email_node.text, tech_email) support_node = xml.find(".//{}[@contactType='support']".format(etree.QName(SAML_XML_NS, 'ContactPerson'))) self.assertIsNotNone(support_node) support_name_node = support_node.find(etree.QName(SAML_XML_NS, 'GivenName')) self.assertEqual(support_name_node.text, support_name) support_email_node = support_node.find(etree.QName(SAML_XML_NS, 'EmailAddress')) self.assertEqual(support_email_node.text, support_email)
agpl-3.0
beacloudgenius/edx-platform
lms/envs/static.py
100
2249
""" This config file runs the simplest dev environment using sqlite, and db-based sessions. Assumes structure: /envroot/ /db # This is where it'll write the database file /edx-platform # The location of this repo /log # Where we're going to write log files """ # We intentionally define lots of variables that aren't used, and # want to import all variables from base settings files # pylint: disable=wildcard-import, unused-wildcard-import from .common import * from openedx.core.lib.logsettings import get_logger_config STATIC_GRAB = True LOGGING = get_logger_config(ENV_ROOT / "log", logging_env="dev", tracking_filename="tracking.log", debug=False) DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': ENV_ROOT / "db" / "edx.db", } } CACHES = { # This is the cache used for most things. # In staging/prod envs, the sessions also live here. 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': 'edx_loc_mem_cache', 'KEY_FUNCTION': 'util.memcache.safe_key', }, # The general cache is what you get if you use our util.cache. It's used for # things like caching the course.xml file for different A/B test groups. # We set it to be a DummyCache to force reloading of course.xml in dev. # In staging environments, we would grab VERSION from data uploaded by the # push process. 'general': { 'BACKEND': 'django.core.cache.backends.dummy.DummyCache', 'KEY_PREFIX': 'general', 'VERSION': 4, 'KEY_FUNCTION': 'util.memcache.safe_key', } } # Dummy secret key for dev SECRET_KEY = '85920908f28904ed733fe576320db18cabd7b6cd' ############################ FILE UPLOADS (for discussion forums) ############################# DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage' MEDIA_ROOT = ENV_ROOT / "uploads" MEDIA_URL = "/discussion/upfiles/" FILE_UPLOAD_TEMP_DIR = ENV_ROOT / "uploads" FILE_UPLOAD_HANDLERS = ( 'django.core.files.uploadhandler.MemoryFileUploadHandler', 'django.core.files.uploadhandler.TemporaryFileUploadHandler', )
agpl-3.0
apexkid/Wikiapiary
migrations/0009.create-statistics-weekly.py
3
1389
""" This is a weekly aggregation table to collect raw statistics data into weekly summary information. This migration implements part of the manually created Apiary DB that WikiApiary launched with. """ from yoyo import step step( "CREATE TABLE `statistics_weekly` ( \ `website_id` int(11) NOT NULL, \ `website_date` date NOT NULL, \ `users_min` bigint(20) NOT NULL, \ `users_max` bigint(20) NOT NULL, \ `activeusers_max` bigint(20) NOT NULL, \ `admins_max` bigint(20) NOT NULL, \ `articles_min` bigint(20) NOT NULL, \ `articles_max` bigint(20) NOT NULL, \ `edits_min` bigint(20) NOT NULL, \ `edits_max` bigint(20) NOT NULL, \ `jobs_max` bigint(20) NOT NULL, \ `pages_min` bigint(20) NOT NULL, \ `pages_max` bigint(20) NOT NULL, \ `pages_last` bigint(20) NOT NULL, \ `views_min` bigint(20) NOT NULL, \ `views_max` bigint(20) NOT NULL, \ `smw_propcount_min` bigint(20) NOT NULL, \ `smw_propcount_max` bigint(20) NOT NULL, \ `smw_proppagecount_last` int(11) NOT NULL, \ `smw_usedpropcount_last` int(11) NOT NULL, \ `smw_declaredpropcount_last` int(11) NOT NULL, \ PRIMARY KEY (`website_id`,`website_date`) \ ) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT", "DROP TABLE `statistics_weekly`", )
gpl-2.0
espressofiend/NCIL-SOC-2015
PsychoPy/stroop_lastrun.py
1
14481
#!/usr/bin/env python2 # -*- coding: utf-8 -*- """ This experiment was created using PsychoPy2 Experiment Builder (v1.82.00), Mon Jun 22 22:53:33 2015 If you publish work using this script please cite the relevant PsychoPy publications Peirce, JW (2007) PsychoPy - Psychophysics software in Python. Journal of Neuroscience Methods, 162(1-2), 8-13. Peirce, JW (2009) Generating stimuli for neuroscience using PsychoPy. Frontiers in Neuroinformatics, 2:10. doi: 10.3389/neuro.11.010.2008 """ from __future__ import division # so that 1/3=0.333 instead of 1/3=0 from psychopy import visual, core, data, event, logging, sound, gui from psychopy.constants import * # things like STARTED, FINISHED import numpy as np # whole numpy lib is available, prepend 'np.' from numpy import sin, cos, tan, log, log10, pi, average, sqrt, std, deg2rad, rad2deg, linspace, asarray from numpy.random import random, randint, normal, shuffle import os # handy system and path functions # Ensure that relative paths start from the same directory as this script _thisDir = os.path.dirname(os.path.abspath(__file__)) os.chdir(_thisDir) # Store info about the experiment session expName = u'stroop' # from the Builder filename that created this script expInfo = {u'session': u'001', u'participant': u''} dlg = gui.DlgFromDict(dictionary=expInfo, title=expName) if dlg.OK == False: core.quit() # user pressed cancel expInfo['date'] = data.getDateStr() # add a simple timestamp expInfo['expName'] = expName # Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc filename = _thisDir + os.sep + 'data/%s_%s_%s' %(expInfo['participant'], expName, expInfo['date']) # An ExperimentHandler isn't essential but helps with data saving thisExp = data.ExperimentHandler(name=expName, version='', extraInfo=expInfo, runtimeInfo=None, originPath=u'/Users/aaron/Documents/GitHub/NCIL-SOC-2015/PsychoPy/stroop.psyexp', savePickle=True, saveWideText=True, dataFileName=filename) #save a log file for detail verbose 
info logFile = logging.LogFile(filename+'.log', level=logging.EXP) logging.console.setLevel(logging.WARNING) # this outputs to the screen, not a file endExpNow = False # flag for 'escape' or other condition => quit the exp # Start Code - component code to be run before the window creation # Setup the Window win = visual.Window(size=(2560, 1440), fullscr=True, screen=0, allowGUI=False, allowStencil=False, monitor=u'testMonitor', color=[0,0,0], colorSpace='rgb', blendMode='avg', useFBO=True, ) # store frame rate of monitor if we can measure it successfully expInfo['frameRate']=win.getActualFrameRate() if expInfo['frameRate']!=None: frameDur = 1.0/round(expInfo['frameRate']) else: frameDur = 1.0/60.0 # couldn't get a reliable measure so guess # Initialize components for Routine "instr" instrClock = core.Clock() instructionText = visual.TextStim(win=win, ori=0, name='instructionText', text=u'Press left arrow if colour matches word\n\nPress right arrow if colour does not match word\n\nPress either arrow to start.', font=u'Arial', pos=[0, 0], height=0.1, wrapWidth=None, color=u'white', colorSpace='rgb', opacity=1, depth=0.0) # Initialize components for Routine "trial" trialClock = core.Clock() ISI = core.StaticPeriod(win=win, screenHz=expInfo['frameRate'], name='ISI') text = visual.TextStim(win=win, ori=0, name='text', text=u'XXXXXX', font=u'Arial', pos=[0, 0], height=0.1, wrapWidth=None, color=u'white', colorSpace='rgb', opacity=1, depth=-1.0) text_2 = visual.TextStim(win=win, ori=0, name='text_2', text=u'+', font=u'Arial', pos=[0, 0], height=0.1, wrapWidth=None, color=u'blue', colorSpace='rgb', opacity=1, depth=-2.0) text_3 = visual.TextStim(win=win, ori=0, name='text_3', text='default text', font=u'Arial', pos=[0, 0], height=0.1, wrapWidth=None, color=1.0, colorSpace='rgb', opacity=1, depth=-3.0) # Create some handy timers globalClock = core.Clock() # to track the time since experiment started routineTimer = core.CountdownTimer() # to track time remaining of each 
(non-slip) routine #------Prepare to start Routine "instr"------- t = 0 instrClock.reset() # clock frameN = -1 # update component parameters for each repeat key_resp_3 = event.BuilderKeyResponse() # create an object of type KeyResponse key_resp_3.status = NOT_STARTED # keep track of which components have finished instrComponents = [] instrComponents.append(instructionText) instrComponents.append(key_resp_3) for thisComponent in instrComponents: if hasattr(thisComponent, 'status'): thisComponent.status = NOT_STARTED #-------Start Routine "instr"------- continueRoutine = True while continueRoutine: # get current time t = instrClock.getTime() frameN = frameN + 1 # number of completed frames (so 0 is the first frame) # update/draw components on each frame # *instructionText* updates if t >= 0.0 and instructionText.status == NOT_STARTED: # keep track of start time/frame for later instructionText.tStart = t # underestimates by a little under one frame instructionText.frameNStart = frameN # exact frame index instructionText.setAutoDraw(True) # *key_resp_3* updates if t >= 0.0 and key_resp_3.status == NOT_STARTED: # keep track of start time/frame for later key_resp_3.tStart = t # underestimates by a little under one frame key_resp_3.frameNStart = frameN # exact frame index key_resp_3.status = STARTED # keyboard checking is just starting key_resp_3.clock.reset() # now t=0 event.clearEvents(eventType='keyboard') if key_resp_3.status == STARTED: theseKeys = event.getKeys(keyList=['left', 'right']) # check for quit: if "escape" in theseKeys: endExpNow = True if len(theseKeys) > 0: # at least one key was pressed key_resp_3.keys = theseKeys[-1] # just the last key pressed key_resp_3.rt = key_resp_3.clock.getTime() # a response ends the routine continueRoutine = False # check if all components have finished if not continueRoutine: # a component has requested a forced-end of Routine routineTimer.reset() # if we abort early the non-slip timer needs reset break continueRoutine = 
False # will revert to True if at least one component still running for thisComponent in instrComponents: if hasattr(thisComponent, "status") and thisComponent.status != FINISHED: continueRoutine = True break # at least one component has not yet finished # check for quit (the Esc key) if endExpNow or event.getKeys(keyList=["escape"]): core.quit() # refresh the screen if continueRoutine: # don't flip if this routine is over or we'll get a blank screen win.flip() else: # this Routine was not non-slip safe so reset non-slip timer routineTimer.reset() #-------Ending Routine "instr"------- for thisComponent in instrComponents: if hasattr(thisComponent, "setAutoDraw"): thisComponent.setAutoDraw(False) # check responses if key_resp_3.keys in ['', [], None]: # No response was made key_resp_3.keys=None # store data for thisExp (ExperimentHandler) thisExp.addData('key_resp_3.keys',key_resp_3.keys) if key_resp_3.keys != None: # we had a response thisExp.addData('key_resp_3.rt', key_resp_3.rt) thisExp.nextEntry() # set up handler to look after randomisation of conditions etc trials = data.TrialHandler(nReps=1, method='random', extraInfo=expInfo, originPath=u'/Users/aaron/Documents/GitHub/NCIL-SOC-2015/PsychoPy/stroop.psyexp', trialList=data.importConditions(u'psychopy_playing_conditions.xlsx'), seed=666, name='trials') thisExp.addLoop(trials) # add the loop to the experiment thisTrial = trials.trialList[0] # so we can initialise stimuli with some values # abbreviate parameter names if possible (e.g. rgb=thisTrial.rgb) if thisTrial != None: for paramName in thisTrial.keys(): exec(paramName + '= thisTrial.' + paramName) for thisTrial in trials: currentLoop = trials # abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb) if thisTrial != None: for paramName in thisTrial.keys(): exec(paramName + '= thisTrial.' 
+ paramName) #------Prepare to start Routine "trial"------- t = 0 trialClock.reset() # clock frameN = -1 routineTimer.add(5.000000) # update component parameters for each repeat text_3.setColor(colour, colorSpace='rgb') text_3.setText(word) key_resp_2 = event.BuilderKeyResponse() # create an object of type KeyResponse key_resp_2.status = NOT_STARTED # keep track of which components have finished trialComponents = [] trialComponents.append(ISI) trialComponents.append(text) trialComponents.append(text_2) trialComponents.append(text_3) trialComponents.append(key_resp_2) for thisComponent in trialComponents: if hasattr(thisComponent, 'status'): thisComponent.status = NOT_STARTED #-------Start Routine "trial"------- continueRoutine = True while continueRoutine and routineTimer.getTime() > 0: # get current time t = trialClock.getTime() frameN = frameN + 1 # number of completed frames (so 0 is the first frame) # update/draw components on each frame # *text* updates if t >= 0.0 and text.status == NOT_STARTED: # keep track of start time/frame for later text.tStart = t # underestimates by a little under one frame text.frameNStart = frameN # exact frame index text.setAutoDraw(True) if text.status == STARTED and t >= (0.0 + (1.0-win.monitorFramePeriod*0.75)): #most of one frame period left text.setAutoDraw(False) # *text_2* updates if t >= 1.5 and text_2.status == NOT_STARTED: # keep track of start time/frame for later text_2.tStart = t # underestimates by a little under one frame text_2.frameNStart = frameN # exact frame index text_2.setAutoDraw(True) if text_2.status == STARTED and t >= (1.5 + (1.0-win.monitorFramePeriod*0.75)): #most of one frame period left text_2.setAutoDraw(False) # *text_3* updates if t >= 3 and text_3.status == NOT_STARTED: # keep track of start time/frame for later text_3.tStart = t # underestimates by a little under one frame text_3.frameNStart = frameN # exact frame index text_3.setAutoDraw(True) if text_3.status == STARTED and t >= (3 + 
(2-win.monitorFramePeriod*0.75)): #most of one frame period left text_3.setAutoDraw(False) # *key_resp_2* updates if t >= 3 and key_resp_2.status == NOT_STARTED: # keep track of start time/frame for later key_resp_2.tStart = t # underestimates by a little under one frame key_resp_2.frameNStart = frameN # exact frame index key_resp_2.status = STARTED # keyboard checking is just starting key_resp_2.clock.reset() # now t=0 event.clearEvents(eventType='keyboard') if key_resp_2.status == STARTED and t >= (3 + (2-win.monitorFramePeriod*0.75)): #most of one frame period left key_resp_2.status = STOPPED if key_resp_2.status == STARTED: theseKeys = event.getKeys(keyList=['left', 'right']) # check for quit: if "escape" in theseKeys: endExpNow = True if len(theseKeys) > 0: # at least one key was pressed key_resp_2.keys = theseKeys[-1] # just the last key pressed key_resp_2.rt = key_resp_2.clock.getTime() # was this 'correct'? if (key_resp_2.keys == str(corrAns)) or (key_resp_2.keys == corrAns): key_resp_2.corr = 1 else: key_resp_2.corr = 0 # a response ends the routine continueRoutine = False # *ISI* period if t >= 0.0 and ISI.status == NOT_STARTED: # keep track of start time/frame for later ISI.tStart = t # underestimates by a little under one frame ISI.frameNStart = frameN # exact frame index ISI.start(0.5) elif ISI.status == STARTED: #one frame should pass before updating params and completing ISI.complete() #finish the static period # check if all components have finished if not continueRoutine: # a component has requested a forced-end of Routine routineTimer.reset() # if we abort early the non-slip timer needs reset break continueRoutine = False # will revert to True if at least one component still running for thisComponent in trialComponents: if hasattr(thisComponent, "status") and thisComponent.status != FINISHED: continueRoutine = True break # at least one component has not yet finished # check for quit (the Esc key) if endExpNow or event.getKeys(keyList=["escape"]): 
core.quit() # refresh the screen if continueRoutine: # don't flip if this routine is over or we'll get a blank screen win.flip() #-------Ending Routine "trial"------- for thisComponent in trialComponents: if hasattr(thisComponent, "setAutoDraw"): thisComponent.setAutoDraw(False) # check responses if key_resp_2.keys in ['', [], None]: # No response was made key_resp_2.keys=None # was no response the correct answer?! if str(corrAns).lower() == 'none': key_resp_2.corr = 1 # correct non-response else: key_resp_2.corr = 0 # failed to respond (incorrectly) # store data for trials (TrialHandler) trials.addData('key_resp_2.keys',key_resp_2.keys) trials.addData('key_resp_2.corr', key_resp_2.corr) if key_resp_2.keys != None: # we had a response trials.addData('key_resp_2.rt', key_resp_2.rt) thisExp.nextEntry() # completed 1 repeats of 'trials' win.close() core.quit()
mit
XPRIZE/GLEXP-Team-SlideSpeech
appengine-try-python-flask-master/lib/werkzeug/debug/repr.py
313
9350
# -*- coding: utf-8 -*- """ werkzeug.debug.repr ~~~~~~~~~~~~~~~~~~~ This module implements object representations for debugging purposes. Unlike the default repr these reprs expose a lot more information and produce HTML instead of ASCII. Together with the CSS and JavaScript files of the debugger this gives a colorful and more compact output. :copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details. :license: BSD. """ import sys import re import codecs from traceback import format_exception_only try: from collections import deque except ImportError: # pragma: no cover deque = None from werkzeug.utils import escape from werkzeug._compat import iteritems, PY2, text_type, integer_types, \ string_types missing = object() _paragraph_re = re.compile(r'(?:\r\n|\r|\n){2,}') RegexType = type(_paragraph_re) HELP_HTML = '''\ <div class=box> <h3>%(title)s</h3> <pre class=help>%(text)s</pre> </div>\ ''' OBJECT_DUMP_HTML = '''\ <div class=box> <h3>%(title)s</h3> %(repr)s <table>%(items)s</table> </div>\ ''' def debug_repr(obj): """Creates a debug repr of an object as HTML unicode string.""" return DebugReprGenerator().repr(obj) def dump(obj=missing): """Print the object details to stdout._write (for the interactive console of the web debugger. """ gen = DebugReprGenerator() if obj is missing: rv = gen.dump_locals(sys._getframe(1).f_locals) else: rv = gen.dump_object(obj) sys.stdout._write(rv) class _Helper(object): """Displays an HTML version of the normal help, for the interactive debugger only because it requires a patched sys.stdout. """ def __repr__(self): return 'Type help(object) for help about object.' 
def __call__(self, topic=None): if topic is None: sys.stdout._write('<span class=help>%s</span>' % repr(self)) return import pydoc pydoc.help(topic) rv = sys.stdout.reset() if isinstance(rv, bytes): rv = rv.decode('utf-8', 'ignore') paragraphs = _paragraph_re.split(rv) if len(paragraphs) > 1: title = paragraphs[0] text = '\n\n'.join(paragraphs[1:]) else: # pragma: no cover title = 'Help' text = paragraphs[0] sys.stdout._write(HELP_HTML % {'title': title, 'text': text}) helper = _Helper() def _add_subclass_info(inner, obj, base): if isinstance(base, tuple): for base in base: if type(obj) is base: return inner elif type(obj) is base: return inner module = '' if obj.__class__.__module__ not in ('__builtin__', 'exceptions'): module = '<span class="module">%s.</span>' % obj.__class__.__module__ return '%s%s(%s)' % (module, obj.__class__.__name__, inner) class DebugReprGenerator(object): def __init__(self): self._stack = [] def _sequence_repr_maker(left, right, base=object(), limit=8): def proxy(self, obj, recursive): if recursive: return _add_subclass_info(left + '...' + right, obj, base) buf = [left] have_extended_section = False for idx, item in enumerate(obj): if idx: buf.append(', ') if idx == limit: buf.append('<span class="extended">') have_extended_section = True buf.append(self.repr(item)) if have_extended_section: buf.append('</span>') buf.append(right) return _add_subclass_info(u''.join(buf), obj, base) return proxy list_repr = _sequence_repr_maker('[', ']', list) tuple_repr = _sequence_repr_maker('(', ')', tuple) set_repr = _sequence_repr_maker('set([', '])', set) frozenset_repr = _sequence_repr_maker('frozenset([', '])', frozenset) if deque is not None: deque_repr = _sequence_repr_maker('<span class="module">collections.' 
'</span>deque([', '])', deque) del _sequence_repr_maker def regex_repr(self, obj): pattern = repr(obj.pattern) if PY2: pattern = pattern.decode('string-escape', 'ignore') else: pattern = codecs.decode(pattern, 'unicode-escape', 'ignore') if pattern[:1] == 'u': pattern = 'ur' + pattern[1:] else: pattern = 'r' + pattern return u're.compile(<span class="string regex">%s</span>)' % pattern def string_repr(self, obj, limit=70): buf = ['<span class="string">'] escaped = escape(obj) a = repr(escaped[:limit]) b = repr(escaped[limit:]) if isinstance(obj, text_type) and PY2: buf.append('u') a = a[1:] b = b[1:] if b != "''": buf.extend((a[:-1], '<span class="extended">', b[1:], '</span>')) else: buf.append(a) buf.append('</span>') return _add_subclass_info(u''.join(buf), obj, (bytes, text_type)) def dict_repr(self, d, recursive, limit=5): if recursive: return _add_subclass_info(u'{...}', d, dict) buf = ['{'] have_extended_section = False for idx, (key, value) in enumerate(iteritems(d)): if idx: buf.append(', ') if idx == limit - 1: buf.append('<span class="extended">') have_extended_section = True buf.append('<span class="pair"><span class="key">%s</span>: ' '<span class="value">%s</span></span>' % (self.repr(key), self.repr(value))) if have_extended_section: buf.append('</span>') buf.append('}') return _add_subclass_info(u''.join(buf), d, dict) def object_repr(self, obj): r = repr(obj) if PY2: r = r.decode('utf-8', 'replace') return u'<span class="object">%s</span>' % escape(r) def dispatch_repr(self, obj, recursive): if obj is helper: return u'<span class="help">%r</span>' % helper if isinstance(obj, (integer_types, float, complex)): return u'<span class="number">%r</span>' % obj if isinstance(obj, string_types): return self.string_repr(obj) if isinstance(obj, RegexType): return self.regex_repr(obj) if isinstance(obj, list): return self.list_repr(obj, recursive) if isinstance(obj, tuple): return self.tuple_repr(obj, recursive) if isinstance(obj, set): return 
self.set_repr(obj, recursive) if isinstance(obj, frozenset): return self.frozenset_repr(obj, recursive) if isinstance(obj, dict): return self.dict_repr(obj, recursive) if deque is not None and isinstance(obj, deque): return self.deque_repr(obj, recursive) return self.object_repr(obj) def fallback_repr(self): try: info = ''.join(format_exception_only(*sys.exc_info()[:2])) except Exception: # pragma: no cover info = '?' if PY2: info = info.decode('utf-8', 'ignore') return u'<span class="brokenrepr">&lt;broken repr (%s)&gt;' \ u'</span>' % escape(info.strip()) def repr(self, obj): recursive = False for item in self._stack: if item is obj: recursive = True break self._stack.append(obj) try: try: return self.dispatch_repr(obj, recursive) except Exception: return self.fallback_repr() finally: self._stack.pop() def dump_object(self, obj): repr = items = None if isinstance(obj, dict): title = 'Contents of' items = [] for key, value in iteritems(obj): if not isinstance(key, string_types): items = None break items.append((key, self.repr(value))) if items is None: items = [] repr = self.repr(obj) for key in dir(obj): try: items.append((key, self.repr(getattr(obj, key)))) except Exception: pass title = 'Details for' title += ' ' + object.__repr__(obj)[1:-1] return self.render_object_dump(items, title, repr) def dump_locals(self, d): items = [(key, self.repr(value)) for key, value in d.items()] return self.render_object_dump(items, 'Local variables in frame') def render_object_dump(self, items, title, repr=None): html_items = [] for key, value in items: html_items.append('<tr><th>%s<td><pre class=repr>%s</pre>' % (escape(key), value)) if not html_items: html_items.append('<tr><td><em>Nothing</em>') return OBJECT_DUMP_HTML % { 'title': escape(title), 'repr': repr and '<pre class=repr>%s</pre>' % repr or '', 'items': '\n'.join(html_items) }
apache-2.0
dsturnbull/thrift
test/py.tornado/test_suite.py
10
6355
#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import datetime import glob import sys import time import unittest sys.path.insert(0, './gen-py.tornado') sys.path.insert(0, glob.glob('../../lib/py/build/lib.*')[0]) try: __import__('tornado') except ImportError: print "module `tornado` not found, skipping test" sys.exit(0) from tornado import gen, ioloop, stack_context from tornado.testing import AsyncTestCase, get_unused_port from thrift import TTornado from thrift.protocol import TBinaryProtocol from ThriftTest import ThriftTest from ThriftTest.ttypes import * class TestHandler(object): def __init__(self, test_instance): self.test_instance = test_instance def testVoid(self, callback): callback() def testString(self, s, callback): callback(s) def testByte(self, b, callback): callback(b) def testI16(self, i16, callback): callback(i16) def testI32(self, i32, callback): callback(i32) def testI64(self, i64, callback): callback(i64) def testDouble(self, dub, callback): callback(dub) def testStruct(self, thing, callback): callback(thing) def testException(self, s, callback): if s == 'Xception': x = Xception() x.errorCode = 1001 x.message = s raise x elif s == 'throw_undeclared': raise ValueError("foo") callback() def 
testOneway(self, seconds, callback=None): start = time.time() def fire_oneway(): end = time.time() self.test_instance.stop((start, end, seconds)) ioloop.IOLoop.instance().add_timeout( datetime.timedelta(seconds=seconds), fire_oneway) if callback: callback() def testNest(self, thing, callback): callback(thing) def testMap(self, thing, callback): callback(thing) def testSet(self, thing, callback): callback(thing) def testList(self, thing, callback): callback(thing) def testEnum(self, thing, callback): callback(thing) def testTypedef(self, thing, callback): callback(thing) class ThriftTestCase(AsyncTestCase): def get_new_ioloop(self): return ioloop.IOLoop.instance() def setUp(self): self.port = get_unused_port() self.io_loop = self.get_new_ioloop() # server self.handler = TestHandler(self) self.processor = ThriftTest.Processor(self.handler) self.pfactory = TBinaryProtocol.TBinaryProtocolFactory() self.server = TTornado.TTornadoServer(self.processor, self.pfactory) self.server.bind(self.port) self.server.start(1) # client transport = TTornado.TTornadoStreamTransport('localhost', self.port) pfactory = TBinaryProtocol.TBinaryProtocolFactory() self.client = ThriftTest.Client(transport, pfactory) transport.open(callback=self.stop) self.wait(timeout=1) def test_void(self): self.client.testVoid(callback=self.stop) v = self.wait(timeout=1) self.assertEquals(v, None) def test_string(self): self.client.testString('Python', callback=self.stop) v = self.wait(timeout=1) self.assertEquals(v, 'Python') def test_byte(self): self.client.testByte(63, callback=self.stop) v = self.wait(timeout=1) self.assertEquals(v, 63) def test_i32(self): self.client.testI32(-1, callback=self.stop) v = self.wait(timeout=1) self.assertEquals(v, -1) self.client.testI32(0, callback=self.stop) v = self.wait(timeout=1) self.assertEquals(v, 0) def test_i64(self): self.client.testI64(-34359738368, callback=self.stop) v = self.wait(timeout=1) self.assertEquals(v, -34359738368) def test_double(self): 
self.client.testDouble(-5.235098235, callback=self.stop) v = self.wait(timeout=1) self.assertEquals(v, -5.235098235) def test_struct(self): x = Xtruct() x.string_thing = "Zero" x.byte_thing = 1 x.i32_thing = -3 x.i64_thing = -5 self.client.testStruct(x, callback=self.stop) y = self.wait(timeout=1) self.assertEquals(y.string_thing, "Zero") self.assertEquals(y.byte_thing, 1) self.assertEquals(y.i32_thing, -3) self.assertEquals(y.i64_thing, -5) def test_exception(self): self.client.testException('Safe', callback=self.stop) v = self.wait(timeout=1) self.client.testException('Xception', callback=self.stop) ex = self.wait(timeout=1) if type(ex) == Xception: self.assertEquals(ex.errorCode, 1001) self.assertEquals(ex.message, 'Xception') else: self.fail("should have gotten exception") def test_oneway(self): def return_from_send(): self.stop('done with send') self.client.testOneway(0.5, callback=return_from_send) self.assertEquals(self.wait(timeout=1), 'done with send') start, end, seconds = self.wait(timeout=1) self.assertAlmostEquals(seconds, (end - start), places=3) def suite(): suite = unittest.TestSuite() loader = unittest.TestLoader() suite.addTest(loader.loadTestsFromTestCase(ThriftTestCase)) return suite if __name__ == '__main__': unittest.TestProgram(defaultTest='suite', testRunner=unittest.TextTestRunner(verbosity=1))
apache-2.0
mollstam/UnrealPy
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/django-1.8.2/tests/files/tests.py
24
9641
# -*- coding: utf-8 -*- from __future__ import unicode_literals import gzip import os import tempfile import unittest import zlib from io import BytesIO, StringIO from django.core.files import File from django.core.files.base import ContentFile from django.core.files.move import file_move_safe from django.core.files.temp import NamedTemporaryFile from django.core.files.uploadedfile import SimpleUploadedFile, UploadedFile from django.utils import six from django.utils._os import upath try: from PIL import Image except ImportError: Image = None else: from django.core.files import images class FileTests(unittest.TestCase): def test_unicode_uploadedfile_name(self): uf = UploadedFile(name='¿Cómo?', content_type='text') self.assertIs(type(repr(uf)), str) def test_unicode_file_name(self): f = File(None, 'djángö') self.assertIs(type(repr(f)), str) def test_context_manager(self): orig_file = tempfile.TemporaryFile() base_file = File(orig_file) with base_file as f: self.assertIs(base_file, f) self.assertFalse(f.closed) self.assertTrue(f.closed) self.assertTrue(orig_file.closed) def test_namedtemporaryfile_closes(self): """ The symbol django.core.files.NamedTemporaryFile is assigned as a different class on different operating systems. In any case, the result should minimally mock some of the API of tempfile.NamedTemporaryFile from the Python standard library. """ tempfile = NamedTemporaryFile() self.assertTrue(hasattr(tempfile, "closed")) self.assertFalse(tempfile.closed) tempfile.close() self.assertTrue(tempfile.closed) def test_file_mode(self): # Should not set mode to None if it is not present. # See #14681, stdlib gzip module crashes if mode is set to None file = SimpleUploadedFile("mode_test.txt", b"content") self.assertFalse(hasattr(file, 'mode')) gzip.GzipFile(fileobj=file) def test_file_iteration(self): """ File objects should yield lines when iterated over. Refs #22107. 
""" file = File(BytesIO(b'one\ntwo\nthree')) self.assertEqual(list(file), [b'one\n', b'two\n', b'three']) def test_file_iteration_windows_newlines(self): """ #8149 - File objects with \r\n line endings should yield lines when iterated over. """ f = File(BytesIO(b'one\r\ntwo\r\nthree')) self.assertEqual(list(f), [b'one\r\n', b'two\r\n', b'three']) def test_file_iteration_mac_newlines(self): """ #8149 - File objects with \r line endings should yield lines when iterated over. """ f = File(BytesIO(b'one\rtwo\rthree')) self.assertEqual(list(f), [b'one\r', b'two\r', b'three']) def test_file_iteration_mixed_newlines(self): f = File(BytesIO(b'one\rtwo\nthree\r\nfour')) self.assertEqual(list(f), [b'one\r', b'two\n', b'three\r\n', b'four']) def test_file_iteration_with_unix_newline_at_chunk_boundary(self): f = File(BytesIO(b'one\ntwo\nthree')) # Set chunk size to create a boundary after \n: # b'one\n... # ^ f.DEFAULT_CHUNK_SIZE = 4 self.assertEqual(list(f), [b'one\n', b'two\n', b'three']) def test_file_iteration_with_windows_newline_at_chunk_boundary(self): f = File(BytesIO(b'one\r\ntwo\r\nthree')) # Set chunk size to create a boundary between \r and \n: # b'one\r\n... # ^ f.DEFAULT_CHUNK_SIZE = 4 self.assertEqual(list(f), [b'one\r\n', b'two\r\n', b'three']) def test_file_iteration_with_mac_newline_at_chunk_boundary(self): f = File(BytesIO(b'one\rtwo\rthree')) # Set chunk size to create a boundary after \r: # b'one\r... 
# ^ f.DEFAULT_CHUNK_SIZE = 4 self.assertEqual(list(f), [b'one\r', b'two\r', b'three']) def test_file_iteration_with_text(self): f = File(StringIO('one\ntwo\nthree')) self.assertEqual(list(f), ['one\n', 'two\n', 'three']) class NoNameFileTestCase(unittest.TestCase): """ Other examples of unnamed files may be tempfile.SpooledTemporaryFile or urllib.urlopen() """ def test_noname_file_default_name(self): self.assertEqual(File(BytesIO(b'A file with no name')).name, None) def test_noname_file_get_size(self): self.assertEqual(File(BytesIO(b'A file with no name')).size, 19) class ContentFileTestCase(unittest.TestCase): def test_content_file_default_name(self): self.assertEqual(ContentFile(b"content").name, None) def test_content_file_custom_name(self): """ Test that the constructor of ContentFile accepts 'name' (#16590). """ name = "I can have a name too!" self.assertEqual(ContentFile(b"content", name=name).name, name) def test_content_file_input_type(self): """ Test that ContentFile can accept both bytes and unicode and that the retrieved content is of the same type. """ self.assertIsInstance(ContentFile(b"content").read(), bytes) if six.PY3: self.assertIsInstance(ContentFile("español").read(), six.text_type) else: self.assertIsInstance(ContentFile("español").read(), bytes) class DimensionClosingBug(unittest.TestCase): """ Test that get_image_dimensions() properly closes files (#8817) """ @unittest.skipUnless(Image, "Pillow not installed") def test_not_closing_of_files(self): """ Open files passed into get_image_dimensions() should stay opened. """ empty_io = BytesIO() try: images.get_image_dimensions(empty_io) finally: self.assertTrue(not empty_io.closed) @unittest.skipUnless(Image, "Pillow not installed") def test_closing_of_filenames(self): """ get_image_dimensions() called with a filename should closed the file. 
""" # We need to inject a modified open() builtin into the images module # that checks if the file was closed properly if the function is # called with a filename instead of an file object. # get_image_dimensions will call our catching_open instead of the # regular builtin one. class FileWrapper(object): _closed = [] def __init__(self, f): self.f = f def __getattr__(self, name): return getattr(self.f, name) def close(self): self._closed.append(True) self.f.close() def catching_open(*args): return FileWrapper(open(*args)) images.open = catching_open try: images.get_image_dimensions(os.path.join(os.path.dirname(upath(__file__)), "test1.png")) finally: del images.open self.assertTrue(FileWrapper._closed) class InconsistentGetImageDimensionsBug(unittest.TestCase): """ Test that get_image_dimensions() works properly after various calls using a file handler (#11158) """ @unittest.skipUnless(Image, "Pillow not installed") def test_multiple_calls(self): """ Multiple calls of get_image_dimensions() should return the same size. 
""" img_path = os.path.join(os.path.dirname(upath(__file__)), "test.png") with open(img_path, 'rb') as fh: image = images.ImageFile(fh) image_pil = Image.open(fh) size_1 = images.get_image_dimensions(image) size_2 = images.get_image_dimensions(image) self.assertEqual(image_pil.size, size_1) self.assertEqual(size_1, size_2) @unittest.skipUnless(Image, "Pillow not installed") def test_bug_19457(self): """ Regression test for #19457 get_image_dimensions fails on some pngs, while Image.size is working good on them """ img_path = os.path.join(os.path.dirname(upath(__file__)), "magic.png") try: size = images.get_image_dimensions(img_path) except zlib.error: self.fail("Exception raised from get_image_dimensions().") with open(img_path, 'rb') as fh: self.assertEqual(size, Image.open(fh).size) class FileMoveSafeTests(unittest.TestCase): def test_file_move_overwrite(self): handle_a, self.file_a = tempfile.mkstemp() handle_b, self.file_b = tempfile.mkstemp() # file_move_safe should raise an IOError exception if destination file exists and allow_overwrite is False self.assertRaises(IOError, lambda: file_move_safe(self.file_a, self.file_b, allow_overwrite=False)) # should allow it and continue on if allow_overwrite is True self.assertIsNone(file_move_safe(self.file_a, self.file_b, allow_overwrite=True)) os.close(handle_a) os.close(handle_b) class SpooledTempTests(unittest.TestCase): def test_in_memory_spooled_temp(self): with tempfile.SpooledTemporaryFile() as temp: temp.write(b"foo bar baz quux\n") django_file = File(temp, name="something.txt") self.assertEqual(django_file.size, 17) def test_written_spooled_temp(self): with tempfile.SpooledTemporaryFile(max_size=4) as temp: temp.write(b"foo bar baz quux\n") django_file = File(temp, name="something.txt") self.assertEqual(django_file.size, 17)
mit
google-research/graph-attribution
tests/test_graphnet_techniques.py
1
4088
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Tests for graphnet_techniques.""" from absl.testing import absltest from absl.testing import parameterized import graph_nets import numpy as np import tensorflow as tf import experiments import featurization import graphnet_models as models import graphnet_techniques as techniques import graphs as graph_utils import templates class AttributionTechniquesTests(parameterized.TestCase): """Test attribution interface correctness.""" def _setup_graphs_model(self): """Setup graphs and smiles if needed.""" tensorizer = featurization.MolTensorizer() smiles = ['CO', 'CCC', 'CN1C=NC2=C1C(=O)N(C(=O)N2C)C'] graphs = graph_utils.smiles_to_graphs_tuple(smiles, tensorizer) # Fix seed so that initialization is deterministic. 
tf.random.set_seed(0) model = experiments.GNN(5, 3, 10, 1, models.BlockType('gcn'), 'relu', templates.TargetType.globals, 3) model(graphs) return graphs, model, tensorizer def _setup_technique(self, name, tensorizer): """Setup attribution techniques.""" methods = techniques.get_techniques_dict(*tensorizer.get_null_vectors()) return methods[name] def assertAttribution(self, graphs, atts): atts = graph_nets.utils_tf.concat(atts, axis=0) self.assertEqual(atts.nodes.ndim, 1) self.assertEqual(atts.edges.ndim, 1) self.assertEqual(graphs.nodes.shape[0], atts.nodes.shape[0]) self.assertEqual(graphs.edges.shape[0], atts.edges.shape[0]) np.testing.assert_allclose(graphs.n_node, atts.n_node) np.testing.assert_allclose(graphs.n_edge, atts.n_edge) np.testing.assert_allclose(graphs.senders, atts.senders) np.testing.assert_allclose(graphs.receivers, atts.receivers) @parameterized.parameters([ 'Random', 'CAM', 'GradCAM-last', 'GradCAM-all', 'GradInput', 'SmoothGrad(GradInput)', 'IG' ]) def test_attribute(self, method_name): """Check we can attribute.""" graphs, model, tensorizer = self._setup_graphs_model() method = self._setup_technique(method_name, tensorizer) atts = method.attribute(graphs, model) self.assertAttribution(graphs, atts) @parameterized.parameters( ['CAM', 'GradCAM-last', 'GradCAM-all', 'GradInput', 'IG']) def test_attribute_independence(self, method_name): """Check that atts are the same batched and non-batched.""" graphs, model, tensorizer = self._setup_graphs_model() method = self._setup_technique(method_name, tensorizer) atts = method.attribute(graphs, model) single_graphs = graph_utils.split_graphs_tuple(graphs) for xi, actual in zip(single_graphs, atts): expected = method.attribute(xi, model) np.testing.assert_allclose(actual.nodes, expected[0].nodes, rtol=1e-2) np.testing.assert_allclose(actual.edges, expected[0].edges, rtol=1e-2) self.assertAttribution(xi, expected) def test_ig_sanity_check(self): """Check that IG improves with more integration steps.""" 
graphs, model, tensorizer = self._setup_graphs_model() ref_fn = techniques.make_reference_fn(*tensorizer.get_null_vectors()) method_25 = techniques.IntegratedGradients(25, ref_fn) method_100 = techniques.IntegratedGradients(100, ref_fn) error_25 = method_25.sanity_check(graphs, model)['ig_error'].mean() error_100 = method_100.sanity_check(graphs, model)['ig_error'].mean() self.assertLessEqual(error_100, error_25) if __name__ == '__main__': tf.config.experimental_run_functions_eagerly(True) absltest.main()
apache-2.0
Big-B702/python-for-android
python-modules/twisted/twisted/conch/interfaces.py
61
12993
# Copyright (c) 2007-2008 Twisted Matrix Laboratories.
# See LICENSE for details.

"""
This module contains interfaces defined for the L{twisted.conch} package.
"""

from zope.interface import Interface, Attribute


class IConchUser(Interface):
    """
    A user who has been authenticated to Cred through Conch.  This is
    the interface between the SSH connection and the user.
    """

    # SSH connection attached to this avatar by the server on login.
    conn = Attribute('The SSHConnection object for this user.')

    def lookupChannel(channelType, windowSize, maxPacket, data):
        """
        The other side requested a channel of some sort.
        channelType is the type of channel being requested,
        windowSize is the initial size of the remote window,
        maxPacket is the largest packet we should send,
        data is any other packet data (often nothing).

        We return a subclass of L{SSHChannel<ssh.channel.SSHChannel>}.  If
        an appropriate channel can not be found, an exception will be
        raised.  If a L{ConchError<error.ConchError>} is raised, the .value
        will be the message, and the .data will be the error code.

        @type channelType: C{str}
        @type windowSize: C{int}
        @type maxPacket: C{int}
        @type data: C{str}
        @rtype: subclass of L{SSHChannel}/C{tuple}
        """

    def lookupSubsystem(subsystem, data):
        """
        The other side requested a subsystem.
        subsystem is the name of the subsystem being requested.
        data is any other packet data (often nothing).

        We return a L{Protocol}.

        @type subsystem: C{str}
        @type data: C{str}
        """

    def gotGlobalRequest(requestType, data):
        """
        A global request was sent from the other side.

        @param requestType: the name of the global request.
        @type requestType: C{str}
        @param data: any extra packet data sent with the request.
        @type data: C{str}

        NOTE(review): the previous docstring here described dispatching to
        'channel_channelType' methods and an OPEN_UNKNOWN_CHANNEL_TYPE
        error, which is channel-lookup behavior and does not match this
        method's signature; the return semantics of implementations should
        be confirmed against the SSH connection layer.
        """


class ISession(Interface):

    def getPty(term, windowSize, modes):
        """
        Get a pseudo-terminal for use by a shell or command.

        If a pseudo-terminal is not available, or the request otherwise
        fails, raise an exception.
        """

    def openShell(proto):
        """
        Open a shell and connect it to proto.

        @param proto: a L{ProcessProtocol} instance.
        """

    def execCommand(proto, command):
        """
        Execute a command.

        @param proto: a L{ProcessProtocol} instance.
        """

    def windowChanged(newWindowSize):
        """
        Called when the size of the remote screen has changed.
        """

    def eofReceived():
        """
        Called when the other side has indicated no more data will be sent.
        """

    def closed():
        """
        Called when the session is closed.
        """


class ISFTPServer(Interface):
    """
    The only attribute of this class is "avatar".  It is the avatar
    returned by the Realm that we are authenticated with, and
    represents the logged-in user.  Each method should check to verify
    that the user has permission for their actions.
    """

    def gotVersion(otherVersion, extData):
        """
        Called when the client sends their version info.

        otherVersion is an integer representing the version of the SFTP
        protocol they are claiming.
        extData is a dictionary of extended_name : extended_data items.
        These items are sent by the client to indicate additional features.

        This method should return a dictionary of extended_name :
        extended_data items.  These items are the additional features (if
        any) supported by the server.
        """
        return {}

    def openFile(filename, flags, attrs):
        """
        Called when the client asks to open a file.

        @param filename: a string representing the file to open.

        @param flags: an integer of the flags to open the file with, ORed
        together.  The flags and their values are listed at the bottom of
        this file.

        @param attrs: a list of attributes to open the file with.  It is a
        dictionary, consisting of 0 or more keys.  The possible keys are::

            size: the size of the file in bytes
            uid: the user ID of the file as an integer
            gid: the group ID of the file as an integer
            permissions: the permissions of the file with as an integer.
            the bit representation of this field is defined by POSIX.
            atime: the access time of the file as seconds since the epoch.
            mtime: the modification time of the file as seconds since the
            epoch.
            ext_*: extended attributes.  The server is not required to
            understand this, but it may.

        NOTE: there is no way to indicate text or binary files.  it is up
        to the SFTP client to deal with this.

        This method returns an object that meets the ISFTPFile interface.
        Alternatively, it can return a L{Deferred} that will be called back
        with the object.
        """

    def removeFile(filename):
        """
        Remove the given file.

        This method returns when the remove succeeds, or a Deferred that is
        called back when it succeeds.

        @param filename: the name of the file as a string.
        """

    def renameFile(oldpath, newpath):
        """
        Rename the given file.

        This method returns when the rename succeeds, or a L{Deferred} that
        is called back when it succeeds. If the rename fails,
        C{renameFile} will raise an implementation-dependent exception.

        @param oldpath: the current location of the file.
        @param newpath: the new file name.
        """

    def makeDirectory(path, attrs):
        """
        Make a directory.

        This method returns when the directory is created, or a Deferred
        that is called back when it is created.

        @param path: the name of the directory to create as a string.
        @param attrs: a dictionary of attributes to create the directory
        with.  Its meaning is the same as the attrs in the L{openFile}
        method.
        """

    def removeDirectory(path):
        """
        Remove a directory (non-recursively).

        It is an error to remove a directory that has files or directories
        in it.

        This method returns when the directory is removed, or a Deferred
        that is called back when it is removed.

        @param path: the directory to remove.
        """

    def openDirectory(path):
        """
        Open a directory for scanning.

        This method returns an iterable object that has a close() method,
        or a Deferred that is called back with same.

        The close() method is called when the client is finished reading
        from the directory.  At this point, the iterable will no longer be
        used.

        The iterable should return triples of the form (filename, longname,
        attrs) or Deferreds that return the same.  The sequence must
        support __getitem__, but otherwise may be any 'sequence-like'
        object.

        filename is the name of the file relative to the directory.
        longname is an expanded format of the filename.  The recommended
        format is:
        -rwxr-xr-x   1 mjos     staff      348911 Mar 25 14:29 t-filexfer
        1234567890 123 12345678 12345678 12345678 123456789012

        The first line is sample output, the second is the length of the
        field.  The fields are: permissions, link count, user owner, group
        owner, size in bytes, modification time.

        attrs is a dictionary in the format of the attrs argument to
        openFile.

        @param path: the directory to open.
        """

    def getAttrs(path, followLinks):
        """
        Return the attributes for the given path.

        This method returns a dictionary in the same format as the attrs
        argument to openFile or a Deferred that is called back with same.

        @param path: the path to return attributes for as a string.
        @param followLinks: a boolean.  If it is True, follow symbolic
        links and return attributes for the real path at the base.  If it
        is False, return attributes for the specified path.
        """

    def setAttrs(path, attrs):
        """
        Set the attributes for the path.

        This method returns when the attributes are set or a Deferred that
        is called back when they are.

        @param path: the path to set attributes for as a string.
        @param attrs: a dictionary in the same format as the attrs argument
        to L{openFile}.
        """

    def readLink(path):
        """
        Find the root of a set of symbolic links.

        This method returns the target of the link, or a Deferred that
        returns the same.

        @param path: the path of the symlink to read.
        """

    def makeLink(linkPath, targetPath):
        """
        Create a symbolic link.

        This method returns when the link is made, or a Deferred that
        returns the same.

        @param linkPath: the pathname of the symlink as a string.
        @param targetPath: the path of the target of the link as a string.
        """

    def realPath(path):
        """
        Convert any path to an absolute path.

        This method returns the absolute path as a string, or a Deferred
        that returns the same.

        @param path: the path to convert as a string.
        """

    def extendedRequest(extendedName, extendedData):
        """
        This is the extension mechanism for SFTP.  The other side can send
        us arbitrary requests.

        If we don't implement the request given by extendedName, raise
        NotImplementedError.

        The return value is a string, or a Deferred that will be called
        back with a string.

        @param extendedName: the name of the request as a string.
        @param extendedData: the data the other side sent with the request,
        as a string.
        """


class IKnownHostEntry(Interface):
    """
    A L{IKnownHostEntry} is an entry in an OpenSSH-formatted C{known_hosts}
    file.

    @since: 8.2
    """

    def matchesKey(key):
        """
        Return True if this entry matches the given Key object, False
        otherwise.

        @param key: The key object to match against.
        @type key: L{twisted.conch.ssh.Key}
        """

    def matchesHost(hostname):
        """
        Return True if this entry matches the given hostname, False
        otherwise.

        Note that this does no name resolution; if you want to match an IP
        address, you have to resolve it yourself, and pass it in as a
        dotted quad string.

        @param hostname: The hostname to match against.
        @type hostname: L{str}
        """

    def toString():
        """
        @return: a serialized string representation of this entry, suitable
        for inclusion in a known_hosts file.  (Newline not included.)
        @rtype: L{str}
        """


class ISFTPFile(Interface):
    """
    This represents an open file on the server.  An object adhering to this
    interface should be returned from L{openFile}().
    """

    def close():
        """
        Close the file.

        This method returns nothing if the close succeeds immediately, or a
        Deferred that is called back when the close succeeds.
        """

    def readChunk(offset, length):
        """
        Read from the file.

        If EOF is reached before any data is read, raise EOFError.

        This method returns the data as a string, or a Deferred that is
        called back with same.

        @param offset: an integer that is the index to start from in the
        file.
        @param length: the maximum length of data to return.  The actual
        amount returned may be less than this.  For normal disk files,
        however, this should read the requested number (up to the end of
        the file).
        """

    def writeChunk(offset, data):
        """
        Write to the file.

        This method returns when the write completes, or a Deferred that is
        called when it completes.

        @param offset: an integer that is the index to start from in the
        file.
        @param data: a string that is the data to write.
        """

    def getAttrs():
        """
        Return the attributes for the file.

        This method returns a dictionary in the same format as the attrs
        argument to L{openFile} or a L{Deferred} that is called back with
        same.
        """

    def setAttrs(attrs):
        """
        Set the attributes for the file.

        This method returns when the attributes are set or a Deferred that
        is called back when they are.

        @param attrs: a dictionary in the same format as the attrs argument
        to L{openFile}.
        """
apache-2.0
SauloAislan/ironic
ironic/common/keystone.py
1
4019
# coding=utf-8
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Central place for handling Keystone authorization and service lookup."""

from keystoneauth1 import exceptions as kaexception
from keystoneauth1 import loading as kaloading
from oslo_log import log as logging
import six

from ironic.common import exception
from ironic.conf import CONF


LOG = logging.getLogger(__name__)


def ks_exceptions(f):
    """Wraps keystoneclient functions and centralizes exception handling.

    Translates keystoneauth1 exceptions raised by the wrapped function into
    the corresponding ironic exceptions:

    * ``EndpointNotFound`` -> ``CatalogNotFound``
    * ``Unauthorized`` / ``AuthorizationFailure`` -> ``KeystoneUnauthorized``
    * ``NoMatchingPlugin`` / ``MissingRequiredOptions`` -> ``ConfigInvalid``
    * anything else -> ``KeystoneFailure`` (logged with traceback)

    :param f: the function to wrap.
    :returns: the wrapped function.
    """
    @six.wraps(f)
    def wrapper(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except kaexception.EndpointNotFound:
            # NOTE(review): these defaults are only used for the error
            # message; 'endpoint_type' is read from kwargs here while
            # get_service_url below passes 'interface' — confirm callers
            # pass the key this wrapper expects.
            service_type = kwargs.get('service_type', 'baremetal')
            endpoint_type = kwargs.get('endpoint_type', 'internal')
            raise exception.CatalogNotFound(
                service_type=service_type, endpoint_type=endpoint_type)
        except (kaexception.Unauthorized, kaexception.AuthorizationFailure):
            raise exception.KeystoneUnauthorized()
        except (kaexception.NoMatchingPlugin,
                kaexception.MissingRequiredOptions) as e:
            raise exception.ConfigInvalid(six.text_type(e))
        except Exception as e:
            LOG.exception('Keystone request failed: %(msg)s',
                          {'msg': six.text_type(e)})
            raise exception.KeystoneFailure(six.text_type(e))
    return wrapper


@ks_exceptions
def get_session(group, **session_kwargs):
    """Loads session object from options in a configuration file section.

    The session_kwargs will be passed directly to keystoneauth1 Session
    and will override the values loaded from config.
    Consult keystoneauth1 docs for available options.

    :param group: name of the config section to load session options from
    :returns: a keystoneauth1 Session object.
    """
    return kaloading.load_session_from_conf_options(
        CONF, group, **session_kwargs)


@ks_exceptions
def get_auth(group, **auth_kwargs):
    """Loads auth plugin from options in a configuration file section.

    The auth_kwargs will be passed directly to keystoneauth1 auth plugin
    and will override the values loaded from config.
    Note that the accepted kwargs will depend on auth plugin type as defined
    by [group]auth_type option.
    Consult keystoneauth1 docs for available auth plugins and their options.

    :param group: name of the config section to load auth plugin options from
    :returns: a keystoneauth1 auth plugin object.
    """
    try:
        auth = kaloading.load_auth_from_conf_options(CONF, group,
                                                     **auth_kwargs)
    except kaexception.MissingRequiredOptions:
        # Log which config section is broken before letting ks_exceptions
        # translate this into ConfigInvalid.
        LOG.error('Failed to load auth plugin from group %s', group)
        raise
    return auth


# NOTE(pas-ha) Used by neutronclient and resolving ironic API only
# FIXME(pas-ha) remove this while moving to kesytoneauth adapters
@ks_exceptions
def get_service_url(session, **kwargs):
    """Find endpoint for given service in keystone catalog.

    If 'interface' is provided, fetches service url of this interface.
    Otherwise, first tries to fetch 'internal' endpoint, and then the
    'public' one.

    :param session: keystoneauth Session object
    :param kwargs: any other arguments accepted by Session.get_endpoint
        method
    :returns: the endpoint URL as a string.
    """
    if 'interface' in kwargs:
        return session.get_endpoint(**kwargs)
    try:
        return session.get_endpoint(interface='internal', **kwargs)
    except kaexception.EndpointNotFound:
        # Fall back to the public endpoint when no internal one is
        # registered in the catalog.
        return session.get_endpoint(interface='public', **kwargs)
apache-2.0
BT-fgarbely/odoo
addons/l10n_fr/wizard/__init__.py
424
1462
# -*- coding: utf-8 -*- ############################################################################## # # Copyright (c) 2008 JAILLET Simon - CrysaLEAD - www.crysalead.fr # # WARNING: This program as such is intended to be used by professional # programmers who take the whole responsability of assessing all potential # consequences resulting from its eventual inadequacies and bugs # End users who are looking for a ready-to-use solution with commercial # garantees and support are strongly adviced to contract a Free Software # Service Company # # This program is Free Software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # ############################################################################## import fr_report_bilan import fr_report_compute_resultant # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
srajag/nova
nova/tests/api/openstack/compute/test_extensions.py
12
29594
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import iso8601 from lxml import etree from oslo.config import cfg import webob from nova.api.openstack import compute from nova.api.openstack.compute import extensions as compute_extensions from nova.api.openstack import extensions as base_extensions from nova.api.openstack import wsgi from nova.api.openstack import xmlutil from nova import exception from nova.openstack.common import jsonutils import nova.policy from nova import test from nova.tests.api.openstack import fakes from nova.tests import matchers CONF = cfg.CONF NS = "{http://docs.openstack.org/common/api/v1.0}" ATOMNS = "{http://www.w3.org/2005/Atom}" response_body = "Try to say this Mr. Knox, sir..." extension_body = "I am not a fox!" class StubController(object): def __init__(self, body): self.body = body def index(self, req): return self.body def create(self, req, body): msg = 'All aboard the fail train!' 
raise webob.exc.HTTPBadRequest(explanation=msg) def show(self, req, id): raise webob.exc.HTTPNotFound() class StubActionController(wsgi.Controller): def __init__(self, body): self.body = body @wsgi.action('fooAction') def _action_foo(self, req, id, body): return self.body class StubControllerExtension(base_extensions.ExtensionDescriptor): name = 'twaadle' def __init__(self): pass class StubEarlyExtensionController(wsgi.Controller): def __init__(self, body): self.body = body @wsgi.extends def index(self, req): yield self.body @wsgi.extends(action='fooAction') def _action_foo(self, req, id, body): yield self.body class StubLateExtensionController(wsgi.Controller): def __init__(self, body): self.body = body @wsgi.extends def index(self, req, resp_obj): return self.body @wsgi.extends(action='fooAction') def _action_foo(self, req, resp_obj, id, body): return self.body class StubExtensionManager(object): """Provides access to Tweedle Beetles.""" name = "Tweedle Beetle Extension" alias = "TWDLBETL" def __init__(self, resource_ext=None, action_ext=None, request_ext=None, controller_ext=None): self.resource_ext = resource_ext self.action_ext = action_ext self.request_ext = request_ext self.controller_ext = controller_ext self.extra_resource_ext = None def get_resources(self): resource_exts = [] if self.resource_ext: resource_exts.append(self.resource_ext) if self.extra_resource_ext: resource_exts.append(self.extra_resource_ext) return resource_exts def get_actions(self): action_exts = [] if self.action_ext: action_exts.append(self.action_ext) return action_exts def get_request_extensions(self): request_extensions = [] if self.request_ext: request_extensions.append(self.request_ext) return request_extensions def get_controller_extensions(self): controller_extensions = [] if self.controller_ext: controller_extensions.append(self.controller_ext) return controller_extensions class ExtensionTestCase(test.TestCase): def setUp(self): super(ExtensionTestCase, self).setUp() ext_list 
= CONF.osapi_compute_extension[:] fox = ('nova.tests.api.openstack.compute.extensions.' 'foxinsocks.Foxinsocks') if fox not in ext_list: ext_list.append(fox) self.flags(osapi_compute_extension=ext_list) self.fake_context = nova.context.RequestContext('fake', 'fake') def test_extension_authorizer_throws_exception_if_policy_fails(self): target = {'project_id': '1234', 'user_id': '5678'} self.mox.StubOutWithMock(nova.policy, 'enforce') nova.policy.enforce(self.fake_context, "compute_extension:used_limits_for_admin", target).AndRaise( exception.PolicyNotAuthorized( action="compute_extension:used_limits_for_admin")) self.mox.ReplayAll() authorize = base_extensions.extension_authorizer('compute', 'used_limits_for_admin' ) self.assertRaises(exception.PolicyNotAuthorized, authorize, self.fake_context, target=target) def test_core_authorizer_throws_exception_if_policy_fails(self): target = {'project_id': '1234', 'user_id': '5678'} self.mox.StubOutWithMock(nova.policy, 'enforce') nova.policy.enforce(self.fake_context, "compute:used_limits_for_admin", target).AndRaise( exception.PolicyNotAuthorized( action="compute:used_limits_for_admin")) self.mox.ReplayAll() authorize = base_extensions.core_authorizer('compute', 'used_limits_for_admin' ) self.assertRaises(exception.PolicyNotAuthorized, authorize, self.fake_context, target=target) class ExtensionControllerTest(ExtensionTestCase): def setUp(self): super(ExtensionControllerTest, self).setUp() self.ext_list = [ "AdminActions", "Aggregates", "AssistedVolumeSnapshots", "AvailabilityZone", "Agents", "Certificates", "Cloudpipe", "CloudpipeUpdate", "ConsoleOutput", "Consoles", "Createserverext", "DeferredDelete", "DiskConfig", "ExtendedAvailabilityZone", "ExtendedFloatingIps", "ExtendedIps", "ExtendedIpsMac", "ExtendedVIFNet", "Evacuate", "ExtendedStatus", "ExtendedVolumes", "ExtendedServerAttributes", "FixedIPs", "FlavorAccess", "FlavorDisabled", "FlavorExtraSpecs", "FlavorExtraData", "FlavorManage", "FlavorRxtx", "FlavorSwap", 
"FloatingIps", "FloatingIpDns", "FloatingIpPools", "FloatingIpsBulk", "Fox In Socks", "Hosts", "ImageSize", "InstanceActions", "Keypairs", "Multinic", "MultipleCreate", "QuotaClasses", "Quotas", "ExtendedQuotas", "Rescue", "SchedulerHints", "SecurityGroupDefaultRules", "SecurityGroups", "ServerDiagnostics", "ServerListMultiStatus", "ServerPassword", "ServerStartStop", "Services", "SimpleTenantUsage", "UsedLimits", "UserData", "VirtualInterfaces", "VolumeAttachmentUpdate", "Volumes", ] self.ext_list.sort() def test_list_extensions_json(self): app = compute.APIRouter(init_only=('extensions',)) request = webob.Request.blank("/fake/extensions") response = request.get_response(app) self.assertEqual(200, response.status_int) # Make sure we have all the extensions, extra extensions being OK. data = jsonutils.loads(response.body) names = [str(x['name']) for x in data['extensions'] if str(x['name']) in self.ext_list] names.sort() self.assertEqual(names, self.ext_list) # Ensure all the timestamps are valid according to iso8601 for ext in data['extensions']: iso8601.parse_date(ext['updated']) # Make sure that at least Fox in Sox is correct. 
(fox_ext, ) = [ x for x in data['extensions'] if x['alias'] == 'FOXNSOX'] self.assertEqual(fox_ext, { 'namespace': 'http://www.fox.in.socks/api/ext/pie/v1.0', 'name': 'Fox In Socks', 'updated': '2011-01-22T13:25:27-06:00', 'description': 'The Fox In Socks Extension.', 'alias': 'FOXNSOX', 'links': [] }, ) for ext in data['extensions']: url = '/fake/extensions/%s' % ext['alias'] request = webob.Request.blank(url) response = request.get_response(app) output = jsonutils.loads(response.body) self.assertEqual(output['extension']['alias'], ext['alias']) def test_get_extension_json(self): app = compute.APIRouter(init_only=('extensions',)) request = webob.Request.blank("/fake/extensions/FOXNSOX") response = request.get_response(app) self.assertEqual(200, response.status_int) data = jsonutils.loads(response.body) self.assertEqual(data['extension'], { "namespace": "http://www.fox.in.socks/api/ext/pie/v1.0", "name": "Fox In Socks", "updated": "2011-01-22T13:25:27-06:00", "description": "The Fox In Socks Extension.", "alias": "FOXNSOX", "links": []}) def test_get_non_existing_extension_json(self): app = compute.APIRouter(init_only=('extensions',)) request = webob.Request.blank("/fake/extensions/4") response = request.get_response(app) self.assertEqual(404, response.status_int) def test_list_extensions_xml(self): app = compute.APIRouter(init_only=('servers', 'flavors', 'extensions')) request = webob.Request.blank("/fake/extensions") request.accept = "application/xml" response = request.get_response(app) self.assertEqual(200, response.status_int) root = etree.XML(response.body) self.assertEqual(root.tag.split('extensions')[0], NS) # Make sure we have all the extensions, extras extensions being OK. exts = root.findall('{0}extension'.format(NS)) self.assertTrue(len(exts) >= len(self.ext_list)) # Make sure that at least Fox in Sox is correct. 
(fox_ext, ) = [x for x in exts if x.get('alias') == 'FOXNSOX'] self.assertEqual(fox_ext.get('name'), 'Fox In Socks') self.assertEqual(fox_ext.get('namespace'), 'http://www.fox.in.socks/api/ext/pie/v1.0') self.assertEqual(fox_ext.get('updated'), '2011-01-22T13:25:27-06:00') self.assertEqual(fox_ext.findtext('{0}description'.format(NS)), 'The Fox In Socks Extension.') xmlutil.validate_schema(root, 'extensions') def test_get_extension_xml(self): app = compute.APIRouter(init_only=('servers', 'flavors', 'extensions')) request = webob.Request.blank("/fake/extensions/FOXNSOX") request.accept = "application/xml" response = request.get_response(app) self.assertEqual(200, response.status_int) xml = response.body root = etree.XML(xml) self.assertEqual(root.tag.split('extension')[0], NS) self.assertEqual(root.get('alias'), 'FOXNSOX') self.assertEqual(root.get('name'), 'Fox In Socks') self.assertEqual(root.get('namespace'), 'http://www.fox.in.socks/api/ext/pie/v1.0') self.assertEqual(root.get('updated'), '2011-01-22T13:25:27-06:00') self.assertEqual(root.findtext('{0}description'.format(NS)), 'The Fox In Socks Extension.') xmlutil.validate_schema(root, 'extension') class ResourceExtensionTest(ExtensionTestCase): def test_no_extension_present(self): manager = StubExtensionManager(None) app = compute.APIRouter(manager) request = webob.Request.blank("/blah") response = request.get_response(app) self.assertEqual(404, response.status_int) def test_get_resources(self): res_ext = base_extensions.ResourceExtension('tweedles', StubController(response_body)) manager = StubExtensionManager(res_ext) app = compute.APIRouter(manager) request = webob.Request.blank("/fake/tweedles") response = request.get_response(app) self.assertEqual(200, response.status_int) self.assertEqual(response_body, response.body) def test_get_resources_with_controller(self): res_ext = base_extensions.ResourceExtension('tweedles', StubController(response_body)) manager = StubExtensionManager(res_ext) app = 
compute.APIRouter(manager) request = webob.Request.blank("/fake/tweedles") response = request.get_response(app) self.assertEqual(200, response.status_int) self.assertEqual(response_body, response.body) def test_bad_request(self): res_ext = base_extensions.ResourceExtension('tweedles', StubController(response_body)) manager = StubExtensionManager(res_ext) app = compute.APIRouter(manager) request = webob.Request.blank("/fake/tweedles") request.method = "POST" response = request.get_response(app) self.assertEqual(400, response.status_int) self.assertEqual('application/json', response.content_type) body = jsonutils.loads(response.body) expected = { "badRequest": { "message": "All aboard the fail train!", "code": 400 } } self.assertThat(expected, matchers.DictMatches(body)) def test_non_exist_resource(self): res_ext = base_extensions.ResourceExtension('tweedles', StubController(response_body)) manager = StubExtensionManager(res_ext) app = compute.APIRouter(manager) request = webob.Request.blank("/fake/tweedles/1") response = request.get_response(app) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) body = jsonutils.loads(response.body) expected = { "itemNotFound": { "message": "The resource could not be found.", "code": 404 } } self.assertThat(expected, matchers.DictMatches(body)) class InvalidExtension(object): alias = "THIRD" class ExtensionManagerTest(ExtensionTestCase): response_body = "Try to say this Mr. Knox, sir..." 
def test_get_resources(self): app = compute.APIRouter() request = webob.Request.blank("/fake/foxnsocks") response = request.get_response(app) self.assertEqual(200, response.status_int) self.assertEqual(response_body, response.body) def test_invalid_extensions(self): # Don't need the serialization middleware here because we're # not testing any serialization compute.APIRouter() ext_mgr = compute_extensions.ExtensionManager() ext_mgr.register(InvalidExtension()) self.assertTrue(ext_mgr.is_loaded('FOXNSOX')) self.assertFalse(ext_mgr.is_loaded('THIRD')) class ActionExtensionTest(ExtensionTestCase): def _send_server_action_request(self, url, body): app = compute.APIRouter(init_only=('servers',)) request = webob.Request.blank(url) request.method = 'POST' request.content_type = 'application/json' request.body = jsonutils.dumps(body) response = request.get_response(app) return response def test_extended_action(self): body = dict(add_tweedle=dict(name="test")) url = "/fake/servers/abcd/action" response = self._send_server_action_request(url, body) self.assertEqual(200, response.status_int) self.assertEqual("Tweedle Beetle Added.", response.body) body = dict(delete_tweedle=dict(name="test")) response = self._send_server_action_request(url, body) self.assertEqual(200, response.status_int) self.assertEqual("Tweedle Beetle Deleted.", response.body) def test_invalid_action(self): body = dict(blah=dict(name="test")) # Doesn't exist url = "/fake/servers/abcd/action" response = self._send_server_action_request(url, body) self.assertEqual(400, response.status_int) self.assertEqual('application/json', response.content_type) body = jsonutils.loads(response.body) expected = { "badRequest": { "message": "There is no such action: blah", "code": 400 } } self.assertThat(expected, matchers.DictMatches(body)) def test_non_exist_action(self): body = dict(blah=dict(name="test")) url = "/fake/fdsa/1/action" response = self._send_server_action_request(url, body) self.assertEqual(404, 
response.status_int) def test_failed_action(self): body = dict(fail=dict(name="test")) url = "/fake/servers/abcd/action" response = self._send_server_action_request(url, body) self.assertEqual(400, response.status_int) self.assertEqual('application/json', response.content_type) body = jsonutils.loads(response.body) expected = { "badRequest": { "message": "Tweedle fail", "code": 400 } } self.assertThat(expected, matchers.DictMatches(body)) class RequestExtensionTest(ExtensionTestCase): def test_get_resources_with_stub_mgr(self): class GooGoose(wsgi.Controller): @wsgi.extends def show(self, req, resp_obj, id): # only handle JSON responses resp_obj.obj['flavor']['googoose'] = req.GET.get('chewing') req_ext = base_extensions.ControllerExtension( StubControllerExtension(), 'flavors', GooGoose()) manager = StubExtensionManager(None, None, None, req_ext) app = fakes.wsgi_app(ext_mgr=manager) request = webob.Request.blank("/v2/fake/flavors/1?chewing=bluegoo") request.environ['api.version'] = '2' response = request.get_response(app) self.assertEqual(200, response.status_int) response_data = jsonutils.loads(response.body) self.assertEqual('bluegoo', response_data['flavor']['googoose']) def test_get_resources_with_mgr(self): app = fakes.wsgi_app(init_only=('flavors',)) request = webob.Request.blank("/v2/fake/flavors/1?chewing=newblue") request.environ['api.version'] = '2' response = request.get_response(app) self.assertEqual(200, response.status_int) response_data = jsonutils.loads(response.body) self.assertEqual('newblue', response_data['flavor']['googoose']) self.assertEqual("Pig Bands!", response_data['big_bands']) class ControllerExtensionTest(ExtensionTestCase): def test_controller_extension_early(self): controller = StubController(response_body) res_ext = base_extensions.ResourceExtension('tweedles', controller) ext_controller = StubEarlyExtensionController(extension_body) extension = StubControllerExtension() cont_ext = base_extensions.ControllerExtension(extension, 
'tweedles', ext_controller) manager = StubExtensionManager(resource_ext=res_ext, controller_ext=cont_ext) app = compute.APIRouter(manager) request = webob.Request.blank("/fake/tweedles") response = request.get_response(app) self.assertEqual(200, response.status_int) self.assertEqual(extension_body, response.body) def test_controller_extension_late(self): # Need a dict for the body to convert to a ResponseObject controller = StubController(dict(foo=response_body)) res_ext = base_extensions.ResourceExtension('tweedles', controller) ext_controller = StubLateExtensionController(extension_body) extension = StubControllerExtension() cont_ext = base_extensions.ControllerExtension(extension, 'tweedles', ext_controller) manager = StubExtensionManager(resource_ext=res_ext, controller_ext=cont_ext) app = compute.APIRouter(manager) request = webob.Request.blank("/fake/tweedles") response = request.get_response(app) self.assertEqual(200, response.status_int) self.assertEqual(extension_body, response.body) def test_controller_extension_late_inherited_resource(self): # Need a dict for the body to convert to a ResponseObject controller = StubController(dict(foo=response_body)) parent_ext = base_extensions.ResourceExtension('tweedles', controller) ext_controller = StubLateExtensionController(extension_body) extension = StubControllerExtension() cont_ext = base_extensions.ControllerExtension(extension, 'tweedles', ext_controller) manager = StubExtensionManager(resource_ext=parent_ext, controller_ext=cont_ext) child_ext = base_extensions.ResourceExtension('beetles', controller, inherits='tweedles') manager.extra_resource_ext = child_ext app = compute.APIRouter(manager) request = webob.Request.blank("/fake/beetles") response = request.get_response(app) self.assertEqual(200, response.status_int) self.assertEqual(extension_body, response.body) def test_controller_action_extension_early(self): controller = StubActionController(response_body) actions = dict(action='POST') res_ext = 
base_extensions.ResourceExtension('tweedles', controller, member_actions=actions) ext_controller = StubEarlyExtensionController(extension_body) extension = StubControllerExtension() cont_ext = base_extensions.ControllerExtension(extension, 'tweedles', ext_controller) manager = StubExtensionManager(resource_ext=res_ext, controller_ext=cont_ext) app = compute.APIRouter(manager) request = webob.Request.blank("/fake/tweedles/foo/action") request.method = 'POST' request.headers['Content-Type'] = 'application/json' request.body = jsonutils.dumps(dict(fooAction=True)) response = request.get_response(app) self.assertEqual(200, response.status_int) self.assertEqual(extension_body, response.body) def test_controller_action_extension_late(self): # Need a dict for the body to convert to a ResponseObject controller = StubActionController(dict(foo=response_body)) actions = dict(action='POST') res_ext = base_extensions.ResourceExtension('tweedles', controller, member_actions=actions) ext_controller = StubLateExtensionController(extension_body) extension = StubControllerExtension() cont_ext = base_extensions.ControllerExtension(extension, 'tweedles', ext_controller) manager = StubExtensionManager(resource_ext=res_ext, controller_ext=cont_ext) app = compute.APIRouter(manager) request = webob.Request.blank("/fake/tweedles/foo/action") request.method = 'POST' request.headers['Content-Type'] = 'application/json' request.body = jsonutils.dumps(dict(fooAction=True)) response = request.get_response(app) self.assertEqual(200, response.status_int) self.assertEqual(extension_body, response.body) class ExtensionsXMLSerializerTest(test.TestCase): def test_serialize_extension(self): serializer = base_extensions.ExtensionTemplate() data = {'extension': { 'name': 'ext1', 'namespace': 'http://docs.rack.com/servers/api/ext/pie/v1.0', 'alias': 'RS-PIE', 'updated': '2011-01-22T13:25:27-06:00', 'description': 'Adds the capability to share an image.', 'links': [{'rel': 'describedby', 'type': 
'application/pdf', 'href': 'http://docs.rack.com/servers/api/ext/cs.pdf'}, {'rel': 'describedby', 'type': 'application/vnd.sun.wadl+xml', 'href': 'http://docs.rack.com/servers/api/ext/cs.wadl'}]}} xml = serializer.serialize(data) root = etree.XML(xml) ext_dict = data['extension'] self.assertEqual(root.findtext('{0}description'.format(NS)), ext_dict['description']) for key in ['name', 'namespace', 'alias', 'updated']: self.assertEqual(root.get(key), ext_dict[key]) link_nodes = root.findall('{0}link'.format(ATOMNS)) self.assertEqual(len(link_nodes), 2) for i, link in enumerate(ext_dict['links']): for key, value in link.items(): self.assertEqual(link_nodes[i].get(key), value) xmlutil.validate_schema(root, 'extension') def test_serialize_extensions(self): serializer = base_extensions.ExtensionsTemplate() data = {"extensions": [{ "name": "Public Image Extension", "namespace": "http://foo.com/api/ext/pie/v1.0", "alias": "RS-PIE", "updated": "2011-01-22T13:25:27-06:00", "description": "Adds the capability to share an image.", "links": [{"rel": "describedby", "type": "application/pdf", "href": "http://foo.com/api/ext/cs-pie.pdf"}, {"rel": "describedby", "type": "application/vnd.sun.wadl+xml", "href": "http://foo.com/api/ext/cs-pie.wadl"}]}, {"name": "Cloud Block Storage", "namespace": "http://foo.com/api/ext/cbs/v1.0", "alias": "RS-CBS", "updated": "2011-01-12T11:22:33-06:00", "description": "Allows mounting cloud block storage.", "links": [{"rel": "describedby", "type": "application/pdf", "href": "http://foo.com/api/ext/cs-cbs.pdf"}, {"rel": "describedby", "type": "application/vnd.sun.wadl+xml", "href": "http://foo.com/api/ext/cs-cbs.wadl"}]}]} xml = serializer.serialize(data) root = etree.XML(xml) ext_elems = root.findall('{0}extension'.format(NS)) self.assertEqual(len(ext_elems), 2) for i, ext_elem in enumerate(ext_elems): ext_dict = data['extensions'][i] self.assertEqual(ext_elem.findtext('{0}description'.format(NS)), ext_dict['description']) for key in ['name', 
'namespace', 'alias', 'updated']: self.assertEqual(ext_elem.get(key), ext_dict[key]) link_nodes = ext_elem.findall('{0}link'.format(ATOMNS)) self.assertEqual(len(link_nodes), 2) for i, link in enumerate(ext_dict['links']): for key, value in link.items(): self.assertEqual(link_nodes[i].get(key), value) xmlutil.validate_schema(root, 'extensions') class ExtensionControllerIdFormatTest(test.TestCase): def _bounce_id(self, test_id): class BounceController(object): def show(self, req, id): return id res_ext = base_extensions.ResourceExtension('bounce', BounceController()) manager = StubExtensionManager(res_ext) app = compute.APIRouter(manager) request = webob.Request.blank("/fake/bounce/%s" % test_id) response = request.get_response(app) return response.body def test_id_with_xml_format(self): result = self._bounce_id('foo.xml') self.assertEqual(result, 'foo') def test_id_with_json_format(self): result = self._bounce_id('foo.json') self.assertEqual(result, 'foo') def test_id_with_bad_format(self): result = self._bounce_id('foo.bad') self.assertEqual(result, 'foo.bad')
apache-2.0
azukov/py-orbit
py/orbit/errors/__init__.py
2
1880
## \namespace orbit::errors ## \brief The classes and functions for errors ## ## Classes: ## ErrorNode - error node for TEAPOT lattices ## ## Functions: ## addErrorNode - function to add one error ## node to the lattice from orbit.errors.ErrorNode import coorddisplacement from orbit.errors.ErrorNode import longdisplacement from orbit.errors.ErrorNode import straightrotationxy from orbit.errors.ErrorNode import straightrotationxsi from orbit.errors.ErrorNode import straightrotationxsf from orbit.errors.ErrorNode import straightrotationysi from orbit.errors.ErrorNode import straightrotationysf from orbit.errors.ErrorNode import bendfieldi from orbit.errors.ErrorNode import bendfieldf from orbit.errors.ErrorNode import benddisplacementxi from orbit.errors.ErrorNode import benddisplacementxf from orbit.errors.ErrorNode import benddisplacementyi from orbit.errors.ErrorNode import benddisplacementyf from orbit.errors.ErrorNode import benddisplacementli from orbit.errors.ErrorNode import benddisplacementlf from orbit.errors.ErrorNode import rotationi from orbit.errors.ErrorNode import rotationf from orbit.errors.ErrorNode import dipolekicker from orbit.errors.ErrorNode import dipolekickerosc from orbit.errors.ErrorNode import quadkicker from orbit.errors.ErrorNode import quadkickerosc from orbit.errors.ErrorNode import AddErrorNode from orbit.errors.ErrorNode import AddErrorSet from orbit.errors.ErrorLatticeModifications import addErrorNode from orbit.errors.ErrorLatticeModifications import addErrorNodeAsChild from orbit.errors.ErrorLatticeModifications import addErrorNodeAsChild_I from orbit.errors.ErrorLatticeModifications import addErrorNodeAsChild_F __all__ = [] __all__.append("") __all__.append("addErrorNode") __all__.append("addErrorNodeAsChild") __all__.append("addErrorNodeAsChild_I") __all__.append("addErrorNodeAsChild_F")
mit
lavvy/osmc
package/mediacenter-skin-osmc/files/usr/share/kodi/addons/script.module.unidecode/lib/unidecode/x0ca.py
253
5007
data = ( 'jjael', # 0x00 'jjaelg', # 0x01 'jjaelm', # 0x02 'jjaelb', # 0x03 'jjaels', # 0x04 'jjaelt', # 0x05 'jjaelp', # 0x06 'jjaelh', # 0x07 'jjaem', # 0x08 'jjaeb', # 0x09 'jjaebs', # 0x0a 'jjaes', # 0x0b 'jjaess', # 0x0c 'jjaeng', # 0x0d 'jjaej', # 0x0e 'jjaec', # 0x0f 'jjaek', # 0x10 'jjaet', # 0x11 'jjaep', # 0x12 'jjaeh', # 0x13 'jjya', # 0x14 'jjyag', # 0x15 'jjyagg', # 0x16 'jjyags', # 0x17 'jjyan', # 0x18 'jjyanj', # 0x19 'jjyanh', # 0x1a 'jjyad', # 0x1b 'jjyal', # 0x1c 'jjyalg', # 0x1d 'jjyalm', # 0x1e 'jjyalb', # 0x1f 'jjyals', # 0x20 'jjyalt', # 0x21 'jjyalp', # 0x22 'jjyalh', # 0x23 'jjyam', # 0x24 'jjyab', # 0x25 'jjyabs', # 0x26 'jjyas', # 0x27 'jjyass', # 0x28 'jjyang', # 0x29 'jjyaj', # 0x2a 'jjyac', # 0x2b 'jjyak', # 0x2c 'jjyat', # 0x2d 'jjyap', # 0x2e 'jjyah', # 0x2f 'jjyae', # 0x30 'jjyaeg', # 0x31 'jjyaegg', # 0x32 'jjyaegs', # 0x33 'jjyaen', # 0x34 'jjyaenj', # 0x35 'jjyaenh', # 0x36 'jjyaed', # 0x37 'jjyael', # 0x38 'jjyaelg', # 0x39 'jjyaelm', # 0x3a 'jjyaelb', # 0x3b 'jjyaels', # 0x3c 'jjyaelt', # 0x3d 'jjyaelp', # 0x3e 'jjyaelh', # 0x3f 'jjyaem', # 0x40 'jjyaeb', # 0x41 'jjyaebs', # 0x42 'jjyaes', # 0x43 'jjyaess', # 0x44 'jjyaeng', # 0x45 'jjyaej', # 0x46 'jjyaec', # 0x47 'jjyaek', # 0x48 'jjyaet', # 0x49 'jjyaep', # 0x4a 'jjyaeh', # 0x4b 'jjeo', # 0x4c 'jjeog', # 0x4d 'jjeogg', # 0x4e 'jjeogs', # 0x4f 'jjeon', # 0x50 'jjeonj', # 0x51 'jjeonh', # 0x52 'jjeod', # 0x53 'jjeol', # 0x54 'jjeolg', # 0x55 'jjeolm', # 0x56 'jjeolb', # 0x57 'jjeols', # 0x58 'jjeolt', # 0x59 'jjeolp', # 0x5a 'jjeolh', # 0x5b 'jjeom', # 0x5c 'jjeob', # 0x5d 'jjeobs', # 0x5e 'jjeos', # 0x5f 'jjeoss', # 0x60 'jjeong', # 0x61 'jjeoj', # 0x62 'jjeoc', # 0x63 'jjeok', # 0x64 'jjeot', # 0x65 'jjeop', # 0x66 'jjeoh', # 0x67 'jje', # 0x68 'jjeg', # 0x69 'jjegg', # 0x6a 'jjegs', # 0x6b 'jjen', # 0x6c 'jjenj', # 0x6d 'jjenh', # 0x6e 'jjed', # 0x6f 'jjel', # 0x70 'jjelg', # 0x71 'jjelm', # 0x72 'jjelb', # 0x73 'jjels', # 0x74 'jjelt', # 0x75 'jjelp', # 0x76 'jjelh', # 0x77 
'jjem', # 0x78 'jjeb', # 0x79 'jjebs', # 0x7a 'jjes', # 0x7b 'jjess', # 0x7c 'jjeng', # 0x7d 'jjej', # 0x7e 'jjec', # 0x7f 'jjek', # 0x80 'jjet', # 0x81 'jjep', # 0x82 'jjeh', # 0x83 'jjyeo', # 0x84 'jjyeog', # 0x85 'jjyeogg', # 0x86 'jjyeogs', # 0x87 'jjyeon', # 0x88 'jjyeonj', # 0x89 'jjyeonh', # 0x8a 'jjyeod', # 0x8b 'jjyeol', # 0x8c 'jjyeolg', # 0x8d 'jjyeolm', # 0x8e 'jjyeolb', # 0x8f 'jjyeols', # 0x90 'jjyeolt', # 0x91 'jjyeolp', # 0x92 'jjyeolh', # 0x93 'jjyeom', # 0x94 'jjyeob', # 0x95 'jjyeobs', # 0x96 'jjyeos', # 0x97 'jjyeoss', # 0x98 'jjyeong', # 0x99 'jjyeoj', # 0x9a 'jjyeoc', # 0x9b 'jjyeok', # 0x9c 'jjyeot', # 0x9d 'jjyeop', # 0x9e 'jjyeoh', # 0x9f 'jjye', # 0xa0 'jjyeg', # 0xa1 'jjyegg', # 0xa2 'jjyegs', # 0xa3 'jjyen', # 0xa4 'jjyenj', # 0xa5 'jjyenh', # 0xa6 'jjyed', # 0xa7 'jjyel', # 0xa8 'jjyelg', # 0xa9 'jjyelm', # 0xaa 'jjyelb', # 0xab 'jjyels', # 0xac 'jjyelt', # 0xad 'jjyelp', # 0xae 'jjyelh', # 0xaf 'jjyem', # 0xb0 'jjyeb', # 0xb1 'jjyebs', # 0xb2 'jjyes', # 0xb3 'jjyess', # 0xb4 'jjyeng', # 0xb5 'jjyej', # 0xb6 'jjyec', # 0xb7 'jjyek', # 0xb8 'jjyet', # 0xb9 'jjyep', # 0xba 'jjyeh', # 0xbb 'jjo', # 0xbc 'jjog', # 0xbd 'jjogg', # 0xbe 'jjogs', # 0xbf 'jjon', # 0xc0 'jjonj', # 0xc1 'jjonh', # 0xc2 'jjod', # 0xc3 'jjol', # 0xc4 'jjolg', # 0xc5 'jjolm', # 0xc6 'jjolb', # 0xc7 'jjols', # 0xc8 'jjolt', # 0xc9 'jjolp', # 0xca 'jjolh', # 0xcb 'jjom', # 0xcc 'jjob', # 0xcd 'jjobs', # 0xce 'jjos', # 0xcf 'jjoss', # 0xd0 'jjong', # 0xd1 'jjoj', # 0xd2 'jjoc', # 0xd3 'jjok', # 0xd4 'jjot', # 0xd5 'jjop', # 0xd6 'jjoh', # 0xd7 'jjwa', # 0xd8 'jjwag', # 0xd9 'jjwagg', # 0xda 'jjwags', # 0xdb 'jjwan', # 0xdc 'jjwanj', # 0xdd 'jjwanh', # 0xde 'jjwad', # 0xdf 'jjwal', # 0xe0 'jjwalg', # 0xe1 'jjwalm', # 0xe2 'jjwalb', # 0xe3 'jjwals', # 0xe4 'jjwalt', # 0xe5 'jjwalp', # 0xe6 'jjwalh', # 0xe7 'jjwam', # 0xe8 'jjwab', # 0xe9 'jjwabs', # 0xea 'jjwas', # 0xeb 'jjwass', # 0xec 'jjwang', # 0xed 'jjwaj', # 0xee 'jjwac', # 0xef 'jjwak', # 0xf0 'jjwat', # 0xf1 
'jjwap', # 0xf2 'jjwah', # 0xf3 'jjwae', # 0xf4 'jjwaeg', # 0xf5 'jjwaegg', # 0xf6 'jjwaegs', # 0xf7 'jjwaen', # 0xf8 'jjwaenj', # 0xf9 'jjwaenh', # 0xfa 'jjwaed', # 0xfb 'jjwael', # 0xfc 'jjwaelg', # 0xfd 'jjwaelm', # 0xfe 'jjwaelb', # 0xff )
gpl-2.0
googleapis/googleapis-gen
google/cloud/dialogflow/cx/v3beta1/dialogflow-cx-v3beta1-py/docs/conf.py
2
12524
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # # google-cloud-dialogflowcx documentation build configuration file # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import shlex # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath("..")) __version__ = "0.1.0" # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = "1.6.3" # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ "sphinx.ext.autodoc", "sphinx.ext.autosummary", "sphinx.ext.intersphinx", "sphinx.ext.coverage", "sphinx.ext.napoleon", "sphinx.ext.todo", "sphinx.ext.viewcode", ] # autodoc/autosummary flags autoclass_content = "both" autodoc_default_flags = ["members"] autosummary_generate = True # Add any paths that contain templates here, relative to this directory. 
templates_path = ["_templates"] # Allow markdown includes (so releases.md can include CHANGLEOG.md) # http://www.sphinx-doc.org/en/master/markdown.html source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: source_suffix = [".rst", ".md"] # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = "index" # General information about the project. project = u"google-cloud-dialogflowcx" copyright = u"2020, Google, LLC" author = u"Google APIs" # TODO: autogenerate this bit # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The full version, including alpha/beta/rc tags. release = __version__ # The short X.Y version. version = ".".join(release.split(".")[0:2]) # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ["_build"] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). 
# add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = "alabaster" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { "description": "Google Cloud Client Libraries for Python", "github_user": "googleapis", "github_repo": "google-cloud-python", "github_banner": True, "font_family": "'Roboto', Georgia, sans", "head_font_family": "'Roboto', Georgia, serif", "code_font_family": "'Roboto Mono', 'Consolas', monospace", } # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. 
They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Language to be used for generating the HTML full-text search index. 
# Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' # html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value # html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. # html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = "google-cloud-dialogflowcx-doc" # -- Options for warnings ------------------------------------------------------ suppress_warnings = [ # Temporarily suppress this to avoid "more than one target found for # cross-reference" warning, which are intractable for us to avoid while in # a mono-repo. # See https://github.com/sphinx-doc/sphinx/blob # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 "ref.python" ] # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', # Latex figure (float) alignment # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ( master_doc, "google-cloud-dialogflowcx.tex", u"google-cloud-dialogflowcx Documentation", author, "manual", ) ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. 
# latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ( master_doc, "google-cloud-dialogflowcx", u"Google Cloud Dialogflowcx Documentation", [author], 1, ) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ( master_doc, "google-cloud-dialogflowcx", u"google-cloud-dialogflowcx Documentation", author, "google-cloud-dialogflowcx", "GAPIC library for Google Cloud Dialogflowcx API", "APIs", ) ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # Example configuration for intersphinx: refer to the Python standard library. 
intersphinx_mapping = { "python": ("http://python.readthedocs.org/en/latest/", None), "gax": ("https://gax-python.readthedocs.org/en/latest/", None), "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), "grpc": ("https://grpc.io/grpc/python/", None), "requests": ("http://requests.kennethreitz.org/en/stable/", None), "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None), "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), } # Napoleon settings napoleon_google_docstring = True napoleon_numpy_docstring = True napoleon_include_private_with_doc = False napoleon_include_special_with_doc = True napoleon_use_admonition_for_examples = False napoleon_use_admonition_for_notes = False napoleon_use_admonition_for_references = False napoleon_use_ivar = False napoleon_use_param = True napoleon_use_rtype = True
apache-2.0
azverkan/scons
src/engine/SCons/Tool/RCS.py
5
2246
"""SCons.Tool.RCS.py Tool-specific initialization for RCS. There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Tool.Tool() selection method. """ # __COPYRIGHT__ # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__" import SCons.Action import SCons.Builder import SCons.Util def generate(env): """Add a Builder factory function and construction variables for RCS to an Environment.""" def RCSFactory(env=env): """ """ import SCons.Warnings as W W.warn(W.DeprecatedSourceCodeWarning, """The RCS() factory is deprecated and there is no replacement.""") act = SCons.Action.Action('$RCS_COCOM', '$RCS_COCOMSTR') return SCons.Builder.Builder(action = act, env = env) #setattr(env, 'RCS', RCSFactory) env.RCS = RCSFactory env['RCS'] = 'rcs' env['RCS_CO'] = 'co' env['RCS_COFLAGS'] = SCons.Util.CLVar('') env['RCS_COCOM'] = '$RCS_CO $RCS_COFLAGS $TARGET' def exists(env): return env.Detect('rcs') # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
mit
andymckay/django
tests/regressiontests/templates/nodelist.py
28
2558
from django.template import VariableNode, Context
from django.template.loader import get_template_from_string
from django.utils.unittest import TestCase
from django.test.utils import override_settings


class NodelistTest(TestCase):
    """Regression tests for NodeList.get_nodes_by_type: it must recurse
    into the child nodelists of block tags rather than only scanning the
    top-level nodes, so the single {{ a }} inside each tag is found."""

    def test_for(self):
        # {{ a }} lives in the {% for %} tag's child nodelist.
        source = '{% for i in 1 %}{{ a }}{% endfor %}'
        template = get_template_from_string(source)
        vars = template.nodelist.get_nodes_by_type(VariableNode)
        self.assertEqual(len(vars), 1)

    def test_if(self):
        # {{ a }} lives in the {% if %} tag's child nodelist.
        source = '{% if x %}{{ a }}{% endif %}'
        template = get_template_from_string(source)
        vars = template.nodelist.get_nodes_by_type(VariableNode)
        self.assertEqual(len(vars), 1)

    def test_ifequal(self):
        # {{ a }} lives in the {% ifequal %} tag's child nodelist.
        source = '{% ifequal x y %}{{ a }}{% endifequal %}'
        template = get_template_from_string(source)
        vars = template.nodelist.get_nodes_by_type(VariableNode)
        self.assertEqual(len(vars), 1)

    def test_ifchanged(self):
        # {{ a }} lives in the {% ifchanged %} tag's child nodelist.
        source = '{% ifchanged x %}{{ a }}{% endifchanged %}'
        template = get_template_from_string(source)
        vars = template.nodelist.get_nodes_by_type(VariableNode)
        self.assertEqual(len(vars), 1)


class ErrorIndexTest(TestCase):
    """
    Checks whether index of error is calculated correctly in template debugger
    in for loops.

    Refs ticket #5831
    """
    @override_settings(DEBUG=True, TEMPLATE_DEBUG = True)
    def test_correct_exception_index(self):
        # Each pair maps a template to the (start, end) character offsets of
        # the tag expected to be blamed for the failure; {% badsimpletag %}
        # raises in the innermost loop that actually iterates.
        tests = [
            ('{% load bad_tag %}{% for i in range %}{% badsimpletag %}{% endfor %}', (38, 56)),
            ('{% load bad_tag %}{% for i in range %}{% for j in range %}{% badsimpletag %}{% endfor %}{% endfor %}', (58, 76)),
            ('{% load bad_tag %}{% for i in range %}{% badsimpletag %}{% for j in range %}Hello{% endfor %}{% endfor %}', (38, 56)),
            ('{% load bad_tag %}{% for i in range %}{% for j in five %}{% badsimpletag %}{% endfor %}{% endfor %}', (38, 57)),
            ('{% load bad_tag %}{% for j in five %}{% badsimpletag %}{% endfor %}', (18, 37)),
        ]
        context = Context({
            'range': range(5),
            'five': 5,
        })
        for source, expected_error_source_index in tests:
            template = get_template_from_string(source)
            try:
                template.render(context)
            except (RuntimeError, TypeError), e:
                # NOTE(review): django_template_source appears to be attached
                # by the debug node renderer, with element [1] being the
                # (start, end) span of the failing tag -- confirm against
                # django.template.debug.
                error_source_index = e.django_template_source[1]
                self.assertEqual(error_source_index, expected_error_source_index)
bsd-3-clause
windskyer/nova
nova/virt/libvirt/volume/hgst.py
33
2109
# Copyright 2015 HGST # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from os_brick.initiator import connector from oslo_config import cfg from nova import utils from nova.virt.libvirt.volume import volume as libvirt_volume CONF = cfg.CONF CONF.import_opt('num_iscsi_scan_tries', 'nova.virt.libvirt.volume.iscsi', group='libvirt') class LibvirtHGSTVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver): """Driver to attach HGST volumes to libvirt.""" def __init__(self, connection): super(LibvirtHGSTVolumeDriver, self).__init__(connection, is_block_dev=True) self.connector = connector.InitiatorConnector.factory( 'HGST', utils.get_root_helper(), device_scan_attempts=CONF.libvirt.num_iscsi_scan_tries) def get_config(self, connection_info, disk_info): """Returns xml for libvirt.""" conf = super(LibvirtHGSTVolumeDriver, self).get_config(connection_info, disk_info) conf.source_type = "block" conf.source_path = connection_info['data']['device_path'] return conf def connect_volume(self, connection_info, mount_device): device_info = self.connector.connect_volume(connection_info['data']) connection_info['data']['device_path'] = device_info['path'] def disconnect_volume(self, connection_info, disk_dev): self.connector.disconnect_volume(connection_info['data'], None) super(LibvirtHGSTVolumeDriver, self).disconnect_volume(connection_info, disk_dev)
gpl-2.0
lanselin/pysal
pysal/contrib/spint/tests/test_vec_SA.py
5
2040
"""
Tests for analysis of spatial autocorrelation within vectors
"""

__author__ = 'Taylor Oshan tayoshan@gmail.com'

import unittest
import numpy as np
np.random.seed(1)
import pysal
from pysal.contrib.spint.vec_SA import VecMoran
from pysal.weights import DistanceBand


class TestVecMoran(unittest.TestCase):
    """Tests VecMoran class

    Checks vector-based Moran's I and its pseudo p-value for both the
    origin- and destination-focused cases under randomization schemes
    'A' and 'B'.
    """

    def setUp(self):
        # Columns: id, origin x, origin y, destination x, destination y.
        self.vecs = np.array([[1, 55, 60, 100, 500],
                              [2, 60, 55, 105, 501],
                              [3, 500, 55, 155, 500],
                              [4, 505, 60, 160, 500],
                              [5, 105, 950, 105, 500],
                              [6, 155, 950, 155, 499]])
        self.origins = self.vecs[:, 1:3]
        self.dests = self.vecs[:, 3:5]

    # NOTE: assertAlmostEqual replaces the deprecated assertAlmostEquals
    # alias throughout (the alias was removed in Python 3.12).

    def test_origin_focused_A(self):
        """Origin-focused vector Moran's I, randomization scheme 'A'."""
        wo = DistanceBand(self.origins, threshold=9999, alpha=-1.5,
                          binary=False)
        vmo = VecMoran(self.vecs, wo, focus='origin', rand='A')
        self.assertAlmostEqual(vmo.I, 0.645944594367)
        self.assertAlmostEqual(vmo.p_z_sim, 0.099549579548)

    def test_dest_focused_A(self):
        """Destination-focused vector Moran's I, randomization scheme 'A'."""
        wd = DistanceBand(self.dests, threshold=9999, alpha=-1.5,
                          binary=False)
        vmd = VecMoran(self.vecs, wd, focus='destination', rand='A')
        self.assertAlmostEqual(vmd.I, -0.764603695022)
        self.assertAlmostEqual(vmd.p_z_sim, 0.149472673677)

    def test_origin_focused_B(self):
        """Origin-focused vector Moran's I, randomization scheme 'B'."""
        wo = DistanceBand(self.origins, threshold=9999, alpha=-1.5,
                          binary=False)
        vmo = VecMoran(self.vecs, wo, focus='origin', rand='B')
        self.assertAlmostEqual(vmo.I, 0.645944594367)
        self.assertAlmostEqual(vmo.p_z_sim, 0.071427063787951814)

    def test_dest_focused_B(self):
        """Destination-focused vector Moran's I, randomization scheme 'B'."""
        wd = DistanceBand(self.dests, threshold=9999, alpha=-1.5,
                          binary=False)
        vmd = VecMoran(self.vecs, wd, focus='destination', rand='B')
        self.assertAlmostEqual(vmd.I, -0.764603695022)
        self.assertAlmostEqual(vmd.p_z_sim, 0.086894261015806051)


if __name__ == '__main__':
    unittest.main()
bsd-3-clause
gundalow/ansible
lib/ansible/module_utils/urls.py
19
77426
# This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible # still belong to the author of the module, and may assign their own license # to the complete work. # # Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013 # Copyright (c), Toshio Kuratomi <tkuratomi@ansible.com>, 2015 # # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) # # The match_hostname function and supporting code is under the terms and # conditions of the Python Software Foundation License. They were taken from # the Python3 standard library and adapted for use in Python2. See comments in the # source for which code precisely is under this License. # # PSF License (see licenses/PSF-license.txt or https://opensource.org/licenses/Python-2.0) ''' The **urls** utils module offers a replacement for the urllib2 python library. urllib2 is the python stdlib way to retrieve files from the Internet but it lacks some security features (around verifying SSL certificates) that users should care about in most situations. Using the functions in this module corrects deficiencies in the urllib2 module wherever possible. There are also third-party libraries (for instance, requests) which can be used to replace urllib2 with a more secure library. However, all third party libraries require that the library be installed on the managed machine. That is an extra step for users making use of a module. If possible, avoid third party libraries by using this code instead. 
''' from __future__ import (absolute_import, division, print_function) __metaclass__ = type import atexit import base64 import email.mime.multipart import email.mime.nonmultipart import email.mime.application import email.parser import email.utils import functools import mimetypes import netrc import os import platform import re import socket import sys import tempfile import traceback from contextlib import contextmanager try: import email.policy except ImportError: # Py2 import email.generator try: import httplib except ImportError: # Python 3 import http.client as httplib import ansible.module_utils.six.moves.http_cookiejar as cookiejar import ansible.module_utils.six.moves.urllib.request as urllib_request import ansible.module_utils.six.moves.urllib.error as urllib_error from ansible.module_utils.common.collections import Mapping from ansible.module_utils.six import PY3, string_types from ansible.module_utils.six.moves import cStringIO from ansible.module_utils.basic import get_distribution, missing_required_lib from ansible.module_utils._text import to_bytes, to_native, to_text try: # python3 import urllib.request as urllib_request from urllib.request import AbstractHTTPHandler, BaseHandler except ImportError: # python2 import urllib2 as urllib_request from urllib2 import AbstractHTTPHandler, BaseHandler urllib_request.HTTPRedirectHandler.http_error_308 = urllib_request.HTTPRedirectHandler.http_error_307 try: from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse HAS_URLPARSE = True except Exception: HAS_URLPARSE = False try: import ssl HAS_SSL = True except Exception: HAS_SSL = False try: # SNI Handling needs python2.7.9's SSLContext from ssl import create_default_context, SSLContext HAS_SSLCONTEXT = True except ImportError: HAS_SSLCONTEXT = False # SNI Handling for python < 2.7.9 with urllib3 support try: # urllib3>=1.15 HAS_URLLIB3_SSL_WRAP_SOCKET = False try: from urllib3.contrib.pyopenssl import PyOpenSSLContext except ImportError: 
from requests.packages.urllib3.contrib.pyopenssl import PyOpenSSLContext HAS_URLLIB3_PYOPENSSLCONTEXT = True except ImportError: # urllib3<1.15,>=1.6 HAS_URLLIB3_PYOPENSSLCONTEXT = False try: try: from urllib3.contrib.pyopenssl import ssl_wrap_socket except ImportError: from requests.packages.urllib3.contrib.pyopenssl import ssl_wrap_socket HAS_URLLIB3_SSL_WRAP_SOCKET = True except ImportError: pass # Select a protocol that includes all secure tls protocols # Exclude insecure ssl protocols if possible if HAS_SSL: # If we can't find extra tls methods, ssl.PROTOCOL_TLSv1 is sufficient PROTOCOL = ssl.PROTOCOL_TLSv1 if not HAS_SSLCONTEXT and HAS_SSL: try: import ctypes import ctypes.util except ImportError: # python 2.4 (likely rhel5 which doesn't have tls1.1 support in its openssl) pass else: libssl_name = ctypes.util.find_library('ssl') libssl = ctypes.CDLL(libssl_name) for method in ('TLSv1_1_method', 'TLSv1_2_method'): try: libssl[method] # Found something - we'll let openssl autonegotiate and hope # the server has disabled sslv2 and 3. best we can do. 
PROTOCOL = ssl.PROTOCOL_SSLv23 break except AttributeError: pass del libssl # The following makes it easier for us to script updates of the bundled backports.ssl_match_hostname # The bundled backports.ssl_match_hostname should really be moved into its own file for processing _BUNDLED_METADATA = {"pypi_name": "backports.ssl_match_hostname", "version": "3.7.0.1"} LOADED_VERIFY_LOCATIONS = set() HAS_MATCH_HOSTNAME = True try: from ssl import match_hostname, CertificateError except ImportError: try: from backports.ssl_match_hostname import match_hostname, CertificateError except ImportError: HAS_MATCH_HOSTNAME = False HAS_CRYPTOGRAPHY = True try: from cryptography import x509 from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import hashes from cryptography.exceptions import UnsupportedAlgorithm except ImportError: HAS_CRYPTOGRAPHY = False # Old import for GSSAPI authentication, this is not used in urls.py but kept for backwards compatibility. try: import urllib_gssapi HAS_GSSAPI = True except ImportError: HAS_GSSAPI = False GSSAPI_IMP_ERR = None try: import gssapi class HTTPGSSAPIAuthHandler(BaseHandler): """ Handles Negotiate/Kerberos support through the gssapi library. """ AUTH_HEADER_PATTERN = re.compile(r'(?:.*)\s*(Negotiate|Kerberos)\s*([^,]*),?', re.I) handler_order = 480 # Handle before Digest authentication def __init__(self, username=None, password=None): self.username = username self.password = password self._context = None def get_auth_value(self, headers): auth_match = self.AUTH_HEADER_PATTERN.search(headers.get('www-authenticate', '')) if auth_match: return auth_match.group(1), base64.b64decode(auth_match.group(2)) def http_error_401(self, req, fp, code, msg, headers): # If we've already attempted the auth and we've reached this again then there was a failure. 
if self._context: return parsed = generic_urlparse(urlparse(req.get_full_url())) auth_header = self.get_auth_value(headers) if not auth_header: return auth_protocol, in_token = auth_header username = None if self.username: username = gssapi.Name(self.username, name_type=gssapi.NameType.user) if username and self.password: if not hasattr(gssapi.raw, 'acquire_cred_with_password'): raise NotImplementedError("Platform GSSAPI library does not support " "gss_acquire_cred_with_password, cannot acquire GSSAPI credential with " "explicit username and password.") b_password = to_bytes(self.password, errors='surrogate_or_strict') cred = gssapi.raw.acquire_cred_with_password(username, b_password, usage='initiate').creds else: cred = gssapi.Credentials(name=username, usage='initiate') # Get the peer certificate for the channel binding token if possible (HTTPS). A bug on macOS causes the # authentication to fail when the CBT is present. Just skip that platform. cbt = None cert = getpeercert(fp, True) if cert and platform.system() != 'Darwin': cert_hash = get_channel_binding_cert_hash(cert) if cert_hash: cbt = gssapi.raw.ChannelBindings(application_data=b"tls-server-end-point:" + cert_hash) # TODO: We could add another option that is set to include the port in the SPN if desired in the future. 
target = gssapi.Name("HTTP@%s" % parsed['hostname'], gssapi.NameType.hostbased_service) self._context = gssapi.SecurityContext(usage="initiate", name=target, creds=cred, channel_bindings=cbt) resp = None while not self._context.complete: out_token = self._context.step(in_token) if not out_token: break auth_header = '%s %s' % (auth_protocol, to_native(base64.b64encode(out_token))) req.add_unredirected_header('Authorization', auth_header) resp = self.parent.open(req) # The response could contain a token that the client uses to validate the server auth_header = self.get_auth_value(resp.headers) if not auth_header: break in_token = auth_header[1] return resp except ImportError: GSSAPI_IMP_ERR = traceback.format_exc() HTTPGSSAPIAuthHandler = None if not HAS_MATCH_HOSTNAME: # The following block of code is under the terms and conditions of the # Python Software Foundation License """The match_hostname() function from Python 3.4, essential when using SSL.""" try: # Divergence: Python-3.7+'s _ssl has this exception type but older Pythons do not from _ssl import SSLCertVerificationError CertificateError = SSLCertVerificationError except ImportError: class CertificateError(ValueError): pass def _dnsname_match(dn, hostname): """Matching according to RFC 6125, section 6.4.3 - Hostnames are compared lower case. - For IDNA, both dn and hostname must be encoded as IDN A-label (ACE). - Partial wildcards like 'www*.example.org', multiple wildcards, sole wildcard or wildcards in labels other then the left-most label are not supported and a CertificateError is raised. - A wildcard must match at least one character. 
""" if not dn: return False wildcards = dn.count('*') # speed up common case w/o wildcards if not wildcards: return dn.lower() == hostname.lower() if wildcards > 1: # Divergence .format() to percent formatting for Python < 2.6 raise CertificateError( "too many wildcards in certificate DNS name: %s" % repr(dn)) dn_leftmost, sep, dn_remainder = dn.partition('.') if '*' in dn_remainder: # Only match wildcard in leftmost segment. # Divergence .format() to percent formatting for Python < 2.6 raise CertificateError( "wildcard can only be present in the leftmost label: " "%s." % repr(dn)) if not sep: # no right side # Divergence .format() to percent formatting for Python < 2.6 raise CertificateError( "sole wildcard without additional labels are not support: " "%s." % repr(dn)) if dn_leftmost != '*': # no partial wildcard matching # Divergence .format() to percent formatting for Python < 2.6 raise CertificateError( "partial wildcards in leftmost label are not supported: " "%s." % repr(dn)) hostname_leftmost, sep, hostname_remainder = hostname.partition('.') if not hostname_leftmost or not sep: # wildcard must match at least one char return False return dn_remainder.lower() == hostname_remainder.lower() def _inet_paton(ipname): """Try to convert an IP address to packed binary form Supports IPv4 addresses on all platforms and IPv6 on platforms with IPv6 support. """ # inet_aton() also accepts strings like '1' # Divergence: We make sure we have native string type for all python versions try: b_ipname = to_bytes(ipname, errors='strict') except UnicodeError: raise ValueError("%s must be an all-ascii string." % repr(ipname)) # Set ipname in native string format if sys.version_info < (3,): n_ipname = b_ipname else: n_ipname = ipname if n_ipname.count('.') == 3: try: return socket.inet_aton(n_ipname) # Divergence: OSError on late python3. socket.error earlier. 
# Null bytes generate ValueError on python3(we want to raise # ValueError anyway), TypeError # earlier except (OSError, socket.error, TypeError): pass try: return socket.inet_pton(socket.AF_INET6, n_ipname) # Divergence: OSError on late python3. socket.error earlier. # Null bytes generate ValueError on python3(we want to raise # ValueError anyway), TypeError # earlier except (OSError, socket.error, TypeError): # Divergence .format() to percent formatting for Python < 2.6 raise ValueError("%s is neither an IPv4 nor an IP6 " "address." % repr(ipname)) except AttributeError: # AF_INET6 not available pass # Divergence .format() to percent formatting for Python < 2.6 raise ValueError("%s is not an IPv4 address." % repr(ipname)) def _ipaddress_match(ipname, host_ip): """Exact matching of IP addresses. RFC 6125 explicitly doesn't define an algorithm for this (section 1.7.2 - "Out of Scope"). """ # OpenSSL may add a trailing newline to a subjectAltName's IP address ip = _inet_paton(ipname.rstrip()) return ip == host_ip def match_hostname(cert, hostname): """Verify that *cert* (in decoded format as returned by SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 rules are followed. The function matches IP addresses rather than dNSNames if hostname is a valid ipaddress string. IPv4 addresses are supported on all platforms. IPv6 addresses are supported on platforms with IPv6 support (AF_INET6 and inet_pton). CertificateError is raised on failure. On success, the function returns nothing. """ if not cert: raise ValueError("empty or no certificate, match_hostname needs a " "SSL socket or SSL context with either " "CERT_OPTIONAL or CERT_REQUIRED") try: # Divergence: Deal with hostname as bytes host_ip = _inet_paton(to_text(hostname, errors='strict')) except UnicodeError: # Divergence: Deal with hostname as byte strings. 
# IP addresses should be all ascii, so we consider it not # an IP address if this fails host_ip = None except ValueError: # Not an IP address (common case) host_ip = None dnsnames = [] san = cert.get('subjectAltName', ()) for key, value in san: if key == 'DNS': if host_ip is None and _dnsname_match(value, hostname): return dnsnames.append(value) elif key == 'IP Address': if host_ip is not None and _ipaddress_match(value, host_ip): return dnsnames.append(value) if not dnsnames: # The subject is only checked when there is no dNSName entry # in subjectAltName for sub in cert.get('subject', ()): for key, value in sub: # XXX according to RFC 2818, the most specific Common Name # must be used. if key == 'commonName': if _dnsname_match(value, hostname): return dnsnames.append(value) if len(dnsnames) > 1: raise CertificateError("hostname %r doesn't match either of %s" % (hostname, ', '.join(map(repr, dnsnames)))) elif len(dnsnames) == 1: raise CertificateError("hostname %r doesn't match %r" % (hostname, dnsnames[0])) else: raise CertificateError("no appropriate commonName or subjectAltName fields were found") # End of Python Software Foundation Licensed code HAS_MATCH_HOSTNAME = True # This is a dummy cacert provided for macOS since you need at least 1 # ca cert, regardless of validity, for Python on macOS to use the # keychain functionality in OpenSSL for validating SSL certificates. 
# See: http://mercurial.selenic.com/wiki/CACertificates#Mac_OS_X_10.6_and_higher b_DUMMY_CA_CERT = b"""-----BEGIN CERTIFICATE----- MIICvDCCAiWgAwIBAgIJAO8E12S7/qEpMA0GCSqGSIb3DQEBBQUAMEkxCzAJBgNV BAYTAlVTMRcwFQYDVQQIEw5Ob3J0aCBDYXJvbGluYTEPMA0GA1UEBxMGRHVyaGFt MRAwDgYDVQQKEwdBbnNpYmxlMB4XDTE0MDMxODIyMDAyMloXDTI0MDMxNTIyMDAy MlowSTELMAkGA1UEBhMCVVMxFzAVBgNVBAgTDk5vcnRoIENhcm9saW5hMQ8wDQYD VQQHEwZEdXJoYW0xEDAOBgNVBAoTB0Fuc2libGUwgZ8wDQYJKoZIhvcNAQEBBQAD gY0AMIGJAoGBANtvpPq3IlNlRbCHhZAcP6WCzhc5RbsDqyh1zrkmLi0GwcQ3z/r9 gaWfQBYhHpobK2Tiq11TfraHeNB3/VfNImjZcGpN8Fl3MWwu7LfVkJy3gNNnxkA1 4Go0/LmIvRFHhbzgfuo9NFgjPmmab9eqXJceqZIlz2C8xA7EeG7ku0+vAgMBAAGj gaswgagwHQYDVR0OBBYEFPnN1nPRqNDXGlCqCvdZchRNi/FaMHkGA1UdIwRyMHCA FPnN1nPRqNDXGlCqCvdZchRNi/FaoU2kSzBJMQswCQYDVQQGEwJVUzEXMBUGA1UE CBMOTm9ydGggQ2Fyb2xpbmExDzANBgNVBAcTBkR1cmhhbTEQMA4GA1UEChMHQW5z aWJsZYIJAO8E12S7/qEpMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEA MUB80IR6knq9K/tY+hvPsZer6eFMzO3JGkRFBh2kn6JdMDnhYGX7AXVHGflrwNQH qFy+aenWXsC0ZvrikFxbQnX8GVtDADtVznxOi7XzFw7JOxdsVrpXgSN0eh0aMzvV zKPZsZ2miVGclicJHzm5q080b1p/sZtuKIEZk6vZqEg= -----END CERTIFICATE----- """ # # Exceptions # class ConnectionError(Exception): """Failed to connect to the server""" pass class ProxyError(ConnectionError): """Failure to connect because of a proxy""" pass class SSLValidationError(ConnectionError): """Failure to connect due to SSL validation failing""" pass class NoSSLError(SSLValidationError): """Needed to connect to an HTTPS url but no ssl library available to verify the certificate""" pass class MissingModuleError(Exception): """Failed to import 3rd party module required by the caller""" def __init__(self, message, import_traceback): super(MissingModuleError, self).__init__(message) self.import_traceback = import_traceback # Some environments (Google Compute Engine's CoreOS deploys) do not compile # against openssl and thus do not have any HTTPS support. 
CustomHTTPSConnection = None CustomHTTPSHandler = None HTTPSClientAuthHandler = None UnixHTTPSConnection = None if hasattr(httplib, 'HTTPSConnection') and hasattr(urllib_request, 'HTTPSHandler'): class CustomHTTPSConnection(httplib.HTTPSConnection): def __init__(self, *args, **kwargs): httplib.HTTPSConnection.__init__(self, *args, **kwargs) self.context = None if HAS_SSLCONTEXT: self.context = self._context elif HAS_URLLIB3_PYOPENSSLCONTEXT: self.context = self._context = PyOpenSSLContext(PROTOCOL) if self.context and self.cert_file: self.context.load_cert_chain(self.cert_file, self.key_file) def connect(self): "Connect to a host on a given (SSL) port." if hasattr(self, 'source_address'): sock = socket.create_connection((self.host, self.port), self.timeout, self.source_address) else: sock = socket.create_connection((self.host, self.port), self.timeout) server_hostname = self.host # Note: self._tunnel_host is not available on py < 2.6 but this code # isn't used on py < 2.6 (lack of create_connection) if self._tunnel_host: self.sock = sock self._tunnel() server_hostname = self._tunnel_host if HAS_SSLCONTEXT or HAS_URLLIB3_PYOPENSSLCONTEXT: self.sock = self.context.wrap_socket(sock, server_hostname=server_hostname) elif HAS_URLLIB3_SSL_WRAP_SOCKET: self.sock = ssl_wrap_socket(sock, keyfile=self.key_file, cert_reqs=ssl.CERT_NONE, certfile=self.cert_file, ssl_version=PROTOCOL, server_hostname=server_hostname) else: self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, ssl_version=PROTOCOL) class CustomHTTPSHandler(urllib_request.HTTPSHandler): def https_open(self, req): kwargs = {} if HAS_SSLCONTEXT: kwargs['context'] = self._context return self.do_open( functools.partial( CustomHTTPSConnection, **kwargs ), req ) https_request = AbstractHTTPHandler.do_request_ class HTTPSClientAuthHandler(urllib_request.HTTPSHandler): '''Handles client authentication via cert/key This is a fairly lightweight extension on HTTPSHandler, and can be used in place 
of HTTPSHandler ''' def __init__(self, client_cert=None, client_key=None, unix_socket=None, **kwargs): urllib_request.HTTPSHandler.__init__(self, **kwargs) self.client_cert = client_cert self.client_key = client_key self._unix_socket = unix_socket def https_open(self, req): return self.do_open(self._build_https_connection, req) def _build_https_connection(self, host, **kwargs): kwargs.update({ 'cert_file': self.client_cert, 'key_file': self.client_key, }) try: kwargs['context'] = self._context except AttributeError: pass if self._unix_socket: return UnixHTTPSConnection(self._unix_socket)(host, **kwargs) return httplib.HTTPSConnection(host, **kwargs) @contextmanager def unix_socket_patch_httpconnection_connect(): '''Monkey patch ``httplib.HTTPConnection.connect`` to be ``UnixHTTPConnection.connect`` so that when calling ``super(UnixHTTPSConnection, self).connect()`` we get the correct behavior of creating self.sock for the unix socket ''' _connect = httplib.HTTPConnection.connect httplib.HTTPConnection.connect = UnixHTTPConnection.connect yield httplib.HTTPConnection.connect = _connect class UnixHTTPSConnection(httplib.HTTPSConnection): def __init__(self, unix_socket): self._unix_socket = unix_socket def connect(self): # This method exists simply to ensure we monkeypatch # httplib.HTTPConnection.connect to call UnixHTTPConnection.connect with unix_socket_patch_httpconnection_connect(): # Disable pylint check for the super() call. 
It complains about UnixHTTPSConnection # being a NoneType because of the initial definition above, but it won't actually # be a NoneType when this code runs # pylint: disable=bad-super-call super(UnixHTTPSConnection, self).connect() def __call__(self, *args, **kwargs): httplib.HTTPSConnection.__init__(self, *args, **kwargs) return self class UnixHTTPConnection(httplib.HTTPConnection): '''Handles http requests to a unix socket file''' def __init__(self, unix_socket): self._unix_socket = unix_socket def connect(self): self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) try: self.sock.connect(self._unix_socket) except OSError as e: raise OSError('Invalid Socket File (%s): %s' % (self._unix_socket, e)) if self.timeout is not socket._GLOBAL_DEFAULT_TIMEOUT: self.sock.settimeout(self.timeout) def __call__(self, *args, **kwargs): httplib.HTTPConnection.__init__(self, *args, **kwargs) return self class UnixHTTPHandler(urllib_request.HTTPHandler): '''Handler for Unix urls''' def __init__(self, unix_socket, **kwargs): urllib_request.HTTPHandler.__init__(self, **kwargs) self._unix_socket = unix_socket def http_open(self, req): return self.do_open(UnixHTTPConnection(self._unix_socket), req) class ParseResultDottedDict(dict): ''' A dict that acts similarly to the ParseResult named tuple from urllib ''' def __init__(self, *args, **kwargs): super(ParseResultDottedDict, self).__init__(*args, **kwargs) self.__dict__ = self def as_list(self): ''' Generate a list from this dict, that looks like the ParseResult named tuple ''' return [self.get(k, None) for k in ('scheme', 'netloc', 'path', 'params', 'query', 'fragment')] def generic_urlparse(parts): ''' Returns a dictionary of url parts as parsed by urlparse, but accounts for the fact that older versions of that library do not support named attributes (ie. 
.netloc) ''' generic_parts = ParseResultDottedDict() if hasattr(parts, 'netloc'): # urlparse is newer, just read the fields straight # from the parts object generic_parts['scheme'] = parts.scheme generic_parts['netloc'] = parts.netloc generic_parts['path'] = parts.path generic_parts['params'] = parts.params generic_parts['query'] = parts.query generic_parts['fragment'] = parts.fragment generic_parts['username'] = parts.username generic_parts['password'] = parts.password hostname = parts.hostname if hostname and hostname[0] == '[' and '[' in parts.netloc and ']' in parts.netloc: # Py2.6 doesn't parse IPv6 addresses correctly hostname = parts.netloc.split(']')[0][1:].lower() generic_parts['hostname'] = hostname try: port = parts.port except ValueError: # Py2.6 doesn't parse IPv6 addresses correctly netloc = parts.netloc.split('@')[-1].split(']')[-1] if ':' in netloc: port = netloc.split(':')[1] if port: port = int(port) else: port = None generic_parts['port'] = port else: # we have to use indexes, and then parse out # the other parts not supported by indexing generic_parts['scheme'] = parts[0] generic_parts['netloc'] = parts[1] generic_parts['path'] = parts[2] generic_parts['params'] = parts[3] generic_parts['query'] = parts[4] generic_parts['fragment'] = parts[5] # get the username, password, etc. 
try: netloc_re = re.compile(r'^((?:\w)+(?::(?:\w)+)?@)?([A-Za-z0-9.-]+)(:\d+)?$') match = netloc_re.match(parts[1]) auth = match.group(1) hostname = match.group(2) port = match.group(3) if port: # the capture group for the port will include the ':', # so remove it and convert the port to an integer port = int(port[1:]) if auth: # the capture group above includes the @, so remove it # and then split it up based on the first ':' found auth = auth[:-1] username, password = auth.split(':', 1) else: username = password = None generic_parts['username'] = username generic_parts['password'] = password generic_parts['hostname'] = hostname generic_parts['port'] = port except Exception: generic_parts['username'] = None generic_parts['password'] = None generic_parts['hostname'] = parts[1] generic_parts['port'] = None return generic_parts class RequestWithMethod(urllib_request.Request): ''' Workaround for using DELETE/PUT/etc with urllib2 Originally contained in library/net_infrastructure/dnsmadeeasy ''' def __init__(self, url, method, data=None, headers=None, origin_req_host=None, unverifiable=True): if headers is None: headers = {} self._method = method.upper() urllib_request.Request.__init__(self, url, data, headers, origin_req_host, unverifiable) def get_method(self): if self._method: return self._method else: return urllib_request.Request.get_method(self) def RedirectHandlerFactory(follow_redirects=None, validate_certs=True, ca_path=None): """This is a class factory that closes over the value of ``follow_redirects`` so that the RedirectHandler class has access to that value without having to use globals, and potentially cause problems where ``open_url`` or ``fetch_url`` are used multiple times in a module. """ class RedirectHandler(urllib_request.HTTPRedirectHandler): """This is an implementation of a RedirectHandler to match the functionality provided by httplib2. 
It will utilize the value of ``follow_redirects`` that is passed into ``RedirectHandlerFactory`` to determine how redirects should be handled in urllib2. """ def redirect_request(self, req, fp, code, msg, hdrs, newurl): if not HAS_SSLCONTEXT: handler = maybe_add_ssl_handler(newurl, validate_certs, ca_path=ca_path) if handler: urllib_request._opener.add_handler(handler) # Preserve urllib2 compatibility if follow_redirects == 'urllib2': return urllib_request.HTTPRedirectHandler.redirect_request(self, req, fp, code, msg, hdrs, newurl) # Handle disabled redirects elif follow_redirects in ['no', 'none', False]: raise urllib_error.HTTPError(newurl, code, msg, hdrs, fp) method = req.get_method() # Handle non-redirect HTTP status or invalid follow_redirects if follow_redirects in ['all', 'yes', True]: if code < 300 or code >= 400: raise urllib_error.HTTPError(req.get_full_url(), code, msg, hdrs, fp) elif follow_redirects == 'safe': if code < 300 or code >= 400 or method not in ('GET', 'HEAD'): raise urllib_error.HTTPError(req.get_full_url(), code, msg, hdrs, fp) else: raise urllib_error.HTTPError(req.get_full_url(), code, msg, hdrs, fp) try: # Python 2-3.3 data = req.get_data() origin_req_host = req.get_origin_req_host() except AttributeError: # Python 3.4+ data = req.data origin_req_host = req.origin_req_host # Be conciliant with URIs containing a space newurl = newurl.replace(' ', '%20') # Suport redirect with payload and original headers if code in (307, 308): # Preserve payload and headers headers = req.headers else: # Do not preserve payload and filter headers data = None headers = dict((k, v) for k, v in req.headers.items() if k.lower() not in ("content-length", "content-type", "transfer-encoding")) # http://tools.ietf.org/html/rfc7231#section-6.4.4 if code == 303 and method != 'HEAD': method = 'GET' # Do what the browsers do, despite standards... # First, turn 302s into GETs. 
if code == 302 and method != 'HEAD': method = 'GET' # Second, if a POST is responded to with a 301, turn it into a GET. if code == 301 and method == 'POST': method = 'GET' return RequestWithMethod(newurl, method=method, headers=headers, data=data, origin_req_host=origin_req_host, unverifiable=True, ) return RedirectHandler def build_ssl_validation_error(hostname, port, paths, exc=None): '''Inteligently build out the SSLValidationError based on what support you have installed ''' msg = [ ('Failed to validate the SSL certificate for %s:%s.' ' Make sure your managed systems have a valid CA' ' certificate installed.') ] if not HAS_SSLCONTEXT: msg.append('If the website serving the url uses SNI you need' ' python >= 2.7.9 on your managed machine') msg.append(' (the python executable used (%s) is version: %s)' % (sys.executable, ''.join(sys.version.splitlines()))) if not HAS_URLLIB3_PYOPENSSLCONTEXT and not HAS_URLLIB3_SSL_WRAP_SOCKET: msg.append('or you can install the `urllib3`, `pyOpenSSL`,' ' `ndg-httpsclient`, and `pyasn1` python modules') msg.append('to perform SNI verification in python >= 2.6.') msg.append('You can use validate_certs=False if you do' ' not need to confirm the servers identity but this is' ' unsafe and not recommended.' ' Paths checked for this platform: %s.') if exc: msg.append('The exception msg was: %s.' % to_native(exc)) raise SSLValidationError(' '.join(msg) % (hostname, port, ", ".join(paths))) def atexit_remove_file(filename): if os.path.exists(filename): try: os.unlink(filename) except Exception: # just ignore if we cannot delete, things should be ok pass class SSLValidationHandler(urllib_request.BaseHandler): ''' A custom handler class for SSL validation. 
Based on: http://stackoverflow.com/questions/1087227/validate-ssl-certificates-with-python http://techknack.net/python-urllib2-handlers/ ''' CONNECT_COMMAND = "CONNECT %s:%s HTTP/1.0\r\n" def __init__(self, hostname, port, ca_path=None): self.hostname = hostname self.port = port self.ca_path = ca_path def get_ca_certs(self): # tries to find a valid CA cert in one of the # standard locations for the current distribution ca_certs = [] cadata = bytearray() paths_checked = [] if self.ca_path: paths_checked = [self.ca_path] with open(to_bytes(self.ca_path, errors='surrogate_or_strict'), 'rb') as f: if HAS_SSLCONTEXT: cadata.extend( ssl.PEM_cert_to_DER_cert( to_native(f.read(), errors='surrogate_or_strict') ) ) return self.ca_path, cadata, paths_checked if not HAS_SSLCONTEXT: paths_checked.append('/etc/ssl/certs') system = to_text(platform.system(), errors='surrogate_or_strict') # build a list of paths to check for .crt/.pem files # based on the platform type if system == u'Linux': paths_checked.append('/etc/pki/ca-trust/extracted/pem') paths_checked.append('/etc/pki/tls/certs') paths_checked.append('/usr/share/ca-certificates/cacert.org') elif system == u'FreeBSD': paths_checked.append('/usr/local/share/certs') elif system == u'OpenBSD': paths_checked.append('/etc/ssl') elif system == u'NetBSD': ca_certs.append('/etc/openssl/certs') elif system == u'SunOS': paths_checked.append('/opt/local/etc/openssl/certs') # fall back to a user-deployed cert in a standard # location if the OS platform one is not available paths_checked.append('/etc/ansible') tmp_path = None if not HAS_SSLCONTEXT: tmp_fd, tmp_path = tempfile.mkstemp() atexit.register(atexit_remove_file, tmp_path) # Write the dummy ca cert if we are running on macOS if system == u'Darwin': if HAS_SSLCONTEXT: cadata.extend( ssl.PEM_cert_to_DER_cert( to_native(b_DUMMY_CA_CERT, errors='surrogate_or_strict') ) ) else: os.write(tmp_fd, b_DUMMY_CA_CERT) # Default Homebrew path for OpenSSL certs 
paths_checked.append('/usr/local/etc/openssl') # for all of the paths, find any .crt or .pem files # and compile them into single temp file for use # in the ssl check to speed up the test for path in paths_checked: if os.path.exists(path) and os.path.isdir(path): dir_contents = os.listdir(path) for f in dir_contents: full_path = os.path.join(path, f) if os.path.isfile(full_path) and os.path.splitext(f)[1] in ('.crt', '.pem'): try: if full_path not in LOADED_VERIFY_LOCATIONS: with open(full_path, 'rb') as cert_file: b_cert = cert_file.read() if HAS_SSLCONTEXT: try: cadata.extend( ssl.PEM_cert_to_DER_cert( to_native(b_cert, errors='surrogate_or_strict') ) ) except Exception: continue else: os.write(tmp_fd, b_cert) os.write(tmp_fd, b'\n') except (OSError, IOError): pass if HAS_SSLCONTEXT: default_verify_paths = ssl.get_default_verify_paths() paths_checked[:0] = [default_verify_paths.capath] else: os.close(tmp_fd) return (tmp_path, cadata, paths_checked) def validate_proxy_response(self, response, valid_codes=None): ''' make sure we get back a valid code from the proxy ''' valid_codes = [200] if valid_codes is None else valid_codes try: (http_version, resp_code, msg) = re.match(br'(HTTP/\d\.\d) (\d\d\d) (.*)', response).groups() if int(resp_code) not in valid_codes: raise Exception except Exception: raise ProxyError('Connection to proxy failed') def detect_no_proxy(self, url): ''' Detect if the 'no_proxy' environment variable is set and honor those locations. 
''' env_no_proxy = os.environ.get('no_proxy') if env_no_proxy: env_no_proxy = env_no_proxy.split(',') netloc = urlparse(url).netloc for host in env_no_proxy: if netloc.endswith(host) or netloc.split(':')[0].endswith(host): # Our requested URL matches something in no_proxy, so don't # use the proxy for this return False return True def make_context(self, cafile, cadata): cafile = self.ca_path or cafile if self.ca_path: cadata = None else: cadata = cadata or None if HAS_SSLCONTEXT: context = create_default_context(cafile=cafile) elif HAS_URLLIB3_PYOPENSSLCONTEXT: context = PyOpenSSLContext(PROTOCOL) else: raise NotImplementedError('Host libraries are too old to support creating an sslcontext') if cafile or cadata: context.load_verify_locations(cafile=cafile, cadata=cadata) return context def http_request(self, req): tmp_ca_cert_path, cadata, paths_checked = self.get_ca_certs() # Detect if 'no_proxy' environment variable is set and if our URL is included use_proxy = self.detect_no_proxy(req.get_full_url()) https_proxy = os.environ.get('https_proxy') context = None try: context = self.make_context(tmp_ca_cert_path, cadata) except NotImplementedError: # We'll make do with no context below pass try: if use_proxy and https_proxy: proxy_parts = generic_urlparse(urlparse(https_proxy)) port = proxy_parts.get('port') or 443 proxy_hostname = proxy_parts.get('hostname', None) if proxy_hostname is None or proxy_parts.get('scheme') == '': raise ProxyError("Failed to parse https_proxy environment variable." 
" Please make sure you export https proxy as 'https_proxy=<SCHEME>://<IP_ADDRESS>:<PORT>'") s = socket.create_connection((proxy_hostname, port)) if proxy_parts.get('scheme') == 'http': s.sendall(to_bytes(self.CONNECT_COMMAND % (self.hostname, self.port), errors='surrogate_or_strict')) if proxy_parts.get('username'): credentials = "%s:%s" % (proxy_parts.get('username', ''), proxy_parts.get('password', '')) s.sendall(b'Proxy-Authorization: Basic %s\r\n' % base64.b64encode(to_bytes(credentials, errors='surrogate_or_strict')).strip()) s.sendall(b'\r\n') connect_result = b"" while connect_result.find(b"\r\n\r\n") <= 0: connect_result += s.recv(4096) # 128 kilobytes of headers should be enough for everyone. if len(connect_result) > 131072: raise ProxyError('Proxy sent too verbose headers. Only 128KiB allowed.') self.validate_proxy_response(connect_result) if context: ssl_s = context.wrap_socket(s, server_hostname=self.hostname) elif HAS_URLLIB3_SSL_WRAP_SOCKET: ssl_s = ssl_wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL, server_hostname=self.hostname) else: ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL) match_hostname(ssl_s.getpeercert(), self.hostname) else: raise ProxyError('Unsupported proxy scheme: %s. Currently ansible only supports HTTP proxies.' 
% proxy_parts.get('scheme')) else: s = socket.create_connection((self.hostname, self.port)) if context: ssl_s = context.wrap_socket(s, server_hostname=self.hostname) elif HAS_URLLIB3_SSL_WRAP_SOCKET: ssl_s = ssl_wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL, server_hostname=self.hostname) else: ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL) match_hostname(ssl_s.getpeercert(), self.hostname) # close the ssl connection # ssl_s.unwrap() s.close() except (ssl.SSLError, CertificateError) as e: build_ssl_validation_error(self.hostname, self.port, paths_checked, e) except socket.error as e: raise ConnectionError('Failed to connect to %s at port %s: %s' % (self.hostname, self.port, to_native(e))) return req https_request = http_request def maybe_add_ssl_handler(url, validate_certs, ca_path=None): parsed = generic_urlparse(urlparse(url)) if parsed.scheme == 'https' and validate_certs: if not HAS_SSL: raise NoSSLError('SSL validation is not available in your version of python. You can use validate_certs=False,' ' however this is unsafe and not recommended') # create the SSL validation handler and # add it to the list of handlers return SSLValidationHandler(parsed.hostname, parsed.port or 443, ca_path=ca_path) def getpeercert(response, binary_form=False): """ Attempt to get the peer certificate of the response from urlopen. """ # The response from urllib2.open() is different across Python 2 and 3 if PY3: socket = response.fp.raw._sock else: socket = response.fp._sock.fp._sock try: return socket.getpeercert(binary_form) except AttributeError: pass # Not HTTPS def get_channel_binding_cert_hash(certificate_der): """ Gets the channel binding app data for a TLS connection using the peer cert. 
""" if not HAS_CRYPTOGRAPHY: return # Logic documented in RFC 5929 section 4 https://tools.ietf.org/html/rfc5929#section-4 cert = x509.load_der_x509_certificate(certificate_der, default_backend()) hash_algorithm = None try: hash_algorithm = cert.signature_hash_algorithm except UnsupportedAlgorithm: pass # If the signature hash algorithm is unknown/unsupported or md5/sha1 we must use SHA256. if not hash_algorithm or hash_algorithm.name in ['md5', 'sha1']: hash_algorithm = hashes.SHA256() digest = hashes.Hash(hash_algorithm, default_backend()) digest.update(certificate_der) return digest.finalize() def rfc2822_date_string(timetuple, zone='-0000'): """Accepts a timetuple and optional zone which defaults to ``-0000`` and returns a date string as specified by RFC 2822, e.g.: Fri, 09 Nov 2001 01:08:47 -0000 Copied from email.utils.formatdate and modified for separate use """ return '%s, %02d %s %04d %02d:%02d:%02d %s' % ( ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'][timetuple[6]], timetuple[2], ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'][timetuple[1] - 1], timetuple[0], timetuple[3], timetuple[4], timetuple[5], zone) class Request: def __init__(self, headers=None, use_proxy=True, force=False, timeout=10, validate_certs=True, url_username=None, url_password=None, http_agent=None, force_basic_auth=False, follow_redirects='urllib2', client_cert=None, client_key=None, cookies=None, unix_socket=None, ca_path=None): """This class works somewhat similarly to the ``Session`` class of from requests by defining a cookiejar that an be used across requests as well as cascaded defaults that can apply to repeated requests For documentation of params, see ``Request.open`` >>> from ansible.module_utils.urls import Request >>> r = Request() >>> r.open('GET', 'http://httpbin.org/cookies/set?k1=v1').read() '{\n "cookies": {\n "k1": "v1"\n }\n}\n' >>> r = Request(url_username='user', url_password='passwd') >>> r.open('GET', 
'http://httpbin.org/basic-auth/user/passwd').read() '{\n "authenticated": true, \n "user": "user"\n}\n' >>> r = Request(headers=dict(foo='bar')) >>> r.open('GET', 'http://httpbin.org/get', headers=dict(baz='qux')).read() """ self.headers = headers or {} if not isinstance(self.headers, dict): raise ValueError("headers must be a dict: %r" % self.headers) self.use_proxy = use_proxy self.force = force self.timeout = timeout self.validate_certs = validate_certs self.url_username = url_username self.url_password = url_password self.http_agent = http_agent self.force_basic_auth = force_basic_auth self.follow_redirects = follow_redirects self.client_cert = client_cert self.client_key = client_key self.unix_socket = unix_socket self.ca_path = ca_path if isinstance(cookies, cookiejar.CookieJar): self.cookies = cookies else: self.cookies = cookiejar.CookieJar() def _fallback(self, value, fallback): if value is None: return fallback return value def open(self, method, url, data=None, headers=None, use_proxy=None, force=None, last_mod_time=None, timeout=None, validate_certs=None, url_username=None, url_password=None, http_agent=None, force_basic_auth=None, follow_redirects=None, client_cert=None, client_key=None, cookies=None, use_gssapi=False, unix_socket=None, ca_path=None, unredirected_headers=None): """ Sends a request via HTTP(S) or FTP using urllib2 (Python2) or urllib (Python3) Does not require the module environment Returns :class:`HTTPResponse` object. 
        :arg method: method for the request
        :arg url: URL to request
        :kwarg data: (optional) bytes, or file-like object to send
            in the body of the request
        :kwarg headers: (optional) Dictionary of HTTP Headers to send with the
            request
        :kwarg use_proxy: (optional) Boolean of whether or not to use proxy
        :kwarg force: (optional) Boolean of whether or not to set `cache-control: no-cache` header
        :kwarg last_mod_time: (optional) Datetime object to use when setting If-Modified-Since header
        :kwarg timeout: (optional) How long to wait for the server to send
            data before giving up, as a float
        :kwarg validate_certs: (optional) Boolean that controls whether we verify
            the server's TLS certificate
        :kwarg url_username: (optional) String of the user to use when authenticating
        :kwarg url_password: (optional) String of the password to use when authenticating
        :kwarg http_agent: (optional) String of the User-Agent to use in the request
        :kwarg force_basic_auth: (optional) Boolean determining if auth header should be sent in the initial request
        :kwarg follow_redirects: (optional) String of urllib2, all/yes, safe, none to determine how redirects are
            followed, see RedirectHandlerFactory for more information
        :kwarg client_cert: (optional) PEM formatted certificate chain file to be used for SSL client authentication.
            This file can also include the key as well, and if the key is included, client_key is not required
        :kwarg client_key: (optional) PEM formatted file that contains your private key to be used for SSL client
            authentication. If client_cert contains both the certificate and key, this option is not required
        :kwarg cookies: (optional) CookieJar object to send with the request
        :kwarg use_gssapi: (optional) Use GSSAPI handler of requests.
:kwarg unix_socket: (optional) String of file system path to unix socket file to use when establishing connection to the provided url :kwarg ca_path: (optional) String of file system path to CA cert bundle to use :kwarg unredirected_headers: (optional) A list of headers to not attach on a redirected request :returns: HTTPResponse. Added in Ansible 2.9 """ method = method.upper() if headers is None: headers = {} elif not isinstance(headers, dict): raise ValueError("headers must be a dict") headers = dict(self.headers, **headers) use_proxy = self._fallback(use_proxy, self.use_proxy) force = self._fallback(force, self.force) timeout = self._fallback(timeout, self.timeout) validate_certs = self._fallback(validate_certs, self.validate_certs) url_username = self._fallback(url_username, self.url_username) url_password = self._fallback(url_password, self.url_password) http_agent = self._fallback(http_agent, self.http_agent) force_basic_auth = self._fallback(force_basic_auth, self.force_basic_auth) follow_redirects = self._fallback(follow_redirects, self.follow_redirects) client_cert = self._fallback(client_cert, self.client_cert) client_key = self._fallback(client_key, self.client_key) cookies = self._fallback(cookies, self.cookies) unix_socket = self._fallback(unix_socket, self.unix_socket) ca_path = self._fallback(ca_path, self.ca_path) handlers = [] if unix_socket: handlers.append(UnixHTTPHandler(unix_socket)) ssl_handler = maybe_add_ssl_handler(url, validate_certs, ca_path=ca_path) if ssl_handler and not HAS_SSLCONTEXT: handlers.append(ssl_handler) parsed = generic_urlparse(urlparse(url)) if parsed.scheme != 'ftp': username = url_username password = url_password if username: netloc = parsed.netloc elif '@' in parsed.netloc: credentials, netloc = parsed.netloc.split('@', 1) if ':' in credentials: username, password = credentials.split(':', 1) else: username = credentials password = '' parsed_list = parsed.as_list() parsed_list[1] = netloc # reconstruct url without 
credentials url = urlunparse(parsed_list) if use_gssapi: if HTTPGSSAPIAuthHandler: handlers.append(HTTPGSSAPIAuthHandler(username, password)) else: imp_err_msg = missing_required_lib('gssapi', reason='for use_gssapi=True', url='https://pypi.org/project/gssapi/') raise MissingModuleError(imp_err_msg, import_traceback=GSSAPI_IMP_ERR) elif username and not force_basic_auth: passman = urllib_request.HTTPPasswordMgrWithDefaultRealm() # this creates a password manager passman.add_password(None, netloc, username, password) # because we have put None at the start it will always # use this username/password combination for urls # for which `theurl` is a super-url authhandler = urllib_request.HTTPBasicAuthHandler(passman) digest_authhandler = urllib_request.HTTPDigestAuthHandler(passman) # create the AuthHandler handlers.append(authhandler) handlers.append(digest_authhandler) elif username and force_basic_auth: headers["Authorization"] = basic_auth_header(username, password) else: try: rc = netrc.netrc(os.environ.get('NETRC')) login = rc.authenticators(parsed.hostname) except IOError: login = None if login: username, _, password = login if username and password: headers["Authorization"] = basic_auth_header(username, password) if not use_proxy: proxyhandler = urllib_request.ProxyHandler({}) handlers.append(proxyhandler) context = None if HAS_SSLCONTEXT and not validate_certs: # In 2.7.9, the default context validates certificates context = SSLContext(ssl.PROTOCOL_SSLv23) if ssl.OP_NO_SSLv2: context.options |= ssl.OP_NO_SSLv2 context.options |= ssl.OP_NO_SSLv3 context.verify_mode = ssl.CERT_NONE context.check_hostname = False handlers.append(HTTPSClientAuthHandler(client_cert=client_cert, client_key=client_key, context=context, unix_socket=unix_socket)) elif client_cert or unix_socket: handlers.append(HTTPSClientAuthHandler(client_cert=client_cert, client_key=client_key, unix_socket=unix_socket)) if ssl_handler and HAS_SSLCONTEXT and validate_certs: tmp_ca_path, cadata, 
paths_checked = ssl_handler.get_ca_certs() try: context = ssl_handler.make_context(tmp_ca_path, cadata) except NotImplementedError: pass # pre-2.6 versions of python cannot use the custom https # handler, since the socket class is lacking create_connection. # Some python builds lack HTTPS support. if hasattr(socket, 'create_connection') and CustomHTTPSHandler: kwargs = {} if HAS_SSLCONTEXT: kwargs['context'] = context handlers.append(CustomHTTPSHandler(**kwargs)) handlers.append(RedirectHandlerFactory(follow_redirects, validate_certs, ca_path=ca_path)) # add some nicer cookie handling if cookies is not None: handlers.append(urllib_request.HTTPCookieProcessor(cookies)) opener = urllib_request.build_opener(*handlers) urllib_request.install_opener(opener) data = to_bytes(data, nonstring='passthru') request = RequestWithMethod(url, method, data) # add the custom agent header, to help prevent issues # with sites that block the default urllib agent string if http_agent: request.add_header('User-agent', http_agent) # Cache control # Either we directly force a cache refresh if force: request.add_header('cache-control', 'no-cache') # or we do it if the original is more recent than our copy elif last_mod_time: tstamp = rfc2822_date_string(last_mod_time.timetuple(), 'GMT') request.add_header('If-Modified-Since', tstamp) # user defined headers now, which may override things we've set above unredirected_headers = unredirected_headers or [] for header in headers: if header in unredirected_headers: request.add_unredirected_header(header, headers[header]) else: request.add_header(header, headers[header]) return urllib_request.urlopen(request, None, timeout) def get(self, url, **kwargs): r"""Sends a GET request. Returns :class:`HTTPResponse` object. :arg url: URL to request :kwarg \*\*kwargs: Optional arguments that ``open`` takes. :returns: HTTPResponse """ return self.open('GET', url, **kwargs) def options(self, url, **kwargs): r"""Sends a OPTIONS request. 
Returns :class:`HTTPResponse` object. :arg url: URL to request :kwarg \*\*kwargs: Optional arguments that ``open`` takes. :returns: HTTPResponse """ return self.open('OPTIONS', url, **kwargs) def head(self, url, **kwargs): r"""Sends a HEAD request. Returns :class:`HTTPResponse` object. :arg url: URL to request :kwarg \*\*kwargs: Optional arguments that ``open`` takes. :returns: HTTPResponse """ return self.open('HEAD', url, **kwargs) def post(self, url, data=None, **kwargs): r"""Sends a POST request. Returns :class:`HTTPResponse` object. :arg url: URL to request. :kwarg data: (optional) bytes, or file-like object to send in the body of the request. :kwarg \*\*kwargs: Optional arguments that ``open`` takes. :returns: HTTPResponse """ return self.open('POST', url, data=data, **kwargs) def put(self, url, data=None, **kwargs): r"""Sends a PUT request. Returns :class:`HTTPResponse` object. :arg url: URL to request. :kwarg data: (optional) bytes, or file-like object to send in the body of the request. :kwarg \*\*kwargs: Optional arguments that ``open`` takes. :returns: HTTPResponse """ return self.open('PUT', url, data=data, **kwargs) def patch(self, url, data=None, **kwargs): r"""Sends a PATCH request. Returns :class:`HTTPResponse` object. :arg url: URL to request. :kwarg data: (optional) bytes, or file-like object to send in the body of the request. :kwarg \*\*kwargs: Optional arguments that ``open`` takes. :returns: HTTPResponse """ return self.open('PATCH', url, data=data, **kwargs) def delete(self, url, **kwargs): r"""Sends a DELETE request. Returns :class:`HTTPResponse` object. :arg url: URL to request :kwargs \*\*kwargs: Optional arguments that ``open`` takes. 
:returns: HTTPResponse """ return self.open('DELETE', url, **kwargs) def open_url(url, data=None, headers=None, method=None, use_proxy=True, force=False, last_mod_time=None, timeout=10, validate_certs=True, url_username=None, url_password=None, http_agent=None, force_basic_auth=False, follow_redirects='urllib2', client_cert=None, client_key=None, cookies=None, use_gssapi=False, unix_socket=None, ca_path=None, unredirected_headers=None): ''' Sends a request via HTTP(S) or FTP using urllib2 (Python2) or urllib (Python3) Does not require the module environment ''' method = method or ('POST' if data else 'GET') return Request().open(method, url, data=data, headers=headers, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs, url_username=url_username, url_password=url_password, http_agent=http_agent, force_basic_auth=force_basic_auth, follow_redirects=follow_redirects, client_cert=client_cert, client_key=client_key, cookies=cookies, use_gssapi=use_gssapi, unix_socket=unix_socket, ca_path=ca_path, unredirected_headers=unredirected_headers) def prepare_multipart(fields): """Takes a mapping, and prepares a multipart/form-data body :arg fields: Mapping :returns: tuple of (content_type, body) where ``content_type`` is the ``multipart/form-data`` ``Content-Type`` header including ``boundary`` and ``body`` is the prepared bytestring body Payload content from a file will be base64 encoded and will include the appropriate ``Content-Transfer-Encoding`` and ``Content-Type`` headers. 
Example: { "file1": { "filename": "/bin/true", "mime_type": "application/octet-stream" }, "file2": { "content": "text based file content", "filename": "fake.txt", "mime_type": "text/plain", }, "text_form_field": "value" } """ if not isinstance(fields, Mapping): raise TypeError( 'Mapping is required, cannot be type %s' % fields.__class__.__name__ ) m = email.mime.multipart.MIMEMultipart('form-data') for field, value in sorted(fields.items()): if isinstance(value, string_types): main_type = 'text' sub_type = 'plain' content = value filename = None elif isinstance(value, Mapping): filename = value.get('filename') content = value.get('content') if not any((filename, content)): raise ValueError('at least one of filename or content must be provided') mime = value.get('mime_type') if not mime: try: mime = mimetypes.guess_type(filename or '', strict=False)[0] or 'application/octet-stream' except Exception: mime = 'application/octet-stream' main_type, sep, sub_type = mime.partition('/') else: raise TypeError( 'value must be a string, or mapping, cannot be type %s' % value.__class__.__name__ ) if not content and filename: with open(to_bytes(filename, errors='surrogate_or_strict'), 'rb') as f: part = email.mime.application.MIMEApplication(f.read()) del part['Content-Type'] part.add_header('Content-Type', '%s/%s' % (main_type, sub_type)) else: part = email.mime.nonmultipart.MIMENonMultipart(main_type, sub_type) part.set_payload(to_bytes(content)) part.add_header('Content-Disposition', 'form-data') del part['MIME-Version'] part.set_param( 'name', field, header='Content-Disposition' ) if filename: part.set_param( 'filename', to_native(os.path.basename(filename)), header='Content-Disposition' ) m.attach(part) if PY3: # Ensure headers are not split over multiple lines # The HTTP policy also uses CRLF by default b_data = m.as_bytes(policy=email.policy.HTTP) else: # Py2 # We cannot just call ``as_string`` since it provides no way # to specify ``maxheaderlen`` fp = cStringIO() # 
cStringIO seems to be required here # Ensure headers are not split over multiple lines g = email.generator.Generator(fp, maxheaderlen=0) g.flatten(m) # ``fix_eols`` switches from ``\n`` to ``\r\n`` b_data = email.utils.fix_eols(fp.getvalue()) del m headers, sep, b_content = b_data.partition(b'\r\n\r\n') del b_data if PY3: parser = email.parser.BytesHeaderParser().parsebytes else: # Py2 parser = email.parser.HeaderParser().parsestr return ( parser(headers)['content-type'], # Message converts to native strings b_content ) # # Module-related functions # def basic_auth_header(username, password): """Takes a username and password and returns a byte string suitable for using as value of an Authorization header to do basic auth. """ return b"Basic %s" % base64.b64encode(to_bytes("%s:%s" % (username, password), errors='surrogate_or_strict')) def url_argument_spec(): ''' Creates an argument spec that can be used with any module that will be requesting content via urllib/urllib2 ''' return dict( url=dict(type='str'), force=dict(type='bool', default=False, aliases=['thirsty'], deprecated_aliases=[dict(name='thirsty', version='2.13', collection_name='ansible.builtin')]), http_agent=dict(type='str', default='ansible-httpget'), use_proxy=dict(type='bool', default=True), validate_certs=dict(type='bool', default=True), url_username=dict(type='str'), url_password=dict(type='str', no_log=True), force_basic_auth=dict(type='bool', default=False), client_cert=dict(type='path'), client_key=dict(type='path'), use_gssapi=dict(type='bool', default=False), ) def fetch_url(module, url, data=None, headers=None, method=None, use_proxy=True, force=False, last_mod_time=None, timeout=10, use_gssapi=False, unix_socket=None, ca_path=None, cookies=None): """Sends a request via HTTP(S) or FTP (needs the module as parameter) :arg module: The AnsibleModule (used to get username, password etc. (s.b.). :arg url: The url to use. :kwarg data: The data to be sent (in case of POST/PUT). 
:kwarg headers: A dict with the request headers. :kwarg method: "POST", "PUT", etc. :kwarg boolean use_proxy: Default: True :kwarg boolean force: If True: Do not get a cached copy (Default: False) :kwarg last_mod_time: Default: None :kwarg int timeout: Default: 10 :kwarg boolean use_gssapi: Default: False :kwarg unix_socket: (optional) String of file system path to unix socket file to use when establishing connection to the provided url :kwarg ca_path: (optional) String of file system path to CA cert bundle to use :returns: A tuple of (**response**, **info**). Use ``response.read()`` to read the data. The **info** contains the 'status' and other meta data. When a HttpError (status >= 400) occurred then ``info['body']`` contains the error response data:: Example:: data={...} resp, info = fetch_url(module, "http://example.com", data=module.jsonify(data), headers={'Content-type': 'application/json'}, method="POST") status_code = info["status"] body = resp.read() if status_code >= 400 : body = info['body'] """ if not HAS_URLPARSE: module.fail_json(msg='urlparse is not installed') # ensure we use proper tempdir old_tempdir = tempfile.tempdir tempfile.tempdir = module.tmpdir # Get validate_certs from the module params validate_certs = module.params.get('validate_certs', True) username = module.params.get('url_username', '') password = module.params.get('url_password', '') http_agent = module.params.get('http_agent', 'ansible-httpget') force_basic_auth = module.params.get('force_basic_auth', '') follow_redirects = module.params.get('follow_redirects', 'urllib2') client_cert = module.params.get('client_cert') client_key = module.params.get('client_key') use_gssapi = module.params.get('use_gssapi', use_gssapi) if not isinstance(cookies, cookiejar.CookieJar): cookies = cookiejar.LWPCookieJar() r = None info = dict(url=url, status=-1) try: r = open_url(url, data=data, headers=headers, method=method, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, 
timeout=timeout, validate_certs=validate_certs, url_username=username, url_password=password, http_agent=http_agent, force_basic_auth=force_basic_auth, follow_redirects=follow_redirects, client_cert=client_cert, client_key=client_key, cookies=cookies, use_gssapi=use_gssapi, unix_socket=unix_socket, ca_path=ca_path) # Lowercase keys, to conform to py2 behavior, so that py3 and py2 are predictable info.update(dict((k.lower(), v) for k, v in r.info().items())) # Don't be lossy, append header values for duplicate headers # In Py2 there is nothing that needs done, py2 does this for us if PY3: temp_headers = {} for name, value in r.headers.items(): # The same as above, lower case keys to match py2 behavior, and create more consistent results name = name.lower() if name in temp_headers: temp_headers[name] = ', '.join((temp_headers[name], value)) else: temp_headers[name] = value info.update(temp_headers) # parse the cookies into a nice dictionary cookie_list = [] cookie_dict = dict() # Python sorts cookies in order of most specific (ie. longest) path first. See ``CookieJar._cookie_attrs`` # Cookies with the same path are reversed from response order. # This code makes no assumptions about that, and accepts the order given by python for cookie in cookies: cookie_dict[cookie.name] = cookie.value cookie_list.append((cookie.name, cookie.value)) info['cookies_string'] = '; '.join('%s=%s' % c for c in cookie_list) info['cookies'] = cookie_dict # finally update the result with a message about the fetch info.update(dict(msg="OK (%s bytes)" % r.headers.get('Content-Length', 'unknown'), url=r.geturl(), status=r.code)) except NoSSLError as e: distribution = get_distribution() if distribution is not None and distribution.lower() == 'redhat': module.fail_json(msg='%s. 
You can also install python-ssl from EPEL' % to_native(e), **info) else: module.fail_json(msg='%s' % to_native(e), **info) except (ConnectionError, ValueError) as e: module.fail_json(msg=to_native(e), **info) except MissingModuleError as e: module.fail_json(msg=to_text(e), exception=e.import_traceback) except urllib_error.HTTPError as e: try: body = e.read() except AttributeError: body = '' # Try to add exception info to the output but don't fail if we can't try: # Lowercase keys, to conform to py2 behavior, so that py3 and py2 are predictable info.update(dict((k.lower(), v) for k, v in e.info().items())) except Exception: pass info.update({'msg': to_native(e), 'body': body, 'status': e.code}) except urllib_error.URLError as e: code = int(getattr(e, 'code', -1)) info.update(dict(msg="Request failed: %s" % to_native(e), status=code)) except socket.error as e: info.update(dict(msg="Connection failure: %s" % to_native(e), status=-1)) except httplib.BadStatusLine as e: info.update(dict(msg="Connection failure: connection was closed before a valid response was received: %s" % to_native(e.line), status=-1)) except Exception as e: info.update(dict(msg="An unknown error occurred: %s" % to_native(e), status=-1), exception=traceback.format_exc()) finally: tempfile.tempdir = old_tempdir return r, info def fetch_file(module, url, data=None, headers=None, method=None, use_proxy=True, force=False, last_mod_time=None, timeout=10): '''Download and save a file via HTTP(S) or FTP (needs the module as parameter). This is basically a wrapper around fetch_url(). :arg module: The AnsibleModule (used to get username, password etc. (s.b.). :arg url: The url to use. :kwarg data: The data to be sent (in case of POST/PUT). :kwarg headers: A dict with the request headers. :kwarg method: "POST", "PUT", etc. 
:kwarg boolean use_proxy: Default: True :kwarg boolean force: If True: Do not get a cached copy (Default: False) :kwarg last_mod_time: Default: None :kwarg int timeout: Default: 10 :returns: A string, the path to the downloaded file. ''' # download file bufsize = 65536 file_name, file_ext = os.path.splitext(str(url.rsplit('/', 1)[1])) fetch_temp_file = tempfile.NamedTemporaryFile(dir=module.tmpdir, prefix=file_name, suffix=file_ext, delete=False) module.add_cleanup_file(fetch_temp_file.name) try: rsp, info = fetch_url(module, url, data, headers, method, use_proxy, force, last_mod_time, timeout) if not rsp: module.fail_json(msg="Failure downloading %s, %s" % (url, info['msg'])) data = rsp.read(bufsize) while data: fetch_temp_file.write(data) data = rsp.read(bufsize) fetch_temp_file.close() except Exception as e: module.fail_json(msg="Failure downloading %s, %s" % (url, to_native(e))) return fetch_temp_file.name
gpl-3.0
divmain/GitSavvy
tests/mockito/verification.py
2
5295
# Copyright (c) 2008-2016 Szczepan Faber, Serhiy Oplakanets, Herr Kaste
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

__all__ = ['never', 'VerificationError']


class VerificationError(AssertionError):
    '''Indicates error during verification of invocations.

    Raised if verification fails. Error message contains the cause.
    '''
    pass


# Each verification mode below implements the same informal interface:
#
#     verify(invocation, actual_count)
#
# ``invocation`` is the wanted invocation (used for error reporting, and by
# InOrder also for matching); ``actual_count`` is how often it was actually
# observed.  ``verify`` returns None on success and raises VerificationError
# with a human-readable cause otherwise.

class AtLeast(object):
    '''Verification mode: passes when the call happened at least N times.'''

    def __init__(self, wanted_count):
        self.wanted_count = wanted_count

    def verify(self, invocation, actual_count):
        # Fewer actual invocations than wanted is the only failure case;
        # more than wanted is fine for "at least".
        if actual_count < self.wanted_count:
            raise VerificationError("\nWanted at least: %i, actual times: %i"
                                    % (self.wanted_count, actual_count))

    def __repr__(self):
        return "<%s wanted=%s>" % (type(self).__name__, self.wanted_count)


class AtMost(object):
    '''Verification mode: passes when the call happened at most N times.'''

    def __init__(self, wanted_count):
        self.wanted_count = wanted_count

    def verify(self, invocation, actual_count):
        # More actual invocations than wanted is the only failure case;
        # zero invocations satisfies "at most".
        if actual_count > self.wanted_count:
            raise VerificationError("\nWanted at most: %i, actual times: %i"
                                    % (self.wanted_count, actual_count))

    def __repr__(self):
        return "<%s wanted=%s>" % (type(self).__name__, self.wanted_count)


class Between(object):
    '''Verification mode: passes when the count is in [wanted_from, wanted_to].

    Both bounds are inclusive.
    '''

    def __init__(self, wanted_from, wanted_to):
        self.wanted_from = wanted_from
        self.wanted_to = wanted_to

    def verify(self, invocation, actual_count):
        # Outside the inclusive [wanted_from, wanted_to] range -> failure.
        if actual_count < self.wanted_from or actual_count > self.wanted_to:
            raise VerificationError(
                "\nWanted between: [%i, %i], actual times: %i"
                % (self.wanted_from, self.wanted_to, actual_count))

    def __repr__(self):
        return "<%s [%s, %s]>" % (
            type(self).__name__, self.wanted_from, self.wanted_to)


class Times(object):
    '''Verification mode: passes when the call happened exactly N times.'''

    def __init__(self, wanted_count):
        self.wanted_count = wanted_count

    def verify(self, invocation, actual_count):
        if actual_count == self.wanted_count:
            return

        if actual_count == 0:
            # Never invoked at all: build a helpful message that shows what
            # *was* invoked instead.  Prefer invocations of the same method
            # name; fall back to all invocations on the mock, then to a
            # 'Nothing' placeholder.
            invocations = (
                [
                    invoc
                    for invoc in invocation.mock.invocations
                    if invoc.method_name == invocation.method_name
                ]
                or invocation.mock.invocations
                or ['Nothing']
            )
            raise VerificationError(
                """
Wanted but not invoked:

    %s

Instead got:

    %s

""" % (
                    invocation,
                    "\n    ".join(
                        str(invoc)
                        for invoc in reversed(invocations)
                    )
                )
            )
        else:
            if self.wanted_count == 0:
                # wanted exactly zero, but it happened -> "unwanted" wording.
                raise VerificationError(
                    "\nUnwanted invocation of %s, times: %i"
                    % (invocation, actual_count))
            else:
                raise VerificationError("\nWanted times: %i, actual times: %i"
                                        % (self.wanted_count, actual_count))

    def __repr__(self):
        return "<%s wanted=%s>" % (type(self).__name__, self.wanted_count)


class InOrder(object):
    '''Verifies invocations in order.

    Verifies if invocation was in expected order, and if yes -- degrades to
    original Verifier (AtLeast, Times, Between, ...).
    '''

    def __init__(self, original_verification):
        '''
        @param original_verification: Original verification to degrade to if
                                      order of invocation was ok.
        '''
        self.original_verification = original_verification

    def verify(self, wanted_invocation, count):
        # Walk the recorded invocations from oldest to newest (the list is
        # stored newest-first, hence reversed) and find the first one that
        # has not been consumed by an earlier in-order check.
        for invocation in reversed(wanted_invocation.mock.invocations):
            if not invocation.verified_inorder:
                if not wanted_invocation.matches(invocation):
                    raise VerificationError(
                        '\nWanted %s to be invoked,'
                        '\ngot %s instead.' %
                        (wanted_invocation, invocation))
                # Mark it consumed so the next in-order check starts after it.
                invocation.verified_inorder = True
                break
        # proceed with original verification
        self.original_verification.verify(wanted_invocation, count)


# Count alias: a wanted count of zero, i.e. "this call must never happen".
never = 0
mit
gnoack/ukechord
chordpro.py
1
4308
"""Read ChordPro files and output them through a PDFWriter object""" import re import song import uke class ChordProError(Exception): """Error in a ChordPro input.""" pass def _analyze_chordpro_textline(line): """Analyze the text and chords in a line of text. Args: line: The line of text, with chords in square brackets. Returns: A list of (chord, textchunk) tuples. The chord is None for a leading piece of text without preceding chord. Example: Input: "This is [Dm]an example [C]line." Output: [(None, "This is "), ("Dm", "an example "), ("C", "line.")] """ matches = list(re.finditer(r"\[([^\]]+)\]([^\[]*)", line)) if matches: result = [] if matches[0].start(0): result.append((None, line[:matches[0].start(0)])) for match in matches: result.append(match.groups()) return result return [(None, line)] def _chordpro_line(line): """Analyze a ChordPro line into a key value pair. For commands of the form "{key:value}", those will be the key and value. For empty lines, key is "$empty", and value is None. For text lines, returns "$lyrics" as key and a list of (chord, text) tuples as value """ line = line.strip() if not line or line.startswith("#"): return ("$empty", None) if line.startswith("{") and line.endswith("}"): key, unused_colon, value = line[1:-1].partition(":") return (key, value) else: return ("$lyrics", _analyze_chordpro_textline(line)) def _parse_chord_definition(value): # TODO: Is it required to define 'fingers' in each chord definition? 
match = re.match( r"\s+(?P<name>[A-Za-z0-9/+#]*)\s+" r"frets\s+(?P<frets>[\d\s]+)" r"fingers\s+(?P<fingers>[\d\s]+)$", value) # TODO: Implement finger positioning support # TODO: Catch too high fret values if not match: raise ChordProError("Chord definition parsing failed", value) frets = [int(fret) for fret in match.group('frets').split(' ') if fret] if any(fret > uke.MAX_FRET for fret in frets): raise ChordProError("Frets beyond %d don't exist.", uke.MAX_FRET) return match.group('name'), tuple(frets) def _convert_lines_to_ast_nodes(lines, chords, end_of_section_markers=()): result = [] for key, value in lines: if key in end_of_section_markers: break elif key == "$empty": pass # ignore elif key in ("$lyrics", "comment"): if key == "$lyrics": first_verse_item = song.Line(value) elif key == "comment": first_verse_item = song.Comment(value) else: raise ChordProError("Should never happen. - Programming error") # Text if end_of_section_markers: # If we're in a section, lines are fine. result.append(first_verse_item) else: verse_lines = _convert_lines_to_ast_nodes( lines, chords=chords, end_of_section_markers=("$empty")) result.append(song.Verse([first_verse_item] + verse_lines)) elif key in ("soc", "start-of-chorus", "start_of_chorus"): if end_of_section_markers: raise ChordProError("ChordPro: Nested choruses are not supported.") result.append(song.Chorus( _convert_lines_to_ast_nodes( lines, chords=chords, end_of_section_markers=("eoc", "end-of-chorus", "end_of_chorus")))) elif key == "define": name, frets = _parse_chord_definition(value) chords[name] = frets elif key in ("title", "subtitle"): continue # Handled earlier. elif key == "fontsize": # TODO: How to handle font size? pass # Should translate to pdf_writer.setFontsize(int(value)) elif key in ("eoc", "end-of-chorus", "end_of_chorus"): # If not already part of breaking condition. 
raise ChordProError( "End-of-chorus ChordPro command without matching start.") else: raise ChordProError("Unknown ChordPro command: %s", key) return result def to_ast(infile): lines = [_chordpro_line(line) for line in infile.readlines()] keys_and_values = dict(lines) title = keys_and_values.get("title", "").strip() subtitle = keys_and_values.get("subtitle", "").strip() chords = {} children = _convert_lines_to_ast_nodes(iter(lines), chords=chords) return song.Song(children, title=title, subtitle=subtitle, chords=chords)
apache-2.0
remind101/stacker_blueprints
stacker_blueprints/policies.py
1
6595
"""Reusable awacs/troposphere IAM policy builders for stacker blueprints."""

from awacs.aws import (
    Action,
    Allow,
    Policy,
    Principal,
    Statement,
)

from troposphere import (
    Sub,
    Join,
    Region,
    AccountId,
    AWSHelperFn
)

from awacs import (
    sts,
    s3,
    logs,
    ec2,
    dynamodb,
    cloudwatch,
)


def make_simple_assume_statement(*principals):
    """Return an sts:AssumeRole Statement for the given service principals."""
    return Statement(
        Principal=Principal('Service', principals),
        Effect=Allow,
        Action=[sts.AssumeRole])


def make_simple_assume_policy(*principals):
    """Return a trust Policy allowing the given service principals to assume."""
    return Policy(
        Statement=[
            make_simple_assume_statement(*principals)])


def dynamodb_arn(table_name):
    """Return an ARN string for the given DynamoDB table name.

    NOTE(review): the region and account-id segments are left empty
    ('arn:aws:dynamodb:::table/...'); confirm IAM accepts this for the
    intended use before relying on it as a fully-qualified ARN.
    """
    return 'arn:aws:dynamodb:::table/{}'.format(table_name)


def dynamodb_arns(table_names):
    """Return ARN strings for a list of DynamoDB table names."""
    return [dynamodb_arn(table_name) for table_name in table_names]


def s3_arn(bucket):
    """Return the ARN for an S3 bucket.

    Accepts either a plain string bucket name or a troposphere helper
    (e.g. a Ref), in which case a Sub expression is returned.
    """
    if isinstance(bucket, AWSHelperFn):
        return Sub('arn:aws:s3:::${Bucket}', Bucket=bucket)
    else:
        return 'arn:aws:s3:::%s' % bucket


def s3_objects_arn(bucket, folder="*"):
    """Return the ARN covering objects under `folder` in an S3 bucket.

    Like s3_arn, accepts a string or a troposphere helper for the bucket.
    """
    if isinstance(bucket, AWSHelperFn):
        return Sub('arn:aws:s3:::${Bucket}/%s' % folder, Bucket=bucket)
    else:
        return 'arn:aws:s3:::%s/%s' % (bucket, folder)


def read_only_s3_bucket_policy_statements(buckets, folder="*"):
    """ Read only policy an s3 bucket. """
    list_buckets = [s3_arn(b) for b in buckets]
    object_buckets = [s3_objects_arn(b, folder) for b in buckets]

    bucket_resources = list_buckets + object_buckets

    return [
        Statement(
            Effect=Allow,
            Resource=[s3_arn("*")],
            Action=[s3.ListAllMyBuckets]
        ),
        Statement(
            Effect=Allow,
            Resource=bucket_resources,
            # Wildcard actions: every s3:Get* and s3:List* operation.
            Action=[Action('s3', 'Get*'), Action('s3', 'List*')]
        )
    ]


def read_only_s3_bucket_policy(buckets):
    """Return a read-only Policy for the given S3 buckets."""
    return Policy(Statement=read_only_s3_bucket_policy_statements(buckets))


def read_write_s3_bucket_policy_statements(buckets, folder="*"):
    """Return Statements granting read/write object access to the buckets."""
    list_buckets = [s3_arn(b) for b in buckets]
    object_buckets = [s3_objects_arn(b, folder) for b in buckets]
    return [
        # NOTE(review): this statement uses the string "Allow" while the
        # others use the awacs Allow constant; both render the same JSON.
        Statement(
            Effect="Allow",
            Action=[
                s3.GetBucketLocation,
                s3.ListAllMyBuckets,
            ],
            Resource=[s3_arn("*")]
        ),
        Statement(
            Effect=Allow,
            Action=[
                s3.ListBucket,
                s3.GetBucketVersioning,
            ],
            Resource=list_buckets,
        ),
        Statement(
            Effect=Allow,
            Action=[
                s3.GetObject,
                s3.PutObject,
                s3.PutObjectAcl,
                s3.DeleteObject,
                s3.GetObjectVersion,
                s3.DeleteObjectVersion,
            ],
            Resource=object_buckets,
        ),
    ]


def read_write_s3_bucket_policy(buckets):
    """Return a read/write Policy for the given S3 buckets."""
    return Policy(Statement=read_write_s3_bucket_policy_statements(buckets))


def static_website_bucket_policy(bucket):
    """
    Attach this policy directly to an S3 bucket to make it a static website.
    This policy grants read access to **all unauthenticated** users.
    """
    return Policy(
        Statement=[
            Statement(
                Effect=Allow,
                Principal=Principal("*"),
                Action=[s3.GetObject],
                Resource=[s3_objects_arn(bucket)],
            )
        ]
    )


def log_stream_arn(log_group_name, log_stream_name):
    """Return a Join expression building a CloudWatch Logs stream ARN.

    Region and AccountId are resolved by CloudFormation at deploy time.
    """
    return Join(
        '',
        [
            "arn:aws:logs:", Region, ":", AccountId, ":log-group:",
            log_group_name, ":log-stream:", log_stream_name
        ]
    )


def write_to_cloudwatch_logs_stream_statements(log_group_name,
                                               log_stream_name):
    """Return Statements allowing PutLogEvents on one specific log stream."""
    return [
        Statement(
            Effect=Allow,
            Action=[logs.PutLogEvents],
            Resource=[log_stream_arn(log_group_name, log_stream_name)]
        )
    ]


def write_to_cloudwatch_logs_stream_policy(log_group_name, log_stream_name):
    """Return a Policy allowing writes to one specific log stream."""
    return Policy(
        Statement=write_to_cloudwatch_logs_stream_statements(log_group_name,
                                                             log_stream_name)
    )


def cloudwatch_logs_write_statements(log_group=None):
    """Return Statements for creating and writing CloudWatch Logs.

    If `log_group` is given, access is scoped to that group (and its
    streams); otherwise all log groups in the account are covered.
    """
    resources = ["arn:aws:logs:*:*:*"]
    if log_group:
        log_group_parts = ["arn:aws:logs:", Region, ":", AccountId,
                           ":log-group:", log_group]
        log_group_arn = Join("", log_group_parts)
        # The ':*' suffix covers every stream within the group.
        log_stream_wild = Join("", log_group_parts + [":*"])
        resources = [log_group_arn, log_stream_wild]

    return [
        Statement(
            Effect=Allow,
            Resource=resources,
            Action=[
                logs.CreateLogGroup,
                logs.CreateLogStream,
                logs.PutLogEvents
            ]
        )
    ]


def lambda_basic_execution_statements(function_name):
    """Return log-write Statements scoped to a Lambda function's log group."""
    log_group = Join("/", ["/aws/lambda", function_name])
    return cloudwatch_logs_write_statements(log_group)


def lambda_basic_execution_policy(function_name):
    """Return a Policy equivalent to AWSLambdaBasicExecutionRole logging."""
    return Policy(Statement=lambda_basic_execution_statements(function_name))


def lambda_vpc_execution_statements():
    """Allow Lambda to manipuate EC2 ENIs for VPC support."""
    return [
        Statement(
            Effect=Allow,
            Resource=['*'],
            Action=[
                ec2.CreateNetworkInterface,
                ec2.DescribeNetworkInterfaces,
                ec2.DeleteNetworkInterface,
            ]
        )
    ]


def flowlogs_assumerole_policy():
    """Return the trust policy for the VPC Flow Logs service."""
    return make_simple_assume_policy("vpc-flow-logs.amazonaws.com")


# reference: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dynamodb-table.html#cfn-dynamodb-table-examples-application-autoscaling  # noqa
def dynamodb_autoscaling_policy(tables):
    """Policy to allow AutoScaling a list of DynamoDB tables."""
    return Policy(
        Statement=[
            Statement(
                Effect=Allow,
                Resource=dynamodb_arns(tables),
                Action=[
                    dynamodb.DescribeTable,
                    dynamodb.UpdateTable,
                ]
            ),
            Statement(
                Effect=Allow,
                Resource=['*'],
                Action=[
                    cloudwatch.PutMetricAlarm,
                    cloudwatch.DescribeAlarms,
                    cloudwatch.GetMetricStatistics,
                    cloudwatch.SetAlarmState,
                    cloudwatch.DeleteAlarms,
                ]
            ),
        ]
    )
bsd-2-clause
alexlee-gk/visual_dynamics
visual_dynamics/envs/quad_panda3d_env.py
1
2967
import numpy as np

import citysim3d.envs

from visual_dynamics.envs import Panda3dEnv
from visual_dynamics.spaces import Space, BoxSpace, TranslationAxisAngleSpace
from visual_dynamics.utils.config import ConfigObject


class SimpleQuadPanda3dEnv(citysim3d.envs.SimpleQuadPanda3dEnv, Panda3dEnv):
    """citysim3d quadcopter environment wired into visual_dynamics' config
    (de)serialization machinery via Panda3dEnv."""

    def _get_config(self):
        """Extend the base config dict with this env's construction args so
        the environment can be re-instantiated from its config."""
        config = super(SimpleQuadPanda3dEnv, self)._get_config()
        car_action_space = self.car_action_space
        # Normalize the car action space into a serializable ConfigObject.
        if not isinstance(car_action_space, ConfigObject):
            car_action_space = Space.create(car_action_space)
        config.update({'action_space': self.action_space,
                       'sensor_names': self.sensor_names,
                       'camera_size': self.camera_size,
                       'camera_hfov': self.camera_hfov,
                       'offset': self.offset.tolist(),
                       'car_env_class': self.car_env_class,
                       'car_action_space': car_action_space,
                       'car_model_names': self.car_model_names})
        return config


class Point3dSimpleQuadPanda3dEnv(SimpleQuadPanda3dEnv):
    """SimpleQuadPanda3dEnv variant that additionally observes the car's
    3-D position relative to the quad's camera frame under the 'pos' key."""

    def __init__(self, action_space, **kwargs):
        super(Point3dSimpleQuadPanda3dEnv, self).__init__(action_space, **kwargs)
        # 'pos' is an unbounded 3-vector (x, y, z in the camera frame).
        self._observation_space.spaces['pos'] = BoxSpace(-np.inf, np.inf, shape=(3,))

    def observe(self):
        """Return the base observation augmented with the car position
        expressed in the camera node's coordinate frame."""
        obs = super(Point3dSimpleQuadPanda3dEnv, self).observe()
        obs['pos'] = np.array(self.car_node.getTransform(self.camera_node).getPos())
        return obs


def main():
    """Interactive demo: fly the quad with QuadTargetPolicy and display the
    camera image with OpenCV until ESC/'q' or Ctrl-C; then report FPS."""
    import os
    import numpy as np
    from panda3d.core import loadPrcFile
    # Panda3d config must be loaded before any rendering is set up.
    assert "CITYSIM3D_DIR" in os.environ
    loadPrcFile(os.path.expandvars('${CITYSIM3D_DIR}/config.prc'))

    # Bounds: (vx, vy, vz, yaw-rate); +/-pi/2 == +/-1.5707963267948966.
    action_space = TranslationAxisAngleSpace(np.array([-20, -10, -10, -1.5707963267948966]),
                                             np.array([20, 10, 10, 1.5707963267948966]))
    sensor_names = ['image', 'depth_image']
    env = SimpleQuadPanda3dEnv(action_space, sensor_names)

    import time
    import cv2
    start_time = time.time()
    frames = 0

    from visual_dynamics.policies.quad_target_policy import QuadTargetPolicy
    pol = QuadTargetPolicy(env, (12, 18), (-np.pi / 2, np.pi / 2))

    obs = env.reset()
    pol.reset()
    image, depth_image = obs
    while True:
        try:
            env.render()
            # OpenCV expects BGR channel order for display.
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            cv2.imshow('Image window', image)
            key = cv2.waitKey(1)
            key &= 255
            if key == 27 or key == ord('q'):
                print("Pressed ESC or q, exiting")
                break

            quad_action = pol.act(obs)
            obs, _, _, _ = env.step(quad_action)
            image, depth_image = obs

            frames += 1
        except KeyboardInterrupt:
            break

    end_time = time.time()
    print("average FPS: {}".format(frames / (end_time - start_time)))


if __name__ == "__main__":
    main()
mit
vsajip/django
django/forms/util.py
5
3642
from __future__ import unicode_literals

from django.conf import settings
from django.utils.html import format_html, format_html_join
from django.utils.encoding import StrAndUnicode, force_unicode
# NOTE(review): mark_safe appears unused in this module -- possibly kept so
# other modules can import it from here; confirm before removing.
from django.utils.safestring import mark_safe
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _

# Import ValidationError so that it can be imported from this
# module to maintain backwards compatibility.
from django.core.exceptions import ValidationError


def flatatt(attrs):
    """
    Convert a dictionary of attributes to a single string.
    The returned string will contain a leading space followed by key="value",
    XML-style pairs.  It is assumed that the keys do not need to be
    XML-escaped.  If the passed dictionary is empty, then return an empty
    string.

    The result is passed through 'mark_safe'.
    """
    # format_html escapes the values and marks the result safe.
    return format_html_join('', ' {0}="{1}"', attrs.items())


class ErrorDict(dict, StrAndUnicode):
    """
    A collection of errors that knows how to display itself in various
    formats.

    The dictionary keys are the field names, and the values are the errors.
    """
    def __unicode__(self):
        # Default string form is the HTML <ul> rendering.
        return self.as_ul()

    def as_ul(self):
        """Render the errors as an HTML unordered list (field name + errors
        per <li>); empty string when there are no errors."""
        if not self: return ''
        return format_html('<ul class="errorlist">{0}</ul>',
                           format_html_join('', '<li>{0}{1}</li>',
                                            ((k, force_unicode(v))
                                             for k, v in self.items())
                                           ))

    def as_text(self):
        """Render the errors as plain text, one '* field' header with
        indented '  * error' lines per field."""
        return '\n'.join(['* %s\n%s' % (k, '\n'.join(['  * %s' % force_unicode(i) for i in v])) for k, v in self.items()])


class ErrorList(list, StrAndUnicode):
    """
    A collection of errors that knows how to display itself in various
    formats.
    """
    def __unicode__(self):
        # Default string form is the HTML <ul> rendering.
        return self.as_ul()

    def as_ul(self):
        """Render the errors as an HTML unordered list; empty string when
        there are no errors."""
        if not self: return ''
        return format_html('<ul class="errorlist">{0}</ul>',
                           format_html_join('', '<li>{0}</li>',
                                            ((force_unicode(e),) for e in self)
                                            )
                           )

    def as_text(self):
        """Render the errors as plain text, one '* error' line each."""
        if not self: return ''
        return '\n'.join(['* %s' % force_unicode(e) for e in self])

    def __repr__(self):
        return repr([force_unicode(e) for e in self])


# Utilities for time zone support in DateTimeField et al.

def from_current_timezone(value):
    """
    When time zone support is enabled, convert naive datetimes
    entered in the current time zone to aware datetimes.

    Raises ValidationError if the datetime is ambiguous or nonexistent in
    the current time zone (e.g. around DST transitions).
    """
    if settings.USE_TZ and value is not None and timezone.is_naive(value):
        current_timezone = timezone.get_current_timezone()
        try:
            return timezone.make_aware(value, current_timezone)
        except Exception:
            raise ValidationError(_('%(datetime)s couldn\'t be interpreted '
                                    'in time zone %(current_timezone)s; it '
                                    'may be ambiguous or it may not exist.')
                                  % {'datetime': value,
                                     'current_timezone': current_timezone})
    return value


def to_current_timezone(value):
    """
    When time zone support is enabled, convert aware datetimes
    to naive datetimes in the current time zone for display.
    """
    if settings.USE_TZ and value is not None and timezone.is_aware(value):
        current_timezone = timezone.get_current_timezone()
        return timezone.make_naive(value, current_timezone)
    return value
bsd-3-clause
willingc/oh-mainline
vendor/packages/twisted/doc/mail/examples/smtpclient_tls.py
22
4733
""" Demonstrate sending mail via SMTP while employing TLS and performing authentication. """ import sys from OpenSSL.SSL import SSLv3_METHOD from twisted.mail.smtp import ESMTPSenderFactory from twisted.python.usage import Options, UsageError from twisted.internet.ssl import ClientContextFactory from twisted.internet.defer import Deferred from twisted.internet import reactor def sendmail( authenticationUsername, authenticationSecret, fromAddress, toAddress, messageFile, smtpHost, smtpPort=25 ): """ @param authenticationUsername: The username with which to authenticate. @param authenticationSecret: The password with which to authenticate. @param fromAddress: The SMTP reverse path (ie, MAIL FROM) @param toAddress: The SMTP forward path (ie, RCPT TO) @param messageFile: A file-like object containing the headers and body of the message to send. @param smtpHost: The MX host to which to connect. @param smtpPort: The port number to which to connect. @return: A Deferred which will be called back when the message has been sent or which will errback if it cannot be sent. """ # Create a context factory which only allows SSLv3 and does not verify # the peer's certificate. 
contextFactory = ClientContextFactory() contextFactory.method = SSLv3_METHOD resultDeferred = Deferred() senderFactory = ESMTPSenderFactory( authenticationUsername, authenticationSecret, fromAddress, toAddress, messageFile, resultDeferred, contextFactory=contextFactory) reactor.connectTCP(smtpHost, smtpPort, senderFactory) return resultDeferred class SendmailOptions(Options): synopsis = "smtpclient_tls.py [options]" optParameters = [ ('username', 'u', None, 'The username with which to authenticate to the SMTP server.'), ('password', 'p', None, 'The password with which to authenticate to the SMTP server.'), ('from-address', 'f', None, 'The address from which to send the message.'), ('to-address', 't', None, 'The address to which to send the message.'), ('message', 'm', None, 'The filename which contains the message to send.'), ('smtp-host', 'h', None, 'The host through which to send the message.'), ('smtp-port', None, '25', 'The port number on smtp-host to which to connect.')] def postOptions(self): """ Parse integer parameters, open the message file, and make sure all required parameters have been specified. 
""" try: self['smtp-port'] = int(self['smtp-port']) except ValueError: raise UsageError("--smtp-port argument must be an integer.") if self['username'] is None: raise UsageError( "Must specify authentication username with --username") if self['password'] is None: raise UsageError( "Must specify authentication password with --password") if self['from-address'] is None: raise UsageError("Must specify from address with --from-address") if self['to-address'] is None: raise UsageError("Must specify from address with --to-address") if self['smtp-host'] is None: raise UsageError("Must specify smtp host with --smtp-host") if self['message'] is None: raise UsageError( "Must specify a message file to send with --message") try: self['message'] = file(self['message']) except Exception, e: raise UsageError(e) def cbSentMessage(result): """ Called when the message has been sent. Report success to the user and then stop the reactor. """ print "Message sent" reactor.stop() def ebSentMessage(err): """ Called if the message cannot be sent. Report the failure to the user and then stop the reactor. """ err.printTraceback() reactor.stop() def main(args=None): """ Parse arguments and send an email based on them. """ o = SendmailOptions() try: o.parseOptions(args) except UsageError, e: raise SystemExit(e) else: from twisted.python import log log.startLogging(sys.stdout) result = sendmail( o['username'], o['password'], o['from-address'], o['to-address'], o['message'], o['smtp-host'], o['smtp-port']) result.addCallbacks(cbSentMessage, ebSentMessage) reactor.run() if __name__ == '__main__': main(sys.argv[1:])
agpl-3.0
samdoran/ansible
lib/ansible/plugins/__init__.py
15
19872
# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com> # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> and others # (c) 2017, Toshio Kuratomi <tkuratomi@ansible.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import glob import imp import os import os.path import sys import warnings from collections import defaultdict from ansible import constants as C from ansible.module_utils._text import to_text try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() # Global so that all instances of a PluginLoader will share the caches MODULE_CACHE = {} PATH_CACHE = {} PLUGIN_PATH_CACHE = {} def get_all_plugin_loaders(): return [(name, obj) for (name, obj) in globals().items() if isinstance(obj, PluginLoader)] class PluginLoader: ''' PluginLoader loads plugins from the configured plugin directories. It searches for plugins by iterating through the combined list of play basedirs, configured paths, and the python path. The first match is used. 
''' def __init__(self, class_name, package, config, subdir, aliases={}, required_base_class=None): self.class_name = class_name self.base_class = required_base_class self.package = package self.subdir = subdir self.aliases = aliases if config and not isinstance(config, list): config = [config] elif not config: config = [] self.config = config if class_name not in MODULE_CACHE: MODULE_CACHE[class_name] = {} if class_name not in PATH_CACHE: PATH_CACHE[class_name] = None if class_name not in PLUGIN_PATH_CACHE: PLUGIN_PATH_CACHE[class_name] = defaultdict(dict) self._module_cache = MODULE_CACHE[class_name] self._paths = PATH_CACHE[class_name] self._plugin_path_cache = PLUGIN_PATH_CACHE[class_name] self._extra_dirs = [] self._searched_paths = set() def __setstate__(self, data): ''' Deserializer. ''' class_name = data.get('class_name') package = data.get('package') config = data.get('config') subdir = data.get('subdir') aliases = data.get('aliases') base_class = data.get('base_class') PATH_CACHE[class_name] = data.get('PATH_CACHE') PLUGIN_PATH_CACHE[class_name] = data.get('PLUGIN_PATH_CACHE') self.__init__(class_name, package, config, subdir, aliases, base_class) self._extra_dirs = data.get('_extra_dirs', []) self._searched_paths = data.get('_searched_paths', set()) def __getstate__(self): ''' Serializer. 
''' return dict( class_name=self.class_name, base_class=self.base_class, package=self.package, config=self.config, subdir=self.subdir, aliases=self.aliases, _extra_dirs=self._extra_dirs, _searched_paths=self._searched_paths, PATH_CACHE=PATH_CACHE[self.class_name], PLUGIN_PATH_CACHE=PLUGIN_PATH_CACHE[self.class_name], ) def format_paths(self, paths): ''' Returns a string suitable for printing of the search path ''' # Uses a list to get the order right ret = [] for i in paths: if i not in ret: ret.append(i) return os.pathsep.join(ret) def print_paths(self): return self.format_paths(self._get_paths()) def _all_directories(self, dir): results = [] results.append(dir) for root, subdirs, files in os.walk(dir, followlinks=True): if '__init__.py' in files: for x in subdirs: results.append(os.path.join(root, x)) return results def _get_package_paths(self, subdirs=True): ''' Gets the path of a Python package ''' if not self.package: return [] if not hasattr(self, 'package_path'): m = __import__(self.package) parts = self.package.split('.')[1:] for parent_mod in parts: m = getattr(m, parent_mod) self.package_path = os.path.dirname(m.__file__) if subdirs: return self._all_directories(self.package_path) return [self.package_path] def _get_paths(self, subdirs=True): ''' Return a list of paths to search for plugins in ''' # FIXME: This is potentially buggy if subdirs is sometimes True and # sometimes False. In current usage, everything calls this with # subdirs=True except for module_utils_loader which always calls it # with subdirs=False. So there currently isn't a problem with this # caching. 
if self._paths is not None: return self._paths ret = self._extra_dirs[:] # look in any configured plugin paths, allow one level deep for subcategories if self.config is not None: for path in self.config: path = os.path.realpath(os.path.expanduser(path)) if subdirs: contents = glob.glob("%s/*" % path) + glob.glob("%s/*/*" % path) for c in contents: if os.path.isdir(c) and c not in ret: ret.append(c) if path not in ret: ret.append(path) # look for any plugins installed in the package subtree # Note package path always gets added last so that every other type of # path is searched before it. ret.extend(self._get_package_paths(subdirs=subdirs)) # HACK: because powershell modules are in the same directory # hierarchy as other modules we have to process them last. This is # because powershell only works on windows but the other modules work # anywhere (possibly including windows if the correct language # interpreter is installed). the non-powershell modules can have any # file extension and thus powershell modules are picked up in that. # The non-hack way to fix this is to have powershell modules be # a different PluginLoader/ModuleLoader. But that requires changing # other things too (known thing to change would be PATHS_CACHE, # PLUGIN_PATHS_CACHE, and MODULE_CACHE. Since those three dicts key # on the class_name and neither regular modules nor powershell modules # would have class_names, they would not work as written. 
reordered_paths = [] win_dirs = [] for path in ret: if path.endswith('windows'): win_dirs.append(path) else: reordered_paths.append(path) reordered_paths.extend(win_dirs) # cache and return the result self._paths = reordered_paths return reordered_paths def add_directory(self, directory, with_subdir=False): ''' Adds an additional directory to the search path ''' directory = os.path.realpath(directory) if directory is not None: if with_subdir: directory = os.path.join(directory, self.subdir) if directory not in self._extra_dirs: # append the directory and invalidate the path cache self._extra_dirs.append(directory) self._paths = None def find_plugin(self, name, mod_type='', ignore_deprecated=False): ''' Find a plugin named name ''' if mod_type: suffix = mod_type elif self.class_name: # Ansible plugins that run in the controller process (most plugins) suffix = '.py' else: # Only Ansible Modules. Ansible modules can be any executable so # they can have any suffix suffix = '' # The particular cache to look for modules within. This matches the # requested mod_type pull_cache = self._plugin_path_cache[suffix] try: return pull_cache[name] except KeyError: # Cache miss. Now let's find the plugin pass # TODO: Instead of using the self._paths cache (PATH_CACHE) and # self._searched_paths we could use an iterator. Before enabling that # we need to make sure we don't want to add additional directories # (add_directory()) once we start using the iterator. Currently, it # looks like _get_paths() never forces a cache refresh so if we expect # additional directories to be added later, it is buggy. 
for path in (p for p in self._get_paths() if p not in self._searched_paths and os.path.isdir(p)): try: full_paths = (os.path.join(path, f) for f in os.listdir(path)) except OSError as e: display.warning("Error accessing plugin paths: %s" % to_text(e)) for full_path in (f for f in full_paths if os.path.isfile(f) and not f.endswith('__init__.py')): full_name = os.path.basename(full_path) # HACK: We have no way of executing python byte # compiled files as ansible modules so specifically exclude them # FIXME: I believe this is only correct for modules and # module_utils. For all other plugins we want .pyc and .pyo should # bew valid if full_path.endswith(('.pyc', '.pyo')): continue splitname = os.path.splitext(full_name) base_name = splitname[0] try: extension = splitname[1] except IndexError: extension = '' # Module found, now enter it into the caches that match # this file if base_name not in self._plugin_path_cache['']: self._plugin_path_cache[''][base_name] = full_path if full_name not in self._plugin_path_cache['']: self._plugin_path_cache[''][full_name] = full_path if base_name not in self._plugin_path_cache[extension]: self._plugin_path_cache[extension][base_name] = full_path if full_name not in self._plugin_path_cache[extension]: self._plugin_path_cache[extension][full_name] = full_path self._searched_paths.add(path) try: return pull_cache[name] except KeyError: # Didn't find the plugin in this directory. Load modules from # the next one pass # if nothing is found, try finding alias/deprecated if not name.startswith('_'): alias_name = '_' + name # We've already cached all the paths at this point if alias_name in pull_cache: if not ignore_deprecated and not os.path.islink(pull_cache[alias_name]): display.deprecated('%s is kept for backwards compatibility ' 'but usage is discouraged. The module ' 'documentation details page may explain ' 'more about this rationale.' 
% name.lstrip('_')) return pull_cache[alias_name] return None def has_plugin(self, name): ''' Checks if a plugin named name exists ''' return self.find_plugin(name) is not None __contains__ = has_plugin def _load_module_source(self, name, path): if name in sys.modules: # See https://github.com/ansible/ansible/issues/13110 return sys.modules[name] with warnings.catch_warnings(): warnings.simplefilter("ignore", RuntimeWarning) with open(path, 'rb') as module_file: module = imp.load_source(name, path, module_file) return module def get(self, name, *args, **kwargs): ''' instantiates a plugin of the given name using arguments ''' found_in_cache = True class_only = kwargs.pop('class_only', False) if name in self.aliases: name = self.aliases[name] path = self.find_plugin(name) if path is None: return None if path not in self._module_cache: self._module_cache[path] = self._load_module_source('.'.join([self.package, name]), path) found_in_cache = False obj = getattr(self._module_cache[path], self.class_name) if self.base_class: # The import path is hardcoded and should be the right place, # so we are not expecting an ImportError. module = __import__(self.package, fromlist=[self.base_class]) # Check whether this obj has the required base class. try: plugin_class = getattr(module, self.base_class) except AttributeError: return None if not issubclass(obj, plugin_class): return None self._display_plugin_load(self.class_name, name, self._searched_paths, path, found_in_cache=found_in_cache, class_only=class_only) if not class_only: try: obj = obj(*args, **kwargs) except TypeError as e: if "abstract" in e.args[0]: # Abstract Base Class. The found plugin file does not # fully implement the defined interface. 
return None raise # set extra info on the module, in case we want it later setattr(obj, '_original_path', path) setattr(obj, '_load_name', name) return obj def _display_plugin_load(self, class_name, name, searched_paths, path, found_in_cache=None, class_only=None): msg = 'Loading %s \'%s\' from %s' % (class_name, os.path.basename(name), path) if len(searched_paths) > 1: msg = '%s (searched paths: %s)' % (msg, self.format_paths(searched_paths)) if found_in_cache or class_only: msg = '%s (found_in_cache=%s, class_only=%s)' % (msg, found_in_cache, class_only) display.debug(msg) def all(self, *args, **kwargs): ''' instantiates all plugins with the same arguments ''' path_only = kwargs.pop('path_only', False) class_only = kwargs.pop('class_only', False) all_matches = [] found_in_cache = True for i in self._get_paths(): all_matches.extend(glob.glob(os.path.join(i, "*.py"))) for path in sorted(all_matches, key=lambda match: os.path.basename(match)): name, _ = os.path.splitext(path) if '__init__' in name: continue if path_only: yield path continue if path not in self._module_cache: self._module_cache[path] = self._load_module_source(name, path) found_in_cache = False try: obj = getattr(self._module_cache[path], self.class_name) except AttributeError as e: display.warning("Skipping plugin (%s) as it seems to be invalid: %s" % (path, to_text(e))) continue if self.base_class: # The import path is hardcoded and should be the right place, # so we are not expecting an ImportError. module = __import__(self.package, fromlist=[self.base_class]) # Check whether this obj has the required base class. 
try: plugin_class = getattr(module, self.base_class) except AttributeError: continue if not issubclass(obj, plugin_class): continue self._display_plugin_load(self.class_name, name, self._searched_paths, path, found_in_cache=found_in_cache, class_only=class_only) if not class_only: try: obj = obj(*args, **kwargs) except TypeError as e: display.warning("Skipping plugin (%s) as it seems to be incomplete: %s" % (path, to_text(e))) # set extra info on the module, in case we want it later setattr(obj, '_original_path', path) setattr(obj, '_load_name', name) yield obj action_loader = PluginLoader( 'ActionModule', 'ansible.plugins.action', C.DEFAULT_ACTION_PLUGIN_PATH, 'action_plugins', required_base_class='ActionBase', ) cache_loader = PluginLoader( 'CacheModule', 'ansible.plugins.cache', C.DEFAULT_CACHE_PLUGIN_PATH, 'cache_plugins', ) callback_loader = PluginLoader( 'CallbackModule', 'ansible.plugins.callback', C.DEFAULT_CALLBACK_PLUGIN_PATH, 'callback_plugins', ) connection_loader = PluginLoader( 'Connection', 'ansible.plugins.connection', C.DEFAULT_CONNECTION_PLUGIN_PATH, 'connection_plugins', aliases={'paramiko': 'paramiko_ssh'}, required_base_class='ConnectionBase', ) shell_loader = PluginLoader( 'ShellModule', 'ansible.plugins.shell', 'shell_plugins', 'shell_plugins', ) module_loader = PluginLoader( '', 'ansible.modules', C.DEFAULT_MODULE_PATH, 'library', ) module_utils_loader = PluginLoader( '', 'ansible.module_utils', C.DEFAULT_MODULE_UTILS_PATH, 'module_utils', ) lookup_loader = PluginLoader( 'LookupModule', 'ansible.plugins.lookup', C.DEFAULT_LOOKUP_PLUGIN_PATH, 'lookup_plugins', required_base_class='LookupBase', ) filter_loader = PluginLoader( 'FilterModule', 'ansible.plugins.filter', C.DEFAULT_FILTER_PLUGIN_PATH, 'filter_plugins', ) test_loader = PluginLoader( 'TestModule', 'ansible.plugins.test', C.DEFAULT_TEST_PLUGIN_PATH, 'test_plugins' ) fragment_loader = PluginLoader( 'ModuleDocFragment', 'ansible.utils.module_docs_fragments', 
os.path.join(os.path.dirname(__file__), 'module_docs_fragments'), '', ) strategy_loader = PluginLoader( 'StrategyModule', 'ansible.plugins.strategy', C.DEFAULT_STRATEGY_PLUGIN_PATH, 'strategy_plugins', required_base_class='StrategyBase', ) terminal_loader = PluginLoader( 'TerminalModule', 'ansible.plugins.terminal', 'terminal_plugins', 'terminal_plugins' ) vars_loader = PluginLoader( 'VarsModule', 'ansible.plugins.vars', C.DEFAULT_VARS_PLUGIN_PATH, 'vars_plugins', ) cliconf_loader = PluginLoader( 'Cliconf', 'ansible.plugins.cliconf', 'cliconf_plugins', 'cliconf_plugins', required_base_class='CliconfBase' ) netconf_loader = PluginLoader( 'Netconf', 'ansible.plugins.netconf', 'netconf_plugins', 'netconf_plugins', required_base_class='NetconfBase' )
gpl-3.0
endlessm/chromium-browser
third_party/pyelftools/elftools/construct/core.py
24
44500
from struct import Struct as Packer from .lib.py3compat import BytesIO, advance_iterator, bchr from .lib import Container, ListContainer, LazyContainer #=============================================================================== # exceptions #=============================================================================== class ConstructError(Exception): __slots__ = [] class FieldError(ConstructError): __slots__ = [] class SizeofError(ConstructError): __slots__ = [] class AdaptationError(ConstructError): __slots__ = [] class ArrayError(ConstructError): __slots__ = [] class RangeError(ConstructError): __slots__ = [] class SwitchError(ConstructError): __slots__ = [] class SelectError(ConstructError): __slots__ = [] class TerminatorError(ConstructError): __slots__ = [] #=============================================================================== # abstract constructs #=============================================================================== class Construct(object): """ The mother of all constructs. This object is generally not directly instantiated, and it does not directly implement parsing and building, so it is largely only of interest to subclass implementors. The external user API: * parse() * parse_stream() * build() * build_stream() * sizeof() Subclass authors should not override the external methods. Instead, another API is available: * _parse() * _build() * _sizeof() There is also a flag API: * _set_flag() * _clear_flag() * _inherit_flags() * _is_flag() And stateful copying: * __getstate__() * __setstate__() Attributes and Inheritance ========================== All constructs have a name and flags. The name is used for naming struct members and context dictionaries. Note that the name can either be a string, or None if the name is not needed. A single underscore ("_") is a reserved name, and so are names starting with a less-than character ("<"). 
The name should be descriptive, short, and valid as a Python identifier, although these rules are not enforced. The flags specify additional behavioral information about this construct. Flags are used by enclosing constructs to determine a proper course of action. Flags are inherited by default, from inner subconstructs to outer constructs. The enclosing construct may set new flags or clear existing ones, as necessary. For example, if FLAG_COPY_CONTEXT is set, repeaters will pass a copy of the context for each iteration, which is necessary for OnDemand parsing. """ FLAG_COPY_CONTEXT = 0x0001 FLAG_DYNAMIC = 0x0002 FLAG_EMBED = 0x0004 FLAG_NESTING = 0x0008 __slots__ = ["name", "conflags"] def __init__(self, name, flags = 0): if name is not None: if type(name) is not str: raise TypeError("name must be a string or None", name) if name == "_" or name.startswith("<"): raise ValueError("reserved name", name) self.name = name self.conflags = flags def __repr__(self): return "%s(%r)" % (self.__class__.__name__, self.name) def _set_flag(self, flag): """ Set the given flag or flags. :param int flag: flag to set; may be OR'd combination of flags """ self.conflags |= flag def _clear_flag(self, flag): """ Clear the given flag or flags. :param int flag: flag to clear; may be OR'd combination of flags """ self.conflags &= ~flag def _inherit_flags(self, *subcons): """ Pull flags from subconstructs. """ for sc in subcons: self._set_flag(sc.conflags) def _is_flag(self, flag): """ Check whether a given flag is set. :param int flag: flag to check """ return bool(self.conflags & flag) def __getstate__(self): """ Obtain a dictionary representing this construct's state. 
""" attrs = {} if hasattr(self, "__dict__"): attrs.update(self.__dict__) slots = [] c = self.__class__ while c is not None: if hasattr(c, "__slots__"): slots.extend(c.__slots__) c = c.__base__ for name in slots: if hasattr(self, name): attrs[name] = getattr(self, name) return attrs def __setstate__(self, attrs): """ Set this construct's state to a given state. """ for name, value in attrs.items(): setattr(self, name, value) def __copy__(self): """returns a copy of this construct""" self2 = object.__new__(self.__class__) self2.__setstate__(self.__getstate__()) return self2 def parse(self, data): """ Parse an in-memory buffer. Strings, buffers, memoryviews, and other complete buffers can be parsed with this method. """ return self.parse_stream(BytesIO(data)) def parse_stream(self, stream): """ Parse a stream. Files, pipes, sockets, and other streaming sources of data are handled by this method. """ return self._parse(stream, Container()) def _parse(self, stream, context): """ Override me in your subclass. """ raise NotImplementedError() def build(self, obj): """ Build an object in memory. """ stream = BytesIO() self.build_stream(obj, stream) return stream.getvalue() def build_stream(self, obj, stream): """ Build an object directly into a stream. """ self._build(obj, stream, Container()) def _build(self, obj, stream, context): """ Override me in your subclass. """ raise NotImplementedError() def sizeof(self, context=None): """ Calculate the size of this object, optionally using a context. Some constructs have no fixed size and can only know their size for a given hunk of data; these constructs will raise an error if they are not passed a context. 
:param ``Container`` context: contextual data :returns: int of the length of this construct :raises SizeofError: the size could not be determined """ if context is None: context = Container() try: return self._sizeof(context) except Exception as e: raise SizeofError(e) def _sizeof(self, context): """ Override me in your subclass. """ raise SizeofError("Raw Constructs have no size!") class Subconstruct(Construct): """ Abstract subconstruct (wraps an inner construct, inheriting its name and flags). Parameters: * subcon - the construct to wrap """ __slots__ = ["subcon"] def __init__(self, subcon): Construct.__init__(self, subcon.name, subcon.conflags) self.subcon = subcon def _parse(self, stream, context): return self.subcon._parse(stream, context) def _build(self, obj, stream, context): self.subcon._build(obj, stream, context) def _sizeof(self, context): return self.subcon._sizeof(context) class Adapter(Subconstruct): """ Abstract adapter: calls _decode for parsing and _encode for building. 
Parameters: * subcon - the construct to wrap """ __slots__ = [] def _parse(self, stream, context): return self._decode(self.subcon._parse(stream, context), context) def _build(self, obj, stream, context): self.subcon._build(self._encode(obj, context), stream, context) def _decode(self, obj, context): raise NotImplementedError() def _encode(self, obj, context): raise NotImplementedError() #=============================================================================== # Fields #=============================================================================== def _read_stream(stream, length): if length < 0: raise ValueError("length must be >= 0", length) data = stream.read(length) if len(data) != length: raise FieldError("expected %d, found %d" % (length, len(data))) return data def _write_stream(stream, length, data): if length < 0: raise ValueError("length must be >= 0", length) if len(data) != length: raise FieldError("expected %d, found %d" % (length, len(data))) stream.write(data) class StaticField(Construct): """ A fixed-size byte field. :param str name: field name :param int length: number of bytes in the field """ __slots__ = ["length"] def __init__(self, name, length): Construct.__init__(self, name) self.length = length def _parse(self, stream, context): return _read_stream(stream, self.length) def _build(self, obj, stream, context): _write_stream(stream, self.length, obj) def _sizeof(self, context): return self.length class FormatField(StaticField): """ A field that uses ``struct`` to pack and unpack data. See ``struct`` documentation for instructions on crafting format strings. 
:param str name: name of the field :param str endianness: format endianness string; one of "<", ">", or "=" :param str format: a single format character """ __slots__ = ["packer"] def __init__(self, name, endianity, format): if endianity not in (">", "<", "="): raise ValueError("endianity must be be '=', '<', or '>'", endianity) if len(format) != 1: raise ValueError("must specify one and only one format char") self.packer = Packer(endianity + format) StaticField.__init__(self, name, self.packer.size) def __getstate__(self): attrs = StaticField.__getstate__(self) attrs["packer"] = attrs["packer"].format return attrs def __setstate__(self, attrs): attrs["packer"] = Packer(attrs["packer"]) return StaticField.__setstate__(attrs) def _parse(self, stream, context): try: return self.packer.unpack(_read_stream(stream, self.length))[0] except Exception as ex: raise FieldError(ex) def _build(self, obj, stream, context): try: _write_stream(stream, self.length, self.packer.pack(obj)) except Exception as ex: raise FieldError(ex) class MetaField(Construct): """ A variable-length field. The length is obtained at runtime from a function. :param str name: name of the field :param callable lengthfunc: callable that takes a context and returns length as an int >>> foo = Struct("foo", ... Byte("length"), ... MetaField("data", lambda ctx: ctx["length"]) ... 
) >>> foo.parse("\\x03ABC") Container(data = 'ABC', length = 3) >>> foo.parse("\\x04ABCD") Container(data = 'ABCD', length = 4) """ __slots__ = ["lengthfunc"] def __init__(self, name, lengthfunc): Construct.__init__(self, name) self.lengthfunc = lengthfunc self._set_flag(self.FLAG_DYNAMIC) def _parse(self, stream, context): return _read_stream(stream, self.lengthfunc(context)) def _build(self, obj, stream, context): _write_stream(stream, self.lengthfunc(context), obj) def _sizeof(self, context): return self.lengthfunc(context) #=============================================================================== # arrays and repeaters #=============================================================================== class MetaArray(Subconstruct): """ An array (repeater) of a meta-count. The array will iterate exactly `countfunc()` times. Will raise ArrayError if less elements are found. See also Array, Range and RepeatUntil. Parameters: * countfunc - a function that takes the context as a parameter and returns the number of elements of the array (count) * subcon - the subcon to repeat `countfunc()` times Example: MetaArray(lambda ctx: 5, UBInt8("foo")) """ __slots__ = ["countfunc"] def __init__(self, countfunc, subcon): Subconstruct.__init__(self, subcon) self.countfunc = countfunc self._clear_flag(self.FLAG_COPY_CONTEXT) self._set_flag(self.FLAG_DYNAMIC) def _parse(self, stream, context): obj = ListContainer() c = 0 count = self.countfunc(context) try: if self.subcon.conflags & self.FLAG_COPY_CONTEXT: while c < count: obj.append(self.subcon._parse(stream, context.__copy__())) c += 1 else: while c < count: obj.append(self.subcon._parse(stream, context)) c += 1 except ConstructError as ex: raise ArrayError("expected %d, found %d" % (count, c), ex) return obj def _build(self, obj, stream, context): count = self.countfunc(context) if len(obj) != count: raise ArrayError("expected %d, found %d" % (count, len(obj))) if self.subcon.conflags & self.FLAG_COPY_CONTEXT: for subobj in 
obj: self.subcon._build(subobj, stream, context.__copy__()) else: for subobj in obj: self.subcon._build(subobj, stream, context) def _sizeof(self, context): return self.subcon._sizeof(context) * self.countfunc(context) class Range(Subconstruct): """ A range-array. The subcon will iterate between `mincount` to `maxcount` times. If less than `mincount` elements are found, raises RangeError. See also GreedyRange and OptionalGreedyRange. The general-case repeater. Repeats the given unit for at least mincount times, and up to maxcount times. If an exception occurs (EOF, validation error), the repeater exits. If less than mincount units have been successfully parsed, a RangeError is raised. .. note:: This object requires a seekable stream for parsing. :param int mincount: the minimal count :param int maxcount: the maximal count :param Construct subcon: the subcon to repeat >>> c = Range(3, 7, UBInt8("foo")) >>> c.parse("\\x01\\x02") Traceback (most recent call last): ... construct.core.RangeError: expected 3..7, found 2 >>> c.parse("\\x01\\x02\\x03") [1, 2, 3] >>> c.parse("\\x01\\x02\\x03\\x04\\x05\\x06") [1, 2, 3, 4, 5, 6] >>> c.parse("\\x01\\x02\\x03\\x04\\x05\\x06\\x07") [1, 2, 3, 4, 5, 6, 7] >>> c.parse("\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\\x09") [1, 2, 3, 4, 5, 6, 7] >>> c.build([1,2]) Traceback (most recent call last): ... construct.core.RangeError: expected 3..7, found 2 >>> c.build([1,2,3,4]) '\\x01\\x02\\x03\\x04' >>> c.build([1,2,3,4,5,6,7,8]) Traceback (most recent call last): ... 
construct.core.RangeError: expected 3..7, found 8 """ __slots__ = ["mincount", "maxcout"] def __init__(self, mincount, maxcout, subcon): Subconstruct.__init__(self, subcon) self.mincount = mincount self.maxcout = maxcout self._clear_flag(self.FLAG_COPY_CONTEXT) self._set_flag(self.FLAG_DYNAMIC) def _parse(self, stream, context): obj = ListContainer() c = 0 try: if self.subcon.conflags & self.FLAG_COPY_CONTEXT: while c < self.maxcout: pos = stream.tell() obj.append(self.subcon._parse(stream, context.__copy__())) c += 1 else: while c < self.maxcout: pos = stream.tell() obj.append(self.subcon._parse(stream, context)) c += 1 except ConstructError as ex: if c < self.mincount: raise RangeError("expected %d to %d, found %d" % (self.mincount, self.maxcout, c), ex) stream.seek(pos) return obj def _build(self, obj, stream, context): if len(obj) < self.mincount or len(obj) > self.maxcout: raise RangeError("expected %d to %d, found %d" % (self.mincount, self.maxcout, len(obj))) cnt = 0 try: if self.subcon.conflags & self.FLAG_COPY_CONTEXT: for subobj in obj: if isinstance(obj, bytes): subobj = bchr(subobj) self.subcon._build(subobj, stream, context.__copy__()) cnt += 1 else: for subobj in obj: if isinstance(obj, bytes): subobj = bchr(subobj) self.subcon._build(subobj, stream, context) cnt += 1 except ConstructError as ex: if cnt < self.mincount: raise RangeError("expected %d to %d, found %d" % (self.mincount, self.maxcout, len(obj)), ex) def _sizeof(self, context): raise SizeofError("can't calculate size") class RepeatUntil(Subconstruct): """ An array that repeats until the predicate indicates it to stop. Note that the last element (which caused the repeat to exit) is included in the return value. Parameters: * predicate - a predicate function that takes (obj, context) and returns True if the stop-condition is met, or False to continue. * subcon - the subcon to repeat. 
Example: # will read chars until b\x00 (inclusive) RepeatUntil(lambda obj, ctx: obj == b"\x00", Field("chars", 1) ) """ __slots__ = ["predicate"] def __init__(self, predicate, subcon): Subconstruct.__init__(self, subcon) self.predicate = predicate self._clear_flag(self.FLAG_COPY_CONTEXT) self._set_flag(self.FLAG_DYNAMIC) def _parse(self, stream, context): obj = [] try: if self.subcon.conflags & self.FLAG_COPY_CONTEXT: while True: subobj = self.subcon._parse(stream, context.__copy__()) obj.append(subobj) if self.predicate(subobj, context): break else: while True: subobj = self.subcon._parse(stream, context) obj.append(subobj) if self.predicate(subobj, context): break except ConstructError as ex: raise ArrayError("missing terminator", ex) return obj def _build(self, obj, stream, context): terminated = False if self.subcon.conflags & self.FLAG_COPY_CONTEXT: for subobj in obj: self.subcon._build(subobj, stream, context.__copy__()) if self.predicate(subobj, context): terminated = True break else: for subobj in obj: subobj = bchr(subobj) self.subcon._build(subobj, stream, context.__copy__()) if self.predicate(subobj, context): terminated = True break if not terminated: raise ArrayError("missing terminator") def _sizeof(self, context): raise SizeofError("can't calculate size") #=============================================================================== # structures and sequences #=============================================================================== class Struct(Construct): """ A sequence of named constructs, similar to structs in C. The elements are parsed and built in the order they are defined. See also Embedded. Parameters: * name - the name of the structure * subcons - a sequence of subconstructs that make up this structure. * nested - a keyword-only argument that indicates whether this struct creates a nested context. The default is True. This parameter is considered "advanced usage", and may be removed in the future. 
Example: Struct("foo", UBInt8("first_element"), UBInt16("second_element"), Padding(2), UBInt8("third_element"), ) """ __slots__ = ["subcons", "nested"] def __init__(self, name, *subcons, **kw): self.nested = kw.pop("nested", True) if kw: raise TypeError("the only keyword argument accepted is 'nested'", kw) Construct.__init__(self, name) self.subcons = subcons self._inherit_flags(*subcons) self._clear_flag(self.FLAG_EMBED) def _parse(self, stream, context): if "<obj>" in context: obj = context["<obj>"] del context["<obj>"] else: obj = Container() if self.nested: context = Container(_ = context) for sc in self.subcons: if sc.conflags & self.FLAG_EMBED: context["<obj>"] = obj sc._parse(stream, context) else: subobj = sc._parse(stream, context) if sc.name is not None: obj[sc.name] = subobj context[sc.name] = subobj return obj def _build(self, obj, stream, context): if "<unnested>" in context: del context["<unnested>"] elif self.nested: context = Container(_ = context) for sc in self.subcons: if sc.conflags & self.FLAG_EMBED: context["<unnested>"] = True subobj = obj elif sc.name is None: subobj = None else: subobj = getattr(obj, sc.name) context[sc.name] = subobj sc._build(subobj, stream, context) def _sizeof(self, context): if self.nested: context = Container(_ = context) return sum(sc._sizeof(context) for sc in self.subcons) class Sequence(Struct): """ A sequence of unnamed constructs. The elements are parsed and built in the order they are defined. See also Embedded. Parameters: * name - the name of the structure * subcons - a sequence of subconstructs that make up this structure. * nested - a keyword-only argument that indicates whether this struct creates a nested context. The default is True. This parameter is considered "advanced usage", and may be removed in the future. 
Example: Sequence("foo", UBInt8("first_element"), UBInt16("second_element"), Padding(2), UBInt8("third_element"), ) """ __slots__ = [] def _parse(self, stream, context): if "<obj>" in context: obj = context["<obj>"] del context["<obj>"] else: obj = ListContainer() if self.nested: context = Container(_ = context) for sc in self.subcons: if sc.conflags & self.FLAG_EMBED: context["<obj>"] = obj sc._parse(stream, context) else: subobj = sc._parse(stream, context) if sc.name is not None: obj.append(subobj) context[sc.name] = subobj return obj def _build(self, obj, stream, context): if "<unnested>" in context: del context["<unnested>"] elif self.nested: context = Container(_ = context) objiter = iter(obj) for sc in self.subcons: if sc.conflags & self.FLAG_EMBED: context["<unnested>"] = True subobj = objiter elif sc.name is None: subobj = None else: subobj = advance_iterator(objiter) context[sc.name] = subobj sc._build(subobj, stream, context) class Union(Construct): """ a set of overlapping fields (like unions in C). when parsing, all fields read the same data; when building, only the first subcon (called "master") is used. 
Parameters: * name - the name of the union * master - the master subcon, i.e., the subcon used for building and calculating the total size * subcons - additional subcons Example: Union("what_are_four_bytes", UBInt32("one_dword"), Struct("two_words", UBInt16("first"), UBInt16("second")), Struct("four_bytes", UBInt8("a"), UBInt8("b"), UBInt8("c"), UBInt8("d") ), ) """ __slots__ = ["parser", "builder"] def __init__(self, name, master, *subcons, **kw): Construct.__init__(self, name) args = [Peek(sc) for sc in subcons] args.append(MetaField(None, lambda ctx: master._sizeof(ctx))) self.parser = Struct(name, Peek(master, perform_build = True), *args) self.builder = Struct(name, master) def _parse(self, stream, context): return self.parser._parse(stream, context) def _build(self, obj, stream, context): return self.builder._build(obj, stream, context) def _sizeof(self, context): return self.builder._sizeof(context) #=============================================================================== # conditional #=============================================================================== class Switch(Construct): """ A conditional branch. Switch will choose the case to follow based on the return value of keyfunc. If no case is matched, and no default value is given, SwitchError will be raised. See also Pass. Parameters: * name - the name of the construct * keyfunc - a function that takes the context and returns a key, which will ne used to choose the relevant case. * cases - a dictionary mapping keys to constructs. the keys can be any values that may be returned by keyfunc. * default - a default value to use when the key is not found in the cases. if not supplied, an exception will be raised when the key is not found. You can use the builtin construct Pass for 'do-nothing'. * include_key - whether or not to include the key in the return value of parsing. defualt is False. 
Example: Struct("foo", UBInt8("type"), Switch("value", lambda ctx: ctx.type, { 1 : UBInt8("spam"), 2 : UBInt16("spam"), 3 : UBInt32("spam"), 4 : UBInt64("spam"), } ), ) """ class NoDefault(Construct): def _parse(self, stream, context): raise SwitchError("no default case defined") def _build(self, obj, stream, context): raise SwitchError("no default case defined") def _sizeof(self, context): raise SwitchError("no default case defined") NoDefault = NoDefault("No default value specified") __slots__ = ["subcons", "keyfunc", "cases", "default", "include_key"] def __init__(self, name, keyfunc, cases, default = NoDefault, include_key = False): Construct.__init__(self, name) self._inherit_flags(*cases.values()) self.keyfunc = keyfunc self.cases = cases self.default = default self.include_key = include_key self._inherit_flags(*cases.values()) self._set_flag(self.FLAG_DYNAMIC) def _parse(self, stream, context): key = self.keyfunc(context) obj = self.cases.get(key, self.default)._parse(stream, context) if self.include_key: return key, obj else: return obj def _build(self, obj, stream, context): if self.include_key: key, obj = obj else: key = self.keyfunc(context) case = self.cases.get(key, self.default) case._build(obj, stream, context) def _sizeof(self, context): case = self.cases.get(self.keyfunc(context), self.default) return case._sizeof(context) class Select(Construct): """ Selects the first matching subconstruct. It will literally try each of the subconstructs, until one matches. Notes: * requires a seekable stream. Parameters: * name - the name of the construct * subcons - the subcons to try (order-sensitive) * include_name - a keyword only argument, indicating whether to include the name of the selected subcon in the return value of parsing. default is false. 
Example: Select("foo", UBInt64("large"), UBInt32("medium"), UBInt16("small"), UBInt8("tiny"), ) """ __slots__ = ["subcons", "include_name"] def __init__(self, name, *subcons, **kw): include_name = kw.pop("include_name", False) if kw: raise TypeError("the only keyword argument accepted " "is 'include_name'", kw) Construct.__init__(self, name) self.subcons = subcons self.include_name = include_name self._inherit_flags(*subcons) self._set_flag(self.FLAG_DYNAMIC) def _parse(self, stream, context): for sc in self.subcons: pos = stream.tell() context2 = context.__copy__() try: obj = sc._parse(stream, context2) except ConstructError: stream.seek(pos) else: context.__update__(context2) if self.include_name: return sc.name, obj else: return obj raise SelectError("no subconstruct matched") def _build(self, obj, stream, context): if self.include_name: name, obj = obj for sc in self.subcons: if sc.name == name: sc._build(obj, stream, context) return else: for sc in self.subcons: stream2 = BytesIO() context2 = context.__copy__() try: sc._build(obj, stream2, context2) except Exception: pass else: context.__update__(context2) stream.write(stream2.getvalue()) return raise SelectError("no subconstruct matched", obj) def _sizeof(self, context): raise SizeofError("can't calculate size") #=============================================================================== # stream manipulation #=============================================================================== class Pointer(Subconstruct): """ Changes the stream position to a given offset, where the construction should take place, and restores the stream position when finished. See also Anchor, OnDemand and OnDemandPointer. Notes: * requires a seekable stream. 
Parameters: * offsetfunc: a function that takes the context and returns an absolute stream position, where the construction would take place * subcon - the subcon to use at `offsetfunc()` Example: Struct("foo", UBInt32("spam_pointer"), Pointer(lambda ctx: ctx.spam_pointer, Array(5, UBInt8("spam")) ) ) """ __slots__ = ["offsetfunc"] def __init__(self, offsetfunc, subcon): Subconstruct.__init__(self, subcon) self.offsetfunc = offsetfunc def _parse(self, stream, context): newpos = self.offsetfunc(context) origpos = stream.tell() stream.seek(newpos) obj = self.subcon._parse(stream, context) stream.seek(origpos) return obj def _build(self, obj, stream, context): newpos = self.offsetfunc(context) origpos = stream.tell() stream.seek(newpos) self.subcon._build(obj, stream, context) stream.seek(origpos) def _sizeof(self, context): return 0 class Peek(Subconstruct): """ Peeks at the stream: parses without changing the stream position. See also Union. If the end of the stream is reached when peeking, returns None. Notes: * requires a seekable stream. Parameters: * subcon - the subcon to peek at * perform_build - whether or not to perform building. by default this parameter is set to False, meaning building is a no-op. Example: Peek(UBInt8("foo")) """ __slots__ = ["perform_build"] def __init__(self, subcon, perform_build = False): Subconstruct.__init__(self, subcon) self.perform_build = perform_build def _parse(self, stream, context): pos = stream.tell() try: return self.subcon._parse(stream, context) except FieldError: pass finally: stream.seek(pos) def _build(self, obj, stream, context): if self.perform_build: self.subcon._build(obj, stream, context) def _sizeof(self, context): return 0 class OnDemand(Subconstruct): """ Allows for on-demand (lazy) parsing. When parsing, it will return a LazyContainer that represents a pointer to the data, but does not actually parses it from stream until it's "demanded". 
By accessing the 'value' property of LazyContainers, you will demand the data from the stream. The data will be parsed and cached for later use. You can use the 'has_value' property to know whether the data has already been demanded. See also OnDemandPointer. Notes: * requires a seekable stream. Parameters: * subcon - * advance_stream - whether or not to advance the stream position. by default this is True, but if subcon is a pointer, this should be False. * force_build - whether or not to force build. If set to False, and the LazyContainer has not been demaned, building is a no-op. Example: OnDemand(Array(10000, UBInt8("foo")) """ __slots__ = ["advance_stream", "force_build"] def __init__(self, subcon, advance_stream = True, force_build = True): Subconstruct.__init__(self, subcon) self.advance_stream = advance_stream self.force_build = force_build def _parse(self, stream, context): obj = LazyContainer(self.subcon, stream, stream.tell(), context) if self.advance_stream: stream.seek(self.subcon._sizeof(context), 1) return obj def _build(self, obj, stream, context): if not isinstance(obj, LazyContainer): self.subcon._build(obj, stream, context) elif self.force_build or obj.has_value: self.subcon._build(obj.value, stream, context) elif self.advance_stream: stream.seek(self.subcon._sizeof(context), 1) class Buffered(Subconstruct): """ Creates an in-memory buffered stream, which can undergo encoding and decoding prior to being passed on to the subconstruct. See also Bitwise. Note: * Do not use pointers inside Buffered Parameters: * subcon - the subcon which will operate on the buffer * encoder - a function that takes a string and returns an encoded string (used after building) * decoder - a function that takes a string and returns a decoded string (used before parsing) * resizer - a function that takes the size of the subcon and "adjusts" or "resizes" it according to the encoding/decoding process. 
Example: Buffered(BitField("foo", 16), encoder = decode_bin, decoder = encode_bin, resizer = lambda size: size / 8, ) """ __slots__ = ["encoder", "decoder", "resizer"] def __init__(self, subcon, decoder, encoder, resizer): Subconstruct.__init__(self, subcon) self.encoder = encoder self.decoder = decoder self.resizer = resizer def _parse(self, stream, context): data = _read_stream(stream, self._sizeof(context)) stream2 = BytesIO(self.decoder(data)) return self.subcon._parse(stream2, context) def _build(self, obj, stream, context): size = self._sizeof(context) stream2 = BytesIO() self.subcon._build(obj, stream2, context) data = self.encoder(stream2.getvalue()) assert len(data) == size _write_stream(stream, self._sizeof(context), data) def _sizeof(self, context): return self.resizer(self.subcon._sizeof(context)) class Restream(Subconstruct): """ Wraps the stream with a read-wrapper (for parsing) or a write-wrapper (for building). The stream wrapper can buffer the data internally, reading it from- or writing it to the underlying stream as needed. For example, BitStreamReader reads whole bytes from the underlying stream, but returns them as individual bits. See also Bitwise. When the parsing or building is done, the stream's close method will be invoked. It can perform any finalization needed for the stream wrapper, but it must not close the underlying stream. Note: * Do not use pointers inside Restream Parameters: * subcon - the subcon * stream_reader - the read-wrapper * stream_writer - the write wrapper * resizer - a function that takes the size of the subcon and "adjusts" or "resizes" it according to the encoding/decoding process. 
Example: Restream(BitField("foo", 16), stream_reader = BitStreamReader, stream_writer = BitStreamWriter, resizer = lambda size: size / 8, ) """ __slots__ = ["stream_reader", "stream_writer", "resizer"] def __init__(self, subcon, stream_reader, stream_writer, resizer): Subconstruct.__init__(self, subcon) self.stream_reader = stream_reader self.stream_writer = stream_writer self.resizer = resizer def _parse(self, stream, context): stream2 = self.stream_reader(stream) obj = self.subcon._parse(stream2, context) stream2.close() return obj def _build(self, obj, stream, context): stream2 = self.stream_writer(stream) self.subcon._build(obj, stream2, context) stream2.close() def _sizeof(self, context): return self.resizer(self.subcon._sizeof(context)) #=============================================================================== # miscellaneous #=============================================================================== class Reconfig(Subconstruct): """ Reconfigures a subconstruct. Reconfig can be used to change the name and set and clear flags of the inner subcon. Parameters: * name - the new name * subcon - the subcon to reconfigure * setflags - the flags to set (default is 0) * clearflags - the flags to clear (default is 0) Example: Reconfig("foo", UBInt8("bar")) """ __slots__ = [] def __init__(self, name, subcon, setflags = 0, clearflags = 0): Construct.__init__(self, name, subcon.conflags) self.subcon = subcon self._set_flag(setflags) self._clear_flag(clearflags) class Anchor(Construct): """ Returns the "anchor" (stream position) at the point where it's inserted. Useful for adjusting relative offsets to absolute positions, or to measure sizes of constructs. absolute pointer = anchor + relative offset size = anchor_after - anchor_before See also Pointer. Notes: * requires a seekable stream. 
Parameters: * name - the name of the anchor Example: Struct("foo", Anchor("base"), UBInt8("relative_offset"), Pointer(lambda ctx: ctx.relative_offset + ctx.base, UBInt8("data") ) ) """ __slots__ = [] def _parse(self, stream, context): return stream.tell() def _build(self, obj, stream, context): context[self.name] = stream.tell() def _sizeof(self, context): return 0 class Value(Construct): """ A computed value. Parameters: * name - the name of the value * func - a function that takes the context and return the computed value Example: Struct("foo", UBInt8("width"), UBInt8("height"), Value("total_pixels", lambda ctx: ctx.width * ctx.height), ) """ __slots__ = ["func"] def __init__(self, name, func): Construct.__init__(self, name) self.func = func self._set_flag(self.FLAG_DYNAMIC) def _parse(self, stream, context): return self.func(context) def _build(self, obj, stream, context): context[self.name] = self.func(context) def _sizeof(self, context): return 0 #class Dynamic(Construct): # """ # Dynamically creates a construct and uses it for parsing and building. # This allows you to create change the construction tree on the fly. # Deprecated. # # Parameters: # * name - the name of the construct # * factoryfunc - a function that takes the context and returns a new # construct object which will be used for parsing and building. 
# # Example: # def factory(ctx): # if ctx.bar == 8: # return UBInt8("spam") # if ctx.bar == 9: # return String("spam", 9) # # Struct("foo", # UBInt8("bar"), # Dynamic("spam", factory), # ) # """ # __slots__ = ["factoryfunc"] # def __init__(self, name, factoryfunc): # Construct.__init__(self, name, self.FLAG_COPY_CONTEXT) # self.factoryfunc = factoryfunc # self._set_flag(self.FLAG_DYNAMIC) # def _parse(self, stream, context): # return self.factoryfunc(context)._parse(stream, context) # def _build(self, obj, stream, context): # return self.factoryfunc(context)._build(obj, stream, context) # def _sizeof(self, context): # return self.factoryfunc(context)._sizeof(context) class LazyBound(Construct): """ Lazily bound construct, useful for constructs that need to make cyclic references (linked-lists, expression trees, etc.). Parameters: Example: foo = Struct("foo", UBInt8("bar"), LazyBound("next", lambda: foo), ) """ __slots__ = ["bindfunc", "bound"] def __init__(self, name, bindfunc): Construct.__init__(self, name) self.bound = None self.bindfunc = bindfunc def _parse(self, stream, context): if self.bound is None: self.bound = self.bindfunc() return self.bound._parse(stream, context) def _build(self, obj, stream, context): if self.bound is None: self.bound = self.bindfunc() self.bound._build(obj, stream, context) def _sizeof(self, context): if self.bound is None: self.bound = self.bindfunc() return self.bound._sizeof(context) class Pass(Construct): """ A do-nothing construct, useful as the default case for Switch, or to indicate Enums. See also Switch and Enum. Notes: * this construct is a singleton. do not try to instatiate it, as it will not work... Example: Pass """ __slots__ = [] def _parse(self, stream, context): pass def _build(self, obj, stream, context): assert obj is None def _sizeof(self, context): return 0 Pass = Pass(None) class Terminator(Construct): """ Asserts the end of the stream has been reached at the point it's placed. 
You can use this to ensure no more unparsed data follows. Notes: * this construct is only meaningful for parsing. for building, it's a no-op. * this construct is a singleton. do not try to instatiate it, as it will not work... Example: Terminator """ __slots__ = [] def _parse(self, stream, context): if stream.read(1): raise TerminatorError("expected end of stream") def _build(self, obj, stream, context): assert obj is None def _sizeof(self, context): return 0 Terminator = Terminator(None)
bsd-3-clause
osborne6/luminotes
view/Page_navigation.py
1
1736
from Tags import P, Span, A, Strong class Page_navigation( P ): def __init__( self, page_path, displayed_item_count, total_item_count, start, items_per_page, return_text = None ): if start is None or items_per_page is None: P.__init__( self ) return if displayed_item_count == 1 and displayed_item_count < total_item_count: if not return_text: P.__init__( self ) return P.__init__( self, Span( A( return_text, href = "%s" % page_path, ), ), ) return if start == 0 and items_per_page >= total_item_count: P.__init__( self ) return P.__init__( self, ( start > 0 ) and Span( A( u"previous", href = self.href( page_path, max( start - items_per_page, 0 ), items_per_page ), ), u" | ", ) or None, [ Span( ( start == page_start ) and Strong( unicode( page_number + 1 ) ) or A( Strong( unicode( page_number + 1 ) ), href = self.href( page_path, page_start, items_per_page ), ), ) for ( page_number, page_start ) in enumerate( range( 0, total_item_count, items_per_page ) ) ], ( start + items_per_page < total_item_count ) and Span( u" | ", A( u"next", href = self.href( page_path, min( start + items_per_page, total_item_count - 1 ), items_per_page ), ), ) or None, ) @staticmethod def href( page_path, start, count ): # if start is zero, leave off start and count parameters and just use the defaults if start == 0: return page_path return u"%s?start=%d&count=%d" % ( page_path, start, count )
gpl-3.0
tambetm/gymexperiments
a2c_atari.py
1
10250
import argparse import os import multiprocessing from multiprocessing import Process, Queue, Array import pickle import gym from gym.spaces import Box, Discrete from keras.models import Model from keras.layers import Input, TimeDistributed, Convolution2D, Flatten, LSTM, Dense from keras.objectives import categorical_crossentropy from keras.optimizers import Adam from keras.utils import np_utils import keras.backend as K import numpy as np from atari_utils import RandomizedResetEnv, AtariRescale42x42Env def create_env(env_id): env = gym.make(env_id) env = RandomizedResetEnv(env) env = AtariRescale42x42Env(env) return env def create_model(env, batch_size, num_steps): # network inputs are observations and advantages h = x = Input(batch_shape=(batch_size, num_steps) + env.observation_space.shape, name="x") A = Input(batch_shape=(batch_size, num_steps), name="A") # convolutional layers h = TimeDistributed(Convolution2D(32, 3, 3, subsample=(2, 2), border_mode="same", activation='elu', dim_ordering='tf'), name='c1')(h) h = TimeDistributed(Convolution2D(32, 3, 3, subsample=(2, 2), border_mode="same", activation='elu', dim_ordering='tf'), name='c2')(h) h = TimeDistributed(Convolution2D(32, 3, 3, subsample=(2, 2), border_mode="same", activation='elu', dim_ordering='tf'), name='c3')(h) h = TimeDistributed(Convolution2D(64, 3, 3, subsample=(2, 2), border_mode="same", activation='elu', dim_ordering='tf'), name='c4')(h) h = TimeDistributed(Flatten(), name="fl")(h) # recurrent layer h = LSTM(32, return_sequences=True, stateful=True, name="r1")(h) # policy network p = TimeDistributed(Dense(env.action_space.n, activation='softmax'), name="p")(h) # baseline network b = TimeDistributed(Dense(1), name="b")(h) # inputs to the model are observation and advantages, # outputs are action probabilities and baseline model = Model(input=[x, A], output=[p, b]) # policy gradient loss and entropy bonus def policy_gradient_loss(l_sampled, l_predicted): return K.mean(A * 
categorical_crossentropy(l_sampled, l_predicted), axis=1) \ - 0.01 * K.mean(categorical_crossentropy(l_predicted, l_predicted), axis=1) # baseline is optimized with MSE model.compile(optimizer='adam', loss=[policy_gradient_loss, 'mse']) return model def predict(model, observation): # create inputs for batch (and timestep) of size 1 x = np.array([[observation]]) A = np.zeros((1, 1)) # dummy advantage # predict action probabilities (and baseline state value) p, b = model.predict_on_batch([x, A]) # return action probabilities and baseline return p[0, 0], b[0, 0, 0] def discount(rewards, terminals, v, gamma): # calculate discounted future rewards for this trajectory returns = [] # start with the predicted value of the last state R = v for r, t in zip(reversed(rewards), reversed(terminals)): # if it was terminal state then restart from 0 if t: R = 0 R = r + R * gamma returns.insert(0, R) return returns def runner(shared_buffer, fifo, num_timesteps, monitor, args): proc_name = multiprocessing.current_process().name print("Runner %s started" % proc_name) # local environment for runner env = create_env(args.env_id) # start monitor to record statistics and videos if monitor: env.monitor.start(args.env_id) # copy of model model = create_model(env, batch_size=1, num_steps=1) # record episode lengths and rewards for statistics episode_rewards = [] episode_lengths = [] episode_reward = 0 episode_length = 0 observation = env.reset() for i in range(num_timesteps // args.num_local_steps): # copy weights from main network at the beginning of iteration # the main network's weights are only read, never modified # but we create our own model instance, because Keras is not thread-safe model.set_weights(pickle.loads(shared_buffer.raw)) observations = [] actions = [] rewards = [] terminals = [] baselines = [] for t in range(args.num_local_steps): if args.display: env.render() # predict action probabilities (and baseline state value) p, b = predict(model, observation) # sample action 
using those probabilities p /= np.sum(p) # ensure p-s sum up to 1 action = np.random.choice(env.action_space.n, p=p) # log data observations.append(observation) actions.append(action) baselines.append(b) # step environment observation, reward, terminal, _ = env.step(int(action)) rewards.append(reward) terminals.append(terminal) episode_reward += reward episode_length += 1 # reset if terminal state if terminal: episode_rewards.append(episode_reward) episode_lengths.append(episode_length) episode_reward = 0 episode_length = 0 observation = env.reset() # calculate discounted returns if terminal: # if the last was terminal state then start from 0 returns = discount(rewards, terminals, 0, 0.99) else: # otherwise calculate the value of the last state _, v = predict(model, observation) returns = discount(rewards, terminals, v, 0.99) # convert to numpy arrays observations = np.array(observations) actions = np_utils.to_categorical(actions, env.action_space.n) baselines = np.array(baselines) returns = np.array(returns) advantages = returns - baselines # send observations, actions, rewards and returns. blocks if fifo is full. 
fifo.put((observations, actions, returns, advantages, episode_rewards, episode_lengths)) episode_rewards = [] episode_lengths = [] if monitor: env.monitor.close() print("Runner %s finished" % proc_name) def trainer(model, fifos, shared_buffer, args): proc_name = multiprocessing.current_process().name print("Trainer %s started" % proc_name) episode_rewards = [] episode_lengths = [] timestep = 0 while len(multiprocessing.active_children()) > 0 and timestep < args.num_timesteps: batch_observations = [] batch_actions = [] batch_returns = [] batch_advantages = [] # loop over fifos from all runners for q, fifo in enumerate(fifos): # wait for a new trajectory and statistics observations, actions, returns, advantages, rewards, lengths = fifo.get() # add to batch batch_observations.append(observations) batch_actions.append(actions) batch_returns.append(returns) batch_advantages.append(advantages) # log statistics episode_rewards += rewards episode_lengths += lengths timestep += len(observations) # form training data from observations, actions and returns x = np.array(batch_observations) p = np.array(batch_actions) R = np.array(batch_returns)[:, :, np.newaxis] A = np.array(batch_advantages) # anneal learning rate model.optimizer.lr = max(0.001 * (args.num_timesteps - timestep) / args.num_timesteps, 0) # train the model total_loss, policy_loss, baseline_loss = model.train_on_batch([x, A], [p, R]) # share model parameters shared_buffer.raw = pickle.dumps(model.get_weights(), pickle.HIGHEST_PROTOCOL) if timestep % args.stats_interval == 0: print("Step %d/%d: episodes %d, mean episode reward %.2f, mean episode length %.2f." 
% (timestep, args.num_timesteps, len(episode_rewards), np.mean(episode_rewards), np.mean(episode_lengths))) episode_rewards = [] episode_lengths = [] print("Trainer %s finished" % proc_name) def run(args): # create dummy environment to be able to create model env = create_env(args.env_id) assert isinstance(env.observation_space, Box) assert isinstance(env.action_space, Discrete) print("Observation space: " + str(env.observation_space)) print("Action space: " + str(env.action_space)) # create main model model = create_model(env, batch_size=args.num_runners, num_steps=args.num_local_steps) model.summary() env.close() # for better compatibility with Theano and Tensorflow multiprocessing.set_start_method('spawn') # create shared buffer for sharing weights blob = pickle.dumps(model.get_weights(), pickle.HIGHEST_PROTOCOL) shared_buffer = Array('c', len(blob)) shared_buffer.raw = blob # force runner processes to use cpu, child processes inherit environment variables os.environ["CUDA_VISIBLE_DEVICES"] = "" # create fifos and processes for all runners fifos = [] for i in range(args.num_runners): fifo = Queue(args.queue_length) fifos.append(fifo) process = Process(target=runner, args=(shared_buffer, fifo, args.num_timesteps // args.num_runners, args.monitor and i == 0, args)) process.start() # start trainer in main thread trainer(model, fifos, shared_buffer, args) print("All done") if __name__ == '__main__': parser = argparse.ArgumentParser() # parallelization parser.add_argument('--num_runners', type=int, default=2) parser.add_argument('--queue_length', type=int, default=2) # how long parser.add_argument('--num_timesteps', type=int, default=5000000) parser.add_argument('--num_local_steps', type=int, default=20) parser.add_argument('--stats_interval', type=int, default=10000) # technical parser.add_argument('--display', action='store_true', default=False) parser.add_argument('--monitor', action='store_true', default=False) # mandatory parser.add_argument('env_id') args = 
parser.parse_args() run(args)
mit
matrixise/odoo
addons/l10n_in/__openerp__.py
83
2248
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Indian - Accounting', 'version': '1.0', 'description': """ Indian Accounting: Chart of Account. ==================================== Indian accounting chart and localization. OpenERP allows to manage Indian Accounting by providing Two Formats Of Chart of Accounts i.e Indian Chart Of Accounts - Standard and Indian Chart Of Accounts - Schedule VI. Note: The Schedule VI has been revised by MCA and is applicable for all Balance Sheet made after 31st March, 2011. The Format has done away with earlier two options of format of Balance Sheet, now only Vertical format has been permitted Which is Supported By OpenERP. 
""", 'author': ['OpenERP SA'], 'category': 'Localization/Account Charts', 'depends': [ 'account', 'account_chart' ], 'demo': [], 'data': [ 'l10n_in_tax_code_template.xml', 'l10n_in_standard_chart.xml', 'l10n_in_standard_tax_template.xml', 'l10n_in_schedule6_chart.xml', 'l10n_in_schedule6_tax_template.xml', 'l10n_in_wizard.xml', ], 'auto_install': False, 'installable': True, 'images': ['images/config_chart_l10n_in.jpeg','images/l10n_in_chart.jpeg'], } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
shingo103/Kuku
test/gtest-1.7.0/scripts/fuse_gtest_files.py
2577
8813
#!/usr/bin/env python # # Copyright 2009, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """fuse_gtest_files.py v0.2.0 Fuses Google Test source code into a .h file and a .cc file. SYNOPSIS fuse_gtest_files.py [GTEST_ROOT_DIR] OUTPUT_DIR Scans GTEST_ROOT_DIR for Google Test source code, and generates two files: OUTPUT_DIR/gtest/gtest.h and OUTPUT_DIR/gtest/gtest-all.cc. Then you can build your tests by adding OUTPUT_DIR to the include search path and linking with OUTPUT_DIR/gtest/gtest-all.cc. 
These two files contain everything you need to use Google Test. Hence you can "install" Google Test by copying them to wherever you want. GTEST_ROOT_DIR can be omitted and defaults to the parent directory of the directory holding this script. EXAMPLES ./fuse_gtest_files.py fused_gtest ./fuse_gtest_files.py path/to/unpacked/gtest fused_gtest This tool is experimental. In particular, it assumes that there is no conditional inclusion of Google Test headers. Please report any problems to googletestframework@googlegroups.com. You can read http://code.google.com/p/googletest/wiki/GoogleTestAdvancedGuide for more information. """ __author__ = 'wan@google.com (Zhanyong Wan)' import os import re import sets import sys # We assume that this file is in the scripts/ directory in the Google # Test root directory. DEFAULT_GTEST_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..') # Regex for matching '#include "gtest/..."'. INCLUDE_GTEST_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gtest/.+)"') # Regex for matching '#include "src/..."'. INCLUDE_SRC_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(src/.+)"') # Where to find the source seed files. GTEST_H_SEED = 'include/gtest/gtest.h' GTEST_SPI_H_SEED = 'include/gtest/gtest-spi.h' GTEST_ALL_CC_SEED = 'src/gtest-all.cc' # Where to put the generated files. GTEST_H_OUTPUT = 'gtest/gtest.h' GTEST_ALL_CC_OUTPUT = 'gtest/gtest-all.cc' def VerifyFileExists(directory, relative_path): """Verifies that the given file exists; aborts on failure. relative_path is the file path relative to the given directory. """ if not os.path.isfile(os.path.join(directory, relative_path)): print 'ERROR: Cannot find %s in directory %s.' % (relative_path, directory) print ('Please either specify a valid project root directory ' 'or omit it on the command line.') sys.exit(1) def ValidateGTestRootDir(gtest_root): """Makes sure gtest_root points to a valid gtest root directory. The function aborts the program on failure. 
""" VerifyFileExists(gtest_root, GTEST_H_SEED) VerifyFileExists(gtest_root, GTEST_ALL_CC_SEED) def VerifyOutputFile(output_dir, relative_path): """Verifies that the given output file path is valid. relative_path is relative to the output_dir directory. """ # Makes sure the output file either doesn't exist or can be overwritten. output_file = os.path.join(output_dir, relative_path) if os.path.exists(output_file): # TODO(wan@google.com): The following user-interaction doesn't # work with automated processes. We should provide a way for the # Makefile to force overwriting the files. print ('%s already exists in directory %s - overwrite it? (y/N) ' % (relative_path, output_dir)) answer = sys.stdin.readline().strip() if answer not in ['y', 'Y']: print 'ABORTED.' sys.exit(1) # Makes sure the directory holding the output file exists; creates # it and all its ancestors if necessary. parent_directory = os.path.dirname(output_file) if not os.path.isdir(parent_directory): os.makedirs(parent_directory) def ValidateOutputDir(output_dir): """Makes sure output_dir points to a valid output directory. The function aborts the program on failure. """ VerifyOutputFile(output_dir, GTEST_H_OUTPUT) VerifyOutputFile(output_dir, GTEST_ALL_CC_OUTPUT) def FuseGTestH(gtest_root, output_dir): """Scans folder gtest_root to generate gtest/gtest.h in output_dir.""" output_file = file(os.path.join(output_dir, GTEST_H_OUTPUT), 'w') processed_files = sets.Set() # Holds all gtest headers we've processed. def ProcessFile(gtest_header_path): """Processes the given gtest header file.""" # We don't process the same header twice. if gtest_header_path in processed_files: return processed_files.add(gtest_header_path) # Reads each line in the given gtest header. for line in file(os.path.join(gtest_root, gtest_header_path), 'r'): m = INCLUDE_GTEST_FILE_REGEX.match(line) if m: # It's '#include "gtest/..."' - let's process it recursively. 
ProcessFile('include/' + m.group(1)) else: # Otherwise we copy the line unchanged to the output file. output_file.write(line) ProcessFile(GTEST_H_SEED) output_file.close() def FuseGTestAllCcToFile(gtest_root, output_file): """Scans folder gtest_root to generate gtest/gtest-all.cc in output_file.""" processed_files = sets.Set() def ProcessFile(gtest_source_file): """Processes the given gtest source file.""" # We don't process the same #included file twice. if gtest_source_file in processed_files: return processed_files.add(gtest_source_file) # Reads each line in the given gtest source file. for line in file(os.path.join(gtest_root, gtest_source_file), 'r'): m = INCLUDE_GTEST_FILE_REGEX.match(line) if m: if 'include/' + m.group(1) == GTEST_SPI_H_SEED: # It's '#include "gtest/gtest-spi.h"'. This file is not # #included by "gtest/gtest.h", so we need to process it. ProcessFile(GTEST_SPI_H_SEED) else: # It's '#include "gtest/foo.h"' where foo is not gtest-spi. # We treat it as '#include "gtest/gtest.h"', as all other # gtest headers are being fused into gtest.h and cannot be # #included directly. # There is no need to #include "gtest/gtest.h" more than once. if not GTEST_H_SEED in processed_files: processed_files.add(GTEST_H_SEED) output_file.write('#include "%s"\n' % (GTEST_H_OUTPUT,)) else: m = INCLUDE_SRC_FILE_REGEX.match(line) if m: # It's '#include "src/foo"' - let's process it recursively. 
ProcessFile(m.group(1)) else: output_file.write(line) ProcessFile(GTEST_ALL_CC_SEED) def FuseGTestAllCc(gtest_root, output_dir): """Scans folder gtest_root to generate gtest/gtest-all.cc in output_dir.""" output_file = file(os.path.join(output_dir, GTEST_ALL_CC_OUTPUT), 'w') FuseGTestAllCcToFile(gtest_root, output_file) output_file.close() def FuseGTest(gtest_root, output_dir): """Fuses gtest.h and gtest-all.cc.""" ValidateGTestRootDir(gtest_root) ValidateOutputDir(output_dir) FuseGTestH(gtest_root, output_dir) FuseGTestAllCc(gtest_root, output_dir) def main(): argc = len(sys.argv) if argc == 2: # fuse_gtest_files.py OUTPUT_DIR FuseGTest(DEFAULT_GTEST_ROOT_DIR, sys.argv[1]) elif argc == 3: # fuse_gtest_files.py GTEST_ROOT_DIR OUTPUT_DIR FuseGTest(sys.argv[1], sys.argv[2]) else: print __doc__ sys.exit(1) if __name__ == '__main__': main()
mit
marcuskelly/recover
Lib/encodings/iso8859_10.py
272
13589
""" Python Character Mapping Codec iso8859_10 generated from 'MAPPINGS/ISO8859/8859-10.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='iso8859-10', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( '\x00' # 0x00 -> NULL '\x01' # 0x01 -> START OF HEADING '\x02' # 0x02 -> START OF TEXT '\x03' # 0x03 -> END OF TEXT '\x04' # 0x04 -> END OF TRANSMISSION '\x05' # 0x05 -> ENQUIRY '\x06' # 0x06 -> ACKNOWLEDGE '\x07' # 0x07 -> BELL '\x08' # 0x08 -> BACKSPACE '\t' # 0x09 -> HORIZONTAL TABULATION '\n' # 0x0A -> LINE FEED '\x0b' # 0x0B -> VERTICAL TABULATION '\x0c' # 0x0C -> FORM FEED '\r' # 0x0D -> CARRIAGE RETURN '\x0e' # 0x0E -> SHIFT OUT '\x0f' # 0x0F -> SHIFT IN '\x10' # 0x10 -> DATA LINK ESCAPE '\x11' # 0x11 -> DEVICE CONTROL ONE '\x12' # 0x12 -> DEVICE CONTROL TWO '\x13' # 0x13 -> DEVICE CONTROL THREE '\x14' # 0x14 -> DEVICE CONTROL FOUR '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE '\x16' # 0x16 -> SYNCHRONOUS IDLE '\x17' # 0x17 -> END OF TRANSMISSION BLOCK '\x18' # 0x18 -> CANCEL '\x19' # 0x19 -> END OF MEDIUM '\x1a' # 0x1A -> SUBSTITUTE '\x1b' # 0x1B -> ESCAPE '\x1c' # 0x1C -> FILE SEPARATOR '\x1d' 
# 0x1D -> GROUP SEPARATOR '\x1e' # 0x1E -> RECORD SEPARATOR '\x1f' # 0x1F -> UNIT SEPARATOR ' ' # 0x20 -> SPACE '!' # 0x21 -> EXCLAMATION MARK '"' # 0x22 -> QUOTATION MARK '#' # 0x23 -> NUMBER SIGN '$' # 0x24 -> DOLLAR SIGN '%' # 0x25 -> PERCENT SIGN '&' # 0x26 -> AMPERSAND "'" # 0x27 -> APOSTROPHE '(' # 0x28 -> LEFT PARENTHESIS ')' # 0x29 -> RIGHT PARENTHESIS '*' # 0x2A -> ASTERISK '+' # 0x2B -> PLUS SIGN ',' # 0x2C -> COMMA '-' # 0x2D -> HYPHEN-MINUS '.' # 0x2E -> FULL STOP '/' # 0x2F -> SOLIDUS '0' # 0x30 -> DIGIT ZERO '1' # 0x31 -> DIGIT ONE '2' # 0x32 -> DIGIT TWO '3' # 0x33 -> DIGIT THREE '4' # 0x34 -> DIGIT FOUR '5' # 0x35 -> DIGIT FIVE '6' # 0x36 -> DIGIT SIX '7' # 0x37 -> DIGIT SEVEN '8' # 0x38 -> DIGIT EIGHT '9' # 0x39 -> DIGIT NINE ':' # 0x3A -> COLON ';' # 0x3B -> SEMICOLON '<' # 0x3C -> LESS-THAN SIGN '=' # 0x3D -> EQUALS SIGN '>' # 0x3E -> GREATER-THAN SIGN '?' # 0x3F -> QUESTION MARK '@' # 0x40 -> COMMERCIAL AT 'A' # 0x41 -> LATIN CAPITAL LETTER A 'B' # 0x42 -> LATIN CAPITAL LETTER B 'C' # 0x43 -> LATIN CAPITAL LETTER C 'D' # 0x44 -> LATIN CAPITAL LETTER D 'E' # 0x45 -> LATIN CAPITAL LETTER E 'F' # 0x46 -> LATIN CAPITAL LETTER F 'G' # 0x47 -> LATIN CAPITAL LETTER G 'H' # 0x48 -> LATIN CAPITAL LETTER H 'I' # 0x49 -> LATIN CAPITAL LETTER I 'J' # 0x4A -> LATIN CAPITAL LETTER J 'K' # 0x4B -> LATIN CAPITAL LETTER K 'L' # 0x4C -> LATIN CAPITAL LETTER L 'M' # 0x4D -> LATIN CAPITAL LETTER M 'N' # 0x4E -> LATIN CAPITAL LETTER N 'O' # 0x4F -> LATIN CAPITAL LETTER O 'P' # 0x50 -> LATIN CAPITAL LETTER P 'Q' # 0x51 -> LATIN CAPITAL LETTER Q 'R' # 0x52 -> LATIN CAPITAL LETTER R 'S' # 0x53 -> LATIN CAPITAL LETTER S 'T' # 0x54 -> LATIN CAPITAL LETTER T 'U' # 0x55 -> LATIN CAPITAL LETTER U 'V' # 0x56 -> LATIN CAPITAL LETTER V 'W' # 0x57 -> LATIN CAPITAL LETTER W 'X' # 0x58 -> LATIN CAPITAL LETTER X 'Y' # 0x59 -> LATIN CAPITAL LETTER Y 'Z' # 0x5A -> LATIN CAPITAL LETTER Z '[' # 0x5B -> LEFT SQUARE BRACKET '\\' # 0x5C -> REVERSE SOLIDUS ']' # 0x5D -> RIGHT SQUARE 
BRACKET '^' # 0x5E -> CIRCUMFLEX ACCENT '_' # 0x5F -> LOW LINE '`' # 0x60 -> GRAVE ACCENT 'a' # 0x61 -> LATIN SMALL LETTER A 'b' # 0x62 -> LATIN SMALL LETTER B 'c' # 0x63 -> LATIN SMALL LETTER C 'd' # 0x64 -> LATIN SMALL LETTER D 'e' # 0x65 -> LATIN SMALL LETTER E 'f' # 0x66 -> LATIN SMALL LETTER F 'g' # 0x67 -> LATIN SMALL LETTER G 'h' # 0x68 -> LATIN SMALL LETTER H 'i' # 0x69 -> LATIN SMALL LETTER I 'j' # 0x6A -> LATIN SMALL LETTER J 'k' # 0x6B -> LATIN SMALL LETTER K 'l' # 0x6C -> LATIN SMALL LETTER L 'm' # 0x6D -> LATIN SMALL LETTER M 'n' # 0x6E -> LATIN SMALL LETTER N 'o' # 0x6F -> LATIN SMALL LETTER O 'p' # 0x70 -> LATIN SMALL LETTER P 'q' # 0x71 -> LATIN SMALL LETTER Q 'r' # 0x72 -> LATIN SMALL LETTER R 's' # 0x73 -> LATIN SMALL LETTER S 't' # 0x74 -> LATIN SMALL LETTER T 'u' # 0x75 -> LATIN SMALL LETTER U 'v' # 0x76 -> LATIN SMALL LETTER V 'w' # 0x77 -> LATIN SMALL LETTER W 'x' # 0x78 -> LATIN SMALL LETTER X 'y' # 0x79 -> LATIN SMALL LETTER Y 'z' # 0x7A -> LATIN SMALL LETTER Z '{' # 0x7B -> LEFT CURLY BRACKET '|' # 0x7C -> VERTICAL LINE '}' # 0x7D -> RIGHT CURLY BRACKET '~' # 0x7E -> TILDE '\x7f' # 0x7F -> DELETE '\x80' # 0x80 -> <control> '\x81' # 0x81 -> <control> '\x82' # 0x82 -> <control> '\x83' # 0x83 -> <control> '\x84' # 0x84 -> <control> '\x85' # 0x85 -> <control> '\x86' # 0x86 -> <control> '\x87' # 0x87 -> <control> '\x88' # 0x88 -> <control> '\x89' # 0x89 -> <control> '\x8a' # 0x8A -> <control> '\x8b' # 0x8B -> <control> '\x8c' # 0x8C -> <control> '\x8d' # 0x8D -> <control> '\x8e' # 0x8E -> <control> '\x8f' # 0x8F -> <control> '\x90' # 0x90 -> <control> '\x91' # 0x91 -> <control> '\x92' # 0x92 -> <control> '\x93' # 0x93 -> <control> '\x94' # 0x94 -> <control> '\x95' # 0x95 -> <control> '\x96' # 0x96 -> <control> '\x97' # 0x97 -> <control> '\x98' # 0x98 -> <control> '\x99' # 0x99 -> <control> '\x9a' # 0x9A -> <control> '\x9b' # 0x9B -> <control> '\x9c' # 0x9C -> <control> '\x9d' # 0x9D -> <control> '\x9e' # 0x9E -> <control> '\x9f' # 0x9F -> 
<control> '\xa0' # 0xA0 -> NO-BREAK SPACE '\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK '\u0112' # 0xA2 -> LATIN CAPITAL LETTER E WITH MACRON '\u0122' # 0xA3 -> LATIN CAPITAL LETTER G WITH CEDILLA '\u012a' # 0xA4 -> LATIN CAPITAL LETTER I WITH MACRON '\u0128' # 0xA5 -> LATIN CAPITAL LETTER I WITH TILDE '\u0136' # 0xA6 -> LATIN CAPITAL LETTER K WITH CEDILLA '\xa7' # 0xA7 -> SECTION SIGN '\u013b' # 0xA8 -> LATIN CAPITAL LETTER L WITH CEDILLA '\u0110' # 0xA9 -> LATIN CAPITAL LETTER D WITH STROKE '\u0160' # 0xAA -> LATIN CAPITAL LETTER S WITH CARON '\u0166' # 0xAB -> LATIN CAPITAL LETTER T WITH STROKE '\u017d' # 0xAC -> LATIN CAPITAL LETTER Z WITH CARON '\xad' # 0xAD -> SOFT HYPHEN '\u016a' # 0xAE -> LATIN CAPITAL LETTER U WITH MACRON '\u014a' # 0xAF -> LATIN CAPITAL LETTER ENG '\xb0' # 0xB0 -> DEGREE SIGN '\u0105' # 0xB1 -> LATIN SMALL LETTER A WITH OGONEK '\u0113' # 0xB2 -> LATIN SMALL LETTER E WITH MACRON '\u0123' # 0xB3 -> LATIN SMALL LETTER G WITH CEDILLA '\u012b' # 0xB4 -> LATIN SMALL LETTER I WITH MACRON '\u0129' # 0xB5 -> LATIN SMALL LETTER I WITH TILDE '\u0137' # 0xB6 -> LATIN SMALL LETTER K WITH CEDILLA '\xb7' # 0xB7 -> MIDDLE DOT '\u013c' # 0xB8 -> LATIN SMALL LETTER L WITH CEDILLA '\u0111' # 0xB9 -> LATIN SMALL LETTER D WITH STROKE '\u0161' # 0xBA -> LATIN SMALL LETTER S WITH CARON '\u0167' # 0xBB -> LATIN SMALL LETTER T WITH STROKE '\u017e' # 0xBC -> LATIN SMALL LETTER Z WITH CARON '\u2015' # 0xBD -> HORIZONTAL BAR '\u016b' # 0xBE -> LATIN SMALL LETTER U WITH MACRON '\u014b' # 0xBF -> LATIN SMALL LETTER ENG '\u0100' # 0xC0 -> LATIN CAPITAL LETTER A WITH MACRON '\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE '\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX '\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE '\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS '\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE '\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE '\u012e' # 0xC7 -> LATIN CAPITAL LETTER I WITH OGONEK '\u010c' # 0xC8 -> LATIN 
CAPITAL LETTER C WITH CARON '\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE '\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK '\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS '\u0116' # 0xCC -> LATIN CAPITAL LETTER E WITH DOT ABOVE '\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE '\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX '\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS '\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH (Icelandic) '\u0145' # 0xD1 -> LATIN CAPITAL LETTER N WITH CEDILLA '\u014c' # 0xD2 -> LATIN CAPITAL LETTER O WITH MACRON '\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE '\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX '\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE '\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS '\u0168' # 0xD7 -> LATIN CAPITAL LETTER U WITH TILDE '\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE '\u0172' # 0xD9 -> LATIN CAPITAL LETTER U WITH OGONEK '\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE '\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX '\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS '\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE '\xde' # 0xDE -> LATIN CAPITAL LETTER THORN (Icelandic) '\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S (German) '\u0101' # 0xE0 -> LATIN SMALL LETTER A WITH MACRON '\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE '\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX '\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE '\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS '\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE '\xe6' # 0xE6 -> LATIN SMALL LETTER AE '\u012f' # 0xE7 -> LATIN SMALL LETTER I WITH OGONEK '\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON '\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE '\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK '\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS '\u0117' # 0xEC -> LATIN SMALL LETTER E WITH DOT ABOVE '\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE '\xee' # 0xEE -> LATIN 
SMALL LETTER I WITH CIRCUMFLEX '\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS '\xf0' # 0xF0 -> LATIN SMALL LETTER ETH (Icelandic) '\u0146' # 0xF1 -> LATIN SMALL LETTER N WITH CEDILLA '\u014d' # 0xF2 -> LATIN SMALL LETTER O WITH MACRON '\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE '\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX '\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE '\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS '\u0169' # 0xF7 -> LATIN SMALL LETTER U WITH TILDE '\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE '\u0173' # 0xF9 -> LATIN SMALL LETTER U WITH OGONEK '\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE '\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX '\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS '\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE '\xfe' # 0xFE -> LATIN SMALL LETTER THORN (Icelandic) '\u0138' # 0xFF -> LATIN SMALL LETTER KRA ) ### Encoding table encoding_table=codecs.charmap_build(decoding_table)
bsd-2-clause
SymbiFlow/symbiflow-arch-defs
utils/lib/parse_route.py
1
1541
""" Library for parsing route output from VPR route files. """ from collections import namedtuple Node = namedtuple('Node', 'inode x_low y_low x_high y_high ptc') def format_name(s): """ Converts VPR parenthesized name to just name. """ assert s[0] == '(' assert s[-1] == ')' return s[1:-1] def format_coordinates(coord): """ Parses coordinates from VPR route file in format of (x,y). """ coord = format_name(coord) x, y = coord.split(',') return int(x), int(y) def find_net_sources(f): """ Yields tuple of (net string, Node namedtuple) from file object. File object should be formatted as VPR route output file. """ net = None for e in f: tokens = e.strip().split() if not tokens: continue elif tokens[0][0] == '#': continue elif tokens[0] == 'Net': net = format_name(tokens[2]) elif e == "\n\nUsed in local cluster only, reserved one CLB pin\n\n": continue else: if net is not None: inode = int(tokens[1]) assert tokens[2] == 'SOURCE' x, y = format_coordinates(tokens[3]) if tokens[4] == 'to': x2, y2 = format_coordinates(tokens[5]) offset = 2 else: x2, y2 = x, y offset = 0 ptc = int(tokens[5 + offset]) yield net, Node(inode, x, y, x2, y2, ptc) net = None
isc
erwilan/ansible
lib/ansible/modules/network/f5/bigip_pool.py
41
20012
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2013, Matt Hite <mhite@hotmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: bigip_pool short_description: "Manages F5 BIG-IP LTM pools" description: - Manages F5 BIG-IP LTM pools via iControl SOAP API version_added: 1.2 author: - Matt Hite (@mhite) - Tim Rupp (@caphrim007) notes: - Requires BIG-IP software version >= 11 - F5 developed module 'bigsuds' required (see http://devcentral.f5.com) - Best run as a local_action in your playbook requirements: - bigsuds options: description: description: - Specifies descriptive text that identifies the pool. 
required: false version_added: "2.3" state: description: - Pool/pool member state required: false default: present choices: - present - absent aliases: [] name: description: - Pool name required: true default: null choices: [] aliases: - pool partition: description: - Partition of pool/pool member required: false default: 'Common' choices: [] aliases: [] lb_method: description: - Load balancing method version_added: "1.3" required: False default: 'round_robin' choices: - round_robin - ratio_member - least_connection_member - observed_member - predictive_member - ratio_node_address - least_connection_node_address - fastest_node_address - observed_node_address - predictive_node_address - dynamic_ratio - fastest_app_response - least_sessions - dynamic_ratio_member - l3_addr - weighted_least_connection_member - weighted_least_connection_node_address - ratio_session - ratio_least_connection_member - ratio_least_connection_node_address aliases: [] monitor_type: description: - Monitor rule type when monitors > 1 version_added: "1.3" required: False default: null choices: ['and_list', 'm_of_n'] aliases: [] quorum: description: - Monitor quorum value when monitor_type is m_of_n version_added: "1.3" required: False default: null choices: [] aliases: [] monitors: description: - Monitor template name list. Always use the full path to the monitor. 
version_added: "1.3" required: False default: null choices: [] aliases: [] slow_ramp_time: description: - Sets the ramp-up time (in seconds) to gradually ramp up the load on newly added or freshly detected up pool members version_added: "1.3" required: False default: null choices: [] aliases: [] reselect_tries: description: - Sets the number of times the system tries to contact a pool member after a passive failure version_added: "2.2" required: False default: null choices: [] aliases: [] service_down_action: description: - Sets the action to take when node goes down in pool version_added: "1.3" required: False default: null choices: - none - reset - drop - reselect aliases: [] host: description: - "Pool member IP" required: False default: null choices: [] aliases: - address port: description: - Pool member port required: False default: null choices: [] aliases: [] extends_documentation_fragment: f5 ''' EXAMPLES = ''' - name: Create pool bigip_pool: server: "lb.mydomain.com" user: "admin" password: "secret" state: "present" name: "my-pool" partition: "Common" lb_method: "least_connection_member" slow_ramp_time: 120 delegate_to: localhost - name: Modify load balancer method bigip_pool: server: "lb.mydomain.com" user: "admin" password: "secret" state: "present" name: "my-pool" partition: "Common" lb_method: "round_robin" - name: Add pool member bigip_pool: server: "lb.mydomain.com" user: "admin" password: "secret" state: "present" name: "my-pool" partition: "Common" host: "{{ ansible_default_ipv4['address'] }}" port: 80 - name: Remove pool member from pool bigip_pool: server: "lb.mydomain.com" user: "admin" password: "secret" state: "absent" name: "my-pool" partition: "Common" host: "{{ ansible_default_ipv4['address'] }}" port: 80 - name: Delete pool bigip_pool: server: "lb.mydomain.com" user: "admin" password: "secret" state: "absent" name: "my-pool" partition: "Common" ''' RETURN = ''' ''' def pool_exists(api, pool): # hack to determine if pool exists result = 
False try: api.LocalLB.Pool.get_object_status(pool_names=[pool]) result = True except bigsuds.OperationFailed as e: if "was not found" in str(e): result = False else: # genuine exception raise return result def create_pool(api, pool, lb_method): # create requires lb_method but we don't want to default # to a value on subsequent runs if not lb_method: lb_method = 'round_robin' lb_method = "LB_METHOD_%s" % lb_method.strip().upper() api.LocalLB.Pool.create_v2(pool_names=[pool], lb_methods=[lb_method], members=[[]]) def remove_pool(api, pool): api.LocalLB.Pool.delete_pool(pool_names=[pool]) def get_lb_method(api, pool): lb_method = api.LocalLB.Pool.get_lb_method(pool_names=[pool])[0] lb_method = lb_method.strip().replace('LB_METHOD_', '').lower() return lb_method def set_lb_method(api, pool, lb_method): lb_method = "LB_METHOD_%s" % lb_method.strip().upper() api.LocalLB.Pool.set_lb_method(pool_names=[pool], lb_methods=[lb_method]) def get_monitors(api, pool): result = api.LocalLB.Pool.get_monitor_association(pool_names=[pool])[0]['monitor_rule'] monitor_type = result['type'].split("MONITOR_RULE_TYPE_")[-1].lower() quorum = result['quorum'] monitor_templates = result['monitor_templates'] return (monitor_type, quorum, monitor_templates) def set_monitors(api, pool, monitor_type, quorum, monitor_templates): monitor_type = "MONITOR_RULE_TYPE_%s" % monitor_type.strip().upper() monitor_rule = {'type': monitor_type, 'quorum': quorum, 'monitor_templates': monitor_templates} monitor_association = {'pool_name': pool, 'monitor_rule': monitor_rule} api.LocalLB.Pool.set_monitor_association(monitor_associations=[monitor_association]) def get_slow_ramp_time(api, pool): result = api.LocalLB.Pool.get_slow_ramp_time(pool_names=[pool])[0] return result def set_slow_ramp_time(api, pool, seconds): api.LocalLB.Pool.set_slow_ramp_time(pool_names=[pool], values=[seconds]) def get_reselect_tries(api, pool): result = api.LocalLB.Pool.get_reselect_tries(pool_names=[pool])[0] return result def 
set_reselect_tries(api, pool, tries): api.LocalLB.Pool.set_reselect_tries(pool_names=[pool], values=[tries]) def get_action_on_service_down(api, pool): result = api.LocalLB.Pool.get_action_on_service_down(pool_names=[pool])[0] result = result.split("SERVICE_DOWN_ACTION_")[-1].lower() return result def set_action_on_service_down(api, pool, action): action = "SERVICE_DOWN_ACTION_%s" % action.strip().upper() api.LocalLB.Pool.set_action_on_service_down(pool_names=[pool], actions=[action]) def member_exists(api, pool, address, port): # hack to determine if member exists result = False try: members = [{'address': address, 'port': port}] api.LocalLB.Pool.get_member_object_status(pool_names=[pool], members=[members]) result = True except bigsuds.OperationFailed as e: if "was not found" in str(e): result = False else: # genuine exception raise return result def delete_node_address(api, address): result = False try: api.LocalLB.NodeAddressV2.delete_node_address(nodes=[address]) result = True except bigsuds.OperationFailed as e: if "is referenced by a member of pool" in str(e): result = False else: # genuine exception raise return result def remove_pool_member(api, pool, address, port): members = [{'address': address, 'port': port}] api.LocalLB.Pool.remove_member_v2(pool_names=[pool], members=[members]) def add_pool_member(api, pool, address, port): members = [{'address': address, 'port': port}] api.LocalLB.Pool.add_member_v2(pool_names=[pool], members=[members]) def set_description(api, pool, description): api.LocalLB.Pool.set_description( pool_names=[pool], descriptions=[description] ) def get_description(api, pool): return api.LocalLB.Pool.get_description(pool_names=[pool])[0] def main(): lb_method_choices = ['round_robin', 'ratio_member', 'least_connection_member', 'observed_member', 'predictive_member', 'ratio_node_address', 'least_connection_node_address', 'fastest_node_address', 'observed_node_address', 'predictive_node_address', 'dynamic_ratio', 
'fastest_app_response', 'least_sessions', 'dynamic_ratio_member', 'l3_addr', 'weighted_least_connection_member', 'weighted_least_connection_node_address', 'ratio_session', 'ratio_least_connection_member', 'ratio_least_connection_node_address'] monitor_type_choices = ['and_list', 'm_of_n'] service_down_choices = ['none', 'reset', 'drop', 'reselect'] argument_spec = f5_argument_spec() meta_args = dict( name=dict(type='str', required=True, aliases=['pool']), lb_method=dict(type='str', choices=lb_method_choices), monitor_type=dict(type='str', choices=monitor_type_choices), quorum=dict(type='int'), monitors=dict(type='list'), slow_ramp_time=dict(type='int'), reselect_tries=dict(type='int'), service_down_action=dict(type='str', choices=service_down_choices), host=dict(type='str', aliases=['address']), port=dict(type='int'), description=dict(type='str') ) argument_spec.update(meta_args) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True ) if not bigsuds_found: module.fail_json(msg="the python bigsuds module is required") if module.params['validate_certs']: import ssl if not hasattr(ssl, 'SSLContext'): module.fail_json( msg='bigsuds does not support verifying certificates with python < 2.7.9. 
Either update python or set validate_certs=False on the task' ) server = module.params['server'] server_port = module.params['server_port'] user = module.params['user'] password = module.params['password'] state = module.params['state'] partition = module.params['partition'] validate_certs = module.params['validate_certs'] description = module.params['description'] name = module.params['name'] pool = fq_name(partition, name) lb_method = module.params['lb_method'] if lb_method: lb_method = lb_method.lower() monitor_type = module.params['monitor_type'] if monitor_type: monitor_type = monitor_type.lower() quorum = module.params['quorum'] monitors = module.params['monitors'] if monitors: monitors = [] for monitor in module.params['monitors']: monitors.append(fq_name(partition, monitor)) slow_ramp_time = module.params['slow_ramp_time'] reselect_tries = module.params['reselect_tries'] service_down_action = module.params['service_down_action'] if service_down_action: service_down_action = service_down_action.lower() host = module.params['host'] address = fq_name(partition, host) port = module.params['port'] # sanity check user supplied values if (host and port is None) or (port is not None and not host): module.fail_json(msg="both host and port must be supplied") if port is not None and (0 > port or port > 65535): module.fail_json(msg="valid ports must be in range 0 - 65535") if monitors: if len(monitors) == 1: # set default required values for single monitor quorum = 0 monitor_type = 'single' elif len(monitors) > 1: if not monitor_type: module.fail_json(msg="monitor_type required for monitors > 1") if monitor_type == 'm_of_n' and not quorum: module.fail_json(msg="quorum value required for monitor_type m_of_n") if monitor_type != 'm_of_n': quorum = 0 elif monitor_type: # no monitors specified but monitor_type exists module.fail_json(msg="monitor_type require monitors parameter") elif quorum is not None: # no monitors specified but quorum exists 
module.fail_json(msg="quorum requires monitors parameter") try: api = bigip_api(server, user, password, validate_certs, port=server_port) result = {'changed': False} # default if state == 'absent': if host and port and pool: # member removal takes precedent if pool_exists(api, pool) and member_exists(api, pool, address, port): if not module.check_mode: remove_pool_member(api, pool, address, port) deleted = delete_node_address(api, address) result = {'changed': True, 'deleted': deleted} else: result = {'changed': True} elif pool_exists(api, pool): # no host/port supplied, must be pool removal if not module.check_mode: # hack to handle concurrent runs of module # pool might be gone before we actually remove it try: remove_pool(api, pool) result = {'changed': True} except bigsuds.OperationFailed as e: if "was not found" in str(e): result = {'changed': False} else: # genuine exception raise else: # check-mode return value result = {'changed': True} elif state == 'present': update = False if not pool_exists(api, pool): # pool does not exist -- need to create it if not module.check_mode: # a bit of a hack to handle concurrent runs of this module. # even though we've checked the pool doesn't exist, # it may exist by the time we run create_pool(). # this catches the exception and does something smart # about it! 
try: create_pool(api, pool, lb_method) result = {'changed': True} except bigsuds.OperationFailed as e: if "already exists" in str(e): update = True else: # genuine exception raise else: if monitors: set_monitors(api, pool, monitor_type, quorum, monitors) if slow_ramp_time: set_slow_ramp_time(api, pool, slow_ramp_time) if reselect_tries: set_reselect_tries(api, pool, reselect_tries) if service_down_action: set_action_on_service_down(api, pool, service_down_action) if host and port: add_pool_member(api, pool, address, port) if description: set_description(api, pool, description) else: # check-mode return value result = {'changed': True} else: # pool exists -- potentially modify attributes update = True if update: if lb_method and lb_method != get_lb_method(api, pool): if not module.check_mode: set_lb_method(api, pool, lb_method) result = {'changed': True} if monitors: t_monitor_type, t_quorum, t_monitor_templates = get_monitors(api, pool) if (t_monitor_type != monitor_type) or (t_quorum != quorum) or (set(t_monitor_templates) != set(monitors)): if not module.check_mode: set_monitors(api, pool, monitor_type, quorum, monitors) result = {'changed': True} if slow_ramp_time and slow_ramp_time != get_slow_ramp_time(api, pool): if not module.check_mode: set_slow_ramp_time(api, pool, slow_ramp_time) result = {'changed': True} if reselect_tries and reselect_tries != get_reselect_tries(api, pool): if not module.check_mode: set_reselect_tries(api, pool, reselect_tries) result = {'changed': True} if service_down_action and service_down_action != get_action_on_service_down(api, pool): if not module.check_mode: set_action_on_service_down(api, pool, service_down_action) result = {'changed': True} if (host and port) and not member_exists(api, pool, address, port): if not module.check_mode: add_pool_member(api, pool, address, port) result = {'changed': True} if (host and port == 0) and not member_exists(api, pool, address, port): if not module.check_mode: add_pool_member(api, pool, 
address, port) result = {'changed': True} if description and description != get_description(api, pool): if not module.check_mode: set_description(api, pool, description) result = {'changed': True} except Exception as e: module.fail_json(msg="received exception: %s" % e) module.exit_json(**result) from ansible.module_utils.basic import * from ansible.module_utils.f5_utils import * if __name__ == '__main__': main()
gpl-3.0
yongshengwang/hue
build/env/lib/python2.7/site-packages/Django-1.6.10-py2.7.egg/django/contrib/sites/tests.py
124
3406
from __future__ import unicode_literals from django.conf import settings from django.contrib.sites.models import Site, RequestSite, get_current_site from django.core.exceptions import ObjectDoesNotExist, ValidationError from django.http import HttpRequest from django.test import TestCase from django.test.utils import override_settings class SitesFrameworkTests(TestCase): def setUp(self): Site(id=settings.SITE_ID, domain="example.com", name="example.com").save() self.old_Site_meta_installed = Site._meta.installed Site._meta.installed = True def tearDown(self): Site._meta.installed = self.old_Site_meta_installed def test_save_another(self): # Regression for #17415 # On some backends the sequence needs reset after save with explicit ID. # Test that there is no sequence collisions by saving another site. Site(domain="example2.com", name="example2.com").save() def test_site_manager(self): # Make sure that get_current() does not return a deleted Site object. s = Site.objects.get_current() self.assertTrue(isinstance(s, Site)) s.delete() self.assertRaises(ObjectDoesNotExist, Site.objects.get_current) def test_site_cache(self): # After updating a Site object (e.g. via the admin), we shouldn't return a # bogus value from the SITE_CACHE. site = Site.objects.get_current() self.assertEqual("example.com", site.name) s2 = Site.objects.get(id=settings.SITE_ID) s2.name = "Example site" s2.save() site = Site.objects.get_current() self.assertEqual("Example site", site.name) def test_delete_all_sites_clears_cache(self): # When all site objects are deleted the cache should also # be cleared and get_current() should raise a DoesNotExist. 
self.assertIsInstance(Site.objects.get_current(), Site) Site.objects.all().delete() self.assertRaises(Site.DoesNotExist, Site.objects.get_current) @override_settings(ALLOWED_HOSTS=['example.com']) def test_get_current_site(self): # Test that the correct Site object is returned request = HttpRequest() request.META = { "SERVER_NAME": "example.com", "SERVER_PORT": "80", } site = get_current_site(request) self.assertTrue(isinstance(site, Site)) self.assertEqual(site.id, settings.SITE_ID) # Test that an exception is raised if the sites framework is installed # but there is no matching Site site.delete() self.assertRaises(ObjectDoesNotExist, get_current_site, request) # A RequestSite is returned if the sites framework is not installed Site._meta.installed = False site = get_current_site(request) self.assertTrue(isinstance(site, RequestSite)) self.assertEqual(site.name, "example.com") def test_domain_name_with_whitespaces(self): # Regression for #17320 # Domain names are not allowed contain whitespace characters site = Site(name="test name", domain="test test") self.assertRaises(ValidationError, site.full_clean) site.domain = "test\ttest" self.assertRaises(ValidationError, site.full_clean) site.domain = "test\ntest" self.assertRaises(ValidationError, site.full_clean)
apache-2.0
dcroc16/skunk_works
google_appengine/lib/django-1.4/django/dispatch/dispatcher.py
315
9292
import weakref
import threading

from django.dispatch import saferef

# Types that indicate a stored receiver is a weak reference and must be
# dereferenced before calling (see _live_receivers).
WEAKREF_TYPES = (weakref.ReferenceType, saferef.BoundMethodWeakref)


def _make_id(target):
    """Build a hashable identity key for a receiver or sender.

    Bound methods (Python 2: objects with ``im_func``) are keyed by the
    pair (instance id, function id) so two bound methods of the same
    instance compare equal; everything else is keyed by its plain id().
    """
    if hasattr(target, 'im_func'):
        return (id(target.im_self), id(target.im_func))
    return id(target)


class Signal(object):
    """
    Base class for all signals

    Internal attributes:

        receivers
            { receriverkey (id) : weakref(receiver) }
    """

    def __init__(self, providing_args=None):
        """
        Create a new signal.

        providing_args
            A list of the arguments this signal can pass along in a send() call.
        """
        self.receivers = []
        if providing_args is None:
            providing_args = []
        self.providing_args = set(providing_args)
        # Protects self.receivers against concurrent connect/disconnect.
        self.lock = threading.Lock()

    def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
        """
        Connect receiver to sender for signal.

        Arguments:

            receiver
                A function or an instance method which is to receive signals.
                Receivers must be hashable objects.

                If weak is True, then receiver must be weak-referencable (more
                precisely saferef.safeRef() must be able to create a reference
                to the receiver).

                Receivers must be able to accept keyword arguments.

                If receivers have a dispatch_uid attribute, the receiver will
                not be added if another receiver already exists with that
                dispatch_uid.

            sender
                The sender to which the receiver should respond. Must either be
                of type Signal, or None to receive events from any sender.

            weak
                Whether to use weak references to the receiver. By default, the
                module will attempt to use weak references to the receiver
                objects. If this parameter is false, then strong references will
                be used.

            dispatch_uid
                An identifier used to uniquely identify a particular instance of
                a receiver. This will usually be a string, though it may be
                anything hashable.
        """
        from django.conf import settings

        # If DEBUG is on, check that we got a good receiver
        if settings.DEBUG:
            import inspect
            assert callable(receiver), "Signal receivers must be callable."

            # Check for **kwargs
            # Not all callables are inspectable with getargspec, so we'll
            # try a couple different ways but in the end fall back on assuming
            # it is -- we don't want to prevent registration of valid but weird
            # callables.
            try:
                argspec = inspect.getargspec(receiver)
            except TypeError:
                try:
                    argspec = inspect.getargspec(receiver.__call__)
                except (TypeError, AttributeError):
                    argspec = None
            if argspec:
                assert argspec[2] is not None, \
                    "Signal receivers must accept keyword arguments (**kwargs)."

        if dispatch_uid:
            lookup_key = (dispatch_uid, _make_id(sender))
        else:
            lookup_key = (_make_id(receiver), _make_id(sender))

        if weak:
            # Wrap in a safe weak reference so a dead receiver is removed
            # from this signal automatically via _remove_receiver.
            receiver = saferef.safeRef(receiver, onDelete=self._remove_receiver)

        self.lock.acquire()
        try:
            # De-duplicate: only append when no receiver with the same
            # lookup key is already registered.
            for r_key, _ in self.receivers:
                if r_key == lookup_key:
                    break
            else:
                self.receivers.append((lookup_key, receiver))
        finally:
            self.lock.release()

    def disconnect(self, receiver=None, sender=None, weak=True, dispatch_uid=None):
        """
        Disconnect receiver from sender for signal.

        If weak references are used, disconnect need not be called. The receiver
        will be remove from dispatch automatically.

        Arguments:

            receiver
                The registered receiver to disconnect. May be none if
                dispatch_uid is specified.

            sender
                The registered sender to disconnect

            weak
                The weakref state to disconnect

            dispatch_uid
                the unique identifier of the receiver to disconnect
        """
        if dispatch_uid:
            lookup_key = (dispatch_uid, _make_id(sender))
        else:
            lookup_key = (_make_id(receiver), _make_id(sender))

        self.lock.acquire()
        try:
            for index in xrange(len(self.receivers)):
                (r_key, _) = self.receivers[index]
                if r_key == lookup_key:
                    del self.receivers[index]
                    break
        finally:
            self.lock.release()

    def send(self, sender, **named):
        """
        Send signal from sender to all connected receivers.

        If any receiver raises an error, the error propagates back through send,
        terminating the dispatch loop, so it is quite possible to not have all
        receivers called if a raises an error.

        Arguments:

            sender
                The sender of the signal Either a specific object or None.

            named
                Named arguments which will be passed to receivers.

        Returns a list of tuple pairs [(receiver, response), ... ].
        """
        responses = []
        if not self.receivers:
            return responses

        for receiver in self._live_receivers(_make_id(sender)):
            response = receiver(signal=self, sender=sender, **named)
            responses.append((receiver, response))
        return responses

    def send_robust(self, sender, **named):
        """
        Send signal from sender to all connected receivers catching errors.

        Arguments:

            sender
                The sender of the signal. Can be any python object (normally one
                registered with a connect if you actually want something to
                occur).

            named
                Named arguments which will be passed to receivers. These
                arguments must be a subset of the argument names defined in
                providing_args.

        Return a list of tuple pairs [(receiver, response), ... ]. May raise
        DispatcherKeyError.

        If any receiver raises an error (specifically any subclass of
        Exception), the error instance is returned as the result for that
        receiver.
        """
        responses = []
        if not self.receivers:
            return responses

        # Call each receiver with whatever arguments it can accept.
        # Return a list of tuple pairs [(receiver, response), ... ].
        for receiver in self._live_receivers(_make_id(sender)):
            try:
                response = receiver(signal=self, sender=sender, **named)
            except Exception, err:
                responses.append((receiver, err))
            else:
                responses.append((receiver, response))
        return responses

    def _live_receivers(self, senderkey):
        """
        Filter sequence of receivers to get resolved, live receivers.

        This checks for weak references and resolves them, then returning only
        live receivers.
        """
        none_senderkey = _make_id(None)
        receivers = []

        # Receivers registered with sender=None match every sender.
        for (receiverkey, r_senderkey), receiver in self.receivers:
            if r_senderkey == none_senderkey or r_senderkey == senderkey:
                if isinstance(receiver, WEAKREF_TYPES):
                    # Dereference the weak reference.
                    receiver = receiver()
                    if receiver is not None:
                        receivers.append(receiver)
                else:
                    receivers.append(receiver)
        return receivers

    def _remove_receiver(self, receiver):
        """
        Remove dead receivers from connections.
        """
        self.lock.acquire()
        try:
            to_remove = []
            for key, connected_receiver in self.receivers:
                if connected_receiver == receiver:
                    to_remove.append(key)
            for key in to_remove:
                last_idx = len(self.receivers) - 1
                # enumerate in reverse order so that indexes are valid even
                # after we delete some items
                for idx, (r_key, _) in enumerate(reversed(self.receivers)):
                    if r_key == key:
                        del self.receivers[last_idx-idx]
        finally:
            self.lock.release()


def receiver(signal, **kwargs):
    """
    A decorator for connecting receivers to signals. Used by passing in the
    signal and keyword arguments to connect::

        @receiver(post_save, sender=MyModel)
        def signal_receiver(sender, **kwargs):
            ...
    """
    def _decorator(func):
        signal.connect(func, **kwargs)
        return func
    return _decorator
mit
sankar-p/opt-hotplug
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py
4653
3596
# EventClass.py # # This is a library defining some events types classes, which could # be used by other scripts to analyzing the perf samples. # # Currently there are just a few classes defined for examples, # PerfEvent is the base class for all perf event sample, PebsEvent # is a HW base Intel x86 PEBS event, and user could add more SW/HW # event classes based on requirements. import struct # Event types, user could add more here EVTYPE_GENERIC = 0 EVTYPE_PEBS = 1 # Basic PEBS event EVTYPE_PEBS_LL = 2 # PEBS event with load latency info EVTYPE_IBS = 3 # # Currently we don't have good way to tell the event type, but by # the size of raw buffer, raw PEBS event with load latency data's # size is 176 bytes, while the pure PEBS event's size is 144 bytes. # def create_event(name, comm, dso, symbol, raw_buf): if (len(raw_buf) == 144): event = PebsEvent(name, comm, dso, symbol, raw_buf) elif (len(raw_buf) == 176): event = PebsNHM(name, comm, dso, symbol, raw_buf) else: event = PerfEvent(name, comm, dso, symbol, raw_buf) return event class PerfEvent(object): event_num = 0 def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC): self.name = name self.comm = comm self.dso = dso self.symbol = symbol self.raw_buf = raw_buf self.ev_type = ev_type PerfEvent.event_num += 1 def show(self): print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso) # # Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer # contains the context info when that event happened: the EFLAGS and # linear IP info, as well as all the registers. 
# class PebsEvent(PerfEvent): pebs_num = 0 def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS): tmp_buf=raw_buf[0:80] flags, ip, ax, bx, cx, dx, si, di, bp, sp = struct.unpack('QQQQQQQQQQ', tmp_buf) self.flags = flags self.ip = ip self.ax = ax self.bx = bx self.cx = cx self.dx = dx self.si = si self.di = di self.bp = bp self.sp = sp PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type) PebsEvent.pebs_num += 1 del tmp_buf # # Intel Nehalem and Westmere support PEBS plus Load Latency info which lie # in the four 64 bit words write after the PEBS data: # Status: records the IA32_PERF_GLOBAL_STATUS register value # DLA: Data Linear Address (EIP) # DSE: Data Source Encoding, where the latency happens, hit or miss # in L1/L2/L3 or IO operations # LAT: the actual latency in cycles # class PebsNHM(PebsEvent): pebs_nhm_num = 0 def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL): tmp_buf=raw_buf[144:176] status, dla, dse, lat = struct.unpack('QQQQ', tmp_buf) self.status = status self.dla = dla self.dse = dse self.lat = lat PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type) PebsNHM.pebs_nhm_num += 1 del tmp_buf
gpl-2.0
alkadis/vcv
scripts/make_user_admin.py
6
1128
#!/usr/bin/env python """ Extract email adresses from adhocracy. Emails from deleted users won't be exported. """ from datetime import datetime from sqlalchemy import and_, or_ from adhocracy.model import Group, Membership, meta, User # boilerplate code. copy that import os import sys sys.path.insert(0, os.path.abspath(os.path.dirname(__file__))) from common import create_parser, get_instances, load_from_args # /end boilerplate code def main(): parser = create_parser(description=__doc__, use_instance=False) parser.add_argument( "username", help=("The name of the user who should become a global admin")) args = parser.parse_args() load_from_args(args) user = User.find(args.username) if user is None: print 'Cannot find user %s\n' % args.username parser.exit() global_membership = [membership for membership in user.memberships if membership.instance is None][0] admin_group = Group.by_code(Group.CODE_ADMIN) global_membership.group = admin_group meta.Session.commit() if __name__ == '__main__': sys.exit(main())
agpl-3.0
graphite/TeX4Web-INVENIO
modules/elmsubmit/lib/elmsubmit_generate_marc.py
35
6779
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.

__revision__ = "$Id$"

import invenio.elmsubmit_config as elmsubmit_config


def generate_marc(submission_dict):
    """ method generates a marc xml file from the submission dict """
    # marc_dict maps a 5-char MARC key (tag + ind1 + ind2, '_' for blank
    # indicators) to a list of (subfield_code, value) tuples.
    marc_dict = {}
    for field in submission_dict.keys():
        # print "field", field, submission_dict[field]
        # marc_dict should cotain a dictionary {'marc_code', value, ...}
        generate_data_field(field, submission_dict[field], marc_dict)
    # generate an xml file from marc_dict
    # print 'MARC DICT', marc_dict
    full_marc = print_marc(marc_dict)
    return full_marc


def print_marc(marc_dict):
    """ method prints the xml file from the transformed dictionary """
    marc_text = '<record>\n'
    # extract the ind1 and ind2 tags
    for key in marc_dict.keys():
        tag = key[0:3]
        if key[3] != '_':
            ind1 = key[3]
        else:
            ind1 = ''
        if key[4] != '_':
            ind2 = key[4]
        else:
            ind2 = ''
        # subfields joined into one field
        if key in elmsubmit_config.CFG_ELMSUBMIT_MARC_FIELDS_JOINED.keys():
            tuple_list = marc_dict[key]
            prefix_list = []
            prefix_dict = {}
            # make a list and a dictionary with occurance numbers
            for subfield_tuple in marc_dict[key]:
                prefix_list.append(subfield_tuple[0])
                if prefix_dict.has_key(subfield_tuple[0]) == 1:
                    prefix_dict[subfield_tuple[0]] = prefix_dict[subfield_tuple[0]] + 1
                else:
                    prefix_dict[subfield_tuple[0]] = 1
            for linked_prefix_list in elmsubmit_config.CFG_ELMSUBMIT_MARC_FIELDS_JOINED[key]:
                # we found a list of prefixes to join, build a field out of them.
                # Keep emitting joined datafields while every prefix of the
                # linked set still has an unconsumed occurrence.
                while contains_elements(linked_prefix_list, prefix_dict.keys()):
                    marc_text = marc_text + '<datafield tag ="' + tag + '" ind1="' + ind1 + '" ind2="' + ind2 + '">\n'
                    for prefix in linked_prefix_list:
                        # Consume the first remaining occurrence of this prefix.
                        tuple_index = prefix_list.index(prefix)
                        sub_tuple = tuple_list[tuple_index]
                        marc_text = marc_text + '<subfield code="' + sub_tuple[0] + '">' + sub_tuple[1] + '</subfield>\n'
                        del tuple_list[tuple_index]
                        del prefix_list[tuple_index]
                        prefix_dict[prefix] = prefix_dict[prefix] - 1
                        if prefix_dict[prefix] == 0:
                            del prefix_dict[prefix]
                    marc_text = marc_text + '</datafield>\n'
            # append the actual datafields
            for sub_tuple in tuple_list:
                marc_text = marc_text + '<datafield tag ="' + tag + '" ind1="' + ind1 + '" ind2="' + ind2 + '">\n'
                marc_text = marc_text + '<subfield code="' + sub_tuple[0] + '">' + sub_tuple[1] + '</subfield>\n'
                prefix_dict[sub_tuple[0]] = prefix_dict[sub_tuple[0]] - 1
                if prefix_dict[sub_tuple[0]] == 0:
                    del prefix_dict[sub_tuple[0]]
                marc_text = marc_text + '</datafield>\n'
            del tuple_list
            del prefix_list
        else:
            # simply create the datafield
            for subfield_tuple in marc_dict[key]:
                marc_text = marc_text + '<datafield tag ="' + tag + '" ind1="' + ind1 + '" ind2="' + ind2 + '">\n'
                marc_text = marc_text + '<subfield code="' + subfield_tuple[0] + '">' + subfield_tuple[1] + '</subfield>\n'
                marc_text = marc_text + '</datafield>\n'
    marc_text = marc_text + '</record>'
    return marc_text


def contains_elements(small_list, big_list):
    """function checking if all elements of list a are in list b """
    for element in small_list:
        try:
            a = big_list.index(element)
        except ValueError:
            return False
    return True


def generate_data_field(field, value, marc_dict):
    """ for a given data field, determine if it is in the marc dictionary
        dictionary and update marc_dict accordingly """
    if (field in elmsubmit_config.CFG_ELMSUBMIT_MARC_MAPPING):
        # print "field:", field
        # field is a normal field
        if not isinstance(elmsubmit_config.CFG_ELMSUBMIT_MARC_MAPPING[field], list):
            for value_part in value:
                (datafield, subfield) = process_marc(elmsubmit_config.CFG_ELMSUBMIT_MARC_MAPPING[field])
                if marc_dict.has_key(datafield) == 1:
                    marc_dict[datafield].append((subfield, value_part))
                else:
                    marc_dict[datafield] = [(subfield, value_part)]
        else:
            # field is a list
            # determine the length
            # NOTE(review): the first value goes to the first mapping entry,
            # every subsequent value to the second -- presumably
            # "first author" vs "additional authors"; confirm against config.
            for value_part in value:
                if value.index(value_part) == 0:
                    (datafield, subfield) = process_marc(elmsubmit_config.CFG_ELMSUBMIT_MARC_MAPPING[field][0])
                    if marc_dict.has_key(datafield) == 1:
                        marc_dict[datafield].append((subfield, value_part))
                    else:
                        marc_dict[datafield] = [(subfield, value_part)]
                else:
                    (datafield, subfield) = process_marc(elmsubmit_config.CFG_ELMSUBMIT_MARC_MAPPING[field][1])
                    if marc_dict.has_key(datafield) == 1:
                        marc_dict[datafield].append((subfield, value_part))
                    else:
                        marc_dict[datafield] = [(subfield, value_part)]
    else:
        pass
        # print "field_not_in Marc:", field


def process_marc(marc_code):
    """ extract the datafield and subfield from a Marc field """
    # print "marc_code", marc_code
    # First five characters are tag+indicators, the sixth is the subfield code.
    datafield = marc_code[0:5]
    subfield = marc_code[5]
    # print "datafield", datafield, "subfield", subfield
    return (datafield, subfield)
gpl-2.0
inventree/InvenTree
InvenTree/company/migrations/0001_initial.py
2
4647
# Generated by Django 2.2 on 2019-05-20 12:04
# NOTE: auto-generated initial migration for the "company" app -- do not
# hand-edit the operations; create a new migration instead.

import company.models
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Company',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='Company name', max_length=100, unique=True)),
                ('description', models.CharField(help_text='Description of the company', max_length=500)),
                ('website', models.URLField(blank=True, help_text='Company website URL')),
                ('address', models.CharField(blank=True, help_text='Company address', max_length=200)),
                ('phone', models.CharField(blank=True, help_text='Contact phone number', max_length=50)),
                ('email', models.EmailField(blank=True, help_text='Contact email address', max_length=254)),
                ('contact', models.CharField(blank=True, help_text='Point of contact', max_length=100)),
                ('URL', models.URLField(blank=True, help_text='Link to external company information')),
                ('image', models.ImageField(blank=True, max_length=255, null=True, upload_to=company.models.rename_company_image)),
                ('notes', models.TextField(blank=True)),
                ('is_customer', models.BooleanField(default=False, help_text='Do you sell items to this company?')),
                ('is_supplier', models.BooleanField(default=True, help_text='Do you purchase items from this company?')),
            ],
        ),
        migrations.CreateModel(
            name='Contact',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('phone', models.CharField(blank=True, max_length=100)),
                ('email', models.EmailField(blank=True, max_length=254)),
                ('role', models.CharField(blank=True, max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='SupplierPart',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('SKU', models.CharField(help_text='Supplier stock keeping unit', max_length=100)),
                ('manufacturer', models.CharField(blank=True, help_text='Manufacturer', max_length=100)),
                ('MPN', models.CharField(blank=True, help_text='Manufacturer part number', max_length=100)),
                ('URL', models.URLField(blank=True, help_text='URL for external supplier part link')),
                ('description', models.CharField(blank=True, help_text='Supplier part description', max_length=250)),
                ('note', models.CharField(blank=True, help_text='Notes', max_length=100)),
                ('base_cost', models.DecimalField(decimal_places=3, default=0, help_text='Minimum charge (e.g. stocking fee)', max_digits=10, validators=[django.core.validators.MinValueValidator(0)])),
                ('packaging', models.CharField(blank=True, help_text='Part packaging', max_length=50)),
                ('multiple', models.PositiveIntegerField(default=1, help_text='Order multiple', validators=[django.core.validators.MinValueValidator(1)])),
                ('minimum', models.PositiveIntegerField(default=1, help_text='Minimum order quantity (MOQ)', validators=[django.core.validators.MinValueValidator(1)])),
                ('lead_time', models.DurationField(blank=True, null=True)),
            ],
            options={
                # Legacy table name kept from when this model lived in the
                # "part" app.
                'db_table': 'part_supplierpart',
            },
        ),
        migrations.CreateModel(
            name='SupplierPriceBreak',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', models.PositiveIntegerField(default=1, validators=[django.core.validators.MinValueValidator(1)])),
                ('cost', models.DecimalField(decimal_places=5, max_digits=10, validators=[django.core.validators.MinValueValidator(0)])),
                ('part', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pricebreaks', to='company.SupplierPart')),
            ],
            options={
                # Legacy table name kept from when this model lived in the
                # "part" app.
                'db_table': 'part_supplierpricebreak',
            },
        ),
    ]
mit
odoo-turkiye/odoo
openerp/addons/test_access_rights/tests/test_ir_rules.py
299
1220
import openerp.exceptions from openerp.tests.common import TransactionCase class TestRules(TransactionCase): def setUp(self): super(TestRules, self).setUp() self.id1 = self.env['test_access_right.some_obj']\ .create({'val': 1}).id self.id2 = self.env['test_access_right.some_obj']\ .create({'val': -1}).id # create a global rule forbidding access to records with a negative # (or zero) val self.env['ir.rule'].create({ 'name': 'Forbid negatives', 'model_id': self.browse_ref('test_access_rights.model_test_access_right_some_obj').id, 'domain_force': "[('val', '>', 0)]" }) def test_basic_access(self): env = self.env(user=self.browse_ref('base.public_user')) # put forbidden record in cache browse2 = env['test_access_right.some_obj'].browse(self.id2) # this is the one we want browse1 = env['test_access_right.some_obj'].browse(self.id1) # this should not blow up self.assertEqual(browse1.val, 1) # but this should with self.assertRaises(openerp.exceptions.AccessError): self.assertEqual(browse2.val, -1)
agpl-3.0
demisto/content
Packs/Silverfort/Integrations/Silverfort/Silverfort_test.py
1
5886
import pytest
from unittest.mock import patch
from Silverfort import get_user_entity_risk_command, get_resource_entity_risk_command,\
    update_user_entity_risk_command, update_resource_entity_risk_command

# Patched over Silverfort.API_KEY in every test so the mocked URLs match.
API_KEY = "APIKEY"


# ---- fixtures: canned inputs and API responses shared by all tests ----

@pytest.fixture(autouse=True)
def upn():
    return 'sfuser@silverfort.io'


@pytest.fixture(autouse=True)
def base_url():
    return 'https://test.com'


@pytest.fixture(autouse=True)
def email():
    return 'john@silverfort.com'


@pytest.fixture(autouse=True)
def domain():
    return 'silverfort.io'


@pytest.fixture(autouse=True)
def api_key():
    return 'APIKEY'


@pytest.fixture(autouse=True)
def risk():
    return {'risk_name': 'activity_risk', 'severity': 'medium', 'valid_for': 1, 'description': 'Suspicious activity'}


@pytest.fixture(autouse=True)
def resource_name():
    return 'AA--DC-1'


@pytest.fixture(autouse=True)
def bad_response():
    # Any payload without the expected "result" key counts as a failure.
    return 'No valid response'


@pytest.fixture(autouse=True)
def valid_update_response():
    return {"result": "updated successfully!"}


@pytest.fixture(autouse=True)
def valid_get_risk_response():
    return {"risk": "Low", "reasons": ["Password never expires", "Suspicious activity"]}


@pytest.fixture(autouse=True)
def valid_get_upn_response(upn):
    return {"user_principal_name": upn}


@pytest.fixture(autouse=True)
def sam_account():
    return 'sfuser'


@pytest.fixture(autouse=True)
def client(base_url):
    # Real Client instance; HTTP traffic is intercepted by requests_mock.
    from Silverfort import Client
    return Client(base_url=base_url, verify=False)


@pytest.fixture(autouse=True)
def risk_args(risk):
    return {'risk_name': 'activity_risk', 'severity': 'medium', 'valid_for': 1, 'description': 'Suspicious activity'}


class TestSiverfort(object):
    """Unit tests for the Silverfort integration commands, driven entirely
    through mocked HTTP endpoints."""

    @patch('Silverfort.API_KEY', API_KEY)
    def test_get_status(self, requests_mock, base_url, api_key, client):
        from Silverfort import test_module
        requests_mock.get(f'{base_url}/getBootStatus?apikey={api_key}', json="True")
        output = test_module(client)
        assert output == "ok"

    @patch('Silverfort.API_KEY', API_KEY)
    def test_get_upn_by_email(self, requests_mock, upn, base_url, valid_get_upn_response,
                              api_key, client, email, domain):
        requests_mock.get(f'{base_url}/getUPN?apikey={api_key}&email={email}&domain={domain}',
                          json=valid_get_upn_response)
        output = client.get_upn_by_email_or_sam_account_http_request(domain, email=email)
        assert output == upn

    @patch('Silverfort.API_KEY', API_KEY)
    def test_get_upn_by_sam_account(self, requests_mock, upn, base_url, valid_get_upn_response,
                                    api_key, client, sam_account, domain):
        requests_mock.get(f'{base_url}/getUPN?apikey={api_key}&sam_account={sam_account}&domain={domain}',
                          json=valid_get_upn_response)
        output = client.get_upn_by_email_or_sam_account_http_request(domain, sam_account=sam_account)
        assert output == upn

    @patch('Silverfort.API_KEY', API_KEY)
    def test_get_user_entity_risk(self, requests_mock, upn, base_url, api_key, client, valid_get_risk_response):
        args = {'upn': upn}
        requests_mock.get(f'{base_url}/getEntityRisk?apikey={api_key}&user_principal_name={upn}',
                          json=valid_get_risk_response)
        _, outputs, _ = get_user_entity_risk_command(client, args)
        # Demisto context path keyed on the UPN.
        outputs = outputs['Silverfort.UserRisk(val.UPN && val.UPN == obj.UPN)']
        assert outputs["UPN"] == upn
        assert outputs["Risk"] == valid_get_risk_response["risk"]
        assert outputs["Reasons"] == valid_get_risk_response["reasons"]

    @patch('Silverfort.API_KEY', API_KEY)
    def test_get_resource_entity_risk(self, requests_mock, base_url, api_key, client, valid_get_risk_response,
                                      resource_name, domain):
        args = {'resource_name': resource_name, 'domain_name': domain}
        requests_mock.get(f'{base_url}/getEntityRisk?apikey={api_key}&resource_name={resource_name}'
                          f'&domain_name={domain}', json=valid_get_risk_response)
        _, outputs, _ = get_resource_entity_risk_command(client, args)
        # Demisto context path keyed on the resource name.
        outputs = outputs['Silverfort.ResourceRisk(val.ResourceName && val.ResourceName == obj.ResourceName)']
        assert outputs["ResourceName"] == resource_name
        assert outputs["Risk"] == valid_get_risk_response["risk"]
        assert outputs["Reasons"] == valid_get_risk_response["reasons"]

    @patch('Silverfort.API_KEY', API_KEY)
    def test_update_user_entity_risk(self, requests_mock, upn, base_url, api_key, client, valid_update_response,
                                     bad_response, risk_args):
        args = risk_args
        args['upn'] = upn
        # Success path, then failure path against the same endpoint.
        requests_mock.post(f'{base_url}/updateEntityRisk?apikey={api_key}', json=valid_update_response)
        assert update_user_entity_risk_command(client, args) == "updated successfully!"
        requests_mock.post(f'{base_url}/updateEntityRisk?apikey={api_key}', json=bad_response)
        assert update_user_entity_risk_command(client, args) == "Couldn't update the user entity's risk"

    @patch('Silverfort.API_KEY', API_KEY)
    def test_update_resource_entity_risk_successfully(self, requests_mock, base_url, api_key, client,
                                                      valid_update_response, bad_response, risk_args,
                                                      resource_name, domain):
        args = risk_args
        args['resource_name'] = resource_name
        args['domain_name'] = domain
        # Success path, then failure path against the same endpoint.
        requests_mock.post(f'{base_url}/updateEntityRisk?apikey={api_key}', json=valid_update_response)
        assert update_resource_entity_risk_command(client, args) == 'updated successfully!'
        requests_mock.post(f'{base_url}/updateEntityRisk?apikey={api_key}', json=bad_response)
        assert update_resource_entity_risk_command(client, args) == "Couldn't update the resource entity's risk"
mit
magacoin/magacoin
contrib/devtools/clang-format.py
161
2130
#!/usr/bin/env python ''' Wrapper script for clang-format Copyright (c) 2015 MarcoFalke Copyright (c) 2015 The Bitcoin Core developers Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. ''' import os import sys import subprocess tested_versions = ['3.6.0', '3.6.1', '3.6.2'] # A set of versions known to produce the same output accepted_file_extensions = ('.h', '.cpp') # Files to format def check_clang_format_version(clang_format_exe): try: output = subprocess.check_output([clang_format_exe, '-version']) for ver in tested_versions: if ver in output: print "Detected clang-format version " + ver return raise RuntimeError("Untested version: " + output) except Exception as e: print 'Could not verify version of ' + clang_format_exe + '.' raise e def check_command_line_args(argv): required_args = ['{clang-format-exe}', '{files}'] example_args = ['clang-format-3.x', 'src/main.cpp', 'src/wallet/*'] if(len(argv) < len(required_args) + 1): for word in (['Usage:', argv[0]] + required_args): print word, print '' for word in (['E.g:', argv[0]] + example_args): print word, print '' sys.exit(1) def run_clang_format(clang_format_exe, files): for target in files: if os.path.isdir(target): for path, dirs, files in os.walk(target): run_clang_format(clang_format_exe, (os.path.join(path, f) for f in files)) elif target.endswith(accepted_file_extensions): print "Format " + target subprocess.check_call([clang_format_exe, '-i', '-style=file', target], stdout=open(os.devnull, 'wb'), stderr=subprocess.STDOUT) else: print "Skip " + target def main(argv): check_command_line_args(argv) clang_format_exe = argv[1] files = argv[2:] check_clang_format_version(clang_format_exe) run_clang_format(clang_format_exe, files) if __name__ == "__main__": main(sys.argv)
mit
NaohiroTamura/python-ironicclient
ironicclient/v1/chassis.py
1
6569
# -*- coding: utf-8 -*-
#
# Copyright © 2013 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from ironicclient.common import base
from ironicclient.common.i18n import _
from ironicclient.common import utils
from ironicclient import exc


class Chassis(base.Resource):
    """API resource object representing a single Ironic chassis."""

    def __repr__(self):
        return "<Chassis %s>" % self._info


class ChassisManager(base.CreateManager):
    """Manager for CRUD operations on chassis resources."""

    resource_class = Chassis
    _resource_name = 'chassis'
    # Attributes accepted by the create() call inherited from CreateManager.
    _creation_attributes = ['description', 'extra', 'uuid']

    def list(self, marker=None, limit=None, sort_key=None,
             sort_dir=None, detail=False, fields=None):
        """Retrieve a list of chassis.

        :param marker: Optional, the UUID of a chassis, eg the last
                       chassis from a previous result set. Return
                       the next result set.
        :param limit: The maximum number of results to return per
                      request, if:

            1) limit > 0, the maximum number of chassis to return.
            2) limit == 0, return the entire list of chassis.
            3) limit param is NOT specified (None), the number of items
               returned respect the maximum imposed by the Ironic API
               (see Ironic's api.max_limit option).

        :param sort_key: Optional, field used for sorting.

        :param sort_dir: Optional, direction of sorting, either 'asc' (the
                         default) or 'desc'.

        :param detail: Optional, boolean whether to return detailed information
                       about chassis.

        :param fields: Optional, a list with a specified set of fields
                       of the resource to be returned. Can not be used
                       when 'detail' is set.

        :returns: A list of chassis.

        """
        if limit is not None:
            limit = int(limit)

        if detail and fields:
            raise exc.InvalidAttribute(_("Can't fetch a subset of fields "
                                         "with 'detail' set"))

        filters = utils.common_filters(marker, limit, sort_key, sort_dir,
                                       fields)

        path = ''
        if detail:
            path += 'detail'
        if filters:
            path += '?' + '&'.join(filters)

        # With an explicit limit, paginate client-side until "limit" items
        # have been collected; otherwise a single request is made.
        if limit is None:
            return self._list(self._path(path), "chassis")
        else:
            return self._list_pagination(self._path(path), "chassis",
                                         limit=limit)

    def list_nodes(self, chassis_id, marker=None, limit=None,
                   sort_key=None, sort_dir=None, detail=False, fields=None,
                   associated=None, maintenance=None, provision_state=None):
        """List all the nodes for a given chassis.

        :param chassis_id: The UUID of the chassis.
        :param marker: Optional, the UUID of a node, eg the last
                       node from a previous result set. Return
                       the next result set.
        :param limit: The maximum number of results to return per
                      request, if:

            1) limit > 0, the maximum number of nodes to return.
            2) limit == 0, return the entire list of nodes.
            3) limit param is NOT specified (None), the number of items
               returned respect the maximum imposed by the Ironic API
               (see Ironic's api.max_limit option).

        :param sort_key: Optional, field used for sorting.

        :param sort_dir: Optional, direction of sorting, either 'asc' (the
                         default) or 'desc'.

        :param detail: Optional, boolean whether to return detailed information
                       about nodes.

        :param fields: Optional, a list with a specified set of fields
                       of the resource to be returned. Can not be used
                       when 'detail' is set.

        :param associated: Optional. Either a Boolean or a string
                           representation of a Boolean that indicates whether
                           to return a list of associated (True or "True") or
                           unassociated (False or "False") nodes.

        :param maintenance: Optional. Either a Boolean or a string
                            representation of a Boolean that indicates whether
                            to return nodes in maintenance mode (True or
                            "True"), or not in maintenance mode (False or
                            "False").

        :param provision_state: Optional. String value to get only nodes in
                                that provision state.

        :returns: A list of nodes.

        """
        if limit is not None:
            limit = int(limit)

        if detail and fields:
            raise exc.InvalidAttribute(_("Can't fetch a subset of fields "
                                         "with 'detail' set"))

        filters = utils.common_filters(marker, limit, sort_key, sort_dir,
                                       fields)
        if associated is not None:
            filters.append('associated=%s' % associated)
        if maintenance is not None:
            filters.append('maintenance=%s' % maintenance)
        if provision_state is not None:
            filters.append('provision_state=%s' % provision_state)

        path = "%s/nodes" % chassis_id
        if detail:
            path += '/detail'

        if filters:
            path += '?' + '&'.join(filters)

        # Same pagination split as list() above.
        if limit is None:
            return self._list(self._path(path), "nodes")
        else:
            return self._list_pagination(self._path(path), "nodes",
                                         limit=limit)

    def get(self, chassis_id, fields=None):
        """Get a single chassis, optionally restricted to specific fields."""
        return self._get(resource_id=chassis_id, fields=fields)

    def delete(self, chassis_id):
        """Delete the chassis with the given UUID."""
        return self._delete(resource_id=chassis_id)

    def update(self, chassis_id, patch):
        """Apply a JSON-patch style update to the chassis."""
        return self._update(resource_id=chassis_id, patch=patch)
apache-2.0
ptemplier/ansible
lib/ansible/modules/cloud/centurylink/clc_group.py
26
16745
#!/usr/bin/python
#
# Copyright (c) 2015 CenturyLink
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
module: clc_group
short_description: Create/delete Server Groups at Centurylink Cloud
description:
  - Create or delete Server Groups at Centurylink Centurylink Cloud
version_added: "2.0"
options:
  name:
    description:
      - The name of the Server Group
    required: True
  description:
    description:
      - A description of the Server Group
    required: False
  parent:
    description:
      - The parent group of the server group. If parent is not provided, it creates the group at top level.
    required: False
  location:
    description:
      - Datacenter to create the group in. If location is not provided, the group gets created in the default
        datacenter associated with the account
    required: False
  state:
    description:
      - Whether to create or delete the group
    default: present
    choices: ['present', 'absent']
  wait:
    description:
      - Whether to wait for the tasks to finish before returning.
    choices: [ True, False ]
    default: True
    required: False
requirements:
    - python = 2.7
    - requests >= 2.5.0
    - clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
    - To use this module, it is required to set the below environment variables which enables access to the
      Centurylink Cloud
          - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
          - CLC_V2_API_PASSWORD, the account password for the centurylink cloud
    - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
      CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
          - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
          - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
    - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''

EXAMPLES = '''

# Create a Server Group

---
- name: Create Server Group
  hosts: localhost
  gather_facts: False
  connection: local
  tasks:
    - name: Create / Verify a Server Group at CenturyLink Cloud
      clc_group:
        name: My Cool Server Group
        parent: Default Group
        state: present
      register: clc

    - name: debug
      debug:
        var: clc

# Delete a Server Group
---
- name: Delete Server Group
  hosts: localhost
  gather_facts: False
  connection: local
  tasks:
    - name: Delete / Verify Absent a Server Group at CenturyLink Cloud
      clc_group:
        name: My Cool Server Group
        parent: Default Group
        state: absent
      register: clc

    - name: debug
      debug:
        var: clc
'''

RETURN = '''
group:
    description: The group information
    returned: success
    type: dict
    sample:
        {
           "changeInfo":{
              "createdBy":"service.wfad",
              "createdDate":"2015-07-29T18:52:47Z",
              "modifiedBy":"service.wfad",
              "modifiedDate":"2015-07-29T18:52:47Z"
           },
           "customFields":[

           ],
           "description":"test group",
           "groups":[

           ],
           "id":"bb5f12a3c6044ae4ad0a03e73ae12cd1",
           "links":[
              {
                 "href":"/v2/groups/wfad",
                 "rel":"createGroup",
                 "verbs":[
                    "POST"
                 ]
              },
              {
                 "href":"/v2/servers/wfad",
                 "rel":"createServer",
                 "verbs":[
                    "POST"
                 ]
              },
              {
                 "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1",
                 "rel":"self",
                 "verbs":[
                    "GET",
                    "PATCH",
                    "DELETE"
                 ]
              },
              {
                 "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
                 "id":"086ac1dfe0b6411989e8d1b77c4065f0",
                 "rel":"parentGroup"
              },
              {
                 "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/defaults",
                 "rel":"defaults",
                 "verbs":[
                    "GET",
                    "POST"
                 ]
              },
              {
                 "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/billing",
                 "rel":"billing"
              },
              {
                 "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/archive",
                 "rel":"archiveGroupAction"
              },
              {
                 "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/statistics",
                 "rel":"statistics"
              },
              {
                 "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/upcomingScheduledActivities",
                 "rel":"upcomingScheduledActivities"
              },
              {
                 "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/horizontalAutoscalePolicy",
                 "rel":"horizontalAutoscalePolicyMapping",
                 "verbs":[
                    "GET",
                    "PUT",
                    "DELETE"
                 ]
              },
              {
                 "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/scheduledActivities",
                 "rel":"scheduledActivities",
                 "verbs":[
                    "GET",
                    "POST"
                 ]
              }
           ],
           "locationId":"UC1",
           "name":"test group",
           "status":"active",
           "type":"default"
        }
'''

__version__ = '${version}'

import os
from distutils.version import LooseVersion

try:
    import requests
except ImportError:
    REQUESTS_FOUND = False
else:
    REQUESTS_FOUND = True

#
#  Requires the clc-python-sdk.
#  sudo pip install clc-sdk
#
try:
    import clc as clc_sdk
    from clc import CLCException
except ImportError:
    CLC_FOUND = False
    clc_sdk = None
else:
    CLC_FOUND = True

from ansible.module_utils.basic import AnsibleModule


class ClcGroup(object):
    """Ensure presence/absence of a server group in CenturyLink Cloud."""

    clc = None
    root_group = None

    def __init__(self, module):
        """
        Construct module

        :param module: the AnsibleModule driving this run; its params and
                       check_mode flag control all behavior.
        """
        self.clc = clc_sdk
        self.module = module
        self.group_dict = {}

        # Fail fast when optional dependencies are missing or too old.
        if not CLC_FOUND:
            self.module.fail_json(
                msg='clc-python-sdk required for this module')
        if not REQUESTS_FOUND:
            self.module.fail_json(
                msg='requests library is required for this module')
        if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
            self.module.fail_json(
                msg='requests library  version should be >= 2.5.0')

        self._set_user_agent(self.clc)

    def process_request(self):
        """
        Execute the main code path, and handle the request
        :return: none
        """
        location = self.module.params.get('location')
        group_name = self.module.params.get('name')
        parent_name = self.module.params.get('parent')
        group_description = self.module.params.get('description')
        state = self.module.params.get('state')

        self._set_clc_credentials_from_env()
        self.group_dict = self._get_group_tree_for_datacenter(
            datacenter=location)

        if state == "absent":
            # NOTE: renamed from 'requests' so the local list of CLC request
            # objects no longer shadows the module-level 'requests' library.
            changed, group, request_list = self._ensure_group_is_absent(
                group_name=group_name, parent_name=parent_name)
            if request_list:
                self._wait_for_requests_to_complete(request_list)
        else:
            changed, group = self._ensure_group_is_present(
                group_name=group_name, parent_name=parent_name,
                group_description=group_description)
        # A clc_sdk.Group carries its payload in .data; when the group was
        # absent (or only its name is known) fall back to the bare name.
        try:
            group = group.data
        except AttributeError:
            group = group_name

        self.module.exit_json(changed=changed, group=group)

    @staticmethod
    def _define_module_argument_spec():
        """
        Define the argument spec for the ansible module
        :return: argument spec dictionary
        """
        argument_spec = dict(
            name=dict(required=True),
            description=dict(default=None),
            parent=dict(default=None),
            location=dict(default=None),
            state=dict(default='present', choices=['present', 'absent']),
            wait=dict(type='bool', default=True))

        return argument_spec

    def _set_clc_credentials_from_env(self):
        """
        Set the CLC Credentials on the sdk by reading environment variables
        :return: none
        """
        env = os.environ
        v2_api_token = env.get('CLC_V2_API_TOKEN', False)
        v2_api_username = env.get('CLC_V2_API_USERNAME', False)
        # NOTE(review): reads CLC_V2_API_PASSWD while DOCUMENTATION mentions
        # CLC_V2_API_PASSWORD - kept as-is for backward compatibility; confirm
        # against the other clc_* modules before changing.
        v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
        clc_alias = env.get('CLC_ACCT_ALIAS', False)
        api_url = env.get('CLC_V2_API_URL', False)

        if api_url:
            self.clc.defaults.ENDPOINT_URL_V2 = api_url

        # Prefer a pre-generated token + alias; otherwise log in with
        # username/password; otherwise fail with guidance.
        if v2_api_token and clc_alias:
            self.clc._LOGIN_TOKEN_V2 = v2_api_token
            self.clc._V2_ENABLED = True
            self.clc.ALIAS = clc_alias
        elif v2_api_username and v2_api_passwd:
            self.clc.v2.SetCredentials(
                api_username=v2_api_username,
                api_passwd=v2_api_passwd)
        else:
            return self.module.fail_json(
                msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
                    "environment variables")

    def _ensure_group_is_absent(self, group_name, parent_name):
        """
        Ensure that group_name is absent by deleting it if necessary
        :param group_name: string - the name of the clc server group to delete
        :param parent_name: string - the name of the parent group for group_name
        :return: changed, group, results - changed: Boolean whether a change
                 was made, group: list of deleted group names, results: list
                 of CLC request objects for the deletions
        """
        changed = False
        group = []
        results = []

        if self._group_exists(group_name=group_name, parent_name=parent_name):
            if not self.module.check_mode:
                group.append(group_name)
                result = self._delete_group(group_name)
                results.append(result)
            changed = True
        return changed, group, results

    def _delete_group(self, group_name):
        """
        Delete the provided server group
        :param group_name: string - the server group to delete
        :return: the CLC request object for the deletion
        """
        response = None
        group, parent = self.group_dict.get(group_name)
        try:
            response = group.Delete()
        except CLCException as ex:
            self.module.fail_json(msg='Failed to delete group :{0}. {1}'.format(
                group_name, ex.response_text
            ))
        return response

    def _ensure_group_is_present(
            self,
            group_name,
            parent_name,
            group_description):
        """
        Checks to see if a server group exists, creates it if it doesn't.
        :param group_name: the name of the group to validate/create
        :param parent_name: the name of the parent group for group_name
        :param group_description: a short description of the server group (used when creating)
        :return: (changed, group) -
            changed:  Boolean- whether a change was made,
            group:  A clc group object for the group
        """
        assert self.root_group, "Implementation Error: Root Group not set"
        parent = parent_name if parent_name is not None else self.root_group.name
        description = group_description
        changed = False
        group = group_name

        parent_exists = self._group_exists(group_name=parent, parent_name=None)
        child_exists = self._group_exists(
            group_name=group_name,
            parent_name=parent)

        if parent_exists and child_exists:
            group, parent = self.group_dict[group_name]
            changed = False
        elif parent_exists and not child_exists:
            if not self.module.check_mode:
                group = self._create_group(
                    group=group,
                    parent=parent,
                    description=description)
            changed = True
        else:
            self.module.fail_json(
                msg="parent group: " +
                parent +
                " does not exist")

        return changed, group

    def _create_group(self, group, parent, description):
        """
        Create the provided server group
        :param group: clc_sdk.Group - the group to create
        :param parent: clc_sdk.Parent - the parent group for {group}
        :param description: string - a text description of the group
        :return: clc_sdk.Group - the created group
        """
        response = None
        (parent, grandparent) = self.group_dict[parent]
        try:
            response = parent.Create(name=group, description=description)
        except CLCException as ex:
            self.module.fail_json(msg='Failed to create group :{0}. {1}'.format(
                group, ex.response_text))
        return response

    def _group_exists(self, group_name, parent_name):
        """
        Check to see if a group exists
        :param group_name: string - the group to check
        :param parent_name: string - the parent of group_name; None matches
                            any parent
        :return: boolean - whether the group exists
        """
        result = False
        if group_name in self.group_dict:
            (group, parent) = self.group_dict[group_name]
            if parent_name is None or parent_name == parent.name:
                result = True
        return result

    def _get_group_tree_for_datacenter(self, datacenter=None):
        """
        Walk the tree of groups for a datacenter
        :param datacenter: string - the datacenter to walk (ex: 'UC1')
        :return: a dictionary of groups and parents
        """
        self.root_group = self.clc.v2.Datacenter(
            location=datacenter).RootGroup()
        return self._walk_groups_recursive(
            parent_group=None,
            child_group=self.root_group)

    def _walk_groups_recursive(self, parent_group, child_group):
        """
        Walk a parent-child tree of groups, starting with the provided child group
        :param parent_group: clc_sdk.Group - the parent group to start the walk
        :param child_group: clc_sdk.Group - the child group to start the walk
        :return: a dictionary of group-name -> (group, parent) tuples
        """
        result = {str(child_group): (child_group, parent_group)}
        # Only 'default' groups are user-manageable; skip system groups.
        for group in child_group.Subgroups().groups:
            if group.type != 'default':
                continue
            result.update(self._walk_groups_recursive(child_group, group))
        return result

    def _wait_for_requests_to_complete(self, requests_lst):
        """
        Waits until the CLC requests are complete if the wait argument is True
        :param requests_lst: The list of CLC request objects
        :return: none
        """
        if not self.module.params['wait']:
            return
        for request in requests_lst:
            request.WaitUntilComplete()
            for request_details in request.requests:
                if request_details.Status() != 'succeeded':
                    self.module.fail_json(
                        msg='Unable to process group request')

    @staticmethod
    def _set_user_agent(clc):
        # Tag API calls with this module's version so they are identifiable
        # server-side; only possible on SDK versions exposing the hook.
        if hasattr(clc, 'SetRequestsSession'):
            agent_string = "ClcAnsibleModule/" + __version__
            ses = requests.Session()
            ses.headers.update({"Api-Client": agent_string})
            ses.headers['User-Agent'] += " " + agent_string
            clc.SetRequestsSession(ses)


def main():
    """
    The main function.  Instantiates the module and calls process_request.
    :return: none
    """
    module = AnsibleModule(
        argument_spec=ClcGroup._define_module_argument_spec(),
        supports_check_mode=True)

    clc_group = ClcGroup(module)
    clc_group.process_request()


if __name__ == '__main__':
    main()
gpl-3.0
marcusramberg/dotfiles
link/hammerspoon/hs/node_modules/node-gyp/gyp/pylib/gyp/input.py
713
115880
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from compiler.ast import Const from compiler.ast import Dict from compiler.ast import Discard from compiler.ast import List from compiler.ast import Module from compiler.ast import Node from compiler.ast import Stmt import compiler import gyp.common import gyp.simple_copy import multiprocessing import optparse import os.path import re import shlex import signal import subprocess import sys import threading import time import traceback from gyp.common import GypError from gyp.common import OrderedSet # A list of types that are treated as linkable. linkable_types = [ 'executable', 'shared_library', 'loadable_module', 'mac_kernel_extension', ] # A list of sections that contain links to other targets. dependency_sections = ['dependencies', 'export_dependent_settings'] # base_path_sections is a list of sections defined by GYP that contain # pathnames. The generators can provide more keys, the two lists are merged # into path_sections, but you should call IsPathSection instead of using either # list directly. base_path_sections = [ 'destination', 'files', 'include_dirs', 'inputs', 'libraries', 'outputs', 'sources', ] path_sections = set() # These per-process dictionaries are used to cache build file data when loading # in parallel mode. per_process_data = {} per_process_aux_data = {} def IsPathSection(section): # If section ends in one of the '=+?!' characters, it's applied to a section # without the trailing characters. '/' is notably absent from this list, # because there's no way for a regular expression to be treated as a path. while section and section[-1:] in '=+?!': section = section[:-1] if section in path_sections: return True # Sections mathing the regexp '_(dir|file|path)s?$' are also # considered PathSections. 
Using manual string matching since that # is much faster than the regexp and this can be called hundreds of # thousands of times so micro performance matters. if "_" in section: tail = section[-6:] if tail[-1] == 's': tail = tail[:-1] if tail[-5:] in ('_file', '_path'): return True return tail[-4:] == '_dir' return False # base_non_configuration_keys is a list of key names that belong in the target # itself and should not be propagated into its configurations. It is merged # with a list that can come from the generator to # create non_configuration_keys. base_non_configuration_keys = [ # Sections that must exist inside targets and not configurations. 'actions', 'configurations', 'copies', 'default_configuration', 'dependencies', 'dependencies_original', 'libraries', 'postbuilds', 'product_dir', 'product_extension', 'product_name', 'product_prefix', 'rules', 'run_as', 'sources', 'standalone_static_library', 'suppress_wildcard', 'target_name', 'toolset', 'toolsets', 'type', # Sections that can be found inside targets or configurations, but that # should not be propagated from targets into their configurations. 'variables', ] non_configuration_keys = [] # Keys that do not belong inside a configuration dictionary. invalid_configuration_keys = [ 'actions', 'all_dependent_settings', 'configurations', 'dependencies', 'direct_dependent_settings', 'libraries', 'link_settings', 'sources', 'standalone_static_library', 'target_name', 'type', ] # Controls whether or not the generator supports multiple toolsets. multiple_toolsets = False # Paths for converting filelist paths to output paths: { # toplevel, # qualified_output_dir, # } generator_filelist_paths = None def GetIncludedBuildFiles(build_file_path, aux_data, included=None): """Return a list of all build files included into build_file_path. The returned list will contain build_file_path as well as all other files that it included, either directly or indirectly. 
Note that the list may contain files that were included into a conditional section that evaluated to false and was not merged into build_file_path's dict. aux_data is a dict containing a key for each build file or included build file. Those keys provide access to dicts whose "included" keys contain lists of all other files included by the build file. included should be left at its default None value by external callers. It is used for recursion. The returned list will not contain any duplicate entries. Each build file in the list will be relative to the current directory. """ if included == None: included = [] if build_file_path in included: return included included.append(build_file_path) for included_build_file in aux_data[build_file_path].get('included', []): GetIncludedBuildFiles(included_build_file, aux_data, included) return included def CheckedEval(file_contents): """Return the eval of a gyp file. The gyp file is restricted to dictionaries and lists only, and repeated keys are not allowed. Note that this is slower than eval() is. """ ast = compiler.parse(file_contents) assert isinstance(ast, Module) c1 = ast.getChildren() assert c1[0] is None assert isinstance(c1[1], Stmt) c2 = c1[1].getChildren() assert isinstance(c2[0], Discard) c3 = c2[0].getChildren() assert len(c3) == 1 return CheckNode(c3[0], []) def CheckNode(node, keypath): if isinstance(node, Dict): c = node.getChildren() dict = {} for n in range(0, len(c), 2): assert isinstance(c[n], Const) key = c[n].getChildren()[0] if key in dict: raise GypError("Key '" + key + "' repeated at level " + repr(len(keypath) + 1) + " with key path '" + '.'.join(keypath) + "'") kp = list(keypath) # Make a copy of the list for descending this node. kp.append(key) dict[key] = CheckNode(c[n + 1], kp) return dict elif isinstance(node, List): c = node.getChildren() children = [] for index, child in enumerate(c): kp = list(keypath) # Copy list. 
kp.append(repr(index)) children.append(CheckNode(child, kp)) return children elif isinstance(node, Const): return node.getChildren()[0] else: raise TypeError("Unknown AST node at key path '" + '.'.join(keypath) + "': " + repr(node)) def LoadOneBuildFile(build_file_path, data, aux_data, includes, is_target, check): if build_file_path in data: return data[build_file_path] if os.path.exists(build_file_path): build_file_contents = open(build_file_path).read() else: raise GypError("%s not found (cwd: %s)" % (build_file_path, os.getcwd())) build_file_data = None try: if check: build_file_data = CheckedEval(build_file_contents) else: build_file_data = eval(build_file_contents, {'__builtins__': None}, None) except SyntaxError, e: e.filename = build_file_path raise except Exception, e: gyp.common.ExceptionAppend(e, 'while reading ' + build_file_path) raise if type(build_file_data) is not dict: raise GypError("%s does not evaluate to a dictionary." % build_file_path) data[build_file_path] = build_file_data aux_data[build_file_path] = {} # Scan for includes and merge them in. if ('skip_includes' not in build_file_data or not build_file_data['skip_includes']): try: if is_target: LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data, aux_data, includes, check) else: LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data, aux_data, None, check) except Exception, e: gyp.common.ExceptionAppend(e, 'while reading includes of ' + build_file_path) raise return build_file_data def LoadBuildFileIncludesIntoDict(subdict, subdict_path, data, aux_data, includes, check): includes_list = [] if includes != None: includes_list.extend(includes) if 'includes' in subdict: for include in subdict['includes']: # "include" is specified relative to subdict_path, so compute the real # path to include by appending the provided "include" to the directory # in which subdict_path resides. 
relative_include = \ os.path.normpath(os.path.join(os.path.dirname(subdict_path), include)) includes_list.append(relative_include) # Unhook the includes list, it's no longer needed. del subdict['includes'] # Merge in the included files. for include in includes_list: if not 'included' in aux_data[subdict_path]: aux_data[subdict_path]['included'] = [] aux_data[subdict_path]['included'].append(include) gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Included File: '%s'", include) MergeDicts(subdict, LoadOneBuildFile(include, data, aux_data, None, False, check), subdict_path, include) # Recurse into subdictionaries. for k, v in subdict.iteritems(): if type(v) is dict: LoadBuildFileIncludesIntoDict(v, subdict_path, data, aux_data, None, check) elif type(v) is list: LoadBuildFileIncludesIntoList(v, subdict_path, data, aux_data, check) # This recurses into lists so that it can look for dicts. def LoadBuildFileIncludesIntoList(sublist, sublist_path, data, aux_data, check): for item in sublist: if type(item) is dict: LoadBuildFileIncludesIntoDict(item, sublist_path, data, aux_data, None, check) elif type(item) is list: LoadBuildFileIncludesIntoList(item, sublist_path, data, aux_data, check) # Processes toolsets in all the targets. This recurses into condition entries # since they can contain toolsets as well. def ProcessToolsetsInDict(data): if 'targets' in data: target_list = data['targets'] new_target_list = [] for target in target_list: # If this target already has an explicit 'toolset', and no 'toolsets' # list, don't modify it further. if 'toolset' in target and 'toolsets' not in target: new_target_list.append(target) continue if multiple_toolsets: toolsets = target.get('toolsets', ['target']) else: toolsets = ['target'] # Make sure this 'toolsets' definition is only processed once. if 'toolsets' in target: del target['toolsets'] if len(toolsets) > 0: # Optimization: only do copies if more than one toolset is specified. 
for build in toolsets[1:]: new_target = gyp.simple_copy.deepcopy(target) new_target['toolset'] = build new_target_list.append(new_target) target['toolset'] = toolsets[0] new_target_list.append(target) data['targets'] = new_target_list if 'conditions' in data: for condition in data['conditions']: if type(condition) is list: for condition_dict in condition[1:]: if type(condition_dict) is dict: ProcessToolsetsInDict(condition_dict) # TODO(mark): I don't love this name. It just means that it's going to load # a build file that contains targets and is expected to provide a targets dict # that contains the targets... def LoadTargetBuildFile(build_file_path, data, aux_data, variables, includes, depth, check, load_dependencies): # If depth is set, predefine the DEPTH variable to be a relative path from # this build file's directory to the directory identified by depth. if depth: # TODO(dglazkov) The backslash/forward-slash replacement at the end is a # temporary measure. This should really be addressed by keeping all paths # in POSIX until actual project generation. d = gyp.common.RelativePath(depth, os.path.dirname(build_file_path)) if d == '': variables['DEPTH'] = '.' else: variables['DEPTH'] = d.replace('\\', '/') # The 'target_build_files' key is only set when loading target build files in # the non-parallel code path, where LoadTargetBuildFile is called # recursively. In the parallel code path, we don't need to check whether the # |build_file_path| has already been loaded, because the 'scheduled' set in # ParallelState guarantees that we never load the same |build_file_path| # twice. if 'target_build_files' in data: if build_file_path in data['target_build_files']: # Already loaded. return False data['target_build_files'].add(build_file_path) gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Target Build File '%s'", build_file_path) build_file_data = LoadOneBuildFile(build_file_path, data, aux_data, includes, True, check) # Store DEPTH for later use in generators. 
build_file_data['_DEPTH'] = depth # Set up the included_files key indicating which .gyp files contributed to # this target dict. if 'included_files' in build_file_data: raise GypError(build_file_path + ' must not contain included_files key') included = GetIncludedBuildFiles(build_file_path, aux_data) build_file_data['included_files'] = [] for included_file in included: # included_file is relative to the current directory, but it needs to # be made relative to build_file_path's directory. included_relative = \ gyp.common.RelativePath(included_file, os.path.dirname(build_file_path)) build_file_data['included_files'].append(included_relative) # Do a first round of toolsets expansion so that conditions can be defined # per toolset. ProcessToolsetsInDict(build_file_data) # Apply "pre"/"early" variable expansions and condition evaluations. ProcessVariablesAndConditionsInDict( build_file_data, PHASE_EARLY, variables, build_file_path) # Since some toolsets might have been defined conditionally, perform # a second round of toolsets expansion now. ProcessToolsetsInDict(build_file_data) # Look at each project's target_defaults dict, and merge settings into # targets. if 'target_defaults' in build_file_data: if 'targets' not in build_file_data: raise GypError("Unable to find targets in build file %s" % build_file_path) index = 0 while index < len(build_file_data['targets']): # This procedure needs to give the impression that target_defaults is # used as defaults, and the individual targets inherit from that. # The individual targets need to be merged into the defaults. Make # a deep copy of the defaults for each target, merge the target dict # as found in the input file into that copy, and then hook up the # copy with the target-specific data merged into it as the replacement # target dict. 
old_target_dict = build_file_data['targets'][index] new_target_dict = gyp.simple_copy.deepcopy( build_file_data['target_defaults']) MergeDicts(new_target_dict, old_target_dict, build_file_path, build_file_path) build_file_data['targets'][index] = new_target_dict index += 1 # No longer needed. del build_file_data['target_defaults'] # Look for dependencies. This means that dependency resolution occurs # after "pre" conditionals and variable expansion, but before "post" - # in other words, you can't put a "dependencies" section inside a "post" # conditional within a target. dependencies = [] if 'targets' in build_file_data: for target_dict in build_file_data['targets']: if 'dependencies' not in target_dict: continue for dependency in target_dict['dependencies']: dependencies.append( gyp.common.ResolveTarget(build_file_path, dependency, None)[0]) if load_dependencies: for dependency in dependencies: try: LoadTargetBuildFile(dependency, data, aux_data, variables, includes, depth, check, load_dependencies) except Exception, e: gyp.common.ExceptionAppend( e, 'while loading dependencies of %s' % build_file_path) raise else: return (build_file_path, dependencies) def CallLoadTargetBuildFile(global_flags, build_file_path, variables, includes, depth, check, generator_input_info): """Wrapper around LoadTargetBuildFile for parallel processing. This wrapper is used when LoadTargetBuildFile is executed in a worker process. """ try: signal.signal(signal.SIGINT, signal.SIG_IGN) # Apply globals so that the worker process behaves the same. 
for key, value in global_flags.iteritems(): globals()[key] = value SetGeneratorGlobals(generator_input_info) result = LoadTargetBuildFile(build_file_path, per_process_data, per_process_aux_data, variables, includes, depth, check, False) if not result: return result (build_file_path, dependencies) = result # We can safely pop the build_file_data from per_process_data because it # will never be referenced by this process again, so we don't need to keep # it in the cache. build_file_data = per_process_data.pop(build_file_path) # This gets serialized and sent back to the main process via a pipe. # It's handled in LoadTargetBuildFileCallback. return (build_file_path, build_file_data, dependencies) except GypError, e: sys.stderr.write("gyp: %s\n" % e) return None except Exception, e: print >>sys.stderr, 'Exception:', e print >>sys.stderr, traceback.format_exc() return None class ParallelProcessingError(Exception): pass class ParallelState(object): """Class to keep track of state when processing input files in parallel. If build files are loaded in parallel, use this to keep track of state during farming out and processing parallel jobs. It's stored in a global so that the callback function can have access to it. """ def __init__(self): # The multiprocessing pool. self.pool = None # The condition variable used to protect this object and notify # the main loop when there might be more data to process. self.condition = None # The "data" dict that was passed to LoadTargetBuildFileParallel self.data = None # The number of parallel calls outstanding; decremented when a response # was received. self.pending = 0 # The set of all build files that have been scheduled, so we don't # schedule the same one twice. self.scheduled = set() # A list of dependency build file paths that haven't been scheduled yet. self.dependencies = [] # Flag to indicate if there was an error in a child process. 
    # Set when any worker fails; the main loop checks this and bails out.
    self.error = False

  def LoadTargetBuildFileCallback(self, result):
    """Handle the results of running LoadTargetBuildFile in another process.

    Invoked by multiprocessing when an async worker completes.  Records the
    loaded build file data, schedules any newly discovered dependencies, and
    wakes the main loop waiting on self.condition.
    """
    self.condition.acquire()
    if not result:
      # The worker failed; flag the error and wake the main loop so it can
      # stop dispatching work.
      self.error = True
      self.condition.notify()
      self.condition.release()
      return
    (build_file_path0, build_file_data0, dependencies0) = result
    self.data[build_file_path0] = build_file_data0
    self.data['target_build_files'].add(build_file_path0)
    for new_dependency in dependencies0:
      if new_dependency not in self.scheduled:
        self.scheduled.add(new_dependency)
        self.dependencies.append(new_dependency)
    self.pending -= 1
    self.condition.notify()
    self.condition.release()


def LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
                                 check, generator_input_info):
  """Load |build_files| and all of their dependencies using a process pool.

  Parallel counterpart of LoadTargetBuildFile: dispatches each build file to
  a worker via CallLoadTargetBuildFile and merges results through
  ParallelState.LoadTargetBuildFileCallback.  Exits the process with status 1
  if any worker reported an error.
  """
  parallel_state = ParallelState()
  parallel_state.condition = threading.Condition()
  # Make copies of the build_files argument that we can modify while working.
  parallel_state.dependencies = list(build_files)
  parallel_state.scheduled = set(build_files)
  parallel_state.pending = 0
  parallel_state.data = data

  try:
    parallel_state.condition.acquire()
    while parallel_state.dependencies or parallel_state.pending:
      if parallel_state.error:
        break
      if not parallel_state.dependencies:
        # Nothing to dispatch yet; wait for a callback to add work or to
        # retire a pending job.
        parallel_state.condition.wait()
        continue

      dependency = parallel_state.dependencies.pop()

      parallel_state.pending += 1
      # Snapshot of module-level state the worker process needs to mirror.
      global_flags = {
        'path_sections': globals()['path_sections'],
        'non_configuration_keys': globals()['non_configuration_keys'],
        'multiple_toolsets': globals()['multiple_toolsets']}

      if not parallel_state.pool:
        # Lazily create the pool, sized to the machine's CPU count.
        parallel_state.pool = multiprocessing.Pool(multiprocessing.cpu_count())
      parallel_state.pool.apply_async(
          CallLoadTargetBuildFile,
          args = (global_flags, dependency,
                  variables, includes, depth, check, generator_input_info),
          callback = parallel_state.LoadTargetBuildFileCallback)
  except KeyboardInterrupt, e:
    parallel_state.pool.terminate()
    raise e

  parallel_state.condition.release()

  parallel_state.pool.close()
  parallel_state.pool.join()
  parallel_state.pool = None

  if parallel_state.error:
    sys.exit(1)


# Look for the bracket that matches the first bracket seen in a
# string, and return the start and end as a tuple.  For example, if
# the input is something like "<(foo <(bar)) blah", then it would
# return (1, 13), indicating the entire string except for the leading
# "<" and trailing " blah".
LBRACKETS = set('{[(')
BRACKETS = {'}': '{', ']': '[', ')': '('}
def FindEnclosingBracketGroup(input_str):
  """Return (start, end) of the first balanced bracket group in |input_str|,
  or (-1, -1) if the brackets are unbalanced or mismatched.  The end index is
  exclusive.  Handles nesting of all three bracket types via a stack.
  """
  stack = []
  start = -1
  for index, char in enumerate(input_str):
    if char in LBRACKETS:
      stack.append(char)
      if start == -1:
        start = index
    elif char in BRACKETS:
      if not stack:
        return (-1, -1)
      if stack.pop() != BRACKETS[char]:
        return (-1, -1)
      if not stack:
        return (start, index + 1)
  return (-1, -1)


def IsStrCanonicalInt(string):
  """Returns True if |string| is in its canonical integer form.

  The canonical form is such that str(int(string)) == string.
  """
  if type(string) is str:
    # This function is called a lot so for maximum performance, avoid
    # involving regexps which would otherwise make the code much
    # shorter. Regexps would need twice the time of this function.
    if string:
      if string == "0":
        return True
      if string[0] == "-":
        string = string[1:]
        if not string:
          return False
      # Canonical ints never have a leading zero, so the first digit must
      # be 1-9; the remainder need only be digits.
      if '1' <= string[0] <= '9':
        return string.isdigit()
  return False


# This matches things like "<(asdf)", "<!(cmd)", "<!@(cmd)", "<|(list)",
# "<!interpreter(arguments)", "<([list])", and even "<([)" and "<(<())".
# In the last case, the inner "<()" is captured in match['content'].
early_variable_re = re.compile(
    r'(?P<replace>(?P<type><(?:(?:!?@?)|\|)?)'
    r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
    r'\((?P<is_array>\s*\[?)'
    r'(?P<content>.*?)(\]?)\))')

# This matches the same as early_variable_re, but with '>' instead of '<'.
late_variable_re = re.compile(
    r'(?P<replace>(?P<type>>(?:(?:!?@?)|\|)?)'
    r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
    r'\((?P<is_array>\s*\[?)'
    r'(?P<content>.*?)(\]?)\))')

# This matches the same as early_variable_re, but with '^' instead of '<'.
latelate_variable_re = re.compile(
    r'(?P<replace>(?P<type>[\^](?:(?:!?@?)|\|)?)'
    r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
    r'\((?P<is_array>\s*\[?)'
    r'(?P<content>.*?)(\]?)\))')

# Global cache of results from running commands so they don't have to be run
# more than once.
cached_command_results = {}


def FixupPlatformCommand(cmd):
  """Rewrite a leading "cat " to "type " on Windows; no-op elsewhere.
  |cmd| may be a command string or an argv-style list.
  """
  if sys.platform == 'win32':
    if type(cmd) is list:
      cmd = [re.sub('^cat ', 'type ', cmd[0])] + cmd[1:]
    else:
      cmd = re.sub('^cat ', 'type ', cmd)
  return cmd


# Expansion phases: which variable syntax is processed at each stage.
PHASE_EARLY = 0     # '<' variables / 'conditions'
PHASE_LATE = 1      # '>' variables / 'target_conditions'
PHASE_LATELATE = 2  # '^' variables / no conditions


def ExpandVariables(input, phase, variables, build_file):
  """Expand GYP variable/command references in |input| for the given |phase|.

  Returns a str, int, or list depending on the expansion.  Strings that are
  canonical integers are returned as ints.  Command results ("!" variants)
  are cached in cached_command_results.
  """
  # Look for the pattern that gets expanded into variables
  if phase == PHASE_EARLY:
    variable_re = early_variable_re
    expansion_symbol = '<'
  elif phase == PHASE_LATE:
    variable_re = late_variable_re
    expansion_symbol = '>'
  elif phase == PHASE_LATELATE:
    variable_re = latelate_variable_re
    expansion_symbol = '^'
  else:
    assert False

  input_str = str(input)
  if IsStrCanonicalInt(input_str):
    return int(input_str)

  # Do a quick scan to determine if an expensive regex search is warranted.
  if expansion_symbol not in input_str:
    return input_str

  # Get the entire list of matches as a list of MatchObject instances.
  # (using findall here would return strings instead of MatchObjects).
  matches = list(variable_re.finditer(input_str))
  if not matches:
    return input_str

  output = input_str
  # Reverse the list of matches so that replacements are done right-to-left.
  # That ensures that earlier replacements won't mess up the string in a
  # way that causes later calls to find the earlier substituted text instead
  # of what's intended for replacement.
  matches.reverse()
  for match_group in matches:
    match = match_group.groupdict()
    gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Matches: %r", match)
    # match['replace'] is the substring to look for, match['type']
    # is the character code for the replacement type (< > <! >! <| >| <@
    # >@ <!@ >!@), match['is_array'] contains a '[' for command
    # arrays, and match['content'] is the name of the variable (< >)
    # or command to run (<! >!). match['command_string'] is an optional
    # command string. Currently, only 'pymod_do_main' is supported.

    # run_command is true if a ! variant is used.
    run_command = '!' in match['type']
    command_string = match['command_string']

    # file_list is true if a | variant is used.
    file_list = '|' in match['type']

    # Capture these now so we can adjust them later.
    replace_start = match_group.start('replace')
    replace_end = match_group.end('replace')

    # Find the ending paren, and re-evaluate the contained string.
    (c_start, c_end) = FindEnclosingBracketGroup(input_str[replace_start:])

    # Adjust the replacement range to match the entire command
    # found by FindEnclosingBracketGroup (since the variable_re
    # probably doesn't match the entire command if it contained
    # nested variables).
    replace_end = replace_start + c_end

    # Find the "real" replacement, matching the appropriate closing
    # paren, and adjust the replacement start and end.
    replacement = input_str[replace_start:replace_end]

    # Figure out what the contents of the variable parens are.
    contents_start = replace_start + c_start + 1
    contents_end = replace_end - 1
    contents = input_str[contents_start:contents_end]

    # Do filter substitution now for <|().
    # Admittedly, this is different than the evaluation order in other
    # contexts. However, since filtration has no chance to run on <|(),
    # this seems like the only obvious way to give them access to filters.
    if file_list:
      processed_variables = gyp.simple_copy.deepcopy(variables)
      ProcessListFiltersInDict(contents, processed_variables)
      # Recurse to expand variables in the contents
      contents = ExpandVariables(contents, phase,
                                 processed_variables, build_file)
    else:
      # Recurse to expand variables in the contents
      contents = ExpandVariables(contents, phase, variables, build_file)

    # Strip off leading/trailing whitespace so that variable matches are
    # simpler below (and because they are rarely needed).
    contents = contents.strip()

    # expand_to_list is true if an @ variant is used.  In that case,
    # the expansion should result in a list.  Note that the caller
    # is to be expecting a list in return, and not all callers do
    # because not all are working in list context.  Also, for list
    # expansions, there can be no other text besides the variable
    # expansion in the input string.
    expand_to_list = '@' in match['type'] and input_str == replacement

    if run_command or file_list:
      # Find the build file's directory, so commands can be run or file lists
      # generated relative to it.
      build_file_dir = os.path.dirname(build_file)
      if build_file_dir == '' and not file_list:
        # If build_file is just a leaf filename indicating a file in the
        # current directory, build_file_dir might be an empty string.  Set
        # it to None to signal to subprocess.Popen that it should run the
        # command in the current directory.
        build_file_dir = None

    # Support <|(listfile.txt ...) which generates a file
    # containing items from a gyp list, generated at gyp time.
    # This works around actions/rules which have more inputs than will
    # fit on the command line.
    if file_list:
      if type(contents) is list:
        contents_list = contents
      else:
        contents_list = contents.split(' ')
      replacement = contents_list[0]
      if os.path.isabs(replacement):
        raise GypError('| cannot handle absolute paths, got "%s"' %
                       replacement)

      if not generator_filelist_paths:
        path = os.path.join(build_file_dir, replacement)
      else:
        if os.path.isabs(build_file_dir):
          toplevel = generator_filelist_paths['toplevel']
          rel_build_file_dir = gyp.common.RelativePath(build_file_dir,
                                                       toplevel)
        else:
          rel_build_file_dir = build_file_dir
        qualified_out_dir = generator_filelist_paths['qualified_out_dir']
        path = os.path.join(qualified_out_dir, rel_build_file_dir,
                            replacement)
        gyp.common.EnsureDirExists(path)

      replacement = gyp.common.RelativePath(path, build_file_dir)
      # WriteOnDiff avoids touching the file (and invalidating timestamps)
      # when the contents haven't changed.
      f = gyp.common.WriteOnDiff(path)
      for i in contents_list[1:]:
        f.write('%s\n' % i)
      f.close()

    elif run_command:
      use_shell = True
      if match['is_array']:
        # NOTE(review): eval() of build-file content — GYP treats its input
        # files as trusted; do not feed untrusted .gyp files through here.
        contents = eval(contents)
        use_shell = False

      # Check for a cached value to avoid executing commands, or generating
      # file lists more than once. The cache key contains the command to be
      # run as well as the directory to run it from, to account for commands
      # that depend on their current directory.
      # TODO(http://code.google.com/p/gyp/issues/detail?id=111): In theory,
      # someone could author a set of GYP files where each time the command
      # is invoked it produces different output by design. When the need
      # arises, the syntax should be extended to support no caching off a
      # command's output so it is run every time.
      cache_key = (str(contents), build_file_dir)
      cached_value = cached_command_results.get(cache_key, None)
      if cached_value is None:
        gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                        "Executing command '%s' in directory '%s'",
                        contents, build_file_dir)

        replacement = ''

        if command_string == 'pymod_do_main':
          # <!pymod_do_main(modulename param eters) loads |modulename| as a
          # python module and then calls that module's DoMain() function,
          # passing ["param", "eters"] as a single list argument.  For modules
          # that don't load quickly, this can be faster than
          # <!(python modulename param eters). Do this in |build_file_dir|.
          oldwd = os.getcwd()  # Python doesn't like os.open('.'): no fchdir.
          if build_file_dir:  # build_file_dir may be None (see above).
            os.chdir(build_file_dir)
          try:
            parsed_contents = shlex.split(contents)
            try:
              py_module = __import__(parsed_contents[0])
            except ImportError as e:
              raise GypError("Error importing pymod_do_main"
                             "module (%s): %s" % (parsed_contents[0], e))
            replacement = str(py_module.DoMain(parsed_contents[1:])).rstrip()
          finally:
            os.chdir(oldwd)

          assert replacement != None
        elif command_string:
          raise GypError("Unknown command string '%s' in '%s'." %
                         (command_string, contents))
        else:
          # Fix up command with platform specific workarounds.
          contents = FixupPlatformCommand(contents)
          try:
            p = subprocess.Popen(contents, shell=use_shell,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 stdin=subprocess.PIPE,
                                 cwd=build_file_dir)
          except Exception, e:
            raise GypError("%s while executing command '%s' in %s" %
                           (e, contents, build_file))

          p_stdout, p_stderr = p.communicate('')

          if p.wait() != 0 or p_stderr:
            sys.stderr.write(p_stderr)
            # Simulate check_call behavior, since check_call only exists
            # in python 2.5 and later.
            raise GypError("Call to '%s' returned exit status %d while in %s." %
                           (contents, p.returncode, build_file))
          replacement = p_stdout.rstrip()

        cached_command_results[cache_key] = replacement
      else:
        gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                        "Had cache value for command '%s' in directory '%s'",
                        contents, build_file_dir)
        replacement = cached_value

    else:
      if not contents in variables:
        if contents[-1] in ['!', '/']:
          # In order to allow cross-compiles (nacl) to happen more naturally,
          # we will allow references to >(sources/) etc. to resolve to
          # and empty list if undefined. This allows actions to:
          # 'action!': [
          #   '>@(_sources!)',
          # ],
          # 'action/': [
          #   '>@(_sources/)',
          # ],
          replacement = []
        else:
          raise GypError('Undefined variable ' + contents +
                         ' in ' + build_file)
      else:
        replacement = variables[contents]

    if type(replacement) is list:
      for item in replacement:
        if not contents[-1] == '/' and type(item) not in (str, int):
          raise GypError('Variable ' + contents +
                         ' must expand to a string or list of strings; ' +
                         'list contains a ' +
                         item.__class__.__name__)
      # Run through the list and handle variable expansions in it. Since
      # the list is guaranteed not to contain dicts, this won't do anything
      # with conditions sections.
      ProcessVariablesAndConditionsInList(replacement, phase, variables,
                                          build_file)
    elif type(replacement) not in (str, int):
      raise GypError('Variable ' + contents +
                     ' must expand to a string or list of strings; ' +
                     'found a ' + replacement.__class__.__name__)

    if expand_to_list:
      # Expanding in list context. It's guaranteed that there's only one
      # replacement to do in |input_str| and that it's this replacement. See
      # above.
      if type(replacement) is list:
        # If it's already a list, make a copy.
        output = replacement[:]
      else:
        # Split it the same way sh would split arguments.
        output = shlex.split(str(replacement))
    else:
      # Expanding in string context.
      encoded_replacement = ''
      if type(replacement) is list:
        # When expanding a list into string context, turn the list items
        # into a string in a way that will work with a subprocess call.
        #
        # TODO(mark): This isn't completely correct. This should
        # call a generator-provided function that observes the
        # proper list-to-argument quoting rules on a specific
        # platform instead of just calling the POSIX encoding
        # routine.
        encoded_replacement = gyp.common.EncodePOSIXShellList(replacement)
      else:
        encoded_replacement = replacement

      output = output[:replace_start] + str(encoded_replacement) + \
               output[replace_end:]
    # Prepare for the next match iteration.
    input_str = output

  if output == input:
    gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                    "Found only identity matches on %r, avoiding infinite "
                    "recursion.",
                    output)
  else:
    # Look for more matches now that we've replaced some, to deal with
    # expanding local variables (variables defined in the same
    # variables block as this one).
    gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Found output %r, recursing.",
                    output)
    if type(output) is list:
      if output and type(output[0]) is list:
        # Leave output alone if it's a list of lists.
        # We don't want such lists to be stringified.
        pass
      else:
        new_output = []
        for item in output:
          new_output.append(
              ExpandVariables(item, phase, variables, build_file))
        output = new_output
    else:
      output = ExpandVariables(output, phase, variables, build_file)

  # Convert all strings that are canonically-represented integers into
  # integers.
  if type(output) is list:
    for index in xrange(0, len(output)):
      if IsStrCanonicalInt(output[index]):
        output[index] = int(output[index])
  elif IsStrCanonicalInt(output):
    output = int(output)

  return output


# The same condition is often evaluated over and over again so it
# makes sense to cache as much as possible between evaluations.
cached_conditions_asts = {}


def EvalCondition(condition, conditions_key, phase, variables, build_file):
  """Returns the dict that should be used or None if the result was
  that nothing should be used.

  |condition| is a list of the form [expr, true_dict, expr2, true_dict2, ...,
  optional_false_dict]; only the first expression that fires determines the
  result (later pairs are still validated but their outcome is discarded).
  """
  if type(condition) is not list:
    raise GypError(conditions_key + ' must be a list')
  if len(condition) < 2:
    # It's possible that condition[0] won't work in which case this
    # attempt will raise its own IndexError. That's probably fine.
    raise GypError(conditions_key + ' ' + condition[0] +
                   ' must be at least length 2, not ' + str(len(condition)))

  i = 0
  result = None
  while i < len(condition):
    cond_expr = condition[i]
    true_dict = condition[i + 1]
    if type(true_dict) is not dict:
      raise GypError('{} {} must be followed by a dictionary, not {}'.format(
        conditions_key, cond_expr, type(true_dict)))
    if len(condition) > i + 2 and type(condition[i + 2]) is dict:
      false_dict = condition[i + 2]
      i = i + 3
      if i != len(condition):
        raise GypError('{} {} has {} unexpected trailing items'.format(
          conditions_key, cond_expr, len(condition) - i))
    else:
      false_dict = None
      i = i + 2
    if result == None:
      # Only the first condition pair contributes to the result; the loop
      # keeps going purely to validate the remaining structure.
      result = EvalSingleCondition(
          cond_expr, true_dict, false_dict, phase, variables, build_file)

  return result


def EvalSingleCondition(
    cond_expr, true_dict, false_dict, phase, variables, build_file):
  """Returns true_dict if cond_expr evaluates to true, and false_dict
  otherwise."""
  # Do expansions on the condition itself. Since the conditon can naturally
  # contain variable references without needing to resort to GYP expansion
  # syntax, this is of dubious value for variables, but someone might want to
  # use a command expansion directly inside a condition.
  cond_expr_expanded = ExpandVariables(cond_expr, phase, variables,
                                       build_file)
  if type(cond_expr_expanded) not in (str, int):
    raise ValueError(
          'Variable expansion in this context permits str and int ' + \
          'only, found ' + cond_expr_expanded.__class__.__name__)

  try:
    # Compile each condition expression once and cache the code object;
    # conditions repeat heavily across targets.
    if cond_expr_expanded in cached_conditions_asts:
      ast_code = cached_conditions_asts[cond_expr_expanded]
    else:
      ast_code = compile(cond_expr_expanded, '<string>', 'eval')
      cached_conditions_asts[cond_expr_expanded] = ast_code
    # Evaluate with builtins disabled; |variables| supplies the names.
    if eval(ast_code, {'__builtins__': None}, variables):
      return true_dict
    return false_dict
  except SyntaxError, e:
    syntax_error = SyntaxError('%s while evaluating condition \'%s\' in %s '
                               'at character %d.' %
                               (str(e.args[0]), e.text, build_file, e.offset),
                               e.filename, e.lineno, e.offset, e.text)
    raise syntax_error
  except NameError, e:
    gyp.common.ExceptionAppend(e, 'while evaluating condition \'%s\' in %s' %
                               (cond_expr_expanded, build_file))
    raise GypError(e)


def ProcessConditionsInDict(the_dict, phase, variables, build_file):
  # Process a 'conditions' or 'target_conditions' section in the_dict,
  # depending on phase.
  # early -> conditions
  # late -> target_conditions
  # latelate -> no conditions
  #
  # Each item in a conditions list consists of cond_expr, a string expression
  # evaluated as the condition, and true_dict, a dict that will be merged into
  # the_dict if cond_expr evaluates to true. Optionally, a third item,
  # false_dict, may be present. false_dict is merged into the_dict if
  # cond_expr evaluates to false.
  #
  # Any dict merged into the_dict will be recursively processed for nested
  # conditionals and other expansions, also according to phase, immediately
  # prior to being merged.

  if phase == PHASE_EARLY:
    conditions_key = 'conditions'
  elif phase == PHASE_LATE:
    conditions_key = 'target_conditions'
  elif phase == PHASE_LATELATE:
    return
  else:
    assert False

  if not conditions_key in the_dict:
    return

  conditions_list = the_dict[conditions_key]
  # Unhook the conditions list, it's no longer needed.
  del the_dict[conditions_key]

  for condition in conditions_list:
    merge_dict = EvalCondition(condition, conditions_key, phase, variables,
                               build_file)

    if merge_dict != None:
      # Expand variables and nested conditinals in the merge_dict before
      # merging it.
      ProcessVariablesAndConditionsInDict(merge_dict, phase,
                                          variables, build_file)

      MergeDicts(the_dict, merge_dict, build_file, build_file)


def LoadAutomaticVariablesFromDict(variables, the_dict):
  # Any keys with plain string values in the_dict become automatic variables.
  # The variable name is the key name with a "_" character prepended.
  for key, value in the_dict.iteritems():
    if type(value) in (str, int, list):
      variables['_' + key] = value


def LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key):
  # Any keys in the_dict's "variables" dict, if it has one, becomes a
  # variable. The variable name is the key name in the "variables" dict.
  # Variables that end with the % character are set only if they are unset in
  # the variables dict. the_dict_key is the name of the key that accesses
  # the_dict in the_dict's parent dict. If the_dict's parent is not a dict
  # (it could be a list or it could be parentless because it is a root dict),
  # the_dict_key will be None.
  for key, value in the_dict.get('variables', {}).iteritems():
    if type(value) not in (str, int, list):
      continue

    if key.endswith('%'):
      variable_name = key[:-1]
      if variable_name in variables:
        # If the variable is already set, don't set it.
        continue
      # NOTE(review): 'is' compares string identity here and only works
      # because short literals are interned by CPython; '==' would be the
      # safe comparison. Left as-is to preserve behavior byte-for-byte.
      if the_dict_key is 'variables' and variable_name in the_dict:
        # If the variable is set without a % in the_dict, and the_dict is a
        # variables dict (making |variables| a varaibles sub-dict of a
        # variables dict), use the_dict's definition.
        value = the_dict[variable_name]
    else:
      variable_name = key

    variables[variable_name] = value


def ProcessVariablesAndConditionsInDict(the_dict, phase, variables_in,
                                        build_file, the_dict_key=None):
  """Handle all variable and command expansion and conditional evaluation.

  This function is the public entry point for all variable expansions and
  conditional evaluations. The variables_in dictionary will not be modified
  by this function.
  """

  # Make a copy of the variables_in dict that can be modified during the
  # loading of automatics and the loading of the variables dict.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)

  if 'variables' in the_dict:
    # Make sure all the local variables are added to the variables
    # list before we process them so that you can reference one
    # variable from another. They will be fully expanded by recursion
    # in ExpandVariables.
    for key, value in the_dict['variables'].iteritems():
      variables[key] = value

    # Handle the associated variables dict first, so that any variable
    # references within can be resolved prior to using them as variables.
    # Pass a copy of the variables dict to avoid having it be tainted.
    # Otherwise, it would have extra automatics added for everything that
    # should just be an ordinary variable in this scope.
    ProcessVariablesAndConditionsInDict(the_dict['variables'], phase,
                                        variables, build_file, 'variables')

  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)

  for key, value in the_dict.iteritems():
    # Skip "variables", which was already processed if present.
    if key != 'variables' and type(value) is str:
      expanded = ExpandVariables(value, phase, variables, build_file)
      if type(expanded) not in (str, int):
        raise ValueError(
              'Variable expansion in this context permits str and int ' + \
              'only, found ' + expanded.__class__.__name__ + ' for ' + key)
      the_dict[key] = expanded

  # Variable expansion may have resulted in changes to automatics. Reload.
  # TODO(mark): Optimization: only reload if no changes were made.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)
  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)

  # Process conditions in this dict. This is done after variable expansion
  # so that conditions may take advantage of expanded variables. For example,
  # if the_dict contains:
  #   {'type': '<(library_type)',
  #    'conditions': [['_type=="static_library"', { ... }]]},
  # _type, as used in the condition, will only be set to the value of
  # library_type if variable expansion is performed before condition
  # processing. However, condition processing should occur prior to recursion
  # so that variables (both automatic and "variables" dict type) may be
  # adjusted by conditions sections, merged into the_dict, and have the
  # intended impact on contained dicts.
  #
  # This arrangement means that a "conditions" section containing a
  # "variables" section will only have those variables effective in subdicts,
  # not in the_dict. The workaround is to put a "conditions" section within a
  # "variables" section. For example:
  #   {'conditions': [['os=="mac"', {'variables': {'define': 'IS_MAC'}}]],
  #    'defines': ['<(define)'],
  #    'my_subdict': {'defines': ['<(define)']}},
  # will not result in "IS_MAC" being appended to the "defines" list in the
  # current scope but would result in it being appended to the "defines" list
  # within "my_subdict". By comparison:
  #   {'variables': {'conditions': [['os=="mac"', {'define': 'IS_MAC'}]]},
  #    'defines': ['<(define)'],
  #    'my_subdict': {'defines': ['<(define)']}},
  # will append "IS_MAC" to both "defines" lists.

  # Evaluate conditions sections, allowing variable expansions within them
  # as well as nested conditionals. This will process a 'conditions' or
  # 'target_conditions' section, perform appropriate merging and recursive
  # conditional and variable processing, and then remove the conditions
  # section from the_dict if it is present.
  ProcessConditionsInDict(the_dict, phase, variables, build_file)

  # Conditional processing may have resulted in changes to automatics or the
  # variables dict. Reload.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)
  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)

  # Recurse into child dicts, or process child lists which may result in
  # further recursion into descendant dicts.
  for key, value in the_dict.iteritems():
    # Skip "variables" and string values, which were already processed if
    # present.
    if key == 'variables' or type(value) is str:
      continue
    if type(value) is dict:
      # Pass a copy of the variables dict so that subdicts can't influence
      # parents.
      ProcessVariablesAndConditionsInDict(value, phase, variables,
                                          build_file, key)
    elif type(value) is list:
      # The list itself can't influence the variables dict, and
      # ProcessVariablesAndConditionsInList will make copies of the variables
      # dict if it needs to pass it to something that can influence it. No
      # copy is necessary here.
      ProcessVariablesAndConditionsInList(value, phase, variables,
                                          build_file)
    elif type(value) is not int:
      raise TypeError('Unknown type ' + value.__class__.__name__ + \
                      ' for ' + key)


def ProcessVariablesAndConditionsInList(the_list, phase, variables,
                                        build_file):
  """Expand variables/conditions in every item of |the_list| in place."""
  # Iterate using an index so that new values can be assigned into the_list.
  index = 0
  while index < len(the_list):
    item = the_list[index]
    if type(item) is dict:
      # Make a copy of the variables dict so that it won't influence anything
      # outside of its own scope.
      ProcessVariablesAndConditionsInDict(item, phase, variables, build_file)
    elif type(item) is list:
      ProcessVariablesAndConditionsInList(item, phase, variables, build_file)
    elif type(item) is str:
      expanded = ExpandVariables(item, phase, variables, build_file)
      if type(expanded) in (str, int):
        the_list[index] = expanded
      elif type(expanded) is list:
        # Splice the expanded list into place of the single item.
        the_list[index:index+1] = expanded
        index += len(expanded)

        # index now identifies the next item to examine. Continue right now
        # without falling into the index increment below.
        continue
      else:
        # NOTE(review): 'str + index' concatenates a str with an int here and
        # would itself raise TypeError on this error path; kept as-is.
        raise ValueError(
              'Variable expansion in this context permits strings and ' + \
              'lists only, found ' + expanded.__class__.__name__ + ' at ' + \
              index)
    elif type(item) is not int:
      # NOTE(review): same latent str + int concatenation as above.
      raise TypeError('Unknown type ' + item.__class__.__name__ + \
                      ' at index ' + index)
    index = index + 1


def BuildTargetsDict(data):
  """Builds a dict mapping fully-qualified target names to their target dicts.

  |data| is a dict mapping loaded build files by pathname relative to the
  current directory. Values in |data| are build file contents. For each
  |data| value with a "targets" key, the value of the "targets" key is taken
  as a list containing target dicts. Each target's fully-qualified name is
  constructed from the pathname of the build file (|data| key) and its
  "target_name" property. These fully-qualified names are used as the keys in
  the returned dict. These keys provide access to the target dicts, the dicts
  in the "targets" lists.
  """

  targets = {}
  for build_file in data['target_build_files']:
    for target in data[build_file].get('targets', []):
      target_name = gyp.common.QualifiedTarget(build_file,
                                               target['target_name'],
                                               target['toolset'])
      if target_name in targets:
        raise GypError('Duplicate target definitions for ' + target_name)
      targets[target_name] = target

  return targets


def QualifyDependencies(targets):
  """Make dependency links fully-qualified relative to the current directory.

  |targets| is a dict mapping fully-qualified target names to their target
  dicts. For each target in this dict, keys known to contain dependency links
  are examined, and any dependencies referenced will be rewritten so that
  they are fully-qualified and relative to the current directory. All
  rewritten dependencies are suitable for use as keys to |targets| or a
  similar dict.
  """

  # Include the '!' (exclusion) and '/' (regex-filter) variants of each
  # dependency section name.
  all_dependency_sections = [dep + op
                             for dep in dependency_sections
                             for op in ('', '!', '/')]

  for target, target_dict in targets.iteritems():
    target_build_file = gyp.common.BuildFile(target)
    toolset = target_dict['toolset']
    for dependency_key in all_dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      for index in xrange(0, len(dependencies)):
        dep_file, dep_target, dep_toolset = gyp.common.ResolveTarget(
            target_build_file, dependencies[index], toolset)
        if not multiple_toolsets:
          # Ignore toolset specification in the dependency if it is
          # specified.
          dep_toolset = toolset
        dependency = gyp.common.QualifiedTarget(dep_file,
                                                dep_target,
                                                dep_toolset)
        dependencies[index] = dependency

        # Make sure anything appearing in a list other than "dependencies"
        # also appears in the "dependencies" list.
        if dependency_key != 'dependencies' and \
           dependency not in target_dict['dependencies']:
          raise GypError('Found ' + dependency + ' in ' + dependency_key +
                         ' of ' + target + ', but not in dependencies')


def ExpandWildcardDependencies(targets, data):
  """Expands dependencies specified as build_file:*.

  For each target in |targets|, examines sections containing links to other
  targets. If any such section contains a link of the form build_file:*, it
  is taken as a wildcard link, and is expanded to list each target in
  build_file. The |data| dict provides access to build file dicts.

  Any target that does not wish to be included by wildcard can provide an
  optional "suppress_wildcard" key in its target dict. When present and
  true, a wildcard dependency link will not include such targets.

  All dependency names, including the keys to |targets| and the values in
  each dependency list, must be qualified when this function is called.
  """

  for target, target_dict in targets.iteritems():
    toolset = target_dict['toolset']
    target_build_file = gyp.common.BuildFile(target)
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])

      # Loop this way instead of "for dependency in" or "for index in xrange"
      # because the dependencies list will be modified within the loop body.
      index = 0
      while index < len(dependencies):
        (dependency_build_file, dependency_target, dependency_toolset) = \
            gyp.common.ParseQualifiedTarget(dependencies[index])
        if dependency_target != '*' and dependency_toolset != '*':
          # Not a wildcard. Keep it moving.
          index = index + 1
          continue

        if dependency_build_file == target_build_file:
          # It's an error for a target to depend on all other targets in
          # the same file, because a target cannot depend on itself.
          raise GypError('Found wildcard in ' + dependency_key + ' of ' +
                         target + ' referring to same build file')

        # Take the wildcard out and adjust the index so that the next
        # dependency in the list will be processed the next time through the
        # loop.
        del dependencies[index]
        index = index - 1

        # Loop through the targets in the other build file, adding them to
        # this target's list of dependencies in place of the removed
        # wildcard.
        dependency_target_dicts = data[dependency_build_file]['targets']
        for dependency_target_dict in dependency_target_dicts:
          if int(dependency_target_dict.get('suppress_wildcard', False)):
            continue
          dependency_target_name = dependency_target_dict['target_name']
          if (dependency_target != '*' and
              dependency_target != dependency_target_name):
            continue
          dependency_target_toolset = dependency_target_dict['toolset']
          if (dependency_toolset != '*' and
              dependency_toolset != dependency_target_toolset):
            continue
          dependency = gyp.common.QualifiedTarget(dependency_build_file,
                                                  dependency_target_name,
                                                  dependency_target_toolset)
          index = index + 1
          dependencies.insert(index, dependency)

        index = index + 1


def Unify(l):
  """Removes duplicate elements from l, keeping the first element."""
  # dict.setdefault preserves first-seen order without a second pass.
  seen = {}
  return [seen.setdefault(e, e) for e in l if e not in seen]


def RemoveDuplicateDependencies(targets):
  """Makes sure every dependency appears only once in all targets's
  dependency lists."""
  for target_name, target_dict in targets.iteritems():
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      if dependencies:
        target_dict[dependency_key] = Unify(dependencies)


def Filter(l, item):
  """Removes item from l."""
  res = {}
  return [res.setdefault(e, e) for e in l if e != item]


def RemoveSelfDependencies(targets):
  """Remove self dependencies from targets that have the
  prune_self_dependency variable set."""
  for target_name, target_dict in targets.iteritems():
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      if dependencies:
        for t in dependencies:
          if t == target_name:
            if targets[t].get('variables', {}).get('prune_self_dependency',
                                                   0):
              target_dict[dependency_key] = Filter(dependencies, target_name)


def RemoveLinkDependenciesFromNoneTargets(targets):
  """Remove dependencies having the 'link_dependency' attribute from the
  'none' targets."""
  for target_name, target_dict in targets.iteritems():
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      if dependencies:
        for t in dependencies:
          if target_dict.get('type', None) == 'none':
            if targets[t].get('variables', {}).get('link_dependency', 0):
              target_dict[dependency_key] = \
                  Filter(target_dict[dependency_key], t)


class DependencyGraphNode(object):
  """

  Attributes:
    ref: A reference to an object that this DependencyGraphNode represents.
    dependencies: List of DependencyGraphNodes on which this one depends.
    dependents: List of DependencyGraphNodes that depend on this one.
  """

  class CircularException(GypError):
    pass

  def __init__(self, ref):
    self.ref = ref
    self.dependencies = []
    self.dependents = []

  def __repr__(self):
    return '<DependencyGraphNode: %r>' % self.ref

  def FlattenToList(self):
    # Topological sort by repeatedly emitting nodes whose dependencies have
    # all been emitted (in-degree-zero processing).
    #
    # flat_list is the sorted list of dependencies - actually, the list items
    # are the "ref" attributes of DependencyGraphNodes. Every target will
    # appear in flat_list after all of its dependencies, and before all of
    # its dependents.
    flat_list = OrderedSet()

    # in_degree_zeros is the list of DependencyGraphNodes that have no
    # dependencies not in flat_list. Initially, it is a copy of the children
    # of this node, because when the graph was built, nodes with no
    # dependencies were made implicit dependents of the root node.
    in_degree_zeros = set(self.dependents[:])

    while in_degree_zeros:
      # Nodes in in_degree_zeros have no dependencies not in flat_list, so
      # they can be appended to flat_list. Take these nodes out of
      # in_degree_zeros as work progresses, so that the next node to process
      # from the list can always be accessed at a consistent position.
      node = in_degree_zeros.pop()
      flat_list.add(node.ref)

      # Look at dependents of the node just added to flat_list. Some of them
      # may now belong in in_degree_zeros.
      for node_dependent in node.dependents:
        is_in_degree_zero = True
        # TODO: We want to check through the
        # node_dependent.dependencies list but if it's long and we
        # always start at the beginning, then we get O(n^2) behaviour.
        for node_dependent_dependency in node_dependent.dependencies:
          if not node_dependent_dependency.ref in flat_list:
            # The dependent one or more dependencies not in flat_list. There
            # will be more chances to add it to flat_list when examining
            # it again as a dependent of those other dependencies, provided
            # that there are no cycles.
            is_in_degree_zero = False
            break

        if is_in_degree_zero:
          # All of the dependent's dependencies are already in flat_list. Add
          # it to in_degree_zeros where it will be processed in a future
          # iteration of the outer loop.
          in_degree_zeros.add(node_dependent)

    return list(flat_list)

  def FindCycles(self):
    """
    Returns a list of cycles in the graph, where each cycle is its own list.
    """
    results = []
    visited = set()

    def Visit(node, path):
      # Depth-first walk along 'dependents' edges; a node already on the
      # current path closes a cycle.
      for child in node.dependents:
        if child in path:
          results.append([child] + path[:path.index(child) + 1])
        elif not child in visited:
          visited.add(child)
          Visit(child, [child] + path)

    visited.add(self)
    Visit(self, [self])

    return results

  def DirectDependencies(self, dependencies=None):
    """Returns a list of just direct dependencies."""
    if dependencies == None:
      dependencies = []

    for dependency in self.dependencies:
      # Check for None, corresponding to the root node.
      if dependency.ref != None and dependency.ref not in dependencies:
        dependencies.append(dependency.ref)

    return dependencies

  def _AddImportedDependencies(self, targets, dependencies=None):
    """Given a list of direct dependencies, adds indirect dependencies that
    other dependencies have declared to export their settings.

    This method does not operate on self. Rather, it operates on the list
    of dependencies in the |dependencies| argument. For each dependency in
    that list, if any declares that it exports the settings of one of its
    own dependencies, those dependencies whose settings are "passed through"
    are added to the list. As new items are added to the list, they too will
    be processed, so it is possible to import settings through multiple
    levels of dependencies.

    This method is not terribly useful on its own, it depends on being
    "primed" with a list of direct dependencies such as one provided by
    DirectDependencies. DirectAndImportedDependencies is intended to be the
    public entry point.
    """

    if dependencies == None:
      dependencies = []

    index = 0
    while index < len(dependencies):
      dependency = dependencies[index]
      dependency_dict = targets[dependency]
      # Add any dependencies whose settings should be imported to the list
      # if not already present. Newly-added items will be checked for
      # their own imports when the list iteration reaches them.
      # Rather than simply appending new items, insert them after the
      # dependency that exported them. This is done to more closely match
      # the depth-first method used by DeepDependencies.
add_index = 1 for imported_dependency in \ dependency_dict.get('export_dependent_settings', []): if imported_dependency not in dependencies: dependencies.insert(index + add_index, imported_dependency) add_index = add_index + 1 index = index + 1 return dependencies def DirectAndImportedDependencies(self, targets, dependencies=None): """Returns a list of a target's direct dependencies and all indirect dependencies that a dependency has advertised settings should be exported through the dependency for. """ dependencies = self.DirectDependencies(dependencies) return self._AddImportedDependencies(targets, dependencies) def DeepDependencies(self, dependencies=None): """Returns an OrderedSet of all of a target's dependencies, recursively.""" if dependencies is None: # Using a list to get ordered output and a set to do fast "is it # already added" checks. dependencies = OrderedSet() for dependency in self.dependencies: # Check for None, corresponding to the root node. if dependency.ref is None: continue if dependency.ref not in dependencies: dependency.DeepDependencies(dependencies) dependencies.add(dependency.ref) return dependencies def _LinkDependenciesInternal(self, targets, include_shared_libraries, dependencies=None, initial=True): """Returns an OrderedSet of dependency targets that are linked into this target. This function has a split personality, depending on the setting of |initial|. Outside callers should always leave |initial| at its default setting. When adding a target to the list of dependencies, this function will recurse into itself with |initial| set to False, to collect dependencies that are linked into the linkable target for which the list is being built. If |include_shared_libraries| is False, the resulting dependencies will not include shared_library targets that are linked into this target. """ if dependencies is None: # Using a list to get ordered output and a set to do fast "is it # already added" checks. 
dependencies = OrderedSet() # Check for None, corresponding to the root node. if self.ref is None: return dependencies # It's kind of sucky that |targets| has to be passed into this function, # but that's presently the easiest way to access the target dicts so that # this function can find target types. if 'target_name' not in targets[self.ref]: raise GypError("Missing 'target_name' field in target.") if 'type' not in targets[self.ref]: raise GypError("Missing 'type' field in target %s" % targets[self.ref]['target_name']) target_type = targets[self.ref]['type'] is_linkable = target_type in linkable_types if initial and not is_linkable: # If this is the first target being examined and it's not linkable, # return an empty list of link dependencies, because the link # dependencies are intended to apply to the target itself (initial is # True) and this target won't be linked. return dependencies # Don't traverse 'none' targets if explicitly excluded. if (target_type == 'none' and not targets[self.ref].get('dependencies_traverse', True)): dependencies.add(self.ref) return dependencies # Executables, mac kernel extensions and loadable modules are already fully # and finally linked. Nothing else can be a link dependency of them, there # can only be dependencies in the sense that a dependent target might run # an executable or load the loadable_module. if not initial and target_type in ('executable', 'loadable_module', 'mac_kernel_extension'): return dependencies # Shared libraries are already fully linked. They should only be included # in |dependencies| when adjusting static library dependencies (in order to # link against the shared_library's import lib), but should not be included # in |dependencies| when propagating link_settings. # The |include_shared_libraries| flag controls which of these two cases we # are handling. 
if (not initial and target_type == 'shared_library' and not include_shared_libraries): return dependencies # The target is linkable, add it to the list of link dependencies. if self.ref not in dependencies: dependencies.add(self.ref) if initial or not is_linkable: # If this is a subsequent target and it's linkable, don't look any # further for linkable dependencies, as they'll already be linked into # this target linkable. Always look at dependencies of the initial # target, and always look at dependencies of non-linkables. for dependency in self.dependencies: dependency._LinkDependenciesInternal(targets, include_shared_libraries, dependencies, False) return dependencies def DependenciesForLinkSettings(self, targets): """ Returns a list of dependency targets whose link_settings should be merged into this target. """ # TODO(sbaig) Currently, chrome depends on the bug that shared libraries' # link_settings are propagated. So for now, we will allow it, unless the # 'allow_sharedlib_linksettings_propagation' flag is explicitly set to # False. Once chrome is fixed, we can remove this flag. include_shared_libraries = \ targets[self.ref].get('allow_sharedlib_linksettings_propagation', True) return self._LinkDependenciesInternal(targets, include_shared_libraries) def DependenciesToLinkAgainst(self, targets): """ Returns a list of dependency targets that are linked into this target. """ return self._LinkDependenciesInternal(targets, True) def BuildDependencyList(targets): # Create a DependencyGraphNode for each target. Put it into a dict for easy # access. dependency_nodes = {} for target, spec in targets.iteritems(): if target not in dependency_nodes: dependency_nodes[target] = DependencyGraphNode(target) # Set up the dependency links. Targets that have no dependencies are treated # as dependent on root_node. 
root_node = DependencyGraphNode(None) for target, spec in targets.iteritems(): target_node = dependency_nodes[target] target_build_file = gyp.common.BuildFile(target) dependencies = spec.get('dependencies') if not dependencies: target_node.dependencies = [root_node] root_node.dependents.append(target_node) else: for dependency in dependencies: dependency_node = dependency_nodes.get(dependency) if not dependency_node: raise GypError("Dependency '%s' not found while " "trying to load target %s" % (dependency, target)) target_node.dependencies.append(dependency_node) dependency_node.dependents.append(target_node) flat_list = root_node.FlattenToList() # If there's anything left unvisited, there must be a circular dependency # (cycle). if len(flat_list) != len(targets): if not root_node.dependents: # If all targets have dependencies, add the first target as a dependent # of root_node so that the cycle can be discovered from root_node. target = targets.keys()[0] target_node = dependency_nodes[target] target_node.dependencies.append(root_node) root_node.dependents.append(target_node) cycles = [] for cycle in root_node.FindCycles(): paths = [node.ref for node in cycle] cycles.append('Cycle: %s' % ' -> '.join(paths)) raise DependencyGraphNode.CircularException( 'Cycles in dependency graph detected:\n' + '\n'.join(cycles)) return [dependency_nodes, flat_list] def VerifyNoGYPFileCircularDependencies(targets): # Create a DependencyGraphNode for each gyp file containing a target. Put # it into a dict for easy access. dependency_nodes = {} for target in targets.iterkeys(): build_file = gyp.common.BuildFile(target) if not build_file in dependency_nodes: dependency_nodes[build_file] = DependencyGraphNode(build_file) # Set up the dependency links. 
for target, spec in targets.iteritems(): build_file = gyp.common.BuildFile(target) build_file_node = dependency_nodes[build_file] target_dependencies = spec.get('dependencies', []) for dependency in target_dependencies: try: dependency_build_file = gyp.common.BuildFile(dependency) except GypError, e: gyp.common.ExceptionAppend( e, 'while computing dependencies of .gyp file %s' % build_file) raise if dependency_build_file == build_file: # A .gyp file is allowed to refer back to itself. continue dependency_node = dependency_nodes.get(dependency_build_file) if not dependency_node: raise GypError("Dependancy '%s' not found" % dependency_build_file) if dependency_node not in build_file_node.dependencies: build_file_node.dependencies.append(dependency_node) dependency_node.dependents.append(build_file_node) # Files that have no dependencies are treated as dependent on root_node. root_node = DependencyGraphNode(None) for build_file_node in dependency_nodes.itervalues(): if len(build_file_node.dependencies) == 0: build_file_node.dependencies.append(root_node) root_node.dependents.append(build_file_node) flat_list = root_node.FlattenToList() # If there's anything left unvisited, there must be a circular dependency # (cycle). if len(flat_list) != len(dependency_nodes): if not root_node.dependents: # If all files have dependencies, add the first file as a dependent # of root_node so that the cycle can be discovered from root_node. file_node = dependency_nodes.values()[0] file_node.dependencies.append(root_node) root_node.dependents.append(file_node) cycles = [] for cycle in root_node.FindCycles(): paths = [node.ref for node in cycle] cycles.append('Cycle: %s' % ' -> '.join(paths)) raise DependencyGraphNode.CircularException( 'Cycles in .gyp file dependency graph detected:\n' + '\n'.join(cycles)) def DoDependentSettings(key, flat_list, targets, dependency_nodes): # key should be one of all_dependent_settings, direct_dependent_settings, # or link_settings. 
for target in flat_list: target_dict = targets[target] build_file = gyp.common.BuildFile(target) if key == 'all_dependent_settings': dependencies = dependency_nodes[target].DeepDependencies() elif key == 'direct_dependent_settings': dependencies = \ dependency_nodes[target].DirectAndImportedDependencies(targets) elif key == 'link_settings': dependencies = \ dependency_nodes[target].DependenciesForLinkSettings(targets) else: raise GypError("DoDependentSettings doesn't know how to determine " 'dependencies for ' + key) for dependency in dependencies: dependency_dict = targets[dependency] if not key in dependency_dict: continue dependency_build_file = gyp.common.BuildFile(dependency) MergeDicts(target_dict, dependency_dict[key], build_file, dependency_build_file) def AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes, sort_dependencies): # Recompute target "dependencies" properties. For each static library # target, remove "dependencies" entries referring to other static libraries, # unless the dependency has the "hard_dependency" attribute set. For each # linkable target, add a "dependencies" entry referring to all of the # target's computed list of link dependencies (including static libraries # if no such entry is already present. for target in flat_list: target_dict = targets[target] target_type = target_dict['type'] if target_type == 'static_library': if not 'dependencies' in target_dict: continue target_dict['dependencies_original'] = target_dict.get( 'dependencies', [])[:] # A static library should not depend on another static library unless # the dependency relationship is "hard," which should only be done when # a dependent relies on some side effect other than just the build # product, like a rule or action output. 
Further, if a target has a # non-hard dependency, but that dependency exports a hard dependency, # the non-hard dependency can safely be removed, but the exported hard # dependency must be added to the target to keep the same dependency # ordering. dependencies = \ dependency_nodes[target].DirectAndImportedDependencies(targets) index = 0 while index < len(dependencies): dependency = dependencies[index] dependency_dict = targets[dependency] # Remove every non-hard static library dependency and remove every # non-static library dependency that isn't a direct dependency. if (dependency_dict['type'] == 'static_library' and \ not dependency_dict.get('hard_dependency', False)) or \ (dependency_dict['type'] != 'static_library' and \ not dependency in target_dict['dependencies']): # Take the dependency out of the list, and don't increment index # because the next dependency to analyze will shift into the index # formerly occupied by the one being removed. del dependencies[index] else: index = index + 1 # Update the dependencies. If the dependencies list is empty, it's not # needed, so unhook it. if len(dependencies) > 0: target_dict['dependencies'] = dependencies else: del target_dict['dependencies'] elif target_type in linkable_types: # Get a list of dependency targets that should be linked into this # target. Add them to the dependencies list if they're not already # present. link_dependencies = \ dependency_nodes[target].DependenciesToLinkAgainst(targets) for dependency in link_dependencies: if dependency == target: continue if not 'dependencies' in target_dict: target_dict['dependencies'] = [] if not dependency in target_dict['dependencies']: target_dict['dependencies'].append(dependency) # Sort the dependencies list in the order from dependents to dependencies. # e.g. If A and B depend on C and C depends on D, sort them in A, B, C, D. # Note: flat_list is already sorted in the order from dependencies to # dependents. 
if sort_dependencies and 'dependencies' in target_dict: target_dict['dependencies'] = [dep for dep in reversed(flat_list) if dep in target_dict['dependencies']] # Initialize this here to speed up MakePathRelative. exception_re = re.compile(r'''["']?[-/$<>^]''') def MakePathRelative(to_file, fro_file, item): # If item is a relative path, it's relative to the build file dict that it's # coming from. Fix it up to make it relative to the build file dict that # it's going into. # Exception: any |item| that begins with these special characters is # returned without modification. # / Used when a path is already absolute (shortcut optimization; # such paths would be returned as absolute anyway) # $ Used for build environment variables # - Used for some build environment flags (such as -lapr-1 in a # "libraries" section) # < Used for our own variable and command expansions (see ExpandVariables) # > Used for our own variable and command expansions (see ExpandVariables) # ^ Used for our own variable and command expansions (see ExpandVariables) # # "/' Used when a value is quoted. If these are present, then we # check the second character instead. # if to_file == fro_file or exception_re.match(item): return item else: # TODO(dglazkov) The backslash/forward-slash replacement at the end is a # temporary measure. This should really be addressed by keeping all paths # in POSIX until actual project generation. ret = os.path.normpath(os.path.join( gyp.common.RelativePath(os.path.dirname(fro_file), os.path.dirname(to_file)), item)).replace('\\', '/') if item[-1] == '/': ret += '/' return ret def MergeLists(to, fro, to_file, fro_file, is_paths=False, append=True): # Python documentation recommends objects which do not support hash # set this value to None. Python library objects follow this rule. is_hashable = lambda val: val.__hash__ # If x is hashable, returns whether x is in s. Else returns whether x is in l. 
def is_in_set_or_list(x, s, l): if is_hashable(x): return x in s return x in l prepend_index = 0 # Make membership testing of hashables in |to| (in particular, strings) # faster. hashable_to_set = set(x for x in to if is_hashable(x)) for item in fro: singleton = False if type(item) in (str, int): # The cheap and easy case. if is_paths: to_item = MakePathRelative(to_file, fro_file, item) else: to_item = item if not (type(item) is str and item.startswith('-')): # Any string that doesn't begin with a "-" is a singleton - it can # only appear once in a list, to be enforced by the list merge append # or prepend. singleton = True elif type(item) is dict: # Make a copy of the dictionary, continuing to look for paths to fix. # The other intelligent aspects of merge processing won't apply because # item is being merged into an empty dict. to_item = {} MergeDicts(to_item, item, to_file, fro_file) elif type(item) is list: # Recurse, making a copy of the list. If the list contains any # descendant dicts, path fixing will occur. Note that here, custom # values for is_paths and append are dropped; those are only to be # applied to |to| and |fro|, not sublists of |fro|. append shouldn't # matter anyway because the new |to_item| list is empty. to_item = [] MergeLists(to_item, item, to_file, fro_file) else: raise TypeError( 'Attempt to merge list item of unsupported type ' + \ item.__class__.__name__) if append: # If appending a singleton that's already in the list, don't append. # This ensures that the earliest occurrence of the item will stay put. if not singleton or not is_in_set_or_list(to_item, hashable_to_set, to): to.append(to_item) if is_hashable(to_item): hashable_to_set.add(to_item) else: # If prepending a singleton that's already in the list, remove the # existing instance and proceed with the prepend. This ensures that the # item appears at the earliest possible position in the list. 
while singleton and to_item in to: to.remove(to_item) # Don't just insert everything at index 0. That would prepend the new # items to the list in reverse order, which would be an unwelcome # surprise. to.insert(prepend_index, to_item) if is_hashable(to_item): hashable_to_set.add(to_item) prepend_index = prepend_index + 1 def MergeDicts(to, fro, to_file, fro_file): # I wanted to name the parameter "from" but it's a Python keyword... for k, v in fro.iteritems(): # It would be nice to do "if not k in to: to[k] = v" but that wouldn't give # copy semantics. Something else may want to merge from the |fro| dict # later, and having the same dict ref pointed to twice in the tree isn't # what anyone wants considering that the dicts may subsequently be # modified. if k in to: bad_merge = False if type(v) in (str, int): if type(to[k]) not in (str, int): bad_merge = True elif type(v) is not type(to[k]): bad_merge = True if bad_merge: raise TypeError( 'Attempt to merge dict value of type ' + v.__class__.__name__ + \ ' into incompatible type ' + to[k].__class__.__name__ + \ ' for key ' + k) if type(v) in (str, int): # Overwrite the existing value, if any. Cheap and easy. is_path = IsPathSection(k) if is_path: to[k] = MakePathRelative(to_file, fro_file, v) else: to[k] = v elif type(v) is dict: # Recurse, guaranteeing copies will be made of objects that require it. if not k in to: to[k] = {} MergeDicts(to[k], v, to_file, fro_file) elif type(v) is list: # Lists in dicts can be merged with different policies, depending on # how the key in the "from" dict (k, the from-key) is written. # # If the from-key has ...the to-list will have this action # this character appended:... applied when receiving the from-list: # = replace # + prepend # ? set, only if to-list does not yet exist # (none) append # # This logic is list-specific, but since it relies on the associated # dict key, it's checked in this dict-oriented function. 
ext = k[-1] append = True if ext == '=': list_base = k[:-1] lists_incompatible = [list_base, list_base + '?'] to[list_base] = [] elif ext == '+': list_base = k[:-1] lists_incompatible = [list_base + '=', list_base + '?'] append = False elif ext == '?': list_base = k[:-1] lists_incompatible = [list_base, list_base + '=', list_base + '+'] else: list_base = k lists_incompatible = [list_base + '=', list_base + '?'] # Some combinations of merge policies appearing together are meaningless. # It's stupid to replace and append simultaneously, for example. Append # and prepend are the only policies that can coexist. for list_incompatible in lists_incompatible: if list_incompatible in fro: raise GypError('Incompatible list policies ' + k + ' and ' + list_incompatible) if list_base in to: if ext == '?': # If the key ends in "?", the list will only be merged if it doesn't # already exist. continue elif type(to[list_base]) is not list: # This may not have been checked above if merging in a list with an # extension character. raise TypeError( 'Attempt to merge dict value of type ' + v.__class__.__name__ + \ ' into incompatible type ' + to[list_base].__class__.__name__ + \ ' for key ' + list_base + '(' + k + ')') else: to[list_base] = [] # Call MergeLists, which will make copies of objects that require it. # MergeLists can recurse back into MergeDicts, although this will be # to make copies of dicts (with paths fixed), there will be no # subsequent dict "merging" once entering a list because lists are # always replaced, appended to, or prepended to. is_paths = IsPathSection(list_base) MergeLists(to[list_base], v, to_file, fro_file, is_paths, append) else: raise TypeError( 'Attempt to merge dict value of unsupported type ' + \ v.__class__.__name__ + ' for key ' + k) def MergeConfigWithInheritance(new_configuration_dict, build_file, target_dict, configuration, visited): # Skip if previously visted. if configuration in visited: return # Look at this configuration. 
configuration_dict = target_dict['configurations'][configuration] # Merge in parents. for parent in configuration_dict.get('inherit_from', []): MergeConfigWithInheritance(new_configuration_dict, build_file, target_dict, parent, visited + [configuration]) # Merge it into the new config. MergeDicts(new_configuration_dict, configuration_dict, build_file, build_file) # Drop abstract. if 'abstract' in new_configuration_dict: del new_configuration_dict['abstract'] def SetUpConfigurations(target, target_dict): # key_suffixes is a list of key suffixes that might appear on key names. # These suffixes are handled in conditional evaluations (for =, +, and ?) # and rules/exclude processing (for ! and /). Keys with these suffixes # should be treated the same as keys without. key_suffixes = ['=', '+', '?', '!', '/'] build_file = gyp.common.BuildFile(target) # Provide a single configuration by default if none exists. # TODO(mark): Signal an error if default_configurations exists but # configurations does not. if not 'configurations' in target_dict: target_dict['configurations'] = {'Default': {}} if not 'default_configuration' in target_dict: concrete = [i for (i, config) in target_dict['configurations'].iteritems() if not config.get('abstract')] target_dict['default_configuration'] = sorted(concrete)[0] merged_configurations = {} configs = target_dict['configurations'] for (configuration, old_configuration_dict) in configs.iteritems(): # Skip abstract configurations (saves work only). if old_configuration_dict.get('abstract'): continue # Configurations inherit (most) settings from the enclosing target scope. # Get the inheritance relationship right by making a copy of the target # dict. 
new_configuration_dict = {} for (key, target_val) in target_dict.iteritems(): key_ext = key[-1:] if key_ext in key_suffixes: key_base = key[:-1] else: key_base = key if not key_base in non_configuration_keys: new_configuration_dict[key] = gyp.simple_copy.deepcopy(target_val) # Merge in configuration (with all its parents first). MergeConfigWithInheritance(new_configuration_dict, build_file, target_dict, configuration, []) merged_configurations[configuration] = new_configuration_dict # Put the new configurations back into the target dict as a configuration. for configuration in merged_configurations.keys(): target_dict['configurations'][configuration] = ( merged_configurations[configuration]) # Now drop all the abstract ones. for configuration in target_dict['configurations'].keys(): old_configuration_dict = target_dict['configurations'][configuration] if old_configuration_dict.get('abstract'): del target_dict['configurations'][configuration] # Now that all of the target's configurations have been built, go through # the target dict's keys and remove everything that's been moved into a # "configurations" section. delete_keys = [] for key in target_dict: key_ext = key[-1:] if key_ext in key_suffixes: key_base = key[:-1] else: key_base = key if not key_base in non_configuration_keys: delete_keys.append(key) for key in delete_keys: del target_dict[key] # Check the configurations to see if they contain invalid keys. for configuration in target_dict['configurations'].keys(): configuration_dict = target_dict['configurations'][configuration] for key in configuration_dict.keys(): if key in invalid_configuration_keys: raise GypError('%s not allowed in the %s configuration, found in ' 'target %s' % (key, configuration, target)) def ProcessListFiltersInDict(name, the_dict): """Process regular expression and exclusion-based filters on lists. An exclusion list is in a dict key named with a trailing "!", like "sources!". 
Every item in such a list is removed from the associated main list, which in this example, would be "sources". Removed items are placed into a "sources_excluded" list in the dict. Regular expression (regex) filters are contained in dict keys named with a trailing "/", such as "sources/" to operate on the "sources" list. Regex filters in a dict take the form: 'sources/': [ ['exclude', '_(linux|mac|win)\\.cc$'], ['include', '_mac\\.cc$'] ], The first filter says to exclude all files ending in _linux.cc, _mac.cc, and _win.cc. The second filter then includes all files ending in _mac.cc that are now or were once in the "sources" list. Items matching an "exclude" filter are subject to the same processing as would occur if they were listed by name in an exclusion list (ending in "!"). Items matching an "include" filter are brought back into the main list if previously excluded by an exclusion list or exclusion regex filter. Subsequent matching "exclude" patterns can still cause items to be excluded after matching an "include". """ # Look through the dictionary for any lists whose keys end in "!" or "/". # These are lists that will be treated as exclude lists and regular # expression-based exclude/include lists. Collect the lists that are # needed first, looking for the lists that they operate on, and assemble # then into |lists|. This is done in a separate loop up front, because # the _included and _excluded keys need to be added to the_dict, and that # can't be done while iterating through it. lists = [] del_lists = [] for key, value in the_dict.iteritems(): operation = key[-1] if operation != '!' and operation != '/': continue if type(value) is not list: raise ValueError(name + ' key ' + key + ' must be list, not ' + \ value.__class__.__name__) list_key = key[:-1] if list_key not in the_dict: # This happens when there's a list like "sources!" but no corresponding # "sources" list. Since there's nothing for it to operate on, queue up # the "sources!" 
list for deletion now. del_lists.append(key) continue if type(the_dict[list_key]) is not list: value = the_dict[list_key] raise ValueError(name + ' key ' + list_key + \ ' must be list, not ' + \ value.__class__.__name__ + ' when applying ' + \ {'!': 'exclusion', '/': 'regex'}[operation]) if not list_key in lists: lists.append(list_key) # Delete the lists that are known to be unneeded at this point. for del_list in del_lists: del the_dict[del_list] for list_key in lists: the_list = the_dict[list_key] # Initialize the list_actions list, which is parallel to the_list. Each # item in list_actions identifies whether the corresponding item in # the_list should be excluded, unconditionally preserved (included), or # whether no exclusion or inclusion has been applied. Items for which # no exclusion or inclusion has been applied (yet) have value -1, items # excluded have value 0, and items included have value 1. Includes and # excludes override previous actions. All items in list_actions are # initialized to -1 because no excludes or includes have been processed # yet. list_actions = list((-1,) * len(the_list)) exclude_key = list_key + '!' if exclude_key in the_dict: for exclude_item in the_dict[exclude_key]: for index in xrange(0, len(the_list)): if exclude_item == the_list[index]: # This item matches the exclude_item, so set its action to 0 # (exclude). list_actions[index] = 0 # The "whatever!" list is no longer needed, dump it. del the_dict[exclude_key] regex_key = list_key + '/' if regex_key in the_dict: for regex_item in the_dict[regex_key]: [action, pattern] = regex_item pattern_re = re.compile(pattern) if action == 'exclude': # This item matches an exclude regex, so set its value to 0 (exclude). action_value = 0 elif action == 'include': # This item matches an include regex, so set its value to 1 (include). action_value = 1 else: # This is an action that doesn't make any sense. 
raise ValueError('Unrecognized action ' + action + ' in ' + name + \ ' key ' + regex_key) for index in xrange(0, len(the_list)): list_item = the_list[index] if list_actions[index] == action_value: # Even if the regex matches, nothing will change so continue (regex # searches are expensive). continue if pattern_re.search(list_item): # Regular expression match. list_actions[index] = action_value # The "whatever/" list is no longer needed, dump it. del the_dict[regex_key] # Add excluded items to the excluded list. # # Note that exclude_key ("sources!") is different from excluded_key # ("sources_excluded"). The exclude_key list is input and it was already # processed and deleted; the excluded_key list is output and it's about # to be created. excluded_key = list_key + '_excluded' if excluded_key in the_dict: raise GypError(name + ' key ' + excluded_key + ' must not be present prior ' ' to applying exclusion/regex filters for ' + list_key) excluded_list = [] # Go backwards through the list_actions list so that as items are deleted, # the indices of items that haven't been seen yet don't shift. That means # that things need to be prepended to excluded_list to maintain them in the # same order that they existed in the_list. for index in xrange(len(list_actions) - 1, -1, -1): if list_actions[index] == 0: # Dump anything with action 0 (exclude). Keep anything with action 1 # (include) or -1 (no include or exclude seen for the item). excluded_list.insert(0, the_list[index]) del the_list[index] # If anything was excluded, put the excluded list into the_dict at # excluded_key. if len(excluded_list) > 0: the_dict[excluded_key] = excluded_list # Now recurse into subdicts and lists that may contain dicts. 
for key, value in the_dict.iteritems(): if type(value) is dict: ProcessListFiltersInDict(key, value) elif type(value) is list: ProcessListFiltersInList(key, value) def ProcessListFiltersInList(name, the_list): for item in the_list: if type(item) is dict: ProcessListFiltersInDict(name, item) elif type(item) is list: ProcessListFiltersInList(name, item) def ValidateTargetType(target, target_dict): """Ensures the 'type' field on the target is one of the known types. Arguments: target: string, name of target. target_dict: dict, target spec. Raises an exception on error. """ VALID_TARGET_TYPES = ('executable', 'loadable_module', 'static_library', 'shared_library', 'mac_kernel_extension', 'none') target_type = target_dict.get('type', None) if target_type not in VALID_TARGET_TYPES: raise GypError("Target %s has an invalid target type '%s'. " "Must be one of %s." % (target, target_type, '/'.join(VALID_TARGET_TYPES))) if (target_dict.get('standalone_static_library', 0) and not target_type == 'static_library'): raise GypError('Target %s has type %s but standalone_static_library flag is' ' only valid for static_library type.' % (target, target_type)) def ValidateSourcesInTarget(target, target_dict, build_file, duplicate_basename_check): if not duplicate_basename_check: return if target_dict.get('type', None) != 'static_library': return sources = target_dict.get('sources', []) basenames = {} for source in sources: name, ext = os.path.splitext(source) is_compiled_file = ext in [ '.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S'] if not is_compiled_file: continue basename = os.path.basename(name) # Don't include extension. basenames.setdefault(basename, []).append(source) error = '' for basename, files in basenames.iteritems(): if len(files) > 1: error += ' %s: %s\n' % (basename, ' '.join(files)) if error: print('static library %s has several files with the same basename:\n' % target + error + 'libtool on Mac cannot handle that. 
Use ' '--no-duplicate-basename-check to disable this validation.') raise GypError('Duplicate basenames in sources section, see list above') def ValidateRulesInTarget(target, target_dict, extra_sources_for_rules): """Ensures that the rules sections in target_dict are valid and consistent, and determines which sources they apply to. Arguments: target: string, name of target. target_dict: dict, target spec containing "rules" and "sources" lists. extra_sources_for_rules: a list of keys to scan for rule matches in addition to 'sources'. """ # Dicts to map between values found in rules' 'rule_name' and 'extension' # keys and the rule dicts themselves. rule_names = {} rule_extensions = {} rules = target_dict.get('rules', []) for rule in rules: # Make sure that there's no conflict among rule names and extensions. rule_name = rule['rule_name'] if rule_name in rule_names: raise GypError('rule %s exists in duplicate, target %s' % (rule_name, target)) rule_names[rule_name] = rule rule_extension = rule['extension'] if rule_extension.startswith('.'): rule_extension = rule_extension[1:] if rule_extension in rule_extensions: raise GypError(('extension %s associated with multiple rules, ' + 'target %s rules %s and %s') % (rule_extension, target, rule_extensions[rule_extension]['rule_name'], rule_name)) rule_extensions[rule_extension] = rule # Make sure rule_sources isn't already there. It's going to be # created below if needed. 
if 'rule_sources' in rule: raise GypError( 'rule_sources must not exist in input, target %s rule %s' % (target, rule_name)) rule_sources = [] source_keys = ['sources'] source_keys.extend(extra_sources_for_rules) for source_key in source_keys: for source in target_dict.get(source_key, []): (source_root, source_extension) = os.path.splitext(source) if source_extension.startswith('.'): source_extension = source_extension[1:] if source_extension == rule_extension: rule_sources.append(source) if len(rule_sources) > 0: rule['rule_sources'] = rule_sources def ValidateRunAsInTarget(target, target_dict, build_file): target_name = target_dict.get('target_name') run_as = target_dict.get('run_as') if not run_as: return if type(run_as) is not dict: raise GypError("The 'run_as' in target %s from file %s should be a " "dictionary." % (target_name, build_file)) action = run_as.get('action') if not action: raise GypError("The 'run_as' in target %s from file %s must have an " "'action' section." % (target_name, build_file)) if type(action) is not list: raise GypError("The 'action' for 'run_as' in target %s from file %s " "must be a list." % (target_name, build_file)) working_directory = run_as.get('working_directory') if working_directory and type(working_directory) is not str: raise GypError("The 'working_directory' for 'run_as' in target %s " "in file %s should be a string." % (target_name, build_file)) environment = run_as.get('environment') if environment and type(environment) is not dict: raise GypError("The 'environment' for 'run_as' in target %s " "in file %s should be a dictionary." % (target_name, build_file)) def ValidateActionsInTarget(target, target_dict, build_file): '''Validates the inputs to the actions in a target.''' target_name = target_dict.get('target_name') actions = target_dict.get('actions', []) for action in actions: action_name = action.get('action_name') if not action_name: raise GypError("Anonymous action in target %s. 
" "An action must have an 'action_name' field." % target_name) inputs = action.get('inputs', None) if inputs is None: raise GypError('Action in target %s has no inputs.' % target_name) action_command = action.get('action') if action_command and not action_command[0]: raise GypError("Empty action as command in target %s." % target_name) def TurnIntIntoStrInDict(the_dict): """Given dict the_dict, recursively converts all integers into strings. """ # Use items instead of iteritems because there's no need to try to look at # reinserted keys and their associated values. for k, v in the_dict.items(): if type(v) is int: v = str(v) the_dict[k] = v elif type(v) is dict: TurnIntIntoStrInDict(v) elif type(v) is list: TurnIntIntoStrInList(v) if type(k) is int: del the_dict[k] the_dict[str(k)] = v def TurnIntIntoStrInList(the_list): """Given list the_list, recursively converts all integers into strings. """ for index in xrange(0, len(the_list)): item = the_list[index] if type(item) is int: the_list[index] = str(item) elif type(item) is dict: TurnIntIntoStrInDict(item) elif type(item) is list: TurnIntIntoStrInList(item) def PruneUnwantedTargets(targets, flat_list, dependency_nodes, root_targets, data): """Return only the targets that are deep dependencies of |root_targets|.""" qualified_root_targets = [] for target in root_targets: target = target.strip() qualified_targets = gyp.common.FindQualifiedTargets(target, flat_list) if not qualified_targets: raise GypError("Could not find target %s" % target) qualified_root_targets.extend(qualified_targets) wanted_targets = {} for target in qualified_root_targets: wanted_targets[target] = targets[target] for dependency in dependency_nodes[target].DeepDependencies(): wanted_targets[dependency] = targets[dependency] wanted_flat_list = [t for t in flat_list if t in wanted_targets] # Prune unwanted targets from each build_file's data dict. 
for build_file in data['target_build_files']: if not 'targets' in data[build_file]: continue new_targets = [] for target in data[build_file]['targets']: qualified_name = gyp.common.QualifiedTarget(build_file, target['target_name'], target['toolset']) if qualified_name in wanted_targets: new_targets.append(target) data[build_file]['targets'] = new_targets return wanted_targets, wanted_flat_list def VerifyNoCollidingTargets(targets): """Verify that no two targets in the same directory share the same name. Arguments: targets: A list of targets in the form 'path/to/file.gyp:target_name'. """ # Keep a dict going from 'subdirectory:target_name' to 'foo.gyp'. used = {} for target in targets: # Separate out 'path/to/file.gyp, 'target_name' from # 'path/to/file.gyp:target_name'. path, name = target.rsplit(':', 1) # Separate out 'path/to', 'file.gyp' from 'path/to/file.gyp'. subdir, gyp = os.path.split(path) # Use '.' for the current directory '', so that the error messages make # more sense. if not subdir: subdir = '.' # Prepare a key like 'path/to:target_name'. key = subdir + ':' + name if key in used: # Complain if this target is already used. raise GypError('Duplicate target name "%s" in directory "%s" used both ' 'in "%s" and "%s".' % (name, subdir, gyp, used[key])) used[key] = gyp def SetGeneratorGlobals(generator_input_info): # Set up path_sections and non_configuration_keys with the default data plus # the generator-specific data. 
global path_sections path_sections = set(base_path_sections) path_sections.update(generator_input_info['path_sections']) global non_configuration_keys non_configuration_keys = base_non_configuration_keys[:] non_configuration_keys.extend(generator_input_info['non_configuration_keys']) global multiple_toolsets multiple_toolsets = generator_input_info[ 'generator_supports_multiple_toolsets'] global generator_filelist_paths generator_filelist_paths = generator_input_info['generator_filelist_paths'] def Load(build_files, variables, includes, depth, generator_input_info, check, circular_check, duplicate_basename_check, parallel, root_targets): SetGeneratorGlobals(generator_input_info) # A generator can have other lists (in addition to sources) be processed # for rules. extra_sources_for_rules = generator_input_info['extra_sources_for_rules'] # Load build files. This loads every target-containing build file into # the |data| dictionary such that the keys to |data| are build file names, # and the values are the entire build file contents after "early" or "pre" # processing has been done and includes have been resolved. # NOTE: data contains both "target" files (.gyp) and "includes" (.gypi), as # well as meta-data (e.g. 'included_files' key). 'target_build_files' keeps # track of the keys corresponding to "target" files. data = {'target_build_files': set()} # Normalize paths everywhere. This is important because paths will be # used as keys to the data dict and for references between input files. build_files = set(map(os.path.normpath, build_files)) if parallel: LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth, check, generator_input_info) else: aux_data = {} for build_file in build_files: try: LoadTargetBuildFile(build_file, data, aux_data, variables, includes, depth, check, True) except Exception, e: gyp.common.ExceptionAppend(e, 'while trying to load %s' % build_file) raise # Build a dict to access each target's subdict by qualified name. 
targets = BuildTargetsDict(data) # Fully qualify all dependency links. QualifyDependencies(targets) # Remove self-dependencies from targets that have 'prune_self_dependencies' # set to 1. RemoveSelfDependencies(targets) # Expand dependencies specified as build_file:*. ExpandWildcardDependencies(targets, data) # Remove all dependencies marked as 'link_dependency' from the targets of # type 'none'. RemoveLinkDependenciesFromNoneTargets(targets) # Apply exclude (!) and regex (/) list filters only for dependency_sections. for target_name, target_dict in targets.iteritems(): tmp_dict = {} for key_base in dependency_sections: for op in ('', '!', '/'): key = key_base + op if key in target_dict: tmp_dict[key] = target_dict[key] del target_dict[key] ProcessListFiltersInDict(target_name, tmp_dict) # Write the results back to |target_dict|. for key in tmp_dict: target_dict[key] = tmp_dict[key] # Make sure every dependency appears at most once. RemoveDuplicateDependencies(targets) if circular_check: # Make sure that any targets in a.gyp don't contain dependencies in other # .gyp files that further depend on a.gyp. VerifyNoGYPFileCircularDependencies(targets) [dependency_nodes, flat_list] = BuildDependencyList(targets) if root_targets: # Remove, from |targets| and |flat_list|, the targets that are not deep # dependencies of the targets specified in |root_targets|. targets, flat_list = PruneUnwantedTargets( targets, flat_list, dependency_nodes, root_targets, data) # Check that no two targets in the same directory have the same name. VerifyNoCollidingTargets(flat_list) # Handle dependent settings of various types. for settings_type in ['all_dependent_settings', 'direct_dependent_settings', 'link_settings']: DoDependentSettings(settings_type, flat_list, targets, dependency_nodes) # Take out the dependent settings now that they've been published to all # of the targets that require them. 
for target in flat_list: if settings_type in targets[target]: del targets[target][settings_type] # Make sure static libraries don't declare dependencies on other static # libraries, but that linkables depend on all unlinked static libraries # that they need so that their link steps will be correct. gii = generator_input_info if gii['generator_wants_static_library_dependencies_adjusted']: AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes, gii['generator_wants_sorted_dependencies']) # Apply "post"/"late"/"target" variable expansions and condition evaluations. for target in flat_list: target_dict = targets[target] build_file = gyp.common.BuildFile(target) ProcessVariablesAndConditionsInDict( target_dict, PHASE_LATE, variables, build_file) # Move everything that can go into a "configurations" section into one. for target in flat_list: target_dict = targets[target] SetUpConfigurations(target, target_dict) # Apply exclude (!) and regex (/) list filters. for target in flat_list: target_dict = targets[target] ProcessListFiltersInDict(target, target_dict) # Apply "latelate" variable expansions and condition evaluations. for target in flat_list: target_dict = targets[target] build_file = gyp.common.BuildFile(target) ProcessVariablesAndConditionsInDict( target_dict, PHASE_LATELATE, variables, build_file) # Make sure that the rules make sense, and build up rule_sources lists as # needed. Not all generators will need to use the rule_sources lists, but # some may, and it seems best to build the list in a common spot. # Also validate actions and run_as elements in targets. 
for target in flat_list: target_dict = targets[target] build_file = gyp.common.BuildFile(target) ValidateTargetType(target, target_dict) ValidateSourcesInTarget(target, target_dict, build_file, duplicate_basename_check) ValidateRulesInTarget(target, target_dict, extra_sources_for_rules) ValidateRunAsInTarget(target, target_dict, build_file) ValidateActionsInTarget(target, target_dict, build_file) # Generators might not expect ints. Turn them into strs. TurnIntIntoStrInDict(data) # TODO(mark): Return |data| for now because the generator needs a list of # build files that came in. In the future, maybe it should just accept # a list, and not the whole data dict. return [flat_list, targets, data]
mit
1905410/Misago
misago/threads/views/admin/attachments.py
1
3500
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.db import transaction
from django.db.models import Count
from django.shortcuts import redirect
from django.utils.translation import ugettext_lazy as _

from misago.admin.views import generic

from ...forms import SearchAttachmentsForm
from ...models import Attachment, Post


class AttachmentAdmin(generic.AdminBaseMixin):
    """Shared configuration for the attachment admin views."""
    root_link = 'misago:admin:system:attachments:index'
    Model = Attachment
    templates_dir = 'misago/admin/attachments'
    message_404 = _("Requested attachment could not be found.")

    def get_queryset(self):
        # Join the relations rendered by the admin templates up front so
        # listing rows doesn't trigger one query per attachment.
        queryset = super(AttachmentAdmin, self).get_queryset()
        return queryset.select_related(
            'filetype', 'uploader', 'post', 'post__thread', 'post__category')


class AttachmentsList(AttachmentAdmin, generic.ListView):
    """Paginated, orderable list of attachments with mass delete."""
    items_per_page = 20
    ordering = (
        ('-id', _("From newest")),
        ('id', _("From oldest")),
        ('filename', _("A to z")),
        ('-filename', _("Z to a")),
        ('size', _("Smallest files")),
        ('-size', _("Largest files")),
    )
    selection_label = _('With attachments: 0')
    empty_selection_label = _('Select attachments')
    mass_actions = [
        {
            'action': 'delete',
            'name': _("Delete attachments"),
            'icon': 'fa fa-times-circle',
            'confirmation': _("Are you sure you want to delete selected attachments?"),
            'is_atomic': False
        }
    ]

    def get_search_form(self, request):
        return SearchAttachmentsForm

    def action_delete(self, request, attachments):
        """Delete the selected attachments and desync affected posts' caches."""
        # Ids of attachments referenced from posts, and the posts whose
        # attachments_cache will go stale once those are removed.
        removed_ids = []
        stale_post_ids = []
        for attachment in attachments:
            if attachment.post:
                removed_ids.append(attachment.pk)
                stale_post_ids.append(attachment.post_id)

        if stale_post_ids:
            # Lock the affected posts while their caches are rewritten.
            with transaction.atomic():
                locked_posts = Post.objects.select_for_update().filter(
                    id__in=stale_post_ids)
                for post in locked_posts:
                    self.delete_from_cache(post, removed_ids)

        for attachment in attachments:
            attachment.delete()

        message = _("Selected attachments have been deleted.")
        messages.success(request, message)

    def delete_from_cache(self, post, attachments):
        """Drop the given attachment ids from a post's attachments cache."""
        if not post.attachments_cache:
            return  # admin action may be taken due to desynced state

        clean_cache = [
            entry for entry in post.attachments_cache
            if entry['id'] not in attachments
        ]
        post.attachments_cache = clean_cache or None
        post.save(update_fields=['attachments_cache'])


class DeleteAttachment(AttachmentAdmin, generic.ButtonView):
    """Single-attachment delete button."""

    def button_action(self, request, target):
        if target.post:
            self.delete_from_cache(target)
        target.delete()

        message = _('Attachment "%(filename)s" has been deleted.')
        messages.success(request, message % {'filename': target.filename})

    def delete_from_cache(self, attachment):
        """Drop one attachment from its post's attachments cache."""
        if not attachment.post.attachments_cache:
            return  # admin action may be taken due to desynced state

        clean_cache = [
            entry for entry in attachment.post.attachments_cache
            if entry['id'] != attachment.id
        ]
        attachment.post.attachments_cache = clean_cache or None
        attachment.post.save(update_fields=['attachments_cache'])
gpl-2.0
unreal666/outwiker
plugins/source/source/pygments/styles/stata_light.py
4
1274
# -*- coding: utf-8 -*-
"""
    pygments.styles.stata_light
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~

    Light Style inspired by Stata's do-file editor. Note this is not
    meant to be a complete style, just for Stata's file formats.

    :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
    Number, Operator, Whitespace, Text


class StataLightStyle(Style):
    """
    Light mode style inspired by Stata's do-file editor. This is not
    meant to be a complete style, just for use with Stata.
    """

    default_style = ''

    styles = {
        # Plain text, whitespace markers and lexer errors.
        Text: '#111111',
        Whitespace: '#bbbbbb',
        Error: 'bg:#e3d2d2 #a61717',
        # Literals and operators.
        String: '#7a2424',
        Number: '#2c2cff',
        Operator: '',
        # Functions and other names.
        Name.Function: '#2c2cff',
        Name.Other: '#be646c',
        # Keywords and comments.
        Keyword: 'bold #353580',
        Keyword.Constant: '',
        Comment: 'italic #008800',
        # Stata macros (locals/globals map to Variable tokens).
        Name.Variable: 'bold #35baba',
        Name.Variable.Global: 'bold #b5565e',
    }
gpl-3.0
mbernasocchi/inasafe
safe/metadata35/test/test_metadata.py
6
2206
# coding=utf-8
"""
InaSAFE Disaster risk assessment tool developed by AusAid -
**Metadata tests.**

Tests for the metadata35 package: abstract base class instantiation,
rejection of unsupported XML types and insertion of nested XML elements.

Contact : ole.moller.nielsen@gmail.com

.. note:: This program is free software; you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
     the Free Software Foundation; either version 2 of the License, or
     (at your option) any later version.

"""

from unittest import TestCase
from xml.etree import ElementTree

from safe.metadata35 import BaseMetadata
from safe.metadata35 import ImpactLayerMetadata
from safe.metadata35.utils import insert_xml_element

__author__ = 'marco@opengis.ch'
__revision__ = '$Format:%H$'
__date__ = '12/10/2014'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
                 'Disaster Reduction')


class TestMetadata(TestCase):
    """Tests for metadata35 base classes and XML helpers."""

    def test_no_BaseMeta_instantiation(self):
        """Check that we can't instantiate abstract class BaseMetadata
        with abstract methods."""
        with self.assertRaises(TypeError):
            # intended instantiation test... So pylint should ignore this.
            # pylint: disable=abstract-class-instantiated
            BaseMetadata('random_layer_id')

    def test_metadata(self):
        """Check we can't set values with unsupported xml types."""
        metadata = ImpactLayerMetadata('random_layer_id')
        path = 'gmd:MD_Metadata/gmd:dateStamp/gco:RandomString'

        # using unsupported xml types must raise
        test_value = 'Random string'
        with self.assertRaises(KeyError):
            metadata.set('ISO19115_TEST', test_value, path)

    def test_insert_xml_element(self):
        """Check we can insert custom nested elements."""
        root = ElementTree.Element('root')
        b = ElementTree.SubElement(root, 'b')
        ElementTree.SubElement(b, 'c')

        new_element_path = 'd/e/f'
        expected_xml = b'<root><b><c /></b><d><e><f>TESTtext</f></e></d></root>'

        element = insert_xml_element(root, new_element_path)
        element.text = 'TESTtext'

        result_xml = ElementTree.tostring(root)
        self.assertEqual(expected_xml, result_xml)
gpl-3.0
pombredanne/blivet-1
tests/devicelibs_test/edd_test.py
1
9403
import mock class EddTestCase(mock.TestCase): def setUp(self): self.setupModules( ['_isys', 'logging', 'pyanaconda.anaconda_log', 'block']) def tearDown(self): self.tearDownModules() def test_biosdev_to_edd_dir(self): from blivet.devicelibs import edd path = edd.biosdev_to_edd_dir(138) self.assertEqual("/sys/firmware/edd/int13_dev8a", path) def test_collect_edd_data(self): from blivet.devicelibs import edd # test with vda, vdb fs = EddTestFS(self, edd).vda_vdb() edd_dict = edd.collect_edd_data() self.assertEqual(len(edd_dict), 2) self.assertEqual(edd_dict[0x80].type, "SCSI") self.assertEqual(edd_dict[0x80].scsi_id, 0) self.assertEqual(edd_dict[0x80].scsi_lun, 0) self.assertEqual(edd_dict[0x80].pci_dev, "00:05.0") self.assertEqual(edd_dict[0x80].channel, 0) self.assertEqual(edd_dict[0x80].sectors, 16777216) self.assertEqual(edd_dict[0x81].pci_dev, "00:06.0") # test with sda, vda fs = EddTestFS(self, edd).sda_vda() edd_dict = edd.collect_edd_data() self.assertEqual(len(edd_dict), 2) self.assertEqual(edd_dict[0x80].type, "ATA") self.assertEqual(edd_dict[0x80].scsi_id, None) self.assertEqual(edd_dict[0x80].scsi_lun, None) self.assertEqual(edd_dict[0x80].pci_dev, "00:01.1") self.assertEqual(edd_dict[0x80].channel, 0) self.assertEqual(edd_dict[0x80].sectors, 2097152) self.assertEqual(edd_dict[0x80].ata_device, 0) self.assertEqual(edd_dict[0x80].mbr_signature, "0x000ccb01") def test_collect_edd_data_cciss(self): from blivet.devicelibs import edd fs = EddTestFS(self, edd).sda_cciss() edd_dict = edd.collect_edd_data() self.assertEqual(edd_dict[0x80].pci_dev, None) self.assertEqual(edd_dict[0x80].channel, None) def test_edd_entry_str(self): from blivet.devicelibs import edd fs = EddTestFS(self, edd).sda_vda() edd_dict = edd.collect_edd_data() expected_output = """\ttype: ATA, ata_device: 0 \tchannel: 0, mbr_signature: 0x000ccb01 \tpci_dev: 00:01.1, scsi_id: None \tscsi_lun: None, sectors: 2097152""" self.assertEqual(str(edd_dict[0x80]), expected_output) def 
test_matcher_device_path(self): from blivet.devicelibs import edd fs = EddTestFS(self, edd).sda_vda() edd_dict = edd.collect_edd_data() analyzer = edd.EddMatcher(edd_dict[0x80]) path = analyzer.devname_from_pci_dev() self.assertEqual(path, "sda") analyzer = edd.EddMatcher(edd_dict[0x81]) path = analyzer.devname_from_pci_dev() self.assertEqual(path, "vda") def test_bad_device_path(self): from blivet.devicelibs import edd fs = EddTestFS(self, edd).sda_vda_no_pcidev() edd_dict = edd.collect_edd_data() analyzer = edd.EddMatcher(edd_dict[0x80]) path = analyzer.devname_from_pci_dev() self.assertEqual(path, None) def test_bad_host_bus(self): from blivet.devicelibs import edd fs = EddTestFS(self, edd).sda_vda_no_host_bus() edd_dict = edd.collect_edd_data() # 0x80 entry is basted so fail without an exception analyzer = edd.EddMatcher(edd_dict[0x80]) devname = analyzer.devname_from_pci_dev() self.assertEqual(devname, None) # but still succeed on 0x81 analyzer = edd.EddMatcher(edd_dict[0x81]) devname = analyzer.devname_from_pci_dev() self.assertEqual(devname, "vda") def test_get_edd_dict_1(self): """ Test get_edd_dict()'s pci_dev matching. """ from blivet.devicelibs import edd fs = EddTestFS(self, edd).sda_vda() self.assertEqual(edd.get_edd_dict([]), {'sda' : 0x80, 'vda' : 0x81}) def test_get_edd_dict_2(self): """ Test get_edd_dict()'s pci_dev matching. """ from blivet.devicelibs import edd edd.collect_mbrs = mock.Mock(return_value = { 'sda' : '0x000ccb01', 'vda' : '0x0006aef1'}) fs = EddTestFS(self, edd).sda_vda_missing_details() self.assertEqual(edd.get_edd_dict([]), {'sda' : 0x80, 'vda' : 0x81}) def test_get_edd_dict_3(self): """ Test scenario when the 0x80 and 0x81 edd directories contain the same data and give no way to distinguish among the two devices. 
""" from blivet.devicelibs import edd edd.log = mock.Mock() edd.collect_mbrs = mock.Mock(return_value={'sda' : '0x000ccb01', 'vda' : '0x0006aef1'}) fs = EddTestFS(self, edd).sda_sdb_same() self.assertEqual(edd.get_edd_dict([]), {}) self.assertIn((('edd: both edd entries 0x80 and 0x81 seem to map to sda',), {}), edd.log.info.call_args_list) class EddTestFS(object): def __init__(self, test_case, target_module): self.fs = mock.DiskIO() test_case.take_over_io(self.fs, target_module) def sda_vda_missing_details(self): self.fs["/sys/firmware/edd/int13_dev80"] = self.fs.Dir() self.fs["/sys/firmware/edd/int13_dev80/mbr_signature"] = "0x000ccb01\n" self.fs["/sys/firmware/edd/int13_dev81"] = self.fs.Dir() self.fs["/sys/firmware/edd/int13_dev81/mbr_signature"] = "0x0006aef1\n" def sda_vda(self): self.fs["/sys/firmware/edd/int13_dev80"] = self.fs.Dir() self.fs["/sys/firmware/edd/int13_dev80/host_bus"] = "PCI 00:01.1 channel: 0\n" self.fs["/sys/firmware/edd/int13_dev80/interface"] = "ATA device: 0\n" self.fs["/sys/firmware/edd/int13_dev80/mbr_signature"] = "0x000ccb01\n" self.fs["/sys/firmware/edd/int13_dev80/sectors"] = "2097152\n" self.fs["/sys/firmware/edd/int13_dev81"] = self.fs.Dir() self.fs["/sys/firmware/edd/int13_dev81/host_bus"] = "PCI 00:05.0 channel: 0\n" self.fs["/sys/firmware/edd/int13_dev81/interface"] = "SCSI id: 0 lun: 0\n" self.fs["/sys/firmware/edd/int13_dev81/mbr_signature"] = "0x0006aef1\n" self.fs["/sys/firmware/edd/int13_dev81/sectors"] = "16777216\n" self.fs["/sys/devices/pci0000:00/0000:00:01.1/host0/target0:0:0/0:0:0:0/block"] = self.fs.Dir() self.fs["/sys/devices/pci0000:00/0000:00:01.1/host0/target0:0:0/0:0:0:0/block/sda"] = self.fs.Dir() self.fs["/sys/devices/pci0000:00/0000:00:05.0/virtio2/block"] = self.fs.Dir() self.fs["/sys/devices/pci0000:00/0000:00:05.0/virtio2/block/vda"] = self.fs.Dir() return self.fs def sda_vda_no_pcidev(self): self.sda_vda() entries = [e for e in self.fs.fs if e.startswith("/sys/devices/pci")] map(self.fs.os_remove, 
entries) return self.fs def sda_vda_no_host_bus(self): self.sda_vda() self.fs["/sys/firmware/edd/int13_dev80/host_bus"] = "PCI 00:01.1 channel: \n" self.fs.os_remove("/sys/firmware/edd/int13_dev80/mbr_signature") self.fs.os_remove("/sys/firmware/edd/int13_dev81/mbr_signature") def sda_cciss(self): self.fs["/sys/firmware/edd/int13_dev80"] = self.fs.Dir() self.fs["/sys/firmware/edd/int13_dev80/host_bus"] = "PCIX 05:00.0 channel: 0\n" self.fs["/sys/firmware/edd/int13_dev80/interface"] = "RAID identity_tag: 0\n" self.fs["/sys/firmware/edd/int13_dev80/mbr_signature"] = "0x000ccb01\n" self.fs["/sys/firmware/edd/int13_dev80/sectors"] = "2097152\n" return self.fs def vda_vdb(self): self.fs["/sys/firmware/edd/int13_dev80"] = self.fs.Dir() self.fs["/sys/firmware/edd/int13_dev80/host_bus"] = "PCI 00:05.0 channel: 0\n" self.fs["/sys/firmware/edd/int13_dev80/interface"] = "SCSI id: 0 lun: 0\n" self.fs["/sys/firmware/edd/int13_dev80/sectors"] = "16777216\n" self.fs["/sys/firmware/edd/int13_dev81"] = self.fs.Dir() self.fs["/sys/firmware/edd/int13_dev81/host_bus"] = "PCI 00:06.0 channel: 0\n" self.fs["/sys/firmware/edd/int13_dev81/interface"] = "SCSI id: 0 lun: 0\n" self.fs["/sys/firmware/edd/int13_dev81/sectors"] = "4194304\n" return self.fs def sda_sdb_same(self): self.fs["/sys/firmware/edd/int13_dev80"] = self.fs.Dir() self.fs["/sys/firmware/edd/int13_dev80/host_bus"] = "PCI 00:01.1 channel: 0\n" self.fs["/sys/firmware/edd/int13_dev80/interface"] = "ATA device: 0\n" self.fs["/sys/firmware/edd/int13_dev80/mbr_signature"] = "0x000ccb01" self.fs["/sys/firmware/edd/int13_dev80/sectors"] = "2097152\n" self.fs["/sys/firmware/edd/int13_dev81"] = self.fs.Dir() self.fs["/sys/firmware/edd/int13_dev81/host_bus"] = "PCI 00:01.1 channel: 0\n" self.fs["/sys/firmware/edd/int13_dev81/interface"] = "ATA device: 0\n" self.fs["/sys/firmware/edd/int13_dev81/mbr_signature"] = "0x0006aef1" self.fs["/sys/firmware/edd/int13_dev81/sectors"] = "2097152\n" 
self.fs["/sys/devices/pci0000:00/0000:00:01.1/host0/target0:0:0/0:0:0:0/block"] = self.fs.Dir() self.fs["/sys/devices/pci0000:00/0000:00:01.1/host0/target0:0:0/0:0:0:0/block/sda"] = self.fs.Dir()
gpl-2.0
bbc/kamaelia
Sketches/MPS/BugReports/FixTests/Kamaelia/Kamaelia/Device/DVB/Parse/ParseEventInformationTable.py
3
25358
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1) # # (1) Kamaelia Contributors are listed in the AUTHORS file and at # http://www.kamaelia.org/AUTHORS - please extend this file, # not this notice. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------------------------------------------------- """\ =============================================== Parsing Event Information Tables in DVB streams =============================================== ParseEventInformationTable parses a reconstructed PSI table from a DVB MPEG Transport Stream, and outputs a dictionary containing the data in the table. The Event Information Table carries data about the programmes being broadcast both now (present-following data) and in the future (schedule data) and is typically used to drive Electronic Progamme Guides, scheduled recording and "now and next" information displays. The purpose of the EIT and details of the fields within in are defined in the DVB SI specification: - ETSI EN 300 468 "Digital Video Broadcasting (DVB); Specification for Service Information (SI) in DVB systems" ETSI / EBU (DVB group) See Kamaelia.Support.DVB.Descriptors for information on how they are parsed. 
Example Usage ~~~~~~~~~~~~~ A simple pipeline to receive, parse and display the "now and next" information for programmes in the current multiplex, from the Event Information Table:: FREQUENCY = 505.833330 feparams = { "inversion" : dvb3.frontend.INVERSION_AUTO, "constellation" : dvb3.frontend.QAM_16, "code_rate_HP" : dvb3.frontend.FEC_3_4, "code_rate_LP" : dvb3.frontend.FEC_3_4, } EIT_PID = 0x12 Pipeline( DVB_Multiplex(FREQUENCY, [NIT_PID], feparams), DVB_Demuxer({ NIT_PID:["outbox"]}), ReassemblePSITables(), ParseEventInformationTable_Subset(actual_presentFollowing=True), PrettifyEventInformationTable(), ConsoleEchoer(), ).run() A slight modification to the pipeline, to convert the parsed tables into a stream of inidividual events:: Pipeline( DVB_Multiplex(FREQUENCY, [NIT_PID], feparams), DVB_Demuxer({ NIT_PID:["outbox"]}), ReassemblePSITables(), ParseEventInformationTable_Subset(actual_presentFollowing=True), SimplifyEIT(), ConsoleEchoer(), ).run() ParseEventInformationTable / ParseEventInformationTable_Subset ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Behaviour --------- At initialisation, specify what sub tables you want ParseEventInformationTable to process (others will be ignored). Event information is grouped into sub tables according to where it is: * 'Actual' data describes programmes broadcast in the same actual multiplex as this data * 'Other' data describes programmes being broadcast in other multiplexes ...and what timeframe it relates to: * 'present following' data describes the now showing (present) and next (following) programme to be shown * 'schedule' data describes programmes being shown later, typically over the next 7 or 8 days. Initialise ParseEventInformationTable by providing a dictionary mapping table ids, to be accepted, to (label, is-present-following-flag) pairs. 
For example, to accept tables of present-following data for this and other multiplexes:: ParseEventInformationTable(acceptTables = { 0x4e : ("ACTUAL", True), 0x4f : ("OTHER", False), } However it is much simpler to use the ParseEventInformationTable_Subset helper funciton to create it for you. For example, the same effect as above can be achieved with:: ParseEventInformationTable_Subset( actual_presentFollowing = True, other_presentFollowing = True, actual_schedule = False, other_schedule = False, ) Send reconstructed PSI table 'sections' to the "inbox" inbox. When all sections of the table have arrived, ParseNetworkInformationTable will parse the table and send it out of its "outbox" outbox. If the table is unchanged since last time it was parsed, then it will not be sent out. Parsed tables are only sent out when they are new or have just changed. Note that an EIT table is likely to arrive, and be parsed in lots of separate fragments. Because of the way the data format is defined, it is impossible for ParseEventInformationTable to know for certain when it has received everything! The parsed table is sent out as a dictionary data structure, like this (list of event descriptors abridged for brevity):: { 'table_id' : 78, 'table_type' : 'EIT', 'current' : 1, 'actual_other' : 'ACTUAL', 'is_present_following': True, 'transport_stream_id' : 4100, 'original_network_id' : 9018, 'events': [ { 'event_id' : 8735, 'running_status': 1, 'free_CA_mode' : 0, 'starttime' : [2006, 12, 22, 11, 0, 0], 'duration' : (0, 30, 0), 'service_id' : 4164 'descriptors': [ (77, {'type': 'short_event', 'name': 'To Buy or Not to Buy', 'text': 'Series that gives buyers the chance to test-drive a property before they buy it. Sarah Walker and Simon Rimmer are in Birmingham, helping a pair of property professionals. 
[S]', 'language_code': 'eng'}), (80, {'type': 'component', 'stream_content': 1, 'component_type': 3, 'text': 'Video 1', 'component_tag': 1, 'content,type': ('video', '16:9 aspect ratio without pan vectors, 25 Hz'), 'language_code': ' '}), (80, {'type': 'component', 'stream_content': 2, 'component_type': 3, 'text': 'Audio 2', 'component_tag': 2, 'content,type': ('audio', 'stereo (2 channel)'), 'language_code': 'eng'}), (80, {'type': 'component', 'stream_content': 3, 'component_type': 16, 'text': 'Subtitling 5', 'component_tag': 5, 'content,type': ('DVB subtitles (normal)', 'with no monitor aspect ratio criticality'), 'language_code': ' '}), (80, {'type': 'component', 'stream_content': 4, 'component_type': 1, 'text': 'Data 6', 'component_tag': 6, 'content,type': (4, 1), 'language_code': ' '}), ..... (84, {'type': 'content', 'contents': '\xa0 '}) ], } ] } The above example is an event for the service BBC ONE, broadcast at 10:06 GMT on 22nd December 2006. It describes a 'present-following' event that doesn't start until 11:00 GMT. It is therefore describing the 'next' programme that will be on the channel/service. If this data is sent on through a PrettifyEventInformationTable component, then the equivalent output is a string containing this (again, abridged for brevity):: EIT received: Table ID : 78 Table is valid for : CURRENT (valid) Actual or Other n/w : ACTUAL Present-Following or Schedule : Present-Following Transport stream id : 4100 Original network id : 9018 Events: Service id : 4164 Running status : 1 (NOT RUNNING) Start datetime (UTC) : 2006-12-22 11:00:00 Duration : 00:30:00 (hh:mm:ss) Scrambled? : NO Event descriptors: Descriptor 0x4d : short_event language_code : 'eng' name : 'To Buy or Not to Buy' text : 'Series that gives buyers the chance to test-drive a property before they buy it. Sarah Walker and Simon Rimmer are in Birmingham, helping a pair of property professionals. 
[S]' Descriptor 0x50 : component component_tag : 1 component_type : 3 content,type : ('video', '16:9 aspect ratio without pan vectors, 25 Hz') language_code : ' ' stream_content : 1 text : 'Video 1' Descriptor 0x50 : component component_tag : 2 component_type : 3 content,type : ('audio', 'stereo (2 channel)') language_code : 'eng' stream_content : 2 text : 'Audio 2' Descriptor 0x50 : component component_tag : 5 component_type : 16 content,type : ('DVB subtitles (normal)', 'with no monitor aspect ratio criticality') language_code : ' ' stream_content : 3 text : 'Subtitling 5' Descriptor 0x50 : component component_tag : 6 component_type : 1 content,type : (4, 1) language_code : ' ' stream_content : 4 text : 'Data 6' ..... Descriptor 0x54 : content contents : '\xa0 ' ParseEventInformationTable can collect the sections of, and then parse the various types of EIT table simultaneously. If a shutdownMicroprocess or producerFinished message is received on the "control" inbox, then it will immediately be sent on out of the "signal" outbox and the component will then immediately terminate. SimplifyEIT ~~~~~~~~~~~ Behaviour --------- Send parsed event information data to the "inbox" inbox, and individual events, in a simplified form, will be sent out the "outbox" outbox one at a time. For example:: { 'event_id' : 8735, 'when' : 'NEXT', 'startdate' : [2006, 12, 22], 'starttime' : [11, 0, 0], 'duration' : (0, 30, 0), 'service' : 4164, 'transportstream': 4100, 'language_code' : 'eng', 'name' : 'To Buy or Not to Buy', 'description' : 'Series that gives buyers the chance to test-drive a property before they buy it. Sarah Walker and Simon Rimmer are in Birmingham, helping a pair of property professionals. 
[S]' } The possible values of the 'when' field are: * "NOW" -- describes a programme that is happening NOW * "NEXT" -- describes a programme that follows the one happening now * "SCHEDULED" -- part of a schedule describing programmes happening over the next few days If a shutdownMicroprocess or producerFinished message is received on the "control" inbox, then it will immediately be sent on out of the "signal" outbox and the component will then immediately terminate. """ from Axon.Component import component from Axon.Ipc import producerFinished,shutdownMicroprocess from Kamaelia.Support.DVB.Descriptors import parseDescriptor from Kamaelia.Support.DVB.CRC import dvbcrc from Kamaelia.Support.DVB.DateTime import parseMJD, unBCD EIT_PID = 0x12 def ParseEventInformationTable_Subset( actual_presentFollowing = True, other_presentFollowing = False, actual_schedule = False, other_schedule = False, ): """\ ParseEventInformationTable_Subset([actual_presentFollowing][,other_presentFollowing][,actual_schedule][,other_schedule] ) -> new ParseEventInformationTable component Returns a ParseEventInformationTable component, configured to parse the table types specified, and ignore all others. 
Keyword arguments:: - actual_presentFollowing -- If True, parse 'present-following' data for this multiplex (default=True) - other_presentFollowing -- If True, parse 'present-following' data for other multiplexes (default=False) - actual_schedule -- If True, parse 'schedule' data for this multiplex (default=False) - other_schedule -- If True, parse 'schedule' data for other multiplexes (default=False) """ acceptTables = {} if actual_presentFollowing: acceptTables[0x4e] = ("ACTUAL", True) if other_presentFollowing: acceptTables[0x4f] = ("OTHER", True) if actual_schedule: for x in range(0x50,0x60): acceptTables[x] = ("ACTUAL", False) if other_schedule: for x in range(0x60,0x70): acceptTables[x] = ("OTHER", False) return ParseEventInformationTable(acceptTables = acceptTables) class ParseEventInformationTable(component): """\ ParseEventInformationTable([acceptTables]) -> new ParseEventInformationTable component. Send reconstructed PSI table sections to the "inbox" inbox. When a complete table is assembled and parsed, the result is sent out of the "outbox" outbox as a dictionary. Doesn't emit anything again until the version number of the table changes. Use ParseEventInformationTable_Subset for simpler initialisation with convenient presets. 
Keyword arguments:: - acceptTables - dict of (table_id,string_description) mappings for tables to be accepted (default={0x4e:("ACTUAL",True)}) """ Inboxes = { "inbox" : "DVB PSI Packets from a single PID containing EIT table sections", "control" : "Shutdown signalling", } Outboxes = { "outbox" : "Parsed EIT table (only when it changes)", "signal" : "Shutdown signalling", } def __init__(self, acceptTables = None): super(ParseEventInformationTable,self).__init__() if not acceptTables: acceptTables = {} acceptTables[0x4e] = ("ACTUAL", True) acceptTables[0x4f] = ("OTHER", True) for x in range(0x50,0x60): acceptTables[x] = ("ACTUAL", False) for x in range(0x60,0x70): acceptTables[x] = ("OTHER", False) self.acceptTables = acceptTables def parseTableSection(self, index, section): (table_id, service_id, current_next, transport_stream_id, original_network_id) = index msg = { "table_type" : "EIT", "table_id" : table_id, "actual_other" : self.acceptTables[table_id][0], "is_present_following": self.acceptTables[table_id][1], "current" : current_next, "transport_stream_id" : transport_stream_id, "original_network_id" : original_network_id, "events" : [], } (data,section_length) = section service_id = (ord(data[3])<<8) + ord(data[4]) i=14 while i < section_length+3-4: e = [ord(data[x]) for x in range(i+0,i+12)] event = { "service_id" : service_id } event["event_id"] = (e[0]<<8) + e[1] # ( Y,M,D, HH,MM,SS ) event["starttime"] = list( parseMJD((e[2]<<8) + e[3]) ) event["starttime"].extend( [unBCD(e[4]), unBCD(e[5]), unBCD(e[6])] ) event["duration"] = unBCD(e[7]), unBCD(e[8]), unBCD(e[9]) event["running_status"] = (e[10] >> 5) & 0x07 event["free_CA_mode"] = e[10] & 0x10 descriptors_length = ((e[10]<<8) + e[11]) & 0x0fff event["descriptors"] = [] i=i+12 descriptors_end = i + descriptors_length while i < descriptors_end: descriptor,i = parseDescriptor(i,data) event['descriptors'].append(descriptor) msg["events"].append(event) return msg def shutdown(self): while 
self.dataReady("control"): msg = self.recv("control") self.send(msg,"signal") if isinstance(msg, (shutdownMicroprocess, producerFinished)): return True return False def main(self): # initialise buffers # ...for holding table sections (until we get complete table) # indexed by (table_id, current_next, transport_stream_id, original_network_id) sections_found = {} latest_versions = {} last_section_numbers = {} while not self.shutdown(): while self.dataReady("inbox"): data = self.recv("inbox") # extract basic info from this PSI packet - enough to work # out what table it is; what section, and the version e = [ord(data[i]) for i in range(0,3) ] table_id = e[0] if table_id not in self.acceptTables.keys(): continue syntax = e[1] & 0x80 if not syntax: continue section_length = ((e[1]<<8) + e[2]) & 0x0fff # now were reasonably certain we've got a correct packet # we'll convert the rest of the packet e = [ord(data[i]) for i in range(0,12) ] service_id = (e[3]<<8) + e[4] version = (e[5] & 0x3e) # no need to >> 1 current_next = e[5] & 0x01 section_number = e[6] last_section_number = e[7] transport_stream_id = (e[8]<<8) + e[9] original_network_id = (e[10]<<8) + e[11] index = (table_id, service_id, current_next, transport_stream_id, original_network_id) # if version number has changed, flush out all previously fetched tables crcpass = False if version != latest_versions.get(index,-1): if not dvbcrc(data[:3+section_length]): continue else: crcpass = True latest_versions[index] = version sections_found[index] = [False]*(last_section_number+1) # if index[0] == 0x50: # print index, section_number if not sections_found[index][section_number]: if crcpass or dvbcrc(data[:3+section_length]): sections_found[index][section_number] = True # because of interesting decisions regarding subtable segments # in the spec (EN 300 468, page 22) we have no way of knowing if # we have received the whole table, so we're just going to parse # each fragment we get and output it (if we've not seen it 
before) tablesection = self.parseTableSection(index, (data, section_length)) # print table['actual_other'], table['pf_schedule'] tablesection["version"] = latest_versions[index] tablesection["section"] = section_number tablesection["last_section"] = len(sections_found[index])-1 self.send( tablesection, "outbox") else: pass # ignore data with a bad crc self.pause() yield 1 class SimplifyEIT(component): """\ SimplifyEIT() -> new SimplifyEIT component. Send parsed EIT messages to the "inbox" inbox, and individual, simplified events will be sent out the "outbox" outbox. """ def shutdown(self): while self.dataReady("control"): msg = self.recv("control") self.send(msg,"signal") if isinstance(msg, (shutdownMicroprocess, producerFinished)): return True return False def main(self): while not self.shutdown(): while self.dataReady("inbox"): eventset = self.recv("inbox") for event in eventset['events']: if eventset['is_present_following']: # is now&next information if event['running_status'] in [1,2]: when = "NEXT" elif event['running_status'] in [3,4]: when = "NOW" else: print "pf",event['running_status'] else: # is schedule data if event['running_status'] in [0,1,2]: when = "SCHEDULED" elif event['running_status'] in [3,4]: when = "NOW" else: print "sched",event['running_status'] name = "" description = "" language = "" for dtype, descriptor in event['descriptors']: if dtype == 77: # descriptor['type'] == "short_event": name = descriptor['name'] description = descriptor['text'] language = descriptor['language_code'] msg = { 'service' : event['service_id'], 'event_id' : event['event_id'], 'when' : when, 'startdate' : event['starttime'][0:3], 'starttime' : event['starttime'][3:6], 'duration' : event['duration'], 'transportstream' : eventset['transport_stream_id'], 'name' : name, 'description' : description, 'language_code' : language, } self.send(msg,"outbox") self.pause() yield 1 __kamaelia_components__ = ( ParseEventInformationTable, SimplifyEIT, ) __kamaelia_prefabs__ = ( 
ParseEventInformationTable_Subset, ) if __name__ == "__main__": from Kamaelia.Chassis.Pipeline import Pipeline from Kamaelia.Util.Console import ConsoleEchoer from Kamaelia.Device.DVB.Core import DVB_Multiplex, DVB_Demuxer from Kamaelia.Device.DVB.Parse.ReassemblePSITables import ReassemblePSITables from Kamaelia.Device.DVB.Parse.PrettifyTables import PrettifyEventInformationTable from Kamaelia.Device.DVB.NowNext import NowNextProgrammeJunctionDetect from Kamaelia.Device.DVB.NowNext import NowNextServiceFilter import dvb3.frontend feparams = { "inversion" : dvb3.frontend.INVERSION_AUTO, "constellation" : dvb3.frontend.QAM_16, "code_rate_HP" : dvb3.frontend.FEC_3_4, "code_rate_LP" : dvb3.frontend.FEC_3_4, } demo="Now and next" # demo="All schedule info" if demo == "Now and next": Pipeline( DVB_Multiplex(505833330.0/1000000.0, [EIT_PID], feparams), DVB_Demuxer({ EIT_PID:["outbox"]}), ReassemblePSITables(), ParseEventInformationTable_Subset(True,False,False,False), # now and next for this mux only SimplifyEIT(), NowNextProgrammeJunctionDetect(), NowNextServiceFilter(4164), ConsoleEchoer(), ).run() elif demo == "All schedule info": Pipeline( DVB_Multiplex(505833330.0/1000000.0, [EIT_PID], feparams), DVB_Demuxer({ EIT_PID:["outbox"]}), ReassemblePSITables(), ParseEventInformationTable_Subset(True,True,True,True), # now and next and schedules for this and other multiplexes PrettifyEventInformationTable(), ConsoleEchoer(), ).run()
apache-2.0
michalliu/OpenWrt-Firefly-Libraries
staging_dir/host/lib/python3.4/multiprocessing/reduction.py
94
8108
# # Module which deals with pickling of objects. # # multiprocessing/reduction.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # import copyreg import functools import io import os import pickle import socket import sys from . import context __all__ = ['send_handle', 'recv_handle', 'ForkingPickler', 'register', 'dump'] HAVE_SEND_HANDLE = (sys.platform == 'win32' or (hasattr(socket, 'CMSG_LEN') and hasattr(socket, 'SCM_RIGHTS') and hasattr(socket.socket, 'sendmsg'))) # # Pickler subclass # class ForkingPickler(pickle.Pickler): '''Pickler subclass used by multiprocessing.''' _extra_reducers = {} _copyreg_dispatch_table = copyreg.dispatch_table def __init__(self, *args): super().__init__(*args) self.dispatch_table = self._copyreg_dispatch_table.copy() self.dispatch_table.update(self._extra_reducers) @classmethod def register(cls, type, reduce): '''Register a reduce function for a type.''' cls._extra_reducers[type] = reduce @classmethod def dumps(cls, obj, protocol=None): buf = io.BytesIO() cls(buf, protocol).dump(obj) return buf.getbuffer() loads = pickle.loads register = ForkingPickler.register def dump(obj, file, protocol=None): '''Replacement for pickle.dump() using ForkingPickler.''' ForkingPickler(file, protocol).dump(obj) # # Platform specific definitions # if sys.platform == 'win32': # Windows __all__ += ['DupHandle', 'duplicate', 'steal_handle'] import _winapi def duplicate(handle, target_process=None, inheritable=False): '''Duplicate a handle. 
(target_process is a handle not a pid!)''' if target_process is None: target_process = _winapi.GetCurrentProcess() return _winapi.DuplicateHandle( _winapi.GetCurrentProcess(), handle, target_process, 0, inheritable, _winapi.DUPLICATE_SAME_ACCESS) def steal_handle(source_pid, handle): '''Steal a handle from process identified by source_pid.''' source_process_handle = _winapi.OpenProcess( _winapi.PROCESS_DUP_HANDLE, False, source_pid) try: return _winapi.DuplicateHandle( source_process_handle, handle, _winapi.GetCurrentProcess(), 0, False, _winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE) finally: _winapi.CloseHandle(source_process_handle) def send_handle(conn, handle, destination_pid): '''Send a handle over a local connection.''' dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid) conn.send(dh) def recv_handle(conn): '''Receive a handle over a local connection.''' return conn.recv().detach() class DupHandle(object): '''Picklable wrapper for a handle.''' def __init__(self, handle, access, pid=None): if pid is None: # We just duplicate the handle in the current process and # let the receiving process steal the handle. pid = os.getpid() proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid) try: self._handle = _winapi.DuplicateHandle( _winapi.GetCurrentProcess(), handle, proc, access, False, 0) finally: _winapi.CloseHandle(proc) self._access = access self._pid = pid def detach(self): '''Get the handle. This should only be called once.''' # retrieve handle from process which currently owns it if self._pid == os.getpid(): # The handle has already been duplicated for this process. return self._handle # We must steal the handle from the process whose pid is self._pid. 
proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, self._pid) try: return _winapi.DuplicateHandle( proc, self._handle, _winapi.GetCurrentProcess(), self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE) finally: _winapi.CloseHandle(proc) else: # Unix __all__ += ['DupFd', 'sendfds', 'recvfds'] import array # On MacOSX we should acknowledge receipt of fds -- see Issue14669 ACKNOWLEDGE = sys.platform == 'darwin' def sendfds(sock, fds): '''Send an array of fds over an AF_UNIX socket.''' fds = array.array('i', fds) msg = bytes([len(fds) % 256]) sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)]) if ACKNOWLEDGE and sock.recv(1) != b'A': raise RuntimeError('did not receive acknowledgement of fd') def recvfds(sock, size): '''Receive an array of fds over an AF_UNIX socket.''' a = array.array('i') bytes_size = a.itemsize * size msg, ancdata, flags, addr = sock.recvmsg(1, socket.CMSG_LEN(bytes_size)) if not msg and not ancdata: raise EOFError try: if ACKNOWLEDGE: sock.send(b'A') if len(ancdata) != 1: raise RuntimeError('received %d items of ancdata' % len(ancdata)) cmsg_level, cmsg_type, cmsg_data = ancdata[0] if (cmsg_level == socket.SOL_SOCKET and cmsg_type == socket.SCM_RIGHTS): if len(cmsg_data) % a.itemsize != 0: raise ValueError a.frombytes(cmsg_data) assert len(a) % 256 == msg[0] return list(a) except (ValueError, IndexError): pass raise RuntimeError('Invalid data received') def send_handle(conn, handle, destination_pid): '''Send a handle over a local connection.''' with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s: sendfds(s, [handle]) def recv_handle(conn): '''Receive a handle over a local connection.''' with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s: return recvfds(s, 1)[0] def DupFd(fd): '''Return a wrapper for an fd.''' popen_obj = context.get_spawning_popen() if popen_obj is not None: return popen_obj.DupFd(popen_obj.duplicate_for_child(fd)) elif HAVE_SEND_HANDLE: from . 
import resource_sharer return resource_sharer.DupFd(fd) else: raise ValueError('SCM_RIGHTS appears not to be available') # # Try making some callable types picklable # def _reduce_method(m): if m.__self__ is None: return getattr, (m.__class__, m.__func__.__name__) else: return getattr, (m.__self__, m.__func__.__name__) class _C: def f(self): pass register(type(_C().f), _reduce_method) def _reduce_method_descriptor(m): return getattr, (m.__objclass__, m.__name__) register(type(list.append), _reduce_method_descriptor) register(type(int.__add__), _reduce_method_descriptor) def _reduce_partial(p): return _rebuild_partial, (p.func, p.args, p.keywords or {}) def _rebuild_partial(func, args, keywords): return functools.partial(func, *args, **keywords) register(functools.partial, _reduce_partial) # # Make sockets picklable # if sys.platform == 'win32': def _reduce_socket(s): from .resource_sharer import DupSocket return _rebuild_socket, (DupSocket(s),) def _rebuild_socket(ds): return ds.detach() register(socket.socket, _reduce_socket) else: def _reduce_socket(s): df = DupFd(s.fileno()) return _rebuild_socket, (df, s.family, s.type, s.proto) def _rebuild_socket(df, family, type, proto): fd = df.detach() return socket.socket(family, type, proto, fileno=fd) register(socket.socket, _reduce_socket)
gpl-2.0
googleapis/python-pubsublite
google/cloud/pubsublite_v1/services/cursor_service/transports/__init__.py
1
1185
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict from typing import Dict, Type from .base import CursorServiceTransport from .grpc import CursorServiceGrpcTransport from .grpc_asyncio import CursorServiceGrpcAsyncIOTransport # Compile a registry of transports. _transport_registry = OrderedDict() # type: Dict[str, Type[CursorServiceTransport]] _transport_registry["grpc"] = CursorServiceGrpcTransport _transport_registry["grpc_asyncio"] = CursorServiceGrpcAsyncIOTransport __all__ = ( "CursorServiceTransport", "CursorServiceGrpcTransport", "CursorServiceGrpcAsyncIOTransport", )
apache-2.0
great-expectations/great_expectations
tests/dataset/test_sparkdfdataset.py
1
14191
import importlib.util import json from unittest import mock import pandas as pd import pytest from great_expectations.dataset.sparkdf_dataset import SparkDFDataset from great_expectations.util import is_library_loadable def test_sparkdfdataset_persist(spark_session): df = pd.DataFrame({"a": [1, 2, 3]}) sdf = spark_session.createDataFrame(df) sdf.persist = mock.MagicMock() _ = SparkDFDataset(sdf, persist=True) sdf.persist.assert_called_once() sdf = spark_session.createDataFrame(df) sdf.persist = mock.MagicMock() _ = SparkDFDataset(sdf, persist=False) sdf.persist.assert_not_called() sdf = spark_session.createDataFrame(df) sdf.persist = mock.MagicMock() _ = SparkDFDataset(sdf) sdf.persist.assert_called_once() @pytest.mark.skipif( not is_library_loadable(library_name="pyspark"), reason="pyspark must be installed", ) @pytest.fixture def test_dataframe(spark_session): from pyspark.sql.types import IntegerType, StringType, StructField, StructType schema = StructType( [ StructField("name", StringType(), True), StructField("age", IntegerType(), True), StructField( "address", StructType( [ StructField("street", StringType(), True), StructField("city", StringType(), True), StructField("house_number", IntegerType(), True), ] ), False, ), StructField("name_duplicate", StringType(), True), StructField("non.nested", StringType(), True), StructField("name_with_duplicates", StringType(), True), StructField("age_with_duplicates", IntegerType(), True), StructField( "address_with_duplicates", StructType( [ StructField("street", StringType(), True), StructField("city", StringType(), True), StructField("house_number", IntegerType(), True), ] ), False, ), ] ) rows = [ ( "Alice", 1, ("Street 1", "Alabama", 10), "Alice", "a", "Alice", 1, ("Street 1", "Alabama", 12), ), ( "Bob", 2, ("Street 2", "Brooklyn", 11), "Bob", "b", "Bob", 2, ("Street 1", "Brooklyn", 12), ), ( "Charlie", 3, ("Street 3", "Alabama", 12), "Charlie", "c", "Charlie", 3, ("Street 1", "Alabama", 12), ), ( "Dan", 4, ("Street 
4", "Boston", 12), "Dan", "d", "Charlie", 3, ("Street 1", "Boston", 12), ), ] rdd = spark_session.sparkContext.parallelize(rows) df = spark_session.createDataFrame(rdd, schema) return SparkDFDataset(df, persist=True) @pytest.mark.skipif( not is_library_loadable(library_name="pyspark"), reason="pyspark must be installed", ) def test_expect_column_values_to_be_of_type(spark_session, test_dataframe): """ data asset expectation """ from pyspark.sql.utils import AnalysisException assert test_dataframe.expect_column_values_to_be_of_type( "address.street", "StringType" ).success assert test_dataframe.expect_column_values_to_be_of_type( "`non.nested`", "StringType" ).success assert test_dataframe.expect_column_values_to_be_of_type( "name", "StringType" ).success with pytest.raises(AnalysisException): test_dataframe.expect_column_values_to_be_of_type("non.nested", "StringType") @pytest.mark.skipif( not is_library_loadable(library_name="pyspark"), reason="pyspark must be installed", ) def test_expect_column_values_to_be_of_type(spark_session, test_dataframe): """ data asset expectation """ from pyspark.sql.utils import AnalysisException assert test_dataframe.expect_column_values_to_be_of_type( "address.street", "StringType" ).success assert test_dataframe.expect_column_values_to_be_of_type( "`non.nested`", "StringType" ).success assert test_dataframe.expect_column_values_to_be_of_type( "name", "StringType" ).success with pytest.raises(AnalysisException): test_dataframe.expect_column_values_to_be_of_type("non.nested", "StringType") @pytest.mark.skipif( not is_library_loadable(library_name="pyspark"), reason="pyspark must be installed", ) def test_expect_column_values_to_be_in_type_list(spark_session, test_dataframe): """ data asset expectation """ from pyspark.sql.utils import AnalysisException assert test_dataframe.expect_column_values_to_be_in_type_list( "address.street", ["StringType", "IntegerType"] ).success assert test_dataframe.expect_column_values_to_be_in_type_list( 
"`non.nested`", ["StringType", "IntegerType"] ).success assert test_dataframe.expect_column_values_to_be_in_type_list( "name", ["StringType", "IntegerType"] ).success with pytest.raises(AnalysisException): test_dataframe.expect_column_values_to_be_of_type("non.nested", "StringType") @pytest.mark.skipif( not is_library_loadable(library_name="pyspark"), reason="pyspark must be installed", ) def test_expect_column_pair_values_to_be_equal(spark_session, test_dataframe): """ column_pair_map_expectation """ from pyspark.sql.utils import AnalysisException assert test_dataframe.expect_column_pair_values_to_be_equal( "name", "name_duplicate" ).success assert not test_dataframe.expect_column_pair_values_to_be_equal( "name", "address.street" ).success assert not test_dataframe.expect_column_pair_values_to_be_equal( "name", "`non.nested`" ).success # Expectation should fail when no `` surround a non-nested column with dot notation with pytest.raises(AnalysisException): test_dataframe.expect_column_pair_values_to_be_equal("name", "non.nested") @pytest.mark.skipif( not is_library_loadable(library_name="pyspark"), reason="pyspark must be installed", ) def test_expect_column_pair_values_A_to_be_greater_than_B( spark_session, test_dataframe ): """ column_pair_map_expectation """ assert test_dataframe.expect_column_pair_values_A_to_be_greater_than_B( "address.house_number", "age" ).success assert test_dataframe.expect_column_pair_values_A_to_be_greater_than_B( "age", "age", or_equal=True ).success @pytest.mark.skipif( not is_library_loadable(library_name="pyspark"), reason="pyspark must be installed", ) def test_expect_select_column_values_to_be_unique_within_record( spark_session, test_dataframe ): """ multicolumn_map_expectation """ from pyspark.sql.utils import AnalysisException assert test_dataframe.expect_select_column_values_to_be_unique_within_record( ["name", "age"] ).success assert test_dataframe.expect_select_column_values_to_be_unique_within_record( ["address.street", 
"name"] ).success assert test_dataframe.expect_select_column_values_to_be_unique_within_record( ["address.street", "`non.nested`"] ).success # Expectation should fail when no `` surround a non-nested column with dot notation with pytest.raises(AnalysisException): test_dataframe.expect_select_column_values_to_be_unique_within_record( ["address.street", "non.nested"] ) @pytest.mark.skipif( not is_library_loadable(library_name="pyspark"), reason="pyspark must be installed", ) def test_expect_compound_columns_to_be_unique(spark_session, test_dataframe): """ multicolumn_map_expectation """ from pyspark.sql.utils import AnalysisException # Positive tests assert test_dataframe.expect_compound_columns_to_be_unique(["name", "age"]).success assert test_dataframe.expect_compound_columns_to_be_unique( ["address.street", "name"] ).success assert test_dataframe.expect_compound_columns_to_be_unique( ["address.street", "address.city"] ).success assert test_dataframe.expect_compound_columns_to_be_unique( ["name_with_duplicates", "age_with_duplicates", "name"] ).success assert test_dataframe.expect_compound_columns_to_be_unique( ["address.street", "`non.nested`"] ).success assert test_dataframe.expect_compound_columns_to_be_unique( ["name", "name_with_duplicates"] ).success assert test_dataframe.expect_compound_columns_to_be_unique( [ "name", "name_with_duplicates", "address_with_duplicates.street", "address_with_duplicates.city", "address_with_duplicates.house_number", ] ).success # Negative tests assert not test_dataframe.expect_compound_columns_to_be_unique( ["address_with_duplicates.city", "address_with_duplicates.house_number"] ).success assert not test_dataframe.expect_compound_columns_to_be_unique( ["name_with_duplicates"] ).success assert not test_dataframe.expect_compound_columns_to_be_unique( ["name_with_duplicates", "address_with_duplicates.street"] ).success assert not test_dataframe.expect_compound_columns_to_be_unique( [ "name_with_duplicates", 
"address_with_duplicates.street", "address_with_duplicates.house_number", ] ).success # Expectation should fail when no `` surround a non-nested column with dot notation with pytest.raises(AnalysisException): test_dataframe.expect_compound_columns_to_be_unique( ["address.street", "non.nested"] ) @pytest.mark.skipif( not is_library_loadable(library_name="pyspark"), reason="pyspark must be installed", ) def test_expect_column_values_to_be_unique(spark_session, test_dataframe): """ column_map_expectation """ from pyspark.sql.utils import AnalysisException assert test_dataframe.expect_column_values_to_be_unique("name").success assert not test_dataframe.expect_column_values_to_be_unique("address.city").success assert test_dataframe.expect_column_values_to_be_unique("`non.nested`").success # Expectation should fail when no `` surround a non-nested column with dot notation with pytest.raises(AnalysisException): test_dataframe.expect_column_values_to_be_unique("non.nested") @pytest.mark.skipif( not is_library_loadable(library_name="pyspark"), reason="pyspark must be installed", ) def test_expect_column_value_lengths_to_be_between(spark_session, test_dataframe): """ column_map_expectation """ assert test_dataframe.expect_column_value_lengths_to_be_between( "name", 3, 7 ).success assert test_dataframe.expect_column_value_lengths_to_be_between( "address.street", 1, 10 ).success @pytest.mark.skipif( not is_library_loadable(library_name="pyspark"), reason="pyspark must be installed", ) def test_expect_column_value_lengths_to_equal(spark_session, test_dataframe): """ column_map_expectation """ assert test_dataframe.expect_column_value_lengths_to_equal("age", 1).success assert test_dataframe.expect_column_value_lengths_to_equal( "address.street", 8 ).success @pytest.mark.skipif( not is_library_loadable(library_name="pyspark"), reason="pyspark must be installed", ) def test_expect_column_values_to_be_json_parseable(spark_session): d1 = json.dumps({"i": [1, 2, 3], "j": 35, "k": 
{"x": "five", "y": 5, "z": "101"}}) d2 = json.dumps({"i": 1, "j": 2, "k": [3, 4, 5]}) d3 = json.dumps({"i": "a", "j": "b", "k": "c"}) d4 = json.dumps( {"i": [4, 5], "j": [6, 7], "k": [8, 9], "l": {4: "x", 5: "y", 6: "z"}} ) inner = { "json_col": [d1, d2, d3, d4], "not_json": [4, 5, 6, 7], "py_dict": [ {"a": 1, "out": 1}, {"b": 2, "out": 4}, {"c": 3, "out": 9}, {"d": 4, "out": 16}, ], "most": [d1, d2, d3, "d4"], } data_reshaped = list(zip(*[v for _, v in inner.items()])) df = spark_session.createDataFrame( data_reshaped, ["json_col", "not_json", "py_dict", "most"] ) D = SparkDFDataset(df) D.set_default_expectation_argument("result_format", "COMPLETE") T = [ { "in": {"column": "json_col"}, "out": { "success": True, "unexpected_list": [], }, }, { "in": {"column": "not_json"}, "out": { "success": False, "unexpected_list": [4, 5, 6, 7], }, }, { "in": {"column": "py_dict"}, "out": { "success": False, "unexpected_list": [ {"a": 1, "out": 1}, {"b": 2, "out": 4}, {"c": 3, "out": 9}, {"d": 4, "out": 16}, ], }, }, { "in": {"column": "most"}, "out": { "success": False, "unexpected_list": ["d4"], }, }, { "in": {"column": "most", "mostly": 0.75}, "out": { "success": True, "unexpected_index_list": [3], "unexpected_list": ["d4"], }, }, ] for t in T: out = D.expect_column_values_to_be_json_parseable(**t["in"]) assert t["out"]["success"] == out.success assert t["out"]["unexpected_list"] == out.result["unexpected_list"]
apache-2.0
DickJC123/mxnet
example/distributed_training/cifar10_kvstore_hvd.py
9
7670
#!/usr/bin/env python # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """cifar10_dist_hvd.py contains code that runs distributed training of a ResNet18 network using Horovod framework""" import argparse import logging import time import random import types import warnings import numpy as np import mxnet as mx from mxnet import autograd, gluon, kv, nd from mxnet.gluon.model_zoo import vision logging.basicConfig(level=logging.INFO) # Training settings parser = argparse.ArgumentParser(description='MXNet CIFAR Example') parser.add_argument('--batch-size', type=int, default=64, help='training batch size per worker (default: 64)') parser.add_argument('--epochs', type=int, default=5, help='number of training epochs (default: 5)') parser.add_argument('--lr', type=float, default=0.01, help='learning rate (default: 0.01)') parser.add_argument('--no-cuda', action='store_true', default=False, help='disable training on GPU (default: False)') args = parser.parse_args() if not args.no_cuda: # Disable CUDA if there are no GPUs. 
if mx.context.num_gpus() == 0: args.no_cuda = True # Transform input data def transform(data, label): return nd.transpose(data.astype(np.float32), (2, 0, 1))/255,\ label.astype(np.float32) # Train a batch using multiple GPUs def train(batch_list, context, network, gluon_trainer, metric): """ Training with multiple GPUs Parameters ---------- batch_list: List list of dataset context: List a list of all GPUs to be used for training network: ResNet gluon_trainer: rain module of gluon """ # Run one forward and backward pass def forward_backward(network, data, labels, metric): with autograd.record(): # Compute outputs outputs = [network(X) for X in data] # Compute the loss losses = [loss(yhat, y) for yhat, y in zip(outputs, labels)] # Run the backward pass (calculate gradients) for l in losses: l.backward() metric.update(preds=outputs, labels=labels) # Use cross entropy loss loss = gluon.loss.SoftmaxCrossEntropyLoss() # Split and load data data = batch_list[0] data = gluon.utils.split_and_load(data, context) # Split and load label label = batch_list[1] label = gluon.utils.split_and_load(label, context) # Run the forward and backward pass forward_backward(network, data, label, metric) # Update the parameters this_batch_size = batch_list[0].shape[0] gluon_trainer.step(this_batch_size) # Evaluate accuracy of the given network using the given data def evaluate(data_iterator, network, context): """ Measure the accuracy of ResNet Parameters ---------- data_iterator: Iter examples of dataset network: ResNet Returns ---------- tuple of array element """ acc = mx.gluon.metric.Accuracy() # Iterate through data and label for i, (data, label) in enumerate(data_iterator): # Get the data and label into the GPU data = data.as_in_context(context) label = label.as_in_context(context) # Get network's output which is a probability distribution # Apply argmax on the probability distribution to get network's # classification. 
output = network(data) predictions = nd.argmax(output, axis=1) # Give network's prediction and the correct label to update the metric acc.update(preds=predictions, labels=label) # Return the accuracy return acc.get()[1] class SplitSampler(gluon.data.sampler.Sampler): """ Split the dataset into `num_parts` parts and sample from the part with index `part_index` Parameters ---------- length: int Number of examples in the dataset num_parts: int Partition the data into multiple parts part_index: int The index of the part to read from """ def __init__(self, length, num_parts=1, part_index=0): # Compute the length of each partition self.part_len = length // num_parts # Compute the start index for this partition self.start = self.part_len * part_index # Compute the end index for this partition self.end = self.start + self.part_len def __iter__(self): # Extract examples between `start` and `end`, shuffle and return them. indices = list(range(self.start, self.end)) random.shuffle(indices) return iter(indices) def __len__(self): return self.part_len # Use Horovod as the KVStore store = kv.create('horovod') # Get the number of workers num_workers = store.num_workers # Create the context based on the local rank of the current process ctx = mx.cpu(store.local_rank) if args.no_cuda else mx.gpu(store.local_rank) # Load the training data train_data = gluon.data.DataLoader(gluon.data.vision.CIFAR10(train=True, transform=transform), args.batch_size, sampler=SplitSampler(50000, num_workers, store.rank)) # Load the test data test_data = gluon.data.DataLoader(gluon.data.vision.CIFAR10(train=False, transform=transform), args.batch_size, shuffle=False) # Load ResNet18 model from GluonCV model zoo net = vision.resnet18_v1() # Initialize the parameters with Xavier initializer net.initialize(mx.init.Xavier(), ctx=ctx) # Use Adam optimizer. Ask trainer to use the distributor kv store. 
trainer = gluon.Trainer(net.collect_params(), optimizer='adam', optimizer_params={'learning_rate': args.lr}, kvstore=store) train_metric = mx.gluon.metric.Accuracy() # Run as many epochs as required for epoch in range(args.epochs): tic = time.time() train_metric.reset() # Iterate through batches and run training using multiple GPUs batch_num = 1 btic = time.time() for batch in train_data: # Train the batch using multiple GPUs train(batch, [ctx], net, trainer, train_metric) if store.rank == 0 and batch_num % 100 == 0: speed = args.batch_size / (time.time() - btic) logging.info('Epoch[{}] Rank [{}] Batch[{}]\tSpeed: {:.2f} samples/sec' .format(epoch, store.rank, batch_num, speed)) logging.info('{} = {:.2f}'.format(*train_metric.get())) btic = time.time() batch_num += 1 elapsed = time.time() - tic # Print test accuracy after every epoch test_accuracy = evaluate(test_data, net, ctx) if store.rank == 0: logging.info("Epoch %d: Test_acc %f" % (epoch, test_accuracy))
apache-2.0
hnakamur/ansible
lib/ansible/plugins/callback/context_demo.py
144
1447
# (C) 2012, Michael DeHaan, <michael.dehaan@gmail.com> # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from ansible.plugins.callback import CallbackBase class CallbackModule(CallbackBase): """ This is a very trivial example of how any callback function can get at play and task objects. play will be 'None' for runner invocations, and task will be None for 'setup' invocations. """ CALLBACK_VERSION = 2.0 CALLBACK_TYPE = 'aggregate' CALLBACK_TYPE = 'context_demo' def v2_on_any(self, *args, **kwargs): i = 0 self._display.display(" --- ARGS ") for a in args: self._display.display(' %s: %s' % (i, a)) i += 1 self._display.display(" --- KWARGS ") for k in kwargs: self._display.display(' %s: %s' % (k, kwargs[k]))
gpl-3.0
Marcelpv96/SITWprac2017
sportsBetting/migrations/0018_auto_20170515_1009.py
1
1050
# -*- coding: utf-8 -*- # Generated by Django 1.10.6 on 2017-05-15 10:09 from __future__ import unicode_literals from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('sportsBetting', '0017_auto_20170510_1614'), ] operations = [ migrations.RemoveField( model_name='event', name='api_id', ), migrations.AddField( model_name='event', name='id', field=models.AutoField(auto_created=True, default=1, primary_key=True, serialize=False, verbose_name='ID'), preserve_default=False, ), migrations.AddField( model_name='team', name='created_by', field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), preserve_default=False, ), ]
gpl-3.0
BeDjango/intef-openedx
openedx/core/djangoapps/credit/api/provider.py
24
15608
""" API for initiating and tracking requests for credit from a provider. """ import datetime import logging import uuid import pytz from django.db import transaction from lms.djangoapps.django_comment_client.utils import JsonResponse from openedx.core.djangoapps.credit.exceptions import ( UserIsNotEligible, CreditProviderNotConfigured, RequestAlreadyCompleted, CreditRequestNotFound, InvalidCreditStatus, ) from openedx.core.djangoapps.credit.models import ( CreditProvider, CreditRequirementStatus, CreditRequest, CreditEligibility, ) from openedx.core.djangoapps.credit.signature import signature, get_shared_secret_key from student.models import User from util.date_utils import to_timestamp # TODO: Cleanup this mess! ECOM-2908 log = logging.getLogger(__name__) def get_credit_providers(providers_list=None): """Retrieve all available credit providers or filter on given providers_list. Arguments: providers_list (list of strings or None): contains list of ids of credit providers or None. Returns: list of credit providers represented as dictionaries Response Values: >>> get_credit_providers(['hogwarts']) [ { "id": "hogwarts", "name": "Hogwarts School of Witchcraft and Wizardry", "url": "https://credit.example.com/", "status_url": "https://credit.example.com/status/", "description: "A new model for the Witchcraft and Wizardry School System.", "enable_integration": false, "fulfillment_instructions": " <p>In order to fulfill credit, Hogwarts School of Witchcraft and Wizardry requires learners to:</p> <ul> <li>Sample instruction abc</li> <li>Sample instruction xyz</li> </ul>", }, ... ] """ return CreditProvider.get_credit_providers(providers_list=providers_list) def get_credit_provider_info(request, provider_id): # pylint: disable=unused-argument """Retrieve the 'CreditProvider' model data against provided credit provider. 
@transaction.atomic
def create_credit_request(course_key, provider_id, username):
    """
    Initiate a request for credit from a credit provider.

    This will return the parameters that the user's browser will need to POST
    to the credit provider.  It does NOT calculate the signature.

    Only users who are eligible for credit (have satisfied all credit
    requirements) are allowed to make requests.

    A provider can be configured either with *integration enabled* or not.
    If automatic integration is disabled, this method will simply return
    a URL to the credit provider and method set to "GET", so the student
    can visit the URL and request credit directly.  No database record
    will be created to track these requests.

    If automatic integration *is* enabled, then this will also return
    the parameters that the user's browser will need to POST to the credit
    provider.  These parameters will be digitally signed using a secret
    key shared with the credit provider.

    A database record will be created to track the request with a
    32-character UUID.  The returned dictionary can be used by the user's
    browser to send a POST request to the credit provider.

    If a pending request already exists, this function should return a
    request description with the same UUID.  (Other parameters, such as
    the user's full name may be different than the original request.)

    If a completed request (either accepted or rejected) already exists,
    this function will raise an exception.  Users are not allowed to make
    additional requests once a request has been completed.

    Arguments:
        course_key (CourseKey): The identifier for the course.
        provider_id (str): The identifier of the credit provider.
        username (str): The user initiating the request.

    Returns:
        dict: {"url": ..., "method": "GET"|"POST", "parameters": {...}}

    Raises:
        UserIsNotEligible: The user has not satisfied eligibility
            requirements for credit.
        CreditProviderNotConfigured: The credit provider has not been
            configured for this course.
        RequestAlreadyCompleted: The user has already submitted a request
            and received a response from the credit provider.

    Example Usage:
        >>> create_credit_request(course.id, "hogwarts", "ron")
        {
            "url": "https://credit.example.com/request",
            "method": "POST",
            "parameters": {
                "request_uuid": "557168d0f7664fe59097106c67c3f847",
                ...
                "signature": "cRCNjkE4IzY+erIjRwOQCpRILgOvXx4q2qvx141BCqI="
            }
        }

    """
    # Eligibility and provider lookups happen first so we fail fast
    # before creating any request records (whole function is atomic).
    try:
        user_eligibility = CreditEligibility.objects.select_related('course').get(
            username=username,
            course__course_key=course_key
        )
        credit_course = user_eligibility.course
        credit_provider = CreditProvider.objects.get(provider_id=provider_id)
    except CreditEligibility.DoesNotExist:
        log.warning(
            u'User "%s" tried to initiate a request for credit in course "%s", '
            u'but the user is not eligible for credit',
            username, course_key
        )
        raise UserIsNotEligible
    except CreditProvider.DoesNotExist:
        log.error(u'Credit provider with ID "%s" has not been configured.', provider_id)
        raise CreditProviderNotConfigured

    # Check if we've enabled automatic integration with the credit
    # provider.  If not, we'll show the user a link to a URL
    # where the user can request credit directly from the provider.
    # Note that we do NOT track these requests in our database,
    # since the state would always be "pending" (we never hear back).
    if not credit_provider.enable_integration:
        return {
            "url": credit_provider.provider_url,
            "method": "GET",
            "parameters": {}
        }
    else:
        # If automatic credit integration is enabled, then try
        # to retrieve the shared signature *before* creating the request.
        # That way, if there's a misconfiguration, we won't have requests
        # in our system that we know weren't sent to the provider.
        shared_secret_key = get_shared_secret_key(credit_provider.provider_id)
        if shared_secret_key is None:
            msg = u'Credit provider with ID "{provider_id}" does not have a secret key configured.'.format(
                provider_id=credit_provider.provider_id
            )
            log.error(msg)
            raise CreditProviderNotConfigured(msg)

    # Initiate a new request if one has not already been created.
    # (course, provider, username) identifies the request; the UUID is
    # assigned below only for freshly created rows.
    credit_request, created = CreditRequest.objects.get_or_create(
        course=credit_course,
        provider=credit_provider,
        username=username,
    )

    # Check whether we've already gotten a response for a request,
    # If so, we're not allowed to issue any further requests.
    # Skip checking the status if we know that we just created this record.
    if not created and credit_request.status != "pending":
        log.warning(
            (
                u'Cannot initiate credit request because the request with UUID "%s" '
                u'exists with status "%s"'
            ), credit_request.uuid, credit_request.status
        )
        raise RequestAlreadyCompleted

    if created:
        credit_request.uuid = uuid.uuid4().hex

    # Retrieve user account and profile info
    user = User.objects.select_related('profile').get(username=username)

    # Retrieve the final grade from the eligibility table.
    # TypeError/KeyError are caught because `reason` may not be a dict
    # containing "final_grade" for malformed rows.
    try:
        final_grade = CreditRequirementStatus.objects.get(
            username=username,
            requirement__namespace="grade",
            requirement__name="grade",
            requirement__course__course_key=course_key,
            status="satisfied"
        ).reason["final_grade"]

        # NOTE (CCB): Limiting the grade to seven characters is a hack for ASU.
        if len(unicode(final_grade)) > 7:
            final_grade = u'{:.5f}'.format(final_grade)
        else:
            final_grade = unicode(final_grade)
    except (CreditRequirementStatus.DoesNotExist, TypeError, KeyError):
        msg = 'Could not retrieve final grade from the credit eligibility table for ' \
              'user [{user_id}] in course [{course_key}].'.format(user_id=user.id, course_key=course_key)
        log.exception(msg)
        raise UserIsNotEligible(msg)

    parameters = {
        "request_uuid": credit_request.uuid,
        "timestamp": to_timestamp(datetime.datetime.now(pytz.UTC)),
        "course_org": course_key.org,
        "course_num": course_key.course,
        "course_run": course_key.run,
        "final_grade": final_grade,
        "user_username": user.username,
        "user_email": user.email,
        "user_full_name": user.profile.name,
        "user_mailing_address": "",
        "user_country": (
            user.profile.country.code
            if user.profile.country.code is not None
            else ""
        ),
    }

    credit_request.parameters = parameters
    credit_request.save()

    if created:
        log.info(u'Created new request for credit with UUID "%s"', credit_request.uuid)
    else:
        log.info(
            u'Updated request for credit with UUID "%s" so the user can re-issue the request',
            credit_request.uuid
        )

    # Sign the parameters using a secret key we share with the credit provider.
    # The signature is added AFTER save(), so it is not persisted with the
    # request parameters in the database on this code path.
    parameters["signature"] = signature(parameters, shared_secret_key)

    return {
        "url": credit_provider.provider_url,
        "method": "POST",
        "parameters": parameters
    }
def get_credit_request_status(username, course_key):
    """Return the status of the user's credit request for a course.

    Looks up the user's latest credit request for the given course across
    any credit provider.  Valid statuses are 'pending', 'approved' and
    'rejected'.

    Args:
        username (str): The username of the user.
        course_key (CourseKey): The course locator key.

    Returns:
        dict: Details of the credit request the user has made, or an
        empty dict if none exists.
    """
    request = CreditRequest.get_user_request_status(username, course_key)
    if not request:
        return {}
    return {
        "uuid": request.uuid,
        "timestamp": request.modified,
        "course_key": request.course.course_key,
        "provider": {
            "id": request.provider.provider_id,
            "display_name": request.provider.display_name,
        },
        "status": request.status,
    }
agpl-3.0
nouiz/pylearn2
pylearn2/termination_criteria/tests/test_init.py
32
3511
""" Tests for pylearn2.termination_criteria.__init__ functions and classes. """ from pylearn2.termination_criteria import EpochCounter from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix from pylearn2.models.mlp import MLP, Softmax from pylearn2.monitor import push_monitor from pylearn2.train import Train from pylearn2.training_algorithms.sgd import SGD import numpy as np def test_epoch_counter(): """ Test epoch counter with max_epochs={True,False} """ def produce_train_obj(new_epochs, max_epochs, model=None): if model is None: model = MLP(layers=[Softmax(layer_name='y', n_classes=2, irange=0.)], nvis=3) else: model = push_monitor(model, 'old_monitor', transfer_experience=True) dataset = DenseDesignMatrix(X=np.random.normal(size=(6, 3)), y=np.random.normal(size=(6, 2))) epoch_counter = EpochCounter(max_epochs=max_epochs, new_epochs=new_epochs) algorithm = SGD(batch_size=2, learning_rate=0.1, termination_criterion=epoch_counter) return Train(dataset=dataset, model=model, algorithm=algorithm) def test_epochs(epochs_seen, n): assert epochs_seen == n, \ "%d epochs seen and should be %d" % (epochs_seen, n) # Tests for 5 new epochs train_obj = produce_train_obj(new_epochs=True, max_epochs=5) train_obj.main_loop() test_epochs(train_obj.model.monitor.get_epochs_seen(), 5) train_obj = produce_train_obj(new_epochs=True, max_epochs=5, model=train_obj.model) train_obj.main_loop() test_epochs(train_obj.model.monitor.get_epochs_seen(), 2 * 5) # Tests for 5 max epochs train_obj = produce_train_obj(new_epochs=False, max_epochs=5) train_obj.main_loop() test_epochs(train_obj.model.monitor.get_epochs_seen(), 5) train_obj = produce_train_obj(new_epochs=False, max_epochs=5, model=train_obj.model) train_obj.main_loop() test_epochs(train_obj.model.monitor.get_epochs_seen(), 5) # Tests for 0 new epochs train_obj = produce_train_obj(new_epochs=True, max_epochs=0) before_train = train_obj.model.get_weights() train_obj.main_loop() after_train = train_obj.model.get_weights() 
test_epochs(train_obj.model.monitor.get_epochs_seen(), 0) assert np.all(before_train == after_train) train_obj = produce_train_obj(new_epochs=True, max_epochs=0, model=train_obj.model) before_train = train_obj.model.get_weights() train_obj.main_loop() after_train = train_obj.model.get_weights() test_epochs(train_obj.model.monitor.get_epochs_seen(), 0) assert np.all(before_train == after_train) # Tests for 0 max epochs train_obj = produce_train_obj(new_epochs=False, max_epochs=0, model=train_obj.model) before_train = train_obj.model.get_weights() train_obj.main_loop() after_train = train_obj.model.get_weights() test_epochs(train_obj.model.monitor.get_epochs_seen(), 0) assert np.all(before_train == after_train)
bsd-3-clause
leviroth/praw
praw/reddit.py
1
20843
"""Provide the Reddit class.""" import configparser import os from itertools import islice try: from update_checker import update_check UPDATE_CHECKER_MISSING = False except ImportError: # pragma: no cover UPDATE_CHECKER_MISSING = True from prawcore import ( Authorizer, DeviceIDAuthorizer, ReadOnlyAuthorizer, Redirect, Requestor, ScriptAuthorizer, TrustedAuthenticator, UntrustedAuthenticator, session, ) from . import models from .config import Config from .const import __version__, API_PATH, USER_AGENT_FORMAT from .exceptions import ClientException from .objector import Objector class Reddit(object): """The Reddit class provides convenient access to reddit's API. Instances of this class are the gateway to interacting with Reddit's API through PRAW. The canonical way to obtain an instance of this class is via: .. code-block:: python import praw reddit = praw.Reddit(client_id='CLIENT_ID', client_secret="CLIENT_SECRET", password='PASSWORD', user_agent='USERAGENT', username='USERNAME') """ update_checked = False @property def _next_unique(self): value = self._unique_counter self._unique_counter += 1 return value @property def read_only(self): """Return True when using the ReadOnlyAuthorizer.""" return self._core == self._read_only_core @read_only.setter def read_only(self, value): """Set or unset the use of the ReadOnlyAuthorizer. Raise :class:`ClientException` when attempting to unset ``read_only`` and only the ReadOnlyAuthorizer is available. """ if value: self._core = self._read_only_core elif self._authorized_core is None: raise ClientException( "read_only cannot be unset as only the " "ReadOnlyAuthorizer is available." ) else: self._core = self._authorized_core def __enter__(self): """Handle the context manager open.""" return self def __exit__(self, *_args): """Handle the context manager close.""" def __init__( self, site_name=None, requestor_class=None, requestor_kwargs=None, **config_settings ): # noqa: D207, D301 """Initialize a Reddit instance. 
:param site_name: The name of a section in your ``praw.ini`` file from which to load settings from. This parameter, in tandem with an appropriately configured ``praw.ini``, file is useful if you wish to easily save credentials for different applications, or communicate with other servers running reddit. If ``site_name`` is ``None``, then the site name will be looked for in the environment variable praw_site. If it is not found there, the DEFAULT site will be used. :param requestor_class: A class that will be used to create a requestor. If not set, use ``prawcore.Requestor`` (default: None). :param requestor_kwargs: Dictionary with additional keyword arguments used to initialize the requestor (default: None). Additional keyword arguments will be used to initialize the :class:`.Config` object. This can be used to specify configuration settings during instantiation of the :class:`.Reddit` instance. For more details please see :ref:`configuration`. Required settings are: * client_id * client_secret (for installed applications set this value to ``None``) * user_agent The ``requestor_class`` and ``requestor_kwargs`` allow for customization of the requestor :class:`.Reddit` will use. This allows, e.g., easily adding behavior to the requestor or wrapping its |Session|_ in a caching layer. Example usage: .. |Session| replace:: ``Session`` .. _Session: https://2.python-requests.org/en/master/api/\ #requests.Session .. 
code-block:: python import json, betamax, requests class JSONDebugRequestor(Requestor): def request(self, *args, **kwargs): response = super().request(*args, **kwargs) print(json.dumps(response.json(), indent=4)) return response my_session = betamax.Betamax(requests.Session()) reddit = Reddit(..., requestor_class=JSONDebugRequestor, requestor_kwargs={'session': my_session}) """ self._core = self._authorized_core = self._read_only_core = None self._objector = None self._unique_counter = 0 try: config_section = site_name or os.getenv("praw_site") or "DEFAULT" self.config = Config(config_section, **config_settings) except configparser.NoSectionError as exc: help_message = ( "You provided the name of a praw.ini " "configuration which does not exist.\n\nFor help " "with creating a Reddit instance, visit\n" "https://praw.readthedocs.io/en/latest/code_overvi" "ew/reddit_instance.html\n\n" "For help on configuring PRAW, visit\n" "https://praw.readthedocs.io/en/latest/getting_sta" "rted/configuration.html" ) if site_name is not None: exc.message += "\n" + help_message raise required_message = ( "Required configuration setting {!r} missing. \n" "This setting can be provided in a praw.ini file, " "as a keyword argument to the `Reddit` class " "constructor, or as an environment variable." ) for attribute in ("client_id", "user_agent"): if getattr(self.config, attribute) in ( self.config.CONFIG_NOT_SET, None, ): raise ClientException(required_message.format(attribute)) if self.config.client_secret is self.config.CONFIG_NOT_SET: raise ClientException( required_message.format("client_secret") + "\nFor installed applications this value " "must be set to None via a keyword argument " "to the `Reddit` class constructor." ) self._check_for_update() self._prepare_objector() self._prepare_prawcore(requestor_class, requestor_kwargs) self.auth = models.Auth(self, None) """An instance of :class:`.Auth`. Provides the interface for interacting with installed and web applications. 
See :ref:`auth_url` """ self.front = models.Front(self) """An instance of :class:`.Front`. Provides the interface for interacting with front page listings. For example: .. code-block:: python for submission in reddit.front.hot(): print(submission) """ self.inbox = models.Inbox(self, None) """An instance of :class:`.Inbox`. Provides the interface to a user's inbox which produces :class:`.Message`, :class:`.Comment`, and :class:`.Submission` instances. For example to iterate through comments which mention the authorized user run: .. code-block:: python for comment in reddit.inbox.mentions(): print(comment) """ self.live = models.LiveHelper(self, None) """An instance of :class:`.LiveHelper`. Provides the interface for working with :class:`.LiveThread` instances. At present only new LiveThreads can be created. .. code-block:: python reddit.live.create('title', 'description') """ self.multireddit = models.MultiredditHelper(self, None) """An instance of :class:`.MultiredditHelper`. Provides the interface to working with :class:`.Multireddit` instances. For example you can obtain a :class:`.Multireddit` instance via: .. code-block:: python reddit.multireddit('samuraisam', 'programming') """ self.redditors = models.Redditors(self, None) """An instance of :class:`.Redditors`. Provides the interface for Redditor discovery. For example to iterate over the newest Redditors, run: .. code-block:: python for redditor in reddit.redditors.new(limit=None): print(redditor) """ self.subreddit = models.SubredditHelper(self, None) """An instance of :class:`.SubredditHelper`. Provides the interface to working with :class:`.Subreddit` instances. For example to create a Subreddit run: .. code-block:: python reddit.subreddit.create('coolnewsubname') To obtain a lazy a :class:`.Subreddit` instance run: .. code-block:: python reddit.subreddit('redditdev') Note that multiple subreddits can be combined and filtered views of /r/all can also be used just like a subreddit: .. 
code-block:: python reddit.subreddit('redditdev+learnpython+botwatch') reddit.subreddit('all-redditdev-learnpython') """ self.subreddits = models.Subreddits(self, None) """An instance of :class:`.Subreddits`. Provides the interface for :class:`.Subreddit` discovery. For example to iterate over the set of default subreddits run: .. code-block:: python for subreddit in reddit.subreddits.default(limit=None): print(subreddit) """ self.user = models.User(self) """An instance of :class:`.User`. Provides the interface to the currently authorized :class:`.Redditor`. For example to get the name of the current user run: .. code-block:: python print(reddit.user.me()) """ def _check_for_update(self): if UPDATE_CHECKER_MISSING: return if not Reddit.update_checked and self.config.check_for_updates: update_check(__package__, __version__) Reddit.update_checked = True def _prepare_objector(self): mappings = { self.config.kinds["comment"]: models.Comment, self.config.kinds["message"]: models.Message, self.config.kinds["redditor"]: models.Redditor, self.config.kinds["submission"]: models.Submission, self.config.kinds["subreddit"]: models.Subreddit, self.config.kinds["trophy"]: models.Trophy, "Button": models.Button, "Collection": models.Collection, "Image": models.Image, "LabeledMulti": models.Multireddit, "Listing": models.Listing, "LiveUpdate": models.LiveUpdate, "LiveUpdateEvent": models.LiveThread, "MenuLink": models.MenuLink, "ModmailAction": models.ModmailAction, "ModmailConversation": models.ModmailConversation, "ModmailMessage": models.ModmailMessage, "Submenu": models.Submenu, "TrophyList": models.TrophyList, "UserList": models.RedditorList, "button": models.ButtonWidget, "calendar": models.Calendar, "community-list": models.CommunityList, "custom": models.CustomWidget, "id-card": models.IDCard, "image": models.ImageWidget, "menu": models.Menu, "modaction": models.ModAction, "moderators": models.ModeratorsWidget, "more": models.MoreComments, "post-flair": 
models.PostFlairWidget, "stylesheet": models.Stylesheet, "subreddit-rules": models.RulesWidget, "textarea": models.TextArea, "widget": models.Widget, } self._objector = Objector(self, mappings) def _prepare_prawcore(self, requestor_class=None, requestor_kwargs=None): requestor_class = requestor_class or Requestor requestor_kwargs = requestor_kwargs or {} requestor = requestor_class( USER_AGENT_FORMAT.format(self.config.user_agent), self.config.oauth_url, self.config.reddit_url, **requestor_kwargs ) if self.config.client_secret: self._prepare_trusted_prawcore(requestor) else: self._prepare_untrusted_prawcore(requestor) def _prepare_trusted_prawcore(self, requestor): authenticator = TrustedAuthenticator( requestor, self.config.client_id, self.config.client_secret, self.config.redirect_uri, ) read_only_authorizer = ReadOnlyAuthorizer(authenticator) self._read_only_core = session(read_only_authorizer) if self.config.username and self.config.password: script_authorizer = ScriptAuthorizer( authenticator, self.config.username, self.config.password ) self._core = self._authorized_core = session(script_authorizer) elif self.config.refresh_token: authorizer = Authorizer(authenticator, self.config.refresh_token) self._core = self._authorized_core = session(authorizer) else: self._core = self._read_only_core def _prepare_untrusted_prawcore(self, requestor): authenticator = UntrustedAuthenticator( requestor, self.config.client_id, self.config.redirect_uri ) read_only_authorizer = DeviceIDAuthorizer(authenticator) self._read_only_core = session(read_only_authorizer) if self.config.refresh_token: authorizer = Authorizer(authenticator, self.config.refresh_token) self._core = self._authorized_core = session(authorizer) else: self._core = self._read_only_core def comment( self, # pylint: disable=invalid-name id=None, # pylint: disable=redefined-builtin url=None, ): """Return a lazy instance of :class:`~.Comment` for ``id``. :param id: The ID of the comment. 
:param url: A permalink pointing to the comment. .. note:: If you want to obtain the comment's replies, you will need to call :meth:`~.Comment.refresh` on the returned :class:`.Comment`. """ return models.Comment(self, id=id, url=url) def domain(self, domain): """Return an instance of :class:`.DomainListing`. :param domain: The domain to obtain submission listings for. """ return models.DomainListing(self, domain) def get(self, path, params=None): """Return parsed objects returned from a GET request to ``path``. :param path: The path to fetch. :param params: The query parameters to add to the request (default: None). """ data = self.request("GET", path, params=params) return self._objector.objectify(data) def info(self, fullnames=None, url=None): """Fetch information about each item in ``fullnames`` or from ``url``. :param fullnames: A list of fullnames for comments, submissions, and/or subreddits. :param url: A url (as a string) to retrieve lists of link submissions from. :returns: A generator that yields found items in their relative order. Items that cannot be matched will not be generated. Requests will be issued in batches for each 100 fullnames. .. note:: For comments that are retrieved via this method, if you want to obtain its replies, you will need to call :meth:`~.Comment.refresh` on the yielded :class:`.Comment`. .. note:: When using the URL option, it is important to be aware that URLs are treated literally by Reddit's API. As such, the URLs "youtube.com" and "https://www.youtube.com" will provide a different set of submissions. 
""" none_count = [fullnames, url].count(None) if none_count > 1: raise TypeError("Either `fullnames` or `url` must be provided.") if none_count < 1: raise TypeError( "Mutually exclusive parameters: `fullnames`, `url`" ) if fullnames is not None: if isinstance(fullnames, str): raise TypeError("`fullnames` must be a non-str iterable.") def generator(fullnames): iterable = iter(fullnames) while True: chunk = list(islice(iterable, 100)) if not chunk: break params = {"id": ",".join(chunk)} for result in self.get(API_PATH["info"], params=params): yield result return generator(fullnames) def generator(url): params = {"url": url} for result in self.get(API_PATH["info"], params=params): yield result return generator(url) def patch(self, path, data=None): """Return parsed objects returned from a PATCH request to ``path``. :param path: The path to fetch. :param data: Dictionary, bytes, or file-like object to send in the body of the request (default: None). """ data = self.request("PATCH", path, data=data) return self._objector.objectify(data) def post(self, path, data=None, files=None, params=None): """Return parsed objects returned from a POST request to ``path``. :param path: The path to fetch. :param data: Dictionary, bytes, or file-like object to send in the body of the request (default: None). :param files: Dictionary, filename to file (like) object mapping (default: None). :param params: The query parameters to add to the request (default: None). """ data = self.request( "POST", path, data=data or {}, files=files, params=params ) return self._objector.objectify(data) def put(self, path, data=None): """Return parsed objects returned from a PUT request to ``path``. :param path: The path to fetch. :param data: Dictionary, bytes, or file-like object to send in the body of the request (default: None). 
""" data = self.request("PUT", path, data=data) return self._objector.objectify(data) def random_subreddit(self, nsfw=False): """Return a random lazy instance of :class:`~.Subreddit`. :param nsfw: Return a random NSFW (not safe for work) subreddit (default: False). """ url = API_PATH["subreddit"].format( subreddit="randnsfw" if nsfw else "random" ) path = None try: self.get(url, params={"unique": self._next_unique}) except Redirect as redirect: path = redirect.path return models.Subreddit(self, path.split("/")[2]) def redditor(self, name): """Return a lazy instance of :class:`~.Redditor` for ``name``. :param name: The name of the redditor. """ return models.Redditor(self, name) def request(self, method, path, params=None, data=None, files=None): """Return the parsed JSON data returned from a request to URL. :param method: The HTTP method (e.g., GET, POST, PUT, DELETE). :param path: The path to fetch. :param params: The query parameters to add to the request (default: None). :param data: Dictionary, bytes, or file-like object to send in the body of the request (default: None). :param files: Dictionary, filename to file (like) object mapping (default: None). """ return self._core.request( method, path, data=data, files=files, params=params ) def submission( # pylint: disable=invalid-name,redefined-builtin self, id=None, url=None ): """Return a lazy instance of :class:`~.Submission`. :param id: A reddit base36 submission ID, e.g., ``2gmzqe``. :param url: A URL supported by :meth:`~praw.models.Submission.id_from_url`.`. Either ``id`` or ``url`` can be provided, but not both. """ return models.Submission(self, id=id, url=url)
bsd-2-clause
lzw120/django
build/lib/django/contrib/gis/gdal/prototypes/srs.py
97
3372
from ctypes import c_char_p, c_int, c_void_p, POINTER from django.contrib.gis.gdal.libgdal import lgdal, std_call from django.contrib.gis.gdal.prototypes.generation import (const_string_output, double_output, int_output, srs_output, string_output, void_output) ## Shortcut generation for routines with known parameters. def srs_double(f): """ Creates a function prototype for the OSR routines that take the OSRSpatialReference object and """ return double_output(f, [c_void_p, POINTER(c_int)], errcheck=True) def units_func(f): """ Creates a ctypes function prototype for OSR units functions, e.g., OSRGetAngularUnits, OSRGetLinearUnits. """ return double_output(f, [c_void_p, POINTER(c_char_p)], strarg=True) # Creation & destruction. clone_srs = srs_output(std_call('OSRClone'), [c_void_p]) new_srs = srs_output(std_call('OSRNewSpatialReference'), [c_char_p]) release_srs = void_output(lgdal.OSRRelease, [c_void_p], errcheck=False) destroy_srs = void_output(std_call('OSRDestroySpatialReference'), [c_void_p], errcheck=False) srs_validate = void_output(lgdal.OSRValidate, [c_void_p]) # Getting the semi_major, semi_minor, and flattening functions. semi_major = srs_double(lgdal.OSRGetSemiMajor) semi_minor = srs_double(lgdal.OSRGetSemiMinor) invflattening = srs_double(lgdal.OSRGetInvFlattening) # WKT, PROJ, EPSG, XML importation routines. from_wkt = void_output(lgdal.OSRImportFromWkt, [c_void_p, POINTER(c_char_p)]) from_proj = void_output(lgdal.OSRImportFromProj4, [c_void_p, c_char_p]) from_epsg = void_output(std_call('OSRImportFromEPSG'), [c_void_p, c_int]) from_xml = void_output(lgdal.OSRImportFromXML, [c_void_p, c_char_p]) from_user_input = void_output(std_call('OSRSetFromUserInput'), [c_void_p, c_char_p]) # Morphing to/from ESRI WKT. 
morph_to_esri = void_output(lgdal.OSRMorphToESRI, [c_void_p]) morph_from_esri = void_output(lgdal.OSRMorphFromESRI, [c_void_p]) # Identifying the EPSG identify_epsg = void_output(lgdal.OSRAutoIdentifyEPSG, [c_void_p]) # Getting the angular_units, linear_units functions linear_units = units_func(lgdal.OSRGetLinearUnits) angular_units = units_func(lgdal.OSRGetAngularUnits) # For exporting to WKT, PROJ.4, "Pretty" WKT, and XML. to_wkt = string_output(std_call('OSRExportToWkt'), [c_void_p, POINTER(c_char_p)]) to_proj = string_output(std_call('OSRExportToProj4'), [c_void_p, POINTER(c_char_p)]) to_pretty_wkt = string_output(std_call('OSRExportToPrettyWkt'), [c_void_p, POINTER(c_char_p), c_int], offset=-2) # Memory leak fixed in GDAL 1.5; still exists in 1.4. to_xml = string_output(lgdal.OSRExportToXML, [c_void_p, POINTER(c_char_p), c_char_p], offset=-2) # String attribute retrival routines. get_attr_value = const_string_output(std_call('OSRGetAttrValue'), [c_void_p, c_char_p, c_int]) get_auth_name = const_string_output(lgdal.OSRGetAuthorityName, [c_void_p, c_char_p]) get_auth_code = const_string_output(lgdal.OSRGetAuthorityCode, [c_void_p, c_char_p]) # SRS Properties isgeographic = int_output(lgdal.OSRIsGeographic, [c_void_p]) islocal = int_output(lgdal.OSRIsLocal, [c_void_p]) isprojected = int_output(lgdal.OSRIsProjected, [c_void_p]) # Coordinate transformation new_ct= srs_output(std_call('OCTNewCoordinateTransformation'), [c_void_p, c_void_p]) destroy_ct = void_output(std_call('OCTDestroyCoordinateTransformation'), [c_void_p], errcheck=False)
bsd-3-clause
kostimarko/portfolio-v2
node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/styles/colorful.py
364
2778
# -*- coding: utf-8 -*- """ pygments.styles.colorful ~~~~~~~~~~~~~~~~~~~~~~~~ A colorful style, inspired by CodeRay. :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.style import Style from pygments.token import Keyword, Name, Comment, String, Error, \ Number, Operator, Generic, Whitespace class ColorfulStyle(Style): """ A colorful style, inspired by CodeRay. """ default_style = "" styles = { Whitespace: "#bbbbbb", Comment: "#888", Comment.Preproc: "#579", Comment.Special: "bold #cc0000", Keyword: "bold #080", Keyword.Pseudo: "#038", Keyword.Type: "#339", Operator: "#333", Operator.Word: "bold #000", Name.Builtin: "#007020", Name.Function: "bold #06B", Name.Class: "bold #B06", Name.Namespace: "bold #0e84b5", Name.Exception: "bold #F00", Name.Variable: "#963", Name.Variable.Instance: "#33B", Name.Variable.Class: "#369", Name.Variable.Global: "bold #d70", Name.Constant: "bold #036", Name.Label: "bold #970", Name.Entity: "bold #800", Name.Attribute: "#00C", Name.Tag: "#070", Name.Decorator: "bold #555", String: "bg:#fff0f0", String.Char: "#04D bg:", String.Doc: "#D42 bg:", String.Interpol: "bg:#eee", String.Escape: "bold #666", String.Regex: "bg:#fff0ff #000", String.Symbol: "#A60 bg:", String.Other: "#D20", Number: "bold #60E", Number.Integer: "bold #00D", Number.Float: "bold #60E", Number.Hex: "bold #058", Number.Oct: "bold #40E", Generic.Heading: "bold #000080", Generic.Subheading: "bold #800080", Generic.Deleted: "#A00000", Generic.Inserted: "#00A000", Generic.Error: "#FF0000", Generic.Emph: "italic", Generic.Strong: "bold", Generic.Prompt: "bold #c65d09", Generic.Output: "#888", Generic.Traceback: "#04D", Error: "#F00 bg:#FAA" }
mit
Softmotions/edx-platform
cms/djangoapps/contentstore/management/commands/fix_not_found.py
108
1110
""" Script for fixing the item not found errors in a course """ from django.core.management.base import BaseCommand, CommandError from opaque_keys.edx.keys import CourseKey from xmodule.modulestore.django import modulestore from xmodule.modulestore import ModuleStoreEnum # To run from command line: ./manage.py cms fix_not_found course-v1:org+course+run class Command(BaseCommand): """Fix a course's item not found errors""" help = "Fix a course's ItemNotFound errors" def handle(self, *args, **options): "Execute the command" if len(args) != 1: raise CommandError("requires 1 argument: <course_id>") course_key = CourseKey.from_string(args[0]) # for now only support on split mongo # pylint: disable=protected-access owning_store = modulestore()._get_modulestore_for_courselike(course_key) if hasattr(owning_store, 'fix_not_found'): owning_store.fix_not_found(course_key, ModuleStoreEnum.UserID.mgmt_command) else: raise CommandError("The owning modulestore does not support this command.")
agpl-3.0
forumber/Temiz_Kernel_G3
tools/perf/scripts/python/futex-contention.py
11261
1486
# futex contention # (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com> # Licensed under the terms of the GNU GPL License version 2 # # Translation of: # # http://sourceware.org/systemtap/wiki/WSFutexContention # # to perf python scripting. # # Measures futex contention import os, sys sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from Util import * process_names = {} thread_thislock = {} thread_blocktime = {} lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time process_names = {} # long-lived pid-to-execname mapping def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm, nr, uaddr, op, val, utime, uaddr2, val3): cmd = op & FUTEX_CMD_MASK if cmd != FUTEX_WAIT: return # we don't care about originators of WAKE events process_names[tid] = comm thread_thislock[tid] = uaddr thread_blocktime[tid] = nsecs(s, ns) def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm, nr, ret): if thread_blocktime.has_key(tid): elapsed = nsecs(s, ns) - thread_blocktime[tid] add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed) del thread_blocktime[tid] del thread_thislock[tid] def trace_begin(): print "Press control+C to stop and show the summary" def trace_end(): for (tid, lock) in lock_waits: min, max, avg, count = lock_waits[tid, lock] print "%s[%d] lock %x contended %d times, %d avg ns" % \ (process_names[tid], tid, lock, count, avg)
gpl-2.0
Dr-Drive/hycohanz
examples/create_sphere.py
1
1265
import hycohanz as hfss raw_input('Press "Enter" to connect to HFSS.>') [oAnsoftApp, oDesktop] = hfss.setup_interface() raw_input('Press "Enter" to create a new project.>') oProject = hfss.new_project(oDesktop) raw_input('Press "Enter" to insert a new DrivenModal design named HFSSDesign1.>') oDesign = hfss.insert_design(oProject, "HFSSDesign1", "DrivenModal") raw_input('Press "Enter" to set the active editor to "3D Modeler" (The default and only known correct value).>') oEditor = hfss.set_active_editor(oDesign) raw_input('Press "Enter" to insert some circle properties into the design.>') hfss.add_property(oDesign, "xcenter", hfss.Expression("1m")) hfss.add_property(oDesign, "ycenter", hfss.Expression("2m")) hfss.add_property(oDesign, "zcenter", hfss.Expression("3m")) hfss.add_property(oDesign, "diam", hfss.Expression("1m")) raw_input('Press "Enter" to draw a circle using the properties.>') hfss.create_sphere(oEditor, hfss.Expression("xcenter"), hfss.Expression("ycenter"), hfss.Expression("zcenter"), hfss.Expression("diameter")/2) raw_input('Press "Enter" to quit HFSS.>') hfss.quit_application(oDesktop) del oEditor del oDesign del oProject del oDesktop del oAnsoftApp
bsd-2-clause
oceanobservatories/mi-instrument
mi/core/instrument/instrument_driver.py
3
51213
#!/usr/bin/env python """ @package ion.services.mi.instrument_driver Instrument driver structures @file ion/services/mi/instrument_driver.py @author Edward Hunter @brief Instrument driver classes that provide structure towards interaction with individual instruments in the system. """ import json import random import time from collections import deque from threading import Thread from requests import ConnectionError from mi.core.common import BaseEnum from mi.core.exceptions import TestModeException from mi.core.exceptions import NotImplementedException from mi.core.exceptions import InstrumentException from mi.core.exceptions import InstrumentParameterException from mi.core.exceptions import InstrumentConnectionException from mi.core.instrument.instrument_fsm import ThreadSafeFSM from mi.core.instrument.port_agent_client import PortAgentClient, PortAgentPacket from mi.core.log import get_logger, get_logging_metaclass from mi.core.service_registry import ConsulServiceRegistry __author__ = 'Steve Foley' __license__ = 'Apache 2.0' log = get_logger() META_LOGGER = get_logging_metaclass('trace') STARTING_RECONNECT_INTERVAL = .5 MAXIMUM_RECONNECT_INTERVAL = 256 MAXIMUM_CONSUL_QUERIES = 5 MAXIMUM_BACKOFF = 5 # seconds MAX_DATA_BUFFER_LEN = 1000 class AutoDiscoverFailure(Exception): pass class ConfigMetadataKey(BaseEnum): """ Keys used in the metadata structure that describes the driver, commands, and parameters used in the driver and protocol. """ DRIVER = 'driver' COMMANDS = 'commands' PARAMETERS = 'parameters' class DriverConfigKey(BaseEnum): """ Dictionary keys for driver config objects """ PARAMETERS = 'parameters' SCHEDULER = 'scheduler' # This is a copy since we can't import from pyon. class ResourceAgentState(BaseEnum): """ Resource agent common states. 
""" POWERED_DOWN = 'RESOURCE_AGENT_STATE_POWERED_DOWN' UNINITIALIZED = 'RESOURCE_AGENT_STATE_UNINITIALIZED' INACTIVE = 'RESOURCE_AGENT_STATE_INACTIVE' IDLE = 'RESOURCE_AGENT_STATE_IDLE' STOPPED = 'RESOURCE_AGENT_STATE_STOPPED' COMMAND = 'RESOURCE_AGENT_STATE_COMMAND' STREAMING = 'RESOURCE_AGENT_STATE_STREAMING' TEST = 'RESOURCE_AGENT_STATE_TEST' CALIBRATE = 'RESOURCE_AGENT_STATE_CALIBRATE' DIRECT_ACCESS = 'RESOURCE_AGENT_STATE_DIRECT_ACCESS' BUSY = 'RESOURCE_AGENT_STATE_BUSY' LOST_CONNECTION = 'RESOURCE_AGENT_STATE_LOST_CONNECTION' ACTIVE_UNKNOWN = 'RESOURCE_AGENT_STATE_ACTIVE_UNKNOWN' class ResourceAgentEvent(BaseEnum): """ Resource agent common events. """ ENTER = 'RESOURCE_AGENT_EVENT_ENTER' EXIT = 'RESOURCE_AGENT_EVENT_EXIT' POWER_UP = 'RESOURCE_AGENT_EVENT_POWER_UP' POWER_DOWN = 'RESOURCE_AGENT_EVENT_POWER_DOWN' INITIALIZE = 'RESOURCE_AGENT_EVENT_INITIALIZE' RESET = 'RESOURCE_AGENT_EVENT_RESET' GO_ACTIVE = 'RESOURCE_AGENT_EVENT_GO_ACTIVE' GO_INACTIVE = 'RESOURCE_AGENT_EVENT_GO_INACTIVE' RUN = 'RESOURCE_AGENT_EVENT_RUN' CLEAR = 'RESOURCE_AGENT_EVENT_CLEAR' PAUSE = 'RESOURCE_AGENT_EVENT_PAUSE' RESUME = 'RESOURCE_AGENT_EVENT_RESUME' GO_COMMAND = 'RESOURCE_AGENT_EVENT_GO_COMMAND' GO_DIRECT_ACCESS = 'RESOURCE_AGENT_EVENT_GO_DIRECT_ACCESS' GET_RESOURCE = 'RESOURCE_AGENT_EVENT_GET_RESOURCE' SET_RESOURCE = 'RESOURCE_AGENT_EVENT_SET_RESOURCE' EXECUTE_RESOURCE = 'RESOURCE_AGENT_EVENT_EXECUTE_RESOURCE' GET_RESOURCE_STATE = 'RESOURCE_AGENT_EVENT_GET_RESOURCE_STATE' GET_RESOURCE_CAPABILITIES = 'RESOURCE_AGENT_EVENT_GET_RESOURCE_CAPABILITIES' DONE = 'RESOURCE_AGENT_EVENT_DONE' PING_RESOURCE = 'RESOURCE_AGENT_PING_RESOURCE' LOST_CONNECTION = 'RESOURCE_AGENT_EVENT_LOST_CONNECTION' AUTORECONNECT = 'RESOURCE_AGENT_EVENT_AUTORECONNECT' GET_RESOURCE_SCHEMA = 'RESOURCE_AGENT_EVENT_GET_RESOURCE_SCHEMA' CHANGE_STATE_ASYNC = 'RESOURCE_AGENT_EVENT_CHANGE_STATE_ASYNC' class DriverState(BaseEnum): """Common driver state enum""" UNCONFIGURED = 'DRIVER_STATE_UNCONFIGURED' DISCONNECTED = 
'DRIVER_STATE_DISCONNECTED' CONNECTING = 'DRIVER_STATE_CONNECTING' DISCONNECTING = 'DRIVER_STATE_DISCONNECTING' CONNECTED = 'DRIVER_STATE_CONNECTED' ACQUIRE_SAMPLE = 'DRIVER_STATE_ACQUIRE_SAMPLE' UPDATE_PARAMS = 'DRIVER_STATE_UPDATE_PARAMS' SET = 'DRIVER_STATE_SET' SLEEP = 'DRIVER_STATE_SLEEP' class DriverProtocolState(BaseEnum): """ Base states for driver protocols. Subclassed for specific driver protocols. """ AUTOSAMPLE = 'DRIVER_STATE_AUTOSAMPLE' TEST = 'DRIVER_STATE_TEST' CALIBRATE = 'DRIVER_STATE_CALIBRATE' COMMAND = 'DRIVER_STATE_COMMAND' DIRECT_ACCESS = 'DRIVER_STATE_DIRECT_ACCESS' UNKNOWN = 'DRIVER_STATE_UNKNOWN' POLL = 'DRIVER_STATE_POLL' class DriverConnectionState(BaseEnum): """ Base states for driver connections. """ UNCONFIGURED = 'DRIVER_STATE_UNCONFIGURED' DISCONNECTED = 'DRIVER_STATE_DISCONNECTED' CONNECTED = 'DRIVER_STATE_CONNECTED' INST_DISCONNECTED = 'DRIVER_STATE_INSTRUMENT_DISCONNECTED' class DriverEvent(BaseEnum): """ Base events for driver state machines. Commands and other events are transformed into state machine events for handling. 
""" ENTER = 'DRIVER_EVENT_ENTER' EXIT = 'DRIVER_EVENT_EXIT' INITIALIZE = 'DRIVER_EVENT_INITIALIZE' CONFIGURE = 'DRIVER_EVENT_CONFIGURE' CONNECT = 'DRIVER_EVENT_CONNECT' CONNECTION_LOST = 'DRIVER_CONNECTION_LOST' PA_CONNECTION_LOST = 'DRIVER_PA_CONNECTION_LOST' DISCONNECT = 'DRIVER_EVENT_DISCONNECT' SET = 'DRIVER_EVENT_SET' GET = 'DRIVER_EVENT_GET' DISCOVER = 'DRIVER_EVENT_DISCOVER' EXECUTE = 'DRIVER_EVENT_EXECUTE' ACQUIRE_SAMPLE = 'DRIVER_EVENT_ACQUIRE_SAMPLE' START_AUTOSAMPLE = 'DRIVER_EVENT_START_AUTOSAMPLE' STOP_AUTOSAMPLE = 'DRIVER_EVENT_STOP_AUTOSAMPLE' TEST = 'DRIVER_EVENT_TEST' RUN_TEST = 'DRIVER_EVENT_RUN_TEST' STOP_TEST = 'DRIVER_EVENT_STOP_TEST' CALIBRATE = 'DRIVER_EVENT_CALIBRATE' RESET = 'DRIVER_EVENT_RESET' UPDATE_PARAMS = 'DRIVER_EVENT_UPDATE_PARAMS' BREAK = 'DRIVER_EVENT_BREAK' EXECUTE_DIRECT = 'EXECUTE_DIRECT' START_DIRECT = 'DRIVER_EVENT_START_DIRECT' STOP_DIRECT = 'DRIVER_EVENT_STOP_DIRECT' PING_DRIVER = 'DRIVER_EVENT_PING_DRIVER' FORCE_STATE = 'DRIVER_FORCE_STATE' CLOCK_SYNC = 'DRIVER_EVENT_CLOCK_SYNC' SCHEDULED_CLOCK_SYNC = 'DRIVER_EVENT_SCHEDULED_CLOCK_SYNC' ACQUIRE_STATUS = 'DRIVER_EVENT_ACQUIRE_STATUS' INIT_PARAMS = 'DRIVER_EVENT_INIT_PARAMS' GAP_RECOVERY = 'DRIVER_EVENT_GAP_RECOVERY' GAP_RECOVERY_COMPLETE = 'DRIVER_EVENT_GAP_RECOVERY_COMPLETE' class DriverAsyncEvent(BaseEnum): """ Asynchronous driver event types. """ STATE_CHANGE = 'DRIVER_ASYNC_EVENT_STATE_CHANGE' CONFIG_CHANGE = 'DRIVER_ASYNC_EVENT_CONFIG_CHANGE' SAMPLE = 'DRIVER_ASYNC_EVENT_SAMPLE' ERROR = 'DRIVER_ASYNC_EVENT_ERROR' RESULT = 'DRIVER_ASYNC_RESULT' DIRECT_ACCESS = 'DRIVER_ASYNC_EVENT_DIRECT_ACCESS' AGENT_EVENT = 'DRIVER_ASYNC_EVENT_AGENT_EVENT' DRIVER_CONFIG = 'DRIVER_ASYNC_EVENT_DRIVER_CONFIG' class DriverParameter(BaseEnum): """ Base driver parameters. Subclassed by specific drivers with device specific parameters. """ ALL = 'DRIVER_PARAMETER_ALL' class InstrumentDriver(object): """ Base class for instrument drivers. 
""" def __init__(self, event_callback): """ Constructor. @param event_callback The driver process callback used to send asynchronous driver events to the agent. """ self._send_event = event_callback self._test_mode = False ############################################################# # Device connection interface. ############################################################# def set_test_mode(self, mode): """ Enable test mode for the driver. If this mode is invoked then the user has access to test_ commands. @param mode: test mode state """ self._test_mode = True if mode else False def initialize(self, *args, **kwargs): """ Initialize driver connection, bringing communications parameters into unconfigured state (no connection object). @raises InstrumentStateException if command not allowed in current state @raises NotImplementedException if not implemented by subclass. """ raise NotImplementedException('initialize() not implemented.') def configure(self, *args, **kwargs): """ Configure the driver for communications with the device via port agent / logger (valid but unconnected connection object). @param arg[0] comms config dict. @raises InstrumentStateException if command not allowed in current state @throws InstrumentParameterException if missing comms or invalid config dict. @raises NotImplementedException if not implemented by subclass. """ raise NotImplementedException('configure() not implemented.') def connect(self, *args, **kwargs): """ Establish communications with the device via port agent / logger (connected connection object). @raises InstrumentStateException if command not allowed in current state @throws InstrumentConnectionException if the connection failed. @raises NotImplementedException if not implemented by subclass. """ raise NotImplementedException('connect() not implemented.') def disconnect(self, *args, **kwargs): """ Disconnect from device via port agent / logger. 
@raises InstrumentStateException if command not allowed in current state @raises NotImplementedException if not implemented by subclass. """ raise NotImplementedException('disconnect() not implemented.') ############################################################# # Command and control interface. ############################################################# def discover_state(self, *args, **kwargs): """ Determine initial state upon establishing communications. @param timeout=timeout Optional command timeout. @retval Current device state. @raises InstrumentTimeoutException if could not wake device. @raises InstrumentStateException if command not allowed in current state or if device state not recognized. @raises NotImplementedException if not implemented by subclass. """ raise NotImplementedException('discover_state() is not implemented.') def get_resource_capabilities(self, *args, **kwargs): """ Return driver commands and parameters. @param current_state True to retrieve commands available in current state, otherwise return all commands. @retval list of AgentCapability objects representing the drivers capabilities. @raises NotImplementedException if not implemented by subclass. """ raise NotImplementedException('get_resource_capabilities() is not implemented.') def get_resource_state(self, *args, **kwargs): """ Return the current state of the driver. @retval str current driver state. @raises NotImplementedException if not implemented by subclass. """ raise NotImplementedException('get_resource_state() is not implemented.') def get_resource(self, *args, **kwargs): """ Retrieve device parameters. @param args[0] DriverParameter.ALL or a list of parameters to retrieve @retval parameter : value dict. @raises InstrumentParameterException if missing or invalid get parameters. @raises InstrumentStateException if command not allowed in current state @raises NotImplementedException if not implemented by subclass. 
""" raise NotImplementedException('get_resource() is not implemented.') def set_resource(self, *args, **kwargs): """ Set device parameters. @param args[0] parameter : value dict of parameters to set. @param timeout=timeout Optional command timeout. @raises InstrumentParameterException if missing or invalid set parameters. @raises InstrumentTimeoutException if could not wake device or no response. @raises InstrumentProtocolException if set command not recognized. @raises InstrumentStateException if command not allowed in current state. @raises NotImplementedException if not implemented by subclass. """ raise NotImplementedException('set_resource() not implemented.') def execute_resource(self, *args, **kwargs): """ Execute a driver command. @param timeout=timeout Optional command timeout. @ retval Command specific. @raises InstrumentTimeoutException if could not wake device or no response. @raises InstrumentProtocolException if command not recognized. @raises InstrumentStateException if command not allowed in current state. @raises NotImplementedException if not implemented by subclass. """ raise NotImplementedException('execute_resource() not implemented.') def start_direct(self, *args, **kwargs): """ Start direct access mode @param timeout=timeout Optional command timeout. @ retval Command specific. @raises NotImplementedException if not implemented by subclass. """ raise NotImplementedException('execute_resource() not implemented.') def stop_direct(self, *args, **kwargs): """ Stop direct access mode @param timeout=timeout Optional command timeout. @ retval Command specific. @raises NotImplementedException if not implemented by subclass. """ raise NotImplementedException('execute_resource() not implemented.') ######################################################################## # Event interface. 
######################################################################## def _driver_event(self, event_type, val=None): """ Construct and send an asynchronous driver event. @param event_type a DriverAsyncEvent type specifier. @param val event value for sample and test result events. """ event = { 'type': event_type, 'value': None, 'time': time.time() } if event_type == DriverAsyncEvent.STATE_CHANGE: state = self.get_resource_state() event['value'] = state log.info('STATE CHANGE: %r', event) self._send_event(event) elif event_type == DriverAsyncEvent.CONFIG_CHANGE: config = self.get_resource(DriverParameter.ALL) event['value'] = config self._send_event(event) elif event_type == DriverAsyncEvent.SAMPLE: event['value'] = val self._send_event(event) elif event_type == DriverAsyncEvent.ERROR: event['value'] = val self._send_event(event) elif event_type == DriverAsyncEvent.RESULT: event['value'] = val self._send_event(event) elif event_type == DriverAsyncEvent.DIRECT_ACCESS: event['value'] = val self._send_event(event) elif event_type == DriverAsyncEvent.AGENT_EVENT: event['value'] = val self._send_event(event) elif event_type == DriverAsyncEvent.DRIVER_CONFIG: event['value'] = val self._send_event(event) ######################################################################## # Test interface. ######################################################################## def driver_ping(self, msg): """ Echo a message. @param msg the message to prepend and echo back to the caller. """ reply = 'driver_ping: %r %s' % (self, msg) return reply def test_exceptions(self, msg): """ Test exception handling in the driver process. @param msg message string to put in a raised exception to be caught in a test. @raises InstrumentException always. """ raise InstrumentException(msg) # noinspection PyMethodMayBeStatic,PyProtectedMember,PyUnusedLocal class SingleConnectionInstrumentDriver(InstrumentDriver): """ Base class for instrument drivers with a single device connection. 
Provides connection state logic for single connection drivers. This is the base class for the majority of driver implementation classes. """ __metaclass__ = META_LOGGER def __init__(self, event_callback, refdes=None): """ Constructor for singly connected instrument drivers. @param event_callback Callback to the driver process to send asynchronous driver events back to the agent. """ InstrumentDriver.__init__(self, event_callback) # The one and only instrument connection. # Exists in the connected state. self._connection = None # The one and only instrument protocol. self._protocol = None # Reference Designator to the port agent service self.refdes = refdes # Build connection state machine. self._connection_fsm = ThreadSafeFSM(DriverConnectionState, DriverEvent, DriverEvent.ENTER, DriverEvent.EXIT) # Add handlers for all events. handlers = { DriverState.UNCONFIGURED: [ (DriverEvent.ENTER, self._handler_unconfigured_enter), (DriverEvent.EXIT, self._handler_unconfigured_exit), (DriverEvent.INITIALIZE, self._handler_unconfigured_initialize), (DriverEvent.CONFIGURE, self._handler_unconfigured_configure), ], DriverConnectionState.DISCONNECTED: [ (DriverEvent.ENTER, self._handler_disconnected_enter), (DriverEvent.EXIT, self._handler_disconnected_exit), (DriverEvent.INITIALIZE, self._handler_disconnected_initialize), (DriverEvent.CONFIGURE, self._handler_disconnected_configure), (DriverEvent.CONNECT, self._handler_disconnected_connect), ], DriverConnectionState.INST_DISCONNECTED: [ (DriverEvent.ENTER, self._handler_inst_disconnected_enter), (DriverEvent.EXIT, self._handler_inst_disconnected_exit), (DriverEvent.CONNECT, self._handler_inst_disconnected_connect), (DriverEvent.CONNECTION_LOST, self._handler_connected_connection_lost), (DriverEvent.PA_CONNECTION_LOST, self._handler_inst_disconnected_pa_connection_lost), ], DriverConnectionState.CONNECTED: [ (DriverEvent.ENTER, self._handler_connected_enter), (DriverEvent.EXIT, self._handler_connected_exit), 
(DriverEvent.DISCONNECT, self._handler_connected_disconnect), (DriverEvent.CONNECTION_LOST, self._handler_connected_connection_lost), (DriverEvent.PA_CONNECTION_LOST, self._handler_connected_pa_connection_lost), (DriverEvent.DISCOVER, self._handler_connected_protocol_event), (DriverEvent.GET, self._handler_connected_protocol_event), (DriverEvent.SET, self._handler_connected_protocol_event), (DriverEvent.EXECUTE, self._handler_connected_protocol_event), (DriverEvent.FORCE_STATE, self._handler_connected_protocol_event), (DriverEvent.START_DIRECT, self._handler_connected_start_direct_event), (DriverEvent.STOP_DIRECT, self._handler_connected_stop_direct_event), ], } for state in handlers: for event, handler in handlers[state]: self._connection_fsm.add_handler(state, event, handler) self._pre_da_config = {} self._port_agent_config = {} self._startup_config = {} # Idempotency flag for lost connections. # This set to false when a connection is established to # allow for lost callback to become activated. self._connection_lost = True # Autoconnect flag # Set this to false to disable autoconnect self._autoconnect = True self._reconnect_interval = STARTING_RECONNECT_INTERVAL self._max_reconnect_interval = MAXIMUM_RECONNECT_INTERVAL # Start state machine. self._connection_fsm.start(DriverConnectionState.UNCONFIGURED) # initialize the data buffer # queue any data received while transitioning # between INST_DISCONNECTED and CONNECTED self._data_buffer = deque(maxlen=MAX_DATA_BUFFER_LEN) ############################################################# # Device connection interface. ############################################################# def initialize(self, *args, **kwargs): """ Initialize driver connection, bringing communications parameters into unconfigured state (no connection object). @raises InstrumentStateException if command not allowed in current state """ # Forward event and argument to the connection FSM. 
return self._connection_fsm.on_event(DriverEvent.INITIALIZE, *args, **kwargs) def configure(self, *args, **kwargs): """ Configure the driver for communications with the device via port agent / logger (valid but unconnected connection object). @param arg[0] comms config dict. @raises InstrumentStateException if command not allowed in current state @throws InstrumentParameterException if missing comms or invalid config dict. """ # Forward event and argument to the connection FSM. return self._connection_fsm.on_event(DriverEvent.CONFIGURE, *args, **kwargs) def connect(self, *args, **kwargs): """ Establish communications with the device via port agent / logger (connected connection object). @raises InstrumentStateException if command not allowed in current state @throws InstrumentConnectionException if the connection failed. """ # Forward event and argument to the connection FSM. result = self._connection_fsm.on_event(DriverEvent.CONNECT, *args, **kwargs) def disconnect(self, *args, **kwargs): """ Disconnect from device via port agent / logger. @raises InstrumentStateException if command not allowed in current state """ # Disable autoconnect if manually disconnected self._autoconnect = False # Forward event and argument to the connection FSM. return self._connection_fsm.on_event(DriverEvent.DISCONNECT, *args, **kwargs) ############################################################# # Configuration logic ############################################################# def get_init_params(self): """ get the driver initialization parameters @return driver configuration dictionary """ return self._startup_config def set_init_params(self, config): """ Set the initialization parameters down in the protocol and store the driver configuration in the driver. If the protocol hasn't been setup yet cache the config. Next time this method is called, if you call it with an empty config it will read from the cache. 
@param config This default configuration assumes a structure driver configuration dict with keys named in DriverConfigKey. Stranger parameters can be adjusted by over riding this method. @raise InstrumentParameterException If the config cannot be applied """ if not isinstance(config, dict): raise InstrumentParameterException("Incompatible initialization parameters") if self._protocol: param_config = None if config: param_config = config elif self._startup_config: param_config = self._startup_config if param_config: self._protocol.set_init_params(param_config) self._protocol.initialize_scheduler() if config: self._startup_config = config def apply_startup_params(self): """ Apply the startup values previously stored in the protocol to the running config of the live instrument. The startup values are the values that are (1) marked as startup parameters and are (2) the "best" value to use at startup. Preference is given to the previously-set init value, then the default value, then the currently used value. This default implementation simply pushes the logic down into the protocol for processing should the action be better accomplished down there. The driver writer can decide to overload this method in the derived driver class and apply startup parameters in the driver (likely calling some get and set methods for the resource). If the driver does not implement an apply_startup_params() method in the driver, this method will call into the protocol. Deriving protocol classes are expected to implement an apply_startup_params() method lest they get the exception from the base InstrumentProtocol implementation. """ log.debug("Base driver applying startup params...") self._protocol.apply_startup_params() def get_cached_config(self): """ Return the configuration object that shows the instrument's configuration as cached in the protocol parameter dictionary. @retval The running configuration in the instruments config format. 
By default, it is a dictionary of parameter names and values. """ if self._protocol: return self._protocol.get_cached_config() def get_config_metadata(self): """ Return the configuration metadata object in JSON format @retval The description of the parameters, commands, and driver info in a JSON string @see https://confluence.oceanobservatories.org/display/syseng/ CIAD+MI+SV+Instrument+Driver-Agent+parameter+and+command+metadata+exchange """ log.debug("Getting metadata from driver...") protocol = self._protocol if protocol: log.debug("Getting metadata from protocol...") return self._protocol.get_config_metadata_dict() def restore_direct_access_params(self, config): """ Restore the correct values out of the full config that is given when returning from direct access. By default, this takes a simple dict of param name and value. Override this class as needed as it makes some simple assumptions about how your instrument sets things. @param config The configuration that was previously saved (presumably to disk somewhere by the driver that is working with this protocol) """ vals = {} # for each parameter that is read only, restore da_params = self._protocol.get_direct_access_params() for param in da_params: vals[param] = config[param] log.debug("Restore DA Parameters: %r", vals) self.set_resource(vals, True) ############################################################# # Command and control interface. ############################################################# def discover_state(self, *args, **kwargs): """ Determine initial state upon establishing communications. @param timeout=timeout Optional command timeout. @retval Current device state. @raises InstrumentTimeoutException if could not wake device. @raises InstrumentStateException if command not allowed in current state or if device state not recognized. @raises NotImplementedException if not implemented by subclass. """ # Forward event and argument to the protocol FSM. 
return self._connection_fsm.on_event( DriverEvent.DISCOVER, DriverEvent.DISCOVER, *args, **kwargs) def get_resource_capabilities(self, current_state=True, *args, **kwargs): """ Return driver commands and parameters. @param current_state True to retrieve commands available in current state, otherwise return all commands. @retval list of AgentCapability objects representing the drivers capabilities. @raises NotImplementedException if not implemented by subclass. """ if self._protocol: return self._protocol.get_resource_capabilities(current_state) else: return [[], []] def get_resource_state(self, *args, **kwargs): """ Return the current state of the driver. @retval str current driver state. @raises NotImplementedException if not implemented by subclass. """ connection_state = self._connection_fsm.get_current_state() if self._protocol: return self._protocol.get_current_state() else: return connection_state def get_resource(self, *args, **kwargs): """ Retrieve device parameters. @param args[0] DriverParameter.ALL or a list of parameters to retrieve. @retval parameter : value dict. @raises InstrumentParameterException if missing or invalid get parameters. @raises InstrumentStateException if command not allowed in current state @raises NotImplementedException if not implemented by subclass. """ # Forward event and argument to the protocol FSM. return self._connection_fsm.on_event(DriverEvent.GET, DriverEvent.GET, *args, **kwargs) def set_resource(self, *args, **kwargs): """ Set device parameters. @param args[0] parameter : value dict of parameters to set. @param timeout=timeout Optional command timeout. @raises InstrumentParameterException if missing or invalid set parameters. @raises InstrumentTimeoutException if could not wake device or no response. @raises InstrumentProtocolException if set command not recognized. @raises InstrumentStateException if command not allowed in current state. @raises NotImplementedException if not implemented by subclass. 
""" # Forward event and argument to the protocol FSM. return self._connection_fsm.on_event(DriverEvent.SET, DriverEvent.SET, *args, **kwargs) def execute_resource(self, resource_cmd, *args, **kwargs): """ Poll for a sample. @param timeout=timeout Optional command timeout. @ retval Device sample dict. @raises InstrumentTimeoutException if could not wake device or no response. @raises InstrumentProtocolException if acquire command not recognized. @raises InstrumentStateException if command not allowed in current state. @raises NotImplementedException if not implemented by subclass. """ # Forward event and argument to the protocol FSM. return self._connection_fsm.on_event(DriverEvent.EXECUTE, resource_cmd, *args, **kwargs) def start_direct(self, *args, **kwargs): """ start direct access mode @param timeout=timeout Optional command timeout. @ retval Device sample dict. @raises InstrumentTimeoutException if could not wake device or no response. @raises InstrumentProtocolException if acquire command not recognized. @raises InstrumentStateException if command not allowed in current state. @raises NotImplementedException if not implemented by subclass. """ # Need to pass the event as a parameter because the event handler to capture the current # pre-da config requires it. return self._connection_fsm.on_event(DriverEvent.START_DIRECT, DriverEvent.START_DIRECT) def execute_direct(self, *args, **kwargs): """ execute direct access command @param timeout=timeout Optional command timeout. @ retval Device sample dict. @raises InstrumentTimeoutException if could not wake device or no response. @raises InstrumentProtocolException if acquire command not recognized. @raises InstrumentStateException if command not allowed in current state. @raises NotImplementedException if not implemented by subclass. 
""" return self.execute_resource(DriverEvent.EXECUTE_DIRECT, *args, **kwargs) def stop_direct(self, *args, **kwargs): """ stop direct access mode @param timeout=timeout Optional command timeout. @ retval Device sample dict. @raises InstrumentTimeoutException if could not wake device or no response. @raises InstrumentProtocolException if acquire command not recognized. @raises InstrumentStateException if command not allowed in current state. @raises NotImplementedException if not implemented by subclass. """ return self._connection_fsm.on_event(DriverEvent.STOP_DIRECT, DriverEvent.STOP_DIRECT) def test_force_state(self, *args, **kwargs): """ Force driver into a given state for the purposes of unit testing @param state=desired_state Required desired state to change to. @raises InstrumentParameterException if no state parameter. @raises TestModeException if not in test mode """ if not self._test_mode: raise TestModeException() # Get the required param state = kwargs.get('state', None) # via kwargs if state is None: raise InstrumentParameterException('Missing state parameter.') # We are mucking with internal FSM parameters which may be bad. # The alternative was to raise an event to change the state. Don't # know which is better. self._protocol._protocol_fsm.current_state = state ######################################################################## # Unconfigured handlers. ######################################################################## def _handler_unconfigured_enter(self, *args, **kwargs): """ Enter unconfigured state. """ # Send state change event to agent. self._driver_event(DriverAsyncEvent.STATE_CHANGE) # attempt to auto-configure from consul self._auto_config_with_backoff() def _handler_unconfigured_exit(self, *args, **kwargs): """ Exit unconfigured state. """ pass def _handler_unconfigured_initialize(self, *args, **kwargs): """ Initialize handler. We are already in unconfigured state, do nothing. @retval (next_state, result) tuple, (None, None). 
""" return None, None def _handler_unconfigured_configure(self, *args, **kwargs): """ Configure driver for device comms. @param args[0] Communications config dictionary. @retval (next_state, result) tuple, (DriverConnectionState.DISCONNECTED, None) if successful, (None, None) otherwise. @raises InstrumentParameterException if missing or invalid param dict. """ # Verify configuration dict, and update connection if possible. try: self._connection = self._build_connection(*args, **kwargs) except AutoDiscoverFailure: self._auto_config_with_backoff() return None, None except InstrumentException: self._auto_config_with_backoff() raise return DriverConnectionState.DISCONNECTED, None ######################################################################## # Disconnected handlers. ######################################################################## def _handler_disconnected_enter(self, *args, **kwargs): """ Enter disconnected state. """ # Send state change event to agent. self._driver_event(DriverAsyncEvent.STATE_CHANGE) if self._autoconnect: self._async_raise_event(DriverEvent.CONNECT, *args, **kwargs) def _handler_disconnected_exit(self, *args, **kwargs): """ Exit disconnected state. """ pass def _handler_disconnected_initialize(self, *args, **kwargs): """ Initialize device communications. Causes the connection parameters to be reset. @retval (next_state, result) tuple, (DriverConnectionState.UNCONFIGURED, None). """ self._connection = None return DriverConnectionState.UNCONFIGURED, None def _handler_disconnected_configure(self, *args, **kwargs): """ Configure driver for device comms. @param args[0] Communications config dictionary. @retval (next_state, result) tuple, (None, None). @raises InstrumentParameterException if missing or invalid param dict. """ # Verify configuration dict, and update connection if possible. 
try: self._connection = self._build_connection(*args, **kwargs) except AutoDiscoverFailure: return DriverConnectionState.UNCONFIGURED, None return None, None def _handler_disconnected_connect(self, *args, **kwargs): """ Establish communications with the device via port agent / logger and construct and initialize a protocol FSM for device interaction. @retval (next_state, result) tuple, (DriverConnectionState.CONNECTED, None) if successful. """ result = None try: self._connection.init_comms() next_state = DriverConnectionState.INST_DISCONNECTED except InstrumentConnectionException as e: log.error("Connection Exception: %s", e) log.error("Instrument Driver returning to unconfigured state.") next_state = DriverConnectionState.UNCONFIGURED init_config = {} if len(args) > 0 and isinstance(args[0], dict): init_config = args[0] self.set_init_params(init_config) return next_state, result ######################################################################## # PA Connected handlers. ######################################################################## def _handler_inst_disconnected_enter(self, *args, **kwargs): """ Enter connected state. """ self._connection_lost = False # reset the reconnection interval to 1 self._reconnect_interval = STARTING_RECONNECT_INTERVAL # Send state change event to agent. self._driver_event(DriverAsyncEvent.STATE_CHANGE) def _handler_inst_disconnected_exit(self, *args, **kwargs): """ Exit connected state. """ pass def _handler_inst_disconnected_connect(self, *args, **kwargs): self._build_protocol() self.set_init_params({}) self._protocol._connection = self._connection return DriverConnectionState.CONNECTED, None def _handler_inst_disconnected_pa_connection_lost(self, *args, **kwargs): return None, None ######################################################################## # Connected handlers. 
######################################################################## def _handler_connected_enter(self, *args, **kwargs): """ Enter connected state. """ # Send state change event to agent. self._driver_event(DriverAsyncEvent.STATE_CHANGE) # If we have any buffered data, feed to protocol if self._data_buffer: for each in self._data_buffer: try: self._protocol.got_data(each) except Exception as e: self._driver_event(DriverAsyncEvent.ERROR, e) self._data_buffer.clear() def _handler_connected_exit(self, *args, **kwargs): """ Exit connected state. """ pass def _handler_connected_disconnect(self, *args, **kwargs): """ Disconnect to the device via port agent / logger and destroy the protocol FSM. @retval (next_state, result) tuple, (DriverConnectionState.DISCONNECTED, None) if successful. """ log.info("_handler_connected_disconnect: invoking stop_comms().") self._connection.stop_comms() self._destroy_protocol() return DriverConnectionState.UNCONFIGURED, None def _handler_connected_connection_lost(self, *args, **kwargs): """ The device connection was lost. Stop comms, destroy protocol FSM and revert to disconnected state. @retval (next_state, result) tuple, (DriverConnectionState.DISCONNECTED, None). """ log.info("_handler_connected_connection_lost: invoking stop_comms().") self._connection.stop_comms() self._destroy_protocol() # Send async agent state change event. log.info("_handler_connected_connection_lost: sending LOST_CONNECTION " "event, moving to UNCONFIGURED state.") self._driver_event(DriverAsyncEvent.AGENT_EVENT, ResourceAgentEvent.LOST_CONNECTION) return DriverConnectionState.UNCONFIGURED, None def _handler_connected_pa_connection_lost(self, *args, **kwargs): self._destroy_protocol() self._data_buffer.clear() return DriverConnectionState.INST_DISCONNECTED, None def _handler_connected_protocol_event(self, event, *args, **kwargs): """ Forward a driver command event to the protocol FSM. @param args positional arguments to pass on. 
@param kwargs keyword arguments to pass on. @retval (next_state, result) tuple, (None, protocol result). """ next_state = None if event == DriverEvent.START_DIRECT: return self._handler_connected_start_direct_event(event, *args, **kwargs) elif event == DriverEvent.STOP_DIRECT: return self._handler_connected_stop_direct_event(event, *args, **kwargs) result = self._protocol._protocol_fsm.on_event(event, *args, **kwargs) return next_state, result def _handler_connected_start_direct_event(self, event, *args, **kwargs): """ Stash the current config first, then forward a driver command event to the protocol FSM. @param args positional arguments to pass on. @param kwargs keyword arguments to pass on. @retval (next_state, result) tuple, (None, protocol result). """ next_state = None # Get the value for all direct access parameters and store them in the protocol self._pre_da_config = self.get_resource(self._protocol.get_direct_access_params()) self._protocol.store_direct_access_config(self._pre_da_config) self._protocol.enable_da_initialization() log.debug("starting DA. Storing DA parameters for restore: %s", self._pre_da_config) result = self._protocol._protocol_fsm.on_event(event, *args, **kwargs) return next_state, result def _handler_connected_stop_direct_event(self, event, *args, **kwargs): """ Restore previous config first, then forward a driver command event to the protocol FSM. @param args positional arguments to pass on. @param kwargs keyword arguments to pass on. @retval (next_state, result) tuple, (None, protocol result). """ next_state = None result = self._protocol._protocol_fsm.on_event(event, *args, **kwargs) # Moving the responsibility for applying DA parameters to the # protocol. # self.restore_direct_access_params(self._pre_da_config) return next_state, result ######################################################################## # Helpers. 
######################################################################## def _build_connection(self, *args, **kwargs): """ Constructs and returns a Connection object according to the given configuration. The connection object is a LoggerClient instance in this base class. Subclasses can overwrite this operation as needed. The value returned by this operation is assigned to self._connection and also to self._protocol._connection upon entering in the DriverConnectionState.CONNECTED state. @param config configuration dict @retval a Connection instance, which will be assigned to self._connection @throws InstrumentParameterException Invalid configuration. """ # Get required config param dict. config = kwargs.get('config', None) # via kwargs if config is None and len(args) > 0: config = args[0] # via first argument if config is None: config = self._get_port_agent_config(self.refdes) if config is None: raise AutoDiscoverFailure() if 'mock_port_agent' in config: mock_port_agent = config['mock_port_agent'] # check for validity here... if mock_port_agent is not None: return mock_port_agent try: addr = config['addr'] port = config['port'] cmd_port = config.get('cmd_port') if isinstance(addr, basestring) and isinstance(port, int) and len(addr) > 0: return PortAgentClient(addr, port, cmd_port, self._got_data, self._lost_connection_callback) else: raise InstrumentParameterException('Invalid comms config dict.') except (TypeError, KeyError): raise InstrumentParameterException('Invalid comms config dict.') def _get_port_agent_config(self, tag): """ Query consul for the port agent service configuration parameters: data port, command port, and address This will retry a specified number of times with exponential backoff. """ return ConsulServiceRegistry.locate_port_agent(tag) def _got_exception(self, exception): """ Callback for the client for exception handling with async data. Exceptions are wrapped in an event and sent up to the agent. 
""" try: log.error("ASYNC Data Exception Detected: %s (%s)", exception.__class__.__name__, str(exception)) finally: self._driver_event(DriverAsyncEvent.ERROR, exception) def _got_data(self, port_agent_packet): if isinstance(port_agent_packet, Exception): return self._got_exception(port_agent_packet) if isinstance(port_agent_packet, PortAgentPacket): packet_type = port_agent_packet.get_header_type() data = port_agent_packet.get_data() if packet_type == PortAgentPacket.PORT_AGENT_CONFIG: try: paconfig = json.loads(data) self._port_agent_config = paconfig self._driver_event(DriverAsyncEvent.DRIVER_CONFIG, paconfig) except ValueError as e: log.exception('Unable to parse port agent config: %r %r', data, e) elif packet_type == PortAgentPacket.PORT_AGENT_STATUS: log.debug('Received PORT AGENT STATUS: %r', data) current_state = self._connection_fsm.get_current_state() if data == 'DISCONNECTED': if current_state == DriverConnectionState.CONNECTED: self._async_raise_event(DriverEvent.PA_CONNECTION_LOST) elif data == 'CONNECTED': if current_state == DriverConnectionState.INST_DISCONNECTED: self._async_raise_event(DriverEvent.CONNECT) else: if self._protocol: self._protocol.got_data(port_agent_packet) else: # queue this data up for once the protocol has been started self._data_buffer.append(port_agent_packet) def _lost_connection_callback(self): """ A callback invoked by the port agent client when it loses connectivity to the port agent. """ if not self._connection_lost: log.info("_lost_connection_callback: starting thread to send " "CONNECTION_LOST event to instrument driver.") self._connection_lost = True self._async_raise_event(DriverEvent.CONNECTION_LOST) else: log.info("_lost_connection_callback: connection_lost flag true.") def _build_protocol(self): """ Construct device specific single connection protocol FSM. Overridden in device specific subclasses. 
""" pass def _auto_config_with_backoff(self): # randomness to prevent all instrument drivers from trying to reconnect at the same exact time. self._reconnect_interval = self._reconnect_interval * 2 + random.uniform(-.5, .5) self._reconnect_interval = min(self._reconnect_interval, self._max_reconnect_interval) self._async_raise_event(DriverEvent.CONFIGURE, event_delay=self._reconnect_interval, check_state=True) log.info('Created delayed CONFIGURE event with %.2f second delay', self._reconnect_interval) def _async_raise_event(self, event, *args, **kwargs): delay = kwargs.pop('event_delay', 0) check_state = kwargs.pop('check_state', False) def event_in_state(): fsm_state = self._connection_fsm.current_state return (fsm_state, event) in self._connection_fsm.state_handlers def inner(): try: time.sleep(delay) if not check_state or event_in_state(): log.info('Async raise event: %r', event) self._connection_fsm.on_event(event) except Exception as exc: log.exception('Exception in asynchronous thread: %r', exc) self._driver_event(DriverAsyncEvent.ERROR, exc) log.info('_async_raise_fsm_event: event complete. bub bye thread. (%r)', args) thread = Thread(target=inner) thread.daemon = True thread.start() def _destroy_protocol(self): if self._protocol: self._protocol.shutdown() self._protocol = None
bsd-2-clause
bnprk/django-oscar
src/oscar/apps/basket/abstract_models.py
22
28676
from decimal import Decimal as D import zlib from django.db import models from django.db.models import Sum from django.conf import settings from django.utils.encoding import python_2_unicode_compatible, smart_text from django.utils.timezone import now from django.utils.translation import ugettext_lazy as _ from django.core.exceptions import ObjectDoesNotExist, PermissionDenied from oscar.apps.basket.managers import OpenBasketManager, SavedBasketManager from oscar.apps.offer import results from oscar.core.utils import get_default_currency from oscar.core.compat import AUTH_USER_MODEL from oscar.templatetags.currency_filters import currency @python_2_unicode_compatible class AbstractBasket(models.Model): """ Basket object """ # Baskets can be anonymously owned - hence this field is nullable. When a # anon user signs in, their two baskets are merged. owner = models.ForeignKey( AUTH_USER_MODEL, related_name='baskets', null=True, verbose_name=_("Owner")) # Basket statuses # - Frozen is for when a basket is in the process of being submitted # and we need to prevent any changes to it. OPEN, MERGED, SAVED, FROZEN, SUBMITTED = ( "Open", "Merged", "Saved", "Frozen", "Submitted") STATUS_CHOICES = ( (OPEN, _("Open - currently active")), (MERGED, _("Merged - superceded by another basket")), (SAVED, _("Saved - for items to be purchased later")), (FROZEN, _("Frozen - the basket cannot be modified")), (SUBMITTED, _("Submitted - has been ordered at the checkout")), ) status = models.CharField( _("Status"), max_length=128, default=OPEN, choices=STATUS_CHOICES) # A basket can have many vouchers attached to it. However, it is common # for sites to only allow one voucher per basket - this will need to be # enforced in the project's codebase. 
vouchers = models.ManyToManyField( 'voucher.Voucher', verbose_name=_("Vouchers"), blank=True) date_created = models.DateTimeField(_("Date created"), auto_now_add=True) date_merged = models.DateTimeField(_("Date merged"), null=True, blank=True) date_submitted = models.DateTimeField(_("Date submitted"), null=True, blank=True) # Only if a basket is in one of these statuses can it be edited editable_statuses = (OPEN, SAVED) class Meta: abstract = True app_label = 'basket' verbose_name = _('Basket') verbose_name_plural = _('Baskets') objects = models.Manager() open = OpenBasketManager() saved = SavedBasketManager() def __init__(self, *args, **kwargs): super(AbstractBasket, self).__init__(*args, **kwargs) # We keep a cached copy of the basket lines as we refer to them often # within the same request cycle. Also, applying offers will append # discount data to the basket lines which isn't persisted to the DB and # so we want to avoid reloading them as this would drop the discount # information. self._lines = None self.offer_applications = results.OfferApplications() def __str__(self): return _( u"%(status)s basket (owner: %(owner)s, lines: %(num_lines)d)") \ % {'status': self.status, 'owner': self.owner, 'num_lines': self.num_lines} # ======== # Strategy # ======== @property def has_strategy(self): return hasattr(self, '_strategy') def _get_strategy(self): if not self.has_strategy: raise RuntimeError( "No strategy class has been assigned to this basket. " "This is normally assigned to the incoming request in " "oscar.apps.basket.middleware.BasketMiddleware. " "Since it is missing, you must be doing something different. " "Ensure that a strategy instance is assigned to the basket!" ) return self._strategy def _set_strategy(self, strategy): self._strategy = strategy strategy = property(_get_strategy, _set_strategy) def all_lines(self): """ Return a cached set of basket lines. 
This is important for offers as they alter the line models and you don't want to reload them from the DB as that information would be lost. """ if self.id is None: return self.lines.none() if self._lines is None: self._lines = ( self.lines .select_related('product', 'stockrecord') .prefetch_related( 'attributes', 'product__images')) return self._lines def is_quantity_allowed(self, qty): """ Test whether the passed quantity of items can be added to the basket """ # We enfore a max threshold to prevent a DOS attack via the offers # system. basket_threshold = settings.OSCAR_MAX_BASKET_QUANTITY_THRESHOLD if basket_threshold: total_basket_quantity = self.num_items max_allowed = basket_threshold - total_basket_quantity if qty > max_allowed: return False, _( "Due to technical limitations we are not able " "to ship more than %(threshold)d items in one order.") \ % {'threshold': basket_threshold} return True, None # ============ # Manipulation # ============ def flush(self): """ Remove all lines from basket. """ if self.status == self.FROZEN: raise PermissionDenied("A frozen basket cannot be flushed") self.lines.all().delete() self._lines = None def add_product(self, product, quantity=1, options=None): """ Add a product to the basket 'stock_info' is the price and availability data returned from a partner strategy class. The 'options' list should contains dicts with keys 'option' and 'value' which link the relevant product.Option model and string value respectively. Returns (line, created). line: the matching basket line created: whether the line was created or updated """ if options is None: options = [] if not self.id: self.save() # Ensure that all lines are the same currency price_currency = self.currency stock_info = self.strategy.fetch_for_product(product) if price_currency and stock_info.price.currency != price_currency: raise ValueError(( "Basket lines must all have the same currency. 
Proposed " "line has currency %s, while basket has currency %s") % (stock_info.price.currency, price_currency)) if stock_info.stockrecord is None: raise ValueError(( "Basket lines must all have stock records. Strategy hasn't " "found any stock record for product %s") % product) # Line reference is used to distinguish between variations of the same # product (eg T-shirts with different personalisations) line_ref = self._create_line_reference( product, stock_info.stockrecord, options) # Determine price to store (if one exists). It is only stored for # audit and sometimes caching. defaults = { 'quantity': quantity, 'price_excl_tax': stock_info.price.excl_tax, 'price_currency': stock_info.price.currency, } if stock_info.price.is_tax_known: defaults['price_incl_tax'] = stock_info.price.incl_tax line, created = self.lines.get_or_create( line_reference=line_ref, product=product, stockrecord=stock_info.stockrecord, defaults=defaults) if created: for option_dict in options: line.attributes.create(option=option_dict['option'], value=option_dict['value']) else: line.quantity += quantity line.save() self.reset_offer_applications() # Returning the line is useful when overriding this method. return line, created add_product.alters_data = True add = add_product def applied_offers(self): """ Return a dict of offers successfully applied to the basket. This is used to compare offers before and after a basket change to see if there is a difference. """ return self.offer_applications.offers def reset_offer_applications(self): """ Remove any discounts so they get recalculated """ self.offer_applications = results.OfferApplications() self._lines = None def merge_line(self, line, add_quantities=True): """ For transferring a line from another basket to this one. This is used with the "Saved" basket functionality. 
""" try: existing_line = self.lines.get(line_reference=line.line_reference) except ObjectDoesNotExist: # Line does not already exist - reassign its basket line.basket = self line.save() else: # Line already exists - assume the max quantity is correct and # delete the old if add_quantities: existing_line.quantity += line.quantity else: existing_line.quantity = max(existing_line.quantity, line.quantity) existing_line.save() line.delete() finally: self._lines = None merge_line.alters_data = True def merge(self, basket, add_quantities=True): """ Merges another basket with this one. :basket: The basket to merge into this one. :add_quantities: Whether to add line quantities when they are merged. """ # Use basket.lines.all instead of all_lines as this function is called # before a strategy has been assigned. for line_to_merge in basket.lines.all(): self.merge_line(line_to_merge, add_quantities) basket.status = self.MERGED basket.date_merged = now() basket._lines = None basket.save() # Ensure all vouchers are moved to the new basket for voucher in basket.vouchers.all(): basket.vouchers.remove(voucher) self.vouchers.add(voucher) merge.alters_data = True def freeze(self): """ Freezes the basket so it cannot be modified. """ self.status = self.FROZEN self.save() freeze.alters_data = True def thaw(self): """ Unfreezes a basket so it can be modified again """ self.status = self.OPEN self.save() thaw.alters_data = True def submit(self): """ Mark this basket as submitted """ self.status = self.SUBMITTED self.date_submitted = now() self.save() submit.alters_data = True # Kept for backwards compatibility set_as_submitted = submit def is_shipping_required(self): """ Test whether the basket contains physical products that require shipping. 
""" for line in self.all_lines(): if line.product.is_shipping_required: return True return False # ======= # Helpers # ======= def _create_line_reference(self, product, stockrecord, options): """ Returns a reference string for a line based on the item and its options. """ base = '%s_%s' % (product.id, stockrecord.id) if not options: return base return "%s_%s" % (base, zlib.crc32(repr(options).encode('utf8'))) def _get_total(self, property): """ For executing a named method on each line of the basket and returning the total. """ total = D('0.00') for line in self.all_lines(): try: total += getattr(line, property) except ObjectDoesNotExist: # Handle situation where the product may have been deleted pass return total # ========== # Properties # ========== @property def is_empty(self): """ Test if this basket is empty """ return self.id is None or self.num_lines == 0 @property def is_tax_known(self): """ Test if tax values are known for this basket """ return all([line.is_tax_known for line in self.all_lines()]) @property def total_excl_tax(self): """ Return total line price excluding tax """ return self._get_total('line_price_excl_tax_incl_discounts') @property def total_tax(self): """Return total tax for a line""" return self._get_total('line_tax') @property def total_incl_tax(self): """ Return total price inclusive of tax and discounts """ return self._get_total('line_price_incl_tax_incl_discounts') @property def total_incl_tax_excl_discounts(self): """ Return total price inclusive of tax but exclusive discounts """ return self._get_total('line_price_incl_tax') @property def total_discount(self): return self._get_total('discount_value') @property def offer_discounts(self): """ Return basket discounts from non-voucher sources. Does not include shipping discounts. 
""" return self.offer_applications.offer_discounts @property def voucher_discounts(self): """ Return discounts from vouchers """ return self.offer_applications.voucher_discounts @property def has_shipping_discounts(self): return len(self.shipping_discounts) > 0 @property def shipping_discounts(self): """ Return discounts from vouchers """ return self.offer_applications.shipping_discounts @property def post_order_actions(self): """ Return discounts from vouchers """ return self.offer_applications.post_order_actions @property def grouped_voucher_discounts(self): """ Return discounts from vouchers but grouped so that a voucher which links to multiple offers is aggregated into one object. """ return self.offer_applications.grouped_voucher_discounts @property def total_excl_tax_excl_discounts(self): """ Return total price excluding tax and discounts """ return self._get_total('line_price_excl_tax') @property def num_lines(self): """Return number of lines""" return self.all_lines().count() @property def num_items(self): """Return number of items""" return sum(line.quantity for line in self.lines.all()) @property def num_items_without_discount(self): num = 0 for line in self.all_lines(): num += line.quantity_without_discount return num @property def num_items_with_discount(self): num = 0 for line in self.all_lines(): num += line.quantity_with_discount return num @property def time_before_submit(self): if not self.date_submitted: return None return self.date_submitted - self.date_created @property def time_since_creation(self, test_datetime=None): if not test_datetime: test_datetime = now() return test_datetime - self.date_created @property def contains_a_voucher(self): if not self.id: return False return self.vouchers.exists() @property def is_submitted(self): return self.status == self.SUBMITTED @property def can_be_edited(self): """ Test if a basket can be edited """ return self.status in self.editable_statuses @property def currency(self): # Since all lines should have 
the same currency, return the currency of # the first one found. for line in self.all_lines(): return line.price_currency # ============= # Query methods # ============= def contains_voucher(self, code): """ Test whether the basket contains a voucher with a given code """ if self.id is None: return False try: self.vouchers.get(code=code) except ObjectDoesNotExist: return False else: return True def product_quantity(self, product): """ Return the quantity of a product in the basket The basket can contain multiple lines with the same product, but different options and stockrecords. Those quantities are summed up. """ matching_lines = self.lines.filter(product=product) quantity = matching_lines.aggregate(Sum('quantity'))['quantity__sum'] return quantity or 0 def line_quantity(self, product, stockrecord, options=None): """ Return the current quantity of a specific product and options """ ref = self._create_line_reference(product, stockrecord, options) try: return self.lines.get(line_reference=ref).quantity except ObjectDoesNotExist: return 0 @python_2_unicode_compatible class AbstractLine(models.Model): """ A line of a basket (product and a quantity) """ basket = models.ForeignKey('basket.Basket', related_name='lines', verbose_name=_("Basket")) # This is to determine which products belong to the same line # We can't just use product.id as you can have customised products # which should be treated as separate lines. Set as a # SlugField as it is included in the path for certain views. line_reference = models.SlugField( _("Line Reference"), max_length=128, db_index=True) product = models.ForeignKey( 'catalogue.Product', related_name='basket_lines', verbose_name=_("Product")) # We store the stockrecord that should be used to fulfil this line. 
stockrecord = models.ForeignKey( 'partner.StockRecord', related_name='basket_lines') quantity = models.PositiveIntegerField(_('Quantity'), default=1) # We store the unit price incl tax of the product when it is first added to # the basket. This allows us to tell if a product has changed price since # a person first added it to their basket. price_currency = models.CharField( _("Currency"), max_length=12, default=get_default_currency) price_excl_tax = models.DecimalField( _('Price excl. Tax'), decimal_places=2, max_digits=12, null=True) price_incl_tax = models.DecimalField( _('Price incl. Tax'), decimal_places=2, max_digits=12, null=True) # Track date of first addition date_created = models.DateTimeField(_("Date Created"), auto_now_add=True) def __init__(self, *args, **kwargs): super(AbstractLine, self).__init__(*args, **kwargs) # Instance variables used to persist discount information self._discount_excl_tax = D('0.00') self._discount_incl_tax = D('0.00') self._affected_quantity = 0 class Meta: abstract = True app_label = 'basket' unique_together = ("basket", "line_reference") verbose_name = _('Basket line') verbose_name_plural = _('Basket lines') def __str__(self): return _( u"Basket #%(basket_id)d, Product #%(product_id)d, quantity" u" %(quantity)d") % {'basket_id': self.basket.pk, 'product_id': self.product.pk, 'quantity': self.quantity} def save(self, *args, **kwargs): if not self.basket.can_be_edited: raise PermissionDenied( _("You cannot modify a %s basket") % ( self.basket.status.lower(),)) return super(AbstractLine, self).save(*args, **kwargs) # ============= # Offer methods # ============= def clear_discount(self): """ Remove any discounts from this line. 
""" self._discount_excl_tax = D('0.00') self._discount_incl_tax = D('0.00') self._affected_quantity = 0 def discount(self, discount_value, affected_quantity, incl_tax=True): """ Apply a discount to this line """ if incl_tax: if self._discount_excl_tax > 0: raise RuntimeError( "Attempting to discount the tax-inclusive price of a line " "when tax-exclusive discounts are already applied") self._discount_incl_tax += discount_value else: if self._discount_incl_tax > 0: raise RuntimeError( "Attempting to discount the tax-exclusive price of a line " "when tax-inclusive discounts are already applied") self._discount_excl_tax += discount_value self._affected_quantity += int(affected_quantity) def consume(self, quantity): """ Mark all or part of the line as 'consumed' Consumed items are no longer available to be used in offers. """ if quantity > self.quantity - self._affected_quantity: inc = self.quantity - self._affected_quantity else: inc = quantity self._affected_quantity += int(inc) def get_price_breakdown(self): """ Return a breakdown of line prices after discounts have been applied. Returns a list of (unit_price_incl_tax, unit_price_excl_tax, quantity) tuples. """ if not self.is_tax_known: raise RuntimeError("A price breakdown can only be determined " "when taxes are known") prices = [] if not self.discount_value: prices.append((self.unit_price_incl_tax, self.unit_price_excl_tax, self.quantity)) else: # Need to split the discount among the affected quantity # of products. 
item_incl_tax_discount = ( self.discount_value / int(self._affected_quantity)) item_excl_tax_discount = item_incl_tax_discount * self._tax_ratio item_excl_tax_discount = item_excl_tax_discount.quantize(D('0.01')) prices.append((self.unit_price_incl_tax - item_incl_tax_discount, self.unit_price_excl_tax - item_excl_tax_discount, self._affected_quantity)) if self.quantity_without_discount: prices.append((self.unit_price_incl_tax, self.unit_price_excl_tax, self.quantity_without_discount)) return prices # ======= # Helpers # ======= @property def _tax_ratio(self): if not self.unit_price_incl_tax: return 0 return self.unit_price_excl_tax / self.unit_price_incl_tax # ========== # Properties # ========== @property def has_discount(self): return self.quantity > self.quantity_without_discount @property def quantity_with_discount(self): return self._affected_quantity @property def quantity_without_discount(self): return int(self.quantity - self._affected_quantity) @property def is_available_for_discount(self): return self.quantity_without_discount > 0 @property def discount_value(self): # Only one of the incl- and excl- discounts should be non-zero return max(self._discount_incl_tax, self._discount_excl_tax) @property def purchase_info(self): """ Return the stock/price info """ if not hasattr(self, '_info'): # Cache the PurchaseInfo instance. 
self._info = self.basket.strategy.fetch_for_line( self, self.stockrecord) return self._info @property def is_tax_known(self): return self.purchase_info.price.is_tax_known @property def unit_effective_price(self): """ The price to use for offer calculations """ return self.purchase_info.price.effective_price @property def unit_price_excl_tax(self): return self.purchase_info.price.excl_tax @property def unit_price_incl_tax(self): return self.purchase_info.price.incl_tax @property def unit_tax(self): return self.purchase_info.price.tax @property def line_price_excl_tax(self): return self.quantity * self.unit_price_excl_tax @property def line_price_excl_tax_incl_discounts(self): if self._discount_excl_tax: return self.line_price_excl_tax - self._discount_excl_tax if self._discount_incl_tax: # This is a tricky situation. We know the discount as calculated # against tax inclusive prices but we need to guess how much of the # discount applies to tax-exclusive prices. We do this by # assuming a linear tax and scaling down the original discount. return self.line_price_excl_tax \ - self._tax_ratio * self._discount_incl_tax return self.line_price_excl_tax @property def line_price_incl_tax_incl_discounts(self): # We use whichever discount value is set. 
If the discount value was # calculated against the tax-exclusive prices, then the line price # including tax return self.line_price_incl_tax - self.discount_value @property def line_tax(self): return self.quantity * self.unit_tax @property def line_price_incl_tax(self): return self.quantity * self.unit_price_incl_tax @property def description(self): d = smart_text(self.product) ops = [] for attribute in self.attributes.all(): ops.append("%s = '%s'" % (attribute.option.name, attribute.value)) if ops: d = "%s (%s)" % (d, ", ".join(ops)) return d def get_warning(self): """ Return a warning message about this basket line if one is applicable This could be things like the price has changed """ if not self.stockrecord: msg = u"'%(product)s' is no longer available" return _(msg) % {'product': self.product.get_title()} if not self.price_incl_tax: return if not self.purchase_info.price.is_tax_known: return # Compare current price to price when added to basket current_price_incl_tax = self.purchase_info.price.incl_tax if current_price_incl_tax != self.price_incl_tax: product_prices = { 'product': self.product.get_title(), 'old_price': currency(self.price_incl_tax), 'new_price': currency(current_price_incl_tax) } if current_price_incl_tax > self.price_incl_tax: warning = _("The price of '%(product)s' has increased from" " %(old_price)s to %(new_price)s since you added" " it to your basket") return warning % product_prices else: warning = _("The price of '%(product)s' has decreased from" " %(old_price)s to %(new_price)s since you added" " it to your basket") return warning % product_prices class AbstractLineAttribute(models.Model): """ An attribute of a basket line """ line = models.ForeignKey('basket.Line', related_name='attributes', verbose_name=_("Line")) option = models.ForeignKey('catalogue.Option', verbose_name=_("Option")) value = models.CharField(_("Value"), max_length=255) class Meta: abstract = True app_label = 'basket' verbose_name = _('Line attribute') 
verbose_name_plural = _('Line attributes')
bsd-3-clause
pavelchristof/gomoku-ai
tensorflow/contrib/distributions/python/kernel_tests/bijectors/sigmoid_centered_test.py
76
2003
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Bijector.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.contrib.distributions.python.ops.bijectors.sigmoid_centered import SigmoidCentered from tensorflow.python.platform import test class SigmoidCenteredBijectorTest(test.TestCase): """Tests correctness of the Y = g(X) = (1 + exp(-X))^-1 transformation.""" def testBijector(self): with self.test_session(): sigmoid = SigmoidCentered() self.assertEqual("sigmoid_centered", sigmoid.name) x = np.log([[2., 3, 4], [4., 8, 12]]) y = [[[2. / 3, 1. / 3], [3. / 4, 1. / 4], [4. / 5, 1. / 5]], [[4. / 5, 1. / 5], [8. / 9, 1. / 9], [12. / 13, 1. / 13]]] self.assertAllClose(y, sigmoid.forward(x).eval()) self.assertAllClose(x, sigmoid.inverse(y).eval()) self.assertAllClose( -np.sum(np.log(y), axis=2), sigmoid.inverse_log_det_jacobian(y).eval(), atol=0., rtol=1e-7) self.assertAllClose( -sigmoid.inverse_log_det_jacobian(y).eval(), sigmoid.forward_log_det_jacobian(x).eval(), atol=0., rtol=1e-7) if __name__ == "__main__": test.main()
apache-2.0
ocefpaf/paegan
tests/cdm/dsg/collections/base/test_profile_collection.py
2
2207
# -*- coding: utf-8 -*- import random import unittest from datetime import datetime, timedelta from shapely.geometry import Point as sPoint from paegan.cdm.dsg.member import Member from paegan.cdm.dsg.features.base.point import Point from paegan.cdm.dsg.features.base.profile import Profile from paegan.cdm.dsg.collections.base.profile_collection import ProfileCollection class ProfileCollectionTest(unittest.TestCase): def test_profile_collection(self): day = 1 pc = ProfileCollection() dt = None # 10 profiles for x in range(0,10): lat = random.randint(40,44) lon = random.randint(-74,-70) loc = sPoint(lon,lat,0) hour = 0 minute = 0 dt = datetime(2012, 4, day, hour, minute) prof = Profile() prof.location = loc prof.time = dt # Each with 20 depths for y in range(0,20): p = Point() p.time = dt p.location = sPoint(loc.x, loc.y, y) m1 = Member(value=random.uniform(30,40), unit='°C', name='Water Temperature', description='water temperature', standard='sea_water_temperature') m2 = Member(value=random.uniform(80,100), unit='PSU', name='Salinity', description='salinity', standard='salinity') p.add_member(m1) p.add_member(m2) prof.add_element(p) # Next depth is 2 minutes from now dt = dt + timedelta(minutes=2) pc.add_element(prof) pc.calculate_bounds() assert pc.size == 10 assert pc.point_size == 200 assert len(pc.time_range) == 200 assert pc.time_range[0] == datetime(2012, 4, 1, 0, 0) assert pc.time_range[-1] == dt - timedelta(minutes=2) assert len(pc.depth_range) == 200 assert pc.depth_range[0] == 0 assert pc.depth_range[-1] == 19 for profile in pc: assert profile.type == "Profile" for point in profile: assert point.type == "Point" for point in pc.flatten(): assert point.type == "Point"
gpl-3.0
mrkarthik07/libforensics
unittests/tests/dtypes/dal.py
13
6774
# Copyright 2010 Michael Murr # # This file is part of LibForensics. # # LibForensics is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # LibForensics is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with LibForensics. If not, see <http://www.gnu.org/licenses/>. """Unit tests for the lf.dtypes.dal module.""" # stdlib imports from unittest import TestCase # local imports from lf.dec import ByteIStream from lf.dtypes.composite import LERecord from lf.dtypes.native import int8, uint8 from lf.dtypes.dal import structuple, Structuple, CtypesWrapper __docformat__ = "restructuredtext en" __all__ = [ "structupleTestCase", "StructupleTestCase", "CtypesWrapperTestCase" ] class structupleTestCase(TestCase): def test_structuple(self): ae = self.assertEqual af = self.assertFalse at = self.assertTrue fields = ("field1", "field2") dup_fields = ["field1", "field2", "field1"] dup_fields_renamed = ("field1", "field2", "field1__2") aliases = {"field3": "field1", "field4": "field2"} st_init = [0x64, 0x53] st3_init = [0x64, 0x53, 0xCC] field_vals = { "field1": 0x64, "field3": 0x64, "field2": 0x53, "field4": 0x53, "field1__2": 0xCC } st1 = structuple("st1_name", fields, aliases, True, False) st2 = structuple("st2_name", fields, aliases, False, False) st3 = structuple("st3_name", dup_fields, aliases, True, True) # Check name and rename ae(st1.__name__, "st1_name") ae(st2.__name__, "st2_name") ae(st3.__name__, "st3_name") # Check fields ae(st1._fields_, fields) ae(st2._fields_, fields) ae(st3._fields_, dup_fields_renamed) st1_vals = 
st1(st_init) st2_vals = st2(st_init) st3_vals = st3(st3_init) for field in fields: ae(getattr(st1_vals, field), field_vals[field]) ae(getattr(st2_vals, field), field_vals[field]) # end for for field in dup_fields_renamed: ae(getattr(st3_vals, field), field_vals[field]) # end for # Check aliases for alias in aliases.keys(): ae(getattr(st1_vals, alias), field_vals[alias]) ae(getattr(st2_vals, alias), field_vals[alias]) ae(getattr(st3_vals, alias), field_vals[alias]) # end for # Check auto_slots class AutoSlots1(st1): extra_field = None # end class as1 = AutoSlots1() class AutoSlots2(st2): extra_field = None # end class AutoSlots2 as2 = AutoSlots2() af(hasattr(as1, "__dict__")) at(hasattr(as2, "__dict__")) # end def test_structuple # end class structupleTestCase class StructupleTestCase(TestCase): def test_Structuple(self): ae = self.assertEqual af = self.assertFalse at = self.assertTrue fields1 = ("field1_0", "field1_1") fields2 = ("field2_0", "field2_1") fields3 = ("field3_0", "field3_1") aliases = {"field1_0_A": "field1_0", "field3_0_A": "field3_0"} fields2_full = ("field1_0", "field1_1", "field2_0", "field2_1") fields3_full = ("field1_0", "field1_1", "field3_0", "field3_1") st_init = (0x64, 0x53) st2_init = (0x64, 0x53, 0xAA, 0x55) field_vals = { "field1_0": 0x64, "field1_0_A": 0x64, "field1_1": 0x53, "field2_0": 0xAA, "field3_0": 0xAA, "field3_0_A": 0xAA, "field2_1": 0x55, "field3_1": 0x55, } class TestStructuple1(Structuple): _fields_ = fields1 _aliases_ = aliases _auto_slots_ = True # end class TestStructuple1 class TestStructuple2(TestStructuple1): _fields_ = fields2 # end class TestStructuple2 class TestStructuple3(TestStructuple1): _fields_ = fields3 _auto_slots_ = False # end class TestStructuple3 # Check _fields_ ae(TestStructuple1._fields_, fields1) ae(TestStructuple2._fields_, fields2_full) ae(TestStructuple3._fields_, fields3_full) st1_vals = TestStructuple1(st_init) st2_vals = TestStructuple2(st2_init) st3_vals = TestStructuple3(st2_init) for field in 
fields1: ae(getattr(st1_vals, field), field_vals[field]) # end for for field in fields2_full: ae(getattr(st2_vals, field), field_vals[field]) # end for for field in fields3_full: ae(getattr(st3_vals, field), field_vals[field]) # end for # Check _aliases_ ae(st1_vals.field1_0_A, field_vals["field1_0_A"]) ae(st2_vals.field1_0_A, field_vals["field1_0_A"]) ae(st3_vals.field3_0_A, field_vals["field3_0_A"]) af(hasattr(st1_vals, "field3_0_A")) af(hasattr(st2_vals, "field3_0_A")) at(hasattr(st3_vals, "field1_0_A")) # Check auto_slots ts1 = TestStructuple1() ts2 = TestStructuple2() ts3 = TestStructuple3() af(hasattr(ts1, "__dict__")) af(hasattr(ts2, "__dict__")) at(hasattr(ts3, "__dict__")) # end def test_Structuple # end class StructupleTestCase class CtypesWrapperTestCase(TestCase): def test_CtypesWrapper(self): ae = self.assertEqual class TestDataType(LERecord): field1 = int8 field2 = uint8 # end class TestDataType ctype = TestDataType._ctype_ class CtypesWrapperTest(CtypesWrapper): _ctype_ = ctype _fields_ = [x[0] for x in TestDataType._fields_] # end class CtypesWrapperTest cwt1 = CtypesWrapperTest.from_stream(ByteIStream(b"\x64\x53")) ctype_with_val = ctype.from_buffer_copy(b"\x64\x53") cwt2 = CtypesWrapperTest.from_ctype(ctype_with_val) cwt3 = CtypesWrapperTest.from_bytes(b"\x64\x53") for cwt in (cwt1, cwt2, cwt3): ae(cwt.field1, 0x64) ae(cwt.field2, 0x53) ae(cwt, (0x64, 0x53)) # end for # end def test_CtypesWrapper # end class CtypesWrapperTestCase
gpl-3.0
djb1815/Essex-MuSoc
musoc_web/schedule/views.py
1
1465
from django.shortcuts import render, redirect from django.db import transaction from django.contrib.auth.decorators import login_required from .forms import ProfileNameForm, ProfileDetailForm from django.contrib import messages # Create your views here. def index(request): # Add variables in the custom_variables dict below to make them available within the rendered page title = "Welcome" custom_variables = { 'title': title } return render(request, "schedule/home.html", custom_variables) @login_required @transaction.atomic def profile(request): title = "Account Settings" if request.method == 'POST': name_form = ProfileNameForm(request.POST, instance=request.user) detail_form = ProfileDetailForm(request.POST, instance=request.user.profile) if name_form.is_valid() and detail_form.is_valid(): name_form.save() detail_form.save() messages.success(request, 'Your profile has been successfully updated!') return redirect('profile') else: messages.error(request, 'Please correct the error below.') else: name_form = ProfileNameForm(instance=request.user) detail_form = ProfileDetailForm(instance=request.user.profile) custom_variables = { 'title': title, 'name_form': name_form, 'detail_form': detail_form } return render(request, "account/profile.html", custom_variables)
mit
Opshun/API
venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/sbcsgroupprober.py
2936
3291
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Universal charset detector code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 2001 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # Shy Shalom - original C code # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### from .charsetgroupprober import CharSetGroupProber from .sbcharsetprober import SingleByteCharSetProber from .langcyrillicmodel import (Win1251CyrillicModel, Koi8rModel, Latin5CyrillicModel, MacCyrillicModel, Ibm866Model, Ibm855Model) from .langgreekmodel import Latin7GreekModel, Win1253GreekModel from .langbulgarianmodel import Latin5BulgarianModel, Win1251BulgarianModel from .langhungarianmodel import Latin2HungarianModel, Win1250HungarianModel from .langthaimodel import TIS620ThaiModel from .langhebrewmodel import Win1255HebrewModel from .hebrewprober import HebrewProber class SBCSGroupProber(CharSetGroupProber): def __init__(self): CharSetGroupProber.__init__(self) self._mProbers = [ SingleByteCharSetProber(Win1251CyrillicModel), SingleByteCharSetProber(Koi8rModel), 
SingleByteCharSetProber(Latin5CyrillicModel), SingleByteCharSetProber(MacCyrillicModel), SingleByteCharSetProber(Ibm866Model), SingleByteCharSetProber(Ibm855Model), SingleByteCharSetProber(Latin7GreekModel), SingleByteCharSetProber(Win1253GreekModel), SingleByteCharSetProber(Latin5BulgarianModel), SingleByteCharSetProber(Win1251BulgarianModel), SingleByteCharSetProber(Latin2HungarianModel), SingleByteCharSetProber(Win1250HungarianModel), SingleByteCharSetProber(TIS620ThaiModel), ] hebrewProber = HebrewProber() logicalHebrewProber = SingleByteCharSetProber(Win1255HebrewModel, False, hebrewProber) visualHebrewProber = SingleByteCharSetProber(Win1255HebrewModel, True, hebrewProber) hebrewProber.set_model_probers(logicalHebrewProber, visualHebrewProber) self._mProbers.extend([hebrewProber, logicalHebrewProber, visualHebrewProber]) self.reset()
mit
ionutcipriananescu/python
Yixiaohan/0000/avatar_add_number.py
40
1076
#!/usr /bin/env python # -*- coding: utf-8 -*- """ 将你的 QQ 头像(或者微博头像)右上角加上红色的数字,类似于微信未读信息数量那种提示效果。 Pillow:Python Imaging Library PIL.ImageDraw.Draw.text(xy, text, fill=None, font=None, anchor=None) """ from PIL import Image, ImageDraw, ImageFont original_avatar = 'weChat_avatar.png' saved_avatar = 'new_avatar.png' windows_font = 'Arial.ttf' color = (255, 0, 0) def draw_text(text, fill_color, windows_font): try: im = Image.open(original_avatar) x, y = im.size print "The size of the Image is: " print(im.format, im.size, im.mode) im.show() draw = ImageDraw.Draw(im) font = ImageFont.truetype(windows_font, 35) draw.text((x-20, 7), text, fill_color, font) im.save(saved_avatar) im.show() except: print "Unable to load image" if __name__ == "__main__": #number = str(raw_input('please input number: ')) number = str(4) draw_text(number, color, windows_font)
mit
usakhelo/FreeCAD
src/Mod/Fem/PyGui/_CommandFemResultShow.py
2
2797
# *************************************************************************** # * * # * Copyright (c) 2013-2015 - Juergen Riegel <FreeCAD@juergen-riegel.net> * # * * # * This program is free software; you can redistribute it and/or modify * # * it under the terms of the GNU Lesser General Public License (LGPL) * # * as published by the Free Software Foundation; either version 2 of * # * the License, or (at your option) any later version. * # * for detail see the LICENCE text file. * # * * # * This program is distributed in the hope that it will be useful, * # * but WITHOUT ANY WARRANTY; without even the implied warranty of * # * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * # * GNU Library General Public License for more details. * # * * # * You should have received a copy of the GNU Library General Public * # * License along with this program; if not, write to the Free Software * # * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * # * USA * # * * # *************************************************************************** __title__ = "Command Show Result" __author__ = "Juergen Riegel, Bernd Hahnebach" __url__ = "http://www.freecadweb.org" ## @package CommandFemResultShow # \ingroup FEM # \brief FreeCAD Command show results for FEM workbench from FemCommands import FemCommands import FreeCADGui from PySide import QtCore class _CommandFemResultShow(FemCommands): "the FEM_ResultShow command definition" def __init__(self): super(_CommandFemResultShow, self).__init__() self.resources = {'Pixmap': 'fem-result', 'MenuText': QtCore.QT_TRANSLATE_NOOP("FEM_ResultShow", "Show result"), 'Accel': "S, R", 'ToolTip': QtCore.QT_TRANSLATE_NOOP("FEM_ResultShow", "Shows and visualizes selected result data")} self.is_active = 'with_selresult' def Activated(self): sel = FreeCADGui.Selection.getSelection() if (len(sel) == 1): if sel[0].isDerivedFrom("Fem::FemResultObject"): result_object = sel[0] result_object.ViewObject.startEditing() 
FreeCADGui.addCommand('FEM_ResultShow', _CommandFemResultShow())
lgpl-2.1
trezorg/django
django/contrib/localflavor/it/it_province.py
406
2740
# -*- coding: utf-8 -* PROVINCE_CHOICES = ( ('AG', 'Agrigento'), ('AL', 'Alessandria'), ('AN', 'Ancona'), ('AO', 'Aosta'), ('AR', 'Arezzo'), ('AP', 'Ascoli Piceno'), ('AT', 'Asti'), ('AV', 'Avellino'), ('BA', 'Bari'), ('BT', 'Barletta-Andria-Trani'), # active starting from 2009 ('BL', 'Belluno'), ('BN', 'Benevento'), ('BG', 'Bergamo'), ('BI', 'Biella'), ('BO', 'Bologna'), ('BZ', 'Bolzano/Bozen'), ('BS', 'Brescia'), ('BR', 'Brindisi'), ('CA', 'Cagliari'), ('CL', 'Caltanissetta'), ('CB', 'Campobasso'), ('CI', 'Carbonia-Iglesias'), ('CE', 'Caserta'), ('CT', 'Catania'), ('CZ', 'Catanzaro'), ('CH', 'Chieti'), ('CO', 'Como'), ('CS', 'Cosenza'), ('CR', 'Cremona'), ('KR', 'Crotone'), ('CN', 'Cuneo'), ('EN', 'Enna'), ('FM', 'Fermo'), # active starting from 2009 ('FE', 'Ferrara'), ('FI', 'Firenze'), ('FG', 'Foggia'), ('FC', 'Forlì-Cesena'), ('FR', 'Frosinone'), ('GE', 'Genova'), ('GO', 'Gorizia'), ('GR', 'Grosseto'), ('IM', 'Imperia'), ('IS', 'Isernia'), ('SP', 'La Spezia'), ('AQ', u'L’Aquila'), ('LT', 'Latina'), ('LE', 'Lecce'), ('LC', 'Lecco'), ('LI', 'Livorno'), ('LO', 'Lodi'), ('LU', 'Lucca'), ('MC', 'Macerata'), ('MN', 'Mantova'), ('MS', 'Massa-Carrara'), ('MT', 'Matera'), ('VS', 'Medio Campidano'), ('ME', 'Messina'), ('MI', 'Milano'), ('MO', 'Modena'), ('MB', 'Monza e Brianza'), # active starting from 2009 ('NA', 'Napoli'), ('NO', 'Novara'), ('NU', 'Nuoro'), ('OG', 'Ogliastra'), ('OT', 'Olbia-Tempio'), ('OR', 'Oristano'), ('PD', 'Padova'), ('PA', 'Palermo'), ('PR', 'Parma'), ('PV', 'Pavia'), ('PG', 'Perugia'), ('PU', 'Pesaro e Urbino'), ('PE', 'Pescara'), ('PC', 'Piacenza'), ('PI', 'Pisa'), ('PT', 'Pistoia'), ('PN', 'Pordenone'), ('PZ', 'Potenza'), ('PO', 'Prato'), ('RG', 'Ragusa'), ('RA', 'Ravenna'), ('RC', 'Reggio Calabria'), ('RE', 'Reggio Emilia'), ('RI', 'Rieti'), ('RN', 'Rimini'), ('RM', 'Roma'), ('RO', 'Rovigo'), ('SA', 'Salerno'), ('SS', 'Sassari'), ('SV', 'Savona'), ('SI', 'Siena'), ('SR', 'Siracusa'), ('SO', 'Sondrio'), ('TA', 'Taranto'), ('TE', 'Teramo'), 
('TR', 'Terni'), ('TO', 'Torino'), ('TP', 'Trapani'), ('TN', 'Trento'), ('TV', 'Treviso'), ('TS', 'Trieste'), ('UD', 'Udine'), ('VA', 'Varese'), ('VE', 'Venezia'), ('VB', 'Verbano Cusio Ossola'), ('VC', 'Vercelli'), ('VR', 'Verona'), ('VV', 'Vibo Valentia'), ('VI', 'Vicenza'), ('VT', 'Viterbo'), )
bsd-3-clause
DavidAndreev/indico
indico/MaKaC/webinterface/pages/static.py
2
2989
# This file is part of Indico. # Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN). # # Indico is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 3 of the # License, or (at your option) any later version. # # Indico is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Indico; if not, see <http://www.gnu.org/licenses/>. from indico.modules.events.contributions.views import WPAuthorList, WPContributions, WPSpeakerList from indico.modules.events.layout.views import WPPage from indico.modules.events.registration.views import WPDisplayRegistrationParticipantList from indico.modules.events.sessions.views import WPDisplaySession from indico.modules.events.timetable.views import WPDisplayTimetable from MaKaC.webinterface.pages.conferences import WPTPLConferenceDisplay, WPConferenceDisplay, WPConferenceProgram class WPStaticEventBase: def _getBaseURL(self): return "static" def _getHeader(self): return "" def _getFooter(self): return "" class WPTPLStaticConferenceDisplay(WPStaticEventBase, WPTPLConferenceDisplay): pass class WPStaticConferenceDisplay(WPStaticEventBase, WPConferenceDisplay): pass class WPStaticTimetable(WPStaticEventBase, WPDisplayTimetable): endpoint = 'timetable.timetable' menu_entry_name = 'timetable' class WPStaticConferenceProgram(WPStaticEventBase, WPConferenceProgram): endpoint = 'event.conferenceProgram' class WPStaticDisplayRegistrationParticipantList(WPStaticEventBase, WPDisplayRegistrationParticipantList): endpoint = 'event_registration.participant_list' class WPStaticContributionList(WPStaticEventBase, WPContributions): endpoint = 
'contributions.contribution_list' template_prefix = 'events/contributions/' menu_entry_name = 'contributions' class WPStaticCustomPage(WPStaticEventBase, WPPage): pass class WPStaticAuthorList(WPStaticEventBase, WPAuthorList): endpoint = 'contributions.author_list' template_prefix = 'events/contributions/' menu_entry_name = 'author_index' class WPStaticSpeakerList(WPStaticEventBase, WPSpeakerList): endpoint = 'contributions.speaker_list' template_prefix = 'events/contributions/' menu_entry_name = 'speaker_index' class WPStaticSessionDisplay(WPStaticEventBase, WPDisplaySession): endpoint = 'sessions.display_session' class WPStaticContributionDisplay(WPStaticEventBase, WPContributions): endpoint = 'contributions.display_contribution' class WPStaticSubcontributionDisplay(WPStaticEventBase, WPContributions): endpoint = 'contributions.display_subcontribution'
gpl-3.0
cloughrm/Flask-Angular-Template
backend/pastry/resources/v1/users.py
1
1720
from pastry.db import mongo from pastry.models import User from pastry.resources.auth import login_required from pastry.resources import validators, httpcodes from bson.objectid import ObjectId from flask import request from flask.ext.restful import Resource, reqparse class UsersResource(Resource): @login_required def get(self, id): return mongo.db.users.find_one_or_404({'_id': ObjectId(id)}) @login_required def delete(self, id): return mongo.db.users.remove({'_id': ObjectId(id)}) class UsersListResource(Resource): def __init__(self): self.parser = reqparse.RequestParser() if request.method == 'GET': self.parser.add_argument('limit', type=int, default=20) self.parser.add_argument('offset', type=int, default=0) elif request.method == 'POST': self.parser.add_argument('username', type=validators.email_address, required=True) self.parser.add_argument('password', type=str, required=True) super(UsersListResource, self).__init__() @login_required def get(self): args = self.parser.parse_args() users = mongo.db.users.find().skip(args.offset).limit(args.limit) return { 'objects': users, 'offset': args.offset, 'limit': args.limit, } @login_required def post(self): args = self.parser.parse_args() user = User(args.username, args.password) if mongo.db.users.find_one({'username': user.username}): return {'message': 'User {} already exists'.format(user.username)}, httpcodes.BAD_REQUEST user_id = user.create() return {'id': user_id}, httpcodes.CREATED
mit
peerster/CouchPotatoServer
libs/pyutil/verlib.py
106
12275
# -*- coding: utf-8 -*- """ "Rational" version definition and parsing for DistutilsVersionFight discussion at PyCon 2009. This was written by Tarek Ziadé. Zooko copied it from http://bitbucket.org/tarek/distutilsversion/ on 2010-07-29. """ import re class IrrationalVersionError(Exception): """This is an irrational version.""" pass class HugeMajorVersionNumError(IrrationalVersionError): """An irrational version because the major version number is huge (often because a year or date was used). See `error_on_huge_major_num` option in `NormalizedVersion` for details. This guard can be disabled by setting that option False. """ pass class PreconditionViolationException(Exception): pass # A marker used in the second and third parts of the `parts` tuple, for # versions that don't have those segments, to sort properly. An example # of versions in sort order ('highest' last): # 1.0b1 ((1,0), ('b',1), ('f',)) # 1.0.dev345 ((1,0), ('f',), ('dev', 345)) # 1.0 ((1,0), ('f',), ('f',)) # 1.0.post256.dev345 ((1,0), ('f',), ('f', 'post', 256, 'dev', 345)) # 1.0.post345 ((1,0), ('f',), ('f', 'post', 345, 'f')) # ^ ^ ^ # 'b' < 'f' ---------------------/ | | # | | # 'dev' < 'f' < 'post' -------------------/ | # | # 'dev' < 'f' ----------------------------------------------/ # Other letters would do, but 'f' for 'final' is kind of nice. FINAL_MARKER = ('f',) VERSION_RE = re.compile(r''' ^ (?P<version>\d+\.\d+) # minimum 'N.N' (?P<extraversion>(?:\.\d+)*) # any number of extra '.N' segments (?: (?P<prerel>[abc]|rc) # 'a'=alpha, 'b'=beta, 'c'=release candidate # 'rc'= alias for release candidate (?P<prerelversion>\d+(?:\.\d+)*) )? (?P<postdev>(\.post(?P<post>\d+)|-r(?P<oldpost>\d+))?(\.dev(?P<dev>\d+))?)? $''', re.VERBOSE) class NormalizedVersion(object): """A rational version. 
Good: 1.2 # equivalent to "1.2.0" 1.2.0 1.2a1 1.2.3a2 1.2.3b1 1.2.3c1 1.2.3.4 TODO: fill this out Bad: 1 # mininum two numbers 1.2a # release level must have a release serial 1.2.3b """ def __init__(self, s, error_on_huge_major_num=True): """Create a NormalizedVersion instance from a version string. @param s {str} The version string. @param error_on_huge_major_num {bool} Whether to consider an apparent use of a year or full date as the major version number an error. Default True. One of the observed patterns on PyPI before the introduction of `NormalizedVersion` was version numbers like this: 2009.01.03 20040603 2005.01 This guard is here to strongly encourage the package author to use an alternate version, because a release deployed into PyPI and, e.g. downstream Linux package managers, will forever remove the possibility of using a version number like "1.0" (i.e. where the major number is less than that huge major number). """ self._parse(s, error_on_huge_major_num) @classmethod def from_parts(cls, version, prerelease=FINAL_MARKER, devpost=FINAL_MARKER): return cls(cls.parts_to_str((version, prerelease, devpost))) def _parse(self, s, error_on_huge_major_num=True): """Parses a string version into parts.""" if not isinstance(s, basestring): raise PreconditionViolationException("s is required to be a string: %s :: %s" % (s, type(s))) match = VERSION_RE.search(s) if not match: raise IrrationalVersionError(s) groups = match.groupdict() parts = [] # main version block = self._parse_numdots(groups['version'], s, False, 2) extraversion = groups.get('extraversion') if extraversion not in ('', None): block += self._parse_numdots(extraversion[1:], s) parts.append(tuple(block)) # prerelease prerel = groups.get('prerel') if prerel is not None: block = [prerel] block += self._parse_numdots(groups.get('prerelversion'), s, pad_zeros_length=1) parts.append(tuple(block)) else: parts.append(FINAL_MARKER) # postdev if groups.get('postdev'): post = groups.get('post') or 
groups.get('oldpost') dev = groups.get('dev') postdev = [] if post is not None: postdev.extend([FINAL_MARKER[0], 'post', int(post)]) if dev is None: postdev.append(FINAL_MARKER[0]) if dev is not None: postdev.extend(['dev', int(dev)]) parts.append(tuple(postdev)) else: parts.append(FINAL_MARKER) self.parts = tuple(parts) if error_on_huge_major_num and self.parts[0][0] > 1980: raise HugeMajorVersionNumError("huge major version number, %r, " "which might cause future problems: %r" % (self.parts[0][0], s)) def _parse_numdots(self, s, full_ver_str, drop_trailing_zeros=True, pad_zeros_length=0): """Parse 'N.N.N' sequences, return a list of ints. @param s {str} 'N.N.N...' sequence to be parsed @param full_ver_str {str} The full version string from which this comes. Used for error strings. @param drop_trailing_zeros {bool} Whether to drop trailing zeros from the returned list. Default True. @param pad_zeros_length {int} The length to which to pad the returned list with zeros, if necessary. Default 0. """ nums = [] for n in s.split("."): if len(n) > 1 and n[0] == '0': raise IrrationalVersionError("cannot have leading zero in " "version number segment: '%s' in %r" % (n, full_ver_str)) nums.append(int(n)) if drop_trailing_zeros: while nums and nums[-1] == 0: nums.pop() while len(nums) < pad_zeros_length: nums.append(0) return nums def __str__(self): return self.parts_to_str(self.parts) @classmethod def parts_to_str(cls, parts): """Transforms a version expressed in tuple into its string representation.""" # XXX This doesn't check for invalid tuples main, prerel, postdev = parts s = '.'.join(str(v) for v in main) if prerel is not FINAL_MARKER: s += prerel[0] s += '.'.join(str(v) for v in prerel[1:]) if postdev and postdev is not FINAL_MARKER: if postdev[0] == 'f': postdev = postdev[1:] i = 0 while i < len(postdev): if i % 2 == 0: s += '.' 
s += str(postdev[i]) i += 1 return s def __repr__(self): return "%s('%s')" % (self.__class__.__name__, self) def _cannot_compare(self, other): raise TypeError("cannot compare %s and %s" % (type(self).__name__, type(other).__name__)) def __eq__(self, other): if not isinstance(other, NormalizedVersion): self._cannot_compare(other) return self.parts == other.parts def __lt__(self, other): if not isinstance(other, NormalizedVersion): self._cannot_compare(other) return self.parts < other.parts def __ne__(self, other): return not self.__eq__(other) def __gt__(self, other): return not (self.__lt__(other) or self.__eq__(other)) def __le__(self, other): return self.__eq__(other) or self.__lt__(other) def __ge__(self, other): return self.__eq__(other) or self.__gt__(other) def suggest_normalized_version(s): """Suggest a normalized version close to the given version string. If you have a version string that isn't rational (i.e. NormalizedVersion doesn't like it) then you might be able to get an equivalent (or close) rational version from this function. This does a number of simple normalizations to the given string, based on observation of versions currently in use on PyPI. Given a dump of those version during PyCon 2009, 4287 of them: - 2312 (53.93%) match NormalizedVersion without change - with the automatic suggestion - 3474 (81.04%) match when using this suggestion method @param s {str} An irrational version string. @returns A rational version string, or None, if couldn't determine one. 
""" try: NormalizedVersion(s) return s # already rational except IrrationalVersionError: pass rs = s.lower() # part of this could use maketrans for orig, repl in (('-alpha', 'a'), ('-beta', 'b'), ('alpha', 'a'), ('beta', 'b'), ('rc', 'c'), ('-final', ''), ('-pre', 'c'), ('-release', ''), ('.release', ''), ('-stable', ''), ('+', '.'), ('_', '.'), (' ', ''), ('.final', ''), ('final', '')): rs = rs.replace(orig, repl) # if something ends with dev or pre, we add a 0 rs = re.sub(r"pre$", r"pre0", rs) rs = re.sub(r"dev$", r"dev0", rs) # if we have something like "b-2" or "a.2" at the end of the # version, that is pobably beta, alpha, etc # let's remove the dash or dot rs = re.sub(r"([abc|rc])[\-\.](\d+)$", r"\1\2", rs) # 1.0-dev-r371 -> 1.0.dev371 # 0.1-dev-r79 -> 0.1.dev79 rs = re.sub(r"[\-\.](dev)[\-\.]?r?(\d+)$", r".\1\2", rs) # Clean: 2.0.a.3, 2.0.b1, 0.9.0~c1 rs = re.sub(r"[.~]?([abc])\.?", r"\1", rs) # Clean: v0.3, v1.0 if rs.startswith('v'): rs = rs[1:] # Clean leading '0's on numbers. #TODO: unintended side-effect on, e.g., "2003.05.09" # PyPI stats: 77 (~2%) better rs = re.sub(r"\b0+(\d+)(?!\d)", r"\1", rs) # Clean a/b/c with no version. E.g. "1.0a" -> "1.0a0". Setuptools infers # zero. 
# PyPI stats: 245 (7.56%) better rs = re.sub(r"(\d+[abc])$", r"\g<1>0", rs) # the 'dev-rNNN' tag is a dev tag rs = re.sub(r"\.?(dev-r|dev\.r)\.?(\d+)$", r".dev\2", rs) # clean the - when used as a pre delimiter rs = re.sub(r"-(a|b|c)(\d+)$", r"\1\2", rs) # a terminal "dev" or "devel" can be changed into ".dev0" rs = re.sub(r"[\.\-](dev|devel)$", r".dev0", rs) # a terminal "dev" can be changed into ".dev0" rs = re.sub(r"(?![\.\-])dev$", r".dev0", rs) # a terminal "final" or "stable" can be removed rs = re.sub(r"(final|stable)$", "", rs) # The 'r' and the '-' tags are post release tags # 0.4a1.r10 -> 0.4a1.post10 # 0.9.33-17222 -> 0.9.3.post17222 # 0.9.33-r17222 -> 0.9.3.post17222 rs = re.sub(r"\.?(r|-|-r)\.?(\d+)$", r".post\2", rs) # Clean 'r' instead of 'dev' usage: # 0.9.33+r17222 -> 0.9.3.dev17222 # 1.0dev123 -> 1.0.dev123 # 1.0.git123 -> 1.0.dev123 # 1.0.bzr123 -> 1.0.dev123 # 0.1a0dev.123 -> 0.1a0.dev123 # PyPI stats: ~150 (~4%) better rs = re.sub(r"\.?(dev|git|bzr)\.?(\d+)$", r".dev\2", rs) # Clean '.pre' (normalized from '-pre' above) instead of 'c' usage: # 0.2.pre1 -> 0.2c1 # 0.2-c1 -> 0.2c1 # 1.0preview123 -> 1.0c123 # PyPI stats: ~21 (0.62%) better rs = re.sub(r"\.?(pre|preview|-c)(\d+)$", r"c\g<2>", rs) # Tcl/Tk uses "px" for their post release markers rs = re.sub(r"p(\d+)$", r".post\1", rs) try: NormalizedVersion(rs) return rs # already rational except IrrationalVersionError: pass return None
gpl-3.0
jaxxstorm/fullerite
src/diamond/collectors/jolokia/jolokia.py
21
7813
# coding=utf-8 """ Collects JMX metrics from the Jolokia Agent. Jolokia is an HTTP bridge that provides access to JMX MBeans without the need to write Java code. See the [Reference Guide](http://www.jolokia.org/reference/html/index.html) for more information. By default, all MBeans will be queried for metrics. All numerical values will be published to Graphite; anything else will be ignored. JolokiaCollector will create a reasonable namespace for each metric based on each MBeans domain and name. e.g) ```java.lang:name=ParNew,type=GarbageCollector``` would become ```java.lang.name_ParNew.type_GarbageCollector```. #### Dependencies * Jolokia * A running JVM with Jolokia installed/configured #### Example Configuration If desired, JolokiaCollector can be configured to query specific MBeans by providing a list of ```mbeans```. If ```mbeans``` is not provided, all MBeans will be queried for metrics. Note that the mbean prefix is checked both with and without rewrites (including fixup re-writes) applied. This allows you to specify "java.lang:name=ParNew,type=GarbageCollector" (the raw name from jolokia) or "java.lang.name_ParNew.type_GarbageCollector" (the fixed name as used for output) If the ```regex``` flag is set to True, mbeans will match based on regular expressions rather than a plain textual match. The ```rewrite``` section provides a way of renaming the data keys before it sent out to the handler. The section consists of pairs of from-to regular expressions. If the resultant name is completely blank, the metric is not published, providing a way to exclude specific metrics within an mbean. 
``` host = localhost port = 8778 mbeans = "java.lang:name=ParNew,type=GarbageCollector", "org.apache.cassandra.metrics:name=WriteTimeouts,type=ClientRequestMetrics" [rewrite] java = coffee "-v\d+\.\d+\.\d+" = "-AllVersions" ".*GetS2Activities.*" = "" ``` """ import diamond.collector import json import re import urllib import urllib2 class JolokiaCollector(diamond.collector.Collector): LIST_URL = "/list" READ_URL = "/?ignoreErrors=true&p=read/%s:*" """ These domains contain MBeans that are for management purposes, or otherwise do not contain useful metrics """ IGNORE_DOMAINS = ['JMImplementation', 'jmx4perl', 'jolokia', 'com.sun.management', 'java.util.logging'] def get_default_config_help(self): config_help = super(JolokiaCollector, self).get_default_config_help() config_help.update({ 'mbeans': "Pipe delimited list of MBeans for which to collect" " stats. If not provided, all stats will" " be collected.", 'regex': "Contols if mbeans option matches with regex," " False by default.", 'host': 'Hostname', 'port': 'Port', 'rewrite': "This sub-section of the config contains pairs of" " from-to regex rewrites.", 'path': 'Path to jolokia. 
typically "jmx" or "jolokia"' }) return config_help def get_default_config(self): config = super(JolokiaCollector, self).get_default_config() config.update({ 'mbeans': [], 'regex': False, 'rewrite': [], 'path': 'jolokia', 'host': 'localhost', 'port': 8778, }) return config def __init__(self, *args, **kwargs): super(JolokiaCollector, self).__init__(*args, **kwargs) self.mbeans = [] self.rewrite = {} if isinstance(self.config['mbeans'], basestring): for mbean in self.config['mbeans'].split('|'): self.mbeans.append(mbean.strip()) elif isinstance(self.config['mbeans'], list): self.mbeans = self.config['mbeans'] if isinstance(self.config['rewrite'], dict): self.rewrite = self.config['rewrite'] def check_mbean(self, mbean): if not self.mbeans: return True mbeanfix = self.clean_up(mbean) if self.config['regex'] is not None: for chkbean in self.mbeans: if re.match(chkbean, mbean) is not None or \ re.match(chkbean, mbeanfix) is not None: return True else: if mbean in self.mbeans or mbeanfix in self.mbeans: return True def collect(self): listing = self.list_request() try: domains = listing['value'] if listing['status'] == 200 else {} for domain in domains.keys(): if domain not in self.IGNORE_DOMAINS: obj = self.read_request(domain) mbeans = obj['value'] if obj['status'] == 200 else {} for k, v in mbeans.iteritems(): if self.check_mbean(k): self.collect_bean(k, v) except KeyError: # The reponse was totally empty, or not an expected format self.log.error('Unable to retrieve MBean listing.') def read_json(self, request): json_str = request.read() return json.loads(json_str) def list_request(self): try: url = "http://%s:%s/%s%s" % (self.config['host'], self.config['port'], self.config['path'], self.LIST_URL) response = urllib2.urlopen(url) return self.read_json(response) except (urllib2.HTTPError, ValueError): self.log.error('Unable to read JSON response.') return {} def read_request(self, domain): try: url_path = self.READ_URL % self.escape_domain(domain) url = 
"http://%s:%s/%s%s" % (self.config['host'], self.config['port'], self.config['path'], url_path) response = urllib2.urlopen(url) return self.read_json(response) except (urllib2.HTTPError, ValueError): self.log.error('Unable to read JSON response.') return {} # escape the JMX domain per https://jolokia.org/reference/html/protocol.html # the Jolokia documentation suggests that, when using the p query parameter, # simply urlencoding should be sufficient, but in practice, the '!' appears # necessary (and not harmful) def escape_domain(self, domain): domain = re.sub('!', '!!', domain) domain = re.sub('/', '!/', domain) domain = re.sub('"', '!"', domain) domain = urllib.quote(domain) return domain def clean_up(self, text): text = re.sub('["\'(){}<>\[\]]', '', text) text = re.sub('[:,.]+', '.', text) text = re.sub('[^a-zA-Z0-9_.+-]+', '_', text) for (oldstr, newstr) in self.rewrite.items(): text = re.sub(oldstr, newstr, text) return text def collect_bean(self, prefix, obj): for k, v in obj.iteritems(): if type(v) in [int, float, long]: key = "%s.%s" % (prefix, k) key = self.clean_up(key) if key != "": self.publish(key, v) elif type(v) in [dict]: self.collect_bean("%s.%s" % (prefix, k), v) elif type(v) in [list]: self.interpret_bean_with_list("%s.%s" % (prefix, k), v) # There's no unambiguous way to interpret list values, so # this hook lets subclasses handle them. def interpret_bean_with_list(self, prefix, values): pass
apache-2.0